hexsha stringlengths 40 40 | size int64 22 2.4M | ext stringclasses 5
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 260 | max_stars_repo_name stringlengths 5 109 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 9 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 260 | max_issues_repo_name stringlengths 5 109 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 9 | max_issues_count float64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 260 | max_forks_repo_name stringlengths 5 109 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 9 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 22 2.4M | avg_line_length float64 5 169k | max_line_length int64 5 786k | alphanum_fraction float64 0.06 0.95 | matches listlengths 1 11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff0ad9c9a47b49af1f67e8fc83b2cd960c3a00fd | 5,995 | h | C | source/panel.h | dos-games/vanilla-shadow_warrior | bf781c586c7e9cda0cfb0b3bc56983f535cb75c4 | [
"Unlicense"
] | 18 | 2015-07-21T03:53:29.000Z | 2021-12-20T18:42:56.000Z | source/panel.h | Azarien/shadow-warrior | bf781c586c7e9cda0cfb0b3bc56983f535cb75c4 | [
"Unlicense"
] | null | null | null | source/panel.h | Azarien/shadow-warrior | bf781c586c7e9cda0cfb0b3bc56983f535cb75c4 | [
"Unlicense"
] | 6 | 2016-10-17T09:06:22.000Z | 2022-02-11T10:02:17.000Z | //-------------------------------------------------------------------------
/*
Copyright (C) 1997, 2005 - 3D Realms Entertainment
This file is part of Shadow Warrior version 1.2
Shadow Warrior is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
Original Source: 1997 - Frank Maddin and Jim Norwood
Prepared for public release: 03/28/2005 - Charlie Wiederhold, 3D Realms
*/
//-------------------------------------------------------------------------
#ifndef PANEL_H
#define PANEL_H
#include "mytypes.h"
#include "game.h"
#define FRAG_BAR 2920
#define PRI_FRONT_MAX 250
#define PRI_FRONT 192
#define PRI_MID 128
#define PRI_BACK 64
#define PRI_BACK_MAX 0
// X-macro trick: while MAKE_CONPIC_ENUM is defined, including conpic.h
// presumably expands to the enumerator list for conpic_id rather than its
// normal declarations -- confirm against conpic.h.
#define MAKE_CONPIC_ENUM
enum conpic_id
{
#include "conpic.h"
};
typedef enum conpic_id CONPIC_ID;
#undef MAKE_CONPIC_ENUM
// Well-known panel sprite identifiers (border pieces, on-screen text
// slots) -- presumably stored in PANEL_SPRITEstruct::ID so specific
// sprites can be found on the panel sprite list.
enum PanelSpriteIDs
{
    ID_BORDER_TOP = 1, ID_BORDER_BOTTOM, ID_BORDER_LEFT, ID_BORDER_RIGHT, ID_BORDER_SHADE,
    ID_TEXT, ID_TEXT2, ID_TEXT3, ID_TEXT4
};
// One state in a panel sprite's animation sequence.  Each state names a
// picture index, a tic count, an animator callback, and the next state
// (NextState), forming a linked state machine walked by the panel code.
struct PANEL_STATEstruct
{
    short picndx;   // for pip stuff in conpic.h
    long tics;      // tic count associated with this state
    long (*Animator) (PANEL_SPRITEp);   // per-state callback, passed the owning panel sprite
    PANEL_STATEp NextState;             // following state in the sequence
    ULONG flags;    // psf_* flags (defined further down in this header)
    BYTE xvel;      // x velocity -- NOTE(review): exact use not visible in this header
    BYTE yvel;      // y velocity -- NOTE(review): exact use not visible in this header
};
#define PANF_PRIMARY (BIT(0)) // denotes primary weapon
#define PANF_SECONDARY (BIT(1)) // denotes secondary weapon
#define PANF_BOB (BIT(2))
#define PANF_REST_POS (BIT(3)) // used for certain weapons - fireball
#define PANF_RELOAD (BIT(4)) // reload flag used for uzi
#define PANF_TRANS_FLIP (BIT(5)) // translucent flip - matches rotate sprite
#define PANF_ACTION_POS (BIT(6)) // used for certain weapons - fireball
#define PANF_WEAPON_HIDE (BIT(7)) // hide when climbing/driving
#define PANF_TRANSLUCENT (BIT(8)) // turn invisible
#define PANF_INVISIBLE (BIT(9)) // turn invisible
#define PANF_DEATH_HIDE (BIT(10)) // hide done when dead
#define PANF_KILL_AFTER_SHOW (BIT(11)) // kill after showing numpages times
#define PANF_SCREEN_CLIP (BIT(12)) // maintain aspect to the screen
#define PANF_STATUS_AREA (BIT(13)) // maintain aspect to the screen
#define PANF_IGNORE_START_MOST (BIT(14)) // maintain aspect to the screen
#define PANF_XFLIP (BIT(15)) // xflip
#define PANF_SUICIDE (BIT(16)) // kill myself
#define PANF_WEAPON_SPRITE (BIT(17)) // its a weapon sprite - for V mode
#define PANF_CORNER (BIT(18)) // draw from the corner
#define PANF_NOT_IN_VIEW (BIT(19)) // not in view
#define PANF_UNHIDE_SHOOT (BIT(20)) // shoot after un-hiding a weapon
#define PANF_JUMPING (BIT(21))
#define PANF_FALLING (BIT(22))
#define PANF_DRAW_BEFORE_VIEW (BIT(30)) // draw before drawrooms
#define PANF_NOT_ALL_PAGES (BIT(31)) // DONT use permanentwritesprite bit for rotatesprite
// Callback signature used by panel sprites (see PanelSpriteFunc below).
typedef long (*PANEL_SPRITE_FUNCp)(PANEL_SPRITEp);

// A secondary picture drawn at an offset from its parent panel sprite's
// center, with its own state chain and timing.
typedef struct
{
    PANEL_STATEp State;     // current state of this overlay
    long flags;
    short tics;
    short pic;
    short xoff; // from panel sprite center x
    short yoff; // from panel sprite center y
}PANEL_SPRITE_OVERLAY, *PANEL_SPRITE_OVERLAYp;
// A 2-D sprite drawn on the player's screen panel (weapon art, HUD
// pieces).  Lives on a doubly linked list (Next/Prev) and is animated by
// walking PANEL_STATE chains via the *State pointers below.
struct PANEL_SPRITEstruct
{
    PANEL_SPRITEp Next, Prev;   // doubly linked list links
    PANEL_SPRITEp sibling;      // linked companion sprite, if any
    PANEL_STATEp State, RetractState, PresentState, ActionState, RestState;
    PLAYERp PlayerP;            // player that owns this panel sprite

    // Do not change the order of this line
    // NOTE(review): the xfract/x and yfract/y pairs look like 16.16
    // fixed-point values accessed as a unit -- presumably the reason for
    // the order warnings; confirm before touching.
    USHORT xfract;
    SHORT x;
    USHORT yfract;
    SHORT y; // Do not change the order of this
    // line

    PANEL_SPRITE_OVERLAY over[8];       // overlays drawn relative to this sprite
    PANEL_SPRITE_FUNCp PanelSpriteFunc; // per-sprite update/draw callback

    short ID;       // id for finding sprite types on the list
    short picndx;   // for pip stuff in conpic.h
    short picnum;   // bypass pip stuff in conpic.h
    short x1, y1, x2, y2;   // for rotatesprites box clipping
    short vel, vel_adj;
    short numpages;

    long xorig, yorig, flags, priority;
    long scale;
    long jump_speed, jump_grav; // jumping vars
    long xspeed;

    short tics, delay;  // time vars
    short ang, rotate_ang;
    short sin_ndx, sin_amt, sin_arc_speed;
    short bob_height_shift;
    short shade, pal;
    short kill_tics;
    short WeaponType;   // remember my own weapon type for weapons with secondary function
};
// Pairs a panel-state array with its length so tables of state chains
// can be handled generically.
typedef struct
{
    PANEL_STATEp pstate;    // first element of the state array
    short state_size;       // size of the array -- presumably element count; confirm against users
}PANEL_STATE_TABLE, *PANEL_STATE_TABLEp;

// Global registry of panel state tables (defined elsewhere).
extern PANEL_STATE_TABLE PanelStateTable[];
// Panel State flags - also used for
#define psf_Invisible BIT(16)
#define psf_QuickCall BIT(23)
#define psf_Xflip BIT(24)
#define psf_ShadeHalf BIT(25)
#define psf_ShadeNone BIT(26)
// Screen border / status-bar display styles.
enum BorderTypes
{
    BORDER_NONE = 0,
    BORDER_MINI_BAR = 1,
    BORDER_BAR = 2
};
#define MICRO_SIGHT_NUM 0
#define MICRO_SIGHT 2075
#define MICRO_SHOT_NUM 2
#define MICRO_SHOT_20 2076
#define MICRO_SHOT_1 2077
#define MICRO_HEAT_NUM 1
#define MICRO_HEAT 2084
#define UZI_COPEN 2040
#define UZI_CCLOSED 2041
#define UZI_CLIT 2042
#define UZI_CRELOAD 2043
#define HEAD_MODE1 2055
#define HEAD_MODE2 2056
#define HEAD_MODE3 2057
#define SHOTGUN_AUTO_NUM 0
#define SHOTGUN_AUTO 2078
#endif
| 32.405405 | 97 | 0.656047 | [
"3d"
] |
ff1bb96f0ba0e2df1945d5ab6f0f97f09eca3697 | 4,037 | h | C | NE/HyperNEAT/NEAT/include/NEAT_FastLayeredNetwork.h | LMBernardo/HyperNEAT | 8ebee6fda17dcf20dd0c6c081dc8681557c1faad | [
"BSD-3-Clause"
] | 85 | 2015-02-08T20:36:17.000Z | 2021-11-14T20:38:31.000Z | NE/HyperNEAT/NEAT/include/NEAT_FastLayeredNetwork.h | LMBernardo/HyperNEAT | 8ebee6fda17dcf20dd0c6c081dc8681557c1faad | [
"BSD-3-Clause"
] | 9 | 2015-01-28T16:33:19.000Z | 2020-04-12T23:03:28.000Z | NE/HyperNEAT/NEAT/include/NEAT_FastLayeredNetwork.h | LMBernardo/HyperNEAT | 8ebee6fda17dcf20dd0c6c081dc8681557c1faad | [
"BSD-3-Clause"
] | 27 | 2015-01-28T16:33:30.000Z | 2021-08-12T05:04:39.000Z | #ifndef FASTLAYEREDNETWORK_H_INCLUDED
#define FASTLAYEREDNETWORK_H_INCLUDED
#include "NEAT_Network.h"
#include "NEAT_NetworkNode.h"
#include "NEAT_NetworkLink.h"
#include "NEAT_NetworkIndexedLink.h"
namespace NEAT
{
template<class Type>
class NetworkLayer
{
public:
string name;
vector<int> fromLayers;
vector< vector< Type > > fromWeights;
vector<Type> nodeValues;
//The node stride is the number of nodes in a single row of a 2-D sheet
int nodeStride;
NetworkLayer()
{
}
NetworkLayer(
const string &_name,
int numNodes,
int _nodeStride,
const vector<int> &_fromLayers,
const vector<JGTL::Vector2<int> > &layerSizes
)
:
name(_name),
nodeStride(_nodeStride),
fromLayers(_fromLayers)
{
nodeValues.resize(numNodes,0.0f);
for(int a=0;a<int(fromLayers.size());a++)
{
fromWeights.push_back(
vector< Type >(
nodeValues.size()*layerSizes[a].x*layerSizes[a].y,0.0f
)
);
}
}
inline void initialize()
{
memset(&nodeValues[0],0,sizeof(Type)*nodeValues.size());
}
};
/**
* The FastLayeredNetwork class is designed to be faster at the cost
* of being less dynamic. Adding/Removing links and nodes
* is not supported with this network.
*/
    template<class Type>
    class FastLayeredNetwork : public Network<Type>
    {
    protected:
        // Layers of the network; each layer records which layers feed it.
        vector<NetworkLayer<Type> > layers;

    public:
        /**
         * (Constructor) Create a Network with the inputted topology
         */
        NEAT_DLL_EXPORT FastLayeredNetwork(
            const vector<NetworkLayer<Type> > &_layers
        );

        /**
         * (Constructor) Empty Constructor
         */
        NEAT_DLL_EXPORT FastLayeredNetwork();

        NEAT_DLL_EXPORT virtual ~FastLayeredNetwork();

        //NetworkNode *getNode(const string name);

        // Linear search for the layer with the given name.
        // Throws if no layer matches.
        inline int getLayerIndex(const string &layerName)
        {
            for(int a=0;a<(int)layers.size();a++)
            {
                if(layers[a].name==layerName)
                {
                    return a;
                }
            }

            throw CREATE_LOCATEDEXCEPTION_INFO("OOPS");
        }

        /**
         * hasNode: returns whether the specified node exists
         */
        NEAT_DLL_EXPORT bool hasNode(const Node &nodeIndex);

        /**
         * getValue: gets the value for a specified node
         */
        NEAT_DLL_EXPORT Type getValue(const Node &nodeIndex);

        /**
         * setValue: sets the value for a specified node
         */
        NEAT_DLL_EXPORT void setValue(const Node &nodeIndex,Type newValue);

        /**
         * getLink: gets the link weight between two specified nodes
         */
        NEAT_DLL_EXPORT Type getLink(const Node &fromNodeIndex,const Node &toNodeIndex);

        /**
         * setLink: sets the link weight between two specified nodes
         */
        NEAT_DLL_EXPORT void setLink(const Node &fromNodeIndex,const Node &toNodeIndex,Type weight);

        /**
         * reinitialize: This resets the state of the network
         * to its initial state
         */
        NEAT_DLL_EXPORT void reinitialize();

        /**
         * update: This updates the network.
         * If the network has not been updated since construction or
         * reinitialize(), the network will be activated. This means
         * it will update (1+ExtraActivationUpdates+iterations) times!
         * Otherwise, it will update (iterations) times. If you do not
         * want the extra updates, call dummyActivation() before the first
         * update.
         */
        NEAT_DLL_EXPORT virtual void update();

    protected:
    };
}
#endif // FASTNETWORK_H_INCLUDED
| 27.462585 | 100 | 0.56106 | [
"vector"
] |
ff1ddf46dff2545b74a130bf4230fac365ecaf3c | 1,104 | h | C | ACW Project Framework/ColourShader.h | Lentono/PhysicsSimulationACW | a73d22571a0742fd960740f04689f6ee095c3986 | [
"MIT"
] | 1 | 2021-06-06T10:29:12.000Z | 2021-06-06T10:29:12.000Z | ACW Project Framework/ColourShader.h | Lentono/PhysicsSimulationACW | a73d22571a0742fd960740f04689f6ee095c3986 | [
"MIT"
] | null | null | null | ACW Project Framework/ColourShader.h | Lentono/PhysicsSimulationACW | a73d22571a0742fd960740f04689f6ee095c3986 | [
"MIT"
] | null | null | null | #pragma once
#include <d3d11.h>
#include <d3dcompiler.h>
#include <DirectXMath.h>
#include <fstream>
#include "Shader.h"
using namespace std;
using namespace DirectX;
// D3D11 shader wrapper that draws geometry with a colour pixel shader.
// Declares the full rule of five because it holds a raw COM pointer
// (m_inputLayout) -- presumably released in the destructor; confirm in
// the .cpp.
class ColourShader : public Shader
{
public:
	ColourShader(ID3D11Device* device, HWND hwnd); // Default Constructor
	ColourShader(const ColourShader& other); // Copy Constructor
	ColourShader(ColourShader && other) noexcept; // Move Constructor
	~ColourShader() override;

	ColourShader& operator = (const ColourShader& other); // Copy Assignment Operator
	ColourShader& operator = (ColourShader && other) noexcept; // Move Assignment Operator

	// Draws indexCount indices with the given transforms, texture,
	// diffuse colour and light direction.
	bool Render(ID3D11DeviceContext* deviceContext, int indexCount, XMMATRIX worldMatrix, XMMATRIX viewMatrix, XMMATRIX projectionMatrix, ID3D11ShaderResourceView* texture, XMFLOAT4 diffuseColour, XMFLOAT3 lightDirection) override;

private:
	// Presumably uploads the matrices to the shader's constant buffers
	// before drawing -- see the implementation file.
	bool SetShaderParameters(ID3D11DeviceContext* deviceContext, XMMATRIX worldMatrix, XMMATRIX viewMatrix, XMMATRIX projectionMatrix);

	void RenderShader(ID3D11DeviceContext* deviceContext, int indexCount) const;

	ID3D11InputLayout* m_inputLayout;
}; | 35.612903 | 228 | 0.803442 | [
"render"
] |
ff1deaacf0445632ebd952fb3c7c7547a23657dd | 4,105 | h | C | work/mapper/libstreetmap/headers/OSMID.h | Muhammad-Abdullah-Bajwa/6ixMaps | b85e6c368ac7aaa15a0f02c5275c78ece9363438 | [
"MIT"
] | null | null | null | work/mapper/libstreetmap/headers/OSMID.h | Muhammad-Abdullah-Bajwa/6ixMaps | b85e6c368ac7aaa15a0f02c5275c78ece9363438 | [
"MIT"
] | null | null | null | work/mapper/libstreetmap/headers/OSMID.h | Muhammad-Abdullah-Bajwa/6ixMaps | b85e6c368ac7aaa15a0f02c5275c78ece9363438 | [
"MIT"
] | null | null | null | /*
* Copyright 2018 University of Toronto
*
* Permission is hereby granted, to use this software and associated
* documentation files (the "Software") in course work at the University
* of Toronto, or for personal use. Other uses are prohibited, in
* particular the distribution of the Software either publicly or to third
* parties.
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* OSMID.h
*
* Created on: Jan 5, 2017
* Author: jcassidy
*/
#ifndef GOLDEN_SOLUTION_LIBSTREETSDATABASE_SRC_OSMID_H_
#define GOLDEN_SOLUTION_LIBSTREETSDATABASE_SRC_OSMID_H_
#include <cinttypes>
#include <boost/serialization/base_object.hpp>
#include <functional>
// forward declarations of the entity types
class OSMEntity;
class OSMNode;
class OSMWay;
class OSMRelation;
/** OSMID is an opaque typedef around unsigned long long for OSMID to distinguish it from other types, eg sizes & vector indices
* and prevent unintentional casts.
*/
/** Opaque wrapper around a 64-bit OpenStreetMap element ID.  Keeps OSM
 * IDs distinct from sizes and vector indices and blocks unintentional
 * casts: conversion to/from uint64_t is explicit only.  The value -1ULL
 * is reserved as the invalid ID (see Invalid / valid()).
 */
class OSMID
{
public:
    explicit constexpr OSMID(uint64_t id=-1ULL) : m_osmID(id){}
    OSMID(const OSMID&) = default;
    OSMID& operator=(const OSMID&) = default;

    /** Explicit conversion back to the raw 64-bit value. */
    explicit operator uint64_t() const { return m_osmID; }

    /** Comparisons forward directly to the underlying integer. */
    bool operator==(OSMID rhs) const { return m_osmID == rhs.m_osmID; }
    bool operator!=(OSMID rhs) const { return m_osmID != rhs.m_osmID; }
    bool operator<(OSMID rhs) const { return m_osmID < rhs.m_osmID; }

    /** True unless this ID still holds the reserved invalid value. */
    bool valid() const { return *this != Invalid; }

    static const OSMID Invalid;

private:
    uint64_t m_osmID=-1ULL;

    friend class boost::serialization::access;
    template<typename Archive>void serialize(Archive& ar,const unsigned)
    { ar & m_osmID; }
};
/** An OSMID tagged with the kind of OSM entity (node / way / relation)
 * it refers to.
 */
class TypedOSMID : public OSMID
{
public:
    enum EntityType
    {
        Unknown = 0,
        Node,
        Way,
        Relation
    };

    TypedOSMID(){}
    TypedOSMID(EntityType type_,OSMID id) :
        OSMID(id),
        m_type(type_)
    {
    }

    /** The entity kind this ID refers to (Unknown if default-constructed). */
    EntityType type() const { return m_type; }

private:
    EntityType m_type=Unknown;

    // One character per EntityType -- presumably used by operator<<;
    // defined in the implementation file.
    static const char typeChar[4];

    friend class boost::serialization::access;
    template<class Archive>void serialize(Archive& ar,const unsigned)
    { ar & boost::serialization::base_object<OSMID>(*this) & m_type; }

    friend std::ostream& operator<<(std::ostream& os,TypedOSMID tid);
};

// Stream output for plain and typed OSM IDs (defined elsewhere).
std::ostream& operator<<(std::ostream& os,OSMID id);
std::ostream& operator<<(std::ostream& os,TypedOSMID tid);
/** std::hash<T> instances for OSMID and TypedOSMID to support unordered_set/_map
*/
namespace std {

// Hash support so OSMID can key unordered_set / unordered_map.
template<>struct hash<OSMID>
{
    std::size_t operator()(OSMID id) const;
    std::size_t operator()(std::size_t i) const;
};

// Hash the underlying 64-bit value.
inline std::size_t hash<OSMID>::operator()(OSMID id) const { return std::hash<uint64_t>()(uint64_t(id)); }
// Identity overload for keys that are already a size_t.
inline std::size_t hash<OSMID>::operator()(std::size_t i) const { return i; }

// Hash support for TypedOSMID: mixes the ID hash with the entity type.
template<>struct hash<TypedOSMID>
{
    std::size_t operator()(TypedOSMID tid) const;
    std::size_t operator()(std::size_t i) const;
};

inline std::size_t hash<TypedOSMID>::operator()(TypedOSMID tid) const
{
    // The original combined the two hashes with bitwise OR, which drives
    // result bits toward 1 and makes distinct (id, type) pairs collide
    // needlessly.  XOR with a shift (boost::hash_combine style) mixes
    // the values without that bias.
    const std::size_t idHash = std::hash<uint64_t>()(uint64_t(tid));
    const std::size_t typeHash = std::hash<typename std::underlying_type<TypedOSMID::EntityType>::type>()(tid.type());
    return idHash ^ (typeHash << 1);
}

inline std::size_t hash<TypedOSMID>::operator()(std::size_t i) const { return i; }

}
| 26.483871 | 137 | 0.717661 | [
"vector"
] |
ff2182caafb4b8a705394350b2df17b9b2522baf | 8,744 | h | C | source/laplace/math/vector.h | automainint/laplace | 66956df506918d48d79527e524ff606bb197d8af | [
"MIT"
] | 3 | 2021-05-17T21:15:28.000Z | 2021-09-06T23:01:52.000Z | source/laplace/math/vector.h | automainint/laplace | 66956df506918d48d79527e524ff606bb197d8af | [
"MIT"
] | 33 | 2021-10-20T10:47:07.000Z | 2022-02-26T02:24:20.000Z | source/laplace/math/vector.h | automainint/laplace | 66956df506918d48d79527e524ff606bb197d8af | [
"MIT"
] | null | null | null | /* Copyright (c) 2021 Mitya Selivanov
*
* This file is part of the Laplace project.
*
* Laplace is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the MIT License for more details.
*/
#ifndef laplace_math_vector_h
#define laplace_math_vector_h
#include "basic.h"
#include <functional>
#include <initializer_list>
namespace laplace::math {
  /*  Raw fixed-size vector storage: a component array plus compile-time
   *  size.  Member definitions live in vector.impl.h. */
  template <sl::whole size_, typename type_>
  class basic_vector {
  public:
    using type = type_;

    static constexpr sl::whole size = size_;

    /*  Components; value-initialized (zero for arithmetic types). */
    type v[size] = {};

    constexpr basic_vector() noexcept = default;
    /*  Component-wise construction from a braced list. */
    constexpr basic_vector(std::initializer_list<type> v_) noexcept;
    /*  Single-value construction -- presumably fills every component
     *  with x; see vector.impl.h. */
    constexpr explicit basic_vector(type x) noexcept;
  };
  /*  General n-component vector: basic_vector storage plus indexed
   *  element access.  Definitions are in vector.impl.h. */
  template <sl::whole size_, typename type_>
  class vector : public basic_vector<size_, type_> {
  public:
    using basic_vector<size_, type_>::basic_vector;

    /*  Mutable / read-only access to the component at index. */
    [[nodiscard]] constexpr auto operator[](sl::whole index) noexcept
        -> type_ &;

    [[nodiscard]] constexpr auto operator[](
        sl::whole index) const noexcept -> type_;
  };
  /*  2-component specialization with named x/y accessors. */
  template <typename type_>
  class vector<2, type_> : public basic_vector<2, type_> {
  public:
    /*  Component indices into basic_vector::v. */
    enum _indices : sl::whole { n_x = 0, n_y };

    using basic_vector<2, type_>::basic_vector;

    [[nodiscard]] constexpr auto x() noexcept -> type_ &;
    [[nodiscard]] constexpr auto y() noexcept -> type_ &;

    [[nodiscard]] constexpr auto x() const noexcept -> type_;
    [[nodiscard]] constexpr auto y() const noexcept -> type_;

    [[nodiscard]] constexpr auto operator[](sl::whole index) noexcept
        -> type_ &;

    [[nodiscard]] constexpr auto operator[](
        sl::whole index) const noexcept -> type_;
  };
  /*  3-component specialization with named x/y/z accessors. */
  template <typename type_>
  class vector<3, type_> : public basic_vector<3, type_> {
  public:
    /*  Component indices into basic_vector::v. */
    enum _indices : sl::whole { n_x = 0, n_y, n_z };

    using basic_vector<3, type_>::basic_vector;

    [[nodiscard]] constexpr auto x() noexcept -> type_ &;
    [[nodiscard]] constexpr auto y() noexcept -> type_ &;
    [[nodiscard]] constexpr auto z() noexcept -> type_ &;

    [[nodiscard]] constexpr auto x() const noexcept -> type_;
    [[nodiscard]] constexpr auto y() const noexcept -> type_;
    [[nodiscard]] constexpr auto z() const noexcept -> type_;

    [[nodiscard]] constexpr auto operator[](sl::whole index) noexcept
        -> type_ &;

    [[nodiscard]] constexpr auto operator[](
        sl::whole index) const noexcept -> type_;
  };
  /*  4-component specialization with named x/y/z/w accessors. */
  template <typename type_>
  class vector<4, type_> : public basic_vector<4, type_> {
  public:
    /*  Component indices into basic_vector::v. */
    enum _indices : sl::whole { n_x = 0, n_y, n_z, n_w };

    using basic_vector<4, type_>::basic_vector;

    [[nodiscard]] constexpr auto x() noexcept -> type_ &;
    [[nodiscard]] constexpr auto y() noexcept -> type_ &;
    [[nodiscard]] constexpr auto z() noexcept -> type_ &;
    [[nodiscard]] constexpr auto w() noexcept -> type_ &;

    [[nodiscard]] constexpr auto x() const noexcept -> type_;
    [[nodiscard]] constexpr auto y() const noexcept -> type_;
    [[nodiscard]] constexpr auto z() const noexcept -> type_;
    [[nodiscard]] constexpr auto w() const noexcept -> type_;

    [[nodiscard]] constexpr auto operator[](sl::whole index) noexcept
        -> type_ &;

    [[nodiscard]] constexpr auto operator[](
        sl::whole index) const noexcept -> type_;
  };
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator==(
vector<size_, type_> a, vector<size_, type_> b) noexcept
-> bool;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator!=(
vector<size_, type_> a, vector<size_, type_> b) noexcept
-> bool;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator+(
vector<size_, type_> a) noexcept -> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator-(
vector<size_, type_> a) noexcept -> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator+(
vector<size_, type_> a, vector<size_, type_> b) noexcept
-> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator-(
vector<size_, type_> a, vector<size_, type_> b) noexcept
-> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator*(vector<size_, type_> a,
type_ b) noexcept
-> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator/(vector<size_, type_> a,
type_ b) noexcept
-> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator*(
type_ a, vector<size_, type_> b) noexcept
-> vector<size_, type_>;
template <sl::whole size_, typename type_>
[[nodiscard]] constexpr auto operator/(
type_ a, vector<size_, type_> b) noexcept
-> vector<size_, type_>;
template <sl::whole size_, typename type_>
constexpr auto operator+=(vector<size_, type_> &a,
vector<size_, type_> b) noexcept
-> vector<size_, type_> &;
template <sl::whole size_, typename type_>
constexpr auto operator-=(vector<size_, type_> &a,
vector<size_, type_> b) noexcept
-> vector<size_, type_> &;
template <sl::whole size_, typename type_>
constexpr auto operator*=(vector<size_, type_> &a, type_ b) noexcept
-> vector<size_, type_> &;
template <sl::whole size_, typename type_>
constexpr auto operator/=(vector<size_, type_> &a, type_ b) noexcept
-> vector<size_, type_> &;
template <vector_type type_>
[[nodiscard]] constexpr auto add(type_ a, type_ b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto sub(type_ a, type_ b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto mul(type_ a,
elem_type<type_> b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto div(type_ a,
elem_type<type_> b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto mul(elem_type<type_> a,
type_ b) noexcept -> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto div(elem_type<type_> a,
type_ b) noexcept -> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto mul_by_elem(type_ a, type_ b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto div_by_elem(type_ a, type_ b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto dot(type_ a, type_ b) noexcept
-> elem_type<type_>;
template <vector_type type_>
[[nodiscard]] constexpr auto cross(type_ a, type_ b) noexcept
-> type_;
template <vector_type type_>
[[nodiscard]] constexpr auto square_length(type_ a) noexcept
-> elem_type<type_>;
template <vector_type type_, typename op_>
[[nodiscard]] constexpr auto op(type_ a, type_ b, op_ fn) noexcept
-> type_;
template <vector_type type_, typename op_>
[[nodiscard]] constexpr auto op(type_ a,
elem_type<type_> b,
op_ fn) noexcept -> type_;
template <vector_type type_, typename op_>
[[nodiscard]] constexpr auto op(elem_type<type_> a,
type_ b,
op_ fn) noexcept -> type_;
template <vector_type type_, typename add_, typename mul_>
[[nodiscard]] constexpr auto dot(type_ a,
type_ b,
add_ fn_add,
mul_ fn_mul) noexcept
-> elem_type<type_>;
template <vector_type type_, typename sub_, typename mul_>
[[nodiscard]] constexpr auto cross(type_ a,
type_ b,
sub_ fn_sub,
mul_ fn_mul) noexcept -> type_;
template <vector_type type_, typename add_, typename mul_>
[[nodiscard]] constexpr auto square_length(type_ a,
add_ fn_add,
mul_ fn_mul) noexcept
-> elem_type<type_>;
}
#include "vector.impl.h"
#endif
| 33.630769 | 70 | 0.615279 | [
"vector"
] |
ff226710e6739ffa724f52996343b475bd4c3cf6 | 4,752 | h | C | Sources/Elastos/Packages/Apps/Settings/inc/elastos/droid/settings/CCryptKeeperSettings.h | jingcao80/Elastos | d0f39852356bdaf3a1234743b86364493a0441bc | [
"Apache-2.0"
] | 7 | 2017-07-13T10:34:54.000Z | 2021-04-16T05:40:35.000Z | Sources/Elastos/Packages/Apps/Settings/inc/elastos/droid/settings/CCryptKeeperSettings.h | jingcao80/Elastos | d0f39852356bdaf3a1234743b86364493a0441bc | [
"Apache-2.0"
] | null | null | null | Sources/Elastos/Packages/Apps/Settings/inc/elastos/droid/settings/CCryptKeeperSettings.h | jingcao80/Elastos | d0f39852356bdaf3a1234743b86364493a0441bc | [
"Apache-2.0"
] | 9 | 2017-07-13T12:33:20.000Z | 2021-06-19T02:46:48.000Z | //=========================================================================
// Copyright (C) 2012 The Elastos Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//=========================================================================
#ifndef __ELASTOS_DROID_SETTINGS_CCRYPTKEEPERSETTINGS_H__
#define __ELASTOS_DROID_SETTINGS_CCRYPTKEEPERSETTINGS_H__
#include "Elastos.Droid.Widget.h"
#include "_Elastos_Droid_Settings_CCryptKeeperSettings.h"
#include "elastos/droid/app/Fragment.h"
#include "elastos/droid/content/BroadcastReceiver.h"
using Elastos::Droid::App::Fragment;
using Elastos::Droid::Content::BroadcastReceiver;
using Elastos::Droid::Content::IBroadcastReceiver;
using Elastos::Droid::Content::IContext;
using Elastos::Droid::Content::IIntent;
using Elastos::Droid::Content::IIntentFilter;
using Elastos::Droid::Os::IBundle;
using Elastos::Droid::View::ILayoutInflater;
using Elastos::Droid::View::IView;
using Elastos::Droid::View::IViewGroup;
using Elastos::Droid::View::IViewOnClickListener;
using Elastos::Droid::Widget::IButton;
namespace Elastos {
namespace Droid {
namespace Settings {
// Settings fragment that lets the user start full-device encryption,
// guarded by battery level, external power and keyguard confirmation.
CarClass(CCryptKeeperSettings)
    , public Fragment
{
private:
    // Receives broadcast intents (filtered by mIntentFilter) and forwards
    // them to the host fragment -- presumably to refresh the power /
    // battery warnings; confirm against the implementation.
    class InitBroadcastReceiver
        : public BroadcastReceiver
    {
    public:
        InitBroadcastReceiver(
            /* [in] */ CCryptKeeperSettings* host);

        ~InitBroadcastReceiver();

        //@Override
        CARAPI OnReceive(
            /* [in] */ IContext* context,
            /* [in] */ IIntent* intent);

    private:
        CCryptKeeperSettings* mHost;    // back-pointer to owning fragment (not owned)
    };

    // Click listener attached to the initiate-encryption button.
    class InitButtonOnClickListener
        : public Object
        , public IViewOnClickListener
    {
    public:
        CAR_INTERFACE_DECL()

        InitButtonOnClickListener(
            /* [in] */ CCryptKeeperSettings* host);

        ~InitButtonOnClickListener();

        CARAPI OnClick(
            /* [in] */ IView* v);

    private:
        CCryptKeeperSettings* mHost;    // back-pointer to owning fragment (not owned)
    };

public:
    CAR_OBJECT_DECL()

    CCryptKeeperSettings();

    ~CCryptKeeperSettings();

    CARAPI constructor();

    //@Override
    CARAPI OnCreateView(
        /* [in] */ ILayoutInflater* inflater,
        /* [in] */ IViewGroup* container,
        /* [in] */ IBundle* savedState,
        /* [out] */ IView** result);

    //@Override
    CARAPI OnResume();

    //@Override
    CARAPI OnPause();

    /**
     * If encryption is already started, and this launched via a "start encryption" intent,
     * then exit immediately - it's already up and running, so there's no point in "starting" it.
     */
    //@Override
    CARAPI OnActivityCreated(
        /* [in] */ IBundle* savedInstanceState);

    //@Override
    CARAPI OnActivityResult(
        /* [in] */ Int32 requestCode,
        /* [in] */ Int32 resultCode,
        /* [in] */ IIntent* data);

private:
    /**
     * Keyguard validation is run using the standard {@link ConfirmLockPattern}
     * component as a subactivity
     * @param request the request code to be returned once confirmation finishes
     * @return TRUE if confirmation launched
     */
    CARAPI_(Boolean) RunKeyguardConfirmation(
        /* [in] */ Int32 request);

    // Shows the final confirmation step for the given encryption type and
    // password before encryption actually starts.
    CARAPI_(void) ShowFinalConfirmation(
        /* [in] */ Int32 type,
        /* [in] */ const String& password);

private:
    static const String TAG;                // log tag
    static const Int32 KEYGUARD_REQUEST;    // request code used by RunKeyguardConfirmation

    // Minimum battery charge level (in percent) to launch encryption. If the battery charge is
    // lower than this, encryption should not be activated.
    static const Int32 MIN_BATTERY_LEVEL;

    AutoPtr<IView> mContentView;        // root content view
    AutoPtr<IButton> mInitiateButton;   // the initiate-encryption button
    AutoPtr<IView> mPowerWarning;       // "connect power" warning view
    AutoPtr<IView> mBatteryWarning;     // "battery too low" warning view
    AutoPtr<IIntentFilter> mIntentFilter;
    AutoPtr<IBroadcastReceiver> mIntentReceiver;

    /**
     * If the user clicks to begin the reset sequence, we next require a
     * keyguard confirmation if the user has currently enabled one. If there
     * is no keyguard available, we prompt the user to set a password.
     */
    AutoPtr<InitButtonOnClickListener> mInitiateListener;
};
} // namespace Settings
} // namespace Droid
} // namespace Elastos
#endif //__ELASTOS_DROID_SETTINGS_CCRYPTKEEPERSETTINGS_H__ | 29.333333 | 97 | 0.660985 | [
"object"
] |
ff23409b671093700551f46da48098d2b177635a | 1,277 | h | C | ps8/ps8_1/dha.h | lord-pradhan/CPP_graphics | 1d4ea35c266b987947bf94bd07f4fa6c1c7684f2 | [
"MIT"
] | null | null | null | ps8/ps8_1/dha.h | lord-pradhan/CPP_graphics | 1d4ea35c266b987947bf94bd07f4fa6c1c7684f2 | [
"MIT"
] | null | null | null | ps8/ps8_1/dha.h | lord-pradhan/CPP_graphics | 1d4ea35c266b987947bf94bd07f4fa6c1c7684f2 | [
"MIT"
] | null | null | null | #ifndef DHA_IS_INCLUDED
#define DHA_IS_INCLUDED
#include <ysshellext.h>
#include <unordered_map>
#include <vector>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
// Input parameters are a mesh and dihedral-angle threshold.
// The return value is a map from a polygon search key to a segment identifier.
std::unordered_map <YSHASHKEY,int> MakeDihedralAngleBasedSegmentation(const YsShellExt &mesh,
const double dhaThr);
void segmentDFS(const YsShellExt &mesh, YsShellExt::PolygonHandle& startPoly,
std::unordered_map <YSHASHKEY,int>& faceGrpIn, int groupIDIn, const double dhaThr);
double angleVecs(YsVec3 in1, YsVec3 in2);
// Input parameters are a mesh and the segmentation (face grouping) obtained from MakeDihedralAngleBasedSegmentaion.
// Output is a vertex array that can be drawn as GL_LINES.
std::vector <float> MakeGroupBoundaryVertexArray(const YsShellExt &mesh,const std::unordered_map <YSHASHKEY,int> &faceGroupInfo);
// For bonus questions:
// Input parameters are a mesh and the segmentation (face grouping) obtained from MakeDihedralAngleBasedSegmentaion.
// Paint polygons so that no two neighboring face groups have a same color.
void MakeFaceGroupColorMap(YsShellExt &mesh,const std::unordered_map <YSHASHKEY,int> &faceGroupInfo);
#endif
| 41.193548 | 129 | 0.797964 | [
"mesh",
"vector"
] |
ff417d6693fa094135d52bf1f47f278bd617853f | 5,665 | h | C | aws-cpp-sdk-route53domains/include/aws/route53domains/model/SortCondition.h | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | [
"Apache-2.0"
] | 1 | 2022-02-10T08:06:54.000Z | 2022-02-10T08:06:54.000Z | aws-cpp-sdk-route53domains/include/aws/route53domains/model/SortCondition.h | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | [
"Apache-2.0"
] | 1 | 2022-01-03T23:59:37.000Z | 2022-01-03T23:59:37.000Z | aws-cpp-sdk-route53domains/include/aws/route53domains/model/SortCondition.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2021-11-09T11:58:03.000Z | 2021-11-09T11:58:03.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/route53domains/Route53Domains_EXPORTS.h>
#include <aws/route53domains/model/ListDomainsAttributeName.h>
#include <aws/route53domains/model/SortOrder.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Route53Domains
{
namespace Model
{
/**
* <p>Information for sorting a list of domains.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/route53domains-2014-05-15/SortCondition">AWS
* API Reference</a></p>
*/
class AWS_ROUTE53DOMAINS_API SortCondition
{
public:
SortCondition();
SortCondition(Aws::Utils::Json::JsonView jsonValue);
SortCondition& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Field to be used for sorting the list of domains. It can be either the name
* or the expiration for a domain. Note that if <code>filterCondition</code> is
* used in the same <a
* href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains__ListDomains.html">ListDomains</a>
* call, the field used for sorting has to be the same as the field used for
* filtering.</p>
*/
inline const ListDomainsAttributeName& GetName() const{ return m_name; }
/**
* <p>Field to be used for sorting the list of domains. It can be either the name
* or the expiration for a domain. Note that if <code>filterCondition</code> is
* used in the same <a
* href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains__ListDomains.html">ListDomains</a>
* call, the field used for sorting has to be the same as the field used for
* filtering.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>Field to be used for sorting the list of domains. It can be either the name
* or the expiration for a domain. Note that if <code>filterCondition</code> is
* used in the same <a
* href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains__ListDomains.html">ListDomains</a>
* call, the field used for sorting has to be the same as the field used for
* filtering.</p>
*/
inline void SetName(const ListDomainsAttributeName& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>Field to be used for sorting the list of domains. It can be either the name
* or the expiration for a domain. Note that if <code>filterCondition</code> is
* used in the same <a
* href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains__ListDomains.html">ListDomains</a>
* call, the field used for sorting has to be the same as the field used for
* filtering.</p>
*/
inline void SetName(ListDomainsAttributeName&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>Field to be used for sorting the list of domains. It can be either the name
* or the expiration for a domain. Note that if <code>filterCondition</code> is
* used in the same <a
* href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains__ListDomains.html">ListDomains</a>
* call, the field used for sorting has to be the same as the field used for
* filtering.</p>
*/
inline SortCondition& WithName(const ListDomainsAttributeName& value) { SetName(value); return *this;}
/**
* <p>Field to be used for sorting the list of domains. It can be either the name
* or the expiration for a domain. Note that if <code>filterCondition</code> is
* used in the same <a
* href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains__ListDomains.html">ListDomains</a>
* call, the field used for sorting has to be the same as the field used for
* filtering.</p>
*/
inline SortCondition& WithName(ListDomainsAttributeName&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The sort order for a list of domains. Either ascending (ASC) or descending
* (DES).</p>
*/
inline const SortOrder& GetSortOrder() const{ return m_sortOrder; }
/**
* <p>The sort order for a list of domains. Either ascending (ASC) or descending
* (DES).</p>
*/
inline bool SortOrderHasBeenSet() const { return m_sortOrderHasBeenSet; }
/**
* <p>The sort order for a list of domains. Either ascending (ASC) or descending
* (DES).</p>
*/
inline void SetSortOrder(const SortOrder& value) { m_sortOrderHasBeenSet = true; m_sortOrder = value; }
/**
* <p>The sort order for a list of domains. Either ascending (ASC) or descending
* (DES).</p>
*/
inline void SetSortOrder(SortOrder&& value) { m_sortOrderHasBeenSet = true; m_sortOrder = std::move(value); }
/**
* <p>The sort order for a list of domains. Either ascending (ASC) or descending
* (DES).</p>
*/
inline SortCondition& WithSortOrder(const SortOrder& value) { SetSortOrder(value); return *this;}
/**
* <p>The sort order for a list of domains. Either ascending (ASC) or descending
* (DES).</p>
*/
inline SortCondition& WithSortOrder(SortOrder&& value) { SetSortOrder(std::move(value)); return *this;}
private:
ListDomainsAttributeName m_name;
bool m_nameHasBeenSet;
SortOrder m_sortOrder;
bool m_sortOrderHasBeenSet;
};
} // namespace Model
} // namespace Route53Domains
} // namespace Aws
| 37.766667 | 115 | 0.681377 | [
"model"
] |
ff52bd72ccb75cd08152fc6295d825b5421bc340 | 3,448 | h | C | include/org/apache/lucene/search/ScoreCachingWrappingScorer.h | lukhnos/objclucene | 29c7189a0b30ab3d3dd4c8ed148235ee296128b7 | [
"MIT"
] | 9 | 2016-01-13T05:38:05.000Z | 2020-06-04T23:05:03.000Z | include/org/apache/lucene/search/ScoreCachingWrappingScorer.h | lukhnos/objclucene | 29c7189a0b30ab3d3dd4c8ed148235ee296128b7 | [
"MIT"
] | 4 | 2016-05-12T10:40:53.000Z | 2016-06-11T19:08:33.000Z | include/org/apache/lucene/search/ScoreCachingWrappingScorer.h | lukhnos/objclucene | 29c7189a0b30ab3d3dd4c8ed148235ee296128b7 | [
"MIT"
] | 5 | 2016-01-13T05:37:39.000Z | 2019-07-27T16:53:10.000Z | //
// Generated by the J2ObjC translator. DO NOT EDIT!
// source: ./core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
//
#include "J2ObjC_header.h"
#pragma push_macro("INCLUDE_ALL_OrgApacheLuceneSearchScoreCachingWrappingScorer")
#ifdef RESTRICT_OrgApacheLuceneSearchScoreCachingWrappingScorer
#define INCLUDE_ALL_OrgApacheLuceneSearchScoreCachingWrappingScorer 0
#else
#define INCLUDE_ALL_OrgApacheLuceneSearchScoreCachingWrappingScorer 1
#endif
#undef RESTRICT_OrgApacheLuceneSearchScoreCachingWrappingScorer
#if __has_feature(nullability)
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wnullability"
#pragma GCC diagnostic ignored "-Wnullability-completeness"
#endif
#if !defined (OrgApacheLuceneSearchScoreCachingWrappingScorer_) && (INCLUDE_ALL_OrgApacheLuceneSearchScoreCachingWrappingScorer || defined(INCLUDE_OrgApacheLuceneSearchScoreCachingWrappingScorer))
#define OrgApacheLuceneSearchScoreCachingWrappingScorer_
#define RESTRICT_OrgApacheLuceneSearchFilterScorer 1
#define INCLUDE_OrgApacheLuceneSearchFilterScorer 1
#include "org/apache/lucene/search/FilterScorer.h"
@class OrgApacheLuceneSearchScorer;
@class OrgApacheLuceneSearchWeight;
@protocol JavaUtilCollection;
/*!
@brief A <code>Scorer</code> which wraps another scorer and caches the score of the
current document.Successive calls to <code>score()</code> will return the same
result and will not invoke the wrapped Scorer's score() method, unless the
current document has changed.
<br>
This class might be useful due to the changes done to the <code>Collector</code>
interface, in which the score is not computed for a document by default, only
if the collector requests it. Some collectors may need to use the score in
several places, however all they have in hand is a <code>Scorer</code> object, and
might end up computing the score of a document more than once.
*/
@interface OrgApacheLuceneSearchScoreCachingWrappingScorer : OrgApacheLuceneSearchFilterScorer
#pragma mark Public
/*!
@brief Creates a new instance by wrapping the given scorer.
*/
- (instancetype __nonnull)initWithOrgApacheLuceneSearchScorer:(OrgApacheLuceneSearchScorer *)scorer;
- (id<JavaUtilCollection>)getChildren;
- (jfloat)score;
// Disallowed inherited constructors, do not use.
- (instancetype __nonnull)initWithOrgApacheLuceneSearchScorer:(OrgApacheLuceneSearchScorer *)arg0
withOrgApacheLuceneSearchWeight:(OrgApacheLuceneSearchWeight *)arg1 NS_UNAVAILABLE;
@end
J2OBJC_EMPTY_STATIC_INIT(OrgApacheLuceneSearchScoreCachingWrappingScorer)
FOUNDATION_EXPORT void OrgApacheLuceneSearchScoreCachingWrappingScorer_initWithOrgApacheLuceneSearchScorer_(OrgApacheLuceneSearchScoreCachingWrappingScorer *self, OrgApacheLuceneSearchScorer *scorer);
FOUNDATION_EXPORT OrgApacheLuceneSearchScoreCachingWrappingScorer *new_OrgApacheLuceneSearchScoreCachingWrappingScorer_initWithOrgApacheLuceneSearchScorer_(OrgApacheLuceneSearchScorer *scorer) NS_RETURNS_RETAINED;
FOUNDATION_EXPORT OrgApacheLuceneSearchScoreCachingWrappingScorer *create_OrgApacheLuceneSearchScoreCachingWrappingScorer_initWithOrgApacheLuceneSearchScorer_(OrgApacheLuceneSearchScorer *scorer);
J2OBJC_TYPE_LITERAL_HEADER(OrgApacheLuceneSearchScoreCachingWrappingScorer)
#endif
#if __has_feature(nullability)
#pragma clang diagnostic pop
#endif
#pragma pop_macro("INCLUDE_ALL_OrgApacheLuceneSearchScoreCachingWrappingScorer")
| 42.04878 | 213 | 0.849478 | [
"object"
] |
ff58b5ce092043ab23f9ad1d8887f2a9eb467435 | 736 | h | C | src/nimbro_robotcontrol/util/rc_utils/include/rc_utils/ros_time.h | hfarazi/humanoid_op_ros_kinetic | 84712bd541d0130b840ad1935d5bfe301814dbe6 | [
"BSD-3-Clause"
] | 45 | 2015-11-04T01:29:12.000Z | 2022-02-11T05:37:42.000Z | src/nimbro_robotcontrol/util/rc_utils/include/rc_utils/ros_time.h | hfarazi/humanoid_op_ros_kinetic | 84712bd541d0130b840ad1935d5bfe301814dbe6 | [
"BSD-3-Clause"
] | 1 | 2018-11-22T08:34:34.000Z | 2018-11-22T08:34:34.000Z | src/nimbro_robotcontrol/util/rc_utils/include/rc_utils/ros_time.h | hfarazi/humanoid_op_ros_kinetic | 84712bd541d0130b840ad1935d5bfe301814dbe6 | [
"BSD-3-Clause"
] | 20 | 2016-03-05T14:28:45.000Z | 2021-01-30T00:50:47.000Z | // Utilities for ROS time
// File: ros_time.h
// Author: Philipp Allgeuer <pallgeuer@ais.uni-bonn.de>
// Ensure header is only included once
#ifndef ROS_TIME_H
#define ROS_TIME_H
// Includes
#include <ros/time.h>
// Robotcontrol utilities namespace
namespace rc_utils
{
/**
* @name ROS Time Functions (rc_utils/ros_time.h)
**/
///@{
//! @brief Set a ROS `ros::Time` object to zero.
inline void zeroRosTime(ros::Time& time)
{
// Write zero as required
time.sec = time.nsec = 0;
}
//! @brief Set a ROS `ros::WallTime` object to zero.
inline void zeroRosTime(ros::WallTime& time)
{
// Write zero as required
time.sec = time.nsec = 0;
}
///@}
}
#endif /* ROS_TIME_H */
// EOF | 19.368421 | 56 | 0.631793 | [
"object"
] |
ff64102f5a9847c0f0c94c6dc1dff0dae856007f | 4,609 | h | C | training/supplements/src/soem/soem_beckhoff_drivers/src/soem_el6022.h | asct/industrial_training | f69c54cad966382ce93b34138696a99abc66f444 | [
"Apache-2.0"
] | null | null | null | training/supplements/src/soem/soem_beckhoff_drivers/src/soem_el6022.h | asct/industrial_training | f69c54cad966382ce93b34138696a99abc66f444 | [
"Apache-2.0"
] | null | null | null | training/supplements/src/soem/soem_beckhoff_drivers/src/soem_el6022.h | asct/industrial_training | f69c54cad966382ce93b34138696a99abc66f444 | [
"Apache-2.0"
] | null | null | null | /***************************************************************************
tag: Sava Marinkov Sat Feb 19 12:50:00 CET 2011 soem_el6xxx.h
soem_el6022.h - Header for Beckhoff 2-channel serial interfaces RS232/RS422/RS485, D-sub connection
-------------------
begin : Sat February 19 2011
copyright : (C) 2011 Sava Marinkov
email : s.marinkov@student.tue.nl
***************************************************************************
* This library is free software; you can redistribute it and/or *
* modify it under the terms of the GNU Lesser General Public *
* License as published by the Free Software Foundation; either *
* version 2.1 of the License, or (at your option) any later version. *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public *
* License along with this library; if not, write to the Free Software *
* Foundation, Inc., 59 Temple Place, *
* Suite 330, Boston, MA 02111-1307 USA *
* *
***************************************************************************/
#ifndef SOEM_EL6022_H
#define SOEM_EL6022_H
#include <soem_master/soem_driver.h>
#include <soem_beckhoff_drivers/CommMsgBig.h>
#include <rtt/Port.hpp>
#include <rtt/Property.hpp>
#include <vector>
#include <queue>
#include "COE_config.h"
#define CHANNEL_1 0
#define CHANNEL_2 1
#define CHANNEL_NUM 2
#define MAX_TRIALS 30
#define MAX_OUT_QUEUE_SIZE 220
#define RS485_MAX_DATA_LENGTH 22
//CONTROL MASKS
#define TRANSMIT_REQUEST 0x01
#define RECEIVE_ACCEPTED 0x02
#define INIT_REQUEST 0x04
#define SEND_CONTINUOUS 0x08
//STATUS MASKS
#define TRANSMIT_ACCEPTED 0x01
#define RECEIVE_REQUEST 0x02
#define INIT_ACCEPTED 0x04
#define BUFFER_FULL 0x08
#define PARITY_ERROR 0x10
#define FRAMING_ERROR 0x20
#define OVERRUN_ERROR 0x40
typedef enum RS485_BAUDRATE {
RS485_300_BAUD = 1,
RS485_600_BAUD,
RS485_1200_BAUD,
RS485_2400_BAUD,
RS485_4800_BAUD,
RS485_9600_BAUD,
RS485_19200_BAUD,
RS485_38400_BAUD,
RS485_57600_BAUD,
RS485_115200_BAUD
} RS485_BAUDRATE;
typedef enum RS485_DATA_FRAME {
RS485_7B_EP_1S = 1,
RS485_7B_OP_1S,
RS485_8B_NP_1S,
RS485_8B_EP_1S,
RS485_8B_OP_1S,
RS485_7B_EP_2S,
RS485_7B_OP_2S,
RS485_8B_NP_2S,
RS485_8B_EP_2S,
RS485_8B_OP_2S
} RS485_DATA_FRAME;
typedef enum RS485_HANDSHAKE {
XON_XOFF_DISABLE = 0,
XON_XOFF_ENABLE
} RS485_HANDSHAKE;
typedef enum RS485_DUPLEX {
RS485_FULL_DUPLEX = 0,
RS485_HALF_DUPLEX
} RS485_DUPLEX;
typedef enum state_el6022t {
START,
INIT_REQ,
INIT_WAIT,
PREP_REQ,
PREP_WAIT,
RUN
} state_el6022t;
typedef struct PACKED {
uint8 control;
uint8 output_length;
uint8 buffer_out[RS485_MAX_DATA_LENGTH];
} out_el6022t;
typedef struct PACKED {
uint8 status;
uint8 input_length;
uint8 buffer_in[RS485_MAX_DATA_LENGTH];
} in_el6022t;
using namespace RTT;
namespace soem_beckhoff_drivers {
class SoemEL6022 : public soem_master::SoemDriver
{
public:
SoemEL6022(ec_slavet* mem_loc);
~SoemEL6022()
{};
void update();
bool configure();
bool readSB(unsigned int chan, uint8 bitmask);
bool readCB(unsigned int chan, uint8 bitmask);
private:
void updateState(unsigned int chan);
void executeStateActions(unsigned int chan);
bool read(unsigned int chan);
bool write(unsigned int chan);
out_el6022t* m_outputs[CHANNEL_NUM];
in_el6022t* m_inputs[CHANNEL_NUM];
CommMsgBig msg_out; //terminal sends this msg to rs485 device
CommMsgBig msg_in; //terminal receives this msg from rs485 device
bool rxReady, txReady;
RTT::OutputPort<CommMsgBig> port_out;
RTT::InputPort<CommMsgBig> port_in;
RTT::OutputPort<bool> port_rx_ready;
RTT::OutputPort<bool> port_running;
std::vector<parameter> m_params;
std::queue<uint8> bytesOut[CHANNEL_NUM];
state_el6022t state[CHANNEL_NUM];
unsigned int trial[CHANNEL_NUM];
};
}
#endif
| 28.103659 | 100 | 0.632675 | [
"vector"
] |
ff6b31f7954be821c45ef13d077a77e197ac8f53 | 15,168 | h | C | Base/PLCore/include/PLCore/Base/ClassImpl.h | ktotheoz/pixellight | 43a661e762034054b47766d7e38d94baf22d2038 | [
"MIT"
] | 83 | 2015-01-08T15:06:14.000Z | 2021-07-20T17:07:00.000Z | Base/PLCore/include/PLCore/Base/ClassImpl.h | PixelLightFoundation/pixellight | 43a661e762034054b47766d7e38d94baf22d2038 | [
"MIT"
] | 27 | 2019-06-18T06:46:07.000Z | 2020-02-02T11:11:28.000Z | Base/PLCore/include/PLCore/Base/ClassImpl.h | naetherm/PixelLight | d7666f5b49020334cbb5debbee11030f34cced56 | [
"MIT"
] | 40 | 2015-02-25T18:24:34.000Z | 2021-03-06T09:01:48.000Z | /*********************************************************\
* File: ClassImpl.h *
*
* Copyright (C) 2002-2013 The PixelLight Team (http://www.pixellight.org/)
*
* This file is part of PixelLight.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
* and associated documentation files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\*********************************************************/
#ifndef __PLCORE_CLASS_IMPL_H__
#define __PLCORE_CLASS_IMPL_H__
#pragma once
//[-------------------------------------------------------]
//[ Includes ]
//[-------------------------------------------------------]
#include "PLCore/String/String.h"
#include "PLCore/Container/List.h"
#include "PLCore/Container/HashMap.h"
//[-------------------------------------------------------]
//[ Namespace ]
//[-------------------------------------------------------]
namespace PLCore {
//[-------------------------------------------------------]
//[ Forward declarations ]
//[-------------------------------------------------------]
class Class;
class Object;
class Module;
class VarDesc;
class FuncDesc;
class DynParams;
class EventDesc;
class MemberDesc;
class ConstructorDesc;
class EventHandlerDesc;
//[-------------------------------------------------------]
//[ Classes ]
//[-------------------------------------------------------]
/**
* @brief
* Abstract class implementation base class
*
* @note
* - Implementation of the bridge design pattern, this class is the implementor of the 'Class' abstraction
*/
class ClassImpl {
//[-------------------------------------------------------]
//[ Friends ]
//[-------------------------------------------------------]
friend class Class;
friend class ClassReal;
friend class ClassDummy;
friend class ClassManager;
//[-------------------------------------------------------]
//[ Public functions ]
//[-------------------------------------------------------]
public:
/**
* @brief
* Return the pointer to the owner class instance wrapping this class implementation
*
* @return
* Pointer to the owner class instance (should never be a null pointer, unless something is *terribly* wrong ;-) )
*/
inline Class *GetClass() const;
//[-------------------------------------------------------]
//[ Protected functions ]
//[-------------------------------------------------------]
protected:
/**
* @brief
* Constructor
*
* @param[in] nModuleID
* ID of owner module
* @param[in] sName
* Name
* @param[in] sDescription
* Description
* @param[in] sNamespace
* Namespace
* @param[in] sBaseClass
* Base class
*/
PLCORE_API ClassImpl(uint32 nModuleID, const String &sName, const String &sDescription, const String &sNamespace, const String &sBaseClass);
/**
* @brief
* Destructor
*/
PLCORE_API virtual ~ClassImpl();
/**
* @brief
* Get module the class belongs to
*
* @return
* Module (always valid)
*/
PLCORE_API const Module *GetModule() const;
/**
* @brief
* Get full class name (with namespace)
*
* @return
* Name of class and namespace
*/
inline String GetClassName() const;
/**
* @brief
* Get full name of base class (with namespace)
*
* @return
* Name of base class and namespace
*/
inline String GetBaseClassName() const;
/**
* @brief
* Get class name (without namespace)
*
* @return
* Name of class
*/
inline String GetName() const;
/**
* @brief
* Get class description
*
* @return
* Description
*/
inline String GetDescription() const;
/**
* @brief
* Get namespace
*
* @return
* Namespace
*/
inline String GetNamespace() const;
/**
* @brief
* Get base class
*
* @return
* Pointer to base class (can be a null pointer)
*/
PLCORE_API const Class *GetBaseClass() const;
/**
* @brief
* Check if class is derived from another class
*
* @param[in] cBaseClass
* Base class
*
* @return
* 'true' if class is derived from given base class, else 'false'
*/
PLCORE_API bool IsDerivedFrom(const Class &cBaseClass) const;
/**
* @brief
* Check if class is derived from another class
*
* @param[in] sBaseClass
* Base class name (with namespace)
*
* @return
* 'true' if class is derived from given base class, else 'false'
*/
PLCORE_API bool IsDerivedFrom(const String &sBaseClass) const;
/**
* @brief
* Get properties
*
* @return
* Hash map of properties (name -> value)
*
* @remarks
* A property is a name/value pair of strings, that can be assigned to a class. Use this to
* transport additional information for your class, e.g.
* "PluginType" -> "Widget"
* "FileFormats" -> "avi mpg mp4"
*/
inline const HashMap<String, String> &GetProperties() const;
//[-------------------------------------------------------]
//[ Class management ]
//[-------------------------------------------------------]
/**
* @brief
* Add property
*
* @param[in] sName
* Property name
* @param[in] sValue
* Property value
*/
PLCORE_API void AddProperty(const String &sName, const String &sValue);
//[-------------------------------------------------------]
//[ Protected virtual ClassImpl functions ]
//[-------------------------------------------------------]
protected:
//[-------------------------------------------------------]
//[ Class management ]
//[-------------------------------------------------------]
/**
* @brief
* Return whether or not the class implementation is a dummy used for delayed shared library loading
*
* @return
* 'true' if the class implementation is a dummy used for delayed shared library loading, else 'false'
*/
virtual bool IsDummy() const = 0;
/**
* @brief
* Initialize class and class members
*
* @remarks
* This function is called automatically when it is necessary, e.g. the first time
* any members are being accessed. It will search for the base class of the class
* and initialize all members. If later a class is changed (e.g. a new member is
* registered at one of the base classes), that class and all derived classes will
* destroy their information and must be initialized again.
*/
virtual void InitClass() const = 0;
/**
* @brief
* De-Initialize class and class members
*
* @remarks
* This function destroys all data about the class and it's members. See
* InitClass() for more information about why this is necessary and when.
*/
virtual void DeInitClass() const = 0;
//[-------------------------------------------------------]
//[ Class interface ]
//[-------------------------------------------------------]
/**
* @brief
* Get attributes
*
* @return
* List of attribute descriptors
*/
virtual const List<VarDesc*> &GetAttributes() const = 0;
/**
* @brief
* Get attribute
*
* @param[in] sName
* Attribute name
*
* @return
* Attribute descriptor (can be a null pointer, if no member with that name could be found)
*/
virtual const VarDesc *GetAttribute(const String &sName) const = 0;
/**
* @brief
* Get methods
*
* @return
* List of method descriptors
*/
virtual const List<FuncDesc*> &GetMethods() const = 0;
/**
* @brief
* Get method
*
* @param[in] sName
* Method name
*
* @return
* Method descriptor (can be a null pointer, if no member with that name could be found)
*/
virtual const FuncDesc *GetMethod(const String &sName) const = 0;
/**
* @brief
* Get signals
*
* @return
* List of signal descriptors
*/
virtual const List<EventDesc*> &GetSignals() const = 0;
/**
* @brief
* Get signal
*
* @param[in] sName
* Signal name
*
* @return
* Signal descriptor (can be a null pointer, if no member with that name could be found)
*/
virtual const EventDesc *GetSignal(const String &sName) const = 0;
/**
* @brief
* Get slot
*
* @return
* List of slot descriptors
*/
virtual const List<EventHandlerDesc*> &GetSlots() const = 0;
/**
* @brief
* Get slot
*
* @param[in] sName
* Slot name
*
* @return
* Slot descriptor (can be a null pointer, if no member with that name could be found)
*/
virtual const EventHandlerDesc *GetSlot(const String &sName) const = 0;
/**
* @brief
* Check if class has any constructors
*
* @return
* 'true' if class has at least one constructor, else 'false'
*/
virtual bool HasConstructor() const = 0;
/**
* @brief
* Check if class has a default constructor
*
* @return
* 'true' if class has a default constructor, else 'false'
*/
virtual bool HasDefaultConstructor() const = 0;
/**
* @brief
* Get constructors
*
* @return
* List of constructor descriptors
*/
virtual const List<ConstructorDesc*> &GetConstructors() const = 0;
/**
* @brief
* Get constructor
*
* @param[in] sName
* Constructor name
*
* @return
* Constructor descriptor (can be a null pointer, if no member with that name could be found)
*/
virtual const ConstructorDesc *GetConstructor(const String &sName) const = 0;
/**
* @brief
* Create object by using the default constructor
*
* @return
* Pointer to created object (can be a null pointer)
*
* @remarks
* This function will call the default constructor of the class.
* If the class has no default constructor, the function will fail and return a null pointer.
*/
virtual Object *Create() const = 0;
/**
* @brief
* Create object by using typed constructor parameters in order to identity the constructor automatically
*
* @param[in] cParams
* Constructor parameters
*
* @return
* Pointer to created object (can be a null pointer)
*
* @remarks
* This function will search for a constructor that matches the signature of the given parameters.
* If no such constructor can be found, the function will fail and return a null pointer.
*/
virtual Object *Create(const DynParams &cParams) const = 0;
/**
* @brief
* Create object by using a given constructor name and typed constructor parameters
*
* @param[in] sName
* Constructor name
* @param[in] cParams
* Constructor parameters
*
* @return
* Pointer to created object (can be a null pointer)
*
* @remarks
* This function will search for a constructor with the specified name. If no such constructor can be found, or
* the given parameters do not match the signature of the constructor, the function will fail and return a null pointer.
*/
virtual Object *Create(const String &sName, const DynParams &cParams) const = 0;
/**
* @brief
* Create object by using a given constructor name and typeless constructor parameters
*
* @param[in] sName
* Constructor name
* @param[in] sParams
* Constructor parameters
*
* @return
* Pointer to created object (can be a null pointer, destroy the returned instance when you no longer need it)
*
* @remarks
* This function will search for a constructor with the specified name. If no such constructor can be found,
* the function will fail and return a null pointer.
*/
virtual Object *Create(const String &sName, const String &sParams) const = 0;
//[-------------------------------------------------------]
//[ Protected data ]
//[-------------------------------------------------------]
protected:
// Class information
Class *m_pClass; /**< Class instance wrapping this class implementation (can be a null pointer, set and managed by the class manager) */
String m_sName; /**< Name of class */
String m_sNamespace; /**< Namespace of class */
String m_sClassName; /**< Name of class (with namespace) */
String m_sDescription; /**< Description of class */
String m_sBaseClass; /**< Name of base class (with namespace) */
// Own data (does not include data from base classes)
HashMap<String, String> m_mapOwnProperties; /**< Hash map of properties (name -> value) */
// Runtime data
mutable uint32 m_nModuleID; /**< ID of owner module */
mutable bool m_bInitialized; /**< Is the class initialized? */
mutable const Class *m_pBaseClass; /**< Pointer to base class */
// Member lists (also including the members from base classes)
mutable HashMap<String, String> m_mapProperties; /**< Hash map of properties (name -> value) */
//[-------------------------------------------------------]
//[ Private functions ]
//[-------------------------------------------------------]
private:
/**
* @brief
* Copy constructor
*
* @param[in] cSource
* Source to copy from
*/
ClassImpl(const ClassImpl &cSource);
/**
* @brief
* Copy operator
*
* @param[in] cSource
* Source to copy from
*
* @return
* Reference to this instance
*/
ClassImpl &operator =(const ClassImpl &cSource);
};
//[-------------------------------------------------------]
//[ Namespace ]
//[-------------------------------------------------------]
} // PLCore
//[-------------------------------------------------------]
//[ Implementation ]
//[-------------------------------------------------------]
#include "PLCore/Base/ClassImpl.inl"
#endif // __PLCORE_CLASS_IMPL_H__
| 28.298507 | 147 | 0.541403 | [
"object"
] |
ff803cca50ec63ef17934a0f53dd905f5a9f656a | 1,149 | h | C | QuickStartApp_iOS/HueSDK_iOS.framework/Versions/A/Headers/PHGroup.h | devvyn/PhilipsHueSDK-iOS-OSX | d85db988208d1837717c4c0802371e695d644e62 | [
"Unlicense"
] | 295 | 2015-01-02T10:51:51.000Z | 2022-02-04T13:28:53.000Z | QuickStartApp_iOS/HueSDK_iOS.framework/Versions/A/Headers/PHGroup.h | jimtoepel/RocketGame | 09b7b87affdb12742aa359bfa9d644519665abd0 | [
"Unlicense"
] | 53 | 2015-01-07T09:42:13.000Z | 2021-07-13T07:53:56.000Z | QuickStartApp_iOS/HueSDK_iOS.framework/Versions/A/Headers/PHGroup.h | jimtoepel/RocketGame | 09b7b87affdb12742aa359bfa9d644519665abd0 | [
"Unlicense"
] | 170 | 2015-01-04T04:17:48.000Z | 2021-03-11T01:52:47.000Z | /*******************************************************************************
Copyright (c) 2013-2014 Koninklijke Philips N.V.
All Rights Reserved.
********************************************************************************/
#import <Foundation/Foundation.h>
#import "PHBridgeResource.h"
typedef enum {
GROUP_TYPE_UNKNOWN,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_LIGHTGROUP,
GROUP_TYPE_LIGHTSOURCE
} PHGroupType;
/**
A grouped set of lights
*/
@interface PHGroup : PHBridgeResource<NSCoding, NSCopying>
/**
The identifiers of the lights controlled by this group
*/
@property (nonatomic, strong) NSArray *lightIdentifiers;
/**
The type of this group
*/
@property (nonatomic, assign) PHGroupType type;
/**
This model ID uniquely identifies the hardware model of the luminaire for the given manufacturer. Only present for automatically created Luminaires,
*/
@property (nonatomic, strong) NSString *modelID;
/**
This unique id of the luminaire. This field is only set on a group of type GROUP_TYPE_LUMINAIRE and GROUP_TYPE_LIGHT_SOURCE
*/
@property (nonatomic, strong) NSString *uniqueId;
- (BOOL)isComplete;
@end | 26.72093 | 149 | 0.651001 | [
"model"
] |
ff92815076aeb9fe3a8755f7f8a92365953fb214 | 3,049 | h | C | code_reading/oceanbase-master/src/storage/memtable/ob_memtable_row_reader.h | wangcy6/weekly_read | 3a8837ee9cd957787ee1785e4066dd623e02e13a | [
"Apache-2.0"
] | null | null | null | code_reading/oceanbase-master/src/storage/memtable/ob_memtable_row_reader.h | wangcy6/weekly_read | 3a8837ee9cd957787ee1785e4066dd623e02e13a | [
"Apache-2.0"
] | null | null | null | code_reading/oceanbase-master/src/storage/memtable/ob_memtable_row_reader.h | wangcy6/weekly_read | 3a8837ee9cd957787ee1785e4066dd623e02e13a | [
"Apache-2.0"
] | 1 | 2020-10-18T12:59:31.000Z | 2020-10-18T12:59:31.000Z | /**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_MEMTABLE_OB_MEMTABLE_ROW_READER_H_
#define OCEANBASE_MEMTABLE_OB_MEMTABLE_ROW_READER_H_
#include "common/object/ob_object.h"
#include "share/ob_define.h"
#include "share/schema/ob_table_schema.h"
#include "storage/ob_i_store.h"
#include "storage/memtable/ob_nop_bitmap.h"
namespace oceanbase {
namespace memtable {
// Deserializes a single memtable row out of a flat byte buffer.
// Usage: set_buf() points the reader at serialized row data, then
// get_memtable_row()/get_memtable_sparse_row() decode columns into an
// ObStoreRow using the supplied column map.
class ObMemtableRowReader {
public:
  ObMemtableRowReader();
  ~ObMemtableRowReader()
  {}
  // Attach the reader to a serialized row buffer; parsing starts at offset 0.
  int set_buf(const char* buf, int64_t buf_size);
  void reset();
  // Decode a flat row. Outputs which columns were filled and whether any NULLs
  // were seen; 'bitmap' tracks columns not present in the buffer (nops).
  int get_memtable_row(bool& row_empty, const share::schema::ColumnMap& column_index,
      const storage::ObColDescIArray& columns, storage::ObStoreRow& row, memtable::ObNopBitMap& bitmap,
      int64_t& filled_column_count, bool& has_null);
  // Decode a sparse row; 'bit_set' records the column ids already seen.
  int get_memtable_sparse_row(const share::schema::ColumnMap& column_index,
      ObFixedBitSet<OB_ALL_MAX_COLUMN_ID>& bit_set, storage::ObStoreRow& row, bool& loop_flag);
  TO_STRING_KV(K_(buf), K_(pos));

private:
  DISALLOW_COPY_AND_ASSIGN(ObMemtableRowReader);
  // Parse helpers for the two on-disk layouts (with/without column meta).
  int parse_no_meta(storage::ObStoreRow& row, bool& has_null, bool& row_empty, int64_t& filled_column_count);
  int parse_with_meta(storage::ObStoreRow& row, bool& row_empty, bool& loop_flag);
  // Type-specific value decoders; each advances pos_ past the value read.
  inline int read_oracle_timestamp(
      const common::ObObjType obj_type, const uint8_t meta_attr, const common::ObOTimestampMetaAttrType otmat);
  int read_interval_ym();
  int read_interval_ds();
  int read_urowid();
  // Reinterpret the next sizeof(T) bytes at pos_ as a T.
  template <class T>
  const T* read();

private:
  const char* buf_;      // serialized row data (not owned)
  int64_t buf_size_;     // total bytes available in buf_
  int64_t pos_;          // current parse offset into buf_
  uint64_t column_id_;   // id of the column currently being decoded
  common::ObObj obj_;    // scratch object for the decoded value
};
// Stateful wrapper around ObMemtableRowReader for iterating rows:
// init() fixes the column layout once, then set_buf()/get_memtable_row()
// are called per row. Supports both flat (bitmap) and sparse (bit set) rows.
class ObMemtableIterRowReader {
public:
  ObMemtableIterRowReader();
  ~ObMemtableIterRowReader();
  // One-time setup binding the column map / nop bitmap / column descriptors.
  int init(common::ObArenaAllocator* allocator, const share::schema::ColumnMap* cols_map, ObNopBitMap* bitmap,
      const storage::ObColDescArray& columns);
  int get_memtable_row(storage::ObStoreRow& row);
  // Point the underlying reader at the next serialized row.
  int set_buf(const char* buf, int64_t buf_size);
  void reset();
  void destory();
  bool is_iter_end();
  // Mark not-yet-filled columns of 'row' as nops.
  int set_nop_pos(storage::ObStoreRow& row);

private:
  bool is_inited_;
  bool loop_flag_;             // whether more values remain in the current row
  bool row_empty_;
  bool has_null_;
  int64_t column_cnt_;
  int64_t filled_column_count_;
  ObMemtableRowReader reader_; // low-level single-row decoder
  const share::schema::ColumnMap* cols_map_;
  ObNopBitMap* bitmap_;                           // for flat row
  const storage::ObColDescArray* columns_ptr_;    // for flat row
  ObFixedBitSet<OB_ALL_MAX_COLUMN_ID>* bit_set_;  // for sparse row
};
} // end namespace memtable
} // end namespace oceanbase
#endif
| 33.877778 | 111 | 0.751066 | [
"object"
] |
94e2e2b3d445bdc1e4be42a2db08f96ab160c2ec | 42,756 | c | C | extern/gtk/gtk/gtklevelbar.c | PableteProgramming/download | 013e35bb5c085e5dfdb57a3a0a39cdf2fd3064b8 | [
"MIT"
] | null | null | null | extern/gtk/gtk/gtklevelbar.c | PableteProgramming/download | 013e35bb5c085e5dfdb57a3a0a39cdf2fd3064b8 | [
"MIT"
] | null | null | null | extern/gtk/gtk/gtklevelbar.c | PableteProgramming/download | 013e35bb5c085e5dfdb57a3a0a39cdf2fd3064b8 | [
"MIT"
] | null | null | null | /* GTK - The GIMP Toolkit
* Copyright © 2012 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Cosimo Cecchi <cosimoc@gnome.org>
*
*/
/**
* GtkLevelBar:
*
* `GtkLevelBar` is a widget that can be used as a level indicator.
*
* Typical use cases are displaying the strength of a password, or
* showing the charge level of a battery.
*
* 
*
* Use [method@Gtk.LevelBar.set_value] to set the current value, and
* [method@Gtk.LevelBar.add_offset_value] to set the value offsets at which
* the bar will be considered in a different state. GTK will add a few
* offsets by default on the level bar: %GTK_LEVEL_BAR_OFFSET_LOW,
* %GTK_LEVEL_BAR_OFFSET_HIGH and %GTK_LEVEL_BAR_OFFSET_FULL, with
* values 0.25, 0.75 and 1.0 respectively.
*
* Note that it is your responsibility to update preexisting offsets
* when changing the minimum or maximum value. GTK will simply clamp
* them to the new range.
*
* ## Adding a custom offset on the bar
*
* ```c
* static GtkWidget *
* create_level_bar (void)
* {
* GtkWidget *widget;
* GtkLevelBar *bar;
*
* widget = gtk_level_bar_new ();
* bar = GTK_LEVEL_BAR (widget);
*
* // This changes the value of the default low offset
*
* gtk_level_bar_add_offset_value (bar,
* GTK_LEVEL_BAR_OFFSET_LOW,
* 0.10);
*
* // This adds a new offset to the bar; the application will
* // be able to change its color CSS like this:
* //
* // levelbar block.my-offset {
* // background-color: magenta;
* // border-style: solid;
* // border-color: black;
* // border-style: 1px;
* // }
*
* gtk_level_bar_add_offset_value (bar, "my-offset", 0.60);
*
* return widget;
* }
* ```
*
* The default interval of values is between zero and one, but it’s possible
* to modify the interval using [method@Gtk.LevelBar.set_min_value] and
* [method@Gtk.LevelBar.set_max_value]. The value will be always drawn in
* proportion to the admissible interval, i.e. a value of 15 with a specified
* interval between 10 and 20 is equivalent to a value of 0.5 with an interval
* between 0 and 1. When %GTK_LEVEL_BAR_MODE_DISCRETE is used, the bar level
* is rendered as a finite number of separated blocks instead of a single one.
* The number of blocks that will be rendered is equal to the number of units
* specified by the admissible interval.
*
* For instance, to build a bar rendered with five blocks, it’s sufficient to
* set the minimum value to 0 and the maximum value to 5 after changing the
* indicator mode to discrete.
*
* # GtkLevelBar as GtkBuildable
*
* The `GtkLevelBar` implementation of the `GtkBuildable` interface supports a
* custom <offsets> element, which can contain any number of <offset> elements,
* each of which must have name and value attributes.
*
* # CSS nodes
*
* ```
* levelbar[.discrete]
* ╰── trough
* ├── block.filled.level-name
* ┊
* ├── block.empty
* ┊
* ```
*
* `GtkLevelBar` has a main CSS node with name levelbar and one of the style
* classes .discrete or .continuous and a subnode with name trough. Below the
* trough node are a number of nodes with name block and style class .filled
* or .empty. In continuous mode, there is exactly one node of each, in discrete
* mode, the number of filled and unfilled nodes corresponds to blocks that are
* drawn. The block.filled nodes also get a style class .level-name corresponding
* to the level for the current value.
*
* In horizontal orientation, the nodes are always arranged from left to right,
* regardless of text direction.
*
* # Accessibility
*
* `GtkLevelBar` uses the %GTK_ACCESSIBLE_ROLE_METER role.
*/
#include "config.h"
#include "gtkbinlayout.h"
#include "gtkbuildable.h"
#include "gtkbuilderprivate.h"
#include "gtkgizmoprivate.h"
#include "gtkintl.h"
#include "gtklevelbar.h"
#include "gtkmarshalers.h"
#include "gtkorientable.h"
#include "gtkcssnodeprivate.h"
#include "gtktypebuiltins.h"
#include "gtkwidgetprivate.h"
#include <math.h>
#include <stdlib.h>
/* GObject property ids. PROP_ORIENTATION is overridden from the
 * GtkOrientable interface, so it sits outside LAST_PROPERTY and has no
 * entry in the properties[] table. */
enum {
  PROP_VALUE = 1,
  PROP_MIN_VALUE,
  PROP_MAX_VALUE,
  PROP_MODE,
  PROP_INVERTED,
  LAST_PROPERTY,
  PROP_ORIENTATION /* overridden */
};

/* Signal ids. */
enum {
  SIGNAL_OFFSET_CHANGED,
  NUM_SIGNALS
};

/* Filled in by gtk_level_bar_class_init(). */
static GParamSpec *properties[LAST_PROPERTY] = { NULL, };
static guint signals[NUM_SIGNALS] = { 0, };
typedef struct _GtkLevelBarClass GtkLevelBarClass;

/* A named threshold on the bar; the name doubles as the CSS class that
 * filled blocks receive while the current value is at or below it. */
typedef struct {
  char *name;
  double value;
} GtkLevelBarOffset;

struct _GtkLevelBar {
  GtkWidget parent_instance;

  GtkOrientation orientation;
  GtkLevelBarMode bar_mode;

  double min_value;        /* lower bound of the displayed interval */
  double max_value;        /* upper bound of the displayed interval */
  double cur_value;        /* current level, kept within [min, max] */

  GList *offsets;          /* GtkLevelBarOffset*, sorted ascending by value */

  GtkWidget *trough_widget;  /* "trough" gizmo; parent of the blocks */
  GtkWidget **block_widget;  /* "block" gizmos, array of length n_blocks */
  guint n_blocks;

  guint inverted : 1;      /* grow from the opposite edge */
};

struct _GtkLevelBarClass {
  GtkWidgetClass parent_class;

  /* Class handler for the "offset-changed" signal. */
  void (* offset_changed) (GtkLevelBar *self,
                           const char  *name);
};
static void gtk_level_bar_set_value_internal (GtkLevelBar *self,
                                              double       value);

static void gtk_level_bar_buildable_init (GtkBuildableIface *iface);

/* GtkLevelBar implements GtkOrientable (no custom iface init needed)
 * and GtkBuildable (for the custom <offsets> element). */
G_DEFINE_TYPE_WITH_CODE (GtkLevelBar, gtk_level_bar, GTK_TYPE_WIDGET,
                         G_IMPLEMENT_INTERFACE (GTK_TYPE_ORIENTABLE, NULL)
                         G_IMPLEMENT_INTERFACE (GTK_TYPE_BUILDABLE,
                                                gtk_level_bar_buildable_init))
/* Allocate a new offset marker holding a copy of @name and the given
 * threshold @value. Freed with gtk_level_bar_offset_free(). */
static GtkLevelBarOffset *
gtk_level_bar_offset_new (const char *name,
                          double      value)
{
  GtkLevelBarOffset *result = g_slice_new0 (GtkLevelBarOffset);

  result->value = value;
  result->name = g_strdup (name);

  return result;
}
/* Release an offset marker created by gtk_level_bar_offset_new(). */
static void
gtk_level_bar_offset_free (GtkLevelBarOffset *offset)
{
  g_free (offset->name);
  g_slice_free (GtkLevelBarOffset, offset);
}
/* GCompareFunc for g_list_find_custom(): matches an offset by name.
 * @user_data is the name to look for; returns 0 on a match. */
static int
offset_find_func (gconstpointer data,
                  gconstpointer user_data)
{
  const char *wanted = user_data;
  const GtkLevelBarOffset *candidate = data;

  return g_strcmp0 (wanted, candidate->name);
}
/* GCompareFunc keeping self->offsets sorted ascending by value.
 *
 * The previous implementation returned the raw result of a '>'
 * comparison (only 0 or 1), which violates the GCompareFunc contract
 * of returning negative/zero/positive. A proper three-way comparison
 * produces the same insertion points under g_list_insert_sorted()
 * (which inserts before the first element comparing >= the new one)
 * but is also safe for any other sort/search user. */
static int
offset_sort_func (gconstpointer a,
                  gconstpointer b)
{
  const GtkLevelBarOffset *offset_a = a;
  const GtkLevelBarOffset *offset_b = b;

  if (offset_a->value < offset_b->value)
    return -1;
  if (offset_a->value > offset_b->value)
    return 1;
  return 0;
}
/* Insert or update the offset called @name so it carries @value.
 * Returns TRUE when the offsets list actually changed, FALSE when an
 * offset with that name and value already existed. */
static gboolean
gtk_level_bar_ensure_offset (GtkLevelBar *self,
                             const char  *name,
                             double       value)
{
  GList *link;

  link = g_list_find_custom (self->offsets, name, offset_find_func);
  if (link != NULL)
    {
      GtkLevelBarOffset *found = link->data;

      if (found->value == value)
        return FALSE;

      /* Replace: drop the stale entry, re-insert below at the right spot. */
      gtk_level_bar_offset_free (found);
      self->offsets = g_list_delete_link (self->offsets, link);
    }

  self->offsets = g_list_insert_sorted (self->offsets,
                                        gtk_level_bar_offset_new (name, value),
                                        offset_sort_func);

  return TRUE;
}
#ifndef G_DISABLE_CHECKS
/* Debug-build helper: TRUE when @value lies inside [min_value, max_value].
 * Compiled out with G_DISABLE_CHECKS since it only backs g_return_* checks. */
static gboolean
gtk_level_bar_value_in_interval (GtkLevelBar *self,
                                 double       value)
{
  return ((value >= self->min_value) &&
          (value <= self->max_value));
}
#endif
/* Number of drawable blocks: one in continuous mode, one per whole unit
 * of the rounded [min, max] interval (at least one) in discrete mode. */
static int
gtk_level_bar_get_num_blocks (GtkLevelBar *self)
{
  switch (self->bar_mode)
    {
    case GTK_LEVEL_BAR_MODE_CONTINUOUS:
      return 1;
    case GTK_LEVEL_BAR_MODE_DISCRETE:
      return MAX (1, (int) (round (self->max_value) - round (self->min_value)));
    default:
      return 0;
    }
}
/* Number of block child widgets needed: continuous mode always keeps two
 * (filled + empty); discrete mode keeps one widget per block. */
static int
gtk_level_bar_get_num_block_nodes (GtkLevelBar *self)
{
  return (self->bar_mode == GTK_LEVEL_BAR_MODE_CONTINUOUS)
         ? 2
         : gtk_level_bar_get_num_blocks (self);
}
/* Compute the largest minimum width/height over all block widgets, so
 * every block can be given a uniform size. */
static void
gtk_level_bar_get_min_block_size (GtkLevelBar *self,
                                  int         *block_width,
                                  int         *block_height)
{
  guint n_blocks = gtk_level_bar_get_num_block_nodes (self);
  int max_w = 0;
  int max_h = 0;
  guint i;

  for (i = 0; i < n_blocks; i++)
    {
      int w, h;

      gtk_widget_measure (self->block_widget[i],
                          GTK_ORIENTATION_HORIZONTAL, -1,
                          &w, NULL,
                          NULL, NULL);
      gtk_widget_measure (self->block_widget[i],
                          GTK_ORIENTATION_VERTICAL, -1,
                          &h, NULL,
                          NULL, NULL);

      max_w = MAX (max_w, w);
      max_h = MAX (max_h, h);
    }

  *block_width = max_w;
  *block_height = max_h;
}
/* Effective inversion after accounting for text direction: a horizontal
 * bar in RTL locales flips the user-set "inverted" flag. */
static gboolean
gtk_level_bar_get_real_inverted (GtkLevelBar *self)
{
  gboolean rtl_horizontal;

  rtl_horizontal =
    gtk_widget_get_direction (GTK_WIDGET (self)) == GTK_TEXT_DIR_RTL &&
    self->orientation == GTK_ORIENTATION_HORIZONTAL;

  return rtl_horizontal ? !self->inverted : self->inverted;
}
/* GtkGizmo snapshot vfunc for the trough: paints the block children.
 * In continuous mode the paint order matters — the empty block is drawn
 * first, the filled block is layered on top of it. */
static void
gtk_level_bar_render_trough (GtkGizmo    *gizmo,
                             GtkSnapshot *snapshot)
{
  GtkWidget *widget = GTK_WIDGET (gizmo);
  GtkLevelBar *self = GTK_LEVEL_BAR (gtk_widget_get_parent (GTK_WIDGET (gizmo)));

  if (self->bar_mode == GTK_LEVEL_BAR_MODE_CONTINUOUS)
    {
      gboolean inverted;

      inverted = gtk_level_bar_get_real_inverted (self);

      /* render the empty (unfilled) part */
      gtk_widget_snapshot_child (widget, self->block_widget[inverted ? 0 : 1], snapshot);

      /* now render the filled part on top of it */
      if (self->cur_value != 0)
        gtk_widget_snapshot_child (widget, self->block_widget[inverted ? 1 : 0], snapshot);
    }
  else
    {
      int num_blocks, i;

      num_blocks = gtk_level_bar_get_num_blocks (self);

      for (i = 0; i < num_blocks; i++)
        gtk_widget_snapshot_child (widget, self->block_widget[i], snapshot);
    }
}
/* GtkGizmo measure vfunc for the trough: along the bar's own
 * orientation the trough needs room for all blocks; across it, the
 * size of a single block suffices. */
static void
gtk_level_bar_measure_trough (GtkGizmo       *gizmo,
                              GtkOrientation  orientation,
                              int             for_size,
                              int            *minimum,
                              int            *natural,
                              int            *minimum_baseline,
                              int            *natural_baseline)
{
  GtkLevelBar *self = GTK_LEVEL_BAR (gtk_widget_get_parent (GTK_WIDGET (gizmo)));
  int num_blocks = gtk_level_bar_get_num_blocks (self);
  int block_width, block_height;
  int size;

  gtk_level_bar_get_min_block_size (self, &block_width, &block_height);

  if (orientation == GTK_ORIENTATION_HORIZONTAL)
    size = (self->orientation == GTK_ORIENTATION_HORIZONTAL)
           ? num_blocks * block_width
           : block_width;
  else
    size = (self->orientation == GTK_ORIENTATION_VERTICAL)
           ? num_blocks * block_height
           : block_height;

  *minimum = size;
  *natural = size;
}
/* Size-allocate the two blocks of a continuous bar: the empty block
 * always covers the whole trough; the filled block covers a fraction of
 * it proportional to the current value, anchored at the start (or the
 * end when inverted), and never smaller than its own minimum size. */
static void
gtk_level_bar_allocate_trough_continuous (GtkLevelBar *self,
                                          int          width,
                                          int          height,
                                          int          baseline)
{
  GtkAllocation block_area;
  double fill_percentage;
  gboolean inverted;
  int block_min;

  inverted = gtk_level_bar_get_real_inverted (self);

  /* allocate the empty (unfilled) part */
  gtk_widget_size_allocate (self->block_widget[inverted ? 0 : 1],
                            &(GtkAllocation) {0, 0, width, height},
                            baseline);

  if (self->cur_value == 0)
    return;

  /* now allocate the filled part */
  block_area = (GtkAllocation) {0, 0, width, height};

  fill_percentage = (self->cur_value - self->min_value) /
                    (self->max_value - self->min_value);

  gtk_widget_measure (self->block_widget[inverted ? 1 : 0],
                      self->orientation, -1,
                      &block_min, NULL,
                      NULL, NULL);

  if (self->orientation == GTK_ORIENTATION_HORIZONTAL)
    {
      block_area.width = (int) floor (block_area.width * fill_percentage);
      block_area.width = MAX (block_area.width, block_min);

      /* inverted bars fill from the far edge */
      if (inverted)
        block_area.x += width - block_area.width;
    }
  else
    {
      block_area.height = (int) floor (block_area.height * fill_percentage);
      block_area.height = MAX (block_area.height, block_min);

      if (inverted)
        block_area.y += height - block_area.height;
    }

  gtk_widget_size_allocate (self->block_widget[inverted ? 1 : 0],
                            &block_area,
                            baseline);
}
/* Size-allocate the blocks of a discrete bar: the trough is divided
 * evenly among the blocks along the bar's orientation, but a block is
 * never made smaller than its minimum size. */
static void
gtk_level_bar_allocate_trough_discrete (GtkLevelBar *self,
                                        int          width,
                                        int          height,
                                        int          baseline)
{
  GtkAllocation block_area;
  int num_blocks, i;
  int block_width, block_height;

  gtk_level_bar_get_min_block_size (self, &block_width, &block_height);

  num_blocks = gtk_level_bar_get_num_blocks (self);

  if (num_blocks == 0)
    return;

  if (self->orientation == GTK_ORIENTATION_HORIZONTAL)
    {
      /* int/int division already truncates toward zero for these
       * non-negative sizes; the previous floor() call on the integer
       * quotient was a no-op and only suggested float math happened. */
      block_width = MAX (block_width, width / num_blocks);
      block_height = height;
    }
  else
    {
      block_width = width;
      block_height = MAX (block_height, height / num_blocks);
    }

  block_area.x = 0;
  block_area.y = 0;
  block_area.width = block_width;
  block_area.height = block_height;

  /* lay the blocks out back to back */
  for (i = 0; i < num_blocks; i++)
    {
      gtk_widget_size_allocate (self->block_widget[i],
                                &block_area,
                                baseline);

      if (self->orientation == GTK_ORIENTATION_HORIZONTAL)
        block_area.x += block_area.width;
      else
        block_area.y += block_area.height;
    }
}
/* GtkGizmo allocate vfunc for the trough: dispatch on the bar mode. */
static void
gtk_level_bar_allocate_trough (GtkGizmo *gizmo,
                               int       width,
                               int       height,
                               int       baseline)
{
  GtkLevelBar *self = GTK_LEVEL_BAR (gtk_widget_get_parent (GTK_WIDGET (gizmo)));

  if (self->bar_mode == GTK_LEVEL_BAR_MODE_CONTINUOUS)
    {
      gtk_level_bar_allocate_trough_continuous (self, width, height, baseline);
      return;
    }

  gtk_level_bar_allocate_trough_discrete (self, width, height, baseline);
}
/* Grow or shrink the block_widget array to match the count required by
 * the current mode/range. Shrinking unparents the surplus widgets before
 * the array is reallocated; growing creates new "block" gizmos and
 * inserts them as children of the trough. */
static void
update_block_nodes (GtkLevelBar *self)
{
  guint n_blocks;
  guint i;

  n_blocks = gtk_level_bar_get_num_block_nodes (self);

  if (self->n_blocks == n_blocks)
    return;
  else if (n_blocks < self->n_blocks)
    {
      /* unparent before g_renew drops the trailing pointers */
      for (i = n_blocks; i < self->n_blocks; i++)
        {
          gtk_widget_unparent (self->block_widget[i]);
        }
      self->block_widget = g_renew (GtkWidget*, self->block_widget, n_blocks);
      self->n_blocks = n_blocks;
    }
  else
    {
      self->block_widget = g_renew (GtkWidget*, self->block_widget, n_blocks);

      for (i = self->n_blocks; i < n_blocks; i++)
        {
          self->block_widget[i] = gtk_gizmo_new_with_role ("block",
                                                           GTK_ACCESSIBLE_ROLE_NONE,
                                                           NULL, NULL, NULL, NULL, NULL, NULL);
          gtk_widget_insert_before (self->block_widget[i], GTK_WIDGET (self->trough_widget), NULL);
        }

      self->n_blocks = n_blocks;
    }
}
/* Keep the widget's top-level CSS classes (.continuous / .discrete) in
 * sync with the current bar mode. */
static void
update_mode_style_classes (GtkLevelBar *self)
{
  GtkCssNode *node = gtk_widget_get_css_node (GTK_WIDGET (self));
  GQuark discrete_q = g_quark_from_static_string ("discrete");
  GQuark continuous_q = g_quark_from_static_string ("continuous");

  if (self->bar_mode == GTK_LEVEL_BAR_MODE_CONTINUOUS)
    {
      gtk_css_node_remove_class (node, discrete_q);
      gtk_css_node_add_class (node, continuous_q);
    }
  else if (self->bar_mode == GTK_LEVEL_BAR_MODE_DISCRETE)
    {
      gtk_css_node_add_class (node, discrete_q);
      gtk_css_node_remove_class (node, continuous_q);
    }
}
/* Refresh the CSS classes on every block widget: the first num_filled
 * blocks (counted from the fill edge, so reversed when inverted) get
 * .filled plus the class named after the active offset; the rest get
 * .empty. The active offset is the lowest offset whose value is >= the
 * current value (with the previous offset strictly below it). */
static void
update_level_style_classes (GtkLevelBar *self)
{
  double value;
  const char *value_class = NULL;
  GtkLevelBarOffset *offset, *prev_offset;
  GList *l;
  int num_filled, num_blocks, i;
  gboolean inverted;

  value = gtk_level_bar_get_value (self);

  for (l = self->offsets; l != NULL; l = l->next)
    {
      offset = l->data;

      /* find the right offset for our style class */
      if (value <= offset->value)
        {
          if (l->prev == NULL)
            {
              value_class = offset->name;
            }
          else
            {
              prev_offset = l->prev->data;
              if (prev_offset->value < value)
                value_class = offset->name;
            }
        }

      if (value_class)
        break;
    }

  inverted = gtk_level_bar_get_real_inverted (self);
  num_blocks = gtk_level_bar_get_num_block_nodes (self);

  if (self->bar_mode == GTK_LEVEL_BAR_MODE_CONTINUOUS)
    num_filled = 1;
  else
    num_filled = MIN (num_blocks, (int) round (self->cur_value) - (int) round (self->min_value));

  /* mark the filled blocks, walking from the fill edge */
  for (i = 0; i < num_filled; i++)
    {
      GtkCssNode *node = gtk_widget_get_css_node (self->block_widget[inverted ? num_blocks - 1 - i : i]);

      gtk_css_node_set_classes (node, NULL);
      gtk_css_node_add_class (node, g_quark_from_static_string ("filled"));

      if (value_class)
        gtk_css_node_add_class (node, g_quark_from_string (value_class));
    }

  /* the remainder are empty */
  for (; i < num_blocks; i++)
    {
      GtkCssNode *node = gtk_widget_get_css_node (self->block_widget[inverted ? num_blocks - 1 - i : i]);

      gtk_css_node_set_classes (node, NULL);
      gtk_css_node_add_class (node, g_quark_from_static_string ("empty"));
    }
}
/* GtkWidget::direction_changed vfunc: an LTR/RTL flip changes the
 * effective inversion of a horizontal bar, so re-assign the block
 * CSS classes before chaining up. */
static void
gtk_level_bar_direction_changed (GtkWidget        *widget,
                                 GtkTextDirection  previous_dir)
{
  GtkLevelBar *self = GTK_LEVEL_BAR (widget);

  update_level_style_classes (self);

  GTK_WIDGET_CLASS (gtk_level_bar_parent_class)->direction_changed (widget, previous_dir);
}
/* Clamp every stored offset into [min_value, max_value].
 * gtk_level_bar_ensure_offset() replaces list links in place, so the
 * next pointer is saved before each call to keep the iteration valid. */
static void
gtk_level_bar_ensure_offsets_in_range (GtkLevelBar *self)
{
  GtkLevelBarOffset *offset;
  GList *l = self->offsets;

  while (l != NULL)
    {
      offset = l->data;
      l = l->next;

      if (offset->value < self->min_value)
        gtk_level_bar_ensure_offset (self, offset->name, self->min_value);
      else if (offset->value > self->max_value)
        gtk_level_bar_ensure_offset (self, offset->name, self->max_value);
    }
}
/* Parser state for the custom <offsets> GtkBuildable element. */
typedef struct {
  GtkLevelBar *self;
  GtkBuilder *builder;
  GList *offsets;   /* GtkLevelBarOffset*, collected in reverse order */
} OffsetsParserData;

/* GtkBuildableParser start_element handler: accepts an <offsets>
 * container (no attributes) directly under <object>, and <offset>
 * children carrying mandatory "name" and "value" attributes. Parsed
 * offsets are accumulated; they are applied in custom_finished. */
static void
offset_start_element (GtkBuildableParseContext  *context,
                      const char                *element_name,
                      const char               **names,
                      const char               **values,
                      gpointer                   user_data,
                      GError                   **error)
{
  OffsetsParserData *data = user_data;

  if (strcmp (element_name, "offsets") == 0)
    {
      if (!_gtk_builder_check_parent (data->builder, context, "object", error))
        return;

      if (!g_markup_collect_attributes (element_name, names, values, error,
                                        G_MARKUP_COLLECT_INVALID, NULL, NULL,
                                        G_MARKUP_COLLECT_INVALID))
        _gtk_builder_prefix_error (data->builder, context, error);
    }
  else if (strcmp (element_name, "offset") == 0)
    {
      const char *name;
      const char *value;
      GValue gvalue = G_VALUE_INIT;
      GtkLevelBarOffset *offset;

      if (!_gtk_builder_check_parent (data->builder, context, "offsets", error))
        return;

      if (!g_markup_collect_attributes (element_name, names, values, error,
                                        G_MARKUP_COLLECT_STRING, "name", &name,
                                        G_MARKUP_COLLECT_STRING, "value", &value,
                                        G_MARKUP_COLLECT_INVALID))
        {
          _gtk_builder_prefix_error (data->builder, context, error);
          return;
        }

      /* "value" is parsed as a double via the builder's converters */
      if (!gtk_builder_value_from_string_type (data->builder, G_TYPE_DOUBLE, value, &gvalue, error))
        {
          _gtk_builder_prefix_error (data->builder, context, error);
          return;
        }

      offset = gtk_level_bar_offset_new (name, g_value_get_double (&gvalue));
      data->offsets = g_list_prepend (data->offsets, offset);
    }
  else
    {
      _gtk_builder_error_unhandled_tag (data->builder, context,
                                        "GtkLevelBar", element_name,
                                        error);
    }
}
/* Only start_element is needed; offsets carry all data in attributes. */
static const GtkBuildableParser offset_parser =
{
  offset_start_element
};

static GtkBuildableIface *parent_buildable_iface;

/* GtkBuildable custom_tag_start vfunc: claims the <offsets> element
 * (when not inside a child) and hands back the offset parser; anything
 * else is delegated to the parent interface. */
static gboolean
gtk_level_bar_buildable_custom_tag_start (GtkBuildable       *buildable,
                                          GtkBuilder         *builder,
                                          GObject            *child,
                                          const char         *tagname,
                                          GtkBuildableParser *parser,
                                          gpointer           *parser_data)
{
  OffsetsParserData *data;

  if (parent_buildable_iface->custom_tag_start (buildable, builder, child,
                                                tagname, parser, parser_data))
    return TRUE;

  if (child)
    return FALSE;

  if (strcmp (tagname, "offsets") != 0)
    return FALSE;

  data = g_slice_new0 (OffsetsParserData);
  data->self = GTK_LEVEL_BAR (buildable);
  data->builder = builder;
  data->offsets = NULL;

  *parser = offset_parser;
  *parser_data = data;

  return TRUE;
}
/* GtkBuildable custom_finished vfunc: applies the offsets collected by
 * the parser to the bar (emitting offset-changed per offset), then frees
 * the parser state. Other tags are delegated to the parent interface. */
static void
gtk_level_bar_buildable_custom_finished (GtkBuildable *buildable,
                                         GtkBuilder   *builder,
                                         GObject      *child,
                                         const char   *tagname,
                                         gpointer      user_data)
{
  OffsetsParserData *data = user_data;
  GtkLevelBar *self;
  GtkLevelBarOffset *offset;
  GList *l;

  self = data->self;

  if (strcmp (tagname, "offsets") != 0)
    {
      parent_buildable_iface->custom_finished (buildable, builder, child,
                                               tagname, user_data);
      return;
    }

  for (l = data->offsets; l != NULL; l = l->next)
    {
      offset = l->data;
      gtk_level_bar_add_offset_value (self, offset->name, offset->value);
    }

  g_list_free_full (data->offsets, (GDestroyNotify) gtk_level_bar_offset_free);
  g_slice_free (OffsetsParserData, data);
}
/* GtkBuildable interface init: hook the custom <offsets> tag handlers. */
static void
gtk_level_bar_buildable_init (GtkBuildableIface *iface)
{
  parent_buildable_iface = g_type_interface_peek_parent (iface);

  iface->custom_tag_start = gtk_level_bar_buildable_custom_tag_start;
  iface->custom_finished = gtk_level_bar_buildable_custom_finished;
}
/* Internal setter backing the GtkOrientable "orientation" property. */
static void
gtk_level_bar_set_orientation (GtkLevelBar    *self,
                               GtkOrientation  orientation)
{
  if (self->orientation == orientation)
    return;

  self->orientation = orientation;
  gtk_widget_update_orientation (GTK_WIDGET (self), self->orientation);
  gtk_widget_queue_resize (GTK_WIDGET (self));
  g_object_notify (G_OBJECT (self), "orientation");
}
/* GObject get_property vfunc: forwards to the public getters. */
static void
gtk_level_bar_get_property (GObject    *obj,
                            guint       property_id,
                            GValue     *value,
                            GParamSpec *pspec)
{
  GtkLevelBar *self = GTK_LEVEL_BAR (obj);

  switch (property_id)
    {
    case PROP_VALUE:
      g_value_set_double (value, gtk_level_bar_get_value (self));
      break;
    case PROP_MIN_VALUE:
      g_value_set_double (value, gtk_level_bar_get_min_value (self));
      break;
    case PROP_MAX_VALUE:
      g_value_set_double (value, gtk_level_bar_get_max_value (self));
      break;
    case PROP_MODE:
      g_value_set_enum (value, gtk_level_bar_get_mode (self));
      break;
    case PROP_INVERTED:
      g_value_set_boolean (value, gtk_level_bar_get_inverted (self));
      break;
    case PROP_ORIENTATION:
      g_value_set_enum (value, self->orientation);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (obj, property_id, pspec);
      break;
    }
}
/* GObject set_property vfunc: forwards to the public setters. */
static void
gtk_level_bar_set_property (GObject      *obj,
                            guint         property_id,
                            const GValue *value,
                            GParamSpec   *pspec)
{
  GtkLevelBar *self = GTK_LEVEL_BAR (obj);

  switch (property_id)
    {
    case PROP_VALUE:
      gtk_level_bar_set_value (self, g_value_get_double (value));
      break;
    case PROP_MIN_VALUE:
      gtk_level_bar_set_min_value (self, g_value_get_double (value));
      break;
    case PROP_MAX_VALUE:
      gtk_level_bar_set_max_value (self, g_value_get_double (value));
      break;
    case PROP_MODE:
      gtk_level_bar_set_mode (self, g_value_get_enum (value));
      break;
    case PROP_INVERTED:
      gtk_level_bar_set_inverted (self, g_value_get_boolean (value));
      break;
    case PROP_ORIENTATION:
      gtk_level_bar_set_orientation (self, g_value_get_enum (value));
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (obj, property_id, pspec);
      break;
    }
}
/* GObject finalize vfunc: frees the offset list, unparents and frees the
 * block widgets and the trough, then chains up.
 * The loop index is now guint to match n_blocks — the previous int index
 * triggered a signed/unsigned comparison (-Wsign-compare). */
static void
gtk_level_bar_finalize (GObject *obj)
{
  GtkLevelBar *self = GTK_LEVEL_BAR (obj);
  guint i;

  g_list_free_full (self->offsets, (GDestroyNotify) gtk_level_bar_offset_free);

  for (i = 0; i < self->n_blocks; i++)
    gtk_widget_unparent (self->block_widget[i]);
  g_free (self->block_widget);

  gtk_widget_unparent (self->trough_widget);

  G_OBJECT_CLASS (gtk_level_bar_parent_class)->finalize (obj);
}
/* Class init: installs vfuncs, the offset-changed signal, the properties,
 * the bin layout manager, the "levelbar" CSS name and the METER role. */
static void
gtk_level_bar_class_init (GtkLevelBarClass *klass)
{
  GObjectClass *oclass = G_OBJECT_CLASS (klass);
  GtkWidgetClass *wclass = GTK_WIDGET_CLASS (klass);

  oclass->get_property = gtk_level_bar_get_property;
  oclass->set_property = gtk_level_bar_set_property;
  oclass->finalize = gtk_level_bar_finalize;

  wclass->direction_changed = gtk_level_bar_direction_changed;

  g_object_class_override_property (oclass, PROP_ORIENTATION, "orientation");

  /**
   * GtkLevelBar::offset-changed:
   * @self: a `GtkLevelBar`
   * @name: the name of the offset that changed value
   *
   * Emitted when an offset specified on the bar changes value.
   *
   * This typically is the result of a [method@Gtk.LevelBar.add_offset_value]
   * call.
   *
   * The signal supports detailed connections; you can connect to the
   * detailed signal "offset-changed::x" in order to only receive callbacks
   * when the value of offset "x" changes.
   */
  signals[SIGNAL_OFFSET_CHANGED] =
    g_signal_new (I_("offset-changed"),
                  GTK_TYPE_LEVEL_BAR,
                  G_SIGNAL_RUN_FIRST | G_SIGNAL_DETAILED,
                  G_STRUCT_OFFSET (GtkLevelBarClass, offset_changed),
                  NULL, NULL,
                  NULL,
                  G_TYPE_NONE,
                  1, G_TYPE_STRING);

  /**
   * GtkLevelBar:value: (attributes org.gtk.Property.get=gtk_level_bar_get_value org.gtk.Property.set=gtk_level_bar_set_value)
   *
   * Determines the currently filled value of the level bar.
   */
  properties[PROP_VALUE] =
    g_param_spec_double ("value",
                         P_("Currently filled value level"),
                         P_("Currently filled value level of the level bar"),
                         0.0, G_MAXDOUBLE, 0.0,
                         G_PARAM_READWRITE|G_PARAM_STATIC_STRINGS|G_PARAM_EXPLICIT_NOTIFY);

  /**
   * GtkLevelBar:min-value: (attributes org.gtk.Property.get=gtk_level_bar_get_min_value org.gtk.Property.set=gtk_level_bar_set_min_value)
   *
   * Determines the minimum value of the interval that can be displayed by the bar.
   */
  properties[PROP_MIN_VALUE] =
    g_param_spec_double ("min-value",
                         P_("Minimum value level for the bar"),
                         P_("Minimum value level that can be displayed by the bar"),
                         0.0, G_MAXDOUBLE, 0.0,
                         G_PARAM_READWRITE|G_PARAM_STATIC_STRINGS|G_PARAM_EXPLICIT_NOTIFY);

  /**
   * GtkLevelBar:max-value: (attributes org.gtk.Property.get=gtk_level_bar_get_max_value org.gtk.Property.set=gtk_level_bar_set_max_value)
   *
   * Determines the maximum value of the interval that can be displayed by the bar.
   */
  properties[PROP_MAX_VALUE] =
    g_param_spec_double ("max-value",
                         P_("Maximum value level for the bar"),
                         P_("Maximum value level that can be displayed by the bar"),
                         0.0, G_MAXDOUBLE, 1.0,
                         G_PARAM_READWRITE|G_PARAM_STATIC_STRINGS|G_PARAM_EXPLICIT_NOTIFY);

  /**
   * GtkLevelBar:mode: (attributes org.gtk.Property.get=gtk_level_bar_get_mode org.gtk.Property.set=gtk_level_bar_set_mode)
   *
   * Determines the way `GtkLevelBar` interprets the value properties to draw the
   * level fill area.
   *
   * Specifically, when the value is %GTK_LEVEL_BAR_MODE_CONTINUOUS,
   * `GtkLevelBar` will draw a single block representing the current value in
   * that area; when the value is %GTK_LEVEL_BAR_MODE_DISCRETE,
   * the widget will draw a succession of separate blocks filling the
   * draw area, with the number of blocks being equal to the units separating
   * the integral roundings of [property@Gtk.LevelBar:min-value] and
   * [property@Gtk.LevelBar:max-value].
   */
  properties[PROP_MODE] =
    g_param_spec_enum ("mode",
                       P_("The mode of the value indicator"),
                       P_("The mode of the value indicator displayed by the bar"),
                       GTK_TYPE_LEVEL_BAR_MODE,
                       GTK_LEVEL_BAR_MODE_CONTINUOUS,
                       G_PARAM_READWRITE|G_PARAM_STATIC_STRINGS|G_PARAM_EXPLICIT_NOTIFY);

  /**
   * GtkLevelBar:inverted: (attributes org.gtk.Property.get=gtk_level_bar_get_inverted org.gtk.Property.set=gtk_level_bar_set_inverted)
   *
   * Whether the `GtkLevelBar` is inverted.
   *
   * Level bars normally grow from top to bottom or left to right.
   * Inverted level bars grow in the opposite direction.
   */
  properties[PROP_INVERTED] =
    g_param_spec_boolean ("inverted",
                          P_("Inverted"),
                          P_("Invert the direction in which the level bar grows"),
                          FALSE,
                          G_PARAM_READWRITE|G_PARAM_STATIC_STRINGS|G_PARAM_EXPLICIT_NOTIFY);

  g_object_class_install_properties (oclass, LAST_PROPERTY, properties);

  gtk_widget_class_set_layout_manager_type (wclass, GTK_TYPE_BIN_LAYOUT);
  gtk_widget_class_set_css_name (wclass, I_("levelbar"));

  gtk_widget_class_set_accessible_role (wclass, GTK_ACCESSIBLE_ROLE_METER);
}
/* Instance init: default interval [0, 1], horizontal, continuous mode,
 * the three stock offsets (low/high/full), the trough child, and the
 * initial accessible value properties. */
static void
gtk_level_bar_init (GtkLevelBar *self)
{
  self->cur_value = 0.0;
  self->min_value = 0.0;
  self->max_value = 1.0;

  /* set initial orientation and style classes */
  self->orientation = GTK_ORIENTATION_HORIZONTAL;
  gtk_widget_update_orientation (GTK_WIDGET (self), self->orientation);

  self->inverted = FALSE;

  self->trough_widget = gtk_gizmo_new_with_role ("trough",
                                                 GTK_ACCESSIBLE_ROLE_NONE,
                                                 gtk_level_bar_measure_trough,
                                                 gtk_level_bar_allocate_trough,
                                                 gtk_level_bar_render_trough,
                                                 NULL,
                                                 NULL, NULL);
  gtk_widget_set_parent (self->trough_widget, GTK_WIDGET (self));

  /* stock offsets documented on the class: 0.25 / 0.75 / 1.0 */
  gtk_level_bar_ensure_offset (self, GTK_LEVEL_BAR_OFFSET_LOW, 0.25);
  gtk_level_bar_ensure_offset (self, GTK_LEVEL_BAR_OFFSET_HIGH, 0.75);
  gtk_level_bar_ensure_offset (self, GTK_LEVEL_BAR_OFFSET_FULL, 1.0);

  self->block_widget = NULL;
  self->n_blocks = 0;

  self->bar_mode = GTK_LEVEL_BAR_MODE_CONTINUOUS;
  update_mode_style_classes (self);

  update_block_nodes (self);
  update_level_style_classes (self);

  gtk_accessible_update_property (GTK_ACCESSIBLE (self),
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_MAX, 1.0,
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_MIN, 0.0,
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_NOW, 0.0,
                                  -1);
}
/**
 * gtk_level_bar_new:
 *
 * Creates a new `GtkLevelBar`.
 *
 * Returns: a `GtkLevelBar`.
 */
GtkWidget *
gtk_level_bar_new (void)
{
  return g_object_new (GTK_TYPE_LEVEL_BAR, NULL);
}
/**
 * gtk_level_bar_new_for_interval:
 * @min_value: a positive value
 * @max_value: a positive value
 *
 * Creates a new `GtkLevelBar` for the specified interval.
 *
 * Returns: a `GtkLevelBar`
 */
GtkWidget *
gtk_level_bar_new_for_interval (double min_value,
                                double max_value)
{
  return g_object_new (GTK_TYPE_LEVEL_BAR,
                       "min-value", min_value,
                       "max-value", max_value,
                       NULL);
}
/**
 * gtk_level_bar_get_min_value: (attributes org.gtk.Method.get_property=min-value)
 * @self: a `GtkLevelBar`
 *
 * Returns the `min-value` of the `GtkLevelBar`.
 *
 * Returns: a positive value
 */
double
gtk_level_bar_get_min_value (GtkLevelBar *self)
{
  g_return_val_if_fail (GTK_IS_LEVEL_BAR (self), 0.0);

  return self->min_value;
}
/**
 * gtk_level_bar_get_max_value: (attributes org.gtk.Method.get_property=max-value)
 * @self: a `GtkLevelBar`
 *
 * Returns the `max-value` of the `GtkLevelBar`.
 *
 * Returns: a positive value
 */
double
gtk_level_bar_get_max_value (GtkLevelBar *self)
{
  g_return_val_if_fail (GTK_IS_LEVEL_BAR (self), 0.0);

  return self->max_value;
}
/**
 * gtk_level_bar_get_value: (attributes org.gtk.Method.get_property=value)
 * @self: a `GtkLevelBar`
 *
 * Returns the `value` of the `GtkLevelBar`.
 *
 * Returns: a value in the interval between
 *   [property@Gtk.LevelBar:min-value] and [property@Gtk.LevelBar:max-value]
 */
double
gtk_level_bar_get_value (GtkLevelBar *self)
{
  g_return_val_if_fail (GTK_IS_LEVEL_BAR (self), 0.0);

  return self->cur_value;
}
/* Store the new value, notify, and schedule a trough re-allocation.
 * No clamping or CSS updates happen here — callers are responsible. */
static void
gtk_level_bar_set_value_internal (GtkLevelBar *self,
                                  double       value)
{
  self->cur_value = value;

  g_object_notify_by_pspec (G_OBJECT (self), properties[PROP_VALUE]);

  gtk_widget_queue_allocate (GTK_WIDGET (self->trough_widget));
}
/**
 * gtk_level_bar_set_min_value: (attributes org.gtk.Method.set_property=min-value)
 * @self: a `GtkLevelBar`
 * @value: a positive value
 *
 * Sets the `min-value` of the `GtkLevelBar`.
 *
 * You probably want to update preexisting level offsets after calling
 * this function.
 */
void
gtk_level_bar_set_min_value (GtkLevelBar *self,
                             double       value)
{
  g_return_if_fail (GTK_IS_LEVEL_BAR (self));
  g_return_if_fail (value >= 0.0);

  if (value == self->min_value)
    return;

  self->min_value = value;

  if (self->min_value > self->cur_value)
    gtk_level_bar_set_value_internal (self, self->min_value);

  /* Clamp stored offsets into the new range, mirroring
   * gtk_level_bar_set_max_value(). Previously only raising the maximum
   * clamped offsets, so offsets below a raised minimum stayed out of
   * range, contradicting the documented clamping behavior. */
  gtk_level_bar_ensure_offsets_in_range (self);

  update_block_nodes (self);
  update_level_style_classes (self);

  gtk_accessible_update_property (GTK_ACCESSIBLE (self),
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_MIN, self->min_value,
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_NOW, self->cur_value,
                                  -1);

  g_object_notify_by_pspec (G_OBJECT (self), properties[PROP_MIN_VALUE]);
}
/**
 * gtk_level_bar_set_max_value: (attributes org.gtk.Method.set_property=max-value)
 * @self: a `GtkLevelBar`
 * @value: a positive value
 *
 * Sets the `max-value` of the `GtkLevelBar`.
 *
 * You probably want to update preexisting level offsets after calling
 * this function.
 */
void
gtk_level_bar_set_max_value (GtkLevelBar *self,
                             double value)
{
  GtkAccessible *accessible;

  g_return_if_fail (GTK_IS_LEVEL_BAR (self));
  g_return_if_fail (value >= 0.0);

  if (self->max_value == value)
    return;

  self->max_value = value;

  /* Keep the current value inside the new interval. */
  if (self->cur_value > self->max_value)
    gtk_level_bar_set_value_internal (self, self->max_value);

  /* Re-validate the configured offsets against the new maximum. */
  gtk_level_bar_ensure_offsets_in_range (self);

  update_block_nodes (self);
  update_level_style_classes (self);

  accessible = GTK_ACCESSIBLE (self);
  gtk_accessible_update_property (accessible,
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_MAX, self->max_value,
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_NOW, self->cur_value,
                                  -1);

  g_object_notify_by_pspec (G_OBJECT (self), properties[PROP_MAX_VALUE]);
}
/**
 * gtk_level_bar_set_value: (attributes org.gtk.Method.set_property=value)
 * @self: a `GtkLevelBar`
 * @value: a value in the interval between
 *   [property@Gtk.LevelBar:min-value] and [property@Gtk.LevelBar:max-value]
 *
 * Sets the value of the `GtkLevelBar`.
 */
void
gtk_level_bar_set_value (GtkLevelBar *self,
                         double value)
{
  g_return_if_fail (GTK_IS_LEVEL_BAR (self));

  if (self->cur_value == value)
    return;

  /* Store + notify first, then refresh the fill styling and a11y state. */
  gtk_level_bar_set_value_internal (self, value);
  update_level_style_classes (self);

  gtk_accessible_update_property (GTK_ACCESSIBLE (self),
                                  GTK_ACCESSIBLE_PROPERTY_VALUE_NOW, self->cur_value,
                                  -1);
}
/**
 * gtk_level_bar_get_mode: (attributes org.gtk.Method.get_property=mode)
 * @self: a `GtkLevelBar`
 *
 * Returns the `mode` of the `GtkLevelBar`.
 *
 * Returns: a `GtkLevelBarMode` (0 if @self fails the type check)
 */
GtkLevelBarMode
gtk_level_bar_get_mode (GtkLevelBar *self)
{
  g_return_val_if_fail (GTK_IS_LEVEL_BAR (self), 0);
  return self->bar_mode;
}
/**
 * gtk_level_bar_set_mode: (attributes org.gtk.Method.set_property=mode)
 * @self: a `GtkLevelBar`
 * @mode: a `GtkLevelBarMode`
 *
 * Sets the `mode` of the `GtkLevelBar`.
 */
void
gtk_level_bar_set_mode (GtkLevelBar *self,
                        GtkLevelBarMode mode)
{
  g_return_if_fail (GTK_IS_LEVEL_BAR (self));

  if (mode == self->bar_mode)
    return;

  self->bar_mode = mode;

  /* Both the block nodes and the styling depend on the mode. */
  update_mode_style_classes (self);
  update_block_nodes (self);
  update_level_style_classes (self);
  gtk_widget_queue_resize (GTK_WIDGET (self));

  g_object_notify_by_pspec (G_OBJECT (self), properties[PROP_MODE]);
}
/**
 * gtk_level_bar_get_inverted: (attributes org.gtk.Method.get_property=inverted)
 * @self: a `GtkLevelBar`
 *
 * Returns whether the levelbar is inverted.
 *
 * Returns: %TRUE if the level bar is inverted
 */
gboolean
gtk_level_bar_get_inverted (GtkLevelBar *self)
{
  g_return_val_if_fail (GTK_IS_LEVEL_BAR (self), FALSE);
  return self->inverted;
}
/**
 * gtk_level_bar_set_inverted: (attributes org.gtk.Method.set_property=inverted)
 * @self: a `GtkLevelBar`
 * @inverted: %TRUE to invert the level bar
 *
 * Sets whether the `GtkLevelBar` is inverted.
 */
void
gtk_level_bar_set_inverted (GtkLevelBar *self,
                            gboolean inverted)
{
  g_return_if_fail (GTK_IS_LEVEL_BAR (self));

  if (inverted == self->inverted)
    return;

  self->inverted = inverted;

  /* Direction changed: relayout and restyle the fill. */
  gtk_widget_queue_resize (GTK_WIDGET (self));
  update_level_style_classes (self);

  g_object_notify_by_pspec (G_OBJECT (self), properties[PROP_INVERTED]);
}
/**
 * gtk_level_bar_remove_offset_value:
 * @self: a `GtkLevelBar`
 * @name: (nullable): the name of an offset in the bar
 *
 * Removes an offset marker from a `GtkLevelBar`.
 *
 * The marker must have been previously added with
 * [method@Gtk.LevelBar.add_offset_value]; if no marker named @name
 * exists, nothing happens.
 */
void
gtk_level_bar_remove_offset_value (GtkLevelBar *self,
                                   const char *name)
{
  GList *link;

  g_return_if_fail (GTK_IS_LEVEL_BAR (self));

  link = g_list_find_custom (self->offsets, name, offset_find_func);
  if (link == NULL)
    return;

  gtk_level_bar_offset_free (link->data);
  self->offsets = g_list_delete_link (self->offsets, link);
  update_level_style_classes (self);
}
/**
 * gtk_level_bar_add_offset_value:
 * @self: a `GtkLevelBar`
 * @name: the name of the new offset
 * @value: the value for the new offset
 *
 * Adds a new offset marker on @self at the position specified by @value.
 *
 * When the bar value is in the interval topped by @value (or between @value
 * and [property@Gtk.LevelBar:max-value] in case the offset is the last one
 * on the bar) a style class named `level-`@name will be applied
 * when rendering the level bar fill.
 *
 * If another offset marker named @name exists, its value will be
 * replaced by @value.
 */
void
gtk_level_bar_add_offset_value (GtkLevelBar *self,
                                const char *name,
                                double value)
{
  GQuark name_quark;

  g_return_if_fail (GTK_IS_LEVEL_BAR (self));
  /* @name is not nullable here: it is used as a quark/signal detail and
   * to look up/replace an existing marker. */
  g_return_if_fail (name != NULL);
  g_return_if_fail (gtk_level_bar_value_in_interval (self, value));

  /* Returns FALSE when an identical offset already exists. */
  if (!gtk_level_bar_ensure_offset (self, name, value))
    return;

  update_level_style_classes (self);
  name_quark = g_quark_from_string (name);
  g_signal_emit (self, signals[SIGNAL_OFFSET_CHANGED], name_quark, name);
}
/**
 * gtk_level_bar_get_offset_value:
 * @self: a `GtkLevelBar`
 * @name: (nullable): the name of an offset in the bar
 * @value: (out): location where to store the value
 *
 * Fetches the value specified for the offset marker @name in @self.
 *
 * Returns: %TRUE if the specified offset is found
 */
gboolean
gtk_level_bar_get_offset_value (GtkLevelBar *self,
                                const char *name,
                                double *value)
{
  GList *link;
  GtkLevelBarOffset *offset;

  g_return_val_if_fail (GTK_IS_LEVEL_BAR (self), FALSE);

  link = g_list_find_custom (self->offsets, name, offset_find_func);
  if (link == NULL)
    return FALSE;

  offset = link->data;
  if (offset == NULL)
    return FALSE;

  if (value != NULL)
    *value = offset->value;

  return TRUE;
}
| 30.046381 | 138 | 0.640004 | [
"render",
"object",
"solid"
] |
94f3dd95314ad5a435ff12bda76fcfda00cbe255 | 503 | h | C | memory_pool.h | senhorsolar/dlx | d3dd4ee77624a66668c38a7aae7a0f15c8afa80a | [
"MIT"
] | null | null | null | memory_pool.h | senhorsolar/dlx | d3dd4ee77624a66668c38a7aae7a0f15c8afa80a | [
"MIT"
] | null | null | null | memory_pool.h | senhorsolar/dlx | d3dd4ee77624a66668c38a7aae7a0f15c8afa80a | [
"MIT"
] | null | null | null | // Simple memory pool for dlx nodes
#ifndef MEMORY_POOL_H
#define MEMORY_POOL_H
#include <vector>
namespace Dlx {
template<class T>
class MemoryPool
{
public:

	// Construct an empty pool; call Resize() before New().
	MemoryPool() = default;

	// Construct a pool holding slots for n objects.
	MemoryPool(size_t n)
	{
		m_data.resize(n);
	}

	// Change the pool capacity. NOTE(review): resizing may reallocate
	// the backing vector, invalidating pointers previously returned
	// by New() — confirm callers never resize after allocating.
	void Resize(size_t n) { m_data.resize(n); }

	// Hand out the next unused slot, or nullptr when the pool is
	// exhausted. Slots are never reclaimed individually.
	T* New()
	{
		return (m_ptr < m_data.size()) ? &m_data[m_ptr++] : nullptr;
	}

private:

	std::vector<T> m_data;   // backing storage for all slots
	size_t m_ptr = 0;        // index of the next free slot
};
} // namespace
#endif
| 12.575 | 47 | 0.60835 | [
"vector"
] |
a2285f60b72dae76a0fba7edae78274e1c655e06 | 4,553 | h | C | fitp/fitp.h | BeeeOn/fitplib | 71f8ab7ca2a35d97a9f56a9c8aac3b25fde0cb9f | [
"BSD-3-Clause"
] | null | null | null | fitp/fitp.h | BeeeOn/fitplib | 71f8ab7ca2a35d97a9f56a9c8aac3b25fde0cb9f | [
"BSD-3-Clause"
] | null | null | null | fitp/fitp.h | BeeeOn/fitplib | 71f8ab7ca2a35d97a9f56a9c8aac3b25fde0cb9f | [
"BSD-3-Clause"
] | null | null | null | /**
* @file fitp.h
*/
//#ifndef FITP_LAYER_H
//#define FITP_LAYER_H
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <string>
#include <map>
#include <vector>
#include "fitp/common/phy_layer/phy.h"
#include "pan/link_layer/link.h"
//#include "pan/net_layer/net.h"
//#include "net_common.h"
#include <unistd.h>
#include <mutex>
#include <deque>
#include <condition_variable>
/*! end device ID in case of addressing using coordinator ID */
#define FITP_DIRECT_COORD (uint8_t*)"\x00\x00\x00\x00"
//uint8_t FITP_ED_ALL[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
/*! broadcast address */
#define FITP_COORD_ALL 0x3F
/*! MOVE REQUEST message */
#define FITP_MOVE_REQUEST 0x00
/*! MOVE RESPONSE message */
#define FITP_MOVE_RESPONSE 0x01
/*! MOVE RESPONSE RESPONSE message */
#define FITP_MOVE_RESPONSE_ROUTE 0x02
#define MAX_DATA_LENGTH 32
#define MAX_MESSAGES 10
/* Message types carried in the fitp header. */
enum fitp_packet_type {
	FITP_DATA = 0x00,        /* plain data message */
	FITP_DATA_DR = 0x01,     /* data message with delivery report */
	FITP_JOIN_REQUEST = 0x03 /* device requesting to join the network */
};

/* One received message queued for processing (up to MAX_MESSAGES). */
struct fitp_received_messages_t {
	fitp_packet_type msg_type;       /* see fitp_packet_type above */
	uint8_t data[MAX_DATA_LENGTH];   /* payload bytes, len of them valid */
	uint8_t len;                     /* number of valid bytes in data */
	uint8_t sedid[4];                /* source end-device ID */
	uint8_t device_type;             /* sender's device type */
};

/* Kind of device in the network, as reported by fitp_device_list(). */
enum DeviceType {
	NONE,
	END_DEVICE,
	COORDINATOR,
};
extern bool array_cmp (uint8_t* array1, uint8_t* array2);
/**
* Ensures initialization of network, link and physical layer.
* @param phy_params Parameters of physical layer.
* @param link_params Parameters of link layer.
*/
void fitp_init (struct PHY_init_t *phy_params, struct LINK_init_t *link_params);
/**
* Ensures deinitialization of network, link and physical layer.
* Releasing allocated resources and terminating running threads.
*/
void fitp_deinit ();
/**
* Returns a protocol version.
*/
std::string fitp_version();
/**
* Sends data.
* @param tocoord Destination coordinator ID.
* @param toed Destination end device ID.
* @param data Data.
* @param len Data length.
* @return Returns true if data sending is successful, false otherwise.
*/
bool fitp_send (uint8_t tocoord, uint8_t * toed, uint8_t * data, uint8_t len);
/**
* Checks if end device has been already joined a network.
* @return Returns true if end device has been already joined a network, false otherwise.
*/
bool fitp_joined ();
extern void fitp_received (const uint8_t from_cid,
const uint8_t from_edid[4], const uint8_t * data,
const uint8_t len);
extern bool NET_accept_device (uint8_t parent_cid);
extern void fitp_notify_send_done();
/**
* Enables pair mode.
* @param timeout Duration of pair mode (in seconds).
*/
void fitp_joining_enable(uint8_t timeout);
/**
* Disables pair mode.
*/
void fitp_joining_disable();
/**
* Removes device from network.
* @param edid End device ID.
* @return Returns true if device is successfully removed from network.
*/
bool fitp_unpair(uint32_t edid);
/**
* Processes received data.
* @param data Data sent from end device.
*/
void NET_received_data(uint8_t* data);
/**
* Reacts to listen command sent from server.
* @param timeout Duration of pair mode (in seconds).
*/
void fitp_listen(int timeout);
/**
* Reacts to accept command sent from server.
* @param edid Destination end device ID.
*/
void NET_accepted_device(uint8_t* edid);
/**
* Reacts to unpair command sent from server.
* @param edid Destination end device ID.
* @return Return true, if end device is unpaired successfully, false otherwise.
*/
bool NET_unpair(uint8_t* edid);
/**
* Processes received data.
* @param data Data sent from end device.
*/
void fitp_received_data(std::vector<uint8_t> &data);
/**
* Reacts to accept command sent from server.
* @param edid Destination end device ID.
*/
void fitp_accepted_device(std::vector<uint8_t> edid);
/**
* Checks if message type is DATA or DATA_DR.
* @param data Data containing message type.
* @return Return true, if message type is DATA or DATA_DR, false otherwise.
*/
bool isDataMessage(const std::vector <uint8_t> &data);
/**
* Checks if message type is JOIN_REQUEST.
* @param data Data containing message type.
* @return Return true, if message type is JOIN_REQUEST, false otherwise.
*/
bool isJoinMessage(const std::vector <uint8_t> &data);
std::map<uint64_t, DeviceType> fitp_device_list();
void print_device_table();
bool save_device_table();
bool add_device (uint8_t* edid, uint8_t cid, uint8_t parent_cid, bool sleepy, bool coord);
bool fitp_is_coord(uint8_t * edid, uint8_t cid);
void fitp_set_config_path(const std::string &configPath);
double fitp_get_measured_noise();
void fitp_set_nid(uint32_t nid);
//#endif
| 24.744565 | 90 | 0.731386 | [
"vector"
] |
a22b27a4ad237909e10b9f07b9a1a91696e31883 | 2,327 | h | C | 3rd_party/nek5000_parRSB/src/precond/genmap-multigrid-precon.h | roystgnr/nekRS | 280acd21c3088d7658a8a113e544fce05853d7b4 | [
"BSD-3-Clause"
] | null | null | null | 3rd_party/nek5000_parRSB/src/precond/genmap-multigrid-precon.h | roystgnr/nekRS | 280acd21c3088d7658a8a113e544fce05853d7b4 | [
"BSD-3-Clause"
] | null | null | null | 3rd_party/nek5000_parRSB/src/precond/genmap-multigrid-precon.h | roystgnr/nekRS | 280acd21c3088d7658a8a113e544fce05853d7b4 | [
"BSD-3-Clause"
] | null | null | null | #ifndef _GENMAP_PRECON_H_
#define _GENMAP_PRECON_H_
#include <genmap-impl.h>
typedef struct csr_mat_ *csr_mat;
typedef struct mgData_ *mgData;
typedef struct mgLevel_ *mgLevel;
/* Distributed CSR (compressed sparse row) matrix.
 * NOTE(review): field meanings inferred from names/usage — confirm:
 *   rn        - number of locally stored rows
 *   row_start - global index of the first local row
 *   row_off   - per-row offsets into col/v
 *   col       - global column indices of the nonzeros
 *   v, diag   - nonzero values and diagonal entries
 *   gsh       - gather-scatter handle used when applying the matrix */
struct csr_mat_{
  uint rn;
  ulong row_start;
  uint *row_off;
  ulong *col;
  GenmapScalar *v,*diag;
  struct gs_data *gsh;
};
// for the coarse level
void csr_mat_setup(struct array *entries,struct comm *c,csr_mat *M);
void csr_mat_apply(GenmapScalar *y,csr_mat M,GenmapScalar *x);
void csr_mat_print(csr_mat M,struct comm *c);
int csr_mat_free(csr_mat M);
void csr_mat_gather(csr_mat M,struct gs_data *gsh,GenmapScalar *x,
GenmapScalar *buf,buffer *bfr);
struct gs_data *get_csr_top(csr_mat M,struct comm *c);
/* One level of the multigrid hierarchy. */
struct mgLevel_{
  mgData data;        /* back-pointer to the owning hierarchy */
  int nsmooth;        /* number of smoothing iterations on this level */
  GenmapScalar sigma; /* smoothing damping factor — confirm semantics */
  struct gs_data *J; // interpolation from level i to i+1
  struct gs_data *Q; // global to local conversion of a vector
  csr_mat M;          /* operator matrix for this level */
};

/* Whole multigrid hierarchy plus scratch vectors shared by all levels. */
struct mgData_{
  struct comm c;        /* communicator the hierarchy lives on */
  genmap_handle h;
  struct gs_data *top;
  buffer bfr;           /* scratch buffer for gslib operations */
  int nlevels;          /* number of levels allocated in `levels` */
  mgLevel *levels;
  uint *level_off;      /* per-level offsets into the work vectors below */
  GenmapScalar *y,*x,*b,*u,*rhs,*buf; /* work vectors sized by level_off */
};
void mgSetup(GenmapComm c,csr_mat M,mgData *d);
void mgLevelSetup(mgData data,uint level);
void mgFree(mgData d);
int log2i(sint i);
/* Matrix entry keyed by (row, column) with a destination processor,
 * used when redistributing entries across ranks. */
typedef struct{
  ulong r,c;   /* global row/column ids */
  uint proc;   /* destination processor */
} csr_entry;
/* Full entry record: (row, column), their owning ranks rn/cn,
 * destination processor p and the numeric value v. */
typedef struct{
  ulong r,c,rn,cn;
  uint p;
  GenmapScalar v;
} entry;
/* Access an ulong field of the i-th entry record at byte offset `off`.
 * NOTE(review): this dereferences through a byte offset; `off` must keep
 * the ulong naturally aligned or the access is UB on strict platforms. */
#define GETLNG(p,i,off) (*((ulong*)((char*)(p)+(off)+(i)*sizeof(entry))))
#define GETPTR(p,i,off) ((char*)(p)+(off)+(i)*sizeof(entry))
void setOwner(char *ptr,sint n,size_t inOffset,size_t outOffset,
slong lelg,sint np);
void mg_vcycle(GenmapScalar *u,GenmapScalar *rhs,mgData d);
void mg_vcycle_lvl(GenmapScalar *u1,GenmapScalar *rhs,mgData d,
int lvl_start);
int flex_cg(genmap_handle h,GenmapComm c,mgData d,GenmapVector r,
int maxIter,int verbose,GenmapVector x);
int project_pf(genmap_handle h,GenmapComm c,mgData d,GenmapVector r,
int maxIter,int verbose,GenmapVector x);
int project_pf_lvl(genmap_handle h,GenmapComm c,mgData d,GenmapScalar *ri,
int maxIter,int verbose,int lvl_start,GenmapScalar *xo);
int rqi(genmap_handle h,GenmapComm c,mgData d,GenmapVector z,
int maxIter,int verbose,GenmapVector fiedler);
int fmg(genmap_handle h,GenmapComm c,mgData d,GenmapScalar *z,
int maxIter,int verbose,GenmapScalar *fiedler);
#endif
| 25.293478 | 74 | 0.739579 | [
"vector"
] |
a22bf2a588c712b53acd0e310645d3fa675ee6ea | 4,612 | h | C | src/necsim/file_system.h | thompsonsed/rcoalescence | 28947f39e2ed6a075642ae613ec2a415ac500d51 | [
"MIT"
] | 1 | 2021-03-03T07:20:27.000Z | 2021-03-03T07:20:27.000Z | src/necsim/file_system.h | thompsonsed/rcoalescence | 28947f39e2ed6a075642ae613ec2a415ac500d51 | [
"MIT"
] | 1 | 2021-06-23T00:07:20.000Z | 2021-06-25T09:11:33.000Z | src/necsim/file_system.h | thompsonsed/rcoalescence | 28947f39e2ed6a075642ae613ec2a415ac500d51 | [
"MIT"
] | null | null | null | // This file is part of necsim project which is released under MIT license.
// See file **LICENSE.txt** or visit https://opensource.org/licenses/MIT) for full license details.
/**
* @author Samuel Thompson
* @date 19/07/2017
* @file Filesystem.h
*
* @copyright <a href="https://opensource.org/licenses/MIT"> MIT Licence.</a>
* @brief Contains routines for checking files and folder exist, opening sqlite databases safely, with support for various
* virtual filesystems, and checking parents of a file exist.
*
* Contact: samuel.thompson14@imperial.ac.uk or thompsonsed@gmail.com
*/
#define _USE_MATH_DEFINES
#include <cmath>
#include <sqlite3.h>
#include <string>
#include <cstdio>
#include <iostream>
#include <vector>
#ifndef SPECIATIONCOUNTER_FILESYSTEM_H
#define SPECIATIONCOUNTER_FILESYSTEM_H
using std::string;
namespace necsim
{
/**
* @brief Safely opens a connection to the provided SQLite database.
*
* Adds type safety for usage on different filesystems.
* @param database_name
* @param database
*/
void openSQLiteDatabase(const string &database_name, sqlite3*&database);
/**
* @brief Checks that parent folder to the supplied file exists, and if it doesn't creates it.
* @param file the file path to check for
*/
void createParent(string file);
/**
* @brief Checks the existance of a file on the hard drive.
* @param testfile the file to examine
* @return should always return true, or raise an error (if the file doesn't exist)
*/
bool doesExist(string testfile);
/**
* @brief Checks for the existance of a file, but returns true if the file name is 'null'.
* Note: this function just calls doesExist().
* @param testfile the file to examine
* @return if true, file exists (or is null).
*/
bool doesExistNull(string testfile);
/**
* @brief Generates a unique ID for the pair of provided parameters.
*
* Maps ZxZ -> N, so only relevant for positive numbers.
For any A and B, generates C such that no D and E produce C unless D=A and B=E.
*@deprecated Should not be used for large integers, or of unknown size, as integer overflows are likely. Cantor pairing
* explodes in size of return value.
*
* @param x1 the first integer reference
* @param x2 the second integer reference
* @return a unique reference for the two provided integers
*/
unsigned long cantorPairing(const unsigned long &x1, const unsigned long &x2);
/**
* @brief A more elegant version of cantor pairing, which allows for storing of a greater number of digits without
* experiencing integer overflow issues.
*
* @note For scenarios when performance is critical, and the integers are known to be small, cantorPairing provides
* equivalent functionality.
*
* @param x1 the first integer
* @param x2
* @return
*/
unsigned long elegantPairing(const unsigned long &x1, const unsigned long &x2);
/**
* @brief Gets the next line from a csv filestream and splits the row into a vector of strings, where each string is the
* value from the csv file, delimited by a comma (i.e. each column of the row).
* @param str the input stream from the csv file.
* @return a vector where each element corresponds to the respective row from the csv.
*/
std::vector<string> getCsvLineAndSplitIntoTokens(std::istream &str);
/**
 * @brief Overload the output operator for vectors.
 *
 * Serialized form is "<count>,<item0>,<item1>,...," — the element count
 * comes first and every token (count included) is followed by a comma,
 * matching what the extraction operator below expects.
 *
 * @tparam T the template type of the vector
 * @param os the output stream to write to
 * @param v the vector to write out
 * @return the modified output operator
 */
template<class T> std::ostream &operator<<(std::ostream &os, const std::vector<T> &v)
{
	os << v.size() << ",";
	for(unsigned long i = 0; i < v.size(); i++)
	{
		os << v[i] << ",";
	}
	return os;
}
/**
 * @brief Overloaded input stream operator for a vector of objects.
 *
 * Reads the format produced by the matching output operator:
 * "<count>,<item0>,<item1>,...," (a comma follows every token).
 *
 * @tparam T the objects stored in a vector
 * @param is the input stream
 * @param v the vector of objects to read in to
 * @return the input stream
 */
template<class T> std::istream &operator>>(std::istream &is, std::vector<T> &v)
{
	char delim;
	/* The writer emits v.size() (a size_t), so read the count back as an
	 * unsigned type: reading into a signed int could overflow for large
	 * vectors or accept a nonsensical negative count before resize(). */
	unsigned long n;
	is >> n;
	v.resize(n);
	is >> delim;
	for(unsigned long c = 0; c < n; c++)
	{
		is >> v[c];
		is >> delim;
	}
	return is;
}
}
#endif //SPECIATIONCOUNTER_FILESYSTEM_H
| 33.664234 | 124 | 0.652212 | [
"vector"
] |
a235a25e0c4ef62f8b749847d2719dc08c6513ae | 3,975 | h | C | src/CANJagServer/CANJaguarServer.h | ZjlZ/FRC-2605-Robot-Code-2014 | 5903f2640edac30fc7cbbf18fe8eb8c70125eb69 | [
"BSD-2-Clause"
] | 1 | 2017-12-27T21:34:42.000Z | 2017-12-27T21:34:42.000Z | src/CANJagServer/CANJaguarServer.h | ZjlZ/FRC-2605-Robot-Code-2014 | 5903f2640edac30fc7cbbf18fe8eb8c70125eb69 | [
"BSD-2-Clause"
] | null | null | null | src/CANJagServer/CANJaguarServer.h | ZjlZ/FRC-2605-Robot-Code-2014 | 5903f2640edac30fc7cbbf18fe8eb8c70125eb69 | [
"BSD-2-Clause"
] | null | null | null | #ifndef SHS_2605_CANJAGUAR_SERVER_H
#define SHS_2605_CANJAGUAR_SERVER_H
/*
* Copyright (C) 2014 Liam Taylor
* FRC Team Sehome Semonsters 2605
*/
#include "WPILib.h"
#include "src/CANJaguarUtils/CANJaguarUtils.h"
#include "src/Util/Vector.h"
#include "src/Logging/Logger.h"
#define CANJAGSERVER_PARSE_TIMEOUT_DEFAULT 100
#define CANJAGSERVER_COMMAND_TIMEOUT_DEFAULT 200
#define CANJAGSERVER_CHECKINTERVAL_DEFAULT 0.1
#define CANJAGSERVER_CANBUS_UPDATEINTERVAL_DEFAULT 0.01
#define CANJAGSERVER_MESSAGEQUEUE_LENGTH 200
#define CANJAGSERVER_PRIORITY 10
#define CANJAGSERVER_STACKSIZE 0x40000
/*
* This "server" is essentially a motor control thread. It implements a continuous message loop for receiving commands to send to the jaguars.
* I initially implemented it as a way to keep CANJaguar objects in one thread context, due to problems with cross-thread usage not properly setting speeds.
*/
class CANJaguarServer
{
public:

	CANJaguarServer ( bool DoBrownOutCHeck = true, double BrownOutCheckInterval = CANJAGSERVER_CHECKINTERVAL_DEFAULT, double CANBusUpdateInterval = CANJAGSERVER_CANBUS_UPDATEINTERVAL_DEFAULT, uint32_t CommandTimeout = CANJAGSERVER_COMMAND_TIMEOUT_DEFAULT, uint32_t ParseTimeout = CANJAGSERVER_PARSE_TIMEOUT_DEFAULT );
	~CANJaguarServer ();

	// Tuning knobs for the server's message loop and health checks.
	void SetParseMessageTimeout ( uint32_t ParseTimeout );
	void SetCommandMessageTimeout ( uint32_t CommandTimeout );
	void SetBrownOutCheckEnabled ( bool DoBrownOutCheck );
	void SetJagCheckInterval ( double Interval );
	void SetCANBusUpdateInterval ( double Interval );

	// Lifecycle of the server task.
	bool Start ();
	void Stop ();
	bool WaitForServerActive ();
	// NOTE(review): returns void although the name suggests a status
	// query — confirm intended signature before relying on it.
	void IsRunning ();

	// Commands posted to the server task for individual Jaguars.
	void AddJag ( CAN_ID ID, CANJagConfigInfo Info );
	void RemoveJag ( CAN_ID ID );
	void DisableJag ( CAN_ID ID );
	void EnableJag ( CAN_ID ID, double EncoderInitialPosition = 0.0 );
	void ConfigJag ( CAN_ID, CANJagConfigInfo );
	void SetJag ( CAN_ID ID, float Speed, uint8_t SyncGroup = 0 );

	// Synchronous queries routed through the server task.
	float GetJag ( CAN_ID ID );
	float GetJagPosition ( CAN_ID ID );
	float GetJagBusVoltage ( CAN_ID ID );
	float GetJagOutputVoltage ( CAN_ID ID );
	float GetJagOutputCurrent ( CAN_ID ID );

	bool CheckSendError ();
	void ClearSendError ();

	void UpdateJagSyncGroup ( uint8_t SyncGroup );

	void RunLoop ();

	// Message opcodes understood by the server's receive loop.
	enum CANJagServerSendMessageType
	{
		SEND_MESSAGE_NOP = 0,
		SEND_MESSAGE_JAG_DISABLE,
		SEND_MESSAGE_JAG_ENABLE,
		SEND_MESSAGE_JAG_GET,
		SEND_MESSAGE_JAG_SET,
		SEND_MESSAGE_JAG_ADD,
		SEND_MESSAGE_JAG_REMOVE,
		SEND_MESSAGE_JAG_CONFIG,
		SEND_MESSAGE_JAG_UPDATE_SYNC_GROUP,
		SEND_MESSAGE_JAG_GET_BUS_VOLTAGE,
		SEND_MESSAGE_JAG_GET_OUTPUT_VOLTAGE,
		SEND_MESSAGE_JAG_GET_OUTPUT_CURRENT,
		SEND_MESSAGE_JAG_GET_POSITION,
		SEND_MESSAGE_WAIT_SERVER_UP,
	};

	// Envelope placed on the message queues: an opcode plus a payload
	// word (typically a pointer to one of the structs below).
	typedef struct CANJagServerMessage
	{
		uint32_t Command;
		uint32_t Data;
	} CanJagServerMessage;

	typedef struct EnableCANJagMessage
	{
		CAN_ID ID;
		double EncoderInitialPosition;
	} EnableCANJagMessage;

	// Server-side bookkeeping for one managed Jaguar.
	typedef struct ServerCANJagInfo
	{
		CAN_ID ID;
		CANJaguar * Jag;
		CANJagConfigInfo Info;
	} ServerCanJagInfo;

	typedef struct SetCANJagMessage
	{
		CAN_ID ID;
		float Speed;
		uint8_t SyncGroup;
	} SetJagMessage;

	typedef struct ConfigCANJagMessage
	{
		CAN_ID ID;
		CANJagConfigInfo Config;
	} ConfigCANJagMessage;

	typedef ConfigCANJagMessage AddCANJagMessage;

	// FIX: the original `typedef struct GetCANJagMessage { ... };` had no
	// typedef-name (a hard error in C, "typedef ignored" warning in C++);
	// the name the later aliases rely on is now declared explicitly.
	typedef struct GetCANJagMessage
	{
		CAN_ID ID;
		float Value;
	} GetCANJagMessage;

	typedef GetCANJagMessage GetCANJagPositionMessage;
	typedef GetCANJagMessage GetCANJagBusVoltageMessage;
	typedef GetCANJagMessage GetCANJagOutputVoltageMessage;
	typedef GetCANJagMessage GetCANJagOutputCurrentMessage;

private:

	bool Running;
	bool SendError;

	Task * ServerTask;

	// Queues carrying CANJagServerMessage envelopes to/from the task.
	MSG_Q_ID MessageSendQueue;
	MSG_Q_ID MessageReceiveQueue;

	SEM_ID ResponseSemaphore;

	double CANUpdateInterval;
	double JagCheckInterval;

	bool CheckJags;

	uint32_t ParseWait;
	uint32_t CommandWait;

	Vector <ServerCanJagInfo> * Jags;

	// Trampoline used as the task entry point.
	static void _StartServerTask ( CANJaguarServer * Server );

	Logger * Log;

};
#endif
| 22.206704 | 314 | 0.794717 | [
"vector"
] |
a23de61c80acec7a10ce3aa49a47113999ec0479 | 34,731 | h | C | src/frr/zebra/zebra_dplane.h | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | src/frr/zebra/zebra_dplane.h | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | src/frr/zebra/zebra_dplane.h | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | /*
* Zebra dataplane layer api interfaces.
* Copyright (c) 2018 Volta Networks, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _ZEBRA_DPLANE_H
#define _ZEBRA_DPLANE_H 1
#include "lib/zebra.h"
#include "lib/prefix.h"
#include "lib/nexthop.h"
#include "lib/nexthop_group.h"
#include "lib/queue.h"
#include "lib/vlan.h"
#include "zebra/zebra_ns.h"
#include "zebra/rib.h"
#include "zebra/zserv.h"
#include "zebra/zebra_mpls.h"
#include "zebra/zebra_nhg.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Key netlink info from zebra ns: a snapshot of the per-namespace data
 * the dataplane needs without reaching back into the zebra_ns struct. */
struct zebra_dplane_info {
	ns_id_t ns_id;     /* network namespace identifier */

#if defined(HAVE_NETLINK)
	struct nlsock nls; /* netlink socket copied from the zebra ns */
	bool is_cmd;       /* true when nls is the command socket */
#endif
};
/* Utility to fill in zns info from main zns struct: copies the namespace
 * id and, on netlink platforms, selects either the command socket or the
 * event socket depending on @is_cmd. */
static inline void
zebra_dplane_info_from_zns(struct zebra_dplane_info *zns_info,
			   const struct zebra_ns *zns, bool is_cmd)
{
	zns_info->ns_id = zns->ns_id;

#if defined(HAVE_NETLINK)
	zns_info->is_cmd = is_cmd;
	zns_info->nls = is_cmd ? zns->netlink_cmd : zns->netlink;
#endif /* NETLINK */
}
/*
* Notify dplane when namespaces are enabled and disabled. The dplane
* needs to start and stop reading incoming events from the ns.
*/
void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled);
/*
* Result codes used when returning status back to the main zebra context.
*/
/*
* Philosophy Note:
*
* Flags being SET/UNSET do not belong in the South Bound
* Interface. This Setting belongs at the calling level
* because we can and will have multiple different interfaces
* and we will have potentially multiple different
* modules/filters to call. As such Setting/Unsetting
* success failure should be handled by the caller.
*/
/* Outcome of a single install/delete operation, reported back to the
 * main zebra context (see the philosophy note above: success/failure
 * handling belongs to the caller). */
enum zebra_dplane_status {
	ZEBRA_DPLANE_STATUS_NONE = 0,
	ZEBRA_DPLANE_INSTALL_SUCCESS,
	ZEBRA_DPLANE_INSTALL_FAILURE,
	ZEBRA_DPLANE_DELETE_SUCCESS,
	ZEBRA_DPLANE_DELETE_FAILURE,
};

/* Disposition of a request handed to the dataplane: queued for async
 * processing, or completed immediately with success/failure. */
enum zebra_dplane_result {
	ZEBRA_DPLANE_REQUEST_QUEUED,
	ZEBRA_DPLANE_REQUEST_SUCCESS,
	ZEBRA_DPLANE_REQUEST_FAILURE,
};
/*
* API between the zebra dataplane system and the main zebra processing
* context.
*/
/*
 * Operations that the dataplane can process.
 * When adding an operation, keep dplane_op2str() (declared below) in
 * sync so new ops render correctly in logs/show output.
 */
enum dplane_op_e {
	DPLANE_OP_NONE = 0,

	/* Route update */
	DPLANE_OP_ROUTE_INSTALL,
	DPLANE_OP_ROUTE_UPDATE,
	DPLANE_OP_ROUTE_DELETE,
	DPLANE_OP_ROUTE_NOTIFY,

	/* Nexthop update */
	DPLANE_OP_NH_INSTALL,
	DPLANE_OP_NH_UPDATE,
	DPLANE_OP_NH_DELETE,

	/* LSP update */
	DPLANE_OP_LSP_INSTALL,
	DPLANE_OP_LSP_UPDATE,
	DPLANE_OP_LSP_DELETE,
	DPLANE_OP_LSP_NOTIFY,

	/* Pseudowire update */
	DPLANE_OP_PW_INSTALL,
	DPLANE_OP_PW_UNINSTALL,

	/* System route notification */
	DPLANE_OP_SYS_ROUTE_ADD,
	DPLANE_OP_SYS_ROUTE_DELETE,

	/* Interface address update */
	DPLANE_OP_ADDR_INSTALL,
	DPLANE_OP_ADDR_UNINSTALL,

	/* MAC address update */
	DPLANE_OP_MAC_INSTALL,
	DPLANE_OP_MAC_DELETE,

	/* EVPN neighbor updates */
	DPLANE_OP_NEIGH_INSTALL,
	DPLANE_OP_NEIGH_UPDATE,
	DPLANE_OP_NEIGH_DELETE,

	/* EVPN VTEP updates */
	DPLANE_OP_VTEP_ADD,
	DPLANE_OP_VTEP_DELETE,

	/* Policy based routing rule update */
	DPLANE_OP_RULE_ADD,
	DPLANE_OP_RULE_DELETE,
	DPLANE_OP_RULE_UPDATE,

	/* Link layer address discovery */
	DPLANE_OP_NEIGH_DISCOVER,

	/* bridge port update */
	DPLANE_OP_BR_PORT_UPDATE,

	/* Policy based routing iptable update */
	DPLANE_OP_IPTABLE_ADD,
	DPLANE_OP_IPTABLE_DELETE,

	/* Policy based routing ipset update */
	DPLANE_OP_IPSET_ADD,
	DPLANE_OP_IPSET_DELETE,
	DPLANE_OP_IPSET_ENTRY_ADD,
	DPLANE_OP_IPSET_ENTRY_DELETE,

	/* LINK LAYER IP address update */
	DPLANE_OP_NEIGH_IP_INSTALL,
	DPLANE_OP_NEIGH_IP_DELETE,

	DPLANE_OP_NEIGH_TABLE_UPDATE,
	DPLANE_OP_GRE_SET,

	/* Incoming interface address events */
	DPLANE_OP_INTF_ADDR_ADD,
	DPLANE_OP_INTF_ADDR_DEL,
};
/*
* The vxlan/evpn neighbor management code needs some values to use
* when programming neighbor changes. Offer some platform-neutral values
* here for use within the dplane apis and plugins.
*/
/* Neighbor cache flags */
#define DPLANE_NTF_EXT_LEARNED 0x01
#define DPLANE_NTF_ROUTER 0x02
#define DPLANE_NTF_USE 0x04
/* Neighbor cache states */
#define DPLANE_NUD_REACHABLE 0x01
#define DPLANE_NUD_STALE 0x02
#define DPLANE_NUD_NOARP 0x04
#define DPLANE_NUD_PROBE 0x08
#define DPLANE_NUD_INCOMPLETE 0x10
#define DPLANE_NUD_PERMANENT 0x20
#define DPLANE_NUD_FAILED 0x40
/* MAC update flags - dplane_mac_info.update_flags */
#define DPLANE_MAC_REMOTE (1 << 0)
#define DPLANE_MAC_WAS_STATIC (1 << 1)
#define DPLANE_MAC_SET_STATIC (1 << 2)
#define DPLANE_MAC_SET_INACTIVE (1 << 3)
/* Neigh update flags - dplane_neigh_info.update_flags */
#define DPLANE_NEIGH_REMOTE (1 << 0)
#define DPLANE_NEIGH_WAS_STATIC (1 << 1)
#define DPLANE_NEIGH_SET_STATIC (1 << 2)
#define DPLANE_NEIGH_SET_INACTIVE (1 << 3)
#define DPLANE_NEIGH_NO_EXTENSION (1 << 4)
#define DPLANE_BR_PORT_NON_DF (1 << 0)
/* Enable system route notifications */
void dplane_enable_sys_route_notifs(void);
/*
* The dataplane context struct is used to exchange info between the main zebra
* context and the dataplane module(s). If these are two independent pthreads,
* they cannot share existing global data structures safely.
*/
/* Define a tailq list type for context blocks. The list is exposed/public,
* but the internal linkage in the context struct is private, so there
* are accessor apis that support enqueue and dequeue.
*/
TAILQ_HEAD(dplane_ctx_q, zebra_dplane_ctx);
/* Declare a type for (optional) extended interface info objects. */
TAILQ_HEAD(dplane_intf_extra_q, dplane_intf_extra);
/* Allocate a context object */
struct zebra_dplane_ctx *dplane_ctx_alloc(void);
/*
* Reset an allocated context object for re-use. All internal allocations are
* freed.
*/
void dplane_ctx_reset(struct zebra_dplane_ctx *ctx);
/*
* Allow zebra code to walk the queue of pending contexts, evaluate each one
* using a callback function. The caller can supply an optional void* arg also.
* If the function returns 'true', the context will be dequeued and freed
* without being processed.
*/
int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
void *arg), void *val);
/* Return a dataplane results context block after use; the caller's pointer will
* be cleared.
*/
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx);
/* Enqueue a context block to caller's tailq. This exists so that the
* context struct can remain opaque.
*/
void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
const struct zebra_dplane_ctx *ctx);
/* Append a list of context blocks to another list - again, just keeping
* the context struct opaque.
*/
void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
struct dplane_ctx_q *from_list);
/* Dequeue a context block from the head of caller's tailq */
struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q);
/*
* Accessors for information from the context object
*/
enum zebra_dplane_result dplane_ctx_get_status(
const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
enum zebra_dplane_result status);
const char *dplane_res2str(enum zebra_dplane_result res);
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op);
const char *dplane_op2str(enum dplane_op_e op);
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
const struct prefix *dest);
const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_ifname(struct zebra_dplane_ctx *ctx, const char *ifname);
ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_ifindex(struct zebra_dplane_ctx *ctx, ifindex_t ifindex);
/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx);
/* Providers running before the kernel can control whether a kernel
* update should be done.
*/
void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx);
bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx);
/* Source prefix is a little special - use convention to return NULL
* to mean "no src prefix"
*/
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src);
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf);
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx);
/* In some paths we have only a namespace id */
void dplane_ctx_set_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t nsid);
ns_id_t dplane_ctx_get_ns_id(const struct zebra_dplane_ctx *ctx);
bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
uint32_t id);
uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx);
/* Accessors for route update information */
void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type);
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi);
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi);
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table);
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx);
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag);
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx);
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance);
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance);
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh);
void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx,
const struct nexthop_group *nhg);
uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_ng(
const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_old_ng(
const struct zebra_dplane_ctx *ctx);
/* Optional extra info about interfaces in nexthops - a plugin must enable
* this extra info.
*/
const struct dplane_intf_extra *
dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx);
const struct dplane_intf_extra *
dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
const struct dplane_intf_extra *ptr);
vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr);
uint32_t dplane_intf_extra_get_ifindex(const struct dplane_intf_extra *ptr);
uint32_t dplane_intf_extra_get_flags(const struct dplane_intf_extra *ptr);
uint32_t dplane_intf_extra_get_status(const struct dplane_intf_extra *ptr);
/* Backup nexthop information (list of nexthops) if present. */
const struct nexthop_group *
dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *
dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx);
/* Accessors for nexthop information */
uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_old_nhe_id(const struct zebra_dplane_ctx *ctx);
afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx);
vrf_id_t dplane_ctx_get_nhe_vrf_id(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_get_nhe_type(const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *
dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx);
const struct nh_grp *
dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx);
/* Accessors for LSP information */
/* Init the internal LSP data struct - necessary before adding to it.
* If 'lsp' is non-NULL, info will be copied from it to the internal
* context data area.
*/
int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
struct zebra_lsp *lsp);
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx,
mpls_label_t label);
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
uint8_t family);
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
uint32_t flags);
const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
const struct zebra_dplane_ctx *ctx);
const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
const struct zebra_dplane_ctx *ctx);
struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
enum lsp_types_t lsp_type,
enum nexthop_types_t nh_type,
const union g_addr *gate,
ifindex_t ifindex, uint8_t num_labels,
mpls_label_t *out_labels);
struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
enum nexthop_types_t nh_type, const union g_addr *gate,
ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels);
const struct zebra_nhlfe *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx);
const struct zebra_nhlfe *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
struct zebra_nhlfe *nhlfe);
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx);
/* Accessors for pseudowire information */
mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx);
mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_pw_status(struct zebra_dplane_ctx *ctx, int status);
const union g_addr *dplane_ctx_get_pw_dest(
const struct zebra_dplane_ctx *ctx);
const union pw_protocol_fields *dplane_ctx_get_pw_proto(
const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_pw_nhg(
const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *
dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *
dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx);
/* Accessors for interface information */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric);
/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx);
bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_intf_set_secondary(struct zebra_dplane_ctx *ctx);
bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_intf_set_broadcast(struct zebra_dplane_ctx *ctx);
const struct prefix *dplane_ctx_get_intf_addr(
const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_intf_addr(struct zebra_dplane_ctx *ctx,
const struct prefix *p);
bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx);
const struct prefix *dplane_ctx_get_intf_dest(
const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_intf_dest(struct zebra_dplane_ctx *ctx,
const struct prefix *p);
bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx);
const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_intf_label(struct zebra_dplane_ctx *ctx, const char *label);
/* Accessors for MAC information */
vlanid_t dplane_ctx_mac_get_vlan(const struct zebra_dplane_ctx *ctx);
bool dplane_ctx_mac_is_sticky(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_mac_get_update_flags(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_mac_get_nhg_id(const struct zebra_dplane_ctx *ctx);
const struct ethaddr *dplane_ctx_mac_get_addr(
const struct zebra_dplane_ctx *ctx);
const struct in_addr *dplane_ctx_mac_get_vtep_ip(
const struct zebra_dplane_ctx *ctx);
ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx);
/* Accessors for neighbor information */
const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
const struct zebra_dplane_ctx *ctx);
const struct ethaddr *dplane_ctx_neigh_get_mac(
const struct zebra_dplane_ctx *ctx);
const struct ipaddr *
dplane_ctx_neigh_get_link_ip(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx);
uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_neigh_get_update_flags(const struct zebra_dplane_ctx *ctx);
/* Accessors for policy based routing rule information */
int dplane_ctx_rule_get_sock(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_rule_get_unique(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_rule_get_seq(const struct zebra_dplane_ctx *ctx);
const char *dplane_ctx_rule_get_ifname(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_priority(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_old_priority(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_table(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_old_table(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_filter_bm(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_old_filter_bm(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx);
const struct prefix *
dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx);
const struct prefix *
dplane_ctx_rule_get_old_src_ip(const struct zebra_dplane_ctx *ctx);
const struct prefix *
dplane_ctx_rule_get_dst_ip(const struct zebra_dplane_ctx *ctx);
const struct prefix *
dplane_ctx_rule_get_old_dst_ip(const struct zebra_dplane_ctx *ctx);
/* Accessors for policy based routing iptable information */
struct zebra_pbr_iptable;
bool
dplane_ctx_get_pbr_iptable(const struct zebra_dplane_ctx *ctx,
struct zebra_pbr_iptable *table);
struct zebra_pbr_ipset;
bool
dplane_ctx_get_pbr_ipset(const struct zebra_dplane_ctx *ctx,
struct zebra_pbr_ipset *ipset);
struct zebra_pbr_ipset_entry;
bool
dplane_ctx_get_pbr_ipset_entry(const struct zebra_dplane_ctx *ctx,
struct zebra_pbr_ipset_entry *entry);
/* Accessors for bridge port information */
uint32_t dplane_ctx_get_br_port_flags(const struct zebra_dplane_ctx *ctx);
uint32_t
dplane_ctx_get_br_port_sph_filter_cnt(const struct zebra_dplane_ctx *ctx);
const struct in_addr *
dplane_ctx_get_br_port_sph_filters(const struct zebra_dplane_ctx *ctx);
uint32_t
dplane_ctx_get_br_port_backup_nhg_id(const struct zebra_dplane_ctx *ctx);
/* Accessors for neighbor table information */
uint8_t dplane_ctx_neightable_get_family(const struct zebra_dplane_ctx *ctx);
uint32_t
dplane_ctx_neightable_get_app_probes(const struct zebra_dplane_ctx *ctx);
uint32_t
dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx);
uint32_t
dplane_ctx_neightable_get_ucast_probes(const struct zebra_dplane_ctx *ctx);
/* Accessor for GRE set */
uint32_t
dplane_ctx_gre_get_link_ifindex(const struct zebra_dplane_ctx *ctx);
unsigned int
dplane_ctx_gre_get_mtu(const struct zebra_dplane_ctx *ctx);
const struct zebra_l2info_gre *
dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx);
/* Namespace info - esp. for netlink communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
const struct zebra_dplane_ctx *ctx);
/* Indicates zebra shutdown/exit is in progress. Some operations may be
* simplified or skipped during shutdown processing.
*/
bool dplane_is_in_shutdown(void);
/*
* Enqueue route change operations for the dataplane.
*/
enum zebra_dplane_result dplane_route_add(struct route_node *rn,
struct route_entry *re);
enum zebra_dplane_result dplane_route_update(struct route_node *rn,
struct route_entry *re,
struct route_entry *old_re);
enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
struct route_entry *re);
/* Notify the dplane when system/connected routes change */
enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
struct route_entry *re);
enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
struct route_entry *re);
/* Update from an async notification, to bring other fibs up-to-date */
enum zebra_dplane_result dplane_route_notif_update(
struct route_node *rn,
struct route_entry *re,
enum dplane_op_e op,
struct zebra_dplane_ctx *ctx);
/*
* Enqueue bridge port changes for the dataplane.
*/
enum zebra_dplane_result dplane_br_port_update(
const struct interface *ifp, bool non_df, uint32_t sph_filter_cnt,
const struct in_addr *sph_filters, uint32_t backup_nhg_id);
/* Forward ref of nhg_hash_entry */
struct nhg_hash_entry;
/*
* Enqueue a nexthop change operation for the dataplane.
*/
enum zebra_dplane_result dplane_nexthop_add(struct nhg_hash_entry *nhe);
enum zebra_dplane_result dplane_nexthop_update(struct nhg_hash_entry *nhe);
enum zebra_dplane_result dplane_nexthop_delete(struct nhg_hash_entry *nhe);
/*
* Enqueue LSP change operations for the dataplane.
*/
enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp);
enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp);
enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp);
/* Update or un-install resulting from an async notification */
enum zebra_dplane_result dplane_lsp_notif_update(struct zebra_lsp *lsp,
enum dplane_op_e op,
struct zebra_dplane_ctx *ctx);
/*
* Enqueue pseudowire operations for the dataplane.
*/
enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw);
enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw);
/*
* Enqueue interface address changes for the dataplane.
*/
enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
const struct connected *ifc);
enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
const struct connected *ifc);
/*
* Link layer operations for the dataplane.
*/
enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
const struct interface *ifp,
struct ipaddr *link_ip,
struct ipaddr *ip,
uint32_t ndm_state,
int protocol);
/*
* Enqueue evpn mac operations for the dataplane.
*/
enum zebra_dplane_result dplane_rem_mac_add(const struct interface *ifp,
const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip,
bool sticky,
uint32_t nhg_id,
bool was_static);
enum zebra_dplane_result dplane_local_mac_add(const struct interface *ifp,
const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
bool sticky,
uint32_t set_static,
uint32_t set_inactive);
enum zebra_dplane_result
dplane_local_mac_del(const struct interface *ifp,
const struct interface *bridge_ifp, vlanid_t vid,
const struct ethaddr *mac);
enum zebra_dplane_result dplane_rem_mac_del(const struct interface *ifp,
const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip);
/* Helper api to init an empty or new context for a MAC update */
void dplane_mac_init(struct zebra_dplane_ctx *ctx,
const struct interface *ifp,
const struct interface *br_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip,
bool sticky,
uint32_t nhg_id, uint32_t update_flags);
/*
* Enqueue evpn neighbor updates for the dataplane.
*/
enum zebra_dplane_result dplane_rem_neigh_add(const struct interface *ifp,
const struct ipaddr *ip,
const struct ethaddr *mac,
uint32_t flags, bool was_static);
enum zebra_dplane_result dplane_local_neigh_add(const struct interface *ifp,
const struct ipaddr *ip,
const struct ethaddr *mac,
bool set_router, bool set_static,
bool set_inactive);
enum zebra_dplane_result dplane_rem_neigh_delete(const struct interface *ifp,
const struct ipaddr *ip);
/*
* Enqueue evpn VTEP updates for the dataplane.
*/
enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
const struct in_addr *ip,
vni_t vni);
enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
const struct in_addr *ip,
vni_t vni);
/*
* Enqueue a neighbour discovery request for the dataplane.
*/
enum zebra_dplane_result dplane_neigh_discover(const struct interface *ifp,
const struct ipaddr *ip);
/*
* Enqueue a neighbor table parameter set
*/
enum zebra_dplane_result dplane_neigh_table_update(const struct interface *ifp,
const uint8_t family,
const uint32_t app_probes,
const uint32_t ucast_probes,
const uint32_t mcast_probes);
/*
* Enqueue a GRE set
*/
enum zebra_dplane_result
dplane_gre_set(struct interface *ifp, struct interface *ifp_link,
unsigned int mtu, const struct zebra_l2info_gre *gre_info);
/* Forward ref of zebra_pbr_rule */
struct zebra_pbr_rule;
/*
* Enqueue policy based routing rule for the dataplane.
* It is possible that the user-defined sequence number and the one in the
* forwarding plane may not coincide, hence the API requires a separate
* rule priority - maps to preference/FRA_PRIORITY on Linux.
*/
enum zebra_dplane_result dplane_pbr_rule_add(struct zebra_pbr_rule *rule);
enum zebra_dplane_result dplane_pbr_rule_delete(struct zebra_pbr_rule *rule);
enum zebra_dplane_result
dplane_pbr_rule_update(struct zebra_pbr_rule *old_rule,
struct zebra_pbr_rule *new_rule);
/* iptable */
enum zebra_dplane_result
dplane_pbr_iptable_add(struct zebra_pbr_iptable *iptable);
enum zebra_dplane_result
dplane_pbr_iptable_delete(struct zebra_pbr_iptable *iptable);
/* ipset */
struct zebra_pbr_ipset;
enum zebra_dplane_result dplane_pbr_ipset_add(struct zebra_pbr_ipset *ipset);
enum zebra_dplane_result dplane_pbr_ipset_delete(struct zebra_pbr_ipset *ipset);
/* ipset entry */
struct zebra_pbr_ipset_entry;
enum zebra_dplane_result
dplane_pbr_ipset_entry_add(struct zebra_pbr_ipset_entry *ipset);
enum zebra_dplane_result
dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset);
/* Encode route information into data plane context. */
int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
struct route_node *rn, struct route_entry *re);
/* Encode next hop information into data plane context. */
int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
struct nhg_hash_entry *nhe);
/* Retrieve the limit on the number of pending, unprocessed updates. */
uint32_t dplane_get_in_queue_limit(void);
/* Configure limit on the number of pending, queued updates. If 'unset', reset
* to default value.
*/
void dplane_set_in_queue_limit(uint32_t limit, bool set);
/* Retrieve the current queue depth of incoming, unprocessed updates */
uint32_t dplane_get_in_queue_len(void);
/*
* Vty/cli apis
*/
int dplane_show_helper(struct vty *vty, bool detailed);
int dplane_show_provs_helper(struct vty *vty, bool detailed);
int dplane_config_write_helper(struct vty *vty);
/*
* Dataplane providers: modules that process or consume dataplane events.
*/
struct zebra_dplane_provider;
/* Support string name for a dataplane provider */
#define DPLANE_PROVIDER_NAMELEN 64
/* Priority or ordering values for providers. The idea is that there may be
* some pre-processing, followed by an external or remote dataplane,
* followed by the kernel, followed by some post-processing step (such as
* the fpm output stream.)
*/
/* Provider registration priority: providers are consulted in ascending
 * order of this value (see the ordering description above).
 */
enum dplane_provider_prio {
	/* Unset/invalid priority */
	DPLANE_PRIO_NONE = 0,
	/* Pre-processing stage, ahead of any dataplane */
	DPLANE_PRIO_PREPROCESS,
	/* External or remote dataplane stage, before the kernel */
	DPLANE_PRIO_PRE_KERNEL,
	/* The OS kernel dataplane */
	DPLANE_PRIO_KERNEL,
	/* Post-processing stage (such as the fpm output stream) */
	DPLANE_PRIO_POSTPROCESS,
	/* Sentinel: upper bound of the priority range */
	DPLANE_PRIO_LAST
};
/* Flags values used during provider registration. */
#define DPLANE_PROV_FLAGS_DEFAULT 0x0
/* Provider will be spawning its own worker thread */
#define DPLANE_PROV_FLAG_THREADED 0x1
/* Provider registration: ordering or priority value, callbacks, and optional
* opaque data value. If 'prov_p', return the newly-allocated provider object
* on success.
*/
/* Providers offer an entry-point for incoming work, called in the context of
* the dataplane pthread. The dataplane pthread enqueues any new work to the
* provider's 'inbound' queue, then calls the callback. The dataplane
* then checks the provider's outbound queue for completed work.
*/
/*
* Providers can offer a 'start' callback; if present, the dataplane will
* call it when it is starting - when its pthread and event-scheduling
* thread_master are available.
*/
/* Providers can offer an entry-point for shutdown and cleanup. This is called
* with 'early' during shutdown, to indicate that the dataplane subsystem
* is allowing work to move through the providers and finish.
* When called without 'early', the provider should release
* all resources (if it has any allocated).
*/
int dplane_provider_register(const char *name,
enum dplane_provider_prio prio,
int flags,
int (*start_fp)(struct zebra_dplane_provider *),
int (*fp)(struct zebra_dplane_provider *),
int (*fini_fp)(struct zebra_dplane_provider *,
bool early),
void *data,
struct zebra_dplane_provider **prov_p);
/* Accessors for provider attributes */
const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov);
uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov);
void *dplane_provider_get_data(const struct zebra_dplane_provider *prov);
bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov);
/* Lock/unlock a provider's mutex - iff the provider was registered with
* the THREADED flag.
*/
void dplane_provider_lock(struct zebra_dplane_provider *prov);
void dplane_provider_unlock(struct zebra_dplane_provider *prov);
/* Obtain thread_master for dataplane thread */
struct thread_master *dplane_get_thread_master(void);
/* Providers should (generally) limit number of updates per work cycle */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov);
/* Provider api to signal that work/events are available
* for the dataplane pthread.
*/
int dplane_provider_work_ready(void);
/* Dequeue, maintain associated counter and locking */
struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
struct zebra_dplane_provider *prov);
/* Dequeue work to a list, maintain counter and locking, return count */
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
struct dplane_ctx_q *listp);
/* Current completed work queue length */
uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov);
/* Enqueue completed work, maintain associated counter and locking */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
struct zebra_dplane_ctx *ctx);
/* Enqueue a context directly to zebra main. */
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx);
/* Enable collection of extra info about interfaces in route updates;
* this allows a provider/plugin to see some extra info in route update
* context objects.
*/
void dplane_enable_intf_extra_info(void);
/*
* Initialize the dataplane modules at zebra startup. This is currently called
* by the rib module. Zebra registers a results callback with the dataplane.
* The callback is called in the dataplane pthread context,
* so the expectation is that the contexts are queued for the zebra
* main pthread.
*/
void zebra_dplane_init(int (*) (struct dplane_ctx_q *));
/*
* Start the dataplane pthread. This step needs to be run later than the
* 'init' step, in case zebra has fork-ed.
*/
void zebra_dplane_start(void);
/* Finalize/cleanup apis, one called early as shutdown is starting,
* one called late at the end of zebra shutdown, and then one called
* from the zebra main pthread to stop the dplane pthread and
* free all resources.
*
* Zebra expects to try to clean up all vrfs and all routes during
* shutdown, so the dplane must be available until very late.
*/
void zebra_dplane_pre_finish(void);
void zebra_dplane_finish(void);
void zebra_dplane_shutdown(void);
#ifdef __cplusplus
}
#endif
#endif /* _ZEBRA_DPLANE_H */
| 36.947872 | 80 | 0.795543 | [
"object"
] |
9e04117e347195f250cd4424086fcd9a33a3f6de | 2,311 | h | C | aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/GetBucketOwnershipControlsResult.h | blinemedical/aws-sdk-cpp | c7c814b2d6862b4cb48f3fb3ac083a9e419674e8 | [
"Apache-2.0"
] | 4 | 2021-04-16T14:23:06.000Z | 2022-03-02T11:17:13.000Z | aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/GetBucketOwnershipControlsResult.h | blinemedical/aws-sdk-cpp | c7c814b2d6862b4cb48f3fb3ac083a9e419674e8 | [
"Apache-2.0"
] | 3 | 2021-04-21T07:20:21.000Z | 2021-06-15T10:06:27.000Z | aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/GetBucketOwnershipControlsResult.h | blinemedical/aws-sdk-cpp | c7c814b2d6862b4cb48f3fb3ac083a9e419674e8 | [
"Apache-2.0"
] | 1 | 2021-05-22T00:09:27.000Z | 2021-05-22T00:09:27.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/s3-crt/S3Crt_EXPORTS.h>
#include <aws/s3-crt/model/OwnershipControls.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Xml
{
class XmlDocument;
} // namespace Xml
} // namespace Utils
namespace S3Crt
{
namespace Model
{
class AWS_S3CRT_API GetBucketOwnershipControlsResult
{
public:
GetBucketOwnershipControlsResult();
GetBucketOwnershipControlsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
GetBucketOwnershipControlsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
* <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
* currently in effect for this Amazon S3 bucket.</p>
*/
inline const OwnershipControls& GetOwnershipControls() const{ return m_ownershipControls; }
/**
* <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
* currently in effect for this Amazon S3 bucket.</p>
*/
inline void SetOwnershipControls(const OwnershipControls& value) { m_ownershipControls = value; }
/**
* <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
* currently in effect for this Amazon S3 bucket.</p>
*/
inline void SetOwnershipControls(OwnershipControls&& value) { m_ownershipControls = std::move(value); }
/**
* <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
* currently in effect for this Amazon S3 bucket.</p>
*/
inline GetBucketOwnershipControlsResult& WithOwnershipControls(const OwnershipControls& value) { SetOwnershipControls(value); return *this;}
/**
* <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
* currently in effect for this Amazon S3 bucket.</p>
*/
inline GetBucketOwnershipControlsResult& WithOwnershipControls(OwnershipControls&& value) { SetOwnershipControls(std::move(value)); return *this;}
private:
OwnershipControls m_ownershipControls;
};
} // namespace Model
} // namespace S3Crt
} // namespace Aws
| 31.657534 | 150 | 0.731285 | [
"model"
] |
9e045d323d51ae817b020349ee4372c3d7d163df | 3,756 | h | C | LAB 6/00254915Laboratorio6/c++/classes.h | lopezosv/SIMU | 4004eb67fe2e8191ae8851e33f3f917dc82fb40e | [
"Apache-2.0"
] | null | null | null | LAB 6/00254915Laboratorio6/c++/classes.h | lopezosv/SIMU | 4004eb67fe2e8191ae8851e33f3f917dc82fb40e | [
"Apache-2.0"
] | null | null | null | LAB 6/00254915Laboratorio6/c++/classes.h | lopezosv/SIMU | 4004eb67fe2e8191ae8851e33f3f917dc82fb40e | [
"Apache-2.0"
] | null | null | null | enum lines {NOLINE,SINGLELINE,DOUBLELINE};
// Record layouts for parsed input rows: none, "int float", "int float float",
// or "int int int int". NOTE(review): the parser that consumes these is not
// in this header -- confirm the exact column meanings against the reader code.
enum modes {NOMODE,INT_FLOAT,INT_FLOAT_FLOAT,INT_INT_INT_INT};
// Indices into mesh::parameters for the model constants a, e and f
// (see mesh::setParameters / mesh::getParameter).
enum parameters {CONS_A,CONS_E,CONS_F};
// Indices into mesh::sizes (entity counts). DIRICHLET and NEUMANN also act
// as the 'type' selector in mesh::getCondition.
enum sizes {NODES,ELEMENTS,DIRICHLET,NEUMANN};
/*
 * Common base record for mesh entities (nodes, elements and boundary
 * conditions). It holds the union of all fields any entity may need;
 * each concrete subclass fills only its own subset via setValues().
 */
class item{
protected:
    int id;        // entity identifier
    float x;       // x coordinate (used by nodes)
    float y;       // y coordinate (used by nodes)
    int node1;     // first node reference (elements, conditions)
    int node2;     // second node reference (elements)
    int node3;     // third node reference (elements)
    float value;   // prescribed value (conditions)
public:
    void setId(int new_id) { id = new_id; }
    void setX(float xc) { x = xc; }
    void setY(float yc) { y = yc; }
    void setNode1(int n) { node1 = n; }
    void setNode2(int n) { node2 = n; }
    void setNode3(int n) { node3 = n; }
    void setValue(float v) { value = v; }
    int getId() { return id; }
    float getX() { return x; }
    float getY() { return y; }
    int getNode1() { return node1; }
    int getNode2() { return node2; }
    int getNode3() { return node3; }
    float getValue() { return value; }
    // Subclass hook: store the subset of the parsed record's fields that
    // applies to the concrete entity type.
    virtual void setValues(int a,float b,float c,int d,int e,int f,float g)=0;
};
class node: public item{
public:
void setValues(int a,float b,float c,int d,int e,int f,float g){
id = a;
x = b;
y = c;
}
};
class element: public item{
public:
void setValues(int a,float b,float c,int d,int e,int f,float g){
id = a;
node1 = d;
node2 = e;
node3 = f;
}
};
class condition: public item{
public:
void setValues(int a,float b,float c,int d,int e,int f,float g){
node1 = d;
value = g;
}
};
/*
 * In-memory finite-element problem: model parameters, entity counts, and
 * the node / element / boundary-condition arrays. Usage order: setSizes()
 * (and setParameters()), then createData(), then the accessors.
 *
 * NOTE(review): the arrays allocated in createData() are never released
 * (no destructor). Tolerable for a run-once solver, but a destructor plus
 * copy-control would be needed before copying mesh objects around.
 */
class mesh{
    // BUGFIX: was parameters[2]. enum parameters has three entries
    // (CONS_F == 2), so setParameters()/getParameter(CONS_F) read and
    // wrote one past the end of the array (undefined behavior).
    float parameters[3];        // indexed by enum parameters
    int sizes[4];               // indexed by enum sizes
    node *node_list;            // sizes[NODES] entries
    element *element_list;      // sizes[ELEMENTS] entries
    int *indices_dirich;        // sizes[DIRICHLET] entries
    condition *dirichlet_list;  // sizes[DIRICHLET] entries
    condition *neumann_list;    // sizes[NEUMANN] entries
public:
    // Store the three model constants a, e and f.
    void setParameters(float a,float e, float f){
        parameters[CONS_A]=a;
        parameters[CONS_E]=e;
        parameters[CONS_F]=f;
    }
    // Record the entity counts; must precede createData().
    void setSizes(int nnodes,int neltos,int ndirich,int nneu){
        sizes[NODES] = nnodes;
        sizes[ELEMENTS] = neltos;
        sizes[DIRICHLET] = ndirich;
        sizes[NEUMANN] = nneu;
    }
    // s is an enum sizes index.
    int getSize(int s){
        return sizes[s];
    }
    // p is an enum parameters index.
    float getParameter(int p){
        return parameters[p];
    }
    // Allocate every per-entity array according to the stored sizes.
    void createData(){
        node_list = new node[sizes[NODES]];
        element_list = new element[sizes[ELEMENTS]];
        // BUGFIX: was `new int[DIRICHLET]` -- DIRICHLET is the enum
        // constant 2, not the Dirichlet-condition count, so only two
        // ints were allocated regardless of sizes[DIRICHLET].
        indices_dirich = new int[sizes[DIRICHLET]];
        dirichlet_list = new condition[sizes[DIRICHLET]];
        neumann_list = new condition[sizes[NEUMANN]];
    }
    node* getNodes(){
        return node_list;
    }
    element* getElements(){
        return element_list;
    }
    int* getDirichletIndices(){
        return indices_dirich;
    }
    condition* getDirichlet(){
        return dirichlet_list;
    }
    condition* getNeumann(){
        return neumann_list;
    }
    node getNode(int i){
        return node_list[i];
    }
    element getElement(int i){
        return element_list[i];
    }
    // type is DIRICHLET or NEUMANN (enum sizes); i indexes that list.
    condition getCondition(int i, int type){
        if(type == DIRICHLET) return dirichlet_list[i];
        else return neumann_list[i];
    }
};
| 22.224852 | 82 | 0.494143 | [
"mesh"
] |
9e06c8024cbcda39c1459401b9cd3902f0cc00d5 | 194,711 | h | C | aws-cpp-sdk-forecast/include/aws/forecast/ForecastServiceClient.h | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | [
"Apache-2.0"
] | 1 | 2022-02-12T08:09:30.000Z | 2022-02-12T08:09:30.000Z | aws-cpp-sdk-forecast/include/aws/forecast/ForecastServiceClient.h | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | [
"Apache-2.0"
] | 1 | 2021-10-14T16:57:00.000Z | 2021-10-18T10:47:24.000Z | aws-cpp-sdk-forecast/include/aws/forecast/ForecastServiceClient.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2021-12-30T04:25:33.000Z | 2021-12-30T04:25:33.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/forecast/ForecastService_EXPORTS.h>
#include <aws/forecast/ForecastServiceErrors.h>
#include <aws/core/client/AWSError.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/AWSClient.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/forecast/model/CreateAutoPredictorResult.h>
#include <aws/forecast/model/CreateDatasetResult.h>
#include <aws/forecast/model/CreateDatasetGroupResult.h>
#include <aws/forecast/model/CreateDatasetImportJobResult.h>
#include <aws/forecast/model/CreateExplainabilityResult.h>
#include <aws/forecast/model/CreateExplainabilityExportResult.h>
#include <aws/forecast/model/CreateForecastResult.h>
#include <aws/forecast/model/CreateForecastExportJobResult.h>
#include <aws/forecast/model/CreatePredictorResult.h>
#include <aws/forecast/model/CreatePredictorBacktestExportJobResult.h>
#include <aws/forecast/model/DescribeAutoPredictorResult.h>
#include <aws/forecast/model/DescribeDatasetResult.h>
#include <aws/forecast/model/DescribeDatasetGroupResult.h>
#include <aws/forecast/model/DescribeDatasetImportJobResult.h>
#include <aws/forecast/model/DescribeExplainabilityResult.h>
#include <aws/forecast/model/DescribeExplainabilityExportResult.h>
#include <aws/forecast/model/DescribeForecastResult.h>
#include <aws/forecast/model/DescribeForecastExportJobResult.h>
#include <aws/forecast/model/DescribePredictorResult.h>
#include <aws/forecast/model/DescribePredictorBacktestExportJobResult.h>
#include <aws/forecast/model/GetAccuracyMetricsResult.h>
#include <aws/forecast/model/ListDatasetGroupsResult.h>
#include <aws/forecast/model/ListDatasetImportJobsResult.h>
#include <aws/forecast/model/ListDatasetsResult.h>
#include <aws/forecast/model/ListExplainabilitiesResult.h>
#include <aws/forecast/model/ListExplainabilityExportsResult.h>
#include <aws/forecast/model/ListForecastExportJobsResult.h>
#include <aws/forecast/model/ListForecastsResult.h>
#include <aws/forecast/model/ListPredictorBacktestExportJobsResult.h>
#include <aws/forecast/model/ListPredictorsResult.h>
#include <aws/forecast/model/ListTagsForResourceResult.h>
#include <aws/forecast/model/TagResourceResult.h>
#include <aws/forecast/model/UntagResourceResult.h>
#include <aws/forecast/model/UpdateDatasetGroupResult.h>
#include <aws/core/NoResult.h>
#include <aws/core/client/AsyncCallerContext.h>
#include <aws/core/http/HttpTypes.h>
#include <future>
#include <functional>
namespace Aws
{
// Forward declarations only: this header refers to these core SDK types by
// pointer/reference, so the full definitions are not needed here.
namespace Http
{
  class HttpClient;
  class HttpClientFactory;
} // namespace Http
namespace Utils
{
  template< typename R, typename E> class Outcome;
namespace Threading
{
  class Executor;
} // namespace Threading
} // namespace Utils
namespace Auth
{
  class AWSCredentials;
  class AWSCredentialsProvider;
} // namespace Auth
namespace Client
{
  class RetryStrategy;
} // namespace Client
namespace ForecastService
{
namespace Model
{
class CreateAutoPredictorRequest;
class CreateDatasetRequest;
class CreateDatasetGroupRequest;
class CreateDatasetImportJobRequest;
class CreateExplainabilityRequest;
class CreateExplainabilityExportRequest;
class CreateForecastRequest;
class CreateForecastExportJobRequest;
class CreatePredictorRequest;
class CreatePredictorBacktestExportJobRequest;
class DeleteDatasetRequest;
class DeleteDatasetGroupRequest;
class DeleteDatasetImportJobRequest;
class DeleteExplainabilityRequest;
class DeleteExplainabilityExportRequest;
class DeleteForecastRequest;
class DeleteForecastExportJobRequest;
class DeletePredictorRequest;
class DeletePredictorBacktestExportJobRequest;
class DeleteResourceTreeRequest;
class DescribeAutoPredictorRequest;
class DescribeDatasetRequest;
class DescribeDatasetGroupRequest;
class DescribeDatasetImportJobRequest;
class DescribeExplainabilityRequest;
class DescribeExplainabilityExportRequest;
class DescribeForecastRequest;
class DescribeForecastExportJobRequest;
class DescribePredictorRequest;
class DescribePredictorBacktestExportJobRequest;
class GetAccuracyMetricsRequest;
class ListDatasetGroupsRequest;
class ListDatasetImportJobsRequest;
class ListDatasetsRequest;
class ListExplainabilitiesRequest;
class ListExplainabilityExportsRequest;
class ListForecastExportJobsRequest;
class ListForecastsRequest;
class ListPredictorBacktestExportJobsRequest;
class ListPredictorsRequest;
class ListTagsForResourceRequest;
class StopResourceRequest;
class TagResourceRequest;
class UntagResourceRequest;
class UpdateDatasetGroupRequest;
typedef Aws::Utils::Outcome<CreateAutoPredictorResult, ForecastServiceError> CreateAutoPredictorOutcome;
typedef Aws::Utils::Outcome<CreateDatasetResult, ForecastServiceError> CreateDatasetOutcome;
typedef Aws::Utils::Outcome<CreateDatasetGroupResult, ForecastServiceError> CreateDatasetGroupOutcome;
typedef Aws::Utils::Outcome<CreateDatasetImportJobResult, ForecastServiceError> CreateDatasetImportJobOutcome;
typedef Aws::Utils::Outcome<CreateExplainabilityResult, ForecastServiceError> CreateExplainabilityOutcome;
typedef Aws::Utils::Outcome<CreateExplainabilityExportResult, ForecastServiceError> CreateExplainabilityExportOutcome;
typedef Aws::Utils::Outcome<CreateForecastResult, ForecastServiceError> CreateForecastOutcome;
typedef Aws::Utils::Outcome<CreateForecastExportJobResult, ForecastServiceError> CreateForecastExportJobOutcome;
typedef Aws::Utils::Outcome<CreatePredictorResult, ForecastServiceError> CreatePredictorOutcome;
typedef Aws::Utils::Outcome<CreatePredictorBacktestExportJobResult, ForecastServiceError> CreatePredictorBacktestExportJobOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteDatasetOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteDatasetGroupOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteDatasetImportJobOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteExplainabilityOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteExplainabilityExportOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteForecastOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteForecastExportJobOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeletePredictorOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeletePredictorBacktestExportJobOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> DeleteResourceTreeOutcome;
typedef Aws::Utils::Outcome<DescribeAutoPredictorResult, ForecastServiceError> DescribeAutoPredictorOutcome;
typedef Aws::Utils::Outcome<DescribeDatasetResult, ForecastServiceError> DescribeDatasetOutcome;
typedef Aws::Utils::Outcome<DescribeDatasetGroupResult, ForecastServiceError> DescribeDatasetGroupOutcome;
typedef Aws::Utils::Outcome<DescribeDatasetImportJobResult, ForecastServiceError> DescribeDatasetImportJobOutcome;
typedef Aws::Utils::Outcome<DescribeExplainabilityResult, ForecastServiceError> DescribeExplainabilityOutcome;
typedef Aws::Utils::Outcome<DescribeExplainabilityExportResult, ForecastServiceError> DescribeExplainabilityExportOutcome;
typedef Aws::Utils::Outcome<DescribeForecastResult, ForecastServiceError> DescribeForecastOutcome;
typedef Aws::Utils::Outcome<DescribeForecastExportJobResult, ForecastServiceError> DescribeForecastExportJobOutcome;
typedef Aws::Utils::Outcome<DescribePredictorResult, ForecastServiceError> DescribePredictorOutcome;
typedef Aws::Utils::Outcome<DescribePredictorBacktestExportJobResult, ForecastServiceError> DescribePredictorBacktestExportJobOutcome;
typedef Aws::Utils::Outcome<GetAccuracyMetricsResult, ForecastServiceError> GetAccuracyMetricsOutcome;
typedef Aws::Utils::Outcome<ListDatasetGroupsResult, ForecastServiceError> ListDatasetGroupsOutcome;
typedef Aws::Utils::Outcome<ListDatasetImportJobsResult, ForecastServiceError> ListDatasetImportJobsOutcome;
typedef Aws::Utils::Outcome<ListDatasetsResult, ForecastServiceError> ListDatasetsOutcome;
typedef Aws::Utils::Outcome<ListExplainabilitiesResult, ForecastServiceError> ListExplainabilitiesOutcome;
typedef Aws::Utils::Outcome<ListExplainabilityExportsResult, ForecastServiceError> ListExplainabilityExportsOutcome;
typedef Aws::Utils::Outcome<ListForecastExportJobsResult, ForecastServiceError> ListForecastExportJobsOutcome;
typedef Aws::Utils::Outcome<ListForecastsResult, ForecastServiceError> ListForecastsOutcome;
typedef Aws::Utils::Outcome<ListPredictorBacktestExportJobsResult, ForecastServiceError> ListPredictorBacktestExportJobsOutcome;
typedef Aws::Utils::Outcome<ListPredictorsResult, ForecastServiceError> ListPredictorsOutcome;
typedef Aws::Utils::Outcome<ListTagsForResourceResult, ForecastServiceError> ListTagsForResourceOutcome;
typedef Aws::Utils::Outcome<Aws::NoResult, ForecastServiceError> StopResourceOutcome;
typedef Aws::Utils::Outcome<TagResourceResult, ForecastServiceError> TagResourceOutcome;
typedef Aws::Utils::Outcome<UntagResourceResult, ForecastServiceError> UntagResourceOutcome;
typedef Aws::Utils::Outcome<UpdateDatasetGroupResult, ForecastServiceError> UpdateDatasetGroupOutcome;
typedef std::future<CreateAutoPredictorOutcome> CreateAutoPredictorOutcomeCallable;
typedef std::future<CreateDatasetOutcome> CreateDatasetOutcomeCallable;
typedef std::future<CreateDatasetGroupOutcome> CreateDatasetGroupOutcomeCallable;
typedef std::future<CreateDatasetImportJobOutcome> CreateDatasetImportJobOutcomeCallable;
typedef std::future<CreateExplainabilityOutcome> CreateExplainabilityOutcomeCallable;
typedef std::future<CreateExplainabilityExportOutcome> CreateExplainabilityExportOutcomeCallable;
typedef std::future<CreateForecastOutcome> CreateForecastOutcomeCallable;
typedef std::future<CreateForecastExportJobOutcome> CreateForecastExportJobOutcomeCallable;
typedef std::future<CreatePredictorOutcome> CreatePredictorOutcomeCallable;
typedef std::future<CreatePredictorBacktestExportJobOutcome> CreatePredictorBacktestExportJobOutcomeCallable;
typedef std::future<DeleteDatasetOutcome> DeleteDatasetOutcomeCallable;
typedef std::future<DeleteDatasetGroupOutcome> DeleteDatasetGroupOutcomeCallable;
typedef std::future<DeleteDatasetImportJobOutcome> DeleteDatasetImportJobOutcomeCallable;
typedef std::future<DeleteExplainabilityOutcome> DeleteExplainabilityOutcomeCallable;
typedef std::future<DeleteExplainabilityExportOutcome> DeleteExplainabilityExportOutcomeCallable;
typedef std::future<DeleteForecastOutcome> DeleteForecastOutcomeCallable;
typedef std::future<DeleteForecastExportJobOutcome> DeleteForecastExportJobOutcomeCallable;
typedef std::future<DeletePredictorOutcome> DeletePredictorOutcomeCallable;
typedef std::future<DeletePredictorBacktestExportJobOutcome> DeletePredictorBacktestExportJobOutcomeCallable;
typedef std::future<DeleteResourceTreeOutcome> DeleteResourceTreeOutcomeCallable;
typedef std::future<DescribeAutoPredictorOutcome> DescribeAutoPredictorOutcomeCallable;
typedef std::future<DescribeDatasetOutcome> DescribeDatasetOutcomeCallable;
typedef std::future<DescribeDatasetGroupOutcome> DescribeDatasetGroupOutcomeCallable;
typedef std::future<DescribeDatasetImportJobOutcome> DescribeDatasetImportJobOutcomeCallable;
typedef std::future<DescribeExplainabilityOutcome> DescribeExplainabilityOutcomeCallable;
typedef std::future<DescribeExplainabilityExportOutcome> DescribeExplainabilityExportOutcomeCallable;
typedef std::future<DescribeForecastOutcome> DescribeForecastOutcomeCallable;
typedef std::future<DescribeForecastExportJobOutcome> DescribeForecastExportJobOutcomeCallable;
typedef std::future<DescribePredictorOutcome> DescribePredictorOutcomeCallable;
typedef std::future<DescribePredictorBacktestExportJobOutcome> DescribePredictorBacktestExportJobOutcomeCallable;
typedef std::future<GetAccuracyMetricsOutcome> GetAccuracyMetricsOutcomeCallable;
typedef std::future<ListDatasetGroupsOutcome> ListDatasetGroupsOutcomeCallable;
typedef std::future<ListDatasetImportJobsOutcome> ListDatasetImportJobsOutcomeCallable;
typedef std::future<ListDatasetsOutcome> ListDatasetsOutcomeCallable;
typedef std::future<ListExplainabilitiesOutcome> ListExplainabilitiesOutcomeCallable;
typedef std::future<ListExplainabilityExportsOutcome> ListExplainabilityExportsOutcomeCallable;
typedef std::future<ListForecastExportJobsOutcome> ListForecastExportJobsOutcomeCallable;
typedef std::future<ListForecastsOutcome> ListForecastsOutcomeCallable;
typedef std::future<ListPredictorBacktestExportJobsOutcome> ListPredictorBacktestExportJobsOutcomeCallable;
typedef std::future<ListPredictorsOutcome> ListPredictorsOutcomeCallable;
typedef std::future<ListTagsForResourceOutcome> ListTagsForResourceOutcomeCallable;
typedef std::future<StopResourceOutcome> StopResourceOutcomeCallable;
typedef std::future<TagResourceOutcome> TagResourceOutcomeCallable;
typedef std::future<UntagResourceOutcome> UntagResourceOutcomeCallable;
typedef std::future<UpdateDatasetGroupOutcome> UpdateDatasetGroupOutcomeCallable;
} // namespace Model
class ForecastServiceClient;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateAutoPredictorRequest&, const Model::CreateAutoPredictorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateAutoPredictorResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateDatasetRequest&, const Model::CreateDatasetOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateDatasetResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateDatasetGroupRequest&, const Model::CreateDatasetGroupOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateDatasetGroupResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateDatasetImportJobRequest&, const Model::CreateDatasetImportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateDatasetImportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateExplainabilityRequest&, const Model::CreateExplainabilityOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateExplainabilityResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateExplainabilityExportRequest&, const Model::CreateExplainabilityExportOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateExplainabilityExportResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateForecastRequest&, const Model::CreateForecastOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateForecastResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreateForecastExportJobRequest&, const Model::CreateForecastExportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateForecastExportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreatePredictorRequest&, const Model::CreatePredictorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreatePredictorResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::CreatePredictorBacktestExportJobRequest&, const Model::CreatePredictorBacktestExportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreatePredictorBacktestExportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteDatasetRequest&, const Model::DeleteDatasetOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteDatasetResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteDatasetGroupRequest&, const Model::DeleteDatasetGroupOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteDatasetGroupResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteDatasetImportJobRequest&, const Model::DeleteDatasetImportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteDatasetImportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteExplainabilityRequest&, const Model::DeleteExplainabilityOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteExplainabilityResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteExplainabilityExportRequest&, const Model::DeleteExplainabilityExportOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteExplainabilityExportResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteForecastRequest&, const Model::DeleteForecastOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteForecastResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteForecastExportJobRequest&, const Model::DeleteForecastExportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteForecastExportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeletePredictorRequest&, const Model::DeletePredictorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeletePredictorResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeletePredictorBacktestExportJobRequest&, const Model::DeletePredictorBacktestExportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeletePredictorBacktestExportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DeleteResourceTreeRequest&, const Model::DeleteResourceTreeOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteResourceTreeResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeAutoPredictorRequest&, const Model::DescribeAutoPredictorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeAutoPredictorResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeDatasetRequest&, const Model::DescribeDatasetOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeDatasetResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeDatasetGroupRequest&, const Model::DescribeDatasetGroupOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeDatasetGroupResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeDatasetImportJobRequest&, const Model::DescribeDatasetImportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeDatasetImportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeExplainabilityRequest&, const Model::DescribeExplainabilityOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeExplainabilityResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeExplainabilityExportRequest&, const Model::DescribeExplainabilityExportOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeExplainabilityExportResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeForecastRequest&, const Model::DescribeForecastOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeForecastResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribeForecastExportJobRequest&, const Model::DescribeForecastExportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeForecastExportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribePredictorRequest&, const Model::DescribePredictorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribePredictorResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::DescribePredictorBacktestExportJobRequest&, const Model::DescribePredictorBacktestExportJobOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribePredictorBacktestExportJobResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::GetAccuracyMetricsRequest&, const Model::GetAccuracyMetricsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetAccuracyMetricsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListDatasetGroupsRequest&, const Model::ListDatasetGroupsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListDatasetGroupsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListDatasetImportJobsRequest&, const Model::ListDatasetImportJobsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListDatasetImportJobsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListDatasetsRequest&, const Model::ListDatasetsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListDatasetsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListExplainabilitiesRequest&, const Model::ListExplainabilitiesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListExplainabilitiesResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListExplainabilityExportsRequest&, const Model::ListExplainabilityExportsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListExplainabilityExportsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListForecastExportJobsRequest&, const Model::ListForecastExportJobsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListForecastExportJobsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListForecastsRequest&, const Model::ListForecastsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListForecastsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListPredictorBacktestExportJobsRequest&, const Model::ListPredictorBacktestExportJobsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListPredictorBacktestExportJobsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListPredictorsRequest&, const Model::ListPredictorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListPredictorsResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::ListTagsForResourceRequest&, const Model::ListTagsForResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListTagsForResourceResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::StopResourceRequest&, const Model::StopResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StopResourceResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::TagResourceRequest&, const Model::TagResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > TagResourceResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::UntagResourceRequest&, const Model::UntagResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UntagResourceResponseReceivedHandler;
typedef std::function<void(const ForecastServiceClient*, const Model::UpdateDatasetGroupRequest&, const Model::UpdateDatasetGroupOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UpdateDatasetGroupResponseReceivedHandler;
/**
* <p>Provides APIs for creating and managing Amazon Forecast resources.</p>
*/
class AWS_FORECASTSERVICE_API ForecastServiceClient : public Aws::Client::AWSJsonClient
{
public:
/** Shorthand for the JSON-protocol base class this client derives from. */
typedef Aws::Client::AWSJsonClient BASECLASS;
/**
* Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config
* is not specified, it will be initialized to default values.
*/
ForecastServiceClient(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());
/**
* Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config
* is not specified, it will be initialized to default values.
*/
ForecastServiceClient(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());
/**
* Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
* the default http client factory will be used
*/
ForecastServiceClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());
/** Virtual destructor so the client can be destroyed through a base-class pointer. */
virtual ~ForecastServiceClient();
/**
* <p>Creates an Amazon Forecast predictor.</p> <p>Amazon Forecast creates
* predictors with AutoPredictor, which involves applying the optimal combination
* of algorithms to each time series in your datasets. You can use
* <a>CreateAutoPredictor</a> to create new predictors or upgrade/retrain existing
* predictors.</p> <p> <b>Creating new predictors</b> </p> <p>The following
* parameters are required when creating a new predictor:</p> <ul> <li> <p>
* <code>PredictorName</code> - A unique name for the predictor.</p> </li> <li> <p>
* <code>DatasetGroupArn</code> - The ARN of the dataset group used to train the
* predictor.</p> </li> <li> <p> <code>ForecastFrequency</code> - The granularity
* of your forecasts (hourly, daily, weekly, etc).</p> </li> <li> <p>
* <code>ForecastHorizon</code> - The number of time steps being forecasted.</p>
* </li> </ul> <p>When creating a new predictor, do not specify a value for
* <code>ReferencePredictorArn</code>.</p> <p> <b>Upgrading and retraining
* predictors</b> </p> <p>The following parameters are required when retraining or
* upgrading a predictor:</p> <ul> <li> <p> <code>PredictorName</code> - A unique
* name for the predictor.</p> </li> <li> <p> <code>ReferencePredictorArn</code> -
* The ARN of the predictor to retrain or upgrade.</p> </li> </ul> <p>When
* upgrading or retraining a predictor, only specify values for the
* <code>ReferencePredictorArn</code> and <code>PredictorName</code>.
* </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateAutoPredictor">AWS
* API Reference</a></p>
*/
virtual Model::CreateAutoPredictorOutcome CreateAutoPredictor(const Model::CreateAutoPredictorRequest& request) const;
/**
* <p>Creates an Amazon Forecast predictor.</p> <p>Amazon Forecast creates
* predictors with AutoPredictor, which involves applying the optimal combination
* of algorithms to each time series in your datasets. You can use
* <a>CreateAutoPredictor</a> to create new predictors or upgrade/retrain existing
* predictors.</p> <p> <b>Creating new predictors</b> </p> <p>The following
* parameters are required when creating a new predictor:</p> <ul> <li> <p>
* <code>PredictorName</code> - A unique name for the predictor.</p> </li> <li> <p>
* <code>DatasetGroupArn</code> - The ARN of the dataset group used to train the
* predictor.</p> </li> <li> <p> <code>ForecastFrequency</code> - The granularity
* of your forecasts (hourly, daily, weekly, etc).</p> </li> <li> <p>
* <code>ForecastHorizon</code> - The number of time steps being forecasted.</p>
* </li> </ul> <p>When creating a new predictor, do not specify a value for
* <code>ReferencePredictorArn</code>.</p> <p> <b>Upgrading and retraining
* predictors</b> </p> <p>The following parameters are required when retraining or
* upgrading a predictor:</p> <ul> <li> <p> <code>PredictorName</code> - A unique
* name for the predictor.</p> </li> <li> <p> <code>ReferencePredictorArn</code> -
* The ARN of the predictor to retrain or upgrade.</p> </li> </ul> <p>When
* upgrading or retraining a predictor, only specify values for the
* <code>ReferencePredictorArn</code> and <code>PredictorName</code>.
* </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateAutoPredictor">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::CreateAutoPredictorOutcomeCallable CreateAutoPredictorCallable(const Model::CreateAutoPredictorRequest& request) const;
/**
* <p>Creates an Amazon Forecast predictor.</p> <p>Amazon Forecast creates
* predictors with AutoPredictor, which involves applying the optimal combination
* of algorithms to each time series in your datasets. You can use
* <a>CreateAutoPredictor</a> to create new predictors or upgrade/retrain existing
* predictors.</p> <p> <b>Creating new predictors</b> </p> <p>The following
* parameters are required when creating a new predictor:</p> <ul> <li> <p>
* <code>PredictorName</code> - A unique name for the predictor.</p> </li> <li> <p>
* <code>DatasetGroupArn</code> - The ARN of the dataset group used to train the
* predictor.</p> </li> <li> <p> <code>ForecastFrequency</code> - The granularity
* of your forecasts (hourly, daily, weekly, etc).</p> </li> <li> <p>
* <code>ForecastHorizon</code> - The number of time steps being forecasted.</p>
* </li> </ul> <p>When creating a new predictor, do not specify a value for
* <code>ReferencePredictorArn</code>.</p> <p> <b>Upgrading and retraining
* predictors</b> </p> <p>The following parameters are required when retraining or
* upgrading a predictor:</p> <ul> <li> <p> <code>PredictorName</code> - A unique
* name for the predictor.</p> </li> <li> <p> <code>ReferencePredictorArn</code> -
* The ARN of the predictor to retrain or upgrade.</p> </li> </ul> <p>When
* upgrading or retraining a predictor, only specify values for the
* <code>ReferencePredictorArn</code> and <code>PredictorName</code>.
* </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateAutoPredictor">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void CreateAutoPredictorAsync(const Model::CreateAutoPredictorRequest& request, const CreateAutoPredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Creates an Amazon Forecast dataset. The information about the dataset that
* you provide helps Forecast understand how to consume the data for model
* training. This includes the following:</p> <ul> <li> <p> <i>
* <code>DataFrequency</code> </i> - How frequently your historical time-series
* data is collected.</p> </li> <li> <p> <i> <code>Domain</code> </i> and <i>
* <code>DatasetType</code> </i> - Each dataset has an associated dataset domain
* and a type within the domain. Amazon Forecast provides a list of predefined
* domains and types within each domain. For each unique dataset domain and type
* within the domain, Amazon Forecast requires your data to include a minimum set
* of predefined fields.</p> </li> <li> <p> <i> <code>Schema</code> </i> - A schema
* specifies the fields in the dataset, including the field name and data type.</p>
* </li> </ul> <p>After creating a dataset, you import your training data into it
* and add the dataset to a dataset group. You use the dataset group to create a
* predictor. For more information, see <a>howitworks-datasets-groups</a>.</p>
* <p>To get a list of all your datasets, use the <a>ListDatasets</a>
* operation.</p> <p>For example Forecast datasets, see the <a
* href="https://github.com/aws-samples/amazon-forecast-samples">Amazon Forecast
* Sample GitHub repository</a>.</p> <p>The <code>Status</code> of a dataset
* must be <code>ACTIVE</code> before you can import training data. Use the
* <a>DescribeDataset</a> operation to get the status.</p> <p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDataset">AWS
* API Reference</a></p>
*/
virtual Model::CreateDatasetOutcome CreateDataset(const Model::CreateDatasetRequest& request) const;
/**
* <p>Creates an Amazon Forecast dataset. The information about the dataset that
* you provide helps Forecast understand how to consume the data for model
* training. This includes the following:</p> <ul> <li> <p> <i>
* <code>DataFrequency</code> </i> - How frequently your historical time-series
* data is collected.</p> </li> <li> <p> <i> <code>Domain</code> </i> and <i>
* <code>DatasetType</code> </i> - Each dataset has an associated dataset domain
* and a type within the domain. Amazon Forecast provides a list of predefined
* domains and types within each domain. For each unique dataset domain and type
* within the domain, Amazon Forecast requires your data to include a minimum set
* of predefined fields.</p> </li> <li> <p> <i> <code>Schema</code> </i> - A schema
* specifies the fields in the dataset, including the field name and data type.</p>
* </li> </ul> <p>After creating a dataset, you import your training data into it
* and add the dataset to a dataset group. You use the dataset group to create a
* predictor. For more information, see <a>howitworks-datasets-groups</a>.</p>
* <p>To get a list of all your datasets, use the <a>ListDatasets</a>
* operation.</p> <p>For example Forecast datasets, see the <a
* href="https://github.com/aws-samples/amazon-forecast-samples">Amazon Forecast
* Sample GitHub repository</a>.</p> <p>The <code>Status</code> of a dataset
* must be <code>ACTIVE</code> before you can import training data. Use the
* <a>DescribeDataset</a> operation to get the status.</p> <p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDataset">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::CreateDatasetOutcomeCallable CreateDatasetCallable(const Model::CreateDatasetRequest& request) const;
/**
* <p>Creates an Amazon Forecast dataset. The information about the dataset that
* you provide helps Forecast understand how to consume the data for model
* training. This includes the following:</p> <ul> <li> <p> <i>
* <code>DataFrequency</code> </i> - How frequently your historical time-series
* data is collected.</p> </li> <li> <p> <i> <code>Domain</code> </i> and <i>
* <code>DatasetType</code> </i> - Each dataset has an associated dataset domain
* and a type within the domain. Amazon Forecast provides a list of predefined
* domains and types within each domain. For each unique dataset domain and type
* within the domain, Amazon Forecast requires your data to include a minimum set
* of predefined fields.</p> </li> <li> <p> <i> <code>Schema</code> </i> - A schema
* specifies the fields in the dataset, including the field name and data type.</p>
* </li> </ul> <p>After creating a dataset, you import your training data into it
* and add the dataset to a dataset group. You use the dataset group to create a
* predictor. For more information, see <a>howitworks-datasets-groups</a>.</p>
* <p>To get a list of all your datasets, use the <a>ListDatasets</a>
* operation.</p> <p>For example Forecast datasets, see the <a
* href="https://github.com/aws-samples/amazon-forecast-samples">Amazon Forecast
* Sample GitHub repository</a>.</p> <p>The <code>Status</code> of a dataset
* must be <code>ACTIVE</code> before you can import training data. Use the
* <a>DescribeDataset</a> operation to get the status.</p> <p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDataset">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void CreateDatasetAsync(const Model::CreateDatasetRequest& request, const CreateDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Creates a dataset group, which holds a collection of related datasets. You
* can add datasets to the dataset group when you create the dataset group, or
* later by using the <a>UpdateDatasetGroup</a> operation.</p> <p>After creating a
* dataset group and adding datasets, you use the dataset group when you create a
* predictor. For more information, see <a>howitworks-datasets-groups</a>.</p>
* <p>To get a list of all your datasets groups, use the <a>ListDatasetGroups</a>
* operation.</p> <p>The <code>Status</code> of a dataset group must be
* <code>ACTIVE</code> before you can use the dataset group to create a predictor.
* To get the status, use the <a>DescribeDatasetGroup</a> operation.</p>
* <p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetGroup">AWS
* API Reference</a></p>
*/
virtual Model::CreateDatasetGroupOutcome CreateDatasetGroup(const Model::CreateDatasetGroupRequest& request) const;
/**
* <p>Creates a dataset group, which holds a collection of related datasets. You
* can add datasets to the dataset group when you create the dataset group, or
* later by using the <a>UpdateDatasetGroup</a> operation.</p> <p>After creating a
* dataset group and adding datasets, you use the dataset group when you create a
* predictor. For more information, see <a>howitworks-datasets-groups</a>.</p>
* <p>To get a list of all your datasets groups, use the <a>ListDatasetGroups</a>
* operation.</p> <p>The <code>Status</code> of a dataset group must be
* <code>ACTIVE</code> before you can use the dataset group to create a predictor.
* To get the status, use the <a>DescribeDatasetGroup</a> operation.</p>
* <p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetGroup">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::CreateDatasetGroupOutcomeCallable CreateDatasetGroupCallable(const Model::CreateDatasetGroupRequest& request) const;
/**
* <p>Creates a dataset group, which holds a collection of related datasets. You
* can add datasets to the dataset group when you create the dataset group, or
* later by using the <a>UpdateDatasetGroup</a> operation.</p> <p>After creating a
* dataset group and adding datasets, you use the dataset group when you create a
* predictor. For more information, see <a>howitworks-datasets-groups</a>.</p>
* <p>To get a list of all your datasets groups, use the <a>ListDatasetGroups</a>
* operation.</p> <p>The <code>Status</code> of a dataset group must be
* <code>ACTIVE</code> before you can use the dataset group to create a predictor.
* To get the status, use the <a>DescribeDatasetGroup</a> operation.</p>
* <p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetGroup">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void CreateDatasetGroupAsync(const Model::CreateDatasetGroupRequest& request, const CreateDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Imports your training data to an Amazon Forecast dataset. You provide the
* location of your training data in an Amazon Simple Storage Service (Amazon S3)
* bucket and the Amazon Resource Name (ARN) of the dataset that you want to import
* the data to.</p> <p>You must specify a <a>DataSource</a> object that includes an
* AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to
* access the data, as Amazon Forecast makes a copy of your data and processes it
* in an internal AWS system. For more information, see
* <a>aws-forecast-iam-roles</a>.</p> <p>The training data must be in CSV format.
* The delimiter must be a comma (,).</p> <p>You can specify the path to a specific
* CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two
* cases, Amazon Forecast imports all files up to the limit of 10,000 files.</p>
* <p>Because dataset imports are not aggregated, your most recent dataset import
* is the one that is used when training a predictor or generating a forecast. Make
* sure that your most recent dataset import contains all of the data you want to
* model off of, and not just the new data collected since the previous import.</p>
* <p>To get a list of all your dataset import jobs, filtered by specified
* criteria, use the <a>ListDatasetImportJobs</a> operation.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetImportJob">AWS
* API Reference</a></p>
*/
virtual Model::CreateDatasetImportJobOutcome CreateDatasetImportJob(const Model::CreateDatasetImportJobRequest& request) const;
/**
* <p>Imports your training data to an Amazon Forecast dataset. You provide the
* location of your training data in an Amazon Simple Storage Service (Amazon S3)
* bucket and the Amazon Resource Name (ARN) of the dataset that you want to import
* the data to.</p> <p>You must specify a <a>DataSource</a> object that includes an
* AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to
* access the data, as Amazon Forecast makes a copy of your data and processes it
* in an internal AWS system. For more information, see
* <a>aws-forecast-iam-roles</a>.</p> <p>The training data must be in CSV format.
* The delimiter must be a comma (,).</p> <p>You can specify the path to a specific
* CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two
* cases, Amazon Forecast imports all files up to the limit of 10,000 files.</p>
* <p>Because dataset imports are not aggregated, your most recent dataset import
* is the one that is used when training a predictor or generating a forecast. Make
* sure that your most recent dataset import contains all of the data you want to
* model off of, and not just the new data collected since the previous import.</p>
* <p>To get a list of all your dataset import jobs, filtered by specified
* criteria, use the <a>ListDatasetImportJobs</a> operation.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetImportJob">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::CreateDatasetImportJobOutcomeCallable CreateDatasetImportJobCallable(const Model::CreateDatasetImportJobRequest& request) const;
/**
* <p>Imports your training data to an Amazon Forecast dataset. You provide the
* location of your training data in an Amazon Simple Storage Service (Amazon S3)
* bucket and the Amazon Resource Name (ARN) of the dataset that you want to import
* the data to.</p> <p>You must specify a <a>DataSource</a> object that includes an
* AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to
* access the data, as Amazon Forecast makes a copy of your data and processes it
* in an internal AWS system. For more information, see
* <a>aws-forecast-iam-roles</a>.</p> <p>The training data must be in CSV format.
* The delimiter must be a comma (,).</p> <p>You can specify the path to a specific
* CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two
* cases, Amazon Forecast imports all files up to the limit of 10,000 files.</p>
* <p>Because dataset imports are not aggregated, your most recent dataset import
* is the one that is used when training a predictor or generating a forecast. Make
* sure that your most recent dataset import contains all of the data you want to
* model off of, and not just the new data collected since the previous import.</p>
* <p>To get a list of all your dataset import jobs, filtered by specified
* criteria, use the <a>ListDatasetImportJobs</a> operation.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetImportJob">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void CreateDatasetImportJobAsync(const Model::CreateDatasetImportJobRequest& request, const CreateDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Explainability is only available for Forecasts and Predictors
* generated from an AutoPredictor (<a>CreateAutoPredictor</a>)</p>
* <p>Creates an Amazon Forecast Explainability.</p> <p>Explainability helps you
* better understand how the attributes in your datasets impact forecast. Amazon
* Forecast uses a metric called Impact scores to quantify the relative impact of
* each attribute and determine whether they increase or decrease forecast
* values.</p> <p>To enable Forecast Explainability, your predictor must include at
* least one of the following: related time series, item metadata, or additional
* datasets like Holidays and the Weather Index.</p> <p>CreateExplainability
* accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact
* scores for all time series and time points in your datasets, provide a Predictor
* ARN. To receive Impact scores for specific time series and time points, provide
* a Forecast ARN.</p> <p> <b>CreateExplainability with a Predictor ARN</b> </p>
* <p>You can only have one Explainability resource per predictor. If you
* already enabled <code>ExplainPredictor</code> in <a>CreateAutoPredictor</a>,
* that predictor already has an Explainability resource.</p> <p>The
* following parameters are required when providing a Predictor ARN:</p> <ul> <li>
* <p> <code>ExplainabilityName</code> - A unique name for the Explainability.</p>
* </li> <li> <p> <code>ResourceArn</code> - The Arn of the predictor.</p> </li>
* <li> <p> <code>TimePointGranularity</code> - Must be set to “ALL”.</p> </li>
* <li> <p> <code>TimeSeriesGranularity</code> - Must be set to “ALL”.</p> </li>
* </ul> <p>Do not specify a value for the following parameters:</p> <ul> <li> <p>
* <code>DataSource</code> - Only valid when TimeSeriesGranularity is
* “SPECIFIC”.</p> </li> <li> <p> <code>Schema</code> - Only valid when
* TimeSeriesGranularity is “SPECIFIC”.</p> </li> <li> <p>
* <code>StartDateTime</code> - Only valid when TimePointGranularity is
* “SPECIFIC”.</p> </li> <li> <p> <code>EndDateTime</code> - Only valid when
* TimePointGranularity is “SPECIFIC”.</p> </li> </ul> <p> <b>CreateExplainability
* with a Forecast ARN</b> </p> <p>You can specify a maximum of 50 time
* series and 500 time points.</p> <p>The following parameters are required
 * when providing a Forecast ARN:</p> <ul> <li> <p>
* <code>ExplainabilityName</code> - A unique name for the Explainability.</p>
* </li> <li> <p> <code>ResourceArn</code> - The Arn of the forecast.</p> </li>
* <li> <p> <code>TimePointGranularity</code> - Either “ALL” or “SPECIFIC”.</p>
* </li> <li> <p> <code>TimeSeriesGranularity</code> - Either “ALL” or
* “SPECIFIC”.</p> </li> </ul> <p>If you set TimeSeriesGranularity to “SPECIFIC”,
* you must also provide the following:</p> <ul> <li> <p> <code>DataSource</code> -
* The S3 location of the CSV file specifying your time series.</p> </li> <li> <p>
* <code>Schema</code> - The Schema defines the attributes and attribute types
* listed in the Data Source.</p> </li> </ul> <p>If you set TimePointGranularity to
* “SPECIFIC”, you must also provide the following:</p> <ul> <li> <p>
* <code>StartDateTime</code> - The first timestamp in the range of time
* points.</p> </li> <li> <p> <code>EndDateTime</code> - The last timestamp in the
* range of time points.</p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateExplainability">AWS
* API Reference</a></p>
*/
virtual Model::CreateExplainabilityOutcome CreateExplainability(const Model::CreateExplainabilityRequest& request) const;
/**
* <p>Explainability is only available for Forecasts and Predictors
* generated from an AutoPredictor (<a>CreateAutoPredictor</a>)</p>
* <p>Creates an Amazon Forecast Explainability.</p> <p>Explainability helps you
* better understand how the attributes in your datasets impact forecast. Amazon
* Forecast uses a metric called Impact scores to quantify the relative impact of
* each attribute and determine whether they increase or decrease forecast
* values.</p> <p>To enable Forecast Explainability, your predictor must include at
* least one of the following: related time series, item metadata, or additional
* datasets like Holidays and the Weather Index.</p> <p>CreateExplainability
* accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact
* scores for all time series and time points in your datasets, provide a Predictor
* ARN. To receive Impact scores for specific time series and time points, provide
* a Forecast ARN.</p> <p> <b>CreateExplainability with a Predictor ARN</b> </p>
* <p>You can only have one Explainability resource per predictor. If you
* already enabled <code>ExplainPredictor</code> in <a>CreateAutoPredictor</a>,
* that predictor already has an Explainability resource.</p> <p>The
* following parameters are required when providing a Predictor ARN:</p> <ul> <li>
* <p> <code>ExplainabilityName</code> - A unique name for the Explainability.</p>
* </li> <li> <p> <code>ResourceArn</code> - The Arn of the predictor.</p> </li>
* <li> <p> <code>TimePointGranularity</code> - Must be set to “ALL”.</p> </li>
* <li> <p> <code>TimeSeriesGranularity</code> - Must be set to “ALL”.</p> </li>
* </ul> <p>Do not specify a value for the following parameters:</p> <ul> <li> <p>
* <code>DataSource</code> - Only valid when TimeSeriesGranularity is
* “SPECIFIC”.</p> </li> <li> <p> <code>Schema</code> - Only valid when
* TimeSeriesGranularity is “SPECIFIC”.</p> </li> <li> <p>
* <code>StartDateTime</code> - Only valid when TimePointGranularity is
* “SPECIFIC”.</p> </li> <li> <p> <code>EndDateTime</code> - Only valid when
* TimePointGranularity is “SPECIFIC”.</p> </li> </ul> <p> <b>CreateExplainability
* with a Forecast ARN</b> </p> <p>You can specify a maximum of 50 time
* series and 500 time points.</p> <p>The following parameters are required
 * when providing a Forecast ARN:</p> <ul> <li> <p>
* <code>ExplainabilityName</code> - A unique name for the Explainability.</p>
* </li> <li> <p> <code>ResourceArn</code> - The Arn of the forecast.</p> </li>
* <li> <p> <code>TimePointGranularity</code> - Either “ALL” or “SPECIFIC”.</p>
* </li> <li> <p> <code>TimeSeriesGranularity</code> - Either “ALL” or
* “SPECIFIC”.</p> </li> </ul> <p>If you set TimeSeriesGranularity to “SPECIFIC”,
* you must also provide the following:</p> <ul> <li> <p> <code>DataSource</code> -
* The S3 location of the CSV file specifying your time series.</p> </li> <li> <p>
* <code>Schema</code> - The Schema defines the attributes and attribute types
* listed in the Data Source.</p> </li> </ul> <p>If you set TimePointGranularity to
* “SPECIFIC”, you must also provide the following:</p> <ul> <li> <p>
* <code>StartDateTime</code> - The first timestamp in the range of time
* points.</p> </li> <li> <p> <code>EndDateTime</code> - The last timestamp in the
* range of time points.</p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateExplainability">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::CreateExplainabilityOutcomeCallable CreateExplainabilityCallable(const Model::CreateExplainabilityRequest& request) const;
/**
* <p>Explainability is only available for Forecasts and Predictors
* generated from an AutoPredictor (<a>CreateAutoPredictor</a>)</p>
* <p>Creates an Amazon Forecast Explainability.</p> <p>Explainability helps you
* better understand how the attributes in your datasets impact forecast. Amazon
* Forecast uses a metric called Impact scores to quantify the relative impact of
* each attribute and determine whether they increase or decrease forecast
* values.</p> <p>To enable Forecast Explainability, your predictor must include at
* least one of the following: related time series, item metadata, or additional
* datasets like Holidays and the Weather Index.</p> <p>CreateExplainability
* accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact
* scores for all time series and time points in your datasets, provide a Predictor
* ARN. To receive Impact scores for specific time series and time points, provide
* a Forecast ARN.</p> <p> <b>CreateExplainability with a Predictor ARN</b> </p>
* <p>You can only have one Explainability resource per predictor. If you
* already enabled <code>ExplainPredictor</code> in <a>CreateAutoPredictor</a>,
* that predictor already has an Explainability resource.</p> <p>The
* following parameters are required when providing a Predictor ARN:</p> <ul> <li>
* <p> <code>ExplainabilityName</code> - A unique name for the Explainability.</p>
* </li> <li> <p> <code>ResourceArn</code> - The Arn of the predictor.</p> </li>
* <li> <p> <code>TimePointGranularity</code> - Must be set to “ALL”.</p> </li>
* <li> <p> <code>TimeSeriesGranularity</code> - Must be set to “ALL”.</p> </li>
* </ul> <p>Do not specify a value for the following parameters:</p> <ul> <li> <p>
* <code>DataSource</code> - Only valid when TimeSeriesGranularity is
* “SPECIFIC”.</p> </li> <li> <p> <code>Schema</code> - Only valid when
* TimeSeriesGranularity is “SPECIFIC”.</p> </li> <li> <p>
* <code>StartDateTime</code> - Only valid when TimePointGranularity is
* “SPECIFIC”.</p> </li> <li> <p> <code>EndDateTime</code> - Only valid when
* TimePointGranularity is “SPECIFIC”.</p> </li> </ul> <p> <b>CreateExplainability
* with a Forecast ARN</b> </p> <p>You can specify a maximum of 50 time
* series and 500 time points.</p> <p>The following parameters are required
 * when providing a Forecast ARN:</p> <ul> <li> <p>
* <code>ExplainabilityName</code> - A unique name for the Explainability.</p>
* </li> <li> <p> <code>ResourceArn</code> - The Arn of the forecast.</p> </li>
* <li> <p> <code>TimePointGranularity</code> - Either “ALL” or “SPECIFIC”.</p>
* </li> <li> <p> <code>TimeSeriesGranularity</code> - Either “ALL” or
* “SPECIFIC”.</p> </li> </ul> <p>If you set TimeSeriesGranularity to “SPECIFIC”,
* you must also provide the following:</p> <ul> <li> <p> <code>DataSource</code> -
* The S3 location of the CSV file specifying your time series.</p> </li> <li> <p>
* <code>Schema</code> - The Schema defines the attributes and attribute types
* listed in the Data Source.</p> </li> </ul> <p>If you set TimePointGranularity to
* “SPECIFIC”, you must also provide the following:</p> <ul> <li> <p>
* <code>StartDateTime</code> - The first timestamp in the range of time
* points.</p> </li> <li> <p> <code>EndDateTime</code> - The last timestamp in the
* range of time points.</p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateExplainability">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void CreateExplainabilityAsync(const Model::CreateExplainabilityRequest& request, const CreateExplainabilityResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Exports an Explainability resource created by the <a>CreateExplainability</a>
         * operation. Exported files are written to an Amazon Simple Storage Service
         * (Amazon S3) bucket.</p> <p>You must specify a <a>DataDestination</a> object that
         * includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM)
         * role that Amazon Forecast can assume to access the Amazon S3 bucket. For more
         * information, see <a>aws-forecast-iam-roles</a>.</p> <p>The
         * <code>Status</code> of the export job must be <code>ACTIVE</code> before you can
         * access the export in your Amazon S3 bucket. To get the status, use the
         * <a>DescribeExplainabilityExport</a> operation.</p> <p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateExplainabilityExport">AWS
         * API Reference</a></p>
         */
        virtual Model::CreateExplainabilityExportOutcome CreateExplainabilityExport(const Model::CreateExplainabilityExportRequest& request) const;
        /**
         * <p>Exports an Explainability resource created by the <a>CreateExplainability</a>
         * operation. Exported files are written to an Amazon Simple Storage Service
         * (Amazon S3) bucket.</p> <p>You must specify a <a>DataDestination</a> object that
         * includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM)
         * role that Amazon Forecast can assume to access the Amazon S3 bucket. For more
         * information, see <a>aws-forecast-iam-roles</a>.</p> <p>The
         * <code>Status</code> of the export job must be <code>ACTIVE</code> before you can
         * access the export in your Amazon S3 bucket. To get the status, use the
         * <a>DescribeExplainabilityExport</a> operation.</p> <p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateExplainabilityExport">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::CreateExplainabilityExportOutcomeCallable CreateExplainabilityExportCallable(const Model::CreateExplainabilityExportRequest& request) const;
        /**
         * <p>Exports an Explainability resource created by the <a>CreateExplainability</a>
         * operation. Exported files are written to an Amazon Simple Storage Service
         * (Amazon S3) bucket.</p> <p>You must specify a <a>DataDestination</a> object that
         * includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM)
         * role that Amazon Forecast can assume to access the Amazon S3 bucket. For more
         * information, see <a>aws-forecast-iam-roles</a>.</p> <p>The
         * <code>Status</code> of the export job must be <code>ACTIVE</code> before you can
         * access the export in your Amazon S3 bucket. To get the status, use the
         * <a>DescribeExplainabilityExport</a> operation.</p> <p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateExplainabilityExport">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void CreateExplainabilityExportAsync(const Model::CreateExplainabilityExportRequest& request, const CreateExplainabilityExportResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Creates a forecast for each item in the <code>TARGET_TIME_SERIES</code>
         * dataset that was used to train the predictor. This is known as inference. To
         * retrieve the forecast for a single item at low latency, use the
         * <code>QueryForecast</code> operation (Amazon Forecast Query Service). To
         * export the complete forecast into your Amazon Simple Storage Service (Amazon S3)
         * bucket, use the <a>CreateForecastExportJob</a> operation.</p> <p>The range of
         * the forecast is determined by the <code>ForecastHorizon</code> value, which you
         * specify in the <a>CreatePredictor</a> request. When you query a forecast, you
         * can request a specific date range within the forecast.</p> <p>To get a list of
         * all your forecasts, use the <a>ListForecasts</a> operation.</p> <p>The
         * forecasts generated by Amazon Forecast are in the same time zone as the dataset
         * that was used to create the predictor.</p> <p>For more information, see
         * <a>howitworks-forecast</a>.</p> <p>The <code>Status</code> of the
         * forecast must be <code>ACTIVE</code> before you can query or export the
         * forecast. Use the <a>DescribeForecast</a> operation to get the status.</p>
         * <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecast">AWS
         * API Reference</a></p>
         */
        virtual Model::CreateForecastOutcome CreateForecast(const Model::CreateForecastRequest& request) const;
        /**
         * <p>Creates a forecast for each item in the <code>TARGET_TIME_SERIES</code>
         * dataset that was used to train the predictor. This is known as inference. To
         * retrieve the forecast for a single item at low latency, use the
         * <code>QueryForecast</code> operation (Amazon Forecast Query Service). To
         * export the complete forecast into your Amazon Simple Storage Service (Amazon S3)
         * bucket, use the <a>CreateForecastExportJob</a> operation.</p> <p>The range of
         * the forecast is determined by the <code>ForecastHorizon</code> value, which you
         * specify in the <a>CreatePredictor</a> request. When you query a forecast, you
         * can request a specific date range within the forecast.</p> <p>To get a list of
         * all your forecasts, use the <a>ListForecasts</a> operation.</p> <p>The
         * forecasts generated by Amazon Forecast are in the same time zone as the dataset
         * that was used to create the predictor.</p> <p>For more information, see
         * <a>howitworks-forecast</a>.</p> <p>The <code>Status</code> of the
         * forecast must be <code>ACTIVE</code> before you can query or export the
         * forecast. Use the <a>DescribeForecast</a> operation to get the status.</p>
         * <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecast">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::CreateForecastOutcomeCallable CreateForecastCallable(const Model::CreateForecastRequest& request) const;
        /**
         * <p>Creates a forecast for each item in the <code>TARGET_TIME_SERIES</code>
         * dataset that was used to train the predictor. This is known as inference. To
         * retrieve the forecast for a single item at low latency, use the
         * <code>QueryForecast</code> operation (Amazon Forecast Query Service). To
         * export the complete forecast into your Amazon Simple Storage Service (Amazon S3)
         * bucket, use the <a>CreateForecastExportJob</a> operation.</p> <p>The range of
         * the forecast is determined by the <code>ForecastHorizon</code> value, which you
         * specify in the <a>CreatePredictor</a> request. When you query a forecast, you
         * can request a specific date range within the forecast.</p> <p>To get a list of
         * all your forecasts, use the <a>ListForecasts</a> operation.</p> <p>The
         * forecasts generated by Amazon Forecast are in the same time zone as the dataset
         * that was used to create the predictor.</p> <p>For more information, see
         * <a>howitworks-forecast</a>.</p> <p>The <code>Status</code> of the
         * forecast must be <code>ACTIVE</code> before you can query or export the
         * forecast. Use the <a>DescribeForecast</a> operation to get the status.</p>
         * <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecast">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void CreateForecastAsync(const Model::CreateForecastRequest& request, const CreateForecastResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Exports a forecast created by the <a>CreateForecast</a> operation to your
         * Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will
         * match the following convention:</p>
         * <p><ForecastExportJobName>_<ExportTimestamp>_<PartNumber></p>
         * <p>where the <ExportTimestamp> component is in Java SimpleDateFormat
         * (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object
         * that includes an AWS Identity and Access Management (IAM) role that Amazon
         * Forecast can assume to access the Amazon S3 bucket. For more information, see
         * <a>aws-forecast-iam-roles</a>.</p> <p>For more information, see
         * <a>howitworks-forecast</a>.</p> <p>To get a list of all your forecast export
         * jobs, use the <a>ListForecastExportJobs</a> operation.</p> <p>The
         * <code>Status</code> of the forecast export job must be <code>ACTIVE</code>
         * before you can access the forecast in your Amazon S3 bucket. To get the status,
         * use the <a>DescribeForecastExportJob</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecastExportJob">AWS
         * API Reference</a></p>
         */
        virtual Model::CreateForecastExportJobOutcome CreateForecastExportJob(const Model::CreateForecastExportJobRequest& request) const;
        /**
         * <p>Exports a forecast created by the <a>CreateForecast</a> operation to your
         * Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will
         * match the following convention:</p>
         * <p><ForecastExportJobName>_<ExportTimestamp>_<PartNumber></p>
         * <p>where the <ExportTimestamp> component is in Java SimpleDateFormat
         * (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object
         * that includes an AWS Identity and Access Management (IAM) role that Amazon
         * Forecast can assume to access the Amazon S3 bucket. For more information, see
         * <a>aws-forecast-iam-roles</a>.</p> <p>For more information, see
         * <a>howitworks-forecast</a>.</p> <p>To get a list of all your forecast export
         * jobs, use the <a>ListForecastExportJobs</a> operation.</p> <p>The
         * <code>Status</code> of the forecast export job must be <code>ACTIVE</code>
         * before you can access the forecast in your Amazon S3 bucket. To get the status,
         * use the <a>DescribeForecastExportJob</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecastExportJob">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::CreateForecastExportJobOutcomeCallable CreateForecastExportJobCallable(const Model::CreateForecastExportJobRequest& request) const;
        /**
         * <p>Exports a forecast created by the <a>CreateForecast</a> operation to your
         * Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will
         * match the following convention:</p>
         * <p><ForecastExportJobName>_<ExportTimestamp>_<PartNumber></p>
         * <p>where the <ExportTimestamp> component is in Java SimpleDateFormat
         * (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object
         * that includes an AWS Identity and Access Management (IAM) role that Amazon
         * Forecast can assume to access the Amazon S3 bucket. For more information, see
         * <a>aws-forecast-iam-roles</a>.</p> <p>For more information, see
         * <a>howitworks-forecast</a>.</p> <p>To get a list of all your forecast export
         * jobs, use the <a>ListForecastExportJobs</a> operation.</p> <p>The
         * <code>Status</code> of the forecast export job must be <code>ACTIVE</code>
         * before you can access the forecast in your Amazon S3 bucket. To get the status,
         * use the <a>DescribeForecastExportJob</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecastExportJob">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void CreateForecastExportJobAsync(const Model::CreateForecastExportJobRequest& request, const CreateForecastExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p> This operation creates a legacy predictor that does not include all
         * the predictor functionalities provided by Amazon Forecast. To create a predictor
         * that is compatible with all aspects of Forecast, use
         * <a>CreateAutoPredictor</a>.</p> <p>Creates an Amazon Forecast
         * predictor.</p> <p>In the request, provide a dataset group and either specify an
         * algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If
         * you specify an algorithm, you also can override algorithm-specific
         * hyperparameters.</p> <p>Amazon Forecast uses the algorithm to train a predictor
         * using the latest version of the datasets in the specified dataset group. You can
         * then generate a forecast using the <a>CreateForecast</a> operation.</p> <p> To
         * see the evaluation metrics, use the <a>GetAccuracyMetrics</a> operation. </p>
         * <p>You can specify a featurization configuration to fill and aggregate the data
         * fields in the <code>TARGET_TIME_SERIES</code> dataset to improve model training.
         * For more information, see <a>FeaturizationConfig</a>.</p> <p>For
         * RELATED_TIME_SERIES datasets, <code>CreatePredictor</code> verifies that the
         * <code>DataFrequency</code> specified when the dataset was created matches the
         * <code>ForecastFrequency</code>. TARGET_TIME_SERIES datasets don't have this
         * restriction. Amazon Forecast also verifies the delimiter and timestamp format.
         * For more information, see <a>howitworks-datasets-groups</a>.</p> <p>By default,
         * predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90)
         * quantiles. You can choose custom forecast types to train and evaluate your
         * predictor by setting the <code>ForecastTypes</code>. </p> <p> <b>AutoML</b> </p>
         * <p>If you want Amazon Forecast to evaluate each algorithm and choose the one
         * that minimizes the <code>objective function</code>, set
         * <code>PerformAutoML</code> to <code>true</code>. The <code>objective
         * function</code> is defined as the mean of the weighted losses over the forecast
         * types. By default, these are the p10, p50, and p90 quantile losses. For more
         * information, see <a>EvaluationResult</a>.</p> <p>When AutoML is enabled, the
         * following properties are disallowed:</p> <ul> <li> <p> <code>AlgorithmArn</code>
         * </p> </li> <li> <p> <code>HPOConfig</code> </p> </li> <li> <p>
         * <code>PerformHPO</code> </p> </li> <li> <p> <code>TrainingParameters</code> </p>
         * </li> </ul> <p>To get a list of all your predictors, use the
         * <a>ListPredictors</a> operation.</p> <p>Before you can use the predictor
         * to create a forecast, the <code>Status</code> of the predictor must be
         * <code>ACTIVE</code>, signifying that training has completed. To get the status,
         * use the <a>DescribePredictor</a> operation.</p> <p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictor">AWS
         * API Reference</a></p>
         */
        virtual Model::CreatePredictorOutcome CreatePredictor(const Model::CreatePredictorRequest& request) const;
        /**
         * <p> This operation creates a legacy predictor that does not include all
         * the predictor functionalities provided by Amazon Forecast. To create a predictor
         * that is compatible with all aspects of Forecast, use
         * <a>CreateAutoPredictor</a>.</p> <p>Creates an Amazon Forecast
         * predictor.</p> <p>In the request, provide a dataset group and either specify an
         * algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If
         * you specify an algorithm, you also can override algorithm-specific
         * hyperparameters.</p> <p>Amazon Forecast uses the algorithm to train a predictor
         * using the latest version of the datasets in the specified dataset group. You can
         * then generate a forecast using the <a>CreateForecast</a> operation.</p> <p> To
         * see the evaluation metrics, use the <a>GetAccuracyMetrics</a> operation. </p>
         * <p>You can specify a featurization configuration to fill and aggregate the data
         * fields in the <code>TARGET_TIME_SERIES</code> dataset to improve model training.
         * For more information, see <a>FeaturizationConfig</a>.</p> <p>For
         * RELATED_TIME_SERIES datasets, <code>CreatePredictor</code> verifies that the
         * <code>DataFrequency</code> specified when the dataset was created matches the
         * <code>ForecastFrequency</code>. TARGET_TIME_SERIES datasets don't have this
         * restriction. Amazon Forecast also verifies the delimiter and timestamp format.
         * For more information, see <a>howitworks-datasets-groups</a>.</p> <p>By default,
         * predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90)
         * quantiles. You can choose custom forecast types to train and evaluate your
         * predictor by setting the <code>ForecastTypes</code>. </p> <p> <b>AutoML</b> </p>
         * <p>If you want Amazon Forecast to evaluate each algorithm and choose the one
         * that minimizes the <code>objective function</code>, set
         * <code>PerformAutoML</code> to <code>true</code>. The <code>objective
         * function</code> is defined as the mean of the weighted losses over the forecast
         * types. By default, these are the p10, p50, and p90 quantile losses. For more
         * information, see <a>EvaluationResult</a>.</p> <p>When AutoML is enabled, the
         * following properties are disallowed:</p> <ul> <li> <p> <code>AlgorithmArn</code>
         * </p> </li> <li> <p> <code>HPOConfig</code> </p> </li> <li> <p>
         * <code>PerformHPO</code> </p> </li> <li> <p> <code>TrainingParameters</code> </p>
         * </li> </ul> <p>To get a list of all your predictors, use the
         * <a>ListPredictors</a> operation.</p> <p>Before you can use the predictor
         * to create a forecast, the <code>Status</code> of the predictor must be
         * <code>ACTIVE</code>, signifying that training has completed. To get the status,
         * use the <a>DescribePredictor</a> operation.</p> <p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictor">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::CreatePredictorOutcomeCallable CreatePredictorCallable(const Model::CreatePredictorRequest& request) const;
        /**
         * <p> This operation creates a legacy predictor that does not include all
         * the predictor functionalities provided by Amazon Forecast. To create a predictor
         * that is compatible with all aspects of Forecast, use
         * <a>CreateAutoPredictor</a>.</p> <p>Creates an Amazon Forecast
         * predictor.</p> <p>In the request, provide a dataset group and either specify an
         * algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If
         * you specify an algorithm, you also can override algorithm-specific
         * hyperparameters.</p> <p>Amazon Forecast uses the algorithm to train a predictor
         * using the latest version of the datasets in the specified dataset group. You can
         * then generate a forecast using the <a>CreateForecast</a> operation.</p> <p> To
         * see the evaluation metrics, use the <a>GetAccuracyMetrics</a> operation. </p>
         * <p>You can specify a featurization configuration to fill and aggregate the data
         * fields in the <code>TARGET_TIME_SERIES</code> dataset to improve model training.
         * For more information, see <a>FeaturizationConfig</a>.</p> <p>For
         * RELATED_TIME_SERIES datasets, <code>CreatePredictor</code> verifies that the
         * <code>DataFrequency</code> specified when the dataset was created matches the
         * <code>ForecastFrequency</code>. TARGET_TIME_SERIES datasets don't have this
         * restriction. Amazon Forecast also verifies the delimiter and timestamp format.
         * For more information, see <a>howitworks-datasets-groups</a>.</p> <p>By default,
         * predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90)
         * quantiles. You can choose custom forecast types to train and evaluate your
         * predictor by setting the <code>ForecastTypes</code>. </p> <p> <b>AutoML</b> </p>
         * <p>If you want Amazon Forecast to evaluate each algorithm and choose the one
         * that minimizes the <code>objective function</code>, set
         * <code>PerformAutoML</code> to <code>true</code>. The <code>objective
         * function</code> is defined as the mean of the weighted losses over the forecast
         * types. By default, these are the p10, p50, and p90 quantile losses. For more
         * information, see <a>EvaluationResult</a>.</p> <p>When AutoML is enabled, the
         * following properties are disallowed:</p> <ul> <li> <p> <code>AlgorithmArn</code>
         * </p> </li> <li> <p> <code>HPOConfig</code> </p> </li> <li> <p>
         * <code>PerformHPO</code> </p> </li> <li> <p> <code>TrainingParameters</code> </p>
         * </li> </ul> <p>To get a list of all your predictors, use the
         * <a>ListPredictors</a> operation.</p> <p>Before you can use the predictor
         * to create a forecast, the <code>Status</code> of the predictor must be
         * <code>ACTIVE</code>, signifying that training has completed. To get the status,
         * use the <a>DescribePredictor</a> operation.</p> <p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictor">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void CreatePredictorAsync(const Model::CreatePredictorRequest& request, const CreatePredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Exports backtest forecasts and accuracy metrics generated by the
         * <a>CreateAutoPredictor</a> or <a>CreatePredictor</a> operations. Two folders
         * containing CSV files are exported to your specified S3 bucket.</p> <p> The
         * export file names will match the following conventions:</p> <p>
         * <code><ExportJobName>_<ExportTimestamp>_<PartNumber>.csv</code>
         * </p> <p>The <ExportTimestamp> component is in Java SimpleDateFormat
         * (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object
         * that includes an Amazon S3 bucket and an AWS Identity and Access Management
         * (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For
         * more information, see <a>aws-forecast-iam-roles</a>.</p> <p>The
         * <code>Status</code> of the export job must be <code>ACTIVE</code> before you can
         * access the export in your Amazon S3 bucket. To get the status, use the
         * <a>DescribePredictorBacktestExportJob</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictorBacktestExportJob">AWS
         * API Reference</a></p>
         */
        virtual Model::CreatePredictorBacktestExportJobOutcome CreatePredictorBacktestExportJob(const Model::CreatePredictorBacktestExportJobRequest& request) const;
        /**
         * <p>Exports backtest forecasts and accuracy metrics generated by the
         * <a>CreateAutoPredictor</a> or <a>CreatePredictor</a> operations. Two folders
         * containing CSV files are exported to your specified S3 bucket.</p> <p> The
         * export file names will match the following conventions:</p> <p>
         * <code><ExportJobName>_<ExportTimestamp>_<PartNumber>.csv</code>
         * </p> <p>The <ExportTimestamp> component is in Java SimpleDateFormat
         * (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object
         * that includes an Amazon S3 bucket and an AWS Identity and Access Management
         * (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For
         * more information, see <a>aws-forecast-iam-roles</a>.</p> <p>The
         * <code>Status</code> of the export job must be <code>ACTIVE</code> before you can
         * access the export in your Amazon S3 bucket. To get the status, use the
         * <a>DescribePredictorBacktestExportJob</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictorBacktestExportJob">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::CreatePredictorBacktestExportJobOutcomeCallable CreatePredictorBacktestExportJobCallable(const Model::CreatePredictorBacktestExportJobRequest& request) const;
        /**
         * <p>Exports backtest forecasts and accuracy metrics generated by the
         * <a>CreateAutoPredictor</a> or <a>CreatePredictor</a> operations. Two folders
         * containing CSV files are exported to your specified S3 bucket.</p> <p> The
         * export file names will match the following conventions:</p> <p>
         * <code><ExportJobName>_<ExportTimestamp>_<PartNumber>.csv</code>
         * </p> <p>The <ExportTimestamp> component is in Java SimpleDateFormat
         * (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object
         * that includes an Amazon S3 bucket and an AWS Identity and Access Management
         * (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For
         * more information, see <a>aws-forecast-iam-roles</a>.</p> <p>The
         * <code>Status</code> of the export job must be <code>ACTIVE</code> before you can
         * access the export in your Amazon S3 bucket. To get the status, use the
         * <a>DescribePredictorBacktestExportJob</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictorBacktestExportJob">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void CreatePredictorBacktestExportJobAsync(const Model::CreatePredictorBacktestExportJobRequest& request, const CreatePredictorBacktestExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Deletes an Amazon Forecast dataset that was created using the
         * <a>CreateDataset</a> operation. You can only delete datasets that have a status
         * of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status use the
         * <a>DescribeDataset</a> operation.</p> <p>Forecast does not automatically
         * update any dataset groups that contain the deleted dataset. In order to update
         * the dataset group, use the <a>UpdateDatasetGroup</a> operation, omitting the
         * deleted dataset's ARN.</p>
         * <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDataset">AWS
         * API Reference</a></p>
         */
        virtual Model::DeleteDatasetOutcome DeleteDataset(const Model::DeleteDatasetRequest& request) const;
        /**
         * <p>Deletes an Amazon Forecast dataset that was created using the
         * <a>CreateDataset</a> operation. You can only delete datasets that have a status
         * of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status use the
         * <a>DescribeDataset</a> operation.</p> <p>Forecast does not automatically
         * update any dataset groups that contain the deleted dataset. In order to update
         * the dataset group, use the <a>UpdateDatasetGroup</a> operation, omitting the
         * deleted dataset's ARN.</p>
         * <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDataset">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::DeleteDatasetOutcomeCallable DeleteDatasetCallable(const Model::DeleteDatasetRequest& request) const;
        /**
         * <p>Deletes an Amazon Forecast dataset that was created using the
         * <a>CreateDataset</a> operation. You can only delete datasets that have a status
         * of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status use the
         * <a>DescribeDataset</a> operation.</p> <p>Forecast does not automatically
         * update any dataset groups that contain the deleted dataset. In order to update
         * the dataset group, use the <a>UpdateDatasetGroup</a> operation, omitting the
         * deleted dataset's ARN.</p>
         * <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDataset">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void DeleteDatasetAsync(const Model::DeleteDatasetRequest& request, const DeleteDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Deletes a dataset group created using the <a>CreateDatasetGroup</a>
         * operation. You can only delete dataset groups that have a status of
         * <code>ACTIVE</code>, <code>CREATE_FAILED</code>, or <code>UPDATE_FAILED</code>.
         * To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> <p>This
         * operation deletes only the dataset group, not the datasets in the
         * group.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetGroup">AWS
         * API Reference</a></p>
         */
        virtual Model::DeleteDatasetGroupOutcome DeleteDatasetGroup(const Model::DeleteDatasetGroupRequest& request) const;
        /**
         * <p>Deletes a dataset group created using the <a>CreateDatasetGroup</a>
         * operation. You can only delete dataset groups that have a status of
         * <code>ACTIVE</code>, <code>CREATE_FAILED</code>, or <code>UPDATE_FAILED</code>.
         * To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> <p>This
         * operation deletes only the dataset group, not the datasets in the
         * group.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetGroup">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::DeleteDatasetGroupOutcomeCallable DeleteDatasetGroupCallable(const Model::DeleteDatasetGroupRequest& request) const;
        /**
         * <p>Deletes a dataset group created using the <a>CreateDatasetGroup</a>
         * operation. You can only delete dataset groups that have a status of
         * <code>ACTIVE</code>, <code>CREATE_FAILED</code>, or <code>UPDATE_FAILED</code>.
         * To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> <p>This
         * operation deletes only the dataset group, not the datasets in the
         * group.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetGroup">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void DeleteDatasetGroupAsync(const Model::DeleteDatasetGroupRequest& request, const DeleteDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Deletes a dataset import job created using the <a>CreateDatasetImportJob</a>
         * operation. You can delete only dataset import jobs that have a status of
         * <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the
         * <a>DescribeDatasetImportJob</a> operation.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetImportJob">AWS
         * API Reference</a></p>
         */
        virtual Model::DeleteDatasetImportJobOutcome DeleteDatasetImportJob(const Model::DeleteDatasetImportJobRequest& request) const;
        /**
         * <p>Deletes a dataset import job created using the <a>CreateDatasetImportJob</a>
         * operation. You can delete only dataset import jobs that have a status of
         * <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the
         * <a>DescribeDatasetImportJob</a> operation.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetImportJob">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::DeleteDatasetImportJobOutcomeCallable DeleteDatasetImportJobCallable(const Model::DeleteDatasetImportJobRequest& request) const;
        /**
         * <p>Deletes a dataset import job created using the <a>CreateDatasetImportJob</a>
         * operation. You can delete only dataset import jobs that have a status of
         * <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the
         * <a>DescribeDatasetImportJob</a> operation.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetImportJob">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void DeleteDatasetImportJobAsync(const Model::DeleteDatasetImportJobRequest& request, const DeleteDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Deletes an Explainability resource.</p> <p>You can delete only Explainability
         * resources that have a status of <code>ACTIVE</code> or
         * <code>CREATE_FAILED</code>. To get the
         * status, use the <a>DescribeExplainability</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteExplainability">AWS
         * API Reference</a></p>
         */
        virtual Model::DeleteExplainabilityOutcome DeleteExplainability(const Model::DeleteExplainabilityRequest& request) const;
        /**
         * <p>Deletes an Explainability resource.</p> <p>You can delete only Explainability
         * resources that have a status of <code>ACTIVE</code> or
         * <code>CREATE_FAILED</code>. To get the
         * status, use the <a>DescribeExplainability</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteExplainability">AWS
         * API Reference</a></p>
         *
         * returns a future to the operation so that it can be executed in parallel to other requests.
         */
        virtual Model::DeleteExplainabilityOutcomeCallable DeleteExplainabilityCallable(const Model::DeleteExplainabilityRequest& request) const;
        /**
         * <p>Deletes an Explainability resource.</p> <p>You can delete only Explainability
         * resources that have a status of <code>ACTIVE</code> or
         * <code>CREATE_FAILED</code>. To get the
         * status, use the <a>DescribeExplainability</a> operation.</p> <p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteExplainability">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers associated callback when operation has finished.
         */
        virtual void DeleteExplainabilityAsync(const Model::DeleteExplainabilityRequest& request, const DeleteExplainabilityResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Deletes an Explainability export.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteExplainabilityExport">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DeleteExplainabilityExportOutcome DeleteExplainabilityExport(const Model::DeleteExplainabilityExportRequest& request) const;
/**
 * <p>Deletes an Explainability export.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteExplainabilityExport">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DeleteExplainabilityExportOutcomeCallable DeleteExplainabilityExportCallable(const Model::DeleteExplainabilityExportRequest& request) const;
/**
 * <p>Deletes an Explainability export.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteExplainabilityExport">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DeleteExplainabilityExportAsync(const Model::DeleteExplainabilityExportRequest& request, const DeleteExplainabilityExportResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Deletes a forecast created using the <a>CreateForecast</a> operation. You can
 * delete only forecasts that have a status of <code>ACTIVE</code> or
 * <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecast</a>
 * operation.</p> <p>You can't delete a forecast while it is being exported. After
 * a forecast is deleted, you can no longer query the forecast.</p><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecast">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DeleteForecastOutcome DeleteForecast(const Model::DeleteForecastRequest& request) const;
/**
 * <p>Deletes a forecast created using the <a>CreateForecast</a> operation. You can
 * delete only forecasts that have a status of <code>ACTIVE</code> or
 * <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecast</a>
 * operation.</p> <p>You can't delete a forecast while it is being exported. After
 * a forecast is deleted, you can no longer query the forecast.</p><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecast">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DeleteForecastOutcomeCallable DeleteForecastCallable(const Model::DeleteForecastRequest& request) const;
/**
 * <p>Deletes a forecast created using the <a>CreateForecast</a> operation. You can
 * delete only forecasts that have a status of <code>ACTIVE</code> or
 * <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecast</a>
 * operation.</p> <p>You can't delete a forecast while it is being exported. After
 * a forecast is deleted, you can no longer query the forecast.</p><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecast">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DeleteForecastAsync(const Model::DeleteForecastRequest& request, const DeleteForecastResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Deletes a forecast export job created using the
 * <a>CreateForecastExportJob</a> operation. You can delete only export jobs that
 * have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the
 * status, use the <a>DescribeForecastExportJob</a> operation.</p><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecastExportJob">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DeleteForecastExportJobOutcome DeleteForecastExportJob(const Model::DeleteForecastExportJobRequest& request) const;
/**
 * <p>Deletes a forecast export job created using the
 * <a>CreateForecastExportJob</a> operation. You can delete only export jobs that
 * have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the
 * status, use the <a>DescribeForecastExportJob</a> operation.</p><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecastExportJob">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DeleteForecastExportJobOutcomeCallable DeleteForecastExportJobCallable(const Model::DeleteForecastExportJobRequest& request) const;
/**
 * <p>Deletes a forecast export job created using the
 * <a>CreateForecastExportJob</a> operation. You can delete only export jobs that
 * have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the
 * status, use the <a>DescribeForecastExportJob</a> operation.</p><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecastExportJob">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DeleteForecastExportJobAsync(const Model::DeleteForecastExportJobRequest& request, const DeleteForecastExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Deletes a predictor created using the CreateAutoPredictor or
 * <a>CreatePredictor</a> operations. You can delete only predictors that have a
 * status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status,
 * use the <a>DescribePredictor</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictor">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DeletePredictorOutcome DeletePredictor(const Model::DeletePredictorRequest& request) const;
/**
 * <p>Deletes a predictor created using the CreateAutoPredictor or
 * <a>CreatePredictor</a> operations. You can delete only predictors that have a
 * status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status,
 * use the <a>DescribePredictor</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictor">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DeletePredictorOutcomeCallable DeletePredictorCallable(const Model::DeletePredictorRequest& request) const;
/**
 * <p>Deletes a predictor created using the CreateAutoPredictor or
 * <a>CreatePredictor</a> operations. You can delete only predictors that have a
 * status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status,
 * use the <a>DescribePredictor</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictor">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DeletePredictorAsync(const Model::DeletePredictorRequest& request, const DeletePredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Deletes a predictor backtest export job.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictorBacktestExportJob">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DeletePredictorBacktestExportJobOutcome DeletePredictorBacktestExportJob(const Model::DeletePredictorBacktestExportJobRequest& request) const;
/**
 * <p>Deletes a predictor backtest export job.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictorBacktestExportJob">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DeletePredictorBacktestExportJobOutcomeCallable DeletePredictorBacktestExportJobCallable(const Model::DeletePredictorBacktestExportJobRequest& request) const;
/**
 * <p>Deletes a predictor backtest export job.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictorBacktestExportJob">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DeletePredictorBacktestExportJobAsync(const Model::DeletePredictorBacktestExportJobRequest& request, const DeletePredictorBacktestExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Deletes an entire resource tree. This operation will delete the parent
 * resource and its child resources.</p> <p>Child resources are resources that were
 * created from another resource. For example, when a forecast is generated from a
 * predictor, the forecast is the child resource and the predictor is the parent
 * resource.</p> <p>Amazon Forecast resources possess the following parent-child
 * resource hierarchies:</p> <ul> <li> <p> <b>Dataset</b>: dataset import jobs</p>
 * </li> <li> <p> <b>Dataset Group</b>: predictors, predictor backtest export jobs,
 * forecasts, forecast export jobs</p> </li> <li> <p> <b>Predictor</b>: predictor
 * backtest export jobs, forecasts, forecast export jobs</p> </li> <li> <p>
 * <b>Forecast</b>: forecast export jobs</p> </li> </ul> <p>
 * <code>DeleteResourceTree</code> will only delete Amazon Forecast resources, and
 * will not delete datasets or exported files stored in Amazon S3. </p>
 * <p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteResourceTree">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DeleteResourceTreeOutcome DeleteResourceTree(const Model::DeleteResourceTreeRequest& request) const;
/**
 * <p>Deletes an entire resource tree. This operation will delete the parent
 * resource and its child resources.</p> <p>Child resources are resources that were
 * created from another resource. For example, when a forecast is generated from a
 * predictor, the forecast is the child resource and the predictor is the parent
 * resource.</p> <p>Amazon Forecast resources possess the following parent-child
 * resource hierarchies:</p> <ul> <li> <p> <b>Dataset</b>: dataset import jobs</p>
 * </li> <li> <p> <b>Dataset Group</b>: predictors, predictor backtest export jobs,
 * forecasts, forecast export jobs</p> </li> <li> <p> <b>Predictor</b>: predictor
 * backtest export jobs, forecasts, forecast export jobs</p> </li> <li> <p>
 * <b>Forecast</b>: forecast export jobs</p> </li> </ul> <p>
 * <code>DeleteResourceTree</code> will only delete Amazon Forecast resources, and
 * will not delete datasets or exported files stored in Amazon S3. </p>
 * <p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteResourceTree">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DeleteResourceTreeOutcomeCallable DeleteResourceTreeCallable(const Model::DeleteResourceTreeRequest& request) const;
/**
 * <p>Deletes an entire resource tree. This operation will delete the parent
 * resource and its child resources.</p> <p>Child resources are resources that were
 * created from another resource. For example, when a forecast is generated from a
 * predictor, the forecast is the child resource and the predictor is the parent
 * resource.</p> <p>Amazon Forecast resources possess the following parent-child
 * resource hierarchies:</p> <ul> <li> <p> <b>Dataset</b>: dataset import jobs</p>
 * </li> <li> <p> <b>Dataset Group</b>: predictors, predictor backtest export jobs,
 * forecasts, forecast export jobs</p> </li> <li> <p> <b>Predictor</b>: predictor
 * backtest export jobs, forecasts, forecast export jobs</p> </li> <li> <p>
 * <b>Forecast</b>: forecast export jobs</p> </li> </ul> <p>
 * <code>DeleteResourceTree</code> will only delete Amazon Forecast resources, and
 * will not delete datasets or exported files stored in Amazon S3. </p>
 * <p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteResourceTree">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DeleteResourceTreeAsync(const Model::DeleteResourceTreeRequest& request, const DeleteResourceTreeResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes a predictor created using the CreateAutoPredictor
 * operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeAutoPredictor">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeAutoPredictorOutcome DescribeAutoPredictor(const Model::DescribeAutoPredictorRequest& request) const;
/**
 * <p>Describes a predictor created using the CreateAutoPredictor
 * operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeAutoPredictor">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeAutoPredictorOutcomeCallable DescribeAutoPredictorCallable(const Model::DescribeAutoPredictorRequest& request) const;
/**
 * <p>Describes a predictor created using the CreateAutoPredictor
 * operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeAutoPredictor">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeAutoPredictorAsync(const Model::DescribeAutoPredictorRequest& request, const DescribeAutoPredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes an Amazon Forecast dataset created using the <a>CreateDataset</a>
 * operation.</p> <p>In addition to listing the parameters specified in the
 * <code>CreateDataset</code> request, this operation includes the following
 * dataset properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li>
 * <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code>
 * </p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDataset">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeDatasetOutcome DescribeDataset(const Model::DescribeDatasetRequest& request) const;
/**
 * <p>Describes an Amazon Forecast dataset created using the <a>CreateDataset</a>
 * operation.</p> <p>In addition to listing the parameters specified in the
 * <code>CreateDataset</code> request, this operation includes the following
 * dataset properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li>
 * <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code>
 * </p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDataset">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeDatasetOutcomeCallable DescribeDatasetCallable(const Model::DescribeDatasetRequest& request) const;
/**
 * <p>Describes an Amazon Forecast dataset created using the <a>CreateDataset</a>
 * operation.</p> <p>In addition to listing the parameters specified in the
 * <code>CreateDataset</code> request, this operation includes the following
 * dataset properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li>
 * <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code>
 * </p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDataset">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeDatasetAsync(const Model::DescribeDatasetRequest& request, const DescribeDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes a dataset group created using the <a>CreateDatasetGroup</a>
 * operation.</p> <p>In addition to listing the parameters provided in the
 * <code>CreateDatasetGroup</code> request, this operation includes the following
 * properties:</p> <ul> <li> <p> <code>DatasetArns</code> - The datasets belonging
 * to the group.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p>
 * <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p>
 * </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetGroup">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeDatasetGroupOutcome DescribeDatasetGroup(const Model::DescribeDatasetGroupRequest& request) const;
/**
 * <p>Describes a dataset group created using the <a>CreateDatasetGroup</a>
 * operation.</p> <p>In addition to listing the parameters provided in the
 * <code>CreateDatasetGroup</code> request, this operation includes the following
 * properties:</p> <ul> <li> <p> <code>DatasetArns</code> - The datasets belonging
 * to the group.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p>
 * <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p>
 * </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetGroup">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeDatasetGroupOutcomeCallable DescribeDatasetGroupCallable(const Model::DescribeDatasetGroupRequest& request) const;
/**
 * <p>Describes a dataset group created using the <a>CreateDatasetGroup</a>
 * operation.</p> <p>In addition to listing the parameters provided in the
 * <code>CreateDatasetGroup</code> request, this operation includes the following
 * properties:</p> <ul> <li> <p> <code>DatasetArns</code> - The datasets belonging
 * to the group.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p>
 * <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p>
 * </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetGroup">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeDatasetGroupAsync(const Model::DescribeDatasetGroupRequest& request, const DescribeDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes a dataset import job created using the
 * <a>CreateDatasetImportJob</a> operation.</p> <p>In addition to listing the
 * parameters provided in the <code>CreateDatasetImportJob</code> request, this
 * operation includes the following properties:</p> <ul> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>DataSize</code> </p> </li> <li> <p>
 * <code>FieldStatistics</code> </p> </li> <li> <p> <code>Status</code> </p> </li>
 * <li> <p> <code>Message</code> - If an error occurred, information about the
 * error.</p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetImportJob">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeDatasetImportJobOutcome DescribeDatasetImportJob(const Model::DescribeDatasetImportJobRequest& request) const;
/**
 * <p>Describes a dataset import job created using the
 * <a>CreateDatasetImportJob</a> operation.</p> <p>In addition to listing the
 * parameters provided in the <code>CreateDatasetImportJob</code> request, this
 * operation includes the following properties:</p> <ul> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>DataSize</code> </p> </li> <li> <p>
 * <code>FieldStatistics</code> </p> </li> <li> <p> <code>Status</code> </p> </li>
 * <li> <p> <code>Message</code> - If an error occurred, information about the
 * error.</p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetImportJob">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeDatasetImportJobOutcomeCallable DescribeDatasetImportJobCallable(const Model::DescribeDatasetImportJobRequest& request) const;
/**
 * <p>Describes a dataset import job created using the
 * <a>CreateDatasetImportJob</a> operation.</p> <p>In addition to listing the
 * parameters provided in the <code>CreateDatasetImportJob</code> request, this
 * operation includes the following properties:</p> <ul> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>DataSize</code> </p> </li> <li> <p>
 * <code>FieldStatistics</code> </p> </li> <li> <p> <code>Status</code> </p> </li>
 * <li> <p> <code>Message</code> - If an error occurred, information about the
 * error.</p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetImportJob">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeDatasetImportJobAsync(const Model::DescribeDatasetImportJobRequest& request, const DescribeDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes an Explainability resource created using the
 * <a>CreateExplainability</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeExplainability">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeExplainabilityOutcome DescribeExplainability(const Model::DescribeExplainabilityRequest& request) const;
/**
 * <p>Describes an Explainability resource created using the
 * <a>CreateExplainability</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeExplainability">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeExplainabilityOutcomeCallable DescribeExplainabilityCallable(const Model::DescribeExplainabilityRequest& request) const;
/**
 * <p>Describes an Explainability resource created using the
 * <a>CreateExplainability</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeExplainability">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeExplainabilityAsync(const Model::DescribeExplainabilityRequest& request, const DescribeExplainabilityResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes an Explainability export created using the
 * <a>CreateExplainabilityExport</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeExplainabilityExport">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeExplainabilityExportOutcome DescribeExplainabilityExport(const Model::DescribeExplainabilityExportRequest& request) const;
/**
 * <p>Describes an Explainability export created using the
 * <a>CreateExplainabilityExport</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeExplainabilityExport">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeExplainabilityExportOutcomeCallable DescribeExplainabilityExportCallable(const Model::DescribeExplainabilityExportRequest& request) const;
/**
 * <p>Describes an Explainability export created using the
 * <a>CreateExplainabilityExport</a> operation.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeExplainabilityExport">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeExplainabilityExportAsync(const Model::DescribeExplainabilityExportRequest& request, const DescribeExplainabilityExportResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes a forecast created using the <a>CreateForecast</a> operation.</p>
 * <p>In addition to listing the properties provided in the
 * <code>CreateForecast</code> request, this operation lists the following
 * properties:</p> <ul> <li> <p> <code>DatasetGroupArn</code> - The dataset group
 * that provided the training data.</p> </li> <li> <p> <code>CreationTime</code>
 * </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p>
 * <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error
 * occurred, information about the error.</p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecast">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeForecastOutcome DescribeForecast(const Model::DescribeForecastRequest& request) const;
/**
 * <p>Describes a forecast created using the <a>CreateForecast</a> operation.</p>
 * <p>In addition to listing the properties provided in the
 * <code>CreateForecast</code> request, this operation lists the following
 * properties:</p> <ul> <li> <p> <code>DatasetGroupArn</code> - The dataset group
 * that provided the training data.</p> </li> <li> <p> <code>CreationTime</code>
 * </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p>
 * <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error
 * occurred, information about the error.</p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecast">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeForecastOutcomeCallable DescribeForecastCallable(const Model::DescribeForecastRequest& request) const;
/**
 * <p>Describes a forecast created using the <a>CreateForecast</a> operation.</p>
 * <p>In addition to listing the properties provided in the
 * <code>CreateForecast</code> request, this operation lists the following
 * properties:</p> <ul> <li> <p> <code>DatasetGroupArn</code> - The dataset group
 * that provided the training data.</p> </li> <li> <p> <code>CreationTime</code>
 * </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p>
 * <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error
 * occurred, information about the error.</p> </li> </ul><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecast">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeForecastAsync(const Model::DescribeForecastRequest& request, const DescribeForecastResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p>Describes a forecast export job created using the
 * <a>CreateForecastExportJob</a> operation.</p> <p>In addition to listing the
 * properties provided by the user in the <code>CreateForecastExportJob</code>
 * request, this operation lists the following properties:</p> <ul> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code>
 * - If an error occurred, information about the error.</p> </li> </ul><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecastExportJob">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribeForecastExportJobOutcome DescribeForecastExportJob(const Model::DescribeForecastExportJobRequest& request) const;
/**
 * <p>Describes a forecast export job created using the
 * <a>CreateForecastExportJob</a> operation.</p> <p>In addition to listing the
 * properties provided by the user in the <code>CreateForecastExportJob</code>
 * request, this operation lists the following properties:</p> <ul> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code>
 * - If an error occurred, information about the error.</p> </li> </ul><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecastExportJob">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeForecastExportJobOutcomeCallable DescribeForecastExportJobCallable(const Model::DescribeForecastExportJobRequest& request) const;
/**
 * <p>Describes a forecast export job created using the
 * <a>CreateForecastExportJob</a> operation.</p> <p>In addition to listing the
 * properties provided by the user in the <code>CreateForecastExportJob</code>
 * request, this operation lists the following properties:</p> <ul> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code>
 * - If an error occurred, information about the error.</p> </li> </ul><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecastExportJob">AWS
 * API Reference</a></p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeForecastExportJobAsync(const Model::DescribeForecastExportJobRequest& request, const DescribeForecastExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
 * <p> This operation is only valid for legacy predictors created with
 * CreatePredictor. If you are not using a legacy predictor, use
 * <a>DescribeAutoPredictor</a>.</p> <p>Describes a predictor created using
 * the <a>CreatePredictor</a> operation.</p> <p>In addition to listing the
 * properties provided in the <code>CreatePredictor</code> request, this operation
 * lists the following properties:</p> <ul> <li> <p>
 * <code>DatasetImportJobArns</code> - The dataset import jobs used to import
 * training data.</p> </li> <li> <p> <code>AutoMLAlgorithmArns</code> - If AutoML
 * is performed, the algorithms that were evaluated.</p> </li> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code>
 * - If an error occurred, information about the error.</p> </li> </ul><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictor">AWS
 * API Reference</a></p>
 *
 * Synchronous call: blocks the calling thread until the operation completes and returns the outcome.
 */
virtual Model::DescribePredictorOutcome DescribePredictor(const Model::DescribePredictorRequest& request) const;
/**
 * <p> This operation is only valid for legacy predictors created with
 * CreatePredictor. If you are not using a legacy predictor, use
 * <a>DescribeAutoPredictor</a>.</p> <p>Describes a predictor created using
 * the <a>CreatePredictor</a> operation.</p> <p>In addition to listing the
 * properties provided in the <code>CreatePredictor</code> request, this operation
 * lists the following properties:</p> <ul> <li> <p>
 * <code>DatasetImportJobArns</code> - The dataset import jobs used to import
 * training data.</p> </li> <li> <p> <code>AutoMLAlgorithmArns</code> - If AutoML
 * is performed, the algorithms that were evaluated.</p> </li> <li> <p>
 * <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
 * </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code>
 * - If an error occurred, information about the error.</p> </li> </ul><p><h3>See
 * Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictor">AWS
 * API Reference</a></p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribePredictorOutcomeCallable DescribePredictorCallable(const Model::DescribePredictorRequest& request) const;
/**
* <p> This operation is only valid for legacy predictors created with
* CreatePredictor. If you are not using a legacy predictor, use
* <a>DescribeAutoPredictor</a>.</p> <p>Describes a predictor created using
* the <a>CreatePredictor</a> operation.</p> <p>In addition to listing the
* properties provided in the <code>CreatePredictor</code> request, this operation
* lists the following properties:</p> <ul> <li> <p>
* <code>DatasetImportJobArns</code> - The dataset import jobs used to import
* training data.</p> </li> <li> <p> <code>AutoMLAlgorithmArns</code> - If AutoML
* is performed, the algorithms that were evaluated.</p> </li> <li> <p>
* <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code>
* </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code>
* - If an error occurred, information about the error.</p> </li> </ul><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictor">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void DescribePredictorAsync(const Model::DescribePredictorRequest& request, const DescribePredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Describes a predictor backtest export job created using the
         * <a>CreatePredictorBacktestExportJob</a> operation.</p> <p>In addition to listing
         * the properties provided by the user in the
         * <code>CreatePredictorBacktestExportJob</code> request, this operation lists the
         * following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li>
         * <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p>
         * <code>Status</code> </p> </li> <li> <p> <code>Message</code> (if an error
         * occurred)</p> </li> </ul><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictorBacktestExportJob">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::DescribePredictorBacktestExportJobOutcome DescribePredictorBacktestExportJob(const Model::DescribePredictorBacktestExportJobRequest& request) const;
        /**
         * <p>Describes a predictor backtest export job created using the
         * <a>CreatePredictorBacktestExportJob</a> operation.</p> <p>In addition to listing
         * the properties provided by the user in the
         * <code>CreatePredictorBacktestExportJob</code> request, this operation lists the
         * following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li>
         * <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p>
         * <code>Status</code> </p> </li> <li> <p> <code>Message</code> (if an error
         * occurred)</p> </li> </ul><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictorBacktestExportJob">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::DescribePredictorBacktestExportJobOutcomeCallable DescribePredictorBacktestExportJobCallable(const Model::DescribePredictorBacktestExportJobRequest& request) const;
        /**
         * <p>Describes a predictor backtest export job created using the
         * <a>CreatePredictorBacktestExportJob</a> operation.</p> <p>In addition to listing
         * the properties provided by the user in the
         * <code>CreatePredictorBacktestExportJob</code> request, this operation lists the
         * following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li>
         * <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p>
         * <code>Status</code> </p> </li> <li> <p> <code>Message</code> (if an error
         * occurred)</p> </li> </ul><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictorBacktestExportJob">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void DescribePredictorBacktestExportJobAsync(const Model::DescribePredictorBacktestExportJobRequest& request, const DescribePredictorBacktestExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Provides metrics on the accuracy of the models that were trained by the
         * <a>CreatePredictor</a> operation. Use metrics to see how well the model
         * performed and to decide whether to use the predictor to generate a forecast. For
         * more information, see <a
         * href="https://docs.aws.amazon.com/forecast/latest/dg/metrics.html">Predictor
         * Metrics</a>.</p> <p>This operation generates metrics for each backtest window
         * that was evaluated. The number of backtest windows
         * (<code>NumberOfBacktestWindows</code>) is specified using the
         * <a>EvaluationParameters</a> object, which is optionally included in the
         * <code>CreatePredictor</code> request. If <code>NumberOfBacktestWindows</code>
         * isn't specified, the number defaults to one.</p> <p>The parameters of the
         * <code>filling</code> method determine which items contribute to the metrics. If
         * you want all items to contribute, specify <code>zero</code>. If you want only
         * those items that have complete data in the range being evaluated to contribute,
         * specify <code>nan</code>. For more information, see
         * <a>FeaturizationMethod</a>.</p> <p>Before you can get accuracy metrics,
         * the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying
         * that training has completed. To get the status, use the <a>DescribePredictor</a>
         * operation.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/GetAccuracyMetrics">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::GetAccuracyMetricsOutcome GetAccuracyMetrics(const Model::GetAccuracyMetricsRequest& request) const;
        /**
         * <p>Provides metrics on the accuracy of the models that were trained by the
         * <a>CreatePredictor</a> operation. Use metrics to see how well the model
         * performed and to decide whether to use the predictor to generate a forecast. For
         * more information, see <a
         * href="https://docs.aws.amazon.com/forecast/latest/dg/metrics.html">Predictor
         * Metrics</a>.</p> <p>This operation generates metrics for each backtest window
         * that was evaluated. The number of backtest windows
         * (<code>NumberOfBacktestWindows</code>) is specified using the
         * <a>EvaluationParameters</a> object, which is optionally included in the
         * <code>CreatePredictor</code> request. If <code>NumberOfBacktestWindows</code>
         * isn't specified, the number defaults to one.</p> <p>The parameters of the
         * <code>filling</code> method determine which items contribute to the metrics. If
         * you want all items to contribute, specify <code>zero</code>. If you want only
         * those items that have complete data in the range being evaluated to contribute,
         * specify <code>nan</code>. For more information, see
         * <a>FeaturizationMethod</a>.</p> <p>Before you can get accuracy metrics,
         * the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying
         * that training has completed. To get the status, use the <a>DescribePredictor</a>
         * operation.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/GetAccuracyMetrics">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::GetAccuracyMetricsOutcomeCallable GetAccuracyMetricsCallable(const Model::GetAccuracyMetricsRequest& request) const;
        /**
         * <p>Provides metrics on the accuracy of the models that were trained by the
         * <a>CreatePredictor</a> operation. Use metrics to see how well the model
         * performed and to decide whether to use the predictor to generate a forecast. For
         * more information, see <a
         * href="https://docs.aws.amazon.com/forecast/latest/dg/metrics.html">Predictor
         * Metrics</a>.</p> <p>This operation generates metrics for each backtest window
         * that was evaluated. The number of backtest windows
         * (<code>NumberOfBacktestWindows</code>) is specified using the
         * <a>EvaluationParameters</a> object, which is optionally included in the
         * <code>CreatePredictor</code> request. If <code>NumberOfBacktestWindows</code>
         * isn't specified, the number defaults to one.</p> <p>The parameters of the
         * <code>filling</code> method determine which items contribute to the metrics. If
         * you want all items to contribute, specify <code>zero</code>. If you want only
         * those items that have complete data in the range being evaluated to contribute,
         * specify <code>nan</code>. For more information, see
         * <a>FeaturizationMethod</a>.</p> <p>Before you can get accuracy metrics,
         * the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying
         * that training has completed. To get the status, use the <a>DescribePredictor</a>
         * operation.</p> <p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/GetAccuracyMetrics">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void GetAccuracyMetricsAsync(const Model::GetAccuracyMetricsRequest& request, const GetAccuracyMetricsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of dataset groups created using the <a>CreateDatasetGroup</a>
         * operation. For each dataset group, this operation returns a summary of its
         * properties, including its Amazon Resource Name (ARN). You can retrieve the
         * complete set of properties by using the dataset group ARN with the
         * <a>DescribeDatasetGroup</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetGroups">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListDatasetGroupsOutcome ListDatasetGroups(const Model::ListDatasetGroupsRequest& request) const;
        /**
         * <p>Returns a list of dataset groups created using the <a>CreateDatasetGroup</a>
         * operation. For each dataset group, this operation returns a summary of its
         * properties, including its Amazon Resource Name (ARN). You can retrieve the
         * complete set of properties by using the dataset group ARN with the
         * <a>DescribeDatasetGroup</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetGroups">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListDatasetGroupsOutcomeCallable ListDatasetGroupsCallable(const Model::ListDatasetGroupsRequest& request) const;
        /**
         * <p>Returns a list of dataset groups created using the <a>CreateDatasetGroup</a>
         * operation. For each dataset group, this operation returns a summary of its
         * properties, including its Amazon Resource Name (ARN). You can retrieve the
         * complete set of properties by using the dataset group ARN with the
         * <a>DescribeDatasetGroup</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetGroups">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListDatasetGroupsAsync(const Model::ListDatasetGroupsRequest& request, const ListDatasetGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of dataset import jobs created using the
         * <a>CreateDatasetImportJob</a> operation. For each import job, this operation
         * returns a summary of its properties, including its Amazon Resource Name (ARN).
         * You can retrieve the complete set of properties by using the ARN with the
         * <a>DescribeDatasetImportJob</a> operation. You can filter the list by providing
         * an array of <a>Filter</a> objects.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetImportJobs">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListDatasetImportJobsOutcome ListDatasetImportJobs(const Model::ListDatasetImportJobsRequest& request) const;
        /**
         * <p>Returns a list of dataset import jobs created using the
         * <a>CreateDatasetImportJob</a> operation. For each import job, this operation
         * returns a summary of its properties, including its Amazon Resource Name (ARN).
         * You can retrieve the complete set of properties by using the ARN with the
         * <a>DescribeDatasetImportJob</a> operation. You can filter the list by providing
         * an array of <a>Filter</a> objects.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetImportJobs">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListDatasetImportJobsOutcomeCallable ListDatasetImportJobsCallable(const Model::ListDatasetImportJobsRequest& request) const;
        /**
         * <p>Returns a list of dataset import jobs created using the
         * <a>CreateDatasetImportJob</a> operation. For each import job, this operation
         * returns a summary of its properties, including its Amazon Resource Name (ARN).
         * You can retrieve the complete set of properties by using the ARN with the
         * <a>DescribeDatasetImportJob</a> operation. You can filter the list by providing
         * an array of <a>Filter</a> objects.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetImportJobs">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListDatasetImportJobsAsync(const Model::ListDatasetImportJobsRequest& request, const ListDatasetImportJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of datasets created using the <a>CreateDataset</a> operation.
         * For each dataset, a summary of its properties, including its Amazon Resource
         * Name (ARN), is returned. To retrieve the complete set of properties, use the ARN
         * with the <a>DescribeDataset</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasets">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListDatasetsOutcome ListDatasets(const Model::ListDatasetsRequest& request) const;
        /**
         * <p>Returns a list of datasets created using the <a>CreateDataset</a> operation.
         * For each dataset, a summary of its properties, including its Amazon Resource
         * Name (ARN), is returned. To retrieve the complete set of properties, use the ARN
         * with the <a>DescribeDataset</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasets">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListDatasetsOutcomeCallable ListDatasetsCallable(const Model::ListDatasetsRequest& request) const;
        /**
         * <p>Returns a list of datasets created using the <a>CreateDataset</a> operation.
         * For each dataset, a summary of its properties, including its Amazon Resource
         * Name (ARN), is returned. To retrieve the complete set of properties, use the ARN
         * with the <a>DescribeDataset</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasets">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListDatasetsAsync(const Model::ListDatasetsRequest& request, const ListDatasetsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of Explainability resources created using the
         * <a>CreateExplainability</a> operation. This operation returns a summary for each
         * Explainability. You can filter the list using an array of <a>Filter</a>
         * objects.</p> <p>To retrieve the complete set of properties for a particular
         * Explainability resource, use the ARN with the <a>DescribeExplainability</a>
         * operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListExplainabilities">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListExplainabilitiesOutcome ListExplainabilities(const Model::ListExplainabilitiesRequest& request) const;
        /**
         * <p>Returns a list of Explainability resources created using the
         * <a>CreateExplainability</a> operation. This operation returns a summary for each
         * Explainability. You can filter the list using an array of <a>Filter</a>
         * objects.</p> <p>To retrieve the complete set of properties for a particular
         * Explainability resource, use the ARN with the <a>DescribeExplainability</a>
         * operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListExplainabilities">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListExplainabilitiesOutcomeCallable ListExplainabilitiesCallable(const Model::ListExplainabilitiesRequest& request) const;
        /**
         * <p>Returns a list of Explainability resources created using the
         * <a>CreateExplainability</a> operation. This operation returns a summary for each
         * Explainability. You can filter the list using an array of <a>Filter</a>
         * objects.</p> <p>To retrieve the complete set of properties for a particular
         * Explainability resource, use the ARN with the <a>DescribeExplainability</a>
         * operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListExplainabilities">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListExplainabilitiesAsync(const Model::ListExplainabilitiesRequest& request, const ListExplainabilitiesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of Explainability exports created using the
         * <a>CreateExplainabilityExport</a> operation. This operation returns a summary
         * for each Explainability export. You can filter the list using an array of
         * <a>Filter</a> objects.</p> <p>To retrieve the complete set of properties for a
         * particular Explainability export, use the ARN with the
         * <a>DescribeExplainability</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListExplainabilityExports">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListExplainabilityExportsOutcome ListExplainabilityExports(const Model::ListExplainabilityExportsRequest& request) const;
        /**
         * <p>Returns a list of Explainability exports created using the
         * <a>CreateExplainabilityExport</a> operation. This operation returns a summary
         * for each Explainability export. You can filter the list using an array of
         * <a>Filter</a> objects.</p> <p>To retrieve the complete set of properties for a
         * particular Explainability export, use the ARN with the
         * <a>DescribeExplainability</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListExplainabilityExports">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListExplainabilityExportsOutcomeCallable ListExplainabilityExportsCallable(const Model::ListExplainabilityExportsRequest& request) const;
        /**
         * <p>Returns a list of Explainability exports created using the
         * <a>CreateExplainabilityExport</a> operation. This operation returns a summary
         * for each Explainability export. You can filter the list using an array of
         * <a>Filter</a> objects.</p> <p>To retrieve the complete set of properties for a
         * particular Explainability export, use the ARN with the
         * <a>DescribeExplainability</a> operation.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListExplainabilityExports">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListExplainabilityExportsAsync(const Model::ListExplainabilityExportsRequest& request, const ListExplainabilityExportsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of forecast export jobs created using the
         * <a>CreateForecastExportJob</a> operation. For each forecast export job, this
         * operation returns a summary of its properties, including its Amazon Resource
         * Name (ARN). To retrieve the complete set of properties, use the ARN with the
         * <a>DescribeForecastExportJob</a> operation. You can filter the list using an
         * array of <a>Filter</a> objects.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecastExportJobs">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListForecastExportJobsOutcome ListForecastExportJobs(const Model::ListForecastExportJobsRequest& request) const;
        /**
         * <p>Returns a list of forecast export jobs created using the
         * <a>CreateForecastExportJob</a> operation. For each forecast export job, this
         * operation returns a summary of its properties, including its Amazon Resource
         * Name (ARN). To retrieve the complete set of properties, use the ARN with the
         * <a>DescribeForecastExportJob</a> operation. You can filter the list using an
         * array of <a>Filter</a> objects.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecastExportJobs">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListForecastExportJobsOutcomeCallable ListForecastExportJobsCallable(const Model::ListForecastExportJobsRequest& request) const;
        /**
         * <p>Returns a list of forecast export jobs created using the
         * <a>CreateForecastExportJob</a> operation. For each forecast export job, this
         * operation returns a summary of its properties, including its Amazon Resource
         * Name (ARN). To retrieve the complete set of properties, use the ARN with the
         * <a>DescribeForecastExportJob</a> operation. You can filter the list using an
         * array of <a>Filter</a> objects.</p><p><h3>See Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecastExportJobs">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListForecastExportJobsAsync(const Model::ListForecastExportJobsRequest& request, const ListForecastExportJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of forecasts created using the <a>CreateForecast</a>
         * operation. For each forecast, this operation returns a summary of its
         * properties, including its Amazon Resource Name (ARN). To retrieve the complete
         * set of properties, specify the ARN with the <a>DescribeForecast</a> operation.
         * You can filter the list using an array of <a>Filter</a> objects.</p><p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecasts">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListForecastsOutcome ListForecasts(const Model::ListForecastsRequest& request) const;
        /**
         * <p>Returns a list of forecasts created using the <a>CreateForecast</a>
         * operation. For each forecast, this operation returns a summary of its
         * properties, including its Amazon Resource Name (ARN). To retrieve the complete
         * set of properties, specify the ARN with the <a>DescribeForecast</a> operation.
         * You can filter the list using an array of <a>Filter</a> objects.</p><p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecasts">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListForecastsOutcomeCallable ListForecastsCallable(const Model::ListForecastsRequest& request) const;
        /**
         * <p>Returns a list of forecasts created using the <a>CreateForecast</a>
         * operation. For each forecast, this operation returns a summary of its
         * properties, including its Amazon Resource Name (ARN). To retrieve the complete
         * set of properties, specify the ARN with the <a>DescribeForecast</a> operation.
         * You can filter the list using an array of <a>Filter</a> objects.</p><p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecasts">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListForecastsAsync(const Model::ListForecastsRequest& request, const ListForecastsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of predictor backtest export jobs created using the
         * <a>CreatePredictorBacktestExportJob</a> operation. This operation returns a
         * summary for each backtest export job. You can filter the list using an array of
         * <a>Filter</a> objects.</p> <p>To retrieve the complete set of properties for a
         * particular backtest export job, use the ARN with the
         * <a>DescribePredictorBacktestExportJob</a> operation.</p><p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictorBacktestExportJobs">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListPredictorBacktestExportJobsOutcome ListPredictorBacktestExportJobs(const Model::ListPredictorBacktestExportJobsRequest& request) const;
        /**
         * <p>Returns a list of predictor backtest export jobs created using the
         * <a>CreatePredictorBacktestExportJob</a> operation. This operation returns a
         * summary for each backtest export job. You can filter the list using an array of
         * <a>Filter</a> objects.</p> <p>To retrieve the complete set of properties for a
         * particular backtest export job, use the ARN with the
         * <a>DescribePredictorBacktestExportJob</a> operation.</p><p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictorBacktestExportJobs">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListPredictorBacktestExportJobsOutcomeCallable ListPredictorBacktestExportJobsCallable(const Model::ListPredictorBacktestExportJobsRequest& request) const;
        /**
         * <p>Returns a list of predictor backtest export jobs created using the
         * <a>CreatePredictorBacktestExportJob</a> operation. This operation returns a
         * summary for each backtest export job. You can filter the list using an array of
         * <a>Filter</a> objects.</p> <p>To retrieve the complete set of properties for a
         * particular backtest export job, use the ARN with the
         * <a>DescribePredictorBacktestExportJob</a> operation.</p><p><h3>See Also:</h3>
         * <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictorBacktestExportJobs">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListPredictorBacktestExportJobsAsync(const Model::ListPredictorBacktestExportJobsRequest& request, const ListPredictorBacktestExportJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
        /**
         * <p>Returns a list of predictors created using the <a>CreateAutoPredictor</a> or
         * <a>CreatePredictor</a> operations. For each predictor, this operation returns a
         * summary of its properties, including its Amazon Resource Name (ARN). </p> <p>You
         * can retrieve the complete set of properties by using the ARN with the
         * <a>DescribeAutoPredictor</a> and <a>DescribePredictor</a> operations. You can
         * filter the list using an array of <a>Filter</a> objects.</p><p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictors">AWS
         * API Reference</a></p>
         *
         * Synchronous variant: returns the operation's outcome (result or error)
         * directly.
         */
        virtual Model::ListPredictorsOutcome ListPredictors(const Model::ListPredictorsRequest& request) const;
        /**
         * <p>Returns a list of predictors created using the <a>CreateAutoPredictor</a> or
         * <a>CreatePredictor</a> operations. For each predictor, this operation returns a
         * summary of its properties, including its Amazon Resource Name (ARN). </p> <p>You
         * can retrieve the complete set of properties by using the ARN with the
         * <a>DescribeAutoPredictor</a> and <a>DescribePredictor</a> operations. You can
         * filter the list using an array of <a>Filter</a> objects.</p><p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictors">AWS
         * API Reference</a></p>
         *
         * Returns a future to the operation so that it can be executed in parallel to
         * other requests.
         */
        virtual Model::ListPredictorsOutcomeCallable ListPredictorsCallable(const Model::ListPredictorsRequest& request) const;
        /**
         * <p>Returns a list of predictors created using the <a>CreateAutoPredictor</a> or
         * <a>CreatePredictor</a> operations. For each predictor, this operation returns a
         * summary of its properties, including its Amazon Resource Name (ARN). </p> <p>You
         * can retrieve the complete set of properties by using the ARN with the
         * <a>DescribeAutoPredictor</a> and <a>DescribePredictor</a> operations. You can
         * filter the list using an array of <a>Filter</a> objects.</p><p><h3>See
         * Also:</h3> <a
         * href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictors">AWS
         * API Reference</a></p>
         *
         * Queues the request into a thread executor and triggers the associated callback
         * (handler) when the operation has finished.
         */
        virtual void ListPredictorsAsync(const Model::ListPredictorsRequest& request, const ListPredictorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Lists the tags for an Amazon Forecast resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListTagsForResource">AWS
* API Reference</a></p>
*/
virtual Model::ListTagsForResourceOutcome ListTagsForResource(const Model::ListTagsForResourceRequest& request) const;
/**
* <p>Lists the tags for an Amazon Forecast resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListTagsForResource">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::ListTagsForResourceOutcomeCallable ListTagsForResourceCallable(const Model::ListTagsForResourceRequest& request) const;
/**
* <p>Lists the tags for an Amazon Forecast resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListTagsForResource">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void ListTagsForResourceAsync(const Model::ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Stops a resource.</p> <p>The resource undergoes the following states:
* <code>CREATE_STOPPING</code> and <code>CREATE_STOPPED</code>. You cannot resume
* a resource once it has been stopped.</p> <p>This operation can be applied to the
* following resources (and their corresponding child resources):</p> <ul> <li>
* <p>Dataset Import Job</p> </li> <li> <p>Predictor Job</p> </li> <li> <p>Forecast
* Job</p> </li> <li> <p>Forecast Export Job</p> </li> <li> <p>Predictor Backtest
* Export Job</p> </li> <li> <p>Explainability Job</p> </li> <li> <p>Explainability
* Export Job</p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/StopResource">AWS
* API Reference</a></p>
*/
virtual Model::StopResourceOutcome StopResource(const Model::StopResourceRequest& request) const;
/**
* <p>Stops a resource.</p> <p>The resource undergoes the following states:
* <code>CREATE_STOPPING</code> and <code>CREATE_STOPPED</code>. You cannot resume
* a resource once it has been stopped.</p> <p>This operation can be applied to the
* following resources (and their corresponding child resources):</p> <ul> <li>
* <p>Dataset Import Job</p> </li> <li> <p>Predictor Job</p> </li> <li> <p>Forecast
* Job</p> </li> <li> <p>Forecast Export Job</p> </li> <li> <p>Predictor Backtest
* Export Job</p> </li> <li> <p>Explainability Job</p> </li> <li> <p>Explainability
* Export Job</p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/StopResource">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::StopResourceOutcomeCallable StopResourceCallable(const Model::StopResourceRequest& request) const;
/**
* <p>Stops a resource.</p> <p>The resource undergoes the following states:
* <code>CREATE_STOPPING</code> and <code>CREATE_STOPPED</code>. You cannot resume
* a resource once it has been stopped.</p> <p>This operation can be applied to the
* following resources (and their corresponding child resources):</p> <ul> <li>
* <p>Dataset Import Job</p> </li> <li> <p>Predictor Job</p> </li> <li> <p>Forecast
* Job</p> </li> <li> <p>Forecast Export Job</p> </li> <li> <p>Predictor Backtest
* Export Job</p> </li> <li> <p>Explainability Job</p> </li> <li> <p>Explainability
* Export Job</p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/StopResource">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void StopResourceAsync(const Model::StopResourceRequest& request, const StopResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Associates the specified tags to a resource with the specified
* <code>resourceArn</code>. If existing tags on a resource are not specified in
* the request parameters, they are not changed. When a resource is deleted, the
* tags associated with that resource are also deleted.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/TagResource">AWS
* API Reference</a></p>
*/
virtual Model::TagResourceOutcome TagResource(const Model::TagResourceRequest& request) const;
/**
* <p>Associates the specified tags to a resource with the specified
* <code>resourceArn</code>. If existing tags on a resource are not specified in
* the request parameters, they are not changed. When a resource is deleted, the
* tags associated with that resource are also deleted.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/TagResource">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::TagResourceOutcomeCallable TagResourceCallable(const Model::TagResourceRequest& request) const;
/**
* <p>Associates the specified tags to a resource with the specified
* <code>resourceArn</code>. If existing tags on a resource are not specified in
* the request parameters, they are not changed. When a resource is deleted, the
* tags associated with that resource are also deleted.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/TagResource">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void TagResourceAsync(const Model::TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Deletes the specified tags from a resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UntagResource">AWS
* API Reference</a></p>
*/
virtual Model::UntagResourceOutcome UntagResource(const Model::UntagResourceRequest& request) const;
/**
* <p>Deletes the specified tags from a resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UntagResource">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::UntagResourceOutcomeCallable UntagResourceCallable(const Model::UntagResourceRequest& request) const;
/**
* <p>Deletes the specified tags from a resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UntagResource">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void UntagResourceAsync(const Model::UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/**
* <p>Replaces the datasets in a dataset group with the specified datasets.</p>
* <p>The <code>Status</code> of the dataset group must be
* <code>ACTIVE</code> before you can use the dataset group to create a predictor.
* Use the <a>DescribeDatasetGroup</a> operation to get the status.</p>
* <p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UpdateDatasetGroup">AWS
* API Reference</a></p>
*/
virtual Model::UpdateDatasetGroupOutcome UpdateDatasetGroup(const Model::UpdateDatasetGroupRequest& request) const;
/**
* <p>Replaces the datasets in a dataset group with the specified datasets.</p>
* <p>The <code>Status</code> of the dataset group must be
* <code>ACTIVE</code> before you can use the dataset group to create a predictor.
* Use the <a>DescribeDatasetGroup</a> operation to get the status.</p>
* <p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UpdateDatasetGroup">AWS
* API Reference</a></p>
*
* returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::UpdateDatasetGroupOutcomeCallable UpdateDatasetGroupCallable(const Model::UpdateDatasetGroupRequest& request) const;
/**
* <p>Replaces the datasets in a dataset group with the specified datasets.</p>
* <p>The <code>Status</code> of the dataset group must be
* <code>ACTIVE</code> before you can use the dataset group to create a predictor.
* Use the <a>DescribeDatasetGroup</a> operation to get the status.</p>
* <p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UpdateDatasetGroup">AWS
* API Reference</a></p>
*
* Queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void UpdateDatasetGroupAsync(const Model::UpdateDatasetGroupRequest& request, const UpdateDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
void OverrideEndpoint(const Aws::String& endpoint);
private:
void init(const Aws::Client::ClientConfiguration& clientConfiguration);
void CreateAutoPredictorAsyncHelper(const Model::CreateAutoPredictorRequest& request, const CreateAutoPredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateDatasetAsyncHelper(const Model::CreateDatasetRequest& request, const CreateDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateDatasetGroupAsyncHelper(const Model::CreateDatasetGroupRequest& request, const CreateDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateDatasetImportJobAsyncHelper(const Model::CreateDatasetImportJobRequest& request, const CreateDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateExplainabilityAsyncHelper(const Model::CreateExplainabilityRequest& request, const CreateExplainabilityResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateExplainabilityExportAsyncHelper(const Model::CreateExplainabilityExportRequest& request, const CreateExplainabilityExportResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateForecastAsyncHelper(const Model::CreateForecastRequest& request, const CreateForecastResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateForecastExportJobAsyncHelper(const Model::CreateForecastExportJobRequest& request, const CreateForecastExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreatePredictorAsyncHelper(const Model::CreatePredictorRequest& request, const CreatePredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreatePredictorBacktestExportJobAsyncHelper(const Model::CreatePredictorBacktestExportJobRequest& request, const CreatePredictorBacktestExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteDatasetAsyncHelper(const Model::DeleteDatasetRequest& request, const DeleteDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteDatasetGroupAsyncHelper(const Model::DeleteDatasetGroupRequest& request, const DeleteDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteDatasetImportJobAsyncHelper(const Model::DeleteDatasetImportJobRequest& request, const DeleteDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteExplainabilityAsyncHelper(const Model::DeleteExplainabilityRequest& request, const DeleteExplainabilityResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteExplainabilityExportAsyncHelper(const Model::DeleteExplainabilityExportRequest& request, const DeleteExplainabilityExportResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteForecastAsyncHelper(const Model::DeleteForecastRequest& request, const DeleteForecastResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteForecastExportJobAsyncHelper(const Model::DeleteForecastExportJobRequest& request, const DeleteForecastExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeletePredictorAsyncHelper(const Model::DeletePredictorRequest& request, const DeletePredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeletePredictorBacktestExportJobAsyncHelper(const Model::DeletePredictorBacktestExportJobRequest& request, const DeletePredictorBacktestExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteResourceTreeAsyncHelper(const Model::DeleteResourceTreeRequest& request, const DeleteResourceTreeResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeAutoPredictorAsyncHelper(const Model::DescribeAutoPredictorRequest& request, const DescribeAutoPredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeDatasetAsyncHelper(const Model::DescribeDatasetRequest& request, const DescribeDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeDatasetGroupAsyncHelper(const Model::DescribeDatasetGroupRequest& request, const DescribeDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeDatasetImportJobAsyncHelper(const Model::DescribeDatasetImportJobRequest& request, const DescribeDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeExplainabilityAsyncHelper(const Model::DescribeExplainabilityRequest& request, const DescribeExplainabilityResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeExplainabilityExportAsyncHelper(const Model::DescribeExplainabilityExportRequest& request, const DescribeExplainabilityExportResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeForecastAsyncHelper(const Model::DescribeForecastRequest& request, const DescribeForecastResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeForecastExportJobAsyncHelper(const Model::DescribeForecastExportJobRequest& request, const DescribeForecastExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribePredictorAsyncHelper(const Model::DescribePredictorRequest& request, const DescribePredictorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribePredictorBacktestExportJobAsyncHelper(const Model::DescribePredictorBacktestExportJobRequest& request, const DescribePredictorBacktestExportJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetAccuracyMetricsAsyncHelper(const Model::GetAccuracyMetricsRequest& request, const GetAccuracyMetricsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListDatasetGroupsAsyncHelper(const Model::ListDatasetGroupsRequest& request, const ListDatasetGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListDatasetImportJobsAsyncHelper(const Model::ListDatasetImportJobsRequest& request, const ListDatasetImportJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListDatasetsAsyncHelper(const Model::ListDatasetsRequest& request, const ListDatasetsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListExplainabilitiesAsyncHelper(const Model::ListExplainabilitiesRequest& request, const ListExplainabilitiesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListExplainabilityExportsAsyncHelper(const Model::ListExplainabilityExportsRequest& request, const ListExplainabilityExportsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListForecastExportJobsAsyncHelper(const Model::ListForecastExportJobsRequest& request, const ListForecastExportJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListForecastsAsyncHelper(const Model::ListForecastsRequest& request, const ListForecastsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListPredictorBacktestExportJobsAsyncHelper(const Model::ListPredictorBacktestExportJobsRequest& request, const ListPredictorBacktestExportJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListPredictorsAsyncHelper(const Model::ListPredictorsRequest& request, const ListPredictorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListTagsForResourceAsyncHelper(const Model::ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StopResourceAsyncHelper(const Model::StopResourceRequest& request, const StopResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void TagResourceAsyncHelper(const Model::TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void UntagResourceAsyncHelper(const Model::UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void UpdateDatasetGroupAsyncHelper(const Model::UpdateDatasetGroupRequest& request, const UpdateDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
Aws::String m_uri;
Aws::String m_configScheme;
std::shared_ptr<Aws::Utils::Threading::Executor> m_executor;
};
} // namespace ForecastService
} // namespace Aws
| 75.23609 | 298 | 0.690973 | [
"object",
"model"
] |
9e071e052af058e6793b65a0992efc60aa500bf8 | 18,855 | h | C | src/integrators/bdpt.h | mistajuliax/pbrt-v3-IILE | afda605d92517d2396e494d81465ead22d0c25e1 | [
"BSD-2-Clause"
] | 16 | 2018-10-12T15:29:22.000Z | 2022-03-16T11:24:10.000Z | src/integrators/bdpt.h | mistajuliax/pbrt-v3-IILE | afda605d92517d2396e494d81465ead22d0c25e1 | [
"BSD-2-Clause"
] | 16 | 2018-02-02T11:49:36.000Z | 2018-04-21T09:07:08.000Z | src/integrators/bdpt.h | giuliojiang/pbrt-v3-IISPT | b9be01096293ab0f50b14b9043556c93ff9e07ec | [
"BSD-2-Clause"
] | 2 | 2018-12-12T08:49:43.000Z | 2019-12-03T12:20:04.000Z |
/*
pbrt source code is Copyright(c) 1998-2016
Matt Pharr, Greg Humphreys, and Wenzel Jakob.
This file is part of pbrt.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(_MSC_VER)
#define NOMINMAX
#pragma once
#endif
#ifndef PBRT_INTEGRATORS_BDPT_H
#define PBRT_INTEGRATORS_BDPT_H
// integrators/bdpt.h*
#include <unordered_map>
#include "camera.h"
#include "integrator.h"
#include "interaction.h"
#include "light.h"
#include "pbrt.h"
#include "reflection.h"
#include "sampling.h"
#include "scene.h"
namespace pbrt {
/// Forward declaration (correction term for adjoint BSDF with shading normals)
extern Float CorrectShadingNormal(const SurfaceInteraction &isect,
const Vector3f &wo, const Vector3f &wi,
TransportMode mode);
// EndpointInteraction Declarations
// A path endpoint: an Interaction augmented with a pointer to either the
// camera or the light the endpoint lies on. Exactly one union member is
// meaningful, determined by which constructor was used; callers must track
// this externally (the BDPT Vertex type does so via its VertexType tag).
struct EndpointInteraction : Interaction {
    union {
        const Camera *camera;
        const Light *light;
    };
    // EndpointInteraction Public Methods
    // Default: no endpoint attached (null light pointer).
    EndpointInteraction() : Interaction(), light(nullptr) {}
    // Camera endpoint at an existing interaction point.
    EndpointInteraction(const Interaction &it, const Camera *camera)
        : Interaction(it), camera(camera) {}
    // Camera endpoint at a ray's origin.
    EndpointInteraction(const Camera *camera, const Ray &ray)
        : Interaction(ray.o, ray.time, ray.medium), camera(camera) {}
    // Light endpoint at a ray's origin, with explicit light normal _nl_.
    EndpointInteraction(const Light *light, const Ray &r, const Normal3f &nl)
        : Interaction(r.o, r.time, r.medium), light(light) {
        n = nl;
    }
    // Light endpoint at an existing interaction point.
    EndpointInteraction(const Interaction &it, const Light *light)
        : Interaction(it), light(light) {}
    // Endpoint for an escaped ray: positioned at ray(1), with the normal
    // facing back along the ray and a null light pointer. NOTE(review): a
    // null light is treated as an infinite light by Vertex::IsInfiniteLight
    // below, which is presumably why this constructor leaves it null.
    EndpointInteraction(const Ray &ray)
        : Interaction(ray(1), ray.time, ray.medium), light(nullptr) {
        n = Normal3f(-ray.d);
    }
};
// BDPT Helper Definitions
enum class VertexType { Camera, Light, Surface, Medium };
struct Vertex;
// RAII helper that assigns _value_ to *target for the lifetime of the
// ScopedAssignment object and restores the saved original value when the
// object is destroyed (or move-assigned over). A null _target_ yields an
// inactive assignment that does nothing.
template <typename Type>
class ScopedAssignment {
  public:
    // ScopedAssignment Public Methods
    ScopedAssignment(Type *target = nullptr, Type value = Type())
        : target(target) {
        if (target) {
            backup = *target;
            *target = value;
        }
    }
    ~ScopedAssignment() {
        if (target) *target = backup;
    }
    ScopedAssignment(const ScopedAssignment &) = delete;
    ScopedAssignment &operator=(const ScopedAssignment &) = delete;
    ScopedAssignment &operator=(ScopedAssignment &&other) {
        if (this != &other) {
            // Restore our current target before taking over _other_'s;
            // otherwise the value saved in _backup_ would never be written
            // back and the previous assignment would silently leak.
            if (target) *target = backup;
            target = other.target;
            backup = other.backup;
            other.target = nullptr;
        }
        return *this;
    }

  private:
    Type *target, backup;
};
// Returns the combined directional density of sampling direction _w_ from
// the scene's infinite lights, weighted by each light's unnormalized
// selection probability in _lightDistr_ (index looked up via
// _lightToDistrIndex_), normalized by the distribution's total integral.
inline Float InfiniteLightDensity(
    const Scene &scene, const Distribution1D &lightDistr,
    const std::unordered_map<const Light *, size_t> &lightToDistrIndex,
    const Vector3f &w) {
    Float density = 0;
    for (const auto &light : scene.infiniteLights) {
        auto entry = lightToDistrIndex.find(light.get());
        CHECK(entry != lightToDistrIndex.end());
        density += light->Pdf_Li(Interaction(), -w) * lightDistr.func[entry->second];
    }
    return density / (lightDistr.funcInt * lightDistr.Count());
}
// BDPT Declarations
// Bidirectional path tracing integrator. Render() (defined in bdpt.cpp)
// drives the GenerateCameraSubpath / GenerateLightSubpath / ConnectBDPT
// machinery declared later in this header.
class BDPTIntegrator : public Integrator {
  public:
    // BDPTIntegrator Public Methods
    //
    // sampler: prototype sampler for generating pixel samples.
    // camera: camera (and film) the image is rendered to.
    // maxDepth: maximum path depth -- TODO confirm exact vertex-count
    //     convention against its use in bdpt.cpp.
    // visualizeStrategies / visualizeWeights: debug-visualization flags,
    //     judging by their names -- NOTE(review): semantics live in Render().
    // pixelBounds: image-space bounds of the pixels to render.
    // lightSampleStrategy: name of the light sampling distribution
    //     (defaults to "power").
    BDPTIntegrator(std::shared_ptr<Sampler> sampler,
                   std::shared_ptr<const Camera> camera, int maxDepth,
                   bool visualizeStrategies, bool visualizeWeights,
                   const Bounds2i &pixelBounds,
                   const std::string &lightSampleStrategy = "power")
        : sampler(sampler),
          camera(camera),
          maxDepth(maxDepth),
          visualizeStrategies(visualizeStrategies),
          visualizeWeights(visualizeWeights),
          pixelBounds(pixelBounds),
          lightSampleStrategy(lightSampleStrategy) {}
    void Render(const Scene &scene);

  private:
    // BDPTIntegrator Private Data
    std::shared_ptr<Sampler> sampler;
    std::shared_ptr<const Camera> camera;
    const int maxDepth;
    const bool visualizeStrategies;
    const bool visualizeWeights;
    const Bounds2i pixelBounds;
    const std::string lightSampleStrategy;
};
// A single vertex of a camera or light subpath. Stores the vertex kind,
// the accumulated path throughput _beta_ up to this vertex, and (in a
// union) the interaction describing the vertex itself.
struct Vertex {
    // Vertex Public Data
    VertexType type;  // selects which union member below is active
    Spectrum beta;    // accumulated path throughput up to this vertex
#ifdef PBRT_HAVE_NONPOD_IN_UNIONS
    union {
#else
    struct {
#endif  // PBRT_HAVE_NONPOD_IN_UNIONS
        // Exactly one of these is valid, as indicated by _type_.
        EndpointInteraction ei;
        MediumInteraction mi;
        SurfaceInteraction si;
    };
    // True if the vertex was sampled from a delta (specular) distribution.
    bool delta = false;
    // Area-measure densities of sampling this vertex in forward (from the
    // path's start) and reverse order.
    Float pdfFwd = 0, pdfRev = 0;
    // Vertex Public Methods
    Vertex() : ei() {}
    Vertex(VertexType type, const EndpointInteraction &ei, const Spectrum &beta)
        : type(type), beta(beta), ei(ei) {}
    Vertex(const SurfaceInteraction &si, const Spectrum &beta)
        : type(VertexType::Surface), beta(beta), si(si) {}
    // Need to define these two to make compilers happy with the non-POD
    // objects in the anonymous union above.
    // NOTE(review): memcpy of non-trivially-copyable union members is
    // formally undefined behavior; it relies on these types being
    // bitwise-copyable in practice -- confirm before changing the union.
    Vertex(const Vertex &v) { memcpy(this, &v, sizeof(Vertex)); }
    Vertex &operator=(const Vertex &v) {
        memcpy(this, &v, sizeof(Vertex));
        return *this;
    }
    // Factory functions for the four vertex kinds (bodies defined
    // elsewhere).
    static inline Vertex CreateCamera(const Camera *camera, const Ray &ray,
                                      const Spectrum &beta);
    static inline Vertex CreateCamera(const Camera *camera,
                                      const Interaction &it,
                                      const Spectrum &beta);
    static inline Vertex CreateLight(const Light *light, const Ray &ray,
                                     const Normal3f &nLight, const Spectrum &Le,
                                     Float pdf);
    static inline Vertex CreateLight(const EndpointInteraction &ei,
                                     const Spectrum &beta, Float pdf);
    static inline Vertex CreateMedium(const MediumInteraction &mi,
                                      const Spectrum &beta, Float pdf,
                                      const Vertex &prev);
    static inline Vertex CreateSurface(const SurfaceInteraction &si,
                                       const Spectrum &beta, Float pdf,
                                       const Vertex &prev);
    Vertex(const MediumInteraction &mi, const Spectrum &beta)
        : type(VertexType::Medium), beta(beta), mi(mi) {}
    // Returns the interaction corresponding to the active union member.
    const Interaction &GetInteraction() const {
        switch (type) {
        case VertexType::Medium:
            return mi;
        case VertexType::Surface:
            return si;
        default:
            return ei;
        }
    }
    // Convenience accessors forwarding to the active interaction.
    const Point3f &p() const { return GetInteraction().p; }
    Float time() const { return GetInteraction().time; }
    const Normal3f &ng() const { return GetInteraction().n; }
    // Shading normal for surface vertices; geometric normal otherwise.
    const Normal3f &ns() const {
        if (type == VertexType::Surface)
            return si.shading.n;
        else
            return GetInteraction().n;
    }
    // A vertex counts as "on a surface" iff its geometric normal is
    // non-zero (medium and some endpoint interactions leave it zero).
    bool IsOnSurface() const { return ng() != Normal3f(); }
    // Evaluates the BSDF (surface vertex) or phase function (medium
    // vertex) for scattering toward _next_; _mode_ selects the
    // shading-normal correction for light- vs. camera-transport.
    Spectrum f(const Vertex &next, TransportMode mode) const {
        Vector3f wi = next.p() - p();
        if (wi.LengthSquared() == 0) return 0.;
        wi = Normalize(wi);
        switch (type) {
        case VertexType::Surface:
            return si.bsdf->f(si.wo, wi) *
                   CorrectShadingNormal(si, si.wo, wi, mode);
        case VertexType::Medium:
            return mi.phase->p(mi.wo, wi);
        default:
            LOG(FATAL) << "Vertex::f(): Unimplemented";
            return Spectrum(0.f);
        }
    }
    // True if a connection edge can be built to/from this vertex: medium
    // and camera vertices always; light vertices unless delta-directional;
    // surface vertices only with a non-specular BSDF component.
    bool IsConnectible() const {
        switch (type) {
        case VertexType::Medium:
            return true;
        case VertexType::Light:
            return (ei.light->flags & (int)LightFlags::DeltaDirection) == 0;
        case VertexType::Camera:
            return true;
        case VertexType::Surface:
            return si.bsdf->NumComponents(BxDFType(BSDF_DIFFUSE | BSDF_GLOSSY |
                                                   BSDF_REFLECTION |
                                                   BSDF_TRANSMISSION)) > 0;
        }
        LOG(FATAL) << "Unhandled vertex type in IsConnectable()";
        return false;  // NOTREACHED
    }
    // True for explicit light vertices and for surface vertices lying on
    // an emissive (area-light) primitive.
    bool IsLight() const {
        return type == VertexType::Light ||
               (type == VertexType::Surface && si.primitive->GetAreaLight());
    }
    bool IsDeltaLight() const {
        return type == VertexType::Light && ei.light &&
               pbrt::IsDeltaLight(ei.light->flags);
    }
    // True for infinite-area or delta-direction lights; a null _ei.light_
    // (an escaped ray, see EndpointInteraction(const Ray &)) also counts.
    bool IsInfiniteLight() const {
        return type == VertexType::Light &&
               (!ei.light || ei.light->flags & (int)LightFlags::Infinite ||
                ei.light->flags & (int)LightFlags::DeltaDirection);
    }
    // Emitted radiance from this vertex toward vertex _v_; zero if this
    // vertex is not a light.
    Spectrum Le(const Scene &scene, const Vertex &v) const {
        if (!IsLight()) return Spectrum(0.f);
        Vector3f w = v.p() - p();
        if (w.LengthSquared() == 0) return 0.;
        w = Normalize(w);
        if (IsInfiniteLight()) {
            // Return emitted radiance for infinite light sources
            Spectrum Le(0.f);
            for (const auto &light : scene.infiniteLights)
                Le += light->Le(Ray(p(), -w));
            return Le;
        } else {
            const AreaLight *light = si.primitive->GetAreaLight();
            CHECK_NOTNULL(light);
            return light->L(si, w);
        }
    }
    friend std::ostream &operator<<(std::ostream &os, const Vertex &v) {
        return os << v.ToString();
    }
    // Human-readable description for logging/debugging.
    std::string ToString() const {
        std::string s = std::string("[Vertex type: ");
        switch (type) {
        case VertexType::Camera:
            s += "camera";
            break;
        case VertexType::Light:
            s += "light";
            break;
        case VertexType::Surface:
            s += "surface";
            break;
        case VertexType::Medium:
            s += "medium";
            break;
        }
        s += std::string(" connectible: ") +
             std::string(IsConnectible() ? "true" : "false");
        s += StringPrintf("\n p: [ %f, %f, %f ] ng: [ %f, %f, %f ]", p().x, p().y,
                          p().z, ng().x, ng().y, ng().z);
        s += StringPrintf("\n pdfFwd: %f pdfRev: %f beta: ", pdfFwd, pdfRev) +
             beta.ToString();
        switch (type) {
        case VertexType::Camera:
            // TODO
            break;
        case VertexType::Light:
            // TODO
            break;
        case VertexType::Surface:
            s += std::string("\n bsdf: ") + si.bsdf->ToString();
            break;
        case VertexType::Medium:
            s += std::string("\n phase: ") + mi.phase->ToString();
            break;
        }
        s += std::string(" ]");
        return s;
    }
    // Converts a solid-angle density _pdf_ measured at this vertex into a
    // per-unit-area density at _next_ (1/dist^2 falloff, plus a cosine
    // factor when _next_ lies on a surface).
    Float ConvertDensity(Float pdf, const Vertex &next) const {
        // Return solid angle density if _next_ is an infinite area light
        if (next.IsInfiniteLight()) return pdf;
        Vector3f w = next.p() - p();
        if (w.LengthSquared() == 0) return 0;
        Float invDist2 = 1 / w.LengthSquared();
        if (next.IsOnSurface())
            pdf *= AbsDot(next.ng(), w * std::sqrt(invDist2));
        return pdf * invDist2;
    }
    // Probability per unit area of sampling _next_ from this vertex,
    // given the preceding vertex _prev_ (null is only valid for camera
    // vertices, enforced by the CHECK below).
    Float Pdf(const Scene &scene, const Vertex *prev,
              const Vertex &next) const {
        if (type == VertexType::Light) return PdfLight(scene, next);
        // Compute directions to preceding and next vertex
        Vector3f wn = next.p() - p();
        if (wn.LengthSquared() == 0) return 0;
        wn = Normalize(wn);
        Vector3f wp;
        if (prev) {
            wp = prev->p() - p();
            if (wp.LengthSquared() == 0) return 0;
            wp = Normalize(wp);
        } else
            CHECK(type == VertexType::Camera);
        // Compute directional density depending on the vertex types
        Float pdf = 0, unused;
        if (type == VertexType::Camera)
            ei.camera->Pdf_We(ei.SpawnRay(wn), &unused, &pdf);
        else if (type == VertexType::Surface)
            pdf = si.bsdf->Pdf(wp, wn);
        else if (type == VertexType::Medium)
            pdf = mi.phase->p(wp, wn);
        else
            LOG(FATAL) << "Vertex::Pdf(): Unimplemented";
        // Return probability per unit area at vertex _next_
        return ConvertDensity(pdf, next);
    }
    // Directional emission density of this light vertex toward _v_,
    // converted to a per-unit-area density at _v_. The discrete
    // light-selection/origin term is handled by PdfLightOrigin().
    Float PdfLight(const Scene &scene, const Vertex &v) const {
        Vector3f w = v.p() - p();
        Float invDist2 = 1 / w.LengthSquared();
        w *= std::sqrt(invDist2);
        Float pdf;
        if (IsInfiniteLight()) {
            // Compute planar sampling density for infinite light sources
            Point3f worldCenter;
            Float worldRadius;
            scene.WorldBound().BoundingSphere(&worldCenter, &worldRadius);
            pdf = 1 / (Pi * worldRadius * worldRadius);
        } else {
            // Get pointer _light_ to the light source at the vertex
            CHECK(IsLight());
            const Light *light = type == VertexType::Light
                                     ? ei.light
                                     : si.primitive->GetAreaLight();
            CHECK_NOTNULL(light);
            // Compute sampling density for non-infinite light sources
            Float pdfPos, pdfDir;
            light->Pdf_Le(Ray(p(), w, Infinity, time()), ng(), &pdfPos, &pdfDir);
            pdf = pdfDir * invDist2;
        }
        if (v.IsOnSurface()) pdf *= AbsDot(v.ng(), w);
        return pdf;
    }
    // Combined density of choosing this light from _lightDistr_ and
    // sampling the emission origin, as seen from vertex _v_.
    Float PdfLightOrigin(const Scene &scene, const Vertex &v,
                         const Distribution1D &lightDistr,
                         const std::unordered_map<const Light *, size_t>
                             &lightToDistrIndex) const {
        Vector3f w = v.p() - p();
        if (w.LengthSquared() == 0) return 0.;
        w = Normalize(w);
        if (IsInfiniteLight()) {
            // Return solid angle density for infinite light sources
            return InfiniteLightDensity(scene, lightDistr, lightToDistrIndex,
                                        w);
        } else {
            // Return solid angle density for non-infinite light sources
            Float pdfPos, pdfDir, pdfChoice = 0;
            // Get pointer _light_ to the light source at the vertex
            CHECK(IsLight());
            const Light *light = type == VertexType::Light
                                     ? ei.light
                                     : si.primitive->GetAreaLight();
            CHECK_NOTNULL(light);
            // Compute the discrete probability of sampling _light_, _pdfChoice_
            CHECK(lightToDistrIndex.find(light) != lightToDistrIndex.end());
            size_t index = lightToDistrIndex.find(light)->second;
            pdfChoice = lightDistr.DiscretePDF(index);
            light->Pdf_Le(Ray(p(), w, Infinity, time()), ng(), &pdfPos, &pdfDir);
            return pdfPos * pdfChoice;
        }
    }
};
extern int GenerateCameraSubpath(const Scene &scene, Sampler &sampler,
MemoryArena &arena, int maxDepth,
const Camera &camera, const Point2f &pFilm,
Vertex *path);
extern int GenerateLightSubpath(
const Scene &scene, Sampler &sampler, MemoryArena &arena, int maxDepth,
Float time, const Distribution1D &lightDistr,
const std::unordered_map<const Light *, size_t> &lightToIndex,
Vertex *path);
Spectrum ConnectBDPT(
const Scene &scene, Vertex *lightVertices, Vertex *cameraVertices, int s,
int t, const Distribution1D &lightDistr,
const std::unordered_map<const Light *, size_t> &lightToIndex,
const Camera &camera, Sampler &sampler, Point2f *pRaster,
Float *misWeight = nullptr);
BDPTIntegrator *CreateBDPTIntegrator(const ParamSet ¶ms,
std::shared_ptr<Sampler> sampler,
std::shared_ptr<const Camera> camera);
BDPTIntegrator *CreateBDPTIntegrator(
std::shared_ptr<Sampler> sampler,
std::shared_ptr<Camera> camera
);
// Vertex Inline Method Definitions
inline Vertex Vertex::CreateCamera(const Camera *camera, const Ray &ray,
const Spectrum &beta) {
return Vertex(VertexType::Camera, EndpointInteraction(camera, ray), beta);
}
inline Vertex Vertex::CreateCamera(const Camera *camera, const Interaction &it,
const Spectrum &beta) {
return Vertex(VertexType::Camera, EndpointInteraction(it, camera), beta);
}
inline Vertex Vertex::CreateLight(const Light *light, const Ray &ray,
const Normal3f &Nl, const Spectrum &Le,
Float pdf) {
Vertex v(VertexType::Light, EndpointInteraction(light, ray, Nl), Le);
v.pdfFwd = pdf;
return v;
}
inline Vertex Vertex::CreateSurface(const SurfaceInteraction &si,
const Spectrum &beta, Float pdf,
const Vertex &prev) {
Vertex v(si, beta);
v.pdfFwd = prev.ConvertDensity(pdf, v);
return v;
}
inline Vertex Vertex::CreateMedium(const MediumInteraction &mi,
const Spectrum &beta, Float pdf,
const Vertex &prev) {
Vertex v(mi, beta);
v.pdfFwd = prev.ConvertDensity(pdf, v);
return v;
}
inline Vertex Vertex::CreateLight(const EndpointInteraction &ei,
const Spectrum &beta, Float pdf) {
Vertex v(VertexType::Light, ei, beta);
v.pdfFwd = pdf;
return v;
}
} // namespace pbrt
#endif // PBRT_INTEGRATORS_BDPT_H
| 37.937626 | 83 | 0.581066 | [
"render",
"solid"
] |
9e0b081c00d9451d998d89fdcba9735bcfbcfd15 | 12,970 | h | C | lightcrafts/jnisrc/macstl/algorithm.h | keyboardcowboy42/LightZone | 7e4212dcc6aeddc3eed76254e7ed1b48bbc53ec4 | [
"BSD-3-Clause"
] | 32 | 2015-02-08T21:32:02.000Z | 2021-07-11T05:30:36.000Z | lightcrafts/jnisrc/macstl/algorithm.h | keyboardcowboy42/LightZone | 7e4212dcc6aeddc3eed76254e7ed1b48bbc53ec4 | [
"BSD-3-Clause"
] | 7 | 2020-03-23T14:41:57.000Z | 2022-03-22T02:08:11.000Z | lightcrafts/jnisrc/macstl/algorithm.h | keyboardcowboy42/LightZone | 7e4212dcc6aeddc3eed76254e7ed1b48bbc53ec4 | [
"BSD-3-Clause"
] | 14 | 2015-03-17T01:46:56.000Z | 2022-03-09T08:20:13.000Z | /*
* algorithm.h
* macstl
*
* Created by Glen Low on Apr 19 2003.
*
* Copyright (c) 2003-2005 Pixelglow Software, all rights reserved.
* http://www.pixelglow.com/macstl/
* macstl@pixelglow.com
*
* Unless explicitly acquired and licensed from Licensor under the Pixelglow
* Software License ("PSL") Version 2.0 or greater, the contents of this file
* are subject to the Reciprocal Public License ("RPL") Version 1.1, or
* subsequent versions as allowed by the RPL, and You may not copy or use this
* file in either source code or executable form, except in compliance with the
* terms and conditions of the RPL.
*
* While it is an open-source license, the RPL prohibits you from keeping your
* derivations of this file proprietary even if you only deploy them in-house.
* You may obtain a copy of both the PSL and the RPL ("the Licenses") from
* Pixelglow Software ("the Licensor") at http://www.pixelglow.com/.
*
* Software distributed under the Licenses is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the Licenses
* for the specific language governing rights and limitations under the
* Licenses. Notwithstanding anything else in the Licenses, if any clause of
* the Licenses which purports to disclaim or limit the Licensor's liability
* for breach of any condition or warranty (whether express or implied by law)
* would otherwise be void, that clause is deemed to be subject to the
* reservation of liability of the Licensor to supply the software again or to
* repair the software or to pay the cost of having the software supplied again
* or repaired, at the Licensor's option.
*/
#ifndef MACSTL_ALGORITHM_H
#define MACSTL_ALGORITHM_H
#include <algorithm>
#include <functional>
#include "impl/meta.h"
#include "functional.h"
namespace stdext
{
namespace impl
{
template <typename InIter, typename Size, typename Enable = void>
struct destroy_n_dispatch
{
static void call (InIter first, Size n)
{
typedef typename std::iterator_traits <InIter>::value_type value_type;
for (; n > 0; --n)
{
(&*first)->~value_type ();
++first;
}
}
};
template <typename InIter, typename Size>
struct destroy_n_dispatch <InIter, Size, typename enable_if <
is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value>::type>
{
static void call (InIter first, Size n)
{
typedef typename std::iterator_traits <InIter>::value_type value_type;
for (Size count = 0; count != n; ++count)
(&first [count])->~value_type ();
}
};
}
template <typename InIter, typename Size> inline void destroy_n (InIter first, Size n)
{
impl::destroy_n_dispatch <InIter, Size>::call (first, n);
}
namespace impl
{
// default: use standard uninitialized copy
template <typename InIter, typename Size, typename OutIter, typename Enable = void>
struct uninitialized_copy_n_dispatch
{
static void call (const InIter& first, Size n, const OutIter& result)
{
typedef typename std::iterator_traits <OutIter>::value_type value_type;
InIter first_copy = first;
OutIter result_copy = result;
Size index = 0;
try
{
for (; n > 0; --n)
{
new (&*result_copy) value_type (*first);
++first_copy;
++result_copy;
}
}
catch (...)
{
for (Size unwind = 0; unwind != index; ++unwind)
result [unwind].~value_type ();
throw;
}
}
};
template <typename InIter, typename Size, typename OutIter>
struct uninitialized_copy_n_dispatch <InIter, Size, OutIter, typename enable_if <
is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value != 0
&& is_same <typename std::iterator_traits <OutIter>::iterator_category, std::random_access_iterator_tag>::value != 0>::type>
{
static void call (const InIter& first, Size n, const OutIter& result)
{
typedef typename std::iterator_traits <OutIter>::value_type value_type;
Size index = 0;
try
{
for (; index != n; ++index)
new (&result [index]) value_type (first [index]);
}
catch (...)
{
for (Size unwind = 0; unwind != index; ++unwind)
result [unwind].~value_type ();
throw;
}
}
};
}
template <typename InIter, typename Size, typename OutIter>
inline void uninitialized_copy_n (const InIter& first, Size n, const OutIter& result)
{
impl::uninitialized_copy_n_dispatch <InIter, Size, OutIter>::call (first, n, result);
}
// default case
namespace impl
{
template <typename OutIter, typename Size, typename T, typename Enable1 = void, typename Enable2 = void>
struct uninitialized_fill_n_dispatch
{
static void call (const OutIter& first, Size n, const T& val)
{
std::uninitialized_fill_n (first, n, val);
}
};
template <typename OutIter, typename Size, typename T, typename Enable2>
struct uninitialized_fill_n_dispatch <OutIter, Size, T,
typename enable_if <is_same <typename std::iterator_traits <OutIter>::iterator_category, std::random_access_iterator_tag>::value>::type,
Enable2>
{
static void call (const OutIter& first, Size n, const T& val)
{
Size index = 0;
try
{
for (; index != n; ++index)
new (&first [index]) T (val);
}
catch (...)
{
for (Size unwind = 0; unwind != index; ++unwind)
(&first [unwind])->~T ();
throw;
}
}
};
}
template <typename OutIter, typename Size, typename T>
inline void uninitialized_fill_n (const OutIter& first, Size n, const T& val)
{
impl::uninitialized_fill_n_dispatch <OutIter, Size, T>::call (first, n, val);
}
namespace impl
{
template <typename InIter, typename Size, typename T, typename Enable1 = void, typename Enable2 = void>
struct count_n_dispatch
{
static Size call (const InIter& first, Size n, const T& value)
{
InIter first_copy = first;
Size counter = 0;
for (; n > 0; --n)
{
if (*first_copy == value)
++counter;
++first_copy;
}
return counter;
}
};
template <typename InIter, typename Size, typename T, typename Enable2>
struct count_n_dispatch <InIter, Size, T, typename enable_if <
is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value>::type,
Enable2>
{
static Size call (const InIter& first, Size n, const T& value)
{
InIter first_copy = first;
Size counter = 0;
for (Size index = 0; index != n; ++index)
if (first_copy [index] == value)
++counter;
return counter;
}
};
}
template <typename InIter, typename Size, typename T>
inline Size count_n (const InIter& first, Size n, const T& value)
{
return impl::count_n_dispatch <InIter, Size, T>::call (first, n, value);
}
namespace impl
{
template <typename InIter, typename Size, typename OutIter, typename Enable1 = void, typename Enable2 = void>
struct copy_n_dispatch
{
static void call (const InIter& first, Size n, const OutIter& result)
{
InIter first_copy = first;
OutIter result_copy = result;
for (; n > 0; --n)
{
*result_copy = *first_copy;
++first_copy;
++result_copy;
}
}
};
// both in & out iterators are random access
// optimization: use explicit loop count, index into instead of incrementing iterators,
template <typename InIter, typename Size, typename OutIter, typename Enable2>
struct copy_n_dispatch <InIter, Size, OutIter, typename enable_if <
is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value != 0
&& is_same <typename std::iterator_traits <OutIter>::iterator_category, std::random_access_iterator_tag>::value != 0>::type,
Enable2>
{
static void call (const InIter& first, Size n, const OutIter& result)
{
for (Size index = 0; index != n; ++index)
result [index] = first [index];
}
};
}
template <typename InIter, typename Size, typename OutIter>
inline void copy_n (const InIter& first, Size n, const OutIter& result)
{
impl::copy_n_dispatch <InIter, Size, OutIter>::call (first, n, result);
}
namespace impl
{
// default case
template <typename OutIter, typename Size, typename T, typename Enable1 = void, typename Enable2 = void>
struct fill_n_dispatch
{
static OutIter call (const OutIter& first, Size n, const T& val)
{
return std::fill_n (first, n, val);
}
};
// out iterators is random access
// optimization: use explicit loop count, index into instead of incrementing iterators
template <typename OutIter, typename Size, typename T, typename Enable2>
struct fill_n_dispatch <OutIter, Size, T,
typename enable_if <is_same <typename std::iterator_traits <OutIter>::iterator_category, std::random_access_iterator_tag>::value>::type,
Enable2>
{
static void call (const OutIter& first, Size n, const T& val)
{
for (Size index = 0; index != n; ++index)
first [index] = val;
}
};
}
template <typename OutIter, typename Size, typename T>
inline void fill_n (const OutIter& first, Size n, const T& val)
{
impl::fill_n_dispatch <OutIter, Size, T>::call (first, n, val);
}
namespace impl
{
template <typename InIter, typename Size, typename T, typename BOp, typename Enable1 = void, typename Enable2 = void>
struct accumulate_n_dispatch
{
static T call (const InIter& first, Size n, const T& init, BOp op)
{
InIter first_copy = first;
T init_copy = init; // param init may be an aligned object, which causes MSC to choke and die...
for (; n > 0; --n)
{
init_copy = op (init_copy, *first_copy);
++first_copy;
}
return init_copy;
}
};
template <typename InIter, typename Size, typename T, typename BOp, typename Enable2>
struct accumulate_n_dispatch <InIter, Size, T, BOp,
typename enable_if <is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value>::type,
Enable2>
{
static T call (const InIter& first, Size n, const T& init, BOp op)
{
InIter first_copy = first;
T init_copy = init; // param init may be an aligned object, which causes MSC to choke and die...
for (Size index = 0; index != n; ++index)
init_copy = op (init_copy, first_copy [index]);
return init_copy;
}
};
template <typename InIter, typename Size, typename Enable2>
struct accumulate_n_dispatch <InIter, Size, bool, stdext::minimum <bool>,
typename enable_if <is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value>::type,
Enable2>
{
static bool call (const InIter& first, Size n, bool init, stdext::minimum <bool>)
{
if (init)
{
InIter first_copy = first;
for (Size index = 0; index != n; ++index)
if (!first_copy [index])
return false;
return true;
}
else
return false;
}
};
template <typename InIter, typename Size, typename Enable2>
struct accumulate_n_dispatch <InIter, Size, bool, stdext::maximum <bool>,
typename enable_if <is_same <typename std::iterator_traits <InIter>::iterator_category, std::random_access_iterator_tag>::value>::type,
Enable2>
{
static bool call (const InIter& first, Size n, bool init, stdext::maximum <bool>)
{
if (init)
return true;
else
{
InIter first_copy = first;
for (Size index = 0; index != n; ++index)
if (first_copy [index])
return true;
return false;
}
}
};
}
template <typename InIter, typename Size, typename T, typename BOp>
inline T accumulate_n (const InIter& first, Size n, const T& init, BOp op)
{
return impl::accumulate_n_dispatch <InIter, Size, T, BOp>::call (first, n, init, op);
}
}
#endif
| 33.776042 | 141 | 0.621511 | [
"object"
] |
9e0c7435c19b7061c6a2cf2fb6b00c3ae1ba275d | 4,340 | c | C | src/pydistorm.c | WolfgangSt/distorm64 | 6cb633a33c80f32e42b74ae088c6fa079a053894 | [
"BSD-3-Clause"
] | 4 | 2015-10-20T02:30:48.000Z | 2020-12-07T19:49:41.000Z | src/pydistorm.c | WolfgangSt/distorm64 | 6cb633a33c80f32e42b74ae088c6fa079a053894 | [
"BSD-3-Clause"
] | 1 | 2015-03-07T20:52:30.000Z | 2015-03-07T20:52:30.000Z | src/pydistorm.c | WolfgangSt/distorm64 | 6cb633a33c80f32e42b74ae088c6fa079a053894 | [
"BSD-3-Clause"
] | 6 | 2016-04-08T18:39:47.000Z | 2022-02-21T12:30:26.000Z | /*
pydistorm.c
:[diStorm64}: Python Module Extension
The ultimate disassembler library (80x86, AMD64)
Copyright (C) 2003-2008 Gil Dabah, http://ragestorm.net/distorm/
This library is licensed under the BSD license. See the file COPYING.
*/
#include "decoder.h"
#include "textdefs.h"
#include "wstring.h"
#include "pydistorm.h"
/* PYTHON MODULE EXPORTS */
_DLLEXPORT_ void initdistorm()
{
PyObject* distormModule = Py_InitModule3("distorm", distormModulebMethods, ":[diStorm64}:");
PyModule_AddIntConstant(distormModule, "Decode16Bits", Decode16Bits);
PyModule_AddIntConstant(distormModule, "Decode32Bits", Decode32Bits);
PyModule_AddIntConstant(distormModule, "Decode64Bits", Decode64Bits);
PyModule_AddIntConstant(distormModule, "OffsetTypeSize", sizeof(_OffsetType) * 8);
PyModule_AddStringConstant(distormModule, "info", ":[diStorm64 1.7.30}:\r\nCopyright RageStorm (C) 2008, Gil Dabah \r\n\r\ndiStorm is licensed under the BSD license.\r\nhttp://ragestorm.net/distorm/\r\n");
}
#define MAX_INSTRUCTIONS 1000
PyObject* distorm_Decode(PyObject* pSelf, PyObject* pArgs)
{
_DecodeType dt;
uint8_t* code;
int codeLen;
_OffsetType codeOffset;
_DecodeResult res = DECRES_NONE;
_DecodedInst decodedInstructions[MAX_INSTRUCTIONS];
unsigned int decodedInstructionsCount = 0, i = 0, next = 0;
uint8_t instructionText[MAX_TEXT_SIZE*2];
PyObject *ret = NULL, *pyObj = NULL, *dtObj = NULL;
pSelf = pSelf; /* UNREFERENCED_PARAMETER */
/* Decode(int32/64 offset, string code, int type=Decode32Bits) */
if (!PyArg_ParseTuple(pArgs, _PY_OFF_INT_SIZE_ "s#|O", &codeOffset, &code, &codeLen, &dtObj)) return NULL;
if (code == NULL) {
PyErr_SetString(PyExc_IOError, "Error while reading code buffer.");
return NULL;
}
if (codeLen < 0) {
PyErr_SetString(PyExc_OverflowError, "Code buffer is too long.");
return NULL;
}
/* Default parameter. */
if (dtObj == NULL) dt = Decode32Bits;
else if (!PyInt_Check(dtObj)) {
PyErr_SetString(PyExc_IndexError, "Third parameter must be either Decode16Bits, Decode32Bits or Decode64Bits (integer type).");
return NULL;
} else dt = (_DecodeType)PyInt_AsUnsignedLongMask(dtObj);
if ((dt != Decode16Bits) && (dt != Decode32Bits) && (dt != Decode64Bits)) {
PyErr_SetString(PyExc_IndexError, "Decoding-type must be either Decode16Bits, Decode32Bits or Decode64Bits.");
return NULL;
}
/* Construct an empty list, which later will be filled with tuples of (offset, size, mnemonic, hex). */
ret = PyList_New(0);
if (ret == NULL) {
PyErr_SetString(PyExc_MemoryError, "Not enough memory to initialize a list.");
return NULL;
}
while (res != DECRES_SUCCESS) {
res = internal_decode(codeOffset, code, codeLen, dt, decodedInstructions, MAX_INSTRUCTIONS, &decodedInstructionsCount);
if ((res == DECRES_MEMORYERR) && (decodedInstructionsCount == 0)) break;
for (i = 0; i < decodedInstructionsCount; i++) {
if (decodedInstructions[i].mnemonic.pos > 0) {
memcpy(instructionText, decodedInstructions[i].mnemonic.p, decodedInstructions[i].mnemonic.pos + 1); /* Include \0. */
if (decodedInstructions[i].operands.pos > 0)
instructionText[decodedInstructions[i].mnemonic.pos] = SP_CHR;
memcpy(&instructionText[decodedInstructions[i].mnemonic.pos+1], decodedInstructions[i].operands.p, decodedInstructions[i].operands.pos + 1);
} else instructionText[0] = '\0';
pyObj = Py_BuildValue("(" _PY_OFF_INT_SIZE_ "bss)", decodedInstructions[i].offset, decodedInstructions[i].size, instructionText, decodedInstructions[i].instructionHex.p);
if (pyObj == NULL) {
Py_DECREF(ret);
PyErr_SetString(PyExc_MemoryError, "Not enough memory to append an item into the list.");
return NULL;
}
if (PyList_Append(ret, pyObj) == -1) {
Py_DECREF(pyObj);
Py_DECREF(ret);
PyErr_SetString(PyExc_MemoryError, "Not enough memory to append an item into the list.");
return NULL;
}
// V 1.7.25 - Memleak fixed, it is necessary to DECREF the object, because PyList_Append INCREFs it on its own.
Py_DECREF(pyObj);
}
/* Get offset difference. */
next = (unsigned int)(decodedInstructions[decodedInstructionsCount-1].offset - codeOffset);
next += decodedInstructions[decodedInstructionsCount-1].size;
/* Advance ptr and recalc offset. */
code += next;
codeLen -= next;
codeOffset += next;
}
return ret;
}
| 36.166667 | 206 | 0.729954 | [
"object"
] |
9e0ddbcf276c48880cbd2cf414a98c51c79d625d | 2,357 | h | C | shell/platform/fuchsia/flutter/software_surface_producer.h | charafau/engine | c4a1a72da5dde44cc6288f8c4c0020b03e1e9279 | [
"BSD-3-Clause"
] | 5,823 | 2015-09-20T02:43:18.000Z | 2022-03-31T23:38:55.000Z | shell/platform/fuchsia/flutter/software_surface_producer.h | MasahideMori-SimpleAppli/engine | 1adccf1592cd980af3e446a344f738f7b2ac853c | [
"BSD-3-Clause"
] | 20,081 | 2015-09-19T16:07:59.000Z | 2022-03-31T23:33:26.000Z | shell/platform/fuchsia/flutter/software_surface_producer.h | MasahideMori-SimpleAppli/engine | 1adccf1592cd980af3e446a344f738f7b2ac853c | [
"BSD-3-Clause"
] | 5,383 | 2015-09-24T22:49:53.000Z | 2022-03-31T14:33:51.000Z | // Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#pragma once
#include <fuchsia/sysmem/cpp/fidl.h>
#include <fuchsia/ui/composition/cpp/fidl.h>
#include <lib/ui/scenic/cpp/resources.h>
#include <lib/ui/scenic/cpp/session.h>
#include <unordered_map>
#include "flutter/fml/macros.h"
#include "software_surface.h"
namespace flutter_runner {
class SoftwareSurfaceProducer final : public SurfaceProducer {
public:
// Only keep 12 surfaces at a time.
static constexpr int kMaxSurfaces = 12;
// If a surface doesn't get used for 3 or more generations, we discard it.
static constexpr int kMaxSurfaceAge = 3;
explicit SoftwareSurfaceProducer(scenic::Session* scenic_session);
~SoftwareSurfaceProducer() override;
bool IsValid() const { return valid_; }
// |SurfaceProducer|
GrDirectContext* gr_context() const override { return nullptr; }
// |SurfaceProducer|
std::unique_ptr<SurfaceProducerSurface> ProduceOffscreenSurface(
const SkISize& size) override;
// |SurfaceProducer|
std::unique_ptr<SurfaceProducerSurface> ProduceSurface(
const SkISize& size) override;
// |SurfaceProducer|
void SubmitSurfaces(
std::vector<std::unique_ptr<SurfaceProducerSurface>> surfaces) override;
private:
void SubmitSurface(std::unique_ptr<SurfaceProducerSurface> surface);
std::unique_ptr<SoftwareSurface> CreateSurface(const SkISize& size);
void RecycleSurface(std::unique_ptr<SoftwareSurface> surface);
void RecyclePendingSurface(uintptr_t surface_key);
void AgeAndCollectOldBuffers();
void TraceStats();
scenic::Session* scenic_session_; // Legacy gfx API endpoint.
fuchsia::sysmem::AllocatorSyncPtr sysmem_allocator_;
fuchsia::ui::composition::AllocatorPtr flatland_allocator_;
// These surfaces are available for re-use.
std::vector<std::unique_ptr<SoftwareSurface>> available_surfaces_;
// These surfaces have been written to, but scenic is not finished reading
// from them yet.
std::unordered_map<uintptr_t, std::unique_ptr<SoftwareSurface>>
pending_surfaces_;
size_t trace_surfaces_created_ = 0;
size_t trace_surfaces_reused_ = 0;
bool valid_ = false;
FML_DISALLOW_COPY_AND_ASSIGN(SoftwareSurfaceProducer);
};
} // namespace flutter_runner
| 30.217949 | 78 | 0.762834 | [
"vector"
] |
9e0e86ee519b20ad9b6e6e6ad6c9304c1c36facb | 954 | h | C | src/qt/qvaluecombobox.h | Bitkincoin/bitkincoin | dcfd8575e03ffe654e16cabb5ff6daabae7e24ab | [
"MIT"
] | 1 | 2021-03-26T05:12:57.000Z | 2021-03-26T05:12:57.000Z | src/qt/qvaluecombobox.h | Bitkincoin/bitkincoin | dcfd8575e03ffe654e16cabb5ff6daabae7e24ab | [
"MIT"
] | null | null | null | src/qt/qvaluecombobox.h | Bitkincoin/bitkincoin | dcfd8575e03ffe654e16cabb5ff6daabae7e24ab | [
"MIT"
] | 1 | 2021-10-01T16:42:29.000Z | 2021-10-01T16:42:29.000Z | // Copyright (c) 2011-2015 The Bitkincoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_QT_QVALUECOMBOBOX_H
#define BITCOIN_QT_QVALUECOMBOBOX_H
#include <QComboBox>
#include <QVariant>
/* QComboBox that can be used with QDataWidgetMapper to select ordinal values from a model. */
class QValueComboBox : public QComboBox
{
Q_OBJECT
Q_PROPERTY(QVariant value READ value WRITE setValue NOTIFY valueChanged USER true)
public:
explicit QValueComboBox(QWidget *parent = 0);
QVariant value() const;
void setValue(const QVariant &value);
/** Specify model role to use as ordinal value (defaults to Qt::UserRole) */
void setRole(int role);
Q_SIGNALS:
void valueChanged();
private:
int role;
private Q_SLOTS:
void handleSelectionChanged(int idx);
};
#endif // BITCOIN_QT_QVALUECOMBOBOX_H
| 25.105263 | 94 | 0.752621 | [
"model"
] |
9e1497e7ab15709e4547ed8d7267070f0eb55c85 | 165,688 | h | C | src/code-stub-assembler.h | RiyoCoder/v8 | e073edfc7dc990cc5f71c4e51ac27b19be16fcb7 | [
"BSD-3-Clause"
] | null | null | null | src/code-stub-assembler.h | RiyoCoder/v8 | e073edfc7dc990cc5f71c4e51ac27b19be16fcb7 | [
"BSD-3-Clause"
] | null | null | null | src/code-stub-assembler.h | RiyoCoder/v8 | e073edfc7dc990cc5f71c4e51ac27b19be16fcb7 | [
"BSD-3-Clause"
] | null | null | null | // Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODE_STUB_ASSEMBLER_H_
#define V8_CODE_STUB_ASSEMBLER_H_
#include <functional>
#include "src/bailout-reason.h"
#include "src/base/macros.h"
#include "src/compiler/code-assembler.h"
#include "src/frames.h"
#include "src/globals.h"
#include "src/message-template.h"
#include "src/objects.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
#include "src/roots.h"
#include "torque-generated/builtins-base-from-dsl-gen.h"
namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
class CodeStubArguments;
class CodeStubAssembler;
class StatsCounter;
class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
V(PromiseSpeciesProtector, promise_species_protector, \
PromiseSpeciesProtector) \
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector) \
V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector)
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
AllocationSiteWithoutWeakNextMap) \
V(BooleanMap, boolean_map, BooleanMap) \
V(CodeMap, code_map, CodeMap) \
V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
V(EmptyPropertyDictionary, empty_property_dictionary, \
EmptyPropertyDictionary) \
V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(empty_string, empty_string, EmptyString) \
V(FalseValue, false_value, False) \
V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
V(FunctionTemplateInfoMap, function_template_info_map, \
FunctionTemplateInfoMap) \
V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
V(HeapNumberMap, heap_number_map, HeapNumberMap) \
V(iterator_symbol, iterator_symbol, IteratorSymbol) \
V(length_string, length_string, LengthString) \
V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
V(MetaMap, meta_map, MetaMap) \
V(MinusZeroValue, minus_zero_value, MinusZero) \
V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
V(NanValue, nan_value, Nan) \
V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
V(NoFeedbackCellMap, no_feedback_cell_map, NoFeedbackCellMap) \
V(NullValue, null_value, Null) \
V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
V(PreParsedScopeDataMap, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
V(prototype_string, prototype_string, PrototypeString) \
V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
V(SymbolMap, symbol_map, SymbolMap) \
V(TheHoleValue, the_hole_value, TheHole) \
V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
V(TrueValue, true_value, True) \
V(Tuple2Map, tuple2_map, Tuple2Map) \
V(Tuple3Map, tuple3_map, Tuple3Map) \
V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
ArrayBoilerplateDescriptionMap) \
V(UncompiledDataWithoutPreParsedScopeMap, \
uncompiled_data_without_pre_parsed_scope_map, \
UncompiledDataWithoutPreParsedScopeMap) \
V(UncompiledDataWithPreParsedScopeMap, \
uncompiled_data_with_pre_parsed_scope_map, \
UncompiledDataWithPreParsedScopeMap) \
V(UndefinedValue, undefined_value, Undefined) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
#ifdef DEBUG
#define CSA_CHECK(csa, x) \
(csa)->Check( \
[&]() -> compiler::Node* { \
return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
}, \
#x, __FILE__, __LINE__)
#else
#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
#endif
#ifdef DEBUG
// Add stringified versions to the given values, except the first. That is,
// transform
// x, a, b, c, d, e, f
// to
// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
//
// __VA_ARGS__ is ignored to allow the caller to pass through too many
// parameters, and the first element is ignored to support having no extra
// values without empty __VA_ARGS__ (which cause all sorts of problems with
// extra commas).
#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5
// Stringify the given variable number of arguments. The arguments are trimmed
// to 5 if there are too many, and padded with nullptr if there are not enough.
#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \
CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
nullptr, nullptr)
#define CSA_ASSERT_GET_FIRST(x, ...) (x)
#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
// CSA_ASSERT(csa, <condition>, <extra values to print...>)
// We have to jump through some hoops to allow <extra values to print...> to be
// empty.
#define CSA_ASSERT(csa, ...) \
(csa)->Assert( \
[&]() -> compiler::Node* { \
return implicit_cast<compiler::SloppyTNode<Word32T>>( \
EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
}, \
EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
// <extra values to print...>)
#define CSA_ASSERT_BRANCH(csa, ...) \
(csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
__LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
(csa)->Assert( \
[&]() -> compiler::Node* { \
compiler::Node* const argc = \
(csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
}, \
"argc " #op " " #expected, __FILE__, __LINE__, \
SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
"argc")
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
#define CSA_DEBUG_INFO(name) \
{ #name, __FILE__, __LINE__ }
#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
#define VARIABLE(name, ...) \
Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define VARIABLE_CONSTRUCTOR(name, ...) \
name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) \
TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else // DEBUG
#define CSA_ASSERT(csa, ...) ((void)0)
#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#endif // DEBUG
#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
#ifdef ENABLE_SLOW_DCHECKS
// Extra-expensive asserts, additionally gated on the --enable-slow-asserts
// runtime flag; for checks too costly even for regular DEBUG builds.
#define CSA_SLOW_ASSERT(csa, ...) \
  if (FLAG_enable_slow_asserts) { \
    CSA_ASSERT(csa, __VA_ARGS__); \
  }
#else
#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
#endif
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
// without modifying files in the compiler directory (and requiring a review
// from a compiler directory OWNER).
class V8_EXPORT_PRIVATE CodeStubAssembler
: public compiler::CodeAssembler,
public BaseBuiltinsFromDSLAssembler {
public:
using Node = compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
template <typename T>
using LazyNode = std::function<TNode<T>()>;
CodeStubAssembler(compiler::CodeAssemblerState* state);
enum AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1,
kAllowLargeObjectAllocation = 1 << 2,
};
enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking };
typedef base::Flags<AllocationFlag> AllocationFlags;
enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
// On 32-bit platforms, there is a slight performance advantage to doing all
// of the array offset/index arithmetic with SMIs, since it's possible
// to save a few tag/untag operations without paying an extra expense when
// calculating array offset (the smi math can be folded away) and there are
// fewer live ranges. Thus only convert indices to untagged value on 64-bit
// platforms.
  // Prefer untagged (IntPtr) index arithmetic on 64-bit targets; see the
  // comment above for why 32-bit targets stay in Smi mode.
  ParameterMode OptimalParameterMode() const {
    return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
  }
MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
: MachineRepresentation::kTaggedSigned;
}
MachineRepresentation OptimalParameterRepresentation() const {
return ParameterRepresentation(OptimalParameterMode());
}
  // Converts a mode-dependent |value| to an untagged IntPtr, untagging
  // first when the value is represented as a Smi.
  TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) value = SmiUntag(value);
    return UncheckedCast<IntPtrT>(value);
  }
  // Converts an IntPtr to the representation demanded by |mode|, tagging it
  // into a Smi when SMI_PARAMETERS is requested.
  Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) return SmiTag(value);
    return value;
  }
Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
}
  // Produces a tagged Smi from a mode-dependent |value|: INTPTR values are
  // tagged; SMI values are already tagged and only need a cast.
  TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
    if (mode != SMI_PARAMETERS) return SmiTag(value);
    return UncheckedCast<Smi>(value);
  }
  // Converts a tagged Smi |value| to the representation demanded by |mode|,
  // untagging when raw IntPtr form is requested.
  Node* TaggedToParameter(SloppyTNode<Smi> value, ParameterMode mode) {
    if (mode != SMI_PARAMETERS) return SmiUntag(value);
    return value;
  }
  // Casts |value| to Smi, jumping to |fail| if it is not a Smi.
  TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
    GotoIf(TaggedIsNotSmi(value), fail);
    return UncheckedCast<Smi>(value);
  }
TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
GotoIfNot(IsNumber(value), fail);
return UncheckedCast<Number>(value);
}
  // Casts |value| to HeapObject, jumping to |fail| if it is a Smi.
  TNode<HeapObject> TaggedToHeapObject(TNode<Object> value, Label* fail) {
    GotoIf(TaggedIsSmi(value), fail);
    return UncheckedCast<HeapObject>(value);
  }
TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
Label* fail) {
GotoIfNot(IsJSArray(heap_object), fail);
return UncheckedCast<JSArray>(heap_object);
}
  // Casts |value| to a fast-elements JSArray, jumping to |fail| if it is a
  // Smi or is not a fast JSArray in the given |context|.
  TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
                                     TNode<Object> value, Label* fail) {
    GotoIf(TaggedIsSmi(value), fail);
    TNode<HeapObject> heap_object = CAST(value);
    GotoIfNot(IsFastJSArray(heap_object, context), fail);
    return UncheckedCast<JSArray>(heap_object);
  }
TNode<JSDataView> HeapObjectToJSDataView(TNode<HeapObject> heap_object,
Label* fail) {
GotoIfNot(IsJSDataView(heap_object), fail);
return CAST(heap_object);
}
TNode<JSReceiver> HeapObjectToCallable(TNode<HeapObject> heap_object,
Label* fail) {
GotoIfNot(IsCallable(heap_object), fail);
return CAST(heap_object);
}
TNode<String> HeapObjectToString(TNode<HeapObject> heap_object, Label* fail) {
GotoIfNot(IsString(heap_object), fail);
return CAST(heap_object);
}
TNode<JSReceiver> HeapObjectToConstructor(TNode<HeapObject> heap_object,
Label* fail) {
GotoIfNot(IsConstructor(heap_object), fail);
return CAST(heap_object);
}
Node* MatchesParameterMode(Node* value, ParameterMode mode);
// Defines a binary operation that dispatches on ParameterMode: the Smi
// flavor for SMI_PARAMETERS, the raw IntPtr flavor for INTPTR_PARAMETERS.
#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
  Node* OpName(Node* a, Node* b, ParameterMode mode) {   \
    if (mode == SMI_PARAMETERS) {                        \
      return SmiOpName(CAST(a), CAST(b));                \
    } else {                                             \
      DCHECK_EQ(INTPTR_PARAMETERS, mode);                \
      return IntPtrOpName(a, b);                         \
    }                                                    \
  }
PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
SmiLessThanOrEqual)
PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
SmiGreaterThanOrEqual)
PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow)
PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual,
SmiAboveOrEqual)
#undef PARAMETER_BINOP
uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { return a << b; }
uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { return a >> b; }
TNode<Object> NoContextConstant();
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<Heap>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
TNode<BoolT> Is##name(SloppyTNode<Object> value); \
TNode<BoolT> IsNot##name(SloppyTNode<Object> value);
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
ParameterMode mode);
// Round the 32bits payload of the provided word up to the next power of two.
TNode<IntPtrT> IntPtrRoundUpToPowerOfTwo32(TNode<IntPtrT> value);
// Select the maximum of the two provided IntPtr values.
TNode<IntPtrT> IntPtrMax(SloppyTNode<IntPtrT> left,
SloppyTNode<IntPtrT> right);
// Select the minimum of the two provided IntPtr values.
TNode<IntPtrT> IntPtrMin(SloppyTNode<IntPtrT> left,
SloppyTNode<IntPtrT> right);
// Float64 operations.
TNode<Float64T> Float64Ceil(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Floor(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Round(SloppyTNode<Float64T> x);
TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
  // Select the maximum of the two provided Number values.
TNode<Number> NumberMax(SloppyTNode<Number> left, SloppyTNode<Number> right);
// Select the minimum of the two provided Number values.
TNode<Number> NumberMin(SloppyTNode<Number> left, SloppyTNode<Number> right);
// After converting an index to an integer, calculate a relative index: if
// index < 0, max(length + index, 0); else min(index, length)
TNode<IntPtrT> ConvertToRelativeIndex(TNode<Context> context,
TNode<Object> index,
TNode<IntPtrT> length);
// Returns true iff the given value fits into smi range and is >= 0.
TNode<BoolT> IsValidPositiveSmi(TNode<IntPtrT> value);
// Tag an IntPtr as a Smi value.
TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
// Untag a Smi value as an IntPtr.
TNode<IntPtrT> SmiUntag(SloppyTNode<Smi> value);
// Smi conversions.
TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
// Defines a Smi arithmetic/bitwise binop. With 32-bit Smis the operation
// runs directly on the tagged words (the zero tag bits pass through these
// ops unchanged); with 31-bit Smis the payload is truncated to 32 bits,
// operated on, and widened back into a tagged word.
#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName)      \
  TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) {                    \
    if (SmiValuesAre32Bits()) {                                         \
      return BitcastWordToTaggedSigned(                                 \
          IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
    } else {                                                            \
      DCHECK(SmiValuesAre31Bits());                                     \
      if (kPointerSize == kInt64Size) {                                 \
        CSA_ASSERT(this, IsValidSmi(a));                                \
        CSA_ASSERT(this, IsValidSmi(b));                                \
      }                                                                 \
      return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(             \
          Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),    \
                      TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \
    }                                                                   \
  }
SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And)
SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or)
#undef SMI_ARITHMETIC_BINOP
  // Returns |value| + 1 as a Smi.
  TNode<Smi> SmiInc(TNode<Smi> value) { return SmiAdd(value, SmiConstant(1)); }
TNode<IntPtrT> TryIntPtrAdd(TNode<IntPtrT> a, TNode<IntPtrT> b,
Label* if_overflow);
TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
}
  // Logical right shift of a Smi by a constant. The final mask with the bit
  // pattern of SmiConstant(-1) keeps only the valid Smi payload bits,
  // clearing anything shifted down into the tag area so the result remains
  // a well-formed Smi.
  TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
    return BitcastWordToTaggedSigned(
        WordAnd(WordShr(BitcastTaggedToWord(a), shift),
                BitcastTaggedToWord(SmiConstant(-1))));
  }
  // Arithmetic (sign-preserving) right shift of a Smi by a constant. As in
  // SmiShr, the mask clears bits shifted into the tag area so the result
  // stays a well-formed Smi.
  TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
    return BitcastWordToTaggedSigned(
        WordAnd(WordSar(BitcastTaggedToWord(a), shift),
                BitcastTaggedToWord(SmiConstant(-1))));
  }
Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiShl(CAST(a), shift);
} else {
DCHECK_EQ(INTPTR_PARAMETERS, mode);
return WordShl(a, shift);
}
}
Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiShr(CAST(a), shift);
} else {
DCHECK_EQ(INTPTR_PARAMETERS, mode);
return WordShr(a, shift);
}
}
// Defines a Smi comparison. With 32-bit Smis the tagged words are compared
// directly (tag bits are equal on both sides and do not affect ordering of
// the high payload); with 31-bit Smis the payloads are truncated to 32 bits
// and compared there.
#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName)           \
  TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) {                    \
    if (SmiValuesAre32Bits()) {                                           \
      return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
    } else {                                                              \
      DCHECK(SmiValuesAre31Bits());                                       \
      if (kPointerSize == kInt64Size) {                                   \
        CSA_ASSERT(this, IsValidSmi(a));                                  \
        CSA_ASSERT(this, IsValidSmi(b));                                  \
      }                                                                   \
      return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),   \
                         TruncateIntPtrToInt32(BitcastTaggedToWord(b)));  \
    }                                                                     \
  }
SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan)
SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual,
Uint32GreaterThanOrEqual)
SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan)
SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan)
SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual,
Int32LessThanOrEqual)
SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan)
SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
Int32GreaterThanOrEqual)
#undef SMI_COMPARISON_OP
TNode<Smi> SmiMax(TNode<Smi> a, TNode<Smi> b);
TNode<Smi> SmiMin(TNode<Smi> a, TNode<Smi> b);
// Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
TNode<Number> SmiMod(TNode<Smi> a, TNode<Smi> b);
// Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
TNode<Number> SmiMul(TNode<Smi> a, TNode<Smi> b);
// Tries to compute dividend / divisor for Smi inputs; branching to bailout
// if the division needs to be performed as a floating point operation.
TNode<Smi> TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor, Label* bailout);
// Compares two Smis a and b as if they were converted to strings and then
// compared lexicographically. Returns:
// -1 iff x < y.
// 0 iff x == y.
// 1 iff x > y.
TNode<Smi> SmiLexicographicCompare(TNode<Smi> x, TNode<Smi> y);
// Smi | HeapNumber operations.
TNode<Number> NumberInc(SloppyTNode<Number> value);
TNode<Number> NumberDec(SloppyTNode<Number> value);
TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
void GotoIfNotNumber(Node* value, Label* is_not_number);
void GotoIfNumber(Node* value, Label* is_number);
TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }
TNode<Number> BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);
// Allocate an object of the given size.
TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
AllocationFlags flags = kNone);
TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(TNode<IntPtrT> size,
AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
TNode<IntPtrT> offset);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
typedef std::function<void(Label*, Label*)> BranchGenerator;
typedef std::function<Node*()> NodeGenerator;
void Assert(const BranchGenerator& branch, const char* message = nullptr,
const char* file = nullptr, int line = 0,
Node* extra_node1 = nullptr, const char* extra_node1_name = "",
Node* extra_node2 = nullptr, const char* extra_node2_name = "",
Node* extra_node3 = nullptr, const char* extra_node3_name = "",
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void Assert(const NodeGenerator& condition_body,
const char* message = nullptr, const char* file = nullptr,
int line = 0, Node* extra_node1 = nullptr,
const char* extra_node1_name = "", Node* extra_node2 = nullptr,
const char* extra_node2_name = "", Node* extra_node3 = nullptr,
const char* extra_node3_name = "", Node* extra_node4 = nullptr,
const char* extra_node4_name = "", Node* extra_node5 = nullptr,
const char* extra_node5_name = "");
void Check(const BranchGenerator& branch, const char* message = nullptr,
const char* file = nullptr, int line = 0,
Node* extra_node1 = nullptr, const char* extra_node1_name = "",
Node* extra_node2 = nullptr, const char* extra_node2_name = "",
Node* extra_node3 = nullptr, const char* extra_node3_name = "",
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void Check(const NodeGenerator& condition_body, const char* message = nullptr,
const char* file = nullptr, int line = 0,
Node* extra_node1 = nullptr, const char* extra_node1_name = "",
Node* extra_node2 = nullptr, const char* extra_node2_name = "",
Node* extra_node3 = nullptr, const char* extra_node3_name = "",
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void FailAssert(
const char* message = nullptr, const char* file = nullptr, int line = 0,
Node* extra_node1 = nullptr, const char* extra_node1_name = "",
Node* extra_node2 = nullptr, const char* extra_node2_name = "",
Node* extra_node3 = nullptr, const char* extra_node3_name = "",
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void FastCheck(TNode<BoolT> condition);
  // The following Call wrappers call an object according to the semantics
  // that one finds in the ECMAScript spec, operating on a Callable (e.g. a
  // JSFunction or proxy) rather than a Code object.
  // Calls |callable| with a receiver statically known to be a JSReceiver
  // (hence neither null nor undefined), allowing the cheaper
  // kNotNullOrUndefined receiver-conversion mode.
  template <class... TArgs>
  TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
                     TNode<JSReceiver> receiver, TArgs... args) {
    return UncheckedCast<Object>(CallJS(
        CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
        context, callable, receiver, args...));
  }
  // Calls |callable| with a generic receiver. If the receiver is a
  // compile-time null/undefined constant, selects the kNullOrUndefined
  // receiver-conversion mode up front; otherwise uses the fully generic
  // call stub which checks the receiver at runtime.
  template <class... TArgs>
  TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
                     TNode<Object> receiver, TArgs... args) {
    if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) {
      return UncheckedCast<Object>(CallJS(
          CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
          context, callable, receiver, args...));
    }
    return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
                                        callable, receiver, args...));
  }
template <class... TArgs>
TNode<JSReceiver> Construct(TNode<Context> context,
TNode<JSReceiver> new_target, TArgs... args) {
return CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
new_target, implicit_cast<TNode<Object>>(args)...));
}
  // Typed two-way select: branches on |condition| and merges the values
  // produced by the |true_body|/|false_body| generators into a phi with
  // machine representation A. Each generator emits code into its own
  // branch, so only one side executes at runtime.
  template <class A, class F, class G>
  TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
                  const G& false_body) {
    return UncheckedCast<A>(SelectImpl(
        condition,
        [&]() -> Node* { return implicit_cast<TNode<A>>(true_body()); },
        [&]() -> Node* { return implicit_cast<TNode<A>>(false_body()); },
        MachineRepresentationOf<A>::value));
  }
template <class A>
TNode<A> SelectConstant(TNode<BoolT> condition, TNode<A> true_value,
TNode<A> false_value) {
return Select<A>(condition, [=] { return true_value; },
[=] { return false_value; });
}
TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
int true_value, int false_value);
TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
int true_value, int false_value);
TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
Smi false_value);
TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
Smi false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
}
TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
int false_value) {
return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
}
TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
int false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value),
Smi::FromInt(false_value));
}
TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a);
// Check that the value is a non-negative smi.
TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
// Check that a word has a word-aligned address.
TNode<BoolT> WordIsWordAligned(SloppyTNode<WordT> word);
TNode<BoolT> WordIsPowerOfTwo(SloppyTNode<IntPtrT> value);
#if DEBUG
void Bind(Label* label, AssemblerDebugInfo debug_info);
#endif // DEBUG
void Bind(Label* label);
template <class... T>
void Bind(compiler::CodeAssemblerParameterizedLabel<T...>* label,
TNode<T>*... phis) {
CodeAssembler::Bind(label, phis...);
}
void BranchIfSmiEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
Label* if_false) {
Branch(SmiEqual(a, b), if_true, if_false);
}
void BranchIfSmiLessThan(TNode<Smi> a, TNode<Smi> b, Label* if_true,
Label* if_false) {
Branch(SmiLessThan(a, b), if_true, if_false);
}
void BranchIfSmiLessThanOrEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
Label* if_false) {
Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
}
  // NaN is the only float64 value not equal to itself, so the self-equality
  // result is inverted: equal -> not NaN -> |if_false|.
  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
    Branch(Float64Equal(value, value), if_false, if_true);
  }
// Branches to {if_true} if ToBoolean applied to {value} yields true,
// otherwise goes to {if_false}.
void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
// Branches to {if_true} when --force-slow-path flag has been passed.
// It's used for testing to ensure that slow path implementation behave
// equivalent to corresponding fast paths (where applicable).
//
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
// Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect.
void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true);
// Load value from current parent frame by given offset in bytes.
Node* LoadFromParentFrame(int offset,
MachineType rep = MachineType::AnyTagged());
// Load an object pointer from a buffer that isn't in the heap.
Node* LoadBufferObject(Node* buffer, int offset,
MachineType rep = MachineType::AnyTagged());
TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
return UncheckedCast<RawPtrT>(
LoadBufferObject(buffer, offset, MachineType::Pointer()));
}
TNode<Smi> LoadBufferSmi(TNode<RawPtrT> buffer, int offset) {
return CAST(LoadBufferObject(buffer, offset, MachineType::TaggedSigned()));
}
// Load a field from an object on the heap.
Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
MachineType rep);
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
return CAST(LoadObjectField(object, offset, MachineTypeOf<T>::value));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
return UncheckedCast<T>(
LoadObjectField(object, offset, MachineTypeOf<T>::value));
}
TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object, int offset) {
return UncheckedCast<Object>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
Node* LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset, MachineType rep);
TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset) {
return UncheckedCast<Object>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
return UncheckedCast<T>(
LoadObjectField(object, offset, MachineTypeOf<T>::value));
}
// Load a SMI field and untag it.
TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
int offset);
// Load a SMI field, untag it, and convert to Word32.
TNode<Int32T> LoadAndUntagToWord32ObjectField(Node* object, int offset);
// Load a SMI and untag it.
TNode<IntPtrT> LoadAndUntagSmi(Node* base, int index);
// Load a SMI root, untag it, and convert to Word32.
TNode<Int32T> LoadAndUntagToWord32Root(RootIndex root_index);
TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
int offset) {
return UncheckedCast<MaybeObject>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
// Tag a smi and store it.
Node* StoreAndTagSmi(Node* base, int offset, Node* value);
// Load the floating point value of a HeapNumber.
TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapNumber> object);
// Load the Map of an HeapObject.
TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
// Load the instance type of an HeapObject.
TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
  // Compare the instance type of the object against the provided one.
TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
InstanceType type);
TNode<BoolT> DoesntHaveInstanceType(SloppyTNode<HeapObject> object,
InstanceType type);
TNode<BoolT> TaggedDoesntHaveInstanceType(SloppyTNode<HeapObject> any_tagged,
InstanceType type);
// Load the properties backing store of a JSObject.
TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
// Load the elements backing store of a JSObject.
TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object);
// Load the length of a JSArray instance.
TNode<Object> LoadJSArgumentsObjectWithLength(
SloppyTNode<JSArgumentsObjectWithLength> array);
// Load the length of a JSArray instance.
TNode<Number> LoadJSArrayLength(SloppyTNode<JSArray> array);
// Load the length of a fast JSArray instance. Returns a positive Smi.
TNode<Smi> LoadFastJSArrayLength(SloppyTNode<JSArray> array);
// Load the length of a fixed array base instance.
TNode<Smi> LoadFixedArrayBaseLength(SloppyTNode<FixedArrayBase> array);
// Load the length of a fixed array base instance.
TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(
SloppyTNode<FixedArrayBase> array);
// Load the length of a WeakFixedArray.
TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
SloppyTNode<WeakFixedArray> array);
// Load the number of descriptors in DescriptorArray.
TNode<Int32T> LoadNumberOfDescriptors(TNode<DescriptorArray> array);
// Load the bit field of a Map.
TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
// Load bit field 2 of a map.
TNode<Int32T> LoadMapBitField2(SloppyTNode<Map> map);
// Load bit field 3 of a map.
TNode<Uint32T> LoadMapBitField3(SloppyTNode<Map> map);
// Load the instance type of a map.
TNode<Int32T> LoadMapInstanceType(SloppyTNode<Map> map);
// Load the ElementsKind of a map.
TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
// Load the instance descriptors of a map.
TNode<DescriptorArray> LoadMapDescriptors(SloppyTNode<Map> map);
// Load the prototype of a map.
TNode<HeapObject> LoadMapPrototype(SloppyTNode<Map> map);
// Load the prototype info of a map. The result has to be checked if it is a
// prototype info object or not.
TNode<PrototypeInfo> LoadMapPrototypeInfo(SloppyTNode<Map> map,
Label* if_has_no_proto_info);
// Load the instance size of a Map.
TNode<IntPtrT> LoadMapInstanceSizeInWords(SloppyTNode<Map> map);
// Load the inobject properties start of a Map (valid only for JSObjects).
TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(SloppyTNode<Map> map);
// Load the constructor function index of a Map (only for primitive maps).
TNode<IntPtrT> LoadMapConstructorFunctionIndex(SloppyTNode<Map> map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
// Load the EnumLength of a Map.
Node* LoadMapEnumLength(SloppyTNode<Map> map);
// Load the back-pointer of a Map.
TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
// Checks that |map| has only simple properties, returns bitfield3.
TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
TNode<Int32T> instance_type,
Label* bailout);
  // Load the identity hash of a JSReceiver.
TNode<IntPtrT> LoadJSReceiverIdentityHash(SloppyTNode<Object> receiver,
Label* if_no_hash = nullptr);
// This is only used on a newly allocated PropertyArray which
// doesn't have an existing hash.
void InitializePropertyArrayLength(Node* property_array, Node* length,
ParameterMode mode);
// Check if the map is set for slow properties.
TNode<BoolT> IsDictionaryMap(SloppyTNode<Map> map);
// Load the hash field of a name as an uint32 value.
TNode<Uint32T> LoadNameHashField(SloppyTNode<Name> name);
// Load the hash value of a name as an uint32 value.
// If {if_hash_not_computed} label is specified then it also checks if
// hash is actually computed.
TNode<Uint32T> LoadNameHash(SloppyTNode<Name> name,
Label* if_hash_not_computed = nullptr);
// Load length field of a String object as Smi value.
TNode<Smi> LoadStringLengthAsSmi(SloppyTNode<String> string);
// Load length field of a String object as intptr_t value.
TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
// Load length field of a String object as uint32_t value.
TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
// Loads a pointer to the sequential String char array.
Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSValue object.
Node* LoadJSValueValue(Node* object);
// Figures out whether the value of maybe_object is:
// - a SMI (jump to "if_smi", "extracted" will be the SMI value)
// - a cleared weak reference (jump to "if_cleared", "extracted" will be
// untouched)
// - a weak reference (jump to "if_weak", "extracted" will be the object
// pointed to)
// - a strong reference (jump to "if_strong", "extracted" will be the object
// pointed to)
void DispatchMaybeObject(TNode<MaybeObject> maybe_object, Label* if_smi,
Label* if_cleared, Label* if_weak, Label* if_strong,
TVariable<Object>* extracted);
// See MaybeObject for semantics of these functions.
TNode<BoolT> IsStrong(TNode<MaybeObject> value);
// This variant is for overzealous checking.
TNode<BoolT> IsStrong(TNode<Object> value) {
return IsStrong(ReinterpretCast<MaybeObject>(value));
}
TNode<HeapObject> GetHeapObjectIfStrong(TNode<MaybeObject> value,
Label* if_not_strong);
TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value);
TNode<BoolT> IsCleared(TNode<MaybeObject> value);
TNode<BoolT> IsNotCleared(TNode<MaybeObject> value);
// Removes the weak bit + asserts it was set.
TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value);
TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value,
Label* if_cleared);
TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object,
TNode<Object> value);
TNode<BoolT> IsNotWeakReferenceTo(TNode<MaybeObject> object,
TNode<Object> value);
TNode<BoolT> IsStrongReferenceTo(TNode<MaybeObject> object,
TNode<Object> value);
TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
void FixedArrayBoundsCheck(TNode<FixedArrayBase> array, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array>
TNode<IntPtrT> LoadArrayLength(TNode<Array> array);
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array>
TNode<MaybeObject> LoadArrayElement(
TNode<Array> array, int array_header_size, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
  // Convenience overload: IntPtr index with an explicit poisoning mode and
  // no additional byte offset.
  TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
                                      TNode<IntPtrT> index,
                                      LoadSensitivity needs_poisoning) {
    return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
                                 needs_poisoning);
  }
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, index, additional_offset,
INTPTR_PARAMETERS, needs_poisoning);
}
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, int index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
additional_offset, INTPTR_PARAMETERS,
needs_poisoning);
}
TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
TNode<Smi> index) {
return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
SloppyTNode<IntPtrT> index);
TNode<IntPtrT> LoadPropertyArrayLength(TNode<PropertyArray> object);
// Load an element from an array and untag it and return it as Word32.
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array>
TNode<Int32T> LoadAndUntagToWord32ArrayElement(
TNode<Array> array, int array_header_size, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Load an array element from a FixedArray, untag it and return it as Word32.
TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
TNode<FixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
TNode<FixedArray> object, int index, int additional_offset = 0) {
return LoadAndUntagToWord32FixedArrayElement(
object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS);
}
// Load an array element from a WeakFixedArray.
TNode<MaybeObject> LoadWeakFixedArrayElement(
TNode<WeakFixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
// Constant-index convenience overload for WeakFixedArray loads; forwards to
// the Node*-index overload in intptr mode.
TNode<MaybeObject> LoadWeakFixedArrayElement(
    TNode<WeakFixedArray> object, int index, int additional_offset = 0,
    LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
  return LoadWeakFixedArrayElement(object, IntPtrConstant(index),
                                   additional_offset, INTPTR_PARAMETERS,
                                   needs_poisoning);
}
// Load an array element from a FixedDoubleArray.
TNode<Float64T> LoadFixedDoubleArrayElement(
SloppyTNode<FixedDoubleArray> object, Node* index,
MachineType machine_type, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
Label* if_hole = nullptr);
// Smi-index overload: loads a Float64 element, jumping to |if_hole| (when
// provided) if the slot holds the double hole.
Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
                                  TNode<Smi> index,
                                  Label* if_hole = nullptr) {
  return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
                                     SMI_PARAMETERS, if_hole);
}
// IntPtrT-index overload: loads a Float64 element, jumping to |if_hole|
// (when provided) if the slot holds the double hole.
Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
                                  TNode<IntPtrT> index,
                                  Label* if_hole = nullptr) {
  return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
                                     INTPTR_PARAMETERS, if_hole);
}
// Load an array element from a FixedArray, FixedDoubleArray or a
// NumberDictionary (depending on the |elements_kind|) and return
// it as a tagged value. Assumes that the |index| passed a length
// check before. Bails out to |if_accessor| if the element that
// was found is an accessor, or to |if_hole| if the element at
// the given |index| is not found in |elements|.
TNode<Object> LoadFixedArrayBaseElementAsTagged(
TNode<FixedArrayBase> elements, TNode<IntPtrT> index,
TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole);
// Load a feedback slot from a FeedbackVector.
TNode<MaybeObject> LoadFeedbackVectorSlot(
Node* object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
TNode<IntPtrT> LoadFeedbackVectorLength(TNode<FeedbackVector>);
TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
TNode<Smi> index,
Label* if_hole = nullptr);
TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
TNode<IntPtrT> index,
Label* if_hole = nullptr);
// Load Float64 value by |base| + |offset| address. If the value is a double
// hole then jump to |if_hole|. If |machine_type| is None then only the hole
// check is generated.
TNode<Float64T> LoadDoubleWithHoleCheck(
SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
TNode<FixedTypedArrayBase> typed_array);
Node* LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind);
// Parts of the above, factored out for readability:
Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
Node* offset);
// 64-bit platforms only:
TNode<BigInt> BigIntFromInt64(TNode<IntPtrT> value);
TNode<BigInt> BigIntFromUint64(TNode<UintPtrT> value);
// 32-bit platforms only:
TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high);
TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high);
void StoreFixedTypedArrayElementFromTagged(
TNode<Context> context, TNode<FixedTypedArrayBase> elements,
TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
ParameterMode parameter_mode);
// Context manipulation
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
int slot_index);
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
SloppyTNode<IntPtrT> slot_index);
TNode<Object> LoadContextElement(TNode<Context> context,
TNode<Smi> slot_index);
void StoreContextElement(SloppyTNode<Context> context, int slot_index,
SloppyTNode<Object> value);
void StoreContextElement(SloppyTNode<Context> context,
SloppyTNode<IntPtrT> slot_index,
SloppyTNode<Object> value);
void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
int slot_index,
SloppyTNode<Object> value);
TNode<Context> LoadNativeContext(SloppyTNode<Context> context);
// Calling this is only valid if there's a module context in the chain.
TNode<Context> LoadModuleContext(SloppyTNode<Context> context);
// Jumps to |if_equal| when |value| is pointer-equal to the element stored
// at |slot_index| of |native_context|; otherwise falls through.
void GotoIfContextElementEqual(Node* value, Node* native_context,
                               int slot_index, Label* if_equal) {
  TNode<Object> slot_value = LoadContextElement(native_context, slot_index);
  GotoIf(WordEqual(value, slot_value), if_equal);
}
TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
SloppyTNode<Context> native_context);
TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
SloppyTNode<Context> native_context);
TNode<BoolT> IsGeneratorFunction(TNode<JSFunction> function);
TNode<BoolT> HasPrototypeProperty(TNode<JSFunction> function, TNode<Map> map);
void GotoIfPrototypeRequiresRuntimeLookup(TNode<JSFunction> function,
TNode<Map> map, Label* runtime);
// Load the "prototype" property of a JSFunction.
Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
SloppyTNode<SharedFunctionInfo> shared);
// Loads the raw prototype-or-initial-map field of |function|.
TNode<Object> LoadJSFunctionPrototypeOrInitialMap(
    TNode<JSFunction> function) {
  return LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
}
// Loads the SharedFunctionInfo field of |function|.
TNode<SharedFunctionInfo> LoadJSFunctionSharedFunctionInfo(
    TNode<JSFunction> function) {
  return CAST(
      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
}
// Loads the formal parameter count of |function| as an Int32. The field is
// read as Uint16, matching its storage width in SharedFunctionInfo.
TNode<Int32T> LoadSharedFunctionInfoFormalParameterCount(
    TNode<SharedFunctionInfo> function) {
  return TNode<Int32T>::UncheckedCast(LoadObjectField(
      function, SharedFunctionInfo::kFormalParameterCountOffset,
      MachineType::Uint16()));
}
void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
TNode<Word32T> value);
// Store the floating point value of a HeapNumber.
void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
SloppyTNode<Float64T> value);
void StoreMutableHeapNumberValue(SloppyTNode<MutableHeapNumber> object,
SloppyTNode<Float64T> value);
// Store a field to an object on the heap.
Node* StoreObjectField(Node* object, int offset, Node* value);
Node* StoreObjectField(Node* object, Node* offset, Node* value);
Node* StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
Node* StoreObjectFieldNoWriteBarrier(
Node* object, Node* offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
// Typed wrapper over the untyped no-write-barrier store: derives the
// machine representation from T and forwards to the Node* overload.
template <class T = Object>
TNode<T> StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
                                        TNode<IntPtrT> offset,
                                        TNode<T> value) {
  return UncheckedCast<T>(StoreObjectFieldNoWriteBarrier(
      object, offset, value, MachineRepresentationOf<T>::value));
}
// Store the Map of an HeapObject.
Node* StoreMap(Node* object, Node* map);
Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
Node* StoreMapNoWriteBarrier(Node* object, Node* map);
Node* StoreObjectFieldRoot(Node* object, int offset, RootIndex root);
// Store an array element to a FixedArray.
// Constant-index convenience overload: wraps |index| in an IntPtr constant
// and forwards to the Node*-index overload.
void StoreFixedArrayElement(
    TNode<FixedArray> object, int index, SloppyTNode<Object> value,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
  return StoreFixedArrayElement(object, IntPtrConstant(index), value,
                                barrier_mode);
}
// Constant-index store of a Smi. The write barrier is skipped because the
// value is a Smi, not a heap pointer.
void StoreFixedArrayElement(TNode<FixedArray> object, int index,
                            TNode<Smi> value) {
  return StoreFixedArrayElement(object, IntPtrConstant(index), value,
                                SKIP_WRITE_BARRIER);
}
Node* StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
Node* StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);
void StoreFixedArrayOrPropertyArrayElement(
Node* array, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Bounds-checked store into a FixedArray: verifies |index| (plus
// |additional_offset|) against the array before delegating to the shared
// FixedArray/PropertyArray store helper.
void StoreFixedArrayElement(
    TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
    int additional_offset = 0,
    ParameterMode parameter_mode = INTPTR_PARAMETERS) {
  FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode);
  StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
                                        additional_offset, parameter_mode);
}
// Stores |value| at |index| of a PropertyArray via the shared
// FixedArray/PropertyArray store helper (no bounds check here).
void StorePropertyArrayElement(
    TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
    int additional_offset = 0,
    ParameterMode parameter_mode = INTPTR_PARAMETERS) {
  StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
                                        additional_offset, parameter_mode);
}
// Smi-index store: forwards in SMI_PARAMETERS mode with no extra offset.
void StoreFixedArrayElementSmi(
    TNode<FixedArray> array, TNode<Smi> index, TNode<Object> value,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
  StoreFixedArrayElement(array, index, value, barrier_mode, 0,
                         SMI_PARAMETERS);
}
// IntPtrT-index store of a Smi; skips the write barrier since the value is
// a Smi, not a heap pointer.
void StoreFixedArrayElement(TNode<FixedArray> array, TNode<IntPtrT> index,
                            TNode<Smi> value) {
  StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0);
}
void StoreFixedDoubleArrayElement(
TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Smi-index overload for storing a Float64 element.
void StoreFixedDoubleArrayElementSmi(TNode<FixedDoubleArray> object,
                                     TNode<Smi> index,
                                     TNode<Float64T> value) {
  StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS);
}
void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array, Node* index,
ParameterMode mode = INTPTR_PARAMETERS);
// Smi-index overload for writing the double hole into |array|.
void StoreFixedDoubleArrayHoleSmi(TNode<FixedDoubleArray> array,
                                  TNode<Smi> index) {
  StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS);
}
Node* StoreFeedbackVectorSlot(
Node* object, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
void EnsureArrayLengthWritable(TNode<Map> map, Label* bailout);
// EnsureArrayPushable verifies that receiver with this map is:
// 1. Is not a prototype.
// 2. Is not a dictionary.
// 3. Has a writeable length property.
// It returns ElementsKind as a node for further division into cases.
TNode<Int32T> EnsureArrayPushable(TNode<Map> map, Label* bailout);
void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
Label* bailout, Node* elements, Node* index,
Node* value);
// Consumes args into the array, and returns tagged new length.
TNode<Smi> BuildAppendJSArray(ElementsKind kind, SloppyTNode<JSArray> array,
CodeStubArguments* args,
TVariable<IntPtrT>* arg_index, Label* bailout);
// Pushes value onto the end of array.
void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
Label* bailout);
void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
Node* value);
Node* AllocateCellWithValue(Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Allocates a Cell holding Smi |value|; the write barrier is skipped
// because a Smi payload needs none.
Node* AllocateSmiCell(int value = 0) {
  return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER);
}
Node* LoadCellValue(Node* cell);
Node* StoreCellValue(Node* cell, Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Allocate a HeapNumber without initializing its value.
TNode<HeapNumber> AllocateHeapNumber();
// Allocate a HeapNumber with a specific value.
TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value);
// Convenience overload taking a C++ double constant.
TNode<HeapNumber> AllocateHeapNumberWithValue(double value) {
  return AllocateHeapNumberWithValue(Float64Constant(value));
}
// Allocate a MutableHeapNumber with a specific value.
TNode<MutableHeapNumber> AllocateMutableHeapNumberWithValue(
SloppyTNode<Float64T> value);
// Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
// Does not initialize the digits.
TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
// Like above, but allowing custom bitfield initialization.
TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<Word32T> bitfield);
void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
TNode<UintPtrT> digit);
TNode<Word32T> LoadBigIntBitfield(TNode<BigInt> bigint);
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
// Allocate a SeqOneByteString with the given length.
TNode<String> AllocateSeqOneByteString(uint32_t length,
AllocationFlags flags = kNone);
TNode<String> AllocateSeqOneByteString(Node* context, TNode<Uint32T> length,
AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
TNode<String> AllocateSeqTwoByteString(uint32_t length,
AllocationFlags flags = kNone);
TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Uint32T> length,
AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
TNode<String> AllocateSlicedOneByteString(TNode<Uint32T> length,
TNode<String> parent,
TNode<Smi> offset);
// Allocate a SlicedTwoByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
TNode<String> AllocateSlicedTwoByteString(TNode<Uint32T> length,
TNode<String> parent,
TNode<Smi> offset);
// Allocate a one-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be one-byte strings.
TNode<String> AllocateOneByteConsString(TNode<Uint32T> length,
TNode<String> first,
TNode<String> second,
AllocationFlags flags = kNone);
// Allocate a two-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be two-byte strings.
TNode<String> AllocateTwoByteConsString(TNode<Uint32T> length,
TNode<String> first,
TNode<String> second,
AllocationFlags flags = kNone);
// Allocate an appropriate one- or two-byte ConsString with the first and
// second parts specified by |left| and |right|.
TNode<String> NewConsString(TNode<Uint32T> length, TNode<String> left,
TNode<String> right,
AllocationFlags flags = kNone);
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
TNode<IntPtrT> at_least_space_for);
TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
TNode<IntPtrT> capacity);
TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
Label* large_object_fallback);
template <typename CollectionType>
Node* AllocateOrderedHashTable();
// Builds code that finds OrderedHashTable entry for a key with hash code
// {hash} with using the comparison code generated by {key_compare}. The code
// jumps to {entry_found} if the key is found, or to {not_found} if the key
// was not found. In the {entry_found} branch, the variable
// entry_start_position will be bound to the index of the entry (relative to
// OrderedHashTable::kHashTableStartIndex).
//
// The {CollectionType} template parameter stands for the particular instance
// of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet.
template <typename CollectionType>
void FindOrderedHashTableEntry(
Node* table, Node* hash,
const std::function<void(Node*, Label*, Label*)>& key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found);
template <typename CollectionType>
TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
void InitializeStructBody(Node* object, Node* map, Node* size,
int start_offset = Struct::kHeaderSize);
Node* AllocateJSObjectFromMap(
Node* map, Node* properties = nullptr, Node* elements = nullptr,
AllocationFlags flags = kNone,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
Node* elements = nullptr,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
Node* instance_size);
void InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size,
int start_offset = JSObject::kHeaderSize);
TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
ParameterMode capacity_mode);
// Allocate a JSArray without elements and initialize the header fields.
TNode<JSArray> AllocateUninitializedJSArrayWithoutElements(
TNode<Map> array_map, TNode<Smi> length, Node* allocation_site = nullptr);
//
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
// The ParameterMode argument is only used for the capacity parameter.
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
Node* allocation_site, Node* capacity,
ParameterMode capacity_mode = INTPTR_PARAMETERS,
AllocationFlags allocation_flags = kNone);
// Allocate a JSArray and fill elements with the hole.
// The ParameterMode argument is only used for the capacity parameter.
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, Node* capacity,
TNode<Smi> length, Node* allocation_site = nullptr,
ParameterMode capacity_mode = INTPTR_PARAMETERS,
AllocationFlags allocation_flags = kNone);
// Smi-capacity overload: allocates a JSArray with no allocation site.
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
                               TNode<Smi> capacity, TNode<Smi> length) {
  return AllocateJSArray(kind, array_map, capacity, length, nullptr,
                         SMI_PARAMETERS);
}
// IntPtrT-capacity overload: allocates a JSArray with no allocation site.
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
                               TNode<IntPtrT> capacity, TNode<Smi> length) {
  return AllocateJSArray(kind, array_map, capacity, length, nullptr,
                         INTPTR_PARAMETERS);
}
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
// Clone a fast JSArray |array| into a new fast JSArray.
// |convert_holes| tells the function to convert holes into undefined or not.
// If |convert_holes| is set to kConvertToUndefined, but the function did not
// find any hole in |array|, the resulting array will have the same elements
// kind as |array|. If the function did find a hole, it will convert holes in
// |array| to undefined in the resulting array, which will now have
// PACKED_ELEMENTS kind.
// If |convert_holes| is set kDontConvert, holes are also copied to the
// resulting array, which will have the same elements kind as |array|. The
// function generates significantly less code in this case.
Node* CloneFastJSArray(
Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS,
Node* allocation_site = nullptr,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count,
ParameterMode mode = INTPTR_PARAMETERS,
Node* capacity = nullptr,
Node* allocation_site = nullptr);
TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone,
SloppyTNode<Map> fixed_array_map = nullptr);
// IntPtrT-capacity convenience overload: forwards in intptr mode.
TNode<FixedArrayBase> AllocateFixedArray(
    ElementsKind kind, TNode<IntPtrT> capacity, AllocationFlags flags,
    SloppyTNode<Map> fixed_array_map = nullptr) {
  return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags,
                            fixed_array_map);
}
// Allocates a PACKED_ELEMENTS FixedArray of |capacity| slots (large-object
// allocation permitted) and clears every slot to Smi zero.
TNode<FixedArray> AllocateZeroedFixedArray(TNode<IntPtrT> capacity) {
  TNode<FixedArrayBase> raw =
      AllocateFixedArray(PACKED_ELEMENTS, capacity,
                         AllocationFlag::kAllowLargeObjectAllocation);
  TNode<FixedArray> array = UncheckedCast<FixedArray>(raw);
  FillFixedArrayWithSmiZero(array, capacity);
  return array;
}
// Allocates a PACKED_DOUBLE_ELEMENTS FixedDoubleArray of |capacity| slots
// (large-object allocation permitted) and zeroes all of its doubles.
TNode<FixedDoubleArray> AllocateZeroedFixedDoubleArray(
    TNode<IntPtrT> capacity) {
  TNode<FixedArrayBase> raw =
      AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity,
                         AllocationFlag::kAllowLargeObjectAllocation);
  TNode<FixedDoubleArray> array = UncheckedCast<FixedDoubleArray>(raw);
  FillFixedDoubleArrayWithZero(array, capacity);
  return array;
}
// Allocates a PACKED_ELEMENTS FixedArray of |capacity| slots and fills the
// whole array with the hole value.
TNode<FixedArray> AllocateFixedArrayWithHoles(TNode<IntPtrT> capacity,
                                              AllocationFlags flags) {
  TNode<FixedArray> array = UncheckedCast<FixedArray>(
      AllocateFixedArray(PACKED_ELEMENTS, capacity, flags));
  FillFixedArrayWithValue(PACKED_ELEMENTS, array, IntPtrConstant(0), capacity,
                          RootIndex::kTheHoleValue);
  return array;
}
Node* AllocatePropertyArray(Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
// Perform CreateArrayIterator (ES #sec-createarrayiterator).
TNode<JSArrayIterator> CreateArrayIterator(TNode<Context> context,
TNode<Object> object,
IterationKind mode);
Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
TNode<Object> originalArray,
TNode<Number> len);
TNode<JSReceiver> InternalArrayCreate(TNode<Context> context,
TNode<Number> len);
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
Node* to_index, RootIndex value_root_index,
ParameterMode mode = INTPTR_PARAMETERS);
// Uses memset to effectively initialize the given FixedArray with zeroes.
void FillFixedArrayWithSmiZero(TNode<FixedArray> array,
TNode<IntPtrT> length);
void FillFixedDoubleArrayWithZero(TNode<FixedDoubleArray> array,
TNode<IntPtrT> length);
void FillPropertyArrayWithUndefined(Node* array, Node* from_index,
Node* to_index,
ParameterMode mode = INTPTR_PARAMETERS);
enum class DestroySource { kNo, kYes };
// Specify DestroySource::kYes if {from_array} is being supplanted by
// {to_array}. This offers a slight performance benefit by simply copying the
// array word by word. The source may be destroyed at the end of this macro.
//
// Otherwise, specify DestroySource::kNo for operations where an Object is
// being cloned, to ensure that MutableHeapNumbers are unique between the
// source and cloned object.
void CopyPropertyArrayValues(Node* from_array, Node* to_array, Node* length,
WriteBarrierMode barrier_mode,
ParameterMode mode,
DestroySource destroy_source);
// Copies all elements from |from_array| of |length| size to
// |to_array| of the same size respecting the elements kind.
// Same-kind copy of |length| elements starting at element zero; destination
// capacity equals |length|.
void CopyFixedArrayElements(
    ElementsKind kind, Node* from_array, Node* to_array, Node* length,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
    ParameterMode mode = INTPTR_PARAMETERS) {
  CopyFixedArrayElements(kind, from_array, kind, to_array,
                         IntPtrOrSmiConstant(0, mode), length, length,
                         barrier_mode, mode);
}
// Copies |element_count| elements from |from_array| starting from element
// zero to |to_array| of |capacity| size respecting both array's elements
// kinds.
// Kind-converting copy starting at element zero; forwards to the full
// overload with a zero first-element index.
void CopyFixedArrayElements(
    ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
    Node* to_array, Node* element_count, Node* capacity,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
    ParameterMode mode = INTPTR_PARAMETERS) {
  CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
                         IntPtrOrSmiConstant(0, mode), element_count,
                         capacity, barrier_mode, mode);
}
// Copies |element_count| elements from |from_array| starting from element
// |first_element| to |to_array| of |capacity| size respecting both array's
// elements kinds.
// |convert_holes| tells the function whether to convert holes to undefined.
// |var_holes_converted| can be used to signify that the conversion happened
// (i.e. that there were holes). If |convert_holes_to_undefined| is
// HoleConversionMode::kConvertToUndefined, then it must not be the case that
// IsDoubleElementsKind(to_kind).
void CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* first_element, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
TVariable<BoolT>* var_holes_converted = nullptr);
// Smi-typed overload of the full copy: forwards in SMI_PARAMETERS mode.
void CopyFixedArrayElements(
    ElementsKind from_kind, TNode<FixedArrayBase> from_array,
    ElementsKind to_kind, TNode<FixedArrayBase> to_array,
    TNode<Smi> first_element, TNode<Smi> element_count, TNode<Smi> capacity,
    WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
  CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
                         first_element, element_count, capacity, barrier_mode,
                         SMI_PARAMETERS);
}
void JumpIfPointersFromHereAreInteresting(TNode<Object> object,
Label* interesting);
// Efficiently copy elements within a single array. The regions
// [src_index, src_index + length) and [dst_index, dst_index + length)
// can be overlapping.
void MoveElements(ElementsKind kind, TNode<FixedArrayBase> elements,
TNode<IntPtrT> dst_index, TNode<IntPtrT> src_index,
TNode<IntPtrT> length);
// Efficiently copy elements from one array to another. The ElementsKind
// needs to be the same. Copy from src_elements at
// [src_index, src_index + length) to dst_elements at
// [dst_index, dst_index + length).
// The function decides whether it can use memcpy. In case it cannot,
// |write_barrier| can help it to skip write barrier. SKIP_WRITE_BARRIER is
// only safe when copying to new space, or when copying to old space and the
// array does not contain object pointers.
void CopyElements(ElementsKind kind, TNode<FixedArrayBase> dst_elements,
TNode<IntPtrT> dst_index,
TNode<FixedArrayBase> src_elements,
TNode<IntPtrT> src_index, TNode<IntPtrT> length,
WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER);
TNode<FixedArray> HeapObjectToFixedArray(TNode<HeapObject> base,
Label* cast_fail);
// Casts |base| to FixedDoubleArray, jumping to |cast_fail| when its map is
// not the FixedDoubleArray map.
TNode<FixedDoubleArray> HeapObjectToFixedDoubleArray(TNode<HeapObject> base,
                                                     Label* cast_fail) {
  Node* map = LoadMap(base);
  GotoIf(WordNotEqual(map, LoadRoot(RootIndex::kFixedDoubleArrayMap)),
         cast_fail);
  return UncheckedCast<FixedDoubleArray>(base);
}
// Casts |base| to a sloppy-arguments elements FixedArray, jumping to
// |cast_fail| when its map is not the SloppyArgumentsElements map.
TNode<FixedArray> HeapObjectToSloppyArgumentsElements(TNode<HeapObject> base,
                                                      Label* cast_fail) {
  Node* map = LoadMap(base);
  GotoIf(WordNotEqual(map, LoadRoot(RootIndex::kSloppyArgumentsElementsMap)),
         cast_fail);
  return UncheckedCast<FixedArray>(base);
}
// Reinterprets an elements-kind value as a plain Int32 (no-op cast).
TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
  return UncheckedCast<Int32T>(elements_kind);
}
// Bit flags controlling which source-array types ExtractFixedArray handles
// and how it allocates/copies (combined via base::Flags below).
enum class ExtractFixedArrayFlag {
  kFixedArrays = 1,
  kFixedDoubleArrays = 2,
  kDontCopyCOW = 4,
  kNewSpaceAllocationOnly = 8,
  kAllFixedArrays = kFixedArrays | kFixedDoubleArrays,
  kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW
};
typedef base::Flags<ExtractFixedArrayFlag> ExtractFixedArrayFlags;
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
// array, including special appropriate handling for empty arrays and COW
// arrays. The result array will be of the same type as the original array.
//
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
// elements.
// * |first| is the starting element index to copy from, if nullptr is passed
// then index zero is used by default.
// * |count| is the number of elements to copy out of the source array
// starting from and including the element indexed by |first|. If |count| is
// nullptr, then all of the elements from |start| to the end of |source| are
// copied.
// * |capacity| determines the size of the allocated result array, with
// |capacity| >= |count|. If |capacity| is nullptr, then |count| is used as
// the destination array's capacity.
// * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both
// are detected and copied. Although it's always correct to pass
// kAllFixedArrays, the generated code is more compact and efficient if the
// caller can specify whether only FixedArrays or FixedDoubleArrays will be
// passed as the |source| parameter.
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
// * If |var_holes_converted| is given, any holes will be converted to
// undefined and the variable will be set according to whether or not there
// were any holes.
// * If |source_elements_kind| is given, the function will try to use the
// runtime elements kind of source to make copy faster. More specifically, it
// can skip write barriers.
TNode<FixedArrayBase> ExtractFixedArray(
Node* source, Node* first, Node* count = nullptr,
Node* capacity = nullptr,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
TVariable<BoolT>* var_holes_converted = nullptr,
Node* source_elements_kind = nullptr);
// Smi-typed overload: forwards to the Node*-based ExtractFixedArray in
// SMI_PARAMETERS mode.
TNode<FixedArrayBase> ExtractFixedArray(
    TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
    TNode<Smi> capacity,
    ExtractFixedArrayFlags extract_flags =
        ExtractFixedArrayFlag::kAllFixedArrays) {
  return ExtractFixedArray(source, first, count, capacity, extract_flags,
                           SMI_PARAMETERS);
}
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
// FixedArray, including special appropriate handling for COW arrays.
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
// elements. |source| is assumed to be non-empty.
// * |first| is the starting element index to copy from.
// * |count| is the number of elements to copy out of the source array
// starting from and including the element indexed by |first|.
// * |capacity| determines the size of the allocated result array, with
// |capacity| >= |count|.
// * |source_map| is the map of the |source|.
// * |from_kind| is the elements kind that is consistent with |source| being
// a FixedArray or FixedDoubleArray. This function only cares about double vs.
// non-double, so as to distinguish FixedDoubleArray vs. FixedArray. It does
// not care about holeyness. For example, when |source| is a FixedArray,
// PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS.
// * |allocation_flags| and |extract_flags| influence how the target
// FixedArray is allocated.
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
// * |convert_holes| is used to signify that the target array should use
// undefined in places of holes.
// * If |convert_holes| is true and |var_holes_converted| not nullptr, then
// |var_holes_converted| is used to signal whether any holes were found and
// converted. The caller should use this information to decide which map is
// compatible with the result array. For example, if the input was of
// HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be
// compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS.
TNode<FixedArray> ExtractToFixedArray(
Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
ElementsKind from_kind = PACKED_ELEMENTS,
AllocationFlags allocation_flags = AllocationFlag::kNone,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
TVariable<BoolT>* var_holes_converted = nullptr,
Node* source_runtime_kind = nullptr);
// Attempt to copy a FixedDoubleArray to another FixedDoubleArray. In the case
// where the source array has a hole, produce a FixedArray instead where holes
// are replaced with undefined.
// * |source| is a FixedDoubleArray from which to copy elements.
// * |first| is the starting element index to copy from.
// * |count| is the number of elements to copy out of the source array
// starting from and including the element indexed by |first|.
// * |capacity| determines the size of the allocated result array, with
// |capacity| >= |count|.
// * |source_map| is the map of |source|. It will be used as the map of the
// target array if the target can stay a FixedDoubleArray. Otherwise if the
// target array needs to be a FixedArray, the FixedArrayMap will be used.
// * |var_holes_converted| is used to signal whether a FixedArray
// is produced or not.
// * |allocation_flags| and |extract_flags| influence how the target array is
// allocated.
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
TNode<FixedArrayBase> ExtractFixedDoubleArrayFillingHoles(
Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
TVariable<BoolT>* var_holes_converted, AllocationFlags allocation_flags,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Copy the entire contents of a FixedArray or FixedDoubleArray to a new
// array, including special appropriate handling for empty arrays and COW
// arrays.
//
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
// elements.
// * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both
// are detected and copied. Although it's always correct to pass
// kAllFixedArrays, the generated code is more compact and efficient if the
// caller can specify whether only FixedArrays or FixedDoubleArrays will be
// passed as the |source| parameter.
Node* CloneFixedArray(Node* source,
                      ExtractFixedArrayFlags flags =
                          ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
  // Copy from element 0; nullptr for |count| and |capacity| means "use the
  // source array's own length", i.e. clone the entire array.
  ParameterMode mode = OptimalParameterMode();
  Node* const first_element = IntPtrOrSmiConstant(0, mode);
  return ExtractFixedArray(source, first_element, nullptr, nullptr, flags,
                           mode);
}
// Copies |character_count| elements from |from_string| to |to_string|
// starting at the |from_index|'th character. |from_string| and |to_string|
// can either be one-byte strings or two-byte strings, although if
// |from_string| is two-byte, then |to_string| must be two-byte.
// |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <=
// |from_index| <= |from_index| + |character_count| <= from_string.length and
// 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length.
void CopyStringCharacters(Node* from_string, Node* to_string,
TNode<IntPtrT> from_index, TNode<IntPtrT> to_index,
TNode<IntPtrT> character_count,
String::Encoding from_encoding,
String::Encoding to_encoding);
// Loads an element from |array| of |from_kind| elements by given |offset|
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
// |to_kind| elements.
Node* LoadElementAndPrepareForStore(Node* array, Node* offset,
ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole);
Node* CalculateNewElementsCapacity(Node* old_capacity,
ParameterMode mode = INTPTR_PARAMETERS);
TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
  // Delegate to the untyped overload in Smi mode, then re-attach the Smi
  // type to the result.
  Node* const new_capacity =
      CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS);
  return CAST(new_capacity);
}
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
Node* key, Label* bailout);
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
Node* key, Node* capacity, ParameterMode mode,
Label* bailout);
// Grows elements capacity of given object. Returns new elements.
Node* GrowElementsCapacity(Node* object, Node* elements,
ElementsKind from_kind, ElementsKind to_kind,
Node* capacity, Node* new_capacity,
ParameterMode mode, Label* bailout);
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
// is followed for allocation failure.
void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind,
Node* array, Node* length,
Variable* var_elements, Node* growth,
Label* bailout);
// Allocation site manipulation
void InitializeAllocationMemento(Node* base_allocation,
Node* base_allocation_size,
Node* allocation_site);
Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
Node* TruncateTaggedToFloat64(Node* context, Node* value);
Node* TruncateTaggedToWord32(Node* context, Node* value);
void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number,
Variable* var_word32, Label* if_bigint,
Variable* var_bigint);
void TaggedToWord32OrBigIntWithFeedback(
Node* context, Node* value, Label* if_number, Variable* var_word32,
Label* if_bigint, Variable* var_bigint, Variable* var_feedback);
// Truncate the floating point value of a HeapNumber to an Int32.
Node* TruncateHeapNumberValueToWord32(Node* object);
// Conversions.
void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>& output,
Label* if_smi);
void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>& output,
Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value);
void TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric);
void TaggedToNumericWithFeedback(Node* context, Node* value, Label* done,
Variable* var_numeric,
Variable* var_feedback);
TNode<WordT> TimesPointerSize(SloppyTNode<WordT> value);
TNode<IntPtrT> TimesPointerSize(TNode<IntPtrT> value) {
  // Scale via the generic WordT overload, then restore the signed type.
  TNode<WordT> const scaled =
      TimesPointerSize(implicit_cast<TNode<WordT>>(value));
  return Signed(scaled);
}
TNode<UintPtrT> TimesPointerSize(TNode<UintPtrT> value) {
  // Scale via the generic WordT overload, then restore the unsigned type.
  TNode<WordT> const scaled =
      TimesPointerSize(implicit_cast<TNode<WordT>>(value));
  return Unsigned(scaled);
}
TNode<WordT> TimesDoubleSize(SloppyTNode<WordT> value);
TNode<UintPtrT> TimesDoubleSize(TNode<UintPtrT> value) {
  // Scale via the generic WordT overload, then restore the unsigned type.
  TNode<WordT> const scaled =
      TimesDoubleSize(implicit_cast<TNode<WordT>>(value));
  return Unsigned(scaled);
}
TNode<IntPtrT> TimesDoubleSize(TNode<IntPtrT> value) {
  // Scale via the generic WordT overload, then restore the signed type.
  TNode<WordT> const scaled =
      TimesDoubleSize(implicit_cast<TNode<WordT>>(value));
  return Signed(scaled);
}
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
// or returns the {value} converted to a String otherwise.
TNode<String> ToThisString(Node* context, Node* value,
char const* method_name);
// Throws a TypeError for {method_name} if {value} is neither of the given
// {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
// returns the {value} (or wrapped value) otherwise.
Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
char const* method_name);
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
Node* ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name);
// Throws a TypeError for {method_name} if {value} is not a JSReceiver.
// Returns the {value}'s map.
Node* ThrowIfNotJSReceiver(Node* context, Node* value,
MessageTemplate msg_template,
const char* method_name = nullptr);
void ThrowRangeError(Node* context, MessageTemplate message,
Node* arg0 = nullptr, Node* arg1 = nullptr,
Node* arg2 = nullptr);
void ThrowTypeError(Node* context, MessageTemplate message,
char const* arg0 = nullptr, char const* arg1 = nullptr);
void ThrowTypeError(Node* context, MessageTemplate message, Node* arg0,
Node* arg1 = nullptr, Node* arg2 = nullptr);
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
TNode<BoolT> InstanceTypeEqual(SloppyTNode<Int32T> instance_type, int type);
TNode<BoolT> IsAccessorInfo(SloppyTNode<HeapObject> object);
TNode<BoolT> IsAccessorPair(SloppyTNode<HeapObject> object);
TNode<BoolT> IsAllocationSite(SloppyTNode<HeapObject> object);
TNode<BoolT> IsAnyHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsBigInt(SloppyTNode<HeapObject> object);
TNode<BoolT> IsBoolean(SloppyTNode<HeapObject> object);
TNode<BoolT> IsCallableMap(SloppyTNode<Map> map);
TNode<BoolT> IsCallable(SloppyTNode<HeapObject> object);
TNode<BoolT> TaggedIsCallable(TNode<Object> object);
TNode<BoolT> IsCell(SloppyTNode<HeapObject> object);
TNode<BoolT> IsCode(SloppyTNode<HeapObject> object);
TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsFeedbackCell(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFeedbackVector(SloppyTNode<HeapObject> object);
TNode<BoolT> IsContext(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArrayWithKind(SloppyTNode<HeapObject> object,
ElementsKind kind);
TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<HeapObject> object,
ElementsKind kind);
TNode<BoolT> IsFixedDoubleArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedTypedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSPromiseMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSRegExp(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSValueInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSValueMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSValue(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMutableHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsNativeContext(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNullOrJSReceiver(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
TNode<BoolT> IsNumberDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> HasOnlyOneByteChars(TNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPromiseCapability(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyCell(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPrototypeInitialArrayPrototype(SloppyTNode<Context> context,
SloppyTNode<Map> map);
TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
SloppyTNode<Map> map);
TNode<BoolT> IsFastAliasedArgumentsMap(TNode<Context> context,
TNode<Map> map);
TNode<BoolT> IsSlowAliasedArgumentsMap(TNode<Context> context,
TNode<Map> map);
TNode<BoolT> IsSloppyArgumentsMap(TNode<Context> context, TNode<Map> map);
TNode<BoolT> IsStrictArgumentsMap(TNode<Context> context, TNode<Map> map);
TNode<BoolT> IsSequentialStringInstanceType(
SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsUncachedExternalStringInstanceType(
SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverMap(SloppyTNode<Map> map);
// Returns true if the map corresponds to non-special fast or dictionary
// object.
TNode<BoolT> IsSimpleObjectMap(TNode<Map> map);
TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsUndetectableMap(SloppyTNode<Map> map);
TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
// True iff |object|'s map is the SharedFunctionInfo map.
inline Node* IsSharedFunctionInfo(Node* object) {
  Node* const map = LoadMap(object);
  return IsSharedFunctionInfoMap(map);
}
TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
// True iff |object| is a Smi or a HeapNumber.
TNode<BoolT> IsNumber(SloppyTNode<Object> object);
// True iff |object| is a Smi or a HeapNumber or a BigInt.
TNode<BoolT> IsNumeric(SloppyTNode<Object> object);
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
TNode<BoolT> IsNumberNormalized(SloppyTNode<Number> number);
TNode<BoolT> IsNumberPositive(SloppyTNode<Number> number);
TNode<BoolT> IsHeapNumberPositive(TNode<HeapNumber> number);
// True iff {number} is non-negative and less or equal than 2**53-1.
TNode<BoolT> IsNumberNonNegativeSafeInteger(TNode<Number> number);
// True iff {number} represents an integer value.
TNode<BoolT> IsInteger(TNode<Object> number);
TNode<BoolT> IsInteger(TNode<HeapNumber> number);
// True iff abs({number}) <= 2**53 -1
TNode<BoolT> IsSafeInteger(TNode<Object> number);
TNode<BoolT> IsSafeInteger(TNode<HeapNumber> number);
// True iff {number} represents a valid uint32t value.
TNode<BoolT> IsHeapNumberUint32(TNode<HeapNumber> number);
// True iff {number} is a positive number and a valid array index in the range
// [0, 2^32-1).
TNode<BoolT> IsNumberArrayIndex(TNode<Number> number);
Node* FixedArraySizeDoesntFitInNewSpace(
Node* element_count, int base_size = FixedArray::kHeaderSize,
ParameterMode mode = INTPTR_PARAMETERS);
// ElementsKind helpers:
// Runtime comparison of two ElementsKind values held in int32 nodes.
TNode<BoolT> ElementsKindEqual(TNode<Int32T> a, TNode<Int32T> b) {
  // ElementsKind values are plain int32s, so a word32 compare suffices.
  return Word32Equal(a, b);
}
// Compile-time counterpart for statically known kinds.
bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; }
// Runtime check of a fast (non-dictionary) elements kind node.
Node* IsFastElementsKind(Node* elements_kind);
// Compile-time variant; defers to the shared predicate in v8::internal.
bool IsFastElementsKind(ElementsKind kind) {
  return v8::internal::IsFastElementsKind(kind);
}
// True iff |elements_kind| is exactly DICTIONARY_ELEMENTS.
TNode<BoolT> IsDictionaryElementsKind(TNode<Int32T> elements_kind) {
  return ElementsKindEqual(elements_kind, Int32Constant(DICTIONARY_ELEMENTS));
}
TNode<BoolT> IsDoubleElementsKind(TNode<Int32T> elements_kind);
// Compile-time variant; defers to the shared predicate in v8::internal.
bool IsDoubleElementsKind(ElementsKind kind) {
  return v8::internal::IsDoubleElementsKind(kind);
}
Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
Node* IsFastSmiElementsKind(Node* elements_kind);
Node* IsHoleyFastElementsKind(Node* elements_kind);
Node* IsElementsKindGreaterThan(Node* target_kind,
ElementsKind reference_kind);
TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
ElementsKind reference_kind);
// String helpers.
// Load a character from a String (might flatten a ConsString).
TNode<Int32T> StringCharCodeAt(SloppyTNode<String> string,
SloppyTNode<IntPtrT> index);
// Return the single character string with only {code}.
TNode<String> StringFromSingleCharCode(TNode<Int32T> code);
// Return a new string object which holds a substring containing the range
// [from,to[ of string.
TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
TNode<IntPtrT> to);
// Return a new string object produced by concatenating |first| with |second|.
TNode<String> StringAdd(Node* context, TNode<String> first,
TNode<String> second, AllocationFlags flags = kNone);
// Check if |string| is an indirect (thin or flat cons) string type that can
// be dereferenced by DerefIndirectString.
void BranchIfCanDerefIndirectString(Node* string, Node* instance_type,
Label* can_deref, Label* cannot_deref);
// Unpack an indirect (thin or flat cons) string type.
void DerefIndirectString(Variable* var_string, Node* instance_type);
// Check if |var_string| has an indirect (thin or flat cons) string type,
// and unpack it if so.
void MaybeDerefIndirectString(Variable* var_string, Node* instance_type,
Label* did_deref, Label* cannot_deref);
// Check if |var_left| or |var_right| has an indirect (thin or flat cons)
// string type, and unpack it/them if so. Fall through if nothing was done.
void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type,
Variable* var_right, Node* right_instance_type,
Label* did_something);
Node* DerefIndirectString(TNode<String> string, TNode<Int32T> instance_type,
Label* cannot_deref);
TNode<String> StringFromSingleCodePoint(TNode<Int32T> codepoint,
UnicodeEncoding encoding);
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
TNode<Number> StringToNumber(TNode<String> input);
// Convert a Number to a String.
TNode<String> NumberToString(TNode<Number> input);
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
// Convert a Non-Number object to a Numeric.
TNode<Numeric> NonNumberToNumeric(SloppyTNode<Context> context,
SloppyTNode<HeapObject> input);
// Convert any object to a Number.
// Conforms to ES#sec-tonumber if {bigint_handling} == kThrow.
// With {bigint_handling} == kConvertToNumber, matches behavior of
// tc39.github.io/proposal-bigint/#sec-number-constructor-number-value.
TNode<Number> ToNumber(
SloppyTNode<Context> context, SloppyTNode<Object> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
TNode<Number> ToNumber_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers).
// https://tc39.github.io/proposal-bigint/#sec-to-bigint
TNode<BigInt> ToBigInt(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
// ES#sec-touint32
TNode<Number> ToUint32(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// Convert any object to a String.
TNode<String> ToString(SloppyTNode<Context> context,
SloppyTNode<Object> input);
TNode<String> ToString_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// Convert any object to a Primitive.
Node* JSReceiverToPrimitive(Node* context, Node* input);
TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// Same as ToObject but avoids the Builtin call if |input| is already a
// JSReceiver.
TNode<JSReceiver> ToObject_Inline(TNode<Context> context,
TNode<Object> input);
enum ToIntegerTruncationMode {
kNoTruncation,
kTruncateMinusZero,
};
// ES6 7.1.17 ToIndex, but jumps to range_error if the result is not a Smi.
TNode<Smi> ToSmiIndex(TNode<Object> input, TNode<Context> context,
Label* range_error);
// ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi.
TNode<Smi> ToSmiLength(TNode<Object> input, TNode<Context> context,
Label* range_error);
// ES6 7.1.15 ToLength, but with inlined fast path.
TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// ES6 7.1.4 ToInteger ( argument )
TNode<Number> ToInteger_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode = kNoTruncation);
TNode<Number> ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode = kNoTruncation);
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as an uint32 node.
template <typename BitField>
TNode<Uint32T> DecodeWord32(SloppyTNode<Word32T> word32) {
  // Shift/mask come from the BitField class definition.
  return DecodeWord32(word32, BitField::kShift, BitField::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as a word-size node.
template <typename BitField>
TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word) {
  // Shift/mask come from the BitField class definition.
  return DecodeWord(word, BitField::kShift, BitField::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as a word-size node.
template <typename BitField>
TNode<UintPtrT> DecodeWordFromWord32(SloppyTNode<Word32T> word32) {
  // Widen the 32-bit input to word size first, then decode.
  return DecodeWord<BitField>(ChangeUint32ToWord(word32));
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as an uint32 node.
template <typename BitField>
TNode<Uint32T> DecodeWord32FromWord(SloppyTNode<WordT> word) {
  // NOTE(review): truncation presumes the decoded field fits in 32 bits —
  // confirm against the BitField's width.
  return UncheckedCast<Uint32T>(
      TruncateIntPtrToInt32(Signed(DecodeWord<BitField>(word))));
}
// Decodes an unsigned (!) value from |word32| to an uint32 node.
TNode<Uint32T> DecodeWord32(SloppyTNode<Word32T> word32, uint32_t shift,
uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
uint32_t mask);
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
TNode<WordT> UpdateWord(TNode<WordT> word, TNode<WordT> value) {
  // Shift/mask come from the BitField class definition.
  return UpdateWord(word, value, BitField::kShift, BitField::kMask);
}
// Returns a node that contains the updated {value} inside {word} starting
// at {shift} and fitting in {mask}.
TNode<WordT> UpdateWord(TNode<WordT> word, TNode<WordT> value, uint32_t shift,
uint32_t mask);
// Returns true if any of the |T|'s bits in given |word32| are set.
template <typename T>
TNode<BoolT> IsSetWord32(SloppyTNode<Word32T> word32) {
  // Delegates with the bit field's mask.
  return IsSetWord32(word32, T::kMask);
}
// Returns true if any of the mask's bits in given |word32| are set.
// True iff (|word32| & |mask|) != 0, i.e. at least one mask bit is set.
TNode<BoolT> IsSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
  TNode<Word32T> const masked = Word32And(word32, Int32Constant(mask));
  return Word32NotEqual(masked, Int32Constant(0));
}
// Returns true if none of the mask's bits in given |word32| are set.
// True iff (|word32| & |mask|) == 0, i.e. no mask bit is set.
TNode<BoolT> IsNotSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
  TNode<Word32T> const masked = Word32And(word32, Int32Constant(mask));
  return Word32Equal(masked, Int32Constant(0));
}
// Returns true if all of the mask's bits in a given |word32| are set.
// True iff every bit of |mask| is set in |word32|.
TNode<BoolT> IsAllSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
  TNode<Int32T> const expected_bits = Int32Constant(mask);
  TNode<Word32T> const actual_bits = Word32And(word32, expected_bits);
  return Word32Equal(actual_bits, expected_bits);
}
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
TNode<BoolT> IsSetWord(SloppyTNode<WordT> word) {
  // Delegates with the bit field's mask.
  return IsSetWord(word, T::kMask);
}
// Returns true if any of the mask's bits in given |word| are set.
// True iff (|word| & |mask|) != 0, i.e. at least one mask bit is set.
TNode<BoolT> IsSetWord(SloppyTNode<WordT> word, uint32_t mask) {
  TNode<WordT> const masked = WordAnd(word, IntPtrConstant(mask));
  return WordNotEqual(masked, IntPtrConstant(0));
}
// Returns true if any of the mask's bit are set in the given Smi.
// Smi-encoding of the mask is performed implicitly!
TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) {
  // Tag the mask exactly like the Smi payload is tagged, then test the bits
  // directly on the word representation — this avoids untagging |smi|.
  intptr_t const tagged_mask = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
  TNode<WordT> const masked_bits =
      WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(tagged_mask));
  return WordNotEqual(masked_bits, IntPtrConstant(0));
}
// Returns true if all of the |T|'s bits in given |word32| are clear.
template <typename T>
TNode<BoolT> IsClearWord32(SloppyTNode<Word32T> word32) {
  // Delegates with the bit field's mask.
  return IsClearWord32(word32, T::kMask);
}
// Returns true if all of the mask's bits in given |word32| are clear.
// True iff none of the bits in |mask| are set in |word32|.
TNode<BoolT> IsClearWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
  TNode<Word32T> const intersection = Word32And(word32, Int32Constant(mask));
  return Word32Equal(intersection, Int32Constant(0));
}
// Returns true if all of the |T|'s bits in given |word| are clear.
template <typename T>
TNode<BoolT> IsClearWord(SloppyTNode<WordT> word) {
  // Delegates with the bit field's mask.
  return IsClearWord(word, T::kMask);
}
// Returns true if all of the mask's bits in given |word| are clear.
// True iff none of the bits in |mask| are set in |word|.
TNode<BoolT> IsClearWord(SloppyTNode<WordT> word, uint32_t mask) {
  TNode<WordT> const intersection = WordAnd(word, IntPtrConstant(mask));
  return WordEqual(intersection, IntPtrConstant(0));
}
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int delta);
void DecrementCounter(StatsCounter* counter, int delta);
void Increment(Variable* variable, int value = 1,
ParameterMode mode = INTPTR_PARAMETERS);
// Decrements |variable| by |value| by delegating to Increment with the
// negated delta.
void Decrement(Variable* variable, int value = 1,
               ParameterMode mode = INTPTR_PARAMETERS) {
  Increment(variable, -value, mode);
}
// Generates "if (false) goto label" code. Useful for marking a label as
// "live" to avoid assertion failures during graph building. In the resulting
// code this check will be eliminated.
void Use(Label* label);
// Various building blocks for stubs doing property lookups.
// |if_notinternalized| is optional; |if_bailout| will be used by default.
void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
Label* if_keyisunique, Variable* var_unique, Label* if_bailout,
Label* if_notinternalized = nullptr);
// Performs a hash computation and string table lookup for the given string,
// and jumps to:
// - |if_index| if the string is an array index like "123"; |var_index|
// will contain the intptr representation of that index.
// - |if_internalized| if the string exists in the string table; the
// internalized version will be in |var_internalized|.
// - |if_not_internalized| if the string is not in the string table (but
// does not add it).
// - |if_bailout| for unsupported cases (e.g. uncachable array index).
void TryInternalizeString(Node* string, Label* if_index, Variable* var_index,
Label* if_internalized, Variable* var_internalized,
Label* if_not_internalized, Label* if_bailout);
// Calculates array index for given dictionary entry and entry field.
// See Dictionary::EntryToIndex().
template <typename Dictionary>
TNode<IntPtrT> EntryToIndex(TNode<IntPtrT> entry, int field_index);
template <typename Dictionary>
TNode<IntPtrT> EntryToIndex(TNode<IntPtrT> entry) {
  // Defaults to the key field of the entry.
  return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
}
// Loads the details for the entry with the given key_index.
// Returns an untagged int32.
template <class ContainerType>
TNode<Uint32T> LoadDetailsByKeyIndex(Node* container, Node* key_index) {
  static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
                "Use the non-templatized version for DescriptorArray");
  // Details sit at a fixed slot distance from the key within each entry;
  // convert that slot distance to a byte offset.
  const int kKeyToDetailsOffset =
      (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
      kPointerSize;
  return Unsigned(LoadAndUntagToWord32FixedArrayElement(
      CAST(container), key_index, kKeyToDetailsOffset));
}
// Loads the value for the entry with the given key_index.
// Returns a tagged value.
template <class ContainerType>
TNode<Object> LoadValueByKeyIndex(Node* container, Node* key_index) {
  static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
                "Use the non-templatized version for DescriptorArray");
  // The value slot sits at a fixed slot distance from the key within each
  // entry; convert that slot distance to a byte offset.
  const int kKeyToValueOffset =
      (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
      kPointerSize;
  return LoadFixedArrayElement(CAST(container), key_index, kKeyToValueOffset);
}
// Stores the details for the entry with the given key_index.
// |details| must be a Smi.
template <class ContainerType>
void StoreDetailsByKeyIndex(TNode<ContainerType> container,
                            TNode<IntPtrT> key_index, TNode<Smi> details) {
  // Details sit at a fixed slot distance from the key within each entry.
  const int kKeyToDetailsOffset =
      (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
      kPointerSize;
  // |details| is a Smi, so no write barrier is needed.
  StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER,
                         kKeyToDetailsOffset);
}
// Stores the value for the entry with the given key_index.
template <class ContainerType>
void StoreValueByKeyIndex(
    TNode<ContainerType> container, TNode<IntPtrT> key_index,
    TNode<Object> value,
    WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) {
  // The value slot sits at a fixed slot distance from the key within each
  // entry.
  const int kKeyToValueOffset =
      (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
      kPointerSize;
  StoreFixedArrayElement(container, key_index, value, write_barrier,
                         kKeyToValueOffset);
}
// Calculate a valid size for the a hash table.
TNode<IntPtrT> HashTableComputeCapacity(TNode<IntPtrT> at_least_space_for);
// Reads the dictionary's element count from its fixed header slot.
template <class Dictionary>
TNode<Smi> GetNumberOfElements(TNode<Dictionary> dictionary) {
  TNode<Object> const count =
      LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex);
  return CAST(count);
}
// Convenience wrapper over the templated element-count accessor.
TNode<Smi> GetNumberDictionaryNumberOfElements(
    TNode<NumberDictionary> dictionary) {
  return GetNumberOfElements<NumberDictionary>(dictionary);
}
// Writes the dictionary's element count into its fixed header slot.
template <class Dictionary>
void SetNumberOfElements(TNode<Dictionary> dictionary,
                         TNode<Smi> num_elements_smi) {
  // The value is a Smi, so no write barrier is needed.
  StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex,
                         num_elements_smi, SKIP_WRITE_BARRIER);
}
// Reads the dictionary's deleted-element count from its fixed header slot.
template <class Dictionary>
TNode<Smi> GetNumberOfDeletedElements(TNode<Dictionary> dictionary) {
  TNode<Object> const deleted = LoadFixedArrayElement(
      dictionary, Dictionary::kNumberOfDeletedElementsIndex);
  return CAST(deleted);
}
// Writes the dictionary's deleted-element count into its fixed header slot.
template <class Dictionary>
void SetNumberOfDeletedElements(TNode<Dictionary> dictionary,
                                TNode<Smi> num_deleted_smi) {
  // The value is a Smi, so no write barrier is needed.
  StoreFixedArrayElement(dictionary,
                         Dictionary::kNumberOfDeletedElementsIndex,
                         num_deleted_smi, SKIP_WRITE_BARRIER);
}
// Reads the dictionary's capacity from its fixed header slot.
template <class Dictionary>
TNode<Smi> GetCapacity(TNode<Dictionary> dictionary) {
  TNode<Object> const capacity =
      LoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex);
  return CAST(capacity);
}
// Reads the dictionary's next enumeration index from its fixed header slot.
template <class Dictionary>
TNode<Smi> GetNextEnumerationIndex(TNode<Dictionary> dictionary) {
  return CAST(LoadFixedArrayElement(dictionary,
                                    Dictionary::kNextEnumerationIndexIndex));
}
// Writes the dictionary's next enumeration index into its fixed header slot.
template <class Dictionary>
void SetNextEnumerationIndex(TNode<Dictionary> dictionary,
                             TNode<Smi> next_enum_index_smi) {
  // The value is a Smi, so no write barrier is needed.
  StoreFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex,
                         next_enum_index_smi, SKIP_WRITE_BARRIER);
}
// Looks up an entry in a NameDictionaryBase successor. If the entry is found
// control goes to {if_found} and {var_name_index} contains an index of the
// key field of the entry found. If the key is not found control goes to
// {if_not_found}.
static const int kInlinedDictionaryProbes = 4;
enum LookupMode { kFindExisting, kFindInsertionIndex };
template <typename Dictionary>
TNode<HeapObject> LoadName(TNode<HeapObject> key);
template <typename Dictionary>
void NameDictionaryLookup(TNode<Dictionary> dictionary,
TNode<Name> unique_name, Label* if_found,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found,
int inlined_probes = kInlinedDictionaryProbes,
LookupMode mode = kFindExisting);
Node* ComputeUnseededHash(Node* key);
Node* ComputeSeededHash(Node* key);
void NumberDictionaryLookup(TNode<NumberDictionary> dictionary,
TNode<IntPtrT> intptr_index, Label* if_found,
TVariable<IntPtrT>* var_entry,
Label* if_not_found);
TNode<Object> BasicLoadNumberDictionaryElement(
TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
Label* not_data, Label* if_hole);
void BasicStoreNumberDictionaryElement(TNode<NumberDictionary> dictionary,
TNode<IntPtrT> intptr_index,
TNode<Object> value, Label* not_data,
Label* if_hole, Label* read_only);
template <class Dictionary>
void FindInsertionEntry(TNode<Dictionary> dictionary, TNode<Name> key,
TVariable<IntPtrT>* var_key_index);
template <class Dictionary>
void InsertEntry(TNode<Dictionary> dictionary, TNode<Name> key,
TNode<Object> value, TNode<IntPtrT> index,
TNode<Smi> enum_index);
template <class Dictionary>
void Add(TNode<Dictionary> dictionary, TNode<Name> key, TNode<Object> value,
Label* bailout);
// Tries to check if {object} has own {unique_name} property.
void TryHasOwnProperty(Node* object, Node* map, Node* instance_type,
Node* unique_name, Label* if_found,
Label* if_not_found, Label* if_bailout);
// Operating mode for TryGetOwnProperty and CallGetterIfAccessor
// kReturnAccessorPair is used when we're only getting the property descriptor
enum GetOwnPropertyMode { kCallJSGetter, kReturnAccessorPair };
// Tries to get {object}'s own {unique_name} property value. If the property
// is an accessor then it also calls a getter. If the property is a double
// field it re-wraps value in an immutable heap number.
void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map,
Node* instance_type, Node* unique_name,
Label* if_found, Variable* var_value,
Label* if_not_found, Label* if_bailout);
void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map,
Node* instance_type, Node* unique_name,
Label* if_found, Variable* var_value,
Variable* var_details, Variable* var_raw_value,
Label* if_not_found, Label* if_bailout,
GetOwnPropertyMode mode);
// Convenience overload: property load with a compile-time-known name,
// embedded as a HeapConstant.
TNode<Object> GetProperty(SloppyTNode<Context> context,
SloppyTNode<Object> receiver, Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
}
// Generic property load; delegates to the GetProperty builtin.
TNode<Object> GetProperty(SloppyTNode<Context> context,
SloppyTNode<Object> receiver,
SloppyTNode<Object> name) {
return CallBuiltin(Builtins::kGetProperty, context, receiver, name);
}
// Property store; delegates to the SetProperty builtin.
TNode<Object> SetPropertyStrict(TNode<Context> context,
TNode<Object> receiver, TNode<Object> key,
TNode<Object> value) {
return CallBuiltin(Builtins::kSetProperty, context, receiver, key, value);
}
// Property store for object-literal initialization; delegates to the
// SetPropertyInLiteral builtin.
TNode<Object> SetPropertyInLiteral(TNode<Context> context,
TNode<JSObject> receiver,
TNode<Object> key, TNode<Object> value) {
return CallBuiltin(Builtins::kSetPropertyInLiteral, context, receiver, key,
value);
}
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
Label* if_null_or_undefined);
// Calls the builtin |id|, resolving it to a Callable via the isolate, and
// forwards |args| to the stub call.
template <class... TArgs>
TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
TArgs... args) {
return CallStub<Object>(Builtins::CallableFor(isolate(), id), context,
args...);
}
// Tail-calls the builtin |id|; control does not return to this stub.
template <class... TArgs>
void TailCallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
TArgs... args) {
return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
void LoadPropertyFromFastObject(Node* object, Node* map,
TNode<DescriptorArray> descriptors,
Node* name_index, Variable* var_details,
Variable* var_value);
void LoadPropertyFromFastObject(Node* object, Node* map,
TNode<DescriptorArray> descriptors,
Node* name_index, Node* details,
Variable* var_value);
void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value);
void LoadPropertyFromGlobalDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value, Label* if_deleted);
// Generic property lookup generator. If the {object} is fast and
// {unique_name} property is found then the control goes to {if_found_fast}
// label and {var_meta_storage} and {var_name_index} will contain
// DescriptorArray and an index of the descriptor's name respectively.
// If the {object} is slow or global then the control goes to {if_found_dict}
// or {if_found_global} and the {var_meta_storage} and {var_name_index} will
// contain a dictionary and an index of the key field of the found entry.
// If property is not found or given lookup is not supported then
// the control goes to {if_not_found} or {if_bailout} respectively.
//
// Note: this code does not check if the global dictionary points to deleted
// entry! This has to be done by the caller.
void TryLookupProperty(SloppyTNode<JSObject> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type,
SloppyTNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
TVariable<HeapObject>* var_meta_storage,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found, Label* if_bailout);
// This is a building block for TryLookupProperty() above. Supports only
// non-special fast and dictionary objects.
void TryLookupPropertyInSimpleObject(TNode<JSObject> object, TNode<Map> map,
TNode<Name> unique_name,
Label* if_found_fast,
Label* if_found_dict,
TVariable<HeapObject>* var_meta_storage,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// This method jumps to if_found if the element is known to exist. To
// if_absent if it's known to not exist. To if_not_found if the prototype
// chain needs to be checked. And if_bailout if the lookup is unsupported.
void TryLookupElement(Node* object, Node* map,
SloppyTNode<Int32T> instance_type,
SloppyTNode<IntPtrT> intptr_index, Label* if_found,
Label* if_absent, Label* if_not_found,
Label* if_bailout);
// This is the type of a lookup-in-holder generator function. In case of a
// property lookup the {key} is guaranteed to be a unique name and in case of
// element lookup the key is an Int32 index.
typedef std::function<void(Node* receiver, Node* holder, Node* map,
Node* instance_type, Node* key, Label* next_holder,
Label* if_bailout)>
LookupInHolder;
// For integer indexed exotic cases, check if the given string cannot be a
// special index. If we are not sure that the given string is not a special
// index with a simple check, return False. Note that "False" return value
// does not mean that the name_string is a special index in the current
// implementation.
void BranchIfMaybeSpecialIndex(TNode<String> name_string,
Label* if_maybe_special_index,
Label* if_not_special_index);
// Generic property prototype chain lookup generator.
// For properties it generates lookup using given {lookup_property_in_holder}
// and for elements it uses {lookup_element_in_holder}.
// Upon reaching the end of prototype chain the control goes to {if_end}.
// If it can't handle the case {receiver}/{key} case then the control goes
// to {if_bailout}.
// If {if_proxy} is nullptr, proxies go to if_bailout.
void TryPrototypeChainLookup(Node* receiver, Node* key,
const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder,
Label* if_end, Label* if_bailout,
Label* if_proxy = nullptr);
// Instanceof helpers.
// Returns true if {object} has {prototype} somewhere in its prototype
// chain, otherwise false is returned. Might cause arbitrary side effects
// due to [[GetPrototypeOf]] invocations.
Node* HasInPrototypeChain(Node* context, Node* object, Node* prototype);
// ES6 section 7.3.19 OrdinaryHasInstance (C, O)
Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
// Load type feedback vector from the stub caller's frame.
TNode<FeedbackVector> LoadFeedbackVectorForStub();
// Load type feedback vector for the given closure.
TNode<FeedbackVector> LoadFeedbackVector(SloppyTNode<JSFunction> closure,
Label* if_undefined = nullptr);
// Load the object from feedback vector cell for the given closure.
// The returned object could be undefined if the closure does not have
// a feedback vector associated with it.
TNode<Object> LoadFeedbackVectorUnchecked(SloppyTNode<JSFunction> closure);
// Update the type feedback vector.
void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
SloppyTNode<IntPtrT> slot_id, const char* reason);
// Combine the new feedback with the existing_feedback. Do nothing if
// existing_feedback is nullptr.
void CombineFeedback(Variable* existing_feedback, int feedback);
void CombineFeedback(Variable* existing_feedback, Node* feedback);
// Overwrite the existing feedback with new_feedback. Do nothing if
// existing_feedback is nullptr.
void OverwriteFeedback(Variable* existing_feedback, int new_feedback);
// Check if a property name might require protector invalidation when it is
// used for a property store or deletion.
void CheckForAssociatedProtector(Node* name, Label* if_protector);
TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
// Emits a keyed load from sloppy arguments and returns the loaded value.
// Passing nullptr as the value selects the load path of the shared emitter.
Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) {
return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout);
}
// Emits a keyed store into sloppy arguments. |value| must be non-null,
// otherwise the shared emitter would take the load path instead.
void StoreKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout) {
DCHECK_NOT_NULL(value);
EmitKeyedSloppyArguments(receiver, key, value, bailout);
}
// Loads script context from the script context table.
TNode<Context> LoadScriptContext(TNode<Context> context,
TNode<IntPtrT> context_index);
Node* Int32ToUint8Clamped(Node* int32_value);
Node* Float64ToUint8Clamped(Node* float64_value);
Node* PrepareValueForWriteToTypedArray(TNode<Object> input,
ElementsKind elements_kind,
TNode<Context> context);
// Store value to an elements array with given elements kind.
void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value,
ParameterMode mode);
void EmitBigTypedArrayElementStore(TNode<JSTypedArray> object,
TNode<FixedTypedArrayBase> elements,
TNode<IntPtrT> intptr_key,
TNode<Object> value,
TNode<Context> context,
Label* opt_if_neutered);
// Part of the above, refactored out to reuse in another place.
void EmitBigTypedArrayElementStore(TNode<FixedTypedArrayBase> elements,
TNode<RawPtrT> backing_store,
TNode<IntPtrT> offset,
TNode<BigInt> bigint_value);
// Implements the BigInt part of
// https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes,
// including truncation to 64 bits (i.e. modulo 2^64).
// {var_high} is only used on 32-bit platforms.
void BigIntToRawBytes(TNode<BigInt> bigint, TVariable<UintPtrT>* var_low,
TVariable<UintPtrT>* var_high);
void EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode, Label* bailout,
Node* context);
Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
Node* length, Node* key, ParameterMode mode,
Label* bailout);
Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
Node* length, ParameterMode mode, Label* bailout);
void TransitionElementsKind(Node* object, Node* map, ElementsKind from_kind,
ElementsKind to_kind, Label* bailout);
void TrapAllocationMemento(Node* object, Label* memento_found);
TNode<IntPtrT> PageFromAddress(TNode<IntPtrT> address);
// Store a weak in-place reference into the FeedbackVector.
TNode<MaybeObject> StoreWeakReferenceInFeedbackVector(
SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
SloppyTNode<HeapObject> value, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Create a new AllocationSite and install it into a feedback vector.
TNode<AllocationSite> CreateAllocationSiteInFeedbackVector(
SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot);
// TODO(ishell, cbruni): Change to HasBoilerplate.
TNode<BoolT> NotHasBoilerplate(TNode<Object> maybe_literal_site);
TNode<Smi> LoadTransitionInfo(TNode<AllocationSite> allocation_site);
TNode<JSObject> LoadBoilerplate(TNode<AllocationSite> allocation_site);
TNode<Int32T> LoadElementsKind(TNode<AllocationSite> allocation_site);
enum class IndexAdvanceMode { kPre, kPost };
typedef std::function<void(Node* index)> FastLoopBody;
Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
ParameterMode parameter_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
// Overload of BuildFastLoop without a VariableList; forwards with an
// empty list allocated in this assembler's zone.
Node* BuildFastLoop(Node* start_index, Node* end_index,
const FastLoopBody& body, int increment,
ParameterMode parameter_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
increment, parameter_mode, advance_mode);
}
enum class ForEachDirection { kForward, kReverse };
typedef std::function<void(Node* fixed_array, Node* offset)>
FastFixedArrayForEachBody;
void BuildFastFixedArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse);
// Overload of BuildFastFixedArrayForEach without a VariableList; forwards
// with an empty list allocated in this assembler's zone.
void BuildFastFixedArrayForEach(
Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse) {
CodeStubAssembler::VariableList list(0, zone());
BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive,
last_element_exclusive, body, mode, direction);
}
// Returns the allocation size in bytes for an array of |element_count|
// elements of |kind| plus |header_size| bytes of header.
TNode<IntPtrT> GetArrayAllocationSize(Node* element_count, ElementsKind kind,
ParameterMode mode, int header_size) {
return ElementOffsetFromIndex(element_count, kind, mode, header_size);
}
// Allocation size of a FixedArray with |element_count| elements of |kind|.
TNode<IntPtrT> GetFixedArrayAllocationSize(Node* element_count,
ElementsKind kind,
ParameterMode mode) {
return GetArrayAllocationSize(element_count, kind, mode,
FixedArray::kHeaderSize);
}
// Allocation size of a PropertyArray with |element_count| slots; sized
// using the PACKED_ELEMENTS (tagged value) element size.
TNode<IntPtrT> GetPropertyArrayAllocationSize(Node* element_count,
ParameterMode mode) {
return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode,
PropertyArray::kHeaderSize);
}
void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count,
Label* doesnt_fit, int base_size,
ParameterMode mode);
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, RootIndex root);
Node* RelationalComparison(Operation op, Node* left, Node* right,
Node* context,
Variable* var_type_feedback = nullptr);
void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right,
Label* if_true, Label* if_false);
// Branch helpers for Number comparisons. Each forwards to
// BranchIfNumberRelationalComparison with the corresponding Operation.
void BranchIfNumberEqual(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kEqual, left, right, if_true,
if_false);
}
// NotEqual is implemented as Equal with the two target labels swapped.
void BranchIfNumberNotEqual(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberEqual(left, right, if_false, if_true);
}
void BranchIfNumberLessThan(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kLessThan, left, right,
if_true, if_false);
}
void BranchIfNumberLessThanOrEqual(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right,
if_true, if_false);
}
void BranchIfNumberGreaterThan(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right,
if_true, if_false);
}
void BranchIfNumberGreaterThanOrEqual(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
right, if_true, if_false);
}
// Branches to |if_accessor_pair| iff |value| is an AccessorPair heap
// object; Smis go straight to |if_not_accessor_pair|.
void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
Label* if_not_accessor_pair) {
GotoIf(TaggedIsSmi(value), if_not_accessor_pair);
Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
}
void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false);
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
Node* StrictEqual(Node* lhs, Node* rhs,
Variable* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
// differs from positive zero.
void BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true, Label* if_false);
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
TNode<Oddball> HasProperty(SloppyTNode<Context> context,
SloppyTNode<Object> object,
SloppyTNode<Object> key,
HasPropertyLookupMode mode);
// Named HasProperty_Inline due to a naming conflict with the builtin
// function namespace; performs HasProperty in kHasProperty mode.
TNode<Oddball> HasProperty_Inline(TNode<Context> context,
TNode<JSReceiver> object,
TNode<Object> key) {
return HasProperty(context, object, key,
HasPropertyLookupMode::kHasProperty);
}
Node* Typeof(Node* value);
TNode<Object> GetSuperConstructor(SloppyTNode<Context> context,
SloppyTNode<JSFunction> active_function);
TNode<JSReceiver> SpeciesConstructor(
SloppyTNode<Context> context, SloppyTNode<Object> object,
SloppyTNode<JSReceiver> default_constructor);
Node* InstanceOf(Node* object, Node* callable, Node* context);
// Debug helpers
Node* IsDebugActive();
TNode<BoolT> IsRuntimeCallStatsEnabled();
// JSArrayBuffer helpers
TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
TNode<RawPtrT> LoadJSArrayBufferBackingStore(
TNode<JSArrayBuffer> array_buffer);
Node* IsDetachedBuffer(Node* buffer);
void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
// JSArrayBufferView helpers
TNode<JSArrayBuffer> LoadJSArrayBufferViewBuffer(
TNode<JSArrayBufferView> array_buffer_view);
TNode<UintPtrT> LoadJSArrayBufferViewByteLength(
TNode<JSArrayBufferView> array_buffer_view);
TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
TNode<JSArrayBufferView> array_buffer_view);
void ThrowIfArrayBufferViewBufferIsDetached(
SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name);
// JSTypedArray helpers
TNode<Smi> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
ParameterMode mode, int base_size = 0);
// Check that a field offset is within the bounds of an object.
TNode<BoolT> IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
SloppyTNode<IntPtrT> length, int header_size,
ElementsKind kind = HOLEY_ELEMENTS);
// Load a builtin's code from the builtin array in the isolate.
TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
// If |if_compile_lazy| is provided then the execution will go to the given
// label in case of an CompileLazy code object.
TNode<Code> GetSharedFunctionInfoCode(
SloppyTNode<SharedFunctionInfo> shared_info,
Label* if_compile_lazy = nullptr);
Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
Node* context);
// Promise helpers
Node* IsPromiseHookEnabled();
Node* HasAsyncEventDelegate();
Node* IsPromiseHookEnabledOrHasAsyncEventDelegate();
Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
// Helpers for StackFrame markers.
Node* MarkerIsFrameType(Node* marker_or_function,
StackFrame::Type frame_type);
Node* MarkerIsNotFrameType(Node* marker_or_function,
StackFrame::Type frame_type);
// for..in helpers
void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
Label* if_fast, Label* if_slow);
Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime);
TNode<IntPtrT> GetArgumentsLength(CodeStubArguments* args);
TNode<Object> GetArgumentValue(CodeStubArguments* args, TNode<IntPtrT> index);
// Support for printf-style debugging
void Print(const char* s);
void Print(const char* prefix, Node* tagged_value);
// Prints |tagged_value| with no prefix (debugging aid).
inline void Print(SloppyTNode<Object> tagged_value) {
return Print(nullptr, tagged_value);
}
// MaybeObject overload of the above.
inline void Print(TNode<MaybeObject> tagged_value) {
return Print(nullptr, tagged_value);
}
// Constructs a TypeError for |message| by calling the native context's
// MakeTypeError JS function with up to three message arguments (enforced
// by the static assert) and an undefined receiver.
template <class... TArgs>
Node* MakeTypeError(MessageTemplate message, Node* context, TArgs... args) {
STATIC_ASSERT(sizeof...(TArgs) <= 3);
Node* const make_type_error = LoadContextElement(
LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
return CallJS(CodeFactory::Call(isolate()), context, make_type_error,
UndefinedConstant(), SmiConstant(message), args...);
}
// Aborts execution with |reason| via Runtime::kAbort; control never
// returns past the Unreachable() marker.
void Abort(AbortReason reason) {
CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason));
Unreachable();
}
// Constant-folding helpers: evaluated at stub-compile time on the host,
// not in the generated code.
bool ConstexprBoolNot(bool value) { return value ? false : true; }
bool ConstexprInt31Equal(int31_t a, int31_t b) { return (a == b) ? true : false; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) {
uint32_t sum = a + b;
return sum;
}
void PerformStackCheck(TNode<Context> context);
void SetPropertyLength(TNode<Context> context, TNode<Object> array,
TNode<Number> length);
// Checks that {object_map}'s prototype map is the {initial_prototype_map} and
// makes sure that the field with name at index {descriptor} is still
// constant. If it is not, go to label {if_modified}.
//
// To make the checks robust, the method also asserts that the descriptor has
// the right key, the caller must pass the root index of the key
// in {field_name_root_index}.
//
// This is useful for checking that given function has not been patched
// on the prototype.
// Fix parameter-name typo: "descfriptor" -> "descriptor" (prototype-only
// change; the contract is documented in the comment block above).
void GotoIfInitialPrototypePropertyModified(TNode<Map> object_map,
TNode<Map> initial_prototype_map,
int descriptor,
RootIndex field_name_root_index,
Label* if_modified);
// Pairs a descriptor index with the root index of the property name
// expected at that descriptor; consumed by
// GotoIfInitialPrototypePropertiesModified below.
struct DescriptorIndexAndName {
DescriptorIndexAndName() {}
DescriptorIndexAndName(int descriptor_index, RootIndex name_root_index)
: descriptor_index(descriptor_index),
name_root_index(name_root_index) {}
int descriptor_index;  // Index into the map's DescriptorArray.
RootIndex name_root_index;  // Root index of the expected property name.
};
void GotoIfInitialPrototypePropertiesModified(
TNode<Map> object_map, TNode<Map> initial_prototype_map,
Vector<DescriptorIndexAndName> properties, Label* if_modified);
// Implements DescriptorArray::Search().
void DescriptorLookup(SloppyTNode<Name> unique_name,
SloppyTNode<DescriptorArray> descriptors,
SloppyTNode<Uint32T> bitfield3, Label* if_found,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// Implements TransitionArray::SearchName() - searches for first transition
// entry with given name (note that there could be multiple entries with
// the same name).
void TransitionLookup(SloppyTNode<Name> unique_name,
SloppyTNode<TransitionArray> transitions,
Label* if_found, TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// Implements generic search procedure like i::Search<Array>().
template <typename Array>
void Lookup(TNode<Name> unique_name, TNode<Array> array,
TNode<Uint32T> number_of_valid_entries, Label* if_found,
TVariable<IntPtrT>* var_name_index, Label* if_not_found);
// Implements generic linear search procedure like i::LinearSearch<Array>().
template <typename Array>
void LookupLinear(TNode<Name> unique_name, TNode<Array> array,
TNode<Uint32T> number_of_valid_entries, Label* if_found,
TVariable<IntPtrT>* var_name_index, Label* if_not_found);
// Implements generic binary search procedure like i::BinarySearch<Array>().
template <typename Array>
void LookupBinary(TNode<Name> unique_name, TNode<Array> array,
TNode<Uint32T> number_of_valid_entries, Label* if_found,
TVariable<IntPtrT>* var_name_index, Label* if_not_found);
// Converts [Descriptor/Transition]Array entry number to a fixed array index.
template <typename Array>
TNode<IntPtrT> EntryIndexToIndex(TNode<Uint32T> entry_index);
// Implements [Descriptor/Transition]Array::ToKeyIndex.
template <typename Array>
TNode<IntPtrT> ToKeyIndex(TNode<Uint32T> entry_index);
// Implements [Descriptor/Transition]Array::GetKey.
template <typename Array>
TNode<Name> GetKey(TNode<Array> array, TNode<Uint32T> entry_index);
// Implements DescriptorArray::GetDetails.
TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
TNode<Uint32T> descriptor_number);
typedef std::function<void(TNode<IntPtrT> descriptor_key_index)>
ForEachDescriptorBodyFunction;
void DescriptorArrayForEach(VariableList& variable_list,
TNode<Uint32T> start_descriptor,
TNode<Uint32T> end_descriptor,
const ForEachDescriptorBodyFunction& body);
// Descriptor array accessors based on key_index, which is equal to
// DescriptorArray::ToKeyIndex(descriptor).
TNode<Name> LoadKeyByKeyIndex(TNode<DescriptorArray> container,
TNode<IntPtrT> key_index);
TNode<Uint32T> LoadDetailsByKeyIndex(TNode<DescriptorArray> container,
TNode<IntPtrT> key_index);
TNode<Object> LoadValueByKeyIndex(TNode<DescriptorArray> container,
TNode<IntPtrT> key_index);
TNode<MaybeObject> LoadFieldTypeByKeyIndex(TNode<DescriptorArray> container,
TNode<IntPtrT> key_index);
TNode<IntPtrT> DescriptorEntryToIndex(TNode<IntPtrT> descriptor);
// Descriptor array accessors based on descriptor.
TNode<Name> LoadKeyByDescriptorEntry(TNode<DescriptorArray> descriptors,
TNode<IntPtrT> descriptor);
TNode<Name> LoadKeyByDescriptorEntry(TNode<DescriptorArray> descriptors,
int descriptor);
TNode<Uint32T> LoadDetailsByDescriptorEntry(
TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
TNode<Uint32T> LoadDetailsByDescriptorEntry(
TNode<DescriptorArray> descriptors, int descriptor);
TNode<Object> LoadValueByDescriptorEntry(TNode<DescriptorArray> descriptors,
int descriptor);
TNode<MaybeObject> LoadFieldTypeByDescriptorEntry(
TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
typedef std::function<void(TNode<Name> key, TNode<Object> value)>
ForEachKeyValueFunction;
// For each JSObject property (in DescriptorArray order), check if the key is
// enumerable, and if so, load the value from the receiver and evaluate the
// closure.
void ForEachEnumerableOwnProperty(TNode<Context> context, TNode<Map> map,
TNode<JSObject> object,
const ForEachKeyValueFunction& body,
Label* bailout);
TNode<Object> CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
TNode<IntPtrT> TryToIntptr(Node* key, Label* miss);
void BranchIfPrototypesHaveNoElements(Node* receiver_map,
Label* definitely_no_elements,
Label* possibly_elements);
void InitializeFunctionContext(Node* native_context, Node* context,
int slots);
TNode<JSArray> ArrayCreate(TNode<Context> context, TNode<Number> length);
// Allocate a clone of a mutable primitive, if {object} is a
// MutableHeapNumber.
TNode<Object> CloneIfMutablePrimitive(TNode<Object> object);
private:
friend class CodeStubArguments;
void HandleBreakOnNode();
TNode<HeapObject> AllocateRawDoubleAligned(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags,
TNode<RawPtrT> top_address,
TNode<RawPtrT> limit_address);
TNode<HeapObject> AllocateRawUnaligned(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags,
TNode<RawPtrT> top_address,
TNode<RawPtrT> limit_address);
TNode<HeapObject> AllocateRaw(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags,
TNode<RawPtrT> top_address,
TNode<RawPtrT> limit_address);
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.
TNode<JSArray> AllocateUninitializedJSArray(TNode<Map> array_map,
TNode<Smi> length,
Node* allocation_site,
TNode<IntPtrT> size_in_bytes);
TNode<BoolT> IsValidSmi(TNode<Smi> smi);
Node* SmiShiftBitsConstant();
// Emits keyed sloppy arguments load if the |value| is nullptr or store
// otherwise. Returns either the loaded value or |value|.
Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout);
TNode<String> AllocateSlicedString(RootIndex map_root_index,
TNode<Uint32T> length,
TNode<String> parent, TNode<Smi> offset);
TNode<String> AllocateConsString(RootIndex map_root_index,
TNode<Uint32T> length, TNode<String> first,
TNode<String> second, AllocationFlags flags);
// Allocate a MutableHeapNumber without initializing its value.
TNode<MutableHeapNumber> AllocateMutableHeapNumber();
Node* SelectImpl(TNode<BoolT> condition, const NodeGenerator& true_body,
const NodeGenerator& false_body, MachineRepresentation rep);
// Implements [Descriptor/Transition]Array::number_of_entries.
template <typename Array>
TNode<Uint32T> NumberOfEntries(TNode<Array> array);
// Implements [Descriptor/Transition]Array::GetSortedKeyIndex.
template <typename Array>
TNode<Uint32T> GetSortedKeyIndex(TNode<Array> descriptors,
TNode<Uint32T> entry_index);
TNode<Smi> CollectFeedbackForString(SloppyTNode<Int32T> instance_type);
void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
Variable* var_type_feedback = nullptr);
TNode<String> AllocAndCopyStringCharacters(Node* from,
Node* from_instance_type,
TNode<IntPtrT> from_index,
TNode<IntPtrT> character_count);
static const int kElementLoopUnrollThreshold = 8;
// {convert_bigint} is only meaningful when {mode} == kToNumber.
Node* NonNumberToNumberOrNumeric(
Node* context, Node* input, Object::Conversion mode,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
void TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric, Variable* var_feedback);
template <Object::Conversion conversion>
void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number,
Variable* var_word32,
Label* if_bigint = nullptr,
Variable* var_bigint = nullptr,
Variable* var_feedback = nullptr);
private:
// Low-level accessors for Descriptor arrays.
TNode<MaybeObject> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
Node* index,
int additional_offset = 0);
};
class CodeStubArguments {
public:
typedef compiler::Node Node;
template <class T>
using TNode = compiler::TNode<T>;
template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
enum ReceiverMode { kHasReceiver, kNoReceiver };
// |argc| is an intptr value which specifies the number of arguments passed
// to the builtin excluding the receiver. The arguments will include a
// receiver iff |receiver_mode| is kHasReceiver.
CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
: CodeStubArguments(assembler, argc, nullptr,
CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) {
}
// |argc| is either a smi or intptr depending on |param_mode|. The arguments
// include a receiver iff |receiver_mode| is kHasReceiver.
CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
CodeStubAssembler::ParameterMode param_mode,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
TNode<Object> GetReceiver() const;
// Replaces receiver argument on the expression stack. Should be used only
// for manipulating arguments in trampoline builtins before tail calling
// further with passing all the JS arguments as is.
void SetReceiver(TNode<Object> object) const;
TNode<RawPtr<Object>> AtIndexPtr(
Node* index, CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS) const;
// |index| is zero-based and does not include the receiver
TNode<Object> AtIndex(Node* index,
CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS) const;
TNode<Object> AtIndex(int index) const;
TNode<Object> GetOptionalArgumentValue(int index) {
return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
}
TNode<Object> GetOptionalArgumentValue(int index,
TNode<Object> default_value);
Node* GetLength(CodeStubAssembler::ParameterMode mode) const {
DCHECK_EQ(mode, argc_mode_);
return argc_;
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
TNode<Object> default_value);
TNode<IntPtrT> GetLength() const {
DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
return assembler_->UncheckedCast<IntPtrT>(argc_);
}
typedef std::function<void(Node* arg)> ForEachBodyFunction;
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
Node* last = nullptr,
CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS) {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
const ForEachBodyFunction& body, Node* first = nullptr,
Node* last = nullptr,
CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS);
void PopAndReturn(Node* value);
private:
Node* GetArguments();
CodeStubAssembler* assembler_;
CodeStubAssembler::ParameterMode argc_mode_;
ReceiverMode receiver_mode_;
Node* argc_;
TNode<RawPtr<Object>> arguments_;
Node* fp_;
};
// Helper assembler that unwraps cons/thin/sliced strings into a direct
// (sequential or external) string so its character data can be addressed.
class ToDirectStringAssembler : public CodeStubAssembler {
 private:
  enum StringPointerKind { PTR_TO_DATA, PTR_TO_STRING };
 public:
  enum Flag {
    kDontUnpackSlicedStrings = 1 << 0,
  };
  typedef base::Flags<Flag> Flags;
  ToDirectStringAssembler(compiler::CodeAssemblerState* state, Node* string,
                          Flags flags = Flags());
  // Converts flat cons, thin, and sliced strings and returns the direct
  // string. The result can be either a sequential or external string.
  // Jumps to if_bailout if the string is indirect and cannot be unpacked.
  TNode<String> TryToDirect(Label* if_bailout);
  // Returns a pointer to the beginning of the string data.
  // Jumps to if_bailout if the external string cannot be unpacked.
  TNode<RawPtrT> PointerToData(Label* if_bailout) {
    return TryToSequential(PTR_TO_DATA, if_bailout);
  }
  // Returns a pointer that, offset-wise, looks like a String.
  // Jumps to if_bailout if the external string cannot be unpacked.
  TNode<RawPtrT> PointerToString(Label* if_bailout) {
    return TryToSequential(PTR_TO_STRING, if_bailout);
  }
  // Accessors for the state produced by TryToDirect.
  Node* string() { return var_string_.value(); }
  Node* instance_type() { return var_instance_type_.value(); }
  TNode<IntPtrT> offset() {
    return UncheckedCast<IntPtrT>(var_offset_.value());
  }
  Node* is_external() { return var_is_external_.value(); }
 private:
  TNode<RawPtrT> TryToSequential(StringPointerKind ptr_kind, Label* if_bailout);
  Variable var_string_;
  Variable var_instance_type_;
  Variable var_offset_;
  Variable var_is_external_;
  const Flags flags_;
};
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
} // namespace internal
} // namespace v8
#endif // V8_CODE_STUB_ASSEMBLER_H_
| 48.221187 | 80 | 0.667067 | [
"object",
"vector",
"transform"
] |
9e17acf4f7cc4b62494eac883b61019114b47d8c | 15,126 | h | C | inference-engine/thirdparty/clDNN/api/CPP/cldnn_defs.h | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | inference-engine/thirdparty/clDNN/api/CPP/cldnn_defs.h | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | inference-engine/thirdparty/clDNN/api/CPP/cldnn_defs.h | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | /*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/
/*! @mainpage clDNN Documentation
* @section intro Introduction
* Compute Library for Deep Neural Networks (clDNN) is a middle-ware software
* for accelerating DNN inference on Intel® HD and Iris™ Pro Graphics.
* This project includes CNN primitives implementations on Intel GPUs with C and C++ interfaces.
*
* clDNN Library implements set of primitives:
* - Convolution
* - Fully connected (inner product)
* - Pooling
* * average
* * maximum
* - Normalization
* * across channel
* * within channel
* * batch
* - Activation
* * logistic
* * tanh
* * rectified linear unit (ReLU)
* * softplus (softReLU)
* * abs
* * square
* * sqrt
* * linear
* - Softmax
* - Crop
* - Deconvolution
* - Depth concatenation
* - Eltwise
* - ROI pooling
* - Simpler NMS
* - Prior box
* - Detection output
*
* With this primitive set, user can build and execute most common image recognition, semantic segmentation and object detection networks topologies like:
* - Alexnet
* - Googlenet(v1-v3)
* - ResNet
* - VGG
* - faster-rCNN
* and other.
*
*
* @section model Programming Model
* Intel® clDNN is graph oriented library. To execute CNN you have to build, compile graph/topology and run to get results.
*
* <B> Terminology: </B>
* - Primitive - dnn base functionality i.e. convolution, pooling, softmax.
* - Data - special primitive type representing primitive parameters (weights and biases), inputs and outputs
* - Engine - type of accelerator that is executing network. Currently ocl engine is the only available.
* - Topology - container of primitives, data, and relations between them. Topology represents graph.
* - Program - optional step between Topology and Network. It is compiled Topology without memory allocation.
 * - Network - compiled Topology with memory allocation. Ready to be executed. During compilation, building parameters trigger special optimizations like fusing, data reordering.
*
* <B> Execution Steps: </B>
*
* \image html workflow.jpg
* -# Create Engine
* -# Declare or define primitives parameters (weights and biases) if needed.
* -# Create primitives. It is required to provide name for each primitive. This is a name of primitive which output will be input to current one. Name can be used before primitive definition.
* -# Create topology
* -# Add primitives to topology
* -# Build Network from topology
* -# Set Inputs data
* -# Execute Network
*
*
* @section graph_compilation Graph compilation
*
* If user choose build option optimize_data when program is being created - explicit or implicit over network creation, clDNN perform some graph optimizations as follows:
* * <B> Stage 0: Graph initiation:</B>
* * build nodes from primitives
* * node replacement:
* * replace each split node with series of crop nodes. Name of crop primitive will be concatenation of split + port names.
* * replace upsampling node with deconvolution node if upsampling mode is bilinear.
* * set outputs - mark nodes that are defined by user as output (blocks fusing etc) or have no users (leafs).
* * calculate processing order - using dfs on graph to establish processing order
* * <B> Stage 1: Priorboxes:</B>
* * priorbox is primitive that is executed during network compilation. Node is removed from a network execution.
* * <B> Stage 2: Graph analysis:</B>
 * * mark constants
* * mark data flow
* * <B> Stage 3: Trimming:</B>
* * apply backward bfs on each output to find unnecessary nodes/branches, then remove those.
* * <B> Stage 4: Inputs and biases:</B>
* * reorder input - format of convolution's input/output is being selected.
* * reorder biases for conv,fc and deconv nodes
* * <B> Stage 5: Redundant reorders:</B>
* * previous stages can provide additional reorders due to format changes per primitive. This stage removes redundant and fuses series of reorders into one.
* * <B> Stage 6: Constant propagation:</B>
 * * prepare padding - goes through all primitives and checks if its user requires padding, if so, set output padding.
* * prepare depthwise separable opt - if split param is greater than 16 and number of IFM <= 8*split in conv or deconv, this stage changes execution from multi kernels into one.
* * constant propagation - replace constant nodes, that are not outputs with data type nodes. Constant primitive is the primitive that doesn't depend on any non-constant primitive and doesn't have to be executed: priorbox, data.
* * <B> Stage 7: Fusing:</B>
* * buffer fusing
 * * concat - if concatenation is the only user of its dependencies then remove the concat node and set proper output paddings in each dependency.
 * * crop - if crop has only one dependency, and its users don't require padding, remove crop and set proper output padding in its dependency.
* * reorder - if primitive before reorder supports different input vs output type reorder can be fused with previous node.
* * primitive fusing - right now this stage fuses activation node with previous node only, only if previous node supports activation fusing.
* * <B> Stage 8: Compile graph:</B>
* * at this stage using kernel selector, graph chooses the best kernel implementation for each node.
* * <B> Stage 9: reorder weights:</B>
* * at this stage weights are converted into format suitable for selected kernel implementation.
* * <B> Stage 10 & 11: Redundant reorders and constant propagation:</B>
* * check again if whole graph compilation didn't provide any redundant reorders and constants.
* * <B> Stage 12: Compile program:</B>
* * at this stage engine compiles cl_kernels.
*
* @section example C++ API Example MNIST network
* @include example_cldnn.cpp
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
#include <functional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <stdexcept>
#include "../C/cldnn.h"
namespace cldnn {
// There is no portable half precision floating point support.
// Using wrapped integral type with the same size and alignment restrictions.
class half_impl {
public:
half_impl() = default;
template <typename T, typename = typename std::enable_if<!std::is_floating_point<T>::value>::type>
explicit half_impl(T data) : _data(data) {}
operator uint16_t() const { return _data; }
operator float() const {
cldnn_status status = CLDNN_SUCCESS;
auto value = cldnn_half_to_float(_data, &status);
if (status != CLDNN_SUCCESS)
throw std::runtime_error("Conversion from half failed");
return value;
}
explicit half_impl(float value) {
cldnn_status status = CLDNN_SUCCESS;
_data = cldnn_float_to_half(value, &status);
if (status != CLDNN_SUCCESS)
throw std::runtime_error("Conversion to half failed");
}
private:
uint16_t _data;
};
} // namespace cldnn
// Use complete implementation if necessary.
#if defined HALF_HALF_HPP
typedef half half_t;
#else
typedef cldnn::half_impl half_t;
#endif
namespace cldnn {
/// @addtogroup cpp_api C++ API
/// @{
/// @defgroup cpp_error Error Handling
/// @{
using status_t = ::cldnn_status;
/// @brief clDNN specific exception type.
class error : public std::runtime_error {
public:
explicit error(const std::string& _Message, status_t status = CLDNN_ERROR)
: runtime_error(_Message), _status(status) {
}
explicit error(const char* _Message, status_t status = CLDNN_ERROR)
: runtime_error(_Message), _status(status) {
}
/// @brief Returns clDNN status code.
const status_t& status() const { return _status; }
private:
status_t _status;
};
#define CLDNN_THROW(msg, status) throw cldnn::error(msg, status);
template <class T>
T check_status(std::string err_msg, std::function<T(status_t*)> func) {
status_t status = CLDNN_SUCCESS;
auto result = func(&status);
if (status != CLDNN_SUCCESS)
CLDNN_THROW(err_msg.append(": ").append(cldnn_get_last_error_message()), status);
return result;
}
// void specialization: there is no result to forward, but a failing status
// still raises a cldnn::error exactly like the generic overload.
template <>
inline void check_status<void>(std::string err_msg, std::function<void(status_t*)> func) {
    status_t rc = CLDNN_SUCCESS;
    func(&rc);
    if (rc != CLDNN_SUCCESS) {
        CLDNN_THROW(err_msg.append(": ").append(cldnn_get_last_error_message()), rc);
    }
}
/// @}
/// @defgroup cpp_version Version Information
/// @{
using version_t = ::cldnn_version;
/// @brief Get information about version of clDNN.
inline version_t get_version() {
    auto fetch = [](status_t* status) { return ::cldnn_get_version(status); };
    return check_status<version_t>("get_version: fetching version information failed", fetch);
}
/// @}
/// @cond CPP_HELPERS
/// @defgroup cpp_helpers Helpers
/// @{
#define CLDNN_API_CLASS(the_class) static_assert(std::is_standard_layout<the_class>::value, #the_class " has to be 'standart layout' class");
// Rounds |size| up to the nearest multiple of |align| (integral types only).
template <typename T>
typename std::enable_if<std::is_integral<T>::value, T>::type align_to(T size, size_t align) {
    const auto remainder = size % align;
    if (remainder == 0) {
        return static_cast<T>(size);
    }
    return static_cast<T>(size + (align - remainder));
}
// Number of elements needed to pad |size| out to a multiple of |align|.
template <typename T>
typename std::enable_if<std::is_integral<T>::value, T>::type pad_to(T size, size_t align) {
    const auto remainder = size % align;
    return static_cast<T>(remainder == 0 ? 0 : align - remainder);
}
// True when |size| is an exact multiple of |align| (integral types only).
template <typename T>
typename std::enable_if<std::is_integral<T>::value, bool>::type is_aligned_to(T size, size_t align) {
    return size % align == 0;
}
/// Computes ceil(@p val / @p divider) on unsigned integral numbers.
///
/// Both operands are converted to their corresponding unsigned types before
/// dividing (SFINAE restricts the overload to integral @p T1 / @p T2). The
/// result type is whatever the usual arithmetic conversions yield for that
/// unsigned division.
///
/// @param val     Divided value; converted to its unsigned counterpart.
/// @param divider Divider value; converted to its unsigned counterpart.
/// @return ceil(@p val / @p divider).
template <typename T1, typename T2>
constexpr auto ceil_div(T1 val, T2 divider)
    -> typename std::enable_if<std::is_integral<T1>::value && std::is_integral<T2>::value,
                               decltype(std::declval<typename std::make_unsigned<T1>::type>() / std::declval<typename std::make_unsigned<T2>::type>())>::type {
    using u1_t = typename std::make_unsigned<T1>::type;
    using u2_t = typename std::make_unsigned<T2>::type;
    using ret_t = decltype(std::declval<u1_t>() / std::declval<u2_t>());
    return static_cast<ret_t>((static_cast<u1_t>(val) + static_cast<u2_t>(divider) - 1U) / static_cast<u2_t>(divider));
}

/// Rounds @p val to the nearest multiple of @p rounding that is greater or
/// equal to @p val.
///
/// Same unsigned-conversion rules as ceil_div; implemented as
/// ceil_div(@p val, @p rounding) * @p rounding.
///
/// @param val      Value to round up; converted to its unsigned counterpart.
/// @param rounding Rounding step; converted to its unsigned counterpart.
/// @return @p val rounded up to a multiple of @p rounding.
template <typename T1, typename T2>
constexpr auto round_up_to(T1 val, T2 rounding)
    -> typename std::enable_if<std::is_integral<T1>::value && std::is_integral<T2>::value,
                               decltype(std::declval<typename std::make_unsigned<T1>::type>() / std::declval<typename std::make_unsigned<T2>::type>())>::type {
    using u1_t = typename std::make_unsigned<T1>::type;
    using u2_t = typename std::make_unsigned<T2>::type;
    using ret_t = decltype(std::declval<u1_t>() / std::declval<u2_t>());
    return static_cast<ret_t>(ceil_div(val, rounding) * static_cast<u2_t>(rounding));
}
///
/// \brief Converts C API float array to std::vector<float>
///
/// Copies arr.size elements; the returned vector owns its storage
/// independently of \p arr.
inline std::vector<float> float_arr_to_vector(const cldnn_float_arr& arr) {
    // Iterator-range construction replaces the hand-rolled element copy.
    return std::vector<float>(arr.data, arr.data + arr.size);
}
///
/// \brief Converts C API float array to std::vector<uint16_t>
///
/// Copies arr.size elements; the returned vector owns its storage
/// independently of \p arr.
inline std::vector<uint16_t> uint16_t_arr_to_vector(const cldnn_uint16_t_arr& arr) {
    // Iterator-range construction replaces the hand-rolled element copy.
    return std::vector<uint16_t>(arr.data, arr.data + arr.size);
}
///
/// \brief Converts C API uint8_t array to std::vector<uint8_t>
///
/// Copies arr.size elements; the returned vector owns its storage
/// independently of \p arr.
inline std::vector<uint8_t> uint8_t_arr_to_vector(const cldnn_uint8_t_arr& arr) {
    // Iterator-range construction replaces the hand-rolled element copy.
    return std::vector<uint8_t>(arr.data, arr.data + arr.size);
}
///
/// \brief Converts std::vector<float> to C API float_array
///
/// NOTE(review): the returned struct aliases \p stor's buffer — it is only
/// valid while \p stor is alive and its storage is not reallocated.
inline cldnn_float_arr float_vector_to_arr(const std::vector<float>& stor) {
    return {stor.data(), stor.size()};
}
///
/// \brief Converts std::vector<uint16_t> to C API float_array
///
/// NOTE(review): the returned struct aliases \p stor's buffer — it is only
/// valid while \p stor is alive and its storage is not reallocated.
inline cldnn_uint16_t_arr uint16_t_vector_to_arr(const std::vector<uint16_t>& stor) {
    return {stor.data(), stor.size()};
}
///
/// \brief Converts std::vector<uint8_t> to C API uint8_t array
///
/// NOTE(review): the returned struct aliases \p stor's buffer — it is only
/// valid while \p stor is alive and its storage is not reallocated.
inline cldnn_uint8_t_arr uint8_t_vector_to_arr(const std::vector<uint8_t>& stor) {
    return {stor.data(), stor.size()};
}
///
/// \brief Converts std::vector<tensor> to C API tensor_array
///
/// NOTE(review): the returned struct aliases \p stor's buffer — it is only
/// valid while \p stor is alive and its storage is not reallocated.
inline cldnn_tensor_arr tensor_vector_to_arr(const std::vector<cldnn_tensor>& stor) {
    return cldnn_tensor_arr{stor.data(), stor.size()};
}
///
/// \brief Converts C API tensor_array to std::vector of C API tensor
///
/// Copies arr.size elements; the returned vector owns its storage
/// independently of \p arr.
inline std::vector<cldnn_tensor> tensor_arr_to_cldnn_vector(const cldnn_tensor_arr& arr) {
    // Iterator-range construction replaces the hand-rolled element copy.
    return std::vector<cldnn_tensor>(arr.data, arr.data + arr.size);
}
/// @}
/// @endcond
/// @}
} // namespace cldnn
| 38.884319 | 229 | 0.695557 | [
"object",
"vector",
"model"
] |
9e181585ca1c9dda9419a2fd99862c13842c171f | 129,220 | h | C | tests/cpp17/generated_cpp17/monster_test_generated.h | lijinglue/flatbuffers | bc5cbe1a9d1b80a088174e64e505e28719c189da | [
"Apache-2.0"
] | 1 | 2016-04-10T15:06:06.000Z | 2016-04-10T15:06:06.000Z | tests/cpp17/generated_cpp17/monster_test_generated.h | lijinglue/flatbuffers | bc5cbe1a9d1b80a088174e64e505e28719c189da | [
"Apache-2.0"
] | null | null | null | tests/cpp17/generated_cpp17/monster_test_generated.h | lijinglue/flatbuffers | bc5cbe1a9d1b80a088174e64e505e28719c189da | [
"Apache-2.0"
] | null | null | null | // automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_MONSTERTEST_MYGAME_EXAMPLE_H_
#define FLATBUFFERS_GENERATED_MONSTERTEST_MYGAME_EXAMPLE_H_
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
namespace MyGame {
struct InParentNamespace;
struct InParentNamespaceBuilder;
struct InParentNamespaceT;
namespace Example2 {
struct Monster;
struct MonsterBuilder;
struct MonsterT;
} // namespace Example2
namespace Example {
struct Test;
struct TestSimpleTableWithEnum;
struct TestSimpleTableWithEnumBuilder;
struct TestSimpleTableWithEnumT;
struct Vec3;
struct Ability;
struct Stat;
struct StatBuilder;
struct StatT;
struct Referrable;
struct ReferrableBuilder;
struct ReferrableT;
struct Monster;
struct MonsterBuilder;
struct MonsterT;
struct TypeAliases;
struct TypeAliasesBuilder;
struct TypeAliasesT;
} // namespace Example
inline const flatbuffers::TypeTable *InParentNamespaceTypeTable();
namespace Example2 {
inline const flatbuffers::TypeTable *MonsterTypeTable();
} // namespace Example2
namespace Example {
inline const flatbuffers::TypeTable *TestTypeTable();
inline const flatbuffers::TypeTable *TestSimpleTableWithEnumTypeTable();
inline const flatbuffers::TypeTable *Vec3TypeTable();
inline const flatbuffers::TypeTable *AbilityTypeTable();
inline const flatbuffers::TypeTable *StatTypeTable();
inline const flatbuffers::TypeTable *ReferrableTypeTable();
inline const flatbuffers::TypeTable *MonsterTypeTable();
inline const flatbuffers::TypeTable *TypeAliasesTypeTable();
/// Composite components of Monster color.
// NOTE(review): this file is flatc-generated; hand-written comments below
// will be lost if the schema is recompiled.
enum class Color : uint8_t {
  Red = 1,
  /// \brief color Green
  /// Green is bit_flag with value (1u << 1)
  Green = 2,
  /// \brief color Blue (1u << 3)
  Blue = 8,
  NONE = 0,
  ANY = 11
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(Color, uint8_t)
// The three declared flag values (Red, Green, Blue) in schema order.
inline const Color (&EnumValuesColor())[3] {
  static const Color values[] = {
    Color::Red,
    Color::Green,
    Color::Blue
  };
  return values;
}
// Sparse name table indexed by (value - Red); the gaps between the bit-flag
// values 1, 2, 8 are empty strings, with a trailing nullptr sentinel.
inline const char * const *EnumNamesColor() {
  static const char * const names[9] = {
    "Red",
    "Green",
    "",
    "",
    "",
    "",
    "",
    "Blue",
    nullptr
  };
  return names;
}
// Name lookup with a range guard; out-of-range values map to "".
inline const char *EnumNameColor(Color e) {
  if (flatbuffers::IsOutRange(e, Color::Red, Color::Blue)) return "";
  const size_t index = static_cast<size_t>(e) - static_cast<size_t>(Color::Red);
  return EnumNamesColor()[index];
}
// Race: plain (non-bitmask) enum; None uses the sentinel value -1.
enum class Race : int8_t {
  None = -1,
  Human = 0,
  Dwarf = 1,
  Elf = 2,
  MIN = None,
  MAX = Elf
};
// All declared values in schema order.
inline const Race (&EnumValuesRace())[4] {
  static const Race values[] = {
    Race::None,
    Race::Human,
    Race::Dwarf,
    Race::Elf
  };
  return values;
}
// Name table indexed by (value - None), nullptr-terminated.
inline const char * const *EnumNamesRace() {
  static const char * const names[5] = {
    "None",
    "Human",
    "Dwarf",
    "Elf",
    nullptr
  };
  return names;
}
// Name lookup with a range guard; out-of-range values map to "".
inline const char *EnumNameRace(Race e) {
  if (flatbuffers::IsOutRange(e, Race::None, Race::Elf)) return "";
  const size_t index = static_cast<size_t>(e) - static_cast<size_t>(Race::None);
  return EnumNamesRace()[index];
}
// Any: discriminator for the schema's `Any` union (NONE + three table types).
enum class Any : uint8_t {
  NONE = 0,
  Monster = 1,
  TestSimpleTableWithEnum = 2,
  MyGame_Example2_Monster = 3,
  MIN = NONE,
  MAX = MyGame_Example2_Monster
};
inline const Any (&EnumValuesAny())[4] {
  static const Any values[] = {
    Any::NONE,
    Any::Monster,
    Any::TestSimpleTableWithEnum,
    Any::MyGame_Example2_Monster
  };
  return values;
}
inline const char * const *EnumNamesAny() {
  static const char * const names[5] = {
    "NONE",
    "Monster",
    "TestSimpleTableWithEnum",
    "MyGame_Example2_Monster",
    nullptr
  };
  return names;
}
// Values are contiguous from 0, so the enum value indexes the table directly.
inline const char *EnumNameAny(Any e) {
  if (flatbuffers::IsOutRange(e, Any::NONE, Any::MyGame_Example2_Monster)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesAny()[index];
}
// Compile-time mapping from member table type to its Any discriminator; the
// unspecialized template deliberately maps unknown types to NONE.
template<typename T> struct AnyTraits {
  static const Any enum_value = Any::NONE;
};
template<> struct AnyTraits<MyGame::Example::Monster> {
  static const Any enum_value = Any::Monster;
};
template<> struct AnyTraits<MyGame::Example::TestSimpleTableWithEnum> {
  static const Any enum_value = Any::TestSimpleTableWithEnum;
};
template<> struct AnyTraits<MyGame::Example2::Monster> {
  static const Any enum_value = Any::MyGame_Example2_Monster;
};
// Owning, type-erased holder for the object-API (`...T`) side of the Any
// union: |type| is the discriminator, |value| points at the owned object.
struct AnyUnion {
  Any type;
  void *value;
  AnyUnion() : type(Any::NONE), value(nullptr) {}
  // Move: steal the source's contents via swap, leaving it in the NONE state.
  AnyUnion(AnyUnion&& u) FLATBUFFERS_NOEXCEPT :
    type(Any::NONE), value(nullptr)
    { std::swap(type, u.type); std::swap(value, u.value); }
  AnyUnion(const AnyUnion &);  // deep copy; defined out of line
  // Copy-and-swap assignment.
  AnyUnion &operator=(const AnyUnion &u)
    { AnyUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }
  AnyUnion &operator=(AnyUnion &&u) FLATBUFFERS_NOEXCEPT
    { std::swap(type, u.type); std::swap(value, u.value); return *this; }
  ~AnyUnion() { Reset(); }
  // Destroys the held object (if any) and returns to the NONE state.
  void Reset();
#ifndef FLATBUFFERS_CPP98_STL
  // Takes ownership of a copy/move of |val|; the discriminator is derived
  // from AnyTraits over the value's TableType.
  template <typename T>
  void Set(T&& val) {
    using RT = typename std::remove_reference<T>::type;
    Reset();
    type = AnyTraits<typename RT::TableType>::enum_value;
    if (type != Any::NONE) {
      value = new RT(std::forward<T>(val));
    }
  }
#endif  // FLATBUFFERS_CPP98_STL
  static void *UnPack(const void *obj, Any type, const flatbuffers::resolver_function_t *resolver);
  flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
  // Checked accessors: return the held object when the discriminator
  // matches, nullptr otherwise.
  MyGame::Example::MonsterT *AsMonster() {
    return type == Any::Monster ?
      reinterpret_cast<MyGame::Example::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example::MonsterT *AsMonster() const {
    return type == Any::Monster ?
      reinterpret_cast<const MyGame::Example::MonsterT *>(value) : nullptr;
  }
  MyGame::Example::TestSimpleTableWithEnumT *AsTestSimpleTableWithEnum() {
    return type == Any::TestSimpleTableWithEnum ?
      reinterpret_cast<MyGame::Example::TestSimpleTableWithEnumT *>(value) : nullptr;
  }
  const MyGame::Example::TestSimpleTableWithEnumT *AsTestSimpleTableWithEnum() const {
    return type == Any::TestSimpleTableWithEnum ?
      reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnumT *>(value) : nullptr;
  }
  MyGame::Example2::MonsterT *AsMyGame_Example2_Monster() {
    return type == Any::MyGame_Example2_Monster ?
      reinterpret_cast<MyGame::Example2::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example2::MonsterT *AsMyGame_Example2_Monster() const {
    return type == Any::MyGame_Example2_Monster ?
      reinterpret_cast<const MyGame::Example2::MonsterT *>(value) : nullptr;
  }
};
bool VerifyAny(flatbuffers::Verifier &verifier, const void *obj, Any type);
bool VerifyAnyVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
// AnyUniqueAliases: same member table types as Any, but each under a unique
// alias name (M/TS/M2), so a Traits mapping and Set<T>() can be generated.
enum class AnyUniqueAliases : uint8_t {
  NONE = 0,
  M = 1,
  TS = 2,
  M2 = 3,
  MIN = NONE,
  MAX = M2
};
inline const AnyUniqueAliases (&EnumValuesAnyUniqueAliases())[4] {
  static const AnyUniqueAliases values[] = {
    AnyUniqueAliases::NONE,
    AnyUniqueAliases::M,
    AnyUniqueAliases::TS,
    AnyUniqueAliases::M2
  };
  return values;
}
inline const char * const *EnumNamesAnyUniqueAliases() {
  static const char * const names[5] = {
    "NONE",
    "M",
    "TS",
    "M2",
    nullptr
  };
  return names;
}
// Name lookup with a range guard; out-of-range values map to "".
inline const char *EnumNameAnyUniqueAliases(AnyUniqueAliases e) {
  if (flatbuffers::IsOutRange(e, AnyUniqueAliases::NONE, AnyUniqueAliases::M2)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesAnyUniqueAliases()[index];
}
// Type -> discriminator mapping; unknown types map to NONE.
template<typename T> struct AnyUniqueAliasesTraits {
  static const AnyUniqueAliases enum_value = AnyUniqueAliases::NONE;
};
template<> struct AnyUniqueAliasesTraits<MyGame::Example::Monster> {
  static const AnyUniqueAliases enum_value = AnyUniqueAliases::M;
};
template<> struct AnyUniqueAliasesTraits<MyGame::Example::TestSimpleTableWithEnum> {
  static const AnyUniqueAliases enum_value = AnyUniqueAliases::TS;
};
template<> struct AnyUniqueAliasesTraits<MyGame::Example2::Monster> {
  static const AnyUniqueAliases enum_value = AnyUniqueAliases::M2;
};
// Owning object-API union holder; same ownership pattern as AnyUnion.
struct AnyUniqueAliasesUnion {
  AnyUniqueAliases type;
  void *value;
  AnyUniqueAliasesUnion() : type(AnyUniqueAliases::NONE), value(nullptr) {}
  AnyUniqueAliasesUnion(AnyUniqueAliasesUnion&& u) FLATBUFFERS_NOEXCEPT :
    type(AnyUniqueAliases::NONE), value(nullptr)
    { std::swap(type, u.type); std::swap(value, u.value); }
  AnyUniqueAliasesUnion(const AnyUniqueAliasesUnion &);  // deep copy; defined out of line
  AnyUniqueAliasesUnion &operator=(const AnyUniqueAliasesUnion &u)
    { AnyUniqueAliasesUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }
  AnyUniqueAliasesUnion &operator=(AnyUniqueAliasesUnion &&u) FLATBUFFERS_NOEXCEPT
    { std::swap(type, u.type); std::swap(value, u.value); return *this; }
  ~AnyUniqueAliasesUnion() { Reset(); }
  void Reset();
#ifndef FLATBUFFERS_CPP98_STL
  // Takes ownership of |val|; discriminator derived via the Traits mapping.
  template <typename T>
  void Set(T&& val) {
    using RT = typename std::remove_reference<T>::type;
    Reset();
    type = AnyUniqueAliasesTraits<typename RT::TableType>::enum_value;
    if (type != AnyUniqueAliases::NONE) {
      value = new RT(std::forward<T>(val));
    }
  }
#endif  // FLATBUFFERS_CPP98_STL
  static void *UnPack(const void *obj, AnyUniqueAliases type, const flatbuffers::resolver_function_t *resolver);
  flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
  // Checked accessors: non-null only when the discriminator matches.
  MyGame::Example::MonsterT *AsM() {
    return type == AnyUniqueAliases::M ?
      reinterpret_cast<MyGame::Example::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example::MonsterT *AsM() const {
    return type == AnyUniqueAliases::M ?
      reinterpret_cast<const MyGame::Example::MonsterT *>(value) : nullptr;
  }
  MyGame::Example::TestSimpleTableWithEnumT *AsTS() {
    return type == AnyUniqueAliases::TS ?
      reinterpret_cast<MyGame::Example::TestSimpleTableWithEnumT *>(value) : nullptr;
  }
  const MyGame::Example::TestSimpleTableWithEnumT *AsTS() const {
    return type == AnyUniqueAliases::TS ?
      reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnumT *>(value) : nullptr;
  }
  MyGame::Example2::MonsterT *AsM2() {
    return type == AnyUniqueAliases::M2 ?
      reinterpret_cast<MyGame::Example2::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example2::MonsterT *AsM2() const {
    return type == AnyUniqueAliases::M2 ?
      reinterpret_cast<const MyGame::Example2::MonsterT *>(value) : nullptr;
  }
};
bool VerifyAnyUniqueAliases(flatbuffers::Verifier &verifier, const void *obj, AnyUniqueAliases type);
bool VerifyAnyUniqueAliasesVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
// AnyAmbiguousAliases: all three members alias the SAME table type (Monster),
// so no Traits mapping or Set<T>() is generated — the discriminator must be
// supplied explicitly by the caller.
enum class AnyAmbiguousAliases : uint8_t {
  NONE = 0,
  M1 = 1,
  M2 = 2,
  M3 = 3,
  MIN = NONE,
  MAX = M3
};
inline const AnyAmbiguousAliases (&EnumValuesAnyAmbiguousAliases())[4] {
  static const AnyAmbiguousAliases values[] = {
    AnyAmbiguousAliases::NONE,
    AnyAmbiguousAliases::M1,
    AnyAmbiguousAliases::M2,
    AnyAmbiguousAliases::M3
  };
  return values;
}
inline const char * const *EnumNamesAnyAmbiguousAliases() {
  static const char * const names[5] = {
    "NONE",
    "M1",
    "M2",
    "M3",
    nullptr
  };
  return names;
}
// Name lookup with a range guard; out-of-range values map to "".
inline const char *EnumNameAnyAmbiguousAliases(AnyAmbiguousAliases e) {
  if (flatbuffers::IsOutRange(e, AnyAmbiguousAliases::NONE, AnyAmbiguousAliases::M3)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesAnyAmbiguousAliases()[index];
}
// Owning object-API union holder; same ownership pattern as AnyUnion, minus
// Set<T>() (the type alone cannot determine the discriminator here).
struct AnyAmbiguousAliasesUnion {
  AnyAmbiguousAliases type;
  void *value;
  AnyAmbiguousAliasesUnion() : type(AnyAmbiguousAliases::NONE), value(nullptr) {}
  AnyAmbiguousAliasesUnion(AnyAmbiguousAliasesUnion&& u) FLATBUFFERS_NOEXCEPT :
    type(AnyAmbiguousAliases::NONE), value(nullptr)
    { std::swap(type, u.type); std::swap(value, u.value); }
  AnyAmbiguousAliasesUnion(const AnyAmbiguousAliasesUnion &);  // deep copy; defined out of line
  AnyAmbiguousAliasesUnion &operator=(const AnyAmbiguousAliasesUnion &u)
    { AnyAmbiguousAliasesUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }
  AnyAmbiguousAliasesUnion &operator=(AnyAmbiguousAliasesUnion &&u) FLATBUFFERS_NOEXCEPT
    { std::swap(type, u.type); std::swap(value, u.value); return *this; }
  ~AnyAmbiguousAliasesUnion() { Reset(); }
  void Reset();
  static void *UnPack(const void *obj, AnyAmbiguousAliases type, const flatbuffers::resolver_function_t *resolver);
  flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
  // Checked accessors: non-null only when the discriminator matches.
  MyGame::Example::MonsterT *AsM1() {
    return type == AnyAmbiguousAliases::M1 ?
      reinterpret_cast<MyGame::Example::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example::MonsterT *AsM1() const {
    return type == AnyAmbiguousAliases::M1 ?
      reinterpret_cast<const MyGame::Example::MonsterT *>(value) : nullptr;
  }
  MyGame::Example::MonsterT *AsM2() {
    return type == AnyAmbiguousAliases::M2 ?
      reinterpret_cast<MyGame::Example::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example::MonsterT *AsM2() const {
    return type == AnyAmbiguousAliases::M2 ?
      reinterpret_cast<const MyGame::Example::MonsterT *>(value) : nullptr;
  }
  MyGame::Example::MonsterT *AsM3() {
    return type == AnyAmbiguousAliases::M3 ?
      reinterpret_cast<MyGame::Example::MonsterT *>(value) : nullptr;
  }
  const MyGame::Example::MonsterT *AsM3() const {
    return type == AnyAmbiguousAliases::M3 ?
      reinterpret_cast<const MyGame::Example::MonsterT *>(value) : nullptr;
  }
};
bool VerifyAnyAmbiguousAliases(flatbuffers::Verifier &verifier, const void *obj, AnyAmbiguousAliases type);
bool VerifyAnyAmbiguousAliasesVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
// 4-byte fixed-layout struct (alignment 2) with one explicit padding byte.
// Scalars are stored in buffer endianness; accessors convert via EndianScalar.
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(2) Test FLATBUFFERS_FINAL_CLASS {
 private:
  int16_t a_;
  int8_t b_;
  int8_t padding0__;
 public:
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return TestTypeTable();
  }
  // Zero-initializing default constructor; padding is referenced to silence
  // unused-member warnings.
  Test()
      : a_(0),
        b_(0),
        padding0__(0) {
    (void)padding0__;
  }
  Test(int16_t _a, int8_t _b)
      : a_(flatbuffers::EndianScalar(_a)),
        b_(flatbuffers::EndianScalar(_b)),
        padding0__(0) {
  }
  int16_t a() const {
    return flatbuffers::EndianScalar(a_);
  }
  void mutate_a(int16_t _a) {
    flatbuffers::WriteScalar(&a_, _a);
  }
  int8_t b() const {
    return flatbuffers::EndianScalar(b_);
  }
  void mutate_b(int8_t _b) {
    flatbuffers::WriteScalar(&b_, _b);
  }
};
FLATBUFFERS_STRUCT_END(Test, 4);
// 32-byte fixed-layout struct (alignment 8) with three explicit padding
// members. Scalars are stored in buffer endianness; accessors convert via
// EndianScalar, and the Color field is stored as its uint8_t underlying type.
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) Vec3 FLATBUFFERS_FINAL_CLASS {
 private:
  float x_;
  float y_;
  float z_;
  int32_t padding0__;
  double test1_;
  uint8_t test2_;
  int8_t padding1__;
  MyGame::Example::Test test3_;
  int16_t padding2__;
 public:
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return Vec3TypeTable();
  }
  // Zero-initializing default constructor; padding members are referenced to
  // silence unused-member warnings.
  Vec3()
      : x_(0),
        y_(0),
        z_(0),
        padding0__(0),
        test1_(0),
        test2_(0),
        padding1__(0),
        test3_(),
        padding2__(0) {
    (void)padding0__;
    (void)padding1__;
    (void)padding2__;
  }
  Vec3(float _x, float _y, float _z, double _test1, MyGame::Example::Color _test2, const MyGame::Example::Test &_test3)
      : x_(flatbuffers::EndianScalar(_x)),
        y_(flatbuffers::EndianScalar(_y)),
        z_(flatbuffers::EndianScalar(_z)),
        padding0__(0),
        test1_(flatbuffers::EndianScalar(_test1)),
        test2_(flatbuffers::EndianScalar(static_cast<uint8_t>(_test2))),
        padding1__(0),
        test3_(_test3),
        padding2__(0) {
  }
  float x() const {
    return flatbuffers::EndianScalar(x_);
  }
  void mutate_x(float _x) {
    flatbuffers::WriteScalar(&x_, _x);
  }
  float y() const {
    return flatbuffers::EndianScalar(y_);
  }
  void mutate_y(float _y) {
    flatbuffers::WriteScalar(&y_, _y);
  }
  float z() const {
    return flatbuffers::EndianScalar(z_);
  }
  void mutate_z(float _z) {
    flatbuffers::WriteScalar(&z_, _z);
  }
  double test1() const {
    return flatbuffers::EndianScalar(test1_);
  }
  void mutate_test1(double _test1) {
    flatbuffers::WriteScalar(&test1_, _test1);
  }
  MyGame::Example::Color test2() const {
    return static_cast<MyGame::Example::Color>(flatbuffers::EndianScalar(test2_));
  }
  void mutate_test2(MyGame::Example::Color _test2) {
    flatbuffers::WriteScalar(&test2_, static_cast<uint8_t>(_test2));
  }
  const MyGame::Example::Test &test3() const {
    return test3_;
  }
  MyGame::Example::Test &mutable_test3() {
    return test3_;
  }
};
FLATBUFFERS_STRUCT_END(Vec3, 32);
// Fixed-layout struct with 4-byte alignment; mirrors the schema's `Ability`.
// `id` is the key field: the KeyCompare* members support sorted vectors and
// binary-search lookup.
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Ability FLATBUFFERS_FINAL_CLASS {
 private:
  uint32_t id_;
  uint32_t distance_;
 public:
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return AbilityTypeTable();
  }
  Ability()
      : id_(0),
        distance_(0) {
  }
  // Scalars are stored little-endian; EndianScalar byte-swaps on big-endian hosts.
  Ability(uint32_t _id, uint32_t _distance)
      : id_(flatbuffers::EndianScalar(_id)),
        distance_(flatbuffers::EndianScalar(_distance)) {
  }
  uint32_t id() const {
    return flatbuffers::EndianScalar(id_);
  }
  void mutate_id(uint32_t _id) {
    flatbuffers::WriteScalar(&id_, _id);
  }
  // Strict-weak ordering on the key field, used when sorting vectors of Ability.
  bool KeyCompareLessThan(const Ability *o) const {
    return id() < o->id();
  }
  // Three-way compare against a key value (-1/0/+1) without subtraction overflow.
  int KeyCompareWithValue(uint32_t val) const {
    return static_cast<int>(id() > val) - static_cast<int>(id() < val);
  }
  uint32_t distance() const {
    return flatbuffers::EndianScalar(distance_);
  }
  void mutate_distance(uint32_t _distance) {
    flatbuffers::WriteScalar(&distance_, _distance);
  }
};
FLATBUFFERS_STRUCT_END(Ability, 8);  // compile-time check that sizeof(Ability) == 8
} // namespace Example
// Native (object API) counterpart of InParentNamespace. The table has no
// fields, so the native type carries no data beyond its type link.
struct InParentNamespaceT : public flatbuffers::NativeTable {
  typedef InParentNamespace TableType;
};
// Flat (in-buffer) accessor for the empty table InParentNamespace.
// Instances are views into a buffer; they are never constructed directly.
struct InParentNamespace FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef InParentNamespaceT NativeTableType;
  typedef InParentNamespaceBuilder Builder;
  struct Traits;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return InParentNamespaceTypeTable();
  }
  // Only the vtable needs validation: the table declares no fields.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           verifier.EndTable();
  }
  // Object-API conversions (definitions are generated elsewhere in this file).
  InParentNamespaceT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(InParentNamespaceT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<InParentNamespace> Pack(flatbuffers::FlatBufferBuilder &_fbb, const InParentNamespaceT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for InParentNamespace; brackets the (empty) field section between
// StartTable and EndTable on the owning FlatBufferBuilder.
struct InParentNamespaceBuilder {
  typedef InParentNamespace Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start position recorded by StartTable
  explicit InParentNamespaceBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<InParentNamespace> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<InParentNamespace>(end);
    return o;
  }
};
// Convenience helper: writes an (empty) InParentNamespace table into _fbb
// and returns its offset.
inline flatbuffers::Offset<InParentNamespace> CreateInParentNamespace(
    flatbuffers::FlatBufferBuilder &_fbb) {
  InParentNamespaceBuilder table_builder(_fbb);
  return table_builder.Finish();
}
// Compile-time traits: lets generic code map the table type to its Create
// function without knowing the name.
struct InParentNamespace::Traits {
  using type = InParentNamespace;
  static auto constexpr Create = CreateInParentNamespace;
};
flatbuffers::Offset<InParentNamespace> CreateInParentNamespace(flatbuffers::FlatBufferBuilder &_fbb, const InParentNamespaceT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
namespace Example2 {
// Native (object API) counterpart of Example2::Monster. The table has no
// fields, so the native type carries no data beyond its type link.
struct MonsterT : public flatbuffers::NativeTable {
  typedef Monster TableType;
};
// Flat (in-buffer) accessor for the empty table Example2::Monster.
// Instances are views into a buffer; they are never constructed directly.
struct Monster FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef MonsterT NativeTableType;
  typedef MonsterBuilder Builder;
  struct Traits;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return MonsterTypeTable();
  }
  // Only the vtable needs validation: the table declares no fields.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           verifier.EndTable();
  }
  // Object-API conversions (definitions are generated elsewhere in this file).
  MonsterT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(MonsterT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Monster> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for Example2::Monster; brackets the (empty) field section between
// StartTable and EndTable on the owning FlatBufferBuilder.
struct MonsterBuilder {
  typedef Monster Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start position recorded by StartTable
  explicit MonsterBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Monster> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Monster>(end);
    return o;
  }
};
// Convenience helper: writes an (empty) Example2::Monster table into _fbb
// and returns its offset.
inline flatbuffers::Offset<Monster> CreateMonster(
    flatbuffers::FlatBufferBuilder &_fbb) {
  MonsterBuilder table_builder(_fbb);
  return table_builder.Finish();
}
// Compile-time traits: lets generic code map the table type to its Create
// function without knowing the name.
struct Monster::Traits {
  using type = Monster;
  static auto constexpr Create = CreateMonster;
};
flatbuffers::Offset<Monster> CreateMonster(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
} // namespace Example2
namespace Example {
// Native (object API) counterpart of TestSimpleTableWithEnum; owns its one
// field as a plain C++ value with the schema default (Green).
struct TestSimpleTableWithEnumT : public flatbuffers::NativeTable {
  typedef TestSimpleTableWithEnum TableType;
  MyGame::Example::Color color = MyGame::Example::Color::Green;
};
// Flat (in-buffer) accessor for TestSimpleTableWithEnum: a table with a
// single enum field. Instances are views into a buffer.
struct TestSimpleTableWithEnum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TestSimpleTableWithEnumT NativeTableType;
  typedef TestSimpleTableWithEnumBuilder Builder;
  struct Traits;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return TestSimpleTableWithEnumTypeTable();
  }
  // Byte offsets of each field's entry in the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COLOR = 4
  };
  // Reads the field, falling back to the schema default (2, i.e. Green —
  // matching TestSimpleTableWithEnumT above) when the field is absent.
  MyGame::Example::Color color() const {
    return static_cast<MyGame::Example::Color>(GetField<uint8_t>(VT_COLOR, 2));
  }
  // Returns false if the field is absent from the buffer (defaulted fields
  // are not stored and therefore cannot be mutated in place).
  bool mutate_color(MyGame::Example::Color _color) {
    return SetField<uint8_t>(VT_COLOR, static_cast<uint8_t>(_color), 2);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_COLOR) &&
           verifier.EndTable();
  }
  // Object-API conversions (definitions are generated elsewhere in this file).
  TestSimpleTableWithEnumT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(TestSimpleTableWithEnumT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<TestSimpleTableWithEnum> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TestSimpleTableWithEnumT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for TestSimpleTableWithEnum; add_color writes the field only when
// it differs from the default (2), per AddElement's default-elision.
struct TestSimpleTableWithEnumBuilder {
  typedef TestSimpleTableWithEnum Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start position recorded by StartTable
  void add_color(MyGame::Example::Color color) {
    fbb_.AddElement<uint8_t>(TestSimpleTableWithEnum::VT_COLOR, static_cast<uint8_t>(color), 2);
  }
  explicit TestSimpleTableWithEnumBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<TestSimpleTableWithEnum> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TestSimpleTableWithEnum>(end);
    return o;
  }
};
// Convenience helper: builds a TestSimpleTableWithEnum table on _fbb in one
// call and returns its offset.
inline flatbuffers::Offset<TestSimpleTableWithEnum> CreateTestSimpleTableWithEnum(
    flatbuffers::FlatBufferBuilder &_fbb,
    MyGame::Example::Color color = MyGame::Example::Color::Green) {
  TestSimpleTableWithEnumBuilder table_builder(_fbb);
  table_builder.add_color(color);
  return table_builder.Finish();
}
// Compile-time traits: lets generic code map the table type to its Create
// function without knowing the name.
struct TestSimpleTableWithEnum::Traits {
  using type = TestSimpleTableWithEnum;
  static auto constexpr Create = CreateTestSimpleTableWithEnum;
};
flatbuffers::Offset<TestSimpleTableWithEnum> CreateTestSimpleTableWithEnum(flatbuffers::FlatBufferBuilder &_fbb, const TestSimpleTableWithEnumT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object API) counterpart of Stat; owns its fields as plain C++
// values (std::string instead of an in-buffer string).
struct StatT : public flatbuffers::NativeTable {
  typedef Stat TableType;
  std::string id{};
  int64_t val = 0;
  uint16_t count = 0;
};
// Flat (in-buffer) accessor for Stat (string id, int64 val, uint16 count).
// Instances are views into a buffer; they are never constructed directly.
struct Stat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef StatT NativeTableType;
  typedef StatBuilder Builder;
  struct Traits;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return StatTypeTable();
  }
  // Byte offsets of each field's entry in the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ID = 4,
    VT_VAL = 6,
    VT_COUNT = 8
  };
  // Returns nullptr when the field is absent from the buffer.
  const flatbuffers::String *id() const {
    return GetPointer<const flatbuffers::String *>(VT_ID);
  }
  flatbuffers::String *mutable_id() {
    return GetPointer<flatbuffers::String *>(VT_ID);
  }
  int64_t val() const {
    return GetField<int64_t>(VT_VAL, 0);
  }
  // mutate_* return false when the field was elided (equal to default) and
  // therefore has no storage to write into.
  bool mutate_val(int64_t _val) {
    return SetField<int64_t>(VT_VAL, _val, 0);
  }
  uint16_t count() const {
    return GetField<uint16_t>(VT_COUNT, 0);
  }
  bool mutate_count(uint16_t _count) {
    return SetField<uint16_t>(VT_COUNT, _count, 0);
  }
  // Structural validation of every field against the buffer bounds.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_ID) &&
           verifier.VerifyString(id()) &&
           VerifyField<int64_t>(verifier, VT_VAL) &&
           VerifyField<uint16_t>(verifier, VT_COUNT) &&
           verifier.EndTable();
  }
  // Object-API conversions (definitions are generated elsewhere in this file).
  StatT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(StatT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Stat> Pack(flatbuffers::FlatBufferBuilder &_fbb, const StatT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for Stat; scalar add_* calls elide fields equal to their default
// (the last argument to AddElement).
struct StatBuilder {
  typedef Stat Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start position recorded by StartTable
  void add_id(flatbuffers::Offset<flatbuffers::String> id) {
    fbb_.AddOffset(Stat::VT_ID, id);
  }
  void add_val(int64_t val) {
    fbb_.AddElement<int64_t>(Stat::VT_VAL, val, 0);
  }
  void add_count(uint16_t count) {
    fbb_.AddElement<uint16_t>(Stat::VT_COUNT, count, 0);
  }
  explicit StatBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Stat> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Stat>(end);
    return o;
  }
};
// Builds a Stat table on _fbb in one call. Fields are added in decreasing
// size order (val: 8 bytes, id: 4-byte offset, count: 2 bytes); do not
// reorder these calls, as add order determines the serialized layout.
inline flatbuffers::Offset<Stat> CreateStat(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> id = 0,
    int64_t val = 0,
    uint16_t count = 0) {
  StatBuilder stat_builder(_fbb);
  stat_builder.add_val(val);
  stat_builder.add_id(id);
  stat_builder.add_count(count);
  return stat_builder.Finish();
}
// Compile-time traits: lets generic code map the table type to its Create
// function without knowing the name.
struct Stat::Traits {
  using type = Stat;
  static auto constexpr Create = CreateStat;
};
// Like CreateStat, but accepts a C string for `id` and serializes it into
// the buffer first. A null `id` yields offset 0, i.e. the field is absent.
inline flatbuffers::Offset<Stat> CreateStatDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *id = nullptr,
    int64_t val = 0,
    uint16_t count = 0) {
  auto id_offset = id ? _fbb.CreateString(id) : 0;
  return MyGame::Example::CreateStat(
      _fbb,
      id_offset,
      val,
      count);
}
flatbuffers::Offset<Stat> CreateStat(flatbuffers::FlatBufferBuilder &_fbb, const StatT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object API) counterpart of Referrable; owns its single id field.
struct ReferrableT : public flatbuffers::NativeTable {
  typedef Referrable TableType;
  uint64_t id = 0;
};
// Flat (in-buffer) accessor for Referrable. `id` is the key field: the
// KeyCompare* members support sorted vectors and binary-search lookup.
struct Referrable FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef ReferrableT NativeTableType;
  typedef ReferrableBuilder Builder;
  struct Traits;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return ReferrableTypeTable();
  }
  // Byte offsets of each field's entry in the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ID = 4
  };
  uint64_t id() const {
    return GetField<uint64_t>(VT_ID, 0);
  }
  // Returns false when the field was elided (equal to default) and therefore
  // has no storage to write into.
  bool mutate_id(uint64_t _id) {
    return SetField<uint64_t>(VT_ID, _id, 0);
  }
  // Strict-weak ordering on the key field, used when sorting vectors.
  bool KeyCompareLessThan(const Referrable *o) const {
    return id() < o->id();
  }
  // Three-way compare against a key value (-1/0/+1) without subtraction overflow.
  int KeyCompareWithValue(uint64_t val) const {
    return static_cast<int>(id() > val) - static_cast<int>(id() < val);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint64_t>(verifier, VT_ID) &&
           verifier.EndTable();
  }
  // Object-API conversions (definitions are generated elsewhere in this file).
  ReferrableT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ReferrableT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Referrable> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for Referrable; add_id elides the field when it equals the
// default (0).
struct ReferrableBuilder {
  typedef Referrable Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start position recorded by StartTable
  void add_id(uint64_t id) {
    fbb_.AddElement<uint64_t>(Referrable::VT_ID, id, 0);
  }
  explicit ReferrableBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Referrable> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Referrable>(end);
    return o;
  }
};
// Convenience helper: builds a Referrable table on _fbb in one call and
// returns its offset.
inline flatbuffers::Offset<Referrable> CreateReferrable(
    flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t id = 0) {
  ReferrableBuilder table_builder(_fbb);
  table_builder.add_id(id);
  return table_builder.Finish();
}
// Compile-time traits: lets generic code map the table type to its Create
// function without knowing the name.
struct Referrable::Traits {
  using type = Referrable;
  static auto constexpr Create = CreateReferrable;
};
flatbuffers::Offset<Referrable> CreateReferrable(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object API) counterpart of MyGame::Example::Monster: owning C++
// types (std::string, std::vector, std::unique_ptr) in place of in-buffer
// offsets. Field defaults here mirror the accessors' schema defaults
// (e.g. mana=150, hp=100, testf=3.14159f).
struct MonsterT : public flatbuffers::NativeTable {
  typedef Monster TableType;
  std::unique_ptr<MyGame::Example::Vec3> pos{};
  int16_t mana = 150;
  int16_t hp = 100;
  std::string name{};
  std::vector<uint8_t> inventory{};
  MyGame::Example::Color color = MyGame::Example::Color::Blue;
  MyGame::Example::AnyUnion test{};  // tagged union wrapper (type + owned value)
  std::vector<MyGame::Example::Test> test4{};
  std::vector<std::string> testarrayofstring{};
  std::vector<std::unique_ptr<MyGame::Example::MonsterT>> testarrayoftables{};
  std::unique_ptr<MyGame::Example::MonsterT> enemy{};
  std::vector<uint8_t> testnestedflatbuffer{};
  std::unique_ptr<MyGame::Example::StatT> testempty{};
  bool testbool = false;
  int32_t testhashs32_fnv1 = 0;
  uint32_t testhashu32_fnv1 = 0;
  int64_t testhashs64_fnv1 = 0;
  uint64_t testhashu64_fnv1 = 0;
  int32_t testhashs32_fnv1a = 0;
  // NOTE(review): raw pointer, unlike the owning unique_ptr fields above —
  // presumably filled in by an external resolver and not owned; confirm.
  Stat *testhashu32_fnv1a = nullptr;
  int64_t testhashs64_fnv1a = 0;
  uint64_t testhashu64_fnv1a = 0;
  std::vector<bool> testarrayofbools{};
  float testf = 3.14159f;
  float testf2 = 3.0f;
  float testf3 = 0.0f;
  std::vector<std::string> testarrayofstring2{};
  std::vector<MyGame::Example::Ability> testarrayofsortedstruct{};
  std::vector<uint8_t> flex{};
  std::vector<MyGame::Example::Test> test5{};
  std::vector<int64_t> vector_of_longs{};
  std::vector<double> vector_of_doubles{};
  std::unique_ptr<MyGame::InParentNamespaceT> parent_namespace_test{};
  std::vector<std::unique_ptr<MyGame::Example::ReferrableT>> vector_of_referrables{};
  // NOTE(review): the *_reference members below are raw pointers while the
  // *_referrables vectors own their elements — non-owning by apparent intent;
  // confirm lifetime expectations with the resolver/rehasher machinery.
  ReferrableT *single_weak_reference = nullptr;
  std::vector<ReferrableT *> vector_of_weak_references{};
  std::vector<std::unique_ptr<MyGame::Example::ReferrableT>> vector_of_strong_referrables{};
  ReferrableT *co_owning_reference = nullptr;
  std::vector<std::unique_ptr<ReferrableT>> vector_of_co_owning_references{};
  ReferrableT *non_owning_reference = nullptr;
  std::vector<ReferrableT *> vector_of_non_owning_references{};
  MyGame::Example::AnyUniqueAliasesUnion any_unique{};
  MyGame::Example::AnyAmbiguousAliasesUnion any_ambiguous{};
  std::vector<MyGame::Example::Color> vector_of_enums{};
  MyGame::Example::Race signed_enum = MyGame::Example::Race::None;
};
/// an example documentation comment: "monster object"
struct Monster FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef MonsterT NativeTableType;
typedef MonsterBuilder Builder;
struct Traits;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return MonsterTypeTable();
}
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_POS = 4,
VT_MANA = 6,
VT_HP = 8,
VT_NAME = 10,
VT_INVENTORY = 14,
VT_COLOR = 16,
VT_TEST_TYPE = 18,
VT_TEST = 20,
VT_TEST4 = 22,
VT_TESTARRAYOFSTRING = 24,
VT_TESTARRAYOFTABLES = 26,
VT_ENEMY = 28,
VT_TESTNESTEDFLATBUFFER = 30,
VT_TESTEMPTY = 32,
VT_TESTBOOL = 34,
VT_TESTHASHS32_FNV1 = 36,
VT_TESTHASHU32_FNV1 = 38,
VT_TESTHASHS64_FNV1 = 40,
VT_TESTHASHU64_FNV1 = 42,
VT_TESTHASHS32_FNV1A = 44,
VT_TESTHASHU32_FNV1A = 46,
VT_TESTHASHS64_FNV1A = 48,
VT_TESTHASHU64_FNV1A = 50,
VT_TESTARRAYOFBOOLS = 52,
VT_TESTF = 54,
VT_TESTF2 = 56,
VT_TESTF3 = 58,
VT_TESTARRAYOFSTRING2 = 60,
VT_TESTARRAYOFSORTEDSTRUCT = 62,
VT_FLEX = 64,
VT_TEST5 = 66,
VT_VECTOR_OF_LONGS = 68,
VT_VECTOR_OF_DOUBLES = 70,
VT_PARENT_NAMESPACE_TEST = 72,
VT_VECTOR_OF_REFERRABLES = 74,
VT_SINGLE_WEAK_REFERENCE = 76,
VT_VECTOR_OF_WEAK_REFERENCES = 78,
VT_VECTOR_OF_STRONG_REFERRABLES = 80,
VT_CO_OWNING_REFERENCE = 82,
VT_VECTOR_OF_CO_OWNING_REFERENCES = 84,
VT_NON_OWNING_REFERENCE = 86,
VT_VECTOR_OF_NON_OWNING_REFERENCES = 88,
VT_ANY_UNIQUE_TYPE = 90,
VT_ANY_UNIQUE = 92,
VT_ANY_AMBIGUOUS_TYPE = 94,
VT_ANY_AMBIGUOUS = 96,
VT_VECTOR_OF_ENUMS = 98,
VT_SIGNED_ENUM = 100
};
const MyGame::Example::Vec3 *pos() const {
return GetStruct<const MyGame::Example::Vec3 *>(VT_POS);
}
MyGame::Example::Vec3 *mutable_pos() {
return GetStruct<MyGame::Example::Vec3 *>(VT_POS);
}
int16_t mana() const {
return GetField<int16_t>(VT_MANA, 150);
}
bool mutate_mana(int16_t _mana) {
return SetField<int16_t>(VT_MANA, _mana, 150);
}
int16_t hp() const {
return GetField<int16_t>(VT_HP, 100);
}
bool mutate_hp(int16_t _hp) {
return SetField<int16_t>(VT_HP, _hp, 100);
}
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
}
flatbuffers::String *mutable_name() {
return GetPointer<flatbuffers::String *>(VT_NAME);
}
bool KeyCompareLessThan(const Monster *o) const {
return *name() < *o->name();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(name()->c_str(), val);
}
const flatbuffers::Vector<uint8_t> *inventory() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_INVENTORY);
}
flatbuffers::Vector<uint8_t> *mutable_inventory() {
return GetPointer<flatbuffers::Vector<uint8_t> *>(VT_INVENTORY);
}
MyGame::Example::Color color() const {
return static_cast<MyGame::Example::Color>(GetField<uint8_t>(VT_COLOR, 8));
}
bool mutate_color(MyGame::Example::Color _color) {
return SetField<uint8_t>(VT_COLOR, static_cast<uint8_t>(_color), 8);
}
MyGame::Example::Any test_type() const {
return static_cast<MyGame::Example::Any>(GetField<uint8_t>(VT_TEST_TYPE, 0));
}
const void *test() const {
return GetPointer<const void *>(VT_TEST);
}
template<typename T> const T *test_as() const;
const MyGame::Example::Monster *test_as_Monster() const {
return test_type() == MyGame::Example::Any::Monster ? static_cast<const MyGame::Example::Monster *>(test()) : nullptr;
}
const MyGame::Example::TestSimpleTableWithEnum *test_as_TestSimpleTableWithEnum() const {
return test_type() == MyGame::Example::Any::TestSimpleTableWithEnum ? static_cast<const MyGame::Example::TestSimpleTableWithEnum *>(test()) : nullptr;
}
const MyGame::Example2::Monster *test_as_MyGame_Example2_Monster() const {
return test_type() == MyGame::Example::Any::MyGame_Example2_Monster ? static_cast<const MyGame::Example2::Monster *>(test()) : nullptr;
}
void *mutable_test() {
return GetPointer<void *>(VT_TEST);
}
const flatbuffers::Vector<const MyGame::Example::Test *> *test4() const {
return GetPointer<const flatbuffers::Vector<const MyGame::Example::Test *> *>(VT_TEST4);
}
flatbuffers::Vector<const MyGame::Example::Test *> *mutable_test4() {
return GetPointer<flatbuffers::Vector<const MyGame::Example::Test *> *>(VT_TEST4);
}
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *testarrayofstring() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_TESTARRAYOFSTRING);
}
flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *mutable_testarrayofstring() {
return GetPointer<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_TESTARRAYOFSTRING);
}
/// an example documentation comment: this will end up in the generated code
/// multiline too
const flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Monster>> *testarrayoftables() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Monster>> *>(VT_TESTARRAYOFTABLES);
}
flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Monster>> *mutable_testarrayoftables() {
return GetPointer<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Monster>> *>(VT_TESTARRAYOFTABLES);
}
const MyGame::Example::Monster *enemy() const {
return GetPointer<const MyGame::Example::Monster *>(VT_ENEMY);
}
MyGame::Example::Monster *mutable_enemy() {
return GetPointer<MyGame::Example::Monster *>(VT_ENEMY);
}
const flatbuffers::Vector<uint8_t> *testnestedflatbuffer() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_TESTNESTEDFLATBUFFER);
}
flatbuffers::Vector<uint8_t> *mutable_testnestedflatbuffer() {
return GetPointer<flatbuffers::Vector<uint8_t> *>(VT_TESTNESTEDFLATBUFFER);
}
const MyGame::Example::Monster *testnestedflatbuffer_nested_root() const {
return flatbuffers::GetRoot<MyGame::Example::Monster>(testnestedflatbuffer()->Data());
}
const MyGame::Example::Stat *testempty() const {
return GetPointer<const MyGame::Example::Stat *>(VT_TESTEMPTY);
}
MyGame::Example::Stat *mutable_testempty() {
return GetPointer<MyGame::Example::Stat *>(VT_TESTEMPTY);
}
bool testbool() const {
return GetField<uint8_t>(VT_TESTBOOL, 0) != 0;
}
bool mutate_testbool(bool _testbool) {
return SetField<uint8_t>(VT_TESTBOOL, static_cast<uint8_t>(_testbool), 0);
}
int32_t testhashs32_fnv1() const {
return GetField<int32_t>(VT_TESTHASHS32_FNV1, 0);
}
bool mutate_testhashs32_fnv1(int32_t _testhashs32_fnv1) {
return SetField<int32_t>(VT_TESTHASHS32_FNV1, _testhashs32_fnv1, 0);
}
uint32_t testhashu32_fnv1() const {
return GetField<uint32_t>(VT_TESTHASHU32_FNV1, 0);
}
bool mutate_testhashu32_fnv1(uint32_t _testhashu32_fnv1) {
return SetField<uint32_t>(VT_TESTHASHU32_FNV1, _testhashu32_fnv1, 0);
}
int64_t testhashs64_fnv1() const {
return GetField<int64_t>(VT_TESTHASHS64_FNV1, 0);
}
bool mutate_testhashs64_fnv1(int64_t _testhashs64_fnv1) {
return SetField<int64_t>(VT_TESTHASHS64_FNV1, _testhashs64_fnv1, 0);
}
uint64_t testhashu64_fnv1() const {
return GetField<uint64_t>(VT_TESTHASHU64_FNV1, 0);
}
bool mutate_testhashu64_fnv1(uint64_t _testhashu64_fnv1) {
return SetField<uint64_t>(VT_TESTHASHU64_FNV1, _testhashu64_fnv1, 0);
}
int32_t testhashs32_fnv1a() const {
return GetField<int32_t>(VT_TESTHASHS32_FNV1A, 0);
}
bool mutate_testhashs32_fnv1a(int32_t _testhashs32_fnv1a) {
return SetField<int32_t>(VT_TESTHASHS32_FNV1A, _testhashs32_fnv1a, 0);
}
uint32_t testhashu32_fnv1a() const {
return GetField<uint32_t>(VT_TESTHASHU32_FNV1A, 0);
}
bool mutate_testhashu32_fnv1a(uint32_t _testhashu32_fnv1a) {
return SetField<uint32_t>(VT_TESTHASHU32_FNV1A, _testhashu32_fnv1a, 0);
}
int64_t testhashs64_fnv1a() const {
return GetField<int64_t>(VT_TESTHASHS64_FNV1A, 0);
}
bool mutate_testhashs64_fnv1a(int64_t _testhashs64_fnv1a) {
return SetField<int64_t>(VT_TESTHASHS64_FNV1A, _testhashs64_fnv1a, 0);
}
uint64_t testhashu64_fnv1a() const {
return GetField<uint64_t>(VT_TESTHASHU64_FNV1A, 0);
}
bool mutate_testhashu64_fnv1a(uint64_t _testhashu64_fnv1a) {
return SetField<uint64_t>(VT_TESTHASHU64_FNV1A, _testhashu64_fnv1a, 0);
}
const flatbuffers::Vector<uint8_t> *testarrayofbools() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_TESTARRAYOFBOOLS);
}
flatbuffers::Vector<uint8_t> *mutable_testarrayofbools() {
return GetPointer<flatbuffers::Vector<uint8_t> *>(VT_TESTARRAYOFBOOLS);
}
float testf() const {
return GetField<float>(VT_TESTF, 3.14159f);
}
bool mutate_testf(float _testf) {
return SetField<float>(VT_TESTF, _testf, 3.14159f);
}
float testf2() const {
return GetField<float>(VT_TESTF2, 3.0f);
}
bool mutate_testf2(float _testf2) {
return SetField<float>(VT_TESTF2, _testf2, 3.0f);
}
float testf3() const {
return GetField<float>(VT_TESTF3, 0.0f);
}
bool mutate_testf3(float _testf3) {
return SetField<float>(VT_TESTF3, _testf3, 0.0f);
}
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *testarrayofstring2() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_TESTARRAYOFSTRING2);
}
flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *mutable_testarrayofstring2() {
return GetPointer<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_TESTARRAYOFSTRING2);
}
const flatbuffers::Vector<const MyGame::Example::Ability *> *testarrayofsortedstruct() const {
return GetPointer<const flatbuffers::Vector<const MyGame::Example::Ability *> *>(VT_TESTARRAYOFSORTEDSTRUCT);
}
flatbuffers::Vector<const MyGame::Example::Ability *> *mutable_testarrayofsortedstruct() {
return GetPointer<flatbuffers::Vector<const MyGame::Example::Ability *> *>(VT_TESTARRAYOFSORTEDSTRUCT);
}
const flatbuffers::Vector<uint8_t> *flex() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_FLEX);
}
flatbuffers::Vector<uint8_t> *mutable_flex() {
return GetPointer<flatbuffers::Vector<uint8_t> *>(VT_FLEX);
}
flexbuffers::Reference flex_flexbuffer_root() const {
return flexbuffers::GetRoot(flex()->Data(), flex()->size());
}
const flatbuffers::Vector<const MyGame::Example::Test *> *test5() const {
return GetPointer<const flatbuffers::Vector<const MyGame::Example::Test *> *>(VT_TEST5);
}
flatbuffers::Vector<const MyGame::Example::Test *> *mutable_test5() {
return GetPointer<flatbuffers::Vector<const MyGame::Example::Test *> *>(VT_TEST5);
}
const flatbuffers::Vector<int64_t> *vector_of_longs() const {
return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_VECTOR_OF_LONGS);
}
flatbuffers::Vector<int64_t> *mutable_vector_of_longs() {
return GetPointer<flatbuffers::Vector<int64_t> *>(VT_VECTOR_OF_LONGS);
}
const flatbuffers::Vector<double> *vector_of_doubles() const {
return GetPointer<const flatbuffers::Vector<double> *>(VT_VECTOR_OF_DOUBLES);
}
flatbuffers::Vector<double> *mutable_vector_of_doubles() {
return GetPointer<flatbuffers::Vector<double> *>(VT_VECTOR_OF_DOUBLES);
}
const MyGame::InParentNamespace *parent_namespace_test() const {
return GetPointer<const MyGame::InParentNamespace *>(VT_PARENT_NAMESPACE_TEST);
}
MyGame::InParentNamespace *mutable_parent_namespace_test() {
return GetPointer<MyGame::InParentNamespace *>(VT_PARENT_NAMESPACE_TEST);
}
const flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *vector_of_referrables() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *>(VT_VECTOR_OF_REFERRABLES);
}
flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *mutable_vector_of_referrables() {
return GetPointer<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *>(VT_VECTOR_OF_REFERRABLES);
}
uint64_t single_weak_reference() const {
return GetField<uint64_t>(VT_SINGLE_WEAK_REFERENCE, 0);
}
bool mutate_single_weak_reference(uint64_t _single_weak_reference) {
return SetField<uint64_t>(VT_SINGLE_WEAK_REFERENCE, _single_weak_reference, 0);
}
const flatbuffers::Vector<uint64_t> *vector_of_weak_references() const {
return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_WEAK_REFERENCES);
}
flatbuffers::Vector<uint64_t> *mutable_vector_of_weak_references() {
return GetPointer<flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_WEAK_REFERENCES);
}
const flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *vector_of_strong_referrables() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *>(VT_VECTOR_OF_STRONG_REFERRABLES);
}
flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *mutable_vector_of_strong_referrables() {
return GetPointer<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>> *>(VT_VECTOR_OF_STRONG_REFERRABLES);
}
uint64_t co_owning_reference() const {
return GetField<uint64_t>(VT_CO_OWNING_REFERENCE, 0);
}
bool mutate_co_owning_reference(uint64_t _co_owning_reference) {
return SetField<uint64_t>(VT_CO_OWNING_REFERENCE, _co_owning_reference, 0);
}
const flatbuffers::Vector<uint64_t> *vector_of_co_owning_references() const {
return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_CO_OWNING_REFERENCES);
}
flatbuffers::Vector<uint64_t> *mutable_vector_of_co_owning_references() {
return GetPointer<flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_CO_OWNING_REFERENCES);
}
uint64_t non_owning_reference() const {
return GetField<uint64_t>(VT_NON_OWNING_REFERENCE, 0);
}
bool mutate_non_owning_reference(uint64_t _non_owning_reference) {
return SetField<uint64_t>(VT_NON_OWNING_REFERENCE, _non_owning_reference, 0);
}
const flatbuffers::Vector<uint64_t> *vector_of_non_owning_references() const {
return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_NON_OWNING_REFERENCES);
}
flatbuffers::Vector<uint64_t> *mutable_vector_of_non_owning_references() {
return GetPointer<flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_NON_OWNING_REFERENCES);
}
MyGame::Example::AnyUniqueAliases any_unique_type() const {
return static_cast<MyGame::Example::AnyUniqueAliases>(GetField<uint8_t>(VT_ANY_UNIQUE_TYPE, 0));
}
const void *any_unique() const {
return GetPointer<const void *>(VT_ANY_UNIQUE);
}
template<typename T> const T *any_unique_as() const;
const MyGame::Example::Monster *any_unique_as_M() const {
return any_unique_type() == MyGame::Example::AnyUniqueAliases::M ? static_cast<const MyGame::Example::Monster *>(any_unique()) : nullptr;
}
// Typed accessors for the `any_unique` / `any_ambiguous` union fields and the
// `vector_of_enums` / `signed_enum` fields.  Each *_as_X() checks the stored
// union type tag and returns a typed pointer, or nullptr when the tag does not
// match the requested alias.
// NOTE(review): this appears to be flatc-generated FlatBuffers code — prefer
// editing the schema and regenerating over patching this file by hand.
const MyGame::Example::TestSimpleTableWithEnum *any_unique_as_TS() const {
  return any_unique_type() == MyGame::Example::AnyUniqueAliases::TS ? static_cast<const MyGame::Example::TestSimpleTableWithEnum *>(any_unique()) : nullptr;
}
const MyGame::Example2::Monster *any_unique_as_M2() const {
  return any_unique_type() == MyGame::Example::AnyUniqueAliases::M2 ? static_cast<const MyGame::Example2::Monster *>(any_unique()) : nullptr;
}
// Non-const access to the raw union payload (caller must consult the type tag).
void *mutable_any_unique() {
  return GetPointer<void *>(VT_ANY_UNIQUE);
}
// Type tag for the `any_ambiguous` union; stored as uint8_t, default 0 (NONE).
MyGame::Example::AnyAmbiguousAliases any_ambiguous_type() const {
  return static_cast<MyGame::Example::AnyAmbiguousAliases>(GetField<uint8_t>(VT_ANY_AMBIGUOUS_TYPE, 0));
}
const void *any_ambiguous() const {
  return GetPointer<const void *>(VT_ANY_AMBIGUOUS);
}
// All three aliases of `any_ambiguous` resolve to the same underlying table
// type (Monster); only the tag value distinguishes them.
const MyGame::Example::Monster *any_ambiguous_as_M1() const {
  return any_ambiguous_type() == MyGame::Example::AnyAmbiguousAliases::M1 ? static_cast<const MyGame::Example::Monster *>(any_ambiguous()) : nullptr;
}
const MyGame::Example::Monster *any_ambiguous_as_M2() const {
  return any_ambiguous_type() == MyGame::Example::AnyAmbiguousAliases::M2 ? static_cast<const MyGame::Example::Monster *>(any_ambiguous()) : nullptr;
}
const MyGame::Example::Monster *any_ambiguous_as_M3() const {
  return any_ambiguous_type() == MyGame::Example::AnyAmbiguousAliases::M3 ? static_cast<const MyGame::Example::Monster *>(any_ambiguous()) : nullptr;
}
void *mutable_any_ambiguous() {
  return GetPointer<void *>(VT_ANY_AMBIGUOUS);
}
const flatbuffers::Vector<MyGame::Example::Color> *vector_of_enums() const {
  return GetPointer<const flatbuffers::Vector<MyGame::Example::Color> *>(VT_VECTOR_OF_ENUMS);
}
flatbuffers::Vector<MyGame::Example::Color> *mutable_vector_of_enums() {
  return GetPointer<flatbuffers::Vector<MyGame::Example::Color> *>(VT_VECTOR_OF_ENUMS);
}
// Signed enum field; note the schema default is -1 (Race::None).
MyGame::Example::Race signed_enum() const {
  return static_cast<MyGame::Example::Race>(GetField<int8_t>(VT_SIGNED_ENUM, -1));
}
// In-place mutation; returns false if the field is absent (stored as default).
bool mutate_signed_enum(MyGame::Example::Race _signed_enum) {
  return SetField<int8_t>(VT_SIGNED_ENUM, static_cast<int8_t>(_signed_enum), -1);
}
// Structural verification of an untrusted buffer: checks that every field's
// vtable entry, scalar storage, string/vector bounds, nested tables and union
// payloads lie inside the buffer.  Short-circuits (&&) on the first failure.
// The check order mirrors the schema's field declaration order.
bool Verify(flatbuffers::Verifier &verifier) const {
  return VerifyTableStart(verifier) &&
         VerifyField<MyGame::Example::Vec3>(verifier, VT_POS) &&
         VerifyField<int16_t>(verifier, VT_MANA) &&
         VerifyField<int16_t>(verifier, VT_HP) &&
         VerifyOffsetRequired(verifier, VT_NAME) &&  // `name` is a required field
         verifier.VerifyString(name()) &&
         VerifyOffset(verifier, VT_INVENTORY) &&
         verifier.VerifyVector(inventory()) &&
         VerifyField<uint8_t>(verifier, VT_COLOR) &&
         VerifyField<uint8_t>(verifier, VT_TEST_TYPE) &&
         VerifyOffset(verifier, VT_TEST) &&
         VerifyAny(verifier, test(), test_type()) &&  // union payload must match its tag
         VerifyOffset(verifier, VT_TEST4) &&
         verifier.VerifyVector(test4()) &&
         VerifyOffset(verifier, VT_TESTARRAYOFSTRING) &&
         verifier.VerifyVector(testarrayofstring()) &&
         verifier.VerifyVectorOfStrings(testarrayofstring()) &&
         VerifyOffset(verifier, VT_TESTARRAYOFTABLES) &&
         verifier.VerifyVector(testarrayoftables()) &&
         verifier.VerifyVectorOfTables(testarrayoftables()) &&
         VerifyOffset(verifier, VT_ENEMY) &&
         verifier.VerifyTable(enemy()) &&
         VerifyOffset(verifier, VT_TESTNESTEDFLATBUFFER) &&
         verifier.VerifyVector(testnestedflatbuffer()) &&
         VerifyOffset(verifier, VT_TESTEMPTY) &&
         verifier.VerifyTable(testempty()) &&
         VerifyField<uint8_t>(verifier, VT_TESTBOOL) &&
         VerifyField<int32_t>(verifier, VT_TESTHASHS32_FNV1) &&
         VerifyField<uint32_t>(verifier, VT_TESTHASHU32_FNV1) &&
         VerifyField<int64_t>(verifier, VT_TESTHASHS64_FNV1) &&
         VerifyField<uint64_t>(verifier, VT_TESTHASHU64_FNV1) &&
         VerifyField<int32_t>(verifier, VT_TESTHASHS32_FNV1A) &&
         VerifyField<uint32_t>(verifier, VT_TESTHASHU32_FNV1A) &&
         VerifyField<int64_t>(verifier, VT_TESTHASHS64_FNV1A) &&
         VerifyField<uint64_t>(verifier, VT_TESTHASHU64_FNV1A) &&
         VerifyOffset(verifier, VT_TESTARRAYOFBOOLS) &&
         verifier.VerifyVector(testarrayofbools()) &&
         VerifyField<float>(verifier, VT_TESTF) &&
         VerifyField<float>(verifier, VT_TESTF2) &&
         VerifyField<float>(verifier, VT_TESTF3) &&
         VerifyOffset(verifier, VT_TESTARRAYOFSTRING2) &&
         verifier.VerifyVector(testarrayofstring2()) &&
         verifier.VerifyVectorOfStrings(testarrayofstring2()) &&
         VerifyOffset(verifier, VT_TESTARRAYOFSORTEDSTRUCT) &&
         verifier.VerifyVector(testarrayofsortedstruct()) &&
         VerifyOffset(verifier, VT_FLEX) &&
         verifier.VerifyVector(flex()) &&
         VerifyOffset(verifier, VT_TEST5) &&
         verifier.VerifyVector(test5()) &&
         VerifyOffset(verifier, VT_VECTOR_OF_LONGS) &&
         verifier.VerifyVector(vector_of_longs()) &&
         VerifyOffset(verifier, VT_VECTOR_OF_DOUBLES) &&
         verifier.VerifyVector(vector_of_doubles()) &&
         VerifyOffset(verifier, VT_PARENT_NAMESPACE_TEST) &&
         verifier.VerifyTable(parent_namespace_test()) &&
         VerifyOffset(verifier, VT_VECTOR_OF_REFERRABLES) &&
         verifier.VerifyVector(vector_of_referrables()) &&
         verifier.VerifyVectorOfTables(vector_of_referrables()) &&
         VerifyField<uint64_t>(verifier, VT_SINGLE_WEAK_REFERENCE) &&
         VerifyOffset(verifier, VT_VECTOR_OF_WEAK_REFERENCES) &&
         verifier.VerifyVector(vector_of_weak_references()) &&
         VerifyOffset(verifier, VT_VECTOR_OF_STRONG_REFERRABLES) &&
         verifier.VerifyVector(vector_of_strong_referrables()) &&
         verifier.VerifyVectorOfTables(vector_of_strong_referrables()) &&
         VerifyField<uint64_t>(verifier, VT_CO_OWNING_REFERENCE) &&
         VerifyOffset(verifier, VT_VECTOR_OF_CO_OWNING_REFERENCES) &&
         verifier.VerifyVector(vector_of_co_owning_references()) &&
         VerifyField<uint64_t>(verifier, VT_NON_OWNING_REFERENCE) &&
         VerifyOffset(verifier, VT_VECTOR_OF_NON_OWNING_REFERENCES) &&
         verifier.VerifyVector(vector_of_non_owning_references()) &&
         VerifyField<uint8_t>(verifier, VT_ANY_UNIQUE_TYPE) &&
         VerifyOffset(verifier, VT_ANY_UNIQUE) &&
         VerifyAnyUniqueAliases(verifier, any_unique(), any_unique_type()) &&
         VerifyField<uint8_t>(verifier, VT_ANY_AMBIGUOUS_TYPE) &&
         VerifyOffset(verifier, VT_ANY_AMBIGUOUS) &&
         VerifyAnyAmbiguousAliases(verifier, any_ambiguous(), any_ambiguous_type()) &&
         VerifyOffset(verifier, VT_VECTOR_OF_ENUMS) &&
         verifier.VerifyVector(vector_of_enums()) &&
         VerifyField<int8_t>(verifier, VT_SIGNED_ENUM) &&
         verifier.EndTable();
}
// Object-API bridge: convert between this flat table and the native MonsterT
// type.  UnPack allocates a new MonsterT (caller owns it); UnPackTo fills an
// existing one; Pack serializes a MonsterT into a builder.  Definitions are
// emitted later in the generated file.
MonsterT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(MonsterT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<Monster> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Template specializations that map a requested C++ type onto the matching
// tag-checked union accessor, enabling `monster->test_as<T>()` /
// `monster->any_unique_as<T>()` instead of the alias-named variants.
template<> inline const MyGame::Example::Monster *Monster::test_as<MyGame::Example::Monster>() const {
  return test_as_Monster();
}
template<> inline const MyGame::Example::TestSimpleTableWithEnum *Monster::test_as<MyGame::Example::TestSimpleTableWithEnum>() const {
  return test_as_TestSimpleTableWithEnum();
}
template<> inline const MyGame::Example2::Monster *Monster::test_as<MyGame::Example2::Monster>() const {
  return test_as_MyGame_Example2_Monster();
}
template<> inline const MyGame::Example::Monster *Monster::any_unique_as<MyGame::Example::Monster>() const {
  return any_unique_as_M();
}
template<> inline const MyGame::Example::TestSimpleTableWithEnum *Monster::any_unique_as<MyGame::Example::TestSimpleTableWithEnum>() const {
  return any_unique_as_TS();
}
template<> inline const MyGame::Example2::Monster *Monster::any_unique_as<MyGame::Example2::Monster>() const {
  return any_unique_as_M2();
}
// Builder for the Monster table.  Each add_* call writes one field into the
// FlatBufferBuilder; AddElement skips the write when the value equals the
// schema default (shown as the last argument), so default-valued fields cost
// no buffer space.  Finish() closes the table and enforces required fields.
// NOTE(review): flatc-generated code — regenerate from the schema instead of
// editing by hand.
struct MonsterBuilder {
  typedef Monster Table;
  flatbuffers::FlatBufferBuilder &fbb_;   // builder this table is written into
  flatbuffers::uoffset_t start_;          // offset returned by StartTable()
  void add_pos(const MyGame::Example::Vec3 *pos) {
    fbb_.AddStruct(Monster::VT_POS, pos);
  }
  void add_mana(int16_t mana) {
    fbb_.AddElement<int16_t>(Monster::VT_MANA, mana, 150);
  }
  void add_hp(int16_t hp) {
    fbb_.AddElement<int16_t>(Monster::VT_HP, hp, 100);
  }
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(Monster::VT_NAME, name);
  }
  void add_inventory(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> inventory) {
    fbb_.AddOffset(Monster::VT_INVENTORY, inventory);
  }
  void add_color(MyGame::Example::Color color) {
    fbb_.AddElement<uint8_t>(Monster::VT_COLOR, static_cast<uint8_t>(color), 8);
  }
  // Union `test`: the type tag and the payload offset are separate fields.
  void add_test_type(MyGame::Example::Any test_type) {
    fbb_.AddElement<uint8_t>(Monster::VT_TEST_TYPE, static_cast<uint8_t>(test_type), 0);
  }
  void add_test(flatbuffers::Offset<void> test) {
    fbb_.AddOffset(Monster::VT_TEST, test);
  }
  void add_test4(flatbuffers::Offset<flatbuffers::Vector<const MyGame::Example::Test *>> test4) {
    fbb_.AddOffset(Monster::VT_TEST4, test4);
  }
  void add_testarrayofstring(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> testarrayofstring) {
    fbb_.AddOffset(Monster::VT_TESTARRAYOFSTRING, testarrayofstring);
  }
  void add_testarrayoftables(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Monster>>> testarrayoftables) {
    fbb_.AddOffset(Monster::VT_TESTARRAYOFTABLES, testarrayoftables);
  }
  void add_enemy(flatbuffers::Offset<MyGame::Example::Monster> enemy) {
    fbb_.AddOffset(Monster::VT_ENEMY, enemy);
  }
  void add_testnestedflatbuffer(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> testnestedflatbuffer) {
    fbb_.AddOffset(Monster::VT_TESTNESTEDFLATBUFFER, testnestedflatbuffer);
  }
  void add_testempty(flatbuffers::Offset<MyGame::Example::Stat> testempty) {
    fbb_.AddOffset(Monster::VT_TESTEMPTY, testempty);
  }
  void add_testbool(bool testbool) {
    fbb_.AddElement<uint8_t>(Monster::VT_TESTBOOL, static_cast<uint8_t>(testbool), 0);
  }
  void add_testhashs32_fnv1(int32_t testhashs32_fnv1) {
    fbb_.AddElement<int32_t>(Monster::VT_TESTHASHS32_FNV1, testhashs32_fnv1, 0);
  }
  void add_testhashu32_fnv1(uint32_t testhashu32_fnv1) {
    fbb_.AddElement<uint32_t>(Monster::VT_TESTHASHU32_FNV1, testhashu32_fnv1, 0);
  }
  void add_testhashs64_fnv1(int64_t testhashs64_fnv1) {
    fbb_.AddElement<int64_t>(Monster::VT_TESTHASHS64_FNV1, testhashs64_fnv1, 0);
  }
  void add_testhashu64_fnv1(uint64_t testhashu64_fnv1) {
    fbb_.AddElement<uint64_t>(Monster::VT_TESTHASHU64_FNV1, testhashu64_fnv1, 0);
  }
  void add_testhashs32_fnv1a(int32_t testhashs32_fnv1a) {
    fbb_.AddElement<int32_t>(Monster::VT_TESTHASHS32_FNV1A, testhashs32_fnv1a, 0);
  }
  void add_testhashu32_fnv1a(uint32_t testhashu32_fnv1a) {
    fbb_.AddElement<uint32_t>(Monster::VT_TESTHASHU32_FNV1A, testhashu32_fnv1a, 0);
  }
  void add_testhashs64_fnv1a(int64_t testhashs64_fnv1a) {
    fbb_.AddElement<int64_t>(Monster::VT_TESTHASHS64_FNV1A, testhashs64_fnv1a, 0);
  }
  void add_testhashu64_fnv1a(uint64_t testhashu64_fnv1a) {
    fbb_.AddElement<uint64_t>(Monster::VT_TESTHASHU64_FNV1A, testhashu64_fnv1a, 0);
  }
  void add_testarrayofbools(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> testarrayofbools) {
    fbb_.AddOffset(Monster::VT_TESTARRAYOFBOOLS, testarrayofbools);
  }
  void add_testf(float testf) {
    fbb_.AddElement<float>(Monster::VT_TESTF, testf, 3.14159f);
  }
  void add_testf2(float testf2) {
    fbb_.AddElement<float>(Monster::VT_TESTF2, testf2, 3.0f);
  }
  void add_testf3(float testf3) {
    fbb_.AddElement<float>(Monster::VT_TESTF3, testf3, 0.0f);
  }
  void add_testarrayofstring2(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> testarrayofstring2) {
    fbb_.AddOffset(Monster::VT_TESTARRAYOFSTRING2, testarrayofstring2);
  }
  void add_testarrayofsortedstruct(flatbuffers::Offset<flatbuffers::Vector<const MyGame::Example::Ability *>> testarrayofsortedstruct) {
    fbb_.AddOffset(Monster::VT_TESTARRAYOFSORTEDSTRUCT, testarrayofsortedstruct);
  }
  void add_flex(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> flex) {
    fbb_.AddOffset(Monster::VT_FLEX, flex);
  }
  void add_test5(flatbuffers::Offset<flatbuffers::Vector<const MyGame::Example::Test *>> test5) {
    fbb_.AddOffset(Monster::VT_TEST5, test5);
  }
  void add_vector_of_longs(flatbuffers::Offset<flatbuffers::Vector<int64_t>> vector_of_longs) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_LONGS, vector_of_longs);
  }
  void add_vector_of_doubles(flatbuffers::Offset<flatbuffers::Vector<double>> vector_of_doubles) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_DOUBLES, vector_of_doubles);
  }
  void add_parent_namespace_test(flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test) {
    fbb_.AddOffset(Monster::VT_PARENT_NAMESPACE_TEST, parent_namespace_test);
  }
  void add_vector_of_referrables(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>>> vector_of_referrables) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_REFERRABLES, vector_of_referrables);
  }
  void add_single_weak_reference(uint64_t single_weak_reference) {
    fbb_.AddElement<uint64_t>(Monster::VT_SINGLE_WEAK_REFERENCE, single_weak_reference, 0);
  }
  void add_vector_of_weak_references(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_weak_references) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_WEAK_REFERENCES, vector_of_weak_references);
  }
  void add_vector_of_strong_referrables(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>>> vector_of_strong_referrables) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_STRONG_REFERRABLES, vector_of_strong_referrables);
  }
  void add_co_owning_reference(uint64_t co_owning_reference) {
    fbb_.AddElement<uint64_t>(Monster::VT_CO_OWNING_REFERENCE, co_owning_reference, 0);
  }
  void add_vector_of_co_owning_references(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_co_owning_references) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_CO_OWNING_REFERENCES, vector_of_co_owning_references);
  }
  void add_non_owning_reference(uint64_t non_owning_reference) {
    fbb_.AddElement<uint64_t>(Monster::VT_NON_OWNING_REFERENCE, non_owning_reference, 0);
  }
  void add_vector_of_non_owning_references(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_non_owning_references) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_NON_OWNING_REFERENCES, vector_of_non_owning_references);
  }
  void add_any_unique_type(MyGame::Example::AnyUniqueAliases any_unique_type) {
    fbb_.AddElement<uint8_t>(Monster::VT_ANY_UNIQUE_TYPE, static_cast<uint8_t>(any_unique_type), 0);
  }
  void add_any_unique(flatbuffers::Offset<void> any_unique) {
    fbb_.AddOffset(Monster::VT_ANY_UNIQUE, any_unique);
  }
  void add_any_ambiguous_type(MyGame::Example::AnyAmbiguousAliases any_ambiguous_type) {
    fbb_.AddElement<uint8_t>(Monster::VT_ANY_AMBIGUOUS_TYPE, static_cast<uint8_t>(any_ambiguous_type), 0);
  }
  void add_any_ambiguous(flatbuffers::Offset<void> any_ambiguous) {
    fbb_.AddOffset(Monster::VT_ANY_AMBIGUOUS, any_ambiguous);
  }
  void add_vector_of_enums(flatbuffers::Offset<flatbuffers::Vector<MyGame::Example::Color>> vector_of_enums) {
    fbb_.AddOffset(Monster::VT_VECTOR_OF_ENUMS, vector_of_enums);
  }
  void add_signed_enum(MyGame::Example::Race signed_enum) {
    fbb_.AddElement<int8_t>(Monster::VT_SIGNED_ENUM, static_cast<int8_t>(signed_enum), -1);
  }
  explicit MonsterBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Closes the table and validates required fields (`name` must have been set).
  flatbuffers::Offset<Monster> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Monster>(end);
    fbb_.Required(o, Monster::VT_NAME);
    return o;
  }
};
// Convenience wrapper that builds a complete Monster table in one call.
// Defaults mirror the schema (mana=150, hp=100, color=Blue, testf=3.14159f,
// signed_enum=Race::None).  The add_* calls below are ordered largest field
// first (8-byte scalars, then offsets, then 4/2/1-byte scalars) — this is the
// generator's alignment-friendly layout order, NOT the parameter order; do not
// reorder them.
inline flatbuffers::Offset<Monster> CreateMonster(
    flatbuffers::FlatBufferBuilder &_fbb,
    const MyGame::Example::Vec3 *pos = 0,
    int16_t mana = 150,
    int16_t hp = 100,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> inventory = 0,
    MyGame::Example::Color color = MyGame::Example::Color::Blue,
    MyGame::Example::Any test_type = MyGame::Example::Any::NONE,
    flatbuffers::Offset<void> test = 0,
    flatbuffers::Offset<flatbuffers::Vector<const MyGame::Example::Test *>> test4 = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> testarrayofstring = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Monster>>> testarrayoftables = 0,
    flatbuffers::Offset<MyGame::Example::Monster> enemy = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> testnestedflatbuffer = 0,
    flatbuffers::Offset<MyGame::Example::Stat> testempty = 0,
    bool testbool = false,
    int32_t testhashs32_fnv1 = 0,
    uint32_t testhashu32_fnv1 = 0,
    int64_t testhashs64_fnv1 = 0,
    uint64_t testhashu64_fnv1 = 0,
    int32_t testhashs32_fnv1a = 0,
    uint32_t testhashu32_fnv1a = 0,
    int64_t testhashs64_fnv1a = 0,
    uint64_t testhashu64_fnv1a = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> testarrayofbools = 0,
    float testf = 3.14159f,
    float testf2 = 3.0f,
    float testf3 = 0.0f,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> testarrayofstring2 = 0,
    flatbuffers::Offset<flatbuffers::Vector<const MyGame::Example::Ability *>> testarrayofsortedstruct = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> flex = 0,
    flatbuffers::Offset<flatbuffers::Vector<const MyGame::Example::Test *>> test5 = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> vector_of_longs = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> vector_of_doubles = 0,
    flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>>> vector_of_referrables = 0,
    uint64_t single_weak_reference = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_weak_references = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MyGame::Example::Referrable>>> vector_of_strong_referrables = 0,
    uint64_t co_owning_reference = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_co_owning_references = 0,
    uint64_t non_owning_reference = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_non_owning_references = 0,
    MyGame::Example::AnyUniqueAliases any_unique_type = MyGame::Example::AnyUniqueAliases::NONE,
    flatbuffers::Offset<void> any_unique = 0,
    MyGame::Example::AnyAmbiguousAliases any_ambiguous_type = MyGame::Example::AnyAmbiguousAliases::NONE,
    flatbuffers::Offset<void> any_ambiguous = 0,
    flatbuffers::Offset<flatbuffers::Vector<MyGame::Example::Color>> vector_of_enums = 0,
    MyGame::Example::Race signed_enum = MyGame::Example::Race::None) {
  MonsterBuilder builder_(_fbb);
  // 8-byte scalar fields first.
  builder_.add_non_owning_reference(non_owning_reference);
  builder_.add_co_owning_reference(co_owning_reference);
  builder_.add_single_weak_reference(single_weak_reference);
  builder_.add_testhashu64_fnv1a(testhashu64_fnv1a);
  builder_.add_testhashs64_fnv1a(testhashs64_fnv1a);
  builder_.add_testhashu64_fnv1(testhashu64_fnv1);
  builder_.add_testhashs64_fnv1(testhashs64_fnv1);
  // Offsets and 4-byte scalars.
  builder_.add_vector_of_enums(vector_of_enums);
  builder_.add_any_ambiguous(any_ambiguous);
  builder_.add_any_unique(any_unique);
  builder_.add_vector_of_non_owning_references(vector_of_non_owning_references);
  builder_.add_vector_of_co_owning_references(vector_of_co_owning_references);
  builder_.add_vector_of_strong_referrables(vector_of_strong_referrables);
  builder_.add_vector_of_weak_references(vector_of_weak_references);
  builder_.add_vector_of_referrables(vector_of_referrables);
  builder_.add_parent_namespace_test(parent_namespace_test);
  builder_.add_vector_of_doubles(vector_of_doubles);
  builder_.add_vector_of_longs(vector_of_longs);
  builder_.add_test5(test5);
  builder_.add_flex(flex);
  builder_.add_testarrayofsortedstruct(testarrayofsortedstruct);
  builder_.add_testarrayofstring2(testarrayofstring2);
  builder_.add_testf3(testf3);
  builder_.add_testf2(testf2);
  builder_.add_testf(testf);
  builder_.add_testarrayofbools(testarrayofbools);
  builder_.add_testhashu32_fnv1a(testhashu32_fnv1a);
  builder_.add_testhashs32_fnv1a(testhashs32_fnv1a);
  builder_.add_testhashu32_fnv1(testhashu32_fnv1);
  builder_.add_testhashs32_fnv1(testhashs32_fnv1);
  builder_.add_testempty(testempty);
  builder_.add_testnestedflatbuffer(testnestedflatbuffer);
  builder_.add_enemy(enemy);
  builder_.add_testarrayoftables(testarrayoftables);
  builder_.add_testarrayofstring(testarrayofstring);
  builder_.add_test4(test4);
  builder_.add_test(test);
  builder_.add_inventory(inventory);
  builder_.add_name(name);
  builder_.add_pos(pos);
  // 2-byte then 1-byte scalar fields last.
  builder_.add_hp(hp);
  builder_.add_mana(mana);
  builder_.add_signed_enum(signed_enum);
  builder_.add_any_ambiguous_type(any_ambiguous_type);
  builder_.add_any_unique_type(any_unique_type);
  builder_.add_testbool(testbool);
  builder_.add_test_type(test_type);
  builder_.add_color(color);
  return builder_.Finish();
}
// Compile-time traits tying the Monster table to its Create function, used by
// generic FlatBuffers helpers.
struct Monster::Traits {
  using type = Monster;
  static auto constexpr Create = CreateMonster;
};
// "Direct" variant of CreateMonster: takes C strings and std::vectors,
// serializes them into the builder first (the `__`-suffixed locals), then
// delegates to CreateMonster.  Non-const vector parameters
// (testarrayoftables, testarrayofsortedstruct, vector_of_referrables,
// vector_of_strong_referrables) are sorted in place by the
// CreateVectorOfSorted* helpers.
inline flatbuffers::Offset<Monster> CreateMonsterDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const MyGame::Example::Vec3 *pos = 0,
    int16_t mana = 150,
    int16_t hp = 100,
    const char *name = nullptr,
    const std::vector<uint8_t> *inventory = nullptr,
    MyGame::Example::Color color = MyGame::Example::Color::Blue,
    MyGame::Example::Any test_type = MyGame::Example::Any::NONE,
    flatbuffers::Offset<void> test = 0,
    const std::vector<MyGame::Example::Test> *test4 = nullptr,
    const std::vector<flatbuffers::Offset<flatbuffers::String>> *testarrayofstring = nullptr,
    std::vector<flatbuffers::Offset<MyGame::Example::Monster>> *testarrayoftables = nullptr,
    flatbuffers::Offset<MyGame::Example::Monster> enemy = 0,
    const std::vector<uint8_t> *testnestedflatbuffer = nullptr,
    flatbuffers::Offset<MyGame::Example::Stat> testempty = 0,
    bool testbool = false,
    int32_t testhashs32_fnv1 = 0,
    uint32_t testhashu32_fnv1 = 0,
    int64_t testhashs64_fnv1 = 0,
    uint64_t testhashu64_fnv1 = 0,
    int32_t testhashs32_fnv1a = 0,
    uint32_t testhashu32_fnv1a = 0,
    int64_t testhashs64_fnv1a = 0,
    uint64_t testhashu64_fnv1a = 0,
    const std::vector<uint8_t> *testarrayofbools = nullptr,
    float testf = 3.14159f,
    float testf2 = 3.0f,
    float testf3 = 0.0f,
    const std::vector<flatbuffers::Offset<flatbuffers::String>> *testarrayofstring2 = nullptr,
    std::vector<MyGame::Example::Ability> *testarrayofsortedstruct = nullptr,
    const std::vector<uint8_t> *flex = nullptr,
    const std::vector<MyGame::Example::Test> *test5 = nullptr,
    const std::vector<int64_t> *vector_of_longs = nullptr,
    const std::vector<double> *vector_of_doubles = nullptr,
    flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test = 0,
    std::vector<flatbuffers::Offset<MyGame::Example::Referrable>> *vector_of_referrables = nullptr,
    uint64_t single_weak_reference = 0,
    const std::vector<uint64_t> *vector_of_weak_references = nullptr,
    std::vector<flatbuffers::Offset<MyGame::Example::Referrable>> *vector_of_strong_referrables = nullptr,
    uint64_t co_owning_reference = 0,
    const std::vector<uint64_t> *vector_of_co_owning_references = nullptr,
    uint64_t non_owning_reference = 0,
    const std::vector<uint64_t> *vector_of_non_owning_references = nullptr,
    MyGame::Example::AnyUniqueAliases any_unique_type = MyGame::Example::AnyUniqueAliases::NONE,
    flatbuffers::Offset<void> any_unique = 0,
    MyGame::Example::AnyAmbiguousAliases any_ambiguous_type = MyGame::Example::AnyAmbiguousAliases::NONE,
    flatbuffers::Offset<void> any_ambiguous = 0,
    const std::vector<MyGame::Example::Color> *vector_of_enums = nullptr,
    MyGame::Example::Race signed_enum = MyGame::Example::Race::None) {
  // Serialize strings/vectors up front; null inputs become null offsets (0).
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto inventory__ = inventory ? _fbb.CreateVector<uint8_t>(*inventory) : 0;
  auto test4__ = test4 ? _fbb.CreateVectorOfStructs<MyGame::Example::Test>(*test4) : 0;
  auto testarrayofstring__ = testarrayofstring ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*testarrayofstring) : 0;
  auto testarrayoftables__ = testarrayoftables ? _fbb.CreateVectorOfSortedTables<MyGame::Example::Monster>(testarrayoftables) : 0;
  auto testnestedflatbuffer__ = testnestedflatbuffer ? _fbb.CreateVector<uint8_t>(*testnestedflatbuffer) : 0;
  auto testarrayofbools__ = testarrayofbools ? _fbb.CreateVector<uint8_t>(*testarrayofbools) : 0;
  auto testarrayofstring2__ = testarrayofstring2 ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*testarrayofstring2) : 0;
  auto testarrayofsortedstruct__ = testarrayofsortedstruct ? _fbb.CreateVectorOfSortedStructs<MyGame::Example::Ability>(testarrayofsortedstruct) : 0;
  auto flex__ = flex ? _fbb.CreateVector<uint8_t>(*flex) : 0;
  auto test5__ = test5 ? _fbb.CreateVectorOfStructs<MyGame::Example::Test>(*test5) : 0;
  auto vector_of_longs__ = vector_of_longs ? _fbb.CreateVector<int64_t>(*vector_of_longs) : 0;
  auto vector_of_doubles__ = vector_of_doubles ? _fbb.CreateVector<double>(*vector_of_doubles) : 0;
  auto vector_of_referrables__ = vector_of_referrables ? _fbb.CreateVectorOfSortedTables<MyGame::Example::Referrable>(vector_of_referrables) : 0;
  auto vector_of_weak_references__ = vector_of_weak_references ? _fbb.CreateVector<uint64_t>(*vector_of_weak_references) : 0;
  auto vector_of_strong_referrables__ = vector_of_strong_referrables ? _fbb.CreateVectorOfSortedTables<MyGame::Example::Referrable>(vector_of_strong_referrables) : 0;
  auto vector_of_co_owning_references__ = vector_of_co_owning_references ? _fbb.CreateVector<uint64_t>(*vector_of_co_owning_references) : 0;
  auto vector_of_non_owning_references__ = vector_of_non_owning_references ? _fbb.CreateVector<uint64_t>(*vector_of_non_owning_references) : 0;
  auto vector_of_enums__ = vector_of_enums ? _fbb.CreateVector<MyGame::Example::Color>(*vector_of_enums) : 0;
  return MyGame::Example::CreateMonster(
      _fbb,
      pos,
      mana,
      hp,
      name__,
      inventory__,
      color,
      test_type,
      test,
      test4__,
      testarrayofstring__,
      testarrayoftables__,
      enemy,
      testnestedflatbuffer__,
      testempty,
      testbool,
      testhashs32_fnv1,
      testhashu32_fnv1,
      testhashs64_fnv1,
      testhashu64_fnv1,
      testhashs32_fnv1a,
      testhashu32_fnv1a,
      testhashs64_fnv1a,
      testhashu64_fnv1a,
      testarrayofbools__,
      testf,
      testf2,
      testf3,
      testarrayofstring2__,
      testarrayofsortedstruct__,
      flex__,
      test5__,
      vector_of_longs__,
      vector_of_doubles__,
      parent_namespace_test,
      vector_of_referrables__,
      single_weak_reference,
      vector_of_weak_references__,
      vector_of_strong_referrables__,
      co_owning_reference,
      vector_of_co_owning_references__,
      non_owning_reference,
      vector_of_non_owning_references__,
      any_unique_type,
      any_unique,
      any_ambiguous_type,
      any_ambiguous,
      vector_of_enums__,
      signed_enum);
}
// Object-API overload of CreateMonster (defined later in the generated file).
flatbuffers::Offset<Monster> CreateMonster(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the TypeAliases table: plain C++ members with
// the schema defaults, used by UnPack/Pack.
struct TypeAliasesT : public flatbuffers::NativeTable {
  typedef TypeAliases TableType;
  int8_t i8 = 0;
  uint8_t u8 = 0;
  int16_t i16 = 0;
  uint16_t u16 = 0;
  int32_t i32 = 0;
  uint32_t u32 = 0;
  int64_t i64 = 0;
  uint64_t u64 = 0;
  float f32 = 0.0f;
  double f64 = 0.0;
  std::vector<int8_t> v8{};
  std::vector<double> vf64{};
};
// FlatBuffers table exercising every scalar type alias (i8..u64, f32/f64) plus
// two vector fields.  Provides const getters, in-place mutators (mutate_*
// returns false when the field is absent from the buffer), and a Verify
// method for untrusted input.
struct TypeAliases FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TypeAliasesT NativeTableType;
  typedef TypeAliasesBuilder Builder;
  struct Traits;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return TypeAliasesTypeTable();
  }
  // VTable slot offsets (schema field order, starting at 4).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_I8 = 4,
    VT_U8 = 6,
    VT_I16 = 8,
    VT_U16 = 10,
    VT_I32 = 12,
    VT_U32 = 14,
    VT_I64 = 16,
    VT_U64 = 18,
    VT_F32 = 20,
    VT_F64 = 22,
    VT_V8 = 24,
    VT_VF64 = 26
  };
  int8_t i8() const {
    return GetField<int8_t>(VT_I8, 0);
  }
  bool mutate_i8(int8_t _i8) {
    return SetField<int8_t>(VT_I8, _i8, 0);
  }
  uint8_t u8() const {
    return GetField<uint8_t>(VT_U8, 0);
  }
  bool mutate_u8(uint8_t _u8) {
    return SetField<uint8_t>(VT_U8, _u8, 0);
  }
  int16_t i16() const {
    return GetField<int16_t>(VT_I16, 0);
  }
  bool mutate_i16(int16_t _i16) {
    return SetField<int16_t>(VT_I16, _i16, 0);
  }
  uint16_t u16() const {
    return GetField<uint16_t>(VT_U16, 0);
  }
  bool mutate_u16(uint16_t _u16) {
    return SetField<uint16_t>(VT_U16, _u16, 0);
  }
  int32_t i32() const {
    return GetField<int32_t>(VT_I32, 0);
  }
  bool mutate_i32(int32_t _i32) {
    return SetField<int32_t>(VT_I32, _i32, 0);
  }
  uint32_t u32() const {
    return GetField<uint32_t>(VT_U32, 0);
  }
  bool mutate_u32(uint32_t _u32) {
    return SetField<uint32_t>(VT_U32, _u32, 0);
  }
  int64_t i64() const {
    return GetField<int64_t>(VT_I64, 0);
  }
  bool mutate_i64(int64_t _i64) {
    return SetField<int64_t>(VT_I64, _i64, 0);
  }
  uint64_t u64() const {
    return GetField<uint64_t>(VT_U64, 0);
  }
  bool mutate_u64(uint64_t _u64) {
    return SetField<uint64_t>(VT_U64, _u64, 0);
  }
  float f32() const {
    return GetField<float>(VT_F32, 0.0f);
  }
  bool mutate_f32(float _f32) {
    return SetField<float>(VT_F32, _f32, 0.0f);
  }
  double f64() const {
    return GetField<double>(VT_F64, 0.0);
  }
  bool mutate_f64(double _f64) {
    return SetField<double>(VT_F64, _f64, 0.0);
  }
  const flatbuffers::Vector<int8_t> *v8() const {
    return GetPointer<const flatbuffers::Vector<int8_t> *>(VT_V8);
  }
  flatbuffers::Vector<int8_t> *mutable_v8() {
    return GetPointer<flatbuffers::Vector<int8_t> *>(VT_V8);
  }
  const flatbuffers::Vector<double> *vf64() const {
    return GetPointer<const flatbuffers::Vector<double> *>(VT_VF64);
  }
  flatbuffers::Vector<double> *mutable_vf64() {
    return GetPointer<flatbuffers::Vector<double> *>(VT_VF64);
  }
  // Bounds/format verification; short-circuits on the first failed check.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int8_t>(verifier, VT_I8) &&
           VerifyField<uint8_t>(verifier, VT_U8) &&
           VerifyField<int16_t>(verifier, VT_I16) &&
           VerifyField<uint16_t>(verifier, VT_U16) &&
           VerifyField<int32_t>(verifier, VT_I32) &&
           VerifyField<uint32_t>(verifier, VT_U32) &&
           VerifyField<int64_t>(verifier, VT_I64) &&
           VerifyField<uint64_t>(verifier, VT_U64) &&
           VerifyField<float>(verifier, VT_F32) &&
           VerifyField<double>(verifier, VT_F64) &&
           VerifyOffset(verifier, VT_V8) &&
           verifier.VerifyVector(v8()) &&
           VerifyOffset(verifier, VT_VF64) &&
           verifier.VerifyVector(vf64()) &&
           verifier.EndTable();
  }
  // Object-API bridge; definitions are emitted later in the generated file.
  TypeAliasesT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(TypeAliasesT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<TypeAliases> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TypeAliasesT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for the TypeAliases table.  AddElement elides writes when the value
// equals the schema default (last argument), so default-valued fields occupy
// no buffer space.  No required fields, so Finish() performs no checks.
struct TypeAliasesBuilder {
  typedef TypeAliases Table;
  flatbuffers::FlatBufferBuilder &fbb_;   // builder this table is written into
  flatbuffers::uoffset_t start_;          // offset returned by StartTable()
  void add_i8(int8_t i8) {
    fbb_.AddElement<int8_t>(TypeAliases::VT_I8, i8, 0);
  }
  void add_u8(uint8_t u8) {
    fbb_.AddElement<uint8_t>(TypeAliases::VT_U8, u8, 0);
  }
  void add_i16(int16_t i16) {
    fbb_.AddElement<int16_t>(TypeAliases::VT_I16, i16, 0);
  }
  void add_u16(uint16_t u16) {
    fbb_.AddElement<uint16_t>(TypeAliases::VT_U16, u16, 0);
  }
  void add_i32(int32_t i32) {
    fbb_.AddElement<int32_t>(TypeAliases::VT_I32, i32, 0);
  }
  void add_u32(uint32_t u32) {
    fbb_.AddElement<uint32_t>(TypeAliases::VT_U32, u32, 0);
  }
  void add_i64(int64_t i64) {
    fbb_.AddElement<int64_t>(TypeAliases::VT_I64, i64, 0);
  }
  void add_u64(uint64_t u64) {
    fbb_.AddElement<uint64_t>(TypeAliases::VT_U64, u64, 0);
  }
  void add_f32(float f32) {
    fbb_.AddElement<float>(TypeAliases::VT_F32, f32, 0.0f);
  }
  void add_f64(double f64) {
    fbb_.AddElement<double>(TypeAliases::VT_F64, f64, 0.0);
  }
  void add_v8(flatbuffers::Offset<flatbuffers::Vector<int8_t>> v8) {
    fbb_.AddOffset(TypeAliases::VT_V8, v8);
  }
  void add_vf64(flatbuffers::Offset<flatbuffers::Vector<double>> vf64) {
    fbb_.AddOffset(TypeAliases::VT_VF64, vf64);
  }
  explicit TypeAliasesBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<TypeAliases> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TypeAliases>(end);
    return o;
  }
};
// One-call constructor for a TypeAliases table.  The add_* calls are ordered
// largest field first (generator's alignment-friendly layout order), not
// parameter order — do not reorder them.
inline flatbuffers::Offset<TypeAliases> CreateTypeAliases(
    flatbuffers::FlatBufferBuilder &_fbb,
    int8_t i8 = 0,
    uint8_t u8 = 0,
    int16_t i16 = 0,
    uint16_t u16 = 0,
    int32_t i32 = 0,
    uint32_t u32 = 0,
    int64_t i64 = 0,
    uint64_t u64 = 0,
    float f32 = 0.0f,
    double f64 = 0.0,
    flatbuffers::Offset<flatbuffers::Vector<int8_t>> v8 = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> vf64 = 0) {
  TypeAliasesBuilder builder_(_fbb);
  builder_.add_f64(f64);
  builder_.add_u64(u64);
  builder_.add_i64(i64);
  builder_.add_vf64(vf64);
  builder_.add_v8(v8);
  builder_.add_f32(f32);
  builder_.add_u32(u32);
  builder_.add_i32(i32);
  builder_.add_u16(u16);
  builder_.add_i16(i16);
  builder_.add_u8(u8);
  builder_.add_i8(i8);
  return builder_.Finish();
}
// Compile-time traits tying the TypeAliases table to its Create function,
// used by generic FlatBuffers helpers.
struct TypeAliases::Traits {
  using type = TypeAliases;
  static auto constexpr Create = CreateTypeAliases;
};
// "Direct" variant: serializes the std::vector inputs into the builder first
// (null inputs become null offsets), then delegates to CreateTypeAliases.
inline flatbuffers::Offset<TypeAliases> CreateTypeAliasesDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    int8_t i8 = 0,
    uint8_t u8 = 0,
    int16_t i16 = 0,
    uint16_t u16 = 0,
    int32_t i32 = 0,
    uint32_t u32 = 0,
    int64_t i64 = 0,
    uint64_t u64 = 0,
    float f32 = 0.0f,
    double f64 = 0.0,
    const std::vector<int8_t> *v8 = nullptr,
    const std::vector<double> *vf64 = nullptr) {
  auto v8__ = v8 ? _fbb.CreateVector<int8_t>(*v8) : 0;
  auto vf64__ = vf64 ? _fbb.CreateVector<double>(*vf64) : 0;
  return MyGame::Example::CreateTypeAliases(
      _fbb,
      i8,
      u8,
      i16,
      u16,
      i32,
      u32,
      i64,
      u64,
      f32,
      f64,
      v8__,
      vf64__);
}
// Object-API overload (defined later in the generated file).
flatbuffers::Offset<TypeAliases> CreateTypeAliases(flatbuffers::FlatBufferBuilder &_fbb, const TypeAliasesT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
} // namespace Example
// Object-API bridge for the field-less InParentNamespace table: UnPack
// allocates and returns an owned native object (caller frees); UnPackTo and
// the Create overload are no-ops beyond allocation because the table has no
// fields.
inline InParentNamespaceT *InParentNamespace::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<InParentNamespaceT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();  // ownership transfers to the caller (raw pointer)
}
inline void InParentNamespace::UnPackTo(InParentNamespaceT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
}
inline flatbuffers::Offset<InParentNamespace> InParentNamespace::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InParentNamespaceT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateInParentNamespace(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<InParentNamespace> CreateInParentNamespace(flatbuffers::FlatBufferBuilder &_fbb, const InParentNamespaceT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va exists so nested-vector lambdas (none here) could capture the args.
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InParentNamespaceT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  return MyGame::CreateInParentNamespace(
      _fbb);
}
namespace Example2 {
// Object-API bridge for the field-less Example2::Monster table; same pattern
// as InParentNamespace above.  UnPack returns an owned raw pointer.
inline MonsterT *Monster::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<MonsterT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();  // ownership transfers to the caller
}
inline void Monster::UnPackTo(MonsterT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
}
inline flatbuffers::Offset<Monster> Monster::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateMonster(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<Monster> CreateMonster(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MonsterT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  return MyGame::Example2::CreateMonster(
      _fbb);
}
} // namespace Example2
namespace Example {
// Object-based API for TestSimpleTableWithEnum: a single `color` enum field.
inline TestSimpleTableWithEnumT *TestSimpleTableWithEnum::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<TestSimpleTableWithEnumT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies each table accessor into the native object; scalar enums copy directly.
inline void TestSimpleTableWithEnum::UnPackTo(TestSimpleTableWithEnumT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = color(); _o->color = _e; }
}
inline flatbuffers::Offset<TestSimpleTableWithEnum> TestSimpleTableWithEnum::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TestSimpleTableWithEnumT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateTestSimpleTableWithEnum(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<TestSimpleTableWithEnum> CreateTestSimpleTableWithEnum(flatbuffers::FlatBufferBuilder &_fbb, const TestSimpleTableWithEnumT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TestSimpleTableWithEnumT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _color = _o->color;
  return MyGame::Example::CreateTestSimpleTableWithEnum(
      _fbb,
      _color);
}
// Object-based API for Stat: string `id`, int64 `val`, uint16 `count`.
inline StatT *Stat::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<StatT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
inline void Stat::UnPackTo(StatT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  // String field is optional in the buffer: only copied when present.
  { auto _e = id(); if (_e) _o->id = _e->str(); }
  { auto _e = val(); _o->val = _e; }
  { auto _e = count(); _o->count = _e; }
}
inline flatbuffers::Offset<Stat> Stat::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StatT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateStat(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<Stat> CreateStat(flatbuffers::FlatBufferBuilder &_fbb, const StatT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StatT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // An empty std::string maps back to an absent field (offset 0).
  auto _id = _o->id.empty() ? 0 : _fbb.CreateString(_o->id);
  auto _val = _o->val;
  auto _count = _o->count;
  return MyGame::Example::CreateStat(
      _fbb,
      _id,
      _val,
      _count);
}
// Object-based API for Referrable: a single uint64 `id` used as a key for
// the weak/strong reference test fields on Monster.
inline ReferrableT *Referrable::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<ReferrableT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
inline void Referrable::UnPackTo(ReferrableT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = id(); _o->id = _e; }
}
inline flatbuffers::Offset<Referrable> Referrable::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateReferrable(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<Referrable> CreateReferrable(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReferrableT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _id = _o->id;
  return MyGame::Example::CreateReferrable(
      _fbb,
      _id);
}
// Object-based API for MyGame::Example::Monster: deep-copies every field of
// the flatbuffer table into the native MonsterT. Sub-tables are recursively
// UnPack'ed into unique_ptrs; structs are copied by value; unions copy the
// discriminant first, then UnPack the value through the union helper.
inline MonsterT *Monster::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<MonsterT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
inline void Monster::UnPackTo(MonsterT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = pos(); if (_e) _o->pos = std::unique_ptr<MyGame::Example::Vec3>(new MyGame::Example::Vec3(*_e)); }
  { auto _e = mana(); _o->mana = _e; }
  { auto _e = hp(); _o->hp = _e; }
  { auto _e = name(); if (_e) _o->name = _e->str(); }
  { auto _e = inventory(); if (_e) { _o->inventory.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inventory[_i] = _e->Get(_i); } } }
  { auto _e = color(); _o->color = _e; }
  { auto _e = test_type(); _o->test.type = _e; }
  { auto _e = test(); if (_e) _o->test.value = MyGame::Example::AnyUnion::UnPack(_e, test_type(), _resolver); }
  { auto _e = test4(); if (_e) { _o->test4.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->test4[_i] = *_e->Get(_i); } } }
  { auto _e = testarrayofstring(); if (_e) { _o->testarrayofstring.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testarrayofstring[_i] = _e->Get(_i)->str(); } } }
  { auto _e = testarrayoftables(); if (_e) { _o->testarrayoftables.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testarrayoftables[_i] = std::unique_ptr<MyGame::Example::MonsterT>(_e->Get(_i)->UnPack(_resolver)); } } }
  { auto _e = enemy(); if (_e) _o->enemy = std::unique_ptr<MyGame::Example::MonsterT>(_e->UnPack(_resolver)); }
  { auto _e = testnestedflatbuffer(); if (_e) { _o->testnestedflatbuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testnestedflatbuffer[_i] = _e->Get(_i); } } }
  { auto _e = testempty(); if (_e) _o->testempty = std::unique_ptr<MyGame::Example::StatT>(_e->UnPack(_resolver)); }
  { auto _e = testbool(); _o->testbool = _e; }
  { auto _e = testhashs32_fnv1(); _o->testhashs32_fnv1 = _e; }
  { auto _e = testhashu32_fnv1(); _o->testhashu32_fnv1 = _e; }
  { auto _e = testhashs64_fnv1(); _o->testhashs64_fnv1 = _e; }
  { auto _e = testhashu64_fnv1(); _o->testhashu64_fnv1 = _e; }
  { auto _e = testhashs32_fnv1a(); _o->testhashs32_fnv1a = _e; }
  // Hash fields with a resolver: the stored hash is mapped back to a live
  // pointer via the user-supplied resolver; without one the pointer is null.
  { auto _e = testhashu32_fnv1a(); //scalar resolver, naked
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->testhashu32_fnv1a), static_cast<flatbuffers::hash_value_t>(_e)); else _o->testhashu32_fnv1a = nullptr; }
  { auto _e = testhashs64_fnv1a(); _o->testhashs64_fnv1a = _e; }
  { auto _e = testhashu64_fnv1a(); _o->testhashu64_fnv1a = _e; }
  { auto _e = testarrayofbools(); if (_e) { _o->testarrayofbools.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testarrayofbools[_i] = _e->Get(_i) != 0; } } }
  { auto _e = testf(); _o->testf = _e; }
  { auto _e = testf2(); _o->testf2 = _e; }
  { auto _e = testf3(); _o->testf3 = _e; }
  { auto _e = testarrayofstring2(); if (_e) { _o->testarrayofstring2.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testarrayofstring2[_i] = _e->Get(_i)->str(); } } }
  { auto _e = testarrayofsortedstruct(); if (_e) { _o->testarrayofsortedstruct.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testarrayofsortedstruct[_i] = *_e->Get(_i); } } }
  { auto _e = flex(); if (_e) { _o->flex.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->flex[_i] = _e->Get(_i); } } }
  { auto _e = test5(); if (_e) { _o->test5.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->test5[_i] = *_e->Get(_i); } } }
  { auto _e = vector_of_longs(); if (_e) { _o->vector_of_longs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_longs[_i] = _e->Get(_i); } } }
  { auto _e = vector_of_doubles(); if (_e) { _o->vector_of_doubles.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_doubles[_i] = _e->Get(_i); } } }
  { auto _e = parent_namespace_test(); if (_e) _o->parent_namespace_test = std::unique_ptr<MyGame::InParentNamespaceT>(_e->UnPack(_resolver)); }
  { auto _e = vector_of_referrables(); if (_e) { _o->vector_of_referrables.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_referrables[_i] = std::unique_ptr<MyGame::Example::ReferrableT>(_e->Get(_i)->UnPack(_resolver)); } } }
  { auto _e = single_weak_reference(); //scalar resolver, naked
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->single_weak_reference), static_cast<flatbuffers::hash_value_t>(_e)); else _o->single_weak_reference = nullptr; }
  { auto _e = vector_of_weak_references(); if (_e) { _o->vector_of_weak_references.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { //vector resolver, naked
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->vector_of_weak_references[_i]), static_cast<flatbuffers::hash_value_t>(_e->Get(_i))); else _o->vector_of_weak_references[_i] = nullptr; } } }
  { auto _e = vector_of_strong_referrables(); if (_e) { _o->vector_of_strong_referrables.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_strong_referrables[_i] = std::unique_ptr<MyGame::Example::ReferrableT>(_e->Get(_i)->UnPack(_resolver)); } } }
  { auto _e = co_owning_reference(); //scalar resolver, naked
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->co_owning_reference), static_cast<flatbuffers::hash_value_t>(_e)); else _o->co_owning_reference = nullptr; }
  { auto _e = vector_of_co_owning_references(); if (_e) { _o->vector_of_co_owning_references.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { //vector resolver, default_ptr_type
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->vector_of_co_owning_references[_i]), static_cast<flatbuffers::hash_value_t>(_e->Get(_i)));/* else do nothing */; } } }
  { auto _e = non_owning_reference(); //scalar resolver, naked
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->non_owning_reference), static_cast<flatbuffers::hash_value_t>(_e)); else _o->non_owning_reference = nullptr; }
  { auto _e = vector_of_non_owning_references(); if (_e) { _o->vector_of_non_owning_references.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { //vector resolver, naked
    if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->vector_of_non_owning_references[_i]), static_cast<flatbuffers::hash_value_t>(_e->Get(_i))); else _o->vector_of_non_owning_references[_i] = nullptr; } } }
  { auto _e = any_unique_type(); _o->any_unique.type = _e; }
  { auto _e = any_unique(); if (_e) _o->any_unique.value = MyGame::Example::AnyUniqueAliasesUnion::UnPack(_e, any_unique_type(), _resolver); }
  { auto _e = any_ambiguous_type(); _o->any_ambiguous.type = _e; }
  { auto _e = any_ambiguous(); if (_e) _o->any_ambiguous.value = MyGame::Example::AnyAmbiguousAliasesUnion::UnPack(_e, any_ambiguous_type(), _resolver); }
  { auto _e = vector_of_enums(); if (_e) { _o->vector_of_enums.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_enums[_i] = static_cast<MyGame::Example::Color>(_e->Get(_i)); } } }
  { auto _e = signed_enum(); _o->signed_enum = _e; }
}
inline flatbuffers::Offset<Monster> Monster::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateMonster(_fbb, _o, _rehasher);
}
// Serializes a native MonsterT into the builder. All child offsets (strings,
// vectors, sub-tables, unions) are created BEFORE the final CreateMonster
// call, as required by FlatBufferBuilder (no nested table construction).
// Empty vectors/strings and null sub-objects map to offset 0 (absent field).
inline flatbuffers::Offset<Monster> CreateMonster(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va lets the element-building lambdas below reach the builder, the
  // source object and the rehasher without capturing (they take a context
  // pointer instead).
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MonsterT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _pos = _o->pos ? _o->pos.get() : 0;
  auto _mana = _o->mana;
  auto _hp = _o->hp;
  // `name` is written unconditionally (required field in the schema definition
  // of this generated API; an empty string is still serialized).
  auto _name = _fbb.CreateString(_o->name);
  auto _inventory = _o->inventory.size() ? _fbb.CreateVector(_o->inventory) : 0;
  auto _color = _o->color;
  auto _test_type = _o->test.type;
  auto _test = _o->test.Pack(_fbb);
  auto _test4 = _o->test4.size() ? _fbb.CreateVectorOfStructs(_o->test4) : 0;
  auto _testarrayofstring = _o->testarrayofstring.size() ? _fbb.CreateVectorOfStrings(_o->testarrayofstring) : 0;
  auto _testarrayoftables = _o->testarrayoftables.size() ? _fbb.CreateVector<flatbuffers::Offset<MyGame::Example::Monster>> (_o->testarrayoftables.size(), [](size_t i, _VectorArgs *__va) { return CreateMonster(*__va->__fbb, __va->__o->testarrayoftables[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _enemy = _o->enemy ? CreateMonster(_fbb, _o->enemy.get(), _rehasher) : 0;
  auto _testnestedflatbuffer = _o->testnestedflatbuffer.size() ? _fbb.CreateVector(_o->testnestedflatbuffer) : 0;
  auto _testempty = _o->testempty ? CreateStat(_fbb, _o->testempty.get(), _rehasher) : 0;
  auto _testbool = _o->testbool;
  auto _testhashs32_fnv1 = _o->testhashs32_fnv1;
  auto _testhashu32_fnv1 = _o->testhashu32_fnv1;
  auto _testhashs64_fnv1 = _o->testhashs64_fnv1;
  auto _testhashu64_fnv1 = _o->testhashu64_fnv1;
  auto _testhashs32_fnv1a = _o->testhashs32_fnv1a;
  // Pointer-backed hash fields: the rehasher converts the live pointer back
  // into its stored hash; without a rehasher the field is written as 0.
  auto _testhashu32_fnv1a = _rehasher ? static_cast<uint32_t>((*_rehasher)(_o->testhashu32_fnv1a)) : 0;
  auto _testhashs64_fnv1a = _o->testhashs64_fnv1a;
  auto _testhashu64_fnv1a = _o->testhashu64_fnv1a;
  auto _testarrayofbools = _o->testarrayofbools.size() ? _fbb.CreateVector(_o->testarrayofbools) : 0;
  auto _testf = _o->testf;
  auto _testf2 = _o->testf2;
  auto _testf3 = _o->testf3;
  auto _testarrayofstring2 = _o->testarrayofstring2.size() ? _fbb.CreateVectorOfStrings(_o->testarrayofstring2) : 0;
  auto _testarrayofsortedstruct = _o->testarrayofsortedstruct.size() ? _fbb.CreateVectorOfStructs(_o->testarrayofsortedstruct) : 0;
  auto _flex = _o->flex.size() ? _fbb.CreateVector(_o->flex) : 0;
  auto _test5 = _o->test5.size() ? _fbb.CreateVectorOfStructs(_o->test5) : 0;
  auto _vector_of_longs = _o->vector_of_longs.size() ? _fbb.CreateVector(_o->vector_of_longs) : 0;
  auto _vector_of_doubles = _o->vector_of_doubles.size() ? _fbb.CreateVector(_o->vector_of_doubles) : 0;
  auto _parent_namespace_test = _o->parent_namespace_test ? CreateInParentNamespace(_fbb, _o->parent_namespace_test.get(), _rehasher) : 0;
  auto _vector_of_referrables = _o->vector_of_referrables.size() ? _fbb.CreateVector<flatbuffers::Offset<MyGame::Example::Referrable>> (_o->vector_of_referrables.size(), [](size_t i, _VectorArgs *__va) { return CreateReferrable(*__va->__fbb, __va->__o->vector_of_referrables[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _single_weak_reference = _rehasher ? static_cast<uint64_t>((*_rehasher)(_o->single_weak_reference)) : 0;
  auto _vector_of_weak_references = _o->vector_of_weak_references.size() ? _fbb.CreateVector<uint64_t>(_o->vector_of_weak_references.size(), [](size_t i, _VectorArgs *__va) { return __va->__rehasher ? static_cast<uint64_t>((*__va->__rehasher)(__va->__o->vector_of_weak_references[i])) : 0; }, &_va ) : 0;
  auto _vector_of_strong_referrables = _o->vector_of_strong_referrables.size() ? _fbb.CreateVector<flatbuffers::Offset<MyGame::Example::Referrable>> (_o->vector_of_strong_referrables.size(), [](size_t i, _VectorArgs *__va) { return CreateReferrable(*__va->__fbb, __va->__o->vector_of_strong_referrables[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _co_owning_reference = _rehasher ? static_cast<uint64_t>((*_rehasher)(_o->co_owning_reference)) : 0;
  // co-owning vector elements are smart pointers, hence the .get() before rehashing.
  auto _vector_of_co_owning_references = _o->vector_of_co_owning_references.size() ? _fbb.CreateVector<uint64_t>(_o->vector_of_co_owning_references.size(), [](size_t i, _VectorArgs *__va) { return __va->__rehasher ? static_cast<uint64_t>((*__va->__rehasher)(__va->__o->vector_of_co_owning_references[i].get())) : 0; }, &_va ) : 0;
  auto _non_owning_reference = _rehasher ? static_cast<uint64_t>((*_rehasher)(_o->non_owning_reference)) : 0;
  auto _vector_of_non_owning_references = _o->vector_of_non_owning_references.size() ? _fbb.CreateVector<uint64_t>(_o->vector_of_non_owning_references.size(), [](size_t i, _VectorArgs *__va) { return __va->__rehasher ? static_cast<uint64_t>((*__va->__rehasher)(__va->__o->vector_of_non_owning_references[i])) : 0; }, &_va ) : 0;
  auto _any_unique_type = _o->any_unique.type;
  auto _any_unique = _o->any_unique.Pack(_fbb);
  auto _any_ambiguous_type = _o->any_ambiguous.type;
  auto _any_ambiguous = _o->any_ambiguous.Pack(_fbb);
  auto _vector_of_enums = _o->vector_of_enums.size() ? _fbb.CreateVector(_o->vector_of_enums) : 0;
  auto _signed_enum = _o->signed_enum;
  return MyGame::Example::CreateMonster(
      _fbb,
      _pos,
      _mana,
      _hp,
      _name,
      _inventory,
      _color,
      _test_type,
      _test,
      _test4,
      _testarrayofstring,
      _testarrayoftables,
      _enemy,
      _testnestedflatbuffer,
      _testempty,
      _testbool,
      _testhashs32_fnv1,
      _testhashu32_fnv1,
      _testhashs64_fnv1,
      _testhashu64_fnv1,
      _testhashs32_fnv1a,
      _testhashu32_fnv1a,
      _testhashs64_fnv1a,
      _testhashu64_fnv1a,
      _testarrayofbools,
      _testf,
      _testf2,
      _testf3,
      _testarrayofstring2,
      _testarrayofsortedstruct,
      _flex,
      _test5,
      _vector_of_longs,
      _vector_of_doubles,
      _parent_namespace_test,
      _vector_of_referrables,
      _single_weak_reference,
      _vector_of_weak_references,
      _vector_of_strong_referrables,
      _co_owning_reference,
      _vector_of_co_owning_references,
      _non_owning_reference,
      _vector_of_non_owning_references,
      _any_unique_type,
      _any_unique,
      _any_ambiguous_type,
      _any_ambiguous,
      _vector_of_enums,
      _signed_enum);
}
// Object-based API for TypeAliases: one field per fixed-width scalar alias
// (i8..u64, f32, f64) plus two vectors (v8, vf64).
inline TypeAliasesT *TypeAliases::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::make_unique<TypeAliasesT>();
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
inline void TypeAliases::UnPackTo(TypeAliasesT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = i8(); _o->i8 = _e; }
  { auto _e = u8(); _o->u8 = _e; }
  { auto _e = i16(); _o->i16 = _e; }
  { auto _e = u16(); _o->u16 = _e; }
  { auto _e = i32(); _o->i32 = _e; }
  { auto _e = u32(); _o->u32 = _e; }
  { auto _e = i64(); _o->i64 = _e; }
  { auto _e = u64(); _o->u64 = _e; }
  { auto _e = f32(); _o->f32 = _e; }
  { auto _e = f64(); _o->f64 = _e; }
  { auto _e = v8(); if (_e) { _o->v8.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->v8[_i] = _e->Get(_i); } } }
  { auto _e = vf64(); if (_e) { _o->vf64.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vf64[_i] = _e->Get(_i); } } }
}
inline flatbuffers::Offset<TypeAliases> TypeAliases::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TypeAliasesT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateTypeAliases(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<TypeAliases> CreateTypeAliases(flatbuffers::FlatBufferBuilder &_fbb, const TypeAliasesT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TypeAliasesT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _i8 = _o->i8;
  auto _u8 = _o->u8;
  auto _i16 = _o->i16;
  auto _u16 = _o->u16;
  auto _i32 = _o->i32;
  auto _u32 = _o->u32;
  auto _i64 = _o->i64;
  auto _u64 = _o->u64;
  auto _f32 = _o->f32;
  auto _f64 = _o->f64;
  // Empty vectors serialize as absent fields (offset 0).
  auto _v8 = _o->v8.size() ? _fbb.CreateVector(_o->v8) : 0;
  auto _vf64 = _o->vf64.size() ? _fbb.CreateVector(_o->vf64) : 0;
  return MyGame::Example::CreateTypeAliases(
      _fbb,
      _i8,
      _u8,
      _i16,
      _u16,
      _i32,
      _u32,
      _i64,
      _u64,
      _f32,
      _f64,
      _v8,
      _vf64);
}
// Verifies the union member selected by `type`. NONE and unrecognized
// discriminants are accepted (tolerated for schema evolution); known table
// members are bounds-checked with Verifier::VerifyTable.
inline bool VerifyAny(flatbuffers::Verifier &verifier, const void *obj, Any type) {
  if (type == Any::NONE) {
    return true;
  }
  if (type == Any::Monster) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example::Monster *>(obj));
  }
  if (type == Any::TestSimpleTableWithEnum) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnum *>(obj));
  }
  if (type == Any::MyGame_Example2_Monster) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example2::Monster *>(obj));
  }
  return true;  // unknown discriminant: accepted
}
// Verifies parallel value/type vectors of Any unions: the two vectors must
// be jointly absent or jointly present with identical length, and every
// element must verify against its own discriminant.
inline bool VerifyAnyVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  flatbuffers::uoffset_t idx = 0;
  while (idx < values->size()) {
    if (!VerifyAny(verifier, values->Get(idx), types->GetEnum<Any>(idx))) {
      return false;
    }
    ++idx;
  }
  return true;
}
// Converts a serialized union value into its heap-allocated native ("T")
// object, dispatching on the discriminant. Returns nullptr for NONE or an
// unknown type; otherwise the caller owns the returned object.
inline void *AnyUnion::UnPack(const void *obj, Any type, const flatbuffers::resolver_function_t *resolver) {
  switch (type) {
    case Any::Monster: {
      auto ptr = reinterpret_cast<const MyGame::Example::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    case Any::TestSimpleTableWithEnum: {
      auto ptr = reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnum *>(obj);
      return ptr->UnPack(resolver);
    }
    case Any::MyGame_Example2_Monster: {
      auto ptr = reinterpret_cast<const MyGame::Example2::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    default: return nullptr;
  }
}
// Serializes the stored native object into the builder, returning a
// type-erased offset (0 when the union holds NONE / an unknown type).
inline flatbuffers::Offset<void> AnyUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
  switch (type) {
    case Any::Monster: {
      auto ptr = reinterpret_cast<const MyGame::Example::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    case Any::TestSimpleTableWithEnum: {
      auto ptr = reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnumT *>(value);
      return CreateTestSimpleTableWithEnum(_fbb, ptr, _rehasher).Union();
    }
    case Any::MyGame_Example2_Monster: {
      auto ptr = reinterpret_cast<const MyGame::Example2::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    default: return 0;
  }
}
// Copy constructor: deep-copies the stored member. MonsterT is not copyable
// (it owns unique_ptrs), so copying a Monster-typed union asserts.
inline AnyUnion::AnyUnion(const AnyUnion &u) : type(u.type), value(nullptr) {
  switch (type) {
    case Any::Monster: {
      FLATBUFFERS_ASSERT(false); // MyGame::Example::MonsterT not copyable.
      break;
    }
    case Any::TestSimpleTableWithEnum: {
      value = new MyGame::Example::TestSimpleTableWithEnumT(*reinterpret_cast<MyGame::Example::TestSimpleTableWithEnumT *>(u.value));
      break;
    }
    case Any::MyGame_Example2_Monster: {
      value = new MyGame::Example2::MonsterT(*reinterpret_cast<MyGame::Example2::MonsterT *>(u.value));
      break;
    }
    default:
      break;
  }
}
// Destroys the stored member (delete via its concrete type) and returns the
// union to the empty NONE state.
inline void AnyUnion::Reset() {
  switch (type) {
    case Any::Monster: {
      auto ptr = reinterpret_cast<MyGame::Example::MonsterT *>(value);
      delete ptr;
      break;
    }
    case Any::TestSimpleTableWithEnum: {
      auto ptr = reinterpret_cast<MyGame::Example::TestSimpleTableWithEnumT *>(value);
      delete ptr;
      break;
    }
    case Any::MyGame_Example2_Monster: {
      auto ptr = reinterpret_cast<MyGame::Example2::MonsterT *>(value);
      delete ptr;
      break;
    }
    default: break;
  }
  value = nullptr;
  type = Any::NONE;
}
// Verifies the member selected by `type` for an AnyUniqueAliases union.
// NONE and unrecognized discriminants pass (schema evolution); each known
// alias resolves to a distinct table type checked with VerifyTable.
inline bool VerifyAnyUniqueAliases(flatbuffers::Verifier &verifier, const void *obj, AnyUniqueAliases type) {
  if (type == AnyUniqueAliases::NONE) {
    return true;
  }
  if (type == AnyUniqueAliases::M) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example::Monster *>(obj));
  }
  if (type == AnyUniqueAliases::TS) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnum *>(obj));
  }
  if (type == AnyUniqueAliases::M2) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example2::Monster *>(obj));
  }
  return true;  // unknown discriminant: accepted
}
// Verifies parallel value/type vectors of AnyUniqueAliases unions: both
// vectors jointly absent or jointly present with equal length, and each
// element valid for its discriminant.
inline bool VerifyAnyUniqueAliasesVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  flatbuffers::uoffset_t idx = 0;
  while (idx < values->size()) {
    if (!VerifyAnyUniqueAliases(verifier, values->Get(idx), types->GetEnum<AnyUniqueAliases>(idx))) {
      return false;
    }
    ++idx;
  }
  return true;
}
// Converts a serialized AnyUniqueAliases value into its native object,
// dispatching on the alias discriminant (M/TS/M2 each map to a distinct
// table type). Returns nullptr for NONE/unknown; the caller owns the result.
inline void *AnyUniqueAliasesUnion::UnPack(const void *obj, AnyUniqueAliases type, const flatbuffers::resolver_function_t *resolver) {
  switch (type) {
    case AnyUniqueAliases::M: {
      auto ptr = reinterpret_cast<const MyGame::Example::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    case AnyUniqueAliases::TS: {
      auto ptr = reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnum *>(obj);
      return ptr->UnPack(resolver);
    }
    case AnyUniqueAliases::M2: {
      auto ptr = reinterpret_cast<const MyGame::Example2::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    default: return nullptr;
  }
}
// Serializes the stored native object, returning a type-erased offset
// (0 for NONE / unknown).
inline flatbuffers::Offset<void> AnyUniqueAliasesUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
  switch (type) {
    case AnyUniqueAliases::M: {
      auto ptr = reinterpret_cast<const MyGame::Example::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    case AnyUniqueAliases::TS: {
      auto ptr = reinterpret_cast<const MyGame::Example::TestSimpleTableWithEnumT *>(value);
      return CreateTestSimpleTableWithEnum(_fbb, ptr, _rehasher).Union();
    }
    case AnyUniqueAliases::M2: {
      auto ptr = reinterpret_cast<const MyGame::Example2::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    default: return 0;
  }
}
// Copy constructor: deep-copies the stored member. MonsterT owns unique_ptrs
// and is not copyable, hence the assert for the M alias.
inline AnyUniqueAliasesUnion::AnyUniqueAliasesUnion(const AnyUniqueAliasesUnion &u) : type(u.type), value(nullptr) {
  switch (type) {
    case AnyUniqueAliases::M: {
      FLATBUFFERS_ASSERT(false); // MyGame::Example::MonsterT not copyable.
      break;
    }
    case AnyUniqueAliases::TS: {
      value = new MyGame::Example::TestSimpleTableWithEnumT(*reinterpret_cast<MyGame::Example::TestSimpleTableWithEnumT *>(u.value));
      break;
    }
    case AnyUniqueAliases::M2: {
      value = new MyGame::Example2::MonsterT(*reinterpret_cast<MyGame::Example2::MonsterT *>(u.value));
      break;
    }
    default:
      break;
  }
}
// Deletes the stored member via its concrete type and resets to NONE.
inline void AnyUniqueAliasesUnion::Reset() {
  switch (type) {
    case AnyUniqueAliases::M: {
      auto ptr = reinterpret_cast<MyGame::Example::MonsterT *>(value);
      delete ptr;
      break;
    }
    case AnyUniqueAliases::TS: {
      auto ptr = reinterpret_cast<MyGame::Example::TestSimpleTableWithEnumT *>(value);
      delete ptr;
      break;
    }
    case AnyUniqueAliases::M2: {
      auto ptr = reinterpret_cast<MyGame::Example2::MonsterT *>(value);
      delete ptr;
      break;
    }
    default: break;
  }
  value = nullptr;
  type = AnyUniqueAliases::NONE;
}
// Verifies the member selected by `type` for an AnyAmbiguousAliases union.
// All three aliases (M1/M2/M3) deliberately map to the same Monster table,
// so any known discriminant verifies the object as a Monster; NONE and
// unrecognized values pass.
inline bool VerifyAnyAmbiguousAliases(flatbuffers::Verifier &verifier, const void *obj, AnyAmbiguousAliases type) {
  if (type == AnyAmbiguousAliases::NONE) {
    return true;
  }
  const bool known = type == AnyAmbiguousAliases::M1 ||
                     type == AnyAmbiguousAliases::M2 ||
                     type == AnyAmbiguousAliases::M3;
  if (known) {
    return verifier.VerifyTable(reinterpret_cast<const MyGame::Example::Monster *>(obj));
  }
  return true;  // unknown discriminant: accepted
}
// Verifies parallel value/type vectors of AnyAmbiguousAliases unions: both
// vectors jointly absent or jointly present with equal length, and each
// element valid for its discriminant.
inline bool VerifyAnyAmbiguousAliasesVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  flatbuffers::uoffset_t idx = 0;
  while (idx < values->size()) {
    if (!VerifyAnyAmbiguousAliases(verifier, values->Get(idx), types->GetEnum<AnyAmbiguousAliases>(idx))) {
      return false;
    }
    ++idx;
  }
  return true;
}
// Converts a serialized AnyAmbiguousAliases value into its native object.
// All three aliases intentionally resolve to the same Monster table — the
// schema tests multiple union aliases of one type. Returns nullptr for
// NONE/unknown; the caller owns the result.
inline void *AnyAmbiguousAliasesUnion::UnPack(const void *obj, AnyAmbiguousAliases type, const flatbuffers::resolver_function_t *resolver) {
  switch (type) {
    case AnyAmbiguousAliases::M1: {
      auto ptr = reinterpret_cast<const MyGame::Example::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    case AnyAmbiguousAliases::M2: {
      auto ptr = reinterpret_cast<const MyGame::Example::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    case AnyAmbiguousAliases::M3: {
      auto ptr = reinterpret_cast<const MyGame::Example::Monster *>(obj);
      return ptr->UnPack(resolver);
    }
    default: return nullptr;
  }
}
// Serializes the stored MonsterT (whichever alias is active) back into the
// builder; 0 for NONE / unknown.
inline flatbuffers::Offset<void> AnyAmbiguousAliasesUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
  switch (type) {
    case AnyAmbiguousAliases::M1: {
      auto ptr = reinterpret_cast<const MyGame::Example::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    case AnyAmbiguousAliases::M2: {
      auto ptr = reinterpret_cast<const MyGame::Example::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    case AnyAmbiguousAliases::M3: {
      auto ptr = reinterpret_cast<const MyGame::Example::MonsterT *>(value);
      return CreateMonster(_fbb, ptr, _rehasher).Union();
    }
    default: return 0;
  }
}
// Copy constructor: every alias holds a MonsterT, which is not copyable
// (it owns unique_ptrs), so copying any non-NONE value asserts.
inline AnyAmbiguousAliasesUnion::AnyAmbiguousAliasesUnion(const AnyAmbiguousAliasesUnion &u) : type(u.type), value(nullptr) {
  switch (type) {
    case AnyAmbiguousAliases::M1: {
      FLATBUFFERS_ASSERT(false); // MyGame::Example::MonsterT not copyable.
      break;
    }
    case AnyAmbiguousAliases::M2: {
      FLATBUFFERS_ASSERT(false); // MyGame::Example::MonsterT not copyable.
      break;
    }
    case AnyAmbiguousAliases::M3: {
      FLATBUFFERS_ASSERT(false); // MyGame::Example::MonsterT not copyable.
      break;
    }
    default:
      break;
  }
}
// Deletes the stored MonsterT and resets the union to NONE.
inline void AnyAmbiguousAliasesUnion::Reset() {
  switch (type) {
    case AnyAmbiguousAliases::M1: {
      auto ptr = reinterpret_cast<MyGame::Example::MonsterT *>(value);
      delete ptr;
      break;
    }
    case AnyAmbiguousAliases::M2: {
      auto ptr = reinterpret_cast<MyGame::Example::MonsterT *>(value);
      delete ptr;
      break;
    }
    case AnyAmbiguousAliases::M3: {
      auto ptr = reinterpret_cast<MyGame::Example::MonsterT *>(value);
      delete ptr;
      break;
    }
    default: break;
  }
  value = nullptr;
  type = AnyAmbiguousAliases::NONE;
}
// Reflection TypeTable for the Color enum. `values` holds the numeric enum
// values (1, 2, 8 — non-contiguous; NOTE(review): these look like bit-flag
// values, consistent with the schema's bit_flags attribute — confirm against
// the .fbs). The self-reference in type_refs is how flatc encodes an enum's
// own underlying type table.
inline const flatbuffers::TypeTable *ColorTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_UCHAR, 0, 0 },
    { flatbuffers::ET_UCHAR, 0, 0 },
    { flatbuffers::ET_UCHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    MyGame::Example::ColorTypeTable
  };
  static const int64_t values[] = { 1, 2, 8 };
  static const char * const names[] = {
    "Red",
    "Green",
    "Blue"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 3, type_codes, type_refs, values, names
  };
  return &tt;
}
// Reflection TypeTable for the signed Race enum (values -1..2, base type
// ET_CHAR). The explicit `values` array is required because the range does
// not start at 0.
inline const flatbuffers::TypeTable *RaceTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    MyGame::Example::RaceTypeTable
  };
  static const int64_t values[] = { -1, 0, 1, 2 };
  static const char * const names[] = {
    "None",
    "Human",
    "Dwarf",
    "Elf"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 4, type_codes, type_refs, values, names
  };
  return &tt;
}
// Reflection TypeTable for the Any union. Each TypeCode's third element
// indexes into type_refs (-1 = no referenced table, used for NONE); `values`
// is null because union discriminants are contiguous from 0.
inline const flatbuffers::TypeTable *AnyTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 0, -1 },
    { flatbuffers::ET_SEQUENCE, 0, 0 },
    { flatbuffers::ET_SEQUENCE, 0, 1 },
    { flatbuffers::ET_SEQUENCE, 0, 2 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    MyGame::Example::MonsterTypeTable,
    MyGame::Example::TestSimpleTableWithEnumTypeTable,
    MyGame::Example2::MonsterTypeTable
  };
  static const char * const names[] = {
    "NONE",
    "Monster",
    "TestSimpleTableWithEnum",
    "MyGame_Example2_Monster"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_UNION, 4, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection TypeTable for AnyUniqueAliases: same member tables as Any but
// under the alias names M / TS / M2.
inline const flatbuffers::TypeTable *AnyUniqueAliasesTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 0, -1 },
    { flatbuffers::ET_SEQUENCE, 0, 0 },
    { flatbuffers::ET_SEQUENCE, 0, 1 },
    { flatbuffers::ET_SEQUENCE, 0, 2 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    MyGame::Example::MonsterTypeTable,
    MyGame::Example::TestSimpleTableWithEnumTypeTable,
    MyGame::Example2::MonsterTypeTable
  };
  static const char * const names[] = {
    "NONE",
    "M",
    "TS",
    "M2"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_UNION, 4, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection TypeTable for AnyAmbiguousAliases: all three aliases reference
// the single Monster type table (type_refs index 0 for each).
inline const flatbuffers::TypeTable *AnyAmbiguousAliasesTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 0, -1 },
    { flatbuffers::ET_SEQUENCE, 0, 0 },
    { flatbuffers::ET_SEQUENCE, 0, 0 },
    { flatbuffers::ET_SEQUENCE, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    MyGame::Example::MonsterTypeTable
  };
  static const char * const names[] = {
    "NONE",
    "M1",
    "M2",
    "M3"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_UNION, 4, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
} // namespace Example
inline const flatbuffers::TypeTable *InParentNamespaceTypeTable() {
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr
};
return &tt;
}
namespace Example2 {
inline const flatbuffers::TypeTable *MonsterTypeTable() {
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr
};
return &tt;
}
} // namespace Example2
namespace Example {
inline const flatbuffers::TypeTable *TestTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_CHAR, 0, -1 }
};
static const int64_t values[] = { 0, 2, 4 };
static const char * const names[] = {
"a",
"b"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 2, type_codes, nullptr, values, names
};
return &tt;
}
inline const flatbuffers::TypeTable *TestSimpleTableWithEnumTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_UCHAR, 0, 0 }
};
static const flatbuffers::TypeFunction type_refs[] = {
MyGame::Example::ColorTypeTable
};
static const char * const names[] = {
"color"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names
};
return &tt;
}
inline const flatbuffers::TypeTable *Vec3TypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_DOUBLE, 0, -1 },
{ flatbuffers::ET_UCHAR, 0, 0 },
{ flatbuffers::ET_SEQUENCE, 0, 1 }
};
static const flatbuffers::TypeFunction type_refs[] = {
MyGame::Example::ColorTypeTable,
MyGame::Example::TestTypeTable
};
static const int64_t values[] = { 0, 4, 8, 16, 24, 26, 32 };
static const char * const names[] = {
"x",
"y",
"z",
"test1",
"test2",
"test3"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 6, type_codes, type_refs, values, names
};
return &tt;
}
inline const flatbuffers::TypeTable *AbilityTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_UINT, 0, -1 },
{ flatbuffers::ET_UINT, 0, -1 }
};
static const int64_t values[] = { 0, 4, 8 };
static const char * const names[] = {
"id",
"distance"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 2, type_codes, nullptr, values, names
};
return &tt;
}
inline const flatbuffers::TypeTable *StatTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_STRING, 0, -1 },
{ flatbuffers::ET_LONG, 0, -1 },
{ flatbuffers::ET_USHORT, 0, -1 }
};
static const char * const names[] = {
"id",
"val",
"count"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names
};
return &tt;
}
inline const flatbuffers::TypeTable *ReferrableTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_ULONG, 0, -1 }
};
static const char * const names[] = {
"id"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names
};
return &tt;
}
inline const flatbuffers::TypeTable *MonsterTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_STRING, 0, -1 },
{ flatbuffers::ET_BOOL, 0, -1 },
{ flatbuffers::ET_UCHAR, 1, -1 },
{ flatbuffers::ET_UCHAR, 0, 1 },
{ flatbuffers::ET_UTYPE, 0, 2 },
{ flatbuffers::ET_SEQUENCE, 0, 2 },
{ flatbuffers::ET_SEQUENCE, 1, 3 },
{ flatbuffers::ET_STRING, 1, -1 },
{ flatbuffers::ET_SEQUENCE, 1, 4 },
{ flatbuffers::ET_SEQUENCE, 0, 4 },
{ flatbuffers::ET_UCHAR, 1, -1 },
{ flatbuffers::ET_SEQUENCE, 0, 5 },
{ flatbuffers::ET_BOOL, 0, -1 },
{ flatbuffers::ET_INT, 0, -1 },
{ flatbuffers::ET_UINT, 0, -1 },
{ flatbuffers::ET_LONG, 0, -1 },
{ flatbuffers::ET_ULONG, 0, -1 },
{ flatbuffers::ET_INT, 0, -1 },
{ flatbuffers::ET_UINT, 0, -1 },
{ flatbuffers::ET_LONG, 0, -1 },
{ flatbuffers::ET_ULONG, 0, -1 },
{ flatbuffers::ET_BOOL, 1, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_STRING, 1, -1 },
{ flatbuffers::ET_SEQUENCE, 1, 6 },
{ flatbuffers::ET_UCHAR, 1, -1 },
{ flatbuffers::ET_SEQUENCE, 1, 3 },
{ flatbuffers::ET_LONG, 1, -1 },
{ flatbuffers::ET_DOUBLE, 1, -1 },
{ flatbuffers::ET_SEQUENCE, 0, 7 },
{ flatbuffers::ET_SEQUENCE, 1, 8 },
{ flatbuffers::ET_ULONG, 0, -1 },
{ flatbuffers::ET_ULONG, 1, -1 },
{ flatbuffers::ET_SEQUENCE, 1, 8 },
{ flatbuffers::ET_ULONG, 0, -1 },
{ flatbuffers::ET_ULONG, 1, -1 },
{ flatbuffers::ET_ULONG, 0, -1 },
{ flatbuffers::ET_ULONG, 1, -1 },
{ flatbuffers::ET_UTYPE, 0, 9 },
{ flatbuffers::ET_SEQUENCE, 0, 9 },
{ flatbuffers::ET_UTYPE, 0, 10 },
{ flatbuffers::ET_SEQUENCE, 0, 10 },
{ flatbuffers::ET_UCHAR, 1, 1 },
{ flatbuffers::ET_CHAR, 0, 11 }
};
static const flatbuffers::TypeFunction type_refs[] = {
MyGame::Example::Vec3TypeTable,
MyGame::Example::ColorTypeTable,
MyGame::Example::AnyTypeTable,
MyGame::Example::TestTypeTable,
MyGame::Example::MonsterTypeTable,
MyGame::Example::StatTypeTable,
MyGame::Example::AbilityTypeTable,
MyGame::InParentNamespaceTypeTable,
MyGame::Example::ReferrableTypeTable,
MyGame::Example::AnyUniqueAliasesTypeTable,
MyGame::Example::AnyAmbiguousAliasesTypeTable,
MyGame::Example::RaceTypeTable
};
static const char * const names[] = {
"pos",
"mana",
"hp",
"name",
"friendly",
"inventory",
"color",
"test_type",
"test",
"test4",
"testarrayofstring",
"testarrayoftables",
"enemy",
"testnestedflatbuffer",
"testempty",
"testbool",
"testhashs32_fnv1",
"testhashu32_fnv1",
"testhashs64_fnv1",
"testhashu64_fnv1",
"testhashs32_fnv1a",
"testhashu32_fnv1a",
"testhashs64_fnv1a",
"testhashu64_fnv1a",
"testarrayofbools",
"testf",
"testf2",
"testf3",
"testarrayofstring2",
"testarrayofsortedstruct",
"flex",
"test5",
"vector_of_longs",
"vector_of_doubles",
"parent_namespace_test",
"vector_of_referrables",
"single_weak_reference",
"vector_of_weak_references",
"vector_of_strong_referrables",
"co_owning_reference",
"vector_of_co_owning_references",
"non_owning_reference",
"vector_of_non_owning_references",
"any_unique_type",
"any_unique",
"any_ambiguous_type",
"any_ambiguous",
"vector_of_enums",
"signed_enum"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 49, type_codes, type_refs, nullptr, names
};
return &tt;
}
inline const flatbuffers::TypeTable *TypeAliasesTypeTable() {
static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_CHAR, 0, -1 },
{ flatbuffers::ET_UCHAR, 0, -1 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_USHORT, 0, -1 },
{ flatbuffers::ET_INT, 0, -1 },
{ flatbuffers::ET_UINT, 0, -1 },
{ flatbuffers::ET_LONG, 0, -1 },
{ flatbuffers::ET_ULONG, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_DOUBLE, 0, -1 },
{ flatbuffers::ET_CHAR, 1, -1 },
{ flatbuffers::ET_DOUBLE, 1, -1 }
};
static const char * const names[] = {
"i8",
"u8",
"i16",
"u16",
"i32",
"u32",
"i64",
"u64",
"f32",
"f64",
"v8",
"vf64"
};
static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 12, type_codes, nullptr, nullptr, names
};
return &tt;
}
inline const MyGame::Example::Monster *GetMonster(const void *buf) {
return flatbuffers::GetRoot<MyGame::Example::Monster>(buf);
}
inline const MyGame::Example::Monster *GetSizePrefixedMonster(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<MyGame::Example::Monster>(buf);
}
inline Monster *GetMutableMonster(void *buf) {
return flatbuffers::GetMutableRoot<Monster>(buf);
}
inline const char *MonsterIdentifier() {
return "MONS";
}
inline bool MonsterBufferHasIdentifier(const void *buf) {
return flatbuffers::BufferHasIdentifier(
buf, MonsterIdentifier());
}
inline bool VerifyMonsterBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<MyGame::Example::Monster>(MonsterIdentifier());
}
inline bool VerifySizePrefixedMonsterBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<MyGame::Example::Monster>(MonsterIdentifier());
}
inline const char *MonsterExtension() {
return "mon";
}
inline void FinishMonsterBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<MyGame::Example::Monster> root) {
fbb.Finish(root, MonsterIdentifier());
}
inline void FinishSizePrefixedMonsterBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<MyGame::Example::Monster> root) {
fbb.FinishSizePrefixed(root, MonsterIdentifier());
}
inline std::unique_ptr<MyGame::Example::MonsterT> UnPackMonster(
const void *buf,
const flatbuffers::resolver_function_t *res = nullptr) {
return std::unique_ptr<MyGame::Example::MonsterT>(GetMonster(buf)->UnPack(res));
}
inline std::unique_ptr<MyGame::Example::MonsterT> UnPackSizePrefixedMonster(
const void *buf,
const flatbuffers::resolver_function_t *res = nullptr) {
return std::unique_ptr<MyGame::Example::MonsterT>(GetSizePrefixedMonster(buf)->UnPack(res));
}
} // namespace Example
} // namespace MyGame
#endif // FLATBUFFERS_GENERATED_MONSTERTEST_MYGAME_EXAMPLE_H_
| 38.956889 | 346 | 0.712568 | [
"object",
"vector"
] |
9e190464921e8eb14d52623b0a6d24530e3dc34f | 3,748 | h | C | arch/arm/src/armv6-m/ram_vectors.h | smartether/NuttX | 066937ba01f84ed00cecc32efc4487d7786a64a6 | [
"Apache-2.0"
] | 201 | 2015-01-23T06:06:31.000Z | 2022-01-28T22:25:51.000Z | arch/arm/src/armv6-m/ram_vectors.h | smartether/NuttX | 066937ba01f84ed00cecc32efc4487d7786a64a6 | [
"Apache-2.0"
] | 126 | 2015-01-02T12:54:29.000Z | 2022-02-15T15:01:00.000Z | arch/arm/src/armv6-m/ram_vectors.h | smartether/NuttX | 066937ba01f84ed00cecc32efc4487d7786a64a6 | [
"Apache-2.0"
] | 380 | 2015-01-08T10:40:04.000Z | 2022-03-19T06:59:50.000Z | /****************************************************************************
* arch/arm/src/armv6-m/ram_vectors.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM_SRC_ARMV6_M_RAM_VECTORS_H
#define __ARCH_ARM_SRC_ARMV6_M_RAM_VECTORS_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#include "arm_internal.h"
#include "chip.h"
#ifdef CONFIG_ARCH_RAMVECTORS
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* This is the size of the vector table (in 4-byte entries). This size
* includes the (1) the peripheral interrupts, (2) space for 15 Cortex-M
* exceptions, and (3) IDLE stack pointer which lies at the beginning of the
* table.
*/
#define ARMV6M_VECTAB_SIZE (32 + 16)
/****************************************************************************
* Public Data
****************************************************************************/
/* If CONFIG_ARCH_RAMVECTORS is defined, then the ARM logic must provide
* ARM-specific implementations of irq_initialize(), irq_attach(), and
* irq_dispatch. In this case, it is also assumed that the ARM vector
* table resides in RAM, has the name g_ram_vectors, and has been
* properly positioned and aligned in memory by the linker script.
*/
extern up_vector_t g_ram_vectors[ARMV6M_VECTAB_SIZE]
__attribute__ ((section (".ram_vectors"), aligned (128)));
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: arm_ramvec_initialize
*
* Description:
* Copy vectors to RAM an configure the NVIC to use the RAM vectors.
*
****************************************************************************/
void arm_ramvec_initialize(void);
/****************************************************************************
* Name: exception_common
*
* Description:
* This is the default, common vector handling entrypoint.
*
****************************************************************************/
void exception_common(void);
/****************************************************************************
* Name: arm_ramvec_attach
*
* Description:
* Configure the ram vector table so that IRQ number 'irq' will be
* dipatched by hardware to 'vector'
*
****************************************************************************/
int arm_ramvec_attach(int irq, up_vector_t vector);
#endif /* CONFIG_ARCH_RAMVECTORS */
#endif /* __ARCH_ARM_SRC_ARMV6_M_RAM_VECTORS_H */
| 37.858586 | 78 | 0.5 | [
"vector"
] |
9e23b40a90ea1a13f5656d79510fdc6e47eb9b4e | 6,645 | h | C | src/checkqueue.h | altcuim/Ant7POS | d54f093e46258244f8d0605dcd26aded8ad83809 | [
"MIT"
] | null | null | null | src/checkqueue.h | altcuim/Ant7POS | d54f093e46258244f8d0605dcd26aded8ad83809 | [
"MIT"
] | null | null | null | src/checkqueue.h | altcuim/Ant7POS | d54f093e46258244f8d0605dcd26aded8ad83809 | [
"MIT"
] | null | null | null | // Copyright (c) 2012-2014 The Bitcoin developers -*- c++ -*-
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_CHECKQUEUE_H
#define BITCOIN_CHECKQUEUE_H
#include <algorithm>
#include <vector>
#include <boost/foreach.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>
template <typename T>
class CCheckQueueControl;
/**
* Queue for verifications that have to be performed.
* The verifications are represented by a type T, which must provide an
* operator(), returning a bool.
*
* One thread (the master) is assumed to push batches of verifications
* onto the queue, where they are processed by N-1 worker threads. When
* the master is done adding work, it temporarily joins the worker pool
* as an N'th worker, until all jobs are done.
*/
template <typename T>
class CCheckQueue
{
private:
//! Mutex to protect the inner state
boost::mutex mutex;
//! Worker threads block on this when out of work
boost::condition_variable condWorker;
//! Master thread blocks on this when out of work
boost::condition_variable condMaster;
//! The queue of elements to be processed.
//! As the order of booleans doesn't matter, it is used as a LIFO (stack)
std::vector<T> queue;
//! The number of workers (including the master) that are idle.
int nIdle;
//! The total number of workers (including the master).
int nTotal;
//! The temporary evaluation result.
bool fAllOk;
/**
* Number of verifications that haven't completed yet.
* This includes elements that are not anymore in queue, but still in
* worker's own batches.
*/
unsigned int nTodo;
//! Whether we're shutting down.
bool fQuit;
//! The maximum number of elements to be processed in one batch
unsigned int nBatchSize;
/** Internal function that does bulk of the verification work. */
bool Loop(bool fMaster = false)
{
boost::condition_variable& cond = fMaster ? condMaster : condWorker;
std::vector<T> vChecks;
vChecks.reserve(nBatchSize);
unsigned int nNow = 0;
bool fOk = true;
do {
{
boost::unique_lock<boost::mutex> lock(mutex);
// first do the clean-up of the previous loop run (allowing us to do it in the same critsect)
if (nNow) {
fAllOk &= fOk;
nTodo -= nNow;
if (nTodo == 0 && !fMaster)
// We processed the last element; inform the master he can exit and return the result
condMaster.notify_one();
} else {
// first iteration
nTotal++;
}
// logically, the do loop starts here
while (queue.empty()) {
if ((fMaster || fQuit) && nTodo == 0) {
nTotal--;
bool fRet = fAllOk;
// reset the status for new work later
if (fMaster)
fAllOk = true;
// return the current status
return fRet;
}
nIdle++;
cond.wait(lock); // wait
nIdle--;
}
// Decide how many work units to process now.
// * Do not try to do everything at once, but aim for increasingly smaller batches so
// all workers finish approximately simultaneously.
// * Try to account for idle jobs which will instantly start helping.
// * Don't do batches smaller than 1 (duh), or larger than nBatchSize.
nNow = std::max(1U, std::min(nBatchSize, (unsigned int)queue.size() / (nTotal + nIdle + 1)));
vChecks.resize(nNow);
for (unsigned int i = 0; i < nNow; i++) {
// We want the lock on the mutex to be as short as possible, so swap jobs from the global
// queue to the local batch vector instead of copying.
vChecks[i].swap(queue.back());
queue.pop_back();
}
// Check whether we need to do work at all
fOk = fAllOk;
}
// execute work
BOOST_FOREACH (T& check, vChecks)
if (fOk)
fOk = check();
vChecks.clear();
} while (true);
}
public:
//! Create a new check queue
CCheckQueue(unsigned int nBatchSizeIn) : nIdle(0), nTotal(0), fAllOk(true), nTodo(0), fQuit(false), nBatchSize(nBatchSizeIn) {}
//! Worker thread
void Thread()
{
Loop();
}
//! Wait until execution finishes, and return whether all evaluations where successful.
bool Wait()
{
return Loop(true);
}
//! Add a batch of checks to the queue
void Add(std::vector<T>& vChecks)
{
boost::unique_lock<boost::mutex> lock(mutex);
BOOST_FOREACH (T& check, vChecks) {
queue.push_back(T());
check.swap(queue.back());
}
nTodo += vChecks.size();
if (vChecks.size() == 1)
condWorker.notify_one();
else if (vChecks.size() > 1)
condWorker.notify_all();
}
~CCheckQueue()
{
}
bool IsIdle()
{
boost::unique_lock<boost::mutex> lock(mutex);
return (nTotal == nIdle && nTodo == 0 && fAllOk == true);
}
};
/**
* RAII-style controller object for a CCheckQueue that guarantees the passed
* queue is finished before continuing.
*/
template <typename T>
class CCheckQueueControl
{
private:
CCheckQueue<T>* pqueue;
bool fDone;
public:
CCheckQueueControl(CCheckQueue<T>* pqueueIn) : pqueue(pqueueIn), fDone(false)
{
// passed queue is supposed to be unused, or NULL
if (pqueue != NULL) {
bool isIdle = pqueue->IsIdle();
assert(isIdle);
}
}
bool Wait()
{
if (pqueue == NULL)
return true;
bool fRet = pqueue->Wait();
fDone = true;
return fRet;
}
void Add(std::vector<T>& vChecks)
{
if (pqueue != NULL)
pqueue->Add(vChecks);
}
~CCheckQueueControl()
{
if (!fDone)
Wait();
}
};
#endif // BITCOIN_CHECKQUEUE_H
| 30.906977 | 131 | 0.554853 | [
"object",
"vector"
] |
9e23cd0500607e8af5679a1ff0b1c2f054005034 | 11,760 | h | C | qemu/src/qemu.git/target-openrisc/cpu.h | valuta1995/p2im | aede1371e63fd70eb9f0473aca046c5e65f3b1e8 | [
"Apache-2.0"
] | 80 | 2020-03-15T11:51:23.000Z | 2022-03-30T03:46:16.000Z | qemu/src/qemu.git/target-openrisc/cpu.h | valuta1995/p2im | aede1371e63fd70eb9f0473aca046c5e65f3b1e8 | [
"Apache-2.0"
] | 17 | 2020-08-07T07:47:24.000Z | 2022-01-04T04:54:33.000Z | qemu/src/qemu.git/target-openrisc/cpu.h | valuta1995/p2im | aede1371e63fd70eb9f0473aca046c5e65f3b1e8 | [
"Apache-2.0"
] | 20 | 2020-04-08T12:44:48.000Z | 2022-02-09T19:13:00.000Z | /*
* OpenRISC virtual CPU header.
*
* Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef CPU_OPENRISC_H
#define CPU_OPENRISC_H
#define TARGET_LONG_BITS 32
#define ELF_MACHINE EM_OPENRISC
#define CPUArchState struct CPUOpenRISCState
/* cpu_openrisc_map_address_* in CPUOpenRISCTLBContext need this decl. */
struct OpenRISCCPU;
#include "config.h"
#include "qemu-common.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"
#include "qom/cpu.h"
#define TYPE_OPENRISC_CPU "or32-cpu"
#define OPENRISC_CPU_CLASS(klass) \
OBJECT_CLASS_CHECK(OpenRISCCPUClass, (klass), TYPE_OPENRISC_CPU)
#define OPENRISC_CPU(obj) \
OBJECT_CHECK(OpenRISCCPU, (obj), TYPE_OPENRISC_CPU)
#define OPENRISC_CPU_GET_CLASS(obj) \
OBJECT_GET_CLASS(OpenRISCCPUClass, (obj), TYPE_OPENRISC_CPU)
/**
* OpenRISCCPUClass:
* @parent_realize: The parent class' realize handler.
* @parent_reset: The parent class' reset handler.
*
* A OpenRISC CPU model.
*/
typedef struct OpenRISCCPUClass {
/*< private >*/
CPUClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
void (*parent_reset)(CPUState *cpu);
} OpenRISCCPUClass;
#define NB_MMU_MODES 3
enum {
MMU_NOMMU_IDX = 0,
MMU_SUPERVISOR_IDX = 1,
MMU_USER_IDX = 2,
};
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#define SET_FP_CAUSE(reg, v) do {\
(reg) = ((reg) & ~(0x3f << 12)) | \
((v & 0x3f) << 12);\
} while (0)
#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
#define UPDATE_FP_FLAGS(reg, v) do {\
(reg) |= ((v & 0x1f) << 2);\
} while (0)
/* Version Register */
#define SPR_VR 0xFFFF003F
/* Internal flags, delay slot flag */
#define D_FLAG 1
/* Interrupt */
#define NR_IRQS 32
/* Unit presece register */
enum {
UPR_UP = (1 << 0),
UPR_DCP = (1 << 1),
UPR_ICP = (1 << 2),
UPR_DMP = (1 << 3),
UPR_IMP = (1 << 4),
UPR_MP = (1 << 5),
UPR_DUP = (1 << 6),
UPR_PCUR = (1 << 7),
UPR_PMP = (1 << 8),
UPR_PICP = (1 << 9),
UPR_TTP = (1 << 10),
UPR_CUP = (255 << 24),
};
/* CPU configure register */
enum {
CPUCFGR_NSGF = (15 << 0),
CPUCFGR_CGF = (1 << 4),
CPUCFGR_OB32S = (1 << 5),
CPUCFGR_OB64S = (1 << 6),
CPUCFGR_OF32S = (1 << 7),
CPUCFGR_OF64S = (1 << 8),
CPUCFGR_OV64S = (1 << 9),
};
/* DMMU configure register */
enum {
DMMUCFGR_NTW = (3 << 0),
DMMUCFGR_NTS = (7 << 2),
DMMUCFGR_NAE = (7 << 5),
DMMUCFGR_CRI = (1 << 8),
DMMUCFGR_PRI = (1 << 9),
DMMUCFGR_TEIRI = (1 << 10),
DMMUCFGR_HTR = (1 << 11),
};
/* IMMU configure register */
enum {
IMMUCFGR_NTW = (3 << 0),
IMMUCFGR_NTS = (7 << 2),
IMMUCFGR_NAE = (7 << 5),
IMMUCFGR_CRI = (1 << 8),
IMMUCFGR_PRI = (1 << 9),
IMMUCFGR_TEIRI = (1 << 10),
IMMUCFGR_HTR = (1 << 11),
};
/* Float point control status register */
enum {
FPCSR_FPEE = 1,
FPCSR_RM = (3 << 1),
FPCSR_OVF = (1 << 3),
FPCSR_UNF = (1 << 4),
FPCSR_SNF = (1 << 5),
FPCSR_QNF = (1 << 6),
FPCSR_ZF = (1 << 7),
FPCSR_IXF = (1 << 8),
FPCSR_IVF = (1 << 9),
FPCSR_INF = (1 << 10),
FPCSR_DZF = (1 << 11),
};
/* Exceptions indices */
enum {
EXCP_RESET = 0x1,
EXCP_BUSERR = 0x2,
EXCP_DPF = 0x3,
EXCP_IPF = 0x4,
EXCP_TICK = 0x5,
EXCP_ALIGN = 0x6,
EXCP_ILLEGAL = 0x7,
EXCP_INT = 0x8,
EXCP_DTLBMISS = 0x9,
EXCP_ITLBMISS = 0xa,
EXCP_RANGE = 0xb,
EXCP_SYSCALL = 0xc,
EXCP_FPE = 0xd,
EXCP_TRAP = 0xe,
EXCP_NR,
};
/* Supervisor register */
enum {
SR_SM = (1 << 0),
SR_TEE = (1 << 1),
SR_IEE = (1 << 2),
SR_DCE = (1 << 3),
SR_ICE = (1 << 4),
SR_DME = (1 << 5),
SR_IME = (1 << 6),
SR_LEE = (1 << 7),
SR_CE = (1 << 8),
SR_F = (1 << 9),
SR_CY = (1 << 10),
SR_OV = (1 << 11),
SR_OVE = (1 << 12),
SR_DSX = (1 << 13),
SR_EPH = (1 << 14),
SR_FO = (1 << 15),
SR_SUMRA = (1 << 16),
SR_SCE = (1 << 17),
};
/* OpenRISC Hardware Capabilities */
enum {
OPENRISC_FEATURE_NSGF = (15 << 0),
OPENRISC_FEATURE_CGF = (1 << 4),
OPENRISC_FEATURE_OB32S = (1 << 5),
OPENRISC_FEATURE_OB64S = (1 << 6),
OPENRISC_FEATURE_OF32S = (1 << 7),
OPENRISC_FEATURE_OF64S = (1 << 8),
OPENRISC_FEATURE_OV64S = (1 << 9),
};
/* Tick Timer Mode Register */
enum {
TTMR_TP = (0xfffffff),
TTMR_IP = (1 << 28),
TTMR_IE = (1 << 29),
TTMR_M = (3 << 30),
};
/* Timer Mode */
enum {
TIMER_NONE = (0 << 30),
TIMER_INTR = (1 << 30),
TIMER_SHOT = (2 << 30),
TIMER_CONT = (3 << 30),
};
/* TLB size */
enum {
DTLB_WAYS = 1,
DTLB_SIZE = 64,
DTLB_MASK = (DTLB_SIZE-1),
ITLB_WAYS = 1,
ITLB_SIZE = 64,
ITLB_MASK = (ITLB_SIZE-1),
};
/* TLB prot */
enum {
URE = (1 << 6),
UWE = (1 << 7),
SRE = (1 << 8),
SWE = (1 << 9),
SXE = (1 << 6),
UXE = (1 << 7),
};
/* check if tlb available */
enum {
TLBRET_INVALID = -3,
TLBRET_NOMATCH = -2,
TLBRET_BADADDR = -1,
TLBRET_MATCH = 0
};
typedef struct OpenRISCTLBEntry {
uint32_t mr;
uint32_t tr;
} OpenRISCTLBEntry;
#ifndef CONFIG_USER_ONLY
typedef struct CPUOpenRISCTLBContext {
OpenRISCTLBEntry itlb[ITLB_WAYS][ITLB_SIZE];
OpenRISCTLBEntry dtlb[DTLB_WAYS][DTLB_SIZE];
int (*cpu_openrisc_map_address_code)(struct OpenRISCCPU *cpu,
hwaddr *physical,
int *prot,
target_ulong address, int rw);
int (*cpu_openrisc_map_address_data)(struct OpenRISCCPU *cpu,
hwaddr *physical,
int *prot,
target_ulong address, int rw);
} CPUOpenRISCTLBContext;
#endif
typedef struct CPUOpenRISCState {
target_ulong gpr[32]; /* General registers */
target_ulong pc; /* Program counter */
target_ulong npc; /* Next PC */
target_ulong ppc; /* Prev PC */
target_ulong jmp_pc; /* Jump PC */
target_ulong machi; /* Multiply register MACHI */
target_ulong maclo; /* Multiply register MACLO */
target_ulong fpmaddhi; /* Multiply and add float register FPMADDHI */
target_ulong fpmaddlo; /* Multiply and add float register FPMADDLO */
target_ulong epcr; /* Exception PC register */
target_ulong eear; /* Exception EA register */
uint32_t sr; /* Supervisor register */
uint32_t vr; /* Version register */
uint32_t upr; /* Unit presence register */
uint32_t cpucfgr; /* CPU configure register */
uint32_t dmmucfgr; /* DMMU configure register */
uint32_t immucfgr; /* IMMU configure register */
uint32_t esr; /* Exception supervisor register */
uint32_t fpcsr; /* Float register */
float_status fp_status;
uint32_t flags; /* cpu_flags, we only use it for exception
in solt so far. */
uint32_t btaken; /* the SR_F bit */
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
#ifndef CONFIG_USER_ONLY
CPUOpenRISCTLBContext * tlb;
QEMUTimer *timer;
uint32_t ttmr; /* Timer tick mode register */
uint32_t ttcr; /* Timer tick count register */
uint32_t picmr; /* Interrupt mask register */
uint32_t picsr; /* Interrupt contrl register*/
#endif
void *irq[32]; /* Interrupt irq input */
} CPUOpenRISCState;
/**
* OpenRISCCPU:
* @env: #CPUOpenRISCState
*
* A OpenRISC CPU.
*/
typedef struct OpenRISCCPU {
/*< private >*/
CPUState parent_obj;
/*< public >*/
CPUOpenRISCState env;
uint32_t feature; /* CPU Capabilities */
} OpenRISCCPU;
static inline OpenRISCCPU *openrisc_env_get_cpu(CPUOpenRISCState *env)
{
return container_of(env, OpenRISCCPU, env);
}
#define ENV_GET_CPU(e) CPU(openrisc_env_get_cpu(e))
#define ENV_OFFSET offsetof(OpenRISCCPU, env)
OpenRISCCPU *cpu_openrisc_init(const char *cpu_model);
void cpu_openrisc_list(FILE *f, fprintf_function cpu_fprintf);
int cpu_openrisc_exec(CPUOpenRISCState *s);
void openrisc_cpu_do_interrupt(CPUState *cpu);
bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void openrisc_cpu_dump_state(CPUState *cpu, FILE *f,
fprintf_function cpu_fprintf, int flags);
hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
int rw, int mmu_idx);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
#define cpu_list cpu_openrisc_list
#define cpu_exec cpu_openrisc_exec
#define cpu_gen_code cpu_openrisc_gen_code
#define cpu_signal_handler cpu_openrisc_signal_handler
#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_openrisc_cpu;
/* hw/openrisc_pic.c */
void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
/* hw/openrisc_timer.c */
void cpu_openrisc_clock_init(OpenRISCCPU *cpu);
void cpu_openrisc_count_update(OpenRISCCPU *cpu);
void cpu_openrisc_timer_update(OpenRISCCPU *cpu);
void cpu_openrisc_count_start(OpenRISCCPU *cpu);
void cpu_openrisc_count_stop(OpenRISCCPU *cpu);
void cpu_openrisc_mmu_init(OpenRISCCPU *cpu);
int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw);
int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw);
int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw);
#endif
#define cpu_init(cpu_model) CPU(cpu_openrisc_init(cpu_model))
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
target_ulong *pc,
target_ulong *cs_base, int *flags)
{
*pc = env->pc;
*cs_base = 0;
/* D_FLAG -- branch instruction exception */
*flags = (env->flags & D_FLAG);
}
static inline int cpu_mmu_index(CPUOpenRISCState *env)
{
if (!(env->sr & SR_IME)) {
return MMU_NOMMU_IDX;
}
return (env->sr & SR_SM) == 0 ? MMU_USER_IDX : MMU_SUPERVISOR_IDX;
}
#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_INT_0
#include "exec/exec-all.h"
#endif /* CPU_OPENRISC_H */
| 28.066826 | 79 | 0.601616 | [
"model"
] |
9e251c81fcf63b4ce4a96a9769bf18087131b1f4 | 2,088 | h | C | stratum/hal/lib/phal/system_real.h | cholve/stratum | 09ddb5acb604f7e694a6b7d2fe93fea79f801794 | [
"Apache-2.0"
] | 1 | 2020-07-14T09:32:38.000Z | 2020-07-14T09:32:38.000Z | stratum/hal/lib/phal/system_real.h | xiaozhitaba/stratum | 9f5bd2b285badbef11e81eca6c31d4a3c4342843 | [
"Apache-2.0"
] | 3 | 2020-07-09T07:37:27.000Z | 2020-08-18T10:05:26.000Z | stratum/hal/lib/phal/system_real.h | xiaozhitaba/stratum | 9f5bd2b285badbef11e81eca6c31d4a3c4342843 | [
"Apache-2.0"
] | 1 | 2021-03-29T16:36:59.000Z | 2021-03-29T16:36:59.000Z | // Copyright 2018 Google LLC
// Copyright 2018-present Open Networking Foundation
// SPDX-License-Identifier: Apache-2.0
#ifndef STRATUM_HAL_LIB_PHAL_SYSTEM_REAL_H_
#define STRATUM_HAL_LIB_PHAL_SYSTEM_REAL_H_
#include <libudev.h>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <memory>
#include "stratum/glue/status/status.h"
#include "stratum/glue/status/statusor.h"
#include "stratum/hal/lib/phal/system_interface.h"
namespace stratum {
namespace hal {
namespace phal {
class UdevReal : public Udev {
public:
explicit UdevReal(struct udev* udev) : udev_(udev) {}
~UdevReal() override;
::util::StatusOr<std::unique_ptr<UdevMonitor>> MakeUdevMonitor() override;
::util::StatusOr<std::vector<std::pair<std::string, std::string>>>
EnumerateSubsystem(const std::string& subsystem) override;
protected:
struct udev* udev_;
};
class UdevMonitorReal : public UdevMonitor {
public:
UdevMonitorReal(struct udev_monitor* m, int monitor_fd)
: receiving_(false), monitor_(m), fd_(monitor_fd), filters_({}) {}
~UdevMonitorReal() override;
::util::Status AddFilter(const std::string& subsystem) override;
::util::Status EnableReceiving() override;
::util::StatusOr<bool> GetUdevEvent(Udev::Event* event) override;
protected:
bool receiving_;
struct udev_monitor* monitor_;
int fd_;
std::set<std::string> filters_;
};
// A thin wrapper for real system calls.
class SystemReal : public SystemInterface {
public:
static const SystemInterface* GetSingleton();
bool PathExists(const std::string& path) const override;
::util::Status ReadFileToString(const std::string& path,
std::string* buffer) const override;
::util::Status WriteStringToFile(const std::string& buffer,
const std::string& path) const override;
::util::StatusOr<std::unique_ptr<Udev>> MakeUdev() const override;
private:
SystemReal() {}
};
} // namespace phal
} // namespace hal
} // namespace stratum
#endif // STRATUM_HAL_LIB_PHAL_SYSTEM_REAL_H_
| 27.473684 | 76 | 0.715996 | [
"vector"
] |
9e2743b7b6e3ea168fdb9c8fc69e68dae54ef8ac | 89,209 | c | C | private/ntos/fsrtl/fastio.c | King0987654/windows2000 | 01f9c2e62c4289194e33244aade34b7d19e7c9b8 | [
"MIT"
] | 11 | 2017-09-02T11:27:08.000Z | 2022-01-02T15:25:24.000Z | private/ntos/fsrtl/fastio.c | King0987654/windows2000 | 01f9c2e62c4289194e33244aade34b7d19e7c9b8 | [
"MIT"
] | null | null | null | private/ntos/fsrtl/fastio.c | King0987654/windows2000 | 01f9c2e62c4289194e33244aade34b7d19e7c9b8 | [
"MIT"
] | 14 | 2019-01-16T01:01:23.000Z | 2022-02-20T15:54:27.000Z | /*++
Copyright (c) 1989 Microsoft Corporation
Module Name:
FastIo.c
Abstract:
The Fast I/O path is used to avoid calling the file systems directly to
do a cached read. This module is only used if the file object indicates
that caching is enabled (i.e., the private cache map is not null).
Author:
Gary Kimura [GaryKi] 25-Feb-1991
Revision History:
Tom Miller [TomM] 14-Apr-1991 Added Fast Write routines
--*/
#include "FsRtlP.h"
//
// Trace level for the module
//
#define Dbg (0x04000000)
#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, FsRtlCopyRead)
#pragma alloc_text(PAGE, FsRtlCopyWrite)
#pragma alloc_text(PAGE, FsRtlMdlRead)
#pragma alloc_text(PAGE, FsRtlMdlReadDev)
#pragma alloc_text(PAGE, FsRtlPrepareMdlWrite)
#pragma alloc_text(PAGE, FsRtlPrepareMdlWriteDev)
#pragma alloc_text(PAGE, FsRtlMdlWriteComplete)
#pragma alloc_text(PAGE, FsRtlMdlWriteCompleteDev)
#pragma alloc_text(PAGE, FsRtlAcquireFileForModWrite)
#pragma alloc_text(PAGE, FsRtlReleaseFileForModWrite)
#pragma alloc_text(PAGE, FsRtlAcquireFileForCcFlush)
#pragma alloc_text(PAGE, FsRtlReleaseFileForCcFlush)
#pragma alloc_text(PAGE, FsRtlAcquireFileExclusive)
#pragma alloc_text(PAGE, FsRtlReleaseFile)
#pragma alloc_text(PAGE, FsRtlGetFileSize)
#pragma alloc_text(PAGE, FsRtlSetFileSize)
#endif
BOOLEAN
FsRtlCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN ULONG LockKey,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus,
    IN PDEVICE_OBJECT DeviceObject
    )

/*++

Routine Description:

    This routine does a fast cached read bypassing the usual file system
    entry routine (i.e., without the Irp).  It is used to do a copy read
    of a cached file object.  For a complete description of the arguments
    see CcCopyRead.

Arguments:

    FileObject - Pointer to the file object being read.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    Wait - FALSE if caller may not block, TRUE otherwise

    Buffer - Pointer to output buffer to which data should be copied.

    IoStatus - Pointer to standard I/O status block to receive the status
               for the transfer.

Return Value:

    FALSE - if Wait was supplied as FALSE and the data was not delivered, or
        if there is an I/O error.

    TRUE - if the data is being delivered

--*/

{
    PFSRTL_COMMON_FCB_HEADER Header;
    BOOLEAN Status = TRUE;

    //
    // NOTE(review): PageCount is computed from the caller's Length before
    // the EOF clamp below may shrink Length, so it can overstate the pages
    // actually transferred; presumably CcFastCopyRead treats it as a hint.
    //
    ULONG PageCount = COMPUTE_PAGES_SPANNED( FileOffset->QuadPart, Length );
    LARGE_INTEGER BeyondLastByte;
    PDEVICE_OBJECT targetVdo;

    PAGED_CODE();

    //
    // Special case a read of zero length
    //

    if (Length != 0) {

        //
        // Check for overflow. Returning false here will re-route this request through the
        // IRP based path, but this isn't performance critical.
        //

        if (MAXLONGLONG - FileOffset->QuadPart < (LONGLONG)Length) {

            IoStatus->Status = STATUS_INVALID_PARAMETER;
            IoStatus->Information = 0;

            return FALSE;
        }

        BeyondLastByte.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;
        Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;

        //
        // Enter the file system
        //

        FsRtlEnterFileSystem();

        //
        // Increment performance counters and get the resource
        //

        if (Wait) {

            HOT_STATISTIC(CcFastReadWait) += 1;

            //
            // Acquired shared on the common fcb header
            //

            (VOID)ExAcquireResourceShared( Header->Resource, TRUE );

        } else {

            HOT_STATISTIC(CcFastReadNoWait) += 1;

            //
            // Acquired shared on the common fcb header, and return if we
            // don't get it
            //

            if (!ExAcquireResourceShared( Header->Resource, FALSE )) {

                FsRtlExitFileSystem();

                CcFastReadResourceMiss += 1;

                return FALSE;
            }
        }

        //
        // Now that the File is acquired shared, we can safely test if it
        // is really cached and if we can do fast i/o and if not, then
        // release the fcb and return.
        //

        if ((FileObject->PrivateCacheMap == NULL) ||
            (Header->IsFastIoPossible == FastIoIsNotPossible)) {

            ExReleaseResource( Header->Resource );
            FsRtlExitFileSystem();

            HOT_STATISTIC(CcFastReadNotPossible) += 1;

            return FALSE;
        }

        //
        // Check if fast I/O is questionable and if so then go ask the
        // file system the answer
        //

        if (Header->IsFastIoPossible == FastIoIsQuestionable) {

            PFAST_IO_DISPATCH FastIoDispatch;

            ASSERT(!KeIsExecutingDpc());

            targetVdo = IoGetRelatedDeviceObject( FileObject );
            FastIoDispatch = targetVdo->DriverObject->FastIoDispatch;

            //
            // All file systems that set "Is Questionable" had better support
            // fast I/O
            //

            ASSERT(FastIoDispatch != NULL);
            ASSERT(FastIoDispatch->FastIoCheckIfPossible != NULL);

            //
            // Call the file system to check for fast I/O.  If the answer is
            // anything other than GoForIt then we cannot take the fast I/O
            // path.
            //

            if (!FastIoDispatch->FastIoCheckIfPossible( FileObject,
                                                        FileOffset,
                                                        Length,
                                                        Wait,
                                                        LockKey,
                                                        TRUE, // read operation
                                                        IoStatus,
                                                        targetVdo )) {

                //
                // Fast I/O is not possible so release the Fcb and return.
                //

                ExReleaseResource( Header->Resource );
                FsRtlExitFileSystem();

                HOT_STATISTIC(CcFastReadNotPossible) += 1;

                return FALSE;
            }
        }

        //
        // Check for read past file size.
        //

        if ( BeyondLastByte.QuadPart > Header->FileSize.QuadPart ) {

            if ( FileOffset->QuadPart >= Header->FileSize.QuadPart ) {
                IoStatus->Status = STATUS_END_OF_FILE;
                IoStatus->Information = 0;

                ExReleaseResource( Header->Resource );
                FsRtlExitFileSystem();

                return TRUE;
            }

            //
            // Clamp the transfer to valid file bytes (partial read at EOF).
            //

            Length = (ULONG)( Header->FileSize.QuadPart - FileOffset->QuadPart );
        }

        //
        // We can do fast i/o so call the cc routine to do the work and then
        // release the fcb when we've done.  If for whatever reason the
        // copy read fails, then return FALSE to our caller.
        //
        // Also mark this as the top level "Irp" so that lower file system
        // levels will not attempt a pop-up
        //

        PsGetCurrentThread()->TopLevelIrp = FSRTL_FAST_IO_TOP_LEVEL_IRP;

        try {

            //
            // Take the 32-bit fast path only when we may block and both the
            // read range and the file size fit in 32 bits.
            //

            if (Wait && ((BeyondLastByte.HighPart | Header->FileSize.HighPart) == 0)) {

                CcFastCopyRead( FileObject,
                                FileOffset->LowPart,
                                Length,
                                PageCount,
                                Buffer,
                                IoStatus );

                FileObject->Flags |= FO_FILE_FAST_IO_READ;

                ASSERT( (IoStatus->Status == STATUS_END_OF_FILE) ||
                        ((FileOffset->LowPart + IoStatus->Information) <= Header->FileSize.LowPart));

            } else {

                Status = CcCopyRead( FileObject,
                                     FileOffset,
                                     Length,
                                     Wait,
                                     Buffer,
                                     IoStatus );

                FileObject->Flags |= FO_FILE_FAST_IO_READ;

                ASSERT( !Status || (IoStatus->Status == STATUS_END_OF_FILE) ||
                        ((LONGLONG)(FileOffset->QuadPart + IoStatus->Information) <= Header->FileSize.QuadPart));
            }

            if (Status) {

                FileObject->CurrentByteOffset.QuadPart = FileOffset->QuadPart + IoStatus->Information;
            }

        //
        // Expected (I/O-related) exceptions simply fail the fast path;
        // anything unexpected continues to search up the handler chain.
        //

        } except( FsRtlIsNtstatusExpected(GetExceptionCode())
                                        ? EXCEPTION_EXECUTE_HANDLER
                                        : EXCEPTION_CONTINUE_SEARCH ) {

            Status = FALSE;
        }

        //
        // Cleanup runs on both the success and handled-exception paths.
        //

        PsGetCurrentThread()->TopLevelIrp = 0;

        ExReleaseResource( Header->Resource );
        FsRtlExitFileSystem();

        return Status;

    } else {

        //
        // A zero length transfer was requested.
        //

        IoStatus->Status = STATUS_SUCCESS;
        IoStatus->Information = 0;

        return TRUE;
    }
}
BOOLEAN
FsRtlCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN ULONG LockKey,
    IN PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus,
    IN PDEVICE_OBJECT DeviceObject
    )

/*++

Routine Description:

    This routine does a fast cached write bypassing the usual file system
    entry routine (i.e., without the Irp).  It is used to do a copy write
    of a cached file object.  For a complete description of the arguments
    see CcCopyWrite.

Arguments:

    FileObject - Pointer to the file object being write.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    Wait - FALSE if caller may not block, TRUE otherwise

    Buffer - Pointer to output buffer to which data should be copied.

    IoStatus - Pointer to standard I/O status block to receive the status
               for the transfer.

Return Value:

    FALSE - if Wait was supplied as FALSE and the data was not delivered, or
        if there is an I/O error.

    TRUE - if the data is being delivered

--*/

{
    PFSRTL_COMMON_FCB_HEADER Header;
    BOOLEAN AcquiredShared = FALSE;
    BOOLEAN Status = TRUE;
    BOOLEAN FileSizeChanged = FALSE;

    //
    // FILE_WRITE_TO_END_OF_FILE is encoded as offset {0xffffffff, -1}.
    //

    BOOLEAN WriteToEndOfFile = (BOOLEAN)((FileOffset->LowPart == FILE_WRITE_TO_END_OF_FILE) &&
                                         (FileOffset->HighPart == -1));

    PAGED_CODE();

    //
    // Get a real pointer to the common fcb header
    //

    Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;

    //
    // Do we need to verify the volume?  If so, we must go to the file
    // system.  Also return FALSE if FileObject is write through, the
    // File System must do that.
    //

    if (CcCanIWrite( FileObject, Length, Wait, FALSE ) &&
        !FlagOn(FileObject->Flags, FO_WRITE_THROUGH) &&
        CcCopyWriteWontFlush(FileObject, FileOffset, Length)) {

        //
        // Assume our transfer will work
        //

        IoStatus->Status = STATUS_SUCCESS;
        IoStatus->Information = Length;

        //
        // Special case the zero byte length
        //

        if (Length != 0) {

            //
            // Enter the file system
            //

            FsRtlEnterFileSystem();

            //
            // Split into separate paths for increased performance.  First
            // we have the faster path which only supports Wait == TRUE and
            // 32 bits.  We will make an unsafe test on whether the fast path
            // is ok, then just return FALSE later if we were wrong.  This
            // should virtually never happen.
            //
            // IMPORTANT NOTE: It is very important that any changes made to
            //                 this path also be applied to the 64-bit path
            //                 which is the else of this test!
            //

            if (Wait && (Header->AllocationSize.HighPart == 0)) {

                ULONG Offset, NewFileSize;

                //
                // OldFileSize/OldValidDataLength are written only when
                // FileSizeChanged is set, and read only on that same
                // condition below.
                //

                ULONG OldFileSize;
                ULONG OldValidDataLength;
                BOOLEAN Wrapped;

                //
                // Make our best guess on whether we need the file exclusive
                // or shared.  Note that we do not check FileOffset->HighPart
                // until below.
                //

                NewFileSize = FileOffset->LowPart + Length;

                if (WriteToEndOfFile || (NewFileSize > Header->ValidDataLength.LowPart)) {

                    //
                    // Acquired shared on the common fcb header
                    //

                    ExAcquireResourceExclusive( Header->Resource, TRUE );

                } else {

                    //
                    // Acquired shared on the common fcb header
                    //

                    ExAcquireResourceShared( Header->Resource, TRUE );

                    AcquiredShared = TRUE;
                }

                //
                // We have the fcb shared now check if we can do fast i/o
                // and if the file space is allocated, and if not then
                // release the fcb and return.
                //

                if (WriteToEndOfFile) {

                    Offset = Header->FileSize.LowPart;
                    NewFileSize = Header->FileSize.LowPart + Length;
                    Wrapped = NewFileSize < Header->FileSize.LowPart;

                } else {

                    Offset = FileOffset->LowPart;
                    NewFileSize = FileOffset->LowPart + Length;
                    Wrapped = (NewFileSize < FileOffset->LowPart) || (FileOffset->HighPart != 0);
                }

                //
                // Now that the File is acquired shared, we can safely test
                // if it is really cached and if we can do fast i/o and we
                // do not have to extend.  If not then release the fcb and
                // return.
                //
                // Get out if we have too much to zero.  This case is not important
                // for performance, and a file system supporting sparseness may have
                // a way to do this more efficiently.
                //

                if ((FileObject->PrivateCacheMap == NULL) ||
                    (Header->IsFastIoPossible == FastIoIsNotPossible) ||
                    (NewFileSize > Header->AllocationSize.LowPart) ||
                    (Offset >= (Header->ValidDataLength.LowPart + 0x2000)) ||
                    (Header->AllocationSize.HighPart != 0) || Wrapped) {

                    ExReleaseResource( Header->Resource );
                    FsRtlExitFileSystem();

                    return FALSE;
                }

                //
                // If we will be extending ValidDataLength, we will have to
                // get the Fcb exclusive, and make sure that FastIo is still
                // possible.  We should only execute this block of code very
                // rarely, when the unsafe test for ValidDataLength failed
                // above.
                //

                if (AcquiredShared && (NewFileSize > Header->ValidDataLength.LowPart)) {

                    //
                    // Drop shared and re-acquire exclusive; all cached state
                    // must be revalidated after the gap.
                    //

                    ExReleaseResource( Header->Resource );

                    ExAcquireResourceExclusive( Header->Resource, TRUE );

                    //
                    // If writing to end of file, we must recalculate new size.
                    //

                    if (WriteToEndOfFile) {

                        Offset = Header->FileSize.LowPart;
                        NewFileSize = Header->FileSize.LowPart + Length;
                        Wrapped = NewFileSize < Header->FileSize.LowPart;
                    }

                    if ((FileObject->PrivateCacheMap == NULL) ||
                        (Header->IsFastIoPossible == FastIoIsNotPossible) ||
                        (NewFileSize > Header->AllocationSize.LowPart) ||
                        (Header->AllocationSize.HighPart != 0) || Wrapped) {

                        ExReleaseResource( Header->Resource );
                        FsRtlExitFileSystem();

                        return FALSE;
                    }
                }

                //
                // Check if fast I/O is questionable and if so then go ask
                // the file system the answer
                //

                if (Header->IsFastIoPossible == FastIoIsQuestionable) {

                    PDEVICE_OBJECT targetVdo = IoGetRelatedDeviceObject( FileObject );
                    PFAST_IO_DISPATCH FastIoDispatch = targetVdo->DriverObject->FastIoDispatch;

                    //
                    // NOTE(review): this local deliberately shadows the
                    // IoStatus parameter; the status of the check call is
                    // discarded.
                    //

                    IO_STATUS_BLOCK IoStatus;

                    //
                    // All file system then set "Is Questionable" had better
                    // support fast I/O
                    //

                    ASSERT(FastIoDispatch != NULL);
                    ASSERT(FastIoDispatch->FastIoCheckIfPossible != NULL);

                    //
                    // Call the file system to check for fast I/O.  If the
                    // answer is anything other than GoForIt then we cannot
                    // take the fast I/O path.
                    //

                    ASSERT(FILE_WRITE_TO_END_OF_FILE == 0xffffffff);

                    if (!FastIoDispatch->FastIoCheckIfPossible( FileObject,
                                                                FileOffset->QuadPart != (LONGLONG)-1 ?
                                                                  FileOffset : &Header->FileSize,
                                                                Length,
                                                                TRUE,
                                                                LockKey,
                                                                FALSE, // write operation
                                                                &IoStatus,
                                                                targetVdo )) {

                        //
                        // Fast I/O is not possible so release the Fcb and
                        // return.
                        //

                        ExReleaseResource( Header->Resource );
                        FsRtlExitFileSystem();

                        return FALSE;
                    }
                }

                //
                // Now see if we will change FileSize.  We have to do it now
                // so that our reads are not nooped.
                //

                if (NewFileSize > Header->FileSize.LowPart) {

                    FileSizeChanged = TRUE;
                    OldFileSize = Header->FileSize.LowPart;
                    OldValidDataLength = Header->ValidDataLength.LowPart;
                    Header->FileSize.LowPart = NewFileSize;
                }

                //
                // We can do fast i/o so call the cc routine to do the work
                // and then release the fcb when we've done.  If for whatever
                // reason the copy write fails, then return FALSE to our
                // caller.
                //
                // Also mark this as the top level "Irp" so that lower file
                // system levels will not attempt a pop-up
                //

                PsGetCurrentThread()->TopLevelIrp = FSRTL_FAST_IO_TOP_LEVEL_IRP;

                try {

                    //
                    // See if we have to do some zeroing
                    //

                    if (Offset > Header->ValidDataLength.LowPart) {

                        LARGE_INTEGER ZeroEnd;

                        ZeroEnd.LowPart = Offset;
                        ZeroEnd.HighPart = 0;

                        CcZeroData( FileObject,
                                    &Header->ValidDataLength,
                                    &ZeroEnd,
                                    TRUE );
                    }

                    CcFastCopyWrite( FileObject,
                                     Offset,
                                     Length,
                                     Buffer );

                } except( FsRtlIsNtstatusExpected(GetExceptionCode())
                                        ? EXCEPTION_EXECUTE_HANDLER
                                        : EXCEPTION_CONTINUE_SEARCH ) {

                    Status = FALSE;
                }

                PsGetCurrentThread()->TopLevelIrp = 0;

                //
                // If we succeeded, see if we have to update FileSize or
                // ValidDataLength.
                //

                if (Status) {

                    //
                    // In the case of ValidDataLength, we really have to
                    // check again since we did not do this when we acquired
                    // the resource exclusive.
                    //

                    if (NewFileSize > Header->ValidDataLength.LowPart) {

                        Header->ValidDataLength.LowPart = NewFileSize;
                    }

                    //
                    // Set this handle as having modified the file
                    //

                    FileObject->Flags |= FO_FILE_MODIFIED;

                    if (FileSizeChanged) {

                        CcGetFileSizePointer(FileObject)->LowPart = NewFileSize;

                        FileObject->Flags |= FO_FILE_SIZE_CHANGED;
                    }

                    //
                    // Also update the file position pointer
                    //

                    FileObject->CurrentByteOffset.LowPart = Offset + Length;
                    FileObject->CurrentByteOffset.HighPart = 0;

                //
                // If we did not succeed, then we must restore the original
                // FileSize while holding the PagingIoResource exclusive if
                // it exists.
                //

                } else if (FileSizeChanged) {

                    if ( Header->PagingIoResource != NULL ) {

                        (VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
                        Header->FileSize.LowPart = OldFileSize;
                        Header->ValidDataLength.LowPart = OldValidDataLength;
                        ExReleaseResource( Header->PagingIoResource );

                    } else {

                        Header->FileSize.LowPart = OldFileSize;
                        Header->ValidDataLength.LowPart = OldValidDataLength;
                    }
                }

            //
            // Here is the 64-bit or no-wait path.
            //

            } else {

                LARGE_INTEGER Offset, NewFileSize;
                LARGE_INTEGER OldFileSize;
                LARGE_INTEGER OldValidDataLength;

                ASSERT(!KeIsExecutingDpc());

                //
                // Make our best guess on whether we need the file exclusive
                // or shared.
                //

                NewFileSize.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;

                if (WriteToEndOfFile || (NewFileSize.QuadPart > Header->ValidDataLength.QuadPart)) {

                    //
                    // Acquired shared on the common fcb header, and return
                    // if we don't get it.
                    //

                    if (!ExAcquireResourceExclusive( Header->Resource, Wait )) {

                        FsRtlExitFileSystem();

                        return FALSE;
                    }

                } else {

                    //
                    // Acquired shared on the common fcb header, and return
                    // if we don't get it.
                    //

                    if (!ExAcquireResourceShared( Header->Resource, Wait )) {

                        FsRtlExitFileSystem();

                        return FALSE;
                    }

                    AcquiredShared = TRUE;
                }

                //
                // We have the fcb shared now check if we can do fast i/o
                // and if the file space is allocated, and if not then
                // release the fcb and return.
                //

                if (WriteToEndOfFile) {

                    Offset = Header->FileSize;
                    NewFileSize.QuadPart = Header->FileSize.QuadPart + (LONGLONG)Length;

                } else {

                    Offset = *FileOffset;
                    NewFileSize.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;
                }

                //
                // Now that the File is acquired shared, we can safely test
                // if it is really cached and if we can do fast i/o and we
                // do not have to extend.  If not then release the fcb and
                // return.
                //
                // Get out if we are about to zero too much as well, as commented above.
                // Likewise, for NewFileSizes that exceed MAXLONGLONG.
                //

                if ((FileObject->PrivateCacheMap == NULL) ||
                    (Header->IsFastIoPossible == FastIoIsNotPossible) ||
                    (Offset.QuadPart >= (Header->ValidDataLength.QuadPart + 0x2000)) ||
                    (MAXLONGLONG - Offset.QuadPart < (LONGLONG)Length) ||
                    (NewFileSize.QuadPart > Header->AllocationSize.QuadPart) ) {

                    ExReleaseResource( Header->Resource );
                    FsRtlExitFileSystem();

                    return FALSE;
                }

                //
                // If we will be extending ValidDataLength, we will have to
                // get the Fcb exclusive, and make sure that FastIo is still
                // possible.  We should only execute this block of code very
                // rarely, when the unsafe test for ValidDataLength failed
                // above.
                //

                if (AcquiredShared && ( NewFileSize.QuadPart > Header->ValidDataLength.QuadPart )) {

                    ExReleaseResource( Header->Resource );

                    if (!ExAcquireResourceExclusive( Header->Resource, Wait )) {

                        FsRtlExitFileSystem();

                        return FALSE;
                    }

                    //
                    // If writing to end of file, we must recalculate new size.
                    //

                    if (WriteToEndOfFile) {

                        Offset = Header->FileSize;
                        NewFileSize.QuadPart = Header->FileSize.QuadPart + (LONGLONG)Length;
                    }

                    if ((FileObject->PrivateCacheMap == NULL) ||
                        (Header->IsFastIoPossible == FastIoIsNotPossible) ||
                        ( NewFileSize.QuadPart > Header->AllocationSize.QuadPart ) ) {

                        ExReleaseResource( Header->Resource );
                        FsRtlExitFileSystem();

                        return FALSE;
                    }
                }

                //
                // Check if fast I/O is questionable and if so then go ask
                // the file system the answer
                //

                if (Header->IsFastIoPossible == FastIoIsQuestionable) {

                    PFAST_IO_DISPATCH FastIoDispatch = IoGetRelatedDeviceObject( FileObject )->DriverObject->FastIoDispatch;

                    //
                    // NOTE(review): this local deliberately shadows the
                    // IoStatus parameter; the status of the check call is
                    // discarded.
                    //

                    IO_STATUS_BLOCK IoStatus;

                    //
                    // All file system then set "Is Questionable" had better
                    // support fast I/O
                    //

                    ASSERT(FastIoDispatch != NULL);
                    ASSERT(FastIoDispatch->FastIoCheckIfPossible != NULL);

                    //
                    // Call the file system to check for fast I/O.  If the
                    // answer is anything other than GoForIt then we cannot
                    // take the fast I/O path.
                    //

                    ASSERT(FILE_WRITE_TO_END_OF_FILE == 0xffffffff);

                    if (!FastIoDispatch->FastIoCheckIfPossible( FileObject,
                                                                FileOffset->QuadPart != (LONGLONG)-1 ?
                                                                  FileOffset : &Header->FileSize,
                                                                Length,
                                                                Wait,
                                                                LockKey,
                                                                FALSE, // write operation
                                                                &IoStatus,
                                                                DeviceObject )) {

                        //
                        // Fast I/O is not possible so release the Fcb and
                        // return.
                        //

                        ExReleaseResource( Header->Resource );
                        FsRtlExitFileSystem();

                        return FALSE;
                    }
                }

                //
                // Now see if we will change FileSize.  We have to do it now
                // so that our reads are not nooped.
                //

                if ( NewFileSize.QuadPart > Header->FileSize.QuadPart ) {

                    FileSizeChanged = TRUE;
                    OldFileSize = Header->FileSize;
                    OldValidDataLength = Header->ValidDataLength;

                    //
                    // Deal with an extremely rare pathalogical case here the
                    // file size wraps.
                    //

                    if ( (Header->FileSize.HighPart != NewFileSize.HighPart) &&
                         (Header->PagingIoResource != NULL) ) {

                        (VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
                        Header->FileSize = NewFileSize;
                        ExReleaseResource( Header->PagingIoResource );

                    } else {

                        Header->FileSize = NewFileSize;
                    }
                }

                //
                // We can do fast i/o so call the cc routine to do the work
                // and then release the fcb when we've done.  If for whatever
                // reason the copy write fails, then return FALSE to our
                // caller.
                //
                // Also mark this as the top level "Irp" so that lower file
                // system levels will not attempt a pop-up
                //

                PsGetCurrentThread()->TopLevelIrp = FSRTL_FAST_IO_TOP_LEVEL_IRP;

                try {

                    //
                    // See if we have to do some zeroing
                    //

                    if ( Offset.QuadPart > Header->ValidDataLength.QuadPart ) {

                        Status = CcZeroData( FileObject,
                                             &Header->ValidDataLength,
                                             &Offset,
                                             Wait );
                    }

                    if (Status) {

                        Status = CcCopyWrite( FileObject,
                                              &Offset,
                                              Length,
                                              Wait,
                                              Buffer );
                    }

                } except( FsRtlIsNtstatusExpected(GetExceptionCode())
                                        ? EXCEPTION_EXECUTE_HANDLER
                                        : EXCEPTION_CONTINUE_SEARCH ) {

                    Status = FALSE;
                }

                PsGetCurrentThread()->TopLevelIrp = 0;

                //
                // If we succeeded, see if we have to update FileSize or
                // ValidDataLength.
                //

                if (Status) {

                    //
                    // In the case of ValidDataLength, we really have to
                    // check again since we did not do this when we acquired
                    // the resource exclusive.
                    //

                    if ( NewFileSize.QuadPart > Header->ValidDataLength.QuadPart ) {

                        //
                        // Deal with an extremely rare pathalogical case here
                        // the ValidDataLength wraps.
                        //

                        if ( (Header->ValidDataLength.HighPart != NewFileSize.HighPart) &&
                             (Header->PagingIoResource != NULL) ) {

                            (VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
                            Header->ValidDataLength = NewFileSize;
                            ExReleaseResource( Header->PagingIoResource );

                        } else {

                            Header->ValidDataLength = NewFileSize;
                        }
                    }

                    //
                    // Set this handle as having modified the file
                    //

                    FileObject->Flags |= FO_FILE_MODIFIED;

                    if (FileSizeChanged) {

                        *CcGetFileSizePointer(FileObject) = NewFileSize;

                        FileObject->Flags |= FO_FILE_SIZE_CHANGED;
                    }

                    //
                    // Also update the current file position pointer
                    //

                    FileObject->CurrentByteOffset.QuadPart = Offset.QuadPart + Length;

                //
                // If we did not succeed, then we must restore the original
                // FileSize while holding the PagingIoResource exclusive if
                // it exists.
                //

                } else if (FileSizeChanged) {

                    if ( Header->PagingIoResource != NULL ) {

                        (VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
                        Header->FileSize = OldFileSize;
                        Header->ValidDataLength = OldValidDataLength;
                        ExReleaseResource( Header->PagingIoResource );

                    } else {

                        Header->FileSize = OldFileSize;
                        Header->ValidDataLength = OldValidDataLength;
                    }
                }
            }

            //
            // Common exit for both paths: drop the main resource.
            //

            ExReleaseResource( Header->Resource );
            FsRtlExitFileSystem();

            return Status;

        } else {

            //
            // A zero length transfer was requested.
            //

            return TRUE;
        }

    } else {

        //
        // The volume must be verified or the file is write through.
        //

        return FALSE;
    }
}
BOOLEAN
FsRtlMdlReadDev (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG LockKey,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus,
    IN PDEVICE_OBJECT DeviceObject
    )

/*++

Routine Description:

    This routine does a fast cached mdl read bypassing the usual file system
    entry routine (i.e., without the Irp).  It is used to do a copy read
    of a cached file object.  For a complete description of the arguments
    see CcMdlRead.

Arguments:

    FileObject - Pointer to the file object being read.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an MDL chain describing
        the desired data.

    IoStatus - Pointer to standard I/O status block to receive the status
        for the transfer.

    DeviceObject - Supplies DeviceObject for callee.

Return Value:

    FALSE - if the data was not delivered, or if there is an I/O error.

    TRUE - if the data is being delivered

--*/

{
    PFSRTL_COMMON_FCB_HEADER Header;
    BOOLEAN Status = TRUE;
    LARGE_INTEGER BeyondLastByte;

    PAGED_CODE();

    //
    // Special case a read of zero length
    //

    if (Length == 0) {

        IoStatus->Status = STATUS_SUCCESS;
        IoStatus->Information = 0;

        return TRUE;
    }

    //
    // Overflows should've been handled by caller.
    //

    ASSERT(MAXLONGLONG - FileOffset->QuadPart >= (LONGLONG)Length);

    //
    // Get a real pointer to the common fcb header
    //

    BeyondLastByte.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;
    Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;

    //
    // Enter the file system
    //

    FsRtlEnterFileSystem();

    CcFastMdlReadWait += 1;

    //
    // Acquired shared on the common fcb header
    //

    (VOID)ExAcquireResourceShared( Header->Resource, TRUE );

    //
    // Now that the File is acquired shared, we can safely test if it is
    // really cached and if we can do fast i/o and if not
    // then release the fcb and return.
    //

    if ((FileObject->PrivateCacheMap == NULL) ||
        (Header->IsFastIoPossible == FastIoIsNotPossible)) {

        ExReleaseResource( Header->Resource );
        FsRtlExitFileSystem();

        CcFastMdlReadNotPossible += 1;

        return FALSE;
    }

    //
    // Check if fast I/O is questionable and if so then go ask the file system
    // the answer
    //

    if (Header->IsFastIoPossible == FastIoIsQuestionable) {

        PFAST_IO_DISPATCH FastIoDispatch;

        ASSERT(!KeIsExecutingDpc());

        FastIoDispatch = IoGetRelatedDeviceObject( FileObject )->DriverObject->FastIoDispatch;

        //
        // All file system then set "Is Questionable" had better support fast I/O
        //

        ASSERT(FastIoDispatch != NULL);
        ASSERT(FastIoDispatch->FastIoCheckIfPossible != NULL);

        //
        // Call the file system to check for fast I/O.  If the answer is anything
        // other than GoForIt then we cannot take the fast I/O path.
        //
        // NOTE(review): the top-of-stack device is re-derived here rather
        // than using the DeviceObject parameter — presumably historical;
        // keep consistent with the shipped behavior.
        //

        if (!FastIoDispatch->FastIoCheckIfPossible( FileObject,
                                                    FileOffset,
                                                    Length,
                                                    TRUE,
                                                    LockKey,
                                                    TRUE, // read operation
                                                    IoStatus,
                                                    IoGetRelatedDeviceObject( FileObject ) )) {

            //
            // Fast I/O is not possible so release the Fcb and return.
            //

            ExReleaseResource( Header->Resource );
            FsRtlExitFileSystem();

            CcFastMdlReadNotPossible += 1;

            return FALSE;
        }
    }

    //
    // Check for read past file size.
    //

    if ( BeyondLastByte.QuadPart > Header->FileSize.QuadPart ) {

        if ( FileOffset->QuadPart >= Header->FileSize.QuadPart ) {

            IoStatus->Status = STATUS_END_OF_FILE;
            IoStatus->Information = 0;

            ExReleaseResource( Header->Resource );
            FsRtlExitFileSystem();

            return TRUE;
        }

        //
        // Clamp the transfer to valid file bytes (partial read at EOF).
        //

        Length = (ULONG)( Header->FileSize.QuadPart - FileOffset->QuadPart );
    }

    //
    // We can do fast i/o so call the cc routine to do the work and then
    // release the fcb when we've done.  If for whatever reason the
    // mdl read fails, then return FALSE to our caller.
    //
    //
    // Also mark this as the top level "Irp" so that lower file system levels
    // will not attempt a pop-up
    //

    PsGetCurrentThread()->TopLevelIrp = FSRTL_FAST_IO_TOP_LEVEL_IRP;

    try {

        CcMdlRead( FileObject, FileOffset, Length, MdlChain, IoStatus );

        FileObject->Flags |= FO_FILE_FAST_IO_READ;

    } except( FsRtlIsNtstatusExpected(GetExceptionCode())
                                    ? EXCEPTION_EXECUTE_HANDLER
                                    : EXCEPTION_CONTINUE_SEARCH ) {

        Status = FALSE;
    }

    PsGetCurrentThread()->TopLevelIrp = 0;

    ExReleaseResource( Header->Resource );
    FsRtlExitFileSystem();

    return Status;
}
//
// The old routine will either dispatch or call FsRtlMdlReadDev
//
BOOLEAN
FsRtlMdlRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG LockKey,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    Dispatching front end for the fast cached MDL read.  If the top-level
    driver on the stack exports an MdlRead fast-I/O entry, the call is
    forwarded to it.  Otherwise, if the base (volume) file system exports
    one of its own, FALSE is returned so the caller falls back to the IRP
    path and the filter stack is traversed properly.  Only when neither
    condition holds is the default FsRtlMdlReadDev implementation invoked.
    For a complete description of the arguments see CcMdlRead.

Arguments:

    FileObject - Pointer to the file object being read.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an MDL chain describing
        the desired data.

    IoStatus - Pointer to standard I/O status block to receive the status
        for the transfer.

Return Value:

    FALSE - if the data was not delivered, or if there is an I/O error.

    TRUE - if the data is being delivered

--*/

{
    PDEVICE_OBJECT topDevice;
    PDEVICE_OBJECT baseDevice;
    PFAST_IO_DISPATCH dispatchTable;

    topDevice = IoGetRelatedDeviceObject( FileObject );
    dispatchTable = topDevice->DriverObject->FastIoDispatch;

    //
    // Forward to the top-of-stack driver's fast MdlRead routine when it
    // advertises one (table present, large enough to contain the slot,
    // and the slot is non-NULL).
    //

    if ((dispatchTable != NULL) &&
        (dispatchTable->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlRead)) &&
        (dispatchTable->MdlRead != NULL)) {

        return dispatchTable->MdlRead( FileObject, FileOffset, Length, LockKey, MdlChain, IoStatus, topDevice );
    }

    //
    // The top driver has no fast routine.  If a distinct base file-system
    // device exists and *it* advertises MdlRead, fail the fast path so an
    // Irp gets generated and routed through the whole stack.
    //

    baseDevice = IoGetBaseFileSystemDeviceObject( FileObject );

    if (baseDevice != topDevice) {

        dispatchTable = baseDevice->DriverObject->FastIoDispatch;

        if ((dispatchTable != NULL) &&
            (dispatchTable->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlRead)) &&
            (dispatchTable->MdlRead != NULL)) {

            return FALSE;
        }
    }

    //
    // Otherwise, call the default routine.
    //

    return FsRtlMdlReadDev( FileObject, FileOffset, Length, LockKey, MdlChain, IoStatus, topDevice );
}
//
// The old routine will either dispatch or call FsRtlMdlReadCompleteDev
//
BOOLEAN
FsRtlMdlReadComplete (
    IN PFILE_OBJECT FileObject,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    Dispatching front end for completing a fast cached MDL read.  The
    completion is forwarded to the top-level driver's MdlReadComplete
    fast-I/O entry when one is exported.  If only the base (volume) file
    system exports one, FALSE is returned to force an Irp.  Otherwise the
    default FsRtlMdlReadCompleteDev implementation is used.

Arguments:

    FileObject - Pointer to the file object being read.

    MdlChain - Supplies a pointer to an MDL chain returned from CcMdlRead.

Return Value:

    None

--*/

{
    PDEVICE_OBJECT topDevice;
    PDEVICE_OBJECT baseDevice;
    PFAST_IO_DISPATCH dispatchTable;

    topDevice = IoGetRelatedDeviceObject( FileObject );
    dispatchTable = topDevice->DriverObject->FastIoDispatch;

    //
    // Forward to the top-of-stack driver's fast routine when it advertises
    // one (table present, large enough to contain the slot, slot non-NULL).
    //

    if ((dispatchTable != NULL) &&
        (dispatchTable->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlReadComplete)) &&
        (dispatchTable->MdlReadComplete != NULL)) {

        return dispatchTable->MdlReadComplete( FileObject, MdlChain, topDevice );
    }

    //
    // The top driver has no fast routine.  If a distinct base file-system
    // device exists and *it* advertises MdlReadComplete, fail the fast path
    // so an Irp gets generated and routed through the whole stack.
    //

    baseDevice = IoGetBaseFileSystemDeviceObject( FileObject );

    if (baseDevice != topDevice) {

        dispatchTable = baseDevice->DriverObject->FastIoDispatch;

        if ((dispatchTable != NULL) &&
            (dispatchTable->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlReadComplete)) &&
            (dispatchTable->MdlReadComplete != NULL)) {

            return FALSE;
        }
    }

    //
    // Otherwise, call the default routine.
    //

    return FsRtlMdlReadCompleteDev( FileObject, MdlChain, topDevice );
}
BOOLEAN
FsRtlMdlReadCompleteDev (
    IN PFILE_OBJECT FileObject,
    IN PMDL MdlChain,
    IN PDEVICE_OBJECT DeviceObject
    )

/*++

Routine Description:

    This routine does a fast cached mdl read bypassing the usual file system
    entry routine (i.e., without the Irp).  It is used to do a copy read
    of a cached file object.

Arguments:

    FileObject - Pointer to the file object being read.

    MdlChain - Supplies a pointer to an MDL chain returned from CcMdlRead.

    DeviceObject - Supplies the DeviceObject for the callee.

Return Value:

    None

--*/

{
    //
    // Hand the MDL chain back to the cache manager; DeviceObject is unused
    // here — the parameter exists only to match the fast-I/O prototype.
    //

    CcMdlReadComplete2( FileObject, MdlChain );
    return TRUE;
}
BOOLEAN
FsRtlPrepareMdlWriteDev (
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER FileOffset,
IN ULONG Length,
IN ULONG LockKey,
OUT PMDL *MdlChain,
OUT PIO_STATUS_BLOCK IoStatus,
IN PDEVICE_OBJECT DeviceObject
)
/*++
Routine Description:
This routine does a fast cached mdl write bypassing the usual file system
entry routine (i.e., without the Irp).  On success it returns an MDL
chain describing the cache pages into which the caller may write the
data.  For a complete description of the arguments see CcPrepareMdlWrite.
Arguments:
FileObject - Pointer to the file object being written.
FileOffset - Byte offset in file for desired data, or the special value
{FILE_WRITE_TO_END_OF_FILE, -1} to append at end of file.
Length - Length of desired data in bytes.
LockKey - Lock key passed through to FastIoCheckIfPossible when the
file system must arbitrate the request.
MdlChain - On output it returns a pointer to an MDL chain describing
the desired data.
IoStatus - Pointer to standard I/O status block to receive the status
for the transfer.
DeviceObject - Supplies the DeviceObject for the callee (not used by
this default implementation).
Return Value:
FALSE - if the data was not written, or if there is an I/O error.
TRUE - if the data is being written
--*/
{
PFSRTL_COMMON_FCB_HEADER Header;
LARGE_INTEGER Offset, NewFileSize;
LARGE_INTEGER OldFileSize;
LARGE_INTEGER OldValidDataLength;
BOOLEAN Status = TRUE;
BOOLEAN AcquiredShared = FALSE;
BOOLEAN FileSizeChanged = FALSE;
//
// Detect the append sentinel: LowPart == FILE_WRITE_TO_END_OF_FILE and
// HighPart == -1 mean "write at the current end of file".
//
BOOLEAN WriteToEndOfFile = (BOOLEAN)((FileOffset->LowPart == FILE_WRITE_TO_END_OF_FILE) &&
(FileOffset->HighPart == -1));
PAGED_CODE();
//
// Call CcCanIWrite. Also return FALSE if FileObject is write through,
// the File System must do that.
//
if ( !CcCanIWrite( FileObject, Length, TRUE, FALSE ) ||
FlagOn( FileObject->Flags, FO_WRITE_THROUGH )) {
return FALSE;
}
//
// Assume our transfer will work
//
IoStatus->Status = STATUS_SUCCESS;
//
// Special case the zero byte length
//
if (Length == 0) {
return TRUE;
}
//
// Get a real pointer to the common fcb header
//
Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;
//
// Enter the file system
//
FsRtlEnterFileSystem();
//
// Make our best guess on whether we need the file exclusive or
// shared.  (For an append write FileOffset still holds the sentinel
// here, so this NewFileSize is meaningless; it is recomputed from
// Header->FileSize once the resource is held below.)
//
NewFileSize.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;
if (WriteToEndOfFile || (NewFileSize.QuadPart > Header->ValidDataLength.QuadPart)) {
//
// Acquired exclusive on the common fcb header, and return if we don't
// get it.
//
ExAcquireResourceExclusive( Header->Resource, TRUE );
} else {
//
// Acquired shared on the common fcb header, and return if we don't
// get it.
//
ExAcquireResourceShared( Header->Resource, TRUE );
AcquiredShared = TRUE;
}
//
// We have the fcb shared now check if we can do fast i/o and if the file
// space is allocated, and if not then release the fcb and return.
//
if (WriteToEndOfFile) {
Offset = Header->FileSize;
NewFileSize.QuadPart = Header->FileSize.QuadPart + (LONGLONG)Length;
} else {
Offset = *FileOffset;
NewFileSize.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;
}
//
// Now that the File is acquired shared, we can safely test if it is
// really cached and if we can do fast i/o and we do not have to extend.
// If not then release the fcb and return.
//
if ((FileObject->PrivateCacheMap == NULL) ||
(Header->IsFastIoPossible == FastIoIsNotPossible) ||
(MAXLONGLONG - Offset.QuadPart < (LONGLONG)Length) ||
( NewFileSize.QuadPart > Header->AllocationSize.QuadPart ) ) {
ExReleaseResource( Header->Resource );
FsRtlExitFileSystem();
return FALSE;
}
//
// If we will be extending ValidDataLength, we will have to get the
// Fcb exclusive, and make sure that FastIo is still possible.
//
if (AcquiredShared && ( NewFileSize.QuadPart > Header->ValidDataLength.QuadPart )) {
ExReleaseResource( Header->Resource );
ExAcquireResourceExclusive( Header->Resource, TRUE );
AcquiredShared = FALSE;
//
// If writing to end of file, we must recalculate new size.
//
if (WriteToEndOfFile) {
Offset = Header->FileSize;
NewFileSize.QuadPart = Header->FileSize.QuadPart + (LONGLONG)Length;
}
//
// Re-run the fast-path checks: the file state may have changed while
// the resource was dropped for the shared-to-exclusive upgrade.
//
if ((FileObject->PrivateCacheMap == NULL) ||
(Header->IsFastIoPossible == FastIoIsNotPossible) ||
( NewFileSize.QuadPart > Header->AllocationSize.QuadPart )) {
ExReleaseResource( Header->Resource );
FsRtlExitFileSystem();
return FALSE;
}
}
//
// Check if fast I/O is questionable and if so then go ask the file system
// the answer
//
if (Header->IsFastIoPossible == FastIoIsQuestionable) {
PFAST_IO_DISPATCH FastIoDispatch = IoGetRelatedDeviceObject( FileObject )->DriverObject->FastIoDispatch;
//
// All file system then set "Is Questionable" had better support fast I/O
//
ASSERT(FastIoDispatch != NULL);
ASSERT(FastIoDispatch->FastIoCheckIfPossible != NULL);
//
// Call the file system to check for fast I/O. If the answer is anything
// other than GoForIt then we cannot take the fast I/O path.
//
if (!FastIoDispatch->FastIoCheckIfPossible( FileObject,
FileOffset,
Length,
TRUE,
LockKey,
FALSE, // write operation
IoStatus,
IoGetRelatedDeviceObject( FileObject ) )) {
//
// Fast I/O is not possible so release the Fcb and return.
//
ExReleaseResource( Header->Resource );
FsRtlExitFileSystem();
return FALSE;
}
}
//
// Now see if we will change FileSize. We have to do it now so that our
// reads are not nooped.  The old values are saved so the extension can
// be rolled back below if the cache manager calls fail.
//
if ( NewFileSize.QuadPart > Header->FileSize.QuadPart ) {
FileSizeChanged = TRUE;
OldFileSize = Header->FileSize;
OldValidDataLength = Header->ValidDataLength;
//
// Deal with an extremely rare pathalogical case here the file
// size wraps.
//
if ( (Header->FileSize.HighPart != NewFileSize.HighPart) &&
(Header->PagingIoResource != NULL) ) {
(VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
Header->FileSize = NewFileSize;
ExReleaseResource( Header->PagingIoResource );
} else {
Header->FileSize = NewFileSize;
}
}
//
// We can do fast i/o so call the cc routine to do the work and then
// release the fcb when we've done. If for whatever reason the
// copy write fails, then return FALSE to our caller.
//
//
// Also mark this as the top level "Irp" so that lower file system levels
// will not attempt a pop-up
//
PsGetCurrentThread()->TopLevelIrp = FSRTL_FAST_IO_TOP_LEVEL_IRP;
try {
//
// See if we have to do some zeroing
//
if ( Offset.QuadPart > Header->ValidDataLength.QuadPart ) {
Status = CcZeroData( FileObject,
&Header->ValidDataLength,
&Offset,
TRUE );
}
if (Status) {
CcPrepareMdlWrite( FileObject, &Offset, Length, MdlChain, IoStatus );
}
} except( FsRtlIsNtstatusExpected(GetExceptionCode())
? EXCEPTION_EXECUTE_HANDLER
: EXCEPTION_CONTINUE_SEARCH ) {
Status = FALSE;
}
//
// Clear the top-level marker now that the cache manager calls are done.
//
PsGetCurrentThread()->TopLevelIrp = 0;
//
// If we succeeded, see if we have to update FileSize or ValidDataLength.
//
if (Status) {
//
// In the case of ValidDataLength, we really have to check again
// since we did not do this when we acquired the resource exclusive.
//
if ( NewFileSize.QuadPart > Header->ValidDataLength.QuadPart ) {
//
// Deal with an extremely rare pathalogical case here the
// ValidDataLength wraps.
//
if ( (Header->ValidDataLength.HighPart != NewFileSize.HighPart) &&
(Header->PagingIoResource != NULL) ) {
(VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
Header->ValidDataLength = NewFileSize;
ExReleaseResource( Header->PagingIoResource );
} else {
Header->ValidDataLength = NewFileSize;
}
}
//
// Set this handle as having modified the file
//
FileObject->Flags |= FO_FILE_MODIFIED;
if (FileSizeChanged) {
*CcGetFileSizePointer(FileObject) = NewFileSize;
FileObject->Flags |= FO_FILE_SIZE_CHANGED;
}
//
// If we did not succeed, then we must restore the original FileSize
// and release the resource. In the success path, the cache manager
// will release the resource.
//
} else {
if (FileSizeChanged) {
if ( Header->PagingIoResource != NULL ) {
(VOID)ExAcquireResourceExclusive( Header->PagingIoResource, TRUE );
Header->FileSize = OldFileSize;
Header->ValidDataLength = OldValidDataLength;
ExReleaseResource( Header->PagingIoResource );
} else {
Header->FileSize = OldFileSize;
Header->ValidDataLength = OldValidDataLength;
}
}
}
//
// Now we can release the resource.
//
ExReleaseResource( Header->Resource );
FsRtlExitFileSystem();
return Status;
}
//
// Dispatcher: route to the file system's PrepareMdlWrite handler or fall
// back to FsRtlPrepareMdlWriteDev.
//
BOOLEAN
FsRtlPrepareMdlWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG LockKey,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )
/*++
Routine Description:
    This routine starts a fast cached mdl write bypassing the usual file
    system entry routine (i.e., without the Irp).  It first offers the
    request to the top-level file system's fast I/O PrepareMdlWrite
    handler; if a filter sits above a base file system that implements
    the handler itself, the fast path is failed so a regular Irp is
    generated; otherwise the default FsRtlPrepareMdlWriteDev is used.
Arguments:
    FileObject - Pointer to the file object being written.
    FileOffset - Byte offset in file for desired data.
    Length - Length of desired data in bytes.
    LockKey - Lock key for byte-range lock arbitration.
    MdlChain - On output it returns a pointer to an MDL chain describing
        the desired data.
    IoStatus - Pointer to standard I/O status block to receive the status
        for the transfer.
Return Value:
    FALSE - if the data was not written, or if there is an I/O error.
    TRUE - if the data is being written
--*/
{
    PDEVICE_OBJECT TopDeviceObject;
    PDEVICE_OBJECT BaseDeviceObject;
    PFAST_IO_DISPATCH Dispatch;

    TopDeviceObject = IoGetRelatedDeviceObject( FileObject );
    Dispatch = TopDeviceObject->DriverObject->FastIoDispatch;

    //
    // If the top-level driver exports PrepareMdlWrite, hand the request
    // straight to it.
    //
    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, PrepareMdlWrite)) &&
        (Dispatch->PrepareMdlWrite != NULL)) {

        return Dispatch->PrepareMdlWrite( FileObject, FileOffset, Length, LockKey, MdlChain, IoStatus, TopDeviceObject );
    }

    //
    // A different DeviceObject for the volume means a filter is layered
    // above the base file system.  If the base file system itself
    // implements the fast routine, fail the fast path here so an Irp is
    // generated instead.
    //
    BaseDeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
    if (BaseDeviceObject != TopDeviceObject) {
        Dispatch = BaseDeviceObject->DriverObject->FastIoDispatch;
        if ((Dispatch != NULL) &&
            (Dispatch->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, PrepareMdlWrite)) &&
            (Dispatch->PrepareMdlWrite != NULL)) {

            return FALSE;
        }
    }

    //
    // Nobody claimed the request; use the default implementation.
    //
    return FsRtlPrepareMdlWriteDev( FileObject, FileOffset, Length, LockKey, MdlChain, IoStatus, TopDeviceObject );
}
//
// Dispatcher: route to the file system's MdlWriteComplete handler or fall
// back to FsRtlMdlWriteCompleteDev.
//
BOOLEAN
FsRtlMdlWriteComplete (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN PMDL MdlChain
    )
/*++
Routine Description:
    This routine completes an Mdl write.  It first offers the completion
    to the top-level file system's fast I/O MdlWriteComplete handler; if
    a filter sits above a base file system that implements the handler
    itself, the fast path is failed so a regular Irp is generated;
    otherwise the default FsRtlMdlWriteCompleteDev is used.
Arguments:
    FileObject - Pointer to the file object that was written.
    FileOffset - Byte offset of the write being completed.
    MdlChain - Supplies a pointer to an MDL chain returned from
        CcPrepareMdlWrite.
Return Value:
    TRUE if the completion was handled on the fast path, FALSE otherwise.
--*/
{
    PDEVICE_OBJECT TopDeviceObject;
    PDEVICE_OBJECT BaseDeviceObject;
    PFAST_IO_DISPATCH Dispatch;

    TopDeviceObject = IoGetRelatedDeviceObject( FileObject );
    Dispatch = TopDeviceObject->DriverObject->FastIoDispatch;

    //
    // Give the top-level driver first chance at the completion.
    //
    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlWriteComplete)) &&
        (Dispatch->MdlWriteComplete != NULL)) {

        return Dispatch->MdlWriteComplete( FileObject, FileOffset, MdlChain, TopDeviceObject );
    }

    //
    // A filter is layered above the base file system.  If the base file
    // system itself implements the fast routine, fail the fast path so
    // an Irp is generated instead.
    //
    BaseDeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
    if (BaseDeviceObject != TopDeviceObject) {
        Dispatch = BaseDeviceObject->DriverObject->FastIoDispatch;
        if ((Dispatch != NULL) &&
            (Dispatch->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlWriteComplete)) &&
            (Dispatch->MdlWriteComplete != NULL)) {

            return FALSE;
        }
    }

    //
    // Nobody claimed the completion; use the default implementation.
    //
    return FsRtlMdlWriteCompleteDev( FileObject, FileOffset, MdlChain, TopDeviceObject );
}
BOOLEAN
FsRtlMdlWriteCompleteDev (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN PMDL MdlChain,
    IN PDEVICE_OBJECT DeviceObject
    )
/*++
Routine Description:
    This routine completes an Mdl write by handing the MDL chain back to
    the cache manager.  Write-through file objects are refused so the
    file system can handle them via the Irp path.
Arguments:
    FileObject - Pointer to the file object that was written.
    FileOffset - Byte offset of the write being completed.
    MdlChain - Supplies a pointer to an MDL chain returned from
        CcPrepareMdlWrite.
    DeviceObject - Supplies the DeviceObject for the callee (not used by
        this default implementation).
Return Value:
    TRUE if the completion was performed, FALSE for write-through files.
--*/
{
    //
    // Only non-write-through handles may complete on the fast path.
    //
    if (!FlagOn( FileObject->Flags, FO_WRITE_THROUGH )) {
        CcMdlWriteComplete2( FileObject, FileOffset, MdlChain );
        return TRUE;
    }

    //
    // WRITE_THROUGH is not supported in the fast path call.
    //
    return FALSE;
}
NTKERNELAPI
BOOLEAN
FsRtlAcquireFileForModWrite (
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER EndingOffset,
OUT PERESOURCE *ResourceToRelease
)
/*++
Routine Description:
This routine decides which file system resource the modified page
writer should acquire and acquires it if possible. Wait is always
specified as FALSE. We pass back the resource Mm has to release
when the write completes.
Arguments:
FileObject - Pointer to the file object being written.
EndingOffset - The offset of the last byte being written + 1.
ResourceToRelease - Returns the resource to release (may be NULL when
no resource was needed). Not defined if FALSE is returned.
Return Value:
FALSE - The resource could not be acquired without waiting.
TRUE - The returned resource has been acquired.
--*/
{
PFSRTL_COMMON_FCB_HEADER Header;
PERESOURCE ResourceAcquired;
PDEVICE_OBJECT DeviceObject;
PFAST_IO_DISPATCH FastIoDispatch;
BOOLEAN AcquireExclusive;
PAGED_CODE();
Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;
//
// First see if we have to call the file system.
//
// NOTE(review): FastIoDispatch is dereferenced below without a NULL
// check (as in FsRtlReleaseFileForModWrite); presumably every file
// system reachable here exports a fast I/O dispatch table -- confirm.
//
DeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
FastIoDispatch = DeviceObject->DriverObject->FastIoDispatch;
if ((FastIoDispatch->SizeOfFastIoDispatch >
FIELD_OFFSET( FAST_IO_DISPATCH, AcquireForModWrite )) &&
(FastIoDispatch->AcquireForModWrite != NULL)) {
NTSTATUS Status;
Status = FastIoDispatch->AcquireForModWrite(FileObject,
EndingOffset,
ResourceToRelease,
DeviceObject);
if (Status == STATUS_SUCCESS) {
return( TRUE );
} else if (Status == STATUS_CANT_WAIT) {
return( FALSE );
} else {
//
// Fall through. When dealing with layered file systems, it might
// be the case that the layered file system has the above dispatch
// routine, but the FS it is layered on top of does not. In that
// case, the layered file system will return an error code other
// than STATUS_SUCCESS or STATUS_CANT_WAIT, and we simply handle
// it as if the file system did not have the dispatch routine to
// begin with.
//
NOTHING;
}
}
//
// We follow the following rules to determine which resource
// to acquire. We use the flags in the common header. These
// flags can't change once we have acquired any resource.
// This means we can do an unsafe test and optimisticly
// acquire a resource. At that point we can test the bits
// to see if we have what we want.
//
// 0 - If there is no main resource, acquire nothing.
//
// 1 - Acquire the main resource exclusively if the
// ACQUIRE_MAIN_RSRC_EX flag is set or we are extending
// valid data.
//
// 2 - Acquire the main resource shared if there is
// no paging io resource or the
// ACQUIRE_MAIN_RSRC_SH flag is set.
//
// 3 - Otherwise acquire the paging io resource shared.
//
if (Header->Resource == NULL) {
*ResourceToRelease = NULL;
return TRUE;
}
if (FlagOn( Header->Flags, FSRTL_FLAG_ACQUIRE_MAIN_RSRC_EX ) ||
(EndingOffset->QuadPart > Header->ValidDataLength.QuadPart &&
Header->ValidDataLength.QuadPart != Header->FileSize.QuadPart)) {
ResourceAcquired = Header->Resource;
AcquireExclusive = TRUE;
} else if (FlagOn( Header->Flags, FSRTL_FLAG_ACQUIRE_MAIN_RSRC_SH ) ||
Header->PagingIoResource == NULL) {
ResourceAcquired = Header->Resource;
AcquireExclusive = FALSE;
} else {
ResourceAcquired = Header->PagingIoResource;
AcquireExclusive = FALSE;
}
//
// Perform the following in a loop in case we need to back and
// check the state of the resource acquisition. In most cases
// the initial checks will succeed and we can proceed immediately.
// We have to worry about the two FsRtl bits changing but
// if there is no paging io resource before there won't ever be
// one.
//
while (TRUE) {
//
// Now acquire the desired resource.  Both acquisitions are
// no-wait (Wait == FALSE): failure returns FALSE to the caller.
//
if (AcquireExclusive) {
if (!ExAcquireResourceExclusive( ResourceAcquired, FALSE )) {
return FALSE;
}
} else if (!ExAcquireSharedWaitForExclusive( ResourceAcquired, FALSE )) {
return FALSE;
}
//
// If the valid data length is changing or the exclusive bit is
// set and we don't have the main resource exclusive then
// release the current resource and acquire the main resource
// exclusively and move to the top of the loop.
//
if (FlagOn( Header->Flags, FSRTL_FLAG_ACQUIRE_MAIN_RSRC_EX ) ||
(EndingOffset->QuadPart > Header->ValidDataLength.QuadPart &&
Header->ValidDataLength.QuadPart != Header->FileSize.QuadPart)) {
//
// If we don't have the main resource exclusively then
// release the current resource and attempt to acquire
// the main resource exclusively.
//
if (!AcquireExclusive) {
ExReleaseResource( ResourceAcquired );
AcquireExclusive = TRUE;
ResourceAcquired = Header->Resource;
continue;
}
//
// We have the correct resource. Exit the loop.
//
//
// If we should be acquiring the main resource shared then move
// to acquire the correct resource and proceed to the top of the loop.
//
} else if (FlagOn( Header->Flags, FSRTL_FLAG_ACQUIRE_MAIN_RSRC_SH )) {
//
// If we have the main resource exclusively then downgrade to
// shared and exit the loop.
//
if (AcquireExclusive) {
ExConvertExclusiveToShared( ResourceAcquired );
//
// If we have the paging io resource then give up this resource
// and acquire the main resource exclusively. This is going
// at it with a large hammer but is guaranteed to be resolved
// in the next pass through the loop.
//
} else if (ResourceAcquired != Header->Resource) {
ExReleaseResource( ResourceAcquired );
ResourceAcquired = Header->Resource;
AcquireExclusive = TRUE;
continue;
}
//
// We have the correct resource. Exit the loop.
//
//
// At this point we should have the paging Io resource shared
// if it exists. If not then acquire it shared and release the
// other resource and exit the loop.
//
} else if (Header->PagingIoResource != NULL
&& ResourceAcquired != Header->PagingIoResource) {
//
// NULL marks the "paging acquire failed" state so the main
// resource can be released before we report failure.
//
ResourceAcquired = NULL;
if (ExAcquireSharedWaitForExclusive( Header->PagingIoResource, FALSE )) {
ResourceAcquired = Header->PagingIoResource;
}
ExReleaseResource( Header->Resource );
if (ResourceAcquired == NULL) {
return FALSE;
}
//
// We now have the correct resource. Exit the loop.
//
//
// We should have the main resource shared. If we don't then
// degrade our lock to shared access.
//
} else if (AcquireExclusive) {
ExConvertExclusiveToShared( ResourceAcquired );
//
// We now have the correct resource. Exit the loop.
//
}
//
// We have the correct resource. Exit the loop.
//
break;
}
*ResourceToRelease = ResourceAcquired;
return TRUE;
}
NTKERNELAPI
VOID
FsRtlReleaseFileForModWrite (
    IN PFILE_OBJECT FileObject,
    IN PERESOURCE ResourceToRelease
    )
/*++
Routine Description:
    This routine releases a file system resource previously acquired for
    the modified page writer by FsRtlAcquireFileForModWrite.
Arguments:
    FileObject - Pointer to the file object being written.
    ResourceToRelease - Supplies the resource to release.
Return Value:
    None.
--*/
{
    PDEVICE_OBJECT BaseDeviceObject;
    PFAST_IO_DISPATCH Dispatch;
    NTSTATUS CallStatus;

    PAGED_CODE();

    //
    // Offer the release to the base file system first.  Layered file
    // systems whose underlying FS does not support the request answer
    // STATUS_INVALID_DEVICE_REQUEST, in which case the resource is
    // released directly below.  (Dispatch is dereferenced without a
    // NULL check, matching FsRtlAcquireFileForModWrite.)
    //
    CallStatus = STATUS_INVALID_DEVICE_REQUEST;
    BaseDeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
    Dispatch = BaseDeviceObject->DriverObject->FastIoDispatch;

    if ((Dispatch->SizeOfFastIoDispatch >
         FIELD_OFFSET( FAST_IO_DISPATCH, ReleaseForModWrite )) &&
        (Dispatch->ReleaseForModWrite != NULL)) {

        CallStatus = Dispatch->ReleaseForModWrite( FileObject,
                                                   ResourceToRelease,
                                                   BaseDeviceObject );
    }

    ASSERT( (CallStatus == STATUS_SUCCESS) || (CallStatus == STATUS_INVALID_DEVICE_REQUEST) );

    if (CallStatus == STATUS_INVALID_DEVICE_REQUEST) {
        ExReleaseResource( ResourceToRelease );
    }
}
NTKERNELAPI
VOID
FsRtlAcquireFileForCcFlush (
IN PFILE_OBJECT FileObject
)
/*++
Routine Description:
This routine acquires a file system resource prior to a call to CcFlush.
The matching FsRtlReleaseFileForCcFlush undoes both the acquisition and
the FsRtlEnterFileSystem performed here.
Arguments:
FileObject - Pointer to the file object being written.
Return Value:
None.
--*/
{
PDEVICE_OBJECT DeviceObject;
PFAST_IO_DISPATCH FastIoDispatch;
NTSTATUS Status = STATUS_INVALID_DEVICE_REQUEST;
PAGED_CODE();
//
// First see if we have to call the file system. Note that in the case
// of layered file systems, the layered file system might have the
// dispatch routine, but the file system on which it is layered on may
// not. In that case, the layered file system will return
// STATUS_INVALID_DEVICE_REQUEST.
//
DeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
FsRtlEnterFileSystem();
if ((FastIoDispatch = DeviceObject->DriverObject->FastIoDispatch) &&
(FastIoDispatch->SizeOfFastIoDispatch >
FIELD_OFFSET( FAST_IO_DISPATCH, AcquireForCcFlush )) &&
(FastIoDispatch->AcquireForCcFlush != NULL)) {
Status = FastIoDispatch->AcquireForCcFlush( FileObject, DeviceObject );
}
ASSERT( (Status == STATUS_SUCCESS) || (Status == STATUS_INVALID_DEVICE_REQUEST) );
if (Status == STATUS_INVALID_DEVICE_REQUEST) {
PFSRTL_COMMON_FCB_HEADER Header = FileObject->FsContext;
//
// If not already owned get the main resource exclusive because we may
// extend ValidDataLength. Otherwise acquire it one more time recursively.
//
if (Header->Resource != NULL) {
if (!ExIsResourceAcquiredShared(Header->Resource)) {
ExAcquireResourceExclusive( Header->Resource, TRUE );
} else {
ExAcquireResourceShared( Header->Resource, TRUE );
}
}
//
// Also get the paging I/O resource ahead of any MM resources.
//
if (Header->PagingIoResource != NULL) {
ExAcquireResourceShared( Header->PagingIoResource, TRUE );
}
}
}
NTKERNELAPI
VOID
FsRtlReleaseFileForCcFlush (
    IN PFILE_OBJECT FileObject
    )
/*++
Routine Description:
    This routine releases the file system resources previously acquired
    for CcFlush by FsRtlAcquireFileForCcFlush, and exits the file system.
Arguments:
    FileObject - Pointer to the file object being written.
Return Value:
    None.
--*/
{
    PDEVICE_OBJECT BaseDeviceObject;
    PFAST_IO_DISPATCH Dispatch;
    NTSTATUS CallStatus;

    PAGED_CODE();

    //
    // Offer the release to the base file system first.  Layered file
    // systems whose underlying FS does not support the request answer
    // STATUS_INVALID_DEVICE_REQUEST, in which case we release whatever
    // the acquire path took directly below.
    //
    CallStatus = STATUS_INVALID_DEVICE_REQUEST;
    BaseDeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
    Dispatch = BaseDeviceObject->DriverObject->FastIoDispatch;

    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch >
         FIELD_OFFSET( FAST_IO_DISPATCH, ReleaseForCcFlush )) &&
        (Dispatch->ReleaseForCcFlush != NULL)) {

        CallStatus = Dispatch->ReleaseForCcFlush( FileObject, BaseDeviceObject );
    }

    ASSERT( (CallStatus == STATUS_SUCCESS) || (CallStatus == STATUS_INVALID_DEVICE_REQUEST) );

    if (CallStatus == STATUS_INVALID_DEVICE_REQUEST) {
        PFSRTL_COMMON_FCB_HEADER Header = FileObject->FsContext;

        //
        // Drop the locks the acquire path took: paging I/O resource
        // first, then the main resource.
        //
        if (Header->PagingIoResource != NULL) {
            ExReleaseResource( Header->PagingIoResource );
        }
        if (Header->Resource != NULL) {
            ExReleaseResource( Header->Resource );
        }
    }

    FsRtlExitFileSystem();
}
NTKERNELAPI
VOID
FsRtlAcquireFileExclusive (
    IN PFILE_OBJECT FileObject
    )
/*++
Routine Description:
    This routine is used by NtCreateSection to pre-acquire file system
    resources in order to avoid deadlocks.  If the file system exports
    an AcquireFileForNtCreateSection fast I/O entry it is called;
    otherwise the main file resource, if any, is acquired exclusively.
    FsRtlEnterFileSystem is performed only when something is acquired;
    FsRtlReleaseFile mirrors this behavior on release.
Arguments:
    FileObject - Pointer to the file object being written.
Return Value:
    None.
--*/
{
    PDEVICE_OBJECT BaseDeviceObject;
    PFAST_IO_DISPATCH Dispatch;
    PFSRTL_COMMON_FCB_HEADER Header;

    PAGED_CODE();

    //
    // Prefer the file system's own acquire callback.
    //
    BaseDeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
    Dispatch = BaseDeviceObject->DriverObject->FastIoDispatch;
    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch >
         FIELD_OFFSET( FAST_IO_DISPATCH, AcquireFileForNtCreateSection )) &&
        (Dispatch->AcquireFileForNtCreateSection != NULL)) {

        FsRtlEnterFileSystem();
        Dispatch->AcquireFileForNtCreateSection( FileObject );
        return;
    }

    //
    // Otherwise take the main file resource exclusively, if one exists.
    //
    Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;
    if ((Header != NULL) && (Header->Resource != NULL)) {
        FsRtlEnterFileSystem();
        ExAcquireResourceExclusive( Header->Resource, TRUE );
        return;
    }

    //
    // Nothing to acquire.
    //
}
NTKERNELAPI
VOID
FsRtlReleaseFile (
    IN PFILE_OBJECT FileObject
    )
/*++
Routine Description:
    This routine releases resources acquired by FsRtlAcquireFileExclusive:
    either the file system's ReleaseFileForNtCreateSection callback is
    invoked, or the main file resource is dropped.  When nothing was
    acquired, nothing is released and the file system is not exited.
Arguments:
    FileObject - Pointer to the file object being written.
Return Value:
    None.
--*/
{
    PDEVICE_OBJECT BaseDeviceObject;
    PFAST_IO_DISPATCH Dispatch;
    PFSRTL_COMMON_FCB_HEADER Header;

    PAGED_CODE();

    //
    // Prefer the file system's own release callback, mirroring the
    // acquire side.
    //
    BaseDeviceObject = IoGetBaseFileSystemDeviceObject( FileObject );
    Dispatch = BaseDeviceObject->DriverObject->FastIoDispatch;
    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch >
         FIELD_OFFSET( FAST_IO_DISPATCH, ReleaseFileForNtCreateSection )) &&
        (Dispatch->ReleaseFileForNtCreateSection != NULL)) {

        Dispatch->ReleaseFileForNtCreateSection( FileObject );
        FsRtlExitFileSystem();
        return;
    }

    //
    // Otherwise drop the main resource, if one exists.
    //
    Header = (PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext;
    if ((Header != NULL) && (Header->Resource != NULL)) {
        ExReleaseResource( Header->Resource );
        FsRtlExitFileSystem();
        return;
    }

    //
    // Nothing was acquired, so there is nothing to release.
    //
}
NTSTATUS
FsRtlGetFileSize(
IN PFILE_OBJECT FileObject,
IN OUT PLARGE_INTEGER FileSize
)
/*++
Routine Description:
This routine is used to call the File System to get the FileSize
for a file.
It does this without acquiring the file object lock on synchronous file
objects. This routine is therefore safe to call if you already own
file system resources, while IoQueryFileInformation could (and does)
lead to deadlocks.
Arguments:
FileObject - The file to query
FileSize - Receives the file size.
Return Value:
NTSTATUS - The final I/O status of the operation. If the FileObject
refers to a directory, STATUS_FILE_IS_A_DIRECTORY is returned.
--*/
{
IO_STATUS_BLOCK IoStatus;
PDEVICE_OBJECT DeviceObject;
PFAST_IO_DISPATCH FastIoDispatch;
FILE_STANDARD_INFORMATION FileInformation;
PAGED_CODE();
//
// Get the address of the target device object.
//
DeviceObject = IoGetRelatedDeviceObject( FileObject );
//
// Try the fast query call if it exists.  Wait is TRUE; the handler
// fills in both FileInformation and IoStatus on success.
//
FastIoDispatch = DeviceObject->DriverObject->FastIoDispatch;
if (FastIoDispatch &&
FastIoDispatch->FastIoQueryStandardInfo &&
FastIoDispatch->FastIoQueryStandardInfo( FileObject,
TRUE,
&FileInformation,
&IoStatus,
DeviceObject )) {
//
// Cool, it worked.
//
} else {
//
// Life's tough, take the long path: build and send a synchronous
// paging-style IRP_MJ_QUERY_INFORMATION request by hand.
//
PIRP Irp;
KEVENT Event;
NTSTATUS Status;
PIO_STACK_LOCATION IrpSp;
//
// Initialize the event.
//
KeInitializeEvent( &Event, NotificationEvent, FALSE );
//
// Allocate an I/O Request Packet (IRP) for this in-page operation.
//
Irp = IoAllocateIrp( DeviceObject->StackSize, FALSE );
if (Irp == NULL) {
return STATUS_INSUFFICIENT_RESOURCES;
}
//
// Get a pointer to the first stack location in the packet. This location
// will be used to pass the function codes and parameters to the first
// driver.
//
IrpSp = IoGetNextIrpStackLocation( Irp );
//
// Fill in the IRP according to this request, setting the flags to
// just cause IO to set the event and deallocate the Irp.
//
Irp->Flags = IRP_PAGING_IO | IRP_SYNCHRONOUS_PAGING_IO;
Irp->RequestorMode = KernelMode;
Irp->UserIosb = &IoStatus;
Irp->UserEvent = &Event;
Irp->Tail.Overlay.OriginalFileObject = FileObject;
Irp->Tail.Overlay.Thread = PsGetCurrentThread();
Irp->AssociatedIrp.SystemBuffer = &FileInformation;
//
// Fill in the normal query parameters.
//
// NOTE(review): the Parameters.SetFile union member is used for a
// query IRP -- presumably the QueryFile parameters occupy the same
// union storage, so the values land in the right place; confirm
// against IO_STACK_LOCATION.
//
IrpSp->MajorFunction = IRP_MJ_QUERY_INFORMATION;
IrpSp->FileObject = FileObject;
IrpSp->DeviceObject = DeviceObject;
IrpSp->Parameters.SetFile.Length = sizeof(FILE_STANDARD_INFORMATION);
IrpSp->Parameters.SetFile.FileInformationClass = FileStandardInformation;
//
// Queue the packet to the appropriate driver based. This routine
// should not raise.
//
Status = IoCallDriver( DeviceObject, Irp );
//
// If pending is returned (which is a successful status),
// we must wait for the request to complete.
//
if (Status == STATUS_PENDING) {
KeWaitForSingleObject( &Event,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER)NULL);
}
//
// If we got an error back in Status, then the Iosb
// was not written, so we will just copy the status
// there, then test the final status after that.
//
if (!NT_SUCCESS(Status)) {
IoStatus.Status = Status;
}
}
//
// If the call worked, check to make sure it wasn't a directory and
// if not, fill in the FileSize parameter.
//
if (NT_SUCCESS(IoStatus.Status)) {
if (FileInformation.Directory) {
//
// Can't get file size for a directory. Return error.
//
IoStatus.Status = STATUS_FILE_IS_A_DIRECTORY;
} else {
*FileSize = FileInformation.EndOfFile;
}
}
return IoStatus.Status;
}
NTSTATUS
FsRtlSetFileSize(
IN PFILE_OBJECT FileObject,
IN OUT PLARGE_INTEGER FileSize
)
/*++
Routine Description:
This routine is used to call the File System to update FileSize
for a file.
It does this without acquiring the file object lock on synchronous file
objects. This routine is therefore safe to call if you already own
file system resources, while IoSetInformation could (and does) lead
to deadlocks.
Arguments:
FileObject - A pointer to a referenced file object.
FileSize - Pointer to the new end-of-file position.
Return Value:
Status of operation.
--*/
{
PIO_STACK_LOCATION IrpSp;
PDEVICE_OBJECT DeviceObject;
NTSTATUS Status;
FILE_END_OF_FILE_INFORMATION Buffer;
IO_STATUS_BLOCK IoStatus;
KEVENT Event;
PIRP Irp;
PAGED_CODE();
//
// Copy FileSize to our buffer.
//
Buffer.EndOfFile = *FileSize;
//
// Initialize the event.
//
KeInitializeEvent( &Event, NotificationEvent, FALSE );
//
// Begin by getting a pointer to the device object that the file resides
// on.
//
DeviceObject = IoGetRelatedDeviceObject( FileObject );
//
// Allocate an I/O Request Packet (IRP) for this in-page operation.
//
Irp = IoAllocateIrp( DeviceObject->StackSize, FALSE );
if (Irp == NULL) {
return STATUS_INSUFFICIENT_RESOURCES;
}
//
// Get a pointer to the first stack location in the packet. This location
// will be used to pass the function codes and parameters to the first
// driver.
//
IrpSp = IoGetNextIrpStackLocation( Irp );
//
// Fill in the IRP according to this request, setting the flags to
// just cause IO to set the event and deallocate the Irp.
//
Irp->Flags = IRP_PAGING_IO | IRP_SYNCHRONOUS_PAGING_IO;
Irp->RequestorMode = KernelMode;
Irp->UserIosb = &IoStatus;
Irp->UserEvent = &Event;
Irp->Tail.Overlay.OriginalFileObject = FileObject;
Irp->Tail.Overlay.Thread = PsGetCurrentThread();
Irp->AssociatedIrp.SystemBuffer = &Buffer;
//
// Fill in the normal set file parameters.
//
IrpSp->MajorFunction = IRP_MJ_SET_INFORMATION;
IrpSp->FileObject = FileObject;
IrpSp->DeviceObject = DeviceObject;
IrpSp->Parameters.SetFile.Length = sizeof(FILE_END_OF_FILE_INFORMATION);
IrpSp->Parameters.SetFile.FileInformationClass = FileEndOfFileInformation;
//
// Queue the packet to the appropriate driver based on whether or not there
// is a VPB associated with the device. This routine should not raise.
//
Status = IoCallDriver( DeviceObject, Irp );
//
// If pending is returned (which is a successful status),
// we must wait for the request to complete.
//
if (Status == STATUS_PENDING) {
KeWaitForSingleObject( &Event,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER)NULL);
}
//
// If we got an error back in Status, then the Iosb
// was not written, so we will just copy the status
// there, then test the final status after that.
//
if (!NT_SUCCESS(Status)) {
IoStatus.Status = Status;
}
return IoStatus.Status;
}
| 29.776035 | 125 | 0.53813 | [
"object"
] |
9e287927b7f988c8c13e06914670b30b42bc2a70 | 25,484 | c | C | Code/drivers/scsi/ch.c | jonggyup/RequestOrganizer | 6bb83a6711681d91d49e6f4f405d4a761d182d97 | [
"MIT"
] | 3 | 2020-11-06T05:17:03.000Z | 2020-11-06T07:32:34.000Z | Code/drivers/scsi/ch.c | jonggyup/RequestOrganizer | 6bb83a6711681d91d49e6f4f405d4a761d182d97 | [
"MIT"
] | null | null | null | Code/drivers/scsi/ch.c | jonggyup/RequestOrganizer | 6bb83a6711681d91d49e6f4f405d4a761d182d97 | [
"MIT"
] | 1 | 2020-11-06T07:32:55.000Z | 2020-11-06T07:32:55.000Z | /*
* SCSI Media Changer device driver for Linux 2.6
*
* (c) 1996-2003 Gerd Knorr <kraxel@bytesex.org>
*
*/
#define VERSION "0.25"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/compat.h>
#include <linux/chio.h> /* here are all the ioctls */
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#define CH_DT_MAX 16
#define CH_TYPES 8
#define CH_MAX_DEVS 128
MODULE_DESCRIPTION("device driver for scsi media changer devices");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
static DEFINE_MUTEX(ch_mutex);
static int init = 1;
module_param(init, int, 0444);
MODULE_PARM_DESC(init, \
"initialize element status on driver load (default: on)");
static int timeout_move = 300;
module_param(timeout_move, int, 0644);
MODULE_PARM_DESC(timeout_move,"timeout for move commands "
"(default: 300 seconds)");
static int timeout_init = 3600;
module_param(timeout_init, int, 0644);
MODULE_PARM_DESC(timeout_init,"timeout for INITIALIZE ELEMENT STATUS "
"(default: 3600 seconds)");
static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
static int debug = 0;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
static int dt_id[CH_DT_MAX] = { [ 0 ... (CH_DT_MAX-1) ] = -1 };
static int dt_lun[CH_DT_MAX];
module_param_array(dt_id, int, NULL, 0444);
module_param_array(dt_lun, int, NULL, 0444);
/* tell the driver about vendor-specific slots */
static int vendor_firsts[CH_TYPES-4];
static int vendor_counts[CH_TYPES-4];
module_param_array(vendor_firsts, int, NULL, 0444);
module_param_array(vendor_counts, int, NULL, 0444);
static const char * vendor_labels[CH_TYPES-4] = {
"v0", "v1", "v2", "v3"
};
// module_param_string_array(vendor_labels, NULL, 0444);
#define ch_printk(prefix, ch, fmt, a...) \
sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a)
#define DPRINTK(fmt, arg...) \
do { \
if (debug) \
ch_printk(KERN_DEBUG, ch, fmt, ##arg); \
} while (0)
#define VPRINTK(level, fmt, arg...) \
do { \
if (verbose) \
ch_printk(level, ch, fmt, ##arg); \
} while (0)
/* ------------------------------------------------------------------- */
#define MAX_RETRIES 1
static struct class * ch_sysfs_class;
typedef struct {
struct kref ref;
struct list_head list;
int minor;
char name[8];
struct scsi_device *device;
struct scsi_device **dt; /* ptrs to data transfer elements */
u_int firsts[CH_TYPES];
u_int counts[CH_TYPES];
u_int unit_attention;
u_int voltags;
struct mutex lock;
} scsi_changer;
static DEFINE_IDR(ch_index_idr);
static DEFINE_SPINLOCK(ch_index_lock);
static const struct {
unsigned char sense;
unsigned char asc;
unsigned char ascq;
int errno;
} ch_err[] = {
/* Just filled in what looks right. Hav'nt checked any standard paper for
these errno assignments, so they may be wrong... */
{
.sense = ILLEGAL_REQUEST,
.asc = 0x21,
.ascq = 0x01,
.errno = EBADSLT, /* Invalid element address */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x28,
.ascq = 0x01,
.errno = EBADE, /* Import or export element accessed */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x3B,
.ascq = 0x0D,
.errno = EXFULL, /* Medium destination element full */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x3B,
.ascq = 0x0E,
.errno = EBADE, /* Medium source element empty */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x20,
.ascq = 0x00,
.errno = EBADRQC, /* Invalid command operation code */
},{
/* end of list */
}
};
/* ------------------------------------------------------------------- */
/*
 * Map a SCSI sense triple (key / asc / ascq) to a negative errno via the
 * ch_err[] translation table; anything unmatched becomes -EIO.
 */
static int ch_find_errno(struct scsi_sense_hdr *sshdr)
{
	int idx;

	/* Only consult the table when valid additional sense data exists. */
	if (!scsi_sense_valid(sshdr) || sshdr->asc == 0)
		return -EIO;

	for (idx = 0; ch_err[idx].errno != 0; idx++) {
		if (ch_err[idx].sense != sshdr->sense_key)
			continue;
		if (ch_err[idx].asc != sshdr->asc)
			continue;
		if (ch_err[idx].ascq != sshdr->ascq)
			continue;
		return -ch_err[idx].errno;
	}
	return -EIO;
}
/*
 * Issue a SCSI command to the changer and translate any sense data into
 * a negative errno (or 0 on success). On UNIT ATTENTION the condition is
 * recorded in ch->unit_attention and the command is retried up to three
 * times. 'cmd_len' is accepted from callers but not used here.
 */
static int
ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
void *buffer, unsigned buflength,
enum dma_data_direction direction)
{
int errno, retries = 0, timeout, result;
struct scsi_sense_hdr sshdr;
/* INITIALIZE ELEMENT STATUS may legitimately take much longer */
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
retry:
errno = 0;
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
if (driver_byte(result) == DRIVER_SENSE) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
switch(sshdr.sense_key) {
case UNIT_ATTENTION:
/* media/state changed under us; note it and retry */
ch->unit_attention = 1;
if (retries++ < 3)
goto retry;
break;
}
}
return errno;
}
/* ------------------------------------------------------------------------ */
/*
 * Translate an absolute element address into its 1-based element type
 * code (CHET_* index + 1). Returns 0 when the address matches no
 * configured range.
 */
static int
ch_elem_to_typecode(scsi_changer *ch, u_int elem)
{
	int type;

	for (type = 0; type < CH_TYPES; type++) {
		u_int first = ch->firsts[type];
		u_int count = ch->counts[type];

		if (elem >= first && elem - first < count)
			return type + 1;
	}
	return 0;
}
/*
 * Issue READ ELEMENT STATUS for one element and copy its 16-byte
 * descriptor into 'data'. If the command fails while volume-tag
 * reporting is enabled, assume the device lacks voltag support,
 * disable it globally and retry once without the voltag bit.
 * Returns 0, -ENOMEM, -EIO on a descriptor/address mismatch, or a
 * negative errno from ch_do_scsi().
 */
static int
ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
{
u_char cmd[12];
u_char *buffer;
int result;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if(!buffer)
return -ENOMEM;
retry:
memset(cmd,0,sizeof(cmd));
cmd[0] = READ_ELEMENT_STATUS;
/* bit 4 requests volume tags when the device supports them */
cmd[1] = ((ch->device->lun & 0x7) << 5) |
(ch->voltags ? 0x10 : 0) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
cmd[3] = elem & 0xff;
cmd[5] = 1;
cmd[9] = 255;
if (0 == (result = ch_do_scsi(ch, cmd, 12,
buffer, 256, DMA_FROM_DEVICE))) {
/* sanity check: descriptor must be for the requested element */
if (((buffer[16] << 8) | buffer[17]) != elem) {
DPRINTF("asked for element 0x%02x, got 0x%02x\n",
elem,(buffer[16] << 8) | buffer[17]);
kfree(buffer);
return -EIO;
}
memcpy(data,buffer+16,16);
} else {
if (ch->voltags) {
ch->voltags = 0;
VPRINTK(KERN_INFO, "device has no volume tag support\n");
goto retry;
}
DPRINTK("READ ELEMENT STATUS for element 0x%x failed\n",elem);
}
kfree(buffer);
return result;
}
/*
 * Issue INITIALIZE ELEMENT STATUS so the changer (re)scans all of its
 * element slots. Can take a long time on large libraries.
 */
static int
ch_init_elem(scsi_changer *ch)
{
	u_char cdb[6] = { 0 };
	int result;

	VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n");
	cdb[0] = INITIALIZE_ELEMENT_STATUS;
	cdb[1] = (ch->device->lun & 0x7) << 5;
	result = ch_do_scsi(ch, cdb, 6, NULL, 0, DMA_NONE);
	VPRINTK(KERN_INFO, "... finished\n");
	return result;
}
/*
 * Read the changer's element-address-assignment mode page to learn the
 * first element address and element count of each element type, copy in
 * the vendor-specific ranges from module parameters, then try to match
 * every data-transfer element to a scsi_device on the same host.
 * Returns 0 or -ENOMEM; a failed mode sense is only logged.
 */
static int
ch_readconfig(scsi_changer *ch)
{
u_char cmd[10], data[16];
u_char *buffer;
int result,id,lun,i;
u_int elem;
buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
/* MODE SENSE, page 0x1d = element address assignment page */
memset(cmd,0,sizeof(cmd));
cmd[0] = MODE_SENSE;
cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = 0x1d;
cmd[4] = 255;
result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
if (0 != result) {
/* retry with DBD (disable block descriptors) set */
cmd[1] |= (1<<3);
result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
}
if (0 == result) {
/* buffer[3] is the block-descriptor length; page data follows it */
ch->firsts[CHET_MT] =
(buffer[buffer[3]+ 6] << 8) | buffer[buffer[3]+ 7];
ch->counts[CHET_MT] =
(buffer[buffer[3]+ 8] << 8) | buffer[buffer[3]+ 9];
ch->firsts[CHET_ST] =
(buffer[buffer[3]+10] << 8) | buffer[buffer[3]+11];
ch->counts[CHET_ST] =
(buffer[buffer[3]+12] << 8) | buffer[buffer[3]+13];
ch->firsts[CHET_IE] =
(buffer[buffer[3]+14] << 8) | buffer[buffer[3]+15];
ch->counts[CHET_IE] =
(buffer[buffer[3]+16] << 8) | buffer[buffer[3]+17];
ch->firsts[CHET_DT] =
(buffer[buffer[3]+18] << 8) | buffer[buffer[3]+19];
ch->counts[CHET_DT] =
(buffer[buffer[3]+20] << 8) | buffer[buffer[3]+21];
VPRINTK(KERN_INFO, "type #1 (mt): 0x%x+%d [medium transport]\n",
ch->firsts[CHET_MT],
ch->counts[CHET_MT]);
VPRINTK(KERN_INFO, "type #2 (st): 0x%x+%d [storage]\n",
ch->firsts[CHET_ST],
ch->counts[CHET_ST]);
VPRINTK(KERN_INFO, "type #3 (ie): 0x%x+%d [import/export]\n",
ch->firsts[CHET_IE],
ch->counts[CHET_IE]);
VPRINTK(KERN_INFO, "type #4 (dt): 0x%x+%d [data transfer]\n",
ch->firsts[CHET_DT],
ch->counts[CHET_DT]);
} else {
VPRINTK(KERN_INFO, "reading element address assignment page failed!\n");
}
/* vendor specific element types */
for (i = 0; i < 4; i++) {
if (0 == vendor_counts[i])
continue;
if (NULL == vendor_labels[i])
continue;
ch->firsts[CHET_V1+i] = vendor_firsts[i];
ch->counts[CHET_V1+i] = vendor_counts[i];
VPRINTK(KERN_INFO, "type #%d (v%d): 0x%x+%d [%s, vendor specific]\n",
i+5,i+1,vendor_firsts[i],vendor_counts[i],
vendor_labels[i]);
}
/* look up the devices of the data transfer elements */
ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt),
GFP_KERNEL);
if (!ch->dt) {
kfree(buffer);
return -ENOMEM;
}
for (elem = 0; elem < ch->counts[CHET_DT]; elem++) {
id = -1;
lun = 0;
/* insmod parameters (dt_id/dt_lun) override device reporting */
if (elem < CH_DT_MAX && -1 != dt_id[elem]) {
id = dt_id[elem];
lun = dt_lun[elem];
VPRINTK(KERN_INFO, "dt 0x%x: [insmod option] ",
elem+ch->firsts[CHET_DT]);
} else if (0 != ch_read_element_status
(ch,elem+ch->firsts[CHET_DT],data)) {
VPRINTK(KERN_INFO, "dt 0x%x: READ ELEMENT STATUS failed\n",
elem+ch->firsts[CHET_DT]);
} else {
VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]);
/* data[6] flags: 0x80 = other bus, 0x20 = ID valid, 0x10 = LUN valid */
if (data[6] & 0x80) {
VPRINTK(KERN_CONT, "not this SCSI bus\n");
ch->dt[elem] = NULL;
} else if (0 == (data[6] & 0x30)) {
VPRINTK(KERN_CONT, "ID/LUN unknown\n");
ch->dt[elem] = NULL;
} else {
id = ch->device->id;
lun = 0;
if (data[6] & 0x20) id = data[7];
if (data[6] & 0x10) lun = data[6] & 7;
}
}
if (-1 != id) {
VPRINTK(KERN_CONT, "ID %i, LUN %i, ",id,lun);
ch->dt[elem] =
scsi_device_lookup(ch->device->host,
ch->device->channel,
id,lun);
if (!ch->dt[elem]) {
/* should not happen */
VPRINTK(KERN_CONT, "Huh? device not found!\n");
} else {
VPRINTK(KERN_CONT, "name: %8.8s %16.16s %4.4s\n",
ch->dt[elem]->vendor,
ch->dt[elem]->model,
ch->dt[elem]->rev);
}
}
}
/* start optimistic; cleared on first READ ELEMENT STATUS failure */
ch->voltags = 1;
kfree(buffer);
return 0;
}
/* ------------------------------------------------------------------------ */
/*
 * POSITION TO ELEMENT: move transport 'trans' in front of element
 * 'elem'. trans == 0 selects the first medium-transport element.
 */
static int
ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
{
	u_char cdb[10] = { 0 };

	DPRINTK("position: 0x%x\n",elem);
	if (trans == 0)
		trans = ch->firsts[CHET_MT];
	cdb[0] = POSITION_TO_ELEMENT;
	cdb[1] = (ch->device->lun & 0x7) << 5;
	cdb[2] = (trans >> 8) & 0xff;
	cdb[3] = trans & 0xff;
	cdb[4] = (elem >> 8) & 0xff;
	cdb[5] = elem & 0xff;
	cdb[8] = rotate ? 1 : 0;
	return ch_do_scsi(ch, cdb, 10, NULL, 0, DMA_NONE);
}
/*
 * MOVE MEDIUM: move media from element 'src' to element 'dest' using
 * transport 'trans' (0 selects the first medium-transport element).
 */
static int
ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
{
	u_char cdb[12] = { 0 };

	DPRINTK("move: 0x%x => 0x%x\n",src,dest);
	if (trans == 0)
		trans = ch->firsts[CHET_MT];
	cdb[0] = MOVE_MEDIUM;
	cdb[1] = (ch->device->lun & 0x7) << 5;
	cdb[2] = (trans >> 8) & 0xff;
	cdb[3] = trans & 0xff;
	cdb[4] = (src >> 8) & 0xff;
	cdb[5] = src & 0xff;
	cdb[6] = (dest >> 8) & 0xff;
	cdb[7] = dest & 0xff;
	cdb[10] = rotate ? 1 : 0;
	return ch_do_scsi(ch, cdb, 12, NULL, 0, DMA_NONE);
}
/*
 * EXCHANGE MEDIUM: single command involving 'src' and two destination
 * elements 'dest1'/'dest2', with independent invert flags for each leg.
 */
static int
ch_exchange(scsi_changer *ch, u_int trans, u_int src,
	    u_int dest1, u_int dest2, int rotate1, int rotate2)
{
	u_char cdb[12] = { 0 };

	DPRINTK("exchange: 0x%x => 0x%x => 0x%x\n",
		src,dest1,dest2);
	if (trans == 0)
		trans = ch->firsts[CHET_MT];
	cdb[0] = EXCHANGE_MEDIUM;
	cdb[1] = (ch->device->lun & 0x7) << 5;
	cdb[2] = (trans >> 8) & 0xff;
	cdb[3] = trans & 0xff;
	cdb[4] = (src >> 8) & 0xff;
	cdb[5] = src & 0xff;
	cdb[6] = (dest1 >> 8) & 0xff;
	cdb[7] = dest1 & 0xff;
	cdb[8] = (dest2 >> 8) & 0xff;
	cdb[9] = dest2 & 0xff;
	cdb[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
	return ch_do_scsi(ch, cdb, 12, NULL, 0, DMA_NONE);
}
/*
 * Sanitise a 32-byte volume tag in place: any byte outside printable
 * ASCII, and the search wildcards '?' and '*', become spaces.
 */
static void
ch_check_voltag(char *tag)
{
	int pos;

	for (pos = 0; pos < 32; pos++) {
		char c = tag[pos];

		/* restrict to printable ascii, reject search wildcards */
		if (c < 0x20 || c >= 0x7f || c == '?' || c == '*')
			tag[pos] = ' ';
	}
}
/*
 * SEND VOLUME TAG: set or clear an element's primary or alternate
 * volume tag. The 32-byte tag is copied into a scratch buffer and
 * sanitised by ch_check_voltag() before being sent to the device.
 * Returns 0 or a negative errno.
 *
 * NOTE(review): the DPRINTK prints 'tag' with %s although only 32
 * bytes are defined -- confirm callers pass a NUL-terminated tag.
 */
static int
ch_set_voltag(scsi_changer *ch, u_int elem,
int alternate, int clear, u_char *tag)
{
u_char cmd[12];
u_char *buffer;
int result;
buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
DPRINTK("%s %s voltag: 0x%x => \"%s\"\n",
clear ? "clear" : "set",
alternate ? "alternate" : "primary",
elem, tag);
memset(cmd,0,sizeof(cmd));
cmd[0] = SEND_VOLUME_TAG;
cmd[1] = ((ch->device->lun & 0x7) << 5) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
cmd[3] = elem & 0xff;
/* byte 5 selects set/undefine x primary/alternate (0x0a..0x0d) */
cmd[5] = clear
? (alternate ? 0x0d : 0x0c)
: (alternate ? 0x0b : 0x0a);
cmd[9] = 255;
memcpy(buffer,tag,32);
ch_check_voltag(buffer);
result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE);
kfree(buffer);
return result;
}
/*
 * Copy the one-byte status flags of every element of the given type to
 * the user buffer 'dest' (one byte per element).
 *
 * Fixes over the previous version: each element is read exactly once
 * (the old loop issued the same READ ELEMENT STATUS twice per element),
 * and a put_user() fault is reported as -EFAULT instead of being
 * silently ignored.
 *
 * Returns 0 on success, -EIO when READ ELEMENT STATUS fails, or
 * -EFAULT when the destination buffer is not writable.
 */
static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
{
	int retval = 0;
	u_char data[16];
	unsigned int i;

	mutex_lock(&ch->lock);
	for (i = 0; i < ch->counts[type]; i++) {
		if (0 != ch_read_element_status
		    (ch, ch->firsts[type] + i, data)) {
			retval = -EIO;
			break;
		}
		/* data[2] holds the element status flags */
		if (put_user(data[2], dest + i)) {
			retval = -EFAULT;
			break;
		}
		if (data[2] & CESTATUS_EXCEPT)
			VPRINTK(KERN_INFO, "element 0x%x: asc=0x%x, ascq=0x%x\n",
				ch->firsts[type] + i,
				(int)data[4], (int)data[5]);
	}
	mutex_unlock(&ch->lock);
	return retval;
}
/* ------------------------------------------------------------------------ */
/* kref release callback: free the per-changer state on last reference. */
static void ch_destroy(struct kref *ref)
{
	scsi_changer *changer = container_of(ref, scsi_changer, ref);

	/* dt array first; it is a separate allocation hanging off 'changer' */
	kfree(changer->dt);
	kfree(changer);
}
/*
 * close(): undo ch_open() -- drop the scsi_device reference and release
 * our hold on the changer state (may free it via ch_destroy()).
 */
static int
ch_release(struct inode *inode, struct file *file)
{
	scsi_changer *changer = file->private_data;

	file->private_data = NULL;
	scsi_device_put(changer->device);
	kref_put(&changer->ref, ch_destroy);
	return 0;
}
/*
 * open(): resolve the minor number to a scsi_changer under
 * ch_index_lock, then pin both the underlying scsi_device and our own
 * kref while still holding the lock so neither can vanish before the
 * file is released.
 */
static int
ch_open(struct inode *inode, struct file *file)
{
scsi_changer *ch;
int minor = iminor(inode);
mutex_lock(&ch_mutex);
spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
/* no such minor, or the scsi_device is already going away */
if (NULL == ch || scsi_device_get(ch->device)) {
spin_unlock(&ch_index_lock);
mutex_unlock(&ch_mutex);
return -ENXIO;
}
kref_get(&ch->ref);
spin_unlock(&ch_index_lock);
file->private_data = ch;
mutex_unlock(&ch_mutex);
return 0;
}
/*
 * Validate an (element type, unit) pair against the configured element
 * counts. Returns 0 when in range, -1 otherwise.
 */
static int
ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
{
	if (type < CH_TYPES && unit < ch->counts[type])
		return 0;
	return -1;
}
/*
 * Character-device ioctl handler implementing the CHIO* command set
 * (<linux/chio.h>); unknown commands are forwarded to scsi_ioctl().
 * Commands that talk to the device serialise on ch->lock.
 *
 * Fixes over the previous version: the CHIOGPARAMS copy_to_user() used
 * a mis-decoded '&params' (rendered as a pilcrow), which cannot
 * compile; the params struct is now also zeroed first so struct
 * padding is never copied to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
static long ch_ioctl(struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	scsi_changer *ch = file->private_data;
	int retval;
	void __user *argp = (void __user *)arg;

	/* during SCSI error recovery only a restricted set is permitted */
	retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
			file->f_flags & O_NDELAY);
	if (retval)
		return retval;

	switch (cmd) {
	case CHIOGPARAMS:
	{
		struct changer_params params;

		/* zero first so padding never leaks to userspace */
		memset(&params, 0, sizeof(params));
		params.cp_curpicker = 0;
		params.cp_npickers = ch->counts[CHET_MT];
		params.cp_nslots = ch->counts[CHET_ST];
		params.cp_nportals = ch->counts[CHET_IE];
		params.cp_ndrives = ch->counts[CHET_DT];

		if (copy_to_user(argp, &params, sizeof(params)))
			return -EFAULT;
		return 0;
	}
	case CHIOGVPARAMS:
	{
		struct changer_vendor_params vparams;

		memset(&vparams, 0, sizeof(vparams));
		if (ch->counts[CHET_V1]) {
			vparams.cvp_n1 = ch->counts[CHET_V1];
			strncpy(vparams.cvp_label1, vendor_labels[0], 16);
		}
		if (ch->counts[CHET_V2]) {
			vparams.cvp_n2 = ch->counts[CHET_V2];
			strncpy(vparams.cvp_label2, vendor_labels[1], 16);
		}
		if (ch->counts[CHET_V3]) {
			vparams.cvp_n3 = ch->counts[CHET_V3];
			strncpy(vparams.cvp_label3, vendor_labels[2], 16);
		}
		if (ch->counts[CHET_V4]) {
			vparams.cvp_n4 = ch->counts[CHET_V4];
			strncpy(vparams.cvp_label4, vendor_labels[3], 16);
		}
		if (copy_to_user(argp, &vparams, sizeof(vparams)))
			return -EFAULT;
		return 0;
	}
	case CHIOPOSITION:
	{
		struct changer_position pos;

		if (copy_from_user(&pos, argp, sizeof(pos)))
			return -EFAULT;
		if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) {
			DPRINTK("CHIOPOSITION: invalid parameter\n");
			return -EBADSLT;
		}
		mutex_lock(&ch->lock);
		retval = ch_position(ch, 0,
				     ch->firsts[pos.cp_type] + pos.cp_unit,
				     pos.cp_flags & CP_INVERT);
		mutex_unlock(&ch->lock);
		return retval;
	}
	case CHIOMOVE:
	{
		struct changer_move mv;

		if (copy_from_user(&mv, argp, sizeof(mv)))
			return -EFAULT;
		if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) ||
		    0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit)) {
			DPRINTK("CHIOMOVE: invalid parameter\n");
			return -EBADSLT;
		}
		mutex_lock(&ch->lock);
		retval = ch_move(ch, 0,
				 ch->firsts[mv.cm_fromtype] + mv.cm_fromunit,
				 ch->firsts[mv.cm_totype] + mv.cm_tounit,
				 mv.cm_flags & CM_INVERT);
		mutex_unlock(&ch->lock);
		return retval;
	}
	case CHIOEXCHANGE:
	{
		struct changer_exchange mv;

		if (copy_from_user(&mv, argp, sizeof(mv)))
			return -EFAULT;
		if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit) ||
		    0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) ||
		    0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) {
			DPRINTK("CHIOEXCHANGE: invalid parameter\n");
			return -EBADSLT;
		}
		mutex_lock(&ch->lock);
		retval = ch_exchange(ch, 0,
				     ch->firsts[mv.ce_srctype] + mv.ce_srcunit,
				     ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit,
				     ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit,
				     mv.ce_flags & CE_INVERT1,
				     mv.ce_flags & CE_INVERT2);
		mutex_unlock(&ch->lock);
		return retval;
	}
	case CHIOGSTATUS:
	{
		struct changer_element_status ces;

		if (copy_from_user(&ces, argp, sizeof(ces)))
			return -EFAULT;
		if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
			return -EINVAL;
		return ch_gstatus(ch, ces.ces_type, ces.ces_data);
	}
	case CHIOGELEM:
	{
		struct changer_get_element cge;
		u_char ch_cmd[12];
		u_char *buffer;
		unsigned int elem;
		int result, i;

		if (copy_from_user(&cge, argp, sizeof(cge)))
			return -EFAULT;
		if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
			return -EINVAL;
		elem = ch->firsts[cge.cge_type] + cge.cge_unit;

		buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
		if (!buffer)
			return -ENOMEM;
		mutex_lock(&ch->lock);

	voltag_retry:
		memset(ch_cmd, 0, sizeof(ch_cmd));
		ch_cmd[0] = READ_ELEMENT_STATUS;
		ch_cmd[1] = ((ch->device->lun & 0x7) << 5) |
			    (ch->voltags ? 0x10 : 0) |
			    ch_elem_to_typecode(ch, elem);
		ch_cmd[2] = (elem >> 8) & 0xff;
		ch_cmd[3] = elem & 0xff;
		ch_cmd[5] = 1;
		ch_cmd[9] = 255;

		result = ch_do_scsi(ch, ch_cmd, 12,
				    buffer, 256, DMA_FROM_DEVICE);
		if (!result) {
			cge.cge_status = buffer[18];
			cge.cge_flags = 0;
			if (buffer[18] & CESTATUS_EXCEPT)
				cge.cge_errno = EIO;
			if (buffer[25] & 0x80) {
				cge.cge_flags |= CGE_SRC;
				if (buffer[25] & 0x40)
					cge.cge_flags |= CGE_INVERT;
				/* translate the source address back to type/unit */
				elem = (buffer[26] << 8) | buffer[27];
				for (i = 0; i < 4; i++) {
					if (elem >= ch->firsts[i] &&
					    elem < ch->firsts[i] + ch->counts[i]) {
						cge.cge_srctype = i;
						cge.cge_srcunit = elem - ch->firsts[i];
					}
				}
			}
			if ((buffer[22] & 0x30) == 0x30) {
				cge.cge_flags |= CGE_IDLUN;
				cge.cge_id = buffer[23];
				cge.cge_lun = buffer[22] & 7;
			}
			if (buffer[9] & 0x80) {
				cge.cge_flags |= CGE_PVOLTAG;
				memcpy(cge.cge_pvoltag, buffer + 28, 36);
			}
			if (buffer[9] & 0x40) {
				cge.cge_flags |= CGE_AVOLTAG;
				memcpy(cge.cge_avoltag, buffer + 64, 36);
			}
		} else if (ch->voltags) {
			/* device rejected the voltag bit: disable and retry */
			ch->voltags = 0;
			VPRINTK(KERN_INFO, "device has no volume tag support\n");
			goto voltag_retry;
		}
		kfree(buffer);
		mutex_unlock(&ch->lock);

		if (copy_to_user(argp, &cge, sizeof(cge)))
			return -EFAULT;
		return result;
	}
	case CHIOINITELEM:
	{
		mutex_lock(&ch->lock);
		retval = ch_init_elem(ch);
		mutex_unlock(&ch->lock);
		return retval;
	}
	case CHIOSVOLTAG:
	{
		struct changer_set_voltag csv;
		int elem;

		if (copy_from_user(&csv, argp, sizeof(csv)))
			return -EFAULT;
		if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) {
			DPRINTK("CHIOSVOLTAG: invalid parameter\n");
			return -EBADSLT;
		}
		elem = ch->firsts[csv.csv_type] + csv.csv_unit;
		mutex_lock(&ch->lock);
		retval = ch_set_voltag(ch, elem,
				       csv.csv_flags & CSV_AVOLTAG,
				       csv.csv_flags & CSV_CLEARTAG,
				       csv.csv_voltag);
		mutex_unlock(&ch->lock);
		return retval;
	}
	default:
		return scsi_ioctl(ch->device, cmd, argp);
	}
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit layout of struct changer_element_status: the data pointer is a
 * compat_uptr_t instead of a native pointer, so the ioctl number differs.
 */
struct changer_element_status32 {
int ces_type;
compat_uptr_t ces_data;
};
#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32)
/*
 * compat_ioctl entry: most CHIO* commands have identical 32/64-bit
 * layouts and are forwarded unchanged; CHIOGSTATUS needs its user
 * pointer widened via compat_ptr().
 */
static long ch_ioctl_compat(struct file * file,
unsigned int cmd, unsigned long arg)
{
scsi_changer *ch = file->private_data;
switch (cmd) {
case CHIOGPARAMS:
case CHIOGVPARAMS:
case CHIOPOSITION:
case CHIOMOVE:
case CHIOEXCHANGE:
case CHIOGELEM:
case CHIOINITELEM:
case CHIOSVOLTAG:
/* compatible */
return ch_ioctl(file, cmd, arg);
case CHIOGSTATUS32:
{
struct changer_element_status32 ces32;
unsigned char __user *data;
if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
return -EFAULT;
if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
return -EINVAL;
data = compat_ptr(ces32.ces_data);
return ch_gstatus(ch, ces32.ces_type, data);
}
default:
// return scsi_ioctl_compat(ch->device, cmd, (void*)arg);
return -ENOIOCTLCMD;
}
}
#endif
/* ------------------------------------------------------------------------ */
/*
 * Midlayer probe callback: runs for every SCSI device, claims only
 * medium changers. Allocates per-device state, reserves a minor number
 * via the idr, creates the class device and reads the element
 * configuration. Error paths unwind in reverse order via gotos.
 */
static int ch_probe(struct device *dev)
{
struct scsi_device *sd = to_scsi_device(dev);
struct device *class_dev;
int ret;
scsi_changer *ch;
if (sd->type != TYPE_MEDIUM_CHANGER)
return -ENODEV;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (NULL == ch)
return -ENOMEM;
/* reserve a minor; CH_MAX_DEVS caps the number of changers */
idr_preload(GFP_KERNEL);
spin_lock(&ch_index_lock);
ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
spin_unlock(&ch_index_lock);
idr_preload_end();
if (ret < 0) {
if (ret == -ENOSPC)
ret = -ENODEV;
goto free_ch;
}
ch->minor = ret;
sprintf(ch->name,"ch%d",ch->minor);
/* the device node appears as "sch<minor>" ("s" + ch->name) */
class_dev = device_create(ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
if (IS_ERR(class_dev)) {
sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
ch->minor);
ret = PTR_ERR(class_dev);
goto remove_idr;
}
mutex_init(&ch->lock);
kref_init(&ch->ref);
ch->device = sd;
ret = ch_readconfig(ch);
if (ret)
goto destroy_dev;
/* optional full element rescan, controlled by the 'init' parameter */
if (init)
ch_init_elem(ch);
dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
destroy_dev:
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
remove_idr:
idr_remove(&ch_index_idr, ch->minor);
free_ch:
kfree(ch);
return ret;
}
/*
 * Midlayer remove callback: make the minor unreachable first (so no new
 * opens can find it), remove the class device, then drop the probe
 * reference -- the state is freed once the last open file closes.
 */
static int ch_remove(struct device *dev)
{
scsi_changer *ch = dev_get_drvdata(dev);
spin_lock(&ch_index_lock);
idr_remove(&ch_index_idr, ch->minor);
spin_unlock(&ch_index_lock);
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
kref_put(&ch->ref, ch_destroy);
return 0;
}
/* SCSI midlayer glue; ch_probe() binds only medium-changer devices. */
static struct scsi_driver ch_template = {
.gendrv = {
.name = "ch",
.owner = THIS_MODULE,
.probe = ch_probe,
.remove = ch_remove,
},
};
/* file operations for the "sch*" character devices (no read/write/seek) */
static const struct file_operations changer_fops = {
.owner = THIS_MODULE,
.open = ch_open,
.release = ch_release,
.unlocked_ioctl = ch_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ch_ioctl_compat,
#endif
.llseek = noop_llseek,
};
/*
 * Module load: create the sysfs class, register the character-device
 * major and hook into the SCSI midlayer; unwind in reverse on failure.
 */
static int __init init_ch_module(void)
{
int rc;
printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
ch_sysfs_class = class_create(THIS_MODULE, "scsi_changer");
if (IS_ERR(ch_sysfs_class)) {
rc = PTR_ERR(ch_sysfs_class);
return rc;
}
rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops);
if (rc < 0) {
printk("Unable to get major %d for SCSI-Changer\n",
SCSI_CHANGER_MAJOR);
goto fail1;
}
rc = scsi_register_driver(&ch_template.gendrv);
if (rc < 0)
goto fail2;
return 0;
fail2:
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
fail1:
class_destroy(ch_sysfs_class);
return rc;
}
/* Module unload: tear everything down in reverse registration order. */
static void __exit exit_ch_module(void)
{
scsi_unregister_driver(&ch_template.gendrv);
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
class_destroy(ch_sysfs_class);
idr_destroy(&ch_index_idr);
}
module_init(init_ch_module);
module_exit(exit_ch_module);
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| 24.340019 | 78 | 0.627806 | [
"model"
] |
9e2b9f6de7a82093a2da15c0636fa0e10cbec7ee | 181,132 | c | C | sys/fs/udf/udf_subr.c | calmsacibis995/minix | dfba95598f553b6560131d35a76658f1f8c9cf38 | [
"Unlicense"
] | null | null | null | sys/fs/udf/udf_subr.c | calmsacibis995/minix | dfba95598f553b6560131d35a76658f1f8c9cf38 | [
"Unlicense"
] | null | null | null | sys/fs/udf/udf_subr.c | calmsacibis995/minix | dfba95598f553b6560131d35a76658f1f8c9cf38 | [
"Unlicense"
] | null | null | null | /* $NetBSD: udf_subr.c,v 1.132 2015/08/24 08:31:56 hannken Exp $ */
/*
* Copyright (c) 2006, 2008 Reinoud Zandijk
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.132 2015/08/24 08:31:56 hannken Exp $");
#endif /* not lint */
#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <fs/unicode.h>
#include <dev/clock_subr.h>
#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>
#include <sys/dirhash.h>
#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"
#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
#define UDF_SET_SYSTEMFILE(vp) \
/* XXXAD Is the vnode locked? */ \
(vp)->v_vflag |= VV_SYSTEM; \
vref((vp)); \
vput((vp)); \
extern int syncer_maxdelay; /* maximum delay time */
extern int (**udf_vnodeop_p)(void *);
/* --------------------------------------------------------------------- */
//#ifdef DEBUG
#if 1
#if 0
static void
udf_dumpblob(boid *blob, uint32_t dlen)
{
int i, j;
printf("blob = %p\n", blob);
printf("dump of %d bytes\n", dlen);
for (i = 0; i < dlen; i+ = 16) {
printf("%04x ", i);
for (j = 0; j < 16; j++) {
if (i+j < dlen) {
printf("%02x ", blob[i+j]);
} else {
printf(" ");
}
}
for (j = 0; j < 16; j++) {
if (i+j < dlen) {
if (blob[i+j]>32 && blob[i+j]! = 127) {
printf("%c", blob[i+j]);
} else {
printf(".");
}
}
}
printf("\n");
}
printf("\n");
Debugger();
}
#endif
/*
 * Debug helper: pretty-print the cached mmc_discinfo. Emits output only
 * when the UDF_DEBUG_VOLUMES bit is set in udf_verbose.
 */
static void
udf_dump_discinfo(struct udf_mount *ump)
{
char bits[128];
struct mmc_discinfo *di = &ump->discinfo;
if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
return;
printf("Device/media info :\n");
printf("\tMMC profile 0x%02x\n", di->mmc_profile);
printf("\tderived class %d\n", di->mmc_class);
printf("\tsector size %d\n", di->sector_size);
printf("\tdisc state %d\n", di->disc_state);
printf("\tlast ses state %d\n", di->last_session_state);
printf("\tbg format state %d\n", di->bg_format_state);
printf("\tfrst track %d\n", di->first_track);
printf("\tfst on last ses %d\n", di->first_track_last_session);
printf("\tlst on last ses %d\n", di->last_track_last_session);
printf("\tlink block penalty %d\n", di->link_block_penalty);
snprintb(bits, sizeof(bits), MMC_DFLAGS_FLAGBITS, di->disc_flags);
printf("\tdisc flags %s\n", bits);
printf("\tdisc id %x\n", di->disc_id);
printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);
printf("\tnum sessions %d\n", di->num_sessions);
printf("\tnum tracks %d\n", di->num_tracks);
snprintb(bits, sizeof(bits), MMC_CAP_FLAGBITS, di->mmc_cur);
printf("\tcapabilities cur %s\n", bits);
snprintb(bits, sizeof(bits), MMC_CAP_FLAGBITS, di->mmc_cap);
printf("\tcapabilities cap %s\n", bits);
}
/*
 * Debug helper: pretty-print an mmc_trackinfo. Emits output only when
 * the UDF_DEBUG_VOLUMES bit is set in udf_verbose.
 */
static void
udf_dump_trackinfo(struct mmc_trackinfo *trackinfo)
{
char bits[128];
if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
return;
printf("Trackinfo for track %d:\n", trackinfo->tracknr);
printf("\tsessionnr %d\n", trackinfo->sessionnr);
printf("\ttrack mode %d\n", trackinfo->track_mode);
printf("\tdata mode %d\n", trackinfo->data_mode);
snprintb(bits, sizeof(bits), MMC_TRACKINFO_FLAGBITS, trackinfo->flags);
printf("\tflags %s\n", bits);
printf("\ttrack start %d\n", trackinfo->track_start);
printf("\tnext_writable %d\n", trackinfo->next_writable);
printf("\tfree_blocks %d\n", trackinfo->free_blocks);
printf("\tpacket_size %d\n", trackinfo->packet_size);
printf("\ttrack size %d\n", trackinfo->track_size);
printf("\tlast recorded block %d\n", trackinfo->last_recorded);
}
#else
#define udf_dump_discinfo(a);
#define udf_dump_trackinfo(a);
#endif
/* --------------------------------------------------------------------- */
/* not called often */
/*
 * (Re)read the disc information into ump->discinfo. On MMC-capable
 * devices (CD/DVD) a single ioctl suffices; otherwise a synthetic
 * profile is fabricated from the partition size so the rest of the code
 * can treat a plain disk partition like a closed, rewritable medium.
 * Returns 0 or an errno from getdisksize().
 */
int
udf_update_discinfo(struct udf_mount *ump)
{
struct vnode *devvp = ump->devvp;
uint64_t psize;
unsigned secsize;
struct mmc_discinfo *di;
int error;
DPRINTF(VOLUMES, ("read/update disc info\n"));
di = &ump->discinfo;
memset(di, 0, sizeof(struct mmc_discinfo));
/* check if we're on a MMC capable device, i.e. CD/DVD */
error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED);
if (error == 0) {
udf_dump_discinfo(ump);
return 0;
}
/* disc partition support */
error = getdisksize(devvp, &psize, &secsize);
if (error)
return error;
/* set up a disc info profile for partitions */
di->mmc_profile = 0x01; /* disc type */
di->mmc_class = MMC_CLASS_DISC;
di->disc_state = MMC_STATE_CLOSED;
di->last_session_state = MMC_STATE_CLOSED;
di->bg_format_state = MMC_BGFSTATE_COMPLETED;
di->link_block_penalty = 0;
di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
di->mmc_cap = di->mmc_cur;
di->disc_flags = MMC_DFLAGS_UNRESTRICTED;
/* TODO problem with last_possible_lba on resizable VND; request */
di->last_possible_lba = psize;
di->sector_size = secsize;
/* single session, single track covering the whole partition */
di->num_sessions = 1;
di->num_tracks = 1;
di->first_track = 1;
di->first_track_last_session = di->last_track_last_session = 1;
udf_dump_discinfo(ump);
return 0;
}
/*
 * Read track information for ti->tracknr. For real MMC devices the
 * request is passed straight through; for plain disc partitions only
 * track 1 exists and a synthetic trackinfo spanning the whole partition
 * is returned. Returns 0, EIO for a bad track number, or the ioctl's
 * error.
 */
int
udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
{
struct vnode *devvp = ump->devvp;
struct mmc_discinfo *di = &ump->discinfo;
int error, class;
DPRINTF(VOLUMES, ("read track info\n"));
class = di->mmc_class;
if (class != MMC_CLASS_DISC) {
/* tracknr specified in struct ti */
error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED);
return error;
}
/* disc partition support */
if (ti->tracknr != 1)
return EIO;
/* create fake ti (TODO check for resized vnds) */
ti->sessionnr = 1;
ti->track_mode = 0; /* XXX */
ti->data_mode = 0; /* XXX */
ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;
ti->track_start = 0;
ti->packet_size = 1;
/* TODO support for resizable vnd */
ti->track_size = di->last_possible_lba;
ti->next_writable = di->last_possible_lba;
ti->last_recorded = ti->next_writable;
ti->free_blocks = 0;
return 0;
}
/*
 * Let the MMC framework derive suitable write parameters from the
 * disc's characteristics. Nothing to do for plain disc partitions.
 * Tries track 2 first (track 1 may be a reserved track), then falls
 * back to track 1. Returns 0 or the ioctl's error.
 */
int
udf_setup_writeparams(struct udf_mount *ump)
{
struct mmc_writeparams mmc_writeparams;
int error;
if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
return 0;
/*
 * only CD burning normally needs setting up, but other disc types
 * might need other settings to be made. The MMC framework will set up
 * the necessary recording parameters according to the disc
 * characteristics read in. Modifications can be made in the discinfo
 * structure passed to change the nature of the disc.
 */
memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;
/*
 * UDF dictates first track to determine track mode for the whole
 * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
 * To prevent problems with a `reserved' track in front we start with
 * the 2nd track and if that is not valid, go for the 1st.
 */
mmc_writeparams.tracknr = 2;
mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */
mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */
error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams,
FKIOCTL, NOCRED);
if (error) {
mmc_writeparams.tracknr = 1;
error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS,
&mmc_writeparams, FKIOCTL, NOCRED);
}
return error;
}
int
udf_synchronise_caches(struct udf_mount *ump)
{
struct mmc_op mmc_op;
DPRINTF(CALL, ("udf_synchronise_caches()\n"));
if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
return 0;
/* discs are done now */
if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
return 0;
memset(&mmc_op, 0, sizeof(struct mmc_op));
mmc_op.operation = MMC_OP_SYNCHRONISECACHE;
/* ignore return code */
(void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
return 0;
}
/* --------------------------------------------------------------------- */
/* track/session searching for mounting */
/*
 * Find the first and last track number of the session selected by
 * args->sessionnr. A negative sessionnr is taken relative to the last
 * session; zero selects the last session. On success *first_tracknr and
 * *last_tracknr delimit the session. Returns the track-info read error,
 * or EINVAL when the drive returns inconsistent track info.
 */
int
udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
 int *first_tracknr, int *last_tracknr)
{
 struct mmc_trackinfo trackinfo;
 uint32_t tracknr, start_track, num_tracks;
 int error;
 /* if negative, sessionnr is relative to last session */
 if (args->sessionnr < 0) {
  args->sessionnr += ump->discinfo.num_sessions;
 }
 /* sanity */
 if (args->sessionnr < 0)
  args->sessionnr = 0;
 if (args->sessionnr > ump->discinfo.num_sessions)
  args->sessionnr = ump->discinfo.num_sessions;
 /* search the tracks for this session, zero session nr indicates last */
 if (args->sessionnr == 0)
  args->sessionnr = ump->discinfo.num_sessions;
 /* an empty last session can't be selected; take the one before it */
 if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
  args->sessionnr--;
 /* sanity again */
 if (args->sessionnr < 0)
  args->sessionnr = 0;
 /* search the first and last track of the specified session */
 num_tracks = ump->discinfo.num_tracks;
 start_track = ump->discinfo.first_track;
 /* search for first track of this session */
 for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
  /* get track info */
  trackinfo.tracknr = tracknr;
  error = udf_update_trackinfo(ump, &trackinfo);
  if (error)
   return error;
  if (trackinfo.sessionnr == args->sessionnr)
   break;
 }
 *first_tracknr = tracknr;
 /* search for last track of this session */
 for (;tracknr <= num_tracks; tracknr++) {
  /* get track info */
  trackinfo.tracknr = tracknr;
  error = udf_update_trackinfo(ump, &trackinfo);
  if (error || (trackinfo.sessionnr != args->sessionnr)) {
   /* overshot the session; step back to the last match */
   tracknr--;
   break;
  }
 }
 if (tracknr > num_tracks)
  tracknr--;
 *last_tracknr = tracknr;
 if (*last_tracknr < *first_tracknr) {
  printf( "udf_search_tracks: sanity check on drive+disc failed, "
   "drive returned garbage\n");
  return EINVAL;
 }
 assert(*last_tracknr >= *first_tracknr);
 return 0;
}
/*
* NOTE: this is the only routine in this file that directly peeks into the
* metadata file but since its at a larval state of the mount it can't hurt.
*
* XXX candidate for udf_allocation.c
* XXX clean me up!, change to new node reading code.
*/
/*
 * If any extent of the metadata file lies entirely within the given
 * track, record that track as the metadata track (ump->metadata_track).
 * No-op when the node/fids partition doesn't intersect the track at all.
 */
static void
udf_check_track_metadata_overlap(struct udf_mount *ump,
 struct mmc_trackinfo *trackinfo)
{
 struct part_desc *part;
 struct file_entry *fe;
 struct extfile_entry *efe;
 struct short_ad *s_ad;
 struct long_ad *l_ad;
 uint32_t track_start, track_end;
 uint32_t phys_part_start, phys_part_end, part_start, part_end;
 uint32_t sector_size, len, alloclen, plb_num;
 uint8_t *pos;
 int addr_type, icblen, icbflags;
 /* get our track extents */
 track_start = trackinfo->track_start;
 track_end = track_start + trackinfo->track_size;
 /* get our base partition extent */
 KASSERT(ump->node_part == ump->fids_part);
 part = ump->partitions[ump->vtop[ump->node_part]];
 phys_part_start = udf_rw32(part->start_loc);
 phys_part_end = phys_part_start + udf_rw32(part->part_len);
 /* no use if its outside the physical partition */
 if ((phys_part_start >= track_end) || (phys_part_end < track_start))
  return;
 /*
  * now follow all extents in the fe/efe to see if they refer to this
  * track
  */
 sector_size = ump->discinfo.sector_size;
 /* XXX should we claim exclusive access to the metafile ? */
 /* TODO: move to new node read code */
 fe = ump->metadata_node->fe;
 efe = ump->metadata_node->efe;
 /* allocation descriptors follow the extended attributes area (l_ea) */
 if (fe) {
  alloclen = udf_rw32(fe->l_ad);
  pos = &fe->data[0] + udf_rw32(fe->l_ea);
  icbflags = udf_rw16(fe->icbtag.flags);
 } else {
  assert(efe);
  alloclen = udf_rw32(efe->l_ad);
  pos = &efe->data[0] + udf_rw32(efe->l_ea);
  icbflags = udf_rw16(efe->icbtag.flags);
 }
 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
 /* walk the allocation descriptors, alloclen bytes in total */
 while (alloclen) {
  if (addr_type == UDF_ICB_SHORT_ALLOC) {
   icblen = sizeof(struct short_ad);
   s_ad = (struct short_ad *) pos;
   len = udf_rw32(s_ad->len);
   plb_num = udf_rw32(s_ad->lb_num);
  } else {
   /* should not be present, but why not */
   icblen = sizeof(struct long_ad);
   l_ad = (struct long_ad *) pos;
   len = udf_rw32(l_ad->len);
   plb_num = udf_rw32(l_ad->loc.lb_num);
   /* pvpart_num = udf_rw16(l_ad->loc.part_num); */
  }
  /* process extent */
  len = UDF_EXT_LEN(len);
  part_start = phys_part_start + plb_num;
  part_end = part_start + (len / sector_size);
  if ((part_start >= track_start) && (part_end <= track_end)) {
   /* extent is enclosed within this track */
   ump->metadata_track = *trackinfo;
   return;
  }
  pos += icblen;
  alloclen -= icblen;
 }
}
/*
 * Locate the currently writable data track and metadata track on the
 * disc and store their track info in ump->data_track and
 * ump->metadata_track. Damaged tracks are offered for repair first.
 * Returns EROFS when no writable (NWA-valid) track can be found for
 * either role, or the error from reading disc/track info.
 */
int
udf_search_writing_tracks(struct udf_mount *ump)
{
 struct vnode *devvp = ump->devvp;
 struct mmc_trackinfo trackinfo;
 struct mmc_op mmc_op;
 struct part_desc *part;
 uint32_t tracknr, start_track, num_tracks;
 uint32_t track_start, track_end, part_start, part_end;
 int node_alloc, error;
 /*
  * in the CD/(HD)DVD/BD recordable device model a few tracks within
  * the last session might be open but in the UDF device model at most
  * three tracks can be open: a reserved track for delayed ISO VRS
  * writing, a data track and a metadata track. We search here for the
  * data track and the metadata track. Note that the reserved track is
  * troublesome but can be detected by its small size of < 512 sectors.
  */
 /* update discinfo since it might have changed */
 error = udf_update_discinfo(ump);
 if (error)
  return error;
 num_tracks = ump->discinfo.num_tracks;
 start_track = ump->discinfo.first_track;
 /* fetch info on first and possibly only track */
 trackinfo.tracknr = start_track;
 error = udf_update_trackinfo(ump, &trackinfo);
 if (error)
  return error;
 /* copy results to our mount point */
 ump->data_track = trackinfo;
 ump->metadata_track = trackinfo;
 /* if not sequential, we're done */
 if (num_tracks == 1)
  return 0;
 for (tracknr = start_track;tracknr <= num_tracks; tracknr++) {
  /* get track info */
  trackinfo.tracknr = tracknr;
  error = udf_update_trackinfo(ump, &trackinfo);
  if (error)
   return error;
  /*
   * If this track is marked damaged, ask for repair. This is an
   * optional command, so ignore its error but report warning.
   */
  if (trackinfo.flags & MMC_TRACKINFO_DAMAGED) {
   memset(&mmc_op, 0, sizeof(mmc_op));
   mmc_op.operation = MMC_OP_REPAIRTRACK;
   mmc_op.mmc_profile = ump->discinfo.mmc_profile;
   mmc_op.tracknr = tracknr;
   error = VOP_IOCTL(devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
   if (error)
    (void)printf("Drive can't explicitly repair "
     "damaged track %d, but it might "
     "autorepair\n", tracknr);
   /* reget track info */
   error = udf_update_trackinfo(ump, &trackinfo);
   if (error)
    return error;
  }
  /* tracks without a valid next-writable address are not usable */
  if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0)
   continue;
  track_start = trackinfo.track_start;
  track_end = track_start + trackinfo.track_size;
  /* check for overlap on data partition */
  part = ump->partitions[ump->data_part];
  part_start = udf_rw32(part->start_loc);
  part_end = part_start + udf_rw32(part->part_len);
  if ((part_start < track_end) && (part_end > track_start)) {
   ump->data_track = trackinfo;
   /* TODO check if UDF partition data_part is writable */
  }
  /* check for overlap on metadata partition */
  node_alloc = ump->vtop_alloc[ump->node_part];
  if ((node_alloc == UDF_ALLOC_METASEQUENTIAL) ||
      (node_alloc == UDF_ALLOC_METABITMAP)) {
   udf_check_track_metadata_overlap(ump, &trackinfo);
  } else {
   ump->metadata_track = trackinfo;
  }
 }
 if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
  return EROFS;
 if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
  return EROFS;
 return 0;
}
/* --------------------------------------------------------------------- */
/*
 * Check if the blob starts with a good UDF tag. Tags are protected by a
 * checksum over the header except one byte at position 4 that is the
 * checksum itself.
 */
/*
 * Verify the descriptor tag header checksum of the blob. The checksum
 * covers the first 16 bytes except byte 4, which holds the checksum
 * itself. Returns 0 on a valid tag, EINVAL otherwise.
 */
int
udf_check_tag(void *blob)
{
 struct desc_tag *tag = blob;
 uint8_t *bytes, sum;
 int i;
 /* sum the 16 header bytes, skipping the checksum byte itself */
 bytes = (uint8_t *) tag;
 sum = 0;
 for (i = 0; i < 16; i++) {
  if (i == 4)
   continue;
  sum += bytes[i];
 }
 /* a mismatch means this is not a valid tag */
 return (sum == tag->cksum) ? 0 : EINVAL;
}
/*
 * Check the descriptor payload CRC as recorded in the tag. Returns EIO
 * when the recorded CRC length exceeds the given maximum length, EINVAL
 * on a CRC mismatch, and 0 when the CRC is valid or absent.
 */
/*
 * Verify the payload CRC of a tagged descriptor. A zero CRC length means
 * there is nothing to check. EIO signals a CRC length larger than
 * max_length; EINVAL signals a broken (mismatching) CRC.
 */
int
udf_check_tag_payload(void *blob, uint32_t max_length)
{
 struct desc_tag *tag = blob;
 uint16_t computed, crc_len;
 crc_len = udf_rw16(tag->desc_crc_len);
 /* no payload CRC recorded -> nothing to verify */
 if (crc_len == 0)
  return 0;
 /* refuse descriptors claiming a payload beyond the given bound */
 if (crc_len > max_length)
  return EIO;
 computed = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len);
 if (computed != udf_rw16(tag->desc_crc))
  return EINVAL; /* broken tag */
 return 0;
}
void
udf_validate_tag_sum(void *blob)
{
struct desc_tag *tag = blob;
uint8_t *pos, sum, cnt;
/* calculate TAG header checksum */
pos = (uint8_t *) tag;
sum = 0;
for(cnt = 0; cnt < 16; cnt++) {
if (cnt != 4) sum += *pos;
pos++;
}
tag->cksum = sum; /* 8 bit */
}
/*
 * (Re)compute both the payload CRC and the tag header checksum of the
 * blob. Assumes the sector number of the descriptor to be saved is
 * already present in the tag.
 */
void
udf_validate_tag_and_crc_sums(void *blob)
{
 struct desc_tag *tag = blob;
 uint8_t *payload = ((uint8_t *) blob) + UDF_DESC_TAG_LENGTH;
 uint16_t crc_len;
 /* write out the payload CRC when a payload length is recorded */
 crc_len = udf_rw16(tag->desc_crc_len);
 if (crc_len > 0)
  tag->desc_crc = udf_rw16(udf_cksum(payload, crc_len));
 /* the header checksum covers the CRC field, so compute it last */
 udf_validate_tag_sum(blob);
}
/* --------------------------------------------------------------------- */
/*
* XXX note the different semantics from udfclient: for FIDs it still rounds
* up to sectors. Use udf_fidsize() for a correct length.
*/
/*
 * Calculate the on-disc size in bytes of the given descriptor based on
 * its tag type. The result is rounded up to whole sectors of lb_size
 * bytes, except when lb_size is 1, in which case the raw byte size is
 * returned. Returns 0 when either the size or lb_size is 0.
 */
int
udf_tagsize(union dscrptr *dscr, uint32_t lb_size)
{
 uint32_t size, tag_id, num_lb, elmsz;
 tag_id = udf_rw16(dscr->tag.id);
 switch (tag_id) {
 case TAGID_LOGVOL :
  size = sizeof(struct logvol_desc) - 1;
  size += udf_rw32(dscr->lvd.mt_l);
  break;
 case TAGID_UNALLOC_SPACE :
  elmsz = sizeof(struct extent_ad);
  size = sizeof(struct unalloc_sp_desc) - elmsz;
  size += udf_rw32(dscr->usd.alloc_desc_num) * elmsz;
  break;
 case TAGID_FID :
  size = UDF_FID_SIZE + dscr->fid.l_fi + udf_rw16(dscr->fid.l_iu);
  /* FIDs are 4-byte aligned */
  size = (size + 3) & ~3;
  break;
 case TAGID_LOGVOL_INTEGRITY :
  size = sizeof(struct logvol_int_desc) - sizeof(uint32_t);
  size += udf_rw32(dscr->lvid.l_iu);
  /* plus 2 * num_part 32-bit entries (presumably the free/size
   * space tables -- TODO confirm against struct definition) */
  size += (2 * udf_rw32(dscr->lvid.num_part) * sizeof(uint32_t));
  break;
 case TAGID_SPACE_BITMAP :
  size = sizeof(struct space_bitmap_desc) - 1;
  size += udf_rw32(dscr->sbd.num_bytes);
  break;
 case TAGID_SPARING_TABLE :
  elmsz = sizeof(struct spare_map_entry);
  size = sizeof(struct udf_sparing_table) - elmsz;
  size += udf_rw16(dscr->spt.rt_l) * elmsz;
  break;
 case TAGID_FENTRY :
  size = sizeof(struct file_entry);
  size += udf_rw32(dscr->fe.l_ea) + udf_rw32(dscr->fe.l_ad)-1;
  break;
 case TAGID_EXTFENTRY :
  size = sizeof(struct extfile_entry);
  size += udf_rw32(dscr->efe.l_ea) + udf_rw32(dscr->efe.l_ad)-1;
  break;
 case TAGID_FSD :
  size = sizeof(struct fileset_desc);
  break;
 default :
  size = sizeof(union dscrptr);
  break;
 }
 if ((size == 0) || (lb_size == 0))
  return 0;
 if (lb_size == 1)
  return size;
 /* round up in sectors */
 num_lb = (size + lb_size -1) / lb_size;
 return num_lb * lb_size;
}
/*
 * Return the exact (unrounded) byte size of a file identifier
 * descriptor: the fixed FID header plus name and implementation-use
 * areas, rounded up to 4-byte alignment. Panics on a non-FID tag.
 */
int
udf_fidsize(struct fileid_desc *fid)
{
 uint32_t bytes;
 if (udf_rw16(fid->tag.id) != TAGID_FID)
  panic("got udf_fidsize on non FID\n");
 bytes = UDF_FID_SIZE + fid->l_fi + udf_rw16(fid->l_iu);
 /* FIDs are 4-byte aligned */
 return (bytes + 3) & ~3;
}
/* --------------------------------------------------------------------- */
/*
 * Lock a udf_node by setting IN_LOCKED in i_flags under node_mutex.
 * `flag' carries extra caller flag bits that are set together with the
 * lock and cleared again by udf_unlock_node(). fname/lineno record the
 * lock site for diagnosing lock stalls.
 */
void
udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
{
 int ret;
 mutex_enter(&udf_node->node_mutex);
 /* wait until free */
 while (udf_node->i_flags & IN_LOCKED) {
  /* wake up every hz/8 ticks so a stalled lock gets reported */
  ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
  /* TODO check if we should return error; abort */
  if (ret == EWOULDBLOCK) {
   DPRINTF(LOCKING, ( "udf_lock_node: udf_node %p would block "
    "wanted at %s:%d, previously locked at %s:%d\n",
    udf_node, fname, lineno,
    udf_node->lock_fname, udf_node->lock_lineno));
  }
 }
 /* grab */
 udf_node->i_flags |= IN_LOCKED | flag;
 /* debug */
 udf_node->lock_fname = fname;
 udf_node->lock_lineno = lineno;
 mutex_exit(&udf_node->node_mutex);
}
/*
 * Release a node lock taken with udf_lock_node() and wake all waiters.
 * `flag' must contain the same extra flag bits that were passed when
 * locking, so they are cleared along with IN_LOCKED.
 */
void
udf_unlock_node(struct udf_node *udf_node, int flag)
{
 mutex_enter(&udf_node->node_mutex);
 /* drop the caller's flag bits together with the lock bit */
 udf_node->i_flags &= ~(flag | IN_LOCKED);
 cv_broadcast(&udf_node->node_lock);
 mutex_exit(&udf_node->node_mutex);
}
/* --------------------------------------------------------------------- */
/*
 * Read one anchor volume descriptor pointer at the given sector into
 * *dst. Returns ENOENT for a blank block or a non-anchor descriptor (in
 * which case *dst is left NULL), or the raw read error.
 */
static int
udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
{
 int error;
 error = udf_read_phys_dscr(ump, sector, M_UDFVOLD,
  (union dscrptr **) dst);
 if (error)
  return error;
 /* blank terminator blocks are not allowed here */
 if (*dst == NULL)
  return ENOENT;
 /* anything but an anchor is rejected and freed */
 if (udf_rw16((*dst)->tag.id) == TAGID_ANCHOR)
  return 0;
 DPRINTF(VOLUMES, ("Not an anchor\n"));
 free(*dst, M_UDFVOLD);
 *dst = NULL;
 return ENOENT;
}
/*
 * Read in the anchor volume descriptor pointers of the selected session.
 * Candidate locations are session start+256, end-256, end and start+512
 * [UDF 2.60/6.11.2]. Also initialises the packet size and the possible
 * VAT location range. Returns the number of anchors found (0..4).
 */
int
udf_read_anchors(struct udf_mount *ump)
{
 struct udf_args *args = &ump->mount_args;
 struct mmc_trackinfo first_track;
 struct mmc_trackinfo second_track;
 struct mmc_trackinfo last_track;
 struct anchor_vdp **anchorsp;
 uint32_t track_start;
 uint32_t track_end;
 uint32_t positions[4];
 int first_tracknr, last_tracknr;
 int error, anch, ok, first_anchor;
 /* search the first and last track of the specified session */
 error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
 if (!error) {
  first_track.tracknr = first_tracknr;
  error = udf_update_trackinfo(ump, &first_track);
 }
 if (!error) {
  last_track.tracknr = last_tracknr;
  error = udf_update_trackinfo(ump, &last_track);
 }
 if ((!error) && (first_tracknr != last_tracknr)) {
  second_track.tracknr = first_tracknr+1;
  error = udf_update_trackinfo(ump, &second_track);
 }
 if (error) {
  printf("UDF mount: reading disc geometry failed\n");
  return 0;
 }
 track_start = first_track.track_start;
 /* `end' is not as straightforward as start. */
 track_end = last_track.track_start
  + last_track.track_size - last_track.free_blocks - 1;
 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
  /* end of track is not straightforward here */
  if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
   track_end = last_track.last_recorded;
  else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
   track_end = last_track.next_writable
    - ump->discinfo.link_block_penalty;
 }
 /* it's no use reading a blank track; skip the first anchor then */
 first_anchor = 0;
 if (first_track.flags & MMC_TRACKINFO_BLANK)
  first_anchor = 1;
 /* get our packet size */
 ump->packet_size = first_track.packet_size;
 if (first_track.flags & MMC_TRACKINFO_BLANK)
  ump->packet_size = second_track.packet_size;
 if (ump->packet_size <= 1) {
  /* take max, but not bigger than 64 */
  ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
  ump->packet_size = MIN(ump->packet_size, 64);
 }
 KASSERT(ump->packet_size >= 1);
 /* read anchors start+256, start+512, end-256, end */
 positions[0] = track_start+256;
 positions[1] = track_end-256;
 positions[2] = track_end;
 positions[3] = track_start+512; /* [UDF 2.60/6.11.2] */
 /* XXX shouldn't +512 be preferred above +256 for compat with Roxio CD */
 ok = 0;
 anchorsp = ump->anchors;
 for (anch = first_anchor; anch < 4; anch++) {
  DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
   positions[anch]));
  error = udf_read_anchor(ump, positions[anch], anchorsp);
  if (!error) {
   anchorsp++;
   ok++;
  }
 }
 /* VATs are only recorded on sequential media, but initialise */
 ump->first_possible_vat_location = track_start + 2;
 ump->last_possible_vat_location = track_end + last_track.packet_size;
 return ok;
}
/* --------------------------------------------------------------------- */
/*
 * Classify the content type of a node's data for write clustering:
 * directories carry FIDs, regular nodes carry user data, and the
 * metadata bitmap node is special-cased as UDF_C_METADATA_SBM.
 */
int
udf_get_c_type(struct udf_node *udf_node)
{
 int what;
 what = (udf_node->vnode->v_type == VDIR) ?
  UDF_C_FIDS : UDF_C_USERDATA;
 if (udf_node->ump &&
     (udf_node == udf_node->ump->metadatabitmap_node))
  what = UDF_C_METADATA_SBM;
 return what;
}
/*
 * Map a content type to the virtual partition it is recorded on:
 * node descriptors go to the node partition, FIDs to the fids
 * partition, and everything else to the data partition.
 */
int
udf_get_record_vpart(struct udf_mount *ump, int udf_c_type)
{
 switch (udf_c_type) {
 case UDF_C_NODE:
  return ump->node_part;
 case UDF_C_FIDS:
  return ump->fids_part;
 default:
  return ump->data_part;
 }
}
/*
* BUGALERT: some rogue implementations use random physical partition
* numbers to break other implementations so lookup the number.
*/
/*
 * Translate an on-disc partition number to our partition slot index.
 * Returns the slot whose descriptor carries raw_phys_part, or the first
 * empty slot / UDF_PARTITIONS when there is no match.
 */
static uint16_t
udf_find_raw_phys(struct udf_mount *ump, uint16_t raw_phys_part)
{
 struct part_desc *pdesc;
 uint16_t slot;
 for (slot = 0; slot < UDF_PARTITIONS; slot++) {
  pdesc = ump->partitions[slot];
  /* stop at an empty slot or at the matching descriptor */
  if (pdesc == NULL)
   break;
  if (udf_rw16(pdesc->part_num) == raw_phys_part)
   break;
 }
 return slot;
}
/* --------------------------------------------------------------------- */
/* we dont try to be smart; we just record the parts */
/*
 * Replace the stored descriptor `name' with `dscr', freeing any
 * previously stored one. Ownership of `dscr' passes to the mount.
 */
#define UDF_UPDATE_DSCR(name, dscr) \
 if (name) \
  free(name, M_UDFVOLD); \
 name = dscr;
/*
 * File one descriptor read from the volume descriptor sequence into the
 * mount structure. The descriptor is either stored (ownership taken) or
 * freed before returning; EINVAL is returned for an allocated partition
 * descriptor whose partition number cannot be matched to a slot.
 */
static int
udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
{
 uint16_t phys_part, raw_phys_part;
 DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
  udf_rw16(dscr->tag.id)));
 switch (udf_rw16(dscr->tag.id)) {
 case TAGID_PRI_VOL : /* primary partition */
  UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
  break;
 case TAGID_LOGVOL : /* logical volume */
  UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
  break;
 case TAGID_UNALLOC_SPACE : /* unallocated space */
  UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
  break;
 case TAGID_IMP_VOL : /* implementation */
  /* XXX do we care about multiple impl. descr ? */
  UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
  break;
 case TAGID_PARTITION : /* physical partition */
  /* not much use if its not allocated */
  if ((udf_rw16(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
   free(dscr, M_UDFVOLD);
   break;
  }
  /*
   * BUGALERT: some rogue implementations use random physical
   * partition numbers to break other implementations so lookup
   * the number.
   */
  raw_phys_part = udf_rw16(dscr->pd.part_num);
  phys_part = udf_find_raw_phys(ump, raw_phys_part);
  if (phys_part == UDF_PARTITIONS) {
   /* no slot found; descriptor is freed here */
   free(dscr, M_UDFVOLD);
   return EINVAL;
  }
  UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd);
  break;
 case TAGID_VOL : /* volume space extender; rare */
  DPRINTF(VOLUMES, ("VDS extender ignored\n"));
  free(dscr, M_UDFVOLD);
  break;
 default :
  DPRINTF(VOLUMES, ("Unhandled VDS type %d\n",
   udf_rw16(dscr->tag.id)));
  free(dscr, M_UDFVOLD);
 }
 return 0;
}
#undef UDF_UPDATE_DSCR
/* --------------------------------------------------------------------- */
/*
 * Read and process one volume descriptor sequence (VDS) extent starting
 * at sector `loc', `len' bytes long. Reading stops at a blank block or a
 * TERM descriptor; all other descriptors are handed over to
 * udf_process_vds_descriptor(), which takes ownership of the memory and
 * frees it itself on its error path.
 */
static int
udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len)
{
 union dscrptr *dscr;
 uint32_t sector_size, dscr_size;
 int error;
 sector_size = ump->discinfo.sector_size;
 /* loc is sectornr, len is in bytes */
 error = EIO;
 while (len) {
  error = udf_read_phys_dscr(ump, loc, M_UDFVOLD, &dscr);
  if (error)
   return error;
  /* blank block is a terminator */
  if (dscr == NULL)
   return 0;
  /* TERM descriptor is a terminator */
  if (udf_rw16(dscr->tag.id) == TAGID_TERM) {
   free(dscr, M_UDFVOLD);
   return 0;
  }
  /* process all others; ownership of dscr passes on */
  dscr_size = udf_tagsize(dscr, sector_size);
  error = udf_process_vds_descriptor(ump, dscr);
  if (error) {
   /*
    * Do NOT free dscr here: udf_process_vds_descriptor()
    * has already freed it on its (only) error path, so
    * freeing again would be a double free.
    */
   break;
  }
  assert((dscr_size % sector_size) == 0);
  len -= dscr_size;
  loc += dscr_size / sector_size;
 }
 return error;
}
/*
 * Read in the main volume descriptor sequence via the anchors, falling
 * back to the reserve VDS extent when the main one cannot be read.
 */
int
udf_read_vds_space(struct udf_mount *ump)
{
 /* struct udf_args *args = &ump->mount_args; */
 struct anchor_vdp *anchor, *anchor2;
 size_t size;
 uint32_t main_loc, main_len;
 uint32_t reserve_loc, reserve_len;
 int error;
 /*
  * read in VDS space provided by the anchors; if one descriptor read
  * fails, try the mirror sector.
  *
  * check if 2nd anchor is different from 1st; if so, go for 2nd. This
  * avoids the `compatibility features' of DirectCD that may confuse
  * stuff completely.
  */
 anchor = ump->anchors[0];
 anchor2 = ump->anchors[1];
 assert(anchor);
 if (anchor2) {
  /* compare only the main VDS extent descriptions */
  size = sizeof(struct extent_ad);
  if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size))
   anchor = anchor2;
  /* reserve is specified to be a literal copy of main */
 }
 main_loc = udf_rw32(anchor->main_vds_ex.loc);
 main_len = udf_rw32(anchor->main_vds_ex.len);
 reserve_loc = udf_rw32(anchor->reserve_vds_ex.loc);
 reserve_len = udf_rw32(anchor->reserve_vds_ex.len);
 error = udf_read_vds_extent(ump, main_loc, main_len);
 if (error) {
  /* main VDS failed; try the reserve copy */
  printf("UDF mount: reading in reserve VDS extent\n");
  error = udf_read_vds_extent(ump, reserve_loc, reserve_len);
 }
 return error;
}
/* --------------------------------------------------------------------- */
/*
* Read in the logical volume integrity sequence pointed to by our logical
* volume descriptor. Its a sequence that can be extended using fields in the
* integrity descriptor itself. On sequential media only one is found, on
* rewritable media a sequence of descriptors can be found as a form of
* history keeping and on non sequential write-once media the chain is vital
* to allow more and more descriptors to be written. The last descriptor
* written in an extent needs to claim space for a new extent.
*/
/*
 * Walk the integrity sequence, remembering the last (most recent)
 * integrity descriptor in ump->logvol_integrity and recording the
 * extents visited in ump->lvint_trace. Returns ENOENT when no valid
 * integrity descriptor is found at all.
 */
static int
udf_retrieve_lvint(struct udf_mount *ump)
{
 union dscrptr *dscr;
 struct logvol_int_desc *lvint;
 struct udf_lvintq *trace;
 uint32_t lb_size, lbnum, len;
 int dscr_type, error, trace_len;
 lb_size = udf_rw32(ump->logical_vol->lb_size);
 len = udf_rw32(ump->logical_vol->integrity_seq_loc.len);
 lbnum = udf_rw32(ump->logical_vol->integrity_seq_loc.loc);
 /* clean trace */
 memset(ump->lvint_trace, 0,
  UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq));
 trace_len = 0;
 trace = ump->lvint_trace;
 trace->start = lbnum;
 trace->end = lbnum + len/lb_size;
 trace->pos = 0;
 trace->wpos = 0;
 lvint = NULL;
 dscr = NULL;
 error = 0;
 while (len) {
  trace->pos = lbnum - trace->start;
  trace->wpos = trace->pos + 1;
  /* read in our integrity descriptor */
  error = udf_read_phys_dscr(ump, lbnum, M_UDFVOLD, &dscr);
  if (!error) {
   if (dscr == NULL) {
    trace->wpos = trace->pos;
    break; /* empty terminates */
   }
   dscr_type = udf_rw16(dscr->tag.id);
   if (dscr_type == TAGID_TERM) {
    trace->wpos = trace->pos;
    break; /* clean terminator */
   }
   if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
    /* fatal... corrupt disc */
    error = ENOENT;
    break;
   }
   /* keep only the most recent integrity descriptor */
   if (lvint)
    free(lvint, M_UDFVOLD);
   lvint = &dscr->lvid;
   dscr = NULL;
  } /* else hope for the best... maybe the next is ok */
  DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n",
   udf_rw32(lvint->integrity_type) ? "CLOSED" : "OPEN"));
  /* proceed sequential */
  lbnum += 1;
  len -= lb_size;
  /* are we linking to a new piece? */
  /*
   * NOTE(review): dscr is reset to NULL right after capturing an
   * integrity descriptor above, so `dscr &&' can only hold in
   * unusual flows -- verify the intended guard here.
   */
  if (dscr && lvint->next_extent.len) {
   len = udf_rw32(lvint->next_extent.len);
   lbnum = udf_rw32(lvint->next_extent.loc);
   if (trace_len >= UDF_LVDINT_SEGMENTS-1) {
    /* IEK! segment link full... */
    DPRINTF(VOLUMES, ("lvdint segments full\n"));
    error = EINVAL;
   } else {
    trace++;
    trace_len++;
    trace->start = lbnum;
    trace->end = lbnum + len/lb_size;
    trace->pos = 0;
    trace->wpos = 0;
   }
  }
 }
 /* clean up the mess, esp. when there is an error */
 if (dscr)
  free(dscr, M_UDFVOLD);
 if (error && lvint) {
  free(lvint, M_UDFVOLD);
  lvint = NULL;
 }
 /* finding no integrity descriptor at all is fatal */
 if (!lvint)
  error = ENOENT;
 ump->logvol_integrity = lvint;
 return error;
}
/*
 * Make room in the logvol integrity sequence by dropping the `losing'
 * oldest entries and compacting the remaining descriptors towards the
 * front of the recorded extents, rewriting them in place. Unreadable
 * entries are substituted by the nearest readable neighbour. Always
 * returns 0.
 */
static int
udf_loose_lvint_history(struct udf_mount *ump)
{
 union dscrptr **bufs, *dscr, *last_dscr;
 struct udf_lvintq *trace, *in_trace, *out_trace;
 struct logvol_int_desc *lvint;
 uint32_t in_ext, in_pos, in_len;
 uint32_t out_ext, out_wpos, out_len;
 uint32_t lb_num;
 uint32_t len, start;
 int ext, minext, extlen, cnt, cpy_len, dscr_type;
 int losing;
 int error;
 DPRINTF(VOLUMES, ("need to lose some lvint history\n"));
 /* search smallest extent */
 trace = &ump->lvint_trace[0];
 minext = trace->end - trace->start;
 for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) {
  trace = &ump->lvint_trace[ext];
  extlen = trace->end - trace->start;
  if (extlen == 0)
   break;
  minext = MIN(minext, extlen);
 }
 losing = MIN(minext, UDF_LVINT_LOSSAGE);
 /* no sense wiping all */
 if (losing == minext)
  losing--;
 DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing));
 /* get buffer for pieces */
 bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_TEMP, M_WAITOK);
 /* read from position `losing', write from position 0 */
 in_ext = 0;
 in_pos = losing;
 in_trace = &ump->lvint_trace[in_ext];
 in_len = in_trace->end - in_trace->start;
 out_ext = 0;
 out_wpos = 0;
 out_trace = &ump->lvint_trace[out_ext];
 out_len = out_trace->end - out_trace->start;
 last_dscr = NULL;
 for(;;) {
  out_trace->pos = out_wpos;
  out_trace->wpos = out_trace->pos;
  /* step to the next input extent when this one is consumed */
  if (in_pos >= in_len) {
   in_ext++;
   in_pos = 0;
   in_trace = &ump->lvint_trace[in_ext];
   in_len = in_trace->end - in_trace->start;
  }
  /* step to the next output extent when this one is full */
  if (out_wpos >= out_len) {
   out_ext++;
   out_wpos = 0;
   out_trace = &ump->lvint_trace[out_ext];
   out_len = out_trace->end - out_trace->start;
  }
  /* copy overlap contents */
  cpy_len = MIN(in_len - in_pos, out_len - out_wpos);
  cpy_len = MIN(cpy_len, in_len - in_trace->pos);
  if (cpy_len == 0)
   break;
  /* copy */
  DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len));
  for (cnt = 0; cnt < cpy_len; cnt++) {
   /* read in our integrity descriptor */
   lb_num = in_trace->start + in_pos + cnt;
   error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD,
    &dscr);
   if (error) {
    /* copy last one */
    dscr = last_dscr;
   }
   bufs[cnt] = dscr;
   if (!error) {
    if (dscr == NULL) {
     out_trace->pos = out_wpos + cnt;
     out_trace->wpos = out_trace->pos;
     break; /* empty terminates */
    }
    dscr_type = udf_rw16(dscr->tag.id);
    if (dscr_type == TAGID_TERM) {
     out_trace->pos = out_wpos + cnt;
     out_trace->wpos = out_trace->pos;
     break; /* clean terminator */
    }
    if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
     panic( "UDF integrity sequence "
      "corrupted while mounted!\n");
    }
    last_dscr = dscr;
   }
  }
  /* patch up if first entry was on error */
  if (bufs[0] == NULL) {
   for (cnt = 0; cnt < cpy_len; cnt++)
    if (bufs[cnt] != NULL)
     break;
   last_dscr = bufs[cnt];
   for (; cnt > 0; cnt--) {
    bufs[cnt] = last_dscr;
   }
  }
  /* glue + write out */
  DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len));
  for (cnt = 0; cnt < cpy_len; cnt++) {
   lb_num = out_trace->start + out_wpos + cnt;
   lvint = &bufs[cnt]->lvid;
   /* set continuation */
   len = 0;
   start = 0;
   if (out_wpos + cnt == out_len) {
    /* get continuation */
    trace = &ump->lvint_trace[out_ext+1];
    len = trace->end - trace->start;
    start = trace->start;
   }
   lvint->next_extent.len = udf_rw32(len);
   lvint->next_extent.loc = udf_rw32(start);
   /*
    * NOTE(review): this overwrites the lb_num computed from
    * out_trace above; `trace' may still point at a previous
    * extent here -- verify the intended write location.
    */
   lb_num = trace->start + trace->wpos;
   error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
    bufs[cnt], lb_num, lb_num);
   DPRINTFIF(VOLUMES, error,
    ("error writing lvint lb_num\n"));
  }
  /* free non repeating descriptors */
  last_dscr = NULL;
  for (cnt = 0; cnt < cpy_len; cnt++) {
   if (bufs[cnt] != last_dscr)
    free(bufs[cnt], M_UDFVOLD);
   last_dscr = bufs[cnt];
  }
  /* advance */
  in_pos += cpy_len;
  out_wpos += cpy_len;
 }
 free(bufs, M_TEMP);
 return 0;
}
/*
 * Append the current in-memory logvol integrity descriptor to the
 * integrity sequence on disc, followed by a terminator when space
 * permits. With UDF_APPENDONLY_LVINT set in lvflag, EROFS is returned
 * instead of reclaiming space by rewriting history.
 */
static int
udf_writeout_lvint(struct udf_mount *ump, int lvflag)
{
 struct udf_lvintq *trace;
 struct timeval now_v;
 struct timespec now_s;
 uint32_t sector;
 int logvol_integrity;
 int space, error;
 DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n"));
again:
 /* get free space in last chunk */
 trace = ump->lvint_trace;
 while (trace->wpos > (trace->end - trace->start)) {
  DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, "
   "wpos = %d\n", trace->start, trace->end,
   trace->pos, trace->wpos));
  trace++;
 }
 /* check if there is space to append */
 space = (trace->end - trace->start) - trace->wpos;
 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
  "space = %d\n", trace->start, trace->end, trace->pos,
  trace->wpos, space));
 /* get state */
 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
 if (logvol_integrity == UDF_INTEGRITY_CLOSED) {
  /* closed state needs more headroom in append-only mode */
  if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) {
   /* TODO extent LVINT space if possible */
   return EROFS;
  }
 }
 if (space < 1) {
  if (lvflag & UDF_APPENDONLY_LVINT)
   return EROFS;
  /* loose history by re-writing extents */
  error = udf_loose_lvint_history(ump);
  if (error)
   return error;
  goto again;
 }
 /* update our integrity descriptor to identify us and timestamp it */
 DPRINTF(VOLUMES, ("updating integrity descriptor\n"));
 microtime(&now_v);
 TIMEVAL_TO_TIMESPEC(&now_v, &now_s);
 udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time);
 udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME);
 udf_add_impl_regid(ump, &ump->logvol_info->impl_id);
 /* writeout integrity descriptor */
 sector = trace->start + trace->wpos;
 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
  (union dscrptr *) ump->logvol_integrity,
  sector, sector);
 DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error));
 if (error)
  return error;
 /* advance write position */
 trace->wpos++; space--;
 if (space >= 1) {
  /* append terminator */
  sector = trace->start + trace->wpos;
  error = udf_write_terminator(ump, sector);
  DPRINTF(VOLUMES, ("write terminator : error = %d\n", error));
 }
 space = (trace->end - trace->start) - trace->wpos;
 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
  "space = %d\n", trace->start, trace->end, trace->pos,
  trace->wpos, space));
 DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor "
  "successfull\n"));
 return error;
}
/* --------------------------------------------------------------------- */
/*
 * Read in the unallocated and freed space bitmaps of all physical
 * partitions and hook them up in the mount structure. Space *tables*
 * are not supported; their presence, as well as a missing or malformed
 * bitmap, refuses a writable mount (EROFS).
 */
static int
udf_read_physical_partition_spacetables(struct udf_mount *ump)
{
 union dscrptr *dscr;
 /* struct udf_args *args = &ump->mount_args; */
 struct part_desc *partd;
 struct part_hdr_desc *parthdr;
 struct udf_bitmap *bitmap;
 uint32_t phys_part;
 uint32_t lb_num, len;
 int error, dscr_type;
 /* unallocated space map */
 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
  partd = ump->partitions[phys_part];
  if (partd == NULL)
   continue;
  parthdr = &partd->_impl_use.part_hdr;
  lb_num = udf_rw32(partd->start_loc);
  lb_num += udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
  len = udf_rw32(parthdr->unalloc_space_bitmap.len);
  if (len == 0)
   continue;
  DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
  error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
  if (!error && dscr) {
   /* analyse */
   dscr_type = udf_rw16(dscr->tag.id);
   if (dscr_type == TAGID_SPACE_BITMAP) {
    DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
    ump->part_unalloc_dscr[phys_part] = &dscr->sbd;
    /* fill in ump->part_unalloc_bits */
    bitmap = &ump->part_unalloc_bits[phys_part];
    bitmap->blob = (uint8_t *) dscr;
    bitmap->bits = dscr->sbd.data;
    bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
    bitmap->pages = NULL; /* TODO */
    bitmap->data_pos = 0;
    bitmap->metadata_pos = 0;
   } else {
    free(dscr, M_UDFVOLD);
    printf( "UDF mount: error reading unallocated "
     "space bitmap\n");
    return EROFS;
   }
  } else {
   /* blank not allowed */
   printf("UDF mount: blank unallocated space bitmap\n");
   return EROFS;
  }
 }
 /* unallocated space table (not supported) */
 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
  partd = ump->partitions[phys_part];
  if (partd == NULL)
   continue;
  parthdr = &partd->_impl_use.part_hdr;
  len = udf_rw32(parthdr->unalloc_space_table.len);
  if (len) {
   printf("UDF mount: space tables not supported\n");
   return EROFS;
  }
 }
 /* freed space map */
 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
  partd = ump->partitions[phys_part];
  if (partd == NULL)
   continue;
  parthdr = &partd->_impl_use.part_hdr;
  /* freed space map */
  lb_num = udf_rw32(partd->start_loc);
  lb_num += udf_rw32(parthdr->freed_space_bitmap.lb_num);
  len = udf_rw32(parthdr->freed_space_bitmap.len);
  if (len == 0)
   continue;
  DPRINTF(VOLUMES, ("Read freed space bitmap %d\n", lb_num));
  error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
  if (!error && dscr) {
   /* analyse */
   dscr_type = udf_rw16(dscr->tag.id);
   if (dscr_type == TAGID_SPACE_BITMAP) {
    DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
    ump->part_freed_dscr[phys_part] = &dscr->sbd;
    /*
     * Fill in ump->part_freed_bits. This previously
     * (incorrectly) targeted part_unalloc_bits, which
     * would clobber the unallocated bitmap hooked up
     * above; use the freed-bitmap slot matching
     * part_freed_dscr instead.
     */
    bitmap = &ump->part_freed_bits[phys_part];
    bitmap->blob = (uint8_t *) dscr;
    bitmap->bits = dscr->sbd.data;
    bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
    bitmap->pages = NULL; /* TODO */
    bitmap->data_pos = 0;
    bitmap->metadata_pos = 0;
   } else {
    free(dscr, M_UDFVOLD);
    printf( "UDF mount: error reading freed "
     "space bitmap\n");
    return EROFS;
   }
  } else {
   /* blank not allowed */
   printf("UDF mount: blank freed space bitmap\n");
   return EROFS;
  }
 }
 /* freed space table (not supported) */
 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
  partd = ump->partitions[phys_part];
  if (partd == NULL)
   continue;
  parthdr = &partd->_impl_use.part_hdr;
  len = udf_rw32(parthdr->freed_space_table.len);
  if (len) {
   printf("UDF mount: space tables not supported\n");
   return EROFS;
  }
 }
 return 0;
}
/* TODO implement async writeout */
/*
 * Write the in-core unallocated and freed space bitmaps of all physical
 * partitions back to disc. `waitfor' is currently ignored; all writes
 * are synchronous (see TODO on async writeout). Returns the last write
 * error seen, or 0 when all writes succeeded.
 */
int
udf_write_physical_partition_spacetables(struct udf_mount *ump, int waitfor)
{
 union dscrptr *dscr;
 /* struct udf_args *args = &ump->mount_args; */
 struct part_desc *partd;
 struct part_hdr_desc *parthdr;
 uint32_t phys_part;
 uint32_t lb_num, len, ptov;
 int error_all, error;
 error_all = 0;
 /* unallocated space map */
 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
  partd = ump->partitions[phys_part];
  if (partd == NULL)
   continue;
  parthdr = &partd->_impl_use.part_hdr;
  /* ptov translates partition-relative to physical sectors */
  ptov = udf_rw32(partd->start_loc);
  lb_num = udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
  len = udf_rw32(parthdr->unalloc_space_bitmap.len);
  if (len == 0)
   continue;
  DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n",
   lb_num + ptov));
  dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part];
  error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
   (union dscrptr *) dscr,
   ptov + lb_num, lb_num);
  if (error) {
   DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
   error_all = error;
  }
 }
 /* freed space map */
 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
  partd = ump->partitions[phys_part];
  if (partd == NULL)
   continue;
  parthdr = &partd->_impl_use.part_hdr;
  /* freed space map */
  ptov = udf_rw32(partd->start_loc);
  lb_num = udf_rw32(parthdr->freed_space_bitmap.lb_num);
  len = udf_rw32(parthdr->freed_space_bitmap.len);
  if (len == 0)
   continue;
  DPRINTF(VOLUMES, ("Write freed space bitmap %d\n",
   lb_num + ptov));
  dscr = (union dscrptr *) ump->part_freed_dscr[phys_part];
  error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
   (union dscrptr *) dscr,
   ptov + lb_num, lb_num);
  if (error) {
   DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
   error_all = error;
  }
 }
 return error_all;
}
/*
 * Read in the metadata partition's unallocated-space bitmap file and
 * attach it to `ump'. Returns 0 when there is nothing to do (no
 * metadata bitmap node recorded), ENOMEM on allocation failure, or
 * EROFS when the bitmap file can't be read or isn't a space bitmap.
 * On success the allocated blob is owned by `ump' via
 * ump->metadata_unalloc_dscr / ump->metadata_unalloc_bits.
 */
static int
udf_read_metadata_partition_spacetable(struct udf_mount *ump)
{
	struct udf_node *bitmap_node;
	union dscrptr *dscr;
	struct udf_bitmap *bitmap;
	uint64_t inflen;
	int error, dscr_type;

	bitmap_node = ump->metadatabitmap_node;

	/* only read in when metadata bitmap node is read in */
	if (bitmap_node == NULL)
		return 0;

	/* the bitmap file's information length is the size of the blob */
	if (bitmap_node->fe) {
		inflen = udf_rw64(bitmap_node->fe->inf_len);
	} else {
		KASSERT(bitmap_node->efe);
		inflen = udf_rw64(bitmap_node->efe->inf_len);
	}

	DPRINTF(VOLUMES, ("Reading metadata space bitmap for "
		"%"PRIu64" bytes\n", inflen));

	/* allocate space for bitmap */
	dscr = malloc(inflen, M_UDFVOLD, M_CANFAIL | M_WAITOK);
	if (!dscr)
		return ENOMEM;

	/* set vnode type to regular file or we can't read from it! */
	bitmap_node->vnode->v_type = VREG;

	/* read in complete metadata bitmap file */
	error = vn_rdwr(UIO_READ, bitmap_node->vnode,
			dscr,
			inflen, 0,
			UIO_SYSSPACE,
			IO_SYNC | IO_ALTSEMANTICS, FSCRED,
			NULL, NULL);
	if (error) {
		DPRINTF(VOLUMES, ("Error reading metadata space bitmap\n"));
		goto errorout;
	}

	/* analyse: accept only a space bitmap descriptor */
	dscr_type = udf_rw16(dscr->tag.id);
	if (dscr_type == TAGID_SPACE_BITMAP) {
		DPRINTF(VOLUMES, ("Accepting metadata space bitmap\n"));
		ump->metadata_unalloc_dscr = &dscr->sbd;

		/* fill in bitmap bits; blob keeps the raw descriptor alive */
		bitmap = &ump->metadata_unalloc_bits;
		bitmap->blob  = (uint8_t *) dscr;
		bitmap->bits  = dscr->sbd.data;
		bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
		bitmap->pages = NULL;	/* TODO */
		bitmap->data_pos     = 0;
		bitmap->metadata_pos = 0;
	} else {
		DPRINTF(VOLUMES, ("No valid bitmap found!\n"));
		goto errorout;
	}

	return 0;

errorout:
	/* free the blob; nothing was attached to ump on this path */
	free(dscr, M_UDFVOLD);
	printf( "UDF mount: error reading unallocated "
		"space bitmap for metadata partition\n");
	return EROFS;
}
/*
 * Shrink the metadata partition's unallocated-space bitmap file to the
 * descriptor's tag size and write it out synchronously. Returns 0 when
 * there is no metadata bitmap node, otherwise the first error seen
 * during write/flush/sync.
 *
 * Fix: the original overwrote the vn_rdwr() error with the vflushbuf()
 * result, silently discarding a failed bitmap write; errors are now
 * chained so the first failure is reported.
 */
int
udf_write_metadata_partition_spacetable(struct udf_mount *ump, int waitfor)
{
	struct udf_node *bitmap_node;
	union dscrptr *dscr;
	uint64_t new_inflen;
	int dummy, error;

	bitmap_node = ump->metadatabitmap_node;

	/* only write out when metadata bitmap node is known */
	if (bitmap_node == NULL)
		return 0;

	if (!bitmap_node->fe) {
		KASSERT(bitmap_node->efe);
	}

	/* reduce length to the tag size of the bitmap descriptor */
	dscr = (union dscrptr *) ump->metadata_unalloc_dscr;
	new_inflen = udf_tagsize(dscr, 1);

	DPRINTF(VOLUMES, ("Resize and write out metadata space bitmap "
		" for %"PRIu64" bytes\n", new_inflen));

	/* best effort: a resize failure doesn't prevent the writeout */
	error = udf_resize_node(bitmap_node, new_inflen, &dummy);
	if (error)
		printf("Error resizing metadata space bitmap\n");

	error = vn_rdwr(UIO_WRITE, bitmap_node->vnode,
			dscr,
			new_inflen, 0,
			UIO_SYSSPACE,
			IO_ALTSEMANTICS, FSCRED,
			NULL, NULL);

	bitmap_node->i_flags |= IN_MODIFIED;

	/* don't discard a write error: only flush/sync on success */
	if (error == 0)
		error = vflushbuf(bitmap_node->vnode, FSYNC_WAIT);
	if (error == 0)
		error = VOP_FSYNC(bitmap_node->vnode,
				FSCRED, FSYNC_WAIT, 0, 0);

	if (error)
		printf( "Error writing out metadata partition unalloced "
			"space bitmap!\n");

	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Checks if ump's vds information is correct and complete and derives
 * all mount-time state from it: the virtual-to-physical partition
 * translation tables (vtop/vtop_tp/vtop_alloc), the logical volume
 * open/close actions, the error behaviour and the write strategy.
 * Returns 0 when the volume descriptor set is usable, an errno
 * (mostly EINVAL) otherwise.
 */
int
udf_process_vds(struct udf_mount *ump) {
	union udf_pmap *mapping;
	/* struct udf_args *args = &ump->mount_args; */
	struct logvol_int_desc *lvint;
	struct udf_logvol_info *lvinfo;
	uint32_t n_pm;
	uint8_t *pmap_pos;
	char *domain_name, *map_name;
	const char *check_name;
	char bits[128];
	int pmap_stype, pmap_size;
	int pmap_type, log_part, phys_part, raw_phys_part, maps_on;
	int n_phys, n_virt, n_spar, n_meta;
	int len;

	if (ump == NULL)
		return ENOENT;

	/* we need at least an anchor (trivial, but for safety) */
	if (ump->anchors[0] == NULL)
		return EINVAL;

	/* we need at least one primary and one logical volume descriptor */
	if ((ump->primary_vol == NULL) || (ump->logical_vol) == NULL)
		return EINVAL;

	/* we need at least one partition descriptor */
	if (ump->partitions[0] == NULL)
		return EINVAL;

	/* check logical volume sector size versus device sector size */
	if (udf_rw32(ump->logical_vol->lb_size) != ump->discinfo.sector_size) {
		printf("UDF mount: format violation, lb_size != sector size\n");
		return EINVAL;
	}

	/* check domain name */
	domain_name = ump->logical_vol->domain_id.id;
	if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) {
		printf("mount_udf: disc not OSTA UDF Compliant, aborting\n");
		return EINVAL;
	}

	/* retrieve logical volume integrity sequence */
	(void)udf_retrieve_lvint(ump);

	/*
	 * We need at least one logvol integrity descriptor recorded. Note
	 * that its OK to have an open logical volume integrity here. The VAT
	 * will close/update the integrity.
	 */
	if (ump->logvol_integrity == NULL)
		return EINVAL;

	/* process derived structures */
	n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
	lvint = ump->logvol_integrity;
	/* implementation-use info follows the 2*n_pm size/free tables */
	lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]);
	ump->logvol_info = lvinfo;

	/* TODO check udf versions? */

	/*
	 * check logvol mappings: effective virt->log partmap translation
	 * check and recording of the mapping results. Saves expensive
	 * strncmp() in tight places.
	 */
	DPRINTF(VOLUMES, ("checking logvol mappings\n"));
	n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
	pmap_pos = ump->logical_vol->maps;

	if (n_pm > UDF_PMAPS) {
		printf("UDF mount: too many mappings\n");
		return EINVAL;
	}

	/* count types and set partition numbers */
	ump->data_part = ump->node_part = ump->fids_part = 0;
	n_phys = n_virt = n_spar = n_meta = 0;
	for (log_part = 0; log_part < n_pm; log_part++) {
		mapping = (union udf_pmap *) pmap_pos;
		/* partmap records start with (type, length) bytes */
		pmap_stype = pmap_pos[0];
		pmap_size = pmap_pos[1];
		switch (pmap_stype) {
		case 1: /* physical mapping */
			/* volseq = udf_rw16(mapping->pm1.vol_seq_num); */
			raw_phys_part = udf_rw16(mapping->pm1.part_num);
			pmap_type = UDF_VTOP_TYPE_PHYS;
			n_phys++;
			ump->data_part = log_part;
			ump->node_part = log_part;
			ump->fids_part = log_part;
			break;
		case 2: /* virtual/sparable/meta mapping */
			map_name = mapping->pm2.part_id.id;
			/* volseq = udf_rw16(mapping->pm2.vol_seq_num); */
			raw_phys_part = udf_rw16(mapping->pm2.part_num);
			pmap_type = UDF_VTOP_TYPE_UNKNOWN;
			len = UDF_REGID_ID_SIZE;

			check_name = "*UDF Virtual Partition";
			if (strncmp(map_name, check_name, len) == 0) {
				pmap_type = UDF_VTOP_TYPE_VIRT;
				n_virt++;
				ump->node_part = log_part;
				break;
			}
			check_name = "*UDF Sparable Partition";
			if (strncmp(map_name, check_name, len) == 0) {
				pmap_type = UDF_VTOP_TYPE_SPARABLE;
				n_spar++;
				ump->data_part = log_part;
				ump->node_part = log_part;
				ump->fids_part = log_part;
				break;
			}
			check_name = "*UDF Metadata Partition";
			if (strncmp(map_name, check_name, len) == 0) {
				pmap_type = UDF_VTOP_TYPE_META;
				n_meta++;
				ump->node_part = log_part;
				ump->fids_part = log_part;
				break;
			}
			break;
		default:
			return EINVAL;
		}

		/*
		 * BUGALERT: some rogue implementations use random physical
		 * partition numbers to break other implementations so lookup
		 * the number.
		 */
		phys_part = udf_find_raw_phys(ump, raw_phys_part);

		DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part,
			raw_phys_part, phys_part, pmap_type));

		if (phys_part == UDF_PARTITIONS)
			return EINVAL;
		if (pmap_type == UDF_VTOP_TYPE_UNKNOWN)
			return EINVAL;

		ump->vtop [log_part] = phys_part;
		ump->vtop_tp[log_part] = pmap_type;

		pmap_pos += pmap_size;
	}
	/* not winning the beauty contest */
	ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW;

	/* test some basic UDF assertions/requirements */
	if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1))
		return EINVAL;

	if (n_virt) {
		/* a virtual partition requires a physical one to map onto */
		if ((n_phys == 0) || n_spar || n_meta)
			return EINVAL;
	}
	if (n_spar + n_phys == 0)
		return EINVAL;

	/* select allocation type for each logical partition */
	for (log_part = 0; log_part < n_pm; log_part++) {
		maps_on = ump->vtop[log_part];
		switch (ump->vtop_tp[log_part]) {
		case UDF_VTOP_TYPE_PHYS :
			assert(maps_on == log_part);
			ump->vtop_alloc[log_part] = UDF_ALLOC_SPACEMAP;
			break;
		case UDF_VTOP_TYPE_VIRT :
			ump->vtop_alloc[log_part] = UDF_ALLOC_VAT;
			ump->vtop_alloc[maps_on] = UDF_ALLOC_SEQUENTIAL;
			break;
		case UDF_VTOP_TYPE_SPARABLE :
			assert(maps_on == log_part);
			ump->vtop_alloc[log_part] = UDF_ALLOC_SPACEMAP;
			break;
		case UDF_VTOP_TYPE_META :
			ump->vtop_alloc[log_part] = UDF_ALLOC_METABITMAP;
			if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) {
				/* special case for UDF 2.60 */
				ump->vtop_alloc[log_part] = UDF_ALLOC_METASEQUENTIAL;
				ump->vtop_alloc[maps_on] = UDF_ALLOC_SEQUENTIAL;
			}
			break;
		default:
			panic("bad alloction type in udf's ump->vtop\n");
		}
	}

	/* determine logical volume open/closure actions */
	if (n_virt) {
		ump->lvopen = 0;
		if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
			ump->lvopen |= UDF_OPEN_SESSION ;
		ump->lvclose = UDF_WRITE_VAT;
		if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION)
			ump->lvclose |= UDF_CLOSE_SESSION;
	} else {
		/* `normal' rewritable or non sequential media */
		ump->lvopen = UDF_WRITE_LVINT;
		ump->lvclose = UDF_WRITE_LVINT;
		if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0)
			ump->lvopen |= UDF_APPENDONLY_LVINT;
		if ((ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
			ump->lvopen &= ~UDF_APPENDONLY_LVINT;
	}

	/*
	 * Determine scheduler error behaviour. For virtual partitions, update
	 * the trackinfo; for sparable partitions replace a whole block on the
	 * sparable table. Always requeue.
	 */
	ump->lvreadwrite = 0;
	if (n_virt)
		ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
	if (n_spar)
		ump->lvreadwrite = UDF_REMAP_BLOCK;

	/*
	 * Select our scheduler. Order matters: later assignments override
	 * earlier ones; sparable partitions always force read-modify-write.
	 */
	ump->strategy = &udf_strat_rmw;
	if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
		ump->strategy = &udf_strat_sequential;
	if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
		(ump->discinfo.mmc_class == MMC_CLASS_UNKN))
			ump->strategy = &udf_strat_direct;
	if (n_spar)
		ump->strategy = &udf_strat_rmw;

#if 0
	/* read-only access won't benefit from the other schedulers */
	if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
		ump->strategy = &udf_strat_direct;
#endif

	/* print results */
	DPRINTF(VOLUMES, ("\tdata partition %d\n", ump->data_part));
	DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->data_part]));
	DPRINTF(VOLUMES, ("\tnode partition %d\n", ump->node_part));
	DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->node_part]));
	DPRINTF(VOLUMES, ("\tfids partition %d\n", ump->fids_part));
	DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->fids_part]));

	snprintb(bits, sizeof(bits), UDFLOGVOL_BITS, ump->lvopen);
	DPRINTF(VOLUMES, ("\tactions on logvol open  %s\n", bits));
	snprintb(bits, sizeof(bits), UDFLOGVOL_BITS, ump->lvclose);
	DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
	snprintb(bits, sizeof(bits), UDFONERROR_BITS, ump->lvreadwrite);
	DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));

	DPRINTF(VOLUMES, ("\tselected sheduler `%s`\n",
		(ump->strategy == &udf_strat_direct) ? "Direct" :
		(ump->strategy == &udf_strat_sequential) ? "Sequential" :
		(ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));

	/* signal its OK for now */
	return 0;
}
/* --------------------------------------------------------------------- */
/*
* Update logical volume name in all structures that keep a record of it. We
* use memmove since each of them might be specified as a source.
*
* Note that it doesn't update the VAT structure!
*/
/*
 * Copy the (new) 128-byte logical volume name into every structure
 * that records it: the logical volume descriptor, the fileset
 * descriptor and the implementation-use volume descriptor's lv_info.
 * Note that it doesn't update the VAT structure!
 */
static void
udf_update_logvolname(struct udf_mount *ump, char *logvol_id)
{
	struct logvol_desc *logvol;
	struct fileset_desc *fileset;
	struct udf_lv_info *lv_info;

	DPRINTF(VOLUMES, ("Updating logical volume name\n"));
	logvol  = ump->logical_vol;
	fileset = ump->fileset_desc;
	lv_info = (ump->implementation != NULL) ?
	    &ump->implementation->_impl_use.lv_info : NULL;

	/* logvol_id may point into one of the targets, hence memmove */
	memmove(logvol->logvol_id, logvol_id, 128);
	if (fileset != NULL)
		memmove(fileset->logvol_id, logvol_id, 128);
	if (lv_info != NULL)
		memmove(lv_info->logvol_id, logvol_id, 128);
}
/* --------------------------------------------------------------------- */
/*
 * Initialise a descriptor tag with the given tag id and location,
 * inheriting the descriptor version and serial number from the
 * logical volume descriptor. The checksum/CRC are zeroed here and
 * filled in later by udf_validate_tag_and_crc_sums().
 */
void
udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid,
	uint32_t sector)
{
	assert(ump->logical_vol);

	tag->id             = udf_rw16(tagid);
	tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver;
	tag->serial_num     = ump->logical_vol->tag.serial_num;
	tag->tag_loc        = udf_rw32(sector);
	tag->cksum          = 0;
	tag->reserved       = 0;
}
/*
 * Hand out the next unique file id and advance the counter in the
 * logical volume integrity descriptor under the logvol mutex.
 */
uint64_t
udf_advance_uniqueid(struct udf_mount *ump)
{
	uint64_t next_id;

	mutex_enter(&ump->logvol_mutex);
	next_id = udf_rw64(ump->logvol_integrity->lvint_next_unique_id);
	/* values below 0x10 are reserved; skip over them */
	if (next_id < 0x10)
		next_id = 0x10;
	ump->logvol_integrity->lvint_next_unique_id = udf_rw64(next_id + 1);
	mutex_exit(&ump->logvol_mutex);

	return next_id;
}
/*
 * Adjust the file or directory count kept in the logvol info by
 * `sign' (+1 on create, -1 on remove), depending on the node's icb
 * file type. Serialised by the allocate mutex.
 */
static void
udf_adjust_filecount(struct udf_node *udf_node, int sign)
{
	struct udf_mount *ump = udf_node->ump;
	uint32_t count;
	int file_type;

	/* get file type from whichever file entry variant is present */
	file_type = udf_node->fe ?
	    udf_node->fe->icbtag.file_type :
	    udf_node->efe->icbtag.file_type;

	/* adjust the proper counter */
	mutex_enter(&ump->allocate_mutex);
	if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
		count = udf_rw32(ump->logvol_info->num_directories);
		ump->logvol_info->num_directories = udf_rw32(count + sign);
	} else {
		count = udf_rw32(ump->logvol_info->num_files);
		ump->logvol_info->num_files = udf_rw32(count + sign);
	}
	mutex_exit(&ump->allocate_mutex);
}
/*
 * Fill in a charspec describing the CS0 "OSTA Compressed Unicode"
 * character set used throughout UDF.
 */
void
udf_osta_charset(struct charspec *charspec)
{
	memset(charspec, 0, sizeof(*charspec));
	charspec->type = 0;	/* CS0 (already zero; kept for clarity) */
	strcpy((char *) charspec->inf, "OSTA Compressed Unicode");
}
/*
 * Initialise a regid with the given identifier name; callers add the
 * appropriate id_suffix afterwards (udf_add_*_regid helpers).
 */
void
udf_set_regid(struct regid *regid, char const *name)
{
	memset(regid, 0, sizeof(*regid));
	regid->flags = 0;	/* not dirty and not protected */
	strcpy((char *) regid->id, name);
}
/*
 * Record the minimum UDF read version in a domain regid's id_suffix.
 */
void
udf_add_domain_regid(struct udf_mount *ump, struct regid *regid)
{
	uint16_t *ver = (uint16_t *) regid->id_suffix;

	/* NOTE(review): copied as-is; assumed already on-disc endian */
	*ver = ump->logvol_info->min_udf_readver;
}
/*
 * Record the minimum UDF read version plus the OS class/identifier
 * (unix/NetBSD) in a UDF entity regid's id_suffix.
 */
void
udf_add_udf_regid(struct udf_mount *ump, struct regid *regid)
{
	uint16_t *ver = (uint16_t *) regid->id_suffix;

	*ver = ump->logvol_info->min_udf_readver;

	regid->id_suffix[2] = 4;	/* unix */
	regid->id_suffix[3] = 8;	/* NetBSD */
}
/*
 * Record the OS class/identifier (unix/NetBSD) in an implementation
 * regid's id_suffix. `ump' is unused but kept for call-site symmetry.
 */
void
udf_add_impl_regid(struct udf_mount *ump, struct regid *regid)
{
	regid->id_suffix[0] = 4;	/* unix */
	regid->id_suffix[1] = 8;	/* NetBSD */
}
/*
 * Record this implementation's application version in an application
 * regid's id_suffix. `ump' is unused but kept for call-site symmetry.
 */
void
udf_add_app_regid(struct udf_mount *ump, struct regid *regid)
{
	regid->id_suffix[0] = APP_VERSION_MAIN;
	regid->id_suffix[1] = APP_VERSION_SUB;
}
/*
 * Build a '..' (parent) file identifier descriptor in `fid' referring
 * to `parent' with the given unique id. Returns the FID's size in
 * bytes.
 */
static int
udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid,
		struct long_ad *parent, uint64_t unique_id)
{
	/* the size of an empty FID is 38 but needs to be a multiple of 4 */
	const int fidsize = 40;

	udf_inittag(ump, &fid->tag, TAGID_FID, udf_rw32(parent->loc.lb_num));
	fid->file_version_num = udf_rw16(1);	/* UDF 2.3.4.1 */
	fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR;
	fid->icb = *parent;
	fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
	fid->tag.desc_crc_len = udf_rw16(fidsize - UDF_DESC_TAG_LENGTH);

	/* fill in the tag checksum and CRC over the descriptor */
	(void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);

	return fidsize;
}
/* --------------------------------------------------------------------- */
/*
* Extended attribute support. UDF knows of 3 places for extended attributes:
*
* (a) inside the file's (e)fe in the length of the extended attribute area
* before the allocation descriptors/filedata
*
* (b) in a file referenced by (e)fe->ext_attr_icb and
*
* (c) in the e(fe)'s associated stream directory that can hold various
* sub-files. In the stream directory a few fixed named subfiles are reserved
* for NT/Unix ACL's and OS/2 attributes.
*
 * NOTE: Extended attributes are read randomly but always written
 * *atomically*. For ACL's this interface is probably different but not known
 * to me yet.
*
* Order of extended attributes in a space :
* ECMA 167 EAs
* Non block aligned Implementation Use EAs
* Block aligned Implementation Use EAs
* Application Use EAs
*/
/*
 * Verify the header checksum of a "*UDF" implementation-use extended
 * attribute. Non-UDF implementation attributes carry no checksum and
 * pass trivially. Returns 0 when OK, EINVAL on checksum mismatch.
 */
static int
udf_impl_extattr_check(struct impl_extattr_entry *implext)
{
	uint16_t *cksum_pos;

	if (strncmp(implext->imp_id.id, "*UDF", 4) != 0)
		return 0;	/* not a UDF attribute: nothing to check */

	/* checksum valid? */
	DPRINTF(EXTATTR, ("checking UDF impl. attr checksum\n"));
	cksum_pos = (uint16_t *) implext->data;
	if (udf_rw16(*cksum_pos) != udf_ea_cksum((uint8_t *) implext))
		return EINVAL;
	return 0;
}
/*
 * (Re)compute and store the header checksum of a "*UDF"
 * implementation-use extended attribute; other attributes are left
 * untouched.
 */
static void
udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext)
{
	uint16_t *cksum_pos;

	if (strncmp(implext->imp_id.id, "*UDF", 4) != 0)
		return;

	/* set checksum */
	cksum_pos = (uint16_t *) implext->data;
	*cksum_pos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
}
/*
 * Search the node's (e)fe-embedded extended attribute space for an
 * attribute of type `sattr'; for implementation use (2048) and
 * application use (65536) attributes `sattrname' selects the named
 * entry. On success *offsetp/*lengthp describe the attribute's
 * position (relative to the EA header) and total length.
 * Returns 0 on success, ENOENT when absent, EINVAL on malformed data.
 */
int
udf_extattr_search_intern(struct udf_node *node,
	uint32_t sattr, char const *sattrname,
	uint32_t *offsetp, uint32_t *lengthp)
{
	struct extattrhdr_desc *eahdr;
	struct extattr_entry *attrhdr;
	struct impl_extattr_entry *implext;
	uint32_t offset, a_l, sector_size;
	int32_t l_ea;
	uint8_t *pos;
	int error;

	/* get mountpoint */
	sector_size = node->ump->discinfo.sector_size;

	/* get information from fe/efe */
	if (node->fe) {
		l_ea = udf_rw32(node->fe->l_ea);
		eahdr = (struct extattrhdr_desc *) node->fe->data;
	} else {
		assert(node->efe);
		l_ea = udf_rw32(node->efe->l_ea);
		eahdr = (struct extattrhdr_desc *) node->efe->data;
	}

	/* something recorded here? */
	if (l_ea == 0)
		return ENOENT;

	/* check extended attribute tag; what to do if it fails? */
	error = udf_check_tag(eahdr);
	if (error)
		return EINVAL;
	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
		return EINVAL;
	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
	if (error)
		return EINVAL;

	DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));

	/* looking for Ecma-167 attributes? they start right after the hdr */
	offset = sizeof(struct extattrhdr_desc);

	/* looking for either implemenation use or application use */
	if (sattr == 2048) {	/* [4/48.10.8] */
		offset = udf_rw32(eahdr->impl_attr_loc);
		if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
			return ENOENT;
	}
	if (sattr == 65536) {	/* [4/48.10.9] */
		offset = udf_rw32(eahdr->appl_attr_loc);
		if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
			return ENOENT;
	}

	/* paranoia check offset and l_ea */
	if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
		return EINVAL;

	DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));

	/* find our extended attribute */
	l_ea -= offset;
	pos = (uint8_t *) eahdr + offset;
	while (l_ea >= sizeof(struct extattr_entry)) {
		DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
		attrhdr = (struct extattr_entry *) pos;
		implext = (struct impl_extattr_entry *) pos;

		/* get complete attribute length and check for roque values */
		a_l = udf_rw32(attrhdr->a_l);
		DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
			udf_rw32(attrhdr->type),
			attrhdr->subtype, a_l, l_ea));
		if ((a_l == 0) || (a_l > l_ea))
			return EINVAL;

		/*
		 * NOTE(review): `attrhdr->type' is compared against the
		 * host-order `sattr' without udf_rw32() here, unlike the
		 * DPRINTF above — harmless on little-endian machines, but
		 * worth verifying on big-endian ones.
		 */
		if (attrhdr->type != sattr)
			goto next_attribute;

		/* we might have found it! */
		if (attrhdr->type < 2048) {	/* Ecma-167 attribute */
			*offsetp = offset;
			*lengthp = a_l;
			return 0;	/* success */
		}

		/*
		 * Implementation use and application use extended attributes
		 * have a name to identify. They share the same structure only
		 * UDF implementation use extended attributes have a checksum
		 * we need to check
		 */
		DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id));
		if (strcmp(implext->imp_id.id, sattrname) == 0) {
			/* we have found our appl/implementation attribute */
			*offsetp = offset;
			*lengthp = a_l;
			return 0;	/* success */
		}

next_attribute:
		/* next attribute */
		pos += a_l;
		l_ea -= a_l;
		offset += a_l;
	}
	/* not found */
	return ENOENT;
}
/*
 * Append an extended attribute to the (e)fe-embedded extended
 * attribute space of a freshly created file descriptor (l_ad must
 * still be zero). Creates an empty EA header when none is recorded
 * yet and keeps the header's implementation/application attribute
 * location fields consistent with the new layout.
 */
static void
udf_extattr_insert_internal(struct udf_mount *ump, union dscrptr *dscr,
	struct extattr_entry *extattr)
{
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct extattrhdr_desc *extattrhdr;
	struct impl_extattr_entry *implext;
	uint32_t impl_attr_loc, appl_attr_loc, l_ea, a_l, exthdr_len;
	uint32_t *l_eap, l_ad;
	uint16_t *spos;
	uint8_t *bpos, *data;

	/* locate the EA area and its recorded length in the fe/efe */
	if (udf_rw16(dscr->tag.id) == TAGID_FENTRY) {
		fe = &dscr->fe;
		data = fe->data;
		l_eap = &fe->l_ea;
		l_ad = udf_rw32(fe->l_ad);
	} else if (udf_rw16(dscr->tag.id) == TAGID_EXTFENTRY) {
		efe = &dscr->efe;
		data = efe->data;
		l_eap = &efe->l_ea;
		l_ad = udf_rw32(efe->l_ad);
	} else {
		panic("Bad tag passed to udf_extattr_insert_internal");
	}

	/* can't append already written to file descriptors yet */
	assert(l_ad == 0);
	__USE(l_ad);

	/* should have a header! */
	extattrhdr = (struct extattrhdr_desc *) data;
	l_ea = udf_rw32(*l_eap);
	if (l_ea == 0) {
		/* create empty extended attribute header */
		exthdr_len = sizeof(struct extattrhdr_desc);

		udf_inittag(ump, &extattrhdr->tag, TAGID_EXTATTR_HDR,
			/* loc */ 0);
		extattrhdr->impl_attr_loc = udf_rw32(exthdr_len);
		extattrhdr->appl_attr_loc = udf_rw32(exthdr_len);
		extattrhdr->tag.desc_crc_len = udf_rw16(8);

		/* record extended attribute header length */
		l_ea = exthdr_len;
		*l_eap = udf_rw32(l_ea);
	}

	/* extract locations; NOT_PRESENT means "at the current end" */
	impl_attr_loc = udf_rw32(extattrhdr->impl_attr_loc);
	appl_attr_loc = udf_rw32(extattrhdr->appl_attr_loc);
	if (impl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
		impl_attr_loc = l_ea;
	if (appl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
		appl_attr_loc = l_ea;

	/* Ecma 167 EAs must precede impl/appl use attributes */
	if (udf_rw32(extattr->type) < 2048) {
		assert(impl_attr_loc == l_ea);
		assert(appl_attr_loc == l_ea);
	}

	/* implementation use extended attributes */
	if (udf_rw32(extattr->type) == 2048) {
		assert(appl_attr_loc == l_ea);

		/* calculate and write extended attribute header checksum */
		implext = (struct impl_extattr_entry *) extattr;
		assert(udf_rw32(implext->iu_l) == 4);	/* [UDF 3.3.4.5] */
		spos = (uint16_t *) implext->data;
		*spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
	}

	/* application use extended attributes not supported (yet) */
	assert(udf_rw32(extattr->type) != 65536);
	assert(appl_attr_loc == l_ea);

	/* append the attribute at the end of the current space */
	bpos = data + udf_rw32(*l_eap);
	a_l = udf_rw32(extattr->a_l);

	/* update impl. attribute locations */
	if (udf_rw32(extattr->type) < 2048) {
		impl_attr_loc = l_ea + a_l;
		appl_attr_loc = l_ea + a_l;
	}
	if (udf_rw32(extattr->type) == 2048) {
		appl_attr_loc = l_ea + a_l;
	}

	/* copy and advance */
	memcpy(bpos, extattr, a_l);
	l_ea += a_l;
	*l_eap = udf_rw32(l_ea);

	/* do the `dance` again backwards: map end-offsets to NOT_PRESENT */
	if (udf_rw16(ump->logical_vol->tag.descriptor_ver) != 2) {
		if (impl_attr_loc == l_ea)
			impl_attr_loc = UDF_IMPL_ATTR_LOC_NOT_PRESENT;
		if (appl_attr_loc == l_ea)
			appl_attr_loc = UDF_APPL_ATTR_LOC_NOT_PRESENT;
	}

	/* store offsets */
	extattrhdr->impl_attr_loc = udf_rw32(impl_attr_loc);
	extattrhdr->appl_attr_loc = udf_rw32(appl_attr_loc);
}
/* --------------------------------------------------------------------- */
/*
 * Read the "*UDF VAT LVExtension" implementation-use extended
 * attribute of the VAT node and, when its recorded unique-id matches
 * the VAT node's unique id, refresh the logvol info (file/directory
 * counts, logvol name) from it. An out-of-date extension is
 * neutralised by renaming it to "*UDF FreeEASpace".
 * Returns 0 on success (including the out-of-date case) or an errno
 * when the attribute is absent or malformed.
 */
static int
udf_update_lvid_from_vat_extattr(struct udf_node *vat_node)
{
	struct udf_mount *ump;
	struct udf_logvol_info *lvinfo;
	struct impl_extattr_entry *implext;
	struct vatlvext_extattr_entry lvext;
	const char *extstr = "*UDF VAT LVExtension";
	uint64_t vat_uniqueid;
	uint32_t offset, a_l;
	uint8_t *ea_start, *lvextpos;
	int error;

	/* get mountpoint and lvinfo */
	ump = vat_node->ump;
	lvinfo = ump->logvol_info;

	/* get information from fe/efe */
	if (vat_node->fe) {
		vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
		ea_start = vat_node->fe->data;
	} else {
		vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
		ea_start = vat_node->efe->data;
	}

	error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
	if (error)
		return error;

	implext = (struct impl_extattr_entry *) (ea_start + offset);
	error = udf_impl_extattr_check(implext);
	if (error)
		return error;

	/* paranoia */
	if (a_l != sizeof(*implext) -1 + udf_rw32(implext->iu_l) + sizeof(lvext)) {
		DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n"));
		return EINVAL;
	}

	/*
	 * we have found our "VAT LVExtension attribute. BUT due to a
	 * bug in the specification it might not be word aligned so
	 * copy first to avoid panics on some machines (!!)
	 */
	DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n"));
	lvextpos = implext->data + udf_rw32(implext->iu_l);
	memcpy(&lvext, lvextpos, sizeof(lvext));

	/* check if it was updated the last time */
	if (udf_rw64(lvext.unique_id_chk) == vat_uniqueid) {
		lvinfo->num_files = lvext.num_files;
		lvinfo->num_directories = lvext.num_directories;
		udf_update_logvolname(ump, lvext.logvol_id);
	} else {
		DPRINTF(VOLUMES, ("VAT LVExtension out of date\n"));
		/* replace VAT LVExt by free space EA */
		memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE);
		strcpy(implext->imp_id.id, "*UDF FreeEASpace");
		udf_calc_impl_extattr_checksum(implext);
	}

	return 0;
}
/*
 * Write the current logvol info (unique id, file/directory counts and
 * logvol name) back into the VAT node's "*UDF VAT LVExtension"
 * extended attribute, when that attribute exists and checks out.
 * Returns 0 on success or an errno when the attribute is absent or
 * malformed.
 */
static int
udf_update_vat_extattr_from_lvid(struct udf_node *vat_node)
{
	struct udf_mount *ump;
	struct udf_logvol_info *lvinfo;
	struct impl_extattr_entry *implext;
	struct vatlvext_extattr_entry lvext;
	const char *extstr = "*UDF VAT LVExtension";
	uint64_t vat_uniqueid;
	uint32_t offset, a_l;
	uint8_t *ea_start, *lvextpos;
	int error;

	/* get mountpoint and lvinfo */
	ump = vat_node->ump;
	lvinfo = ump->logvol_info;

	/* get information from fe/efe */
	if (vat_node->fe) {
		vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
		ea_start = vat_node->fe->data;
	} else {
		vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
		ea_start = vat_node->efe->data;
	}

	error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
	if (error)
		return error;
	/* found, it existed */

	/* paranoia */
	implext = (struct impl_extattr_entry *) (ea_start + offset);
	error = udf_impl_extattr_check(implext);
	if (error) {
		DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n"));
		return error;
	}
	/* it is correct */

	/*
	 * we have found our "VAT LVExtension attribute. BUT due to a
	 * bug in the specification it might not be word aligned so
	 * copy first to avoid panics on some machines (!!)
	 */
	DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n"));
	lvextpos = implext->data + udf_rw32(implext->iu_l);

	/*
	 * NOTE(review): unique_id_chk is stored here without udf_rw64()
	 * while the reader compares udf_rw64(unique_id_chk) against the
	 * host-order id — equivalent only on little-endian machines;
	 * verify on big-endian.
	 */
	lvext.unique_id_chk = vat_uniqueid;
	lvext.num_files = lvinfo->num_files;
	lvext.num_directories = lvinfo->num_directories;
	memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128);

	memcpy(lvextpos, &lvext, sizeof(lvext));

	return 0;
}
/* --------------------------------------------------------------------- */
/*
 * Copy `size' bytes starting at `offset' out of the in-core VAT table
 * into `blob'. Returns EINVAL when the requested range extends past
 * the end of the VAT translation entries.
 */
int
udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
{
	struct udf_mount *ump = vat_node->ump;
	uint32_t vat_end = ump->vat_offset + ump->vat_entries * 4;

	if (offset + size > vat_end)
		return EINVAL;

	memcpy(blob, ump->vat_table + offset, size);
	return 0;
}
/*
 * Copy `size' bytes from `blob' into the in-core VAT table at
 * `offset', growing the backing allocation in UDF_VAT_CHUNKSIZE
 * multiples when needed. Returns ENOMEM when the table can't be
 * extended.
 *
 * Fix: the original grew the allocation by exactly one chunk, which
 * overruns the buffer in the memcpy below when a write extends more
 * than UDF_VAT_CHUNKSIZE past the current allocation; we now round
 * the required length up to a chunk multiple.
 */
int
udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
{
	struct udf_mount *ump = vat_node->ump;
	uint32_t offset_high, new_alloc_len;
	uint8_t *new_vat_table;

	/* extent VAT allocation if needed */
	offset_high = offset + size;
	if (offset_high >= ump->vat_table_alloc_len) {
		/* round the needed size up to a whole number of chunks */
		new_alloc_len = ((offset_high / UDF_VAT_CHUNKSIZE) + 1) *
			UDF_VAT_CHUNKSIZE;
		new_vat_table = realloc(ump->vat_table,
			new_alloc_len,
			M_UDFVOLD, M_WAITOK | M_CANFAIL);
		if (!new_vat_table) {
			printf("udf_vat_write: can't extent VAT, out of mem\n");
			return ENOMEM;
		}
		ump->vat_table = new_vat_table;
		ump->vat_table_alloc_len = new_alloc_len;
	}

	ump->vat_table_len = MAX(ump->vat_table_len, offset_high);
	memcpy(ump->vat_table + offset, blob, size);
	return 0;
}
/* --------------------------------------------------------------------- */
/* TODO support previous VAT location writeout */
/*
 * Update the VAT file's identifying structure before the VAT contents
 * are written out: for old 1.50-style VATs (icb file type 0) append
 * the regid tail, for VAT2 files (UDF_ICB_FILETYPE_VAT) compose the
 * header at offset 0. Also advances the VAT node's unique id.
 * Returns the error from the head/tail writeout, if any.
 */
static int
udf_update_vat_descriptor(struct udf_mount *ump)
{
	struct udf_node *vat_node = ump->vat_node;
	struct udf_logvol_info *lvinfo = ump->logvol_info;
	struct icb_tag *icbtag;
	struct udf_oldvat_tail *oldvat_tl;
	struct udf_vat *vat;
	uint64_t unique_id;
	uint32_t lb_size;
	uint8_t *raw_vat;
	int filetype, error;

	KASSERT(vat_node);
	KASSERT(lvinfo);
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* get our new unique_id */
	unique_id = udf_advance_uniqueid(ump);

	/* get information from fe/efe */
	if (vat_node->fe) {
		icbtag = &vat_node->fe->icbtag;
		vat_node->fe->unique_id = udf_rw64(unique_id);
	} else {
		icbtag = &vat_node->efe->icbtag;
		vat_node->efe->unique_id = udf_rw64(unique_id);
	}

	/* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
	filetype = icbtag->file_type;
	KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT));

	/* allocate piece to process head or tail of VAT file */
	raw_vat = malloc(lb_size, M_TEMP, M_WAITOK);

	if (filetype == 0) {
		/*
		 * Update "*UDF VAT LVExtension" extended attribute from the
		 * lvint if present.
		 */
		udf_update_vat_extattr_from_lvid(vat_node);

		/* setup identifying regid */
		oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
		memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail));
		udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl");
		udf_add_udf_regid(ump, &oldvat_tl->id);
		oldvat_tl->prev_vat = udf_rw32(0xffffffff);

		/* write out new tail of virtual allocation table file */
		error = udf_vat_write(vat_node, raw_vat,
			sizeof(struct udf_oldvat_tail), ump->vat_entries * 4);
	} else {
		/* compose the VAT2 header */
		vat = (struct udf_vat *) raw_vat;
		memset(vat, 0, sizeof(struct udf_vat));
		vat->header_len = udf_rw16(152);	/* as per spec */
		vat->impl_use_len = udf_rw16(0);
		memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128);
		vat->prev_vat = udf_rw32(0xffffffff);
		vat->num_files = lvinfo->num_files;
		vat->num_directories = lvinfo->num_directories;
		vat->min_udf_readver = lvinfo->min_udf_readver;
		vat->min_udf_writever = lvinfo->min_udf_writever;
		vat->max_udf_writever = lvinfo->max_udf_writever;

		error = udf_vat_write(vat_node, raw_vat,
			sizeof(struct udf_vat), 0);
	}
	free(raw_vat, M_TEMP);

	return error;	/* error from the VAT head/tail writeout, if any */
}
/*
 * Write the complete in-core VAT file out to disc: update the VAT
 * head/tail descriptor, write the table contents through the VAT
 * node's vnode and flush/fsync it. Returns 0 on success or the first
 * error encountered.
 */
int
udf_writeout_vat(struct udf_mount *ump)
{
	struct udf_node *vat_node = ump->vat_node;
	int error;

	KASSERT(vat_node);
	DPRINTF(CALL, ("udf_writeout_vat\n"));

//	mutex_enter(&ump->allocate_mutex);
	udf_update_vat_descriptor(ump);

	/* write out the VAT contents ; TODO intelligent writing */
	error = vn_rdwr(UIO_WRITE, vat_node->vnode,
		ump->vat_table, ump->vat_table_len, 0,
		UIO_SYSSPACE, 0, FSCRED, NULL, NULL);
	if (error) {
		printf("udf_writeout_vat: failed to write out VAT contents\n");
		goto out;
	}

//	mutex_exit(&ump->allocate_mutex);

	/* flush dirty buffers and sync the node's metadata */
	error = vflushbuf(ump->vat_node->vnode, FSYNC_WAIT);
	if (error)
		goto out;
	error = VOP_FSYNC(ump->vat_node->vnode,
			FSCRED, FSYNC_WAIT, 0, 0);
	if (error)
		printf("udf_writeout_vat: error writing VAT node!\n");
out:
	return error;
}
/* --------------------------------------------------------------------- */
/*
* Read in relevant pieces of VAT file and check if its indeed a VAT file
* descriptor. If OK, read in complete VAT file.
*/
/*
 * Verify that vat_node really references a VAT file and, if so, read the
 * complete VAT table into memory and attach it to the mount.  Handles both
 * the old UDF 1.50 tail format (icb filetype 0) and the newer headered
 * format (UDF_ICB_FILETYPE_VAT).  On success ownership of the allocated
 * table is transferred to ump->vat_table and the logical volume integrity
 * is marked closed; on failure ENOENT or an I/O error is returned and
 * nothing is kept.
 */
static int
udf_check_for_vat(struct udf_node *vat_node)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct timestamp *mtime;
	struct udf_vat *vat;
	struct udf_oldvat_tail *oldvat_tl;
	struct udf_logvol_info *lvinfo;
	uint64_t unique_id;
	uint32_t vat_length;
	uint32_t vat_offset, vat_entries, vat_table_alloc_len;
	uint32_t sector_size;
	uint32_t *raw_vat;
	uint8_t *vat_table;
	char *regid_name;
	int filetype;
	int error;
	/* vat_length is really 64 bits though impossible */
	DPRINTF(VOLUMES, ("Checking for VAT\n"));
	if (!vat_node)
		return ENOENT;
	/* get mount info */
	ump = vat_node->ump;
	sector_size = udf_rw32(ump->logical_vol->lb_size);
	/* check assertions */
	assert(vat_node->fe || vat_node->efe);
	assert(ump->logvol_integrity);
	/* set vnode type to regular file or we can't read from it! */
	vat_node->vnode->v_type = VREG;
	/* get information from fe/efe */
	if (vat_node->fe) {
		vat_length = udf_rw64(vat_node->fe->inf_len);
		icbtag = &vat_node->fe->icbtag;
		mtime = &vat_node->fe->mtime;
		unique_id = udf_rw64(vat_node->fe->unique_id);
	} else {
		vat_length = udf_rw64(vat_node->efe->inf_len);
		icbtag = &vat_node->efe->icbtag;
		mtime = &vat_node->efe->mtime;
		unique_id = udf_rw64(vat_node->efe->unique_id);
	}
	/* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
	filetype = icbtag->file_type;
	if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
		return ENOENT;
	DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
	/* round the allocation up to whole VAT chunks */
	vat_table_alloc_len =
		((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
			* UDF_VAT_CHUNKSIZE;
	vat_table = malloc(vat_table_alloc_len, M_UDFVOLD,
		M_CANFAIL | M_WAITOK);
	if (vat_table == NULL) {
		printf("allocation of %d bytes failed for VAT\n",
			vat_table_alloc_len);
		return ENOMEM;
	}
	/* allocate piece to read in head or tail of VAT file */
	raw_vat = malloc(sector_size, M_TEMP, M_WAITOK);
	/*
	 * Check contents of the file for the old 1.50 VAT table format.
	 * It is notoriously broken and although some implementations support
	 * an extension as defined in the UDF 1.50 errata document, it is
	 * doubtful to be usable since a lot of implementations don't
	 * maintain it.
	 */
	lvinfo = ump->logvol_info;
	if (filetype == 0) {
		/* definition */
		vat_offset = 0;
		vat_entries = (vat_length-36)/4;
		/* read in tail of virtual allocation table file */
		error = vn_rdwr(UIO_READ, vat_node->vnode,
				(uint8_t *) raw_vat,
				sizeof(struct udf_oldvat_tail),
				vat_entries * 4,
				UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
				NULL, NULL);
		if (error)
			goto out;
		/* check 1.50 VAT */
		oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
		regid_name = (char *) oldvat_tl->id.id;
		error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
		if (error) {
			DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
			error = ENOENT;
			goto out;
		}
		/*
		 * update LVID from "*UDF VAT LVExtension" extended attribute
		 * if present.
		 */
		udf_update_lvid_from_vat_extattr(vat_node);
	} else {
		/* read in head of virtual allocation table file */
		error = vn_rdwr(UIO_READ, vat_node->vnode,
				(uint8_t *) raw_vat,
				sizeof(struct udf_vat), 0,
				UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
				NULL, NULL);
		if (error)
			goto out;
		/* definition */
		vat = (struct udf_vat *) raw_vat;
		/*
		 * FIX: header_len is stored in on-disc byte order -- the
		 * writer side stores it as udf_rw16(152) -- so it must be
		 * converted on reading too, or the VAT offset and entry
		 * count are garbage on big-endian machines.
		 */
		vat_offset = udf_rw16(vat->header_len);
		vat_entries = (vat_length - vat_offset)/4;
		assert(lvinfo);
		/* these fields are kept in on-disc byte order on both sides,
		 * so they are copied over raw (no byte swapping) */
		lvinfo->num_files = vat->num_files;
		lvinfo->num_directories = vat->num_directories;
		lvinfo->min_udf_readver = vat->min_udf_readver;
		lvinfo->min_udf_writever = vat->min_udf_writever;
		lvinfo->max_udf_writever = vat->max_udf_writever;
		udf_update_logvolname(ump, vat->logvol_id);
	}
	/* read in complete VAT file */
	error = vn_rdwr(UIO_READ, vat_node->vnode,
			vat_table,
			vat_length, 0,
			UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
			NULL, NULL);
	if (error)
		printf("read in of complete VAT file failed (error %d)\n",
			error);
	if (error)
		goto out;
	DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n"));
	ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id);
	ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
	ump->logvol_integrity->time = *mtime;
	/* hand the VAT table over to the mount structure */
	ump->vat_table_len = vat_length;
	ump->vat_table_alloc_len = vat_table_alloc_len;
	ump->vat_table = vat_table;
	ump->vat_offset = vat_offset;
	ump->vat_entries = vat_entries;
	ump->vat_last_free_lb = 0;	/* start at beginning */
out:
	if (error) {
		if (vat_table)
			free(vat_table, M_UDFVOLD);
	}
	free(raw_vat, M_TEMP);
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Search for the VAT by walking backwards from the last possible VAT
 * location down to the earliest candidate.  A candidate node is accepted
 * when udf_check_for_vat() validates it; the accepted node is kept
 * referenced in ump->vat_node and marked as a system file.
 */
static int
udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping)
{
	/*
	 * FIX: initialise vat_node; if the first udf_get_node() call fails
	 * without setting it, the `if (vat_node)' tests below would read an
	 * uninitialised pointer (the loop already NULLs it after vput()).
	 */
	struct udf_node *vat_node = NULL;
	struct long_ad icb_loc;
	uint32_t early_vat_loc, vat_loc;
	int error;
	/* mapping info not needed */
	mapping = mapping;
	vat_loc = ump->last_possible_vat_location;
	early_vat_loc = vat_loc - 256;	/* 8 blocks of 32 sectors */
	DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n",
		vat_loc, early_vat_loc));
	/* never search before the start of the session */
	early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location);
	DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n",
		vat_loc, early_vat_loc));
	/* start looking from the end of the range */
	do {
		DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc));
		icb_loc.loc.part_num = udf_rw16(UDF_VTOP_RAWPART);
		icb_loc.loc.lb_num = udf_rw32(vat_loc);
		error = udf_get_node(ump, &icb_loc, &vat_node);
		if (!error) {
			error = udf_check_for_vat(vat_node);
			DPRINTFIF(VOLUMES, !error,
				("VAT accepted at %d\n", vat_loc));
			if (!error)
				break;
		}
		/* reject this candidate and release it */
		if (vat_node) {
			vput(vat_node->vnode);
			vat_node = NULL;
		}
		vat_loc--;	/* walk backwards */
	} while (vat_loc >= early_vat_loc);
	/* keep our VAT node around */
	if (vat_node) {
		UDF_SET_SYSTEMFILE(vat_node->vnode);
		ump->vat_node = vat_node;
	}
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Read in one of the sparing tables of a sparable partition mapping.  The
 * mapping lists up to n_st redundant sparing table locations; the first one
 * that reads in with a valid TAGID_SPARING_TABLE tag is kept in
 * ump->sparing_table.  Returns 0 on success or ENOENT when none could be
 * read.
 */
static int
udf_read_sparables(struct udf_mount *ump, union udf_pmap *mapping)
{
	union dscrptr *dscr;
	struct part_map_spare *pms = &mapping->pms;
	uint32_t lb_num;
	int spar, error;
	/*
	 * The partition mapping passed on to us specifies the information we
	 * need to locate and initialise the sparable partition mapping
	 * information we need.
	 */
	DPRINTF(VOLUMES, ("Read sparable table\n"));
	ump->sparable_packet_size = udf_rw16(pms->packet_len);
	KASSERT(ump->sparable_packet_size >= ump->packet_size);	/* XXX */
	for (spar = 0; spar < pms->n_st; spar++) {
		/*
		 * FIX: st_loc[] is an on-disc field kept in disc byte order,
		 * just like packet_len above; convert it before use or the
		 * read goes to the wrong sector on big-endian machines.
		 */
		lb_num = udf_rw32(pms->st_loc[spar]);
		DPRINTF(VOLUMES, ("Checking for sparing table %d\n", lb_num));
		error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
		if (!error && dscr) {
			if (udf_rw16(dscr->tag.id) == TAGID_SPARING_TABLE) {
				/* replace a possibly stale older table */
				if (ump->sparing_table)
					free(ump->sparing_table, M_UDFVOLD);
				/* ownership of dscr moves to the mount */
				ump->sparing_table = &dscr->spt;
				dscr = NULL;
				DPRINTF(VOLUMES,
				    ("Sparing table accepted (%d entries)\n",
				     udf_rw16(ump->sparing_table->rt_l)));
				break;	/* we're done */
			}
		}
		if (dscr)
			free(dscr, M_UDFVOLD);
	}
	if (ump->sparing_table)
		return 0;
	return ENOENT;
}
/* --------------------------------------------------------------------- */
/*
 * Read in the file entries of the metadata files of a metadata partition
 * mapping: the metadata file, its (optional) mirror and the (optional)
 * bitmap file.  Successfully read nodes are marked as system files and
 * kept on the mount.  For read-only mounts the mirror may substitute a
 * broken metadata file; for read/write mounts EROFS is returned on error
 * since metadata writing is not implemented yet.
 */
static int
udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping)
{
	struct part_map_meta *pmm = &mapping->pmm;
	struct long_ad icb_loc;
	struct vnode *vp;
	uint16_t raw_phys_part, phys_part;
	int error;
	/*
	 * BUGALERT: some rogue implementations use random physical
	 * partition numbers to break other implementations so lookup
	 * the number.
	 */
	/* extract our allocation parameters set up on format */
	ump->metadata_alloc_unit_size = udf_rw32(mapping->pmm.alloc_unit_size);
	ump->metadata_alignment_unit_size = udf_rw16(mapping->pmm.alignment_unit_size);
	ump->metadata_flags = mapping->pmm.flags;
	DPRINTF(VOLUMES, ("Reading in Metadata files\n"));
	raw_phys_part = udf_rw16(pmm->part_num);
	phys_part = udf_find_raw_phys(ump, raw_phys_part);
	icb_loc.loc.part_num = udf_rw16(phys_part);
	DPRINTF(VOLUMES, ("Metadata file\n"));
	/* NOTE(review): the meta_*_lbn fields are copied without byte
	 * swapping; both sides appear to hold on-disc byte order -- confirm
	 * against the struct definitions */
	icb_loc.loc.lb_num = pmm->meta_file_lbn;
	error = udf_get_node(ump, &icb_loc, &ump->metadata_node);
	if (ump->metadata_node) {
		vp = ump->metadata_node->vnode;
		UDF_SET_SYSTEMFILE(vp);
	}
	icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn;
	/* 0xffffffff marks `not present'; that value is byte-order invariant */
	if (icb_loc.loc.lb_num != -1) {
		DPRINTF(VOLUMES, ("Metadata copy file\n"));
		error = udf_get_node(ump, &icb_loc, &ump->metadatamirror_node);
		if (ump->metadatamirror_node) {
			vp = ump->metadatamirror_node->vnode;
			UDF_SET_SYSTEMFILE(vp);
		}
	}
	icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn;
	if (icb_loc.loc.lb_num != -1) {
		DPRINTF(VOLUMES, ("Metadata bitmap file\n"));
		error = udf_get_node(ump, &icb_loc, &ump->metadatabitmap_node);
		if (ump->metadatabitmap_node) {
			vp = ump->metadatabitmap_node->vnode;
			UDF_SET_SYSTEMFILE(vp);
		}
	}
	/* if we're mounting read-only we relax the requirements */
	if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) {
		error = EFAULT;
		if (ump->metadata_node)
			error = 0;
		if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) {
			printf( "udf mount: Metadata file not readable, "
				"substituting Metadata copy file\n");
			ump->metadata_node = ump->metadatamirror_node;
			ump->metadatamirror_node = NULL;
			error = 0;
		}
	} else {
		/* mounting read/write */
		/* XXX DISABLED! metadata writing is not working yet XXX */
		/* NOTE(review): `error' here holds the result of the LAST
		 * udf_get_node() call above, not necessarily the main
		 * metadata file's -- verify this is intended */
		if (error)
			error = EROFS;
	}
	DPRINTFIF(VOLUMES, error, ("udf mount: failed to read "
				   "metadata files\n"));
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Second pass over the partition mappings of the logical volume: read in
 * whatever each mapping type needs (VAT for virtual mappings, sparing
 * tables for sparable mappings, metadata file nodes for metadata
 * mappings).  For writable mounts the unallocated/free space tables are
 * read in as well.  Returns 0 on success or ENOENT / an I/O error.
 */
int
udf_read_vds_tables(struct udf_mount *ump)
{
	union udf_pmap *mapping;
/*	struct udf_args *args = &ump->mount_args; */
	uint32_t n_pm;
	uint32_t log_part;
	uint8_t *pmap_pos;
	int pmap_size;
	int error;
	/* Iterate (again) over the part mappings for locations */
	n_pm = udf_rw32(ump->logical_vol->n_pm);   /* num partmaps */
	pmap_pos = ump->logical_vol->maps;
	for (log_part = 0; log_part < n_pm; log_part++) {
		mapping = (union udf_pmap *) pmap_pos;
		switch (ump->vtop_tp[log_part]) {
		case UDF_VTOP_TYPE_PHYS :
			/* nothing */
			break;
		case UDF_VTOP_TYPE_VIRT :
			/* search and load VAT */
			error = udf_search_vat(ump, mapping);
			if (error)
				return ENOENT;
			break;
		case UDF_VTOP_TYPE_SPARABLE :
			/* load one of the sparable tables */
			error = udf_read_sparables(ump, mapping);
			if (error)
				return ENOENT;
			break;
		case UDF_VTOP_TYPE_META :
			/* load the associated file descriptors */
			error = udf_read_metadata_nodes(ump, mapping);
			if (error)
				return ENOENT;
			break;
		default:
			break;
		}
		/* second byte of a partition mapping is its length */
		pmap_size = pmap_pos[1];
		pmap_pos += pmap_size;
	}
	/* read in and check unallocated and free space info if writing */
	if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) {
		error = udf_read_physical_partition_spacetables(ump);
		if (error)
			return error;
		/* also read in metadata partition spacebitmap if defined */
		error = udf_read_metadata_partition_spacetable(ump);
		return error;
	}
	return 0;
}
/* --------------------------------------------------------------------- */
/*
 * Read in the fileset descriptor (FSD) sequence of the logical volume and,
 * using the last FSD found, read in the root directory node and -- when
 * present -- the system stream directory.  The directory vnodes are
 * released again right away; they are recycled on demand later.
 * Returns 0 on success or ENOENT.
 */
int
udf_read_rootdirs(struct udf_mount *ump)
{
	union dscrptr *dscr;
/*	struct udf_args *args = &ump->mount_args; */
	struct udf_node *rootdir_node, *streamdir_node;
	struct long_ad  fsd_loc, *dir_loc;
	uint32_t lb_num, dummy;
	uint32_t fsd_len;
	int dscr_type;
	int error;
	/* TODO implement FSD reading in separate function like integrity? */
	/* get fileset descriptor sequence */
	fsd_loc = ump->logical_vol->lv_fsd_loc;
	fsd_len = udf_rw32(fsd_loc.len);
	dscr = NULL;
	error = 0;
	/* note: error is always zero on entry; the loop exits via break */
	while (fsd_len || error) {
		DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len));
		/* translate fsd_loc to lb_num */
		error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy);
		if (error)
			break;
		DPRINTF(VOLUMES, ("Reading FSD at lb %d\n", lb_num));
		error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
		/* end markers */
		if (error || (dscr == NULL))
			break;
		/* analyse */
		dscr_type = udf_rw16(dscr->tag.id);
		if (dscr_type == TAGID_TERM)
			break;
		if (dscr_type != TAGID_FSD) {
			free(dscr, M_UDFVOLD);
			return ENOENT;
		}
		/*
		 * TODO check for multiple fileset descriptors; its only
		 * picking the last now. Also check for FSD
		 * correctness/interpretability
		 */
		/* keep the last FSD found, freeing any previous one */
		if (ump->fileset_desc) {
			free(ump->fileset_desc, M_UDFVOLD);
		}
		/* ownership of the descriptor moves to the mount */
		ump->fileset_desc = &dscr->fsd;
		dscr = NULL;
		/* continue to the next fsd */
		fsd_len -= ump->discinfo.sector_size;
		fsd_loc.loc.lb_num = udf_rw32(udf_rw32(fsd_loc.loc.lb_num)+1);
		/* follow up to fsd->next_ex (long_ad) if its not null */
		if (udf_rw32(ump->fileset_desc->next_ex.len)) {
			DPRINTF(VOLUMES, ("follow up FSD extent\n"));
			fsd_loc = ump->fileset_desc->next_ex;
			fsd_len = udf_rw32(ump->fileset_desc->next_ex.len);
		}
	}
	if (dscr)
		free(dscr, M_UDFVOLD);
	/* there has to be one */
	if (ump->fileset_desc == NULL)
		return ENOENT;
	DPRINTF(VOLUMES, ("FSD read in fine\n"));
	DPRINTF(VOLUMES, ("Updating fsd logical volume id\n"));
	udf_update_logvolname(ump, ump->logical_vol->logvol_id);
	/*
	 * Now the FSD is known, read in the rootdirectory and if one exists,
	 * the system stream dir. Some files in the system streamdir are not
	 * wanted in this implementation since they are not maintained. If
	 * writing is enabled we'll delete these files if they exist.
	 */
	rootdir_node = streamdir_node = NULL;
	dir_loc = NULL;
	/* try to read in the rootdir */
	dir_loc = &ump->fileset_desc->rootdir_icb;
	error = udf_get_node(ump, dir_loc, &rootdir_node);
	if (error)
		return ENOENT;
	/* apparently it read in fine */
	/*
	 * Try the system stream directory; not very likely in the ones we
	 * test, but for completeness.
	 */
	dir_loc = &ump->fileset_desc->streamdir_icb;
	if (udf_rw32(dir_loc->len)) {
		printf("udf_read_rootdirs: streamdir defined ");
		error = udf_get_node(ump, dir_loc, &streamdir_node);
		if (error) {
			printf("but error in streamdir reading\n");
		} else {
			printf("but ignored\n");
			/*
			 * TODO process streamdir `baddies' i.e. files we dont
			 * want if R/W
			 */
		}
	}
	DPRINTF(VOLUMES, ("Rootdir(s) read in fine\n"));
	/* release the vnodes again; they'll be auto-recycled later */
	if (streamdir_node) {
		vput(streamdir_node->vnode);
	}
	if (rootdir_node) {
		vput(rootdir_node->vnode);
	}
	return 0;
}
/* --------------------------------------------------------------------- */
/* To make absolutely sure we are NOT returning zero, add one :) */
/*
 * Identity for a node: the logical block number of its ICB plus one, so
 * the result can never be zero.  Good enough since each mountpoint keeps
 * its own chain.
 */
long
udf_get_node_id(const struct long_ad *icbptr)
{
	/* the +1 guarantees a non-zero identifier */
	return 1 + udf_rw32(icbptr->loc.lb_num);
}
int
udf_compare_icb(const struct long_ad *a, const struct long_ad *b)
{
if (udf_rw16(a->loc.part_num) < udf_rw16(b->loc.part_num))
return -1;
if (udf_rw16(a->loc.part_num) > udf_rw16(b->loc.part_num))
return 1;
if (udf_rw32(a->loc.lb_num) < udf_rw32(b->loc.lb_num))
return -1;
if (udf_rw32(a->loc.lb_num) > udf_rw32(b->loc.lb_num))
return 1;
return 0;
}
/* rb-tree callback: order two udf_nodes by their ICB location. */
static int
udf_compare_rbnodes(void *ctx, const void *a, const void *b)
{
	const struct udf_node *left = a;
	const struct udf_node *right = b;

	return udf_compare_icb(&left->loc, &right->loc);
}
/* rb-tree callback: order a udf_node against a bare ICB key. */
static int
udf_compare_rbnode_icb(void *ctx, const void *a, const void *key)
{
	const struct udf_node *node = a;
	const struct long_ad * const icb_key = key;

	return udf_compare_icb(&node->loc, icb_key);
}
/* rb-tree operations vector for the per-mount tree of in-core udf_nodes,
 * keyed by ICB location */
static const rb_tree_ops_t udf_node_rbtree_ops = {
	.rbto_compare_nodes = udf_compare_rbnodes,
	.rbto_compare_key = udf_compare_rbnode_icb,
	.rbto_node_offset = offsetof(struct udf_node, rbnode),
	.rbto_context = NULL
};
/* Initialise the mount's ICB-keyed rb-tree of in-core udf_nodes. */
void
udf_init_nodes_tree(struct udf_mount *ump)
{
	rb_tree_init(&ump->udf_node_tree, &udf_node_rbtree_ops);
}
/* --------------------------------------------------------------------- */
/*
 * Validate (and if needed repair) the start of the session to be written.
 * When the session's first track is blank or reserved, the ISO9660 volume
 * recognition sequence (VRS) of a former session is copied -- or a minimal
 * BEA01/NSR0x/TEA01 sequence is generated -- and written at the new session
 * start, followed by blank sectors up to S+256 and the first anchor, so the
 * disc stays recognisable.  Returns 0 on success or an error.
 */
static int
udf_validate_session_start(struct udf_mount *ump)
{
	struct mmc_trackinfo trackinfo;
	struct vrs_desc *vrs;
	uint32_t tracknr, sessionnr, sector, sector_size;
	uint32_t iso9660_vrs, write_track_start;
	uint8_t *buffer, *blank, *pos;
	int blks, max_sectors, vrs_len;
	int error;
	/* disc appendable? */
	if (ump->discinfo.disc_state == MMC_STATE_FULL)
		return EROFS;
	/* already written here? if so, there should be an ISO VDS */
	if (ump->discinfo.last_session_state == MMC_STATE_INCOMPLETE)
		return 0;
	/*
	 * Check if the first track of the session is blank and if so, copy or
	 * create a dummy ISO descriptor so the disc is valid again.
	 */
	tracknr = ump->discinfo.first_track_last_session;
	memset(&trackinfo, 0, sizeof(struct mmc_trackinfo));
	trackinfo.tracknr = tracknr;
	error = udf_update_trackinfo(ump, &trackinfo);
	if (error)
		return error;
	udf_dump_trackinfo(&trackinfo);
	KASSERT(trackinfo.flags & (MMC_TRACKINFO_BLANK | MMC_TRACKINFO_RESERVED));
	KASSERT(trackinfo.sessionnr > 1);
	KASSERT(trackinfo.flags & MMC_TRACKINFO_NWA_VALID);
	write_track_start = trackinfo.next_writable;
	/* we have to copy the ISO VRS from a former session */
	DPRINTF(VOLUMES, ("validate_session_start: "
		"blank or reserved track, copying VRS\n"));
	/* sessionnr should be the session we're mounting */
	sessionnr = ump->mount_args.sessionnr;
	/* start at the first track */
	tracknr = ump->discinfo.first_track;
	while (tracknr <= ump->discinfo.num_tracks) {
		trackinfo.tracknr = tracknr;
		error = udf_update_trackinfo(ump, &trackinfo);
		if (error) {
			DPRINTF(VOLUMES, ("failed to get trackinfo; aborting\n"));
			return error;
		}
		if (trackinfo.sessionnr == sessionnr)
			break;
		tracknr++;
	}
	if (trackinfo.sessionnr != sessionnr) {
		DPRINTF(VOLUMES, ("failed to get trackinfo; aborting\n"));
		return ENOENT;
	}
	DPRINTF(VOLUMES, ("found possible former ISO VRS at\n"));
	udf_dump_trackinfo(&trackinfo);
	/*
	 * location of iso9660 vrs is defined as first sector AFTER 32kb,
	 * minimum ISO `sector size' 2048
	 */
	sector_size = ump->discinfo.sector_size;
	iso9660_vrs = ((32*1024 + sector_size - 1) / sector_size)
		+ trackinfo.track_start;
	buffer = malloc(UDF_ISO_VRS_SIZE, M_TEMP, M_WAITOK);
	max_sectors = UDF_ISO_VRS_SIZE / sector_size;
	blks = MAX(1, 2048 / sector_size);
	error = 0;
	/* read the old VRS, stopping at the terminator or an unknown id */
	for (sector = 0; sector < max_sectors; sector += blks) {
		pos = buffer + sector * sector_size;
		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
			iso9660_vrs + sector, blks);
		if (error)
			break;
		/* check this ISO descriptor */
		vrs = (struct vrs_desc *) pos;
		DPRINTF(VOLUMES, ("got VRS id `%4s`\n", vrs->identifier));
		if (strncmp(vrs->identifier, VRS_CD001, 5) == 0)
			continue;
		if (strncmp(vrs->identifier, VRS_CDW02, 5) == 0)
			continue;
		if (strncmp(vrs->identifier, VRS_BEA01, 5) == 0)
			continue;
		if (strncmp(vrs->identifier, VRS_NSR02, 5) == 0)
			continue;
		if (strncmp(vrs->identifier, VRS_NSR03, 5) == 0)
			continue;
		if (strncmp(vrs->identifier, VRS_TEA01, 5) == 0)
			break;
		/* now what? for now, end of sequence */
		break;
	}
	vrs_len = sector + blks;
	if (error) {
		DPRINTF(VOLUMES, ("error reading old ISO VRS\n"));
		DPRINTF(VOLUMES, ("creating minimal ISO VRS\n"));
		/* build a minimal BEA01/NSR0x/TEA01 sequence from scratch */
		memset(buffer, 0, UDF_ISO_VRS_SIZE);
		vrs = (struct vrs_desc *) (buffer);
		vrs->struct_type = 0;
		vrs->version = 1;
		memcpy(vrs->identifier,VRS_BEA01, 5);
		vrs = (struct vrs_desc *) (buffer + 2048);
		vrs->struct_type = 0;
		vrs->version = 1;
		if (udf_rw16(ump->logical_vol->tag.descriptor_ver) == 2) {
			memcpy(vrs->identifier,VRS_NSR02, 5);
		} else {
			memcpy(vrs->identifier,VRS_NSR03, 5);
		}
		vrs = (struct vrs_desc *) (buffer + 4096);
		vrs->struct_type = 0;
		vrs->version = 1;
		memcpy(vrs->identifier, VRS_TEA01, 5);
		vrs_len = 3*blks;
	}
	DPRINTF(VOLUMES, ("Got VRS of %d sectors long\n", vrs_len));
	/*
	 * location of iso9660 vrs is defined as first sector AFTER 32kb,
	 * minimum ISO `sector size' 2048
	 */
	sector_size = ump->discinfo.sector_size;
	iso9660_vrs = ((32*1024 + sector_size - 1) / sector_size)
		+ write_track_start;
	/* write out 32 kb */
	blank = malloc(sector_size, M_TEMP, M_WAITOK);
	memset(blank, 0, sector_size);
	error = 0;
	/* blank out the gap before the VRS location */
	for (sector = write_track_start; sector < iso9660_vrs; sector ++) {
		error = udf_write_phys_sectors(ump, UDF_C_ABSOLUTE,
			blank, sector, 1);
		if (error)
			break;
	}
	if (!error) {
		/* write out our ISO VRS */
		KASSERT(sector == iso9660_vrs);
		error = udf_write_phys_sectors(ump, UDF_C_ABSOLUTE, buffer,
				sector, vrs_len);
		sector += vrs_len;
	}
	if (!error) {
		/* fill upto the first anchor at S+256 */
		for (; sector < write_track_start+256; sector++) {
			error = udf_write_phys_sectors(ump, UDF_C_ABSOLUTE,
				blank, sector, 1);
			if (error)
				break;
		}
	}
	if (!error) {
		/* write out anchor; write at ABSOLUTE place! */
		error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_ABSOLUTE,
			(union dscrptr *) ump->anchors[0], sector, sector);
		if (error)
			printf("writeout of anchor failed!\n");
	}
	free(blank, M_TEMP);
	free(buffer, M_TEMP);
	if (error)
		printf("udf_open_session: error writing iso vrs! : "
		       "leaving disc in compromised state!\n");
	/* synchronise device caches */
	(void) udf_synchronise_caches(ump);
	return error;
}
int
udf_open_logvol(struct udf_mount *ump)
{
int logvol_integrity;
int error;
/* already/still open? */
logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
if (logvol_integrity == UDF_INTEGRITY_OPEN)
return 0;
/* can we open it ? */
if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
return EROFS;
/* setup write parameters */
DPRINTF(VOLUMES, ("Setting up write parameters\n"));
if ((error = udf_setup_writeparams(ump)) != 0)
return error;
/* determine data and metadata tracks (most likely same) */
error = udf_search_writing_tracks(ump);
if (error) {
/* most likely lack of space */
printf("udf_open_logvol: error searching writing tracks\n");
return EROFS;
}
/* writeout/update lvint on disc or only in memory */
DPRINTF(VOLUMES, ("Opening logical volume\n"));
if (ump->lvopen & UDF_OPEN_SESSION) {
/* TODO optional track reservation opening */
error = udf_validate_session_start(ump);
if (error)
return error;
/* determine data and metadata tracks again */
error = udf_search_writing_tracks(ump);
}
/* mark it open */
ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_OPEN);
/* do we need to write it out? */
if (ump->lvopen & UDF_WRITE_LVINT) {
error = udf_writeout_lvint(ump, ump->lvopen);
/* if we couldn't write it mark it closed again */
if (error) {
ump->logvol_integrity->integrity_type =
udf_rw32(UDF_INTEGRITY_CLOSED);
return error;
}
}
return 0;
}
/*
 * Close the logical volume: write out the VAT (for virtual mappings),
 * optionally close the open tracks/session and finalise the disc, write
 * out space bitmaps and metadata partition nodes when requested by the
 * ump->lvclose flags, and finally mark/write the logical volume integrity
 * as closed.  Returns 0 on success; on failure the volume is left marked
 * open.
 */
int
udf_close_logvol(struct udf_mount *ump, int mntflags)
{
	struct vnode *devvp = ump->devvp;
	struct mmc_op mmc_op;
	int logvol_integrity;
	int error = 0, error1 = 0, error2 = 0;
	int tracknr;
	int nvats, n, nok;
	/* already/still closed? */
	logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
	if (logvol_integrity == UDF_INTEGRITY_CLOSED)
		return 0;
	/* writeout/update lvint or write out VAT */
	DPRINTF(VOLUMES, ("udf_close_logvol: closing logical volume\n"));
#ifdef DIAGNOSTIC
	if (ump->lvclose & UDF_CLOSE_SESSION)
		KASSERT(ump->lvclose & UDF_WRITE_VAT);
#endif
	if (ump->lvclose & UDF_WRITE_VAT) {
		DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n"));
		/* write out the VAT data and all its descriptors */
		DPRINTF(VOLUMES, ("writeout vat_node\n"));
		udf_writeout_vat(ump);
		(void) vflushbuf(ump->vat_node->vnode, FSYNC_WAIT);
		(void) VOP_FSYNC(ump->vat_node->vnode,
				FSCRED, FSYNC_WAIT, 0, 0);
		if (ump->lvclose & UDF_CLOSE_SESSION) {
			DPRINTF(VOLUMES, ("udf_close_logvol: closing session "
					  "as requested\n"));
		}
		/* at least two DVD packets and 3 CD-R packets */
		nvats = 32;
#if notyet
		/*
		 * TODO calculate the available space and if the disc is
		 * allmost full, write out till end-256-1 with banks, write
		 * AVDP and fill up with VATs, then close session and close
		 * disc.
		 */
		if (ump->lvclose & UDF_FINALISE_DISC) {
			error = udf_write_phys_dscr_sync(ump, NULL,
					UDF_C_FLOAT_DSCR,
					(union dscrptr *) ump->anchors[0],
					0, 0);
			if (error)
				printf("writeout of anchor failed!\n");
			/* pad space with VAT ICBs */
			nvats = 256;
		}
#endif
		/*
		 * Write out a number of VAT node copies for redundancy;
		 * each fsync of the re-dirtied node emits a fresh FE/EFE.
		 */
		nok = 0;
		for (n = 0; n < nvats; n++) {
			/* will now only write last FE/EFE */
			ump->vat_node->i_flags |= IN_MODIFIED;
			error = VOP_FSYNC(ump->vat_node->vnode,
					FSCRED, FSYNC_WAIT, 0, 0);
			if (!error)
				nok++;
		}
		if (nok < 14) {
			/* arbitrary; but at least one or two CD frames */
			printf("writeout of at least 14 VATs failed\n");
			return error;
		}
	}
	/* NOTE the disc is in a (minimal) valid state now; no erroring out */
	/* finish closing of session */
	if (ump->lvclose & UDF_CLOSE_SESSION) {
		error = udf_validate_session_start(ump);
		if (error)
			return error;
		(void) udf_synchronise_caches(ump);
		/* close all associated tracks */
		tracknr = ump->discinfo.first_track_last_session;
		error = 0;
		while (tracknr <= ump->discinfo.last_track_last_session) {
			DPRINTF(VOLUMES, ("\tclosing possible open "
					  "track %d\n", tracknr));
			memset(&mmc_op, 0, sizeof(mmc_op));
			mmc_op.operation = MMC_OP_CLOSETRACK;
			mmc_op.mmc_profile = ump->discinfo.mmc_profile;
			mmc_op.tracknr = tracknr;
			error = VOP_IOCTL(devvp, MMCOP, &mmc_op,
					FKIOCTL, NOCRED);
			if (error)
				printf("udf_close_logvol: closing of "
					"track %d failed\n", tracknr);
			tracknr ++;
		}
		if (!error) {
			DPRINTF(VOLUMES, ("closing session\n"));
			memset(&mmc_op, 0, sizeof(mmc_op));
			mmc_op.operation = MMC_OP_CLOSESESSION;
			mmc_op.mmc_profile = ump->discinfo.mmc_profile;
			mmc_op.sessionnr = ump->discinfo.num_sessions;
			error = VOP_IOCTL(devvp, MMCOP, &mmc_op,
					FKIOCTL, NOCRED);
			if (error)
				printf("udf_close_logvol: closing of session"
						"failed\n");
		}
		/* a later (re)open will have to open a new session */
		if (!error)
			ump->lvopen |= UDF_OPEN_SESSION;
		if (error) {
			printf("udf_close_logvol: leaving disc as it is\n");
			ump->lvclose &= ~UDF_FINALISE_DISC;
		}
	}
	if (ump->lvclose & UDF_FINALISE_DISC) {
		memset(&mmc_op, 0, sizeof(mmc_op));
		mmc_op.operation = MMC_OP_FINALISEDISC;
		mmc_op.mmc_profile = ump->discinfo.mmc_profile;
		mmc_op.sessionnr = ump->discinfo.num_sessions;
		error = VOP_IOCTL(devvp, MMCOP, &mmc_op,
				FKIOCTL, NOCRED);
		if (error)
			printf("udf_close_logvol: finalising disc"
					"failed\n");
	}
	/* write out partition bitmaps if requested */
	if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
		/* sync writeout metadata spacetable if existing */
		error1 = udf_write_metadata_partition_spacetable(ump, true);
		if (error1)
			printf( "udf_close_logvol: writeout of metadata space "
				"bitmap failed\n");
		/* sync writeout partition spacetables */
		error2 = udf_write_physical_partition_spacetables(ump, true);
		if (error2)
			printf( "udf_close_logvol: writeout of space tables "
				"failed\n");
		if (error1 || error2)
			return (error1 | error2);
		ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
	}
	/* write out metadata partition nodes if requested */
	if (ump->lvclose & UDF_WRITE_METAPART_NODES) {
		/* sync writeout metadata descriptor node */
		error1 = udf_writeout_node(ump->metadata_node, FSYNC_WAIT);
		if (error1)
			printf( "udf_close_logvol: writeout of metadata partition "
				"node failed\n");
		/* duplicate metadata partition descriptor if needed */
		udf_synchronise_metadatamirror_node(ump);
		/* sync writeout metadatamirror descriptor node */
		error2 = udf_writeout_node(ump->metadatamirror_node, FSYNC_WAIT);
		if (error2)
			printf( "udf_close_logvol: writeout of metadata partition "
				"mirror node failed\n");
		if (error1 || error2)
			return (error1 | error2);
		ump->lvclose &= ~UDF_WRITE_METAPART_NODES;
	}
	/* mark it closed */
	ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
	/* do we need to write out the logical volume integrity? */
	if (ump->lvclose & UDF_WRITE_LVINT)
		error = udf_writeout_lvint(ump, ump->lvopen);
	if (error) {
		/* HELP now what? mark it open again for now */
		ump->logvol_integrity->integrity_type =
			udf_rw32(UDF_INTEGRITY_OPEN);
		return error;
	}
	(void) udf_synchronise_caches(ump);
	return 0;
}
/* --------------------------------------------------------------------- */
/*
* Genfs interfacing
*
* static const struct genfs_ops udf_genfsops = {
* .gop_size = genfs_size,
* size of transfers
* .gop_alloc = udf_gop_alloc,
* allocate len bytes at offset
* .gop_write = genfs_gop_write,
* putpages interface code
* .gop_markupdate = udf_gop_markupdate,
* set update/modify flags etc.
* }
*/
/*
* Genfs interface. These four functions are the only ones defined though not
* documented... great....
*/
/*
* Called for allocating an extent of the file either by VOP_WRITE() or by
* genfs filling up gaps.
*/
/*
 * Genfs callback: reserve backing store for a byte range of the vnode.
 * The byte range [off, off+len) is translated into a logical block count
 * and that many blocks are reserved on the recording partition for this
 * node's data type.  Reservations for directories (FIDs) and system files
 * are not allowed to fail.
 */
static int
udf_gop_alloc(struct vnode *vp, off_t off,
	off_t len, int flags, kauth_cred_t cred)
{
	struct udf_node *udf_node = VTOI(vp);
	struct udf_mount *ump = udf_node->ump;
	uint64_t lb_start, lb_end;
	uint32_t lb_size, num_lb;
	int udf_c_type, vpart_num, can_fail;
	int error;

	DPRINTF(ALLOC, ("udf_gop_alloc called for offset %"PRIu64" for %"PRIu64" bytes, %s\n",
		off, len, flags? "SYNC":"NONE"));

	/* translate the byte range into a logical block count */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
	lb_start = off / lb_size;
	lb_end = (off + len + lb_size - 1) / lb_size;
	num_lb = lb_end - lb_start;

	/* pick the virtual partition recording this type of data */
	udf_c_type = udf_get_c_type(udf_node);
	vpart_num = udf_get_record_vpart(ump, udf_c_type);

	/* only plain data reservations may fail; directory (FID) space and
	 * system files must always succeed */
	can_fail = (udf_c_type != UDF_C_FIDS) &&
	    ((vp->v_vflag & VV_SYSTEM) == 0);

	error = udf_reserve_space(ump, udf_node, udf_c_type,
		vpart_num, num_lb, can_fail);

	DPRINTF(ALLOC, ("\tlb_start %"PRIu64", lb_end %"PRIu64", num_lb %d\n",
		lb_start, lb_end, num_lb));

	return error;
}
/*
* callback from genfs to update our flags
*/
/*
 * Genfs callback: translate GOP_UPDATE_* flags into the node's in-core
 * IN_* timestamp flags.
 */
static void
udf_gop_markupdate(struct vnode *vp, int flags)
{
	struct udf_node *udf_node = VTOI(vp);
	u_long mask = 0;

	if (flags & GOP_UPDATE_ACCESSED)
		mask |= IN_ACCESS;
	if (flags & GOP_UPDATE_MODIFIED)
		mask |= (vp->v_type == VREG) ?
		    (IN_CHANGE | IN_UPDATE) : IN_MODIFY;
	if (mask)
		udf_node->i_flags |= mask;
}
/* genfs operations vector for UDF vnodes */
static const struct genfs_ops udf_genfsops = {
	.gop_size = genfs_size,			/* size of transfers */
	.gop_alloc = udf_gop_alloc,		/* allocate len bytes at offset */
	.gop_write = genfs_gop_write_rwmap,	/* putpages interface code */
	.gop_markupdate = udf_gop_markupdate,	/* set update/modify flags etc. */
};
/* --------------------------------------------------------------------- */
/*
 * Write a terminator descriptor (TAGID_TERM) at the given sector, ending a
 * descriptor sequence.  Returns the result of the synchronous write.
 */
int
udf_write_terminator(struct udf_mount *ump, uint32_t sector)
{
	union dscrptr *dscr;
	int error;
	dscr = malloc(ump->discinfo.sector_size, M_TEMP, M_WAITOK|M_ZERO);
	udf_inittag(ump, &dscr->tag, TAGID_TERM, sector);
	/* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */
	dscr->tag.desc_crc_len = udf_rw16(512-UDF_DESC_TAG_LENGTH);
	(void) udf_validate_tag_and_crc_sums(dscr);
	error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
			dscr, sector, sector);
	free(dscr, M_TEMP);
	return error;
}
/* --------------------------------------------------------------------- */
/* UDF<->unix converters */
/* --------------------------------------------------------------------- */
/*
 * Collapse the 5-bits-per-class UDF permission fields onto the
 * 3-bits-per-class unix rwx layout.
 */
static mode_t
udf_perm_to_unix_mode(uint32_t perm)
{
	return (mode_t)
	    ( (perm & UDF_FENTRY_PERM_USER_MASK)       |
	     ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2) |
	     ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4));
}
/* --------------------------------------------------------------------- */
static uint32_t
unix_mode_to_udf_perm(mode_t mode)
{
uint32_t perm;
perm = ((mode & S_IRWXO) );
perm |= ((mode & S_IRWXG) << 2);
perm |= ((mode & S_IRWXU) << 4);
perm |= ((mode & S_IWOTH) << 3);
perm |= ((mode & S_IWGRP) << 5);
perm |= ((mode & S_IWUSR) << 7);
return perm;
}
/* --------------------------------------------------------------------- */
/*
 * Map an UDF ICB filetype to the corresponding unix S_IF* file type bits.
 * Unknown filetypes map to 0.
 */
static uint32_t
udf_icb_to_unix_filetype(uint32_t icbftype)
{
	if (icbftype == UDF_ICB_FILETYPE_DIRECTORY ||
	    icbftype == UDF_ICB_FILETYPE_STREAMDIR)
		return S_IFDIR;
	if (icbftype == UDF_ICB_FILETYPE_FIFO)
		return S_IFIFO;
	if (icbftype == UDF_ICB_FILETYPE_CHARDEVICE)
		return S_IFCHR;
	if (icbftype == UDF_ICB_FILETYPE_BLOCKDEVICE)
		return S_IFBLK;
	if (icbftype == UDF_ICB_FILETYPE_RANDOMACCESS ||
	    icbftype == UDF_ICB_FILETYPE_REALTIME)
		return S_IFREG;
	if (icbftype == UDF_ICB_FILETYPE_SYMLINK)
		return S_IFLNK;
	if (icbftype == UDF_ICB_FILETYPE_SOCKET)
		return S_IFSOCK;
	/* no idea what this is */
	return 0;
}
/* --------------------------------------------------------------------- */
/*
 * Convert an UDF dstring to an unix (UTF-8) name.  OSTA compressed unicode
 * identifiers are uncompressed, translated and re-encoded as UTF-8; other
 * charspecs are assumed to be 8 bit latin-1 with a leading length byte.
 * `result' always receives a NUL-terminated string.
 */
void
udf_to_unix_name(char *result, int result_len, char *id, int len,
	struct charspec *chsp)
{
	uint16_t *raw_name, *unix_name;
	uint16_t *inchp, ch;
	uint8_t *outchp;
	const char *osta_id = "OSTA Compressed Unicode";
	int ucode_chars, nice_uchars, is_osta_typ0, nout;
	size_t lat_len;
	raw_name = malloc(2048 * sizeof(uint16_t), M_UDFTEMP, M_WAITOK);
	unix_name = raw_name + 1024;	/* split space in half */
	assert(sizeof(char) == sizeof(uint8_t));
	outchp = (uint8_t *) result;
	is_osta_typ0 = (chsp->type == 0);
	is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
	if (is_osta_typ0) {
		/* TODO clean up */
		*raw_name = *unix_name = 0;
		ucode_chars = udf_UncompressUnicode(len, (uint8_t *) id, raw_name);
		ucode_chars = MIN(ucode_chars, UnicodeLength((unicode_t *) raw_name));
		nice_uchars = UDFTransName(unix_name, raw_name, ucode_chars);
		/* output UTF8 */
		for (inchp = unix_name; nice_uchars>0; inchp++, nice_uchars--) {
			ch = *inchp;
			nout = wput_utf8(outchp, result_len, ch);
			outchp += nout; result_len -= nout;
			if (!ch) break;
		}
		*outchp++ = 0;
	} else {
		/* assume 8bit char length byte latin-1 */
		assert(*id == 8);
		assert(strlen((char *) (id+1)) <= NAME_MAX);
		/*
		 * FIX: strncpy() bounded by the source length never writes
		 * a terminating NUL; copy the bytes and terminate
		 * explicitly so `result' is always a valid C string, like
		 * in the OSTA branch above.
		 */
		lat_len = strlen((char *) (id+1));
		memcpy(result, id+1, lat_len);
		result[lat_len] = 0;
	}
	free(raw_name, M_UDFTEMP);
}
/* --------------------------------------------------------------------- */
/*
 * Convert an unix (UTF-8) name to an UDF dstring.  The UTF-8 input is
 * first decoded to unicode-16; for OSTA type 0 charspecs it is then OSTA
 * compressed into `result', otherwise an 8 bit latin-1 dstring is built.
 * The resulting dstring length (in bytes) is returned in *result_len.
 */
void
unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len,
	struct charspec *chsp)
{
	uint16_t *raw_name;
	uint16_t *outchp;
	const char *inchp;
	const char *osta_id = "OSTA Compressed Unicode";
	int udf_chars, is_osta_typ0, bits;
	size_t cnt;
	/* allocate temporary unicode-16 buffer */
	raw_name = malloc(1024, M_UDFTEMP, M_WAITOK);
	/* convert utf8 to unicode-16 */
	*raw_name = 0;
	inchp = name;
	outchp = raw_name;
	bits = 8;
	for (cnt = name_len, udf_chars = 0; cnt;) {
		*outchp = wget_utf8(&inchp, &cnt);
		/* 16 bit compression is needed for non latin-1 characters */
		if (*outchp > 0xff)
			bits=16;
		outchp++;
		udf_chars++;
	}
	/* null terminate just in case */
	*outchp++ = 0;
	is_osta_typ0 = (chsp->type == 0);
	is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
	if (is_osta_typ0) {
		udf_chars = udf_CompressUnicode(udf_chars, bits,
				(unicode_t *) raw_name,
				(byte *) result);
	} else {
		printf("unix to udf name: no CHSP0 ?\n");
		/* XXX assume 8bit char length byte latin-1 */
		*result++ = 8; udf_chars = 1;
		/*
		 * FIX: the copy started at `name + 1', which dropped the
		 * first character of the name and read one byte past its
		 * end; copy from the start of the name instead.
		 */
		strncpy(result, name, name_len);
		udf_chars += name_len;
	}
	*result_len = udf_chars;
	free(raw_name, M_UDFTEMP);
}
/* --------------------------------------------------------------------- */
/*
 * Convert an UDF on-disc timestamp into a UNIX timespec.
 *
 * The date/time fields are converted to seconds with
 * clock_ymdhms_to_secs(); the sub-second fields (centiseconds, hundreds of
 * microseconds and microseconds) are folded into nanoseconds.  The 12 bit
 * two's complement timezone offset (in minutes) is applied when the
 * timestamp records one; otherwise the mount's configured GMT offset is
 * used.
 */
void
udf_timestamp_to_timespec(struct udf_mount *ump,
			  struct timestamp *timestamp,
			  struct timespec  *timespec)
{
	struct clock_ymdhms ymdhms;
	uint32_t usecs, secs, nsecs;
	uint16_t tz;
	/* fill in ymdhms structure from timestamp */
	memset(&ymdhms, 0, sizeof(ymdhms));
	ymdhms.dt_year = udf_rw16(timestamp->year);
	ymdhms.dt_mon  = timestamp->month;
	ymdhms.dt_day  = timestamp->day;
	ymdhms.dt_wday = 0; /* ? */
	ymdhms.dt_hour = timestamp->hour;
	ymdhms.dt_min  = timestamp->minute;
	ymdhms.dt_sec  = timestamp->second;
	secs = clock_ymdhms_to_secs(&ymdhms);
	/* fold the three sub-second fields into nanoseconds */
	usecs = timestamp->usec +
		100*timestamp->hund_usec + 10000*timestamp->centisec;
	nsecs = usecs * 1000;
	/*
	 * Calculate the time zone.  The timezone is 12 bit signed 2's
	 * compliment, so we gotta do some extra magic to handle it right.
	 */
	tz  = udf_rw16(timestamp->type_tz);
	tz &= 0x0fff;			/* only lower 12 bits are significant */
	if (tz & 0x0800)		/* sign extention */
		tz |= 0xf000;
	/* TODO check timezone conversion */
	/* check if we are specified a timezone to convert */
	if (udf_rw16(timestamp->type_tz) & 0x1000) {
		/* -2047 is the "no timezone specified" magic value */
		if ((int16_t) tz != -2047)
			secs -= (int16_t) tz * 60;
	} else {
		/* no recorded timezone; fall back to the mount's offset */
		secs -= ump->mount_args.gmtoff;
	}
	timespec->tv_sec  = secs;
	timespec->tv_nsec = nsecs;
}
void
udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp)
{
struct clock_ymdhms ymdhms;
uint32_t husec, usec, csec;
(void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms);
usec = timespec->tv_nsec / 1000;
husec = usec / 100;
usec -= husec * 100; /* only 0-99 in usec */
csec = husec / 100; /* only 0-99 in csec */
husec -= csec * 100; /* only 0-99 in husec */
/* set method 1 for CUT/GMT */
timestamp->type_tz = udf_rw16((1<<12) + 0);
timestamp->year = udf_rw16(ymdhms.dt_year);
timestamp->month = ymdhms.dt_mon;
timestamp->day = ymdhms.dt_day;
timestamp->hour = ymdhms.dt_hour;
timestamp->minute = ymdhms.dt_min;
timestamp->second = ymdhms.dt_sec;
timestamp->centisec = csec;
timestamp->hund_usec = husec;
timestamp->usec = usec;
}
/* --------------------------------------------------------------------- */
/*
* Attribute and filetypes converters with get/set pairs
*/
uint32_t
udf_getaccessmode(struct udf_node *udf_node)
{
struct file_entry *fe = udf_node->fe;
struct extfile_entry *efe = udf_node->efe;
uint32_t udf_perm, icbftype;
uint32_t mode, ftype;
uint16_t icbflags;
UDF_LOCK_NODE(udf_node, 0);
if (fe) {
udf_perm = udf_rw32(fe->perm);
icbftype = fe->icbtag.file_type;
icbflags = udf_rw16(fe->icbtag.flags);
} else {
assert(udf_node->efe);
udf_perm = udf_rw32(efe->perm);
icbftype = efe->icbtag.file_type;
icbflags = udf_rw16(efe->icbtag.flags);
}
mode = udf_perm_to_unix_mode(udf_perm);
ftype = udf_icb_to_unix_filetype(icbftype);
/* set suid, sgid, sticky from flags in fe/efe */
if (icbflags & UDF_ICB_TAG_FLAGS_SETUID)
mode |= S_ISUID;
if (icbflags & UDF_ICB_TAG_FLAGS_SETGID)
mode |= S_ISGID;
if (icbflags & UDF_ICB_TAG_FLAGS_STICKY)
mode |= S_ISVTX;
UDF_UNLOCK_NODE(udf_node, 0);
return mode | ftype;
}
/*
 * Store a UNIX access mode into the node: the permission bits are
 * translated to the UDF permission set while the set-uid/set-gid/sticky
 * bits are recorded in the ICB tag flags.  The node is locked during the
 * update.
 */
void
udf_setaccessmode(struct udf_node *udf_node, mode_t mode)
{
	struct file_entry *fe = udf_node->fe;
	struct extfile_entry *efe = udf_node->efe;
	uint32_t perm;
	uint16_t flags;

	UDF_LOCK_NODE(udf_node, 0);
	perm = unix_mode_to_udf_perm(mode & ALLPERMS);

	/* fetch current flags from whichever descriptor backs the node */
	flags = udf_rw16(fe ? fe->icbtag.flags : efe->icbtag.flags);

	/* rebuild the suid/sgid/sticky bits from the new mode */
	flags &= ~(UDF_ICB_TAG_FLAGS_SETUID | UDF_ICB_TAG_FLAGS_SETGID |
		UDF_ICB_TAG_FLAGS_STICKY);
	if (mode & S_ISUID)
		flags |= UDF_ICB_TAG_FLAGS_SETUID;
	if (mode & S_ISGID)
		flags |= UDF_ICB_TAG_FLAGS_SETGID;
	if (mode & S_ISVTX)
		flags |= UDF_ICB_TAG_FLAGS_STICKY;

	/* write back permissions and flags */
	if (fe != NULL) {
		fe->perm = udf_rw32(perm);
		fe->icbtag.flags = udf_rw16(flags);
	} else {
		efe->perm = udf_rw32(perm);
		efe->icbtag.flags = udf_rw16(flags);
	}
	UDF_UNLOCK_NODE(udf_node, 0);
}
/*
 * Extract the owning uid/gid of the node.  The on-disc value (uid_t) -1 /
 * (gid_t) -1 means "not specified" and is mapped to the mount's configured
 * anonymous uid/gid.  The node is locked during extraction.
 */
void
udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp)
{
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe = udf_node->fe;
	struct extfile_entry *efe = udf_node->efe;
	uid_t file_uid;
	gid_t file_gid;

	UDF_LOCK_NODE(udf_node, 0);
	if (fe == NULL) {
		assert(udf_node->efe);
		file_uid = (uid_t)udf_rw32(efe->uid);
		file_gid = (gid_t)udf_rw32(efe->gid);
	} else {
		file_uid = (uid_t)udf_rw32(fe->uid);
		file_gid = (gid_t)udf_rw32(fe->gid);
	}

	/* do the uid/gid translation game: map unspecified to anonymous */
	*uidp = (file_uid == (uid_t) -1) ? ump->mount_args.anon_uid : file_uid;
	*gidp = (file_gid == (gid_t) -1) ? ump->mount_args.anon_gid : file_gid;
	UDF_UNLOCK_NODE(udf_node, 0);
}
/*
 * Record a new owning uid/gid in the node.  The mount's configured
 * "nobody" uid/gid is stored on disc as the UDF "not specified" value -1.
 * The node is locked during the update.
 */
void
udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid)
{
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe = udf_node->fe;
	struct extfile_entry *efe = udf_node->efe;

	UDF_LOCK_NODE(udf_node, 0);

	/* do the uid/gid translation game: nobody maps to unspecified */
	if (uid == ump->mount_args.nobody_uid)
		uid = (uid_t) -1;
	if (gid == ump->mount_args.nobody_gid)
		gid = (gid_t) -1;

	/* store in whichever descriptor backs the node */
	if (fe != NULL) {
		fe->uid = udf_rw32((uint32_t) uid);
		fe->gid = udf_rw32((uint32_t) gid);
	} else {
		efe->uid = udf_rw32((uint32_t) uid);
		efe->gid = udf_rw32((uint32_t) gid);
	}

	UDF_UNLOCK_NODE(udf_node, 0);
}
/* --------------------------------------------------------------------- */
/*
 * (Re)build the dirhash of a directory node by reading all file identifier
 * descriptors (fids) in the directory stream and entering them into the
 * attached dirhash.  Deleted entries are registered as free slots for later
 * reuse by udf_dir_attach().
 *
 * Returns 0 on success, EIO if the dirhash was marked broken by an earlier
 * failed fill, or the error from reading the fid stream.  A read error
 * marks the dirhash DIRH_BROKEN; since DIRH_BROKEN is checked before
 * DIRH_COMPLETE on re-entry, the trailing DIRH_COMPLETE set is harmless in
 * that case.
 */
int
udf_dirhash_fill(struct udf_node *dir_node)
{
	struct vnode *dvp = dir_node->vnode;
	struct dirhash *dirh;
	struct file_entry *fe = dir_node->fe;
	struct extfile_entry *efe = dir_node->efe;
	struct fileid_desc *fid;
	struct dirent *dirent;
	uint64_t file_size, pre_diroffset, diroffset;
	uint32_t lb_size;
	int error;
	/* make sure we have a dirhash to work on */
	dirh = dir_node->dir_hash;
	KASSERT(dirh);
	KASSERT(dirh->refcnt > 0);
	if (dirh->flags & DIRH_BROKEN)
		return EIO;
	if (dirh->flags & DIRH_COMPLETE)
		return 0;
	/* make sure we have a clean dirhash to add to */
	dirhash_purge_entries(dirh);
	/* get directory filesize */
	if (fe) {
		file_size = udf_rw64(fe->inf_len);
	} else {
		assert(efe);
		file_size = udf_rw64(efe->inf_len);
	}
	/* allocate temporary space for fid */
	lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
	fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
	/* allocate temporary space for dirent */
	dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
	error = 0;
	diroffset = 0;
	/* walk all fids in the directory stream */
	while (diroffset < file_size) {
		/* transfer a new fid/dirent */
		pre_diroffset = diroffset;
		error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
		if (error) {
			/* TODO what to do? continue but not add? */
			dirh->flags |= DIRH_BROKEN;
			dirhash_purge_entries(dirh);
			break;
		}
		if ((fid->file_char & UDF_FILE_CHAR_DEL)) {
			/* register deleted extent for reuse */
			dirhash_enter_freed(dirh, pre_diroffset,
				udf_fidsize(fid));
		} else {
			/* append to the dirhash */
			dirhash_enter(dirh, dirent, pre_diroffset,
				udf_fidsize(fid), 0);
		}
	}
	dirh->flags |= DIRH_COMPLETE;
	free(fid, M_UDFTEMP);
	free(dirent, M_UDFTEMP);
	return error;
}
/* --------------------------------------------------------------------- */
/*
* Directory read and manipulation functions.
*
*/
/*
 * Look up `name' (of `namelen' bytes) in the directory given by vnode
 * `vp'.  On a hit, *found is set to 1 and *icb_loc receives the long_ad of
 * the entry's (extended) file entry.  On a miss, *found stays 0 and
 * *icb_loc is zeroed.
 *
 * The lookup walks dirhash candidate hits and re-reads each candidate's
 * fid from the directory stream to confirm the name, since dirhash only
 * hashes the names.
 *
 * Returns 0 or an error from filling the dirhash or reading the fid
 * stream.  Note a miss itself is not an error here; callers check *found.
 */
int
udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen,
	struct long_ad *icb_loc, int *found)
{
	struct udf_node *dir_node = VTOI(vp);
	struct dirhash *dirh;
	struct dirhash_entry *dirh_ep;
	struct fileid_desc *fid;
	struct dirent *dirent;
	uint64_t diroffset;
	uint32_t lb_size;
	int hit, error;
	/* set default return */
	*found = 0;
	/* get our dirhash and make sure its read in */
	dirhash_get(&dir_node->dir_hash);
	error = udf_dirhash_fill(dir_node);
	if (error) {
		dirhash_put(dir_node->dir_hash);
		return error;
	}
	dirh = dir_node->dir_hash;
	/* allocate temporary space for fid */
	lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
	fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
	dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
	DPRINTF(DIRHASH, ("dirhash_lookup looking for `%*.*s`\n",
		namelen, namelen, name));
	/* search our dirhash hits */
	memset(icb_loc, 0, sizeof(*icb_loc));
	dirh_ep = NULL;
	for (;;) {
		hit = dirhash_lookup(dirh, name, namelen, &dirh_ep);
		/* if no hit, abort the search */
		if (!hit)
			break;
		/* check this hit */
		diroffset = dirh_ep->offset;
		/* transfer a new fid/dirent */
		error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
		if (error)
			break;
		DPRINTF(DIRHASH, ("dirhash_lookup\tchecking `%*.*s`\n",
			dirent->d_namlen, dirent->d_namlen, dirent->d_name));
		/* see if its our entry */
#ifdef DIAGNOSTIC
		if (dirent->d_namlen != namelen) {
			printf("WARNING: dirhash_lookup() returned wrong "
				"d_namelen: %d and ought to be %d\n",
				dirent->d_namlen, namelen);
			printf("\tlooked for `%s' and got `%s'\n",
				name, dirent->d_name);
		}
#endif
		if (strncmp(dirent->d_name, name, namelen) == 0) {
			*found = 1;
			*icb_loc = fid->icb;
			break;
		}
	}
	free(fid, M_UDFTEMP);
	free(dirent, M_UDFTEMP);
	dirhash_put(dir_node->dir_hash);
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Initialise a freshly allocated file entry descriptor `fe' for a new node
 * of ICB filetype `file_type' at `node_icb'.  Fills in default
 * permissions, timestamps, a fresh unique id, a creation-time extended
 * attribute, and — for directories — the mandatory '..' parent fid
 * referring to `parent_icb'/`parent_unique_id'.  The descriptor's CRC and
 * checksum are (re)computed.
 *
 * Returns the size in bytes of the embedded parent fid (0 for non
 * directories); the caller uses it as the initial directory size.
 */
static int
udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type,
	struct long_ad *node_icb, struct long_ad *parent_icb,
	uint64_t parent_unique_id)
{
	struct timespec now;
	struct icb_tag *icb;
	struct filetimes_extattr_entry *ft_extattr;
	uint64_t unique_id;
	uint32_t fidsize, lb_num;
	uint8_t *bpos;
	int crclen, attrlen;
	lb_num = udf_rw32(node_icb->loc.lb_num);
	udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num);
	icb = &fe->icbtag;
	/*
	 * Always use strategy type 4 unless on WORM wich we don't support
	 * (yet). Fill in defaults and set for internal allocation of data.
	 */
	icb->strat_type = udf_rw16(4);
	icb->max_num_entries = udf_rw16(1);
	icb->file_type = file_type; /* 8 bit */
	icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
	fe->perm = udf_rw32(0x7fff); /* all is allowed */
	fe->link_cnt = udf_rw16(0); /* explicit setting */
	fe->ckpoint = udf_rw32(1); /* user supplied file version */
	/* stamp access/attribute/modification times with "now" */
	vfs_timestamp(&now);
	udf_timespec_to_timestamp(&now, &fe->atime);
	udf_timespec_to_timestamp(&now, &fe->attrtime);
	udf_timespec_to_timestamp(&now, &fe->mtime);
	udf_set_regid(&fe->imp_id, IMPL_NAME);
	udf_add_impl_regid(ump, &fe->imp_id);
	unique_id = udf_advance_uniqueid(ump);
	fe->unique_id = udf_rw64(unique_id);
	fe->l_ea = udf_rw32(0);
	/* create extended attribute to record our creation time */
	attrlen = UDF_FILETIMES_ATTR_SIZE(1);
	ft_extattr = malloc(attrlen, M_UDFTEMP, M_WAITOK);
	memset(ft_extattr, 0, attrlen);
	ft_extattr->hdr.type = udf_rw32(UDF_FILETIMES_ATTR_NO);
	ft_extattr->hdr.subtype = 1; /* [4/48.10.5] */
	ft_extattr->hdr.a_l = udf_rw32(UDF_FILETIMES_ATTR_SIZE(1));
	ft_extattr->d_l = udf_rw32(UDF_TIMESTAMP_SIZE); /* one item */
	ft_extattr->existence = UDF_FILETIMES_FILE_CREATION;
	udf_timespec_to_timestamp(&now, &ft_extattr->times[0]);
	udf_extattr_insert_internal(ump, (union dscrptr *) fe,
		(struct extattr_entry *) ft_extattr);
	free(ft_extattr, M_UDFTEMP);
	/* if its a directory, create '..' */
	bpos = (uint8_t *) fe->data + udf_rw32(fe->l_ea);
	fidsize = 0;
	if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
		fidsize = udf_create_parentfid(ump,
			(struct fileid_desc *) bpos, parent_icb,
			parent_unique_id);
	}
	/* record fidlength information */
	fe->inf_len = udf_rw64(fidsize);
	fe->l_ad = udf_rw32(fidsize);
	fe->logblks_rec = udf_rw64(0); /* intern */
	/* CRC covers the descriptor body plus ea space and embedded fid */
	crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH;
	crclen += udf_rw32(fe->l_ea) + fidsize;
	fe->tag.desc_crc_len = udf_rw16(crclen);
	(void) udf_validate_tag_and_crc_sums((union dscrptr *) fe);
	return fidsize;
}
/* --------------------------------------------------------------------- */
/*
 * Initialise a freshly allocated extended file entry descriptor `efe' for
 * a new node of ICB filetype `file_type' at `node_icb'.  The extended file
 * entry variant of udf_create_new_fe(): it additionally records a creation
 * time (ctime) and object size natively, so no creation-time extended
 * attribute is needed.  For directories the mandatory '..' parent fid
 * referring to `parent_icb'/`parent_unique_id' is embedded.
 *
 * Returns the size in bytes of the embedded parent fid (0 for non
 * directories); the caller uses it as the initial directory size.
 */
static int
udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe,
	int file_type, struct long_ad *node_icb, struct long_ad *parent_icb,
	uint64_t parent_unique_id)
{
	struct timespec now;
	struct icb_tag *icb;
	uint64_t unique_id;
	uint32_t fidsize, lb_num;
	uint8_t *bpos;
	int crclen;
	lb_num = udf_rw32(node_icb->loc.lb_num);
	udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num);
	icb = &efe->icbtag;
	/*
	 * Always use strategy type 4 unless on WORM wich we don't support
	 * (yet). Fill in defaults and set for internal allocation of data.
	 */
	icb->strat_type = udf_rw16(4);
	icb->max_num_entries = udf_rw16(1);
	icb->file_type = file_type; /* 8 bit */
	icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
	efe->perm = udf_rw32(0x7fff); /* all is allowed */
	efe->link_cnt = udf_rw16(0); /* explicit setting */
	efe->ckpoint = udf_rw32(1); /* user supplied file version */
	/* stamp creation/access/attribute/modification times with "now" */
	vfs_timestamp(&now);
	udf_timespec_to_timestamp(&now, &efe->ctime);
	udf_timespec_to_timestamp(&now, &efe->atime);
	udf_timespec_to_timestamp(&now, &efe->attrtime);
	udf_timespec_to_timestamp(&now, &efe->mtime);
	udf_set_regid(&efe->imp_id, IMPL_NAME);
	udf_add_impl_regid(ump, &efe->imp_id);
	unique_id = udf_advance_uniqueid(ump);
	efe->unique_id = udf_rw64(unique_id);
	efe->l_ea = udf_rw32(0);
	/* if its a directory, create '..' */
	bpos = (uint8_t *) efe->data + udf_rw32(efe->l_ea);
	fidsize = 0;
	if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
		fidsize = udf_create_parentfid(ump,
			(struct fileid_desc *) bpos, parent_icb,
			parent_unique_id);
	}
	/* record fidlength information */
	efe->obj_size = udf_rw64(fidsize);
	efe->inf_len = udf_rw64(fidsize);
	efe->l_ad = udf_rw32(fidsize);
	efe->logblks_rec = udf_rw64(0); /* intern */
	/* CRC covers the descriptor body plus ea space and embedded fid */
	crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH;
	crclen += udf_rw32(efe->l_ea) + fidsize;
	efe->tag.desc_crc_len = udf_rw16(crclen);
	(void) udf_validate_tag_and_crc_sums((union dscrptr *) efe);
	return fidsize;
}
/* --------------------------------------------------------------------- */
/*
 * Remove the directory entry for `udf_node' named by `cnp' from directory
 * `dir_node'.
 *
 * The entry's fid is located via the dirhash, marked deleted (and, with
 * UDF_COMPLETE_DELETE, its icb wiped) and written back in place.  The
 * attached node's link count is decremented; when it reaches zero the node
 * is flagged IN_DELETED so it won't be written out.  For directories the
 * parent's link count is also decremented to drop the '..' backreference.
 *
 * Returns 0, ENOENT when the name is not present, or an I/O error.
 */
int
udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node,
	struct udf_node *udf_node, struct componentname *cnp)
{
	struct vnode *dvp = dir_node->vnode;
	struct dirhash *dirh;
	struct dirhash_entry *dirh_ep;
	struct file_entry *fe = dir_node->fe;
	struct fileid_desc *fid;
	struct dirent *dirent;
	uint64_t diroffset;
	uint32_t lb_size, fidsize;
	int found, error;
	char const *name = cnp->cn_nameptr;
	int namelen = cnp->cn_namelen;
	int hit, refcnt;
	/* get our dirhash and make sure its read in */
	dirhash_get(&dir_node->dir_hash);
	error = udf_dirhash_fill(dir_node);
	if (error) {
		dirhash_put(dir_node->dir_hash);
		return error;
	}
	dirh = dir_node->dir_hash;
	/* get directory filesize */
	if (!fe) {
		assert(dir_node->efe);
	}
	/* allocate temporary space for fid */
	lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
	fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
	dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
	/* search our dirhash hits */
	found = 0;
	dirh_ep = NULL;
	for (;;) {
		hit = dirhash_lookup(dirh, name, namelen, &dirh_ep);
		/* if no hit, abort the search */
		if (!hit)
			break;
		/* check this hit */
		diroffset = dirh_ep->offset;
		/* transfer a new fid/dirent */
		error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
		if (error)
			break;
		/* see if its our entry */
		KASSERT(dirent->d_namlen == namelen);
		if (strncmp(dirent->d_name, name, namelen) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		error = ENOENT;
	if (error)
		goto error_out;
	/* mark deleted */
	fid->file_char |= UDF_FILE_CHAR_DEL;
#ifdef UDF_COMPLETE_DELETE
	/* wipe the icb reference so the slot can be recycled */
	memset(&fid->icb, 0, sizeof(fid->icb));
#endif
	(void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
	/* get size of fid and compensate for the read_fid_stream advance */
	fidsize = udf_fidsize(fid);
	diroffset -= fidsize;
	/* write out */
	error = vn_rdwr(UIO_WRITE, dir_node->vnode,
			fid, fidsize, diroffset,
			UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
			FSCRED, NULL, NULL);
	if (error)
		goto error_out;
	/* get reference count of attached node */
	if (udf_node->fe) {
		refcnt = udf_rw16(udf_node->fe->link_cnt);
	} else {
		KASSERT(udf_node->efe);
		refcnt = udf_rw16(udf_node->efe->link_cnt);
	}
#ifdef UDF_COMPLETE_DELETE
	/* substract reference counter in attached node */
	refcnt -= 1;
	if (udf_node->fe) {
		udf_node->fe->link_cnt = udf_rw16(refcnt);
	} else {
		udf_node->efe->link_cnt = udf_rw16(refcnt);
	}
	/* prevent writeout when refcnt == 0 */
	if (refcnt == 0)
		udf_node->i_flags |= IN_DELETED;
	if (fid->file_char & UDF_FILE_CHAR_DIR) {
		int drefcnt;
		/* substract reference counter in directory node */
		/* note subtract 2 (?) for its was also backreferenced */
		if (dir_node->fe) {
			drefcnt  = udf_rw16(dir_node->fe->link_cnt);
			drefcnt -= 1;
			dir_node->fe->link_cnt = udf_rw16(drefcnt);
		} else {
			KASSERT(dir_node->efe);
			drefcnt  = udf_rw16(dir_node->efe->link_cnt);
			drefcnt -= 1;
			dir_node->efe->link_cnt = udf_rw16(drefcnt);
		}
	}
	udf_node->i_flags |= IN_MODIFIED;
	dir_node->i_flags |= IN_MODIFIED;
#endif
	/* if it is/was a hardlink adjust the file count */
	if (refcnt > 0)
		udf_adjust_filecount(udf_node, -1);
	/* remove from the dirhash */
	dirhash_remove(dirh, dirent, diroffset,
		udf_fidsize(fid));
error_out:
	free(fid, M_UDFTEMP);
	free(dirent, M_UDFTEMP);
	dirhash_put(dir_node->dir_hash);
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Rewrite the '..' entry of directory `dir_node' to point at
 * `new_parent_node'; used when a directory is moved (rename) to a new
 * parent.  The '..' fid is located via the dirhash, its icb and unique id
 * are updated and the fid is written back in place.  Link counts are not
 * touched here; the caller handles those.
 *
 * Returns 0, ENOENT if no '..' entry is found (a corrupt directory), or an
 * I/O error.
 */
int
udf_dir_update_rootentry(struct udf_mount *ump, struct udf_node *dir_node,
	struct udf_node *new_parent_node)
{
	struct vnode *dvp = dir_node->vnode;
	struct dirhash *dirh;
	struct dirhash_entry *dirh_ep;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct fileid_desc *fid;
	struct dirent *dirent;
	uint64_t diroffset;
	uint64_t new_parent_unique_id;
	uint32_t lb_size, fidsize;
	int found, error;
	char const *name = "..";
	int namelen = 2;
	int hit;
	/* get our dirhash and make sure its read in */
	dirhash_get(&dir_node->dir_hash);
	error = udf_dirhash_fill(dir_node);
	if (error) {
		dirhash_put(dir_node->dir_hash);
		return error;
	}
	dirh = dir_node->dir_hash;
	/* get new parent's unique ID */
	fe = new_parent_node->fe;
	efe = new_parent_node->efe;
	if (fe) {
		new_parent_unique_id = udf_rw64(fe->unique_id);
	} else {
		assert(efe);
		new_parent_unique_id = udf_rw64(efe->unique_id);
	}
	/* get directory filesize */
	fe = dir_node->fe;
	efe = dir_node->efe;
	if (!fe) {
		assert(efe);
	}
	/* allocate temporary space for fid */
	lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
	fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
	dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
	/*
	 * NOTE the standard does not dictate the FID entry '..' should be
	 * first, though in practice it will most likely be.
	 */
	/* search our dirhash hits */
	found = 0;
	dirh_ep = NULL;
	for (;;) {
		hit = dirhash_lookup(dirh, name, namelen, &dirh_ep);
		/* if no hit, abort the search */
		if (!hit)
			break;
		/* check this hit */
		diroffset = dirh_ep->offset;
		/* transfer a new fid/dirent */
		error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
		if (error)
			break;
		/* see if its our entry */
		KASSERT(dirent->d_namlen == namelen);
		if (strncmp(dirent->d_name, name, namelen) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		error = ENOENT;
	if (error)
		goto error_out;
	/* update our ICB to the new parent, hit of lower 32 bits of uniqueid */
	fid->icb = new_parent_node->write_loc;
	fid->icb.longad_uniqueid = udf_rw32(new_parent_unique_id);
	(void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
	/* get size of fid and compensate for the read_fid_stream advance */
	fidsize = udf_fidsize(fid);
	diroffset -= fidsize;
	/* write out */
	error = vn_rdwr(UIO_WRITE, dir_node->vnode,
			fid, fidsize, diroffset,
			UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
			FSCRED, NULL, NULL);
	/* nothing to be done in the dirhash */
error_out:
	free(fid, M_UDFTEMP);
	free(dirent, M_UDFTEMP);
	dirhash_put(dir_node->dir_hash);
	return error;
}
/* --------------------------------------------------------------------- */
/*
* We are not allowed to split the fid tag itself over an logical block so
* check the space remaining in the logical block.
*
* We try to select the smallest candidate for recycling or when none is
* found, append a new one at the end of the directory.
*/
/*
 * Create a directory entry in `dir_node' for `udf_node' under the name in
 * `cnp'.  A recycled (freed) fid slot of suitable size is preferred via
 * dirhash_lookup_freed(); when none fits, the directory is extended.  The
 * fid tag itself may never straddle a logical block, so candidate and new
 * positions are checked and padded with an implementation-use area where
 * required (UDF 2.00+ 2.3.4.4).  Link counts are incremented on the
 * attached node and, for directories, on the parent for '..'.
 *
 * Fixes over the previous revision (in the !UDF_COMPLETE_DELETE branch,
 * which never compiled): the fid stream must be read through `dvp' (there
 * is no `vp' in scope) and `dirent' is a struct value, so its address must
 * be passed.
 *
 * Returns 0 or an error from growing the directory or writing the fid.
 */
int
udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node,
	struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp)
{
	struct vnode *dvp = dir_node->vnode;
	struct dirhash *dirh;
	struct dirhash_entry *dirh_ep;
	struct fileid_desc *fid;
	struct icb_tag *icbtag;
	struct charspec osta_charspec;
	struct dirent dirent;
	uint64_t unique_id, dir_size;
	uint64_t fid_pos, end_fid_pos, chosen_fid_pos;
	uint32_t chosen_size, chosen_size_diff;
	int lb_size, lb_rest, fidsize, this_fidsize, size_diff;
	int file_char, refcnt, icbflags, addr_type, hit, error;

	/* get our dirhash and make sure its read in */
	dirhash_get(&dir_node->dir_hash);
	error = udf_dirhash_fill(dir_node);
	if (error) {
		dirhash_put(dir_node->dir_hash);
		return error;
	}
	dirh = dir_node->dir_hash;

	/* get info */
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	udf_osta_charset(&osta_charspec);
	if (dir_node->fe) {
		dir_size = udf_rw64(dir_node->fe->inf_len);
		icbtag = &dir_node->fe->icbtag;
	} else {
		dir_size = udf_rw64(dir_node->efe->inf_len);
		icbtag = &dir_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
	if (udf_node->fe) {
		unique_id = udf_rw64(udf_node->fe->unique_id);
		refcnt = udf_rw16(udf_node->fe->link_cnt);
	} else {
		unique_id = udf_rw64(udf_node->efe->unique_id);
		refcnt = udf_rw16(udf_node->efe->link_cnt);
	}
	/* already linked: this is a hardlink, give it its own unique id */
	if (refcnt > 0) {
		unique_id = udf_advance_uniqueid(ump);
		udf_adjust_filecount(udf_node, 1);
	}

	/* determine file characteristics */
	file_char = 0; /* visible non deleted file and not stream metadata */
	if (vap->va_type == VDIR)
		file_char = UDF_FILE_CHAR_DIR;

	/* malloc scrap buffer */
	fid = malloc(lb_size, M_TEMP, M_WAITOK|M_ZERO);

	/* calculate _minimum_ fid size */
	unix_to_udf_name((char *) fid->data, &fid->l_fi,
		cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
	fidsize = UDF_FID_SIZE + fid->l_fi;
	fidsize = (fidsize + 3) & ~3; /* multiple of 4 */

	/* find position that will fit the FID */
	chosen_fid_pos = dir_size;
	chosen_size = 0;
	chosen_size_diff = UINT_MAX;

	/* shut up gcc */
	dirent.d_namlen = 0;

	/* search our dirhash hits for the smallest fitting freed slot */
	error = 0;
	dirh_ep = NULL;
	for (;;) {
		hit = dirhash_lookup_freed(dirh, fidsize, &dirh_ep);
		/* if no hit, abort the search */
		if (!hit)
			break;
		/* check this hit for size */
		this_fidsize = dirh_ep->entry_size;
		/* check this hit */
		fid_pos = dirh_ep->offset;
		end_fid_pos = fid_pos + this_fidsize;
		size_diff = this_fidsize - fidsize;
		lb_rest = lb_size - (end_fid_pos % lb_size);
#ifndef UDF_COMPLETE_DELETE
		/* transfer a new fid/dirent */
		error = udf_read_fid_stream(dvp, &fid_pos, fid, &dirent);
		if (error)
			goto error_out;
		/* only reuse entries that are wiped */
		/* check if the len + loc are marked zero */
		if (udf_rw32(fid->icb.len) != 0)
			continue;
		if (udf_rw32(fid->icb.loc.lb_num) != 0)
			continue;
		if (udf_rw16(fid->icb.loc.part_num) != 0)
			continue;
#endif	/* UDF_COMPLETE_DELETE */
		/* select if not splitting the tag and its smaller */
		if ((size_diff >= 0)  &&
			(size_diff < chosen_size_diff) &&
			(lb_rest >= sizeof(struct desc_tag)))
		{
			/* UDF 2.3.4.2+3 specifies rules for iu size */
			if ((size_diff == 0) || (size_diff >= 32)) {
				chosen_fid_pos   = fid_pos;
				chosen_size      = this_fidsize;
				chosen_size_diff = size_diff;
			}
		}
	}

	/* extend directory if no other candidate found */
	if (chosen_size == 0) {
		chosen_fid_pos   = dir_size;
		chosen_size      = fidsize;
		chosen_size_diff = 0;
		/* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */
		if (addr_type == UDF_ICB_INTERN_ALLOC) {
			/* pre-grow directory to see if we're to switch */
			udf_grow_node(dir_node, dir_size + chosen_size);
			icbflags = udf_rw16(icbtag->flags);
			addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		}
		/* make sure the next fid desc_tag won't be splitted */
		if (addr_type != UDF_ICB_INTERN_ALLOC) {
			end_fid_pos = chosen_fid_pos + chosen_size;
			lb_rest = lb_size - (end_fid_pos % lb_size);
			/* pad with implementation use regid if needed */
			if (lb_rest < sizeof(struct desc_tag))
				chosen_size += 32;
		}
	}
	chosen_size_diff = chosen_size - fidsize;

	/* populate the FID */
	memset(fid, 0, lb_size);
	udf_inittag(ump, &fid->tag, TAGID_FID, 0);
	fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
	fid->file_char = file_char;
	fid->icb = udf_node->loc;
	fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
	fid->l_iu = udf_rw16(0);
	if (chosen_size > fidsize) {
		/* insert implementation-use regid to space it correctly */
		fid->l_iu = udf_rw16(chosen_size_diff);
		/* set implementation use */
		udf_set_regid((struct regid *) fid->data, IMPL_NAME);
		udf_add_impl_regid(ump, (struct regid *) fid->data);
	}
	/* fill in name */
	unix_to_udf_name((char *) fid->data + udf_rw16(fid->l_iu),
		&fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
	fid->tag.desc_crc_len = udf_rw16(chosen_size - UDF_DESC_TAG_LENGTH);
	(void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);

	/* writeout FID/update parent directory */
	error = vn_rdwr(UIO_WRITE, dvp,
			fid, chosen_size, chosen_fid_pos,
			UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
			FSCRED, NULL, NULL);
	if (error)
		goto error_out;

	/* add reference counter in attached node */
	if (udf_node->fe) {
		refcnt = udf_rw16(udf_node->fe->link_cnt);
		udf_node->fe->link_cnt = udf_rw16(refcnt+1);
	} else {
		KASSERT(udf_node->efe);
		refcnt = udf_rw16(udf_node->efe->link_cnt);
		udf_node->efe->link_cnt = udf_rw16(refcnt+1);
	}

	/* mark not deleted if it was... just in case, but do warn */
	if (udf_node->i_flags & IN_DELETED) {
		printf("udf: warning, marking a file undeleted\n");
		udf_node->i_flags &= ~IN_DELETED;
	}

	if (file_char & UDF_FILE_CHAR_DIR) {
		/* add reference counter in directory node for '..' */
		if (dir_node->fe) {
			refcnt = udf_rw16(dir_node->fe->link_cnt);
			refcnt++;
			dir_node->fe->link_cnt = udf_rw16(refcnt);
		} else {
			KASSERT(dir_node->efe);
			refcnt = udf_rw16(dir_node->efe->link_cnt);
			refcnt++;
			dir_node->efe->link_cnt = udf_rw16(refcnt);
		}
	}

	/* append to the dirhash */
	/* NOTE do not use dirent anymore or it won't match later! */
	udf_to_unix_name(dirent.d_name, NAME_MAX,
		(char *) fid->data + udf_rw16(fid->l_iu), fid->l_fi, &osta_charspec);
	dirent.d_namlen = strlen(dirent.d_name);
	dirhash_enter(dirh, &dirent, chosen_fid_pos,
		udf_fidsize(fid), 1);

	/* note updates */
	udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */
	/* VN_KNOTE(udf_node, ...) */
	udf_update(udf_node->vnode, NULL, NULL, NULL, 0);
error_out:
	free(fid, M_TEMP);
	dirhash_put(dir_node->dir_hash);
	return error;
}
/* --------------------------------------------------------------------- */
/*
* Each node can have an attached streamdir node though not recursively. These
* are otherwise known as named substreams/named extended attributes that have
* no size limitations.
*
* `Normal' extended attributes are indicated with a number and are recorded
* in either the fe/efe descriptor itself for small descriptors or recorded in
* the attached extended attribute file. Since these spaces can get
* fragmented, care ought to be taken.
*
* Since the size of the space reserved for allocation descriptors is limited,
* there is a mechanim provided for extending this space; this is done by a
* special extent to allow schrinking of the allocations without breaking the
* linkage to the allocation extent descriptor.
*/
/*
 * vcache load operation: read in the node addressed by `key' (the loc part
 * of a long_ad) and initialise the vnode `vp' for it.
 *
 * The ICB chain is followed: indirect entries redirect (bounded by
 * UDF_MAX_INDIRS_FOLLOW), strategy 4096 daisy-chains descriptors in
 * consecutive sectors, strategy 4 terminates the chain.  The resulting
 * fe/efe descriptor is recorded on the node, any allocation extent
 * redirects are read in, and the vnode type/size are set from the ICB
 * filetype and information length.
 *
 * Returns 0 with *new_key set, or an error after disposing the partially
 * built node.
 */
int
udf_loadvnode(struct mount *mp, struct vnode *vp,
     const void *key, size_t key_len, const void **new_key)
{
	union dscrptr *dscr;
	struct udf_mount *ump;
	struct udf_node *udf_node;
	struct long_ad node_icb_loc, icb_loc, next_icb_loc, last_fe_icb_loc;
	uint64_t file_size;
	uint32_t lb_size, sector, dummy;
	int udf_file_type, dscr_type, strat, strat4096, needs_indirect;
	int slot, eof, error;
	int num_indir_followed = 0;
	DPRINTF(NODE, ("udf_loadvnode called\n"));
	udf_node = NULL;
	ump = VFSTOUDF(mp);
	KASSERT(key_len == sizeof(node_icb_loc.loc));
	memset(&node_icb_loc, 0, sizeof(node_icb_loc));
	node_icb_loc.len = ump->logical_vol->lb_size;
	memcpy(&node_icb_loc.loc, key, key_len);
	/* garbage check: translate udf_node_icb_loc to sectornr */
	error = udf_translate_vtop(ump, &node_icb_loc, &sector, &dummy);
	if (error) {
		DPRINTF(NODE, ("\tcan't translate icb address!\n"));
		/* no use, this will fail anyway */
		return EINVAL;
	}
	/* build udf_node (do initialise!) */
	udf_node = pool_get(&udf_node_pool, PR_WAITOK);
	memset(udf_node, 0, sizeof(struct udf_node));
	vp->v_tag = VT_UDF;
	vp->v_op = udf_vnodeop_p;
	vp->v_data = udf_node;
	/* initialise crosslinks, note location of fe/efe for hashing */
	udf_node->ump = ump;
	udf_node->vnode = vp;
	udf_node->loc = node_icb_loc;
	udf_node->lockf = 0;
	mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&udf_node->node_lock, "udf_nlk");
	genfs_node_init(vp, &udf_genfsops); /* inititise genfs */
	udf_node->outstanding_bufs = 0;
	udf_node->outstanding_nodedscr = 0;
	udf_node->uncommitted_lbs = 0;
	/* check if we're fetching the root */
	if (ump->fileset_desc)
		if (memcmp(&udf_node->loc, &ump->fileset_desc->rootdir_icb,
		    sizeof(struct long_ad)) == 0)
			vp->v_vflag |= VV_ROOT;
	icb_loc = node_icb_loc;
	needs_indirect = 0;
	strat4096 = 0;
	udf_file_type = UDF_ICB_FILETYPE_UNKNOWN;
	file_size = 0;
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	DPRINTF(NODE, ("\tstart reading descriptors\n"));
	/* follow the ICB chain until a terminating fe/efe is found */
	do {
		/* try to read in fe/efe */
		error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
		/* blank sector marks end of sequence, check this */
		if ((dscr == NULL) &&  (!strat4096))
			error = ENOENT;
		/* break if read error or blank sector */
		if (error || (dscr == NULL))
			break;
		/* process descriptor based on the descriptor type */
		dscr_type = udf_rw16(dscr->tag.id);
		DPRINTF(NODE, ("\tread descriptor %d\n", dscr_type));
		/* if dealing with an indirect entry, follow the link */
		if (dscr_type == TAGID_INDIRECTENTRY) {
			needs_indirect = 0;
			next_icb_loc = dscr->inde.indirect_icb;
			udf_free_logvol_dscr(ump, &icb_loc, dscr);
			icb_loc = next_icb_loc;
			/* guard against indirection loops */
			if (++num_indir_followed > UDF_MAX_INDIRS_FOLLOW) {
				error = EMLINK;
				break;
			}
			continue;
		}
		/* only file entries and extended file entries allowed here */
		if ((dscr_type != TAGID_FENTRY) &&
		    (dscr_type != TAGID_EXTFENTRY)) {
			udf_free_logvol_dscr(ump, &icb_loc, dscr);
			error = ENOENT;
			break;
		}
		KASSERT(udf_tagsize(dscr, lb_size) == lb_size);
		/* choose this one */
		last_fe_icb_loc = icb_loc;
		/* record and process/update (ext)fentry */
		if (dscr_type == TAGID_FENTRY) {
			if (udf_node->fe)
				udf_free_logvol_dscr(ump, &last_fe_icb_loc,
					udf_node->fe);
			udf_node->fe = &dscr->fe;
			strat = udf_rw16(udf_node->fe->icbtag.strat_type);
			udf_file_type = udf_node->fe->icbtag.file_type;
			file_size = udf_rw64(udf_node->fe->inf_len);
		} else {
			if (udf_node->efe)
				udf_free_logvol_dscr(ump, &last_fe_icb_loc,
					udf_node->efe);
			udf_node->efe = &dscr->efe;
			strat = udf_rw16(udf_node->efe->icbtag.strat_type);
			udf_file_type = udf_node->efe->icbtag.file_type;
			file_size = udf_rw64(udf_node->efe->inf_len);
		}
		/* check recording strategy (structure) */
		/*
		 * Strategy 4096 is a daisy linked chain terminating with an
		 * unrecorded sector or a TERM descriptor. The next
		 * descriptor is to be found in the sector that follows the
		 * current sector.
		 */
		if (strat == 4096) {
			strat4096 = 1;
			needs_indirect = 1;
			icb_loc.loc.lb_num = udf_rw32(icb_loc.loc.lb_num) + 1;
		}
		/*
		 * Strategy 4 is the normal strategy and terminates, but if
		 * we're in strategy 4096, we can't have strategy 4 mixed in
		 */
		if (strat == 4) {
			if (strat4096) {
				error = EINVAL;
				break;
			}
			break;		/* done */
		}
	} while (!error);
	/* first round of cleanup code */
	if (error) {
		DPRINTF(NODE, ("\tnode fe/efe failed!\n"));
		/* recycle udf_node */
		udf_dispose_node(udf_node);
		return EINVAL;		/* error code ok? */
	}
	DPRINTF(NODE, ("\tnode fe/efe read in fine\n"));
	/* assert no references to dscr anymore beyong this point */
	assert((udf_node->fe) || (udf_node->efe));
	dscr = NULL;
	/*
	 * Remember where to record an updated version of the descriptor. If
	 * there is a sequence of indirect entries, icb_loc will have been
	 * updated. Its the write disipline to allocate new space and to make
	 * sure the chain is maintained.
	 *
	 * `needs_indirect' flags if the next location is to be filled with
	 * with an indirect entry.
	 */
	udf_node->write_loc = icb_loc;
	udf_node->needs_indirect = needs_indirect;
	/*
	 * Go trough all allocations extents of this descriptor and when
	 * encountering a redirect read in the allocation extension. These are
	 * daisy-chained.
	 */
	UDF_LOCK_NODE(udf_node, 0);
	udf_node->num_extensions = 0;
	error = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &icb_loc, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(icb_loc.len)),
			UDF_EXT_LEN(udf_rw32(icb_loc.len)),
			udf_rw32(icb_loc.loc.lb_num),
			udf_rw16(icb_loc.loc.part_num)));
		if (eof)
			break;
		slot++;
		if (UDF_EXT_FLAGS(udf_rw32(icb_loc.len)) != UDF_EXT_REDIRECT)
			continue;
		DPRINTF(NODE, ("\tgot redirect extent\n"));
		if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) {
			DPRINTF(ALLOC, ("udf_get_node: implementation limit, "
					"too many allocation extensions on "
					"udf_node\n"));
			error = EINVAL;
			break;
		}
		/* length can only be *one* lb : UDF 2.50/2.3.7.1 */
		if (UDF_EXT_LEN(udf_rw32(icb_loc.len)) != lb_size) {
			DPRINTF(ALLOC, ("udf_get_node: bad allocation "
					"extension size in udf_node\n"));
			error = EINVAL;
			break;
		}
		DPRINTF(NODE, ("read allocation extent at lb_num %d\n",
			UDF_EXT_LEN(udf_rw32(icb_loc.loc.lb_num))));
		/* load in allocation extent */
		error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
		if (error || (dscr == NULL))
			break;
		/* process read-in descriptor */
		dscr_type = udf_rw16(dscr->tag.id);
		if (dscr_type != TAGID_ALLOCEXTENT) {
			udf_free_logvol_dscr(ump, &icb_loc, dscr);
			error = ENOENT;
			break;
		}
		DPRINTF(NODE, ("\trecording redirect extent\n"));
		udf_node->ext[udf_node->num_extensions] = &dscr->aee;
		udf_node->ext_loc[udf_node->num_extensions] = icb_loc;
		udf_node->num_extensions++;
	} /* while */
	UDF_UNLOCK_NODE(udf_node, 0);
	/* second round of cleanup code */
	if (error) {
		/* recycle udf_node */
		udf_dispose_node(udf_node);
		return EINVAL;		/* error code ok? */
	}
	DPRINTF(NODE, ("\tnode read in fine\n"));
	/*
	 * Translate UDF filetypes into vnode types.
	 *
	 * Systemfiles like the meta main and mirror files are not treated as
	 * normal files, so we type them as having no type. UDF dictates that
	 * they are not allowed to be visible.
	 */
	switch (udf_file_type) {
	case UDF_ICB_FILETYPE_DIRECTORY :
	case UDF_ICB_FILETYPE_STREAMDIR :
		vp->v_type = VDIR;
		break;
	case UDF_ICB_FILETYPE_BLOCKDEVICE :
		vp->v_type = VBLK;
		break;
	case UDF_ICB_FILETYPE_CHARDEVICE :
		vp->v_type = VCHR;
		break;
	case UDF_ICB_FILETYPE_SOCKET :
		vp->v_type = VSOCK;
		break;
	case UDF_ICB_FILETYPE_FIFO :
		vp->v_type = VFIFO;
		break;
	case UDF_ICB_FILETYPE_SYMLINK :
		vp->v_type = VLNK;
		break;
	case UDF_ICB_FILETYPE_VAT :
	case UDF_ICB_FILETYPE_META_MAIN :
	case UDF_ICB_FILETYPE_META_MIRROR :
		vp->v_type = VNON;
		break;
	case UDF_ICB_FILETYPE_RANDOMACCESS :
	case UDF_ICB_FILETYPE_REALTIME :
		vp->v_type = VREG;
		break;
	default:
		/* YIKES, something else */
		vp->v_type = VNON;
	}
	/* TODO specfs, fifofs etc etc. vnops setting */
	/* don't forget to set vnode's v_size */
	uvm_vnp_setsize(vp, file_size);
	/* TODO ext attr and streamdir udf_nodes */
	*new_key = &udf_node->loc.loc;
	return 0;
}
/*
 * Get the node addressed by `node_icb_loc' through the vnode cache,
 * returning it exclusively locked in *udf_noderes.  Loading, if needed, is
 * done by udf_loadvnode() via vcache_get().
 *
 * Returns 0 or an error from the cache lookup or from locking the vnode.
 */
int
udf_get_node(struct udf_mount *ump, struct long_ad *node_icb_loc,
	struct udf_node **udf_noderes)
{
	struct vnode *vp;
	int error;

	error = vcache_get(ump->vfs_mountp, &node_icb_loc->loc,
	    sizeof(node_icb_loc->loc), &vp);
	if (error == 0) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error == 0) {
			*udf_noderes = VTOI(vp);
			return 0;
		}
		/* lock failed; drop the cache reference */
		vrele(vp);
	}
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Write out the node descriptor (fe/efe) and, if the node was rebuilt, its
 * allocation extent descriptors.  Deleted nodes are skipped.  The node is
 * locked here and unlocked in the write completion callback (see comment
 * below), so the caller must not hold the node lock on entry.
 */
int
udf_writeout_node(struct udf_node *udf_node, int waitfor)
{
	union dscrptr *dscr;
	struct long_ad *loc;
	int extnr, error;
	DPRINTF(NODE, ("udf_writeout_node called\n"));
	/* no I/O may be in flight when we write the descriptor itself */
	KASSERT(udf_node->outstanding_bufs == 0);
	KASSERT(udf_node->outstanding_nodedscr == 0);
	KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd));
	if (udf_node->i_flags & IN_DELETED) {
		/* deleted nodes are never written back; just drop reservations */
		DPRINTF(NODE, ("\tnode deleted; not writing out\n"));
		udf_cleanup_reservation(udf_node);
		return 0;
	}
	/* lock node; unlocked in callback */
	UDF_LOCK_NODE(udf_node, 0);
	/* remove pending reservations, we're written out */
	udf_cleanup_reservation(udf_node);
	/* at least one descriptor writeout */
	udf_node->outstanding_nodedscr = 1;
	/* we're going to write out the descriptor so clear the flags */
	udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED);
	/* if we were rebuild, write out the allocation extents */
	if (udf_node->i_flags & IN_NODE_REBUILD) {
		/* mark outstanding node descriptors and issue them */
		udf_node->outstanding_nodedscr += udf_node->num_extensions;
		for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
			loc = &udf_node->ext_loc[extnr];
			dscr = (union dscrptr *) udf_node->ext[extnr];
			/*
			 * NOTE(review): on error we return with the node still
			 * locked and counters raised -- presumably the callback
			 * still fires for issued writes; confirm error path.
			 */
			error = udf_write_logvol_dscr(udf_node, dscr, loc, 0);
			if (error)
				return error;
		}
		/* mark allocation extents written out */
		udf_node->i_flags &= ~(IN_NODE_REBUILD);
	}
	/* exactly one of fe/efe is in use for a node */
	if (udf_node->fe) {
		KASSERT(udf_node->efe == NULL);
		dscr = (union dscrptr *) udf_node->fe;
	} else {
		KASSERT(udf_node->efe);
		KASSERT(udf_node->fe == NULL);
		dscr = (union dscrptr *) udf_node->efe;
	}
	KASSERT(dscr);
	/* issue the main node descriptor write to its (possibly new) location */
	loc = &udf_node->write_loc;
	error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor);
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Tear down a udf_node: release reservations, dirhash, locks, the genfs
 * association and all descriptor memory, then return the node to its pool.
 * A NULL node is silently ignored.  Freed pointers are poisoned with
 * 0xdead... patterns to catch use-after-free.
 */
int
udf_dispose_node(struct udf_node *udf_node)
{
	struct vnode *vp;
	int extnr;
	DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node));
	if (!udf_node) {
		DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n"));
		return 0;
	}
	vp = udf_node->vnode;
#ifdef DIAGNOSTIC
	/* disposing with I/O still in flight would be a driver bug */
	if (vp->v_numoutput)
		panic("disposing UDF node with pending I/O's, udf_node = %p, "
				"v_numoutput = %d", udf_node, vp->v_numoutput);
#endif
	udf_cleanup_reservation(udf_node);
	/* TODO extended attributes and streamdir */
	/* remove dirhash if present */
	dirhash_purge(&udf_node->dir_hash);
	/* destroy our lock */
	mutex_destroy(&udf_node->node_mutex);
	cv_destroy(&udf_node->node_lock);
	/* dissociate our udf_node from the vnode */
	genfs_node_destroy(udf_node->vnode);
	/* clear v_data under the interlock so concurrent lookups see NULL */
	mutex_enter(vp->v_interlock);
	vp->v_data = NULL;
	mutex_exit(vp->v_interlock);
	/* free associated memory and the node itself */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		udf_free_logvol_dscr(udf_node->ump, &udf_node->ext_loc[extnr],
			udf_node->ext[extnr]);
		udf_node->ext[extnr] = (void *) 0xdeadcccc;
	}
	if (udf_node->fe)
		udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
			udf_node->fe);
	if (udf_node->efe)
		udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
			udf_node->efe);
	/* poison freed pointers to catch use-after-free */
	udf_node->fe  = (void *) 0xdeadaaaa;
	udf_node->efe = (void *) 0xdeadbbbb;
	udf_node->ump = (void *) 0xdeadbeef;
	pool_put(&udf_node_pool, udf_node);
	return 0;
}
/*
 * Create a new node using the specified dvp, vap and cnp.
 * This allows special files to be created. Use with care.
 *
 * Allocates one logical block for the node, initialises a fresh udf_node,
 * attaches it to the passed vnode and creates a (extended) file entry
 * descriptor for it.  Device/fifo/socket types are recognised but not yet
 * supported (ENOTSUP).  On success, *new_key points at the node's lb_addr
 * for the vnode cache.
 */
int
udf_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
	struct vattr *vap, kauth_cred_t cred,
	size_t *key_len, const void **new_key)
{
	union dscrptr *dscr;
	struct udf_node *dir_node = VTOI(dvp);
	struct udf_node *udf_node;
	struct udf_mount *ump = dir_node->ump;
	struct long_ad node_icb_loc;
	uint64_t parent_unique_id;
	uint64_t lmapping;
	uint32_t lb_size, lb_num;
	uint16_t vpart_num;
	uid_t uid;
	gid_t gid, parent_gid;
	int (**vnodeops)(void *);
	int udf_file_type, fid_size, error;

	vnodeops = udf_vnodeop_p;
	udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;

	/* translate the requested vnode type into an UDF ICB file type */
	switch (vap->va_type) {
	case VREG :
		udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
		break;
	case VDIR :
		udf_file_type = UDF_ICB_FILETYPE_DIRECTORY;
		break;
	case VLNK :
		udf_file_type = UDF_ICB_FILETYPE_SYMLINK;
		break;
	case VBLK :
		udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE;
		/* specfs */
		return ENOTSUP;
	case VCHR :
		udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE;
		/* specfs */
		return ENOTSUP;
	case VFIFO :
		udf_file_type = UDF_ICB_FILETYPE_FIFO;
		/* fifofs */
		return ENOTSUP;
	case VSOCK :
		udf_file_type = UDF_ICB_FILETYPE_SOCKET;
		return ENOTSUP;
	case VNON :
	case VBAD :
	default :
		/* nothing; can we even create these? */
		return EINVAL;
	}

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* reserve space for one logical block */
	vpart_num = ump->node_part;
	error = udf_reserve_space(ump, NULL, UDF_C_NODE,
		vpart_num, 1, /* can_fail */ true);
	if (error)
		return error;

	/* allocate node */
	error = udf_allocate_space(ump, NULL, UDF_C_NODE,
			vpart_num, 1, &lmapping);
	if (error) {
		/* undo the reservation on allocation failure */
		udf_do_unreserve_space(ump, NULL, vpart_num, 1);
		return error;
	}
	lb_num = lmapping;

	/* initialise pointer to location */
	memset(&node_icb_loc, 0, sizeof(struct long_ad));
	node_icb_loc.len = udf_rw32(lb_size);
	node_icb_loc.loc.lb_num = udf_rw32(lb_num);
	node_icb_loc.loc.part_num = udf_rw16(vpart_num);

	/* build udf_node (do initialise!) */
	udf_node = pool_get(&udf_node_pool, PR_WAITOK);
	memset(udf_node, 0, sizeof(struct udf_node));

	/* initialise crosslinks, note location of fe/efe for hashing */
	/* bugalert: synchronise with udf_get_node() */
	udf_node->ump       = ump;
	udf_node->vnode     = vp;
	vp->v_data          = udf_node;
	udf_node->loc       = node_icb_loc;
	udf_node->write_loc = node_icb_loc;
	udf_node->lockf     = 0;
	mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&udf_node->node_lock, "udf_nlk");
	udf_node->outstanding_bufs = 0;
	udf_node->outstanding_nodedscr = 0;
	udf_node->uncommitted_lbs = 0;
	vp->v_tag = VT_UDF;
	vp->v_op = vnodeops;

	/* initialise genfs */
	genfs_node_init(vp, &udf_genfsops);

	/* get parent's unique ID for refering '..' if its a directory */
	if (dir_node->fe) {
		parent_unique_id = udf_rw64(dir_node->fe->unique_id);
		parent_gid = (gid_t) udf_rw32(dir_node->fe->gid);
	} else {
		parent_unique_id = udf_rw64(dir_node->efe->unique_id);
		parent_gid = (gid_t) udf_rw32(dir_node->efe->gid);
	}

	/* get descriptor */
	udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr);

	/* choose a fe or an efe for it: descriptor version 2 means plain fe */
	if (udf_rw16(ump->logical_vol->tag.descriptor_ver) == 2) {
		udf_node->fe = &dscr->fe;
		fid_size = udf_create_new_fe(ump, udf_node->fe,
			udf_file_type, &udf_node->loc,
			&dir_node->loc, parent_unique_id);
		/* TODO add extended attribute for creation time */
	} else {
		udf_node->efe = &dscr->efe;
		fid_size = udf_create_new_efe(ump, udf_node->efe,
			udf_file_type, &udf_node->loc,
			&dir_node->loc, parent_unique_id);
	}
	KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num);

	/* update vnode's size and type */
	vp->v_type = vap->va_type;
	uvm_vnp_setsize(vp, fid_size);

	/* set access mode */
	udf_setaccessmode(udf_node, vap->va_mode);

	/* set ownership: uid from the caller's credentials, gid inherited */
	uid = kauth_cred_geteuid(cred);
	gid = parent_gid;
	udf_setownership(udf_node, uid, gid);

	/* the vnode cache key is the node's logical block address */
	*key_len = sizeof(udf_node->loc.loc);
	*new_key = &udf_node->loc.loc;

	return 0;
}
int
udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
struct componentname *cnp)
{
struct udf_node *udf_node, *dir_node = VTOI(dvp);
struct udf_mount *ump = dir_node->ump;
int error;
error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, vpp);
if (error)
return error;
udf_node = VTOI(*vpp);
error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp);
if (error) {
struct long_ad *node_icb_loc = &udf_node->loc;
uint32_t lb_num = udf_rw32(node_icb_loc->loc.lb_num);
uint16_t vpart_num = udf_rw16(node_icb_loc->loc.part_num);
/* free disc allocation for node */
udf_free_allocated_space(ump, lb_num, vpart_num, 1);
/* recycle udf_node */
udf_dispose_node(udf_node);
vrele(*vpp);
*vpp = NULL;
return error;
}
/* adjust file count */
udf_adjust_filecount(udf_node, 1);
return 0;
}
/* --------------------------------------------------------------------- */
/*
 * Release the disc space backing one node descriptor.  `mem' is only used
 * as a presence indicator: when NULL there is no descriptor and nothing is
 * freed.  The location and extent length are taken from `loc'.
 */
static void
udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem)
{
	struct udf_mount *ump = udf_node->ump;
	uint32_t lbsz, lbno, extlen, nblocks;
	uint16_t part;

	/* is there really one? */
	if (mem == NULL)
		return;

	/* got a descriptor here; compute how many blocks it occupies */
	extlen = UDF_EXT_LEN(udf_rw32(loc->len));
	lbno   = udf_rw32(loc->loc.lb_num);
	part   = udf_rw16(loc->loc.part_num);

	lbsz = udf_rw32(ump->logical_vol->lb_size);
	nblocks = (extlen + lbsz - 1) / lbsz;	/* round up to whole blocks */

	udf_free_allocated_space(ump, lbno, part, nblocks);
}
/*
 * Delete a node: truncate it to zero length, mark it clean so it is never
 * written back, and free the disc space of all its descriptors.  The
 * in-memory descriptor structures themselves are released later by
 * vop_reclaim()/udf_dispose_node().
 */
void
udf_delete_node(struct udf_node *udf_node)
{
	void *dscr;
	struct long_ad *loc;
	int extnr, lvint, dummy;
	/* paranoia check on integrity; should be open!; we could panic */
	lvint = udf_rw32(udf_node->ump->logvol_integrity->integrity_type);
	if (lvint == UDF_INTEGRITY_CLOSED)
		printf("\tIntegrity was CLOSED!\n");
	/* whatever the node type, change its size to zero */
	(void) udf_resize_node(udf_node, 0, &dummy);
	/* force it to be `clean'; no use writing it out */
	udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS |
		IN_CHANGE | IN_UPDATE | IN_MODIFY);
	/* adjust file count */
	udf_adjust_filecount(udf_node, -1);
	/*
	 * Free its allocated descriptors; memory will be released when
	 * vop_reclaim() is called.
	 */
	loc = &udf_node->loc;
	/* only one of fe/efe is non-NULL; the helper ignores NULL */
	dscr = udf_node->fe;
	udf_free_descriptor_space(udf_node, loc, dscr);
	dscr = udf_node->efe;
	udf_free_descriptor_space(udf_node, loc, dscr);
	/* also free any allocation extent descriptors the node acquired */
	for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) {
		dscr = udf_node->ext[extnr];
		loc = &udf_node->ext_loc[extnr];
		udf_free_descriptor_space(udf_node, loc, dscr);
	}
}
/* --------------------------------------------------------------------- */
/* set new filesize; node must be LOCKED on entry and is locked on exit */
int
udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended)
{
	struct file_entry *fe = udf_node->fe;
	struct extfile_entry *efe = udf_node->efe;
	uint64_t file_size;
	int error;
	/* current size comes from whichever descriptor flavour is in use */
	if (fe) {
		file_size  = udf_rw64(fe->inf_len);
	} else {
		assert(udf_node->efe);
		file_size  = udf_rw64(efe->inf_len);
	}
	DPRINTF(ATTR, ("\tchanging file length from %"PRIu64" to %"PRIu64"\n",
			file_size, new_size));
	/* if not changing, we're done */
	if (file_size == new_size)
		return 0;
	/* report to the caller whether the file grew */
	*extended = (new_size > file_size);
	if (*extended) {
		error = udf_grow_node(udf_node, new_size);
	} else {
		error = udf_shrink_node(udf_node, new_size);
	}
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * Update the node's on-descriptor timestamps according to the IN_* flags.
 * acc/mod/birth may be NULL, in which case the current time is used where
 * a flag requests an update.  For plain file entries the creation time
 * lives in an extended attribute (if present); for extended file entries
 * it is a regular field.
 */
void
udf_itimes(struct udf_node *udf_node, struct timespec *acc,
	struct timespec *mod, struct timespec *birth)
{
	struct timespec now;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct filetimes_extattr_entry *ft_extattr;
	struct timestamp *atime, *mtime, *attrtime, *ctime;
	struct timestamp  fe_ctime;
	struct timespec   cur_birth;
	uint32_t offset, a_l;
	uint8_t *filedata;
	int error;
	/* protect against rogue values */
	if (!udf_node)
		return;
	fe  = udf_node->fe;
	efe = udf_node->efe;
	/* nothing to do when no timestamp update was requested */
	if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY)))
		return;
	/* get descriptor information */
	if (fe) {
		atime    = &fe->atime;
		mtime    = &fe->mtime;
		attrtime = &fe->attrtime;
		filedata = fe->data;
		/* initial save dummy setting */
		ctime    = &fe_ctime;
		/* check our extended attribute if present */
		error = udf_extattr_search_intern(udf_node,
			UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
		if (!error) {
			ft_extattr = (struct filetimes_extattr_entry *)
				(filedata + offset);
			if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
				ctime = &ft_extattr->times[0];
		}
		/* TODO create the extended attribute if not found ? */
	} else {
		assert(udf_node->efe);
		atime    = &efe->atime;
		mtime    = &efe->mtime;
		attrtime = &efe->attrtime;
		ctime    = &efe->ctime;
	}
	vfs_timestamp(&now);
	/* set access time */
	if (udf_node->i_flags & IN_ACCESS) {
		if (acc == NULL)
			acc = &now;
		udf_timespec_to_timestamp(acc, atime);
	}
	/* set modification time */
	if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
		if (mod == NULL)
			mod = &now;
		udf_timespec_to_timestamp(mod, mtime);
		/* ensure birthtime is older than set modification! */
		udf_timestamp_to_timespec(udf_node->ump, ctime, &cur_birth);
		if ((cur_birth.tv_sec > mod->tv_sec) ||
			  ((cur_birth.tv_sec == mod->tv_sec) &&
			     (cur_birth.tv_nsec > mod->tv_nsec))) {
			udf_timespec_to_timestamp(mod, ctime);
		}
	}
	/* update birthtime if specified */
	/* XXX we assume here that given birthtime is older than mod */
	if (birth && (birth->tv_sec != VNOVAL)) {
		udf_timespec_to_timestamp(birth, ctime);
	}
	/* set change time */
	if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY))
		udf_timespec_to_timestamp(&now, attrtime);
	/* notify updates to the node itself */
	if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
		udf_node->i_flags |= IN_ACCESSED;
	if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
		udf_node->i_flags |= IN_MODIFIED;
	/* clear modification flags */
	udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
}
/* --------------------------------------------------------------------- */
/*
 * Update the node's descriptor: timestamps, implementation id and CRC.
 * Depending on the mount flags, update flags and how dirty the node is,
 * this either does nothing further, or forces a (possibly synchronous)
 * VOP_FSYNC writeback.
 */
int
udf_update(struct vnode *vp, struct timespec *acc,
	struct timespec *mod, struct timespec *birth, int updflags)
{
	union dscrptr *dscrptr;
	struct udf_node  *udf_node = VTOI(vp);
	struct udf_mount *ump = udf_node->ump;
	struct regid     *impl_id;
	int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
	int waitfor, flags;
#ifdef DEBUG
	char bits[128];
	DPRINTF(CALL, ("udf_update(node, %p, %p, %p, %d)\n", acc, mod, birth,
		updflags));
	snprintb(bits, sizeof(bits), IN_FLAGBITS, udf_node->i_flags);
	DPRINTF(CALL, ("\tnode flags %s\n", bits));
	DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
#endif
	/* set our times */
	udf_itimes(udf_node, acc, mod, birth);
	/* set our implementation id */
	if (udf_node->fe) {
		dscrptr = (union dscrptr *) udf_node->fe;
		impl_id = &udf_node->fe->imp_id;
	} else {
		dscrptr = (union dscrptr *) udf_node->efe;
		impl_id = &udf_node->efe->imp_id;
	}
	/* set our ID */
	udf_set_regid(impl_id, IMPL_NAME);
	udf_add_impl_regid(ump, impl_id);
	/* update our crc! on RMW we are not allowed to change a thing */
	udf_validate_tag_and_crc_sums(dscrptr);
	/* if called when mounted readonly, never write back */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return 0;
	/* check if the node is dirty 'enough'*/
	if (updflags & UPDATE_CLOSE) {
		flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
	} else {
		flags = udf_node->i_flags & IN_MODIFIED;
	}
	if (flags == 0)
		return 0;
	/* determine if we need to write sync or async */
	waitfor = 0;
	if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
		/* sync mounted */
		waitfor = updflags & UPDATE_WAIT;
		if (updflags & UPDATE_DIROP)
			waitfor |= UPDATE_WAIT;
	}
	if (waitfor)
		return VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0);
	return 0;
}
/* --------------------------------------------------------------------- */
/*
 * Read one fid and process it into a dirent and advance to the next (*fid)
 * has to be allocated a logical block in size, (*dirent) struct dirent length.
 *
 * Returns EINVAL past end-of-directory, EIO on a short/broken entry and 0
 * on success, in which case *offset has been advanced past the FID read.
 */
int
udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
		struct fileid_desc *fid, struct dirent *dirent)
{
	struct udf_node  *dir_node = VTOI(vp);
	struct udf_mount *ump = dir_node->ump;
	struct file_entry    *fe  = dir_node->fe;
	struct extfile_entry *efe = dir_node->efe;
	uint32_t      fid_size, lb_size;
	uint64_t      file_size;
	char         *fid_name;
	int           enough, error;
	assert(fid);
	assert(dirent);
	assert(dir_node);
	assert(offset);
	assert(*offset != 1);
	DPRINTF(FIDS, ("read_fid_stream called at offset %"PRIu64"\n", *offset));
	/* check if we're past the end of the directory */
	if (fe) {
		file_size = udf_rw64(fe->inf_len);
	} else {
		assert(dir_node->efe);
		file_size = udf_rw64(efe->inf_len);
	}
	if (*offset >= file_size)
		return EINVAL;
	/* get maximum length of FID descriptor */
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* initialise return values */
	fid_size = 0;
	memset(dirent, 0, sizeof(struct dirent));
	memset(fid, 0, lb_size);
	/* there must at least be room for a minimal FID header */
	enough = (file_size - (*offset) >= UDF_FID_SIZE);
	if (!enough) {
		/* short dir ... */
		return EIO;
	}
	/* read in up to one logical block worth of FID data */
	error = vn_rdwr(UIO_READ, vp,
			fid, MIN(file_size - (*offset), lb_size), *offset,
			UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, FSCRED,
			NULL, NULL);
	if (error)
		return error;
	DPRINTF(FIDS, ("\tfid piece read in fine\n"));
	/*
	 * Check if we got a whole descriptor.
	 * TODO Try to `resync' directory stream when something is very wrong.
	 */
	/* check if our FID header is OK */
	error = udf_check_tag(fid);
	if (error) {
		goto brokendir;
	}
	DPRINTF(FIDS, ("\ttag check ok\n"));
	if (udf_rw16(fid->tag.id) != TAGID_FID) {
		error = EIO;
		goto brokendir;
	}
	DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
	/* check for length */
	fid_size = udf_fidsize(fid);
	enough = (file_size - (*offset) >= fid_size);
	if (!enough) {
		error = EIO;
		goto brokendir;
	}
	DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
	/* check FID contents */
	error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
brokendir:
	if (error) {
		/* note that is sometimes a bit quick to report */
		printf("UDF: BROKEN DIRECTORY ENTRY\n");
		/* RESYNC? */
		/* TODO: use udf_resync_fid_stream */
		return EIO;
	}
	DPRINTF(FIDS, ("\tpayload checked ok\n"));
	/* we got a whole and valid descriptor! */
	DPRINTF(FIDS, ("\tinterpret FID\n"));
	/* create resulting dirent structure */
	fid_name = (char *) fid->data + udf_rw16(fid->l_iu);
	udf_to_unix_name(dirent->d_name, NAME_MAX,
		fid_name, fid->l_fi, &ump->logical_vol->desc_charset);
	/* '..' has no name, so provide one */
	if (fid->file_char & UDF_FILE_CHAR_PAR)
		strcpy(dirent->d_name, "..");
	dirent->d_fileno = udf_get_node_id(&fid->icb);	/* inode hash XXX */
	dirent->d_namlen = strlen(dirent->d_name);
	dirent->d_reclen = _DIRENT_SIZE(dirent);
	/*
	 * Note that it's not worth trying to go for the filetypes now... it's
	 * too expensive too
	 */
	dirent->d_type = DT_UNKNOWN;
	/* initial guess for filetype we can make */
	if (fid->file_char & UDF_FILE_CHAR_DIR)
		dirent->d_type = DT_DIR;
	/* advance */
	*offset += fid_size;
	return error;
}
/* --------------------------------------------------------------------- */
/*
 * One pass over the sync rbtree.  Pass 1 fsyncs data only, pass 2 fsyncs
 * complete nodes that have no pending I/O, pass 3 only counts pending I/O.
 * Nodes whose vnode lock cannot be taken without sleeping are counted as
 * dirty and skipped.  Caller must hold ump->sync_lock.
 */
static void
udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int pass, int *ndirty)
{
	struct udf_node *udf_node, *n_udf_node;
	struct vnode *vp;
	int vdirty, error;
	KASSERT(mutex_owned(&ump->sync_lock));
	DPRINTF(SYNC, ("sync_pass %d\n", pass));
	/* iterate the rbtree left-to-right, fetching next before locking */
	udf_node = RB_TREE_MIN(&ump->udf_node_tree);
	for (;udf_node; udf_node = n_udf_node) {
		DPRINTF(SYNC, ("."));
		vp = udf_node->vnode;
		n_udf_node = rb_tree_iterate(&ump->udf_node_tree,
		    udf_node, RB_DIR_RIGHT);
		/* busy vnodes count as dirty and are retried in a later pass */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (error) {
			KASSERT(error == EBUSY);
			*ndirty += 1;
			continue;
		}
		switch (pass) {
		case 1:
			VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
			break;
		case 2:
			/* only fsync fully when no buffers/descriptors pend */
			vdirty = vp->v_numoutput;
			if (vp->v_tag == VT_UDF)
				vdirty += udf_node->outstanding_bufs +
					udf_node->outstanding_nodedscr;
			if (vdirty == 0)
				VOP_FSYNC(vp, cred, 0,0,0);
			*ndirty += vdirty;
			break;
		case 3:
			/* count-only pass */
			vdirty = vp->v_numoutput;
			if (vp->v_tag == VT_UDF)
				vdirty += udf_node->outstanding_bufs +
					udf_node->outstanding_nodedscr;
			*ndirty += vdirty;
			break;
		}
		VOP_UNLOCK(vp);
	}
	DPRINTF(SYNC, ("END sync_pass %d\n", pass));
}
/*
 * Vnode iterator predicate: pick vnodes worth syncing.  System and typeless
 * vnodes are skipped, as are nodes that carry no update flags and nodes
 * that have neither dirty buffers nor dirty pages.
 */
static bool
udf_sync_selector(void *cl, struct vnode *vp)
{
	struct udf_node *udf_node = VTOI(vp);

	if ((vp->v_vflag & VV_SYSTEM) || (vp->v_type == VNON) ||
	    (udf_node == NULL))
		return false;

	/* must have at least one pending update flag set */
	if ((udf_node->i_flags & (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0)
		return false;

	/* and some actual dirty data or pages to push out */
	if (LIST_EMPTY(&vp->v_dirtyblkhd) && UVM_OBJ_IS_CLEAN(&vp->v_uobj))
		return false;

	return true;
}
/*
 * Sync the whole mount: collect candidate nodes into an rbtree, fsync their
 * data (pass 1), then complete nodes (pass 2), and for MNT_WAIT keep
 * counting/sleeping (pass 3) until no dirty blocks remain.  MNT_LAZY syncs
 * are ignored.  References taken by the iterator are released at the end.
 */
void
udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
{
	struct vnode_iterator *marker;
	struct vnode *vp;
	struct udf_node *udf_node, *udf_next_node;
	int dummy, ndirty;
	if (waitfor == MNT_LAZY)
		return;
	mutex_enter(&ump->sync_lock);
	/* Fill the rbtree with nodes to sync. */
	vfs_vnode_iterator_init(ump->vfs_mountp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker,
	    udf_sync_selector, NULL)) != NULL) {
		udf_node = VTOI(vp);
		udf_node->i_flags |= IN_SYNCED;
		rb_tree_insert_node(&ump->udf_node_tree, udf_node);
	}
	vfs_vnode_iterator_destroy(marker);
	dummy = 0;
	DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
	DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
	udf_sync_pass(ump, cred, 1, &dummy);
	DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
	DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
	udf_sync_pass(ump, cred, 2, &dummy);
	if (waitfor == MNT_WAIT) {
recount:
		/* include pending I/O on the device vnode itself */
		ndirty = ump->devvp->v_numoutput;
		DPRINTF(SYNC, ("counting pending blocks: on devvp %d\n",
			ndirty));
		udf_sync_pass(ump, cred, 3, &ndirty);
		DPRINTF(SYNC, ("counted num dirty pending blocks %d\n",
			ndirty));
		if (ndirty) {
			/* 1/4 second wait */
			kpause("udfsync2", false, hz/4, NULL);
			goto recount;
		}
	}
	/* Clean the rbtree. */
	for (udf_node = RB_TREE_MIN(&ump->udf_node_tree);
	    udf_node; udf_node = udf_next_node) {
		udf_next_node = rb_tree_iterate(&ump->udf_node_tree,
		    udf_node, RB_DIR_RIGHT);
		rb_tree_remove_node(&ump->udf_node_tree, udf_node);
		udf_node->i_flags &= ~IN_SYNCED;
		/* drop the reference the iterator took */
		vrele(udf_node->vnode);
	}
	mutex_exit(&ump->sync_lock);
}
/* --------------------------------------------------------------------- */
/*
* Read and write file extent in/from the buffer.
*
 * The splitting of the extent into separate request-buffers is to minimise
* copying around as much as possible.
*
* block based file reading and writing
*/
/*
 * Read an internally (in-descriptor) allocated file into `blob'.  The
 * file's data lives directly after the extended attribute area inside the
 * fe/efe descriptor.  `blob' must be at least one sector in size; the part
 * beyond the file length is zero-filled.
 */
static int
udf_read_internal(struct udf_node *node, uint8_t *blob)
{
	struct udf_mount *ump = node->ump;
	struct file_entry    *fe  = node->fe;
	struct extfile_entry *efe = node->efe;
	uint64_t  len;
	uint32_t  sector_size;
	uint8_t  *src;
	int       icbflags, addr_type;

	/* get extent and do some paranoia checks */
	sector_size = ump->discinfo.sector_size;
	if (fe != NULL) {
		len      = udf_rw64(fe->inf_len);
		src      = &fe->data[0] + udf_rw32(fe->l_ea);
		icbflags = udf_rw16(fe->icbtag.flags);
	} else {
		assert(node->efe);
		len      = udf_rw64(efe->inf_len);
		src      = &efe->data[0] + udf_rw32(efe->l_ea);
		icbflags = udf_rw16(efe->icbtag.flags);
	}

	/* only valid for internal allocation, which fits in one sector */
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
	assert(addr_type == UDF_ICB_INTERN_ALLOC);
	__USE(addr_type);
	assert(len < sector_size);

	/* copy out info; pad the remainder of the sector with zeroes */
	memset(blob, 0, sector_size);
	memcpy(blob, src, len);

	return 0;
}
/*
 * Write `blob' back into an internally (in-descriptor) allocated file.
 * Mirror of udf_read_internal(): the file data is stored directly after
 * the extended attribute area inside the fe/efe descriptor.
 */
static int
udf_write_internal(struct udf_node *node, uint8_t *blob)
{
	struct udf_mount *ump = node->ump;
	struct file_entry    *fe  = node->fe;
	struct extfile_entry *efe = node->efe;
	uint64_t  len;
	uint32_t  sector_size;
	uint8_t  *dst;
	int       icbflags, addr_type;

	/* get extent and do some paranoia checks */
	sector_size = ump->discinfo.sector_size;
	if (fe != NULL) {
		len      = udf_rw64(fe->inf_len);
		dst      = &fe->data[0] + udf_rw32(fe->l_ea);
		icbflags = udf_rw16(fe->icbtag.flags);
	} else {
		assert(node->efe);
		len      = udf_rw64(efe->inf_len);
		dst      = &efe->data[0] + udf_rw32(efe->l_ea);
		icbflags = udf_rw16(efe->icbtag.flags);
	}

	/* only valid for internal allocation, which fits in one sector */
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
	assert(addr_type == UDF_ICB_INTERN_ALLOC);
	__USE(addr_type);
	assert(len < sector_size);
	__USE(sector_size);

	/* copy in blob */
	/* memset(dst, 0, len); */
	memcpy(dst, blob, len);

	return 0;
}
/*
 * Read a file extent described by `buf' into its data area.  The logical
 * sectors are translated to disc addresses; internal (in-descriptor) files
 * are copied directly, zero/unmapped sectors are cleared in place, and
 * mapped sectors are merged into contiguous runs each issued as a nested
 * async iobuf to the disc scheduler.
 */
void
udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
{
	struct buf *nestbuf;
	struct udf_mount *ump = udf_node->ump;
	uint64_t *mapping;
	uint64_t run_start;
	uint32_t sector_size;
	uint32_t buf_offset, sector, rbuflen, rblk;
	uint32_t from, lblkno;
	uint32_t sectors;
	uint8_t  *buf_pos;
	int error, run_length, what;
	sector_size = udf_node->ump->discinfo.sector_size;
	from    = buf->b_blkno;
	sectors = buf->b_bcount / sector_size;
	what = udf_get_c_type(udf_node);
	/* assure we have enough translation slots */
	KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
	KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
	if (sectors > UDF_MAX_MAPPINGS) {
		printf("udf_read_filebuf: implementation limit on bufsize\n");
		buf->b_error  = EIO;
		biodone(buf);
		return;
	}
	mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
	error = 0;
	DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
	error = udf_translate_file_extent(udf_node, from, sectors, mapping);
	if (error) {
		buf->b_error  = error;
		biodone(buf);
		goto out;
	}
	DPRINTF(READ, ("\ttranslate extent went OK\n"));
	/* pre-check if its an internal */
	if (*mapping == UDF_TRANS_INTERN) {
		error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
		if (error)
			buf->b_error  = error;
		biodone(buf);
		goto out;
	}
	DPRINTF(READ, ("\tnot intern\n"));
#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_TRANSLATE) {
		printf("Returned translation table:\n");
		for (sector = 0; sector < sectors; sector++) {
			printf("%d : %"PRIu64"\n", sector, mapping[sector]);
		}
	}
#endif
	/* request read-in of data from disc scheduler */
	buf->b_resid = buf->b_bcount;
	for (sector = 0; sector < sectors; sector++) {
		buf_offset = sector * sector_size;
		buf_pos    = (uint8_t *) buf->b_data + buf_offset;
		DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
		/* check if its zero or unmapped to stop reading */
		switch (mapping[sector]) {
		case UDF_TRANS_UNMAPPED:
		case UDF_TRANS_ZERO:
			/* copy zero sector TODO runlength like below */
			memset(buf_pos, 0, sector_size);
			DPRINTF(READ, ("\treturning zero sector\n"));
			nestiobuf_done(buf, sector_size, 0);
			break;
		default :
			DPRINTF(READ, ("\tread sector "
			    "%"PRIu64"\n", mapping[sector]));
			lblkno = from + sector;
			run_start  = mapping[sector];
			run_length = 1;
			/* merge adjacent disc sectors into one run */
			while (sector < sectors-1) {
				if (mapping[sector+1] != mapping[sector]+1)
					break;
				run_length++;
				sector++;
			}
			/*
			 * nest an iobuf and mark it for async reading. Since
			 * we're using nested buffers, they can't be cached by
			 * design.
			 */
			rbuflen = run_length * sector_size;
			rblk    = run_start  * (sector_size/DEV_BSIZE);
			nestbuf = getiobuf(NULL, true);
			nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
			/* nestbuf is B_ASYNC */
			/* identify this nestbuf */
			nestbuf->b_lblkno   = lblkno;
			assert(nestbuf->b_vp == udf_node->vnode);
			/* CD schedules on raw blkno */
			nestbuf->b_blkno      = rblk;
			nestbuf->b_proc       = NULL;
			nestbuf->b_rawblkno   = rblk;
			nestbuf->b_udf_c_type = what;
			udf_discstrat_queuebuf(ump, nestbuf);
		}
	}
out:
	/* if we're synchronously reading, wait for the completion */
	if ((buf->b_flags & B_ASYNC) == 0)
		biowait(buf);
	DPRINTF(READ, ("\tend of read_filebuf\n"));
	free(mapping, M_TEMP);
	return;
}
/*
 * Write a file extent described by `buf' out to disc.  Counterpart of
 * udf_read_filebuf(): logical blocks are translated, internal files are
 * written straight into the descriptor, and mapped blocks are merged into
 * runs issued as nested async iobufs to the disc scheduler.  The node's
 * outstanding_bufs counter is raised for each issued run.
 */
void
udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
{
	struct buf *nestbuf;
	struct udf_mount *ump = udf_node->ump;
	uint64_t *mapping;
	uint64_t run_start;
	uint32_t lb_size;
	uint32_t buf_offset, lb_num, rbuflen, rblk;
	uint32_t from, lblkno;
	uint32_t num_lb;
	int error, run_length, what, s;
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
	from    = buf->b_blkno;
	num_lb  = buf->b_bcount / lb_size;
	what = udf_get_c_type(udf_node);
	/* assure we have enough translation slots */
	KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
	KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
	if (num_lb > UDF_MAX_MAPPINGS) {
		printf("udf_write_filebuf: implementation limit on bufsize\n");
		buf->b_error  = EIO;
		biodone(buf);
		return;
	}
	mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
	error = 0;
	DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
	error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
	if (error) {
		buf->b_error  = error;
		biodone(buf);
		goto out;
	}
	DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
	/* if its internally mapped, we can write it in the descriptor itself */
	if (*mapping == UDF_TRANS_INTERN) {
		/* TODO paranoia check if we ARE going to have enough space */
		error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
		if (error)
			buf->b_error  = error;
		biodone(buf);
		goto out;
	}
	DPRINTF(WRITE, ("\tnot intern\n"));
	/* request write out of data to disc scheduler */
	buf->b_resid = buf->b_bcount;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		buf_offset = lb_num * lb_size;
		DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
		/*
		 * Mappings are not that important here. Just before we write
		 * the lb_num we late-allocate them when needed and update the
		 * mapping in the udf_node.
		 */
		/* XXX why not ignore the mapping altogether ? */
		DPRINTF(WRITE, ("\twrite lb_num "
		    "%"PRIu64, mapping[lb_num]));
		lblkno = from + lb_num;
		run_start  = mapping[lb_num];
		run_length = 1;
		/*
		 * Merge blocks into a run.  NOTE(review): a run also continues
		 * when the next mapping is *identical* to the current one, not
		 * only when it is +1 -- presumably this covers blocks sharing
		 * a (late-allocated) mapping; confirm against
		 * udf_translate_file_extent() before restructuring.
		 */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		DPRINTF(WRITE, ("+ %d\n", run_length));
		/* nest an iobuf on the master buffer for the extent */
		rbuflen = run_length * lb_size;
		rblk = run_start * (lb_size/DEV_BSIZE);
		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
		/* nestbuf is B_ASYNC */
		/* identify this nestbuf */
		nestbuf->b_lblkno   = lblkno;
		KASSERT(nestbuf->b_vp == udf_node->vnode);
		/* CD schedules on raw blkno */
		nestbuf->b_blkno      = rblk;
		nestbuf->b_proc       = NULL;
		nestbuf->b_rawblkno   = rblk;
		nestbuf->b_udf_c_type = what;
		/* increment our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs++;
		splx(s);
		udf_discstrat_queuebuf(ump, nestbuf);
	}
out:
	/* if we're synchronously writing, wait for the completion */
	if ((buf->b_flags & B_ASYNC) == 0)
		biowait(buf);
	DPRINTF(WRITE, ("\tend of write_filebuf\n"));
	free(mapping, M_TEMP);
	return;
}
/* --------------------------------------------------------------------- */
| 26.798639 | 87 | 0.66759 | [
"geometry",
"model"
] |
9e2f8cbee7e2f0528eac62ad3b8f7e6c9dad4a2a | 2,591 | h | C | inc/point_cloud.h | xannieto/rule-based-classifier-cpp | d458ff7808ed59de29692843f28167902a47a536 | [
"Apache-2.0"
] | null | null | null | inc/point_cloud.h | xannieto/rule-based-classifier-cpp | d458ff7808ed59de29692843f28167902a47a536 | [
"Apache-2.0"
] | null | null | null | inc/point_cloud.h | xannieto/rule-based-classifier-cpp | d458ff7808ed59de29692843f28167902a47a536 | [
"Apache-2.0"
] | null | null | null | // ======================================================================================
// Copyright 2017 State Key Laboratory of Remote Sensing Science,
// Institute of Remote Sensing Science and Engineering, Beijing Normal University
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ======================================================================================
// #######################################################################################
// # #
// # CSF: Airborne LiDAR filtering based on Cloth Simulation #
// # #
// # Please cite the following paper, If you use this software in your work. #
// # #
// # Zhang W, Qi J, Wan P, Wang H, Xie D, Wang X, Yan G. An Easy-to-Use Airborne LiDAR #
// # Data Filtering Method Based on Cloth Simulation. Remote Sensing. 2016; 8(6):501. #
// # (http://ramm.bnu.edu.cn/) #
// # #
// # Wuming Zhang; Jianbo Qi; Peng Wan; Hongtao Wang #
// # #
// # contact us: 2009zwm@gmail.com; wpqjbzwm@126.com #
// # #
// #######################################################################################
#ifndef _POINT_CLOUD_H_
#define _POINT_CLOUD_H_
#include "point.h"
#include <vector>
namespace csf
{
class PointCloud
{
public:
std::vector<Lpoint> m_points;
public:
PointCloud() = default;
PointCloud(std::vector<Lpoint>&);
void computeBoundingBox(Lpoint& bbMin, Lpoint& bbMax);
};
}
#endif // ifndef _POINT_CLOUD_H_
| 44.672414 | 90 | 0.421459 | [
"vector"
] |
9e3032635b4f55a4777eaa5234cb796ded5267ef | 4,949 | h | C | libs/opengl/include/mrpt/opengl/CFrustum.h | skair39/mrpt | 88238f8ac1abdcf15401e14dc3a9faa5c59ba559 | [
"BSD-3-Clause"
] | 2 | 2019-02-20T02:36:05.000Z | 2019-02-20T02:46:51.000Z | libs/opengl/include/mrpt/opengl/CFrustum.h | skair39/mrpt | 88238f8ac1abdcf15401e14dc3a9faa5c59ba559 | [
"BSD-3-Clause"
] | null | null | null | libs/opengl/include/mrpt/opengl/CFrustum.h | skair39/mrpt | 88238f8ac1abdcf15401e14dc3a9faa5c59ba559 | [
"BSD-3-Clause"
] | null | null | null | /* +------------------------------------------------------------------------+
| Mobile Robot Programming Toolkit (MRPT) |
| http://www.mrpt.org/ |
| |
| Copyright (c) 2005-2019, Individual contributors, see AUTHORS file |
| See: http://www.mrpt.org/Authors - All rights reserved. |
| Released under BSD License. See details in http://www.mrpt.org/License |
+------------------------------------------------------------------------+ */
#pragma once
#include <mrpt/opengl/CRenderizableDisplayList.h>
#include <mrpt/math/lightweight_geom_data.h>
namespace mrpt::opengl
{
/** A solid or wireframe frustum in 3D (a rectangular truncated pyramid), with
* arbitrary (possibly assymetric) field-of-view angles.
*
* You can switch whether to show only the lines, the surface of the frustum,
* or both.
* By default only the lines are drawn.
*
* The color of the object (via CRenderizable::setColor()) affects the color
* of lines.
* To set the color of planes use \a setPlaneColor()
*
* As usual in MRPT, the +X axis is assumed to by the main direction, in this
* case of the pyramid axis.
*
* The horizontal and vertical FOVs can be set directly with \a setHorzFOV()
* and \a setVertFOV() if
* they are symmetric, or with \a setHorzFOVAsymmetric() and \a
* setVertFOVAsymmetric() otherwise.
*
* All FOV angles are positive numbers. FOVs must be below 90deg on each side
* (below 180deg in total).
* If you try to set FOVs to larger values they'll truncated to 89.9deg.
*
* \sa opengl::COpenGLScene,opengl::CRenderizable
*
* <div align="center">
* <table border="0" cellspan="4" cellspacing="4" style="border-width: 1px;
* border-style: solid;">
* <tr> <td> mrpt::opengl::CFrustum </td> <td> \image html
* preview_CFrustum.png </td> </tr>
* </table>
* </div>
*
* \ingroup mrpt_opengl_grp
*/
class CFrustum : public CRenderizableDisplayList
{
DEFINE_SERIALIZABLE(CFrustum)
protected:
/** Near and far planes */
float m_min_distance{0.1f}, m_max_distance{1.f};
/** Semi FOVs (in radians) */
float m_fov_horz_left, m_fov_horz_right;
/** Semi FOVs (in radians) */
float m_fov_vert_down, m_fov_vert_up;
bool m_draw_lines{true}, m_draw_planes{false};
float m_lineWidth{1.5f};
mrpt::img::TColor m_planes_color;
public:
inline void setLineWidth(float width)
{
m_lineWidth = width;
CRenderizableDisplayList::notifyChange();
}
inline float getLineWidth() const { return m_lineWidth; }
/** Changes the color of the planes; to change color of lines, use
* CRenderizable base methods. */
inline void setPlaneColor(const mrpt::img::TColor& c)
{
m_planes_color = c;
CRenderizableDisplayList::notifyChange();
}
inline const mrpt::img::TColor& getPlaneColor() const
{
return m_planes_color;
}
/** Changes distance of near & far planes */
void setNearFarPlanes(const float near_distance, const float far_distance);
float getNearPlaneDistance() const { return m_min_distance; }
float getFarPlaneDistance() const { return m_max_distance; }
/** Changes horizontal FOV (symmetric) */
void setHorzFOV(const float fov_horz_degrees);
/** Changes vertical FOV (symmetric) */
void setVertFOV(const float fov_vert_degrees);
/** Changes horizontal FOV (asymmetric) */
void setHorzFOVAsymmetric(
const float fov_horz_left_degrees, const float fov_horz_right_degrees);
/** Changes vertical FOV (asymmetric) */
void setVertFOVAsymmetric(
const float fov_vert_down_degrees, const float fov_vert_up_degrees);
float getHorzFOV() const
{
return mrpt::RAD2DEG(m_fov_horz_left + m_fov_horz_right);
}
float getVertFOV() const
{
return mrpt::RAD2DEG(m_fov_vert_down + m_fov_vert_up);
}
float getHorzFOVLeft() const { return mrpt::RAD2DEG(m_fov_horz_left); }
float getHorzFOVRight() const { return mrpt::RAD2DEG(m_fov_horz_right); }
float getVertFOVDown() const { return mrpt::RAD2DEG(m_fov_vert_down); }
float getVertFOVUp() const { return mrpt::RAD2DEG(m_fov_vert_up); }
/** Render \sa mrpt::opengl::CRenderizable */
void render_dl() const override;
/** Ray tracing. \sa mrpt::opengl::CRenderizable */
bool traceRay(const mrpt::poses::CPose3D& o, double& dist) const override;
/** Evaluates the bounding box of this object (including possible children)
* in the coordinate frame of the object parent. */
void getBoundingBox(
mrpt::math::TPoint3D& bb_min,
mrpt::math::TPoint3D& bb_max) const override;
/** Basic empty constructor. Set all parameters to default. */
CFrustum();
/** Constructor with some parameters */
CFrustum(
float near_distance, float far_distance, float horz_FOV_degrees,
float vert_FOV_degrees, float lineWidth, bool draw_lines,
bool draw_planes);
/** Destructor */
~CFrustum() override = default;
};
} // namespace mrpt::opengl
| 36.124088 | 80 | 0.678117 | [
"render",
"object",
"3d",
"solid"
] |
9e342517b02962b782a54a80a06071851edd3c7c | 2,647 | c | C | ext/phalcon/translate/interpolator/indexedarray.zep.c | bullsoft/cphalcon | fe17e3ae897b8d3f41d65ed328fcb734d1955b51 | [
"BSD-3-Clause"
] | 1 | 2020-01-05T17:57:54.000Z | 2020-01-05T17:57:54.000Z | ext/phalcon/translate/interpolator/indexedarray.zep.c | bullsoft/cphalcon | fe17e3ae897b8d3f41d65ed328fcb734d1955b51 | [
"BSD-3-Clause"
] | null | null | null | ext/phalcon/translate/interpolator/indexedarray.zep.c | bullsoft/cphalcon | fe17e3ae897b8d3f41d65ed328fcb734d1955b51 | [
"BSD-3-Clause"
] | null | null | null |
#ifdef HAVE_CONFIG_H
#include "../../../ext_config.h"
#endif
#include <php.h>
#include "../../../php_ext.h"
#include "../../../ext.h"
#include <Zend/zend_operators.h>
#include <Zend/zend_exceptions.h>
#include <Zend/zend_interfaces.h>
#include "kernel/main.h"
#include "kernel/fcall.h"
#include "kernel/memory.h"
#include "ext/spl/spl_exceptions.h"
#include "kernel/exception.h"
#include "kernel/operators.h"
#include "kernel/object.h"
/**
* This file is part of the Phalcon Framework.
*
* (c) Phalcon Team <team@phalcon.io>
*
* For the full copyright and license information, please view the LICENSE.txt
* file that was distributed with this source code.
*/
ZEPHIR_INIT_CLASS(Phalcon_Translate_Interpolator_IndexedArray) {
ZEPHIR_REGISTER_CLASS(Phalcon\\Translate\\Interpolator, IndexedArray, phalcon, translate_interpolator_indexedarray, phalcon_translate_interpolator_indexedarray_method_entry, 0);
zend_class_implements(phalcon_translate_interpolator_indexedarray_ce, 1, phalcon_translate_interpolator_interpolatorinterface_ce);
return SUCCESS;
}
/**
* Replaces placeholders by the values passed
*/
PHP_METHOD(Phalcon_Translate_Interpolator_IndexedArray, replacePlaceholders) {
zephir_method_globals *ZEPHIR_METHOD_GLOBALS_PTR = NULL;
zend_long ZEPHIR_LAST_CALL_STATUS;
zval placeholders;
zval *translation_param = NULL, *placeholders_param = NULL, _0$$3;
zval translation;
zval *this_ptr = getThis();
ZVAL_UNDEF(&translation);
ZVAL_UNDEF(&_0$$3);
ZVAL_UNDEF(&placeholders);
ZEPHIR_MM_GROW();
zephir_fetch_params(1, 1, 1, &translation_param, &placeholders_param);
if (UNEXPECTED(Z_TYPE_P(translation_param) != IS_STRING && Z_TYPE_P(translation_param) != IS_NULL)) {
zephir_throw_exception_string(spl_ce_InvalidArgumentException, SL("Parameter 'translation' must be of the type string"));
RETURN_MM_NULL();
}
if (EXPECTED(Z_TYPE_P(translation_param) == IS_STRING)) {
zephir_get_strval(&translation, translation_param);
} else {
ZEPHIR_INIT_VAR(&translation);
ZVAL_EMPTY_STRING(&translation);
}
if (!placeholders_param) {
ZEPHIR_INIT_VAR(&placeholders);
array_init(&placeholders);
} else {
zephir_get_arrval(&placeholders, placeholders_param);
}
if (zephir_fast_count_int(&placeholders)) {
ZEPHIR_MAKE_REF(&placeholders);
ZEPHIR_CALL_FUNCTION(NULL, "array_unshift", NULL, 478, &placeholders, &translation);
ZEPHIR_UNREF(&placeholders);
zephir_check_call_status();
ZEPHIR_INIT_VAR(&_0$$3);
ZVAL_STRING(&_0$$3, "sprintf");
ZEPHIR_CALL_USER_FUNC_ARRAY(return_value, &_0$$3, &placeholders);
zephir_check_call_status();
RETURN_MM();
}
RETURN_CTOR(&translation);
}
| 28.771739 | 178 | 0.768039 | [
"object"
] |
9e36e1ead904e0583d3956642ef0051f09fd385c | 1,408 | c | C | qcsrc/menu/nexuiz/commandbutton.c | YamiND/RocketMinsta | 2e15176cdb4427856cb40f3f637cfd14c1c29fbc | [
"WTFPL"
] | null | null | null | qcsrc/menu/nexuiz/commandbutton.c | YamiND/RocketMinsta | 2e15176cdb4427856cb40f3f637cfd14c1c29fbc | [
"WTFPL"
] | null | null | null | qcsrc/menu/nexuiz/commandbutton.c | YamiND/RocketMinsta | 2e15176cdb4427856cb40f3f637cfd14c1c29fbc | [
"WTFPL"
] | null | null | null | #ifndef COMMANDBUTTON_CLOSE
# define COMMANDBUTTON_CLOSE 1
# define COMMANDBUTTON_APPLY 2
//# define COMMANDBUTTON_REVERT 4
#endif
#ifdef INTERFACE
CLASS(NexuizCommandButton) EXTENDS(NexuizButton)
METHOD(NexuizCommandButton, configureNexuizCommandButton, void(entity, string, vector, string, float))
ATTRIB(NexuizCommandButton, onClickCommand, string, NULL)
ATTRIB(NexuizCommandButton, flags, float, 0)
ENDCLASS(NexuizCommandButton)
entity makeNexuizCommandButton(string theText, vector theColor, string theCommand, float closesMenu);
#endif
#ifdef IMPLEMENTATION
entity makeNexuizCommandButton(string theText, vector theColor, string theCommand, float theFlags)
{
entity me;
me = spawnNexuizCommandButton();
me.configureNexuizCommandButton(me, theText, theColor, theCommand, theFlags);
return me;
}
void NexuizCommandButton_Click(entity me, entity other)
{
//if(me.flags & COMMANDBUTTON_APPLY)
// saveAllCvars(me.parent);
cmd("\n", me.onClickCommand, "\n");
//if(me.flags & COMMANDBUTTON_REVERT)
// loadAllCvars(me.parent);
if(me.flags & COMMANDBUTTON_CLOSE)
m_goto(NULL);
}
void configureNexuizCommandButtonNexuizCommandButton(entity me, string theText, vector theColor, string theCommand, float theFlags)
{
me.configureNexuizButton(me, theText, theColor);
me.onClickCommand = theCommand;
me.flags = theFlags;
me.onClick = NexuizCommandButton_Click;
me.onClickEntity = me;
}
#endif
| 31.288889 | 131 | 0.797585 | [
"vector"
] |
9e415ad80084eaaddc5a58fa19a255237fa492a5 | 14,138 | c | C | srv/xdebug/src/lib/var_export_html.c | tsnetwork/teste_meta | b3bb083427a56bc74a3a72533f97fef397cdbd1d | [
"MIT"
] | 3 | 2021-03-07T17:27:44.000Z | 2021-11-11T17:44:00.000Z | srv/xdebug/src/lib/var_export_html.c | tsnetwork/teste_meta | b3bb083427a56bc74a3a72533f97fef397cdbd1d | [
"MIT"
] | null | null | null | srv/xdebug/src/lib/var_export_html.c | tsnetwork/teste_meta | b3bb083427a56bc74a3a72533f97fef397cdbd1d | [
"MIT"
] | 1 | 2021-04-20T05:10:35.000Z | 2021-04-20T05:10:35.000Z | /*
+----------------------------------------------------------------------+
| Xdebug |
+----------------------------------------------------------------------+
| Copyright (c) 2002-2020 Derick Rethans |
+----------------------------------------------------------------------+
| This source file is subject to version 1.01 of the Xdebug license, |
| that is bundled with this package in the file LICENSE, and is |
| available at through the world-wide-web at |
| https://xdebug.org/license.php |
| If you did not receive a copy of the Xdebug license and are unable |
| to obtain it through the world-wide-web, please send a note to |
| derick@xdebug.org so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Derick Rethans <derick@xdebug.org> |
+----------------------------------------------------------------------+
*/
#include "var_export_html.h"
ZEND_EXTERN_MODULE_GLOBALS(xdebug)
/*****************************************************************************
** Fancy variable printing routines
*/
#define COLOR_POINTER "#888a85"
#define COLOR_BOOL "#75507b"
#define COLOR_LONG "#4e9a06"
#define COLOR_NULL "#3465a4"
#define COLOR_DOUBLE "#f57900"
#define COLOR_STRING "#cc0000"
#define COLOR_EMPTY "#888a85"
#define COLOR_ARRAY "#ce5c00"
#define COLOR_OBJECT "#8f5902"
#define COLOR_RESOURCE "#2e3436"
static int xdebug_array_element_export_html(zval *zv_nptr, zend_ulong index_key, zend_string *hash_key, int level, xdebug_str *str, int debug_zval, xdebug_var_export_options *options)
{
zval **zv = &zv_nptr;
size_t newlen;
char *tmp_str;
if (options->runtime[level].current_element_nr >= options->runtime[level].start_element_nr &&
options->runtime[level].current_element_nr < options->runtime[level].end_element_nr)
{
xdebug_str_add(str, xdebug_sprintf("%*s", (level * 4) - 2, ""), 1);
if (HASH_KEY_IS_NUMERIC(hash_key)) { /* numeric key */
xdebug_str_add(str, xdebug_sprintf(XDEBUG_INT_FMT " <font color='%s'>=></font> ", index_key, COLOR_POINTER), 1);
} else { /* string key */
xdebug_str_addl(str, "'", 1, 0);
tmp_str = xdebug_xmlize((char*) HASH_APPLY_KEY_VAL(hash_key), HASH_APPLY_KEY_LEN(hash_key) - 1, &newlen);
xdebug_str_addl(str, tmp_str, newlen, 0);
efree(tmp_str);
xdebug_str_add(str, xdebug_sprintf("' <font color='%s'>=></font> ", COLOR_POINTER), 1);
}
xdebug_var_export_html(zv, str, level + 1, debug_zval, options);
}
if (options->runtime[level].current_element_nr == options->runtime[level].end_element_nr) {
xdebug_str_add(str, xdebug_sprintf("%*s", (level * 4) - 2, ""), 1);
xdebug_str_addl(str, "<i>more elements...</i>\n", 24, 0);
}
options->runtime[level].current_element_nr++;
return 0;
}
static int xdebug_object_element_export_html(zval *object, zval *zv_nptr, zend_ulong index_key, zend_string *hash_key, int level, xdebug_str *str, int debug_zval, xdebug_var_export_options *options, char *class_name)
{
zval **zv = &zv_nptr;
if (options->runtime[level].current_element_nr >= options->runtime[level].start_element_nr &&
options->runtime[level].current_element_nr < options->runtime[level].end_element_nr)
{
xdebug_str_add(str, xdebug_sprintf("%*s", (level * 4) - 2, ""), 1);
if (!HASH_KEY_IS_NUMERIC(hash_key)) {
char *prop_class_name;
xdebug_str *property_name;
xdebug_str *property_type = NULL;
const char *modifier;
#if PHP_VERSION_ID >= 70400
property_type = xdebug_get_property_type(object, zv_nptr);
#endif
property_name = xdebug_get_property_info((char*) HASH_APPLY_KEY_VAL(hash_key), HASH_APPLY_KEY_LEN(hash_key), &modifier, &prop_class_name);
xdebug_str_add(str, xdebug_sprintf("<i>%s</i> ", modifier), 1);
if (property_type) {
xdebug_str_add(str, xdebug_sprintf("<i>%s</i> ", property_type->d), 1);
}
xdebug_str_addc(str, '\'');
xdebug_str_add_str(str, property_name);
if (strcmp(modifier, "private") != 0 || strcmp(class_name, prop_class_name) == 0) {
xdebug_str_add(str, xdebug_sprintf("' <font color='%s'>=></font> ", COLOR_POINTER), 1);
} else {
xdebug_str_add(str, xdebug_sprintf("' <small>(%s)</small> <font color='%s'>=></font> ", prop_class_name, COLOR_POINTER), 1);
}
if (property_type) {
xdebug_str_free(property_type);
}
xdebug_str_free(property_name);
xdfree(prop_class_name);
} else {
xdebug_str_add(str, xdebug_sprintf("<i>public</i> " XDEBUG_INT_FMT " <font color='%s'>=></font> ", index_key, COLOR_POINTER), 1);
}
xdebug_var_export_html(zv, str, level + 1, debug_zval, options);
}
if (options->runtime[level].current_element_nr == options->runtime[level].end_element_nr) {
xdebug_str_add(str, xdebug_sprintf("%*s", (level * 4) - 2, ""), 1);
xdebug_str_addl(str, "<i>more elements...</i>\n", 24, 0);
}
options->runtime[level].current_element_nr++;
return 0;
}
void xdebug_var_export_html(zval **struc, xdebug_str *str, int level, int debug_zval, xdebug_var_export_options *options)
{
HashTable *myht;
char* tmp_str;
size_t newlen;
#if PHP_VERSION_ID < 70400
int is_temp;
#endif
zend_ulong num;
zend_string *key;
zval *val;
zval *tmpz;
if (debug_zval) {
xdebug_add_variable_attributes(str, *struc, XDEBUG_VAR_ATTR_HTML);
}
if (Z_TYPE_P(*struc) == IS_INDIRECT) {
tmpz = Z_INDIRECT_P(*struc);
struc = &tmpz;
}
if (Z_TYPE_P(*struc) == IS_REFERENCE) {
tmpz = &((*struc)->value.ref->val);
struc = &tmpz;
}
switch (Z_TYPE_P(*struc)) {
case IS_TRUE:
case IS_FALSE:
xdebug_str_add(str, xdebug_sprintf("<small>boolean</small> <font color='%s'>%s</font>", COLOR_BOOL, Z_TYPE_P(*struc) == IS_TRUE ? "true" : "false"), 1);
break;
case IS_NULL:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>null</font>", COLOR_NULL), 1);
break;
case IS_LONG:
xdebug_str_add(str, xdebug_sprintf("<small>int</small> <font color='%s'>" XDEBUG_INT_FMT "</font>", COLOR_LONG, Z_LVAL_P(*struc)), 1);
break;
case IS_DOUBLE:
xdebug_str_add(str, xdebug_sprintf("<small>float</small> <font color='%s'>%.*G</font>", COLOR_DOUBLE, (int) EG(precision), Z_DVAL_P(*struc)), 1);
break;
case IS_STRING:
xdebug_str_add(str, xdebug_sprintf("<small>string</small> <font color='%s'>'", COLOR_STRING), 1);
if ((size_t) Z_STRLEN_P(*struc) > (size_t) options->max_data) {
tmp_str = xdebug_xmlize(Z_STRVAL_P(*struc), options->max_data, &newlen);
xdebug_str_addl(str, tmp_str, newlen, 0);
efree(tmp_str);
xdebug_str_addl(str, "'...</font>", 11, 0);
} else {
tmp_str = xdebug_xmlize(Z_STRVAL_P(*struc), Z_STRLEN_P(*struc), &newlen);
xdebug_str_addl(str, tmp_str, newlen, 0);
efree(tmp_str);
xdebug_str_addl(str, "'</font>", 8, 0);
}
xdebug_str_add(str, xdebug_sprintf(" <i>(length=%d)</i>", Z_STRLEN_P(*struc)), 1);
break;
case IS_ARRAY:
myht = Z_ARRVAL_P(*struc);
xdebug_str_add(str, xdebug_sprintf("\n%*s", (level - 1) * 4, ""), 1);
if (!xdebug_zend_hash_is_recursive(myht)) {
xdebug_str_add(str, xdebug_sprintf("<b>array</b> <i>(size=%d)</i>\n", myht->nNumOfElements), 1);
if (level <= options->max_depth) {
if (myht->nNumOfElements) {
options->runtime[level].current_element_nr = 0;
options->runtime[level].start_element_nr = 0;
options->runtime[level].end_element_nr = options->max_children;
xdebug_zend_hash_apply_protection_begin(myht);
ZEND_HASH_FOREACH_KEY_VAL_IND(myht, num, key, val) {
xdebug_array_element_export_html(val, num, key, level, str, debug_zval, options);
} ZEND_HASH_FOREACH_END();
xdebug_zend_hash_apply_protection_end(myht);
} else {
xdebug_str_add(str, xdebug_sprintf("%*s", (level * 4) - 2, ""), 1);
xdebug_str_add(str, xdebug_sprintf("<i><font color='%s'>empty</font></i>\n", COLOR_EMPTY), 1);
}
} else {
xdebug_str_add(str, xdebug_sprintf("%*s...\n", (level * 4) - 2, ""), 1);
}
} else {
xdebug_str_addl(str, "<i>&</i><b>array</b>\n", 21, 0);
}
break;
case IS_OBJECT:
#if PHP_VERSION_ID >= 70400
myht = xdebug_objdebug_pp(struc);
#else
myht = xdebug_objdebug_pp(struc, &is_temp);
#endif
xdebug_str_add(str, xdebug_sprintf("\n%*s", (level - 1) * 4, ""), 1);
if (!myht || !xdebug_zend_hash_is_recursive(myht)) {
char *class_name = (char*) STR_NAME_VAL(Z_OBJCE_P(*struc)->name);
xdebug_str_add(str, xdebug_sprintf("<b>object</b>(<i>%s</i>)", class_name), 1);
xdebug_str_add(str, xdebug_sprintf("[<i>%d</i>]\n", Z_OBJ_HANDLE_P(*struc)), 1);
if (myht && (level <= options->max_depth)) {
options->runtime[level].current_element_nr = 0;
options->runtime[level].start_element_nr = 0;
options->runtime[level].end_element_nr = options->max_children;
xdebug_zend_hash_apply_protection_begin(myht);
ZEND_HASH_FOREACH_KEY_VAL(myht, num, key, val) {
xdebug_object_element_export_html(*struc, val, num, key, level, str, debug_zval, options, class_name);
} ZEND_HASH_FOREACH_END();
xdebug_zend_hash_apply_protection_end(myht);
} else {
xdebug_str_add(str, xdebug_sprintf("%*s...\n", (level * 4) - 2, ""), 1);
}
} else {
xdebug_str_add(str, xdebug_sprintf("<i>&</i><b>object</b>(<i>%s</i>)", STR_NAME_VAL(Z_OBJCE_P(*struc)->name)), 1);
xdebug_str_add(str, xdebug_sprintf("[<i>%d</i>]\n", Z_OBJ_HANDLE_P(*struc)), 1);
}
#if PHP_VERSION_ID >= 70400
zend_release_properties(myht);
#else
xdebug_var_maybe_destroy_ht(myht, is_temp);
#endif
break;
case IS_RESOURCE: {
char *type_name;
type_name = (char *) zend_rsrc_list_get_rsrc_type(Z_RES_P(*struc));
xdebug_str_add(str, xdebug_sprintf("<b>resource</b>(<i>%ld</i><font color='%s'>,</font> <i>%s</i>)", Z_RES_P(*struc)->handle, COLOR_RESOURCE, type_name ? type_name : "Unknown"), 1);
break;
}
case IS_UNDEF:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>*uninitialized*</font>", COLOR_NULL), 0);
break;
default:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>NFC</font>", COLOR_NULL), 0);
break;
}
if (Z_TYPE_P(*struc) != IS_ARRAY && Z_TYPE_P(*struc) != IS_OBJECT) {
xdebug_str_addl(str, "\n", 1, 0);
}
}
xdebug_str* xdebug_get_zval_value_html(char *name, zval *val, int debug_zval, xdebug_var_export_options *options)
{
xdebug_str *str = xdebug_str_new();
int default_options = 0;
if (!options) {
options = xdebug_var_export_options_from_ini();
default_options = 1;
}
xdebug_str_addl(str, "<pre class='xdebug-var-dump' dir='ltr'>", 39, 0);
if (options->show_location && !debug_zval) {
char *formatted_filename;
xdebug_format_filename(&formatted_filename, XINI_BASE(filename_format), "%f", zend_get_executed_filename());
if (strlen(XINI_BASE(file_link_format)) > 0) {
char *file_link;
xdebug_format_file_link(&file_link, zend_get_executed_filename(), zend_get_executed_lineno());
xdebug_str_add(str, xdebug_sprintf("\n<small><a href='%s'>%s:%d</a>:</small>", file_link, formatted_filename, zend_get_executed_lineno()), 1);
xdfree(file_link);
} else {
xdebug_str_add(str, xdebug_sprintf("\n<small>%s:%d:</small>", formatted_filename, zend_get_executed_lineno()), 1);
}
xdfree(formatted_filename);
}
xdebug_var_export_html(&val, str, 1, debug_zval, options);
xdebug_str_addl(str, "</pre>", 6, 0);
if (default_options) {
xdfree(options->runtime);
xdfree(options);
}
return str;
}
static void xdebug_var_synopsis_html(zval **struc, xdebug_str *str, int level, int debug_zval, xdebug_var_export_options *options)
{
HashTable *myht;
zval *tmpz;
if (debug_zval) {
xdebug_add_variable_attributes(str, *struc, XDEBUG_VAR_ATTR_HTML);
}
if (Z_TYPE_P(*struc) == IS_REFERENCE) {
tmpz = &((*struc)->value.ref->val);
struc = &tmpz;
}
switch (Z_TYPE_P(*struc)) {
case IS_TRUE:
case IS_FALSE:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>%s</font>", COLOR_BOOL, Z_TYPE_P(*struc) == IS_TRUE ? "true" : "false"), 1);
break;
case IS_NULL:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>null</font>", COLOR_NULL), 1);
break;
case IS_LONG:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>long</font>", COLOR_LONG), 1);
break;
case IS_DOUBLE:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>double</font>", COLOR_DOUBLE), 1);
break;
case IS_STRING:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>string(%d)</font>", COLOR_STRING, Z_STRLEN_P(*struc)), 1);
break;
case IS_ARRAY:
myht = Z_ARRVAL_P(*struc);
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>array(%d)</font>", COLOR_ARRAY, myht->nNumOfElements), 1);
break;
case IS_OBJECT:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>object(%s)", COLOR_OBJECT, STR_NAME_VAL(Z_OBJCE_P(*struc)->name)), 1);
xdebug_str_add(str, xdebug_sprintf("[%d]", Z_OBJ_HANDLE_P(*struc)), 1);
xdebug_str_addl(str, "</font>", 7, 0);
break;
case IS_RESOURCE: {
char *type_name;
type_name = (char *) zend_rsrc_list_get_rsrc_type(Z_RES_P(*struc));
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>resource(%ld, %s)</font>", COLOR_RESOURCE, Z_RES_P(*struc)->handle, type_name ? type_name : "Unknown"), 1);
break;
}
case IS_UNDEF:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>*uninitialized*</font>", COLOR_NULL), 0);
break;
default:
xdebug_str_add(str, xdebug_sprintf("<font color='%s'>NFC</font>", COLOR_NULL), 0);
break;
}
}
xdebug_str* xdebug_get_zval_synopsis_html(const char *name, zval *val, int debug_zval, xdebug_var_export_options *options)
{
xdebug_str *str = xdebug_str_new();
int default_options = 0;
if (!options) {
options = xdebug_var_export_options_from_ini();
default_options = 1;
}
xdebug_var_synopsis_html(&val, str, 1, debug_zval, options);
if (default_options) {
xdfree(options->runtime);
xdfree(options);
}
return str;
}
| 36.344473 | 216 | 0.651577 | [
"object"
] |
9e43cd30d021e7211b274ad441fb930a1253b1dc | 6,791 | h | C | src/ihm_format.h | danpf/python-ihm | cbe681ee56701202769a401ee6380ccab5fcae84 | [
"MIT"
] | 12 | 2018-03-05T21:52:28.000Z | 2021-07-02T05:48:01.000Z | src/ihm_format.h | danpf/python-ihm | cbe681ee56701202769a401ee6380ccab5fcae84 | [
"MIT"
] | 73 | 2018-03-20T19:53:37.000Z | 2022-03-30T02:31:58.000Z | src/ihm_format.h | danpf/python-ihm | cbe681ee56701202769a401ee6380ccab5fcae84 | [
"MIT"
] | 5 | 2019-04-11T07:49:16.000Z | 2022-03-03T10:57:25.000Z | /** \file ihm_format.h Routines for handling mmCIF format files.
*
* The file is read sequentially. All values for desired keywords in
* desired categories are collected (other parts of the file are ignored)
* At the end of the file and each save frame a callback function for
* each category is called to process the data. In the case of mmCIF loops,
* this callback will be called multiple times, one for each entry in the loop.
*/
#ifndef IHM_FORMAT_H
#define IHM_FORMAT_H
#include <stdlib.h> /* For size_t */
#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#else
#include <unistd.h> /* For ssize_t */
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* IHM error types */
typedef enum {
IHM_ERROR_VALUE, /* Bad value */
IHM_ERROR_IO, /* Input/output error */
IHM_ERROR_FILE_FORMAT, /* File format error */
} IHMErrorCode;
/* Error reported by IHM functions. The caller is responsible for freeing
the memory used by this struct by calling ihm_error_free(). */
struct ihm_error {
/* The type of error */
IHMErrorCode code;
/* Human-readable error message */
char *msg;
};
/* Free the memory used by an ihm_error */
void ihm_error_free(struct ihm_error *err);
/* Set the error indicator */
void ihm_error_set(struct ihm_error **err, IHMErrorCode code,
const char *format, ...);
/* A keyword in an mmCIF file. Holds a description of its format and any
value read from the file. */
struct ihm_keyword {
char *name;
/* Last value read from the file */
char *data;
/* If TRUE, we own the memory for data */
int own_data;
/* TRUE iff this keyword is in the file (not necessarily with a value) */
int in_file;
/* TRUE iff the keyword is in the file but the value is omitted ('.') */
int omitted;
/* TRUE iff the keyword is in the file but the value is unknown ('?') */
int unknown;
};
/* Opaque types */
struct ihm_reader;
struct ihm_category;
/* Callback for mmCIF category data. Should set err on failure */
typedef void (*ihm_category_callback)(struct ihm_reader *reader,
void *data, struct ihm_error **err);
/* Callback for unknown mmCIF categories. Should set err on failure */
typedef void (*ihm_unknown_category_callback)(struct ihm_reader *reader,
const char *category, int linenum,
void *data,
struct ihm_error **err);
/* Callback for unknown mmCIF keywords. Should set err on failure */
typedef void (*ihm_unknown_keyword_callback)(struct ihm_reader *reader,
const char *category,
const char *keyword, int linenum,
void *data,
struct ihm_error **err);
/* Callback to free arbitrary data */
typedef void (*ihm_free_callback)(void *data);
/* Make a new struct ihm_category and add it to the reader. */
struct ihm_category *ihm_category_new(struct ihm_reader *reader,
const char *name,
ihm_category_callback data_callback,
ihm_category_callback end_frame_callback,
ihm_category_callback finalize_callback,
void *data, ihm_free_callback free_func);
/* Set a callback for unknown categories.
The given callback is called whenever a category is encountered in the
file that is not handled (by ihm_category_new).
*/
void ihm_reader_unknown_category_callback_set(struct ihm_reader *reader,
ihm_unknown_category_callback callback,
void *data, ihm_free_callback free_func);
/* Set a callback for unknown keywords.
The given callback is called whenever a keyword is encountered in the
file that is not handled (within a category that is handled by
ihm_category_new).
*/
void ihm_reader_unknown_keyword_callback_set(struct ihm_reader *reader,
ihm_unknown_keyword_callback callback,
void *data, ihm_free_callback free_func);
/* Remove all categories from the reader.
This also removes any unknown category or keyword callbacks.
*/
void ihm_reader_remove_all_categories(struct ihm_reader *reader);
/* Add a new struct ihm_keyword to a category. */
struct ihm_keyword *ihm_keyword_new(struct ihm_category *category,
const char *name);
struct ihm_file;
struct ihm_string;
/* Read data into the ihm_file buffer.
Return the number of bytes read (0 on EOF), or -1 (and sets err) on failure.
*/
typedef ssize_t (*ihm_file_read_callback)(char *buffer, size_t buffer_len,
void *data, struct ihm_error **err);
/* Track a file (or filelike object) that the data is read from */
struct ihm_file {
/* Raw data read from the file */
struct ihm_string *buffer;
/* Offset into buffer of the start of the current line */
size_t line_start;
/* Offset into buffer of the start of the next line, or line_start if the
line hasn't been read yet */
size_t next_line_start;
/* Callback function to read more data into buffer */
ihm_file_read_callback read_callback;
/* Data to pass to callback function */
void *data;
/* Function to free callback_data (or NULL) */
ihm_free_callback free_func;
};
/* Make a new ihm_file, used to handle reading data from a file.
`read_callback` is used to read a chunk of data from the file;
`data` is arbitrary data that is passed to the read callback;
`free_func` is used to do any necessary cleanup of `data` when
the ihm_file structure is freed. */
struct ihm_file *ihm_file_new(ihm_file_read_callback read_callback,
void *data, ihm_free_callback free_func);
/* Make a new ihm_file that will read data from the given file descriptor */
struct ihm_file *ihm_file_new_from_fd(int fd);
/* Make a new struct ihm_reader */
struct ihm_reader *ihm_reader_new(struct ihm_file *fh);
/* Free memory used by a struct ihm_reader.
Note that this does not close the
underlying file descriptor or object that is wrapped by ihm_file. */
void ihm_reader_free(struct ihm_reader *reader);
/* Read a data block from an mmCIF file.
*more_data is set TRUE iff more data blocks are available after this one.
Return FALSE and set err on error. */
int ihm_read_file(struct ihm_reader *reader, int *more_data,
struct ihm_error **err);
#ifdef __cplusplus
}
#endif
#endif /* IHM_FORMAT_H */
| 38.151685 | 80 | 0.655574 | [
"object"
] |
9e456e6ab7a4c1c87f4e888e9892d314351142f7 | 6,018 | h | C | src/common/dns_utils.h | italocoin-project/italocoin | 72600531df1d1e3b667ce8699eb182beb6353554 | [
"BSD-3-Clause"
] | 3 | 2018-03-14T16:50:15.000Z | 2018-08-07T07:48:06.000Z | src/common/dns_utils.h | italocoin-project/italocoin | 72600531df1d1e3b667ce8699eb182beb6353554 | [
"BSD-3-Clause"
] | 2 | 2018-03-22T15:54:15.000Z | 2018-04-20T07:36:03.000Z | src/common/dns_utils.h | italocoin-project/italocoin-old | 72600531df1d1e3b667ce8699eb182beb6353554 | [
"BSD-3-Clause"
] | 3 | 2018-06-04T08:15:30.000Z | 2018-07-15T08:12:08.000Z | // Copyright (c) 2018 The Monero And Italocoin Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <vector>
#include <string>
#include <functional>
namespace tools
{
// RFC-defined record types and classes for DNS (RFC 1035, RFC 3596; see the
// IANA "DNS Parameters" registry for the authoritative list).
const static int DNS_CLASS_IN = 1;    // Internet class (RFC 1035)
const static int DNS_TYPE_A = 1;      // IPv4 host address (RFC 1035)
const static int DNS_TYPE_TXT = 16;   // free-form text record (RFC 1035)
// BUGFIX: AAAA (IPv6 host address) is record type 28 per RFC 3596, not 8.
// Type 8 is the obsolete MG record, so querying with 8 could never return
// IPv6 addresses from get_ipv6().
const static int DNS_TYPE_AAAA = 28;
// Opaque backend-resolver state owned by DNSResolver; defined in the .cpp.
struct DNSResolverData;
/**
 * @brief Provides high-level access to DNS resolution
 *
 * This class is designed to provide a high-level abstraction to DNS resolution
 * functionality, including access to TXT records and such. It will also
 * handle DNSSEC validation of the results.
 */
class DNSResolver
{
private:
  /**
   * @brief Constructs an instance of DNSResolver
   *
   * Constructs a class instance and does setup stuff for the backend resolver.
   * Private: obtain an object via instance() (shared singleton) or create().
   */
  DNSResolver();

public:
  /**
   * @brief takes care of freeing C pointers and such
   */
  ~DNSResolver();

  /**
   * @brief gets ipv4 addresses from DNS query of a URL
   *
   * returns a vector of all IPv4 "A" records for given URL.
   * If no "A" records found, returns an empty vector.
   *
   * @param url A string containing a URL to query for
   * @param dnssec_available set to true when the answer carries DNSSEC data
   * @param dnssec_valid set to true when the DNSSEC data validated successfully
   *
   * @return vector of strings containing ipv4 addresses
   */
  std::vector<std::string> get_ipv4(const std::string& url, bool& dnssec_available, bool& dnssec_valid);

  /**
   * @brief gets ipv6 addresses from DNS query
   *
   * returns a vector of all IPv6 "AAAA" records for given URL.
   * If no "AAAA" records found, returns an empty vector.
   *
   * @param url A string containing a URL to query for
   * @param dnssec_available set to true when the answer carries DNSSEC data
   * @param dnssec_valid set to true when the DNSSEC data validated successfully
   *
   * @return vector of strings containing ipv6 addresses
   */
  std::vector<std::string> get_ipv6(const std::string& url, bool& dnssec_available, bool& dnssec_valid);

  /**
   * @brief gets all TXT records from a DNS query for the supplied URL;
   * if no TXT record present returns an empty vector.
   *
   * @param url A string containing a URL to query for
   * @param dnssec_available set to true when the answer carries DNSSEC data
   * @param dnssec_valid set to true when the DNSSEC data validated successfully
   *
   * @return A vector of strings containing a TXT record; or an empty vector
   */
  // TODO: modify this to accommodate DNSSEC
  std::vector<std::string> get_txt_record(const std::string& url, bool& dnssec_available, bool& dnssec_valid);

  /**
   * @brief Gets a DNS address from OpenAlias format
   *
   * If the address looks good, but contains one @ symbol, replace that with a .
   * e.g. donate@getitalocoin.org becomes donate.getitalocoin.org
   *
   * @param oa_addr OpenAlias address
   *
   * @return dns_addr DNS address
   */
  std::string get_dns_format_from_oa_address(const std::string& oa_addr);

  /**
   * @brief Gets the singleton instance of DNSResolver
   *
   * NOTE(review): thread-safety of the singleton is not visible from this
   * header — confirm in the implementation before using from multiple threads.
   *
   * @return returns a pointer to the singleton
   */
  static DNSResolver& instance();

  /**
   * @brief Gets a new instance of DNSResolver
   *
   * @return returns a pointer to the new object
   */
  static DNSResolver create();

private:
  /**
   * @brief gets all records of a given type from a DNS query for the supplied URL;
   * if no such record is present returns an empty vector.
   *
   * Shared backend for get_ipv4 / get_ipv6 / get_txt_record.
   *
   * @param url A string containing a URL to query for
   * @param record_type the record type to retrieve (DNS_TYPE_A, etc)
   * @param reader a function that converts a record data to a string
   * @param dnssec_available set to true when the answer carries DNSSEC data
   * @param dnssec_valid set to true when the DNSSEC data validated successfully
   *
   * @return A vector of strings containing the requested record; or an empty vector
   */
  // TODO: modify this to accommodate DNSSEC
  std::vector<std::string> get_record(const std::string& url, int record_type, std::string (*reader)(const char *,size_t), bool& dnssec_available, bool& dnssec_valid);

  /**
   * @brief Checks a string to see if it looks like a URL
   *
   * @param addr the string to be checked
   *
   * @return true if it looks enough like a URL, false if not
   */
  bool check_address_syntax(const char *addr) const;

  DNSResolverData *m_data; /**< opaque backend resolver state (owned; freed in the destructor) */
}; // class DNSResolver
namespace dns_utils
{
// Parses an address out of a single TXT record payload string.
std::string address_from_txt_record(const std::string& s);
// Resolves an OpenAlias URL to one or more addresses; dnssec_valid reports
// whether the answers were DNSSEC-validated.
std::vector<std::string> addresses_from_url(const std::string& url, bool& dnssec_valid);
// Resolves a URL to a single account address string, asking the caller to
// confirm the choice via the confirm_dns callback (url, candidates, dnssec_valid).
std::string get_account_address_as_str_from_url(const std::string& url, bool& dnssec_valid, std::function<std::string(const std::string&, const std::vector<std::string>&, bool)> confirm_dns);
// Fetches TXT records for each URL in dns_urls into records; returns success.
// NOTE(review): exact success criteria (all vs. quorum of URLs) is defined in
// the implementation — confirm before relying on it.
bool load_txt_records_from_dns(std::vector<std::string> &records, const std::vector<std::string> &dns_urls);
// Parses a DNS_PUBLIC-style specification string into a list of resolvers.
std::vector<std::string> parse_dns_public(const char *s);
} // namespace tools::dns_utils
} // namespace tools
| 34.388571 | 191 | 0.71901 | [
"object",
"vector"
] |
9e467a91d2f8c5519ebbf792dfa5715b272f6bbe | 4,155 | h | C | ATKSideChainExpander/app_wrapper/app_main.h | rockyplum/atk | 6ca0f1edae8cfeee7404e82df82b519f01af3d1a | [
"BSD-3-Clause"
] | 43 | 2015-01-05T13:41:34.000Z | 2022-02-25T06:56:05.000Z | ATKSideChainExpander/app_wrapper/app_main.h | rockyplum/atk | 6ca0f1edae8cfeee7404e82df82b519f01af3d1a | [
"BSD-3-Clause"
] | null | null | null | ATKSideChainExpander/app_wrapper/app_main.h | rockyplum/atk | 6ca0f1edae8cfeee7404e82df82b519f01af3d1a | [
"BSD-3-Clause"
] | 9 | 2015-10-14T08:19:02.000Z | 2021-01-21T03:07:13.000Z | #ifndef _IPLUGAPP_APP_MAIN_H_
#define _IPLUGAPP_APP_MAIN_H_
#include "IPlugOSDetect.h"
/*
Standalone osx/win app wrapper for iPlug, using SWELL
Oli Larkin 2012
Notes:
App settings are stored in a .ini file. The location is as follows:
Windows7: C:\Users\USERNAME\AppData\Local\ATKSideChainExpander\settings.ini
Windows XP/Vista: C:\Documents and Settings\USERNAME\Local Settings\Application Data\ATKSideChainExpander\settings.ini
OSX: /Users/USERNAME/Library/Application\ Support/ATKSideChainExpander/settings.ini
*/
#ifdef OS_WIN
#include <windows.h>
#include <commctrl.h>
#define DEFAULT_INPUT_DEV "Default Device"
#define DEFAULT_OUTPUT_DEV "Default Device"
#define DAC_DS 0
#define DAC_ASIO 1
#elif defined OS_OSX
#include "swell.h"
#define SLEEP( milliseconds ) usleep( (unsigned long) (milliseconds * 1000.0) )
#define DEFAULT_INPUT_DEV "Built-in Input"
#define DEFAULT_OUTPUT_DEV "Built-in Output"
#define DAC_COREAUDIO 0
// #define DAC_JACK 1
#endif
#include "wdltypes.h"
#include "RtAudio.h"
#include "RtMidi.h"
#include <string>
#include <vector>
#include "../ATKSideChainExpander.h" // change this to match your iplug plugin .h file
typedef unsigned short UInt16;
struct AppState
{
// on osx core audio 0 or jack 1
// on windows DS 0 or ASIO 1
UInt16 mAudioDriverType;
// strings
char mAudioInDev[100];
char mAudioOutDev[100];
char mAudioSR[100];
char mAudioIOVS[100];
char mAudioSigVS[100];
UInt16 mAudioInChanL;
UInt16 mAudioInChanR;
UInt16 mAudioOutChanL;
UInt16 mAudioOutChanR;
UInt16 mAudioInIsMono;
// strings containing the names of the midi devices
char mMidiInDev[100];
char mMidiOutDev[100];
UInt16 mMidiInChan;
UInt16 mMidiOutChan;
AppState():
mAudioDriverType(0), // DS / CoreAudio by default
mAudioInChanL(1),
mAudioInChanR(2),
mAudioOutChanL(1),
mAudioOutChanR(2),
mMidiInChan(0),
mMidiOutChan(0)
{
strcpy(mAudioInDev, DEFAULT_INPUT_DEV);
strcpy(mAudioOutDev, DEFAULT_OUTPUT_DEV);
strcpy(mAudioSR, "44100");
strcpy(mAudioIOVS, "512");
strcpy(mAudioSigVS, "32");
strcpy(mMidiInDev, "off");
strcpy(mMidiOutDev, "off");
}
};
// Dialog procedures for the main window and the audio/MIDI preferences dialog.
extern WDL_DLGRET MainDlgProc(HWND hwndDlg, UINT uMsg, WPARAM wParam, LPARAM lParam);
extern WDL_DLGRET PreferencesDlgProc(HWND hwndDlg, UINT uMsg, WPARAM wParam, LPARAM lParam);
extern HINSTANCE gHINST; // application instance handle
extern HWND gHWND; // main window handle
extern UINT gScrollMessage;
extern IPlug* gPluginInstance; // The iplug plugin instance
// Audio device enumeration helpers.
extern std::string GetAudioDeviceName(int idx);
extern int GetAudioDeviceID(char* deviceNameToTest);
extern void ProbeAudioIO(); // (re)scan available audio devices
// Opens the audio streams with the given devices / rate / buffer / channels.
extern bool InitialiseAudio(unsigned int inId,
unsigned int outId,
unsigned int sr,
unsigned int iovs,
unsigned int chnls,
unsigned int inChanL,
unsigned int outChanL
);
// Compare two AppState snapshots for audio / MIDI relevant differences.
extern bool AudioSettingsInStateAreEqual(AppState* os, AppState* ns);
extern bool MIDISettingsInStateAreEqual(AppState* os, AppState* ns);
// Apply settings changes, restarting the driver / streams as needed.
extern bool TryToChangeAudioDriverType();
extern bool TryToChangeAudio();
extern bool ChooseMidiInput(const char* pPortName);
extern bool ChooseMidiOutput(const char* pPortName);
extern bool AttachGUI();
extern RtAudio* gDAC; // audio backend
extern RtMidiIn *gMidiIn;
extern RtMidiOut *gMidiOut;
extern AppState *gState;
extern AppState *gTempState; // The state is copied here when the pref dialog is opened, and restored if cancel is pressed
extern AppState *gActiveState; // When the audio driver is started the current state is copied here so that if OK is pressed after APPLY nothing is changed
extern unsigned int gSigVS;
extern unsigned int gBufIndex; // index for signal vector, loops from 0 to gSigVS
extern char *gINIPath; // path of ini file
extern void UpdateINI(); // write the current gState back to the ini file
extern std::vector<unsigned int> gAudioInputDevs;
extern std::vector<unsigned int> gAudioOutputDevs;
extern std::vector<std::string> gMIDIInputDevNames;
extern std::vector<std::string> gMIDIOutputDevNames;
| 28.074324 | 155 | 0.727316 | [
"vector"
] |
9e487ab4e3d3098c9c89a115ac919d6d4c09b51a | 39,184 | h | C | jerry-core/parser/js/js-parser-internal.h | acrop/jerryscript | 2faafa4cfcf3be3acfb2e935937aa8082fb4b81d | [
"Apache-2.0"
] | null | null | null | jerry-core/parser/js/js-parser-internal.h | acrop/jerryscript | 2faafa4cfcf3be3acfb2e935937aa8082fb4b81d | [
"Apache-2.0"
] | null | null | null | jerry-core/parser/js/js-parser-internal.h | acrop/jerryscript | 2faafa4cfcf3be3acfb2e935937aa8082fb4b81d | [
"Apache-2.0"
] | null | null | null | /* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JS_PARSER_INTERNAL_H
#define JS_PARSER_INTERNAL_H
#include "common.h"
#include "byte-code.h"
#include "debugger.h"
#include "js-parser.h"
#include "js-parser-limits.h"
#include "js-lexer.h"
#include "js-scanner.h"
#include "ecma-module.h"
/** \addtogroup parser Parser
* @{
*
* \addtogroup jsparser JavaScript
* @{
*
* \addtogroup jsparser_internals Internals
* @{
*/
/**
 * General parser flags.
 *
 * Stored in parser_context_t::status_flags; they describe both the kind of
 * code being parsed and which runtime objects (arguments object, lexical
 * environment, ...) the generated byte code will need.
 */
typedef enum
{
  PARSER_IS_STRICT = (1u << 0), /**< strict mode code */
  PARSER_IS_FUNCTION = (1u << 1), /**< function body is parsed */
  PARSER_IS_CLOSURE = (1u << 2), /**< function body is encapsulated in {} block */
  PARSER_IS_FUNC_EXPRESSION = (1u << 3), /**< a function expression is parsed */
  PARSER_IS_PROPERTY_GETTER = (1u << 4), /**< a property getter function is parsed */
  PARSER_IS_PROPERTY_SETTER = (1u << 5), /**< a property setter function is parsed */
  PARSER_HAS_NON_STRICT_ARG = (1u << 6), /**< the function has arguments which
                                          *   are not supported in strict mode */
  PARSER_ARGUMENTS_NEEDED = (1u << 7), /**< arguments object must be created */
  PARSER_LEXICAL_ENV_NEEDED = (1u << 8), /**< lexical environment object must be created */
  PARSER_INSIDE_WITH = (1u << 9), /**< code block is inside a with statement */
  PARSER_NO_END_LABEL = (1u << 10), /**< return instruction must be inserted
                                     *   after the last byte code */
  PARSER_DEBUGGER_BREAKPOINT_APPENDED = (1u << 11), /**< pending (unsent) breakpoint
                                                     *   info is available */
#if ENABLED (JERRY_ESNEXT)
  PARSER_LEXICAL_BLOCK_NEEDED = (1u << 12), /**< global script: needs a lexical environment for let and const
                                             *   function: needs a lexical environment for arguments */
  PARSER_IS_ARROW_FUNCTION = (1u << 13), /**< an arrow function is parsed */
  PARSER_IS_GENERATOR_FUNCTION = (1u << 14), /**< a generator function is parsed */
  PARSER_IS_ASYNC_FUNCTION = (1u << 15), /**< an async function is parsed */
  PARSER_DISALLOW_AWAIT_YIELD = (1u << 16), /**< throw SyntaxError for await / yield keywords */
  PARSER_FUNCTION_IS_PARSING_ARGS = (1u << 17), /**< set when parsing function arguments */
  PARSER_FUNCTION_HAS_COMPLEX_ARGUMENT = (1u << 18), /**< function has complex (ES2015+) argument definition */
  PARSER_FUNCTION_HAS_REST_PARAM = (1u << 19), /**< function has rest parameter */
  PARSER_CLASS_CONSTRUCTOR = (1u << 20), /**< a class constructor is parsed
                                          *   Note: PARSER_ALLOW_SUPER must be present */
  /* These four status flags must be in this order. See PARSER_SAVED_FLAGS_OFFSET.
   * (They are saved/restored as a group when entering a nested function.) */
  PARSER_ALLOW_SUPER = (1u << 21), /**< allow super property access */
  PARSER_ALLOW_SUPER_CALL = (1u << 22), /**< allow super constructor call
                                         *   Note: PARSER_CLASS_CONSTRUCTOR must be present */
  PARSER_INSIDE_CLASS_FIELD = (1u << 23), /**< a class field is being parsed */
  PARSER_ALLOW_NEW_TARGET = (1u << 24), /**< allow new.target parsing in the current context */
  PARSER_IS_METHOD = (1u << 25), /**< method is parsed */
#endif /* ENABLED (JERRY_ESNEXT) */
#if ENABLED (JERRY_MODULE_SYSTEM)
  PARSER_MODULE_DEFAULT_CLASS_OR_FUNC = (1u << 26), /**< parsing a function or class default export */
  PARSER_MODULE_STORE_IDENT = (1u << 27), /**< store identifier of the current export statement */
#endif /* ENABLED (JERRY_MODULE_SYSTEM) */
  PARSER_HAS_LATE_LIT_INIT = (1u << 30), /**< there are identifier or string literals which construction
                                          *   is postponed after the local parser data is freed */
#ifndef JERRY_NDEBUG
  /* Debug builds reuse the PARSER_HAS_LATE_LIT_INIT bit for this flag. */
  PARSER_SCANNING_SUCCESSFUL = PARSER_HAS_LATE_LIT_INIT, /**< scanning process was successful */
#endif /* !JERRY_NDEBUG */
} parser_general_flags_t;
/**
 * Expression parsing flags (combinable bit flags).
 */
typedef enum
{
  PARSE_EXPR = 0, /**< parse an expression without any special flags */
  PARSE_EXPR_LEFT_HAND_SIDE = (1u << 0), /**< parse a left-hand-side expression */
  PARSE_EXPR_NO_PUSH_RESULT = (1u << 1), /**< do not push the result of the expression onto the stack */
  PARSE_EXPR_NO_COMMA = (1u << 2), /**< do not parse comma operator */
  PARSE_EXPR_HAS_LITERAL = (1u << 3), /**< a primary literal is provided by a
                                       *   CBC_PUSH_LITERAL instruction */
} parser_expression_flags_t;

/**
 * Pattern parsing flags (combinable bit flags for destructuring patterns).
 */
typedef enum
{
  PARSER_PATTERN_NO_OPTS = 0, /**< parse the expression after '=' */
  PARSER_PATTERN_BINDING = (1u << 0), /**< parse BindingPattern */
  PARSER_PATTERN_TARGET_ON_STACK = (1u << 1), /**< assignment target is the topmost element on the stack */
  PARSER_PATTERN_TARGET_DEFAULT = (1u << 2), /**< perform default value comparison for assignment target */
  PARSER_PATTERN_NESTED_PATTERN = (1u << 3), /**< parse pattern inside a pattern */
  PARSER_PATTERN_LET = (1u << 4), /**< pattern is a let declaration */
  PARSER_PATTERN_CONST = (1u << 5), /**< pattern is a const declaration */
  PARSER_PATTERN_LOCAL = (1u << 6), /**< pattern is a local (catch parameter) declaration */
  PARSER_PATTERN_REST_ELEMENT = (1u << 7), /**< parse rest array / object element */
  PARSER_PATTERN_HAS_REST_ELEMENT = (1u << 8), /**< object literal rest element will be present */
  PARSER_PATTERN_ARGUMENTS = (1u << 9), /**< parse arguments binding */
} parser_pattern_flags_t;
/**
 * Check type for scanner_is_context_needed function.
 */
typedef enum
{
  PARSER_CHECK_BLOCK_CONTEXT, /**< check block context */
#if ENABLED (JERRY_ESNEXT)
  PARSER_CHECK_GLOBAL_CONTEXT, /**< check global context */
  PARSER_CHECK_FUNCTION_CONTEXT, /**< check function context */
#endif /* ENABLED (JERRY_ESNEXT) */
} parser_check_context_type_t;

#if ENABLED (JERRY_ESNEXT)
/**
 * Class field bits (combinable flags describing one parsed class field).
 */
typedef enum
{
  PARSER_CLASS_FIELD_END = (1u << 0), /**< last class field */
  PARSER_CLASS_FIELD_NORMAL = (1u << 1), /**< normal (non-computed) class field */
  PARSER_CLASS_FIELD_INITIALIZED = (1u << 2), /**< class field is initialized */
  PARSER_CLASS_FIELD_STATIC = (1u << 3), /**< static class field */
} parser_class_field_type_t;
#endif /* ENABLED (JERRY_ESNEXT) */
/**
* Mask for strict mode code
*/
#define PARSER_STRICT_MODE_MASK 0x1
/**
* Shorthand for function closure definition
*/
#define PARSER_FUNCTION_CLOSURE (PARSER_IS_FUNCTION | PARSER_IS_CLOSURE)
#if PARSER_MAXIMUM_CODE_SIZE <= UINT16_MAX
/**
* Maximum number of bytes for branch target.
*/
#define PARSER_MAX_BRANCH_LENGTH 2
#else /* PARSER_MAXIMUM_CODE_SIZE > UINT16_MAX */
/**
* Maximum number of bytes for branch target.
*/
#define PARSER_MAX_BRANCH_LENGTH 3
#endif /* PARSER_MAXIMUM_CODE_SIZE <= UINT16_MAX */
#if ENABLED (JERRY_ESNEXT)
/**
* Offset of PARSER_ALLOW_SUPER
*/
#define PARSER_SAVED_FLAGS_OFFSET \
JERRY_LOG2 (PARSER_ALLOW_SUPER)
/**
* Mask of saved flags
*/
#define PARSER_SAVED_FLAGS_MASK \
((1 << (JERRY_LOG2 (PARSER_ALLOW_NEW_TARGET) - JERRY_LOG2 (PARSER_ALLOW_SUPER) + 1)) - 1)
/**
* Get class option bits from parser_general_flags_t
*/
#define PARSER_SAVE_STATUS_FLAGS(opts) \
((uint16_t) (((opts) >> PARSER_SAVED_FLAGS_OFFSET) & PARSER_SAVED_FLAGS_MASK))
/**
* Mask for get class option bits from ecma_parse_opts_t
*/
#define PARSER_RESTORE_STATUS_FLAGS_MASK \
(((ECMA_PARSE_ALLOW_NEW_TARGET << 1) - 1) - (ECMA_PARSE_ALLOW_SUPER - 1))
/**
* Shift for get class option bits from ecma_parse_opts_t
*/
#define PARSER_RESTORE_STATUS_FLAGS_SHIFT \
(JERRY_LOG2 (PARSER_ALLOW_SUPER) - JERRY_LOG2 (ECMA_PARSE_ALLOW_SUPER))
/**
* Get class option bits from ecma_parse_opts_t
*/
#define PARSER_RESTORE_STATUS_FLAGS(opts) \
(((opts) & PARSER_RESTORE_STATUS_FLAGS_MASK) << PARSER_RESTORE_STATUS_FLAGS_SHIFT)
/**
* All flags that affect exotic arguments object creation.
*/
#define PARSER_ARGUMENTS_RELATED_FLAGS \
(PARSER_ARGUMENTS_NEEDED | PARSER_FUNCTION_HAS_COMPLEX_ARGUMENT | PARSER_IS_STRICT)
/**
* Get the corresponding eval flag for a ecma_parse_opts_t flag
*/
#define PARSER_GET_EVAL_FLAG(type) \
((type) >> JERRY_LOG2 (ECMA_PARSE_ALLOW_SUPER))
/**
* Check non-generator async functions
*/
#define PARSER_IS_NORMAL_ASYNC_FUNCTION(status_flags) \
(((status_flags) & (PARSER_IS_GENERATOR_FUNCTION | PARSER_IS_ASYNC_FUNCTION)) == PARSER_IS_ASYNC_FUNCTION)
#else /* !ENABLED (JERRY_ESNEXT) */
/**
* All flags that affect exotic arguments object creation.
*/
#define PARSER_ARGUMENTS_RELATED_FLAGS \
(PARSER_ARGUMENTS_NEEDED | PARSER_IS_STRICT)
#endif /* ENABLED (JERRY_ESNEXT) */
/* Checks whether unmapped arguments are needed. */
#define PARSER_NEEDS_MAPPED_ARGUMENTS(status_flags) \
(((status_flags) & PARSER_ARGUMENTS_RELATED_FLAGS) == PARSER_ARGUMENTS_NEEDED)
/* The maximum of PARSER_CBC_STREAM_PAGE_SIZE is 127. */
#define PARSER_CBC_STREAM_PAGE_SIZE \
((uint32_t) (64 - sizeof (void *)))
/* Defines the size of the max page. */
#define PARSER_STACK_PAGE_SIZE \
((uint32_t) (((sizeof (void *) > 4) ? 128 : 64) - sizeof (void *)))
/* Avoid compiler warnings for += operations. */
#define PARSER_PLUS_EQUAL_U16(base, value) (base) = (uint16_t) ((base) + (value))
#define PARSER_MINUS_EQUAL_U16(base, value) (base) = (uint16_t) ((base) - (value))
#define PARSER_PLUS_EQUAL_LC(base, value) (base) = (parser_line_counter_t) ((base) + (value))
/**
 * Argument for a compact-byte code.
 *
 * Holds the decoded operands of the most recently emitted CBC instruction
 * (see parser_context_t::last_cbc) so consecutive instructions can be merged.
 */
typedef struct
{
  uint16_t literal_index; /**< literal index argument */
  uint16_t value; /**< other argument (second literal or byte). */
  uint16_t third_literal_index; /**< literal index argument */
  uint8_t literal_type; /**< last literal type */
  uint8_t literal_keyword_type; /**< last literal keyword type */
} cbc_argument_t;
/* Useful parser macros. */
#define PARSER_CBC_UNAVAILABLE CBC_EXT_OPCODE
#define PARSER_TO_EXT_OPCODE(opcode) ((uint16_t) ((opcode) + 256))
#define PARSER_GET_EXT_OPCODE(opcode) ((opcode) - 256)
#define PARSER_IS_BASIC_OPCODE(opcode) ((opcode) < 256)
#define PARSER_IS_PUSH_LITERAL(opcode) \
((opcode) == CBC_PUSH_LITERAL \
|| (opcode) == CBC_PUSH_TWO_LITERALS \
|| (opcode) == CBC_PUSH_THREE_LITERALS)
#define PARSER_IS_PUSH_NUMBER(opcode) \
((opcode) == CBC_PUSH_NUMBER_0 \
|| (opcode) == CBC_PUSH_NUMBER_POS_BYTE \
|| (opcode) == CBC_PUSH_NUMBER_NEG_BYTE \
|| (opcode) == PARSER_TO_EXT_OPCODE (CBC_EXT_PUSH_LITERAL_PUSH_NUMBER_0) \
|| (opcode) == PARSER_TO_EXT_OPCODE (CBC_EXT_PUSH_LITERAL_PUSH_NUMBER_POS_BYTE) \
|| (opcode) == PARSER_TO_EXT_OPCODE (CBC_EXT_PUSH_LITERAL_PUSH_NUMBER_NEG_BYTE))
#define PARSER_IS_MUTABLE_PUSH_LITERAL(opcode) \
((opcode) >= CBC_PUSH_LITERAL && (opcode) <= CBC_PUSH_THIS_LITERAL)
#define PARSER_IS_PUSH_LITERALS_WITH_THIS(opcode) \
((opcode) >= CBC_PUSH_LITERAL && (opcode) <= CBC_PUSH_THREE_LITERALS)
#define PARSER_IS_PUSH_PROP(opcode) \
((opcode) >= CBC_PUSH_PROP && (opcode) <= CBC_PUSH_PROP_THIS_LITERAL)
#define PARSER_IS_PUSH_PROP_LITERAL(opcode) \
((opcode) >= CBC_PUSH_PROP_LITERAL && (opcode) <= CBC_PUSH_PROP_THIS_LITERAL)
#define PARSER_PUSH_LITERAL_TO_PUSH_PROP_LITERAL(opcode) \
(uint16_t) ((opcode) + (CBC_PUSH_PROP_LITERAL - CBC_PUSH_LITERAL))
#define PARSER_PUSH_PROP_LITERAL_TO_PUSH_LITERAL(opcode) \
(uint16_t) ((opcode) - (CBC_PUSH_PROP_LITERAL - CBC_PUSH_LITERAL))
#define PARSER_PUSH_PROP_TO_PUSH_PROP_REFERENCE(opcode) \
(uint16_t) ((opcode) + (CBC_PUSH_PROP_REFERENCE - CBC_PUSH_PROP))
#define PARSER_PUSH_PROP_REFERENCE_TO_PUSH_PROP(opcode) \
(uint16_t) ((opcode) - (CBC_PUSH_PROP_REFERENCE - CBC_PUSH_PROP))
#define PARSER_GET_LITERAL(literal_index) \
((lexer_literal_t *) parser_list_get (&context_p->literal_pool, (literal_index)))
#define PARSER_TO_BINARY_OPERATION_WITH_RESULT(opcode) \
(PARSER_TO_EXT_OPCODE(opcode) - CBC_ASSIGN_ADD + CBC_EXT_ASSIGN_ADD_PUSH_RESULT)
#define PARSER_TO_BINARY_OPERATION_WITH_BLOCK(opcode) \
((uint16_t) (PARSER_TO_EXT_OPCODE(opcode) - CBC_ASSIGN_ADD + CBC_EXT_ASSIGN_ADD_BLOCK))
#define PARSER_GET_FLAGS(op) \
(PARSER_IS_BASIC_OPCODE (op) ? cbc_flags[(op)] : cbc_ext_flags[PARSER_GET_EXT_OPCODE (op)])
#define PARSER_OPCODE_IS_RETURN(op) \
((op) == CBC_RETURN || (op) == CBC_RETURN_WITH_BLOCK || (op) == CBC_RETURN_WITH_LITERAL)
#define PARSER_ARGS_EQ(op, types) \
((PARSER_GET_FLAGS (op) & CBC_ARG_TYPES) == (types))
/**
 * All data allocated by the parser is
 * stored in parser_data_pages in the memory.
 */
typedef struct parser_mem_page_t
{
  struct parser_mem_page_t *next_p; /**< next page */
  uint8_t bytes[1]; /**< memory bytes; one-element array used as a
                     *   variable-length tail (pre-C99 flexible array idiom) */
} parser_mem_page_t;

/**
 * Structure for managing parser memory (a singly linked list of pages).
 */
typedef struct
{
  parser_mem_page_t *first_p; /**< first allocated page */
  parser_mem_page_t *last_p; /**< last allocated page */
  uint32_t last_position; /**< position of the last allocated byte */
} parser_mem_data_t;

/**
 * Parser memory list (fixed-size items packed onto pages).
 */
typedef struct
{
  parser_mem_data_t data; /**< storage space */
  uint32_t page_size; /**< size of each page */
  uint32_t item_size; /**< size of each item */
  uint32_t item_count; /**< number of items on each page */
} parser_list_t;

/**
 * Iterator for parser memory list.
 */
typedef struct
{
  parser_list_t *list_p; /**< parser list */
  parser_mem_page_t *current_p; /**< currently processed page */
  size_t current_position; /**< current position on the page */
} parser_list_iterator_t;

/**
 * Parser memory stack.
 */
typedef struct
{
  parser_mem_data_t data; /**< storage space */
  parser_mem_page_t *free_page_p; /**< space for fast allocation */
} parser_stack_t;

/**
 * Iterator for parser memory stack.
 */
typedef struct
{
  parser_mem_page_t *current_p; /**< currently processed page */
  size_t current_position; /**< current position on the page */
} parser_stack_iterator_t;

/**
 * Branch type: records where a not-yet-resolved jump target must be patched.
 */
typedef struct
{
  parser_mem_page_t *page_p; /**< branch location page */
  uint32_t offset; /**< branch location offset */
} parser_branch_t;

/**
 * Branch chain type (linked list of branches sharing one target).
 */
typedef struct parser_branch_node_t
{
  struct parser_branch_node_t *next_p; /**< next linked list node */
  parser_branch_t branch; /**< branch */
} parser_branch_node_t;

/**
 * Items of scope stack: maps a source identifier to its storage location.
 */
typedef struct
{
  uint16_t map_from; /**< original literal index */
  uint16_t map_to; /**< encoded register or literal index and flags */
} parser_scope_stack_t;
/**
* This item represents a function literal in the scope stack.
*
* When map_from == PARSER_SCOPE_STACK_FUNC:
* map_to represents the literal reserved for a function literal
* Note: the name of the function is the previous value in the scope stack
* Note: map_to is not encoded in this case
*/
#define PARSER_SCOPE_STACK_FUNC 0xffff
#if ENABLED (JERRY_ESNEXT)
/**
* Mask for decoding the register index of map_to
*/
#define PARSER_SCOPE_STACK_REGISTER_MASK 0x3fff
/**
* Function statements with the name specified
* in map_from should not be copied to global scope.
*/
#define PARSER_SCOPE_STACK_NO_FUNCTION_COPY 0x8000
/**
* The scope stack item represents a const binding stored in register
*/
#define PARSER_SCOPE_STACK_IS_CONST_REG 0x4000
/**
* The scope stack item represents a binding which has already created with ECMA_VALUE_UNINITIALIZED
*/
#define PARSER_SCOPE_STACK_IS_LOCAL_CREATED (PARSER_SCOPE_STACK_IS_CONST_REG)
#endif /* ENABLED (JERRY_ESNEXT) */
/**
* Starting literal index for registers.
*/
#define PARSER_REGISTER_START 0x8000
/**
* Invalid literal index
*/
#define PARSER_INVALID_LITERAL_INDEX UINT16_MAX
/**
* Lastly emitted opcode is not a function literal
*/
#define PARSER_NOT_FUNCTION_LITERAL PARSER_INVALID_LITERAL_INDEX
/**
* Lastly emitted opcode is not a named function literal
*/
#define PARSER_NAMED_FUNCTION (uint16_t) (PARSER_NOT_FUNCTION_LITERAL - 1)
/**
* Lastly emitted opcode is not an anonymous class literal
*/
#define PARSER_ANONYMOUS_CLASS (uint16_t) (PARSER_NAMED_FUNCTION - 1)
/* Forward definitions for js-scanner-internal.h. */
struct scanner_context_t;
typedef struct scanner_context_t scanner_context_t;

#if ENABLED (JERRY_DEBUGGER)
/**
 * Extra information for each breakpoint.
 */
typedef struct
{
  uint32_t value; /**< line or offset of the breakpoint */
} parser_breakpoint_info_t;

/**
 * Maximum number of breakpoint info.
 * (As many entries as fit into one debugger transport buffer.)
 */
#define PARSER_MAX_BREAKPOINT_INFO_COUNT \
  (JERRY_DEBUGGER_TRANSPORT_MAX_BUFFER_SIZE / sizeof (parser_breakpoint_info_t))
#endif /* ENABLED (JERRY_DEBUGGER) */
/**
 * Those members of a context which need
 * to be saved when a sub-function is parsed
 * (restored when parsing returns to the enclosing function).
 */
typedef struct parser_saved_context_t
{
  /* Parser members. */
  uint32_t status_flags; /**< parsing options */
  uint16_t stack_depth; /**< current stack depth */
  uint16_t stack_limit; /**< maximum stack depth */
  struct parser_saved_context_t *prev_context_p; /**< last saved context */
  parser_stack_iterator_t last_statement; /**< last statement position */

  /* Literal types */
  uint16_t argument_count; /**< number of function arguments */
#if ENABLED (JERRY_ESNEXT)
  uint16_t argument_length; /**< length property of arguments */
#endif /* ENABLED (JERRY_ESNEXT) */
  uint16_t register_count; /**< number of registers */
  uint16_t literal_count; /**< number of literals */

  /* Memory storage members. */
  parser_mem_data_t byte_code; /**< byte code buffer */
  uint32_t byte_code_size; /**< byte code size for branches */
  parser_mem_data_t literal_pool_data; /**< literal list */
  parser_scope_stack_t *scope_stack_p; /**< scope stack */
  uint16_t scope_stack_size; /**< size of scope stack */
  uint16_t scope_stack_top; /**< preserved top of scope stack */
  uint16_t scope_stack_reg_top; /**< preserved top register of scope stack */
#if ENABLED (JERRY_ESNEXT)
  uint16_t scope_stack_global_end; /**< end of global declarations of a function */
  ecma_value_t tagged_template_literal_cp; /**< compressed pointer to the tagged template literal collection */
#endif /* ENABLED (JERRY_ESNEXT) */
#ifndef JERRY_NDEBUG
  uint16_t context_stack_depth; /**< current context stack depth */
#endif /* !JERRY_NDEBUG */
} parser_saved_context_t;
/**
 * Shared parser context: the single state object threaded through the
 * lexer, pre-scanner and byte-code emitter while compiling one script.
 */
typedef struct
{
  PARSER_TRY_CONTEXT (try_buffer); /**< try_buffer */
  parser_error_t error; /**< error code */
  /** Union for rarely used members. */
  union
  {
    void *allocated_buffer_p; /**< dynamically allocated buffer
                               *   which needs to be freed on error */
    scanner_context_t *scanner_context_p; /**< scanner context for the pre-scanner */
  } u;
  uint32_t allocated_buffer_size; /**< size of the dynamically allocated buffer */

  /* Parser members. */
  uint32_t status_flags; /**< status flags */
  uint32_t global_status_flags; /**< global status flags */
  uint16_t stack_depth; /**< current stack depth */
  uint16_t stack_limit; /**< maximum stack depth */
  parser_saved_context_t *last_context_p; /**< last saved context */
  parser_stack_iterator_t last_statement; /**< last statement position */
#if ENABLED (JERRY_MODULE_SYSTEM)
  ecma_module_node_t *module_current_node_p; /**< import / export node that is being processed */
  lexer_literal_t *module_identifier_lit_p; /**< the literal for the identifier of the current element */
#endif /* ENABLED (JERRY_MODULE_SYSTEM) */

  /* Lexer members. */
  lexer_token_t token; /**< current token */
  lexer_lit_object_t lit_object; /**< current literal object */
  const uint8_t *source_p; /**< next source byte */
  const uint8_t *source_end_p; /**< last source byte */
  parser_line_counter_t line; /**< current line */
  parser_line_counter_t column; /**< current column */

  /* Scanner members. */
  scanner_info_t *next_scanner_info_p; /**< next scanner info block */
  scanner_info_t *active_scanner_info_p; /**< currently active scanner info block */
  scanner_info_t *skipped_scanner_info_p; /**< next scanner info block */
  scanner_info_t *skipped_scanner_info_end_p; /**< currently active scanner info block */

  /* Compact byte code members. */
  cbc_argument_t last_cbc; /**< argument of the last cbc */
  uint16_t last_cbc_opcode; /**< opcode of the last cbc */

  /* Literal types */
  uint16_t argument_count; /**< number of function arguments */
#if ENABLED (JERRY_ESNEXT)
  uint16_t argument_length; /**< length property of arguments */
#endif /* ENABLED (JERRY_ESNEXT) */
  uint16_t register_count; /**< number of registers */
  uint16_t literal_count; /**< number of literals */

  /* Memory storage members. */
  parser_mem_data_t byte_code; /**< byte code buffer */
  uint32_t byte_code_size; /**< current byte code size for branches */
  parser_list_t literal_pool; /**< literal list */
  parser_mem_data_t stack; /**< storage space */
  parser_scope_stack_t *scope_stack_p; /**< scope stack */
  parser_mem_page_t *free_page_p; /**< space for fast allocation */
  uint16_t scope_stack_size; /**< size of scope stack */
  uint16_t scope_stack_top; /**< current top of scope stack */
  uint16_t scope_stack_reg_top; /**< current top register of scope stack */
#if ENABLED (JERRY_ESNEXT)
  uint16_t scope_stack_global_end; /**< end of global declarations of a function */
  ecma_value_t tagged_template_literal_cp; /**< compressed pointer to the tagged template literal collection */
#endif /* ENABLED (JERRY_ESNEXT) */
  uint8_t stack_top_uint8; /**< top byte stored on the stack */
#ifndef JERRY_NDEBUG
  /* Variables for debugging / logging. */
  uint16_t context_stack_depth; /**< current context stack depth */
#endif /* !JERRY_NDEBUG */
#if ENABLED (JERRY_PARSER_DUMP_BYTE_CODE)
  int is_show_opcodes; /**< show opcodes */
  uint32_t total_byte_code_size; /**< total byte code size */
#endif /* ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) */
#if ENABLED (JERRY_DEBUGGER)
  parser_breakpoint_info_t breakpoint_info[PARSER_MAX_BREAKPOINT_INFO_COUNT]; /**< breakpoint info list */
  uint16_t breakpoint_info_count; /**< current breakpoint index */
  parser_line_counter_t last_breakpoint_line; /**< last line where breakpoint has been inserted */
#endif /* ENABLED (JERRY_DEBUGGER) */
#if ENABLED (JERRY_RESOURCE_NAME)
  ecma_value_t resource_name; /**< resource name */
#endif /* ENABLED (JERRY_RESOURCE_NAME) */
#if ENABLED (JERRY_LINE_INFO)
  parser_line_counter_t last_line_info_line; /**< last line where line info has been inserted */
#endif /* ENABLED (JERRY_LINE_INFO) */
} parser_context_t;
/**
* @}
* @}
* @}
*
* \addtogroup mem Memory allocation
* @{
*
* \addtogroup mem_parser Parser memory manager
* @{
*/
/* Memory management.
 * Note: throws an error (via the context's try buffer) if unsuccessful. */
void *parser_malloc (parser_context_t *context_p, size_t size);
void parser_free (void *ptr, size_t size);
/* "local" variants — NOTE(review): allocation pool semantics are defined in
 * the implementation; presumably short-lived per-parse allocations. */
void *parser_malloc_local (parser_context_t *context_p, size_t size);
void parser_free_local (void *ptr, size_t size);
/* Releases context_p->u.allocated_buffer_p (used during error cleanup). */
void parser_free_allocated_buffer (parser_context_t *context_p);

/* Parser byte stream. */
void parser_cbc_stream_init (parser_mem_data_t *data_p);
void parser_cbc_stream_free (parser_mem_data_t *data_p);
void parser_cbc_stream_alloc_page (parser_context_t *context_p, parser_mem_data_t *data_p);

/* Parser list. Ensures pointer alignment. */
void parser_list_init (parser_list_t *list_p, uint32_t item_size, uint32_t item_count);
void parser_list_free (parser_list_t *list_p);
void parser_list_reset (parser_list_t *list_p);
void *parser_list_append (parser_context_t *context_p, parser_list_t *list_p);
void *parser_list_get (parser_list_t *list_p, size_t index);
void parser_list_iterator_init (parser_list_t *list_p, parser_list_iterator_t *iterator_p);
void *parser_list_iterator_next (parser_list_iterator_t *iterator_p);

/* Parser stack. Optimized for pushing bytes.
 * Pop functions never throw errors. */
void parser_stack_init (parser_context_t *context_p);
void parser_stack_free (parser_context_t *context_p);
void parser_stack_push_uint8 (parser_context_t *context_p, uint8_t uint8_value);
void parser_stack_pop_uint8 (parser_context_t *context_p);
void parser_stack_change_last_uint8 (parser_context_t *context_p, uint8_t new_value);
uint8_t *parser_stack_get_prev_uint8 (parser_context_t *context_p);
void parser_stack_push_uint16 (parser_context_t *context_p, uint16_t uint16_value);
uint16_t parser_stack_pop_uint16 (parser_context_t *context_p);
void parser_stack_push (parser_context_t *context_p, const void *data_p, uint32_t length);
void parser_stack_pop (parser_context_t *context_p, void *data_p, uint32_t length);
void parser_stack_iterator_init (parser_context_t *context_p, parser_stack_iterator_t *iterator);
uint8_t parser_stack_iterator_read_uint8 (parser_stack_iterator_t *iterator);
void parser_stack_iterator_skip (parser_stack_iterator_t *iterator, size_t length);
void parser_stack_iterator_read (parser_stack_iterator_t *iterator, void *data_p, size_t length);
void parser_stack_iterator_write (parser_stack_iterator_t *iterator, const void *data_p, size_t length);
/**
* @}
* @}
*
* \addtogroup parser Parser
* @{
*
* \addtogroup jsparser JavaScript
* @{
*
* \addtogroup jsparser_utils Utility
* @{
*/
/* Compact byte code emitting functions. */
void parser_flush_cbc (parser_context_t *context_p);
void parser_emit_cbc (parser_context_t *context_p, uint16_t opcode);
void parser_emit_cbc_literal (parser_context_t *context_p, uint16_t opcode, uint16_t literal_index);
void parser_emit_cbc_literal_value (parser_context_t *context_p, uint16_t opcode, uint16_t literal_index,
uint16_t value);
void parser_emit_cbc_literal_from_token (parser_context_t *context_p, uint16_t opcode);
void parser_emit_cbc_call (parser_context_t *context_p, uint16_t opcode, size_t call_arguments);
void parser_emit_cbc_push_number (parser_context_t *context_p, bool is_negative_number);
void parser_emit_cbc_forward_branch (parser_context_t *context_p, uint16_t opcode, parser_branch_t *branch_p);
parser_branch_node_t *parser_emit_cbc_forward_branch_item (parser_context_t *context_p, uint16_t opcode,
parser_branch_node_t *next_p);
void parser_emit_cbc_backward_branch (parser_context_t *context_p, uint16_t opcode, uint32_t offset);
void parser_set_branch_to_current_position (parser_context_t *context_p, parser_branch_t *branch_p);
void parser_set_breaks_to_current_position (parser_context_t *context_p, parser_branch_node_t *current_p);
void parser_set_continues_to_current_position (parser_context_t *context_p, parser_branch_node_t *current_p);
/* Convenience macros: each one routes the corresponding base emitter through
 * PARSER_TO_EXT_OPCODE so callers can emit extended (two-byte) opcodes without
 * repeating the conversion at every call site. */
#define parser_emit_cbc_ext(context_p, opcode) \
  parser_emit_cbc ((context_p), PARSER_TO_EXT_OPCODE (opcode))
#define parser_emit_cbc_ext_literal(context_p, opcode, literal_index) \
  parser_emit_cbc_literal ((context_p), PARSER_TO_EXT_OPCODE (opcode), (literal_index))
#define parser_emit_cbc_ext_literal_from_token(context_p, opcode) \
  parser_emit_cbc_literal_from_token ((context_p), PARSER_TO_EXT_OPCODE (opcode))
/* Note: this macro used to be defined twice with an identical replacement list
 * (allowed by C11 6.10.3p2, but redundant); the duplicate has been removed. */
#define parser_emit_cbc_ext_call(context_p, opcode, call_arguments) \
  parser_emit_cbc_call ((context_p), PARSER_TO_EXT_OPCODE (opcode), (call_arguments))
#define parser_emit_cbc_ext_forward_branch(context_p, opcode, branch_p) \
  parser_emit_cbc_forward_branch ((context_p), PARSER_TO_EXT_OPCODE (opcode), (branch_p))
#define parser_emit_cbc_ext_backward_branch(context_p, opcode, offset) \
  parser_emit_cbc_backward_branch ((context_p), PARSER_TO_EXT_OPCODE (opcode), (offset))

#if ENABLED (JERRY_ESNEXT)
void parser_reverse_class_fields (parser_context_t *context_p, size_t fields_size);
#endif /* ENABLED (JERRY_ESNEXT) */
/**
* @}
*
* \addtogroup jsparser_lexer Lexer
* @{
*/
/* Lexer functions */
void lexer_next_token (parser_context_t *context_p);
bool lexer_check_next_character (parser_context_t *context_p, lit_utf8_byte_t character);
bool lexer_check_next_characters (parser_context_t *context_p, lit_utf8_byte_t character1,
lit_utf8_byte_t character2);
uint8_t lexer_consume_next_character (parser_context_t *context_p);
bool lexer_check_post_primary_exp (parser_context_t *context_p);
#if ENABLED (JERRY_ESNEXT)
void lexer_skip_empty_statements (parser_context_t *context_p);
bool lexer_check_arrow (parser_context_t *context_p);
bool lexer_check_arrow_param (parser_context_t *context_p);
bool lexer_check_yield_no_arg (parser_context_t *context_p);
bool lexer_consume_generator (parser_context_t *context_p);
bool lexer_consume_assign (parser_context_t *context_p);
void lexer_update_await_yield (parser_context_t *context_p, uint32_t status_flags);
#endif /* ENABLED (JERRY_ESNEXT) */
void lexer_parse_string (parser_context_t *context_p, lexer_string_options_t opts);
void lexer_expect_identifier (parser_context_t *context_p, uint8_t literal_type);
bool lexer_scan_identifier (parser_context_t *context_p);
void lexer_check_property_modifier (parser_context_t *context_p);
void lexer_convert_ident_to_cesu8 (uint8_t *destination_p, const uint8_t *source_p, prop_length_t length);
const uint8_t *lexer_convert_literal_to_chars (parser_context_t *context_p, const lexer_lit_location_t *literal_p,
uint8_t *local_byte_array_p, lexer_string_options_t opts);
void lexer_expect_object_literal_id (parser_context_t *context_p, uint32_t ident_opts);
lexer_literal_t *lexer_construct_unused_literal (parser_context_t *context_p);
void lexer_construct_literal_object (parser_context_t *context_p, const lexer_lit_location_t *lit_location_p,
uint8_t literal_type);
bool lexer_construct_number_object (parser_context_t *context_p, bool is_expr, bool is_negative_number);
void lexer_convert_push_number_to_push_literal (parser_context_t *context_p);
uint16_t lexer_construct_function_object (parser_context_t *context_p, uint32_t extra_status_flags);
void lexer_construct_regexp_object (parser_context_t *context_p, bool parse_only);
bool lexer_compare_identifier_to_string (const lexer_lit_location_t *left_p, const uint8_t *right_p, size_t size);
bool lexer_compare_identifiers (parser_context_t *context_p, const lexer_lit_location_t *left_p,
const lexer_lit_location_t *right_p);
bool lexer_current_is_literal (parser_context_t *context_p, const lexer_lit_location_t *right_ident_p);
bool lexer_string_is_use_strict (parser_context_t *context_p);
bool lexer_string_is_directive (parser_context_t *context_p);
#if ENABLED (JERRY_ESNEXT)
bool lexer_token_is_identifier (parser_context_t *context_p, const char *identifier_p,
size_t identifier_length);
bool lexer_token_is_let (parser_context_t *context_p);
bool lexer_token_is_async (parser_context_t *context_p);
#endif /* ENABLED (JERRY_ESNEXT) */
bool lexer_compare_literal_to_string (parser_context_t *context_p, const char *string_p, size_t string_length);
uint8_t lexer_convert_binary_lvalue_token_to_binary (uint8_t token);
/**
* @}
*
* \addtogroup jsparser_expr Expression parser
* @{
*/
/* Parser functions. */
void parser_parse_block_expression (parser_context_t *context_p, int options);
void parser_parse_expression_statement (parser_context_t *context_p, int options);
void parser_parse_expression (parser_context_t *context_p, int options);
#if ENABLED (JERRY_ESNEXT)
void parser_parse_class (parser_context_t *context_p, bool is_statement);
void parser_parse_initializer (parser_context_t *context_p, parser_pattern_flags_t flags);
void parser_parse_initializer_by_next_char (parser_context_t *context_p, parser_pattern_flags_t flags);
#endif /* ENABLED (JERRY_ESNEXT) */
/**
* @}
*
* \addtogroup jsparser_scanner Scanner
* @{
*/
void scanner_release_next (parser_context_t *context_p, size_t size);
void scanner_set_active (parser_context_t *context_p);
void scanner_revert_active (parser_context_t *context_p);
void scanner_release_active (parser_context_t *context_p, size_t size);
void scanner_release_switch_cases (scanner_case_info_t *case_p);
void scanner_seek (parser_context_t *context_p);
void scanner_reverse_info_list (parser_context_t *context_p);
void scanner_cleanup (parser_context_t *context_p);
bool scanner_is_context_needed (parser_context_t *context_p, parser_check_context_type_t check_type);
#if ENABLED (JERRY_ESNEXT)
bool scanner_try_scan_new_target (parser_context_t *context_p);
void scanner_check_variables (parser_context_t *context_p);
#endif /* ENABLED (JERRY_ESNEXT) */
void scanner_create_variables (parser_context_t *context_p, uint32_t option_flags);
void scanner_get_location (scanner_location_t *location_p, parser_context_t *context_p);
void scanner_set_location (parser_context_t *context_p, scanner_location_t *location_p);
uint16_t scanner_decode_map_to (parser_scope_stack_t *stack_item_p);
#if ENABLED (JERRY_ESNEXT)
uint16_t scanner_save_literal (parser_context_t *context_p, uint16_t ident_index);
bool scanner_literal_is_const_reg (parser_context_t *context_p, uint16_t literal_index);
bool scanner_literal_is_created (parser_context_t *context_p, uint16_t literal_index);
bool scanner_literal_exists (parser_context_t *context_p, uint16_t literal_index);
#endif /* ENABLED (JERRY_ESNEXT) */
void scanner_scan_all (parser_context_t *context_p, const uint8_t *arg_list_p, const uint8_t *arg_list_end_p,
const uint8_t *source_p, const uint8_t *source_end_p);
/**
* @}
*
* \addtogroup jsparser_stmt Statement parser
* @{
*/
void parser_parse_statements (parser_context_t *context_p);
void parser_free_jumps (parser_stack_iterator_t iterator);
#if ENABLED (JERRY_MODULE_SYSTEM)
/**
* @}
*
* \addtogroup jsparser_stmt Module statement parser
* @{
*/
extern const lexer_lit_location_t lexer_default_literal;
void parser_module_add_export_node_to_context (parser_context_t *context_p);
void parser_module_add_import_node_to_context (parser_context_t *context_p);
void parser_module_check_request_place (parser_context_t *context_p);
void parser_module_context_init (parser_context_t *context_p);
void parser_module_handle_module_specifier (parser_context_t *context_p);
void parser_module_handle_requests (parser_context_t *context_p);
void parser_module_parse_export_clause (parser_context_t *context_p);
void parser_module_parse_import_clause (parser_context_t *context_p);
void parser_module_set_default (parser_context_t *context_p);
ecma_module_node_t *parser_module_create_module_node (parser_context_t *context_p);
bool parser_module_check_duplicate_import (parser_context_t *context_p, ecma_string_t *local_name_p);
bool parser_module_check_duplicate_export (parser_context_t *context_p, ecma_string_t *export_name_p);
void parser_module_append_export_name (parser_context_t *context_p);
void parser_module_add_names_to_node (parser_context_t *context_p,
ecma_string_t *imex_name_p,
ecma_string_t *local_name_p);
#endif /* ENABLED (JERRY_MODULE_SYSTEM) */
/**
* @}
*
* \addtogroup jsparser_parser Parser
* @{
*/
ecma_compiled_code_t *parser_parse_function (parser_context_t *context_p, uint32_t status_flags);
#if ENABLED (JERRY_ESNEXT)
ecma_compiled_code_t *parser_parse_arrow_function (parser_context_t *context_p, uint32_t status_flags);
ecma_compiled_code_t *parser_parse_class_fields (parser_context_t *context_p);
void parser_set_function_name (parser_context_t *context_p, uint16_t function_literal_index, uint16_t name_index,
uint32_t status_flags);
void parser_compiled_code_set_function_name (parser_context_t *context_p, ecma_compiled_code_t *bytecode_p,
uint16_t name_index, uint32_t status_flags);
uint16_t parser_check_anonymous_function_declaration (parser_context_t *context_p);
#endif /* ENABLED (JERRY_ESNEXT) */
/* Error management. */
void parser_raise_error (parser_context_t *context_p, parser_error_t error);
/* Debug functions. */
#if ENABLED (JERRY_DEBUGGER)
void parser_append_breakpoint_info (parser_context_t *context_p, jerry_debugger_header_type_t type, uint32_t value);
#endif /* ENABLED (JERRY_DEBUGGER) */
#if ENABLED (JERRY_LINE_INFO)
void parser_emit_line_info (parser_context_t *context_p, uint32_t line, bool flush_cbc);
#endif /* ENABLED (JERRY_LINE_INFO) */
#if ENABLED (JERRY_PARSER_DUMP_BYTE_CODE)
void util_print_cbc (ecma_compiled_code_t *compiled_code_p);
#endif /* ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) */
/**
* @}
* @}
* @}
*/
#endif /* !JS_PARSER_INTERNAL_H */
| 42.178687 | 116 | 0.71445 | [
"object"
] |
9e4d12ec3dcdf2d3a817dc4bf7e49a1e14c013b6 | 10,658 | h | C | Libraries/JUCE/modules/juce_dsp/maths/juce_Matrix.h | RobinSchmidt/RS-MET-Preliminary | 6c01cbaad7cce3daa3293c444dd9e4b74e5ebfbe | [
"FTL"
] | 2 | 2021-03-12T17:28:14.000Z | 2022-03-03T08:18:59.000Z | Libraries/JUCE/modules/juce_dsp/maths/juce_Matrix.h | RobinSchmidt/RS-MET-Preliminary | 6c01cbaad7cce3daa3293c444dd9e4b74e5ebfbe | [
"FTL"
] | null | null | null | Libraries/JUCE/modules/juce_dsp/maths/juce_Matrix.h | RobinSchmidt/RS-MET-Preliminary | 6c01cbaad7cce3daa3293c444dd9e4b74e5ebfbe | [
"FTL"
] | null | null | null | /*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2020 - Raw Material Software Limited
JUCE is an open source library subject to commercial or open-source
licensing.
By using JUCE, you agree to the terms of both the JUCE 6 End-User License
Agreement and JUCE Privacy Policy (both effective as of the 16th June 2020).
End User License Agreement: www.juce.com/juce-6-licence
Privacy Policy: www.juce.com/juce-privacy-policy
Or: You may also use this code under the terms of the GPL v3 (see
www.gnu.org/licenses).
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
DISCLAIMED.
==============================================================================
*/
namespace juce
{
namespace dsp
{
/**
    General matrix and vectors class, meant for classic math manipulation such as
    additions, multiplications, and linear systems of equations solving.

    Data is stored contiguously in row-major order; a per-row offset table
    (dataAcceleration) is used to index it.

    @see LinearAlgebra

    @tags{DSP}
*/
template <typename ElementType>
class Matrix
{
public:
    //==============================================================================
    /** Creates a new matrix with a given number of rows and columns, initialized to zero. */
    Matrix (size_t numRows, size_t numColumns)
        : rows (numRows), columns (numColumns)
    {
        resize();
        clear();
    }

    /** Creates a new matrix with a given number of rows and columns, with initial
        data coming from an array, stored in row-major order.
        The array must hold at least numRows * numColumns elements.
    */
    Matrix (size_t numRows, size_t numColumns, const ElementType* dataPointer)
        : rows (numRows), columns (numColumns)
    {
        resize();
        memcpy (data.getRawDataPointer(), dataPointer, rows * columns * sizeof (ElementType));
    }

    /** Creates a copy of another matrix. */
    Matrix (const Matrix&) = default;

    /** Moves another matrix into this one. */
    Matrix (Matrix&&) noexcept = default;

    /** Creates a copy of another matrix. */
    Matrix& operator= (const Matrix&) = default;

    /** Moves another matrix into this one. */
    Matrix& operator= (Matrix&&) noexcept = default;

    //==============================================================================
    /** Creates the size x size identity matrix. */
    static Matrix identity (size_t size);

    /** Creates a square size x size Toeplitz matrix from a vector. */
    static Matrix toeplitz (const Matrix& vector, size_t size);

    /** Creates a square size x size Hankel matrix from a vector with an optional offset.

        @param vector    The vector from which the Hankel matrix should be generated.
                         Its number of rows should be at least 2 * (size - 1) + 1
        @param size      The size of resulting square matrix.
        @param offset    An optional offset into the given vector.
    */
    static Matrix hankel (const Matrix& vector, size_t size, size_t offset = 0);

    //==============================================================================
    /** Returns the number of rows in the matrix. */
    size_t getNumRows() const noexcept                 { return rows; }

    /** Returns the number of columns in the matrix. */
    size_t getNumColumns() const noexcept              { return columns; }

    /** Returns an Array of 2 integers with the number of rows and columns in the
        matrix.
    */
    Array<size_t> getSize() const noexcept             { return { rows, columns }; }

    /** Fills the contents of the matrix with zeroes.
        (Byte-wise zeroing via zeromem; assumes ElementType is zero-fillable.) */
    void clear() noexcept                              { zeromem (data.begin(), (size_t) data.size() * sizeof (ElementType)); }

    //==============================================================================
    /** Swaps the contents of two rows in the matrix and returns a reference to itself. */
    Matrix& swapRows (size_t rowOne, size_t rowTwo) noexcept;

    /** Swaps the contents of two columns in the matrix and returns a reference to itself. */
    Matrix& swapColumns (size_t columnOne, size_t columnTwo) noexcept;

    //==============================================================================
    /** Returns the value of the matrix at a given row and column (for reading). */
    inline ElementType operator() (size_t row, size_t column) const noexcept
    {
        jassert (row < rows && column < columns);
        return data.getReference (static_cast<int> (dataAcceleration.getReference (static_cast<int> (row))) + static_cast<int> (column));
    }

    /** Returns the value of the matrix at a given row and column (for modifying). */
    inline ElementType& operator() (size_t row, size_t column) noexcept
    {
        jassert (row < rows && column < columns);
        return data.getReference (static_cast<int> (dataAcceleration.getReference (static_cast<int> (row))) + static_cast<int> (column));
    }

    /** Returns a pointer to the raw data of the matrix object, ordered in row-major
        order (for modifying).
    */
    inline ElementType* getRawDataPointer() noexcept                  { return data.getRawDataPointer(); }

    /** Returns a pointer to the raw data of the matrix object, ordered in row-major
        order (for reading).
    */
    inline const ElementType* getRawDataPointer() const noexcept      { return data.begin(); }

    //==============================================================================
    /** In-place addition of two matrices. */
    inline Matrix& operator+= (const Matrix& other) noexcept          { return apply (other, [] (ElementType a, ElementType b) { return a + b; } ); }

    /** In-place subtraction of two matrices. */
    inline Matrix& operator-= (const Matrix& other) noexcept          { return apply (other, [] (ElementType a, ElementType b) { return a - b; } ); }

    /** In-place scalar multiplication. */
    inline Matrix& operator*= (ElementType scalar) noexcept
    {
        std::for_each (begin(), end(), [scalar] (ElementType& x) { x *= scalar; });
        return *this;
    }

    /** Addition of two matrices. */
    inline Matrix operator+ (const Matrix& other) const               { Matrix result (*this); result += other; return result; }

    /** Subtraction of two matrices. (Comment fixed: previously mislabelled "Addition".) */
    inline Matrix operator- (const Matrix& other) const               { Matrix result (*this); result -= other; return result; }

    /** Scalar multiplication. */
    inline Matrix operator* (ElementType scalar) const                { Matrix result (*this); result *= scalar; return result; }

    /** Matrix multiplication. */
    Matrix operator* (const Matrix& other) const;

    /** Performs an element-wise (Hadamard) product of the receiver with other and
        stores the result in the receiver.
        (The identifier's "hadarmard" spelling is kept for API compatibility.) */
    inline Matrix& hadarmard (const Matrix& other) noexcept           { return apply (other, [] (ElementType a, ElementType b) { return a * b; } ); }

    /** Performs an element-wise (Hadamard) product of a and b and returns the result. */
    static Matrix hadarmard (const Matrix& a, const Matrix& b)        { Matrix result (a); result.hadarmard (b); return result; }

    //==============================================================================
    /** Compares two matrices, with a given tolerance applied per element. */
    static bool compare (const Matrix& a, const Matrix& b, ElementType tolerance = 0) noexcept;

    /** Equality operator: compares via compare() with zero tolerance. */
    inline bool operator== (const Matrix& other) const noexcept       { return compare (*this, other); }

    //==============================================================================
    /** Tells if the matrix is a square matrix. */
    bool isSquare() const noexcept                                    { return rows == columns; }

    /** Tells if the matrix is a vector (a single row or a single column). */
    bool isVector() const noexcept                                    { return isOneColumnVector() || isOneRowVector(); }

    /** Tells if the matrix is a one column vector. */
    bool isOneColumnVector() const noexcept                           { return columns == 1; }

    /** Tells if the matrix is a one row vector. */
    bool isOneRowVector() const noexcept                              { return rows == 1; }

    /** Tells if the matrix is a null matrix (no rows or no columns). */
    bool isNullMatrix() const noexcept                                { return rows == 0 || columns == 0; }

    //==============================================================================
    /** Solves a linear system of equations represented by this object and the argument b,
        using various algorithms depending on the size of the arguments.

        The matrix must be a square matrix N times N, and b must be a vector N times 1,
        with the coefficients of b. After the execution of the algorithm,
        the vector b will contain the solution.

        Returns true if the linear system of equations was successfully solved.
    */
    bool solve (Matrix& b) const noexcept;

    //==============================================================================
    /** Returns a String displaying in a convenient way the matrix contents. */
    String toString() const;

    //==============================================================================
    ElementType* begin() noexcept                   { return data.begin(); }
    ElementType* end() noexcept                     { return data.end(); }

    const ElementType* begin() const noexcept       { return &data.getReference (0); }
    const ElementType* end() const noexcept         { return begin() + data.size(); }

private:
    //==============================================================================
    /** Resizes the storage and rebuilds the per-row offset table (dataAcceleration). */
    void resize()
    {
        data.resize (static_cast<int> (columns * rows));
        dataAcceleration.resize (static_cast<int> (rows));

        for (size_t i = 0; i < rows; ++i)
            dataAcceleration.setUnchecked (static_cast<int> (i), i * columns);
    }

    /** Applies binaryOp element-wise between this matrix and other (sizes must match),
        writing the result into this matrix and returning a reference to it. */
    template <typename BinaryOperation>
    Matrix& apply (const Matrix& other, BinaryOperation binaryOp)
    {
        jassert (rows == other.rows && columns == other.columns);

        auto* dst = getRawDataPointer();

        for (auto src : other)
        {
            *dst = binaryOp (*dst, src);
            ++dst;
        }

        return *this;
    }

    //==============================================================================
    Array<ElementType> data;             // row-major element storage
    Array<size_t> dataAcceleration;      // per-row start offsets into data

    size_t rows, columns;

    //==============================================================================
    JUCE_LEAK_DETECTOR (Matrix)
};
} // namespace dsp
} // namespace juce
| 41.796078 | 151 | 0.55198 | [
"object",
"vector"
] |
9e4e9035f78317f49f106e60d7b6f22dbe736ce8 | 41,552 | c | C | src/pl/plpython/plpy_typeio.c | lmwnshn/postgres | 649750d995eb953c505077ac4baa73b243f68002 | [
"PostgreSQL"
] | 3 | 2019-03-20T06:17:24.000Z | 2022-01-09T06:38:46.000Z | src/pl/plpython/plpy_typeio.c | lmwnshn/postgres | 649750d995eb953c505077ac4baa73b243f68002 | [
"PostgreSQL"
] | 4 | 2020-12-09T05:20:51.000Z | 2021-01-10T01:57:28.000Z | src/pl/plpython/plpy_typeio.c | lmwnshn/postgres | 649750d995eb953c505077ac4baa73b243f68002 | [
"PostgreSQL"
] | null | null | null | /*
* transforming Datums to Python objects and vice versa
*
* src/pl/plpython/plpy_typeio.c
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "plpy_elog.h"
#include "plpy_main.h"
#include "plpy_typeio.h"
#include "plpython.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
/* conversion from Datums to Python objects */
static PyObject *PLyBool_FromBool(PLyDatumToOb *arg, Datum d);
static PyObject *PLyFloat_FromFloat4(PLyDatumToOb *arg, Datum d);
static PyObject *PLyFloat_FromFloat8(PLyDatumToOb *arg, Datum d);
static PyObject *PLyDecimal_FromNumeric(PLyDatumToOb *arg, Datum d);
static PyObject *PLyInt_FromInt16(PLyDatumToOb *arg, Datum d);
static PyObject *PLyInt_FromInt32(PLyDatumToOb *arg, Datum d);
static PyObject *PLyLong_FromInt64(PLyDatumToOb *arg, Datum d);
static PyObject *PLyLong_FromOid(PLyDatumToOb *arg, Datum d);
static PyObject *PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d);
static PyObject *PLyString_FromScalar(PLyDatumToOb *arg, Datum d);
static PyObject *PLyObject_FromTransform(PLyDatumToOb *arg, Datum d);
static PyObject *PLyList_FromArray(PLyDatumToOb *arg, Datum d);
static PyObject *PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim,
char **dataptr_p, bits8 **bitmap_p, int *bitmask_p);
static PyObject *PLyDict_FromComposite(PLyDatumToOb *arg, Datum d);
static PyObject *PLyDict_FromTuple(PLyDatumToOb *arg, HeapTuple tuple, TupleDesc desc, bool include_generated);
/* conversion from Python objects to Datums */
static Datum PLyObject_ToBool(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static Datum PLyObject_ToBytea(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static Datum PLyObject_ToComposite(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static Datum PLyObject_ToScalar(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static Datum PLyObject_ToDomain(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static Datum PLyObject_ToTransform(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static Datum PLySequence_ToArray(PLyObToDatum *arg, PyObject *plrv,
bool *isnull, bool inarray);
static void PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list,
int *dims, int ndim, int dim,
Datum *elems, bool *nulls, int *currelem);
/* conversion from Python objects to composite Datums */
static Datum PLyString_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray);
static Datum PLyMapping_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *mapping);
static Datum PLySequence_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *sequence);
static Datum PLyGenericObject_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *object, bool inarray);
/*
* Conversion functions. Remember output from Python is input to
* PostgreSQL, and vice versa.
*/
/*
 * Perform input (Datum -> Python object) conversion, given
 * correctly-set-up state information.
 *
 * This is the outer-level entry point for any input conversion; the
 * per-type conversion functions recurse directly to each other.
 */
PyObject *
PLy_input_convert(PLyDatumToOb *arg, Datum val)
{
	PLyExecutionContext *exec_ctx = PLy_current_execution_context();
	MemoryContext scratch = PLy_get_scratch_context(exec_ctx);
	MemoryContext save_cxt;
	PyObject   *obj;

	/*
	 * Evaluate in the scratch context so that memory leaked by the datatype
	 * output function calls is reclaimed.  The individual PLyDatumToObFunc
	 * functions cannot reset the scratch context themselves: they recurse,
	 * and an inner call could clobber data an outer call still needs.  So
	 * the reset happens once, here, at the outermost recursion level.
	 *
	 * The reset is done before, not after, the conversion cycle, so that we
	 * are never obliged to drop a Python refcount on the result object if
	 * MemoryContextReset were to throw an error.
	 */
	MemoryContextReset(scratch);

	save_cxt = MemoryContextSwitchTo(scratch);
	obj = arg->func(arg, val);
	MemoryContextSwitchTo(save_cxt);

	return obj;
}
/*
 * Perform output (Python object -> Datum) conversion, given
 * correctly-set-up state information.
 *
 * This is the outer-level entry point for any output conversion; the
 * per-type conversion functions recurse directly to each other.
 *
 * The result, as well as any cruft generated along the way, are left in
 * the current memory context.  Caller is responsible for cleanup.
 */
Datum
PLy_output_convert(PLyObToDatum *arg, PyObject *val, bool *isnull)
{
	/* At the outer level we are never converting an array element. */
	bool		inarray = false;

	return arg->func(arg, val, isnull, inarray);
}
/*
 * Transform a tuple into a Python dict object.
 *
 * Note: the tupdesc must match the one used to set up *arg.  We could
 * insist that this function look the tupdesc up from what is in *arg,
 * but in practice all callers have the right tupdesc available.
 */
PyObject *
PLy_input_from_tuple(PLyDatumToOb *arg, HeapTuple tuple, TupleDesc desc, bool include_generated)
{
	PLyExecutionContext *exec_ctx = PLy_current_execution_context();
	MemoryContext scratch = PLy_get_scratch_context(exec_ctx);
	MemoryContext save_cxt;
	PyObject   *dict;

	/*
	 * As in PLy_input_convert, evaluate in the (freshly reset) scratch
	 * context to avoid accumulating leaked memory.
	 */
	MemoryContextReset(scratch);

	save_cxt = MemoryContextSwitchTo(scratch);
	dict = PLyDict_FromTuple(arg, tuple, desc, include_generated);
	MemoryContextSwitchTo(save_cxt);

	return dict;
}
/*
 * Initialize, or re-initialize, per-column input info for a composite type.
 *
 * This is separate from PLy_input_setup_func() because in cases involving
 * anonymous record types, we need to be passed the tupdesc explicitly.
 * It's caller's responsibility that the tupdesc has adequate lifespan
 * in such cases.  If the tupdesc is for a named composite or registered
 * record type, it does not need to be long-lived.
 */
void
PLy_input_setup_tuple(PLyDatumToOb *arg, TupleDesc desc, PLyProcedure *proc)
{
	int			attno;

	/* Must already have been set up for composite input */
	Assert(arg->func == PLyDict_FromComposite);

	/* Remember the caller's tupdesc, but only for anonymous record types */
	if (arg->typoid == RECORDOID && arg->typmod < 0)
		arg->u.tuple.recdesc = desc;

	/* Rebuild the per-column array whenever the column count changes */
	if (arg->u.tuple.natts != desc->natts)
	{
		if (arg->u.tuple.atts)
			pfree(arg->u.tuple.atts);
		arg->u.tuple.natts = desc->natts;
		arg->u.tuple.atts = (PLyDatumToOb *)
			MemoryContextAllocZero(arg->mcxt,
								   desc->natts * sizeof(PLyDatumToOb));
	}

	for (attno = 0; attno < desc->natts; attno++)
	{
		Form_pg_attribute attr = TupleDescAttr(desc, attno);
		PLyDatumToOb *att = &arg->u.tuple.atts[attno];

		/* Dropped columns are skipped (their entries stay zero-filled) */
		if (attr->attisdropped)
			continue;

		/* Re-create the conversion info only if the column type changed */
		if (att->typoid != attr->atttypid || att->typmod != attr->atttypmod)
			PLy_input_setup_func(att, arg->mcxt,
								 attr->atttypid, attr->atttypmod,
								 proc);
	}
}
/*
 * Initialize, or re-initialize, per-column output info for a composite type.
 *
 * This is separate from PLy_output_setup_func() because in cases involving
 * anonymous record types, we need to be passed the tupdesc explicitly.
 * It's caller's responsibility that the tupdesc has adequate lifespan
 * in such cases.  If the tupdesc is for a named composite or registered
 * record type, it does not need to be long-lived.
 */
void
PLy_output_setup_tuple(PLyObToDatum *arg, TupleDesc desc, PLyProcedure *proc)
{
	int			attno;

	/* Must already have been set up for composite output */
	Assert(arg->func == PLyObject_ToComposite);

	/* Remember the caller's tupdesc, but only for anonymous record types */
	if (arg->typoid == RECORDOID && arg->typmod < 0)
		arg->u.tuple.recdesc = desc;

	/* Rebuild the per-column array whenever the column count changes */
	if (arg->u.tuple.natts != desc->natts)
	{
		if (arg->u.tuple.atts)
			pfree(arg->u.tuple.atts);
		arg->u.tuple.natts = desc->natts;
		arg->u.tuple.atts = (PLyObToDatum *)
			MemoryContextAllocZero(arg->mcxt,
								   desc->natts * sizeof(PLyObToDatum));
	}

	for (attno = 0; attno < desc->natts; attno++)
	{
		Form_pg_attribute attr = TupleDescAttr(desc, attno);
		PLyObToDatum *att = &arg->u.tuple.atts[attno];

		/* Dropped columns are skipped (their entries stay zero-filled) */
		if (attr->attisdropped)
			continue;

		/* Re-create the conversion info only if the column type changed */
		if (att->typoid != attr->atttypid || att->typmod != attr->atttypmod)
			PLy_output_setup_func(att, arg->mcxt,
								  attr->atttypid, attr->atttypmod,
								  proc);
	}
}
/*
 * Set up output info for a PL/Python function returning record.
 *
 * Note: the given tupdesc is not necessarily long-lived; we copy nothing
 * from it except its (blessed) typmod, then delegate per-column setup to
 * PLy_output_setup_tuple().
 */
void
PLy_output_setup_record(PLyObToDatum *arg, TupleDesc desc, PLyProcedure *proc)
{
	/* Makes no sense unless RECORD */
	Assert(arg->typoid == RECORDOID);
	Assert(desc->tdtypeid == RECORDOID);

	/*
	 * Bless the record type if not already done.  We'd have to do this anyway
	 * to return a tuple, so we might as well force the issue so we can use
	 * the known-record-type code path.
	 */
	BlessTupleDesc(desc);

	/*
	 * Update arg->typmod, and clear the recdesc link if it's changed.  The
	 * next call of PLyObject_ToComposite will look up a long-lived tupdesc
	 * for the record type.
	 */
	arg->typmod = desc->tdtypmod;
	if (arg->u.tuple.recdesc &&
		arg->u.tuple.recdesc->tdtypmod != arg->typmod)
		arg->u.tuple.recdesc = NULL;

	/* Update derived data if necessary */
	PLy_output_setup_tuple(arg, desc, proc);
}
/*
 * Recursively initialize the PLyObToDatum structure(s) needed to construct
 * a SQL value of the specified typeOid/typmod from a Python value.
 * (But note that at this point we may have RECORDOID/-1, ie, an indeterminate
 * record type.)
 * proc is used to look up transform functions.
 *
 * All sub-structures (domain base, array element) are allocated in arg_mcxt,
 * so the whole conversion tree lives and dies with that context.
 */
void
PLy_output_setup_func(PLyObToDatum *arg, MemoryContext arg_mcxt,
					  Oid typeOid, int32 typmod,
					  PLyProcedure *proc)
{
	TypeCacheEntry *typentry;
	char		typtype;
	Oid			trfuncid;
	Oid			typinput;

	/* Since this is recursive, it could theoretically be driven to overflow */
	check_stack_depth();

	arg->typoid = typeOid;
	arg->typmod = typmod;
	arg->mcxt = arg_mcxt;

	/*
	 * Fetch typcache entry for the target type, asking for whatever info
	 * we'll need later.  RECORD is a special case: just treat it as composite
	 * without bothering with the typcache entry.
	 */
	if (typeOid != RECORDOID)
	{
		typentry = lookup_type_cache(typeOid, TYPECACHE_DOMAIN_BASE_INFO);
		typtype = typentry->typtype;
		arg->typbyval = typentry->typbyval;
		arg->typlen = typentry->typlen;
		arg->typalign = typentry->typalign;
	}
	else
	{
		typentry = NULL;
		typtype = TYPTYPE_COMPOSITE;
		/* hard-wired knowledge about type RECORD: */
		arg->typbyval = false;
		arg->typlen = -1;
		arg->typalign = TYPALIGN_DOUBLE;
	}

	/*
	 * Choose conversion method.  Note that transform functions are checked
	 * for composite and scalar types, but not for arrays or domains.  This is
	 * somewhat historical, but we'd have a problem allowing them on domains,
	 * since we drill down through all levels of a domain nest without looking
	 * at the intermediate levels at all.
	 */
	if (typtype == TYPTYPE_DOMAIN)
	{
		/* Domain */
		arg->func = PLyObject_ToDomain;
		arg->u.domain.domain_info = NULL;
		/* Recursively set up conversion info for the element type */
		arg->u.domain.base = (PLyObToDatum *)
			MemoryContextAllocZero(arg_mcxt, sizeof(PLyObToDatum));
		PLy_output_setup_func(arg->u.domain.base, arg_mcxt,
							  typentry->domainBaseType,
							  typentry->domainBaseTypmod,
							  proc);
	}
	else if (typentry &&
			 OidIsValid(typentry->typelem) && typentry->typlen == -1)
	{
		/* Standard varlena array (cf. get_element_type) */
		arg->func = PLySequence_ToArray;
		/* Get base type OID to insert into constructed array */
		/* (note this might not be the same as the immediate child type) */
		arg->u.array.elmbasetype = getBaseType(typentry->typelem);
		/* Recursively set up conversion info for the element type */
		arg->u.array.elm = (PLyObToDatum *)
			MemoryContextAllocZero(arg_mcxt, sizeof(PLyObToDatum));
		PLy_output_setup_func(arg->u.array.elm, arg_mcxt,
							  typentry->typelem, typmod,
							  proc);
	}
	else if ((trfuncid = get_transform_tosql(typeOid,
											 proc->langid,
											 proc->trftypes)))
	{
		/* Language has a TRANSFORM for this type: use it */
		arg->func = PLyObject_ToTransform;
		fmgr_info_cxt(trfuncid, &arg->u.transform.typtransform, arg_mcxt);
	}
	else if (typtype == TYPTYPE_COMPOSITE)
	{
		/* Named composite type, or RECORD */
		arg->func = PLyObject_ToComposite;
		/* We'll set up the per-field data later */
		arg->u.tuple.recdesc = NULL;
		arg->u.tuple.typentry = typentry;
		arg->u.tuple.tupdescid = INVALID_TUPLEDESC_IDENTIFIER;
		arg->u.tuple.atts = NULL;
		arg->u.tuple.natts = 0;
		/* Mark this invalid till needed, too */
		arg->u.tuple.recinfunc.fn_oid = InvalidOid;
	}
	else
	{
		/* Scalar type, but we have a couple of special cases */
		switch (typeOid)
		{
			case BOOLOID:
				arg->func = PLyObject_ToBool;
				break;
			case BYTEAOID:
				arg->func = PLyObject_ToBytea;
				break;
			default:
				/* Generic path: stringify, then run the type's input function */
				arg->func = PLyObject_ToScalar;
				getTypeInputInfo(typeOid, &typinput, &arg->u.scalar.typioparam);
				fmgr_info_cxt(typinput, &arg->u.scalar.typfunc, arg_mcxt);
				break;
		}
	}
}
/*
 * Recursively initialize the PLyDatumToOb structure(s) needed to construct
 * a Python value from a SQL value of the specified typeOid/typmod.
 * (But note that at this point we may have RECORDOID/-1, ie, an indeterminate
 * record type.)
 * proc is used to look up transform functions.
 *
 * All sub-structures are allocated in arg_mcxt, so the whole conversion tree
 * lives and dies with that context.
 */
void
PLy_input_setup_func(PLyDatumToOb *arg, MemoryContext arg_mcxt,
					 Oid typeOid, int32 typmod,
					 PLyProcedure *proc)
{
	TypeCacheEntry *typentry;
	char		typtype;
	Oid			trfuncid;
	Oid			typoutput;
	bool		typisvarlena;

	/* Since this is recursive, it could theoretically be driven to overflow */
	check_stack_depth();

	arg->typoid = typeOid;
	arg->typmod = typmod;
	arg->mcxt = arg_mcxt;

	/*
	 * Fetch typcache entry for the target type, asking for whatever info
	 * we'll need later.  RECORD is a special case: just treat it as composite
	 * without bothering with the typcache entry.
	 */
	if (typeOid != RECORDOID)
	{
		typentry = lookup_type_cache(typeOid, TYPECACHE_DOMAIN_BASE_INFO);
		typtype = typentry->typtype;
		arg->typbyval = typentry->typbyval;
		arg->typlen = typentry->typlen;
		arg->typalign = typentry->typalign;
	}
	else
	{
		typentry = NULL;
		typtype = TYPTYPE_COMPOSITE;
		/* hard-wired knowledge about type RECORD: */
		arg->typbyval = false;
		arg->typlen = -1;
		arg->typalign = TYPALIGN_DOUBLE;
	}

	/*
	 * Choose conversion method.  Note that transform functions are checked
	 * for composite and scalar types, but not for arrays or domains.  This is
	 * somewhat historical, but we'd have a problem allowing them on domains,
	 * since we drill down through all levels of a domain nest without looking
	 * at the intermediate levels at all.
	 */
	if (typtype == TYPTYPE_DOMAIN)
	{
		/*
		 * Domain --- we don't care, just recurse down to the base type.
		 * Unlike the output side, this overwrites arg in place, so domains
		 * are completely transparent for input conversion.
		 */
		PLy_input_setup_func(arg, arg_mcxt,
							 typentry->domainBaseType,
							 typentry->domainBaseTypmod,
							 proc);
	}
	else if (typentry &&
			 OidIsValid(typentry->typelem) && typentry->typlen == -1)
	{
		/* Standard varlena array (cf. get_element_type) */
		arg->func = PLyList_FromArray;
		/* Recursively set up conversion info for the element type */
		arg->u.array.elm = (PLyDatumToOb *)
			MemoryContextAllocZero(arg_mcxt, sizeof(PLyDatumToOb));
		PLy_input_setup_func(arg->u.array.elm, arg_mcxt,
							 typentry->typelem, typmod,
							 proc);
	}
	else if ((trfuncid = get_transform_fromsql(typeOid,
											   proc->langid,
											   proc->trftypes)))
	{
		/* Language has a TRANSFORM for this type: use it */
		arg->func = PLyObject_FromTransform;
		fmgr_info_cxt(trfuncid, &arg->u.transform.typtransform, arg_mcxt);
	}
	else if (typtype == TYPTYPE_COMPOSITE)
	{
		/* Named composite type, or RECORD */
		arg->func = PLyDict_FromComposite;
		/* We'll set up the per-field data later */
		arg->u.tuple.recdesc = NULL;
		arg->u.tuple.typentry = typentry;
		arg->u.tuple.tupdescid = INVALID_TUPLEDESC_IDENTIFIER;
		arg->u.tuple.atts = NULL;
		arg->u.tuple.natts = 0;
	}
	else
	{
		/* Scalar type, but we have a couple of special cases */
		switch (typeOid)
		{
			case BOOLOID:
				arg->func = PLyBool_FromBool;
				break;
			case FLOAT4OID:
				arg->func = PLyFloat_FromFloat4;
				break;
			case FLOAT8OID:
				arg->func = PLyFloat_FromFloat8;
				break;
			case NUMERICOID:
				arg->func = PLyDecimal_FromNumeric;
				break;
			case INT2OID:
				arg->func = PLyInt_FromInt16;
				break;
			case INT4OID:
				arg->func = PLyInt_FromInt32;
				break;
			case INT8OID:
				arg->func = PLyLong_FromInt64;
				break;
			case OIDOID:
				arg->func = PLyLong_FromOid;
				break;
			case BYTEAOID:
				arg->func = PLyBytes_FromBytea;
				break;
			default:
				/* Generic path: run the type's output function, make a str */
				arg->func = PLyString_FromScalar;
				getTypeOutputInfo(typeOid, &typoutput, &typisvarlena);
				fmgr_info_cxt(typoutput, &arg->u.scalar.typfunc, arg_mcxt);
				break;
		}
	}
}
/*
* Special-purpose input converters.
*/
/* Convert a SQL bool datum to the Python True/False singleton. */
static PyObject *
PLyBool_FromBool(PLyDatumToOb *arg, Datum d)
{
	if (!DatumGetBool(d))
		Py_RETURN_FALSE;
	Py_RETURN_TRUE;
}
/* Convert a SQL float4 datum to a Python float. */
static PyObject *
PLyFloat_FromFloat4(PLyDatumToOb *arg, Datum d)
{
	float4		value = DatumGetFloat4(d);

	return PyFloat_FromDouble((double) value);
}
/* Convert a SQL float8 datum to a Python float. */
static PyObject *
PLyFloat_FromFloat8(PLyDatumToOb *arg, Datum d)
{
	float8		value = DatumGetFloat8(d);

	return PyFloat_FromDouble(value);
}
/*
 * Convert a SQL numeric datum to a Python Decimal.
 *
 * The Decimal constructor is looked up once and cached in a static for the
 * life of the process.  We prefer the C-accelerated "cdecimal" module when
 * importable, falling back to the standard "decimal" module.
 */
static PyObject *
PLyDecimal_FromNumeric(PLyDatumToOb *arg, Datum d)
{
	/* cached Decimal constructor; NULL until first use */
	static PyObject *decimal_constructor;
	char	   *str;
	PyObject   *pyvalue;

	/* Try to import cdecimal.  If it doesn't exist, fall back to decimal. */
	if (!decimal_constructor)
	{
		PyObject   *decimal_module;

		decimal_module = PyImport_ImportModule("cdecimal");
		if (!decimal_module)
		{
			/* clear the ImportError before trying the fallback module */
			PyErr_Clear();
			decimal_module = PyImport_ImportModule("decimal");
		}
		if (!decimal_module)
			PLy_elog(ERROR, "could not import a module for Decimal constructor");
		decimal_constructor = PyObject_GetAttrString(decimal_module, "Decimal");
		if (!decimal_constructor)
			PLy_elog(ERROR, "no Decimal attribute in module");
	}

	/* Route through numeric's text output, then parse with Decimal(str) */
	str = DatumGetCString(DirectFunctionCall1(numeric_out, d));
	pyvalue = PyObject_CallFunction(decimal_constructor, "s", str);
	if (!pyvalue)
		PLy_elog(ERROR, "conversion from numeric to Decimal failed");
	return pyvalue;
}
/* Convert a SQL int2 datum to a Python int. */
static PyObject *
PLyInt_FromInt16(PLyDatumToOb *arg, Datum d)
{
	int16		value = DatumGetInt16(d);

	return PyInt_FromLong((long) value);
}
/* Convert a SQL int4 datum to a Python int. */
static PyObject *
PLyInt_FromInt32(PLyDatumToOb *arg, Datum d)
{
	int32		value = DatumGetInt32(d);

	return PyInt_FromLong((long) value);
}
/* Convert a SQL int8 datum to a Python long (int on Python 3). */
static PyObject *
PLyLong_FromInt64(PLyDatumToOb *arg, Datum d)
{
	int64		value = DatumGetInt64(d);

	return PyLong_FromLongLong(value);
}
/* Convert a SQL oid datum to a Python long (int on Python 3). */
static PyObject *
PLyLong_FromOid(PLyDatumToOb *arg, Datum d)
{
	Oid			value = DatumGetObjectId(d);

	return PyLong_FromUnsignedLong((unsigned long) value);
}
/* Convert a SQL bytea datum to a Python bytes object. */
static PyObject *
PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d)
{
	bytea	   *raw = DatumGetByteaPP(d);

	/* The _ANY accessors cope with both 1-byte and 4-byte varlena headers. */
	return PyBytes_FromStringAndSize(VARDATA_ANY(raw),
									 VARSIZE_ANY_EXHDR(raw));
}
/*
 * Generic input conversion using a SQL type's output function: run the
 * datum through its text output function and wrap the result in a
 * Python string.
 */
static PyObject *
PLyString_FromScalar(PLyDatumToOb *arg, Datum d)
{
	PyObject   *result;
	char	   *textform = OutputFunctionCall(&arg->u.scalar.typfunc, d);

	result = PyString_FromString(textform);
	pfree(textform);
	return result;
}
/*
 * Convert using a from-SQL transform function.  The transform returns a
 * PyObject pointer smuggled inside a Datum.
 */
static PyObject *
PLyObject_FromTransform(PLyDatumToOb *arg, Datum d)
{
	Datum		result = FunctionCall1(&arg->u.transform.typtransform, d);

	return (PyObject *) DatumGetPointer(result);
}
/*
 * Convert a SQL array to a Python list (of nested lists, for arrays of
 * more than one dimension).
 */
static PyObject *
PLyList_FromArray(PLyDatumToOb *arg, Datum d)
{
	ArrayType  *array = DatumGetArrayTypeP(d);
	PLyDatumToOb *elm = arg->u.array.elm;
	int			ndim;
	int		   *dims;
	char	   *dataptr;
	bits8	   *bitmap;
	int			bitmask;

	/* A zero-dimensional (empty) array maps to an empty Python list */
	if (ARR_NDIM(array) == 0)
		return PyList_New(0);

	/* Array dimensions and left bounds */
	ndim = ARR_NDIM(array);
	dims = ARR_DIMS(array);
	Assert(ndim <= MAXDIM);

	/*
	 * We iterate the SQL array in the physical order it's stored in the
	 * datum.  For example, for a 3-dimensional array the order of iteration
	 * would be the following: [0,0,0] elements through [0,0,k], then [0,1,0]
	 * through [0,1,k] till [0,m,k], then [1,0,0] through [1,0,k] till
	 * [1,m,k], and so on.
	 *
	 * In Python, there are no multi-dimensional lists as such, but they are
	 * represented as a list of lists.  So a 3-d array of [n,m,k] elements is
	 * a list of n m-element arrays, each element of which is k-element array.
	 * PLyList_FromArray_recurse() builds the Python list for a single
	 * dimension, and recurses for the next inner dimension.
	 */
	dataptr = ARR_DATA_PTR(array);
	bitmap = ARR_NULLBITMAP(array);
	bitmask = 1;

	return PLyList_FromArray_recurse(elm, dims, ndim, 0,
									 &dataptr, &bitmap, &bitmask);
}
/*
 * Workhorse for PLyList_FromArray: build the Python list for one dimension
 * of the array, recursing for inner dimensions.
 *
 * dataptr_p, bitmap_p and bitmask_p are in/out cursors over the array's
 * element data and null bitmap; the innermost level advances them past the
 * elements it consumes, so sibling calls resume at the right position.
 */
static PyObject *
PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim,
						  char **dataptr_p, bits8 **bitmap_p, int *bitmask_p)
{
	int			i;
	PyObject   *list;

	list = PyList_New(dims[dim]);
	if (!list)
		return NULL;

	if (dim < ndim - 1)
	{
		/* Outer dimension.  Recurse for each inner slice. */
		for (i = 0; i < dims[dim]; i++)
		{
			PyObject   *sublist;

			sublist = PLyList_FromArray_recurse(elm, dims, ndim, dim + 1,
												dataptr_p, bitmap_p, bitmask_p);
			PyList_SET_ITEM(list, i, sublist);
		}
	}
	else
	{
		/*
		 * Innermost dimension.  Fill the list with the values from the array
		 * for this slice.
		 */
		char	   *dataptr = *dataptr_p;
		bits8	   *bitmap = *bitmap_p;
		int			bitmask = *bitmask_p;

		for (i = 0; i < dims[dim]; i++)
		{
			/* checking for NULL */
			if (bitmap && (*bitmap & bitmask) == 0)
			{
				Py_INCREF(Py_None);
				PyList_SET_ITEM(list, i, Py_None);
			}
			else
			{
				Datum		itemvalue;

				/* Fetch the element, convert it, and step past it */
				itemvalue = fetch_att(dataptr, elm->typbyval, elm->typlen);
				PyList_SET_ITEM(list, i, elm->func(elm, itemvalue));
				dataptr = att_addlength_pointer(dataptr, elm->typlen, dataptr);
				dataptr = (char *) att_align_nominal(dataptr, elm->typalign);
			}

			/* advance bitmap pointer if any */
			if (bitmap)
			{
				bitmask <<= 1;
				if (bitmask == 0x100 /* (1<<8) */ )
				{
					bitmap++;
					bitmask = 1;
				}
			}
		}

		/* Report consumed positions back to the caller */
		*dataptr_p = dataptr;
		*bitmap_p = bitmap;
		*bitmask_p = bitmask;
	}

	return list;
}
/*
 * Convert a composite SQL value to a Python dict.
 *
 * The composite datum carries its own rowtype identity; we look up the
 * matching tupdesc, (re)initialize per-column converters if needed, and
 * hand off to PLyDict_FromTuple.
 */
static PyObject *
PLyDict_FromComposite(PLyDatumToOb *arg, Datum d)
{
	PyObject   *dict;
	HeapTupleHeader td;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tmptup;

	td = DatumGetHeapTupleHeader(d);
	/* Extract rowtype info and find a tupdesc */
	tupType = HeapTupleHeaderGetTypeId(td);
	tupTypmod = HeapTupleHeaderGetTypMod(td);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);

	/* Set up I/O funcs if not done yet */
	PLy_input_setup_tuple(arg, tupdesc,
						  PLy_current_execution_context()->curr_proc);

	/* Build a temporary HeapTuple control structure */
	tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
	tmptup.t_data = td;

	dict = PLyDict_FromTuple(arg, &tmptup, tupdesc, true);

	/* Release the refcount acquired by lookup_rowtype_tupdesc */
	ReleaseTupleDesc(tupdesc);

	return dict;
}
/*
 * Transform a tuple into a Python dict object.
 *
 * arg: composite-input descriptor whose per-column converters must already
 *      match 'desc' (see PLy_input_setup_tuple)
 * tuple/desc: the row to convert and its descriptor
 * include_generated: if false, generated columns are omitted from the dict
 *
 * Dropped columns are always skipped; SQL NULLs become Python None.
 */
static PyObject *
PLyDict_FromTuple(PLyDatumToOb *arg, HeapTuple tuple, TupleDesc desc, bool include_generated)
{
	PyObject *volatile dict;

	/* Simple sanity check that desc matches */
	Assert(desc->natts == arg->u.tuple.natts);

	dict = PyDict_New();
	if (dict == NULL)
		return NULL;

	/* PG_TRY so the dict's refcount is released if conversion errors out */
	PG_TRY();
	{
		int			i;

		for (i = 0; i < arg->u.tuple.natts; i++)
		{
			PLyDatumToOb *att = &arg->u.tuple.atts[i];
			Form_pg_attribute attr = TupleDescAttr(desc, i);
			char	   *key;
			Datum		vattr;
			bool		is_null;
			PyObject   *value;

			if (attr->attisdropped)
				continue;

			if (attr->attgenerated)
			{
				/* don't include unless requested */
				if (!include_generated)
					continue;
			}

			key = NameStr(attr->attname);
			vattr = heap_getattr(tuple, (i + 1), desc, &is_null);

			if (is_null)
				PyDict_SetItemString(dict, key, Py_None);
			else
			{
				value = att->func(att, vattr);
				/* PyDict_SetItemString takes its own reference */
				PyDict_SetItemString(dict, key, value);
				Py_DECREF(value);
			}
		}
	}
	PG_CATCH();
	{
		Py_DECREF(dict);
		PG_RE_THROW();
	}
	PG_END_TRY();

	return dict;
}
/*
 * Convert a Python object to a PostgreSQL bool datum.  This can't go
 * through the generic conversion function, because Python attaches a
 * Boolean value to everything, more things than the PostgreSQL bool
 * type can parse.
 */
static Datum
PLyObject_ToBool(PLyObToDatum *arg, PyObject *plrv,
				 bool *isnull, bool inarray)
{
	int			istrue;

	/* Python None maps to SQL NULL */
	if (plrv == Py_None)
	{
		*isnull = true;
		return (Datum) 0;
	}
	*isnull = false;

	/*
	 * PyObject_IsTrue() can fail (returning -1) if the object's __bool__ or
	 * __len__ method raises an error.  Previously the -1 was fed straight
	 * into BoolGetDatum, silently swallowing the Python exception and
	 * producing "true"; report the error instead.
	 */
	istrue = PyObject_IsTrue(plrv);
	if (istrue < 0)
		PLy_elog(ERROR, NULL);
	return BoolGetDatum(istrue);
}
/*
 * Convert a Python object to a PostgreSQL bytea datum.  This doesn't
 * go through the generic conversion function to circumvent problems
 * with embedded nulls.  And it's faster this way.
 */
static Datum
PLyObject_ToBytea(PLyObToDatum *arg, PyObject *plrv,
				  bool *isnull, bool inarray)
{
	/* volatile because it's assigned before PG_TRY and decref'd in cleanup */
	PyObject *volatile plrv_so = NULL;
	Datum		rv = (Datum) 0;

	/* Python None maps to SQL NULL */
	if (plrv == Py_None)
	{
		*isnull = true;
		return (Datum) 0;
	}
	*isnull = false;

	plrv_so = PyObject_Bytes(plrv);
	if (!plrv_so)
		PLy_elog(ERROR, "could not create bytes representation of Python object");

	/* PG_TRY so the bytes object is released even if palloc errors out */
	PG_TRY();
	{
		char	   *plrv_sc = PyBytes_AsString(plrv_so);
		size_t		len = PyBytes_Size(plrv_so);
		size_t		size = len + VARHDRSZ;
		bytea	   *result = palloc(size);

		/* memcpy, not strcpy: the payload may contain embedded NULs */
		SET_VARSIZE(result, size);
		memcpy(VARDATA(result), plrv_sc, len);
		rv = PointerGetDatum(result);
	}
	PG_FINALLY();
	{
		Py_XDECREF(plrv_so);
	}
	PG_END_TRY();

	return rv;
}
/*
 * Convert a Python object to a composite type.  First look up the type's
 * description, then route the Python object through the conversion function
 * for obtaining PostgreSQL tuples.
 *
 * Strings are parsed with record_in; sequences, mappings, and generic
 * objects are converted field-by-field via the helpers below.
 */
static Datum
PLyObject_ToComposite(PLyObToDatum *arg, PyObject *plrv,
					  bool *isnull, bool inarray)
{
	Datum		rv;
	TupleDesc	desc;

	/* Python None maps to SQL NULL */
	if (plrv == Py_None)
	{
		*isnull = true;
		return (Datum) 0;
	}
	*isnull = false;

	/*
	 * The string conversion case doesn't require a tupdesc, nor per-field
	 * conversion data, so just go for it if that's the case to use.
	 */
	if (PyString_Check(plrv) || PyUnicode_Check(plrv))
		return PLyString_ToComposite(arg, plrv, inarray);

	/*
	 * If we're dealing with a named composite type, we must look up the
	 * tupdesc every time, to protect against possible changes to the type.
	 * RECORD types can't change between calls; but we must still be willing
	 * to set up the info the first time, if nobody did yet.
	 */
	if (arg->typoid != RECORDOID)
	{
		desc = lookup_rowtype_tupdesc(arg->typoid, arg->typmod);
		/* We should have the descriptor of the type's typcache entry */
		Assert(desc == arg->u.tuple.typentry->tupDesc);
		/* Detect change of descriptor, update cache if needed */
		if (arg->u.tuple.tupdescid != arg->u.tuple.typentry->tupDesc_identifier)
		{
			PLy_output_setup_tuple(arg, desc,
								   PLy_current_execution_context()->curr_proc);
			arg->u.tuple.tupdescid = arg->u.tuple.typentry->tupDesc_identifier;
		}
	}
	else
	{
		desc = arg->u.tuple.recdesc;
		if (desc == NULL)
		{
			desc = lookup_rowtype_tupdesc(arg->typoid, arg->typmod);
			arg->u.tuple.recdesc = desc;
		}
		else
		{
			/* Pin descriptor to match unpin below */
			PinTupleDesc(desc);
		}
	}

	/* Simple sanity check on our caching */
	Assert(desc->natts == arg->u.tuple.natts);

	/*
	 * Convert, using the appropriate method depending on the type of the
	 * supplied Python object.
	 */
	if (PySequence_Check(plrv))
		/* composite type as sequence (tuple, list etc) */
		rv = PLySequence_ToComposite(arg, desc, plrv);
	else if (PyMapping_Check(plrv))
		/* composite type as mapping (currently only dict) */
		rv = PLyMapping_ToComposite(arg, desc, plrv);
	else
		/* returned as smth, must provide method __getattr__(name) */
		rv = PLyGenericObject_ToComposite(arg, desc, plrv, inarray);

	/* Balances the lookup/pin acquired above */
	ReleaseTupleDesc(desc);

	return rv;
}
/*
 * Convert Python object to C string in server encoding.
 *
 * Note: this is exported for use by add-on transform modules.
 *
 * The result is palloc'd (pstrdup).  ereports if the object's string form
 * contains embedded NUL bytes, since a cstring can't represent those.
 */
char *
PLyObject_AsString(PyObject *plrv)
{
	PyObject   *plrv_bo;
	char	   *plrv_sc;
	size_t		plen;
	size_t		slen;

	if (PyUnicode_Check(plrv))
		plrv_bo = PLyUnicode_Bytes(plrv);
	else if (PyFloat_Check(plrv))
	{
		/* use repr() for floats, str() is lossy */
#if PY_MAJOR_VERSION >= 3
		PyObject   *s = PyObject_Repr(plrv);

		plrv_bo = PLyUnicode_Bytes(s);
		Py_XDECREF(s);
#else
		plrv_bo = PyObject_Repr(plrv);
#endif
	}
	else
	{
#if PY_MAJOR_VERSION >= 3
		PyObject   *s = PyObject_Str(plrv);

		plrv_bo = PLyUnicode_Bytes(s);
		Py_XDECREF(s);
#else
		plrv_bo = PyObject_Str(plrv);
#endif
	}
	if (!plrv_bo)
		PLy_elog(ERROR, "could not create string representation of Python object");

	plrv_sc = pstrdup(PyBytes_AsString(plrv_bo));
	plen = PyBytes_Size(plrv_bo);
	slen = strlen(plrv_sc);

	Py_XDECREF(plrv_bo);

	/*
	 * pstrdup stops at the first NUL, so slen < plen means the Python string
	 * contained an embedded NUL byte; slen > plen should be impossible.
	 */
	if (slen < plen)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("could not convert Python object into cstring: Python string representation appears to contain null bytes")));
	else if (slen > plen)
		elog(ERROR, "could not convert Python object into cstring: Python string longer than reported length");
	pg_verifymbstr(plrv_sc, slen, false);
	return plrv_sc;
}
/*
 * Generic output conversion function: convert PyObject to cstring and
 * cstring into PostgreSQL type, via the type's input function.
 */
static Datum
PLyObject_ToScalar(PLyObToDatum *arg, PyObject *plrv,
				   bool *isnull, bool inarray)
{
	char	   *textform;

	/* Python None maps to SQL NULL */
	if (plrv == Py_None)
	{
		*isnull = true;
		return (Datum) 0;
	}

	*isnull = false;
	textform = PLyObject_AsString(plrv);
	return InputFunctionCall(&arg->u.scalar.typfunc,
							 textform,
							 arg->u.scalar.typioparam,
							 arg->typmod);
}
/*
 * Convert to a domain type: convert to the base type first, then verify
 * the value against the domain's constraints.
 */
static Datum
PLyObject_ToDomain(PLyObToDatum *arg, PyObject *plrv,
				   bool *isnull, bool inarray)
{
	PLyObToDatum *basetype = arg->u.domain.base;
	Datum		value = basetype->func(basetype, plrv, isnull, inarray);

	domain_check(value, *isnull, arg->typoid,
				 &arg->u.domain.domain_info, arg->mcxt);
	return value;
}
/*
 * Convert using a to-SQL transform function.  The PyObject pointer is
 * passed to the transform smuggled inside a Datum.
 */
static Datum
PLyObject_ToTransform(PLyObToDatum *arg, PyObject *plrv,
					  bool *isnull, bool inarray)
{
	/* Python None maps to SQL NULL */
	if (plrv != Py_None)
	{
		*isnull = false;
		return FunctionCall1(&arg->u.transform.typtransform,
							 PointerGetDatum(plrv));
	}
	*isnull = true;
	return (Datum) 0;
}
/*
 * Convert Python sequence to SQL array.
 *
 * Nested Python lists map to a multi-dimensional array; a non-list
 * sequence at the top level is treated as one-dimensional for
 * backwards compatibility.  All lower bounds are set to 1.
 */
static Datum
PLySequence_ToArray(PLyObToDatum *arg, PyObject *plrv,
					bool *isnull, bool inarray)
{
	ArrayType  *array;
	int			i;
	Datum	   *elems;
	bool	   *nulls;
	int64		len;
	int			ndim;
	int			dims[MAXDIM];
	int			lbs[MAXDIM];
	int			currelem;
	PyObject   *pyptr = plrv;
	PyObject   *next;

	/* Python None maps to SQL NULL */
	if (plrv == Py_None)
	{
		*isnull = true;
		return (Datum) 0;
	}
	*isnull = false;

	/*
	 * Determine the number of dimensions, and their sizes, by walking down
	 * the chain of first elements of each nested list.
	 */
	ndim = 0;
	len = 1;

	/* Balance the Py_XDECREF at the top of the descent loop's exit */
	Py_INCREF(plrv);

	for (;;)
	{
		if (!PyList_Check(pyptr))
			break;

		if (ndim == MAXDIM)
			PLy_elog(ERROR, "number of array dimensions exceeds the maximum allowed (%d)", MAXDIM);

		dims[ndim] = PySequence_Length(pyptr);
		if (dims[ndim] < 0)
			PLy_elog(ERROR, "could not determine sequence length for function return value");

		/* Guard both the per-dimension size and the running total */
		if (dims[ndim] > MaxAllocSize)
			PLy_elog(ERROR, "array size exceeds the maximum allowed");

		len *= dims[ndim];
		if (len > MaxAllocSize)
			PLy_elog(ERROR, "array size exceeds the maximum allowed");

		if (dims[ndim] == 0)
		{
			/* empty sequence */
			break;
		}

		ndim++;

		next = PySequence_GetItem(pyptr, 0);
		Py_XDECREF(pyptr);
		pyptr = next;
	}
	Py_XDECREF(pyptr);

	/*
	 * Check for zero dimensions. This happens if the object is a tuple or a
	 * string, rather than a list, or is not a sequence at all. We don't map
	 * tuples or strings to arrays in general, but in the first level, be
	 * lenient, for historical reasons. So if the object is a sequence of any
	 * kind, treat it as a one-dimensional array.
	 */
	if (ndim == 0)
	{
		if (!PySequence_Check(plrv))
			PLy_elog(ERROR, "return value of function with array return type is not a Python sequence");

		ndim = 1;
		len = dims[0] = PySequence_Length(plrv);
	}

	/*
	 * Traverse the Python lists, in depth-first order, and collect all the
	 * elements at the bottom level into 'elems'/'nulls' arrays.
	 */
	elems = palloc(sizeof(Datum) * len);
	nulls = palloc(sizeof(bool) * len);
	currelem = 0;
	PLySequence_ToArray_recurse(arg->u.array.elm, plrv,
								dims, ndim, 0,
								elems, nulls, &currelem);

	/* SQL arrays are 1-based by convention */
	for (i = 0; i < ndim; i++)
		lbs[i] = 1;

	array = construct_md_array(elems,
							   nulls,
							   ndim,
							   dims,
							   lbs,
							   arg->u.array.elmbasetype,
							   arg->u.array.elm->typlen,
							   arg->u.array.elm->typbyval,
							   arg->u.array.elm->typalign);

	return PointerGetDatum(array);
}
/*
 * Helper function for PLySequence_ToArray. Traverse a Python list of lists in
 * depth-first order, storing the elements in 'elems'/'nulls', with *currelem
 * as the in/out write cursor.
 *
 * Errors out if an inner sequence's length disagrees with the dimension
 * sizes probed by the caller (ragged nested lists).
 */
static void
PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list,
							int *dims, int ndim, int dim,
							Datum *elems, bool *nulls, int *currelem)
{
	int			i;

	if (PySequence_Length(list) != dims[dim])
		ereport(ERROR,
				(errmsg("wrong length of inner sequence: has length %d, but %d was expected",
						(int) PySequence_Length(list), dims[dim]),
				 (errdetail("To construct a multidimensional array, the inner sequences must all have the same length."))));

	if (dim < ndim - 1)
	{
		/* Outer dimension: recurse into each sub-list */
		for (i = 0; i < dims[dim]; i++)
		{
			PyObject   *sublist = PySequence_GetItem(list, i);

			PLySequence_ToArray_recurse(elm, sublist, dims, ndim, dim + 1,
										elems, nulls, currelem);
			Py_XDECREF(sublist);
		}
	}
	else
	{
		/* Innermost dimension: convert each element and append */
		for (i = 0; i < dims[dim]; i++)
		{
			PyObject   *obj = PySequence_GetItem(list, i);

			elems[*currelem] = elm->func(elm, obj, &nulls[*currelem], true);
			Py_XDECREF(obj);
			(*currelem)++;
		}
	}
}
/*
 * Convert a Python string to composite, using record_in.
 *
 * inarray tells us whether the composite is an element of an array, which
 * only matters for producing a better error hint (see below).
 */
static Datum
PLyString_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray)
{
	char	   *str;

	/*
	 * Set up call data for record_in, if we didn't already.  (We can't just
	 * use DirectFunctionCall, because record_in needs a fn_extra field.)
	 */
	if (!OidIsValid(arg->u.tuple.recinfunc.fn_oid))
		fmgr_info_cxt(F_RECORD_IN, &arg->u.tuple.recinfunc, arg->mcxt);

	str = PLyObject_AsString(string);

	/*
	 * If we are parsing a composite type within an array, and the string
	 * isn't a valid record literal, there's a high chance that the function
	 * did something like:
	 *
	 * CREATE FUNCTION .. RETURNS comptype[] AS $$ return [['foo', 'bar']] $$
	 * LANGUAGE plpython;
	 *
	 * Before PostgreSQL 10, that was interpreted as a single-dimensional
	 * array, containing record ('foo', 'bar').  PostgreSQL 10 added support
	 * for multi-dimensional arrays, and it is now interpreted as a
	 * two-dimensional array, containing two records, 'foo', and 'bar'.
	 * record_in() will throw an error, because "foo" is not a valid record
	 * literal.
	 *
	 * To make that less confusing to users who are upgrading from older
	 * versions, try to give a hint in the typical instances of that.  If we
	 * are parsing an array of composite types, and we see a string literal
	 * that is not a valid record literal, give a hint.  We only want to give
	 * the hint in the narrow case of a malformed string literal, not any
	 * error from record_in(), so check for that case here specifically.
	 *
	 * This check better match the one in record_in(), so that we don't forbid
	 * literals that are actually valid!
	 */
	if (inarray)
	{
		char	   *ptr = str;

		/* Allow leading whitespace */
		while (*ptr && isspace((unsigned char) *ptr))
			ptr++;
		if (*ptr++ != '(')
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
					 errmsg("malformed record literal: \"%s\"", str),
					 errdetail("Missing left parenthesis."),
					 errhint("To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\".")));
	}

	return InputFunctionCall(&arg->u.tuple.recinfunc,
							 str,
							 arg->typoid,
							 arg->typmod);
}
/*
 * Convert a Python mapping (e.g. dict) to a composite datum.  Each column
 * value is fetched by column name; a missing key is an error (to return
 * NULL in a column, the mapping must contain None for that key).
 */
static Datum
PLyMapping_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *mapping)
{
	Datum		result;
	HeapTuple	tuple;
	Datum	   *values;
	bool	   *nulls;
	volatile int i;

	Assert(PyMapping_Check(mapping));

	/* Build tuple */
	values = palloc(sizeof(Datum) * desc->natts);
	nulls = palloc(sizeof(bool) * desc->natts);
	for (i = 0; i < desc->natts; ++i)
	{
		char	   *key;
		PyObject   *volatile value;
		PLyObToDatum *att;
		Form_pg_attribute attr = TupleDescAttr(desc, i);

		/* Dropped columns become NULL placeholders in the tuple */
		if (attr->attisdropped)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		key = NameStr(attr->attname);
		value = NULL;
		att = &arg->u.tuple.atts[i];
		/* PG_TRY so the fetched value's refcount is released on error */
		PG_TRY();
		{
			value = PyMapping_GetItemString(mapping, key);
			if (!value)
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_COLUMN),
						 errmsg("key \"%s\" not found in mapping", key),
						 errhint("To return null in a column, "
								 "add the value None to the mapping with the key named after the column.")));
			values[i] = att->func(att, value, &nulls[i], false);

			Py_XDECREF(value);
			value = NULL;
		}
		PG_CATCH();
		{
			Py_XDECREF(value);
			PG_RE_THROW();
		}
		PG_END_TRY();
	}

	tuple = heap_form_tuple(desc, values, nulls);
	result = heap_copy_tuple_as_datum(tuple, desc);
	heap_freetuple(tuple);

	pfree(values);
	pfree(nulls);

	return result;
}
/*
 * Convert a Python sequence (tuple, list, ...) to a composite datum.
 * Sequence items are matched positionally against the non-dropped columns;
 * the length must match exactly.
 */
static Datum
PLySequence_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *sequence)
{
	Datum		result;
	HeapTuple	tuple;
	Datum	   *values;
	bool	   *nulls;
	volatile int idx;
	volatile int i;

	Assert(PySequence_Check(sequence));

	/*
	 * Check that sequence length is exactly same as PG tuple's. We actually
	 * can ignore exceeding items or assume missing ones as null but to avoid
	 * plpython developer's errors we are strict here
	 */
	idx = 0;
	for (i = 0; i < desc->natts; i++)
	{
		if (!TupleDescAttr(desc, i)->attisdropped)
			idx++;
	}
	if (PySequence_Length(sequence) != idx)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("length of returned sequence did not match number of columns in row")));

	/* Build tuple */
	values = palloc(sizeof(Datum) * desc->natts);
	nulls = palloc(sizeof(bool) * desc->natts);
	/* idx steps over sequence positions; i steps over tupdesc columns */
	idx = 0;
	for (i = 0; i < desc->natts; ++i)
	{
		PyObject   *volatile value;
		PLyObToDatum *att;

		/* Dropped columns become NULL placeholders in the tuple */
		if (TupleDescAttr(desc, i)->attisdropped)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		value = NULL;
		att = &arg->u.tuple.atts[i];
		/* PG_TRY so the fetched value's refcount is released on error */
		PG_TRY();
		{
			value = PySequence_GetItem(sequence, idx);
			Assert(value);
			values[i] = att->func(att, value, &nulls[i], false);

			Py_XDECREF(value);
			value = NULL;
		}
		PG_CATCH();
		{
			Py_XDECREF(value);
			PG_RE_THROW();
		}
		PG_END_TRY();

		idx++;
	}

	tuple = heap_form_tuple(desc, values, nulls);
	result = heap_copy_tuple_as_datum(tuple, desc);
	heap_freetuple(tuple);

	pfree(values);
	pfree(nulls);

	return result;
}
/*
 * Convert a generic Python object to a composite datum.  Each column value
 * is fetched as an attribute of the object named after the column; a missing
 * attribute is an error (to return NULL in a column, the object must expose
 * the attribute with value None).
 */
static Datum
PLyGenericObject_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *object, bool inarray)
{
	Datum		result;
	HeapTuple	tuple;
	Datum	   *values;
	bool	   *nulls;
	volatile int i;

	/* Build tuple */
	values = palloc(sizeof(Datum) * desc->natts);
	nulls = palloc(sizeof(bool) * desc->natts);
	for (i = 0; i < desc->natts; ++i)
	{
		char	   *key;
		PyObject   *volatile value;
		PLyObToDatum *att;
		Form_pg_attribute attr = TupleDescAttr(desc, i);

		/* Dropped columns become NULL placeholders in the tuple */
		if (attr->attisdropped)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		key = NameStr(attr->attname);
		value = NULL;
		att = &arg->u.tuple.atts[i];
		/* PG_TRY so the fetched value's refcount is released on error */
		PG_TRY();
		{
			value = PyObject_GetAttrString(object, key);
			if (!value)
			{
				/*
				 * No attribute for this column in the object.
				 *
				 * If we are parsing a composite type in an array, a likely
				 * cause is that the function contained something like "[[123,
				 * 'foo']]".  Before PostgreSQL 10, that was interpreted as an
				 * array, with a composite type (123, 'foo') in it. But now
				 * it's interpreted as a two-dimensional array, and we try to
				 * interpret "123" as the composite type.  See also similar
				 * heuristic in PLyObject_ToScalar().
				 */
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_COLUMN),
						 errmsg("attribute \"%s\" does not exist in Python object", key),
						 inarray ?
						 errhint("To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\".") :
						 errhint("To return null in a column, let the returned object have an attribute named after column with value None.")));
			}

			values[i] = att->func(att, value, &nulls[i], false);

			Py_XDECREF(value);
			value = NULL;
		}
		PG_CATCH();
		{
			Py_XDECREF(value);
			PG_RE_THROW();
		}
		PG_END_TRY();
	}

	tuple = heap_form_tuple(desc, values, nulls);
	result = heap_copy_tuple_as_datum(tuple, desc);
	heap_freetuple(tuple);

	pfree(values);
	pfree(nulls);

	return result;
}
| 26.48311 | 126 | 0.68981 | [
"object",
"transform"
] |
9e4f0cac060fbd05040c1f8856a4c632242b96bb | 7,041 | h | C | src/chrono/fea/ChNodeFEAxyz.h | Benatti1991/chrono | d927a7fae8ed2f4e6695cacaef28c605fcd9ffaf | [
"BSD-3-Clause"
] | 1,383 | 2015-02-04T14:17:40.000Z | 2022-03-30T04:58:16.000Z | src/chrono/fea/ChNodeFEAxyz.h | Benatti1991/chrono | d927a7fae8ed2f4e6695cacaef28c605fcd9ffaf | [
"BSD-3-Clause"
] | 245 | 2015-01-11T15:30:51.000Z | 2022-03-30T21:28:54.000Z | src/chrono/fea/ChNodeFEAxyz.h | Benatti1991/chrono | d927a7fae8ed2f4e6695cacaef28c605fcd9ffaf | [
"BSD-3-Clause"
] | 351 | 2015-02-04T14:17:47.000Z | 2022-03-30T04:42:52.000Z | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Andrea Favali, Alessandro Tasora, Radu Serban
// =============================================================================
#ifndef CHNODEFEAXYZ_H
#define CHNODEFEAXYZ_H
#include "chrono/physics/ChNodeXYZ.h"
#include "chrono/solver/ChVariablesNode.h"
#include "chrono/fea/ChNodeFEAbase.h"
namespace chrono {
namespace fea {
/// @addtogroup fea_nodes
/// @{
// Forward declaration
class ChMesh;
/// Class for a generic 3D finite element node, with x,y,z displacement.
/// This is the typical node that can be used for tetrahedrons, etc.
class ChApi ChNodeFEAxyz : public ChNodeFEAbase, public ChNodeXYZ, public ChVariableTupleCarrier_1vars<3> {
  public:
    ChNodeFEAxyz(ChVector<> initial_pos = VNULL);
    ChNodeFEAxyz(const ChNodeFEAxyz& other);
    virtual ~ChNodeFEAxyz() {}

    ChNodeFEAxyz& operator=(const ChNodeFEAxyz& other);

    /// Access the solver variables object for this node (node mass plus the
    /// x,y,z unknowns).
    virtual ChVariablesNode& Variables() override { return variables; }

    /// Set the rest position as the actual position.
    virtual void Relax() override;

    /// Reset to no speed and acceleration.
    virtual void SetNoSpeedNoAcceleration() override;

    /// Set the 'fixed' state of the node.
    /// If true, its current field value is not changed by solver.
    virtual void SetFixed(bool mev) override { variables.SetDisabled(mev); }

    /// Get the 'fixed' state of the node.
    /// If true, its current field value is not changed by solver.
    virtual bool GetFixed() override { return variables.IsDisabled(); }

    /// Get mass of the node.
    virtual double GetMass() const override { return variables.GetNodeMass(); }

    /// Set mass of the node.
    virtual void SetMass(double mm) override { variables.SetNodeMass(mm); }

    /// Set the initial (reference) position
    virtual void SetX0(ChVector<> mx) { X0 = mx; }

    /// Get the initial (reference) position
    virtual ChVector<> GetX0() { return X0; }

    /// Set the 3d applied force, in absolute reference
    virtual void SetForce(ChVector<> mf) { Force = mf; }

    /// Get the 3d applied force, in absolute reference
    virtual ChVector<> GetForce() { return Force; }

    /// Get the number of degrees of freedom
    virtual int Get_ndof_x() const override { return 3; }

    //
    // INTERFACE to ChVariableTupleCarrier_1vars
    //

    virtual ChVariables* GetVariables1() override { return &Variables(); }

    //
    // Functions for interfacing to the state bookkeeping
    //

    /// Copy this node's position into x and velocity into v, at the given
    /// offsets.
    virtual void NodeIntStateGather(const unsigned int off_x,
                                    ChState& x,
                                    const unsigned int off_v,
                                    ChStateDelta& v,
                                    double& T) override {
        x.segment(off_x, 3) = pos.eigen();
        v.segment(off_v, 3) = pos_dt.eigen();
    }

    /// Read this node's position from x and velocity from v, at the given
    /// offsets.
    virtual void NodeIntStateScatter(const unsigned int off_x,
                                     const ChState& x,
                                     const unsigned int off_v,
                                     const ChStateDelta& v,
                                     const double T) override {
        SetPos(x.segment(off_x, 3));
        SetPos_dt(v.segment(off_v, 3));
    }

    /// Copy this node's acceleration into a, at the given offset.
    virtual void NodeIntStateGatherAcceleration(const unsigned int off_a, ChStateDelta& a) override {
        a.segment(off_a, 3) = pos_dtdt.eigen();
    }

    /// Read this node's acceleration from a, at the given offset.
    virtual void NodeIntStateScatterAcceleration(const unsigned int off_a, const ChStateDelta& a) override {
        SetPos_dtdt(a.segment(off_a, 3));
    }

    /// Compute x_new = x + Dv, componentwise for the 3 positional DOFs.
    virtual void NodeIntStateIncrement(const unsigned int off_x,
                                       ChState& x_new,
                                       const ChState& x,
                                       const unsigned int off_v,
                                       const ChStateDelta& Dv) override {
        x_new(off_x) = x(off_x) + Dv(off_v);
        x_new(off_x + 1) = x(off_x + 1) + Dv(off_v + 1);
        x_new(off_x + 2) = x(off_x + 2) + Dv(off_v + 2);
    }

    /// Accumulate the applied force into the residual R, scaled by c.
    virtual void NodeIntLoadResidual_F(const unsigned int off, ChVectorDynamic<>& R, const double c) override {
        R.segment(off, 3) += c * Force.eigen();
    }

    /// Accumulate c*M*w into the residual R (M is the lumped nodal mass).
    virtual void NodeIntLoadResidual_Mv(const unsigned int off,
                                        ChVectorDynamic<>& R,
                                        const ChVectorDynamic<>& w,
                                        const double c) override {
        R(off + 0) += c * GetMass() * w(off + 0);
        R(off + 1) += c * GetMass() * w(off + 1);
        R(off + 2) += c * GetMass() * w(off + 2);
    }

    /// Push velocity v and residual R into the solver variables.
    virtual void NodeIntToDescriptor(const unsigned int off_v,
                                     const ChStateDelta& v,
                                     const ChVectorDynamic<>& R) override {
        variables.Get_qb() = v.segment(off_v, 3);
        variables.Get_fb() = R.segment(off_v, 3);
    }

    /// Pull the solved velocity back from the solver variables into v.
    virtual void NodeIntFromDescriptor(const unsigned int off_v, ChStateDelta& v) override {
        v.segment(off_v, 3) = variables.Get_qb();
    }

    //
    // Functions for interfacing to the solver
    //

    virtual void InjectVariables(ChSystemDescriptor& mdescriptor) override { mdescriptor.InsertVariables(&variables); }

    virtual void VariablesFbReset() override { variables.Get_fb().setZero(); }

    virtual void VariablesFbLoadForces(double factor = 1) override {
        variables.Get_fb() += factor * Force.eigen();
    }

    virtual void VariablesQbLoadSpeed() override { variables.Get_qb() = pos_dt.eigen(); }

    /// Adopt the solver speed; if step > 0, also back out an acceleration
    /// estimate by finite-differencing against the previous speed.
    virtual void VariablesQbSetSpeed(double step = 0) override {
        ChVector<> old_dt = pos_dt;
        SetPos_dt(variables.Get_qb().segment(0, 3));
        if (step) {
            SetPos_dtdt((pos_dt - old_dt) / step);
        }
    }

    /// Accumulate M*qb into the fb vector of the variables.
    virtual void VariablesFbIncrementMq() override {
        variables.Compute_inc_Mb_v(variables.Get_fb(), variables.Get_qb());
    }

    virtual void VariablesQbIncrementPosition(double step) override {
        ChVector<> newspeed = variables.Get_qb().segment(0, 3);
        // ADVANCE POSITION: pos' = pos + dt * vel
        SetPos(GetPos() + newspeed * step);
    }

    //
    // SERIALIZATION
    //

    virtual void ArchiveOUT(ChArchiveOut& marchive) override;
    virtual void ArchiveIN(ChArchiveIn& marchive) override;

  protected:
    ChVariablesNode variables;  ///< 3D node variables, with x,y,z
    ChVector<> X0;              ///< reference position
    ChVector<> Force;           ///< applied force
};
/// @} fea_nodes
} // end namespace fea
} // end namespace chrono
#endif
| 35.923469 | 119 | 0.586564 | [
"3d"
] |
9e4f9a79235e69c27904e02725a24fda73fd0b1e | 11,314 | h | C | systems/trajectory_optimization/dircon/dircon.h | DavidDePauw1/dairlib | 3c75c8f587927b12a58f2e88dda61cc0e7dc82a3 | [
"BSD-3-Clause"
] | null | null | null | systems/trajectory_optimization/dircon/dircon.h | DavidDePauw1/dairlib | 3c75c8f587927b12a58f2e88dda61cc0e7dc82a3 | [
"BSD-3-Clause"
] | null | null | null | systems/trajectory_optimization/dircon/dircon.h | DavidDePauw1/dairlib | 3c75c8f587927b12a58f2e88dda61cc0e7dc82a3 | [
"BSD-3-Clause"
] | null | null | null | #pragma once
#include <vector>
#include <memory.h>
#include "drake/common/drake_copyable.h"
#include "drake/common/symbolic.h"
#include "drake/common/trajectories/piecewise_polynomial.h"
#include "drake/solvers/constraint.h"
#include "drake/systems/trajectory_optimization/multiple_shooting.h"
#include "systems/trajectory_optimization/dircon/dircon_mode.h"
#include "systems/trajectory_optimization/dircon/dynamics_cache.h"
#include "multibody/multipose_visualizer.h"
namespace dairlib {
namespace systems {
namespace trajectory_optimization {
/// DIRCON implements the approach to trajectory optimization as
/// described in
///   Michael Posa, Scott Kuindersma, Russ Tedrake. "Optimization and
///   Stabilization of Trajectories for Constrained Dynamical Systems." ICRA,
///   2016.
/// It assumes a first-order hold on the input trajectory and a cubic spline
/// representation of the state trajectory, and adds dynamic constraints (and
/// running costs) to the midpoints as well as the knot points in order to
/// achieve a 3rd order integration accuracy.
/// DIRCON addresses kinematic constraints by incorporating constraint forces
/// and corresponding acceleration, velocity, and position constraints.
template <typename T>
class Dircon
    : public drake::systems::trajectory_optimization::MultipleShooting {
 public:
  DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(Dircon)

  /// The default, hybrid constructor. Takes a mode sequence.
  Dircon(const DirconModeSequence<T>& mode_sequence);

  /// For simplicity, a constructor that takes only a single mode as a pointer.
  Dircon(DirconMode<T>* mode);

  /// Returns a vector of matrices containing the state and derivative values at
  /// each breakpoint at the solution for each mode of the trajectory.
  void GetStateAndDerivativeSamples(
      const drake::solvers::MathematicalProgramResult& result,
      std::vector<Eigen::MatrixXd>* state_samples,
      std::vector<Eigen::MatrixXd>* derivative_samples,
      std::vector<Eigen::VectorXd>* state_breaks) const;

  /// Get the input trajectory at the solution as a
  /// %drake::trajectories::PiecewisePolynomialTrajectory%.
  drake::trajectories::PiecewisePolynomial<double> ReconstructInputTrajectory(
      const drake::solvers::MathematicalProgramResult& result) const override;

  /// Get the state trajectory at the solution as a
  /// %drake::trajectories::PiecewisePolynomialTrajectory%.
  drake::trajectories::PiecewisePolynomial<double> ReconstructStateTrajectory(
      const drake::solvers::MathematicalProgramResult& result) const override;

  /// Get the state samples by mode, as a matrix. Each column corresponds to
  /// a knotpoint.
  Eigen::MatrixXd GetStateSamplesByMode(
      const drake::solvers::MathematicalProgramResult& result, int mode) const;

  /// Get the input samples by mode, as a matrix. Each column corresponds to
  /// a knotpoint.
  Eigen::MatrixXd GetInputSamplesByMode(
      const drake::solvers::MathematicalProgramResult& result, int mode) const;

  /// Get the force samples by mode, as a matrix. Each column corresponds to
  /// a knotpoint.
  Eigen::MatrixXd GetForceSamplesByMode(
      const drake::solvers::MathematicalProgramResult& result, int mode) const;

  /// Adds a visualization callback that will visualize knot points
  /// without transparency. Cannot be called twice
  /// @param model_name The path of a URDF/SDF model name for visualization
  /// @param poses_per_mode Regulates how many knot points are visualized. A
  /// vector containing the number of poses to show per mode. This is in
  /// addition to the start/end poses of every mode! The total number of poses
  /// is therefore [sum(poses_per_mode) + num_modes + 1]
  /// @param alpha A transparency scaler for all poses except the first and last
  /// @param weld_frame_to_world The name of a frame to weld to the world frame
  /// when parsing the model. Defaults to blank, which will not perform a weld
  void CreateVisualizationCallback(std::string model_file,
                                   std::vector<unsigned int> poses_per_mode,
                                   double alpha,
                                   std::string weld_frame_to_world = "");

  /// See CreateVisualizationCallback(std::string model_file,
  ///    std::vector<unsigned int> poses_per_mode,
  ///    std::string weld_frame_to_world)
  ///
  /// Creates a callback using a single pose count parameter, num_poses
  /// Evenly divides the poses among the different modes, weighting by number
  /// of frames in that mode. Since start/end poses per mode are required, must
  /// have num_poses >= num_modes + 1
  void CreateVisualizationCallback(std::string model_file,
                                   unsigned int num_poses, double alpha,
                                   std::string weld_frame_to_world = "");

  /// See CreateVisualizationCallback(std::string model_file,
  ///    unsigned int poses_per_mode,
  ///    std::string weld_frame_to_world)
  ///
  /// Creates a visualization callback that shows all knot points.
  void CreateVisualizationCallback(std::string model_file, double alpha,
                                   std::string weld_frame_to_world = "");

  /// Set the initial guess for the force variables for a specific mode
  /// @param mode the mode index
  /// @param traj_init_l contact forces lambda (interpreted at knot points)
  /// @param traj_init_lc contact forces (interpreted at collocation points)
  /// @param traj_init_vc velocity constraint slack variables (at collocation)
  void SetInitialForceTrajectory(
      int mode,
      const drake::trajectories::PiecewisePolynomial<double>& traj_init_l,
      const drake::trajectories::PiecewisePolynomial<double>& traj_init_lc,
      const drake::trajectories::PiecewisePolynomial<double>& traj_init_vc);

  /// Set the initial guess for the force variables for a specific mode.
  /// Sets both the contact forces lambda and the collocation forces lambda_c
  /// from the same trajectory. Does not set velocity constraint slack variables
  /// @param mode the mode index
  /// @param traj_init_l contact forces lambda (interpreted at knot points)
  void SetInitialForceTrajectory(
      int mode,
      const drake::trajectories::PiecewisePolynomial<double>& traj_init_l);

  /// Get all knotpoint force variables associated with a specific mode and
  /// knotpoint
  const drake::solvers::VectorXDecisionVariable force_vars(
      int mode_index, int knotpoint_index) const;

  /// Get all collocation force variables associated with a specific mode and
  /// collocation point
  const drake::solvers::VectorXDecisionVariable collocation_force_vars(
      int mode_index, int collocation_index) const;

  /// Get all kinematic relative offset variables associated with a specific
  /// mode
  const drake::solvers::VectorXDecisionVariable offset_vars(
      int mode_index) const;

  /// Get all velocity slack variables (gamma) associated with a specific mode
  /// and collocation point
  const drake::solvers::VectorXDecisionVariable collocation_slack_vars(
      int mode_index, int collocation_index) const;

  /// Get all quaternion slack variables associated with a specific mode
  /// and collocation point
  const drake::solvers::VectorXDecisionVariable quaternion_slack_vars(
      int mode_index, int collocation_index) const;

  /// Get all post-impact velocity variables associated with a specific
  /// mode transition (0 is the first transition between modes 0 and 1)
  const drake::solvers::VectorXDecisionVariable post_impact_velocity_vars(
      int mode_transition_index) const;

  /// Get all impulsive force variables associated with a specific
  /// mode transition (0 is the first transition between modes 0 and 1)
  const drake::solvers::VectorXDecisionVariable impulse_vars(
      int mode_transition_index) const;

  /// Get the state decision variables given a mode and a time_index
  /// (knotpoint_index is w.r.t that particular mode). This will use the
  /// v_post_impact_vars_ if needed. Otherwise, it just returns the standard
  /// x_vars element
  const drake::solvers::VectorXDecisionVariable state_vars(
      int mode_index, int knotpoint_index) const;

  /// Get the input decision variables, given a mode and time index.
  /// (knotpoint_index is w.r.t that particular mode).
  const drake::solvers::VectorXDecisionVariable input_vars(
      int mode_index, int knotpoint_index) const;

  /// Substitute the placeholder state/input variables in expression vector f
  /// with the decision variables of the given interval.
  drake::VectorX<drake::symbolic::Expression> SubstitutePlaceholderVariables(
      const drake::VectorX<drake::symbolic::Expression>& f,
      int interval_index) const;

  using drake::systems::trajectory_optimization::MultipleShooting::N;
  using drake::systems::trajectory_optimization::MultipleShooting::
      SubstitutePlaceholderVariables;

  /// Get the number of modes in the sequence.
  int num_modes() const;

  /// Get the number of knotpoints in a specified mode
  int mode_length(int mode_index) const;

  const multibody::KinematicEvaluatorSet<T>& get_evaluator_set(int mode) const {
    return mode_sequence_.mode(mode).evaluators();
  }

  const DirconMode<T>& get_mode(int mode) const {
    return mode_sequence_.mode(mode);
  }

  const drake::systems::Context<T>& get_context(int mode, int knotpoint_index) {
    return *contexts_.at(mode).at(knotpoint_index);
  }

  /// Setters for variable scaling
  void ScaleTimeVariables(double scale);
  void ScaleQuaternionSlackVariables(double scale);
  void ScaleStateVariable(int idx, double scale);
  void ScaleInputVariable(int idx, double scale);
  void ScaleForceVariable(int mode, int idx, double scale);
  void ScaleImpulseVariable(int mode, int idx, double scale);
  void ScaleKinConstraintSlackVariable(int mode, int idx, double scale);
  void ScaleStateVariables(std::vector<int> idx_list, double scale);
  void ScaleInputVariables(std::vector<int> idx_list, double scale);
  void ScaleForceVariables(int mode, std::vector<int> idx_list, double scale);
  void ScaleImpulseVariables(int mode, std::vector<int> idx_list, double scale);
  void ScaleKinConstraintSlackVariables(int mode, std::vector<int> idx_list,
                                        double scale);

 private:
  // Private constructor to which public constructors funnel
  Dircon(std::unique_ptr<DirconModeSequence<T>> my_sequence,
         const DirconModeSequence<T>* ext_sequence,
         const drake::multibody::MultibodyPlant<T>& plant,
         int num_knotpoints);

  // Owned mode sequence (used by the single-mode constructor); empty when the
  // sequence is externally owned.
  std::unique_ptr<DirconModeSequence<T>> my_sequence_;
  const drake::multibody::MultibodyPlant<T>& plant_;
  // Reference to whichever sequence (owned or external) is in effect.
  const DirconModeSequence<T>& mode_sequence_;
  // Per-mode, per-knotpoint plant contexts.
  std::vector<std::vector<std::unique_ptr<drake::systems::Context<T>>>>
      contexts_;
  // Index of the first knotpoint of each mode within the full program.
  std::vector<int> mode_start_;

  void DoAddRunningCost(const drake::symbolic::Expression& e) override;

  // Decision-variable groups, indexed by mode (or mode transition).
  std::vector<drake::solvers::VectorXDecisionVariable> force_vars_;
  std::vector<drake::solvers::VectorXDecisionVariable> collocation_force_vars_;
  std::vector<drake::solvers::VectorXDecisionVariable> collocation_slack_vars_;
  std::vector<drake::solvers::VectorXDecisionVariable> v_post_impact_vars_;
  std::vector<drake::solvers::VectorXDecisionVariable> impulse_vars_;
  std::vector<drake::solvers::VectorXDecisionVariable> offset_vars_;
  std::vector<drake::solvers::VectorXDecisionVariable> quaternion_slack_vars_;

  std::unique_ptr<multibody::MultiposeVisualizer> callback_visualizer_;

  std::vector<std::unique_ptr<DynamicsCache<T>>> cache_;
};
} // namespace trajectory_optimization
} // namespace systems
} // namespace dairlib
| 46.179592 | 80 | 0.758176 | [
"vector",
"model"
] |
9e520b185b1932290d13b60ff758df2596888305 | 150,343 | c | C | src/tor/directory.c | mammix2/tele-master | acd20f2cc2fe4821b43bfa994cc3c9ca4ace98a5 | [
"MIT"
] | null | null | null | src/tor/directory.c | mammix2/tele-master | acd20f2cc2fe4821b43bfa994cc3c9ca4ace98a5 | [
"MIT"
] | null | null | null | src/tor/directory.c | mammix2/tele-master | acd20f2cc2fe4821b43bfa994cc3c9ca4ace98a5 | [
"MIT"
] | null | null | null | /* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
* Copyright (c) 2007-2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */
#include "or.h"
#include "buffers.h"
#include "circuitbuild.h"
#include "config.h"
#include "connection.h"
#include "connection_edge.h"
#include "control.h"
#include "directory.h"
#include "dirserv.h"
#include "dirvote.h"
#include "entrynodes.h"
#include "geoip.h"
#include "onion_main.h"
#include "microdesc.h"
#include "networkstatus.h"
#include "nodelist.h"
#include "policies.h"
#include "rendclient.h"
#include "rendcommon.h"
#include "rephist.h"
#include "router.h"
#include "routerlist.h"
#include "routerparse.h"
#include "routerset.h"
#if defined(EXPORTMALLINFO) && defined(HAVE_MALLOC_H) && defined(HAVE_MALLINFO)
#ifndef OPENBSD
#include <malloc.h>
#endif
#endif
/**
* \file directory.c
* \brief Code to send and fetch directories and router
* descriptors via HTTP. Directories use dirserv.c to generate the
* results; clients use routers.c to parse them.
**/
/* In-points to directory.c:
*
* - directory_post_to_dirservers(), called from
* router_upload_dir_desc_to_dirservers() in router.c
* upload_service_descriptor() in rendservice.c
* - directory_get_from_dirserver(), called from
* rend_client_refetch_renddesc() in rendclient.c
* run_scheduled_events() in onion_main.c
* do_hup() in onion_main.c
* - connection_dir_process_inbuf(), called from
* connection_process_inbuf() in connection.c
* - connection_dir_finished_flushing(), called from
* connection_finished_flushing() in connection.c
* - connection_dir_finished_connecting(), called from
* connection_finished_connecting() in connection.c
*/
static void directory_send_command(dir_connection_t *conn,
int purpose, int direct, const char *resource,
const char *payload, size_t payload_len,
time_t if_modified_since);
static int directory_handle_command(dir_connection_t *conn);
static int body_is_plausible(const char *body, size_t body_len, int purpose);
static int purpose_needs_anonymity(uint8_t dir_purpose,
uint8_t router_purpose);
static char *http_get_header(const char *headers, const char *which);
static void http_set_address_origin(const char *headers, connection_t *conn);
static void connection_dir_download_routerdesc_failed(dir_connection_t *conn);
static void connection_dir_bridge_routerdesc_failed(dir_connection_t *conn);
static void connection_dir_download_cert_failed(
dir_connection_t *conn, int status_code);
static void connection_dir_retry_bridges(smartlist_t *descs);
static void dir_routerdesc_download_failed(smartlist_t *failed,
int status_code,
int router_purpose,
int was_extrainfo,
int was_descriptor_digests);
static void dir_microdesc_download_failed(smartlist_t *failed,
int status_code);
static void note_client_request(int purpose, int compressed, size_t bytes);
static int client_likes_consensus(networkstatus_t *v, const char *want_url);
static void directory_initiate_command_rend(const char *address,
const tor_addr_t *addr,
uint16_t or_port,
uint16_t dir_port,
const char *digest,
uint8_t dir_purpose,
uint8_t router_purpose,
dir_indirection_t indirection,
const char *resource,
const char *payload,
size_t payload_len,
time_t if_modified_since,
const rend_data_t *rend_query);
/********* START VARIABLES **********/
/** How far in the future do we allow a directory server to tell us it is
* before deciding that one of us has the wrong time? */
#define ALLOW_DIRECTORY_TIME_SKEW (30*60)
#define X_ADDRESS_HEADER "X-Your-Address-Is: "
/** HTTP cache control: how long do we tell proxies they can cache each
* kind of document we serve? */
#define FULL_DIR_CACHE_LIFETIME (60*60)
#define RUNNINGROUTERS_CACHE_LIFETIME (20*60)
#define DIRPORTFRONTPAGE_CACHE_LIFETIME (20*60)
#define NETWORKSTATUS_CACHE_LIFETIME (5*60)
#define ROUTERDESC_CACHE_LIFETIME (30*60)
#define ROUTERDESC_BY_DIGEST_CACHE_LIFETIME (48*60*60)
#define ROBOTS_CACHE_LIFETIME (24*60*60)
#define MICRODESC_CACHE_LIFETIME (48*60*60)
/********* END VARIABLES ************/
/** Return true iff directory traffic with purpose <b>dir_purpose</b> (and,
 * when descriptors are being fetched, descriptor purpose
 * <b>router_purpose</b>) must travel over an anonymous connection to a
 * directory. */
static int
purpose_needs_anonymity(uint8_t dir_purpose, uint8_t router_purpose)
{
  /* The operator can force every directory action to be private. */
  if (get_options()->AllDirActionsPrivate)
    return 1;

  /* Bridge-related requests stay private; if no circuits exist yet this
   * might break bootstrapping, but it's needed to be safe. */
  if (router_purpose == ROUTER_PURPOSE_BRIDGE)
    return 1;

  switch (dir_purpose) {
    case DIR_PURPOSE_UPLOAD_DIR:
    case DIR_PURPOSE_UPLOAD_VOTE:
    case DIR_PURPOSE_UPLOAD_SIGNATURES:
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
    case DIR_PURPOSE_FETCH_CONSENSUS:
    case DIR_PURPOSE_FETCH_CERTIFICATE:
    case DIR_PURPOSE_FETCH_SERVERDESC:
    case DIR_PURPOSE_FETCH_EXTRAINFO:
    case DIR_PURPOSE_FETCH_MICRODESC:
      /* These purposes exchange only public directory data, so a direct
       * connection is acceptable. */
      return 0;
    default:
      /* Everything else (e.g. hidden-service descriptors) is anonymous. */
      return 1;
  }
}
/** Return a newly allocated string describing <b>auth</b>. Only describes
 * authority features. */
static char *
authdir_type_to_string(dirinfo_type_t auth)
{
  /* Each authority-feature flag, paired with its label, in output order. */
  static const struct {
    dirinfo_type_t flag;
    const char *label;
  } features[] = {
    { V1_DIRINFO, "V1" },
    { V3_DIRINFO, "V3" },
    { BRIDGE_DIRINFO, "Bridge" },
    { HIDSERV_DIRINFO, "Hidden service" },
  };
  char *result;
  unsigned i;
  smartlist_t *lst = smartlist_new();

  for (i = 0; i < sizeof(features)/sizeof(features[0]); ++i) {
    if (auth & features[i].flag)
      smartlist_add(lst, (void*)features[i].label);
  }

  if (smartlist_len(lst))
    result = smartlist_join_strings(lst, ", ", 0, NULL);
  else
    result = tor_strdup("[Not an authority]");

  smartlist_free(lst);
  return result;
}
/** Return a string describing a given directory connection purpose. */
static const char *
dir_conn_purpose_to_string(int purpose)
{
  /* Lookup table mapping each known purpose to its description. */
  static const struct {
    int purpose;
    const char *description;
  } descriptions[] = {
    { DIR_PURPOSE_FETCH_RENDDESC, "hidden-service descriptor fetch" },
    { DIR_PURPOSE_UPLOAD_DIR, "server descriptor upload" },
    { DIR_PURPOSE_UPLOAD_RENDDESC, "hidden-service descriptor upload" },
    { DIR_PURPOSE_UPLOAD_VOTE, "server vote upload" },
    { DIR_PURPOSE_UPLOAD_SIGNATURES, "consensus signature upload" },
    { DIR_PURPOSE_FETCH_SERVERDESC, "server descriptor fetch" },
    { DIR_PURPOSE_FETCH_EXTRAINFO, "extra-info fetch" },
    { DIR_PURPOSE_FETCH_CONSENSUS, "consensus network-status fetch" },
    { DIR_PURPOSE_FETCH_CERTIFICATE, "authority cert fetch" },
    { DIR_PURPOSE_FETCH_STATUS_VOTE, "status vote fetch" },
    { DIR_PURPOSE_FETCH_DETACHED_SIGNATURES, "consensus signature fetch" },
    { DIR_PURPOSE_FETCH_RENDDESC_V2, "hidden-service v2 descriptor fetch" },
    { DIR_PURPOSE_UPLOAD_RENDDESC_V2, "hidden-service v2 descriptor upload" },
    { DIR_PURPOSE_FETCH_MICRODESC, "microdescriptor fetch" },
  };
  unsigned i;

  for (i = 0; i < sizeof(descriptions)/sizeof(descriptions[0]); ++i) {
    if (descriptions[i].purpose == purpose)
      return descriptions[i].description;
  }

  log_warn(LD_BUG, "Called with unknown purpose %d", purpose);
  return "(unknown)";
}
/** Return true iff <b>identity_digest</b> is the digest of a router we
 * believe to support extrainfo downloads.  (If <b>is_authority</b> we do
 * additional checking that's only valid for authorities.) */
int
router_supports_extrainfo(const char *identity_digest, int is_authority)
{
  const node_t *node = node_get_by_id(identity_digest);

  /* A router whose descriptor says it caches extra-info documents
   * certainly supports them. */
  if (node && node->ri && node->ri->caches_extra_info)
    return 1;

  /* Authorities are assumed to support extra-info downloads. */
  return is_authority ? 1 : 0;
}
/** Return true iff any trusted directory authority has accepted our
 * server descriptor.
 *
 * We consider any authority sufficient because waiting for all of
 * them means it never happens while any authority is down; we don't
 * go for something more complex in the middle (like \>1/3 or \>1/2 or
 * \>=1/2) because that doesn't seem necessary yet.
 */
int
directories_have_accepted_server_descriptor(void)
{
  const smartlist_t *servers = router_get_trusted_dir_servers();
  const or_options_t *options = get_options();

  SMARTLIST_FOREACH_BEGIN(servers, dir_server_t *, d) {
    /* Only authorities we actually publish to count; one acceptance is
     * enough. */
    if ((d->type & options->PublishServerDescriptor_) &&
        d->has_accepted_serverdesc) {
      return 1;
    }
  } SMARTLIST_FOREACH_END(d);

  return 0;
}
/** Start a connection to every suitable directory authority, using
 * connection purpose <b>dir_purpose</b> and uploading <b>payload</b>
 * (of length <b>payload_len</b>). The dir_purpose should be one of
 * 'DIR_PURPOSE_UPLOAD_DIR' or 'DIR_PURPOSE_UPLOAD_RENDDESC'.
 *
 * <b>router_purpose</b> describes the type of descriptor we're
 * publishing, if we're publishing a descriptor -- e.g. general or bridge.
 *
 * <b>type</b> specifies what sort of dir authorities (V1, V3,
 * HIDSERV, BRIDGE, etc) we should upload to.
 *
 * If <b>extrainfo_len</b> is nonzero, the first <b>payload_len</b> bytes of
 * <b>payload</b> hold a router descriptor, and the next <b>extrainfo_len</b>
 * bytes of <b>payload</b> hold an extra-info document. Upload the descriptor
 * to all authorities, and the extra-info document to all authorities that
 * support it.
 */
void
directory_post_to_dirservers(uint8_t dir_purpose, uint8_t router_purpose,
                             dirinfo_type_t type,
                             const char *payload,
                             size_t payload_len, size_t extrainfo_len)
{
  const or_options_t *options = get_options();
  int post_via_tor;
  const smartlist_t *dirservers = router_get_trusted_dir_servers();
  int found = 0;
  /* When uploading votes or signatures, never post to ourselves. */
  const int exclude_self = (dir_purpose == DIR_PURPOSE_UPLOAD_VOTE ||
                            dir_purpose == DIR_PURPOSE_UPLOAD_SIGNATURES);
  tor_assert(dirservers);
  /* This tries dirservers which we believe to be down, but ultimately, that's
   * harmless, and we may as well err on the side of getting things uploaded.
   */
  SMARTLIST_FOREACH_BEGIN(dirservers, dir_server_t *, ds) {
      routerstatus_t *rs = &(ds->fake_status);
      size_t upload_len = payload_len;
      tor_addr_t ds_addr;

      /* Skip authorities that aren't of the requested kind. */
      if ((type & ds->type) == 0)
        continue;
      if (exclude_self && router_digest_is_me(ds->digest))
        continue;
      /* With StrictNodes set, honor ExcludeNodes even for authorities. */
      if (options->StrictNodes &&
          routerset_contains_routerstatus(options->ExcludeNodes, rs, -1)) {
        log_warn(LD_DIR, "Wanted to contact authority '%s' for %s, but "
                 "it's in our ExcludedNodes list and StrictNodes is set. "
                 "Skipping.",
                 ds->nickname,
                 dir_conn_purpose_to_string(dir_purpose));
        continue;
      }
      found = 1; /* at least one authority of this type was listed */
      if (dir_purpose == DIR_PURPOSE_UPLOAD_DIR)
        ds->has_accepted_serverdesc = 0;

      /* Append the extra-info document when this authority supports it. */
      if (extrainfo_len && router_supports_extrainfo(ds->digest, 1)) {
        upload_len += extrainfo_len;
        log_info(LD_DIR, "Uploading an extrainfo too (length %d)",
                 (int) extrainfo_len);
      }
      tor_addr_from_ipv4h(&ds_addr, ds->addr);
      /* Upload over Tor if anonymity is required or the firewall blocks a
       * direct connection to this authority's dirport. */
      post_via_tor = purpose_needs_anonymity(dir_purpose, router_purpose) ||
        !fascist_firewall_allows_address_dir(&ds_addr, ds->dir_port);
      directory_initiate_command_routerstatus(rs, dir_purpose,
                                              router_purpose,
                                              post_via_tor,
                                              NULL, payload, upload_len, 0);
  } SMARTLIST_FOREACH_END(ds);
  if (!found) {
    char *s = authdir_type_to_string(type);
    log_warn(LD_DIR, "Publishing server descriptor to directory authorities "
             "of type '%s', but no authorities of that type listed!", s);
    tor_free(s);
  }
}
/** Return true iff, according to the values in <b>options</b>, we should be
 * using directory guards for direct downloads of directory information. */
static int
should_use_directory_guards(const or_options_t *options)
{
  /* A public (non-bridge) server never routes its own directory fetches
   * through guards. */
  if (public_server_mode(options))
    return 0;

  /* Without entry guards, or with directory guards explicitly disabled,
   * there is nothing to use. */
  if (!options->UseEntryGuards || !options->UseEntryGuardsAsDirGuards)
    return 0;

  /* Aggressive or nonstandard directory-fetching configurations skip
   * directory guards as well. */
  if (options->DownloadExtraInfo || options->FetchDirInfoEarly ||
      options->FetchDirInfoExtraEarly || options->FetchUselessDescriptors)
    return 0;

  /* Finally, directory guards only apply when tunneled directory
   * connections are preferred. */
  return options->PreferTunneledDirConns ? 1 : 0;
}
/** Pick an unconstrained directory server from among our guards, the latest
 * networkstatus, or the fallback dirservers, for use in downloading
 * information of type <b>type</b>, and return its routerstatus. */
static const routerstatus_t *
directory_pick_generic_dirserver(dirinfo_type_t type, int pds_flags,
                                 uint8_t dir_purpose)
{
  const or_options_t *options = get_options();
  const routerstatus_t *rs = NULL;

  if (options->UseBridges)
    log_warn(LD_BUG, "Called when we have UseBridges set.");

  if (should_use_directory_guards(options)) {
    /* Restrict the choice to one of our directory guards. */
    const node_t *node = choose_random_dirguard(type);
    if (node)
      rs = node->rs;
  } else {
    /* anybody with a non-zero dirport will do */
    rs = router_pick_directory_server(type, pds_flags);
  }

  /* If nothing suitable was found, fall back to the dirserver list. */
  if (!rs) {
    log_info(LD_DIR, "No router found for %s; falling back to "
             "dirserver list.", dir_conn_purpose_to_string(dir_purpose));
    rs = router_pick_fallback_dirserver(type, pds_flags);
  }

  return rs;
}
/** Start a connection to a random running directory server, using
 * connection purpose <b>dir_purpose</b>, intending to fetch descriptors
 * of purpose <b>router_purpose</b>, and requesting <b>resource</b>.
 * Use <b>pds_flags</b> as arguments to router_pick_directory_server()
 * or router_pick_trusteddirserver().
 *
 * Picks a server (bridge, authority, or generic dirserver, depending on
 * configuration and purpose) and launches the request; logs and returns
 * without side effects if no suitable server can be found right now.
 */
void
directory_get_from_dirserver(uint8_t dir_purpose, uint8_t router_purpose,
                             const char *resource, int pds_flags)
{
  const routerstatus_t *rs = NULL;
  const or_options_t *options = get_options();
  int prefer_authority = directory_fetches_from_authorities(options);
  int require_authority = 0;
  int get_via_tor = purpose_needs_anonymity(dir_purpose, router_purpose);
  dirinfo_type_t type;
  time_t if_modified_since = 0;

  /* Map the connection purpose onto the kind of directory info we need,
   * so the pick_* helpers below only return servers that can answer. */
  /* FFFF we could break this switch into its own function, and call
   * it elsewhere in directory.c. -RD */
  switch (dir_purpose) {
    case DIR_PURPOSE_FETCH_EXTRAINFO:
      type = EXTRAINFO_DIRINFO |
             (router_purpose == ROUTER_PURPOSE_BRIDGE ? BRIDGE_DIRINFO :
                                                        V3_DIRINFO);
      break;
    case DIR_PURPOSE_FETCH_SERVERDESC:
      type = (router_purpose == ROUTER_PURPOSE_BRIDGE ? BRIDGE_DIRINFO :
                                                        V3_DIRINFO);
      break;
    case DIR_PURPOSE_FETCH_RENDDESC:
      type = HIDSERV_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
    case DIR_PURPOSE_FETCH_CERTIFICATE:
      type = V3_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_CONSENSUS:
      type = V3_DIRINFO;
      if (resource && !strcmp(resource,"microdesc"))
        type |= MICRODESC_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_MICRODESC:
      type = MICRODESC_DIRINFO;
      break;
    default:
      log_warn(LD_BUG, "Unexpected purpose %d", (int)dir_purpose);
      return;
  }

  if (dir_purpose == DIR_PURPOSE_FETCH_CONSENSUS) {
    /* Derive an If-Modified-Since time from whatever consensus we already
     * hold (parsed or merely cached), so the server can answer with a 304
     * instead of resending a document we have. */
    int flav = FLAV_NS;
    networkstatus_t *v;
    if (resource)
      flav = networkstatus_parse_flavor_name(resource);
    if (flav != -1) {
      /* IF we have a parsed consensus of this type, we can do an
       * if-modified-time based on it. */
      v = networkstatus_get_latest_consensus_by_flavor(flav);
      if (v)
        if_modified_since = v->valid_after + 180;
    } else {
      /* Otherwise it might be a consensus we don't parse, but which we
       * do cache. Look at the cached copy, perhaps. */
      cached_dir_t *cd = dirserv_get_consensus(resource);
      if (cd)
        if_modified_since = cd->published + 180;
    }
  }

  if (!options->FetchServerDescriptors && type != HIDSERV_DIRINFO)
    return;

  if (!get_via_tor) {
    if (options->UseBridges && type != BRIDGE_DIRINFO) {
      /* We want to ask a running bridge for which we have a descriptor.
       *
       * When we ask choose_random_entry() for a bridge, we specify what
       * sort of dir fetch we'll be doing, so it won't return a bridge
       * that can't answer our question.
       */
      /* XXX024 Not all bridges handle conditional consensus downloading,
       * so, for now, never assume the server supports that. -PP */
      const node_t *node = choose_random_dirguard(type);
      if (node && node->ri) {
        /* every bridge has a routerinfo. */
        tor_addr_t addr;
        routerinfo_t *ri = node->ri;
        node_get_addr(node, &addr);
        directory_initiate_command(ri->address, &addr,
                                   ri->or_port, 0/*no dirport*/,
                                   ri->cache_info.identity_digest,
                                   dir_purpose,
                                   router_purpose,
                                   DIRIND_ONEHOP,
                                   resource, NULL, 0, if_modified_since);
      } else
        log_notice(LD_DIR, "Ignoring directory request, since no bridge "
                           "nodes are available yet.");
      return;
    } else {
      if (prefer_authority || type == BRIDGE_DIRINFO) {
        /* only ask authdirservers, and don't ask myself */
        rs = router_pick_trusteddirserver(type, pds_flags);
        if (rs == NULL && (pds_flags & (PDS_NO_EXISTING_SERVERDESC_FETCH|
                                        PDS_NO_EXISTING_MICRODESC_FETCH))) {
          /* We don't want to fetch from any authorities that we're currently
           * fetching server descriptors from, and we got no match. Did we
           * get no match because all the authorities have connections
           * fetching server descriptors (in which case we should just
           * return,) or because all the authorities are down or on fire or
           * unreachable or something (in which case we should go on with
           * our fallback code)? */
          pds_flags &= ~(PDS_NO_EXISTING_SERVERDESC_FETCH|
                         PDS_NO_EXISTING_MICRODESC_FETCH);
          rs = router_pick_trusteddirserver(type, pds_flags);
          if (rs) {
            log_debug(LD_DIR, "Deferring serverdesc fetch: all authorities "
                      "are in use.");
            return;
          }
        }
        if (rs == NULL && require_authority) {
          log_info(LD_DIR, "No authorities were available for %s: will try "
                   "later.", dir_conn_purpose_to_string(dir_purpose));
          return;
        }
      }
      if (!rs && type != BRIDGE_DIRINFO) {
        /* */
        rs = directory_pick_generic_dirserver(type, pds_flags,
                                              dir_purpose);
        if (!rs) {
          /*XXXX024 I'm pretty sure this can never do any good, since
           * rs isn't set. */
          get_via_tor = 1; /* last resort: try routing it via Tor */
        }
      }
    }
  } else { /* get_via_tor */
    /* Never use fascistfirewall; we're going via Tor. */
    if (dir_purpose == DIR_PURPOSE_FETCH_RENDDESC) {
      /* only ask hidserv authorities, any of them will do */
      pds_flags |= PDS_IGNORE_FASCISTFIREWALL|PDS_ALLOW_SELF;
      rs = router_pick_trusteddirserver(HIDSERV_DIRINFO, pds_flags);
    } else {
      /* anybody with a non-zero dirport will do. Disregard firewalls. */
      pds_flags |= PDS_IGNORE_FASCISTFIREWALL;
      rs = router_pick_directory_server(type, pds_flags);
      /* If we have any hope of building an indirect conn, we know some router
       * descriptors. If (rs==NULL), we can't build circuits anyway, so
       * there's no point in falling back to the authorities in this case. */
    }
  }

  if (rs) {
    /* Found a server: launch the request, anonymized iff we decided to go
     * via Tor above. */
    const dir_indirection_t indirection =
      get_via_tor ? DIRIND_ANONYMOUS : DIRIND_ONEHOP;
    directory_initiate_command_routerstatus(rs, dir_purpose,
                                            router_purpose,
                                            indirection,
                                            resource, NULL, 0,
                                            if_modified_since);
  } else {
    log_notice(LD_DIR,
               "While fetching directory info, "
               "no running dirservers known. Will try again later. "
               "(purpose %d)", dir_purpose);
    if (!purpose_needs_anonymity(dir_purpose, router_purpose)) {
      /* remember we tried them all and failed. */
      directory_all_unreachable(time(NULL));
    }
  }
}
/** As directory_get_from_dirserver, but initiates a request to <i>every</i>
 * directory authority other than ourself.  Only for use by authorities when
 * searching for missing information while voting.
 *
 * <b>dir_purpose</b> must be a vote or detached-signatures fetch;
 * <b>resource</b> is passed through to each request unchanged. */
void
directory_get_from_all_authorities(uint8_t dir_purpose,
                                   uint8_t router_purpose,
                                   const char *resource)
{
  tor_assert(dir_purpose == DIR_PURPOSE_FETCH_STATUS_VOTE ||
             dir_purpose == DIR_PURPOSE_FETCH_DETACHED_SIGNATURES);

  SMARTLIST_FOREACH_BEGIN(router_get_trusted_dir_servers(),
                          dir_server_t *, ds) {
      routerstatus_t *rs;
      if (router_digest_is_me(ds->digest))
        continue; /* never ask ourself */
      if (!(ds->type & V3_DIRINFO))
        continue; /* only v3 authorities participate in voting */
      rs = &ds->fake_status;
      /* One-hop (non-anonymized) request straight to the authority. */
      directory_initiate_command_routerstatus(rs, dir_purpose, router_purpose,
                                              DIRIND_ONEHOP, resource, NULL,
                                              0, 0);
  } SMARTLIST_FOREACH_END(ds);
}
/** Return true iff <b>ind</b> requires a multihop circuit. */
static int
dirind_is_anon(dir_indirection_t ind)
{
  /* Only the two "anonymous" indirection modes need a multihop circuit. */
  return (ind == DIRIND_ANONYMOUS) || (ind == DIRIND_ANON_DIRPORT);
}
/** Same as directory_initiate_command_routerstatus(), but accepts
 * rendezvous data to fetch a hidden service descriptor.
 *
 * Resolves <b>status</b> to a contact address (preferring the node_t's
 * address when we have one), enforces ExcludeNodes/StrictNodes, and then
 * hands off to directory_initiate_command_rend(). */
void
directory_initiate_command_routerstatus_rend(const routerstatus_t *status,
                                             uint8_t dir_purpose,
                                             uint8_t router_purpose,
                                             dir_indirection_t indirection,
                                             const char *resource,
                                             const char *payload,
                                             size_t payload_len,
                                             time_t if_modified_since,
                                             const rend_data_t *rend_query)
{
  const or_options_t *options = get_options();
  const node_t *node;
  char address_buf[INET_NTOA_BUF_LEN+1];
  struct in_addr in;
  const char *address;
  tor_addr_t addr;
  const int anonymized_connection = dirind_is_anon(indirection);
  node = node_get_by_id(status->identity_digest);

  /* Anonymized requests need the server's descriptor so we can extend a
   * circuit to it; without a node_t we have to give up. */
  if (!node && anonymized_connection) {
    log_info(LD_DIR, "Not sending anonymized request to directory '%s'; we "
             "don't have its router descriptor.",
             routerstatus_describe(status));
    return;
  } else if (node) {
    node_get_address_string(node, address_buf, sizeof(address_buf));
    address = address_buf;
  } else {
    /* No node_t: fall back to the IPv4 address in the routerstatus. */
    in.s_addr = htonl(status->addr);
    tor_inet_ntoa(&in, address_buf, sizeof(address_buf));
    address = address_buf;
  }
  tor_addr_from_ipv4h(&addr, status->addr);

  /* With StrictNodes set, refuse to contact excluded mirrors at all. */
  if (options->ExcludeNodes && options->StrictNodes &&
      routerset_contains_routerstatus(options->ExcludeNodes, status, -1)) {
    log_warn(LD_DIR, "Wanted to contact directory mirror %s for %s, but "
             "it's in our ExcludedNodes list and StrictNodes is set. "
             "Skipping. This choice might make your Tor not work.",
             routerstatus_describe(status),
             dir_conn_purpose_to_string(dir_purpose));
    return;
  }

  directory_initiate_command_rend(address, &addr,
                                  status->or_port, status->dir_port,
                                  status->identity_digest,
                                  dir_purpose, router_purpose,
                                  indirection, resource,
                                  payload, payload_len, if_modified_since,
                                  rend_query);
}
/** Launch a new connection to the directory server <b>status</b> to
 * upload or download a server or rendezvous
 * descriptor. <b>dir_purpose</b> determines what
 * kind of directory connection we're launching, and must be one of
 * DIR_PURPOSE_{FETCH|UPLOAD}_{DIR|RENDDESC|RENDDESC_V2}. <b>router_purpose</b>
 * specifies the descriptor purposes we have in mind (currently only
 * used for FETCH_DIR).
 *
 * When uploading, <b>payload</b> and <b>payload_len</b> determine the content
 * of the HTTP post. Otherwise, <b>payload</b> should be NULL.
 *
 * When fetching a rendezvous descriptor, <b>resource</b> is the service ID we
 * want to fetch.
 */
void
directory_initiate_command_routerstatus(const routerstatus_t *status,
                                        uint8_t dir_purpose,
                                        uint8_t router_purpose,
                                        dir_indirection_t indirection,
                                        const char *resource,
                                        const char *payload,
                                        size_t payload_len,
                                        time_t if_modified_since)
{
  /* Simply the no-rendezvous-data case of the _rend variant. */
  directory_initiate_command_routerstatus_rend(status, dir_purpose,
                                               router_purpose, indirection,
                                               resource, payload, payload_len,
                                               if_modified_since,
                                               NULL /* no rend_query */);
}
/** Return true iff <b>conn</b> is the client side of a directory connection
 * we launched to ourself in order to determine the reachability of our
 * dir_port. */
static int
directory_conn_is_self_reachability_test(dir_connection_t *conn)
{
  const routerinfo_t *me;

  /* Reachability self-tests always request the "authority" resource. */
  if (!conn->requested_resource ||
      strcmpstart(conn->requested_resource,"authority"))
    return 0;

  me = router_get_my_routerinfo();
  return me != NULL &&
         router_digest_is_me(conn->identity_digest) &&
         tor_addr_eq_ipv4h(&conn->base_.addr, me->addr) && /*XXXX prop 118*/
         me->dir_port == conn->base_.port;
}
/** Called when we are unable to complete the client's request to a directory
 * server due to a network error: Mark the router as down and try again if
 * possible.
 */
static void
connection_dir_request_failed(dir_connection_t *conn)
{
  if (directory_conn_is_self_reachability_test(conn)) {
    return; /* this was a test fetch. don't retry. */
  }
  if (!entry_list_is_constrained(get_options()))
    router_set_status(conn->identity_digest, 0); /* don't try him again */

  /* Dispatch on what we were trying to fetch, so the right download
   * machinery hears about the failure. */
  switch (conn->base_.purpose) {
    case DIR_PURPOSE_FETCH_SERVERDESC:
    case DIR_PURPOSE_FETCH_EXTRAINFO:
      log_info(LD_DIR, "Giving up on serverdesc/extrainfo fetch from "
               "directory server at '%s'; retrying",
               conn->base_.address);
      if (conn->router_purpose == ROUTER_PURPOSE_BRIDGE)
        connection_dir_bridge_routerdesc_failed(conn);
      connection_dir_download_routerdesc_failed(conn);
      break;
    case DIR_PURPOSE_FETCH_CONSENSUS:
      if (conn->requested_resource)
        networkstatus_consensus_download_failed(0, conn->requested_resource);
      break;
    case DIR_PURPOSE_FETCH_CERTIFICATE:
      log_info(LD_DIR, "Giving up on certificate fetch from directory server "
               "at '%s'; retrying",
               conn->base_.address);
      connection_dir_download_cert_failed(conn, 0);
      break;
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
      log_info(LD_DIR, "Giving up downloading detached signatures from '%s'",
               conn->base_.address);
      break;
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
      log_info(LD_DIR, "Giving up downloading votes from '%s'",
               conn->base_.address);
      break;
    case DIR_PURPOSE_FETCH_MICRODESC:
      log_info(LD_DIR, "Giving up on downloading microdescriptors from "
               "directory server at '%s'; will retry", conn->base_.address);
      connection_dir_download_routerdesc_failed(conn);
      break;
    default:
      break; /* other purposes get no special failure handling */
  }
}
/** Helper: Attempt to fetch directly the descriptors of each bridge
 * whose hex fingerprint is listed in <b>descs</b>.
 */
static void
connection_dir_retry_bridges(smartlist_t *descs)
{
  char digest[DIGEST_LEN];
  SMARTLIST_FOREACH_BEGIN(descs, const char *, cp) {
    /* Skip (and complain about) anything that isn't a valid hex digest. */
    if (base16_decode(digest, DIGEST_LEN, cp, strlen(cp))<0) {
      log_warn(LD_BUG, "Malformed fingerprint in list: %s",
               escaped(cp));
      continue;
    }
    retry_bridge_descriptor_fetch_directly(digest);
  } SMARTLIST_FOREACH_END(cp);
}
/** Called when an attempt to download one or more router descriptors
 * or extra-info documents on connection <b>conn</b> failed.
 *
 * Deliberately a no-op beyond the sanity check: failures here are not the
 * descriptors' fault, and the periodic retry logic relaunches downloads
 * on its own schedule.
 */
static void
connection_dir_download_routerdesc_failed(dir_connection_t *conn)
{
  /* No need to increment the failure count for routerdescs, since
   * it's not their fault. */

  /* No need to relaunch descriptor downloads here: we already do it
   * every 10 or 60 seconds (FOO_DESCRIPTOR_RETRY_INTERVAL) in onion_main.c. */
  tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
             conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO ||
             conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC);

  (void) conn; /* conn is only read by the assertion above. */
}
/** Called when an attempt to download a bridge's routerdesc from
 * one of the authorities failed due to a network error. If
 * possible attempt to download descriptors from the bridge directly.
 */
static void
connection_dir_bridge_routerdesc_failed(dir_connection_t *conn)
{
  smartlist_t *which = NULL;

  /* Requests for bridge descriptors are in the form 'fp/', so ignore
     anything else. */
  if (!conn->requested_resource || strcmpstart(conn->requested_resource,"fp/"))
    return;

  /* Extract the list of hex fingerprints from the "fp/..." resource. */
  which = smartlist_new();
  dir_split_resource_into_fingerprints(conn->requested_resource
                                        + strlen("fp/"),
                                       which, NULL, 0);

  /* Sanity check: "fp/" resources never belong to extra-info fetches. */
  tor_assert(conn->base_.purpose != DIR_PURPOSE_FETCH_EXTRAINFO);
  if (smartlist_len(which)) {
    /* Retry each bridge directly; then free the fingerprint strings
     * (smartlist_free below only frees the list itself). */
    connection_dir_retry_bridges(which);
    SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
  }
  smartlist_free(which);
}
/** Called when an attempt to fetch a certificate fails.  <b>conn</b> is the
 * failed directory connection; <b>status</b> is the failure code passed on
 * to authority_cert_dl_failed() for each certificate we were requesting. */
static void
connection_dir_download_cert_failed(dir_connection_t *conn, int status)
{
  const char *fp_pfx = "fp/";
  const char *fpsk_pfx = "fp-sk/";
  smartlist_t *failed;
  tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_CERTIFICATE);

  if (!conn->requested_resource)
    return;
  failed = smartlist_new();

  /*
   * We have two cases download by fingerprint (resource starts
   * with "fp/") or download by fingerprint/signing key pair
   * (resource starts with "fp-sk/").
   */
  if (!strcmpstart(conn->requested_resource, fp_pfx)) {
    /* Download by fingerprint case */
    dir_split_resource_into_fingerprints(conn->requested_resource +
                                         strlen(fp_pfx),
                                         failed, NULL, DSR_HEX);
    SMARTLIST_FOREACH_BEGIN(failed, char *, cp) {
      /* Null signing key digest indicates download by fp only */
      authority_cert_dl_failed(cp, NULL, status);
      tor_free(cp);
    } SMARTLIST_FOREACH_END(cp);
  } else if (!strcmpstart(conn->requested_resource, fpsk_pfx)) {
    /* Download by (fp,sk) pairs */
    dir_split_resource_into_fingerprint_pairs(conn->requested_resource +
                                              strlen(fpsk_pfx), failed);
    SMARTLIST_FOREACH_BEGIN(failed, fp_pair_t *, cp) {
      authority_cert_dl_failed(cp->first, cp->second, status);
      tor_free(cp);
    } SMARTLIST_FOREACH_END(cp);
  } else {
    log_warn(LD_DIR,
             "Don't know what to do with failure for cert fetch %s",
             conn->requested_resource);
  }

  smartlist_free(failed);

  /* Let the certificate-download scheduler react to the failure. */
  update_certificate_downloads(time(NULL));
}
/** Evaluate the situation and decide if we should use an encrypted
 * "begindir-style" connection for this directory request.
 * 1) If or_port is 0, or it's a direct conn and or_port is firewalled
 *    or we're a dir mirror, no.
 * 2) If we prefer to avoid begindir conns, and we're not fetching or
 *    publishing a bridge relay descriptor, no.
 * 3) Else yes.
 */
static int
directory_command_should_use_begindir(const or_options_t *options,
                                      const tor_addr_t *addr,
                                      int or_port, uint8_t router_purpose,
                                      dir_indirection_t indirection)
{
  if (!or_port)
    return 0; /* We don't know an ORPort -- no chance. */
  if (indirection == DIRIND_DIRECT_CONN || indirection == DIRIND_ANON_DIRPORT)
    return 0; /* Caller explicitly wants a DirPort connection. */
  if (indirection == DIRIND_ONEHOP &&
      (!fascist_firewall_allows_address_or(addr, or_port) ||
       directory_fetches_from_authorities(options)))
    return 0; /* We're firewalled or are acting like a relay -- also no. */
  if (!options->TunnelDirConns &&
      router_purpose != ROUTER_PURPOSE_BRIDGE)
    return 0; /* We prefer to avoid using begindir conns. Fine. */
  return 1;
}
/** Helper for directory_initiate_command_routerstatus(): send the
 * command to a server whose address is <b>address</b>, whose IP is
 * <b>_addr</b>, whose ORPort is <b>or_port</b>, whose directory port is
 * <b>dir_port</b>, and whose identity key digest is <b>digest</b>.
 * The remaining arguments are as for directory_initiate_command_rend(),
 * which this calls with no rendezvous data. */
void
directory_initiate_command(const char *address, const tor_addr_t *_addr,
                           uint16_t or_port, uint16_t dir_port,
                           const char *digest,
                           uint8_t dir_purpose, uint8_t router_purpose,
                           dir_indirection_t indirection, const char *resource,
                           const char *payload, size_t payload_len,
                           time_t if_modified_since)
{
  directory_initiate_command_rend(address, _addr, or_port, dir_port,
                                  digest, dir_purpose,
                                  router_purpose, indirection,
                                  resource, payload, payload_len,
                                  if_modified_since, NULL);
}
/** Return non-zero iff a directory connection with purpose
 * <b>dir_purpose</b> reveals sensitive information about a Tor
 * instance's client activities.  (Such connections must be performed
 * through normal three-hop Tor circuits.) */
static int
is_sensitive_dir_purpose(uint8_t dir_purpose)
{
  /* Everything rendezvous-related is client-sensitive. */
  switch (dir_purpose) {
    case DIR_PURPOSE_FETCH_RENDDESC:
    case DIR_PURPOSE_HAS_FETCHED_RENDDESC:
    case DIR_PURPOSE_UPLOAD_RENDDESC:
    case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
    case DIR_PURPOSE_FETCH_RENDDESC_V2:
      return 1;
    default:
      return 0;
  }
}
/** Same as directory_initiate_command(), but accepts rendezvous data to
 * fetch a hidden service descriptor.
 *
 * Decides between a direct DirPort connection, a one-hop begindir
 * connection, and a fully anonymized tunneled connection, then creates
 * the dir_connection_t, launches (or links) it, and queues the HTTP
 * request on its outbuf. */
static void
directory_initiate_command_rend(const char *address, const tor_addr_t *_addr,
                                uint16_t or_port, uint16_t dir_port,
                                const char *digest,
                                uint8_t dir_purpose, uint8_t router_purpose,
                                dir_indirection_t indirection,
                                const char *resource,
                                const char *payload, size_t payload_len,
                                time_t if_modified_since,
                                const rend_data_t *rend_query)
{
  dir_connection_t *conn;
  const or_options_t *options = get_options();
  int socket_error = 0;
  int use_begindir = directory_command_should_use_begindir(options, _addr,
                                     or_port, router_purpose, indirection);
  const int anonymized_connection = dirind_is_anon(indirection);
  tor_addr_t addr;

  tor_assert(address);
  tor_assert(_addr);
  tor_assert(or_port || dir_port);
  tor_assert(digest);

  tor_addr_copy(&addr, _addr);

  log_debug(LD_DIR, "anonymized %d, use_begindir %d.",
            anonymized_connection, use_begindir);

  log_debug(LD_DIR, "Initiating %s", dir_conn_purpose_to_string(dir_purpose));

#ifndef NON_ANONYMOUS_MODE_ENABLED
  /* In ordinary builds, client-sensitive purposes must never go out
   * unanonymized. */
  tor_assert(!(is_sensitive_dir_purpose(dir_purpose) &&
               !anonymized_connection));
#else
  (void)is_sensitive_dir_purpose;
#endif

  /* ensure that we don't make direct connections when a SOCKS server is
   * configured. */
  if (!anonymized_connection && !use_begindir && !options->HTTPProxy &&
      (options->Socks4Proxy || options->Socks5Proxy)) {
    log_warn(LD_DIR, "Cannot connect to a directory server through a "
                     "SOCKS proxy!");
    return;
  }

  conn = dir_connection_new(tor_addr_family(&addr));

  /* set up conn so it's got all the data we need to remember */
  tor_addr_copy(&conn->base_.addr, &addr);
  /* begindir talks to the ORPort; plain directory requests use the
   * DirPort. */
  conn->base_.port = use_begindir ? or_port : dir_port;
  conn->base_.address = tor_strdup(address);
  memcpy(conn->identity_digest, digest, DIGEST_LEN);

  conn->base_.purpose = dir_purpose;
  conn->router_purpose = router_purpose;

  /* give it an initial state */
  conn->base_.state = DIR_CONN_STATE_CONNECTING;

  /* decide whether we can learn our IP address from this conn */
  /* XXXX This is a bad name for this field now. */
  conn->dirconn_direct = !anonymized_connection;

  /* copy rendezvous data, if any */
  if (rend_query)
    conn->rend_data = rend_data_dup(rend_query);

  if (!anonymized_connection && !use_begindir) {
    /* then we want to connect to dirport directly */

    if (options->HTTPProxy) {
      /* route the request through the HTTP proxy instead */
      tor_addr_copy(&addr, &options->HTTPProxyAddr);
      dir_port = options->HTTPProxyPort;
    }

    switch (connection_connect(TO_CONN(conn), conn->base_.address, &addr,
                               dir_port, &socket_error)) {
      case -1:
        connection_dir_request_failed(conn); /* retry if we want */
        /* XXX we only pass 'conn' above, not 'resource', 'payload',
         * etc. So in many situations it can't retry! -RD */
        connection_free(TO_CONN(conn));
        return;
      case 1:
        /* start flushing conn */
        conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
        /* fall through */
      case 0:
        /* queue the command on the outbuf */
        directory_send_command(conn, dir_purpose, 1, resource,
                               payload, payload_len,
                               if_modified_since);
        connection_watch_events(TO_CONN(conn), READ_EVENT | WRITE_EVENT);
        /* writable indicates finish, readable indicates broken link,
           error indicates broken link in windowsland. */
    }
  } else { /* we want to connect via a tor connection */
    entry_connection_t *linked_conn;
    /* Anonymized tunneled connections can never share a circuit.
     * One-hop directory connections can share circuits with each other
     * but nothing else. */
    int iso_flags = anonymized_connection ? ISO_STREAM : ISO_SESSIONGRP;

    /* If it's an anonymized connection, remember the fact that we
     * wanted it for later: maybe we'll want it again soon. */
    if (anonymized_connection && use_begindir)
      rep_hist_note_used_internal(time(NULL), 0, 1);
    else if (anonymized_connection && !use_begindir)
      rep_hist_note_used_port(time(NULL), conn->base_.port);

    /* make an AP connection
     * populate it and add it at the right state
     * hook up both sides
     */
    linked_conn =
      connection_ap_make_link(TO_CONN(conn),
                              conn->base_.address, conn->base_.port,
                              digest,
                              SESSION_GROUP_DIRCONN, iso_flags,
                              use_begindir, conn->dirconn_direct);
    if (!linked_conn) {
      log_warn(LD_NET,"Making tunnel to dirserver failed.");
      connection_mark_for_close(TO_CONN(conn));
      return;
    }

    if (connection_add(TO_CONN(conn)) < 0) {
      log_warn(LD_NET,"Unable to add connection for link to dirserver.");
      connection_mark_for_close(TO_CONN(conn));
      return;
    }
    conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;

    /* queue the command on the outbuf */
    directory_send_command(conn, dir_purpose, 0, resource,
                           payload, payload_len,
                           if_modified_since);

    connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT);
    IF_HAS_BUFFEREVENT(ENTRY_TO_CONN(linked_conn), {
      connection_watch_events(ENTRY_TO_CONN(linked_conn),
                              READ_EVENT|WRITE_EVENT);
    }) ELSE_IF_NO_BUFFEREVENT
      connection_start_reading(ENTRY_TO_CONN(linked_conn));
  }
}
/** Return true iff anything we say on <b>conn</b> is being encrypted before
 * we send it to the client/server. */
int
connection_dir_is_encrypted(dir_connection_t *conn)
{
  /* A directory connection is encrypted exactly when it is tunneled:
   * the only thing it can be linked to is an edge connection on a
   * circuit, and it only becomes unlinked when that edge connection
   * closes.  So the "linked" flag is a sufficient test. */
  const connection_t *base = TO_CONN(conn);
  return base->linked;
}
/** Helper for sorting: compare two strings lexicographically
 * (qsort/smartlist_sort-style comparator over string pointers). */
static int
compare_strs_(const void **a, const void **b)
{
  const char *left = *a;
  const char *right = *b;
  return strcmp(left, right);
}
/** Number of bytes of each authority identity digest to embed in a
 * conditional-consensus download URL. */
#define CONDITIONAL_CONSENSUS_FPR_LEN 3
#if (CONDITIONAL_CONSENSUS_FPR_LEN > DIGEST_LEN)
#error "conditional consensus fingerprint length is larger than digest length"
#endif

/** Return the URL we should use for a consensus download.
 *
 * This url depends on whether or not the server we go to
 * is sufficiently new to support conditional consensus downloading,
 * i.e. GET .../consensus/<b>fpr</b>+<b>fpr</b>+<b>fpr</b>
 *
 * If 'resource' is provided, it is the name of a consensus flavor to request.
 *
 * Returns a newly allocated string; the caller is responsible for freeing
 * it with tor_free().
 */
static char *
directory_get_consensus_url(const char *resource)
{
  char *url = NULL;
  const char *hyphen, *flavor;
  if (resource==NULL || strcmp(resource, "ns")==0) {
    flavor = ""; /* Request ns consensuses as "", so older servers will work*/
    hyphen = "";
  } else {
    flavor = resource;
    hyphen = "-";
  }

  {
    /* Build the sorted, '+'-joined list of truncated v3 authority
     * identity digests for the conditional-consensus URL. */
    char *authority_id_list;
    smartlist_t *authority_digests = smartlist_new();

    SMARTLIST_FOREACH_BEGIN(router_get_trusted_dir_servers(),
                            dir_server_t *, ds) {
        char *hex;
        if (!(ds->type & V3_DIRINFO))
          continue; /* only v3 authorities sign the consensus */

        hex = tor_malloc(2*CONDITIONAL_CONSENSUS_FPR_LEN+1);
        base16_encode(hex, 2*CONDITIONAL_CONSENSUS_FPR_LEN+1,
                      ds->v3_identity_digest, CONDITIONAL_CONSENSUS_FPR_LEN);
        smartlist_add(authority_digests, hex);
    } SMARTLIST_FOREACH_END(ds);
    smartlist_sort(authority_digests, compare_strs_);
    authority_id_list = smartlist_join_strings(authority_digests,
                                               "+", 0, NULL);

    tor_asprintf(&url, "/tor/status-vote/current/consensus%s%s/%s.z",
                 hyphen, flavor, authority_id_list);

    SMARTLIST_FOREACH(authority_digests, char *, cp, tor_free(cp));
    smartlist_free(authority_digests);
    tor_free(authority_id_list);
  }
  return url;
}
/** Queue an appropriate HTTP command on conn-\>outbuf.  The other args
 * are as in directory_initiate_command().
 *
 * <b>direct</b> is true when the request goes straight to a DirPort (and
 * may therefore need HTTP-proxy framing); otherwise the request is
 * tunneled and no proxy lines are emitted. */
static void
directory_send_command(dir_connection_t *conn,
                       int purpose, int direct, const char *resource,
                       const char *payload, size_t payload_len,
                       time_t if_modified_since)
{
  char proxystring[256];
  char hoststring[128];
  smartlist_t *headers = smartlist_new();
  char *url;
  char request[8192];
  const char *httpcommand = NULL;

  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);

  tor_free(conn->requested_resource);
  if (resource)
    conn->requested_resource = tor_strdup(resource);

  /* come up with a string for which Host: we want */
  if (conn->base_.port == 80) {
    strlcpy(hoststring, conn->base_.address, sizeof(hoststring));
  } else {
    tor_snprintf(hoststring, sizeof(hoststring),"%s:%d",
                 conn->base_.address, conn->base_.port);
  }

  /* Format if-modified-since */
  if (if_modified_since) {
    char b[RFC1123_TIME_LEN+1];
    format_rfc1123_time(b, if_modified_since);
    smartlist_add_asprintf(headers, "If-Modified-Since: %s\r\n", b);
  }

  /* come up with some proxy lines, if we're using one. */
  if (direct && get_options()->HTTPProxy) {
    char *base64_authenticator=NULL;
    const char *authenticator = get_options()->HTTPProxyAuthenticator;

    tor_snprintf(proxystring, sizeof(proxystring),"http://%s", hoststring);
    if (authenticator) {
      base64_authenticator = alloc_http_authenticator(authenticator);
      if (!base64_authenticator)
        log_warn(LD_BUG, "Encoding http authenticator failed");
    }
    if (base64_authenticator) {
      smartlist_add_asprintf(headers,
                   "Proxy-Authorization: Basic %s\r\n",
                   base64_authenticator);
      tor_free(base64_authenticator);
    }
  } else {
    proxystring[0] = 0;
  }

  /* Pick the HTTP method and URL according to the directory purpose.
   * Every branch sets httpcommand and allocates url. */
  switch (purpose) {
    case DIR_PURPOSE_FETCH_CONSENSUS:
      /* resource is optional.  If present, it's a flavor name */
      tor_assert(!payload);
      httpcommand = "GET";
      url = directory_get_consensus_url(resource);
      log_info(LD_DIR, "Downloading consensus from %s using %s",
               hoststring, url);
      break;
    case DIR_PURPOSE_FETCH_CERTIFICATE:
      tor_assert(resource);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/keys/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
      tor_assert(resource);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/status-vote/next/%s.z", resource);
      break;
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
      tor_assert(!resource);
      tor_assert(!payload);
      httpcommand = "GET";
      url = tor_strdup("/tor/status-vote/next/consensus-signatures.z");
      break;
    case DIR_PURPOSE_FETCH_SERVERDESC:
      tor_assert(resource);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/server/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_EXTRAINFO:
      tor_assert(resource);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/extra/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_MICRODESC:
      tor_assert(resource);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/micro/%s", resource);
      break;
    case DIR_PURPOSE_UPLOAD_DIR: {
      const char *why = router_get_descriptor_gen_reason();
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/");
      if (why) {
        /* Tell the authority why we regenerated our descriptor. */
        smartlist_add_asprintf(headers, "X-Desc-Gen-Reason: %s\r\n", why);
      }
      break;
    }
    case DIR_PURPOSE_UPLOAD_VOTE:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/post/vote");
      break;
    case DIR_PURPOSE_UPLOAD_SIGNATURES:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/post/consensus-signature");
      break;
    case DIR_PURPOSE_FETCH_RENDDESC_V2:
      tor_assert(resource);
      tor_assert(strlen(resource) <= REND_DESC_ID_V2_LEN_BASE32);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/rendezvous2/%s", resource);
      break;
    case DIR_PURPOSE_UPLOAD_RENDDESC:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/rendezvous/publish");
      break;
    case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/rendezvous2/publish");
      break;
    default:
      tor_assert(0);
      return;
  }

  /* Warn (but still send) when the URL is long enough that some proxies
   * are known to reject it. */
  if (strlen(proxystring) + strlen(url) >= 4096) {
    log_warn(LD_BUG,
             "Squid does not like URLs longer than 4095 bytes, and this "
             "one is %d bytes long: %s%s",
             (int)(strlen(proxystring) + strlen(url)), proxystring, url);
  }

  /* Emit the request line first ("GET <proxy-prefix><url>"), then the
   * headers, then any POST payload. */
  tor_snprintf(request, sizeof(request), "%s %s", httpcommand, proxystring);
  connection_write_to_buf(request, strlen(request), TO_CONN(conn));
  connection_write_to_buf(url, strlen(url), TO_CONN(conn));
  tor_free(url);

  if (!strcmp(httpcommand, "POST") || payload) {
    smartlist_add_asprintf(headers, "Content-Length: %lu\r\n",
                 payload ? (unsigned long)payload_len : 0);
  }

  {
    char *header = smartlist_join_strings(headers, "", 0, NULL);
    tor_snprintf(request, sizeof(request), " HTTP/1.0\r\nHost: %s\r\n%s\r\n",
                 hoststring, header);
    tor_free(header);
  }

  connection_write_to_buf(request, strlen(request), TO_CONN(conn));

  if (payload) {
    /* then send the payload afterwards too */
    connection_write_to_buf(payload, payload_len, TO_CONN(conn));
  }

  SMARTLIST_FOREACH(headers, char *, h, tor_free(h));
  smartlist_free(headers);
}
/** Parse an HTTP request string <b>headers</b> of the form
 * \verbatim
 * "\%s [http[s]://]\%s HTTP/1..."
 * \endverbatim
 * If it's well-formed, strdup the second \%s into *<b>url</b>, and
 * nul-terminate it. If the url doesn't start with "/tor/", rewrite it
 * so it does. Return 0.
 * Otherwise, return -1.
 *
 * On success the caller owns *<b>url</b> and must free it. */
STATIC int
parse_http_url(const char *headers, char **url)
{
  char *s, *start, *tmp;

  /* Walk past the method token ("GET"/"POST") to the start of the URL. */
  s = (char *)eat_whitespace_no_nl(headers);
  if (!*s) return -1;
  s = (char *)find_whitespace(s); /* get past GET/POST */
  if (!*s) return -1;
  s = (char *)eat_whitespace_no_nl(s);
  if (!*s) return -1;
  start = s; /* this is it, assuming it's valid */
  s = (char *)find_whitespace(start);
  if (!*s) return -1;

  /* tolerate the http[s] proxy style of putting the hostname in the url */
  if (s-start >= 4 && !strcmpstart(start,"http")) {
    tmp = start + 4;
    if (*tmp == 's')
      tmp++;
    if (s-tmp >= 3 && !strcmpstart(tmp,"://")) {
      tmp = strchr(tmp+3, '/');
      if (tmp && tmp < s) {
        log_debug(LD_DIR,"Skipping over 'http[s]://hostname/' string");
        start = tmp;
      }
    }
  }

  /* Check if the header is well formed (next sequence
   * should be HTTP/1.X\r\n). Assumes we're supporting 1.0? */
  {
    unsigned minor_ver;
    char ch;
    char *e = (char *)eat_whitespace_no_nl(s);
    if (2 != tor_sscanf(e, "HTTP/1.%u%c", &minor_ver, &ch)) {
      return -1;
    }
    if (ch != '\r')
      return -1;
  }

  /* Copy [start, s) into *url, prefixing "/tor" when it's missing. */
  if (s-start < 5 || strcmpstart(start,"/tor/")) { /* need to rewrite it */
    *url = tor_malloc(s - start + 5);
    strlcpy(*url,"/tor", s-start+5);
    strlcat((*url)+4, start, s-start+1);
  } else {
    *url = tor_strndup(start, s-start);
  }
  return 0;
}
/** Return a copy of the first HTTP header in <b>headers</b> whose key is
 * <b>which</b>.  The key should be given with a terminating colon and space;
 * this function copies everything after, up to but not including the
 * following \\r\\n.  Returns NULL (and allocates nothing) if no such
 * header is present. */
static char *
http_get_header(const char *headers, const char *which)
{
  const char *line = headers;
  while (line) {
    if (!strcasecmpstart(line, which)) {
      /* Found it: copy the value up to the end of the line. */
      const char *value = line + strlen(which);
      const char *eol = strchr(value, '\r');
      return eol ? tor_strndup(value, eol - value) : tor_strdup(value);
    }
    /* Advance to the character after the next newline, if any. */
    line = strchr(line, '\n');
    if (line)
      ++line;
  }
  return NULL;
}
/** If <b>headers</b> indicates that a proxy was involved, then rewrite
 * <b>conn</b>-\>address to describe our best guess of the address that
 * originated this HTTP request. */
static void
http_set_address_origin(const char *headers, connection_t *conn)
{
  char *fwd;

  /* Prefer "Forwarded-For:", falling back to "X-Forwarded-For:". */
  fwd = http_get_header(headers, "Forwarded-For: ");
  if (!fwd)
    fwd = http_get_header(headers, "X-Forwarded-For: ");
  if (fwd) {
    struct in_addr in;
    /* Only accept the header's value if it parses as a non-internal IPv4
     * address; anything else is ignored rather than trusted. */
    if (!tor_inet_aton(fwd, &in) || is_internal_IP(ntohl(in.s_addr), 0)) {
      log_debug(LD_DIR, "Ignoring unrecognized or internal IP %s",
                escaped(fwd));
      tor_free(fwd);
      return;
    }
    tor_free(conn->address);
    conn->address = tor_strdup(fwd);
    tor_free(fwd);
  }
}
/** Parse an HTTP response string <b>headers</b> of the form
* \verbatim
* "HTTP/1.\%d \%d\%s\r\n...".
* \endverbatim
*
* If it's well-formed, assign the status code to *<b>code</b> and
* return 0. Otherwise, return -1.
*
* On success: If <b>date</b> is provided, set *date to the Date
* header in the http headers, or 0 if no such header is found. If
* <b>compression</b> is provided, set *<b>compression</b> to the
* compression method given in the Content-Encoding header, or 0 if no
* such header is found, or -1 if the value of the header is not
* recognized. If <b>reason</b> is provided, strdup the reason string
* into it.
*/
int
parse_http_response(const char *headers, int *code, time_t *date,
                    compress_method_t *compression, char **reason)
{
  unsigned n1, n2;
  char datestr[RFC1123_TIME_LEN+1];
  smartlist_t *parsed_headers;
  tor_assert(headers);
  tor_assert(code);

  while (TOR_ISSPACE(*headers)) headers++; /* tolerate leading whitespace */

  /* Status line must look like "HTTP/1.0 NNN ..." or "HTTP/1.1 NNN ...",
   * with a status code in [100, 600). */
  if (tor_sscanf(headers, "HTTP/1.%u %u", &n1, &n2) < 2 ||
      (n1 != 0 && n1 != 1) ||
      (n2 < 100 || n2 >= 600)) {
    log_warn(LD_HTTP,"Failed to parse header %s",escaped(headers));
    return -1;
  }
  *code = n2;

  /* Split the header block into one string per line; trailing '\r's are
   * left on the lines, which is why prefix matches are used below. */
  parsed_headers = smartlist_new();
  smartlist_split_string(parsed_headers, headers, "\n",
                         SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, -1);
  if (reason) {
    /* The reason phrase is the third space-separated field of the status
     * line ("HTTP/1.x" / code / reason). */
    smartlist_t *status_line_elements = smartlist_new();
    tor_assert(smartlist_len(parsed_headers));
    smartlist_split_string(status_line_elements,
                           smartlist_get(parsed_headers, 0),
                           " ", SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 3);
    tor_assert(smartlist_len(status_line_elements) <= 3);
    if (smartlist_len(status_line_elements) == 3) {
      *reason = smartlist_get(status_line_elements, 2);
      smartlist_set(status_line_elements, 2, NULL); /* Prevent free */
    }
    SMARTLIST_FOREACH(status_line_elements, char *, cp, tor_free(cp));
    smartlist_free(status_line_elements);
  }
  if (date) {
    *date = 0;
    SMARTLIST_FOREACH(parsed_headers, const char *, s,
      if (!strcmpstart(s, "Date: ")) {
        strlcpy(datestr, s+6, sizeof(datestr));
        /* This will do nothing on failure, so we don't need to check
           the result.  We shouldn't warn, since there are many other valid
           date formats besides the one we use. */
        parse_rfc1123_time(datestr, date);
        break;
      });
  }
  if (compression) {
    /* Map the Content-Encoding header (if any) onto our compress_method_t
     * values; the "x-" variants are historical synonyms. */
    const char *enc = NULL;
    SMARTLIST_FOREACH(parsed_headers, const char *, s,
      if (!strcmpstart(s, "Content-Encoding: ")) {
        enc = s+18; break;
      });
    if (!enc || !strcmp(enc, "identity")) {
      *compression = NO_METHOD;
    } else if (!strcmp(enc, "deflate") || !strcmp(enc, "x-deflate")) {
      *compression = ZLIB_METHOD;
    } else if (!strcmp(enc, "gzip") || !strcmp(enc, "x-gzip")) {
      *compression = GZIP_METHOD;
    } else {
      log_info(LD_HTTP, "Unrecognized content encoding: %s. Trying to deal.",
               escaped(enc));
      *compression = UNKNOWN_METHOD;
    }
  }

  SMARTLIST_FOREACH(parsed_headers, char *, s, tor_free(s));
  smartlist_free(parsed_headers);

  return 0;
}
/** Return true iff <b>body</b> doesn't start with a plausible router or
* running-list or directory opening. This is a sign of possible compression.
**/
static int
body_is_plausible(const char *body, size_t len, int purpose)
{
  if (len == 0)
    return 1; /* empty bodies don't need decompression */
  if (len < 32)
    return 0; /* too short to judge; treat as implausible */

  /* Microdescriptors always begin with their onion-key element. */
  if (purpose == DIR_PURPOSE_FETCH_MICRODESC)
    return strcmpstart(body, "onion-key") == 0;

  /* Rendezvous descriptors may legitimately look like anything. */
  if (purpose == DIR_PURPOSE_FETCH_RENDDESC)
    return 1;

  /* Recognized directory-object openings are plausible outright. */
  if (strcmpstart(body, "router") == 0 ||
      strcmpstart(body, "signed-directory") == 0 ||
      strcmpstart(body, "network-status") == 0 ||
      strcmpstart(body, "running-routers") == 0)
    return 1;

  /* Otherwise, require the first 32 bytes to be printable-or-space text;
   * binary junk there suggests the body is still compressed. */
  for (int i = 0; i < 32; ++i) {
    if (!TOR_ISPRINT(body[i]) && !TOR_ISSPACE(body[i]))
      return 0;
  }
  return 1;
}
/** Called when we've just fetched a bunch of router descriptors in
* <b>body</b>. The list <b>which</b>, if present, holds digests for
* descriptors we requested: descriptor digests if <b>descriptor_digests</b>
* is true, or identity digests otherwise. Parse the descriptors, validate
* them, and annotate them as having purpose <b>purpose</b> and as having been
* downloaded from <b>source</b>.
*
* Return the number of routers actually added. */
static int
load_downloaded_routers(const char *body, smartlist_t *which,
                        int descriptor_digests,
                        int router_purpose,
                        const char *source)
{
  char annotation_buf[256];
  char time_buf[ISO_TIME_LEN+1];
  int n_added = 0;
  const int is_general = (router_purpose == ROUTER_PURPOSE_GENERAL);

  tor_assert(source);
  format_iso_time(time_buf, time(NULL));

  /* Build the annotation block that gets prepended to each descriptor:
   * download time, source, and (for non-general purposes) a @purpose line. */
  if (tor_snprintf(annotation_buf, sizeof(annotation_buf),
                   "@downloaded-at %s\n"
                   "@source %s\n"
                   "%s%s%s", time_buf, escaped(source),
                   is_general ? "" : "@purpose ",
                   is_general ? "" : router_purpose_to_string(router_purpose),
                   is_general ? "" : "\n") < 0)
    return 0; /* annotation didn't fit; load nothing */

  n_added = router_load_routers_from_string(body, NULL, SAVED_NOWHERE, which,
                                            descriptor_digests,
                                            annotation_buf);
  /* Only general-purpose descriptors count toward bootstrap progress. */
  if (is_general)
    control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
                            count_loading_descriptors_progress());
  return n_added;
}
/** We are a client, and we've finished reading the server's
* response. Parse it and act appropriately.
*
* If we're still happy with using this directory server in the future, return
* 0. Otherwise return -1; and the caller should consider trying the request
* again.
*
* The caller will take care of marking the connection for close.
*/
static int
connection_dir_client_reached_eof(dir_connection_t *conn)
{
  char *body;
  char *headers;
  char *reason = NULL;
  size_t body_len = 0, orig_len = 0;
  int status_code;
  time_t date_header = 0;
  long delta;
  compress_method_t compression;
  int plausible;
  int skewed = 0;
  /* Partial bodies are only acceptable for descriptor-style fetches, where
   * each entry in the body is self-delimiting. */
  int allow_partial = (conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
                       conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO ||
                       conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC);
  int was_compressed = 0;
  time_t now = time(NULL);
  int src_code;

  /* Step 1: pull the complete HTTP headers and body off the inbuf. */
  switch (connection_fetch_from_buf_http(TO_CONN(conn),
                              &headers, MAX_HEADERS_SIZE,
                              &body, &body_len, MAX_DIR_DL_SIZE,
                              allow_partial)) {
    case -1: /* overflow */
      log_warn(LD_PROTOCOL,
               "'fetch' response too large (server '%s:%d'). Closing.",
               conn->base_.address, conn->base_.port);
      return -1;
    case 0:
      log_info(LD_HTTP,
               "'fetch' response not all here, but we're at eof. Closing.");
      return -1;
    /* case 1, fall through */
  }
  orig_len = body_len; /* remember pre-decompression size for accounting */

  /* Step 2: parse the status line and interesting headers. */
  if (parse_http_response(headers, &status_code, &date_header,
                          &compression, &reason) < 0) {
    log_warn(LD_HTTP,"Unparseable headers (server '%s:%d'). Closing.",
             conn->base_.address, conn->base_.port);
    tor_free(body); tor_free(headers);
    return -1;
  }
  if (!reason) reason = tor_strdup("[no reason given]");
  log_debug(LD_DIR,
            "Received response from directory server '%s:%d': %d %s "
            "(purpose: %d)",
            conn->base_.address, conn->base_.port, status_code,
            escaped(reason),
            conn->base_.purpose);

  /* now check if it's got any hints for us about our IP address. */
  if (conn->dirconn_direct) {
    char *guess = http_get_header(headers, X_ADDRESS_HEADER);
    if (guess) {
      router_new_address_suggestion(guess, conn);
      tor_free(guess);
    }
  }

  /* Step 3: check for clock skew relative to the server's Date header. */
  if (date_header > 0) {
    /* The date header was written very soon after we sent our request,
     * so compute the skew as the difference between sending the request
     * and the date header.  (We used to check now-date_header, but that's
     * inaccurate if we spend a lot of time downloading.)
     */
    delta = conn->base_.timestamp_lastwritten - date_header;
    if (labs(delta)>ALLOW_DIRECTORY_TIME_SKEW) {
      char dbuf[64];
      int trusted = router_digest_is_trusted_dir(conn->identity_digest);
      format_time_interval(dbuf, sizeof(dbuf), delta);
      /* Skew from a trusted directory authority is worth a warning; from
       * anyone else it only rates an info-level message. */
      log_fn(trusted ? LOG_WARN : LOG_INFO,
             LD_HTTP,
             "Received directory with skewed time (server '%s:%d'): "
             "It seems that our clock is %s by %s, or that theirs is %s. "
             "Tor requires an accurate clock to work: please check your time, "
             "timezone, and date settings.",
             conn->base_.address, conn->base_.port,
             delta>0 ? "ahead" : "behind", dbuf,
             delta>0 ? "behind" : "ahead");
      skewed = 1; /* don't check the recommended-versions line */
      if (trusted)
        control_event_general_status(LOG_WARN,
                            "CLOCK_SKEW SKEW=%ld SOURCE=DIRSERV:%s:%d",
                            delta, conn->base_.address, conn->base_.port);
    } else {
      log_debug(LD_HTTP, "Time on received directory is within tolerance; "
                "we are %ld seconds skewed. (That's okay.)", delta);
    }
  }
  (void) skewed; /* skewed isn't used yet. */

  /* Step 4: a 503 means the server is overloaded; remember that so we
   * avoid hammering it, and let the caller retry elsewhere. */
  if (status_code == 503) {
    routerstatus_t *rs;
    dir_server_t *ds;
    const char *id_digest = conn->identity_digest;
    log_info(LD_DIR,"Received http status code %d (%s) from server "
             "'%s:%d'. I'll try again soon.",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port);
    if ((rs = router_get_mutable_consensus_status_by_id(id_digest)))
      rs->last_dir_503_at = now;
    if ((ds = router_get_fallback_dirserver_by_digest(id_digest)))
      ds->fake_status.last_dir_503_at = now;

    tor_free(body); tor_free(headers); tor_free(reason);
    return -1;
  }

  /* Step 5: decompress the body if the headers claim compression, or if
   * the content doesn't look like plain directory material. */
  plausible = body_is_plausible(body, body_len, conn->base_.purpose);
  if (compression != NO_METHOD || !plausible) {
    char *new_body = NULL;
    size_t new_len = 0;
    /* Sniff the actual compression from the body's magic bytes, since
     * servers sometimes mislabel Content-Encoding. */
    compress_method_t guessed = detect_compression_method(body, body_len);
    if (compression == UNKNOWN_METHOD || guessed != compression) {
      /* Tell the user if we don't believe what we're told about compression.*/
      const char *description1, *description2;
      if (compression == ZLIB_METHOD)
        description1 = "as deflated";
      else if (compression == GZIP_METHOD)
        description1 = "as gzipped";
      else if (compression == NO_METHOD)
        description1 = "as uncompressed";
      else
        description1 = "with an unknown Content-Encoding";
      if (guessed == ZLIB_METHOD)
        description2 = "deflated";
      else if (guessed == GZIP_METHOD)
        description2 = "gzipped";
      else if (!plausible)
        description2 = "confusing binary junk";
      else
        description2 = "uncompressed";

      log_info(LD_HTTP, "HTTP body from server '%s:%d' was labeled %s, "
               "but it seems to be %s.%s",
               conn->base_.address, conn->base_.port, description1,
               description2,
               (compression>0 && guessed>0)?"  Trying both.":"");
    }
    /* Try declared compression first if we can. */
    if (compression == GZIP_METHOD  || compression == ZLIB_METHOD)
      tor_gzip_uncompress(&new_body, &new_len, body, body_len, compression,
                          !allow_partial, LOG_PROTOCOL_WARN);
    /* Okay, if that didn't work, and we think that it was compressed
     * differently, try that. */
    if (!new_body &&
        (guessed == GZIP_METHOD || guessed == ZLIB_METHOD) &&
        compression != guessed)
      tor_gzip_uncompress(&new_body, &new_len, body, body_len, guessed,
                          !allow_partial, LOG_PROTOCOL_WARN);
    /* If we're pretty sure that we have a compressed directory, and
     * we didn't manage to uncompress it, then warn and bail. */
    if (!plausible && !new_body) {
      log_fn(LOG_PROTOCOL_WARN, LD_HTTP,
             "Unable to decompress HTTP body (server '%s:%d').",
             conn->base_.address, conn->base_.port);
      tor_free(body); tor_free(headers); tor_free(reason);
      return -1;
    }
    if (new_body) {
      /* Swap the decompressed body in for the raw one. */
      tor_free(body);
      body = new_body;
      body_len = new_len;
      was_compressed = 1;
    }
  }

  /* Step 6: dispatch on the connection's purpose.  Each branch below
   * handles one kind of directory request; error paths free body/headers/
   * reason themselves before returning. */

  /* Consensus fetch: install the new consensus and kick off follow-up
   * downloads. */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_CONSENSUS) {
    int r;
    const char *flavname = conn->requested_resource;
    if (status_code != 200) {
      /* 304 Not Modified is routine; anything else rates a warning. */
      int severity = (status_code == 304) ? LOG_INFO : LOG_WARN;
      tor_log(severity, LD_DIR,
          "Received http status code %d (%s) from server "
          "'%s:%d' while fetching consensus directory.",
           status_code, escaped(reason), conn->base_.address,
           conn->base_.port);
      tor_free(body); tor_free(headers); tor_free(reason);
      networkstatus_consensus_download_failed(status_code, flavname);
      return -1;
    }
    log_info(LD_DIR,"Received consensus directory (size %d) from server "
             "'%s:%d'", (int)body_len, conn->base_.address, conn->base_.port);
    if ((r=networkstatus_set_current_consensus(body, flavname, 0))<0) {
      log_fn(r<-1?LOG_WARN:LOG_INFO, LD_DIR,
             "Unable to load %s consensus directory downloaded from "
             "server '%s:%d'. I'll try again soon.",
             flavname, conn->base_.address, conn->base_.port);
      tor_free(body); tor_free(headers); tor_free(reason);
      networkstatus_consensus_download_failed(0, flavname);
      return -1;
    }
    /* launches router downloads as needed */
    routers_update_all_from_networkstatus(now, 3);
    update_microdescs_from_networkstatus(now);
    update_microdesc_downloads(now);
    directory_info_has_arrived(now, 0);
    log_info(LD_DIR, "Successfully loaded consensus.");
  }

  /* Authority-certificate fetch. */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_CERTIFICATE) {
    if (status_code != 200) {
      log_warn(LD_DIR,
          "Received http status code %d (%s) from server "
          "'%s:%d' while fetching \"/tor/keys/%s\".",
           status_code, escaped(reason), conn->base_.address,
           conn->base_.port, conn->requested_resource);
      connection_dir_download_cert_failed(conn, status_code);
      tor_free(body); tor_free(headers); tor_free(reason);
      return -1;
    }
    log_info(LD_DIR,"Received authority certificates (size %d) from server "
             "'%s:%d'", (int)body_len, conn->base_.address, conn->base_.port);

    /*
     * Tell trusted_dirs_load_certs_from_string() whether it was by fp
     * or fp-sk pair.
     */
    src_code = -1;
    if (!strcmpstart(conn->requested_resource, "fp/")) {
      src_code = TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST;
    } else if (!strcmpstart(conn->requested_resource, "fp-sk/")) {
      src_code = TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_SK_DIGEST;
    }

    if (src_code != -1) {
      if (trusted_dirs_load_certs_from_string(body, src_code, 1)<0) {
        log_warn(LD_DIR, "Unable to parse fetched certificates");
        /* if we fetched more than one and only some failed, the successful
         * ones got flushed to disk so it's safe to call this on them */
        connection_dir_download_cert_failed(conn, status_code);
      } else {
        directory_info_has_arrived(now, 0);
        log_info(LD_DIR, "Successfully loaded certificates from fetch.");
      }
    } else {
      log_warn(LD_DIR,
               "Couldn't figure out what to do with fetched certificates for "
               "unknown resource %s",
               conn->requested_resource);
      connection_dir_download_cert_failed(conn, status_code);
    }
  }

  /* Vote fetch (authorities only, during voting). */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_STATUS_VOTE) {
    const char *msg;
    int st;
    log_info(LD_DIR,"Got votes (size %d) from server %s:%d",
             (int)body_len, conn->base_.address, conn->base_.port);
    if (status_code != 200) {
      log_warn(LD_DIR,
             "Received http status code %d (%s) from server "
             "'%s:%d' while fetching \"/tor/status-vote/next/%s.z\".",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port, conn->requested_resource);
      tor_free(body); tor_free(headers); tor_free(reason);
      return -1;
    }
    dirvote_add_vote(body, &msg, &st);
    if (st > 299) {
      log_warn(LD_DIR, "Error adding retrieved vote: %s", msg);
    } else {
      log_info(LD_DIR, "Added vote(s) successfully [msg: %s]", msg);
    }
  }

  /* Detached-signature fetch (authorities only). */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_DETACHED_SIGNATURES) {
    const char *msg = NULL;
    log_info(LD_DIR,"Got detached signatures (size %d) from server %s:%d",
             (int)body_len, conn->base_.address, conn->base_.port);
    if (status_code != 200) {
      log_warn(LD_DIR,
        "Received http status code %d (%s) from server '%s:%d' while fetching "
        "\"/tor/status-vote/next/consensus-signatures.z\".",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port);
      tor_free(body); tor_free(headers); tor_free(reason);
      return -1;
    }
    if (dirvote_add_signatures(body, conn->base_.address, &msg)<0) {
      log_warn(LD_DIR, "Problem adding detached signatures from %s:%d: %s",
               conn->base_.address, conn->base_.port, msg?msg:"???");
    }
  }

  /* Router descriptor / extra-info fetch. */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
      conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO) {
    int was_ei = conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO;
    smartlist_t *which = NULL;
    int n_asked_for = 0;
    /* "d/..." requests are by descriptor digest; "fp/..." by identity. */
    int descriptor_digests = conn->requested_resource &&
                             !strcmpstart(conn->requested_resource,"d/");
    log_info(LD_DIR,"Received %s (size %d) from server '%s:%d'",
             was_ei ? "extra server info" : "server info",
             (int)body_len, conn->base_.address, conn->base_.port);
    if (conn->requested_resource &&
        (!strcmpstart(conn->requested_resource,"d/") ||
         !strcmpstart(conn->requested_resource,"fp/"))) {
      /* Recover the list of digests we asked for, so we can mark any that
       * don't come back as failed. */
      which = smartlist_new();
      dir_split_resource_into_fingerprints(conn->requested_resource +
                                             (descriptor_digests ? 2 : 3),
                                           which, NULL, 0);
      n_asked_for = smartlist_len(which);
    }
    if (status_code != 200) {
      int dir_okay = status_code == 404 ||
        (status_code == 400 && !strcmp(reason, "Servers unavailable."));
      /* 404 means that it didn't have them; no big deal.
       * Older (pre-0.1.1.8) servers said 400 Servers unavailable instead. */
      log_fn(dir_okay ? LOG_INFO : LOG_WARN, LD_DIR,
             "Received http status code %d (%s) from server '%s:%d' "
             "while fetching \"/tor/server/%s\". I'll try again soon.",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port, conn->requested_resource);
      if (!which) {
        connection_dir_download_routerdesc_failed(conn);
      } else {
        dir_routerdesc_download_failed(which, status_code,
                                       conn->router_purpose,
                                       was_ei, descriptor_digests);
        SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
        smartlist_free(which);
      }
      tor_free(body); tor_free(headers); tor_free(reason);
      return dir_okay ? 0 : -1;
    }
    /* Learn the routers, assuming we requested by fingerprint or "all"
     * or "authority".
     *
     * We use "authority" to fetch our own descriptor for
     * testing, and to fetch bridge descriptors for bootstrapping. Ignore
     * the output of "authority" requests unless we are using bridges,
     * since otherwise they'll be the response from reachability tests,
     * and we don't really want to add that to our routerlist. */
    if (which || (conn->requested_resource &&
                  (!strcmpstart(conn->requested_resource, "all") ||
                   (!strcmpstart(conn->requested_resource, "authority") &&
                    get_options()->UseBridges)))) {
      /* as we learn from them, we remove them from 'which' */
      if (was_ei) {
        router_load_extrainfo_from_string(body, NULL, SAVED_NOWHERE, which,
                                          descriptor_digests);
      } else {
        //router_load_routers_from_string(body, NULL, SAVED_NOWHERE, which,
        //                       descriptor_digests, conn->router_purpose);
        if (load_downloaded_routers(body, which, descriptor_digests,
                                conn->router_purpose,
                                conn->base_.address))
          directory_info_has_arrived(now, 0);
      }
    }
    if (which) { /* mark remaining ones as failed */
      log_info(LD_DIR, "Received %d/%d %s requested from %s:%d",
               n_asked_for-smartlist_len(which), n_asked_for,
               was_ei ? "extra-info documents" : "router descriptors",
               conn->base_.address, (int)conn->base_.port);
      if (smartlist_len(which)) {
        dir_routerdesc_download_failed(which, status_code,
                                       conn->router_purpose,
                                       was_ei, descriptor_digests);
      }
      SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
      smartlist_free(which);
    }
    if (directory_conn_is_self_reachability_test(conn))
      router_dirport_found_reachable();
  }

  /* Microdescriptor fetch (always by base64 digest256 list). */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC) {
    smartlist_t *which = NULL;
    log_info(LD_DIR,"Received answer to microdescriptor request (status %d, "
             "size %d) from server '%s:%d'",
             status_code, (int)body_len, conn->base_.address,
             conn->base_.port);
    tor_assert(conn->requested_resource &&
               !strcmpstart(conn->requested_resource, "d/"));
    which = smartlist_new();
    dir_split_resource_into_fingerprints(conn->requested_resource+2,
                                         which, NULL,
                                         DSR_DIGEST256|DSR_BASE64);
    if (status_code != 200) {
      log_info(LD_DIR, "Received status code %d (%s) from server "
               "'%s:%d' while fetching \"/tor/micro/%s\".  I'll try again "
               "soon.",
               status_code, escaped(reason), conn->base_.address,
               (int)conn->base_.port, conn->requested_resource);
      dir_microdesc_download_failed(which, status_code);
      SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
      smartlist_free(which);
      tor_free(body); tor_free(headers); tor_free(reason);
      return 0;
    } else {
      smartlist_t *mds;
      /* microdescs_add_to_cache() removes each successfully-parsed digest
       * from 'which'; whatever remains afterwards failed. */
      mds = microdescs_add_to_cache(get_microdesc_cache(),
                                    body, body+body_len, SAVED_NOWHERE, 0,
                                    now, which);
      if (smartlist_len(which)) {
        /* Mark remaining ones as failed. */
        dir_microdesc_download_failed(which, status_code);
      }
      control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
                              count_loading_descriptors_progress());
      SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
      smartlist_free(which);
      smartlist_free(mds);
    }
  }

  /* We're a relay that just uploaded our descriptor to an authority. */
  if (conn->base_.purpose == DIR_PURPOSE_UPLOAD_DIR) {
    switch (status_code) {
      case 200: {
          /* NOTE(review): ds is dereferenced below without a NULL check;
           * this assumes we only ever upload descriptors to servers found
           * by router_get_trusteddirserver_by_digest() — confirm. */
          dir_server_t *ds =
            router_get_trusteddirserver_by_digest(conn->identity_digest);
          char *rejected_hdr = http_get_header(headers,
                                               "X-Descriptor-Not-New: ");
          if (rejected_hdr) {
            if (!strcmp(rejected_hdr, "Yes")) {
              log_info(LD_GENERAL,
                       "Authority '%s' declined our descriptor (not new)",
                       ds->nickname);
              /* XXXX use this information; be sure to upload next one
               * sooner. -NM */
              /* XXXX023 On further thought, the task above implies that we're
               * basing our regenerate-descriptor time on when we uploaded the
               * last descriptor, not on the published time of the last
               * descriptor.  If those are different, that's a bad thing to
               * do. -NM */
            }
            tor_free(rejected_hdr);
          }
          log_info(LD_GENERAL,"eof (status 200) after uploading server "
                   "descriptor: finished.");
          control_event_server_status(
                      LOG_NOTICE, "ACCEPTED_SERVER_DESCRIPTOR DIRAUTH=%s:%d",
                      conn->base_.address, conn->base_.port);

          ds->has_accepted_serverdesc = 1;
          if (directories_have_accepted_server_descriptor())
            control_event_server_status(LOG_NOTICE, "GOOD_SERVER_DESCRIPTOR");
        }
        break;
      case 400:
        log_warn(LD_GENERAL,"http status 400 (%s) response from "
                 "dirserver '%s:%d'. Please correct.",
                 escaped(reason), conn->base_.address, conn->base_.port);
        control_event_server_status(LOG_WARN,
                      "BAD_SERVER_DESCRIPTOR DIRAUTH=%s:%d REASON=\"%s\"",
                      conn->base_.address, conn->base_.port, escaped(reason));
        break;
      default:
        log_warn(LD_GENERAL,
             "http status %d (%s) reason unexpected while uploading "
             "descriptor to server '%s:%d').",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port);
        break;
    }
    /* return 0 in all cases, since we don't want to mark any
     * dirservers down just because they don't like us. */
  }

  /* We're an authority that just uploaded a vote. */
  if (conn->base_.purpose == DIR_PURPOSE_UPLOAD_VOTE) {
    switch (status_code) {
      case 200: {
        log_notice(LD_DIR,"Uploaded a vote to dirserver %s:%d",
                   conn->base_.address, conn->base_.port);
        }
        break;
      case 400:
        log_warn(LD_DIR,"http status 400 (%s) response after uploading "
                 "vote to dirserver '%s:%d'. Please correct.",
                 escaped(reason), conn->base_.address, conn->base_.port);
        break;
      default:
        log_warn(LD_GENERAL,
             "http status %d (%s) reason unexpected while uploading "
             "vote to server '%s:%d').",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port);
        break;
    }
    /* return 0 in all cases, since we don't want to mark any
     * dirservers down just because they don't like us. */
  }

  /* We're an authority that just uploaded consensus signatures. */
  if (conn->base_.purpose == DIR_PURPOSE_UPLOAD_SIGNATURES) {
    switch (status_code) {
      case 200: {
        log_notice(LD_DIR,"Uploaded signature(s) to dirserver %s:%d",
                   conn->base_.address, conn->base_.port);
        }
        break;
      case 400:
        log_warn(LD_DIR,"http status 400 (%s) response after uploading "
                 "signatures to dirserver '%s:%d'. Please correct.",
                 escaped(reason), conn->base_.address, conn->base_.port);
        break;
      default:
        log_warn(LD_GENERAL,
             "http status %d (%s) reason unexpected while uploading "
             "signatures to server '%s:%d').",
             status_code, escaped(reason), conn->base_.address,
             conn->base_.port);
        break;
    }
    /* return 0 in all cases, since we don't want to mark any
     * dirservers down just because they don't like us. */
  }

  /* v0 hidden-service descriptor fetch. */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_RENDDESC) {
    tor_assert(conn->rend_data);
    log_info(LD_REND,"Received rendezvous descriptor (size %d, status %d "
             "(%s))",
             (int)body_len, status_code, escaped(reason));
    switch (status_code) {
      case 200:
        if (rend_cache_store(body, body_len, 0,
                             conn->rend_data->onion_address) < -1) {
          log_warn(LD_REND,"Failed to parse rendezvous descriptor.");
          /* Any pending rendezvous attempts will notice when
           * connection_about_to_close_connection()
           * cleans this dir conn up. */
          /* We could retry. But since v0 descriptors are going out of
           * style, it isn't worth the hassle. We'll do better in v2. */
        } else {
          /* Success, or at least there's a v2 descriptor already
           * present. Notify pending connections about this. */
          conn->base_.purpose = DIR_PURPOSE_HAS_FETCHED_RENDDESC;
          rend_client_desc_trynow(conn->rend_data->onion_address);
        }
        break;
      case 404:
        /* Not there. Pending connections will be notified when
         * connection_about_to_close_connection() cleans this conn up. */
        break;
      case 400:
        log_warn(LD_REND,
                 "http status 400 (%s). Dirserver didn't like our "
                 "rendezvous query?", escaped(reason));
        break;
      default:
        log_warn(LD_REND,"http status %d (%s) response unexpected while "
                 "fetching hidden service descriptor (server '%s:%d').",
                 status_code, escaped(reason), conn->base_.address,
                 conn->base_.port);
        break;
    }
  }

  /* v2 hidden-service descriptor fetch. */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_RENDDESC_V2) {
    /* Helper: emit a controller event noting that this fetch failed. */
    #define SEND_HS_DESC_FAILED_EVENT() ( \
      control_event_hs_descriptor_failed(conn->rend_data, \
                                         conn->identity_digest) )
    tor_assert(conn->rend_data);
    log_info(LD_REND,"Received rendezvous descriptor (size %d, status %d "
             "(%s))",
             (int)body_len, status_code, escaped(reason));
    switch (status_code) {
      case 200:
        switch (rend_cache_store_v2_desc_as_client(body, conn->rend_data)) {
          case -2:
            log_warn(LD_REND,"Fetching v2 rendezvous descriptor failed. "
                     "Retrying at another directory.");
            /* We'll retry when connection_about_to_close_connection()
             * cleans this dir conn up. */
            SEND_HS_DESC_FAILED_EVENT();
            break;
          case -1:
            /* We already have a v0 descriptor here. Ignoring this one
             * and _not_ performing another request. */
            log_info(LD_REND, "Successfully fetched v2 rendezvous "
                     "descriptor, but we already have a v0 descriptor.");
            conn->base_.purpose = DIR_PURPOSE_HAS_FETCHED_RENDDESC;
            break;
          default:
            /* success. notify pending connections about this. */
            log_info(LD_REND, "Successfully fetched v2 rendezvous "
                     "descriptor.");
            control_event_hs_descriptor_received(conn->rend_data,
                                                 conn->identity_digest);
            conn->base_.purpose = DIR_PURPOSE_HAS_FETCHED_RENDDESC;
            rend_client_desc_trynow(conn->rend_data->onion_address);
            break;
        }
        break;
      case 404:
        /* Not there. We'll retry when
         * connection_about_to_close_connection() cleans this conn up. */
        log_info(LD_REND,"Fetching v2 rendezvous descriptor failed: "
                 "Retrying at another directory.");
        SEND_HS_DESC_FAILED_EVENT();
        break;
      case 400:
        log_warn(LD_REND, "Fetching v2 rendezvous descriptor failed: "
                 "http status 400 (%s). Dirserver didn't like our "
                 "v2 rendezvous query? Retrying at another directory.",
                 escaped(reason));
        SEND_HS_DESC_FAILED_EVENT();
        break;
      default:
        log_warn(LD_REND, "Fetching v2 rendezvous descriptor failed: "
                 "http status %d (%s) response unexpected while "
                 "fetching v2 hidden service descriptor (server '%s:%d'). "
                 "Retrying at another directory.",
                 status_code, escaped(reason), conn->base_.address,
                 conn->base_.port);
        SEND_HS_DESC_FAILED_EVENT();
        break;
    }
  }

  /* Hidden-service descriptor upload (v0 or v2). */
  if (conn->base_.purpose == DIR_PURPOSE_UPLOAD_RENDDESC ||
      conn->base_.purpose == DIR_PURPOSE_UPLOAD_RENDDESC_V2) {
    log_info(LD_REND,"Uploaded rendezvous descriptor (status %d "
             "(%s))",
             status_code, escaped(reason));
    switch (status_code) {
      case 200:
        log_info(LD_REND,
                 "Uploading rendezvous descriptor: finished with status "
                 "200 (%s)", escaped(reason));
        break;
      case 400:
        log_warn(LD_REND,"http status 400 (%s) response from dirserver "
                 "'%s:%d'. Malformed rendezvous descriptor?",
                 escaped(reason), conn->base_.address, conn->base_.port);
        break;
      default:
        log_warn(LD_REND,"http status %d (%s) response unexpected (server "
                 "'%s:%d').",
                 status_code, escaped(reason), conn->base_.address,
                 conn->base_.port);
        break;
    }
  }

  /* Record bandwidth-accounting stats, then free everything we parsed. */
  note_client_request(conn->base_.purpose, was_compressed, orig_len);
  tor_free(body); tor_free(headers); tor_free(reason);
  return 0;
}
/** Called when a directory connection reaches EOF. */
int
connection_dir_reached_eof(dir_connection_t *conn)
{
  int result;

  if (conn->base_.state == DIR_CONN_STATE_CLIENT_READING) {
    /* We were waiting for the server's response; process it now. */
    result = connection_dir_client_reached_eof(conn);
    if (result == 0) /* success */
      conn->base_.state = DIR_CONN_STATE_CLIENT_FINISHED;
  } else {
    /* EOF in any other state means the exchange broke down. */
    log_info(LD_HTTP,"conn reached eof, not reading. [state=%d] Closing.",
             conn->base_.state);
    connection_close_immediate(TO_CONN(conn)); /* error: give up on flushing */
    connection_mark_for_close(TO_CONN(conn));
    return -1;
  }
  connection_mark_for_close(TO_CONN(conn));
  return result;
}
/** If any directory object is arriving, and it's over 10MB large, we're
* getting DoS'd. (As of 0.1.2.x, raw directories are about 1MB, and we never
* ask for more than 96 router descriptors at a time.)
*/
#define MAX_DIRECTORY_OBJECT_SIZE (10*(1<<20))
/** Read handler for directory connections. (That's connections <em>to</em>
* directory servers and connections <em>at</em> directory servers.)
*/
int
connection_dir_process_inbuf(dir_connection_t *conn)
{
  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);

  /* Directory clients write, then read data until they receive EOF;
   * directory servers read data until they get an HTTP command, then
   * write their response (when it's finished flushing, they mark for
   * close).
   */

  /* Server side: see whether a complete HTTP command has arrived yet. */
  if (conn->base_.state == DIR_CONN_STATE_SERVER_COMMAND_WAIT) {
    int handled = directory_handle_command(conn);
    if (handled < 0) {
      connection_mark_for_close(TO_CONN(conn));
      return -1;
    }
    return 0;
  }

  /* Client side: cap how much we're willing to buffer from one server. */
  if (connection_get_inbuf_len(TO_CONN(conn)) > MAX_DIRECTORY_OBJECT_SIZE) {
    log_warn(LD_HTTP, "Too much data received from directory connection: "
             "denial of service attempt, or you need to upgrade?");
    connection_mark_for_close(TO_CONN(conn));
    return -1;
  }

  /* Otherwise, leave the data on the inbuf until EOF arrives. */
  if (!conn->base_.inbuf_reached_eof)
    log_debug(LD_HTTP,"Got data, not eof. Leaving on inbuf.");
  return 0;
}
/** Called when we're about to finally unlink and free a directory connection:
* perform necessary accounting and cleanup */
void
connection_dir_about_to_close(dir_connection_t *dir_conn)
{
  connection_t *base = TO_CONN(dir_conn);

  /* A state before CLIENT_FINISHED means connecting or fetching never
   * completed: forget about this router, and maybe try again. */
  if (base->state < DIR_CONN_STATE_CLIENT_FINISHED)
    connection_dir_request_failed(dir_conn);

  /* If we were trying to fetch a v2 rend desc and did not succeed,
   * retry as needed.  (If a fetch is successful, the connection state
   * is changed to DIR_PURPOSE_HAS_FETCHED_RENDDESC to mark that
   * refetching is unnecessary.) */
  if (base->purpose == DIR_PURPOSE_FETCH_RENDDESC_V2 &&
      dir_conn->rend_data &&
      strlen(dir_conn->rend_data->onion_address) ==
        REND_SERVICE_ID_LEN_BASE32)
    rend_client_refetch_v2_renddesc(dir_conn->rend_data);
}
/** Create an http response for the client <b>conn</b> out of
* <b>status</b> and <b>reason_phrase</b>. Write it to <b>conn</b>.
*/
static void
write_http_status_line(dir_connection_t *conn, int status,
                       const char *reason_phrase)
{
  char line[256];
  const char *phrase = reason_phrase ? reason_phrase : "OK";

  /* A minimal HTTP/1.0 response: status line, blank line, no headers. */
  if (tor_snprintf(line, sizeof(line), "HTTP/1.0 %d %s\r\n\r\n",
                   status, phrase) < 0) {
    log_warn(LD_BUG,"status line too long.");
    return;
  }
  connection_write_to_buf(line, strlen(line), TO_CONN(conn));
}
/** Write the header for an HTTP/1.0 response onto <b>conn</b>-\>outbuf,
* with <b>type</b> as the Content-Type.
*
* If <b>length</b> is nonnegative, it is the Content-Length.
* If <b>encoding</b> is provided, it is the Content-Encoding.
* If <b>cache_lifetime</b> is greater than 0, the content may be cached for
* up to cache_lifetime seconds. Otherwise, the content may not be cached. */
static void
write_http_response_header_impl(dir_connection_t *conn, ssize_t length,
                                const char *type, const char *encoding,
                                const char *extra_headers,
                                long cache_lifetime)
{
  char date[RFC1123_TIME_LEN+1];
  /* All headers are assembled in this single stack buffer; 'cp' always
   * points at the current append position within it. */
  char tmp[1024];
  char *cp;
  time_t now = time(NULL);

  tor_assert(conn);

  format_rfc1123_time(date, now);
  cp = tmp;
  tor_snprintf(cp, sizeof(tmp),
               "HTTP/1.0 200 OK\r\nDate: %s\r\n",
               date);
  cp += strlen(tmp);
  if (type) {
    tor_snprintf(cp, sizeof(tmp)-(cp-tmp), "Content-Type: %s\r\n", type);
    cp += strlen(cp);
  }
  if (!is_local_addr(&conn->base_.addr)) {
    /* Don't report the source address for a nearby/private connection.
     * Otherwise we tend to mis-report in cases where incoming ports are
     * being forwarded to a Tor server running behind the firewall. */
    tor_snprintf(cp, sizeof(tmp)-(cp-tmp),
                 X_ADDRESS_HEADER "%s\r\n", conn->base_.address);
    cp += strlen(cp);
  }
  if (encoding) {
    tor_snprintf(cp, sizeof(tmp)-(cp-tmp),
                 "Content-Encoding: %s\r\n", encoding);
    cp += strlen(cp);
  }
  if (length >= 0) {
    tor_snprintf(cp, sizeof(tmp)-(cp-tmp),
                 "Content-Length: %ld\r\n", (long)length);
    cp += strlen(cp);
  }
  if (cache_lifetime > 0) {
    char expbuf[RFC1123_TIME_LEN+1];
    format_rfc1123_time(expbuf, now + cache_lifetime);
    /* We could say 'Cache-control: max-age=%d' here if we start doing
     * http/1.1 */
    tor_snprintf(cp, sizeof(tmp)-(cp-tmp),
                 "Expires: %s\r\n", expbuf);
    cp += strlen(cp);
  } else if (cache_lifetime == 0) {
    /* We could say 'Cache-control: no-cache' here if we start doing
     * http/1.1 */
    strlcpy(cp, "Pragma: no-cache\r\n", sizeof(tmp)-(cp-tmp));
    cp += strlen(cp);
  }
  if (extra_headers) {
    strlcpy(cp, extra_headers, sizeof(tmp)-(cp-tmp));
    cp += strlen(cp);
  }
  /* Terminate the header block with a blank line.  The memcpy deliberately
   * copies 3 bytes so the trailing NUL comes along with "\r\n". */
  if (sizeof(tmp)-(cp-tmp) > 3)
    memcpy(cp, "\r\n", 3);
  else
    tor_assert(0); /* headers exactly filled the buffer; should not happen */
  connection_write_to_buf(tmp, strlen(tmp), TO_CONN(conn));
}
/** As write_http_response_header_impl, but sets encoding and content-typed
* based on whether the response will be <b>compressed</b> or not. */
static void
write_http_response_header(dir_connection_t *conn, ssize_t length,
                           int compressed, long cache_lifetime)
{
  /* Compressed payloads are opaque bytes with deflate encoding; plain
   * payloads are text with the identity encoding. */
  const char *content_type =
    compressed ? "application/octet-stream" : "text/plain";
  const char *content_encoding = compressed ? "deflate" : "identity";

  write_http_response_header_impl(conn, length, content_type,
                                  content_encoding, NULL, cache_lifetime);
}
#if defined(INSTRUMENT_DOWNLOADS) || defined(RUNNING_DOXYGEN)
/** Per-request-type transfer counters, used only when download
 * instrumentation is compiled in. */
typedef struct request_t {
  uint64_t bytes; /**< How many bytes have we transferred? */
  uint64_t count; /**< How many requests have we made? */
} request_t;
/** Map used to keep track of how much data we've up/downloaded in what kind
 * of request. Maps from request type to pointer to request_t. */
static strmap_t *request_map = NULL;
/** Record that a client request of <b>purpose</b> was made, and that
 * <b>bytes</b> bytes of possibly <b>compressed</b> data were sent/received.
 * Used to keep track of how much we've up/downloaded in what kind of
 * request. */
static void
note_client_request(int purpose, int compressed, size_t bytes)
{
  char *key;
  const char *kind = NULL;
  /* Map the dir-connection purpose onto a short label for the request log.
   * NOTE(review): the upload purposes also use the "dl/" prefix (e.g.
   * "dl/ul-dir"); presumably historical -- confirm before renaming, since
   * external log consumers may depend on these strings. */
  switch (purpose) {
    case DIR_PURPOSE_FETCH_CONSENSUS:     kind = "dl/consensus"; break;
    case DIR_PURPOSE_FETCH_CERTIFICATE:   kind = "dl/cert"; break;
    case DIR_PURPOSE_FETCH_STATUS_VOTE:   kind = "dl/vote"; break;
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES: kind = "dl/detached_sig";
         break;
    case DIR_PURPOSE_FETCH_SERVERDESC:    kind = "dl/server"; break;
    case DIR_PURPOSE_FETCH_EXTRAINFO:     kind = "dl/extra"; break;
    case DIR_PURPOSE_UPLOAD_DIR:          kind = "dl/ul-dir"; break;
    case DIR_PURPOSE_UPLOAD_VOTE:         kind = "dl/ul-vote"; break;
    case DIR_PURPOSE_UPLOAD_SIGNATURES:   kind = "dl/ul-sig"; break;
    case DIR_PURPOSE_FETCH_RENDDESC:      kind = "dl/rend"; break;
    case DIR_PURPOSE_FETCH_RENDDESC_V2:   kind = "dl/rend2"; break;
    case DIR_PURPOSE_UPLOAD_RENDDESC:     kind = "dl/ul-rend"; break;
    case DIR_PURPOSE_UPLOAD_RENDDESC_V2:  kind = "dl/ul-rend2"; break;
  }
  /* Unknown purposes still get logged, just under a generic key; the ".z"
   * suffix marks compressed transfers in both cases. */
  if (kind) {
    tor_asprintf(&key, "%s%s", kind, compressed?".z":"");
  } else {
    tor_asprintf(&key, "unknown purpose (%d)%s",
                 purpose, compressed?".z":"");
  }
  note_request(key, bytes);
  tor_free(key);
}
/** Helper: initialize the request map to instrument downloads. */
static void
ensure_request_map_initialized(void)
{
if (!request_map)
request_map = strmap_new();
}
/** Record that a transfer of <b>bytes</b> bytes happened on behalf of a
 * request of type <b>key</b> (an arbitrary identifier): increment that
 * key's request counter and add <b>bytes</b> to its running byte total,
 * allocating the counter record the first time the key is seen. */
void
note_request(const char *key, size_t bytes)
{
  request_t *entry;
  ensure_request_map_initialized();
  entry = strmap_get(request_map, key);
  if (entry == NULL) {
    entry = tor_malloc_zero(sizeof(request_t));
    strmap_set(request_map, key, entry);
  }
  entry->count++;
  entry->bytes += bytes;
}
/** Return a newly allocated string holding a summary of bytes used per
 * request type.  Caller must free the result.  One line per request type,
 * sorted lexically by key. */
char *
directory_dump_request_log(void)
{
  smartlist_t *lines;
  char *result;
  strmap_iter_t *iter;

  ensure_request_map_initialized();

  lines = smartlist_new();

  /* Walk every (key -> request_t) entry and format one summary line. */
  for (iter = strmap_iter_init(request_map);
       !strmap_iter_done(iter);
       iter = strmap_iter_next(request_map, iter)) {
    const char *key;
    void *val;
    request_t *r;
    strmap_iter_get(iter, &key, &val);
    r = val;
    smartlist_add_asprintf(lines, "%s "U64_FORMAT" "U64_FORMAT"\n",
                           key, U64_PRINTF_ARG(r->bytes),
                           U64_PRINTF_ARG(r->count));
  }
  smartlist_sort_strings(lines);
  result = smartlist_join_strings(lines, "", 0, NULL);
  SMARTLIST_FOREACH(lines, char *, cp, tor_free(cp));
  smartlist_free(lines);
  return result;
}
#else
/* Download instrumentation is compiled out: provide no-op stubs with the
 * same interfaces so callers need no #ifdefs. */
static void
note_client_request(int purpose, int compressed, size_t bytes)
{
  (void)purpose;
  (void)compressed;
  (void)bytes;
}
/** No-op when INSTRUMENT_DOWNLOADS is not defined. */
void
note_request(const char *key, size_t bytes)
{
  (void)key;
  (void)bytes;
}
/** With instrumentation disabled there is no log; return a freshly
 * allocated placeholder so callers can still tor_free() the result. */
char *
directory_dump_request_log(void)
{
  return tor_strdup("Not supported.");
}
#endif
/** Decide whether a client would accept the consensus we have.
 *
 * The client lists the authorities it requires in <b>want_url</b>, as
 * "...consensus/<b>fpr</b>+<b>fpr</b>+<b>fpr</b>".  We return 1 iff more
 * than half of those authorities have signed <b>v</b>, and 0 otherwise.
 *
 * Each <b>fpr</b> may be an abbreviated fingerprint, i.e. a left substring
 * of the full authority identity digest.  Only whole-byte (even-length)
 * prefixes are compared; a trailing odd hex digit is silently ignored.
 */
int
client_likes_consensus(networkstatus_t *v, const char *want_url)
{
  smartlist_t *fprs = smartlist_new();
  int threshold;
  int n_matched = 0;

  dir_split_resource_into_fingerprints(want_url, fprs, NULL, 0);
  threshold = smartlist_len(fprs) / 2 + 1;

  SMARTLIST_FOREACH_BEGIN(fprs, const char *, hex) {
    char digest[DIGEST_LEN];
    size_t prefix_len = strlen(hex) / 2; /* whole bytes only */
    if (prefix_len > DIGEST_LEN)
      prefix_len = DIGEST_LEN;
    if (base16_decode(digest, DIGEST_LEN, hex, prefix_len * 2) < 0) {
      log_fn(LOG_PROTOCOL_WARN, LD_DIR,
             "Failed to decode requested authority digest %s.", hex);
      continue;
    }
    /* Count this authority iff it actually signed the consensus. */
    SMARTLIST_FOREACH_BEGIN(v->voters, networkstatus_voter_info_t *, voter) {
      if (smartlist_len(voter->sigs) &&
          tor_memeq(voter->identity_digest, digest, prefix_len)) {
        ++n_matched;
        break;
      }
    } SMARTLIST_FOREACH_END(voter);
    /* early exit, if we already have enough */
    if (n_matched >= threshold)
      break;
  } SMARTLIST_FOREACH_END(hex);

  SMARTLIST_FOREACH(fprs, char *, hex, tor_free(hex));
  smartlist_free(fprs);

  return n_matched >= threshold;
}
/** Helper function: called when a dirserver gets a complete HTTP GET
 * request.  Look for a request for a directory or for a rendezvous
 * service descriptor.  On finding one, write a response into
 * conn-\>outbuf.  If the request is unrecognized, send a 400.
 * Always return 0. */
static int
directory_handle_command_get(dir_connection_t *conn, const char *headers,
                             const char *req_body, size_t req_body_len)
{
  size_t dlen;
  char *url, *url_mem, *header;
  const or_options_t *options = get_options();
  time_t if_modified_since = 0;
  int compressed;
  size_t url_len;

  /* We ignore the body of a GET request. */
  (void)req_body;
  (void)req_body_len;

  log_debug(LD_DIRSERV,"Received GET command.");

  conn->base_.state = DIR_CONN_STATE_SERVER_WRITING;

  if (parse_http_url(headers, &url) < 0) {
    write_http_status_line(conn, 400, "Bad request");
    return 0;
  }
  if ((header = http_get_header(headers, "If-Modified-Since: "))) {
    struct tm tm;
    if (parse_http_time(header, &tm) == 0) {
      if (tor_timegm(&tm, &if_modified_since)<0)
        if_modified_since = 0;
    }
    /* The correct behavior on a malformed If-Modified-Since header is to
     * act as if no If-Modified-Since header had been given. */
    tor_free(header);
  }
  log_debug(LD_DIRSERV,"rewritten url as '%s'.", url);

  /* A trailing ".z" asks for the zlib-compressed variant of the resource;
   * strip it and remember.  <b>url_mem</b> keeps the original pointer so
   * we can free it even after <b>url</b> is advanced below. */
  url_mem = url;
  url_len = strlen(url);
  compressed = url_len > 2 && !strcmp(url+url_len-2, ".z");
  if (compressed) {
    url[url_len-2] = '\0';
    url_len -= 2;
  }

  /* "/tor/": serve the configured DirPort front page, if there is one. */
  if (!strcmp(url,"/tor/")) {
    const char *frontpage = get_dirportfrontpage();
    if (frontpage) {
      dlen = strlen(frontpage);
      /* Let's return a disclaimer page (users shouldn't use V1 anymore,
         and caches don't fetch '/', so this is safe). */
      /* [We don't check for write_bucket_low here, since we want to serve
       * this page no matter what.] */
      note_request(url, dlen);
      write_http_response_header_impl(conn, dlen, "text/html", "identity",
                                      NULL, DIRPORTFRONTPAGE_CACHE_LIFETIME);
      connection_write_to_buf(frontpage, dlen, TO_CONN(conn));
      goto done;
    }
    /* if no disclaimer file, fall through and continue */
  }

  if (!strcmp(url,"/tor/") || !strcmp(url,"/tor/dir")) { /* v1 dir fetch */
    cached_dir_t *d = dirserv_get_directory();
    if (!d) {
      log_info(LD_DIRSERV,"Client asked for the mirrored directory, but we "
               "don't have a good one yet. Sending 503 Dir not available.");
      write_http_status_line(conn, 503, "Directory unavailable");
      goto done;
    }
    if (d->published < if_modified_since) {
      write_http_status_line(conn, 304, "Not modified");
      goto done;
    }
    dlen = compressed ? d->dir_z_len : d->dir_len;
    if (global_write_bucket_low(TO_CONN(conn), dlen, 1)) {
      log_debug(LD_DIRSERV,
                "Client asked for the mirrored directory, but we've been "
                "writing too many bytes lately. Sending 503 Dir busy.");
      write_http_status_line(conn, 503, "Directory busy, try again later");
      goto done;
    }
    note_request(url, dlen);
    log_debug(LD_DIRSERV,"Dumping %sdirectory to client.",
              compressed?"compressed ":"");
    write_http_response_header(conn, dlen, compressed,
                               FULL_DIR_CACHE_LIFETIME);
    conn->cached_dir = d;
    conn->cached_dir_offset = 0;
    /* If the client did not ask for ".z" we still spool from the cached
     * compressed copy and inflate on the fly (zlib_new with compress==0). */
    if (!compressed)
      conn->zlib_state = tor_zlib_new(0, ZLIB_METHOD);
    /* Refcount the cached dir so it survives while we spool it. */
    ++d->refcnt;

    /* Prime the connection with some data. */
    conn->dir_spool_src = DIR_SPOOL_CACHED_DIR;
    connection_dirserv_flushed_some(conn);
    goto done;
  }

  if (!strcmp(url,"/tor/running-routers")) { /* running-routers fetch */
    cached_dir_t *d = dirserv_get_runningrouters();
    if (!d) {
      write_http_status_line(conn, 503, "Directory unavailable");
      goto done;
    }
    if (d->published < if_modified_since) {
      write_http_status_line(conn, 304, "Not modified");
      goto done;
    }
    dlen = compressed ? d->dir_z_len : d->dir_len;
    if (global_write_bucket_low(TO_CONN(conn), dlen, 1)) {
      log_info(LD_DIRSERV,
               "Client asked for running-routers, but we've been "
               "writing too many bytes lately. Sending 503 Dir busy.");
      write_http_status_line(conn, 503, "Directory busy, try again later");
      goto done;
    }
    note_request(url, dlen);
    write_http_response_header(conn, dlen, compressed,
                 RUNNINGROUTERS_CACHE_LIFETIME);
    connection_write_to_buf(compressed ? d->dir_z : d->dir, dlen,
                            TO_CONN(conn));
    goto done;
  }

  if (!strcmpstart(url, "/tor/status-vote/current/consensus")) {
    /* v3 network status fetch. */
    smartlist_t *dir_fps = smartlist_new();
    const char *request_type = NULL;
    long lifetime = NETWORKSTATUS_CACHE_LIFETIME;

    if (1) {
      networkstatus_t *v;
      time_t now = time(NULL);
      const char *want_fps = NULL;
      char *flavor = NULL;
      int flav = FLAV_NS;
#define CONSENSUS_URL_PREFIX "/tor/status-vote/current/consensus/"
#define CONSENSUS_FLAVORED_PREFIX "/tor/status-vote/current/consensus-"
      /* figure out the flavor if any, and who we wanted to sign the thing */
      if (!strcmpstart(url, CONSENSUS_FLAVORED_PREFIX)) {
        const char *f, *cp;
        f = url + strlen(CONSENSUS_FLAVORED_PREFIX);
        cp = strchr(f, '/');
        if (cp) {
          want_fps = cp+1;
          flavor = tor_strndup(f, cp-f);
        } else {
          flavor = tor_strdup(f);
        }
        flav = networkstatus_parse_flavor_name(flavor);
        if (flav < 0)
          flav = FLAV_NS;
      } else {
        if (!strcmpstart(url, CONSENSUS_URL_PREFIX))
          want_fps = url+strlen(CONSENSUS_URL_PREFIX);
      }

      v = networkstatus_get_latest_consensus_by_flavor(flav);

      if (v && want_fps &&
          !client_likes_consensus(v, want_fps)) {
        write_http_status_line(conn, 404, "Consensus not signed by sufficient "
                               "number of requested authorities");
        smartlist_free(dir_fps);
        geoip_note_ns_response(GEOIP_REJECT_NOT_ENOUGH_SIGS);
        tor_free(flavor);
        goto done;
      }

      {
        /* The spooling code keys consensuses by a DIGEST_LEN-sized buffer
         * holding the flavor name (or all zeros for the default flavor). */
        char *fp = tor_malloc_zero(DIGEST_LEN);
        if (flavor)
          strlcpy(fp, flavor, DIGEST_LEN);
        tor_free(flavor);
        smartlist_add(dir_fps, fp);
      }
      request_type = compressed?"v3.z":"v3";
      lifetime = (v && v->fresh_until > now) ? v->fresh_until - now : 0;
    }

    if (!smartlist_len(dir_fps)) { /* we failed to create/cache cp */
      write_http_status_line(conn, 503, "Network status object unavailable");
      smartlist_free(dir_fps);
      geoip_note_ns_response(GEOIP_REJECT_UNAVAILABLE);
      goto done;
    }

    if (!dirserv_remove_old_statuses(dir_fps, if_modified_since)) {
      write_http_status_line(conn, 404, "Not found");
      SMARTLIST_FOREACH(dir_fps, char *, cp, tor_free(cp));
      smartlist_free(dir_fps);
      geoip_note_ns_response(GEOIP_REJECT_NOT_FOUND);
      goto done;
    } else if (!smartlist_len(dir_fps)) {
      write_http_status_line(conn, 304, "Not modified");
      SMARTLIST_FOREACH(dir_fps, char *, cp, tor_free(cp));
      smartlist_free(dir_fps);
      geoip_note_ns_response(GEOIP_REJECT_NOT_MODIFIED);
      goto done;
    }

    dlen = dirserv_estimate_data_size(dir_fps, 0, compressed);
    if (global_write_bucket_low(TO_CONN(conn), dlen, 2)) {
      log_debug(LD_DIRSERV,
                "Client asked for network status lists, but we've been "
                "writing too many bytes lately. Sending 503 Dir busy.");
      write_http_status_line(conn, 503, "Directory busy, try again later");
      SMARTLIST_FOREACH(dir_fps, char *, fp, tor_free(fp));
      smartlist_free(dir_fps);
      geoip_note_ns_response(GEOIP_REJECT_BUSY);
      goto done;
    }

    if (1) {
      /* Record this client in the GeoIP stats and mark the start of the
       * dirreq for download-time measurement. */
      struct in_addr in;
      tor_addr_t addr;
      if (tor_inet_aton((TO_CONN(conn))->address, &in)) {
        tor_addr_from_ipv4h(&addr, ntohl(in.s_addr));
        geoip_note_client_seen(GEOIP_CLIENT_NETWORKSTATUS,
                               &addr, NULL,
                               time(NULL));
        geoip_note_ns_response(GEOIP_SUCCESS);
        /* Note that a request for a network status has started, so that we
         * can measure the download time later on. */
        if (conn->dirreq_id)
          geoip_start_dirreq(conn->dirreq_id, dlen, DIRREQ_TUNNELED);
        else
          geoip_start_dirreq(TO_CONN(conn)->global_identifier, dlen,
                             DIRREQ_DIRECT);
      }
    }

    // note_request(request_type,dlen);
    (void) request_type;
    write_http_response_header(conn, -1, compressed,
                               smartlist_len(dir_fps) == 1 ? lifetime : 0);
    conn->fingerprint_stack = dir_fps;
    if (! compressed)
      conn->zlib_state = tor_zlib_new(0, ZLIB_METHOD);

    /* Prime the connection with some data. */
    conn->dir_spool_src = DIR_SPOOL_NETWORKSTATUS;
    connection_dirserv_flushed_some(conn);
    goto done;
  }

  if (!strcmpstart(url,"/tor/status-vote/current/") ||
      !strcmpstart(url,"/tor/status-vote/next/")) {
    /* XXXX If-modified-since is only implemented for the current
     * consensus: that's probably fine, since it's the only vote document
     * people fetch much. */
    int current;
    ssize_t body_len = 0;
    ssize_t estimated_len = 0;
    smartlist_t *items = smartlist_new();
    smartlist_t *dir_items = smartlist_new();
    int lifetime = 60; /* XXXX023 should actually use vote intervals. */
    url += strlen("/tor/status-vote/");
    current = !strcmpstart(url, "current/");
    url = strchr(url, '/');
    tor_assert(url);
    ++url;
    if (!strcmp(url, "consensus")) {
      const char *item;
      tor_assert(!current); /* we handle current consensus specially above,
                             * since it wants to be spooled. */
      if ((item = dirvote_get_pending_consensus(FLAV_NS)))
        smartlist_add(items, (char*)item);
    } else if (!current && !strcmp(url, "consensus-signatures")) {
      /* XXXX the spec says that we should implement
       * current/consensus-signatures too.  It doesn't seem to be needed,
       * though. */
      const char *item;
      if ((item=dirvote_get_pending_detached_signatures()))
        smartlist_add(items, (char*)item);
    } else if (!strcmp(url, "authority")) {
      const cached_dir_t *d;
      int flags = DGV_BY_ID |
        (current ? DGV_INCLUDE_PREVIOUS : DGV_INCLUDE_PENDING);
      if ((d=dirvote_get_vote(NULL, flags)))
        smartlist_add(dir_items, (cached_dir_t*)d);
    } else {
      const cached_dir_t *d;
      smartlist_t *fps = smartlist_new();
      int flags;
      if (!strcmpstart(url, "d/")) {
        url += 2;
        flags = DGV_INCLUDE_PENDING | DGV_INCLUDE_PREVIOUS;
      } else {
        flags = DGV_BY_ID |
          (current ? DGV_INCLUDE_PREVIOUS : DGV_INCLUDE_PENDING);
      }
      dir_split_resource_into_fingerprints(url, fps, NULL,
                                           DSR_HEX|DSR_SORT_UNIQ);
      SMARTLIST_FOREACH(fps, char *, fp, {
          if ((d = dirvote_get_vote(fp, flags)))
            smartlist_add(dir_items, (cached_dir_t*)d);
          tor_free(fp);
        });
      smartlist_free(fps);
    }
    /* NOTE(review): <b>items</b> and <b>dir_items</b> appear to hold
     * borrowed pointers into the dirvote caches (they are never freed
     * here beyond the lists themselves) -- confirm ownership before
     * changing the cleanup at vote_done. */
    if (!smartlist_len(dir_items) && !smartlist_len(items)) {
      write_http_status_line(conn, 404, "Not found");
      goto vote_done;
    }
    SMARTLIST_FOREACH(dir_items, cached_dir_t *, d,
                      body_len += compressed ? d->dir_z_len : d->dir_len);
    estimated_len += body_len;
    SMARTLIST_FOREACH(items, const char *, item, {
        size_t ln = strlen(item);
        if (compressed) {
          estimated_len += ln/2;
        } else {
          body_len += ln; estimated_len += ln;
        }
      });

    if (global_write_bucket_low(TO_CONN(conn), estimated_len, 2)) {
      write_http_status_line(conn, 503, "Directory busy, try again later.");
      goto vote_done;
    }
    write_http_response_header(conn, body_len ? body_len : -1, compressed,
                               lifetime);

    if (smartlist_len(items)) {
      if (compressed) {
        conn->zlib_state = tor_zlib_new(1, ZLIB_METHOD);
        SMARTLIST_FOREACH(items, const char *, c,
                 connection_write_to_buf_zlib(c, strlen(c), conn, 0));
        connection_write_to_buf_zlib("", 0, conn, 1);
      } else {
        SMARTLIST_FOREACH(items, const char *, c,
                        connection_write_to_buf(c, strlen(c), TO_CONN(conn)));
      }
    } else {
      SMARTLIST_FOREACH(dir_items, cached_dir_t *, d,
          connection_write_to_buf(compressed ? d->dir_z : d->dir,
                                  compressed ? d->dir_z_len : d->dir_len,
                                  TO_CONN(conn)));
    }
   vote_done:
    smartlist_free(items);
    smartlist_free(dir_items);
    goto done;
  }

  if (!strcmpstart(url, "/tor/micro/d/")) {
    /* Microdescriptors, requested by base64-encoded digest256. */
    smartlist_t *fps = smartlist_new();

    dir_split_resource_into_fingerprints(url+strlen("/tor/micro/d/"),
                                      fps, NULL,
                                      DSR_DIGEST256|DSR_BASE64|DSR_SORT_UNIQ);

    if (!dirserv_have_any_microdesc(fps)) {
      write_http_status_line(conn, 404, "Not found");
      SMARTLIST_FOREACH(fps, char *, fp, tor_free(fp));
      smartlist_free(fps);
      goto done;
    }
    dlen = dirserv_estimate_microdesc_size(fps, compressed);
    if (global_write_bucket_low(TO_CONN(conn), dlen, 2)) {
      log_info(LD_DIRSERV,
               "Client asked for server descriptors, but we've been "
               "writing too many bytes lately. Sending 503 Dir busy.");
      write_http_status_line(conn, 503, "Directory busy, try again later");
      SMARTLIST_FOREACH(fps, char *, fp, tor_free(fp));
      smartlist_free(fps);
      goto done;
    }

    write_http_response_header(conn, -1, compressed, MICRODESC_CACHE_LIFETIME);
    conn->dir_spool_src = DIR_SPOOL_MICRODESC;
    conn->fingerprint_stack = fps;
    if (compressed)
      conn->zlib_state = tor_zlib_new(1, ZLIB_METHOD);

    connection_dirserv_flushed_some(conn);
    goto done;
  }

  if (!strcmpstart(url,"/tor/server/") ||
      (!options->BridgeAuthoritativeDir &&
       !options->BridgeRelay && !strcmpstart(url,"/tor/extra/"))) {
    /* Server descriptors or extra-info documents, spooled by fingerprint
     * or by descriptor digest. */
    int res;
    const char *msg;
    const char *request_type = NULL;
    int cache_lifetime = 0;
    int is_extra = !strcmpstart(url,"/tor/extra/");
    url += is_extra ? strlen("/tor/extra/") : strlen("/tor/server/");
    conn->fingerprint_stack = smartlist_new();
    res = dirserv_get_routerdesc_fingerprints(conn->fingerprint_stack, url,
                                              &msg,
                                              !connection_dir_is_encrypted(conn),
                                              is_extra);

    if (!strcmpstart(url, "fp/")) {
      request_type = compressed?"/tor/server/fp.z":"/tor/server/fp";
      if (smartlist_len(conn->fingerprint_stack) == 1)
        cache_lifetime = ROUTERDESC_CACHE_LIFETIME;
    } else if (!strcmpstart(url, "authority")) {
      request_type = compressed?"/tor/server/authority.z":
        "/tor/server/authority";
      cache_lifetime = ROUTERDESC_CACHE_LIFETIME;
    } else if (!strcmpstart(url, "all")) {
      request_type = compressed?"/tor/server/all.z":"/tor/server/all";
      cache_lifetime = FULL_DIR_CACHE_LIFETIME;
    } else if (!strcmpstart(url, "d/")) {
      request_type = compressed?"/tor/server/d.z":"/tor/server/d";
      if (smartlist_len(conn->fingerprint_stack) == 1)
        cache_lifetime = ROUTERDESC_BY_DIGEST_CACHE_LIFETIME;
    } else {
      request_type = "/tor/server/?";
    }
    (void) request_type; /* usable for note_request. */
    if (!strcmpstart(url, "d/"))
      conn->dir_spool_src =
        is_extra ? DIR_SPOOL_EXTRA_BY_DIGEST : DIR_SPOOL_SERVER_BY_DIGEST;
    else
      conn->dir_spool_src =
        is_extra ? DIR_SPOOL_EXTRA_BY_FP : DIR_SPOOL_SERVER_BY_FP;

    if (!dirserv_have_any_serverdesc(conn->fingerprint_stack,
                                     conn->dir_spool_src)) {
      res = -1;
      msg = "Not found";
    }

    if (res < 0)
      write_http_status_line(conn, 404, msg);
    else {
      dlen = dirserv_estimate_data_size(conn->fingerprint_stack,
                                        1, compressed);
      if (global_write_bucket_low(TO_CONN(conn), dlen, 2)) {
        log_info(LD_DIRSERV,
                 "Client asked for server descriptors, but we've been "
                 "writing too many bytes lately. Sending 503 Dir busy.");
        write_http_status_line(conn, 503, "Directory busy, try again later");
        conn->dir_spool_src = DIR_SPOOL_NONE;
        goto done;
      }
      write_http_response_header(conn, -1, compressed, cache_lifetime);
      if (compressed)
        conn->zlib_state = tor_zlib_new(1, ZLIB_METHOD);
      /* Prime the connection with some data. */
      connection_dirserv_flushed_some(conn);
    }
    goto done;
  }

  if (!strcmpstart(url,"/tor/keys/")) {
    /* Authority key certificates, selected by id fingerprint, signing-key
     * digest, or (fp,sk) pairs. */
    smartlist_t *certs = smartlist_new();
    ssize_t len = -1;
    if (!strcmp(url, "/tor/keys/all")) {
      authority_cert_get_all(certs);
    } else if (!strcmp(url, "/tor/keys/authority")) {
      authority_cert_t *cert = get_my_v3_authority_cert();
      if (cert)
        smartlist_add(certs, cert);
    } else if (!strcmpstart(url, "/tor/keys/fp/")) {
      smartlist_t *fps = smartlist_new();
      dir_split_resource_into_fingerprints(url+strlen("/tor/keys/fp/"),
                                           fps, NULL,
                                           DSR_HEX|DSR_SORT_UNIQ);
      SMARTLIST_FOREACH(fps, char *, d, {
          authority_cert_t *c = authority_cert_get_newest_by_id(d);
          if (c) smartlist_add(certs, c);
          tor_free(d);
        });
      smartlist_free(fps);
    } else if (!strcmpstart(url, "/tor/keys/sk/")) {
      smartlist_t *fps = smartlist_new();
      dir_split_resource_into_fingerprints(url+strlen("/tor/keys/sk/"),
                                           fps, NULL,
                                           DSR_HEX|DSR_SORT_UNIQ);
      SMARTLIST_FOREACH(fps, char *, d, {
          authority_cert_t *c = authority_cert_get_by_sk_digest(d);
          if (c) smartlist_add(certs, c);
          tor_free(d);
        });
      smartlist_free(fps);
    } else if (!strcmpstart(url, "/tor/keys/fp-sk/")) {
      smartlist_t *fp_sks = smartlist_new();
      dir_split_resource_into_fingerprint_pairs(url+strlen("/tor/keys/fp-sk/"),
                                                fp_sks);
      SMARTLIST_FOREACH(fp_sks, fp_pair_t *, pair, {
          authority_cert_t *c = authority_cert_get_by_digests(pair->first,
                                                              pair->second);
          if (c) smartlist_add(certs, c);
          tor_free(pair);
        });
      smartlist_free(fp_sks);
    } else {
      write_http_status_line(conn, 400, "Bad request");
      goto keys_done;
    }
    if (!smartlist_len(certs)) {
      write_http_status_line(conn, 404, "Not found");
      goto keys_done;
    }
    /* NOTE(review): <b>certs</b> holds borrowed pointers (presumably into
     * the cert cache); only the list itself is freed at keys_done --
     * confirm before changing. */
    SMARTLIST_FOREACH(certs, authority_cert_t *, c,
      if (c->cache_info.published_on < if_modified_since)
        SMARTLIST_DEL_CURRENT(certs, c));
    if (!smartlist_len(certs)) {
      write_http_status_line(conn, 304, "Not modified");
      goto keys_done;
    }
    len = 0;
    SMARTLIST_FOREACH(certs, authority_cert_t *, c,
                      len += c->cache_info.signed_descriptor_len);

    if (global_write_bucket_low(TO_CONN(conn), compressed?len/2:len, 2)) {
      write_http_status_line(conn, 503, "Directory busy, try again later.");
      goto keys_done;
    }

    write_http_response_header(conn, compressed?-1:len, compressed, 60*60);
    if (compressed) {
      conn->zlib_state = tor_zlib_new(1, ZLIB_METHOD);
      SMARTLIST_FOREACH(certs, authority_cert_t *, c,
            connection_write_to_buf_zlib(c->cache_info.signed_descriptor_body,
                                         c->cache_info.signed_descriptor_len,
                                         conn, 0));
      connection_write_to_buf_zlib("", 0, conn, 1);
    } else {
      SMARTLIST_FOREACH(certs, authority_cert_t *, c,
            connection_write_to_buf(c->cache_info.signed_descriptor_body,
                                    c->cache_info.signed_descriptor_len,
                                    TO_CONN(conn)));
    }
   keys_done:
    smartlist_free(certs);
    goto done;
  }

  if (options->HidServDirectoryV2 &&
      connection_dir_is_encrypted(conn) &&
      !strcmpstart(url,"/tor/rendezvous2/")) {
    /* Handle v2 rendezvous descriptor fetch request. */
    const char *descp;
    const char *query = url + strlen("/tor/rendezvous2/");
    if (strlen(query) == REND_DESC_ID_V2_LEN_BASE32) {
      log_info(LD_REND, "Got a v2 rendezvous descriptor request for ID '%s'",
               safe_str(query));
      switch (rend_cache_lookup_v2_desc_as_dir(query, &descp)) {
        case 1: /* valid */
          write_http_response_header(conn, strlen(descp), 0, 0);
          connection_write_to_buf(descp, strlen(descp), TO_CONN(conn));
          break;
        case 0: /* well-formed but not present */
          write_http_status_line(conn, 404, "Not found");
          break;
        case -1: /* not well-formed */
          write_http_status_line(conn, 400, "Bad request");
          break;
      }
    } else { /* not well-formed */
      write_http_status_line(conn, 400, "Bad request");
    }
    goto done;
  }

  if (options->HSAuthoritativeDir && !strcmpstart(url,"/tor/rendezvous/")) {
    /* rendezvous descriptor fetch */
    const char *descp;
    size_t desc_len;
    const char *query = url+strlen("/tor/rendezvous/");
    log_info(LD_REND, "Handling rendezvous descriptor get");
    switch (rend_cache_lookup_desc(query, 0, &descp, &desc_len)) {
      case 1: /* valid */
        write_http_response_header_impl(conn, desc_len,
                                        "application/octet-stream",
                                        NULL, NULL, 0);
        note_request("/tor/rendezvous?/", desc_len);
        /* need to send descp separately, because it may include NULs */
        connection_write_to_buf(descp, desc_len, TO_CONN(conn));
        break;
      case 0: /* well-formed but not present */
        write_http_status_line(conn, 404, "Not found");
        break;
      case -1: /* not well-formed */
        write_http_status_line(conn, 400, "Bad request");
        break;
    }
    goto done;
  }

  if (options->BridgeAuthoritativeDir &&
      options->BridgePassword_AuthDigest_ &&
      connection_dir_is_encrypted(conn) &&
      !strcmp(url,"/tor/networkstatus-bridges")) {
    /* Password-protected bridge networkstatus, served only over an
     * encrypted dir connection. */
    char *status;
    char digest[DIGEST256_LEN];

    header = http_get_header(headers, "Authorization: Basic ");
    if (header)
      crypto_digest256(digest, header, strlen(header), DIGEST_SHA256);

    /* now make sure the password is there and right */
    if (!header ||
        tor_memneq(digest,
                   options->BridgePassword_AuthDigest_, DIGEST256_LEN)) {
      /* 404 (not 401) so we don't leak that this resource exists. */
      write_http_status_line(conn, 404, "Not found");
      tor_free(header);
      goto done;
    }
    tor_free(header);

    /* all happy now. send an answer. */
    status = networkstatus_getinfo_by_purpose("bridge", time(NULL));
    dlen = strlen(status);
    write_http_response_header(conn, dlen, 0, 0);
    connection_write_to_buf(status, dlen, TO_CONN(conn));
    tor_free(status);
    goto done;
  }

  if (!strcmpstart(url,"/tor/bytes.txt")) {
    /* Instrumentation summary (see directory_dump_request_log). */
    char *bytes = directory_dump_request_log();
    size_t len = strlen(bytes);
    write_http_response_header(conn, len, 0, 0);
    connection_write_to_buf(bytes, len, TO_CONN(conn));
    tor_free(bytes);
    goto done;
  }

  if (!strcmp(url,"/tor/robots.txt")) { /* /robots.txt will have been
                                           rewritten to /tor/robots.txt */
    char robots[] = "User-agent: *\r\nDisallow: /\r\n";
    size_t len = strlen(robots);
    write_http_response_header(conn, len, 0, ROBOTS_CACHE_LIFETIME);
    connection_write_to_buf(robots, len, TO_CONN(conn));
    goto done;
  }

  if (!strcmp(url,"/tor/dbg-stability.txt")) {
    /* Router-stability debugging dump; reachability-testing authorities
     * only. */
    const char *stability;
    size_t len;
    if (options->BridgeAuthoritativeDir ||
        ! authdir_mode_tests_reachability(options) ||
        ! (stability = rep_hist_get_router_stability_doc(time(NULL)))) {
      write_http_status_line(conn, 404, "Not found.");
      goto done;
    }

    len = strlen(stability);
    write_http_response_header(conn, len, 0, 0);
    connection_write_to_buf(stability, len, TO_CONN(conn));
    goto done;
  }

#if defined(EXPORTMALLINFO) && defined(HAVE_MALLOC_H) && defined(HAVE_MALLINFO)
#define ADD_MALLINFO_LINE(x) do {                               \
    smartlist_add_asprintf(lines, "%s %d\n", #x, mi.x);         \
  }while(0);

  /* Allocator statistics; restricted to requests from 127.0.0.1. */
  if (!strcmp(url,"/tor/mallinfo.txt") &&
      (tor_addr_eq_ipv4h(&conn->base_.addr, 0x7f000001ul))) {
    char *result;
    size_t len;
    struct mallinfo mi;
    smartlist_t *lines;

    memset(&mi, 0, sizeof(mi));
    mi = mallinfo();
    lines = smartlist_new();

    ADD_MALLINFO_LINE(arena)
    ADD_MALLINFO_LINE(ordblks)
    ADD_MALLINFO_LINE(smblks)
    ADD_MALLINFO_LINE(hblks)
    ADD_MALLINFO_LINE(hblkhd)
    ADD_MALLINFO_LINE(usmblks)
    ADD_MALLINFO_LINE(fsmblks)
    ADD_MALLINFO_LINE(uordblks)
    ADD_MALLINFO_LINE(fordblks)
    ADD_MALLINFO_LINE(keepcost)

    result = smartlist_join_strings(lines, "", 0, NULL);
    SMARTLIST_FOREACH(lines, char *, cp, tor_free(cp));
    smartlist_free(lines);

    len = strlen(result);
    write_http_response_header(conn, len, 0, 0);
    connection_write_to_buf(result, len, TO_CONN(conn));
    tor_free(result);
    goto done;
  }
#endif

  /* we didn't recognize the url */
  write_http_status_line(conn, 404, "Not found");

 done:
  tor_free(url_mem);
  return 0;
}
/** Helper function: called when a dirserver gets a complete HTTP POST
 * request.  Look for an uploaded server descriptor or rendezvous
 * service descriptor.  On finding one, process it and write a
 * response into conn-\>outbuf.  If the request is unrecognized, send a
 * 400.  Always return 0. */
static int
directory_handle_command_post(dir_connection_t *conn, const char *headers,
                              const char *body, size_t body_len)
{
  char *url = NULL;
  const or_options_t *options = get_options();

  log_debug(LD_DIRSERV,"Received POST command.");

  conn->base_.state = DIR_CONN_STATE_SERVER_WRITING;

  if (parse_http_url(headers, &url) < 0) {
    write_http_status_line(conn, 400, "Bad request");
    return 0;
  }
  log_debug(LD_DIRSERV,"rewritten url as '%s'.", url);

  /* Handle v2 rendezvous service publish request. */
  if (options->HidServDirectoryV2 &&
      connection_dir_is_encrypted(conn) &&
      !strcmpstart(url,"/tor/rendezvous2/publish")) {
    /* NOTE(review): only <b>body</b> is passed here, not body_len;
     * presumably the store function relies on NUL-termination of the
     * fetched body -- confirm. */
    switch (rend_cache_store_v2_desc_as_dir(body)) {
      case -2:
        log_info(LD_REND, "Rejected v2 rend descriptor (length %d) from %s "
                 "since we're not currently a hidden service directory.",
                 (int)body_len, conn->base_.address);
        write_http_status_line(conn, 503, "Currently not acting as v2 "
                               "hidden service directory");
        break;
      case -1:
        log_warn(LD_REND, "Rejected v2 rend descriptor (length %d) from %s.",
                 (int)body_len, conn->base_.address);
        write_http_status_line(conn, 400,
                               "Invalid v2 service descriptor rejected");
        break;
      default:
        write_http_status_line(conn, 200, "Service descriptor (v2) stored");
        log_info(LD_REND, "Handled v2 rendezvous descriptor post: accepted");
    }
    goto done;
  }

  if (!authdir_mode(options)) {
    /* we just provide cached directories; we don't want to
     * receive anything. */
    write_http_status_line(conn, 400, "Nonauthoritative directory does not "
                           "accept posted server descriptors");
    goto done;
  }

  if (authdir_mode_handles_descs(options, -1) &&
      !strcmp(url,"/tor/")) { /* server descriptor post */
    const char *msg = "[None]";
    uint8_t purpose = authdir_mode_bridge(options) ?
                      ROUTER_PURPOSE_BRIDGE : ROUTER_PURPOSE_GENERAL;
    was_router_added_t r = dirserv_add_multiple_descriptors(body, purpose,
                                             conn->base_.address, &msg);
    tor_assert(msg);

    if (WRA_WAS_ADDED(r))
      dirserv_get_directory(); /* rebuild and write to disk */

    if (r == ROUTER_ADDED_NOTIFY_GENERATOR) {
      /* Accepted with a message. */
      log_info(LD_DIRSERV,
               "Problematic router descriptor or extra-info from %s "
               "(\"%s\").",
               conn->base_.address, msg);
      write_http_status_line(conn, 400, msg);
    } else if (r == ROUTER_ADDED_SUCCESSFULLY) {
      write_http_status_line(conn, 200, msg);
    } else if (WRA_WAS_OUTDATED(r)) {
      /* Duplicate/older descriptor: report via header rather than an
       * error status. */
      write_http_response_header_impl(conn, -1, NULL, NULL,
                                      "X-Descriptor-Not-New: Yes\r\n", -1);
    } else {
      log_info(LD_DIRSERV,
               "Rejected router descriptor or extra-info from %s "
               "(\"%s\").",
               conn->base_.address, msg);
      write_http_status_line(conn, 400, msg);
    }
    goto done;
  }

  if (options->HSAuthoritativeDir &&
      !strcmpstart(url,"/tor/rendezvous/publish")) {
    /* rendezvous descriptor post */
    log_info(LD_REND, "Handling rendezvous descriptor post.");
    if (rend_cache_store(body, body_len, 1, NULL) < 0) {
      log_fn(LOG_PROTOCOL_WARN, LD_DIRSERV,
             "Rejected rend descriptor (length %d) from %s.",
             (int)body_len, conn->base_.address);
      write_http_status_line(conn, 400,
                             "Invalid v0 service descriptor rejected");
    } else {
      write_http_status_line(conn, 200, "Service descriptor (v0) stored");
    }
    goto done;
  }

  if (authdir_mode_v3(options) &&
      !strcmp(url,"/tor/post/vote")) { /* v3 networkstatus vote */
    const char *msg = "OK";
    int status;
    if (dirvote_add_vote(body, &msg, &status)) {
      write_http_status_line(conn, status, "Vote stored");
    } else {
      tor_assert(msg);
      log_warn(LD_DIRSERV, "Rejected vote from %s (\"%s\").",
               conn->base_.address, msg);
      write_http_status_line(conn, status, msg);
    }
    goto done;
  }

  if (authdir_mode_v3(options) &&
      !strcmp(url,"/tor/post/consensus-signature")) { /* sigs on consensus. */
    const char *msg = NULL;
    if (dirvote_add_signatures(body, conn->base_.address, &msg)>=0) {
      write_http_status_line(conn, 200, msg?msg:"Signatures stored");
    } else {
      log_warn(LD_DIR, "Unable to store signatures posted by %s: %s",
               conn->base_.address, msg?msg:"???");
      write_http_status_line(conn, 400, msg?msg:"Unable to store signatures");
    }
    goto done;
  }

  /* we didn't recognize the url */
  write_http_status_line(conn, 404, "Not found");

 done:
  tor_free(url);
  return 0;
}
/** Called when a dirserver receives data on a directory connection;
 * looks for an HTTP request.  If the request is complete, remove it
 * from the inbuf, try to process it; otherwise, leave it on the
 * buffer.  Return a 0 on success, or -1 on error.
 */
static int
directory_handle_command(dir_connection_t *conn)
{
  char *headers = NULL, *body = NULL;
  size_t body_len = 0;
  int result;
  int fetch_status;

  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);

  fetch_status = connection_fetch_from_buf_http(TO_CONN(conn),
                                                &headers, MAX_HEADERS_SIZE,
                                                &body, &body_len,
                                                MAX_DIR_UL_SIZE, 0);
  if (fetch_status == -1) {
    /* The request overflowed our limits. */
    log_warn(LD_DIRSERV,
             "Request too large from address '%s' to DirPort. Closing.",
             safe_str(conn->base_.address));
    return -1;
  }
  if (fetch_status == 0) {
    log_debug(LD_DIRSERV,"command not all here yet.");
    return 0;
  }
  /* fetch_status == 1: a complete request has been pulled off the inbuf. */

  http_set_address_origin(headers, TO_CONN(conn));

  if (!strncasecmp(headers,"GET",3)) {
    result = directory_handle_command_get(conn, headers, body, body_len);
  } else if (!strncasecmp(headers,"POST",4)) {
    result = directory_handle_command_post(conn, headers, body, body_len);
  } else {
    log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
           "Got headers %s with unknown command. Closing.",
           escaped(headers));
    result = -1;
  }

  tor_free(headers);
  tor_free(body);
  return result;
}
/** Write handler for directory connections; called when all data has
 * been flushed. Close the connection or wait for a response as
 * appropriate.
 */
int
connection_dir_finished_flushing(dir_connection_t *conn)
{
  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);
  /* Note that we have finished writing the directory response. For direct
   * connections this means we're done, for tunneled connections its only
   * an intermediate step. */
  if (conn->dirreq_id)
    geoip_change_dirreq_state(conn->dirreq_id, DIRREQ_TUNNELED,
                              DIRREQ_FLUSHING_DIR_CONN_FINISHED);
  else
    geoip_change_dirreq_state(TO_CONN(conn)->global_identifier,
                              DIRREQ_DIRECT,
                              DIRREQ_FLUSHING_DIR_CONN_FINISHED);
  switch (conn->base_.state) {
    case DIR_CONN_STATE_CONNECTING:
    case DIR_CONN_STATE_CLIENT_SENDING:
      /* Client side: our request is fully sent; switch to reading the
       * server's reply. */
      log_debug(LD_DIR,"client finished sending command.");
      conn->base_.state = DIR_CONN_STATE_CLIENT_READING;
      return 0;
    case DIR_CONN_STATE_SERVER_WRITING:
      if (conn->dir_spool_src != DIR_SPOOL_NONE) {
#ifdef USE_BUFFEREVENTS
        /* This can happen with paired bufferevents, since a paired connection
         * can flush immediately when you write to it, making the subsequent
         * check in connection_handle_write_cb() decide that the connection
         * is flushed. */
        log_debug(LD_DIRSERV, "Emptied a dirserv buffer, but still spooling.");
#else
        /* Without bufferevents, an empty outbuf while a spool source is
         * still set indicates a bug; close the connection. */
        log_warn(LD_BUG, "Emptied a dirserv buffer, but it's still spooling!");
        connection_mark_for_close(TO_CONN(conn));
#endif
      } else {
        /* Entire response written: the server side is done with this
         * connection. */
        log_debug(LD_DIRSERV, "Finished writing server response. Closing.");
        connection_mark_for_close(TO_CONN(conn));
      }
      return 0;
    default:
      log_warn(LD_BUG,"called in unexpected state %d.",
               conn->base_.state);
      tor_fragile_assert();
      return -1;
  }
  return 0;
}
/** Connected handler for directory connections: begin sending data to the
 * server */
int
connection_dir_finished_connecting(dir_connection_t *conn)
{
  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);
  tor_assert(conn->base_.state == DIR_CONN_STATE_CONNECTING);
  log_debug(LD_HTTP,"Dir connection to router %s:%u established.",
            conn->base_.address,conn->base_.port);
  /* Switching to CLIENT_SENDING makes the main loop start flushing the
   * already-queued request out to the server. */
  conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; /* start flushing conn */
  return 0;
}
/** Decide which download schedule we want to use based on descriptor type
 * in <b>dls</b> and whether we are acting as directory <b>server</b>, and
 * then return a list of int pointers defining download delays in seconds.
 * Helper function for download_status_increment_failure() and
 * download_status_reset(). */
static const smartlist_t *
find_dl_schedule_and_len(download_status_t *dls, int server)
{
  switch (dls->schedule) {
    case DL_SCHED_GENERIC:
      if (server)
        return get_options()->TestingServerDownloadSchedule;
      else
        return get_options()->TestingClientDownloadSchedule;
    case DL_SCHED_CONSENSUS:
      if (server)
        return get_options()->TestingServerConsensusDownloadSchedule;
      else
        return get_options()->TestingClientConsensusDownloadSchedule;
    case DL_SCHED_BRIDGE:
      /* Bridges use a single schedule regardless of server status. */
      return get_options()->TestingBridgeDownloadSchedule;
    default:
      tor_assert(0);
  }
  /* Unreachable: tor_assert(0) above aborts.  This return keeps the
   * function well-defined (and silences "control reaches end of non-void
   * function" warnings) if assertions are ever compiled out. */
  return NULL;
}
/** Called when an attempt to download <b>dls</b> has failed with HTTP status
 * <b>status_code</b>.  Unless the failure was a 503 ("busy") seen by us as a
 * client, bump the failure counter; then compute the retry delay from the
 * relevant download schedule, record it in <b>dls</b>-\>next_attempt_at, and
 * return that time. */
time_t
download_status_increment_failure(download_status_t *dls, int status_code,
                                  const char *item, int server, time_t now)
{
  const smartlist_t *sched;
  int n_fails;
  int delay;

  tor_assert(dls);

  /* A 503 from a directory we queried as a client just means "busy"; it
   * doesn't count against the object we were trying to fetch. */
  if (status_code != 503 || server) {
    if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1)
      ++dls->n_download_failures;
  }

  sched = find_dl_schedule_and_len(dls, server);
  n_fails = dls->n_download_failures;
  if (n_fails < smartlist_len(sched)) {
    /* Still inside the schedule: look the delay up directly. */
    delay = *(int *)smartlist_get(sched, n_fails);
  } else if (n_fails == IMPOSSIBLE_TO_DOWNLOAD) {
    /* Marked as never-downloadable: back off forever. */
    delay = INT_MAX;
  } else {
    /* Ran off the end of the schedule: keep reusing the last entry. */
    delay = *(int *)smartlist_get(sched, smartlist_len(sched) - 1);
  }
  dls->next_attempt_at = (delay < INT_MAX) ? (now + delay) : TIME_MAX;

  if (item) {
    if (delay == 0)
      log_debug(LD_DIR, "%s failed %d time(s); I'll try again immediately.",
                item, (int)dls->n_download_failures);
    else if (dls->next_attempt_at < TIME_MAX)
      log_debug(LD_DIR, "%s failed %d time(s); I'll try again in %d seconds.",
                item, (int)dls->n_download_failures,
                (int)(dls->next_attempt_at-now));
    else
      log_debug(LD_DIR, "%s failed %d time(s); Giving up for a while.",
                item, (int)dls->n_download_failures);
  }
  return dls->next_attempt_at;
}
/** Make <b>dls</b> eligible for download again (and/or record that we no
 * longer need it): clear the failure counter and set next_attempt_at to the
 * zeroth schedule entry, offset from now.  For most schedules that entry is
 * zero, so the item becomes immediately downloadable; the bridge schedule's
 * zeroth entry is an hour out. */
void
download_status_reset(download_status_t *dls)
{
  const int as_server = get_options()->DirPort_set;
  const smartlist_t *sched = find_dl_schedule_and_len(dls, as_server);
  const int initial_delay = *(int *)smartlist_get(sched, 0);

  dls->n_download_failures = 0;
  dls->next_attempt_at = time(NULL) + initial_delay;
}
/** Return the number of failures on <b>dls</b> since the last success (if
 * any). */
int
download_status_get_n_failures(const download_status_t *dls)
{
  /* Plain accessor; the counter is bumped by
   * download_status_increment_failure() and cleared by
   * download_status_reset(). */
  return dls->n_download_failures;
}
/** Called when one or more routerdesc (or extrainfo, if <b>was_extrainfo</b>)
 * fetches have failed (with uppercase fingerprints listed in <b>failed</b>,
 * either as descriptor digests or as identity digests based on
 * <b>was_descriptor_digests</b>).
 */
static void
dir_routerdesc_download_failed(smartlist_t *failed, int status_code,
                               int router_purpose,
                               int was_extrainfo, int was_descriptor_digests)
{
  char digest[DIGEST_LEN];
  time_t now = time(NULL);
  int server = directory_fetches_from_authorities(get_options());
  if (!was_descriptor_digests) {
    /* Identity-digest failures: only the bridge path is handled. */
    if (router_purpose == ROUTER_PURPOSE_BRIDGE) {
      tor_assert(!was_extrainfo);
      connection_dir_retry_bridges(failed);
    }
    return; /* FFFF should implement for other-than-router-purpose someday */
  }
  SMARTLIST_FOREACH_BEGIN(failed, const char *, cp) {
    download_status_t *dls = NULL;
    /* Each entry is a hex-encoded digest; decode it before lookup. */
    if (base16_decode(digest, DIGEST_LEN, cp, strlen(cp)) < 0) {
      log_warn(LD_BUG, "Malformed fingerprint in list: %s", escaped(cp));
      continue;
    }
    if (was_extrainfo) {
      signed_descriptor_t *sd =
        router_get_by_extrainfo_digest(digest);
      if (sd)
        dls = &sd->ei_dl_status;
    } else {
      dls = router_get_dl_status_by_descriptor_digest(digest);
    }
    /* Skip digests we don't know about, and ones already past the
     * configured retry limit. */
    if (!dls || dls->n_download_failures >=
                get_options()->TestingDescriptorMaxDownloadTries)
      continue;
    download_status_increment_failure(dls, status_code, cp, server, now);
  } SMARTLIST_FOREACH_END(cp);

  /* No need to relaunch descriptor downloads here: we already do it
   * every 10 or 60 seconds (FOO_DESCRIPTOR_RETRY_INTERVAL) in onion_main.c. */
}
/** Called when a connection to download microdescriptors has failed in whole
 * or in part. <b>failed</b> is a list of every microdesc digest we didn't
 * get. <b>status_code</b> is the http status code we received. Reschedule the
 * microdesc downloads as appropriate. */
static void
dir_microdesc_download_failed(smartlist_t *failed,
                              int status_code)
{
  networkstatus_t *consensus
    = networkstatus_get_latest_consensus_by_flavor(FLAV_MICRODESC);
  routerstatus_t *rs;
  download_status_t *dls;
  time_t now = time(NULL);
  int server = directory_fetches_from_authorities(get_options());

  /* Without a microdesc-flavored consensus we can't map digests back to
   * router statuses, so there is nothing to reschedule. */
  if (! consensus)
    return;
  SMARTLIST_FOREACH_BEGIN(failed, const char *, d) {
    rs = router_get_mutable_consensus_status_by_descriptor_digest(consensus,d);
    if (!rs)
      continue;
    dls = &rs->dl_status;
    /* Respect the configured per-microdesc retry limit. */
    if (dls->n_download_failures >=
        get_options()->TestingMicrodescMaxDownloadTries)
      continue;
    {
      /* d is a raw 256-bit digest; re-encode it so the log line is
       * human-readable. */
      char buf[BASE64_DIGEST256_LEN+1];
      digest256_to_base64(buf, d);
      download_status_increment_failure(dls, status_code, buf,
                                        server, now);
    }
  } SMARTLIST_FOREACH_END(d);
}
/** Helper: qsort-style comparator for two fp_pair_t objects.  Orders by the
 * first digest and breaks ties with the second; returns negative, 0, or
 * positive as appropriate. */
static int
compare_pairs_(const void **a, const void **b)
{
  const fp_pair_t *pair_a = *a;
  const fp_pair_t *pair_b = *b;
  int first_cmp = fast_memcmp(pair_a->first, pair_b->first, DIGEST_LEN);
  if (first_cmp != 0)
    return first_cmp;
  return fast_memcmp(pair_a->second, pair_b->second, DIGEST_LEN);
}
/** Divide a string <b>res</b> of the form FP1-FP2+FP3-FP4...[.z], where each
 * FP is a hex-encoded fingerprint, into a sequence of distinct sorted
 * fp_pair_t. Skip malformed pairs. On success, return 0 and add those
 * fp_pair_t into <b>pairs_out</b>. On failure, return -1. */
int
dir_split_resource_into_fingerprint_pairs(const char *res,
                                          smartlist_t *pairs_out)
{
  smartlist_t *pairs_tmp = smartlist_new();
  smartlist_t *pairs_result = smartlist_new();

  /* Split on '+' into candidate "FP1-FP2" entries. */
  smartlist_split_string(pairs_tmp, res, "+", 0, 0);
  /* Strip a trailing ".z" (compressed-resource suffix) from the last
   * element, if present. */
  if (smartlist_len(pairs_tmp)) {
    char *last = smartlist_get(pairs_tmp,smartlist_len(pairs_tmp)-1);
    size_t last_len = strlen(last);
    if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
      last[last_len-2] = '\0';
    }
  }

  SMARTLIST_FOREACH_BEGIN(pairs_tmp, char *, cp) {
    /* A well-formed entry is HEX_DIGEST "-" HEX_DIGEST; anything else is
     * logged and skipped. */
    if (strlen(cp) != HEX_DIGEST_LEN*2+1) {
      log_info(LD_DIR,
             "Skipping digest pair %s with non-standard length.", escaped(cp));
    } else if (cp[HEX_DIGEST_LEN] != '-') {
      log_info(LD_DIR,
             "Skipping digest pair %s with missing dash.", escaped(cp));
    } else {
      fp_pair_t pair;
      if (base16_decode(pair.first, DIGEST_LEN, cp, HEX_DIGEST_LEN)<0 ||
          base16_decode(pair.second,
                        DIGEST_LEN, cp+HEX_DIGEST_LEN+1, HEX_DIGEST_LEN)<0) {
        log_info(LD_DIR, "Skipping non-decodable digest pair %s", escaped(cp));
      } else {
        /* Keep a heap copy; ownership moves to pairs_result. */
        smartlist_add(pairs_result, tor_memdup(&pair, sizeof(pair)));
      }
    }
    tor_free(cp);
  } SMARTLIST_FOREACH_END(cp);
  smartlist_free(pairs_tmp);

  /* Uniq-and-sort */
  smartlist_sort(pairs_result, compare_pairs_);
  smartlist_uniq(pairs_result, compare_pairs_, tor_free_);

  smartlist_add_all(pairs_out, pairs_result);
  smartlist_free(pairs_result);
  return 0;
}
/** Given a directory <b>resource</b> request, containing zero
 * or more strings separated by plus signs, followed optionally by ".z", store
 * the strings, in order, into <b>fp_out</b>. If <b>compressed_out</b> is
 * non-NULL, set it to 1 if the resource ends in ".z", else set it to 0.
 *
 * If (flags & DSR_HEX), then delete all elements that aren't hex digests, and
 * decode the rest. If (flags & DSR_BASE64), then use "-" rather than "+" as
 * a separator, delete all the elements that aren't base64-encoded digests,
 * and decode the rest. If (flags & DSR_DIGEST256), these digests should be
 * 256 bits long; else they should be 160.
 *
 * If (flags & DSR_SORT_UNIQ), then sort the list and remove all duplicates.
 */
int
dir_split_resource_into_fingerprints(const char *resource,
                                     smartlist_t *fp_out, int *compressed_out,
                                     int flags)
{
  const int decode_hex = flags & DSR_HEX;
  const int decode_base64 = flags & DSR_BASE64;
  const int digests_are_256 = flags & DSR_DIGEST256;
  const int sort_uniq = flags & DSR_SORT_UNIQ;

  /* Expected digest sizes depend on whether we're handling 160- or
   * 256-bit digests. */
  const int digest_len = digests_are_256 ? DIGEST256_LEN : DIGEST_LEN;
  const int hex_digest_len = digests_are_256 ?
    HEX_DIGEST256_LEN : HEX_DIGEST_LEN;
  const int base64_digest_len = digests_are_256 ?
    BASE64_DIGEST256_LEN : BASE64_DIGEST_LEN;
  smartlist_t *fp_tmp = smartlist_new();

  /* Hex and base64 decoding are mutually exclusive modes. */
  tor_assert(!(decode_hex && decode_base64));
  tor_assert(fp_out);

  smartlist_split_string(fp_tmp, resource, decode_base64?"-":"+", 0, 0);
  if (compressed_out)
    *compressed_out = 0;
  /* Strip a trailing ".z" from the last element and remember that the
   * request asked for compressed data. */
  if (smartlist_len(fp_tmp)) {
    char *last = smartlist_get(fp_tmp,smartlist_len(fp_tmp)-1);
    size_t last_len = strlen(last);
    if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
      last[last_len-2] = '\0';
      if (compressed_out)
        *compressed_out = 1;
    }
  }
  if (decode_hex || decode_base64) {
    const size_t encoded_len = decode_hex ? hex_digest_len : base64_digest_len;
    int i;
    char *cp, *d = NULL;
    /* Replace each encoded string in fp_tmp with its decoded binary form,
     * dropping (and logging) any element that doesn't decode.  The "again"
     * label frees the current string (and any half-built decode buffer)
     * before the next iteration; i-- compensates for the deletion. */
    for (i = 0; i < smartlist_len(fp_tmp); ++i) {
      cp = smartlist_get(fp_tmp, i);
      if (strlen(cp) != encoded_len) {
        log_info(LD_DIR,
                 "Skipping digest %s with non-standard length.", escaped(cp));
        smartlist_del_keeporder(fp_tmp, i--);
        goto again;
      }
      d = tor_malloc_zero(digest_len);
      if (decode_hex ?
          (base16_decode(d, digest_len, cp, hex_digest_len)<0) :
          (base64_decode(d, digest_len, cp, base64_digest_len)<0)) {
          log_info(LD_DIR, "Skipping non-decodable digest %s", escaped(cp));
          smartlist_del_keeporder(fp_tmp, i--);
          goto again;
      }
      smartlist_set(fp_tmp, i, d);
      d = NULL;
    again:
      tor_free(cp);
      tor_free(d);
    }
  }
  if (sort_uniq) {
    /* Pick the comparator matching the element representation. */
    if (decode_hex || decode_base64) {
      if (digests_are_256) {
        smartlist_sort_digests256(fp_tmp);
        smartlist_uniq_digests256(fp_tmp);
      } else {
        smartlist_sort_digests(fp_tmp);
        smartlist_uniq_digests(fp_tmp);
      }
    } else {
      smartlist_sort_strings(fp_tmp);
      smartlist_uniq_strings(fp_tmp);
    }
  }
  smartlist_add_all(fp_out, fp_tmp);
  smartlist_free(fp_tmp);
  return 0;
}
| 38.598973 | 80 | 0.615227 | [
"object"
] |
9e5266ffdb88c8b73c84aff8e386c7d99f04127a | 5,725 | c | C | ext/src/class_entity_column_set.c | php-lsys/entity | c1867f9e95c71df7aa8ad0cba6ce4a7b813f45fc | [
"Apache-2.0"
] | null | null | null | ext/src/class_entity_column_set.c | php-lsys/entity | c1867f9e95c71df7aa8ad0cba6ce4a7b813f45fc | [
"Apache-2.0"
] | null | null | null | ext/src/class_entity_column_set.c | php-lsys/entity | c1867f9e95c71df7aa8ad0cba6ce4a7b813f45fc | [
"Apache-2.0"
] | 1 | 2021-09-03T14:33:00.000Z | 2021-09-03T14:33:00.000Z |
#include "zend_interfaces.h"
#include "zend_exceptions.h"
#include "zend.h"
#include "php.h"
#include "zend_API.h"
#include "entity.h"
#include "utils.h"
#include "class_entity_column_set.h"
#include "class_column.h"
#include "class_column_set.h"
#include "class_exception.h"
/* Class entry for \LSEntity\EntityColumnSet; filled in by
 * lsentity_entity_column_set_class_init() at module startup. */
zend_class_entry *lsentity_entity_column_set_ce_ptr;

/* arginfo for __construct(?array $column = null, ?array $patch_columns = null) */
ZEND_BEGIN_ARG_INFO_EX(lsentity_entity_column_set_construct_arginfo, 0, 0, 0)
ZEND_ARG_ARRAY_INFO(0, column,1)
ZEND_ARG_ARRAY_INFO(0, patch_columns,1)
ZEND_END_ARG_INFO()

/* arginfo for asColumnSet(ColumnSet $table_columns, $patch = false) */
ZEND_BEGIN_ARG_INFO_EX(lsentity_entity_column_set_ascs_arginfo, 0, 0, 1)
ZEND_ARG_OBJ_INFO_ENTITYNS(0, table_columns, ColumnSet, 1)
ZEND_ARG_INFO(0, patch)
ZEND_END_ARG_INFO()
/* EntityColumnSet::__construct([array $column [, array $patch_columns]])
 *
 * Stores the optional column-name list in $this->_columns (coercing every
 * entry to string) and the optional Column-object list in
 * $this->_patch_columns (validating each entry's class).  When no patch
 * columns are given, _patch_columns defaults to an empty array. */
ZEND_METHOD(lsentity_entity_column_set_class, __construct){
    zval *object,*column=NULL,*patch_columns=NULL;
    ZEND_PARSE_PARAMETERS_START(0, 2)
        Z_PARAM_OPTIONAL
        Z_PARAM_ARRAY(column)
        Z_PARAM_ARRAY(patch_columns)
    ZEND_PARSE_PARAMETERS_END_EX(RETURN_FALSE);
    object=getThis();
    if(column){
        zval *tmp;
        /* Force every configured column name to a string in place. */
        ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(column), tmp) {
            convert_to_string(tmp);
        } ZEND_HASH_FOREACH_END();
        zend_update_property(Z_OBJCE_P(object),object,ZEND_STRL("_columns"),column);
    }
    if(patch_columns){
        zval *tmp;
        /* Every patch entry must pass the Column-class check; bail out on
         * the first mismatch. */
        ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(patch_columns), tmp) {
            if(!lsentity_obj_check(lsentity_column_ce_ptr,tmp,1,0)){
                RETURN_FALSE;
            }
        } ZEND_HASH_FOREACH_END();
        zend_update_property(Z_OBJCE_P(object),object,ZEND_STRL("_patch_columns"),patch_columns);
    }else{
        /* No patch columns supplied: initialize _patch_columns to []. */
        zval _column;
        array_init(&_column);
        zend_update_property(Z_OBJCE_P(object),object,ZEND_STRL("_patch_columns"),&_column);
        zval_ptr_dtor(&_column);
    }
}
/* EntityColumnSet::notCustom(): bool
 *
 * True when no custom selection is in effect: _columns is not an array
 * and _patch_columns is empty. */
ZEND_METHOD(lsentity_entity_column_set_class, notCustom){
    zval *mzval,*pzval;
    mzval=zend_read_property(Z_OBJCE_P(getThis()),getThis(),ZEND_STRL("_columns"),0,NULL);
    pzval=zend_read_property(Z_OBJCE_P(getThis()),getThis(),ZEND_STRL("_patch_columns"),0,NULL);
    RETURN_BOOL(Z_TYPE_P(mzval)!=IS_ARRAY&&zend_array_count(Z_ARR_P(pzval))==0);
}
/* EntityColumnSet::asColumnSet(ColumnSet $table_columns [, bool $patch])
 *
 * Projects the table's ColumnSet through this selection:
 *  - when _columns is not set, returns a clone of the given ColumnSet
 *    (optionally with the patch columns add()-ed to it);
 *  - otherwise builds a new ColumnSet containing only the named columns
 *    that exist in the given set, plus (when $patch) the patch columns.
 *
 * NOTE(review): the original text contained HTML-entity mojibake
 * ("¶m1"); it has been restored to "&param1" throughout. */
ZEND_METHOD(lsentity_entity_column_set_class, asColumnSet){
    zend_bool patch=0;
    zval *column;
    ZEND_PARSE_PARAMETERS_START(1, 2)
        Z_PARAM_OBJECT_OF_CLASS(column, lsentity_column_set_ce_ptr)
        Z_PARAM_OPTIONAL
        Z_PARAM_BOOL(patch)
    ZEND_PARSE_PARAMETERS_END_EX(RETURN_FALSE);
    zval *mzval,*pzval;
    mzval=zend_read_property(Z_OBJCE_P(getThis()),getThis(),ZEND_STRL("_columns"),0,NULL);
    if(Z_TYPE_P(mzval)!=IS_ARRAY){
        /* No explicit column list: hand back a (separated) clone of the
         * input set, optionally extended with the patch columns. */
        zval tmp_columns;
        ZVAL_OBJ(&tmp_columns,zend_objects_clone_obj(column));
        SEPARATE_ARRAY(zend_read_property(Z_OBJCE(tmp_columns),&tmp_columns,ZEND_STRL("_columns"),1,NULL));
        if(patch){
            zval *tmpp;
            pzval=zend_read_property(Z_OBJCE_P(getThis()),getThis(),ZEND_STRL("_patch_columns"),0,NULL);
            if(Z_TYPE_P(pzval)==IS_ARRAY){
                ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(pzval), tmpp) {
                    zend_call_method_with_1_params(&tmp_columns,Z_OBJCE(tmp_columns), NULL, "add", NULL,tmpp);
                } ZEND_HASH_FOREACH_END();
            }
        }
        RETURN_ZVAL(&tmp_columns,1,1);
    }
    /* Build a name => Column map restricted to the selected names. */
    zval columntype;
    array_init(&columntype);
    zval *tmp;
    ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(mzval), tmp) {
        if(lsentity_check_bool_with_1_params(column,"offsetexists",tmp)){
            zval param1;
            zend_call_method_with_1_params(column,Z_OBJCE_P(column), NULL, "offsetget", &param1,tmp);
            Z_REFCOUNTED(param1)&&Z_ADDREF_P(&param1);
            zend_hash_add(Z_ARR(columntype),Z_STR_P(tmp),&param1);
            zval_ptr_dtor(&param1);
        }
    } ZEND_HASH_FOREACH_END();
    if(patch){
        /* Merge patch Column objects, keyed by their name() result. */
        zval *tmp1;
        pzval=zend_read_property(Z_OBJCE_P(getThis()),getThis(),ZEND_STRL("_patch_columns"),0,NULL);
        if(Z_TYPE_P(pzval)==IS_ARRAY) {
            ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(pzval), tmp1)
            {
                zval param1;
                zend_call_method_with_0_params(tmp1, Z_OBJCE_P(tmp1), NULL, "name", &param1);
                Z_REFCOUNTED_P(tmp1) && Z_ADDREF_P(tmp1);
                zend_hash_add(Z_ARR(columntype), Z_STR(param1), tmp1);
                zval_ptr_dtor(&param1);
            }ZEND_HASH_FOREACH_END();
        }
    }
    /* Construct the resulting ColumnSet from the assembled map. */
    zval param[]={
        columntype
    };
    if(!lsentity_new_class(lsentity_column_set_ce_ptr,return_value,param,1)){
        zval_ptr_dtor(&columntype);
        RETURN_NULL();
    }
    zval_ptr_dtor(&columntype);
}
/* Method table for \LSEntity\EntityColumnSet. */
static zend_function_entry lsentity_entity_column_set_class_method[] = {
    ZEND_ME(lsentity_entity_column_set_class,__construct, lsentity_entity_column_set_construct_arginfo, ZEND_ACC_PUBLIC)
    ZEND_ME(lsentity_entity_column_set_class,notCustom, NULL, ZEND_ACC_PUBLIC)
    ZEND_ME(lsentity_entity_column_set_class,asColumnSet, lsentity_entity_column_set_ascs_arginfo, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Module-startup hook: registers \LSEntity\EntityColumnSet and declares its
 * two protected properties (_columns and _patch_columns). */
void lsentity_entity_column_set_class_init(){
    zend_class_entry ce;
    INIT_NS_CLASS_ENTRY(ce,LSENTITY_NS,"EntityColumnSet",lsentity_entity_column_set_class_method);
    lsentity_entity_column_set_ce_ptr = zend_register_internal_class(&ce );
    zend_declare_property_null(lsentity_entity_column_set_ce_ptr,ZEND_STRL("_columns"), ZEND_ACC_PROTECTED );
    zend_declare_property_null(lsentity_entity_column_set_ce_ptr,ZEND_STRL("_patch_columns"), ZEND_ACC_PROTECTED );
}
| 36.006289 | 121 | 0.680699 | [
"object"
] |
9e532f859967b05850e372fd9eeff00b6a0dc0a0 | 6,976 | h | C | qmlui/virtualconsole/vcbutton.h | markusb/qlcplus | 1aae45b8d1914114b9a7ea6174e83e51e81ab8a1 | [
"Apache-2.0"
] | 1 | 2017-05-11T06:20:08.000Z | 2017-05-11T06:20:08.000Z | qmlui/virtualconsole/vcbutton.h | markusb/qlcplus | 1aae45b8d1914114b9a7ea6174e83e51e81ab8a1 | [
"Apache-2.0"
] | null | null | null | qmlui/virtualconsole/vcbutton.h | markusb/qlcplus | 1aae45b8d1914114b9a7ea6174e83e51e81ab8a1 | [
"Apache-2.0"
] | null | null | null | /*
Q Light Controller Plus
vcbutton.h
Copyright (c) Massimo Callegari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.txt
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef VCBUTTON_H
#define VCBUTTON_H
#include "vcwidget.h"
/* XML tag/attribute names used by loadXML()/saveXML() when a VCButton is
 * (de)serialized in the workspace file. */
#define KXMLQLCVCButton "Button"

#define KXMLQLCVCButtonFunction "Function"
#define KXMLQLCVCButtonFunctionID "ID"

#define KXMLQLCVCButtonAction "Action"
#define KXMLQLCVCButtonActionFlash "Flash"
#define KXMLQLCVCButtonActionToggle "Toggle"
#define KXMLQLCVCButtonActionBlackout "Blackout"
#define KXMLQLCVCButtonActionStopAll "StopAll"
#define KXMLQLCVCButtonStopAllFadeTime "FadeOut"

#define KXMLQLCVCButtonIntensity "Intensity"
#define KXMLQLCVCButtonIntensityAdjust "Adjust"
class FunctionParent;

/** Virtual Console push-button widget.  Depending on its ButtonAction it
 *  toggles or flashes an attached Function, toggles blackout, or stops all
 *  running functions (panic button). */
class VCButton : public VCWidget
{
    Q_OBJECT

    Q_PROPERTY(ButtonAction actionType READ actionType WRITE setActionType NOTIFY actionTypeChanged)
    Q_PROPERTY(ButtonState state READ state WRITE setState NOTIFY stateChanged)
    Q_PROPERTY(quint32 functionID READ functionID WRITE setFunctionID NOTIFY functionIDChanged)
    Q_PROPERTY(bool startupIntensityEnabled READ startupIntensityEnabled WRITE setStartupIntensityEnabled NOTIFY startupIntensityEnabledChanged)
    Q_PROPERTY(qreal startupIntensity READ startupIntensity WRITE setStartupIntensity NOTIFY startupIntensityChanged)

    /*********************************************************************
     * Initialization
     *********************************************************************/
public:
    VCButton(Doc* doc = NULL, QObject *parent = 0);
    virtual ~VCButton();

    /** @reimp */
    QString defaultCaption();

    /** @reimp */
    void setupLookAndFeel(qreal pixelDensity, int page);

    /** @reimp */
    void render(QQuickView *view, QQuickItem *parent);

    /** @reimp */
    QString propertiesResource() const;

    /** @reimp */
    VCWidget *createCopy(VCWidget *parent);

protected:
    /** @reimp */
    bool copyFrom(const VCWidget* widget);

    /*********************************************************************
     * Function attachment
     *********************************************************************/
public:
    /**
     * Attach a function to a VCButton. This function is started when the
     * button is pressed down.
     *
     * @param function An ID of a function to attach
     */
    Q_INVOKABLE void setFunctionID(quint32 fid);

    /**
     * Get the ID of the function attached to a VCButton
     *
     * @return The ID of the attached function or Function::invalidId()
     *         if there isn't one
     */
    quint32 functionID() const;

    /** @reimp */
    void adjustFunctionIntensity(Function *f, qreal value);

    /**
     * The actual method used to request a change of state of this
     * Button. Depending on the action type this will start/stop
     * the attached Function, if any */
    Q_INVOKABLE void requestStateChange(bool pressed);

    /** @reimp */
    void notifyFunctionStarting(VCWidget *widget, quint32 fid, qreal fIntensity);

signals:
    /** Emitted when the attached Function ID changes */
    void functionIDChanged(quint32 id);

protected slots:
    /** Handler for function running signal */
    void slotFunctionRunning(quint32 fid);

    /** Handler for function stop signal */
    void slotFunctionStopped(quint32 fid);

    /** Basically the same as slotFunctionStopped() but for flash signal */
    void slotFunctionFlashing(quint32 fid, bool state);

private:
    FunctionParent functionParent() const;

protected:
    /** The ID of the Function that this button is controlling */
    quint32 m_functionID;

    /*********************************************************************
     * Button state
     *********************************************************************/
public:
    enum ButtonState
    {
        Inactive,
        Monitoring,
        Active
    };
    Q_ENUM(ButtonState)

    /** Get/Set the button pressure state */
    ButtonState state() const;
    void setState(ButtonState state);

signals:
    /** Signal emitted when the button has actually changed the graphic state */
    void stateChanged(int state);

protected:
    ButtonState m_state;

    /*********************************************************************
     * Button action
     *********************************************************************/
public:
    /**
     * Toggle: Start/stop the assigned function.
     * Flash: Keep the function running as long as the button is kept down.
     * Blackout: Toggle blackout on/off.
     * StopAll: Stop all functions (panic button).
     */
    enum ButtonAction { Toggle, Flash, Blackout, StopAll };
    Q_ENUM(ButtonAction)

    ButtonAction actionType() const;
    void setActionType(ButtonAction actionType);

    /** Convert between a ButtonAction and its string form (used in XML) */
    static QString actionToString(ButtonAction action);
    static ButtonAction stringToAction(const QString& str);

    void setStopAllFadeOutTime(int ms);
    int stopAllFadeTime();

signals:
    /** Emitted when the action type changes */
    void actionTypeChanged(ButtonAction actionType);

protected:
    ButtonAction m_actionType;

    /** if button action is StopAll, this indicates the time
     *  in milliseconds of fadeout before stopping */
    int m_blackoutFadeOutTime;

    /*****************************************************************************
     * Function startup intensity adjustment
     *****************************************************************************/
public:
    /** Get/Set if a startup intensity amount should be applied
     *  when starting the attached Function */
    bool startupIntensityEnabled() const;
    void setStartupIntensityEnabled(bool enable);

    /** Get/Set the amount of intensity adjustment applied
     *  when starting the attached Function */
    qreal startupIntensity() const;
    void setStartupIntensity(qreal fraction);

signals:
    void startupIntensityEnabledChanged();
    void startupIntensityChanged();

protected:
    bool m_startupIntensityEnabled;
    qreal m_startupIntensity;

    /*********************************************************************
     * External input
     *********************************************************************/
public slots:
    /** @reimp */
    void slotInputValueChanged(quint8 id, uchar value);

    /*********************************************************************
     * Load & Save
     *********************************************************************/
public:
    /** @reimp */
    bool loadXML(QXmlStreamReader &root);

    /** @reimp */
    bool saveXML(QXmlStreamWriter *doc);
};
#endif
| 30.596491 | 144 | 0.607081 | [
"render"
] |
9e57e77ec060566b07b83b6bb5a0a8940723d50d | 3,907 | h | C | editor/debugger/editor_performance_profiler.h | mehdigoom/ValjangEngine4.0.x | 30e9e5a6dfa92306b16aba39753d07addc57a93b | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 2 | 2021-02-10T07:16:37.000Z | 2021-06-25T17:13:06.000Z | editor/debugger/editor_performance_profiler.h | mehdigoom/ValjangEngine4.0.x | 30e9e5a6dfa92306b16aba39753d07addc57a93b | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | null | null | null | editor/debugger/editor_performance_profiler.h | mehdigoom/ValjangEngine4.0.x | 30e9e5a6dfa92306b16aba39753d07addc57a93b | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | null | null | null | /*************************************************************************/
/* editor_performance_profiler.h */
/*************************************************************************/
/* This file is part of: */
/* Valjang Engine */
/* http://Valjang.fr */
/*************************************************************************/
/* Copyright (c) 2007-2020 Valjang. */
/* Copyright (c) 2014-2020 Valjang Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef EDITOR_PERFORMANCE_PROFILER_H
#define EDITOR_PERFORMANCE_PROFILER_H
#include "core/map.h"
#include "core/ordered_hash_map.h"
#include "main/performance.h"
#include "scene/gui/control.h"
#include "scene/gui/label.h"
#include "scene/gui/split_container.h"
#include "scene/gui/tree.h"
/** Editor dock that shows performance monitors: a Tree of selectable
 *  monitors next to a custom-drawn Control that graphs their history. */
class EditorPerformanceProfiler : public HSplitContainer {
	GDCLASS(EditorPerformanceProfiler, HSplitContainer);

private:
	/* One tracked monitor: display name, category ("base"), recorded
	 * value history, running maximum, and its tree item. */
	class Monitor {
	public:
		String name;
		String base;
		List<float> history;
		float max = 0.0f;
		TreeItem *item = nullptr;
		Performance::MonitorType type = Performance::MONITOR_TYPE_QUANTITY;
		int frame_index = 0;

		Monitor();
		Monitor(String p_name, String p_base, int p_frame_index, Performance::MonitorType p_type, TreeItem *p_item);
		void update_value(float p_value);
		void reset();
	};

	OrderedHashMap<StringName, Monitor> monitors;
	// Maps a category name to its parent TreeItem in monitor_tree.
	Map<StringName, TreeItem *> base_map;
	Tree *monitor_tree;
	Control *monitor_draw;
	Label *info_message;
	StringName marker_key;
	int marker_frame;
	// Layout constants (pixels) used when drawing the graph.
	const int MARGIN = 4;
	const int POINT_SEPARATION = 5;
	const int MARKER_MARGIN = 2;

	static String _create_label(float p_value, Performance::MonitorType p_type);
	void _monitor_select();
	void _monitor_draw();
	void _build_monitor_tree();
	TreeItem *_get_monitor_base(const StringName &p_base_name);
	TreeItem *_create_monitor_item(const StringName &p_monitor_name, TreeItem *p_base);
	void _marker_input(const Ref<InputEvent> &p_event);

public:
	void reset();
	void update_monitors(const Vector<StringName> &p_names);
	void add_profile_frame(const Vector<float> &p_values);
	List<float> *get_monitor_data(const StringName &p_name);
	EditorPerformanceProfiler();
};
#endif // EDITOR_PERFORMANCE_PROFILER_H
| 42.934066 | 110 | 0.593294 | [
"vector"
] |
9e595e9a381e09ba25f6ff93ee7e4497061cae91 | 1,250 | c | C | kernel/main.c | cpuex2020-3/xv6-rv32im | 2d169a77943bff0930283ed7ceee81c00d6eeba9 | [
"MIT-0"
] | null | null | null | kernel/main.c | cpuex2020-3/xv6-rv32im | 2d169a77943bff0930283ed7ceee81c00d6eeba9 | [
"MIT-0"
] | null | null | null | kernel/main.c | cpuex2020-3/xv6-rv32im | 2d169a77943bff0930283ed7ceee81c00d6eeba9 | [
"MIT-0"
] | null | null | null | #include "types.h"
#include "param.h"
#include "memlayout.h"
#include "riscv.h"
#include "defs.h"
volatile static int started = 0;
// start() jumps here in supervisor mode on all CPUs.
// Kernel entry for every hart (CPU core) once start() has switched to
// supervisor mode. Hart 0 performs all one-time, global initialization;
// the other harts spin until hart 0 publishes `started`, then set up only
// their per-hart state. Every hart ends in scheduler() and never returns.
void
main()
{
  if(cpuid() == 0){
    consoleinit();
    printfinit();
    printf("\n");
    printf("xv6 kernel is booting\n");
    printf("\n");
    kinit();            // physical page allocator
    kvminit();          // create kernel page table
    kvminithart();      // turn on paging
    procinit();         // process table
    trapinit();         // trap vectors
    trapinithart();     // install kernel trap vector
    plicinit();         // set up interrupt controller
    plicinithart();     // ask PLIC for device interrupts
    binit();            // buffer cache
    iinit();            // inode cache
    fileinit();         // file table
    virtio_disk_init(); // emulated hard disk
    userinit();         // first user process
    // Full memory barrier: make all initialization writes visible to the
    // other harts before the `started` flag releases them.
    __sync_synchronize();
    started = 1;
    printf("starting xv6...\n");
  } else {
    // Busy-wait for hart 0 to finish global init. `started` is declared
    // volatile, so the load in this loop is not optimized away.
    while(started == 0);
    // Pair with hart 0's barrier: observe its initialization writes.
    __sync_synchronize();
    printf("hart %d starting\n", cpuid());
    kvminithart();    // turn on paging
    trapinithart();   // install kernel trap vector
    plicinithart();   // ask PLIC for device interrupts
  }
  scheduler();
}
| 27.173913 | 55 | 0.5936 | [
"vector"
] |
9e5a8fd6f5e3d2aa39c6fb68e249c58e5fc4a5e5 | 229,028 | h | C | xxhash.h | bket/xxHash | d8aff149b5b9b9461f4b01fc5ab4842112d2c867 | [
"BSD-2-Clause"
] | null | null | null | xxhash.h | bket/xxHash | d8aff149b5b9b9461f4b01fc5ab4842112d2c867 | [
"BSD-2-Clause"
] | null | null | null | xxhash.h | bket/xxHash | d8aff149b5b9b9461f4b01fc5ab4842112d2c867 | [
"BSD-2-Clause"
] | 1 | 2022-02-11T15:39:45.000Z | 2022-02-11T15:39:45.000Z | /*
* xxHash - Extremely Fast Hash algorithm
* Header File
* Copyright (C) 2012-2021 Yann Collet
*
* BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at:
* - xxHash homepage: https://www.xxhash.com
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
/*!
* @mainpage xxHash
*
* xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
* limits.
*
* It is proposed in four flavors, in three families:
* 1. @ref XXH32_family
* - Classic 32-bit hash function. Simple, compact, and runs on almost all
* 32-bit and 64-bit systems.
* 2. @ref XXH64_family
* - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
* 64-bit systems (but _not_ 32-bit systems).
* 3. @ref XXH3_family
* - Modern 64-bit and 128-bit hash function family which features improved
* strength and performance across the board, especially on smaller data.
* It benefits greatly from SIMD and 64-bit without requiring it.
*
* Benchmarks
* ---
* The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
* The open source benchmark program is compiled with clang v10.0 using -O3 flag.
*
* | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity |
* | -------------------- | ------- | ----: | ---------------: | ------------------: |
* | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 |
* | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 |
* | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 |
* | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 |
* | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 |
* | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 |
* | RAM sequential read | | N/A | 28.0 GB/s | N/A |
* | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 |
* | City64 | | 64 | 22.0 GB/s | 76.6 |
* | T1ha2 | | 64 | 22.0 GB/s | 99.0 |
* | City128 | | 128 | 21.7 GB/s | 57.7 |
* | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 |
* | XXH64() | | 64 | 19.4 GB/s | 71.0 |
* | SpookyHash | | 64 | 19.3 GB/s | 53.2 |
* | Mum | | 64 | 18.0 GB/s | 67.0 |
* | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 |
* | XXH32() | | 32 | 9.7 GB/s | 71.9 |
* | City32 | | 32 | 9.1 GB/s | 66.0 |
* | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 |
* | Murmur3 | | 32 | 3.9 GB/s | 56.1 |
* | SipHash* | | 64 | 3.0 GB/s | 43.2 |
* | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 |
* | HighwayHash | | 64 | 1.4 GB/s | 6.0 |
* | FNV64 | | 64 | 1.2 GB/s | 62.7 |
* | Blake2* | | 256 | 1.1 GB/s | 5.1 |
* | SHA1* | | 160 | 0.8 GB/s | 5.6 |
* | MD5* | | 128 | 0.6 GB/s | 7.8 |
* @note
* - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
* even though it is mandatory on x64.
* - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
* by modern standards.
* - Small data velocity is a rough average of algorithm's efficiency for small
* data. For more accurate information, see the wiki.
* - More benchmarks and strength tests are found on the wiki:
* https://github.com/Cyan4973/xxHash/wiki
*
* Usage
* ------
* All xxHash variants use a similar API. Changing the algorithm is a trivial
* substitution.
*
* @pre
* For functions which take an input and length parameter, the following
* requirements are assumed:
* - The range from [`input`, `input + length`) is valid, readable memory.
* - The only exception is if the `length` is `0`, `input` may be `NULL`.
* - For C++, the objects must have the *TriviallyCopyable* property, as the
* functions access bytes directly as if it was an array of `unsigned char`.
*
* @anchor single_shot_example
* **Single Shot**
*
* These functions are stateless functions which hash a contiguous block of memory,
* immediately returning the result. They are the easiest and usually the fastest
* option.
*
* XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
*
* @code{.c}
* #include <string.h>
* #include "xxhash.h"
*
* // Example for a function which hashes a null terminated string with XXH32().
* XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
* {
* // NULL pointers are only valid if the length is zero
* size_t length = (string == NULL) ? 0 : strlen(string);
* return XXH32(string, length, seed);
* }
* @endcode
*
* @anchor streaming_example
* **Streaming**
*
* These groups of functions allow incremental hashing of unknown size, even
* more than what would fit in a size_t.
*
* XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
*
* @code{.c}
* #include <stdio.h>
* #include <assert.h>
* #include "xxhash.h"
* // Example for a function which hashes a FILE incrementally with XXH3_64bits().
* XXH64_hash_t hashFile(FILE* f)
* {
* // Allocate a state struct. Do not just use malloc() or new.
* XXH3_state_t* state = XXH3_createState();
* assert(state != NULL && "Out of memory!");
* // Reset the state to start a new hashing session.
* XXH3_64bits_reset(state);
* char buffer[4096];
* size_t count;
* // Read the file in chunks
* while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
* // Run update() as many times as necessary to process the data
* XXH3_64bits_update(state, buffer, count);
* }
* // Retrieve the finalized hash. This will not change the state.
* XXH64_hash_t result = XXH3_64bits_digest(state);
* // Free the state. Do not use free().
* XXH3_freeState(state);
* return result;
* }
* @endcode
*
* @file xxhash.h
* xxHash prototypes and implementation
*/
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************
* INLINE mode
******************************/
/*!
* @defgroup public Public API
* Contains details on the public xxHash functions.
* @{
*/
#ifdef XXH_DOXYGEN
/*!
* @brief Exposes the implementation and marks all functions as `inline`.
*
* Use these build macros to inline xxhash into the target unit.
* Inlining improves performance on small inputs, especially when the length is
* expressed as a compile-time constant:
*
* https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
*
* It also keeps xxHash symbols private to the unit, so they are not exported.
*
* Usage:
* @code{.c}
* #define XXH_INLINE_ALL
* #include "xxhash.h"
* @endcode
* Do not compile and link xxhash.o as a separate object, as it is not useful.
*/
# define XXH_INLINE_ALL
# undef XXH_INLINE_ALL
/*!
* @brief Exposes the implementation without marking functions as inline.
*/
# define XXH_PRIVATE_API
# undef XXH_PRIVATE_API
/*!
* @brief Emulate a namespace by transparently prefixing all symbols.
*
* If you want to include _and expose_ xxHash functions from within your own
* library, but also want to avoid symbol collisions with other libraries which
* may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
* any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
* (therefore, avoid empty or numeric values).
*
* Note that no change is required within the calling program as long as it
* includes `xxhash.h`: Regular symbol names will be automatically translated
* by this header.
*/
# define XXH_NAMESPACE /* YOUR NAME HERE */
# undef XXH_NAMESPACE
#endif
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
&& !defined(XXH_INLINE_ALL_31684351384)
/* this section should be traversed only once */
# define XXH_INLINE_ALL_31684351384
/* give access to the advanced API, required to compile implementations */
# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
# define XXH_STATIC_LINKING_ONLY
/* make all functions private */
# undef XXH_PUBLIC_API
# if defined(__GNUC__)
# define XXH_PUBLIC_API static __inline __attribute__((unused))
# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define XXH_PUBLIC_API static inline
# elif defined(_MSC_VER)
# define XXH_PUBLIC_API static __inline
# else
/* note: this version may generate warnings for unused static functions */
# define XXH_PUBLIC_API static
# endif
/*
* This part deals with the special case where a unit wants to inline xxHash,
* but "xxhash.h" has previously been included without XXH_INLINE_ALL,
* such as part of some previously included *.h header file.
* Without further action, the new include would just be ignored,
* and functions would effectively _not_ be inlined (silent failure).
* The following macros solve this situation by prefixing all inlined names,
* avoiding naming collision with previous inclusions.
*/
/* Before that, we unconditionally #undef all symbols,
* in case they were already defined with XXH_NAMESPACE.
* They will then be redefined for XXH_INLINE_ALL
*/
# undef XXH_versionNumber
/* XXH32 */
# undef XXH32
# undef XXH32_createState
# undef XXH32_freeState
# undef XXH32_reset
# undef XXH32_update
# undef XXH32_digest
# undef XXH32_copyState
# undef XXH32_canonicalFromHash
# undef XXH32_hashFromCanonical
/* XXH64 */
# undef XXH64
# undef XXH64_createState
# undef XXH64_freeState
# undef XXH64_reset
# undef XXH64_update
# undef XXH64_digest
# undef XXH64_copyState
# undef XXH64_canonicalFromHash
# undef XXH64_hashFromCanonical
/* XXH3_64bits */
# undef XXH3_64bits
# undef XXH3_64bits_withSecret
# undef XXH3_64bits_withSeed
# undef XXH3_64bits_withSecretandSeed
# undef XXH3_createState
# undef XXH3_freeState
# undef XXH3_copyState
# undef XXH3_64bits_reset
# undef XXH3_64bits_reset_withSeed
# undef XXH3_64bits_reset_withSecret
# undef XXH3_64bits_update
# undef XXH3_64bits_digest
# undef XXH3_generateSecret
/* XXH3_128bits */
# undef XXH128
# undef XXH3_128bits
# undef XXH3_128bits_withSeed
# undef XXH3_128bits_withSecret
# undef XXH3_128bits_reset
# undef XXH3_128bits_reset_withSeed
# undef XXH3_128bits_reset_withSecret
# undef XXH3_128bits_reset_withSecretandSeed
# undef XXH3_128bits_update
# undef XXH3_128bits_digest
# undef XXH128_isEqual
# undef XXH128_cmp
# undef XXH128_canonicalFromHash
# undef XXH128_hashFromCanonical
/* Finally, free the namespace itself */
# undef XXH_NAMESPACE
/* employ the namespace for XXH_INLINE_ALL */
# define XXH_NAMESPACE XXH_INLINE_
/*
* Some identifiers (enums, type names) are not symbols,
* but they must nonetheless be renamed to avoid redeclaration.
* Alternative solution: do not redeclare them.
* However, this requires some #ifdefs, and has a more dispersed impact.
* Meanwhile, renaming can be achieved in a single place.
*/
# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
# define XXH_OK XXH_IPREF(XXH_OK)
# define XXH_ERROR XXH_IPREF(XXH_ERROR)
# define XXH_errorcode XXH_IPREF(XXH_errorcode)
# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
# define XXH32_state_s XXH_IPREF(XXH32_state_s)
# define XXH32_state_t XXH_IPREF(XXH32_state_t)
# define XXH64_state_s XXH_IPREF(XXH64_state_s)
# define XXH64_state_t XXH_IPREF(XXH64_state_t)
# define XXH3_state_s XXH_IPREF(XXH3_state_s)
# define XXH3_state_t XXH_IPREF(XXH3_state_t)
# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
/* Ensure the header is parsed again, even if it was previously included */
# undef XXHASH_H_5627135585666179
# undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/* ****************************************************************
* Stable API
*****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
/*! @brief Marks a global symbol. */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
# ifdef XXH_EXPORT
# define XXH_PUBLIC_API __declspec(dllexport)
# elif XXH_IMPORT
# define XXH_PUBLIC_API __declspec(dllimport)
# endif
# else
# define XXH_PUBLIC_API /* do nothing */
# endif
#endif
#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif
/* *************************************
* Compiler specifics
***************************************/
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
# ifdef XXH_EXPORT
# define XXH_PUBLIC_API __declspec(dllexport)
# elif XXH_IMPORT
# define XXH_PUBLIC_API __declspec(dllimport)
# endif
# else
# define XXH_PUBLIC_API /* do nothing */
# endif
#endif
#if defined (__GNUC__)
# define XXH_CONSTF __attribute__((const))
# define XXH_PUREF __attribute__((pure))
# define XXH_MALLOCF __attribute__((malloc))
#else
# define XXH_CONSTF /* disable */
# define XXH_PUREF
# define XXH_MALLOCF
#endif
/* *************************************
* Version
***************************************/
#define XXH_VERSION_MAJOR 0
#define XXH_VERSION_MINOR 8
#define XXH_VERSION_RELEASE 1
/*! @brief Version number, encoded as two digits each */
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
/*!
* @brief Obtains the xxHash version.
*
* This is mostly useful when xxHash is compiled as a shared library,
* since the returned value comes from the library, as opposed to header file.
*
* @return @ref XXH_VERSION_NUMBER of the invoked library.
*/
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
/* ****************************
* Common basic types
******************************/
#include <stddef.h> /* size_t */
/*!
* @brief Exit code for the streaming API.
*/
typedef enum {
XXH_OK = 0, /*!< OK */
XXH_ERROR /*!< Error */
} XXH_errorcode;
/*-**********************************************************************
* 32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
* @brief An unsigned 32-bit integer.
*
* Not necessarily defined to `uint32_t` but functionally equivalent.
*/
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint32_t XXH32_hash_t;
#else
# include <limits.h>
# if UINT_MAX == 0xFFFFFFFFUL
typedef unsigned int XXH32_hash_t;
# elif ULONG_MAX == 0xFFFFFFFFUL
typedef unsigned long XXH32_hash_t;
# else
# error "unsupported platform: need a 32-bit type"
# endif
#endif
/*!
* @}
*
* @defgroup XXH32_family XXH32 family
* @ingroup public
* Contains functions used in the classic 32-bit xxHash algorithm.
*
* @note
* XXH32 is useful for older platforms, with no or poor 64-bit performance.
* Note that the @ref XXH3_family provides competitive speed for both 32-bit
* and 64-bit systems, and offers true 64/128 bit hash results.
*
* @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
* @see @ref XXH32_impl for implementation details
* @{
*/
/*!
* @brief Calculates the 32-bit hash of @p input using xxHash32.
*
* Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
*
* See @ref single_shot_example "Single Shot Example" for an example.
*
* @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
* @param seed The 32-bit seed to alter the hash's output predictably.
*
* @pre
* The memory between @p input and @p input + @p length must be valid,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
* @return The calculated 32-bit hash value.
*
* @see
* XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
* Direct equivalents for the other variants of xxHash.
* @see
* XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
*/
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
#ifndef XXH_NO_STREAM
/*!
* Streaming functions generate the xxHash value from an incremental input.
* This method is slower than single-call functions, due to state management.
* For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
*
* An XXH state must first be allocated using `XXH*_createState()`.
*
* Start a new hash by initializing the state with a seed using `XXH*_reset()`.
*
* Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
*
* The function returns an error code, with 0 meaning OK, and any other value
* meaning there is an error.
*
* Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
* This function returns the nn-bits hash as an int or long long.
*
* It's still possible to continue inserting input into the hash state after a
* digest, and generate new hash values later on by invoking `XXH*_digest()`.
*
* When done, release the state using `XXH*_freeState()`.
*
* @see streaming_example at the top of @ref xxhash.h for an example.
*/
/*!
* @typedef struct XXH32_state_s XXH32_state_t
* @brief The opaque state struct for the XXH32 streaming API.
*
* @see XXH32_state_s for details.
*/
typedef struct XXH32_state_s XXH32_state_t;
/*!
* @brief Allocates an @ref XXH32_state_t.
*
* Must be freed with XXH32_freeState().
* @return An allocated XXH32_state_t on success, `NULL` on failure.
*/
XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
/*!
* @brief Frees an @ref XXH32_state_t.
*
* Must be allocated with XXH32_createState().
* @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
* @return XXH_OK.
*/
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
/*!
* @brief Copies one @ref XXH32_state_t to another.
*
* @param dst_state The state to copy to.
* @param src_state The state to copy from.
* @pre
* @p dst_state and @p src_state must not be `NULL` and must not overlap.
*/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
/*!
* @brief Resets an @ref XXH32_state_t to begin a new hash.
*
* This function resets and seeds a state. Call it before @ref XXH32_update().
*
* @param statePtr The state struct to reset.
* @param seed The 32-bit seed to alter the hash result predictably.
*
* @pre
* @p statePtr must not be `NULL`.
*
* @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
*/
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
/*!
* @brief Consumes a block of @p input to an @ref XXH32_state_t.
*
* Call this to incrementally consume blocks of data.
*
* @param statePtr The state struct to update.
* @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
*
* @pre
* @p statePtr must not be `NULL`.
* @pre
* The memory between @p input and @p input + @p length must be valid,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
* @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
*/
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
/*!
* @brief Returns the calculated hash value from an @ref XXH32_state_t.
*
* @note
* Calling XXH32_digest() will not affect @p statePtr, so you can update,
* digest, and update again.
*
* @param statePtr The state struct to calculate the hash from.
*
* @pre
* @p statePtr must not be `NULL`.
*
* @return The calculated xxHash32 value from that state.
*/
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
/******* Canonical representation *******/
/*
* The default return values from XXH functions are unsigned 32 and 64 bit
* integers.
* This the simplest and fastest format for further post-processing.
*
* However, this leaves open the question of what is the order on the byte level,
* since little and big endian conventions will store the same number differently.
*
* The canonical representation settles this issue by mandating big-endian
* convention, the same convention as human-readable numbers (large digits first).
*
* When writing hash values to storage, sending them over a network, or printing
* them, it's highly recommended to use the canonical representation to ensure
* portability across a wider range of systems, present and future.
*
* The following functions allow transformation of hash values to and from
* canonical format.
*/
/*!
* @brief Canonical (big endian) representation of @ref XXH32_hash_t.
*/
typedef struct {
unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;
/*!
* @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
*
* @param dst The @ref XXH32_canonical_t pointer to be stored to.
* @param hash The @ref XXH32_hash_t to be converted.
*
* @pre
* @p dst must not be `NULL`.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
/*!
* @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
*
* @param src The @ref XXH32_canonical_t to convert.
*
* @pre
* @p src must not be `NULL`.
*
* @return The converted hash.
*/
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif
/*
* Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
* introduced in CPP17 and C23.
* CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
* C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
*/
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
#else
# define XXH_FALLTHROUGH /* fallthrough */
#endif
/*!
* @}
* @ingroup public
* @{
*/
#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
* 64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
* @brief An unsigned 64-bit integer.
*
* Not necessarily defined to `uint64_t` but functionally equivalent.
*/
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint64_t XXH64_hash_t;
#else
# include <limits.h>
# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
/* LP64 ABI says uint64_t is unsigned long */
typedef unsigned long XXH64_hash_t;
# else
/* the following type must have a width of 64-bit */
typedef unsigned long long XXH64_hash_t;
# endif
#endif
/*!
* @}
*
* @defgroup XXH64_family XXH64 family
* @ingroup public
* @{
* Contains functions used in the classic 64-bit xxHash algorithm.
*
* @note
* XXH3 provides competitive speed for both 32-bit and 64-bit systems,
* and offers true 64/128 bit hash results.
* It provides better speed for systems with vector processing capabilities.
*/
/*!
* @brief Calculates the 64-bit hash of @p input using xxHash64.
*
* This function usually runs faster on 64-bit systems, but slower on 32-bit
* systems (see benchmark).
*
* @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
* @param seed The 64-bit seed to alter the hash's output predictably.
*
* @pre
* The memory between @p input and @p input + @p length must be valid,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
* @return The calculated 64-bit hash.
*
* @see
* XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
* Direct equivalents for the other variants of xxHash.
* @see
* XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
*/
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
/******* Streaming *******/
#ifndef XXH_NO_STREAM
/*!
* @brief The opaque state struct for the XXH64 streaming API.
*
* @see XXH64_state_s for details.
*/
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
/******* Canonical representation *******/
/*!
 * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
 */
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;

/*!
 * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
 *
 * @param dst The @ref XXH64_canonical_t pointer to be stored to.
 * @param hash The @ref XXH64_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);

/*!
 * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
 *
 * @param src The @ref XXH64_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
#ifndef XXH_NO_XXH3
/*!
* @}
* ************************************************************************
* @defgroup XXH3_family XXH3 family
* @ingroup public
* @{
*
* XXH3 is a more recent hash algorithm featuring:
* - Improved speed for both small and large inputs
* - True 64-bit and 128-bit outputs
* - SIMD acceleration
* - Improved 32-bit viability
*
* Speed analysis methodology is explained here:
*
* https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
*
* Compared to XXH64, expect XXH3 to run approximately
* ~2x faster on large inputs and >3x faster on small ones,
* exact differences vary depending on platform.
*
* XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
* but does not require it.
* Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
* at competitive speeds, even without vector support. Further details are
* explained in the implementation.
*
* Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
* ZVector and scalar targets. This can be controlled via the @ref XXH_VECTOR
* macro. For the x86 family, an automatic dispatcher is included separately
* in @ref xxh_x86dispatch.c.
*
* XXH3 implementation is portable:
* it has a generic C90 formulation that can be compiled on any platform,
 * all implementations generate exactly the same hash value on all platforms.
* Starting from v0.8.0, it's also labelled "stable", meaning that
* any future version will also generate the same hash value.
*
* XXH3 offers 2 variants, _64bits and _128bits.
*
* When only 64 bits are needed, prefer invoking the _64bits variant, as it
* reduces the amount of mixing, resulting in faster speed on small inputs.
* It's also generally simpler to manipulate a scalar return type than a struct.
*
* The API supports one-shot hashing, streaming mode, and custom secrets.
*/
/*-**********************************************************************
* XXH3 64-bit variant
************************************************************************/
/*!
* @brief 64-bit unseeded variant of XXH3.
*
* This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however
* it may have slightly better performance due to constant propagation of the
* defaults.
*
* @see
* XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms
* @see
* XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
* @see
* XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version.
*/
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(const void* input, size_t length);
/*!
* @brief 64-bit seeded variant of XXH3
*
* This variant generates a custom secret on the fly based on default secret
* altered using the `seed` value.
*
* While this operation is decently fast, note that it's not completely free.
*
* @note
* seed == 0 produces the same results as @ref XXH3_64bits().
*
* @param input The data to hash
* @param length The length
* @param seed The 64-bit seed to alter the state.
*/
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(const void* input, size_t length, XXH64_hash_t seed);
/*!
* The bare minimum size for a custom secret.
*
* @see
* XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
* XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
*/
#define XXH3_SECRET_SIZE_MIN 136
/*!
* @brief 64-bit variant of XXH3 with a custom "secret".
*
* It's possible to provide any blob of bytes as a "secret" to generate the hash.
* This makes it more difficult for an external actor to prepare an intentional collision.
* The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
* However, the quality of the secret impacts the dispersion of the hash algorithm.
* Therefore, the secret _must_ look like a bunch of random bytes.
* Avoid "trivial" or structured data such as repeated sequences or a text document.
* Whenever in doubt about the "randomness" of the blob of bytes,
* consider employing "XXH3_generateSecret()" instead (see below).
* It will generate a proper high entropy secret derived from the blob of bytes.
* Another advantage of using XXH3_generateSecret() is that
* it guarantees that all bits within the initial blob of bytes
* will impact every bit of the output.
* This is not necessarily the case when using the blob of bytes directly
* because, when hashing _small_ inputs, only a portion of the secret is employed.
*/
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
/******* Streaming *******/
#ifndef XXH_NO_STREAM
/*
* Streaming requires state maintenance.
* This operation costs memory and CPU.
* As a consequence, streaming is slower than one-shot hashing.
* For better performance, prefer one-shot functions whenever applicable.
*/
/*!
* @brief The state struct for the XXH3 streaming API.
*
* @see XXH3_state_s for details.
*/
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);
/*
* XXH3_64bits_reset():
* Initialize with default parameters.
* digest will be equivalent to `XXH3_64bits()`.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
* XXH3_64bits_reset_withSeed():
* Generate a custom secret from `seed`, and store it into `statePtr`.
* digest will be equivalent to `XXH3_64bits_withSeed()`.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*!
* XXH3_64bits_reset_withSecret():
* `secret` is referenced, it _must outlive_ the hash streaming session.
* Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
* and the quality of produced hash values depends on secret's entropy
* (secret's content should look like a bunch of random bytes).
* When in doubt about the randomness of a candidate `secret`,
* consider employing `XXH3_generateSecret()` instead (see below).
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
/* note : canonical representation of XXH3 is the same as XXH64
* since they both produce XXH64_hash_t values */
/*-**********************************************************************
* XXH3 128-bit variant
************************************************************************/
/*!
* @brief The return value from 128-bit hashes.
*
* Stored in little endian order, although the fields themselves are in native
* endianness.
*/
typedef struct {
XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
XXH64_hash_t high64; /*!< `value >> 64` */
} XXH128_hash_t;
/*!
* @brief Unseeded 128-bit variant of XXH3
*
* The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
* for shorter inputs.
*
* This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however
* it may have slightly better performance due to constant propagation of the
* defaults.
*
* @see
* XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
* @see
* XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
* @see
* XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
*/
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(const void* data, size_t len);
/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
/******* Streaming *******/
#ifndef XXH_NO_STREAM
/*
* Streaming requires state maintenance.
* This operation costs memory and CPU.
* As a consequence, streaming is slower than one-shot hashing.
* For better performance, prefer one-shot functions whenever applicable.
*
* XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
* Use already declared XXH3_createState() and XXH3_freeState().
*
* All reset and streaming functions have same meaning as their 64-bit counterpart.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
/* Following helper functions make it possible to compare XXH128_hash_t values.
* Since XXH128_hash_t is a structure, this capability is not offered by the language.
* Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
/*!
* XXH128_isEqual():
* Return: 1 if `h1` and `h2` are equal, 0 if they are not.
*/
XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
/*!
* @brief Compares two @ref XXH128_hash_t
* This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
*
* @return: >0 if *h128_1 > *h128_2
* =0 if *h128_1 == *h128_2
* <0 if *h128_1 < *h128_2
*/
XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(const void* h128_1, const void* h128_2);
/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
#endif /* !XXH_NO_XXH3 */
#endif /* XXH_NO_LONG_LONG */
/*!
* @}
*/
#endif /* XXHASH_H_5627135585666179 */
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
* This section contains declarations which are not guaranteed to remain stable.
* They may change in future versions, becoming incompatible with a different
* version of the library.
* These declarations should only be used with static linking.
* Never use them in association with dynamic linking!
***************************************************************************** */
/*
* These definitions are only present to allow static allocation
* of XXH states, on stack or in a struct, for example.
* Never **ever** access their members directly.
*/
/*!
* @internal
* @brief Structure for XXH32 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
* an opaque type. This allows fields to safely be changed.
*
* Typedef'd to @ref XXH32_state_t.
* Do not access the members of this struct directly.
* @see XXH64_state_s, XXH3_state_s
*/
struct XXH32_state_s {
   /* Every member is a 32-bit XXH32_hash_t, so there is no internal padding
    * and the layout is the same on every platform. */
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */
#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
/*!
* @internal
* @brief Structure for XXH64 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
* an opaque type. This allows fields to safely be changed.
*
* Typedef'd to @ref XXH64_state_t.
* Do not access the members of this struct directly.
* @see XXH32_state_s, XXH3_state_s
*/
struct XXH64_state_s {
   /* 64-bit members are grouped first so the struct is naturally aligned. */
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field; also pads @ref reserved64 to an 8-byte boundary. */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */
#ifndef XXH_NO_XXH3
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
# include <stdalign.h>
# define XXH_ALIGN(n) alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
# define XXH_ALIGN(n) alignas(n)
#elif defined(__GNUC__)
# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
# define XXH_ALIGN(n) __declspec(align(n))
#else
# define XXH_ALIGN(n) /* disabled */
#endif
/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
&& ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
&& defined(__GNUC__)
# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif
/*!
* @brief The size of the internal XXH3 buffer.
*
* This is the optimal update size for incremental hashing.
*
 * @see XXH3_64bits_update(), XXH3_128bits_update().
*/
#define XXH3_INTERNALBUFFER_SIZE 256
/*!
* @brief Default size of the secret buffer (and @ref XXH3_kSecret).
*
* This is the size used in @ref XXH3_kSecret and the seeded functions.
*
* Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
*/
#define XXH3_SECRET_DEFAULT_SIZE 192
/*!
* @internal
* @brief Structure for XXH3 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
* Otherwise it is an opaque type.
* Never use this definition in combination with dynamic library.
* This allows fields to safely be changed in the future.
*
* @note ** This structure has a strict alignment requirement of 64 bytes!! **
* Do not allocate this with `malloc()` or `new`,
* it will not be sufficiently aligned.
* Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
*
* Typedef'd to @ref XXH3_state_t.
* Do never access the members of this struct directly.
*
* @see XXH3_INITSTATE() for stack initialization.
* @see XXH3_createState(), XXH3_freeState().
* @see XXH32_state_s, XXH64_state_s
*/
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed so far. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
};  /* typedef'd to XXH3_state_t */
#undef XXH_ALIGN_MEMBER
/*!
* @brief Initializes a stack-allocated `XXH3_state_s`.
*
* When the @ref XXH3_state_t structure is merely emplaced on stack,
* it should be initialized with XXH3_INITSTATE() or a memset()
* in case its first reset uses XXH3_NNbits_reset_withSeed().
* This init can be omitted if the first reset uses default or _withSecret mode.
* This operation isn't necessary when the state is created with XXH3_createState().
* Note that this doesn't prepare the state for a streaming operation,
* it's still necessary to use XXH3_NNbits_reset*() afterwards.
*/
#define XXH3_INITSTATE(XXH3_state_ptr) { (XXH3_state_ptr)->seed = 0; }
/*!
* simple alias to pre-selected XXH3_128bits variant
*/
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
/* === Experimental API === */
/* Symbols defined below must be considered tied to a specific library version. */
/*!
* XXH3_generateSecret():
*
* Derive a high-entropy secret from any user-defined content, named customSeed.
* The generated secret can be used in combination with `*_withSecret()` functions.
* The `_withSecret()` variants are useful to provide a higher level of protection
* than 64-bit seed, as it becomes much more difficult for an external actor to
* guess how to impact the calculation logic.
*
* The function accepts as input a custom seed of any length and any content,
* and derives from it a high-entropy secret of length @p secretSize into an
* already allocated buffer @p secretBuffer.
*
* The generated secret can then be used with any `*_withSecret()` variant.
* The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
* @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
* are part of this list. They all accept a `secret` parameter
* which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
* _and_ feature very high entropy (consist of random-looking bytes).
* These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
* be employed to ensure proper quality.
*
* @p customSeed can be anything. It can have any size, even small ones,
* and its content can be anything, even "poor entropy" sources such as a bunch
* of zeroes. The resulting `secret` will nonetheless provide all required qualities.
*
* @pre
* - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
* - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
*
* Example code:
* @code{.c}
* #include <stdio.h>
* #include <stdlib.h>
* #include <string.h>
* #define XXH_STATIC_LINKING_ONLY // expose unstable API
* #include "xxhash.h"
* // Hashes argv[2] using the entropy from argv[1].
* int main(int argc, char* argv[])
* {
* char secret[XXH3_SECRET_SIZE_MIN];
 *     if (argc != 3) { return 1; }
* XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
* XXH64_hash_t h = XXH3_64bits_withSecret(
* argv[2], strlen(argv[2]),
* secret, sizeof(secret)
* );
* printf("%016llx\n", (unsigned long long) h);
* }
* @endcode
*/
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);
/*!
* @brief Generate the same secret as the _withSeed() variants.
*
* The generated secret can be used in combination with
*`*_withSecret()` and `_withSecretandSeed()` variants.
*
* Example C++ `std::string` hash class:
* @code{.cpp}
* #include <string>
* #define XXH_STATIC_LINKING_ONLY // expose unstable API
* #include "xxhash.h"
* // Slow, seeds each time
* class HashSlow {
* XXH64_hash_t seed;
* public:
* HashSlow(XXH64_hash_t s) : seed{s} {}
* size_t operator()(const std::string& x) const {
* return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
* }
* };
* // Fast, caches the seeded secret for future uses.
* class HashFast {
* unsigned char secret[XXH3_SECRET_SIZE_MIN];
* public:
* HashFast(XXH64_hash_t s) {
 *        XXH3_generateSecret_fromSeed(secret, s);
* }
* size_t operator()(const std::string& x) const {
* return size_t{
* XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
* };
* }
* };
* @endcode
* @param secretBuffer A writable buffer of @ref XXH3_SECRET_SIZE_MIN bytes
* @param seed The seed to seed the state.
*/
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);
/*!
* These variants generate hash values using either
* @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
* or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
*
* This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
* `_withSeed()` has to generate the secret on the fly for "large" keys.
* It's fast, but can be perceptible for "not so large" keys (< 1 KB).
* `_withSecret()` has to generate the masks on the fly for "small" keys,
* which requires more instructions than _withSeed() variants.
* Therefore, _withSecretandSeed variant combines the best of both worlds.
*
* When @p secret has been generated by XXH3_generateSecret_fromSeed(),
* this variant produces *exactly* the same results as `_withSeed()` variant,
* hence offering only a pure speed benefit on "large" input,
* by skipping the need to regenerate the secret for every large input.
*
* Another usage scenario is to hash the secret to a 64-bit hash value,
* for example with XXH3_64bits(), which then becomes the seed,
* and then employ both the seed and the secret in _withSecretandSeed().
* On top of speed, an added benefit is that each bit in the secret
* has a 50% chance to swap each bit in the output, via its impact to the seed.
*
* This is not guaranteed when using the secret directly in "small data" scenarios,
* because only portions of the secret are employed for small data.
*/
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
const void* secret, size_t secretSize,
XXH64_hash_t seed);
/*! @copydoc XXH3_64bits_withSecretandSeed() */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t length,
const void* secret, size_t secretSize,
XXH64_hash_t seed64);
#ifndef XXH_NO_STREAM
/*! @copydoc XXH3_64bits_withSecretandSeed() */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
const void* secret, size_t secretSize,
XXH64_hash_t seed64);
/*! @copydoc XXH3_64bits_withSecretandSeed() */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
const void* secret, size_t secretSize,
XXH64_hash_t seed64);
#endif /* !XXH_NO_STREAM */
#endif /* !XXH_NO_XXH3 */
#endif /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# define XXH_IMPLEMENTATION
#endif
#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */
/*-**********************************************************************
* xxHash implementation
*-**********************************************************************
* xxHash's implementation used to be hosted inside xxhash.c.
*
* However, inlining requires implementation to be visible to the compiler,
* hence be included alongside the header.
* Previously, implementation was hosted inside xxhash.c,
* which was then #included when inlining was activated.
* This construction created issues with a few build and install systems,
* as it required xxhash.c to be stored in /include directory.
*
* xxHash implementation is now directly integrated within xxhash.h.
* As a consequence, xxhash.c is no longer needed in /include.
*
* xxhash.c is still available and is still useful.
* In a "normal" setup, when xxhash is not inlined,
* xxhash.h only exposes the prototypes and public symbols,
* while xxhash.c can be built into an object file xxhash.o
* which can then be linked into the final binary.
************************************************************************/
#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
|| defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
# define XXH_IMPLEM_13a8737387
/* *************************************
* Tuning parameters
***************************************/
/*!
* @defgroup tuning Tuning parameters
* @{
*
* Various macros to control xxHash's behavior.
*/
#ifdef XXH_DOXYGEN
/*!
* @brief Define this to disable 64-bit code.
*
* Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
*/
# define XXH_NO_LONG_LONG
# undef XXH_NO_LONG_LONG /* don't actually */
/*!
* @brief Controls how unaligned memory is accessed.
*
* By default, access to unaligned memory is controlled by `memcpy()`, which is
* safe and portable.
*
* Unfortunately, on some target/compiler combinations, the generated assembly
* is sub-optimal.
*
* The below switch allow selection of a different access method
* in the search for improved performance.
*
* @par Possible options:
*
* - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
* @par
* Use `memcpy()`. Safe and portable. Note that most modern compilers will
* eliminate the function call and treat it as an unaligned access.
*
* - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
* @par
* Depends on compiler extensions and is therefore not portable.
* This method is safe _if_ your compiler supports it,
* and *generally* as fast or faster than `memcpy`.
*
* - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
* @par
* Casts directly and dereferences. This method doesn't depend on the
* compiler, but it violates the C standard as it directly dereferences an
* unaligned pointer. It can generate buggy code on targets which do not
* support unaligned memory accesses, but in some circumstances, it's the
* only known way to get the most performance.
*
* - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
* @par
* Also portable. This can generate the best code on old compilers which don't
* inline small `memcpy()` calls, and it might also be faster on big-endian
* systems which lack a native byteswap instruction. However, some compilers
* will emit literal byteshifts even if the target supports unaligned access.
* .
*
* @warning
* Methods 1 and 2 rely on implementation-defined behavior. Use these with
* care, as what works on one compiler/platform/optimization level may cause
* another to read garbage data or even crash.
*
* See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
*
* Prefer these methods in priority order (0 > 3 > 1 > 2)
*/
# define XXH_FORCE_MEMORY_ACCESS 0
/*!
* @def XXH_SIZE_OPT
* @brief Controls how much xxHash optimizes for size.
*
* xxHash, when compiled, tends to result in a rather large binary size. This
* is mostly due to heavy usage to forced inlining and constant folding of the
* @ref XXH3_family to increase performance.
*
* However, some developers prefer size over speed. This option can
* significantly reduce the size of the generated code. When using the `-Os`
* or `-Oz` options on GCC or Clang, this is defined to 1 by default,
* otherwise it is defined to 0.
*
* Most of these size optimizations can be controlled manually.
*
* This is a number from 0-2.
* - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
* comes first.
* - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
* conservative and disables hacks that increase code size. It implies the
* options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
* and @ref XXH3_NEON_LANES == 8 if they are not already defined.
* - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
* Performance may cry. For example, the single shot functions just use the
* streaming API.
*/
# define XXH_SIZE_OPT 0
/*!
* @def XXH_FORCE_ALIGN_CHECK
* @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
* and XXH64() only).
*
* This is an important performance trick for architectures without decent
* unaligned memory access performance.
*
* It checks for input alignment, and when conditions are met, uses a "fast
* path" employing direct 32-bit/64-bit reads, resulting in _dramatically
* faster_ read speed.
*
* The check costs one initial branch per hash, which is generally negligible,
* but not zero.
*
* Moreover, it's not useful to generate an additional code path if memory
* access uses the same instruction for both aligned and unaligned
* addresses (e.g. x86 and aarch64).
*
* In these cases, the alignment check can be removed by setting this macro to 0.
* Then the code will always use unaligned memory access.
* Align check is automatically disabled on x86, x64, ARM64, and some ARM chips
* which are platforms known to offer good unaligned memory accesses performance.
*
* It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
*
* This option does not affect XXH3 (only XXH32 and XXH64).
*/
# define XXH_FORCE_ALIGN_CHECK 0
/*!
* @def XXH_NO_INLINE_HINTS
* @brief When non-zero, sets all functions to `static`.
*
* By default, xxHash tries to force the compiler to inline almost all internal
* functions.
*
* This can usually improve performance due to reduced jumping and improved
* constant folding, but significantly increases the size of the binary which
* might not be favorable.
*
* Additionally, sometimes the forced inlining can be detrimental to performance,
* depending on the architecture.
*
* XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
* compiler full control on whether to inline or not.
*
* When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
* @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
*/
# define XXH_NO_INLINE_HINTS 0
/*!
* @def XXH32_ENDJMP
* @brief Whether to use a jump for `XXH32_finalize`.
*
* For performance, `XXH32_finalize` uses multiple branches in the finalizer.
* This is generally preferable for performance,
* but depending on exact architecture, a jmp may be preferable.
*
* This setting is only possibly making a difference for very small inputs.
*/
# define XXH32_ENDJMP 0
/*!
* @internal
* @brief Redefines old internal names.
*
* For compatibility with code that uses xxHash's internals before the names
* were changed to improve namespacing. There is no other reason to use this.
*/
# define XXH_OLD_NAMES
# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
/*!
* @def XXH_NO_STREAM
* @brief Disables the streaming API.
*
* When xxHash is not inlined and the streaming functions are not used, disabling
* the streaming functions can improve code size significantly, especially with
* the @ref XXH3_family which tends to make constant folded copies of itself.
*/
# define XXH_NO_STREAM
# undef XXH_NO_STREAM /* don't actually */
#endif /* XXH_DOXYGEN */
/*!
* @}
*/
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
/* prefer __packed__ structures (method 1) for GCC
* < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
* which for some reason does unaligned loads. */
# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
#ifndef XXH_SIZE_OPT
/* default to 1 for -Os or -Oz */
# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
# define XXH_SIZE_OPT 1
# else
# define XXH_SIZE_OPT 0
# endif
#endif
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
/* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
# if XXH_SIZE_OPT >= 1 || \
defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
|| defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
# define XXH_FORCE_ALIGN_CHECK 0
# else
# define XXH_FORCE_ALIGN_CHECK 1
# endif
#endif
#ifndef XXH_NO_INLINE_HINTS
# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
# define XXH_NO_INLINE_HINTS 1
# else
# define XXH_NO_INLINE_HINTS 0
# endif
#endif
#ifndef XXH32_ENDJMP
/* generally preferable for performance */
# define XXH32_ENDJMP 0
#endif
/*!
* @defgroup impl Implementation
* @{
*/
/* *************************************
* Includes & Memory related functions
***************************************/
#if defined(XXH_NO_STREAM)
/* nothing */
#elif defined(XXH_NO_STDLIB)
/* When requesting to disable any mention of stdlib,
* the library loses the ability to invoked malloc / free.
* In practice, it means that functions like `XXH*_createState()`
* will always fail, and return NULL.
* This flag is useful in situations where
* xxhash.h is integrated into some kernel, embedded or limited environment
* without access to dynamic allocation.
*/
/* Stub allocator for XXH_NO_STDLIB builds: allocation always fails,
 * so XXH*_createState() will return NULL in such environments. */
static XXH_CONSTF void* XXH_malloc(size_t s)
{
    (void)s;        /* unused */
    return NULL;    /* no dynamic allocation available */
}
/* Matching stub: a no-op, since nothing can ever be allocated. */
static void XXH_free(void* p)
{
    (void)p;        /* unused */
}
#else
/*
* Modify the local functions below should you wish to use
* different memory routines for malloc() and free()
*/
#include <stdlib.h>
/*!
 * @internal
 * @brief Allocation wrapper; substitute a custom allocator here if needed.
 */
static XXH_MALLOCF void* XXH_malloc(size_t s)
{
    return malloc(s);
}
/*!
 * @internal
 * @brief Deallocation wrapper matching XXH_malloc().
 */
static void XXH_free(void* p)
{
    free(p);
}
#endif /* XXH_NO_STDLIB */
#include <string.h>
/*!
 * @internal
 * @brief memcpy wrapper; substitute a custom copy routine here if needed.
 *
 * @param dest Destination buffer (must not overlap @p src).
 * @param src  Source buffer.
 * @param size Number of bytes to copy.
 * @return @p dest, as memcpy() does.
 */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest, src, size);
}
#include <limits.h> /* ULLONG_MAX */
/* *************************************
* Compiler Specific Options
***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
#if XXH_NO_INLINE_HINTS /* disable inlining hints */
# if defined(__GNUC__) || defined(__clang__)
# define XXH_FORCE_INLINE static __attribute__((unused))
# else
# define XXH_FORCE_INLINE static
# endif
# define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
# define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER) /* Visual Studio */
# define XXH_FORCE_INLINE static __forceinline
# define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
# define XXH_FORCE_INLINE static inline
# define XXH_NO_INLINE static
#else
# define XXH_FORCE_INLINE static
# define XXH_NO_INLINE static
#endif
/* *************************************
* Debug
***************************************/
/*!
* @ingroup tuning
* @def XXH_DEBUGLEVEL
* @brief Sets the debugging level.
*
* XXH_DEBUGLEVEL is expected to be defined externally, typically via the
* compiler's command line options. The value must be a number.
*/
#ifndef XXH_DEBUGLEVEL
# ifdef DEBUGLEVEL /* backwards compat */
# define XXH_DEBUGLEVEL DEBUGLEVEL
# else
# define XXH_DEBUGLEVEL 0
# endif
#endif
#if (XXH_DEBUGLEVEL>=1)
# include <assert.h> /* note: can still be disabled with NDEBUG */
# define XXH_ASSERT(c) assert(c)
#else
# define XXH_ASSERT(c) ((void)0)
#endif
/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
# else
# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
# endif
# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif
/*!
* @internal
* @def XXH_COMPILER_GUARD(var)
* @brief Used to prevent unwanted optimizations for @p var.
*
* It uses an empty GCC inline assembly statement with a register constraint
* which forces @p var into a general purpose register (eg eax, ebx, ecx
* on x86) and marks it as modified.
*
* This is used in a few places to avoid unwanted autovectorization (e.g.
* XXH32_round()). All vectorization we want is explicit via intrinsics,
* and _usually_ isn't wanted elsewhere.
*
* We also use it to prevent unwanted constant folding for AArch64 in
* XXH3_initCustomSecret_scalar().
*/
#if defined(__GNUC__) || defined(__clang__)
# define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
# define XXH_COMPILER_GUARD(var) ((void)0)
#endif
/* *************************************
* Basic Types
***************************************/
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint8_t xxh_u8;
#else
typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;
#ifdef XXH_OLD_NAMES
# define BYTE xxh_u8
# define U8 xxh_u8
# define U32 xxh_u32
#endif
/* *** Memory access *** */
/*!
* @internal
* @fn xxh_u32 XXH_read32(const void* ptr)
* @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
*
* @param ptr The pointer to read from.
* @return The 32-bit native endian integer from the bytes at @p ptr.
*/
/*!
* @internal
* @fn xxh_u32 XXH_readLE32(const void* ptr)
* @brief Reads an unaligned 32-bit little endian integer from @p ptr.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
*
* @param ptr The pointer to read from.
* @return The 32-bit little endian integer from the bytes at @p ptr.
*/
/*!
* @internal
* @fn xxh_u32 XXH_readBE32(const void* ptr)
* @brief Reads an unaligned 32-bit big endian integer from @p ptr.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
*
* @param ptr The pointer to read from.
* @return The 32-bit big endian integer from the bytes at @p ptr.
*/
/*!
* @internal
* @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
* @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
* Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
* always @ref XXH_alignment::XXH_unaligned.
*
* @param ptr The pointer to read from.
* @param align Whether @p ptr is aligned.
* @pre
* If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
* aligned.
* @return The 32-bit little endian integer from the bytes at @p ptr.
*/
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
* Manual byteshift. Best for old compilers which don't inline memcpy.
* We actually directly use XXH_readLE32 and XXH_readBE32.
*/
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/*
* Force direct memory access. Only works on CPU which support unaligned memory
* access in hardware.
*/
/* Direct dereference: only valid on CPUs with hardware unaligned loads. */
static xxh_u32 XXH_read32(const void* memPtr)
{
    return *(const xxh_u32*)memPtr;
}
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/*
* __attribute__((aligned(1))) is supported by gcc and clang. Originally the
* documentation claimed that it only increased the alignment, but actually it
* can decrease it on gcc, clang, and icc:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
* https://gcc.godbolt.org/z/xYez1j67Y.
*/
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    /* The aligned(1) qualifier licenses an unaligned load of a 32-bit value. */
    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign_u32;
    return *(const xxh_unalign_u32*)ptr;
}
#else
/*
* Portable and safe solution. Generally efficient.
* see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
*/
static xxh_u32 XXH_read32(const void* memPtr)
{
    /* memcpy is the portable way to express an unaligned load;
     * modern compilers lower this to a single load instruction. */
    xxh_u32 result;
    XXH_memcpy(&result, memPtr, sizeof(result));
    return result;
}
#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
/* *** Endianness *** */
/*!
* @ingroup tuning
* @def XXH_CPU_LITTLE_ENDIAN
* @brief Whether the target is little endian.
*
* Defined to 1 if the target is little endian, or 0 if it is big endian.
* It can be defined externally, for example on the compiler command line.
*
* If it is not defined,
* a runtime check (which is usually constant folded) is used instead.
*
* @note
* This is not necessarily defined to an integer constant.
*
* @see XXH_isLittleEndian() for the runtime check.
*/
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
* Try to detect endianness automatically, to avoid the nonstandard behavior
* in `XXH_isLittleEndian()`
*/
# if defined(_WIN32) /* Windows is always little endian */ \
|| defined(__LITTLE_ENDIAN__) \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
# define XXH_CPU_LITTLE_ENDIAN 1
# elif defined(__BIG_ENDIAN__) \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
# define XXH_CPU_LITTLE_ENDIAN 0
# else
/*!
* @internal
* @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
*
* Most compilers will constant fold this.
*/
static int XXH_isLittleEndian(void)
{
    /*
     * Store a 32-bit 1 and inspect its first byte: 1 on little endian
     * hosts, 0 on big endian. Union type punning is well-defined in C.
     * Deliberately not `static`: that hinders constant folding.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } probe = { 1 };
    return (int)probe.c[0];
}
# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
# endif
#endif
/* ****************************************
* Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#ifdef __has_builtin
# define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
# define XXH_HAS_BUILTIN(x) 0
#endif
/*!
* @internal
* @def XXH_rotl32(x,r)
* @brief 32-bit rotate left.
*
* @param x The 32-bit integer to be rotated.
* @param r The number of bits to rotate.
* @pre
* @p r > 0 && @p r < 32
* @note
* @p x and @p r may be evaluated multiple times.
* @return The rotated result.
*/
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
&& XXH_HAS_BUILTIN(__builtin_rotateleft64)
# define XXH_rotl32 __builtin_rotateleft32
# define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
/*!
* @internal
* @fn xxh_u32 XXH_swap32(xxh_u32 x)
* @brief A 32-bit byteswap.
*
* @param x The 32-bit integer to byteswap.
* @return @p x, byteswapped.
*/
#if defined(_MSC_VER) /* Visual Studio */
# define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    /* Reverse the four bytes of x. */
    xxh_u32 const b0 = (x >> 24) & 0x000000ff;
    xxh_u32 const b1 = (x >>  8) & 0x0000ff00;
    xxh_u32 const b2 = (x <<  8) & 0x00ff0000;
    xxh_u32 const b3 = (x << 24) & 0xff000000;
    return b3 | b2 | b1 | b0;
}
#endif
/* ***************************
* Memory reads
*****************************/
/*!
* @internal
* @brief Enum to indicate whether a pointer is aligned.
*/
typedef enum {
    XXH_aligned,  /*!< Pointer is known to satisfy the natural alignment of the access width */
    XXH_unaligned /*!< No alignment guarantee; read through the generic (safe) path */
} XXH_alignment;
/*
* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
*
* This is ideal for older compilers which don't inline memcpy.
*/
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    /* Assemble little endian: byte 0 is least significant. */
    const xxh_u8* b = (const xxh_u8*)memPtr;
    xxh_u32 v = b[0];
    v |= (xxh_u32)b[1] << 8;
    v |= (xxh_u32)b[2] << 16;
    v |= (xxh_u32)b[3] << 24;
    return v;
}
XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    /* Assemble big endian: byte 0 is most significant. */
    const xxh_u8* b = (const xxh_u8*)memPtr;
    xxh_u32 v = b[3];
    v |= (xxh_u32)b[2] << 8;
    v |= (xxh_u32)b[1] << 16;
    v |= (xxh_u32)b[0] << 24;
    return v;
}
#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}
static xxh_u32 XXH_readBE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif
XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    /* Unaligned path goes through the generic loader. */
    if (align != XXH_aligned)
        return XXH_readLE32(ptr);
    /* Aligned path may dereference directly; swap on big endian hosts. */
    {   xxh_u32 const v = *(const xxh_u32*)ptr;
        return XXH_CPU_LITTLE_ENDIAN ? v : XXH_swap32(v);
    }
}
/* *************************************
* Misc
***************************************/
/*! @ingroup public
 *  @return The library version as a single integer (XXH_VERSION_NUMBER). */
XXH_PUBLIC_API unsigned XXH_versionNumber (void)
{
    return XXH_VERSION_NUMBER;
}
/* *******************************************************************
* 32-bit hash functions
*********************************************************************/
/*!
* @}
* @defgroup XXH32_impl XXH32 implementation
* @ingroup impl
*
* Details on the XXH32 implementation.
* @{
*/
/* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
#ifdef XXH_OLD_NAMES
# define PRIME32_1 XXH_PRIME32_1
# define PRIME32_2 XXH_PRIME32_2
# define PRIME32_3 XXH_PRIME32_3
# define PRIME32_4 XXH_PRIME32_4
# define PRIME32_5 XXH_PRIME32_5
#endif
/*!
* @internal
* @brief Normal stripe processing routine.
*
* This shuffles the bits so that any bit from @p input impacts several bits in
* @p acc.
*
* @param acc The accumulator lane.
* @param input The stripe of input to mix.
* @return The mixed accumulator lane.
*/
/*!
 * @internal
 * @brief Mixes one 4-byte lane of input into an accumulator.
 *
 * multiply / rotate / multiply: every input bit influences several
 * accumulator bits.
 *
 * @param acc The accumulator lane.
 * @param input The stripe of input to mix.
 * @return The mixed accumulator lane.
 */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc = acc + input * XXH_PRIME32_2;
    acc = XXH_rotl32(acc, 13);
    acc = acc * XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK: this compiler fence is the only thing that stops GCC/Clang
     * from autovectorizing the XXH32 loop (pragmas/attributes don't work).
     * Vectorized XXH32 is actually slower on SSE4.1:
     *  - pmulld has high latency, making 4-wide multiplies a net loss;
     *  - the rotate takes 4 SSE instructions vs 1 scalar roll/shldl;
     *  - scalar code enjoys better instruction-level parallelism, since
     *    SIMD serializes the four lanes into lockstep.
     * Also applied on AArch64, where Clang autovectorizes incorrectly and a
     * NEON version would be no faster than scalar anyway.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}
/*!
* @internal
* @brief Mixes all bits to finalize the hash.
*
* The final mix ensures that all input bits have a chance to impact any bit in
* the output digest, resulting in an unbiased distribution.
*
* @param hash The hash to avalanche.
* @return The avalanched hash.
*/
static xxh_u32 XXH32_avalanche(xxh_u32 hash)
{
    /* Final xorshift/multiply cascade: lets every input bit reach every
     * output bit, producing an unbiased distribution. */
    xxh_u32 h = hash;
    h ^= h >> 15;
    h *= XXH_PRIME32_2;
    h ^= h >> 13;
    h *= XXH_PRIME32_3;
    h ^= h >> 16;
    return h;
}
#define XXH_get32bits(p) XXH_readLE32_align(p, align)
/*!
* @internal
* @brief Processes the last 0-15 bytes of @p ptr.
*
* There may be up to 15 bytes remaining to consume from the input.
* This final stage will digest them to ensure that all input bytes are present
* in the final mix.
*
* @param hash The hash to finalize.
* @param ptr The pointer to the remaining input.
* @param len The remaining length, modulo 16.
* @param align Whether @p ptr is aligned.
* @return The finalized hash.
* @see XXH64_finalize().
*/
static XXH_PUREF xxh_u32
XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
/* Mix one trailing byte into the hash (modifies `hash` and advances `ptr`). */
#define XXH_PROCESS1 do {                             \
    hash += (*ptr++) * XXH_PRIME32_5;                 \
    hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1;      \
} while (0)
/* Mix one trailing 4-byte word into the hash (modifies `hash`, advances `ptr`). */
#define XXH_PROCESS4 do {                             \
    hash += XXH_get32bits(ptr) * XXH_PRIME32_3;       \
    ptr += 4;                                         \
    hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4;      \
} while (0)
    /* NULL ptr is only legal when there is nothing left to consume. */
    if (ptr==NULL) XXH_ASSERT(len == 0);
    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        /* Consume 4-byte words, then single bytes, then avalanche. */
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(hash);
    } else {
        /* Jump-table version: each case consumes exactly `len` bytes by
         * falling through the right sequence of PROCESS4/PROCESS1 steps. */
         switch(len&15) /* or switch(bEnd - p) */ {
           case 12:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 8:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 4:       XXH_PROCESS4;
                         return XXH32_avalanche(hash);
           case 13:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 9:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 5:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         return XXH32_avalanche(hash);
           case 14:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 10:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 6:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         XXH_PROCESS1;
                         return XXH32_avalanche(hash);
           case 15:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 11:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 7:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 3:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 2:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 1:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 0:       return XXH32_avalanche(hash);
        }
        XXH_ASSERT(0);
        return hash;   /* reaching this point is deemed impossible */
    }
}
#ifdef XXH_OLD_NAMES
# define PROCESS1 XXH_PROCESS1
# define PROCESS4 XXH_PROCESS4
#else
# undef XXH_PROCESS1
# undef XXH_PROCESS4
#endif
/*!
* @internal
* @brief The implementation for @ref XXH32().
*
* @param input , len , seed Directly passed from @ref XXH32().
* @param align Whether @p input is aligned.
* @return The calculated hash.
*/
XXH_FORCE_INLINE XXH_PUREF xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;
    if (input==NULL) XXH_ASSERT(len == 0);
    if (len < 16) {
        /* Too short for a full stripe: start directly from the seeded prime. */
        h32 = seed + XXH_PRIME32_5;
    } else {
        const xxh_u8* const bEnd  = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 acc1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 acc2 = seed + XXH_PRIME32_2;
        xxh_u32 acc3 = seed + 0;
        xxh_u32 acc4 = seed - XXH_PRIME32_1;
        /* Consume 16-byte stripes, 4 bytes per accumulator lane. */
        do {
            acc1 = XXH32_round(acc1, XXH_get32bits(input)); input += 4;
            acc2 = XXH32_round(acc2, XXH_get32bits(input)); input += 4;
            acc3 = XXH32_round(acc3, XXH_get32bits(input)); input += 4;
            acc4 = XXH32_round(acc4, XXH_get32bits(input)); input += 4;
        } while (input < limit);
        /* Merge the four lanes. */
        h32 = XXH_rotl32(acc1, 1)  + XXH_rotl32(acc2, 7)
            + XXH_rotl32(acc3, 12) + XXH_rotl32(acc4, 18);
    }
    h32 += (xxh_u32)len;
    /* `input` now points at the unconsumed tail (< 16 bytes). */
    return XXH32_finalize(h32, input, len&15, align);
}
/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
    /* Size-optimized build: reuse the streaming code path.
     * Simpler to maintain, but slower for small inputs. */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    /* 4-byte aligned input can take the faster aligned path. */
    if (XXH_FORCE_ALIGN_CHECK && ((size_t)input & 3) == 0) {
        return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }
    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
/******* Hash streaming *******/
#ifndef XXH_NO_STREAM
/*! @ingroup XXH32_family
 *  @return A freshly allocated state, or NULL on allocation failure. */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
/*! @ingroup XXH32_family
 *  Releases a state obtained from XXH32_createState(). Always succeeds. */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}
/*! @ingroup XXH32_family
 *  A shallow copy fully clones the hashing state. */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
/*! @ingroup XXH32_family
 *  Re-initializes @p statePtr for a new hash with the given @p seed. */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    memset(statePtr, 0, sizeof(*statePtr));
    /* Seed the four accumulator lanes exactly as the one-shot path does. */
    statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    statePtr->v[1] = seed + XXH_PRIME32_2;
    statePtr->v[2] = seed;   /* also serves as the seed for short inputs in digest() */
    statePtr->v[3] = seed - XXH_PRIME32_1;
    return XXH_OK;
}
/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    /* NULL input is only legal for a zero-length update. */
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }
    { const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;
        state->total_len_32 += (XXH32_hash_t)len;
        /* Record whether >= 16 bytes were seen in total; digest() uses this
         * to pick between the accumulator merge and the short-input path. */
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
        /* Not enough data for a full 16-byte stripe yet: just buffer it. */
        if (state->memsize + len < 16) {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }
        /* First, complete and consume any partially-filled buffer. */
        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            { const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }
        /* Consume full 16-byte stripes directly from the input. */
        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;
            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);
        }
        /* Buffer the remaining tail (< 16 bytes) for the next update/digest. */
        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }
    return XXH_OK;
}
/*! @ingroup XXH32_family
 *  Produces the hash of everything fed to @p state so far.
 *  Does not modify the state: more updates may follow. */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;
    if (!state->large_len) {
        /* Total input < 16 bytes: v[2] still holds the original seed,
         * matching the one-shot short-input path. */
        h32 = state->v[2] + XXH_PRIME32_5;
    } else {
        /* Merge the four accumulator lanes. */
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    }
    h32 += state->total_len_32;
    /* Finalize over the buffered tail held in mem32. */
    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
#endif /* !XXH_NO_STREAM */
/******* Canonical representation *******/
/*!
* @ingroup XXH32_family
* The default return values from XXH functions are unsigned 32 and 64 bit
* integers.
*
* The canonical representation uses big endian convention, the same convention
* as human-readable numbers (large digits first).
*
* This way, hash values can be written into a file or buffer, remaining
* comparable across different systems.
*
* The following functions allow transformation of hash values to and from their
* canonical format.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    xxh_u32 be = hash;
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    /* Canonical form is big endian: swap on little endian hosts. */
    if (XXH_CPU_LITTLE_ENDIAN) be = XXH_swap32(be);
    XXH_memcpy(dst, &be, sizeof(*dst));
}
/*! @ingroup XXH32_family
 *  Decodes a canonical (big endian) representation back into a hash value. */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
#ifndef XXH_NO_LONG_LONG
/* *******************************************************************
* 64-bit hash functions
*********************************************************************/
/*!
* @}
* @ingroup impl
* @{
*/
/******* Memory access *******/
typedef XXH64_hash_t xxh_u64;
#ifdef XXH_OLD_NAMES
# define U64 xxh_u64
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
* Manual byteshift. Best for old compilers which don't inline memcpy.
* We actually directly use XXH_readLE64 and XXH_readBE64.
*/
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
/* Direct dereference: only valid on CPUs with hardware unaligned loads. */
static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*)memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/*
* __attribute__((aligned(1))) is supported by gcc and clang. Originally the
* documentation claimed that it only increased the alignment, but actually it
* can decrease it on gcc, clang, and icc:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
* https://gcc.godbolt.org/z/xYez1j67Y.
*/
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    /* The aligned(1) qualifier licenses an unaligned load of a 64-bit value. */
    typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign_u64;
    return *(const xxh_unalign_u64*)ptr;
}
#else
/*
* Portable and safe solution. Generally efficient.
* see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
*/
static xxh_u64 XXH_read64(const void* memPtr)
{
    /* memcpy is the portable way to express an unaligned load;
     * modern compilers lower this to a single load instruction. */
    xxh_u64 result;
    XXH_memcpy(&result, memPtr, sizeof(result));
    return result;
}
#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
#if defined(_MSC_VER) /* Visual Studio */
# define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
# define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    /* Reverse all eight bytes: swap halves, then 16-bit pairs, then bytes. */
    x = ((x & 0x00000000ffffffffULL) << 32) | ((x & 0xffffffff00000000ULL) >> 32);
    x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x & 0xffff0000ffff0000ULL) >> 16);
    x = ((x & 0x00ff00ff00ff00ffULL) <<  8) | ((x & 0xff00ff00ff00ff00ULL) >>  8);
    return x;
}
#endif
/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    /* Assemble little endian: byte 0 is least significant. */
    const xxh_u8* b = (const xxh_u8*)memPtr;
    xxh_u64 v = b[0];
    v |= (xxh_u64)b[1] << 8;
    v |= (xxh_u64)b[2] << 16;
    v |= (xxh_u64)b[3] << 24;
    v |= (xxh_u64)b[4] << 32;
    v |= (xxh_u64)b[5] << 40;
    v |= (xxh_u64)b[6] << 48;
    v |= (xxh_u64)b[7] << 56;
    return v;
}
XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    /* Assemble big endian: byte 0 is most significant. */
    const xxh_u8* b = (const xxh_u8*)memPtr;
    xxh_u64 v = b[7];
    v |= (xxh_u64)b[6] << 8;
    v |= (xxh_u64)b[5] << 16;
    v |= (xxh_u64)b[4] << 24;
    v |= (xxh_u64)b[3] << 32;
    v |= (xxh_u64)b[2] << 40;
    v |= (xxh_u64)b[1] << 48;
    v |= (xxh_u64)b[0] << 56;
    return v;
}
#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}
static xxh_u64 XXH_readBE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif
XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    /* Unaligned path goes through the generic loader. */
    if (align != XXH_aligned)
        return XXH_readLE64(ptr);
    /* Aligned path may dereference directly; swap on big endian hosts. */
    {   xxh_u64 const v = *(const xxh_u64*)ptr;
        return XXH_CPU_LITTLE_ENDIAN ? v : XXH_swap64(v);
    }
}
/******* xxh64 *******/
/*!
* @}
* @defgroup XXH64_impl XXH64 implementation
* @ingroup impl
*
* Details on the XXH64 implementation.
* @{
*/
/* #define rather that static const, to be used as initializers */
#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
#ifdef XXH_OLD_NAMES
# define PRIME64_1 XXH_PRIME64_1
# define PRIME64_2 XXH_PRIME64_2
# define PRIME64_3 XXH_PRIME64_3
# define PRIME64_4 XXH_PRIME64_4
# define PRIME64_5 XXH_PRIME64_5
#endif
/*! @copydoc XXH32_round */
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    /* multiply / rotate / multiply: mixes one 8-byte lane into the accumulator. */
    acc = acc + input * XXH_PRIME64_2;
    acc = XXH_rotl64(acc, 31);
    acc = acc * XXH_PRIME64_1;
    return acc;
}
/* Folds one accumulator lane into the merged hash during finalization. */
static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    xxh_u64 const mixed = XXH64_round(0, val);
    return (acc ^ mixed) * XXH_PRIME64_1 + XXH_PRIME64_4;
}
/*! @copydoc XXH32_avalanche */
static xxh_u64 XXH64_avalanche(xxh_u64 hash)
{
    /* Final xorshift/multiply cascade: lets every input bit reach every
     * output bit, producing an unbiased distribution. */
    xxh_u64 h = hash;
    h ^= h >> 33;
    h *= XXH_PRIME64_2;
    h ^= h >> 29;
    h *= XXH_PRIME64_3;
    h ^= h >> 32;
    return h;
}
#define XXH_get64bits(p) XXH_readLE64_align(p, align)
/*!
* @internal
* @brief Processes the last 0-31 bytes of @p ptr.
*
* There may be up to 31 bytes remaining to consume from the input.
* This final stage will digest them to ensure that all input bytes are present
* in the final mix.
*
* @param hash The hash to finalize.
* @param ptr The pointer to the remaining input.
* @param len The remaining length, modulo 32.
* @param align Whether @p ptr is aligned.
* @return The finalized hash
* @see XXH32_finalize().
*/
static XXH_PUREF xxh_u64
XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    /* NULL ptr is only legal when there is nothing left to consume. */
    if (ptr==NULL) XXH_ASSERT(len == 0);
    len &= 31;
    /* Consume remaining 8-byte chunks. */
    for (; len >= 8; len -= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        hash = XXH_rotl64(hash ^ k1, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
    }
    /* At most one 4-byte chunk. */
    if (len >= 4) {
        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    /* Trailing bytes. */
    for (; len > 0; --len) {
        hash = XXH_rotl64(hash ^ ((*ptr++) * XXH_PRIME64_5), 11) * XXH_PRIME64_1;
    }
    return XXH64_avalanche(hash);
}
#ifdef XXH_OLD_NAMES
# define PROCESS1_64 XXH_PROCESS1_64
# define PROCESS4_64 XXH_PROCESS4_64
# define PROCESS8_64 XXH_PROCESS8_64
#else
# undef XXH_PROCESS1_64
# undef XXH_PROCESS4_64
# undef XXH_PROCESS8_64
#endif
/*!
* @internal
* @brief The implementation for @ref XXH64().
*
* @param input , len , seed Directly passed from @ref XXH64().
* @param align Whether @p input is aligned.
* @return The calculated hash.
*/
XXH_FORCE_INLINE XXH_PUREF xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;
    if (input==NULL) XXH_ASSERT(len == 0);
    if (len < 32) {
        /* Too short for a full stripe: start directly from the seeded prime. */
        h64 = seed + XXH_PRIME64_5;
    } else {
        const xxh_u8* const bEnd  = input + len;
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 acc1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 acc2 = seed + XXH_PRIME64_2;
        xxh_u64 acc3 = seed + 0;
        xxh_u64 acc4 = seed - XXH_PRIME64_1;
        /* Consume 32-byte stripes, 8 bytes per accumulator lane. */
        do {
            acc1 = XXH64_round(acc1, XXH_get64bits(input)); input += 8;
            acc2 = XXH64_round(acc2, XXH_get64bits(input)); input += 8;
            acc3 = XXH64_round(acc3, XXH_get64bits(input)); input += 8;
            acc4 = XXH64_round(acc4, XXH_get64bits(input)); input += 8;
        } while (input < limit);
        /* Merge the four lanes, then fold each lane back in. */
        h64 = XXH_rotl64(acc1, 1) + XXH_rotl64(acc2, 7)
            + XXH_rotl64(acc3, 12) + XXH_rotl64(acc4, 18);
        h64 = XXH64_mergeRound(h64, acc1);
        h64 = XXH64_mergeRound(h64, acc2);
        h64 = XXH64_mergeRound(h64, acc3);
        h64 = XXH64_mergeRound(h64, acc4);
    }
    h64 += (xxh_u64)len;
    /* `input` now points at the unconsumed tail; finalize masks len itself. */
    return XXH64_finalize(h64, input, len, align);
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
    /* Size-optimized build: reuse the streaming code path.
     * Simpler to maintain, but slower for small inputs. */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    /* 8-byte aligned input can take the faster aligned path. */
    if (XXH_FORCE_ALIGN_CHECK && ((size_t)input & 7) == 0) {
        return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }
    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
/******* Hash Streaming *******/
#ifndef XXH_NO_STREAM
/*! @ingroup XXH64_family*/
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    /* Heap-allocate a streaming state. Contents are uninitialized until
     * XXH64_reset() is called. Presumably returns NULL on allocation
     * failure — TODO confirm XXH_malloc mirrors malloc() semantics. */
    void* const mem = XXH_malloc(sizeof(XXH64_state_t));
    return (XXH64_state_t*)mem;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    /* Releases a state obtained from XXH64_createState(). Always reports
     * success; there is no failure path here. */
    XXH_free(statePtr);
    return XXH_OK;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    /* Plain struct assignment: clones the complete streaming state,
     * including accumulators and any buffered input bytes. */
    *dstState = *srcState;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    /* Start from an all-zero state: clears total_len, memsize and the
     * internal 32-byte buffer in one go. */
    memset(statePtr, 0, sizeof(*statePtr));
    {   /* Seed the four accumulator lanes as mandated by the XXH64 spec. */
        xxh_u64 const init[4] = {
            seed + XXH_PRIME64_1 + XXH_PRIME64_2,
            seed + XXH_PRIME64_2,
            seed,
            seed - XXH_PRIME64_1
        };
        int lane;
        for (lane = 0; lane < 4; lane++) statePtr->v[lane] = init[lane];
    }
    return XXH_OK;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
    /* NULL input is only legal for an empty update. */
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }
    { const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;
        state->total_len += len;
        /* Buffered + new bytes still under one stripe: just stash them. */
        if (state->memsize + len < 32) {  /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }
        /* Leftover bytes from a previous call: top the buffer up to exactly
         * 32 bytes and consume it as one full stripe. */
        if (state->memsize) { /* tmp buffer is full */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }
        /* Bulk phase: process whole 32-byte stripes straight from the input. */
        if (p+32 <= bEnd) {
            const xxh_u8* const limit = bEnd - 32;
            do {
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
            } while (p<=limit);
        }
        /* Stash the 1..31 byte tail for the next update/digest. */
        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }
    return XXH_OK;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
{
    xxh_u64 h64;
    if (state->total_len >= 32) {
        /* At least one full stripe was processed: merge the four lanes. */
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
        h64 = XXH64_mergeRound(h64, state->v[0]);
        h64 = XXH64_mergeRound(h64, state->v[1]);
        h64 = XXH64_mergeRound(h64, state->v[2]);
        h64 = XXH64_mergeRound(h64, state->v[3]);
    } else {
        /* Short input: v[2] still holds the untouched seed (seed + 0). */
        h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
    }
    h64 += (xxh_u64) state->total_len;
    /* Mix in the bytes still buffered in mem64, then avalanche. */
    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}
#endif /* !XXH_NO_STREAM */
/******* Canonical representation *******/
/*! @ingroup XXH64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    /* Canonical form is the big-endian byte sequence of the hash value,
     * so it is identical across platforms. */
    XXH64_hash_t bigEndian = hash;
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) bigEndian = XXH_swap64(bigEndian);
    XXH_memcpy(dst, &bigEndian, sizeof(bigEndian));
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    /* The canonical representation is big-endian by definition. */
    XXH64_hash_t const hash = XXH_readBE64(src);
    return hash;
}
#ifndef XXH_NO_XXH3
/* *********************************************************************
* XXH3
* New generation hash designed for speed on small keys and vectorization
************************************************************************ */
/*!
* @}
* @defgroup XXH3_impl XXH3 implementation
* @ingroup impl
* @{
*/
/* === Compiler specifics === */
#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
# define XXH_RESTRICT /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
# define XXH_RESTRICT restrict
#else
/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
# define XXH_RESTRICT /* disable */
#endif
#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
|| (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
|| defined(__clang__)
# define XXH_likely(x) __builtin_expect(x, 1)
# define XXH_unlikely(x) __builtin_expect(x, 0)
#else
# define XXH_likely(x) (x)
# define XXH_unlikely(x) (x)
#endif
#if defined(__GNUC__) || defined(__clang__)
# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
|| defined(__aarch64__) || defined(_M_ARM) \
|| defined(_M_ARM64) || defined(_M_ARM64EC)
# define inline __inline__ /* circumvent a clang bug */
# include <arm_neon.h>
# undef inline
# elif defined(__AVX2__)
# include <immintrin.h>
# elif defined(__SSE2__)
# include <emmintrin.h>
# endif
#endif
#if defined(_MSC_VER)
# include <intrin.h>
#endif
/*
* One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
* remaining a true 64-bit/128-bit hash function.
*
* This is done by prioritizing a subset of 64-bit operations that can be
* emulated without too many steps on the average 32-bit machine.
*
* For example, these two lines seem similar, and run equally fast on 64-bit:
*
* xxh_u64 x;
* x ^= (x >> 47); // good
* x ^= (x >> 13); // bad
*
* However, to a 32-bit machine, there is a major difference.
*
* x ^= (x >> 47) looks like this:
*
* x.lo ^= (x.hi >> (47 - 32));
*
* while x ^= (x >> 13) looks like this:
*
* // note: funnel shifts are not usually cheap.
* x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
* x.hi ^= (x.hi >> 13);
*
* The first one is significantly faster than the second, simply because the
* shift is larger than 32. This means:
* - All the bits we need are in the upper 32 bits, so we can ignore the lower
* 32 bits in the shift.
* - The shift result will always fit in the lower 32 bits, and therefore,
* we can ignore the upper 32 bits in the xor.
*
* Thanks to this optimization, XXH3 only requires these features to be efficient:
*
* - Usable unaligned access
* - A 32-bit or 64-bit ALU
* - If 32-bit, a decent ADC instruction
* - A 32 or 64-bit multiply with a 64-bit result
* - For the 128-bit variant, a decent byteswap helps short inputs.
*
* The first two are already required by XXH32, and almost all 32-bit and 64-bit
* platforms which can run XXH32 can run XXH3 efficiently.
*
* Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
* notable exception.
*
* First of all, Thumb-1 lacks support for the UMULL instruction which
* performs the important long multiply. This means numerous __aeabi_lmul
* calls.
*
* Second of all, the 8 functional registers are just not enough.
* Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
* Lo registers, and this shuffling results in thousands more MOVs than A32.
*
* A32 and T32 don't have this limitation. They can access all 14 registers,
* do a 32->64 multiply with UMULL, and the flexible operand allowing free
* shifts is helpful, too.
*
* Therefore, we do a quick sanity check.
*
* If compiling Thumb-1 for a target which supports ARM instructions, we will
* emit a warning, as it is not a "sane" platform to compile for.
*
* Usually, if this happens, it is because of an accident and you probably need
* to specify -march, as you likely meant to compile for a newer architecture.
*
* Credit: large sections of the vectorial and asm source code paths
* have been contributed by @easyaspi314
*/
#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
# warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif
/* ==========================================
* Vectorization detection
* ========================================== */
#ifdef XXH_DOXYGEN
/*!
* @ingroup tuning
* @brief Overrides the vectorization implementation chosen for XXH3.
*
* Can be defined to 0 to disable SIMD or any of the values mentioned in
* @ref XXH_VECTOR_TYPE.
*
* If this is not defined, it uses predefined macros to determine the best
* implementation.
*/
# define XXH_VECTOR XXH_SCALAR
/*!
* @ingroup tuning
* @brief Possible values for @ref XXH_VECTOR.
*
* Note that these are actually implemented as macros.
*
* If this is not defined, it is detected automatically.
* @ref XXH_X86DISPATCH overrides this.
*/
enum XXH_VECTOR_TYPE /* fake enum */ {
XXH_SCALAR = 0, /*!< Portable scalar version */
XXH_SSE2 = 1, /*!<
* SSE2 for Pentium 4, Opteron, all x86_64.
*
* @note SSE2 is also guaranteed on Windows 10, macOS, and
* Android x86.
*/
XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */
XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
};
/*!
* @ingroup tuning
* @brief Selects the minimum alignment for XXH3's accumulators.
*
 * When using SIMD, this should match the alignment required for said vector
* type, so, for example, 32 for AVX2.
*
* Default: Auto detected.
*/
# define XXH_ACC_ALIGN 8
#endif
/* Actual definition */
#ifndef XXH_DOXYGEN
# define XXH_SCALAR 0
# define XXH_SSE2 1
# define XXH_AVX2 2
# define XXH_AVX512 3
# define XXH_NEON 4
# define XXH_VSX 5
#endif
#ifndef XXH_VECTOR /* can be defined on command line */
# if ( \
defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
|| defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
) && ( \
defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
)
# define XXH_VECTOR XXH_NEON
# elif defined(__AVX512F__)
# define XXH_VECTOR XXH_AVX512
# elif defined(__AVX2__)
# define XXH_VECTOR XXH_AVX2
# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
# define XXH_VECTOR XXH_SSE2
# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
|| (defined(__s390x__) && defined(__VEC__)) \
&& defined(__GNUC__) /* TODO: IBM XL */
# define XXH_VECTOR XXH_VSX
# else
# define XXH_VECTOR XXH_SCALAR
# endif
#endif
/*
* Controls the alignment of the accumulator,
* for compatibility with aligned vector loads, which are usually faster.
*/
#ifndef XXH_ACC_ALIGN
# if defined(XXH_X86DISPATCH)
# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
# elif XXH_VECTOR == XXH_SCALAR /* scalar */
# define XXH_ACC_ALIGN 8
# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
# define XXH_ACC_ALIGN 16
# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
# define XXH_ACC_ALIGN 32
# elif XXH_VECTOR == XXH_NEON /* neon */
# define XXH_ACC_ALIGN 16
# elif XXH_VECTOR == XXH_VSX /* vsx */
# define XXH_ACC_ALIGN 16
# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
# define XXH_ACC_ALIGN 64
# endif
#endif
#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
|| XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
# define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
# define XXH_SEC_ALIGN 8
#endif
/*
* UGLY HACK:
* GCC usually generates the best code with -O3 for xxHash.
*
* However, when targeting AVX2, it is overzealous in its unrolling resulting
* in code roughly 3/4 the speed of Clang.
*
* There are other issues, such as GCC splitting _mm256_loadu_si256 into
* _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
* only applies to Sandy and Ivy Bridge... which don't even support AVX2.
*
* That is why when compiling the AVX2 version, it is recommended to use either
* -O2 -mavx2 -march=haswell
* or
* -O2 -mavx2 -mno-avx256-split-unaligned-load
* for decent performance, or to use Clang instead.
*
* Fortunately, we can control the first one with a pragma that forces GCC into
* -O2, but the other one we can't control without "failed to inline always
* inline function due to target mismatch" warnings.
*/
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
&& defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
# pragma GCC push_options
# pragma GCC optimize("-O2")
#endif
#if XXH_VECTOR == XXH_NEON
/*
* NEON's setup for vmlal_u32 is a little more complicated than it is on
* SSE2, AVX2, and VSX.
*
* While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
*
* To do the same operation, the 128-bit 'Q' register needs to be split into
* two 64-bit 'D' registers, performing this operation::
*
* [ a | b ]
* | '---------. .--------' |
* | x |
* | .---------' '--------. |
* [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ]
*
* Due to significant changes in aarch64, the fastest method for aarch64 is
* completely different than the fastest method for ARMv7-A.
*
* ARMv7-A treats D registers as unions overlaying Q registers, so modifying
* D11 will modify the high half of Q5. This is similar to how modifying AH
* will only affect bits 8-15 of AX on x86.
*
* VZIP takes two registers, and puts even lanes in one register and odd lanes
* in the other.
*
* On ARMv7-A, this strangely modifies both parameters in place instead of
* taking the usual 3-operand form.
*
* Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
* lower and upper halves of the Q register to end up with the high and low
* halves where we want - all in one instruction.
*
* vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
*
* Unfortunately we need inline assembly for this: Instructions modifying two
* registers at once is not possible in GCC or Clang's IR, and they have to
* create a copy.
*
* aarch64 requires a different approach.
*
* In order to make it easier to write a decent compiler for aarch64, many
* quirks were removed, such as conditional execution.
*
* NEON was also affected by this.
*
* aarch64 cannot access the high bits of a Q-form register, and writes to a
* D-form register zero the high bits, similar to how writes to W-form scalar
* registers (or DWORD registers on x86_64) work.
*
* The formerly free vget_high intrinsics now require a vext (with a few
* exceptions)
*
* Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
* of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
* operand.
*
* The equivalent of the VZIP.32 on the lower and upper halves would be this
* mess:
*
* ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
* zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] }
* zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
*
* Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
*
* shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
* xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
*
* This is available on ARMv7-A, but is less efficient than a single VZIP.32.
*/
/*!
* Function-like macro:
* void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
* {
* outLo = (uint32x2_t)(in & 0xFFFFFFFF);
* outHi = (uint32x2_t)(in >> 32);
* in = UNDEFINED;
* }
*/
# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
&& (defined(__GNUC__) || defined(__clang__)) \
&& (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
do { \
/* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
/* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
/* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
__asm__("vzip.32 %e0, %f0" : "+w" (in)); \
(outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \
(outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
} while (0)
# else
# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
do { \
(outLo) = vmovn_u64 (in); \
(outHi) = vshrn_n_u64 ((in), 32); \
} while (0)
# endif
/*!
* @internal
* @brief `vld1q_u64` but faster and alignment-safe.
*
* On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
* *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
*
* GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
* prohibits load-store optimizations. Therefore, a direct dereference is used.
*
* Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
* unaligned load.
*/
#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
{
    /* Direct dereference: unaligned access is always safe on AArch64, and
     * this lets GCC apply load-store optimizations that it refuses for the
     * vld1q intrinsic (see comment block above). */
    return *(uint64x2_t const*)ptr;
}
#else
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
{
    /* Byte-wise load + reinterpret: a safe unaligned load on ARMv7-a,
     * where vld1 has an alignment bit. */
    return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
}
#endif
/*!
* @ingroup tuning
* @brief Controls the NEON to scalar ratio for XXH3
*
* On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and
* 2 lanes on scalar by default.
*
* This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the
* emulated 64-bit arithmetic is too slow.
*
* Modern ARM CPUs are _very_ sensitive to how their pipelines are used.
*
* For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't
* have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
* you are only using 2/3 of the CPU bandwidth.
*
 * This is even more noticeable on the more advanced cores like the A76 which
* can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
*
* Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the
* remaining lanes will use scalar instructions. This improves the bandwidth
* and also gives the integer pipelines something to do besides twiddling loop
* counters and pointers.
*
* This change benefits CPUs with large micro-op buffers without negatively affecting
* other CPUs:
*
* | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
* |:----------------------|:--------------------|----------:|-----------:|------:|
* | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
* | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
* | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
*
* It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
*
* @see XXH3_accumulate_512_neon()
*/
# ifndef XXH3_NEON_LANES
# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
&& XXH_SIZE_OPT <= 0
# define XXH3_NEON_LANES 6
# else
# define XXH3_NEON_LANES XXH_ACC_NB
# endif
# endif
#endif /* XXH_VECTOR == XXH_NEON */
/*
* VSX and Z Vector helpers.
*
* This is very messy, and any pull requests to clean this up are welcome.
*
* There are a lot of problems with supporting VSX and s390x, due to
* inconsistent intrinsics, spotty coverage, and multiple endiannesses.
*/
#if XXH_VECTOR == XXH_VSX
/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
* and `pixel`. This is a problem for obvious reasons.
*
* These keywords are unnecessary; the spec literally says they are
* equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
* after including the header.
*
* We use pragma push_macro/pop_macro to keep the namespace clean. */
# pragma push_macro("bool")
# pragma push_macro("vector")
# pragma push_macro("pixel")
/* silence potential macro redefined warnings */
# undef bool
# undef vector
# undef pixel
# if defined(__s390x__)
# include <s390intrin.h>
# else
# include <altivec.h>
# endif
/* Restore the original macro values, if applicable. */
# pragma pop_macro("pixel")
# pragma pop_macro("vector")
# pragma pop_macro("bool")
typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;
# ifndef XXH_VSX_BE
# if defined(__BIG_ENDIAN__) \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
# define XXH_VSX_BE 1
# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
# warning "-maltivec=be is not recommended. Please use native endianness."
# define XXH_VSX_BE 1
# else
# define XXH_VSX_BE 0
# endif
# endif /* !defined(XXH_VSX_BE) */
# if XXH_VSX_BE
# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
# define XXH_vec_revb vec_revb
# else
/*!
* A polyfill for POWER9's vec_revb().
*/
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    /* Permutation mask that reverses the byte order of each 64-bit lane,
     * emulating POWER9's vec_revb() on older targets. */
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
# endif
# endif /* XXH_VSX_BE */
/*!
* Performs an unaligned vector load and byte swaps it on big endian.
*/
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    /* memcpy performs the unaligned vector load without UB. */
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
# if XXH_VSX_BE
    /* Byte-swap on big-endian targets (see XXH_VSX_BE above). */
    ret = XXH_vec_revb(ret);
# endif
    return ret;
}
/*
* vec_mulo and vec_mule are very problematic intrinsics on PowerPC
*
* These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on version.
* */
# if defined(__s390x__)
/* s390x is always big endian, no issue on this platform */
# define XXH_vec_mulo vec_mulo
# define XXH_vec_mule vec_mule
# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
# define XXH_vec_mulo __builtin_altivec_vmulouw
# define XXH_vec_mule __builtin_altivec_vmuleuw
# else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    /* Raw vmulouw (Vector Multiply Odd Unsigned Word): 32x32->64 widening
     * multiply, emitted directly to sidestep GCC's endian- and
     * version-dependent vec_mulo semantics (see comment above). */
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    /* Raw vmuleuw (Vector Multiply Even Unsigned Word): the even-lane
     * counterpart of XXH_vec_mulo above. */
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
# endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */
/* prefetch
* can be disabled, by declaring XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
#else
# if XXH_SIZE_OPT >= 1
# define XXH_PREFETCH(ptr) (void)(ptr)
# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# else
# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
# endif
#endif /* XXH_NO_PREFETCH */
/* ==========================================
* XXH3 default settings
* ========================================== */
#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
# error "default keyset is not large enough"
#endif
/*! Pseudorandom secret taken directly from FARSH.
 *  This table must remain bit-exact: changing any byte changes every
 *  XXH3 hash value produced with the default secret. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};
#ifdef XXH_OLD_NAMES
# define kSecret XXH3_kSecret
#endif
#ifdef XXH_DOXYGEN
/*!
* @brief Calculates a 32-bit to 64-bit long multiply.
*
* Implemented as a macro.
*
* Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
* need to (but it shouldn't need to anyways, it is about 7 instructions to do
* a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
* use that instead of the normal method.
*
* If you are compiling for platforms like Thumb-1 and don't have a better option,
* you may also want to write your own long multiply routine here.
*
* @param x, y Numbers to be multiplied
* @return 64-bit product of the low 32 bits of @p x and @p y.
*/
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
    /* Doxygen-only reference implementation: mask both operands to their
     * low 32 bits and multiply. Real builds use the macro forms below. */
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
* Downcast + upcast is usually better than masking on older compilers like
* GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
*
* The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
* and perform a full 64x64 multiply -- entirely redundant on 32-bit.
*/
# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
/*!
* @brief Calculates a 64->128-bit long multiply.
*
* Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
* version.
*
* @param lhs , rhs The 64-bit integers to be multiplied
* @return The 128-bit result represented in an @ref XXH128_hash_t.
*/
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * Despite being a 32-bit platform, Clang (and emscripten) define this type
     * despite not having the arithmetic for it. This results in a laggy
     * compiler builtin call which calculates a full 128-bit multiply.
     * In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64 = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;
    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to single operand MUL on x64.
     */
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
/* NOTE(review): pragma guarded by #ifndef _MSC_VER looks inverted for an
 * MSVC intrinsic — verify against upstream xxHash before changing. */
#ifndef _MSC_VER
#   pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64 = product_low;
    r128.high64 = product_high;
    return r128;
    /*
     * MSVC for ARM64's __umulh method.
     *
     * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
     */
#elif defined(_M_ARM64) || defined(_M_ARM64EC)
#ifndef _MSC_VER
#   pragma intrinsic(__umulh)
#endif
    XXH128_hash_t r128;
    r128.low64 = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;
#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *       + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross  = (15 / 10) + (45 % 10) + 21 = 27
     *       + 6 7 | | // D2 upper  = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */
    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
    XXH128_hash_t r128;
    r128.low64 = lower;
    r128.high64 = upper;
    return r128;
#endif
}
/*!
* @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
*
* The reason for the separate function is to prevent passing too many structs
* around by value. This will hopefully inline the multiply, but we don't force it.
*
* @param lhs , rhs The 64-bit integers to multiply
* @return The low 64 bits of the product XOR'd by the high 64 bits.
* @see XXH_mult64to128()
*/
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    /* Full 64x64->128 multiply, folded down to 64 bits by XORing the
     * high and low halves together. */
    XXH128_hash_t const wide = XXH_mult64to128(lhs, rhs);
    return wide.high64 ^ wide.low64;
}
/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    /* XOR a value with itself shifted right; shift must lie in [0, 63]. */
    XXH_ASSERT(0 <= shift && shift < 64);
    {   xxh_u64 const shifted = v64 >> shift;
        return v64 ^ shifted;
    }
}
/*
* This is a fast avalanche stage,
* suitable when input bits are already partially mixed
*/
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    /* Fast avalanche: xorshift / prime multiply / xorshift, with the
     * xorshift steps written out inline. Suitable when the input bits
     * are already partially mixed. */
    xxh_u64 mixed = h64;
    mixed ^= mixed >> 37;
    mixed *= 0x165667919E3779F9ULL;
    mixed ^= mixed >> 32;
    return mixed;
}
/*
 * Stronger avalanche, based on Pelle Evensen's rrmxmx.
 * Preferable when the input has not been mixed beforehand.
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    xxh_u64 x = h64;
    /* rotate-rotate-xor, then multiply / xor-add / multiply / xorshift */
    x ^= XXH_rotl64(x, 49) ^ XXH_rotl64(x, 24);
    x *= 0x9FB21C651E98DF25ULL;
    x ^= (x >> 35) + len;
    x *= 0x9FB21C651E98DF25ULL;
    return XXH_xorshift64(x, 28);
}
/* ==========================================
* Short keys
* ==========================================
* One of the shortcomings of XXH32 and XXH64 was that their performance was
* sub-optimal on short lengths. It used an iterative algorithm which strongly
* favored lengths that were a multiple of 4 or 8.
*
* Instead of iterating over individual inputs, we use a set of single shot
* functions which piece together a range of lengths and operate in constant time.
*
* Additionally, the number of multiplies has been significantly reduced. This
* reduces latency, especially when emulating 64-bit multiplies on 32-bit.
*
* Depending on the platform, this may or may not be faster than XXH32, but it
* is almost guaranteed to be faster than XXH64.
*/
/*
* At very short lengths, there isn't enough input to fully hide secrets, or use
* the entire secret.
*
* There is also only a limited amount of mixing we can do before significantly
* impacting performance.
*
* Therefore, we use different sections of the secret and always mix two secret
* samples with an XOR. This should have no effect on performance on the
* seedless or withSeed variants because everything _should_ be constant folded
* by modern compilers.
*
* The XOR mixing hides individual parts of the secret and increases entropy.
*
* This adds an extra layer of strength for custom secrets.
*/
/*!
 * @brief Hashes inputs of 1 to 3 bytes.
 *
 * Packs the (up to 3) input bytes plus the length into a single 32-bit word,
 * XORs it with a seed-adjusted fold of two secret words, then avalanches.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(1 <= len && len <= 3);
XXH_ASSERT(secret != NULL);
/*
 * len = 1: combined = { input[0], 0x01, input[0], input[0] }
 * len = 2: combined = { input[1], 0x02, input[0], input[1] }
 * len = 3: combined = { input[2], 0x03, input[0], input[1] }
 */
{ xxh_u8 const c1 = input[0];           /* first byte */
xxh_u8 const c2 = input[len >> 1];      /* middle byte (== first byte when len == 1) */
xxh_u8 const c3 = input[len - 1];       /* last byte */
xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
| ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
/* fold two 32-bit secret words together and shift by the seed */
xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
return XXH64_avalanche(keyed);
}
}
/*!
 * @brief Hashes inputs of 4 to 8 bytes.
 *
 * Reads the first and last 4 bytes (overlapping when len < 8), combines them
 * into one 64-bit word, XORs with a seed-adjusted secret fold, then applies
 * the strong rrmxmx avalanche.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(secret != NULL);
XXH_ASSERT(4 <= len && len <= 8);
/* mirror a byte-swapped copy of the low seed half into the high half */
seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
{ xxh_u32 const input1 = XXH_readLE32(input);            /* first 4 bytes */
xxh_u32 const input2 = XXH_readLE32(input + len - 4);    /* last 4 bytes (may overlap) */
xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
xxh_u64 const keyed = input64 ^ bitflip;
return XXH3_rrmxmx(keyed, len);
}
}
/*!
 * @brief Hashes inputs of 9 to 16 bytes.
 *
 * Reads the first and last 8 bytes (overlapping when len < 16), keys each
 * with a different seed-adjusted secret fold, then combines the two halves
 * with a byte swap plus a 128-bit multiply fold before avalanching.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(secret != NULL);
XXH_ASSERT(9 <= len && len <= 16);
{ xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;            /* first 8 bytes, keyed */
xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;  /* last 8 bytes, keyed */
xxh_u64 const acc = len
+ XXH_swap64(input_lo) + input_hi
+ XXH3_mul128_fold64(input_lo, input_hi);
return XXH3_avalanche(acc);
}
}
/*!
 * @brief Dispatcher for inputs of 0 to 16 bytes.
 *
 * Routes to the fixed-range helper matching @p len; the empty input is
 * hashed from the seed and a fold of two secret words alone.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    if (len == 0) {
        /* empty input: nothing to read, mix the seed with the secret only */
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
    if (XXH_likely(len > 8))
        return XXH3_len_9to16_64b(input, len, secret, seed);
    if (XXH_likely(len >= 4))
        return XXH3_len_4to8_64b(input, len, secret, seed);
    return XXH3_len_1to3_64b(input, len, secret, seed);
}
/*
* DISCLAIMER: There are known *seed-dependent* multicollisions here due to
* multiplication by zero, affecting hashes of lengths 17 to 240.
*
* However, they are very unlikely.
*
* Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
* unseeded non-cryptographic hashes, it does not attempt to defend itself
* against specially crafted inputs, only random inputs.
*
* Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
* cancelling out the secret is taken an arbitrary number of times (addressed
* in XXH3_accumulate_512), this collision is very unlikely with random inputs
* and/or proper seeding:
*
* This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
* function that is only called up to 16 times per hash with up to 240 bytes of
* input.
*
* This is not too bad for a non-cryptographic hash function, especially with
* only 64 bit outputs.
*
* The 128-bit variant (which trades some speed for strength) is NOT affected
* by this, although it is always a good idea to use a proper seed if you care
* about strength.
*/
/*!
 * @brief Mixes 16 bytes of input with 16 bytes of (seed-adjusted) secret
 * via a 64x64->128-bit multiply folded down to 64 bits.
 *
 * Core building block for the 17-240 byte range.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
&& !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
/*
 * UGLY HACK:
 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
 * slower code.
 *
 * By forcing seed64 into a register, we disrupt the cost model and
 * cause it to scalarize. See `XXH32_round()`
 *
 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
 * GCC 9.2, despite both emitting scalar code.
 *
 * GCC generates much better scalar code than Clang for the rest of XXH3,
 * which is why finding a more optimal codepath is an interest.
 */
XXH_COMPILER_GUARD(seed64);
#endif
{ xxh_u64 const input_lo = XXH_readLE64(input);
xxh_u64 const input_hi = XXH_readLE64(input+8);
/* seed shifts the two secret words in opposite directions (+/-) */
return XXH3_mul128_fold64(
input_lo ^ (XXH_readLE64(secret) + seed64),
input_hi ^ (XXH_readLE64(secret+8) - seed64)
);
}
}
/* For mid range keys, XXH3 uses a Mum-hash variant. */
/*!
 * @brief Hashes mid-size inputs of 17 to 128 bytes.
 *
 * Accumulates pairs of XXH3_mix16B() rounds working inward from both ends
 * of the input, so the first and last 16 bytes always participate, then
 * avalanches the accumulator.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(16 < len && len <= 128);
{ xxh_u64 acc = len * XXH_PRIME64_1;
#if XXH_SIZE_OPT >= 1
/* Smaller and cleaner, but slightly slower. */
/* one front/back pair of mixes per 32 bytes, counting down */
size_t i = (len - 1) / 32;
do {
acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
} while (i-- != 0);
#else
/* unrolled: widest length ranges first, innermost pairs mixed first */
if (len > 32) {
if (len > 64) {
if (len > 96) {
acc += XXH3_mix16B(input+48, secret+96, seed);
acc += XXH3_mix16B(input+len-64, secret+112, seed);
}
acc += XXH3_mix16B(input+32, secret+64, seed);
acc += XXH3_mix16B(input+len-48, secret+80, seed);
}
acc += XXH3_mix16B(input+16, secret+32, seed);
acc += XXH3_mix16B(input+len-32, secret+48, seed);
}
acc += XXH3_mix16B(input+0, secret+0, seed);
acc += XXH3_mix16B(input+len-16, secret+16, seed);
#endif
return XXH3_avalanche(acc);
}
}
#define XXH3_MIDSIZE_MAX 240
/*!
 * @brief Hashes mid-size inputs of 129 to 240 bytes.
 *
 * Runs 8 mix rounds over the first 128 bytes, avalanches, then continues
 * with one round per remaining 16-byte chunk (using an offset secret), and
 * always folds in the final 16 bytes of input last.
 */
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
#define XXH3_MIDSIZE_STARTOFFSET 3
#define XXH3_MIDSIZE_LASTOFFSET 17
{ xxh_u64 acc = len * XXH_PRIME64_1;
int const nbRounds = (int)len / 16;
int i;
/* first 128 bytes: same pattern as the 17-128 path, fully unrolled */
for (i=0; i<8; i++) {
acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
}
/* intermediate avalanche before consuming the tail */
acc = XXH3_avalanche(acc);
XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__) /* Clang */ \
&& (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
&& !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
/*
 * UGLY HACK:
 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
 * In everywhere else, it uses scalar code.
 *
 * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
 * would still be slower than UMAAL (see XXH_mult64to128).
 *
 * Unfortunately, Clang doesn't handle the long multiplies properly and
 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
 * scalarized into an ugly mess of VMOV.32 instructions.
 *
 * This mess is difficult to avoid without turning autovectorization
 * off completely, but they are usually relatively minor and/or not
 * worth it to fix.
 *
 * This loop is the easiest to fix, as unlike XXH32, this pragma
 * _actually works_ because it is a loop vectorization instead of an
 * SLP vectorization.
 */
#pragma clang loop vectorize(disable)
#endif
for (i=8 ; i < nbRounds; i++) {
acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
}
/* last bytes */
acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
return XXH3_avalanche(acc);
}
}
/* ======= Long Keys ======= */
#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
#ifdef XXH_OLD_NAMES
# define STRIPE_LEN XXH_STRIPE_LEN
# define ACC_NB XXH_ACC_NB
#endif
/* Stores v64 at dst in little-endian byte order, byte-swapping first on
 * big-endian hosts.  dst has no alignment requirement (memcpy-based). */
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    xxh_u64 const le64 = XXH_CPU_LITTLE_ENDIAN ? v64 : XXH_swap64(v64);
    XXH_memcpy(dst, &le64, sizeof(le64));
}
/* Several intrinsic functions below are supposed to accept __int64 as argument,
* as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
* However, several environments do not define __int64 type,
* requiring a workaround.
*/
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
typedef int64_t xxh_i64;
#else
/* the following type must have a width of 64-bit */
typedef long long xxh_i64;
#endif
/*
* XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
*
* It is a hardened version of UMAC, based off of FARSH's implementation.
*
* This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
* implementations, and it is ridiculously fast.
*
* We harden it by mixing the original input to the accumulators as well as the product.
*
* This means that in the (relatively likely) case of a multiply by zero, the
* original input is preserved.
*
* On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
* cross-pollination, as otherwise the upper and lower halves would be
* essentially independent.
*
* This doesn't matter on 64-bit hashes since they all get merged together in
* the end, so we skip the extra step.
*
* Both XXH3_64bits and XXH3_128bits use this subroutine.
*/
#if (XXH_VECTOR == XXH_AVX512) \
|| (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512 /* disable attribute target */
#endif
/*!
 * @internal
 * @brief AVX-512 implementation of the 64-byte stripe accumulator.
 *
 * The whole 64-byte stripe fits in a single __m512i, so one register-wide
 * xor/shuffle/multiply/add sequence processes the entire stripe.
 */
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
__m512i* const xacc = (__m512i *) acc;
XXH_ASSERT((((size_t)acc) & 63) == 0);
XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
{
/* data_vec = input[0]; */
__m512i const data_vec = _mm512_loadu_si512 (input);
/* key_vec = secret[0]; */
__m512i const key_vec = _mm512_loadu_si512 (secret);
/* data_key = data_vec ^ key_vec; */
__m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
/* data_key_lo = data_key >> 32; */
__m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
/* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
__m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
/* xacc[0] += swap(data_vec); */
__m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
__m512i const sum = _mm512_add_epi64(*xacc, data_swap);
/* xacc[0] += product; */
*xacc = _mm512_add_epi64(product, sum);
}
}
/*
* XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
*
* Multiplication isn't perfect, as explained by Google in HighwayHash:
*
* // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
* // varying degrees. In descending order of goodness, bytes
* // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
* // As expected, the upper and lower bytes are much worse.
*
* Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
*
* Since our algorithm uses a pseudorandom secret to add some variance into the
* mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
*
* This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
* extraction.
*
* Both XXH3_64bits and XXH3_128bits use this subroutine.
*/
/*!
 * @internal
 * @brief AVX-512 implementation of the accumulator scramble step:
 * xorshift-by-47, xor with the secret, then multiply by XXH_PRIME32_1,
 * all on the single 512-bit accumulator register.
 */
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 63) == 0);
XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
{ __m512i* const xacc = (__m512i*) acc;
const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
/* xacc[0] ^= (xacc[0] >> 47) */
__m512i const acc_vec = *xacc;
__m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
__m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted);
/* xacc[0] ^= secret; */
__m512i const key_vec = _mm512_loadu_si512 (secret);
__m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
/* xacc[0] *= XXH_PRIME32_1; emulate the 64-bit multiply with two 32x32 products */
__m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
__m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
__m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
*xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
}
}
/*!
 * @internal
 * @brief Derives a seeded custom secret from the default secret, 64 bytes
 * at a time: even 64-bit lanes get +seed, odd lanes (mask 0xAA) get -seed.
 */
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
XXH_ASSERT(((size_t)customSecret & 63) == 0);
(void)(&XXH_writeLE64);  /* suppress unused-function warning on this path */
{ int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
__m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
__m512i* const dest = ( __m512i*) customSecret;
int i;
XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
XXH_ASSERT(((size_t)dest & 63) == 0);
for (i=0; i < nbRounds; ++i) {
/* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
 * this will warn "discards 'const' qualifier". The union launders the const. */
union {
const __m512i* cp;
void* p;
} remote_const_void;
remote_const_void.cp = src + i;
dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
} }
}
#endif
#if (XXH_VECTOR == XXH_AVX2) \
|| (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2 /* disable attribute target */
#endif
/*!
 * @internal
 * @brief AVX2 implementation of the 64-byte stripe accumulator:
 * two 256-bit halves per stripe, same xor/shuffle/multiply/add scheme
 * as the AVX-512 path.
 */
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 31) == 0);
{ __m256i* const xacc = (__m256i *) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
const __m256i* const xinput = (const __m256i *) input;
/* Unaligned. This is mainly for pointer arithmetic, and because
 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
const __m256i* const xsecret = (const __m256i *) secret;
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
/* data_vec = xinput[i]; */
__m256i const data_vec = _mm256_loadu_si256 (xinput+i);
/* key_vec = xsecret[i]; */
__m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
/* data_key = data_vec ^ key_vec; */
__m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
/* data_key_lo = data_key >> 32; */
__m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
/* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
__m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
/* xacc[i] += swap(data_vec); */
__m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
__m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
/* xacc[i] += product; */
xacc[i] = _mm256_add_epi64(product, sum);
} }
}
/*!
 * @internal
 * @brief AVX2 implementation of the accumulator scramble step:
 * xorshift-by-47, xor with the secret, multiply by XXH_PRIME32_1,
 * applied to each 256-bit half of the accumulators.
 */
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 31) == 0);
{ __m256i* const xacc = (__m256i*) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
const __m256i* const xsecret = (const __m256i *) secret;
const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
/* xacc[i] ^= (xacc[i] >> 47) */
__m256i const acc_vec = xacc[i];
__m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
__m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
/* xacc[i] ^= xsecret; */
__m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
__m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
/* xacc[i] *= XXH_PRIME32_1; emulated with two 32x32->64 products */
__m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
__m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
__m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
}
}
}
/*!
 * @internal
 * @brief Derives a seeded custom secret with AVX2: alternate 64-bit lanes
 * of the default secret get +seed / -seed, six 256-bit stores in total.
 */
XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
(void)(&XXH_writeLE64);  /* suppress unused-function warning on this path */
XXH_PREFETCH(customSecret);
{ __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
__m256i* dest = ( __m256i*) customSecret;
# if defined(__GNUC__) || defined(__clang__)
/*
 * On GCC & Clang, marking 'dest' as modified will cause the compiler:
 * - do not extract the secret from sse registers in the internal loop
 * - use less common registers, and avoid pushing these reg into stack
 */
XXH_COMPILER_GUARD(dest);
# endif
XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
XXH_ASSERT(((size_t)dest & 31) == 0);
/* GCC -O2 need unroll loop manually */
dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
}
}
#endif
/* x86dispatch always generates SSE2 */
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
#ifndef XXH_TARGET_SSE2
# define XXH_TARGET_SSE2 /* disable attribute target */
#endif
/*!
 * @internal
 * @brief SSE2 implementation of the 64-byte stripe accumulator:
 * four 128-bit quarters per stripe.
 */
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
/* SSE2 is just a half-scale version of the AVX2 version. */
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ __m128i* const xacc = (__m128i *) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
const __m128i* const xinput = (const __m128i *) input;
/* Unaligned. This is mainly for pointer arithmetic, and because
 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
const __m128i* const xsecret = (const __m128i *) secret;
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
/* data_vec = xinput[i]; */
__m128i const data_vec = _mm_loadu_si128 (xinput+i);
/* key_vec = xsecret[i]; */
__m128i const key_vec = _mm_loadu_si128 (xsecret+i);
/* data_key = data_vec ^ key_vec; */
__m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
/* data_key_lo = data_key >> 32; */
__m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
/* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
__m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
/* xacc[i] += swap(data_vec); */
__m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
__m128i const sum = _mm_add_epi64(xacc[i], data_swap);
/* xacc[i] += product; */
xacc[i] = _mm_add_epi64(product, sum);
} }
}
/*!
 * @internal
 * @brief SSE2 implementation of the accumulator scramble step:
 * xorshift-by-47, xor with the secret, multiply by XXH_PRIME32_1,
 * on each 128-bit quarter of the accumulators.
 */
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ __m128i* const xacc = (__m128i*) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
const __m128i* const xsecret = (const __m128i *) secret;
const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
/* xacc[i] ^= (xacc[i] >> 47) */
__m128i const acc_vec = xacc[i];
__m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
__m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
/* xacc[i] ^= xsecret[i]; */
__m128i const key_vec = _mm_loadu_si128 (xsecret+i);
__m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
/* xacc[i] *= XXH_PRIME32_1; emulated with two 32x32->64 products */
__m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
__m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
__m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
}
}
}
/*!
 * @internal
 * @brief Derives a seeded custom secret with SSE2: alternate 64-bit lanes
 * of the default secret get +seed / -seed, 16 bytes at a time.
 */
XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
(void)(&XXH_writeLE64);  /* suppress unused-function warning on this path */
{ int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
/* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
__m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
# else
__m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
# endif
int i;
const void* const src16 = XXH3_kSecret;
__m128i* dst16 = (__m128i*) customSecret;
# if defined(__GNUC__) || defined(__clang__)
/*
 * On GCC & Clang, marking 'dest' as modified will cause the compiler:
 * - do not extract the secret from sse registers in the internal loop
 * - use less common registers, and avoid pushing these reg into stack
 */
XXH_COMPILER_GUARD(dst16);
# endif
XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
XXH_ASSERT(((size_t)dst16 & 15) == 0);
for (i=0; i < nbRounds; ++i) {
dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
} }
}
#endif
#if (XXH_VECTOR == XXH_NEON)
/* forward declarations for the scalar routines */
XXH_FORCE_INLINE void
XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
void const* XXH_RESTRICT secret, size_t lane);
XXH_FORCE_INLINE void
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
void const* XXH_RESTRICT secret, size_t lane);
/*!
* @internal
* @brief The bulk processing loop for NEON.
*
* The NEON code path is actually partially scalar when running on AArch64. This
* is to optimize the pipelining and can have up to 15% speedup depending on the
* CPU, and it also mitigates some GCC codegen issues.
*
* @see XXH3_NEON_LANES for configuring this and details about this optimization.
*/
XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
/* NEON processes lanes in 128-bit pairs, so the lane count must be even */
XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
{
uint64x2_t* const xacc = (uint64x2_t *) acc;
/* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
uint8_t const* const xinput = (const uint8_t *) input;
uint8_t const* const xsecret = (const uint8_t *) secret;
size_t i;
/* AArch64 uses both scalar and neon at the same time */
/* lanes beyond XXH3_NEON_LANES fall back to the scalar round */
for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
XXH3_scalarRound(acc, input, secret, i);
}
/* remaining lanes: NEON, two 64-bit lanes per iteration */
for (i=0; i < XXH3_NEON_LANES / 2; i++) {
uint64x2_t acc_vec = xacc[i];
/* data_vec = xinput[i]; */
uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
/* key_vec = xsecret[i]; */
uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
uint64x2_t data_key;
uint32x2_t data_key_lo, data_key_hi;
/* acc_vec_2 = swap(data_vec) */
uint64x2_t acc_vec_2 = vextq_u64(data_vec, data_vec, 1);
/* data_key = data_vec ^ key_vec; */
data_key = veorq_u64(data_vec, key_vec);
/* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
 * data_key_hi = (uint32x2_t) (data_key >> 32);
 * data_key = UNDEFINED; */
XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
/* acc_vec_2 += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
acc_vec_2 = vmlal_u32 (acc_vec_2, data_key_lo, data_key_hi);
/* xacc[i] += acc_vec_2; */
acc_vec = vaddq_u64 (acc_vec, acc_vec_2);
xacc[i] = acc_vec;
}
}
}
/*!
 * @internal
 * @brief NEON implementation of the accumulator scramble step; like the
 * bulk loop, lanes beyond XXH3_NEON_LANES are handled by the scalar round.
 */
XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ uint64x2_t* xacc = (uint64x2_t*) acc;
uint8_t const* xsecret = (uint8_t const*) secret;
uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
size_t i;
/* AArch64 uses both scalar and neon at the same time */
for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
XXH3_scalarScrambleRound(acc, secret, i);
}
for (i=0; i < XXH3_NEON_LANES / 2; i++) {
/* xacc[i] ^= (xacc[i] >> 47); */
uint64x2_t acc_vec = xacc[i];
uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
/* xacc[i] ^= xsecret[i]; */
uint64x2_t key_vec = XXH_vld1q_u64 (xsecret + (i * 16));
uint64x2_t data_key = veorq_u64 (data_vec, key_vec);
/* xacc[i] *= XXH_PRIME32_1 */
uint32x2_t data_key_lo, data_key_hi;
/* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
 * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
 * xacc[i] = UNDEFINED; */
XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
{ /*
 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
 *
 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
 * incorrectly "optimize" this:
 * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
 * shifted = vshll_n_u32(tmp, 32);
 * to this:
 * tmp = "vmulq_u64"(a, b); // no such thing!
 * shifted = vshlq_n_u64(tmp, 32);
 *
 * However, unlike SSE, Clang lacks a 64-bit multiply routine
 * for NEON, and it scalarizes two 64-bit multiplies instead.
 *
 * vmull_u32 has the same timing as vmul_u32, and it avoids
 * this bug completely.
 * See https://bugs.llvm.org/show_bug.cgi?id=39967
 */
uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
/* xacc[i] = prod_hi << 32; */
prod_hi = vshlq_n_u64(prod_hi, 32);
/* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
xacc[i] = vmlal_u32(prod_hi, data_key_lo, prime);
}
}
}
}
#endif
#if (XXH_VECTOR == XXH_VSX)
/*!
 * @internal
 * @brief POWER VSX (and s390x VX) implementation of the 64-byte stripe
 * accumulator, processing two 64-bit lanes per vector iteration.
 */
XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
/* presumed aligned */
unsigned int* const xacc = (unsigned int*) acc;
xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */
xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
xxh_u64x2 const v32 = { 32, 32 };
size_t i;
for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
/* data_vec = xinput[i]; */
xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
/* key_vec = xsecret[i]; */
xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
xxh_u64x2 const data_key = data_vec ^ key_vec;
/* shuffled = (data_key << 32) | (data_key >> 32); */
xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
/* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
/* acc_vec = xacc[i]; */
xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
acc_vec += product;
/* swap high and low halves */
#ifdef __s390x__
acc_vec += vec_permi(data_vec, data_vec, 2);
#else
acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
/* xacc[i] = acc_vec; */
vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
}
}
/*!
 * @internal
 * @brief POWER VSX implementation of the accumulator scramble step:
 * xorshift-by-47, xor with the secret, multiply by XXH_PRIME32_1
 * built from even/odd 32x32->64 partial products.
 */
XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ xxh_u64x2* const xacc = (xxh_u64x2*) acc;
const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
/* constants */
xxh_u64x2 const v32 = { 32, 32 };
xxh_u64x2 const v47 = { 47, 47 };
xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
size_t i;
for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
/* xacc[i] ^= (xacc[i] >> 47); */
xxh_u64x2 const acc_vec = xacc[i];
xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
/* xacc[i] ^= xsecret[i]; */
xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
xxh_u64x2 const data_key = data_vec ^ key_vec;
/* xacc[i] *= XXH_PRIME32_1 */
/* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
/* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
xacc[i] = prod_odd + (prod_even << v32);
} }
}
#endif
/* scalar variants - universal */
/*!
* @internal
* @brief Scalar round for @ref XXH3_accumulate_512_scalar().
*
* This is extracted to its own function because the NEON path uses a combination
* of NEON and scalar.
*/
XXH_FORCE_INLINE void
XXH3_scalarRound(void* XXH_RESTRICT acc,
                 void const* XXH_RESTRICT input,
                 void const* XXH_RESTRICT secret,
                 size_t lane)
{
    xxh_u64* const lanes = (xxh_u64*) acc;
    xxh_u8 const* const in_bytes  = (xxh_u8 const*) input;
    xxh_u8 const* const sec_bytes = (xxh_u8 const*) secret;
    XXH_ASSERT(lane < XXH_ACC_NB);
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
    {
        xxh_u64 const value = XXH_readLE64(in_bytes  + lane * 8);
        xxh_u64 const keyed = value ^ XXH_readLE64(sec_bytes + lane * 8);
        /* the raw input goes into the adjacent lane (pair swap) ... */
        lanes[lane ^ 1] += value;
        /* ... while this lane accumulates the 32x32->64 product of the keyed halves */
        lanes[lane] += XXH_mult32to64(keyed & 0xFFFFFFFF, keyed >> 32);
    }
}
/*!
* @internal
* @brief Processes a 64 byte block of data using the scalar path.
*/
XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
size_t i;
/* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
#if defined(__GNUC__) && !defined(__clang__) \
&& (defined(__arm__) || defined(__thumb2__)) \
&& defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
&& XXH_SIZE_OPT <= 0
# pragma GCC unroll 8
#endif
/* one scalar round per 64-bit accumulator lane */
for (i=0; i < XXH_ACC_NB; i++) {
XXH3_scalarRound(acc, input, secret, i);
}
}
/*!
* @internal
* @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
*
* This is extracted to its own function because the NEON path uses a combination
* of NEON and scalar.
*/
XXH_FORCE_INLINE void
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
                         void const* XXH_RESTRICT secret,
                         size_t lane)
{
    xxh_u64* const lanes = (xxh_u64*) acc;            /* presumed aligned */
    const xxh_u8* const sec = (const xxh_u8*) secret; /* no alignment restriction */
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
    XXH_ASSERT(lane < XXH_ACC_NB);
    {
        /* xorshift by 47, xor with the secret word, multiply by a 32-bit prime */
        xxh_u64 const mixed = XXH_xorshift64(lanes[lane], 47) ^ XXH_readLE64(sec + lane * 8);
        lanes[lane] = mixed * XXH_PRIME32_1;
    }
}
/*!
 * @internal
 * @brief Scrambles all accumulator lanes after a large chunk has been read.
 */
XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    size_t lane;
    for (lane = 0; lane < XXH_ACC_NB; lane++) {
        XXH3_scalarScrambleRound(acc, secret, lane);
    }
}
/*
 * Derives a custom secret from the built-in XXH3_kSecret and a 64-bit seed:
 * each 16-byte chunk gets (lo + seed, hi - seed) applied to its two words.
 * Writes XXH_SECRET_DEFAULT_SIZE bytes into customSecret.
 */
XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * We need a separate pointer for the hack below,
     * which requires a non-const pointer.
     * Any decent compiler will optimize this out otherwise.
     */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
#if defined(__clang__) && defined(__aarch64__)
    /*
     * UGLY HACK:
     * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
     * placed sequentially, in order, at the top of the unrolled loop.
     *
     * While MOVK is great for generating constants (2 cycles for a 64-bit
     * constant compared to 4 cycles for LDR), it fights for bandwidth with
     * the arithmetic instructions.
     *
     *   I   L   S
     * MOVK
     * MOVK
     * MOVK
     * MOVK
     * ADD
     * SUB      STR
     *          STR
     * By forcing loads from memory (as the asm line causes Clang to assume
     * that kSecretPtr has been changed), the pipelines are used more
     * efficiently:
     *   I   L   S
     *      LDR
     *  ADD LDR
     *  SUB     STR
     *          STR
     *
     * See XXH3_NEON_LANES for details on the pipeline.
     *
     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
     *   without hack: 2654.4 MB/s
     *   with hack:    3202.9 MB/s
     */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    /*
     * Note: in debug mode, this overrides the asm optimization
     * and Clang will emit MOVK chains again.
     */
    XXH_ASSERT(kSecretPtr == XXH3_kSecret);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /*
             * The asm hack causes Clang to assume that kSecretPtr aliases with
             * customSecret, and on aarch64, this prevented LDP from merging two
             * loads together for free. Putting the loads together before the stores
             * properly generates LDP.
             */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}
/*
 * Function pointer types used to pass the accumulate / scramble /
 * secret-initialization implementations through the hashLong and
 * streaming internals.
 */
typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
/*
 * Bind the generic names to the implementation selected by XXH_VECTOR.
 * Note: NEON and VSX fall back to the scalar path for secret initialization.
 */
#if (XXH_VECTOR == XXH_AVX512)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
#elif (XXH_VECTOR == XXH_AVX2)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
#elif (XXH_VECTOR == XXH_SSE2)
#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
#elif (XXH_VECTOR == XXH_NEON)
#define XXH3_accumulate_512 XXH3_accumulate_512_neon
#define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#elif (XXH_VECTOR == XXH_VSX)
#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
#define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#else /* scalar */
#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#endif
#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
# undef XXH3_initCustomSecret
# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#endif
/*
 * Prefetch distance (in bytes) used by XXH3_accumulate().
 * Tuned per compiler / vector width; overridable by defining
 * XXH_PREFETCH_DIST before including this file.
 */
#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif  /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */
/*
 * XXH3_accumulate()
 * Loops over XXH3_accumulate_512(), ingesting `nbStripes` consecutive stripes.
 * Assumption: nbStripes will not overflow the secret size.
 */
XXH_FORCE_INLINE void
XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
                const xxh_u8* XXH_RESTRICT input,
                const xxh_u8* XXH_RESTRICT secret,
                      size_t nbStripes,
                      XXH3_f_accumulate_512 f_acc512)
{
    size_t stripe;
    for (stripe = 0; stripe < nbStripes; stripe++) {
        const xxh_u8* const stripeStart = input + stripe * XXH_STRIPE_LEN;
        /* Prefetch ahead to hide memory latency on large inputs. */
        XXH_PREFETCH(stripeStart + XXH_PREFETCH_DIST);
        f_acc512(acc, stripeStart, secret + stripe * XXH_SECRET_CONSUME_RATE);
    }
}
/*
 * Core long-input loop: ingests `len` bytes block by block, scrambling the
 * accumulators after each full block, then handles the last partial block
 * and the final (possibly overlapping) stripe with a shifted secret offset.
 * Requires len > XXH_STRIPE_LEN.
 */
XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                      const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    /* `len - 1` so an exact multiple of block_len does not count a trailing empty block */
    size_t const nb_blocks = (len - 1) / block_len;
    size_t n;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    for (n = 0; n < nb_blocks; n++) {
        XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }
    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
        /* last stripe: always reads the final XXH_STRIPE_LEN bytes, possibly
         * overlapping the stripes consumed above */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
            f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
/* Folds one pair of accumulator lanes with 16 bytes of secret into 64 bits. */
XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
    xxh_u64 const keyedLo = acc[0] ^ XXH_readLE64(secret);
    xxh_u64 const keyedHi = acc[1] ^ XXH_readLE64(secret + 8);
    return XXH3_mul128_fold64(keyedLo, keyedHi);
}
/*
 * Condenses the 8 accumulator lanes (4 pairs) into a single avalanched
 * 64-bit hash, mixing each pair with 16 bytes of secret.
 * `start` seeds the fold (typically len * XXH_PRIME64_1).
 */
static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i = 0;
    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
         * XXH3_64bits, len == 256, Snapdragon 835:
         *   without hack: 2063.7 MB/s
         *   with hack:    2560.7 MB/s
         */
        XXH_COMPILER_GUARD(result64);
#endif
    }
    return XXH3_avalanche(result64);
}
/* Initial values for the 8 accumulator lanes (a mix of 32- and 64-bit primes). */
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
/*
 * Long-input (> XXH3_MIDSIZE_MAX) 64-bit hash: runs the block loop over the
 * whole input, then merges the accumulators into the final hash.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate_512 f_acc512,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
 * It's important for performance to transmit secret's size (when it's static)
 * so that the compiler can properly optimize the vectorized loop.
 * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    /* The seed is unused here: a caller-provided secret replaces seeding entirely. */
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len,
                                      secret, secretLen,
                                      XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * It's preferable for performance that XXH3_hashLong is not inlined,
 * as it results in a smaller function for small data, easier to the instruction cache.
 * Note that inside this no_inline function, we do inline the internal loop,
 * and provide a statically defined secret size to allow optimization of vector loop.
 */
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    /* Default variant ignores seed and custom secret: built-in secret only. */
    (void)seed64;
    (void)secret;
    (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len,
                                      XXH3_kSecret, sizeof(XXH3_kSecret),
                                      XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * XXH3_hashLong_64b_withSeed():
 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
 * and then use this key for long mode hashing.
 *
 * This operation is decently fast but nonetheless costs a little bit of time.
 * Try to avoid it whenever possible (typically when seed==0).
 *
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
 * why (uop cache maybe?), but the difference is large and easily measurable.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate_512 f_acc512,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
#if XXH_SIZE_OPT <= 0
    /* Fast path: seed==0 is defined to be identical to the unseeded variant,
     * so skip the custom-secret generation entirely. */
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc512, f_scramble);
#endif
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc512, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
{
    /* A seeded long hash derives its own secret; the caller's is ignored. */
    (void)secret;
    (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                                               XXH3_accumulate_512,
                                               XXH3_scrambleAcc,
                                               XXH3_initCustomSecret);
}
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
/*
 * Common 64-bit front end: dispatches to the length-specialized kernels,
 * falling back to `f_hashLong` above XXH3_MIDSIZE_MAX bytes.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    const xxh_u8* const in8  = (const xxh_u8*)input;
    const xxh_u8* const sec8 = (const xxh_u8*)secret;
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken if `secretLen` condition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash,
     * and the function signature has no room to return an error anyway.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b(in8, len, sec8, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b(in8, len, sec8, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b(in8, len, sec8, secretLen, seed64);
    return f_hashLong(input, len, seed64, sec8, secretLen);
}
/* === Public entry point === */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t length)
{
    /* Unseeded: built-in secret, default long-hash path. */
    return XXH3_64bits_internal(input, length, 0,
                                XXH3_kSecret, sizeof(XXH3_kSecret),
                                XXH3_hashLong_64b_default);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void* input, size_t length, const void* secret, size_t secretSize)
{
    /* Caller-provided secret, no seed. */
    return XXH3_64bits_internal(input, length, 0,
                                secret, secretSize,
                                XXH3_hashLong_64b_withSecret);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void* input, size_t length, XXH64_hash_t seed)
{
    /* Seeded: short paths mix the seed directly, long path derives a secret. */
    return XXH3_64bits_internal(input, length, seed,
                                XXH3_kSecret, sizeof(XXH3_kSecret),
                                XXH3_hashLong_64b_withSeed);
}
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* input, size_t length, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    /* Short inputs use the seeded default-secret path; the custom secret only
     * takes effect on long inputs, where it feeds the block loop directly. */
    if (length <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, length, seed,
                                    XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, length, seed,
                                        (const xxh_u8*)secret, secretSize);
}
/* === XXH3 streaming === */
#ifndef XXH_NO_STREAM
/*
 * Malloc's a pointer that is always aligned to align.
 *
 * This must be freed with `XXH_alignedFree()`.
 *
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
 * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
 *
 * This underalignment previously caused a rather obvious crash which went
 * completely unnoticed due to XXH3_createState() not actually being tested.
 * Credit to RedSpah for noticing this bug.
 *
 * The alignment is done manually: Functions like posix_memalign or _mm_malloc
 * are avoided: To maintain portability, we would have to write a fallback
 * like this anyways, and besides, testing for the existence of library
 * functions without relying on external build tools is impossible.
 *
 * The method is simple: Overallocate, manually align, and store the offset
 * to the original behind the returned pointer.
 *
 * Align must be a power of 2 and 8 <= align <= 128.
 */
static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             *
             * offset is always in [1, align]: even if the returned pointer is
             * already aligned, there will be at least one byte before it to
             * store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;
            XXH_ASSERT((size_t)ptr % align == 0);
            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
/*
* Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
* normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
*/
static void XXH_alignedFree(void* p)
{
if (p != NULL) {
xxh_u8* ptr = (xxh_u8*)p;
/* Get the offset byte we added in XXH_malloc. */
xxh_u8 offset = ptr[-1];
/* Free the original malloc'd pointer */
xxh_u8* base = ptr - offset;
XXH_free(base);
}
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    /* The state is allocated 64-byte aligned for the SIMD accumulators. */
    void* const mem = XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (mem == NULL) return NULL;
    {
        XXH3_state_t* const state = (XXH3_state_t*)mem;
        XXH3_INITSTATE(state);
        return state;
    }
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    /* XXH_alignedFree() tolerates NULL, so no guard is needed here. */
    XXH_alignedFree(statePtr);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/* Copies the full streaming state. Note: dst_state->extSecret will point to
 * the same external secret buffer as src_state's (shallow copy). */
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}
/*
 * (Re)initializes a streaming state for a new hash.
 * Precondition (asserted only): secretSize >= XXH3_SECRET_SIZE_MIN.
 * When `secret` is NULL the state's internal customSecret is used instead
 * (selected lazily at update/digest time).
 */
static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    /* Zero only the run-to-run fields, located between bufferedSize and
     * nbStripesPerBlock in the struct layout; customSecret (before this
     * range) is preserved so it can be reused across resets. */
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    /* Standard XXH3 accumulator start values (same as XXH3_INIT_ACC). */
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    /* Seed 0 with the built-in secret selects the default, unseeded variant. */
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/*
 * Resets the state to hash with a caller-provided secret.
 * Returns XXH_ERROR if statePtr or secret is NULL, or if the secret is
 * shorter than XXH3_SECRET_SIZE_MIN.
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    /* Validate the secret BEFORE calling XXH3_reset_internal():
     * the latter computes secretLimit = secretSize - XXH_STRIPE_LEN, which
     * underflows (size_t wrap) for an undersized secret, and asserts on
     * secretSize — so checking afterwards reported the error only after the
     * state had already been clobbered with bogus values. Checking first
     * also matches XXH3_64bits_reset_withSecretandSeed(). */
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/*
 * Resets the state to hash with a seed.
 * seed==0 is defined identical to the unseeded variant, so it delegates
 * to XXH3_64bits_reset().
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    /* Regenerate customSecret only when necessary: it is preserved across
     * resets, so an unchanged seed (with no external secret in the way)
     * can reuse the previously derived secret. */
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    /* secret==NULL => the state's customSecret is used at update/digest time */
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    /* Reject a missing state, a missing secret, or an undersized secret. */
    if (statePtr == NULL || secret == NULL || secretSize < XXH3_SECRET_SIZE_MIN)
        return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    /* Force the seeded digest path, even when seed64 == 0. */
    statePtr->useSeed = 1;
    return XXH_OK;
}
/* Note : when XXH3_consumeStripes() is invoked,
 * there must be a guarantee that at least one more byte must be consumed from input
 * so that the function can blindly consume all stripes using the "normal" secret segment */
/*
 * Ingests up to one block's worth of stripes, tracking the position within
 * the current block via *nbStripesSoFarPtr and scrambling at most once
 * (when the block boundary is crossed).
 */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate_512 f_acc512,
                    XXH3_f_scrambleAcc f_scramble)
{
    XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
        /* need a scrambling operation: finish the current block, scramble,
         * then start the next block from the beginning of the secret */
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
        f_scramble(acc, secret + secretLimit);
        XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
        *nbStripesSoFarPtr = nbStripesAfterBlock;
    } else {
        /* stays within the current block: just advance the secret offset */
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
        *nbStripesSoFarPtr += nbStripes;
    }
}
#ifndef XXH3_STREAM_USE_STACK
# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
# define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 *
 * Streaming ingestion: small inputs are appended to the internal buffer;
 * larger inputs flush the buffer, then consume stripes/blocks directly from
 * `input`, always keeping at least one byte buffered for the final stripe.
 * The accumulate/scramble implementations are passed as function pointers so
 * the 64-bit and 128-bit front ends can share this code.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }
    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* For some reason, gcc and MSVC seem to suffer greatly
         * when operating accumulators directly into state.
         * Operating into stack space seems to enable proper optimization.
         * clang, on the other hand, doesn't seem to need this trick */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
        /* small input : just fill in tmp buffer */
        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }
        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);  /* clean multiple */
        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);
        /* large input to consume : ingest per full block */
        if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
            /* `- 1` guarantees at least one byte remains to be buffered below */
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
            /* join to current block's end */
            {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
                XXH_ASSERT(nbStripesToEnd <= nbStripes);
                XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                state->nbStripesSoFar = 0;
                input += nbStripesToEnd * XXH_STRIPE_LEN;
                nbStripes -= nbStripesToEnd;
            }
            /* consume per entire blocks */
            while(nbStripes >= state->nbStripesPerBlock) {
                XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
                nbStripes -= state->nbStripesPerBlock;
            }
            /* consume last partial block */
            XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
            input += nbStripes * XXH_STRIPE_LEN;
            XXH_ASSERT(input < bEnd);  /* at least some bytes left */
            state->nbStripesSoFar = nbStripes;
            /* buffer predecessor of last partial stripe */
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
        } else {
            /* content to consume <= block size */
            /* Consume input by a multiple of internal buffer size */
            if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
                const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
                do {
                    XXH3_consumeStripes(acc,
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, XXH3_INTERNALBUFFER_STRIPES,
                                        secret, state->secretLimit,
                                        f_acc512, f_scramble);
                    input += XXH3_INTERNALBUFFER_SIZE;
                } while (input<limit);
                /* buffer predecessor of last partial stripe */
                XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            }
        }
        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        memcpy(state->acc, acc, sizeof(acc));
#endif
    }
    return XXH_OK;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    /* Delegate to the shared streaming routine with the compile-time
     * selected accumulate / scramble implementations. */
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * Finalizes the accumulators for a long streaming hash, writing the result
 * into `acc` (8 lanes). The state itself is never modified.
 */
XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        /* `- 1` keeps the final stripe for the special last-accumulate below */
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        /* local copy: the state's block position must not be advanced */
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        /* Rebuild a full stripe from the tail of the buffer (which holds the
         * predecessor bytes saved by XXH3_update) plus the buffered bytes. */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}
/*! @ingroup XXH3_family */
/*
 * Produces the 64-bit hash of everything ingested so far.
 * Non-destructive: the state can keep receiving updates afterwards.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        /* long-hash path: finalize the accumulators and merge them */
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input
     * (which still fits entirely in state->buffer) via the one-shot paths */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
/* ==========================================
* XXH3 128 bits (a.k.a XXH128)
* ==========================================
* XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
* even without counting the significantly larger output size.
*
* For example, extra steps are taken to avoid the seed-dependent collisions
* in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
*
* This strength naturally comes at the cost of some speed, especially on short
* lengths. Note that longer hashes are about as fast as the 64-bit version
* due to it using only a slight modification of the 64-bit loop.
*
* XXH128 is also more oriented towards 64-bit machines. It is still extremely
* fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
*/
/*
 * 128-bit hash for inputs of 1-3 bytes.
 * A doubled version of 1to3_64b with different constants.
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        /* the high word reuses the same bytes, byte-swapped and rotated */
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}
/* 128-bit hash for inputs of 4-8 bytes. */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    /* fold a byte-swapped copy of the low seed half into the high half */
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   /* the two (possibly overlapping) 32-bit reads cover all of input */
        xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;
        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);
        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}
/* 128-bit hash for inputs of 9-16 bytes. */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        /* two (possibly overlapping) 64-bit reads cover all of input */
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64 ^= XXH_swap64(m128.high64);
        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;
            h128.low64   = XXH3_avalanche(h128.low64);
            h128.high64  = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
/*
 * Dispatcher for 0-16 byte inputs.
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    if (len > 8)  return XXH3_len_9to16_128b(input, len, secret, seed);
    if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
    if (len != 0) return XXH3_len_1to3_128b(input, len, secret, seed);
    /* len == 0: only the seed and dedicated secret bits feed the result */
    {   xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
        xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(seed ^ bitflipl);
        h128.high64 = XXH64_avalanche(seed ^ bitfliph);
        return h128;
    }
}
/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    /* Cross-pollinate: each half is mixed with one input and xored with
     * the raw words of the other. */
    xxh_u64 lo = acc.low64;
    xxh_u64 hi = acc.high64;
    lo += XXH3_mix16B(input_1, secret + 0, seed);
    lo ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    hi += XXH3_mix16B(input_2, secret + 16, seed);
    hi ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    acc.low64  = lo;
    acc.high64 = hi;
    return acc;
}
/*
 * 128-bit hash for mid-size inputs (17 to 128 bytes).
 * Processes up to four pairs of 16-byte lanes taken symmetrically from the
 * start and end of the buffer (lanes may overlap for odd sizes), mixing
 * each pair against successive 32-byte slices of the secret.
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(16 < len && len <= 128);
{ XXH128_hash_t acc;
/* Seed the accumulator with the length so sizes diverge immediately. */
acc.low64 = len * XXH_PRIME64_1;
acc.high64 = 0;
#if XXH_SIZE_OPT >= 1
{
/* Smaller, but slightly slower. */
size_t i = (len - 1) / 32;
do {
acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
} while (i-- != 0);
}
#else
/* Unrolled variant: the widest inputs mix their innermost lane pairs first. */
if (len > 32) {
if (len > 64) {
if (len > 96) {
acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
}
acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
}
acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
}
acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
#endif
/* Condense the two accumulator words into the final 128-bit result. */
{ XXH128_hash_t h128;
h128.low64 = acc.low64 + acc.high64;
h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ (acc.high64 * XXH_PRIME64_4)
+ ((len - seed) * XXH_PRIME64_2);
h128.low64 = XXH3_avalanche(h128.low64);
h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
return h128;
}
}
}
/*
 * 128-bit hash for mid-size inputs (129 to XXH3_MIDSIZE_MAX bytes).
 * Runs XXH128_mix32B over successive 32-byte chunks: the first four rounds
 * consume the secret from its start, later rounds from an offset region,
 * and the final 32 bytes of input are mixed last with a negated seed.
 */
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
{ XXH128_hash_t acc;
int const nbRounds = (int)len / 32;
int i;
acc.low64 = len * XXH_PRIME64_1;
acc.high64 = 0;
/* First 4 rounds: secret consumed from its start. */
for (i=0; i<4; i++) {
acc = XXH128_mix32B(acc,
input + (32 * i),
input + (32 * i) + 16,
secret + (32 * i),
seed);
}
/* Intermediate avalanche so early and late rounds interact non-linearly. */
acc.low64 = XXH3_avalanche(acc.low64);
acc.high64 = XXH3_avalanche(acc.high64);
XXH_ASSERT(nbRounds >= 4);
/* Remaining rounds: secret consumed from XXH3_MIDSIZE_STARTOFFSET. */
for (i=4 ; i < nbRounds; i++) {
acc = XXH128_mix32B(acc,
input + (32 * i),
input + (32 * i) + 16,
secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
seed);
}
/* last bytes */
acc = XXH128_mix32B(acc,
input + len - 16,
input + len - 32,
secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
0ULL - seed);
/* Condense the two accumulator words into the final 128-bit result. */
{ XXH128_hash_t h128;
h128.low64 = acc.low64 + acc.high64;
h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ (acc.high64 * XXH_PRIME64_4)
+ ((len - seed) * XXH_PRIME64_2);
h128.low64 = XXH3_avalanche(h128.low64);
h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
return h128;
}
}
}
/*
 * Common long-input (> XXH3_MIDSIZE_MAX) 128-bit back-end.
 * Accumulates the whole input via the supplied accumulate/scramble
 * functions, then merges the 64-byte accumulator twice - once against the
 * head of the secret and once against its tail - to form the two words.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble)
{
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);
/* converge into final hash */
XXH_STATIC_ASSERT(sizeof(acc) == 64);
XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
{ XXH128_hash_t h128;
h128.low64 = XXH3_mergeAccs(acc,
secret + XXH_SECRET_MERGEACCS_START,
(xxh_u64)len * XXH_PRIME64_1);
/* high word merges against the tail of the secret with an inverted length mix */
h128.high64 = XXH3_mergeAccs(acc,
secret + secretSize
- sizeof(acc) - XXH_SECRET_MERGEACCS_START,
~((xxh_u64)len * XXH_PRIME64_2));
return h128;
}
}
/*
* It's important for performance that XXH3_hashLong() is not inlined.
*/
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    /* Long-input back-end that always hashes against the built-in default
     * secret; the seed and caller-provided secret are deliberately unused. */
    (void)seed64;
    (void)secret;
    (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len,
                                       XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
* It's important for performance to pass @p secretLen (when it's static)
* to the compiler, so that it can properly optimize the vectorized loop.
*/
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    /* Long-input back-end over a caller-provided secret; seed is ignored. */
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len,
                                       (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * Seeded long-input back-end. A zero seed short-circuits to the default
 * secret (matching the unseeded variant); any other seed first expands a
 * per-seed secret on the stack via |f_initSec|.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble,
XXH3_f_initCustomSecret f_initSec)
{
/* seed 0 hashes exactly like the unseeded variant */
if (seed64 == 0)
return XXH3_hashLong_128b_internal(input, len,
XXH3_kSecret, sizeof(XXH3_kSecret),
f_acc512, f_scramble);
{ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
f_initSec(secret, seed64);
return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
f_acc512, f_scramble);
}
}
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    /* Seeded long-input entry point. The caller's secret is ignored: a
     * per-seed secret is derived internally when the seed is non-zero. */
    (void)secret;
    (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                                                XXH3_accumulate_512,
                                                XXH3_scrambleAcc,
                                                XXH3_initCustomSecret);
}
/* Signature shared by every "hashLong" 128-bit back-end:
 * (input, len, seed, secret, secretLen) -> XXH128_hash_t. */
typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
XXH64_hash_t, const void* XXH_RESTRICT, size_t);
/*
 * Central 128-bit dispatcher: routes by input length to the short (<=16),
 * mid (<=128), and midsize (<=XXH3_MIDSIZE_MAX) paths, or to the supplied
 * long-input back-end |f_hl128| beyond that.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
XXH3_hashLong128_f f_hl128)
{
XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
/*
* If an action is to be taken if `secret` conditions are not respected,
* it should be done here.
* For now, it's a contract pre-condition.
* Adding a check and a branch here would cost performance at every hash.
*/
if (len <= 16)
return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
if (len <= 128)
return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
if (len <= XXH3_MIDSIZE_MAX)
return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
return f_hl128(input, len, seed64, secret, secretLen);
}
/* === Public XXH128 API === */
/*! @ingroup XXH3_family */
/* One-shot, unseeded 128-bit hash over the built-in default secret. */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, /* seed */ 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    /* One-shot 128-bit hash using a caller-provided secret; seed fixed to 0. */
    const xxh_u8* const secretBytes = (const xxh_u8*)secret;
    return XXH3_128bits_internal(input, len, 0,
                                 secretBytes, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    /* One-shot seeded 128-bit hash; long inputs derive a per-seed secret. */
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    /* Small/mid inputs take the seeded default-secret path (the custom
     * secret is not consulted there); long inputs hash directly against
     * the caller's secret. */
    if (len > XXH3_MIDSIZE_MAX)
        return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    /* Convenience alias for the seeded one-shot 128-bit hash. */
    return XXH3_128bits_withSeed(input, len, seed);
}
/* === XXH3 128-bit streaming === */
#ifndef XXH_NO_STREAM
/*
* All initialization and update functions are identical to 64-bit streaming variant.
* The only difference is the finalization routine.
*/
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    /* The streaming state is shared between the 64- and 128-bit variants,
     * so resetting simply delegates to the 64-bit implementation. */
    return XXH3_64bits_reset(statePtr);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    /* Shared streaming state: delegate secret-based reset to the 64-bit variant. */
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    /* Shared streaming state: delegate seeded reset to the 64-bit variant. */
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    /* Shared streaming state: delegate combined reset to the 64-bit variant. */
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    /* Streaming accumulation is identical to the 64-bit variant;
     * only the finalization (digest) differs. */
    const xxh_u8* const data = (const xxh_u8*)input;
    return XXH3_update(state, data, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
/* Finalize the streamed 128-bit hash without mutating the state. */
const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
if (state->totalLen > XXH3_MIDSIZE_MAX) {
/* Long stream: finish on a local copy of the accumulators, merging
 * against the head (low word) and tail (high word) of the secret. */
XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
XXH3_digest_long(acc, state, secret);
XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
{ XXH128_hash_t h128;
h128.low64 = XXH3_mergeAccs(acc,
secret + XXH_SECRET_MERGEACCS_START,
(xxh_u64)state->totalLen * XXH_PRIME64_1);
h128.high64 = XXH3_mergeAccs(acc,
secret + state->secretLimit + XXH_STRIPE_LEN
- sizeof(acc) - XXH_SECRET_MERGEACCS_START,
~((xxh_u64)state->totalLen * XXH_PRIME64_2));
return h128;
}
}
/* len <= XXH3_MIDSIZE_MAX : short code */
/* Short stream: the entire input is still in the buffer, so re-hash it
 * one-shot with the same seed/secret the stream was configured with. */
if (state->seed)
return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
/* 128-bit utility functions */
#include <string.h> /* memcmp, memcpy */
/* return : 1 is equal, 0 if different */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* XXH128_hash_t is compact (no padding bytes), so a bytewise
     * comparison is an exact equality test. Returns 1 if equal, 0 if not. */
    return memcmp(&h1, &h2, sizeof(h1)) == 0;
}
/* This prototype is compatible with stdlib's qsort().
* @return : >0 if *h128_1 > *h128_2
* <0 if *h128_1 < *h128_2
* =0 if *h128_1 == *h128_2 */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
/* note : bets that, in most cases, hash values are different */
if (hcmp) return hcmp;
return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
/*====== Canonical representation ======*/
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
/* Serialize to the canonical byte order: big-endian, high word first. */
XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
/* On little-endian hosts, byte-swap so the copies below emit big-endian. */
if (XXH_CPU_LITTLE_ENDIAN) {
hash.high64 = XXH_swap64(hash.high64);
hash.low64 = XXH_swap64(hash.low64);
}
XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
/* Inverse of XXH128_canonicalFromHash: read two big-endian 64-bit words,
 * high word first. */
XXH128_hash_t h;
h.high64 = XXH_readBE64(src);
h.low64 = XXH_readBE64(src->digest + 8);
return h;
}
/* ==========================================
* Secret generators
* ==========================================
*/
/* Minimum of two values. NOTE: arguments may be evaluated more than once. */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    /* XOR-fold a 128-bit hash into the 16 bytes at dst (little-endian words).
     * The two 8-byte regions do not overlap, so read/write order is free. */
    xxh_u64 const lo = XXH_readLE64(dst) ^ h128.low64;
    xxh_u64 const hi = XXH_readLE64((char*)dst + 8) ^ h128.high64;
    XXH_writeLE64(dst, lo);
    XXH_writeLE64((char*)dst + 8, hi);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
/* Derive a |secretSize|-byte secret from arbitrary user material
 * |customSeed| (any length, possibly low-entropy). Returns XXH_ERROR on
 * invalid arguments in production builds; debug builds assert instead. */
#if (XXH_DEBUGLEVEL >= 1)
XXH_ASSERT(secretBuffer != NULL);
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
/* production mode, assert() are disabled */
if (secretBuffer == NULL) return XXH_ERROR;
if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif
/* An empty custom seed falls back to the built-in default secret. */
if (customSeedSize == 0) {
customSeed = XXH3_kSecret;
customSeedSize = XXH_SECRET_DEFAULT_SIZE;
}
#if (XXH_DEBUGLEVEL >= 1)
XXH_ASSERT(customSeed != NULL);
#else
if (customSeed == NULL) return XXH_ERROR;
#endif
/* Fill secretBuffer with a copy of customSeed - repeat as needed */
{ size_t pos = 0;
while (pos < secretSize) {
size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
memcpy((char*)secretBuffer + pos, customSeed, toCopy);
pos += toCopy;
} }
/* Scramble each 16-byte segment with an XXH128-derived keystream so the
 * result no longer resembles the raw seed material. The final (possibly
 * overlapping) segment is combined with the scrambler itself. */
{ size_t const nbSeg16 = secretSize / 16;
size_t n;
XXH128_canonical_t scrambler;
XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
for (n=0; n<nbSeg16; n++) {
XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
XXH3_combine16((char*)secretBuffer + n*16, h128);
}
/* last segment */
XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
}
return XXH_OK;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
    /* Expand the 64-bit seed into a full default-size secret on the stack,
     * then copy it into the caller's buffer (must hold
     * XXH_SECRET_DEFAULT_SIZE bytes). */
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 scratch[XXH_SECRET_DEFAULT_SIZE];
    XXH_ASSERT(secretBuffer != NULL);
    XXH3_initCustomSecret(scratch, seed);
    memcpy(secretBuffer, scratch, XXH_SECRET_DEFAULT_SIZE);
}
/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
&& defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
# pragma GCC pop_options
#endif
#endif /* XXH_NO_LONG_LONG */
#endif /* XXH_NO_XXH3 */
/*!
* @}
*/
#endif /* XXH_IMPLEMENTATION */
#if defined (__cplusplus)
}
#endif
| 37.700082 | 135 | 0.651292 | [
"object",
"vector",
"model"
] |
9e5bd969047776b4a3db1287cb30029128ef8b32 | 28,551 | h | C | src/net/third_party/quiche/src/quic/core/quic_sent_packet_manager.h | goochen/naiveproxy | 1d0682ee5bae6e648cd43c65f49b4eefd224f206 | [
"BSD-3-Clause"
] | 1 | 2020-06-02T02:28:34.000Z | 2020-06-02T02:28:34.000Z | src/net/third_party/quiche/src/quic/core/quic_sent_packet_manager.h | goochen/naiveproxy | 1d0682ee5bae6e648cd43c65f49b4eefd224f206 | [
"BSD-3-Clause"
] | null | null | null | src/net/third_party/quiche/src/quic/core/quic_sent_packet_manager.h | goochen/naiveproxy | 1d0682ee5bae6e648cd43c65f49b4eefd224f206 | [
"BSD-3-Clause"
] | null | null | null | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef QUICHE_QUIC_CORE_QUIC_SENT_PACKET_MANAGER_H_
#define QUICHE_QUIC_CORE_QUIC_SENT_PACKET_MANAGER_H_
#include <cstddef>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "net/third_party/quiche/src/quic/core/congestion_control/pacing_sender.h"
#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
#include "net/third_party/quiche/src/quic/core/congestion_control/uber_loss_algorithm.h"
#include "net/third_party/quiche/src/quic/core/proto/cached_network_parameters_proto.h"
#include "net/third_party/quiche/src/quic/core/quic_packets.h"
#include "net/third_party/quiche/src/quic/core/quic_sustained_bandwidth_recorder.h"
#include "net/third_party/quiche/src/quic/core/quic_transmission_info.h"
#include "net/third_party/quiche/src/quic/core/quic_types.h"
#include "net/third_party/quiche/src/quic/core/quic_unacked_packet_map.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_containers.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
namespace quic {
namespace test {
class QuicConnectionPeer;
class QuicSentPacketManagerPeer;
} // namespace test
class QuicClock;
class QuicConfig;
struct QuicConnectionStats;
// Class which tracks the set of packets sent on a QUIC connection and contains
// a send algorithm to decide when to send new packets. It keeps track of any
// retransmittable data associated with each packet. If a packet is
// retransmitted, it will keep track of each version of a packet so that if a
// previous transmission is acked, the data will not be retransmitted.
class QUIC_EXPORT_PRIVATE QuicSentPacketManager {
public:
// Interface which gets callbacks from the QuicSentPacketManager at
// interesting points. Implementations must not mutate the state of
// the packet manager or connection as a result of these callbacks.
class QUIC_EXPORT_PRIVATE DebugDelegate {
public:
virtual ~DebugDelegate() {}
// Called when a spurious retransmission is detected.
virtual void OnSpuriousPacketRetransmission(
TransmissionType /*transmission_type*/,
QuicByteCount /*byte_size*/) {}
virtual void OnIncomingAck(QuicPacketNumber /*ack_packet_number*/,
EncryptionLevel /*ack_decrypted_level*/,
const QuicAckFrame& /*ack_frame*/,
QuicTime /*ack_receive_time*/,
QuicPacketNumber /*largest_observed*/,
bool /*rtt_updated*/,
QuicPacketNumber /*least_unacked_sent_packet*/) {
}
virtual void OnPacketLoss(QuicPacketNumber /*lost_packet_number*/,
EncryptionLevel /*encryption_level*/,
TransmissionType /*transmission_type*/,
QuicTime /*detection_time*/) {}
virtual void OnApplicationLimited() {}
virtual void OnAdjustNetworkParameters(QuicBandwidth /*bandwidth*/,
QuicTime::Delta /*rtt*/,
QuicByteCount /*old_cwnd*/,
QuicByteCount /*new_cwnd*/) {}
virtual void OnOvershootingDetected() {}
};
// Interface which gets callbacks from the QuicSentPacketManager when
// network-related state changes. Implementations must not mutate the
// state of the packet manager as a result of these callbacks.
class QUIC_EXPORT_PRIVATE NetworkChangeVisitor {
public:
virtual ~NetworkChangeVisitor() {}
// Called when congestion window or RTT may have changed.
virtual void OnCongestionChange() = 0;
// Called when the Path MTU may have increased.
virtual void OnPathMtuIncreased(QuicPacketLength packet_size) = 0;
};
// The retransmission timer is a single timer which switches modes depending
// upon connection state.
enum RetransmissionTimeoutMode {
// A conventional TCP style RTO.
RTO_MODE,
// A tail loss probe. By default, QUIC sends up to two before RTOing.
TLP_MODE,
// Retransmission of handshake packets prior to handshake completion.
HANDSHAKE_MODE,
// Re-invoke the loss detection when a packet is not acked before the
// loss detection algorithm expects.
LOSS_MODE,
// A probe timeout. At least one probe packet must be sent when timer
// expires.
PTO_MODE,
};
QuicSentPacketManager(Perspective perspective,
const QuicClock* clock,
QuicRandom* random,
QuicConnectionStats* stats,
CongestionControlType congestion_control_type);
QuicSentPacketManager(const QuicSentPacketManager&) = delete;
QuicSentPacketManager& operator=(const QuicSentPacketManager&) = delete;
virtual ~QuicSentPacketManager();
virtual void SetFromConfig(const QuicConfig& config);
void ApplyConnectionOptions(const QuicTagVector& connection_options);
// Pass the CachedNetworkParameters to the send algorithm.
void ResumeConnectionState(
const CachedNetworkParameters& cached_network_params,
bool max_bandwidth_resumption);
void SetMaxPacingRate(QuicBandwidth max_pacing_rate) {
pacing_sender_.set_max_pacing_rate(max_pacing_rate);
}
QuicBandwidth MaxPacingRate() const {
return pacing_sender_.max_pacing_rate();
}
// Called to mark the handshake state complete, and all handshake packets are
// neutered.
// TODO(fayang): Rename this function to OnHandshakeComplete.
void SetHandshakeConfirmed();
// Requests retransmission of all unacked 0-RTT packets.
// Only 0-RTT encrypted packets will be retransmitted. This can happen,
// for example, when a CHLO has been rejected and the previously encrypted
// data needs to be encrypted with a new key.
void MarkZeroRttPacketsForRetransmission();
// Notify the sent packet manager of an external network measurement or
// prediction for either |bandwidth| or |rtt|; either can be empty.
void AdjustNetworkParameters(
const SendAlgorithmInterface::NetworkParams& params);
void SetLossDetectionTuner(
std::unique_ptr<LossDetectionTunerInterface> tuner);
void OnConfigNegotiated();
void OnConnectionClosed();
// Retransmits the oldest pending packet there is still a tail loss probe
// pending. Invoked after OnRetransmissionTimeout.
bool MaybeRetransmitTailLossProbe();
// Retransmits the oldest pending packet.
bool MaybeRetransmitOldestPacket(TransmissionType type);
// Removes the retransmittable frames from all unencrypted packets to ensure
// they don't get retransmitted.
void NeuterUnencryptedPackets();
// Returns true if there's outstanding crypto data.
bool HasUnackedCryptoPackets() const {
return unacked_packets_.HasPendingCryptoPackets();
}
// Returns true if there are packets in flight expecting to be acknowledged.
bool HasInFlightPackets() const {
return unacked_packets_.HasInFlightPackets();
}
// Returns the smallest packet number of a serialized packet which has not
// been acked by the peer.
QuicPacketNumber GetLeastUnacked() const {
return unacked_packets_.GetLeastUnacked();
}
// Called when we have sent bytes to the peer. This informs the manager both
// the number of bytes sent and if they were retransmitted. Returns true if
// the sender should reset the retransmission timer.
bool OnPacketSent(SerializedPacket* serialized_packet,
QuicTime sent_time,
TransmissionType transmission_type,
HasRetransmittableData has_retransmittable_data);
// Called when the retransmission timer expires and returns the retransmission
// mode.
RetransmissionTimeoutMode OnRetransmissionTimeout();
// Calculate the time until we can send the next packet to the wire.
// Note 1: When kUnknownWaitTime is returned, there is no need to poll
// TimeUntilSend again until we receive an OnIncomingAckFrame event.
// Note 2: Send algorithms may or may not use |retransmit| in their
// calculations.
QuicTime::Delta TimeUntilSend(QuicTime now) const;
// Returns the current delay for the retransmission timer, which may send
// either a tail loss probe or do a full RTO. Returns QuicTime::Zero() if
// there are no retransmittable packets.
const QuicTime GetRetransmissionTime() const;
// Returns the current delay for the path degrading timer, which is used to
// notify the session that this connection is degrading.
const QuicTime::Delta GetPathDegradingDelay() const;
// Returns the current delay for detecting network blackhole.
const QuicTime::Delta GetNetworkBlackholeDelay(
int8_t num_rtos_for_blackhole_detection) const;
// Returns the delay before reducing max packet size. This delay is guranteed
// to be smaller than the network blackhole delay.
QuicTime::Delta GetMtuReductionDelay(
int8_t num_rtos_for_blackhole_detection) const;
const RttStats* GetRttStats() const { return &rtt_stats_; }
// Returns the estimated bandwidth calculated by the congestion algorithm.
QuicBandwidth BandwidthEstimate() const {
return send_algorithm_->BandwidthEstimate();
}
const QuicSustainedBandwidthRecorder* SustainedBandwidthRecorder() const {
return &sustained_bandwidth_recorder_;
}
// Returns the size of the current congestion window in number of
// kDefaultTCPMSS-sized segments. Note, this is not the *available* window.
// Some send algorithms may not use a congestion window and will return 0.
QuicPacketCount GetCongestionWindowInTcpMss() const {
return send_algorithm_->GetCongestionWindow() / kDefaultTCPMSS;
}
// Returns the number of packets of length |max_packet_length| which fit in
// the current congestion window. More packets may end up in flight if the
// congestion window has been recently reduced, of if non-full packets are
// sent.
QuicPacketCount EstimateMaxPacketsInFlight(
QuicByteCount max_packet_length) const {
return send_algorithm_->GetCongestionWindow() / max_packet_length;
}
// Returns the size of the current congestion window size in bytes.
QuicByteCount GetCongestionWindowInBytes() const {
return send_algorithm_->GetCongestionWindow();
}
QuicBandwidth GetPacingRate() const {
return send_algorithm_->PacingRate(GetBytesInFlight());
}
// Returns the size of the slow start congestion window in nume of 1460 byte
// TCP segments, aka ssthresh. Some send algorithms do not define a slow
// start threshold and will return 0.
QuicPacketCount GetSlowStartThresholdInTcpMss() const {
return send_algorithm_->GetSlowStartThreshold() / kDefaultTCPMSS;
}
// Return the total time spent in slow start so far. If the sender is
// currently in slow start, the return value will include the duration between
// the most recent entry to slow start and now.
//
// Only implemented for BBR. Return QuicTime::Delta::Infinite() for other
// congestion controllers.
QuicTime::Delta GetSlowStartDuration() const;
// Returns debugging information about the state of the congestion controller.
std::string GetDebugState() const;
// Returns the number of bytes that are considered in-flight, i.e. not lost or
// acknowledged.
QuicByteCount GetBytesInFlight() const {
return unacked_packets_.bytes_in_flight();
}
// Called when peer address changes and the connection migrates.
void OnConnectionMigration(AddressChangeType type);
// Called when an ack frame is initially parsed.
void OnAckFrameStart(QuicPacketNumber largest_acked,
QuicTime::Delta ack_delay_time,
QuicTime ack_receive_time);
// Called when ack range [start, end) is received. Populates packets_acked_
// with newly acked packets.
void OnAckRange(QuicPacketNumber start, QuicPacketNumber end);
// Called when a timestamp is processed. If it's present in packets_acked_,
// the timestamp field is set. Otherwise, the timestamp is ignored.
void OnAckTimestamp(QuicPacketNumber packet_number, QuicTime timestamp);
// Called when an ack frame is parsed completely.
AckResult OnAckFrameEnd(QuicTime ack_receive_time,
QuicPacketNumber ack_packet_number,
EncryptionLevel ack_decrypted_level);
void EnableMultiplePacketNumberSpacesSupport();
void SetDebugDelegate(DebugDelegate* debug_delegate);
void SetPacingAlarmGranularity(QuicTime::Delta alarm_granularity) {
pacing_sender_.set_alarm_granularity(alarm_granularity);
}
QuicPacketNumber GetLargestObserved() const {
return unacked_packets_.largest_acked();
}
QuicPacketNumber GetLargestAckedPacket(
EncryptionLevel decrypted_packet_level) const;
QuicPacketNumber GetLargestSentPacket() const {
return unacked_packets_.largest_sent_packet();
}
// Returns the lowest of the largest acknowledged packet and the least
// unacked packet. This is designed to be used when computing the packet
// number length to send.
QuicPacketNumber GetLeastPacketAwaitedByPeer(
EncryptionLevel encryption_level) const;
QuicPacketNumber GetLargestPacketPeerKnowsIsAcked(
EncryptionLevel decrypted_packet_level) const;
void SetNetworkChangeVisitor(NetworkChangeVisitor* visitor) {
DCHECK(!network_change_visitor_);
DCHECK(visitor);
network_change_visitor_ = visitor;
}
bool InSlowStart() const { return send_algorithm_->InSlowStart(); }
size_t GetConsecutiveRtoCount() const { return consecutive_rto_count_; }
size_t GetConsecutiveTlpCount() const { return consecutive_tlp_count_; }
size_t GetConsecutivePtoCount() const { return consecutive_pto_count_; }
void OnApplicationLimited();
const SendAlgorithmInterface* GetSendAlgorithm() const {
return send_algorithm_.get();
}
void SetSessionNotifier(SessionNotifierInterface* session_notifier) {
unacked_packets_.SetSessionNotifier(session_notifier);
}
NextReleaseTimeResult GetNextReleaseTime() const;
QuicPacketCount initial_congestion_window() const {
return initial_congestion_window_;
}
QuicPacketNumber largest_packet_peer_knows_is_acked() const {
DCHECK(!supports_multiple_packet_number_spaces());
return largest_packet_peer_knows_is_acked_;
}
size_t pending_timer_transmission_count() const {
return pending_timer_transmission_count_;
}
QuicTime::Delta peer_max_ack_delay() const { return peer_max_ack_delay_; }
void set_peer_max_ack_delay(QuicTime::Delta peer_max_ack_delay) {
// The delayed ack time should never be more than one half the min RTO time.
DCHECK_LE(peer_max_ack_delay, (min_rto_timeout_ * 0.5));
peer_max_ack_delay_ = peer_max_ack_delay;
}
const QuicUnackedPacketMap& unacked_packets() const {
return unacked_packets_;
}
const UberLossAlgorithm* uber_loss_algorithm() const {
return &uber_loss_algorithm_;
}
// Sets the send algorithm to the given congestion control type and points the
// pacing sender at |send_algorithm_|. Can be called any number of times.
void SetSendAlgorithm(CongestionControlType congestion_control_type);
// Sets the send algorithm to |send_algorithm| and points the pacing sender at
// |send_algorithm_|. Takes ownership of |send_algorithm|. Can be called any
// number of times.
// Setting the send algorithm once the connection is underway is dangerous.
void SetSendAlgorithm(SendAlgorithmInterface* send_algorithm);
// Sends up to max_probe_packets_per_pto_ probe packets.
void MaybeSendProbePackets();
// Called to adjust pending_timer_transmission_count_ accordingly.
void AdjustPendingTimerTransmissions();
// Called to disable HANDSHAKE_MODE, and only PTO and LOSS modes are used.
// Also enable IETF loss detection.
void EnableIetfPtoAndLossDetection();
// Called to set the start point of doing exponential backoff when calculating
// PTO timeout.
void StartExponentialBackoffAfterNthPto(
size_t exponential_backoff_start_point);
// Called to retransmit in flight packet of |space| if any.
void RetransmitDataOfSpaceIfAny(PacketNumberSpace space);
// Returns true if |timeout| is less than 3 * RTO/PTO delay.
bool IsLessThanThreePTOs(QuicTime::Delta timeout) const;
// Returns current PTO delay.
QuicTime::Delta GetPtoDelay() const;
bool supports_multiple_packet_number_spaces() const {
return unacked_packets_.supports_multiple_packet_number_spaces();
}
bool pto_enabled() const { return pto_enabled_; }
bool handshake_mode_disabled() const { return handshake_mode_disabled_; }
bool skip_packet_number_for_pto() const {
return skip_packet_number_for_pto_;
}
bool one_rtt_packet_acked() const { return one_rtt_packet_acked_; }
void OnUserAgentIdKnown() { loss_algorithm_->OnUserAgentIdKnown(); }
bool fix_packet_number_length() const { return fix_packet_number_length_; }
private:
friend class test::QuicConnectionPeer;
friend class test::QuicSentPacketManagerPeer;
// Returns the current retransmission mode.
RetransmissionTimeoutMode GetRetransmissionMode() const;
// Retransmits all crypto stream packets.
void RetransmitCryptoPackets();
// Retransmits two packets for an RTO and removes any non-retransmittable
// packets from flight.
void RetransmitRtoPackets();
// Returns the timeout for retransmitting crypto handshake packets.
const QuicTime::Delta GetCryptoRetransmissionDelay() const;
// Calls GetTailLossProbeDelay() with values from the current state of this
// packet manager as its params.
const QuicTime::Delta GetTailLossProbeDelay() const;
// Calls GetRetransmissionDelay() with values from the current state of this
// packet manager as its params.
const QuicTime::Delta GetRetransmissionDelay() const;
// Returns the probe timeout.
const QuicTime::Delta GetProbeTimeoutDelay(PacketNumberSpace space) const;
// Update the RTT if the ack is for the largest acked packet number.
// Returns true if the rtt was updated.
bool MaybeUpdateRTT(QuicPacketNumber largest_acked,
QuicTime::Delta ack_delay_time,
QuicTime ack_receive_time);
// Invokes the loss detection algorithm and loses and retransmits packets if
// necessary.
void InvokeLossDetection(QuicTime time);
// Invokes OnCongestionEvent if |rtt_updated| is true, there are pending acks,
// or pending losses. Clears pending acks and pending losses afterwards.
// |prior_in_flight| is the number of bytes in flight before the losses or
// acks, |event_time| is normally the timestamp of the ack packet which caused
// the event, although it can be the time at which loss detection was
// triggered.
void MaybeInvokeCongestionEvent(bool rtt_updated,
QuicByteCount prior_in_flight,
QuicTime event_time);
// Removes the retransmittability and in flight properties from the packet at
// |info| due to receipt by the peer.
void MarkPacketHandled(QuicPacketNumber packet_number,
QuicTransmissionInfo* info,
QuicTime ack_receive_time,
QuicTime::Delta ack_delay_time,
QuicTime receive_timestamp);
// Request that |packet_number| be retransmitted after the other pending
// retransmissions. Does not add it to the retransmissions if it's already
// a pending retransmission.
void MarkForRetransmission(QuicPacketNumber packet_number,
TransmissionType transmission_type);
// Performs whatever work is need to retransmit the data correctly, either
// by retransmitting the frames directly or by notifying that the frames
// are lost.
void HandleRetransmission(TransmissionType transmission_type,
QuicTransmissionInfo* transmission_info);
// Called after packets have been marked handled with last received ack frame.
void PostProcessNewlyAckedPackets(QuicPacketNumber ack_packet_number,
EncryptionLevel ack_decrypted_level,
const QuicAckFrame& ack_frame,
QuicTime ack_receive_time,
bool rtt_updated,
QuicByteCount prior_bytes_in_flight);
// Notify observers that packet with QuicTransmissionInfo |info| is a spurious
// retransmission. It is caller's responsibility to guarantee the packet with
// QuicTransmissionInfo |info| is a spurious retransmission before calling
// this function.
void RecordOneSpuriousRetransmission(const QuicTransmissionInfo& info);
// Sets the initial RTT of the connection.
void SetInitialRtt(QuicTime::Delta rtt);
// Called when handshake is confirmed to remove the retransmittable frames
// from all packets of HANDSHAKE_DATA packet number space to ensure they don't
// get retransmitted and will eventually be removed from unacked packets map.
void NeuterHandshakePackets();
// Indicates whether including peer_max_ack_delay_ when calculating PTO
// timeout.
bool ShouldAddMaxAckDelay(PacketNumberSpace space) const;
// Gets the earliest in flight packet sent time to calculate PTO. Also
// updates |packet_number_space| if a PTO timer should be armed.
QuicTime GetEarliestPacketSentTimeForPto(
PacketNumberSpace* packet_number_space) const;
// Returns true if application data should be used to arm PTO. Only used when
// multiple packet number space is enabled.
bool ShouldArmPtoForApplicationData() const;
// A helper function to return total delay of |num_timeouts| retransmission
// timeout with TLP and RTO mode.
QuicTime::Delta GetNConsecutiveRetransmissionTimeoutDelay(
int num_timeouts) const;
// Returns true if peer has finished address validation, such that
// retransmission timer is not armed if there is no packets in flight.
bool PeerCompletedAddressValidation() const;
// Newly serialized retransmittable packets are added to this map, which
// contains owning pointers to any contained frames. If a packet is
// retransmitted, this map will contain entries for both the old and the new
// packet. The old packet's retransmittable frames entry will be nullptr,
// while the new packet's entry will contain the frames to retransmit.
// If the old packet is acked before the new packet, then the old entry will
// be removed from the map and the new entry's retransmittable frames will be
// set to nullptr.
QuicUnackedPacketMap unacked_packets_;
const QuicClock* clock_;
QuicRandom* random_;
QuicConnectionStats* stats_;
DebugDelegate* debug_delegate_;
NetworkChangeVisitor* network_change_visitor_;
QuicPacketCount initial_congestion_window_;
RttStats rtt_stats_;
std::unique_ptr<SendAlgorithmInterface> send_algorithm_;
// Not owned. Always points to |uber_loss_algorithm_| outside of tests.
LossDetectionInterface* loss_algorithm_;
UberLossAlgorithm uber_loss_algorithm_;
// Tracks the first RTO packet. If any packet before that packet gets acked,
// it indicates the RTO was spurious and should be reversed(F-RTO).
QuicPacketNumber first_rto_transmission_;
// Number of times the RTO timer has fired in a row without receiving an ack.
size_t consecutive_rto_count_;
// Number of times the tail loss probe has been sent.
size_t consecutive_tlp_count_;
// Number of times the crypto handshake has been retransmitted.
size_t consecutive_crypto_retransmission_count_;
// Number of pending transmissions of TLP, RTO, or crypto packets.
size_t pending_timer_transmission_count_;
// Maximum number of tail loss probes to send before firing an RTO.
size_t max_tail_loss_probes_;
// Maximum number of packets to send upon RTO.
QuicPacketCount max_rto_packets_;
// If true, send the TLP at 0.5 RTT.
bool enable_half_rtt_tail_loss_probe_;
bool using_pacing_;
// If true, use the new RTO with loss based CWND reduction instead of the send
// algorithms's OnRetransmissionTimeout to reduce the congestion window.
bool use_new_rto_;
// If true, use a more conservative handshake retransmission policy.
bool conservative_handshake_retransmits_;
// The minimum TLP timeout.
QuicTime::Delta min_tlp_timeout_;
// The minimum RTO.
QuicTime::Delta min_rto_timeout_;
// Vectors packets acked and lost as a result of the last congestion event.
AckedPacketVector packets_acked_;
LostPacketVector packets_lost_;
// Largest newly acknowledged packet.
QuicPacketNumber largest_newly_acked_;
// Largest packet in bytes ever acknowledged.
QuicPacketLength largest_mtu_acked_;
// Replaces certain calls to |send_algorithm_| when |using_pacing_| is true.
// Calls into |send_algorithm_| for the underlying congestion control.
PacingSender pacing_sender_;
// Indicates whether handshake is finished. This is purely used to determine
// retransmission mode. DONOT use this to infer handshake state.
bool handshake_finished_;
// Records bandwidth from server to client in normal operation, over periods
// of time with no loss events.
QuicSustainedBandwidthRecorder sustained_bandwidth_recorder_;
// The largest acked value that was sent in an ack, which has then been acked.
QuicPacketNumber largest_packet_peer_knows_is_acked_;
// The largest acked value that was sent in an ack, which has then been acked
// for per packet number space. Only used when connection supports multiple
// packet number spaces.
QuicPacketNumber
largest_packets_peer_knows_is_acked_[NUM_PACKET_NUMBER_SPACES];
// The maximum ACK delay time that the peer uses. Initialized to be the
// same as local_max_ack_delay_, may be changed via transport parameter
// negotiation.
QuicTime::Delta peer_max_ack_delay_;
// Latest received ack frame.
QuicAckFrame last_ack_frame_;
// Record whether RTT gets updated by last largest acked..
bool rtt_updated_;
// A reverse iterator of last_ack_frame_.packets. This is reset in
// OnAckRangeStart, and gradually moves in OnAckRange..
PacketNumberQueue::const_reverse_iterator acked_packets_iter_;
// Indicates whether PTO mode has been enabled. PTO mode unifies TLP and RTO
// modes.
bool pto_enabled_;
// Maximum number of probes to send when PTO fires.
size_t max_probe_packets_per_pto_;
// Number of times the PTO timer has fired in a row without receiving an ack.
size_t consecutive_pto_count_;
// True if HANDSHAKE mode has been disabled.
bool handshake_mode_disabled_;
// If true, skip packet number before sending the last PTO retransmission.
bool skip_packet_number_for_pto_;
// If true, always include peer_max_ack_delay_ when calculating PTO timeout.
bool always_include_max_ack_delay_for_pto_timeout_;
// When calculating PTO timeout, the start point of doing exponential backoff.
// For example, 0 : always do exponential backoff. n : do exponential backoff
// since nth PTO.
size_t pto_exponential_backoff_start_point_;
// The multiplier of rttvar when calculating PTO timeout.
int pto_rttvar_multiplier_;
// Number of PTOs similar to TLPs.
size_t num_tlp_timeout_ptos_;
// True if any ENCRYPTION_HANDSHAKE packet gets acknowledged.
bool handshake_packet_acked_;
// True if any 1-RTT packet gets acknowledged.
bool one_rtt_packet_acked_;
// True if any 1-RTT packet gets sent.
bool one_rtt_packet_sent_;
// If > 0, arm the 1st PTO with max of earliest in flight sent time + PTO
// delay and multiplier * srtt from last in flight packet.
float first_pto_srtt_multiplier_;
// If true, use standard deviation (instead of mean deviation) when
// calculating PTO timeout.
bool use_standard_deviation_for_pto_;
// The multiplier for caculating PTO timeout before any RTT sample is
// available.
float pto_multiplier_without_rtt_samples_;
const bool fix_packet_number_length_ =
GetQuicReloadableFlag(quic_fix_packet_number_length);
};
} // namespace quic
#endif // QUICHE_QUIC_CORE_QUIC_SENT_PACKET_MANAGER_H_
| 40.497872 | 93 | 0.74782 | [
"vector"
] |
9e5d2809c3ad25b98e9281e59e66c4ac1a364d80 | 12,268 | h | C | android/android_9/frameworks/native/services/sensorservice/vec.h | yakuizhao/intel-vaapi-driver | b2bb0383352694941826543a171b557efac2219b | [
"MIT"
] | null | null | null | android/android_9/frameworks/native/services/sensorservice/vec.h | yakuizhao/intel-vaapi-driver | b2bb0383352694941826543a171b557efac2219b | [
"MIT"
] | null | null | null | android/android_9/frameworks/native/services/sensorservice/vec.h | yakuizhao/intel-vaapi-driver | b2bb0383352694941826543a171b557efac2219b | [
"MIT"
] | null | null | null | /*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_VEC_H
#define ANDROID_VEC_H
#include <math.h>
#include <stdint.h>
#include <stddef.h>
#include "traits.h"
// -----------------------------------------------------------------------
#define PURE __attribute__((pure))
namespace android {
// -----------------------------------------------------------------------
// non-inline helpers
template <typename TYPE, size_t SIZE>
class vec;
template <typename TYPE, size_t SIZE>
struct vbase;
namespace helpers {
// Smallest / largest of two values. Local helpers so this header does
// not have to pull in <algorithm>; on ties the returned value is equal
// either way.
template <typename T> inline T min(T a, T b) { return b < a ? b : a; }
template <typename T> inline T max(T a, T b) { return b > a ? b : a; }
// Assigns a possibly different-sized vector |rhs| into |lhs|: copies the
// overlapping prefix and zero-fills any trailing elements of |lhs| when
// |rhs| is shorter.
//
// Fix: the original zero-fill loop ran up to max(SIZE, S), which writes
// past the end of |lhs| whenever S > SIZE (out-of-bounds write). The
// fill is now clamped to SIZE, the actual length of |lhs|; behavior for
// S <= SIZE is unchanged.
template < template<typename T, size_t S> class VEC,
           typename TYPE, size_t SIZE, size_t S>
vec<TYPE, SIZE>& doAssign(
        vec<TYPE, SIZE>& lhs, const VEC<TYPE, S>& rhs) {
    const size_t minSize = min(SIZE, S);
    for (size_t i = 0; i < minSize; i++)
        lhs[i] = rhs[i];
    for (size_t i = minSize; i < SIZE; i++)
        lhs[i] = 0;
    return lhs;
}
// Element-wise sum of two same-sized vectors; the result takes the type
// of the left-hand operand (VLHS).
template <
    template<typename T, size_t S> class VLHS,
    template<typename T, size_t S> class VRHS,
    typename TYPE,
    size_t SIZE
>
VLHS<TYPE, SIZE> PURE doAdd(
        const VLHS<TYPE, SIZE>& lhs,
        const VRHS<TYPE, SIZE>& rhs) {
    VLHS<TYPE, SIZE> sum;
    size_t i = 0;
    while (i < SIZE) {
        sum[i] = lhs[i] + rhs[i];
        ++i;
    }
    return sum;
}
// Element-wise difference (lhs - rhs) of two same-sized vectors; the
// result takes the type of the left-hand operand (VLHS).
template <
    template<typename T, size_t S> class VLHS,
    template<typename T, size_t S> class VRHS,
    typename TYPE,
    size_t SIZE
>
VLHS<TYPE, SIZE> PURE doSub(
        const VLHS<TYPE, SIZE>& lhs,
        const VRHS<TYPE, SIZE>& rhs) {
    VLHS<TYPE, SIZE> diff;
    size_t i = 0;
    while (i < SIZE) {
        diff[i] = lhs[i] - rhs[i];
        ++i;
    }
    return diff;
}
// Vector-times-scalar product: every element of |lhs| multiplied by |rhs|.
// The scalar is passed as TypeTraits<TYPE>::ParameterType (by value or
// const reference, whichever traits.h selects for TYPE).
template <
    template<typename T, size_t S> class VEC,
    typename TYPE,
    size_t SIZE
>
VEC<TYPE, SIZE> PURE doMulScalar(
        const VEC<TYPE, SIZE>& lhs,
        typename TypeTraits<TYPE>::ParameterType rhs) {
    VEC<TYPE, SIZE> scaled;
    for (size_t idx = 0; idx != SIZE; ++idx) {
        scaled[idx] = lhs[idx] * rhs;
    }
    return scaled;
}
// Scalar-times-vector product: |lhs| multiplied onto every element of
// |rhs|. Kept distinct from doMulScalar so non-commutative TYPEs keep
// their operand order.
template <
    template<typename T, size_t S> class VEC,
    typename TYPE,
    size_t SIZE
>
VEC<TYPE, SIZE> PURE doScalarMul(
        typename TypeTraits<TYPE>::ParameterType lhs,
        const VEC<TYPE, SIZE>& rhs) {
    VEC<TYPE, SIZE> scaled;
    for (size_t idx = 0; idx != SIZE; ++idx) {
        scaled[idx] = lhs * rhs[idx];
    }
    return scaled;
}
}; // namespace helpers
// -----------------------------------------------------------------------
// Below we define the mathematical operators for vectors.
// We use template template arguments so we can generically
// handle the case where the right-hand-size and left-hand-side are
// different vector types (but with same value_type and size).
// This is needed for performance when using ".xy{z}" element access
// on vec<>. Without this, an extra conversion to vec<> would be needed.
//
// example:
// vec4_t a;
// vec3_t b;
// vec3_t c = a.xyz + b;
//
// "a.xyz + b" is a mixed-operation between a vbase<> and a vec<>, requiring
// a conversion of vbase<> to vec<>. The template gunk below avoids this,
// by allowing the addition on these different vector types directly
//
// Mixed-type element-wise addition: forwards to helpers::doAdd so that
// vbase<> + vec<> combinations work without an intermediate conversion.
template <
template<typename T, size_t S> class VLHS,
template<typename T, size_t S> class VRHS,
typename TYPE,
size_t SIZE
>
inline VLHS<TYPE, SIZE> PURE operator + (
const VLHS<TYPE, SIZE>& lhs,
const VRHS<TYPE, SIZE>& rhs) {
return helpers::doAdd(lhs, rhs);
}
// Mixed-type element-wise subtraction (lhs - rhs); see operator + above.
template <
template<typename T, size_t S> class VLHS,
template<typename T, size_t S> class VRHS,
typename TYPE,
size_t SIZE
>
inline VLHS<TYPE, SIZE> PURE operator - (
const VLHS<TYPE, SIZE>& lhs,
const VRHS<TYPE, SIZE>& rhs) {
return helpers::doSub(lhs, rhs);
}
// Vector * scalar; the scalar parameter type comes from TypeTraits.
template <
template<typename T, size_t S> class VEC,
typename TYPE,
size_t SIZE
>
inline VEC<TYPE, SIZE> PURE operator * (
const VEC<TYPE, SIZE>& lhs,
typename TypeTraits<TYPE>::ParameterType rhs) {
return helpers::doMulScalar(lhs, rhs);
}
// Scalar * vector; kept separate so operand order is preserved.
template <
template<typename T, size_t S> class VEC,
typename TYPE,
size_t SIZE
>
inline VEC<TYPE, SIZE> PURE operator * (
typename TypeTraits<TYPE>::ParameterType lhs,
const VEC<TYPE, SIZE>& rhs) {
return helpers::doScalarMul(lhs, rhs);
}
// Inner product of two same-sized vectors: sum over i of lhs[i] * rhs[i].
// Accepts mixed vector types (vec<> / vbase<>) on either side.
template <
    template<typename T, size_t S> class VLHS,
    template<typename T, size_t S> class VRHS,
    typename TYPE,
    size_t SIZE
>
TYPE PURE dot_product(
        const VLHS<TYPE, SIZE>& lhs,
        const VRHS<TYPE, SIZE>& rhs) {
    TYPE acc(0);
    for (size_t i = 0; i != SIZE; ++i)
        acc += lhs[i] * rhs[i];
    return acc;
}
// Euclidean norm: sqrt(v . v). Uses ::sqrt from <math.h>.
template <
template<typename T, size_t S> class V,
typename TYPE,
size_t SIZE
>
TYPE PURE length(const V<TYPE, SIZE>& v) {
return sqrt(dot_product(v, v));
}
// Squared Euclidean norm (v . v); cheaper than length() when only
// comparisons are needed.
template <
template<typename T, size_t S> class V,
typename TYPE,
size_t SIZE
>
TYPE PURE length_squared(const V<TYPE, SIZE>& v) {
return dot_product(v, v);
}
// Unit-length copy of |v| (v scaled by 1/length(v)).
// NOTE(review): divides by length(v) with no guard — a zero vector
// yields inf/NaN elements; callers presumably avoid that case.
template <
template<typename T, size_t S> class V,
typename TYPE,
size_t SIZE
>
V<TYPE, SIZE> PURE normalize(const V<TYPE, SIZE>& v) {
return v * (1/length(v));
}
// 3-D cross product u x v; the result takes the left operand's vector
// type. Only defined for size-3 vectors (uses the .x/.y/.z accessors).
template <
    template<typename T, size_t S> class VLHS,
    template<typename T, size_t S> class VRHS,
    typename TYPE
>
VLHS<TYPE, 3> PURE cross_product(
        const VLHS<TYPE, 3>& u,
        const VRHS<TYPE, 3>& v) {
    VLHS<TYPE, 3> w;
    w.x = u.y * v.z - u.z * v.y;
    w.y = u.z * v.x - u.x * v.z;
    w.z = u.x * v.y - u.y * v.x;
    return w;
}
// Element-wise negation of a vec.
template <typename TYPE, size_t SIZE>
vec<TYPE, SIZE> PURE operator - (const vec<TYPE, SIZE>& lhs) {
    vec<TYPE, SIZE> negated;
    for (size_t i = 0; i != SIZE; ++i)
        negated[i] = -lhs[i];
    return negated;
}
// -----------------------------------------------------------------------
// This our basic vector type, it just implements the data storage
// and accessors.
// Plain storage for SIZE elements of TYPE with indexed access; vec<>
// derives from this and adds the arithmetic interface.
template <typename TYPE, size_t SIZE>
struct vbase {
TYPE v[SIZE];
inline const TYPE& operator[](size_t i) const { return v[i]; }
inline TYPE& operator[](size_t i) { return v[i]; }
};
// float specializations expose the same storage through named members
// (.x/.y/... and .s/.t/...) via anonymous unions, plus smaller-vbase
// views (e.g. .xy) for swizzle-style access without a conversion.
// NOTE(review): reading a union member other than the one last written
// is technically type-punning; this relies on the compiler laying all
// variants out identically.
template<> struct vbase<float, 2> {
union {
float v[2];
struct { float x, y; };
struct { float s, t; };
};
inline const float& operator[](size_t i) const { return v[i]; }
inline float& operator[](size_t i) { return v[i]; }
};
template<> struct vbase<float, 3> {
union {
float v[3];
struct { float x, y, z; };
struct { float s, t, r; };
vbase<float, 2> xy;
vbase<float, 2> st;
};
inline const float& operator[](size_t i) const { return v[i]; }
inline float& operator[](size_t i) { return v[i]; }
};
template<> struct vbase<float, 4> {
union {
float v[4];
struct { float x, y, z, w; };
struct { float s, t, r, q; };
vbase<float, 3> xyz;
vbase<float, 3> str;
vbase<float, 2> xy;
vbase<float, 2> st;
};
inline const float& operator[](size_t i) const { return v[i]; }
inline float& operator[](size_t i) { return v[i]; }
};
// -----------------------------------------------------------------------
// Fixed-size mathematical vector over TYPE. Storage and element access
// come from vbase<>; this class adds construction, assignment, an
// STL-like iterator interface, and (via friends) arithmetic operators.
template <typename TYPE, size_t SIZE>
class vec : public vbase<TYPE, SIZE>
{
typedef typename TypeTraits<TYPE>::ParameterType pTYPE;
typedef vbase<TYPE, SIZE> base;
public:
// STL-like interface.
typedef TYPE value_type;
typedef TYPE& reference;
typedef TYPE const& const_reference;
typedef size_t size_type;
typedef TYPE* iterator;
typedef TYPE const* const_iterator;
iterator begin() { return base::v; }
iterator end() { return base::v + SIZE; }
const_iterator begin() const { return base::v; }
const_iterator end() const { return base::v + SIZE; }
size_type size() const { return SIZE; }
// -----------------------------------------------------------------------
// default constructors
// Default construction leaves elements uninitialized (on purpose).
vec() { }
vec(const vec& rhs) : base(rhs) { }
vec(const base& rhs) : base(rhs) { } // NOLINT(implicit)
// -----------------------------------------------------------------------
// conversion constructors
// Broadcast: fills every element with the same scalar.
vec(pTYPE rhs) { // NOLINT(implicit)
for (size_t i=0 ; i<SIZE ; i++)
base::operator[](i) = rhs;
}
// From a differently-sized vector: copy overlap, zero-fill the rest.
template < template<typename T, size_t S> class VEC, size_t S>
explicit vec(const VEC<TYPE, S>& rhs) {
helpers::doAssign(*this, rhs);
}
// From a raw array assumed to hold at least SIZE elements.
explicit vec(TYPE const* array) {
for (size_t i=0 ; i<SIZE ; i++)
base::operator[](i) = array[i];
}
// -----------------------------------------------------------------------
// Assignment
vec& operator = (const vec& rhs) {
base::operator=(rhs);
return *this;
}
vec& operator = (const base& rhs) {
base::operator=(rhs);
return *this;
}
// Broadcast assignment from a scalar.
vec& operator = (pTYPE rhs) {
for (size_t i=0 ; i<SIZE ; i++)
base::operator[](i) = rhs;
return *this;
}
// Assignment from a differently-sized vector (copy overlap, zero-fill).
template < template<typename T, size_t S> class VEC, size_t S>
vec& operator = (const VEC<TYPE, S>& rhs) {
return helpers::doAssign(*this, rhs);
}
// -----------------------------------------------------------------------
// operation-assignment (defined out of line below)
vec& operator += (const vec& rhs);
vec& operator -= (const vec& rhs);
vec& operator *= (pTYPE rhs);
// -----------------------------------------------------------------------
// non-member function declaration and definition
// NOTE: we declare the non-member function as friend inside the class
// so that they are known to the compiler when the class is instantiated.
// This helps the compiler doing template argument deduction when the
// passed types are not identical. Essentially this helps with
// type conversion so that you can multiply a vec<float> by an scalar int
// (for instance).
friend inline vec PURE operator + (const vec& lhs, const vec& rhs) {
return helpers::doAdd(lhs, rhs);
}
friend inline vec PURE operator - (const vec& lhs, const vec& rhs) {
return helpers::doSub(lhs, rhs);
}
friend inline vec PURE operator * (const vec& lhs, pTYPE v) {
return helpers::doMulScalar(lhs, v);
}
friend inline vec PURE operator * (pTYPE v, const vec& rhs) {
return helpers::doScalarMul(v, rhs);
}
friend inline TYPE PURE dot_product(const vec& lhs, const vec& rhs) {
return android::dot_product(lhs, rhs);
}
};
// -----------------------------------------------------------------------
// In-place element-wise addition.
template <typename TYPE, size_t SIZE>
vec<TYPE, SIZE>& vec<TYPE, SIZE>::operator += (const vec<TYPE, SIZE>& rhs) {
    for (size_t i = 0; i != SIZE; ++i)
        (*this)[i] += rhs[i];
    return *this;
}
// In-place element-wise subtraction.
template <typename TYPE, size_t SIZE>
vec<TYPE, SIZE>& vec<TYPE, SIZE>::operator -= (const vec<TYPE, SIZE>& rhs) {
    for (size_t i = 0; i != SIZE; ++i)
        (*this)[i] -= rhs[i];
    return *this;
}
// In-place multiplication by a scalar.
template <typename TYPE, size_t SIZE>
vec<TYPE, SIZE>& vec<TYPE, SIZE>::operator *= (vec<TYPE, SIZE>::pTYPE rhs) {
    for (size_t i = 0; i != SIZE; ++i)
        (*this)[i] *= rhs;
    return *this;
}
// -----------------------------------------------------------------------
typedef vec<float, 2> vec2_t;
typedef vec<float, 3> vec3_t;
typedef vec<float, 4> vec4_t;
// -----------------------------------------------------------------------
}; // namespace android
#endif /* ANDROID_VEC_H */
| 27.94533 | 78 | 0.561705 | [
"vector"
] |
9e5d87a42a052bc349031ac9752886163d15bd46 | 1,842 | h | C | plugins/editor/src/editor/Theme.h | michaelwillis/sfizz | 0461f6e5e288da71aeccf7b7dfd71302bf0ba175 | [
"BSD-2-Clause"
] | 281 | 2019-06-06T05:58:59.000Z | 2022-03-06T12:20:09.000Z | plugins/editor/src/editor/Theme.h | michaelwillis/sfizz | 0461f6e5e288da71aeccf7b7dfd71302bf0ba175 | [
"BSD-2-Clause"
] | 590 | 2019-09-22T00:26:10.000Z | 2022-03-31T19:21:58.000Z | plugins/editor/src/editor/Theme.h | michaelwillis/sfizz | 0461f6e5e288da71aeccf7b7dfd71302bf0ba175 | [
"BSD-2-Clause"
] | 44 | 2019-10-08T08:24:20.000Z | 2022-02-26T04:21:44.000Z | // SPDX-License-Identifier: BSD-2-Clause
// This code is part of the sfizz library and is licensed under a BSD 2-clause
// license. You should have receive a LICENSE.md file along with the code.
// If not, contact the sfizz maintainers at https://github.com/sfztools/sfizz
#pragma once
#include "utility/vstgui_before.h"
#include "vstgui/vstgui.h"
#include "utility/vstgui_after.h"
#include <absl/strings/string_view.h>
#include <string>
#include <vector>
namespace pugi { class xml_document; }
using namespace VSTGUI;
// One complete set of UI colors for the sfizz editor. Theme holds two
// of these (normal and inverted); each field names the widget part it
// paints.
struct Palette {
CColor boxBackground;
CColor text;
CColor inactiveText;
CColor highlightedText;
CColor titleBoxText;
CColor titleBoxBackground;
CColor icon;
CColor iconHighlight;
CColor valueText;
CColor valueBackground;
CColor knobActiveTrack;
CColor knobInactiveTrack;
CColor knobLineIndicator;
CColor knobText;
CColor knobLabelText;
CColor knobLabelBackground;
};
// Editor color theme: a frame color plus a normal and an inverted
// Palette, loadable by name from an XML document (pugixml). Non-copyable
// because a ChangeListener may hold a pointer to a specific instance.
struct Theme {
Theme() = default;
Theme(const Theme&) = delete;
Theme& operator=(const Theme&) = delete;
CColor frameBackground;
Palette normalPalette {};
Palette invertedPalette {};
// Observer notified after the theme's colors change.
struct ChangeListener {
virtual ~ChangeListener() {}
virtual void onThemeChanged() = 0;
};
// Not owned; may be null when nobody observes this theme.
ChangeListener* listener = nullptr;
void clear();
// Loads the named theme (implementation in the .cpp; presumably
// resolves the name to an XML file — confirm there).
void load(const std::string& name);
void loadDocument(const pugi::xml_document& doc);
// Notifies the listener, if any, that colors changed.
void invokeChangeListener() { if (listener) listener->onThemeChanged(); }
// Persist / recall the user's currently selected theme name.
static void storeCurrentName(absl::string_view name);
static std::string loadCurrentName();
static const std::vector<std::string>& getAvailableNames();
// Maps a color name from the theme XML to the matching Palette/Theme
// field, or returns null when the name is unknown.
CColor* getColorFromName(absl::string_view name, bool fromInvertedPalette = false);
private:
static std::vector<std::string> extractAvailableNames();
};
| 26.695652 | 87 | 0.714984 | [
"vector"
] |
9e62c3401296ebe20bcd3c1ddc7603cedf1f6dbd | 3,544 | h | C | src/experiments/babybot/objectspotter/stlport/old_hp/algo.h | robotology-legacy/yarp1 | 21434f5b776edea201b39a9644552dca59339dbc | [
"Artistic-1.0-Perl"
] | null | null | null | src/experiments/babybot/objectspotter/stlport/old_hp/algo.h | robotology-legacy/yarp1 | 21434f5b776edea201b39a9644552dca59339dbc | [
"Artistic-1.0-Perl"
] | null | null | null | src/experiments/babybot/objectspotter/stlport/old_hp/algo.h | robotology-legacy/yarp1 | 21434f5b776edea201b39a9644552dca59339dbc | [
"Artistic-1.0-Perl"
] | 1 | 2020-04-06T02:02:28.000Z | 2020-04-06T02:02:28.000Z | /*
*
* Copyright (c) 1994
* Hewlett-Packard Company
*
* Copyright (c) 1996,1997
* Silicon Graphics Computer Systems, Inc.
*
* Copyright (c) 1997
* Moscow Center for SPARC Technology
*
* Copyright (c) 1999
* Boris Fomitchev
*
* This material is provided "as is", with absolutely no warranty expressed
* or implied. Any use is at your own risk.
*
* Permission to use or copy this software for any purpose is hereby granted
* without fee, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef _STLP_ALGO_H
#define _STLP_ALGO_H
# ifndef _STLP_OUTERMOST_HEADER_ID
# define _STLP_OUTERMOST_HEADER_ID 0xa001
# include <stl/_prolog.h>
# endif
# ifndef _STLP_ALGOBASE_H
# include <algobase.h>
# endif
# ifndef _STLP_TEMPBUF_H
# include <tempbuf.h>
# endif
# ifndef _STLP_INTERNAL_HEAP_H
# include <stl/_heap.h>
# endif
# ifndef _STLP_ITERATOR_H
# include <iterator.h>
# endif
# ifndef _STLP_INTERNAL_ALGO_H
# include <stl/_algo.h>
# endif
# ifndef _STLP_NUMERIC_H
# include <stl/_numeric.h>
# endif
#ifdef _STLP_USE_NAMESPACES
# ifdef _STLP_BROKEN_USING_DIRECTIVE
using namespace STLPORT;
# else
// Names from <stl/_algo.h>
using STLPORT::for_each;
using STLPORT::find;
using STLPORT::find_if;
using STLPORT::adjacent_find;
using STLPORT::count;
using STLPORT::count_if;
using STLPORT::search;
using STLPORT::search_n;
using STLPORT::swap_ranges;
using STLPORT::transform;
using STLPORT::replace;
using STLPORT::replace_if;
using STLPORT::replace_copy;
using STLPORT::replace_copy_if;
using STLPORT::generate;
using STLPORT::generate_n;
// using STLPORT::remove;
using STLPORT::remove_if;
using STLPORT::remove_copy;
using STLPORT::remove_copy_if;
using STLPORT::unique;
using STLPORT::unique_copy;
using STLPORT::reverse;
using STLPORT::reverse_copy;
using STLPORT::rotate;
using STLPORT::rotate_copy;
using STLPORT::random_shuffle;
using STLPORT::random_sample;
using STLPORT::random_sample_n;
using STLPORT::partition;
using STLPORT::stable_partition;
using STLPORT::sort;
using STLPORT::stable_sort;
using STLPORT::partial_sort;
using STLPORT::partial_sort_copy;
using STLPORT::nth_element;
using STLPORT::lower_bound;
using STLPORT::upper_bound;
using STLPORT::equal_range;
using STLPORT::binary_search;
using STLPORT::merge;
using STLPORT::inplace_merge;
using STLPORT::includes;
using STLPORT::set_union;
using STLPORT::set_intersection;
using STLPORT::set_difference;
using STLPORT::set_symmetric_difference;
using STLPORT::min_element;
using STLPORT::max_element;
using STLPORT::next_permutation;
using STLPORT::prev_permutation;
using STLPORT::find_first_of;
using STLPORT::find_end;
using STLPORT::is_sorted;
using STLPORT::is_heap;
// Names from stl_heap.h
using STLPORT::push_heap;
using STLPORT::pop_heap;
using STLPORT::make_heap;
using STLPORT::sort_heap;
// Names from <stl/_numeric.h>
using STLPORT::accumulate;
using STLPORT::inner_product;
using STLPORT::partial_sum;
using STLPORT::adjacent_difference;
using STLPORT::power;
using STLPORT::iota;
# endif /* _STLP_BROKEN_USING_DIRECTIVE */
#endif /* _STLP_USE_NAMESPACES */
# if (_STLP_OUTERMOST_HEADER_ID == 0xa001)
# include <stl/_epilog.h>
# undef _STLP_OUTERMOST_HEADER_ID
# endif
#endif /* _STLP_ALGO_H */
// Local Variables:
// mode:C++
// End:
| 24.108844 | 77 | 0.76298 | [
"transform"
] |
9e64f5df128b86f7a147ac4ed647397323ae8ac7 | 1,589 | h | C | CMP_POPNET_ACK/CMP/PMS_trace/cmp4/popnetForSimplescalar/index.h | FCAS-SCUT/pms | 5f30da037f17165cb8ad6327ab3b6c9189a32d6a | [
"WTFPL"
] | null | null | null | CMP_POPNET_ACK/CMP/PMS_trace/cmp4/popnetForSimplescalar/index.h | FCAS-SCUT/pms | 5f30da037f17165cb8ad6327ab3b6c9189a32d6a | [
"WTFPL"
] | null | null | null | CMP_POPNET_ACK/CMP/PMS_trace/cmp4/popnetForSimplescalar/index.h | FCAS-SCUT/pms | 5f30da037f17165cb8ad6327ab3b6c9189a32d6a | [
"WTFPL"
] | null | null | null | #ifndef NETWORK_INDEX_H_
#define NETWORK_INDEX_H_
#include <vector>
#include <utility>
#include <cmath>
using namespace std;
// Core enums/typedefs for the popnet network-on-chip simulator.
// Event/message categories handled by the simulation loop.
enum mess_type {EVG_, ROUTER_, WIRE_, CREDIT_};
// Routing algorithms: XY and (presumably) a variant of XY — TODO confirm TXY_.
enum routing_type {XY_ , TXY_ };
// Virtual-channel pipeline states (routing, VC allocation, switch
// allocation, switch traversal, delivered).
enum VC_state_type {INIT_, ROUTING_, VC_AB_, SW_AB_, SW_TR_, HOME_};
// Position of a flit within a packet.
enum flit_type {HEADER_, BODY_, TAIL_};
enum vc_share_type {SHARE_, MONO_};
enum VC_usage_type {USED_, FREE_};
// Simulation time.
typedef double time_type;
// Multi-dimensional node address.
typedef vector<long> add_type;
// (physical port, virtual channel) pair.
typedef pair<long, long> VC_type;
// Flit payload as 64-bit words.
typedef vector<unsigned long long> Data_type;
typedef unsigned long long Atom_type;
// Sentinel meaning "no virtual channel assigned".
const VC_type VC_NULL = VC_type(-1, -1);
typedef vector<long > Regedid;//zl
typedef vector<long > Regedno;//zl
#define BUFF_BOUND_ 100
#define WIRE_DELAY_ 0.9
#define PIPE_DELAY_ 1.0
#define CREDIT_DELAY_ 1.0
#define REPORT_PERIOD_ 2000
#define S_ELPS_ 0.00000001
#define ATOM_WIDTH_ 64 //bus width for power model
#define ZERO_ 0
#define MAX_64_ 0xffffffffffffffffLL
#define CORR_EFF_ 0.8
#define POWER_NOM_ 1
/*
#define BUFF_BOUND_ 100
#define WIRE_DELAY_ 2.0 0.9
#define PIPE_DELAY_ 1.0 1.0
#define CREDIT_DELAY_ 2.0 1.0
#define REPORT_PERIOD_ 2000
#define S_ELPS_ 0.00000001
#define ATOM_WIDTH_ 64
#define ZERO_ 0
#define MAX_64_ 0xffffffffffffffffLL
#define CORR_EFF_ 0.8
#define POWER_NOM_ 1e9
*/
// One trace record exchanged between the CMP/Simplescalar front end and
// the network simulator. Field semantics are inferred from names —
// TODO confirm against the producer of these records.
typedef struct message {
long src1;
long src2;
long dst1;
long dst2;
// Simulation cycle at which the message was generated.
long long int sim_cycle_m;
int src_cmp_m;
unsigned long long src_addr_m;
int dst_cmp_m;
unsigned long long dst_addr_m;
// Payload length (units not stated here — presumably flits or bytes).
int data_length_m;
int operation_m;
// Monotonic message sequence number.
long long int messageNo_m;
} mess_struct_type;
#endif
| 24.446154 | 68 | 0.76652 | [
"vector",
"model"
] |
9e6835ef278fc7df8d2e4807f7d1c0d6062be9c8 | 2,053 | h | C | ProgramMapTable.h | berolinaro/dvbv5-tools | eba2552407af10fd880c28335f1e6efac33de136 | [
"Apache-2.0"
] | 2 | 2018-01-01T23:57:07.000Z | 2019-03-03T17:14:29.000Z | ProgramMapTable.h | octaplexsys/dvbv5-tools | eba2552407af10fd880c28335f1e6efac33de136 | [
"Apache-2.0"
] | 1 | 2018-03-03T11:49:11.000Z | 2018-03-03T11:49:11.000Z | ProgramMapTable.h | octaplexsys/dvbv5-tools | eba2552407af10fd880c28335f1e6efac33de136 | [
"Apache-2.0"
] | null | null | null | #pragma once
#include "DVBTable.h"
#include "DVBDescriptor.h"
#include <iostream>
#include <vector>
#include <map>
// One elementary stream entry from a Program Map Table: its MPEG stream
// type, PID, and the descriptors attached to it. Descriptor pointers are
// stored as-is; ownership is not transferred here — TODO confirm who
// frees them.
class Stream {
public:
Stream(uint8_t streamType, uint16_t pid, std::vector<DVBDescriptor*> descriptors):_streamType(streamType),_pid(pid),_descriptors(descriptors) { }
uint8_t streamType() const { return _streamType; }
uint16_t pid() const { return _pid; }
std::vector<DVBDescriptor*> descriptors() const { return _descriptors; }
// Prints a human-readable description of this stream.
void dump(std::ostream &where=std::cerr, std::string const &indent="") const;
// Classification predicates (implemented in the .cpp, presumably from
// _streamType and/or descriptors).
bool isAudio() const;
bool isVideo() const;
bool isTeletext() const;
bool isSubtitle() const;
bool isPcr() const;
// Coarse stream categories; Any is a wildcard for matching.
enum StreamType {
Video,
Audio,
Teletext,
Subtitle,
PCR,
Other,
Any
};
StreamType type() const;
protected:
uint8_t _streamType;
uint16_t _pid;
std::vector<DVBDescriptor*> _descriptors;
};
// One PMT section (MPEG-TS Program Map Table), constructed from a
// generic parsed DVBTable.
class ProgramMapTable:public DVBTable {
friend class Program;
public:
ProgramMapTable(DVBTable *t):DVBTable(t) { }
// Program number carried in the section (stored in _number by the base).
uint16_t programNumber() const { return _number; }
// PID that carries this program's PCR.
uint16_t pcrPid() const { return _pcrPid; }
virtual void dump(std::ostream &where=std::cerr, std::string const &indent="") const override;
// Table-id filter/mask used when capturing PMT sections.
static constexpr uint8_t tableFilter = ProgramMap;
static constexpr uint8_t tableMask = 0xff;
protected:
uint16_t _pcrPid;
};
class Program;
// Collection of PMT sections for one program (a program's table may span
// multiple sections).
class ProgramMapTables:public DVBTables<ProgramMapTable> {
public:
ProgramMapTables():DVBTables<ProgramMapTable>() { }
ProgramMapTables(ProgramMapTable *t):DVBTables<ProgramMapTable>(t) { }
// Maps stream PIDs — presumably PID to stream type; confirm in the .cpp.
std::map<uint16_t,uint16_t> pids() const;
// Builds the aggregated Program view from all sections.
Program program() const;
};
// Aggregated view of one program, assembled from its PMT sections:
// program-level descriptors plus the list of elementary streams.
class Program {
public:
Program(ProgramMapTables const &pmts);
std::vector<DVBDescriptor*> const &descriptors() const { return _descriptors; }
std::vector<Stream> const &streams() const { return _streams; }
// Prints a human-readable description of the program.
void dump(std::ostream &where=std::cerr, std::string const &indent="") const;
// PID carrying this program's PCR.
uint16_t pcrPid() const { return _pcrPid; }
protected:
uint16_t _programNumber;
uint16_t _pcrPid;
std::vector<DVBDescriptor*> _descriptors;
std::vector<Stream> _streams;
};
| 27.743243 | 146 | 0.74038 | [
"vector"
] |
9e68e91979dc0e08dc92d7a5850966911302b06d | 33,649 | h | C | windows/mbedtls/oid.h | McKillroy/yojimbo | 4d13cc2d219ab35a9e48e3cbd7e65b70170a8567 | [
"BSD-3-Clause"
] | 5,547 | 2019-09-18T02:20:53.000Z | 2021-08-25T10:29:54.000Z | windows/mbedtls/oid.h | McKillroy/yojimbo | 4d13cc2d219ab35a9e48e3cbd7e65b70170a8567 | [
"BSD-3-Clause"
] | 381 | 2020-10-30T08:06:02.000Z | 2022-03-31T15:50:59.000Z | windows/mbedtls/oid.h | McKillroy/yojimbo | 4d13cc2d219ab35a9e48e3cbd7e65b70170a8567 | [
"BSD-3-Clause"
] | 1,445 | 2019-09-18T04:38:19.000Z | 2021-08-25T09:17:30.000Z | /**
* \file oid.h
*
* \brief Object Identifier (OID) database
*/
/*
* Copyright The Mbed TLS Contributors
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBEDTLS_OID_H
#define MBEDTLS_OID_H
#include "mbedtls/private_access.h"
#include "mbedtls/build_info.h"
#include "mbedtls/asn1.h"
#include "mbedtls/pk.h"
#include <stddef.h>
#if defined(MBEDTLS_CIPHER_C)
#include "mbedtls/cipher.h"
#endif
#if defined(MBEDTLS_MD_C)
#include "mbedtls/md.h"
#endif
#define MBEDTLS_ERR_OID_NOT_FOUND -0x002E /**< OID is not found. */
#define MBEDTLS_ERR_OID_BUF_TOO_SMALL -0x000B /**< output buffer is too small */
/* This is for the benefit of X.509, but defined here in order to avoid
* having a "backwards" include of x.509.h here */
/*
* X.509 extension types (internal, arbitrary values for bitsets)
*/
#define MBEDTLS_OID_X509_EXT_AUTHORITY_KEY_IDENTIFIER (1 << 0)
#define MBEDTLS_OID_X509_EXT_SUBJECT_KEY_IDENTIFIER (1 << 1)
#define MBEDTLS_OID_X509_EXT_KEY_USAGE (1 << 2)
#define MBEDTLS_OID_X509_EXT_CERTIFICATE_POLICIES (1 << 3)
#define MBEDTLS_OID_X509_EXT_POLICY_MAPPINGS (1 << 4)
#define MBEDTLS_OID_X509_EXT_SUBJECT_ALT_NAME (1 << 5)
#define MBEDTLS_OID_X509_EXT_ISSUER_ALT_NAME (1 << 6)
#define MBEDTLS_OID_X509_EXT_SUBJECT_DIRECTORY_ATTRS (1 << 7)
#define MBEDTLS_OID_X509_EXT_BASIC_CONSTRAINTS (1 << 8)
#define MBEDTLS_OID_X509_EXT_NAME_CONSTRAINTS (1 << 9)
#define MBEDTLS_OID_X509_EXT_POLICY_CONSTRAINTS (1 << 10)
#define MBEDTLS_OID_X509_EXT_EXTENDED_KEY_USAGE (1 << 11)
#define MBEDTLS_OID_X509_EXT_CRL_DISTRIBUTION_POINTS (1 << 12)
#define MBEDTLS_OID_X509_EXT_INIHIBIT_ANYPOLICY (1 << 13)
#define MBEDTLS_OID_X509_EXT_FRESHEST_CRL (1 << 14)
#define MBEDTLS_OID_X509_EXT_NS_CERT_TYPE (1 << 16)
/*
* Top level OID tuples
*/
#define MBEDTLS_OID_ISO_MEMBER_BODIES "\x2a" /* {iso(1) member-body(2)} */
#define MBEDTLS_OID_ISO_IDENTIFIED_ORG "\x2b" /* {iso(1) identified-organization(3)} */
#define MBEDTLS_OID_ISO_CCITT_DS "\x55" /* {joint-iso-ccitt(2) ds(5)} */
#define MBEDTLS_OID_ISO_ITU_COUNTRY "\x60" /* {joint-iso-itu-t(2) country(16)} */
/*
* ISO Member bodies OID parts
*/
#define MBEDTLS_OID_COUNTRY_US "\x86\x48" /* {us(840)} */
#define MBEDTLS_OID_ORG_RSA_DATA_SECURITY "\x86\xf7\x0d" /* {rsadsi(113549)} */
#define MBEDTLS_OID_RSA_COMPANY MBEDTLS_OID_ISO_MEMBER_BODIES MBEDTLS_OID_COUNTRY_US \
MBEDTLS_OID_ORG_RSA_DATA_SECURITY /* {iso(1) member-body(2) us(840) rsadsi(113549)} */
#define MBEDTLS_OID_ORG_ANSI_X9_62 "\xce\x3d" /* ansi-X9-62(10045) */
#define MBEDTLS_OID_ANSI_X9_62 MBEDTLS_OID_ISO_MEMBER_BODIES MBEDTLS_OID_COUNTRY_US \
MBEDTLS_OID_ORG_ANSI_X9_62
/*
* ISO Identified organization OID parts
*/
#define MBEDTLS_OID_ORG_DOD "\x06" /* {dod(6)} */
#define MBEDTLS_OID_ORG_OIW "\x0e"
#define MBEDTLS_OID_OIW_SECSIG MBEDTLS_OID_ORG_OIW "\x03"
#define MBEDTLS_OID_OIW_SECSIG_ALG MBEDTLS_OID_OIW_SECSIG "\x02"
#define MBEDTLS_OID_OIW_SECSIG_SHA1 MBEDTLS_OID_OIW_SECSIG_ALG "\x1a"
#define MBEDTLS_OID_ORG_CERTICOM "\x81\x04" /* certicom(132) */
#define MBEDTLS_OID_CERTICOM MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_ORG_CERTICOM
#define MBEDTLS_OID_ORG_TELETRUST "\x24" /* teletrust(36) */
#define MBEDTLS_OID_TELETRUST MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_ORG_TELETRUST
/*
* ISO ITU OID parts
*/
#define MBEDTLS_OID_ORGANIZATION "\x01" /* {organization(1)} */
#define MBEDTLS_OID_ISO_ITU_US_ORG MBEDTLS_OID_ISO_ITU_COUNTRY MBEDTLS_OID_COUNTRY_US MBEDTLS_OID_ORGANIZATION /* {joint-iso-itu-t(2) country(16) us(840) organization(1)} */
#define MBEDTLS_OID_ORG_GOV "\x65" /* {gov(101)} */
#define MBEDTLS_OID_GOV MBEDTLS_OID_ISO_ITU_US_ORG MBEDTLS_OID_ORG_GOV /* {joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101)} */
#define MBEDTLS_OID_ORG_NETSCAPE "\x86\xF8\x42" /* {netscape(113730)} */
#define MBEDTLS_OID_NETSCAPE MBEDTLS_OID_ISO_ITU_US_ORG MBEDTLS_OID_ORG_NETSCAPE /* Netscape OID {joint-iso-itu-t(2) country(16) us(840) organization(1) netscape(113730)} */
/* ISO arc for standard certificate and CRL extensions */
#define MBEDTLS_OID_ID_CE MBEDTLS_OID_ISO_CCITT_DS "\x1D" /**< id-ce OBJECT IDENTIFIER ::= {joint-iso-ccitt(2) ds(5) 29} */
#define MBEDTLS_OID_NIST_ALG MBEDTLS_OID_GOV "\x03\x04" /** { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistAlgorithm(4) */
/**
* Private Internet Extensions
* { iso(1) identified-organization(3) dod(6) internet(1)
* security(5) mechanisms(5) pkix(7) }
*/
#define MBEDTLS_OID_INTERNET MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_ORG_DOD "\x01"
#define MBEDTLS_OID_PKIX MBEDTLS_OID_INTERNET "\x05\x05\x07"
/*
* Arc for standard naming attributes
*/
#define MBEDTLS_OID_AT MBEDTLS_OID_ISO_CCITT_DS "\x04" /**< id-at OBJECT IDENTIFIER ::= {joint-iso-ccitt(2) ds(5) 4} */
#define MBEDTLS_OID_AT_CN MBEDTLS_OID_AT "\x03" /**< id-at-commonName AttributeType:= {id-at 3} */
#define MBEDTLS_OID_AT_SUR_NAME MBEDTLS_OID_AT "\x04" /**< id-at-surName AttributeType:= {id-at 4} */
#define MBEDTLS_OID_AT_SERIAL_NUMBER MBEDTLS_OID_AT "\x05" /**< id-at-serialNumber AttributeType:= {id-at 5} */
#define MBEDTLS_OID_AT_COUNTRY MBEDTLS_OID_AT "\x06" /**< id-at-countryName AttributeType:= {id-at 6} */
#define MBEDTLS_OID_AT_LOCALITY MBEDTLS_OID_AT "\x07" /**< id-at-locality AttributeType:= {id-at 7} */
#define MBEDTLS_OID_AT_STATE MBEDTLS_OID_AT "\x08" /**< id-at-state AttributeType:= {id-at 8} */
#define MBEDTLS_OID_AT_ORGANIZATION MBEDTLS_OID_AT "\x0A" /**< id-at-organizationName AttributeType:= {id-at 10} */
#define MBEDTLS_OID_AT_ORG_UNIT MBEDTLS_OID_AT "\x0B" /**< id-at-organizationalUnitName AttributeType:= {id-at 11} */
#define MBEDTLS_OID_AT_TITLE MBEDTLS_OID_AT "\x0C" /**< id-at-title AttributeType:= {id-at 12} */
#define MBEDTLS_OID_AT_POSTAL_ADDRESS MBEDTLS_OID_AT "\x10" /**< id-at-postalAddress AttributeType:= {id-at 16} */
#define MBEDTLS_OID_AT_POSTAL_CODE MBEDTLS_OID_AT "\x11" /**< id-at-postalCode AttributeType:= {id-at 17} */
#define MBEDTLS_OID_AT_GIVEN_NAME MBEDTLS_OID_AT "\x2A" /**< id-at-givenName AttributeType:= {id-at 42} */
#define MBEDTLS_OID_AT_INITIALS MBEDTLS_OID_AT "\x2B" /**< id-at-initials AttributeType:= {id-at 43} */
#define MBEDTLS_OID_AT_GENERATION_QUALIFIER MBEDTLS_OID_AT "\x2C" /**< id-at-generationQualifier AttributeType:= {id-at 44} */
#define MBEDTLS_OID_AT_UNIQUE_IDENTIFIER MBEDTLS_OID_AT "\x2D" /**< id-at-uniqueIdentifier AttributType:= {id-at 45} */
#define MBEDTLS_OID_AT_DN_QUALIFIER MBEDTLS_OID_AT "\x2E" /**< id-at-dnQualifier AttributeType:= {id-at 46} */
#define MBEDTLS_OID_AT_PSEUDONYM MBEDTLS_OID_AT "\x41" /**< id-at-pseudonym AttributeType:= {id-at 65} */
#define MBEDTLS_OID_UID "\x09\x92\x26\x89\x93\xF2\x2C\x64\x01\x01" /** id-domainComponent AttributeType:= {itu-t(0) data(9) pss(2342) ucl(19200300) pilot(100) pilotAttributeType(1) uid(1)} */
#define MBEDTLS_OID_DOMAIN_COMPONENT "\x09\x92\x26\x89\x93\xF2\x2C\x64\x01\x19" /** id-domainComponent AttributeType:= {itu-t(0) data(9) pss(2342) ucl(19200300) pilot(100) pilotAttributeType(1) domainComponent(25)} */
/*
* OIDs for standard certificate extensions
*/
#define MBEDTLS_OID_AUTHORITY_KEY_IDENTIFIER MBEDTLS_OID_ID_CE "\x23" /**< id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 } */
#define MBEDTLS_OID_SUBJECT_KEY_IDENTIFIER MBEDTLS_OID_ID_CE "\x0E" /**< id-ce-subjectKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 14 } */
#define MBEDTLS_OID_KEY_USAGE MBEDTLS_OID_ID_CE "\x0F" /**< id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 } */
#define MBEDTLS_OID_CERTIFICATE_POLICIES MBEDTLS_OID_ID_CE "\x20" /**< id-ce-certificatePolicies OBJECT IDENTIFIER ::= { id-ce 32 } */
#define MBEDTLS_OID_POLICY_MAPPINGS MBEDTLS_OID_ID_CE "\x21" /**< id-ce-policyMappings OBJECT IDENTIFIER ::= { id-ce 33 } */
#define MBEDTLS_OID_SUBJECT_ALT_NAME MBEDTLS_OID_ID_CE "\x11" /**< id-ce-subjectAltName OBJECT IDENTIFIER ::= { id-ce 17 } */
#define MBEDTLS_OID_ISSUER_ALT_NAME MBEDTLS_OID_ID_CE "\x12" /**< id-ce-issuerAltName OBJECT IDENTIFIER ::= { id-ce 18 } */
#define MBEDTLS_OID_SUBJECT_DIRECTORY_ATTRS MBEDTLS_OID_ID_CE "\x09" /**< id-ce-subjectDirectoryAttributes OBJECT IDENTIFIER ::= { id-ce 9 } */
#define MBEDTLS_OID_BASIC_CONSTRAINTS MBEDTLS_OID_ID_CE "\x13" /**< id-ce-basicConstraints OBJECT IDENTIFIER ::= { id-ce 19 } */
#define MBEDTLS_OID_NAME_CONSTRAINTS MBEDTLS_OID_ID_CE "\x1E" /**< id-ce-nameConstraints OBJECT IDENTIFIER ::= { id-ce 30 } */
#define MBEDTLS_OID_POLICY_CONSTRAINTS MBEDTLS_OID_ID_CE "\x24" /**< id-ce-policyConstraints OBJECT IDENTIFIER ::= { id-ce 36 } */
#define MBEDTLS_OID_EXTENDED_KEY_USAGE MBEDTLS_OID_ID_CE "\x25" /**< id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } */
#define MBEDTLS_OID_CRL_DISTRIBUTION_POINTS MBEDTLS_OID_ID_CE "\x1F" /**< id-ce-cRLDistributionPoints OBJECT IDENTIFIER ::= { id-ce 31 } */
#define MBEDTLS_OID_INIHIBIT_ANYPOLICY MBEDTLS_OID_ID_CE "\x36" /**< id-ce-inhibitAnyPolicy OBJECT IDENTIFIER ::= { id-ce 54 } */
#define MBEDTLS_OID_FRESHEST_CRL MBEDTLS_OID_ID_CE "\x2E" /**< id-ce-freshestCRL OBJECT IDENTIFIER ::= { id-ce 46 } */
/*
* Certificate policies
*/
#define MBEDTLS_OID_ANY_POLICY MBEDTLS_OID_CERTIFICATE_POLICIES "\x00" /**< anyPolicy OBJECT IDENTIFIER ::= { id-ce-certificatePolicies 0 } */
/*
* Netscape certificate extensions
*/
#define MBEDTLS_OID_NS_CERT MBEDTLS_OID_NETSCAPE "\x01"
#define MBEDTLS_OID_NS_CERT_TYPE MBEDTLS_OID_NS_CERT "\x01"
#define MBEDTLS_OID_NS_BASE_URL MBEDTLS_OID_NS_CERT "\x02"
#define MBEDTLS_OID_NS_REVOCATION_URL MBEDTLS_OID_NS_CERT "\x03"
#define MBEDTLS_OID_NS_CA_REVOCATION_URL MBEDTLS_OID_NS_CERT "\x04"
#define MBEDTLS_OID_NS_RENEWAL_URL MBEDTLS_OID_NS_CERT "\x07"
#define MBEDTLS_OID_NS_CA_POLICY_URL MBEDTLS_OID_NS_CERT "\x08"
#define MBEDTLS_OID_NS_SSL_SERVER_NAME MBEDTLS_OID_NS_CERT "\x0C"
#define MBEDTLS_OID_NS_COMMENT MBEDTLS_OID_NS_CERT "\x0D"
#define MBEDTLS_OID_NS_DATA_TYPE MBEDTLS_OID_NETSCAPE "\x02"
#define MBEDTLS_OID_NS_CERT_SEQUENCE MBEDTLS_OID_NS_DATA_TYPE "\x05"
/*
* OIDs for CRL extensions
*/
#define MBEDTLS_OID_PRIVATE_KEY_USAGE_PERIOD MBEDTLS_OID_ID_CE "\x10"
#define MBEDTLS_OID_CRL_NUMBER MBEDTLS_OID_ID_CE "\x14" /**< id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } */
/*
* X.509 v3 Extended key usage OIDs
*/
#define MBEDTLS_OID_ANY_EXTENDED_KEY_USAGE MBEDTLS_OID_EXTENDED_KEY_USAGE "\x00" /**< anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } */
#define MBEDTLS_OID_KP MBEDTLS_OID_PKIX "\x03" /**< id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } */
#define MBEDTLS_OID_SERVER_AUTH MBEDTLS_OID_KP "\x01" /**< id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } */
#define MBEDTLS_OID_CLIENT_AUTH MBEDTLS_OID_KP "\x02" /**< id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } */
#define MBEDTLS_OID_CODE_SIGNING MBEDTLS_OID_KP "\x03" /**< id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } */
#define MBEDTLS_OID_EMAIL_PROTECTION MBEDTLS_OID_KP "\x04" /**< id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } */
#define MBEDTLS_OID_TIME_STAMPING MBEDTLS_OID_KP "\x08" /**< id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } */
#define MBEDTLS_OID_OCSP_SIGNING MBEDTLS_OID_KP "\x09" /**< id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } */
/**
* Wi-SUN Alliance Field Area Network
* { iso(1) identified-organization(3) dod(6) internet(1)
* private(4) enterprise(1) WiSUN(45605) FieldAreaNetwork(1) }
*/
#define MBEDTLS_OID_WISUN_FAN MBEDTLS_OID_INTERNET "\x04\x01\x82\xe4\x25\x01"
#define MBEDTLS_OID_ON MBEDTLS_OID_PKIX "\x08" /**< id-on OBJECT IDENTIFIER ::= { id-pkix 8 } */
#define MBEDTLS_OID_ON_HW_MODULE_NAME MBEDTLS_OID_ON "\x04" /**< id-on-hardwareModuleName OBJECT IDENTIFIER ::= { id-on 4 } */
/*
* PKCS definition OIDs
*/
#define MBEDTLS_OID_PKCS MBEDTLS_OID_RSA_COMPANY "\x01" /**< pkcs OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) 1 } */
#define MBEDTLS_OID_PKCS1 MBEDTLS_OID_PKCS "\x01" /**< pkcs-1 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } */
#define MBEDTLS_OID_PKCS5 MBEDTLS_OID_PKCS "\x05" /**< pkcs-5 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 5 } */
#define MBEDTLS_OID_PKCS9 MBEDTLS_OID_PKCS "\x09" /**< pkcs-9 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 9 } */
#define MBEDTLS_OID_PKCS12 MBEDTLS_OID_PKCS "\x0c" /**< pkcs-12 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 12 } */
/*
* PKCS#1 OIDs
*/
#define MBEDTLS_OID_PKCS1_RSA MBEDTLS_OID_PKCS1 "\x01" /**< rsaEncryption OBJECT IDENTIFIER ::= { pkcs-1 1 } */
#define MBEDTLS_OID_PKCS1_MD5 MBEDTLS_OID_PKCS1 "\x04" /**< md5WithRSAEncryption ::= { pkcs-1 4 } */
#define MBEDTLS_OID_PKCS1_SHA1 MBEDTLS_OID_PKCS1 "\x05" /**< sha1WithRSAEncryption ::= { pkcs-1 5 } */
#define MBEDTLS_OID_PKCS1_SHA224 MBEDTLS_OID_PKCS1 "\x0e" /**< sha224WithRSAEncryption ::= { pkcs-1 14 } */
#define MBEDTLS_OID_PKCS1_SHA256 MBEDTLS_OID_PKCS1 "\x0b" /**< sha256WithRSAEncryption ::= { pkcs-1 11 } */
#define MBEDTLS_OID_PKCS1_SHA384 MBEDTLS_OID_PKCS1 "\x0c" /**< sha384WithRSAEncryption ::= { pkcs-1 12 } */
#define MBEDTLS_OID_PKCS1_SHA512 MBEDTLS_OID_PKCS1 "\x0d" /**< sha512WithRSAEncryption ::= { pkcs-1 13 } */
#define MBEDTLS_OID_RSA_SHA_OBS "\x2B\x0E\x03\x02\x1D"
#define MBEDTLS_OID_PKCS9_EMAIL MBEDTLS_OID_PKCS9 "\x01" /**< emailAddress AttributeType ::= { pkcs-9 1 } */
/* RFC 4055 */
#define MBEDTLS_OID_RSASSA_PSS MBEDTLS_OID_PKCS1 "\x0a" /**< id-RSASSA-PSS ::= { pkcs-1 10 } */
#define MBEDTLS_OID_MGF1 MBEDTLS_OID_PKCS1 "\x08" /**< id-mgf1 ::= { pkcs-1 8 } */
/*
* Digest algorithms
*/
#define MBEDTLS_OID_DIGEST_ALG_MD5 MBEDTLS_OID_RSA_COMPANY "\x02\x05" /**< id-mbedtls_md5 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 5 } */
#define MBEDTLS_OID_DIGEST_ALG_SHA1 MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_OIW_SECSIG_SHA1 /**< id-mbedtls_sha1 OBJECT IDENTIFIER ::= { iso(1) identified-organization(3) oiw(14) secsig(3) algorithms(2) 26 } */
#define MBEDTLS_OID_DIGEST_ALG_SHA224 MBEDTLS_OID_NIST_ALG "\x02\x04" /**< id-sha224 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 4 } */
#define MBEDTLS_OID_DIGEST_ALG_SHA256 MBEDTLS_OID_NIST_ALG "\x02\x01" /**< id-mbedtls_sha256 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 1 } */
#define MBEDTLS_OID_DIGEST_ALG_SHA384 MBEDTLS_OID_NIST_ALG "\x02\x02" /**< id-sha384 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 2 } */
#define MBEDTLS_OID_DIGEST_ALG_SHA512 MBEDTLS_OID_NIST_ALG "\x02\x03" /**< id-mbedtls_sha512 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 3 } */
#define MBEDTLS_OID_DIGEST_ALG_RIPEMD160 MBEDTLS_OID_TELETRUST "\x03\x02\x01" /**< id-ripemd160 OBJECT IDENTIFIER :: { iso(1) identified-organization(3) teletrust(36) algorithm(3) hashAlgorithm(2) ripemd160(1) } */
#define MBEDTLS_OID_HMAC_SHA1 MBEDTLS_OID_RSA_COMPANY "\x02\x07" /**< id-hmacWithSHA1 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 7 } */
#define MBEDTLS_OID_HMAC_SHA224 MBEDTLS_OID_RSA_COMPANY "\x02\x08" /**< id-hmacWithSHA224 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 8 } */
#define MBEDTLS_OID_HMAC_SHA256 MBEDTLS_OID_RSA_COMPANY "\x02\x09" /**< id-hmacWithSHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 9 } */
#define MBEDTLS_OID_HMAC_SHA384 MBEDTLS_OID_RSA_COMPANY "\x02\x0A" /**< id-hmacWithSHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 10 } */
#define MBEDTLS_OID_HMAC_SHA512 MBEDTLS_OID_RSA_COMPANY "\x02\x0B" /**< id-hmacWithSHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 11 } */
/*
* Encryption algorithms
*/
#define MBEDTLS_OID_DES_CBC MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_OIW_SECSIG_ALG "\x07" /**< desCBC OBJECT IDENTIFIER ::= { iso(1) identified-organization(3) oiw(14) secsig(3) algorithms(2) 7 } */
#define MBEDTLS_OID_DES_EDE3_CBC MBEDTLS_OID_RSA_COMPANY "\x03\x07" /**< des-ede3-cbc OBJECT IDENTIFIER ::= { iso(1) member-body(2) -- us(840) rsadsi(113549) encryptionAlgorithm(3) 7 } */
#define MBEDTLS_OID_AES MBEDTLS_OID_NIST_ALG "\x01" /** aes OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistAlgorithm(4) 1 } */
/*
* Key Wrapping algorithms
*/
/*
* RFC 5649
*/
#define MBEDTLS_OID_AES128_KW MBEDTLS_OID_AES "\x05" /** id-aes128-wrap OBJECT IDENTIFIER ::= { aes 5 } */
#define MBEDTLS_OID_AES128_KWP MBEDTLS_OID_AES "\x08" /** id-aes128-wrap-pad OBJECT IDENTIFIER ::= { aes 8 } */
#define MBEDTLS_OID_AES192_KW MBEDTLS_OID_AES "\x19" /** id-aes192-wrap OBJECT IDENTIFIER ::= { aes 25 } */
#define MBEDTLS_OID_AES192_KWP MBEDTLS_OID_AES "\x1c" /** id-aes192-wrap-pad OBJECT IDENTIFIER ::= { aes 28 } */
#define MBEDTLS_OID_AES256_KW MBEDTLS_OID_AES "\x2d" /** id-aes256-wrap OBJECT IDENTIFIER ::= { aes 45 } */
#define MBEDTLS_OID_AES256_KWP MBEDTLS_OID_AES "\x30" /** id-aes256-wrap-pad OBJECT IDENTIFIER ::= { aes 48 } */
/*
* PKCS#5 OIDs
*/
#define MBEDTLS_OID_PKCS5_PBKDF2 MBEDTLS_OID_PKCS5 "\x0c" /**< id-PBKDF2 OBJECT IDENTIFIER ::= {pkcs-5 12} */
#define MBEDTLS_OID_PKCS5_PBES2 MBEDTLS_OID_PKCS5 "\x0d" /**< id-PBES2 OBJECT IDENTIFIER ::= {pkcs-5 13} */
#define MBEDTLS_OID_PKCS5_PBMAC1 MBEDTLS_OID_PKCS5 "\x0e" /**< id-PBMAC1 OBJECT IDENTIFIER ::= {pkcs-5 14} */
/*
* PKCS#5 PBES1 algorithms
*/
#define MBEDTLS_OID_PKCS5_PBE_MD5_DES_CBC MBEDTLS_OID_PKCS5 "\x03" /**< pbeWithMD5AndDES-CBC OBJECT IDENTIFIER ::= {pkcs-5 3} */
#define MBEDTLS_OID_PKCS5_PBE_MD5_RC2_CBC MBEDTLS_OID_PKCS5 "\x06" /**< pbeWithMD5AndRC2-CBC OBJECT IDENTIFIER ::= {pkcs-5 6} */
#define MBEDTLS_OID_PKCS5_PBE_SHA1_DES_CBC MBEDTLS_OID_PKCS5 "\x0a" /**< pbeWithSHA1AndDES-CBC OBJECT IDENTIFIER ::= {pkcs-5 10} */
#define MBEDTLS_OID_PKCS5_PBE_SHA1_RC2_CBC MBEDTLS_OID_PKCS5 "\x0b" /**< pbeWithSHA1AndRC2-CBC OBJECT IDENTIFIER ::= {pkcs-5 11} */
/*
* PKCS#8 OIDs
*/
#define MBEDTLS_OID_PKCS9_CSR_EXT_REQ MBEDTLS_OID_PKCS9 "\x0e" /**< extensionRequest OBJECT IDENTIFIER ::= {pkcs-9 14} */
/*
* PKCS#12 PBE OIDs
*/
#define MBEDTLS_OID_PKCS12_PBE MBEDTLS_OID_PKCS12 "\x01" /**< pkcs-12PbeIds OBJECT IDENTIFIER ::= {pkcs-12 1} */
#define MBEDTLS_OID_PKCS12_PBE_SHA1_DES3_EDE_CBC MBEDTLS_OID_PKCS12_PBE "\x03" /**< pbeWithSHAAnd3-KeyTripleDES-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 3} */
#define MBEDTLS_OID_PKCS12_PBE_SHA1_DES2_EDE_CBC MBEDTLS_OID_PKCS12_PBE "\x04" /**< pbeWithSHAAnd2-KeyTripleDES-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 4} */
#define MBEDTLS_OID_PKCS12_PBE_SHA1_RC2_128_CBC MBEDTLS_OID_PKCS12_PBE "\x05" /**< pbeWithSHAAnd128BitRC2-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 5} */
#define MBEDTLS_OID_PKCS12_PBE_SHA1_RC2_40_CBC MBEDTLS_OID_PKCS12_PBE "\x06" /**< pbeWithSHAAnd40BitRC2-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 6} */
/*
* EC key algorithms from RFC 5480
*/
/* id-ecPublicKey OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } */
#define MBEDTLS_OID_EC_ALG_UNRESTRICTED MBEDTLS_OID_ANSI_X9_62 "\x02\01"
/* id-ecDH OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132)
* schemes(1) ecdh(12) } */
#define MBEDTLS_OID_EC_ALG_ECDH MBEDTLS_OID_CERTICOM "\x01\x0c"
/*
* ECParameters namedCurve identifiers, from RFC 5480, RFC 5639, and SEC2
*/
/* secp192r1 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) prime(1) 1 } */
#define MBEDTLS_OID_EC_GRP_SECP192R1 MBEDTLS_OID_ANSI_X9_62 "\x03\x01\x01"
/* secp224r1 OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132) curve(0) 33 } */
#define MBEDTLS_OID_EC_GRP_SECP224R1 MBEDTLS_OID_CERTICOM "\x00\x21"
/* secp256r1 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) prime(1) 7 } */
#define MBEDTLS_OID_EC_GRP_SECP256R1 MBEDTLS_OID_ANSI_X9_62 "\x03\x01\x07"
/* secp384r1 OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132) curve(0) 34 } */
#define MBEDTLS_OID_EC_GRP_SECP384R1 MBEDTLS_OID_CERTICOM "\x00\x22"
/* secp521r1 OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132) curve(0) 35 } */
#define MBEDTLS_OID_EC_GRP_SECP521R1 MBEDTLS_OID_CERTICOM "\x00\x23"
/* secp192k1 OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132) curve(0) 31 } */
#define MBEDTLS_OID_EC_GRP_SECP192K1 MBEDTLS_OID_CERTICOM "\x00\x1f"
/* secp224k1 OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132) curve(0) 32 } */
#define MBEDTLS_OID_EC_GRP_SECP224K1 MBEDTLS_OID_CERTICOM "\x00\x20"
/* secp256k1 OBJECT IDENTIFIER ::= {
* iso(1) identified-organization(3) certicom(132) curve(0) 10 } */
#define MBEDTLS_OID_EC_GRP_SECP256K1 MBEDTLS_OID_CERTICOM "\x00\x0a"
/* RFC 5639 4.1
* ecStdCurvesAndGeneration OBJECT IDENTIFIER::= {iso(1)
* identified-organization(3) teletrust(36) algorithm(3) signature-
* algorithm(3) ecSign(2) 8}
* ellipticCurve OBJECT IDENTIFIER ::= {ecStdCurvesAndGeneration 1}
* versionOne OBJECT IDENTIFIER ::= {ellipticCurve 1} */
#define MBEDTLS_OID_EC_BRAINPOOL_V1 MBEDTLS_OID_TELETRUST "\x03\x03\x02\x08\x01\x01"
/* brainpoolP256r1 OBJECT IDENTIFIER ::= {versionOne 7} */
#define MBEDTLS_OID_EC_GRP_BP256R1 MBEDTLS_OID_EC_BRAINPOOL_V1 "\x07"
/* brainpoolP384r1 OBJECT IDENTIFIER ::= {versionOne 11} */
#define MBEDTLS_OID_EC_GRP_BP384R1 MBEDTLS_OID_EC_BRAINPOOL_V1 "\x0B"
/* brainpoolP512r1 OBJECT IDENTIFIER ::= {versionOne 13} */
#define MBEDTLS_OID_EC_GRP_BP512R1 MBEDTLS_OID_EC_BRAINPOOL_V1 "\x0D"
/*
* SEC1 C.1
*
* prime-field OBJECT IDENTIFIER ::= { id-fieldType 1 }
* id-fieldType OBJECT IDENTIFIER ::= { ansi-X9-62 fieldType(1)}
*/
#define MBEDTLS_OID_ANSI_X9_62_FIELD_TYPE MBEDTLS_OID_ANSI_X9_62 "\x01"
#define MBEDTLS_OID_ANSI_X9_62_PRIME_FIELD MBEDTLS_OID_ANSI_X9_62_FIELD_TYPE "\x01"
/*
* ECDSA signature identifiers, from RFC 5480
*/
#define MBEDTLS_OID_ANSI_X9_62_SIG MBEDTLS_OID_ANSI_X9_62 "\x04" /* signatures(4) */
#define MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 MBEDTLS_OID_ANSI_X9_62_SIG "\x03" /* ecdsa-with-SHA2(3) */
/* ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4) 1 } */
#define MBEDTLS_OID_ECDSA_SHA1 MBEDTLS_OID_ANSI_X9_62_SIG "\x01"
/* ecdsa-with-SHA224 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4)
* ecdsa-with-SHA2(3) 1 } */
#define MBEDTLS_OID_ECDSA_SHA224 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x01"
/* ecdsa-with-SHA256 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4)
* ecdsa-with-SHA2(3) 2 } */
#define MBEDTLS_OID_ECDSA_SHA256 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x02"
/* ecdsa-with-SHA384 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4)
* ecdsa-with-SHA2(3) 3 } */
#define MBEDTLS_OID_ECDSA_SHA384 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x03"
/* ecdsa-with-SHA512 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4)
* ecdsa-with-SHA2(3) 4 } */
#define MBEDTLS_OID_ECDSA_SHA512 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x04"
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Base OID descriptor structure
*/
/*
 * Descriptor for one known OID: the raw encoded bytes (content octets, in
 * the same form as the MBEDTLS_OID_* string constants above) plus, unless
 * MBEDTLS_X509_REMOVE_INFO is set, human-readable metadata.  Presumably
 * backs the lookup tables behind the mbedtls_oid_get_* functions declared
 * below — confirm in oid.c.
 */
typedef struct mbedtls_oid_descriptor_t
{
const char *MBEDTLS_PRIVATE(asn1); /*!< OID ASN.1 representation */
size_t MBEDTLS_PRIVATE(asn1_len); /*!< length of asn1 */
#if !defined(MBEDTLS_X509_REMOVE_INFO)
const char *MBEDTLS_PRIVATE(name); /*!< official name (e.g. from RFC) */
const char *MBEDTLS_PRIVATE(description); /*!< human friendly description */
#endif
} mbedtls_oid_descriptor_t;
/**
* \brief Translate an ASN.1 OID into its numeric representation
* (e.g. "\x2A\x86\x48\x86\xF7\x0D" into "1.2.840.113549")
*
* \param buf buffer to put representation in
* \param size size of the buffer
* \param oid OID to translate
*
* \return Length of the string written (excluding final NULL) or
* MBEDTLS_ERR_OID_BUF_TOO_SMALL in case of error
*/
int mbedtls_oid_get_numeric_string( char *buf, size_t size, const mbedtls_asn1_buf *oid );
/**
* \brief Translate an X.509 extension OID into local values
*
* \param oid OID to use
* \param ext_type place to store the extension type
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_x509_ext_type( const mbedtls_asn1_buf *oid, int *ext_type );
/**
* \brief Translate an X.509 attribute type OID into the short name
* (e.g. the OID for an X520 Common Name into "CN")
*
* \param oid OID to use
* \param short_name place to store the string pointer
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_attr_short_name( const mbedtls_asn1_buf *oid, const char **short_name );
/**
* \brief Translate PublicKeyAlgorithm OID into pk_type
*
* \param oid OID to use
* \param pk_alg place to store public key algorithm
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_pk_alg( const mbedtls_asn1_buf *oid, mbedtls_pk_type_t *pk_alg );
/**
* \brief Translate pk_type into PublicKeyAlgorithm OID
*
* \param pk_alg Public key type to look for
* \param oid place to store ASN.1 OID string pointer
* \param olen length of the OID
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_oid_by_pk_alg( mbedtls_pk_type_t pk_alg,
const char **oid, size_t *olen );
#if defined(MBEDTLS_ECP_C)
/**
* \brief Translate NamedCurve OID into an EC group identifier
*
* \param oid OID to use
* \param grp_id place to store group id
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_ec_grp( const mbedtls_asn1_buf *oid, mbedtls_ecp_group_id *grp_id );
/**
* \brief Translate EC group identifier into NamedCurve OID
*
* \param grp_id EC group identifier
* \param oid place to store ASN.1 OID string pointer
* \param olen length of the OID
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_oid_by_ec_grp( mbedtls_ecp_group_id grp_id,
const char **oid, size_t *olen );
#endif /* MBEDTLS_ECP_C */
#if defined(MBEDTLS_MD_C)
/**
* \brief Translate SignatureAlgorithm OID into md_type and pk_type
*
* \param oid OID to use
* \param md_alg place to store message digest algorithm
* \param pk_alg place to store public key algorithm
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_sig_alg( const mbedtls_asn1_buf *oid,
mbedtls_md_type_t *md_alg, mbedtls_pk_type_t *pk_alg );
/**
* \brief Translate SignatureAlgorithm OID into description
*
* \param oid OID to use
* \param desc place to store string pointer
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_sig_alg_desc( const mbedtls_asn1_buf *oid, const char **desc );
/**
* \brief Translate md_type and pk_type into SignatureAlgorithm OID
*
* \param md_alg message digest algorithm
* \param pk_alg public key algorithm
* \param oid place to store ASN.1 OID string pointer
* \param olen length of the OID
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_oid_by_sig_alg( mbedtls_pk_type_t pk_alg, mbedtls_md_type_t md_alg,
const char **oid, size_t *olen );
/**
* \brief Translate hash algorithm OID into md_type
*
* \param oid OID to use
* \param md_alg place to store message digest algorithm
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_md_alg( const mbedtls_asn1_buf *oid, mbedtls_md_type_t *md_alg );
/**
* \brief Translate hmac algorithm OID into md_type
*
* \param oid OID to use
* \param md_hmac place to store message hmac algorithm
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_md_hmac( const mbedtls_asn1_buf *oid, mbedtls_md_type_t *md_hmac );
#endif /* MBEDTLS_MD_C */
#if !defined(MBEDTLS_X509_REMOVE_INFO)
/**
* \brief Translate Extended Key Usage OID into description
*
* \param oid OID to use
* \param desc place to store string pointer
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_extended_key_usage( const mbedtls_asn1_buf *oid, const char **desc );
#endif
/**
* \brief Translate certificate policies OID into description
*
* \param oid OID to use
* \param desc place to store string pointer
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_certificate_policies( const mbedtls_asn1_buf *oid, const char **desc );
/**
* \brief Translate md_type into hash algorithm OID
*
* \param md_alg message digest algorithm
* \param oid place to store ASN.1 OID string pointer
* \param olen length of the OID
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_oid_by_md( mbedtls_md_type_t md_alg, const char **oid, size_t *olen );
#if defined(MBEDTLS_CIPHER_C)
/**
* \brief Translate encryption algorithm OID into cipher_type
*
* \param oid OID to use
* \param cipher_alg place to store cipher algorithm
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_cipher_alg( const mbedtls_asn1_buf *oid, mbedtls_cipher_type_t *cipher_alg );
#endif /* MBEDTLS_CIPHER_C */
#if defined(MBEDTLS_PKCS12_C)
/**
* \brief Translate PKCS#12 PBE algorithm OID into md_type and
* cipher_type
*
* \param oid OID to use
* \param md_alg place to store message digest algorithm
* \param cipher_alg place to store cipher algorithm
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
int mbedtls_oid_get_pkcs12_pbe_alg( const mbedtls_asn1_buf *oid, mbedtls_md_type_t *md_alg,
mbedtls_cipher_type_t *cipher_alg );
#endif /* MBEDTLS_PKCS12_C */
#ifdef __cplusplus
}
#endif
#endif /* oid.h */
| 52.412773 | 234 | 0.678861 | [
"object"
] |
9e6a700c36d3e680b7d03f8aa7df8019c5a18033 | 4,229 | h | C | REDSI_1160929_1161573/boost_1_67_0/tools/build/src/engine/constants.h | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | [
"MIT"
] | 1 | 2018-12-15T19:57:24.000Z | 2018-12-15T19:57:24.000Z | REDSI_1160929_1161573/boost_1_67_0/tools/build/src/engine/constants.h | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | [
"MIT"
] | null | null | null | REDSI_1160929_1161573/boost_1_67_0/tools/build/src/engine/constants.h | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | [
"MIT"
] | 1 | 2019-03-08T11:06:22.000Z | 2019-03-08T11:06:22.000Z | /*
* Copyright 2011 Steven Watanabe
*
* This file is part of Jam - see jam.c for Copyright information.
*/
/*
* constants.h - constant objects
*/
#ifndef BOOST_JAM_CONSTANTS_H
#define BOOST_JAM_CONSTANTS_H
#include "object.h"
/* Initialize the interned constant OBJECTs declared below; presumably must
 * be called once at startup before any constant_* is used — confirm against
 * constants.c. */
void constants_init( void );
/* Release the constant OBJECTs created by constants_init(); call once at
 * shutdown. */
void constants_done( void );
extern OBJECT * constant_empty; /* "" */
extern OBJECT * constant_dot; /* "." */
extern OBJECT * constant_plus; /* "+" */
extern OBJECT * constant_star; /* "*" */
extern OBJECT * constant_question_mark; /* "?" */
extern OBJECT * constant_ok; /* "ok" */
extern OBJECT * constant_true; /* "true" */
extern OBJECT * constant_name; /* "__name__" */
extern OBJECT * constant_bases; /* "__bases__" */
extern OBJECT * constant_class; /* "__class__" */
extern OBJECT * constant_typecheck; /* ".typecheck" */
extern OBJECT * constant_builtin; /* "(builtin)" */
extern OBJECT * constant_HCACHEFILE; /* "HCACHEFILE" */
extern OBJECT * constant_HCACHEMAXAGE; /* "HCACHEMAXAGE" */
extern OBJECT * constant_HDRSCAN; /* "HDRSCAN" */
extern OBJECT * constant_HDRRULE; /* "HDRRULE" */
extern OBJECT * constant_BINDRULE; /* "BINDRULE" */
extern OBJECT * constant_LOCATE; /* "LOCATE" */
extern OBJECT * constant_SEARCH; /* "SEARCH" */
extern OBJECT * constant_JAM_SEMAPHORE; /* "JAM_SEMAPHORE" */
extern OBJECT * constant_TIMING_RULE; /* "__TIMING_RULE__" */
extern OBJECT * constant_ACTION_RULE; /* "__ACTION_RULE__" */
extern OBJECT * constant_JAMSHELL; /* "JAMSHELL" */
extern OBJECT * constant_TMPDIR; /* "TMPDIR" */
extern OBJECT * constant_TMPNAME; /* "TMPNAME" */
extern OBJECT * constant_TMPFILE; /* "TMPFILE" */
extern OBJECT * constant_STDOUT; /* "STDOUT" */
extern OBJECT * constant_STDERR; /* "STDERR" */
extern OBJECT * constant_JAMDATE; /* "JAMDATE" */
extern OBJECT * constant_JAM_TIMESTAMP_RESOLUTION; /* "JAM_TIMESTAMP_RESOLUTION" */
extern OBJECT * constant_JAM_VERSION; /* "JAM_VERSION" */
extern OBJECT * constant_JAMUNAME; /* "JAMUNAME" */
extern OBJECT * constant_ENVIRON; /* ".ENVIRON" */
extern OBJECT * constant_ARGV; /* "ARGV" */
extern OBJECT * constant_all; /* "all" */
extern OBJECT * constant_PARALLELISM; /* "PARALLELISM" */
extern OBJECT * constant_KEEP_GOING; /* "KEEP_GOING" */
extern OBJECT * constant_other; /* "[OTHER]" */
extern OBJECT * constant_total; /* "[TOTAL]" */
extern OBJECT * constant_FILE_DIRSCAN; /* "FILE_DIRSCAN" */
extern OBJECT * constant_MAIN; /* "MAIN" */
extern OBJECT * constant_MAIN_MAKE; /* "MAIN_MAKE" */
extern OBJECT * constant_MAKE_MAKE0; /* "MAKE_MAKE0" */
extern OBJECT * constant_MAKE_MAKE1; /* "MAKE_MAKE1" */
extern OBJECT * constant_MAKE_MAKE0SORT; /* "MAKE_MAKE0SORT" */
extern OBJECT * constant_BINDMODULE; /* "BINDMODULE" */
extern OBJECT * constant_IMPORT_MODULE; /* "IMPORT_MODULE" */
extern OBJECT * constant_BUILTIN_GLOB_BACK; /* "BUILTIN_GLOB_BACK" */
extern OBJECT * constant_timestamp; /* "timestamp" */
extern OBJECT * constant_python; /* "__python__" */
extern OBJECT * constant_python_interface; /* "python_interface" */
extern OBJECT * constant_extra_pythonpath; /* "EXTRA_PYTHONPATH" */
extern OBJECT * constant_MAIN_PYTHON; /* "MAIN_PYTHON" */
extern OBJECT * constant_FILE_ARCHIVESCAN; /* "FILE_ARCHIVESCAN" */
extern OBJECT * constant_BUILTIN_GLOB_ARCHIVE_BACK; /* "BUILTIN_GLOB_ARCHIVE_BACK" */
#endif
| 55.644737 | 86 | 0.554741 | [
"object"
] |
9e6b0c96ce11fbe172678638835ba3c851979eb9 | 759 | h | C | app-gui/include/resources.h | funbiscuit/spacedisplay | 963e1ce22abfec28af2529f25ebaeda0e82e3701 | [
"MIT"
] | null | null | null | app-gui/include/resources.h | funbiscuit/spacedisplay | 963e1ce22abfec28af2529f25ebaeda0e82e3701 | [
"MIT"
] | 1 | 2021-10-11T16:38:06.000Z | 2021-10-11T16:38:06.000Z | app-gui/include/resources.h | funbiscuit/spacedisplay | 963e1ce22abfec28af2529f25ebaeda0e82e3701 | [
"MIT"
] | null | null | null |
#ifndef CURVEDETECT_RESOURCES_H
#define CURVEDETECT_RESOURCES_H
#include <cstdint>
#include <vector>
#include <QPixmap>
#include <QImage>
#include "resource_builder/resources.h"
/**
 * Non-copyable resource cache accessed through the static get() accessor
 * (singleton pattern): renders vector resources into QPixmaps, optionally
 * tinted with a color.
 */
class Resources {
private:
    // Instances may only be obtained through get().
    Resources() = default;
public:
    Resources(const Resources &) = delete;
    Resources &operator=(Resources &) = delete;

    // Access the single shared instance.
    static Resources &get();

    //qt uses implicit sharing so its safe and efficient to pass pixmap by value
    // Pixmap for vector resource `id` at `width` pixels, tinted with `color`
    // blended at `strength` (1.0 = full tint).
    QPixmap get_vector_pixmap(ResourceBuilder::ResId id, int width, const QColor &color, qreal strength = 1.0);
    // Pixmap for vector resource `id` at `width` pixels, untinted.
    QPixmap get_vector_pixmap(ResourceBuilder::ResId id, int width);
private:
    // Blend `color` over `src` at the given strength and return the result.
    QImage tint(const QImage &src, const QColor &color, qreal strength = 1.0);
};
#endif //CURVEDETECT_RESOURCES_H
| 21.685714 | 111 | 0.73386 | [
"vector"
] |
9e6cb764458c9fa14bcee3c561b3f132c79de93e | 10,564 | h | C | libs/math/include/math/TQuatHelpers.h | ValtoLibraries/Filament | 9aa3d449b9ba66a1e29bf040bc4ebbcd128af59e | [
"Apache-2.0"
] | 2 | 2021-06-19T18:13:18.000Z | 2021-11-14T17:43:40.000Z | libs/math/include/math/TQuatHelpers.h | ValtoLibraries/Filament | 9aa3d449b9ba66a1e29bf040bc4ebbcd128af59e | [
"Apache-2.0"
] | 1 | 2019-04-08T02:40:24.000Z | 2019-04-08T02:40:24.000Z | libs/math/include/math/TQuatHelpers.h | ValtoLibraries/Filament | 9aa3d449b9ba66a1e29bf040bc4ebbcd128af59e | [
"Apache-2.0"
] | 1 | 2020-09-04T07:38:32.000Z | 2020-09-04T07:38:32.000Z | /*
* Copyright 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MATH_TQUATHELPERS_H_
#define MATH_TQUATHELPERS_H_
#include <math.h>
#include <stdint.h>
#include <sys/types.h>
#include <iostream>
#include <math/compiler.h>
#include <math/vec3.h>
namespace filament {
namespace math {
namespace details {
// -------------------------------------------------------------------------------------
/*
* No user serviceable parts here.
*
* Don't use this file directly, instead include math/quat.h
*/
/*
* TQuatProductOperators implements basic arithmetic and basic compound assignment
* operators on a quaternion of type BASE<T>.
*
* BASE only needs to implement operator[] and size().
* By simply inheriting from TQuatProductOperators<BASE, T> BASE will automatically
* get all the functionality here.
*/
template <template<typename T> class QUATERNION, typename T>
class TQuatProductOperators {
public:
    /* compound assignment from another quaternion of the same size but different
     * element type.
     */
    template <typename OTHER>
    constexpr QUATERNION<T>& operator *= (const QUATERNION<OTHER>& r) {
        QUATERNION<T>& q = static_cast<QUATERNION<T>&>(*this);
        q = q * r;
        return q;
    }

    /* compound assignment products by a scalar
     */
    constexpr QUATERNION<T>& operator *= (T v) {
        QUATERNION<T>& lhs = static_cast<QUATERNION<T>&>(*this);
        for (size_t i = 0; i < QUATERNION<T>::size(); i++) {
            lhs[i] *= v;
        }
        return lhs;
    }

    /* compound assignment division by a scalar, applied component-wise
     */
    constexpr QUATERNION<T>& operator /= (T v) {
        QUATERNION<T>& lhs = static_cast<QUATERNION<T>&>(*this);
        for (size_t i = 0; i < QUATERNION<T>::size(); i++) {
            lhs[i] /= v;
        }
        return lhs;
    }

    /*
     * NOTE: the functions below ARE NOT member methods. They are friend functions
     * with their definition inlined with their declaration. This makes these
     * template functions available to the compiler when (and only when) this class
     * is instantiated, at which point they're only templated on the 2nd parameter
     * (the first one, BASE<T> being known).
     */

    /* The operators below handle operation between quaternion of the same size
     * but of a different element type.
     */
    // Hamilton product of two quaternions.
    template<typename RT>
    friend inline
    constexpr QUATERNION<T> MATH_PURE operator *(const QUATERNION<T>& q, const QUATERNION<RT>& r) {
        // could be written as:
        //  return QUATERNION<T>(
        //            q.w*r.w - dot(q.xyz, r.xyz),
        //            q.w*r.xyz + r.w*q.xyz + cross(q.xyz, r.xyz));
        return QUATERNION<T>(
                q.w*r.w - q.x*r.x - q.y*r.y - q.z*r.z,
                q.w*r.x + q.x*r.w + q.y*r.z - q.z*r.y,
                q.w*r.y - q.x*r.z + q.y*r.w + q.z*r.x,
                q.w*r.z + q.x*r.y - q.y*r.x + q.z*r.w);
    }

    // Transform vector v by quaternion q via the sandwich product q * (v, 0) * q^-1.
    template<typename RT>
    friend inline
    constexpr TVec3<T> MATH_PURE operator *(const QUATERNION<T>& q, const TVec3<RT>& v) {
        // note: if q is known to be a unit quaternion, then this simplifies to:
        //  TVec3<T> t = 2 * cross(q.xyz, v)
        //  return v + (q.w * t) + cross(q.xyz, t)
        return imaginary(q * QUATERNION<T>(v, 0) * inverse(q));
    }

    /* For quaternions, we use explicit "by a scalar" products because it's much faster
     * than going (implicitly) through the quaternion multiplication.
     * For reference: we could use the code below instead, but it would be a lot slower.
     *  friend inline
     *  constexpr BASE<T> MATH_PURE operator *(const BASE<T>& q, const BASE<T>& r) {
     *      return BASE<T>(
     *              q.w*r.w - q.x*r.x - q.y*r.y - q.z*r.z,
     *              q.w*r.x + q.x*r.w + q.y*r.z - q.z*r.y,
     *              q.w*r.y - q.x*r.z + q.y*r.w + q.z*r.x,
     *              q.w*r.z + q.x*r.y - q.y*r.x + q.z*r.w);
     *
     */
    friend inline
    constexpr QUATERNION<T> MATH_PURE operator *(QUATERNION<T> q, T scalar) {
        // don't pass q by reference because we need a copy anyways
        return q *= scalar;
    }

    friend inline
    constexpr QUATERNION<T> MATH_PURE operator *(T scalar, QUATERNION<T> q) {
        // don't pass q by reference because we need a copy anyways
        return q *= scalar;
    }

    friend inline
    constexpr QUATERNION<T> MATH_PURE operator /(QUATERNION<T> q, T scalar) {
        // don't pass q by reference because we need a copy anyways
        return q /= scalar;
    }
};
/*
* TQuatFunctions implements functions on a quaternion of type BASE<T>.
*
* BASE only needs to implement operator[] and size().
* By simply inheriting from TQuatFunctions<BASE, T> BASE will automatically
* get all the functionality here.
*/
template <template<typename T> class QUATERNION, typename T>
class TQuatFunctions {
public:
    /*
     * NOTE: the functions below ARE NOT member methods. They are friend functions
     * with their definition inlined with their declaration. This makes these
     * template functions available to the compiler when (and only when) this class
     * is instantiated, at which point they're only templated on the 2nd parameter
     * (the first one, BASE<T> being known).
     */
    // inner (dot) product of two quaternions
    template<typename RT>
    friend inline
    constexpr T MATH_PURE dot(const QUATERNION<T>& p, const QUATERNION<RT>& q) {
        return p.x * q.x +
                p.y * q.y +
                p.z * q.z +
                p.w * q.w;
    }

    // Euclidean norm: sqrt(dot(q, q))
    friend inline
    T MATH_PURE norm(const QUATERNION<T>& q) {
        return std::sqrt( dot(q, q) );
    }

    // synonym for norm()
    friend inline
    T MATH_PURE length(const QUATERNION<T>& q) {
        return norm(q);
    }

    // squared norm (avoids the sqrt)
    friend inline
    constexpr T MATH_PURE length2(const QUATERNION<T>& q) {
        return dot(q, q);
    }

    // unit quaternion; returns the identity-like QUATERNION<T>(1) for a zero-length input
    friend inline
    QUATERNION<T> MATH_PURE normalize(const QUATERNION<T>& q) {
        return length(q) ? q / length(q) : QUATERNION<T>(static_cast<T>(1));
    }

    // conjugate: negate the imaginary part
    friend inline
    constexpr QUATERNION<T> MATH_PURE conj(const QUATERNION<T>& q) {
        return QUATERNION<T>(q.w, -q.x, -q.y, -q.z);
    }

    // multiplicative inverse: conj(q) scaled by 1 / ||q||^2
    friend inline
    constexpr QUATERNION<T> MATH_PURE inverse(const QUATERNION<T>& q) {
        return conj(q) * (1 / dot(q, q));
    }

    // real (scalar) part
    friend inline
    constexpr T MATH_PURE real(const QUATERNION<T>& q) {
        return q.w;
    }

    // imaginary (vector) part
    friend inline
    constexpr TVec3<T> MATH_PURE imaginary(const QUATERNION<T>& q) {
        return q.xyz;
    }

    // copy of q with its real part zeroed out
    friend inline
    constexpr QUATERNION<T> MATH_PURE unreal(const QUATERNION<T>& q) {
        return QUATERNION<T>(q.xyz, 0);
    }

    // imaginary part of the product p*q
    friend inline
    constexpr QUATERNION<T> MATH_PURE cross(const QUATERNION<T>& p, const QUATERNION<T>& q) {
        return unreal(p*q);
    }

    // quaternion exponential
    friend inline
    QUATERNION<T> MATH_PURE exp(const QUATERNION<T>& q) {
        const T nq(norm(q.xyz));
        return std::exp(q.w)*QUATERNION<T>((sin(nq)/nq)*q.xyz, cos(nq));
    }

    // quaternion logarithm
    friend inline
    QUATERNION<T> MATH_PURE log(const QUATERNION<T>& q) {
        const T nq(norm(q));
        return QUATERNION<T>((std::acos(q.w/nq)/norm(q.xyz))*q.xyz, log(nq));
    }

    // raise q to the real power a
    friend inline
    QUATERNION<T> MATH_PURE pow(const QUATERNION<T>& q, T a) {
        // could also be computed as: exp(a*log(q));
        const T nq(norm(q));
        const T theta(a*std::acos(q.w / nq));
        return std::pow(nq, a) * QUATERNION<T>(normalize(q.xyz) * std::sin(theta), std::cos(theta));
    }

    // spherical linear interpolation from p to q at parameter t
    friend inline
    QUATERNION<T> MATH_PURE slerp(const QUATERNION<T>& p, const QUATERNION<T>& q, T t) {
        // could also be computed as: pow(q * inverse(p), t) * p;
        const T d = std::abs(dot(p, q));
        static constexpr T value_eps = T(10) * std::numeric_limits<T>::epsilon();
        // Prevent blowing up when slerping between two quaternions that are very near each other.
        if ((T(1) - d) < value_eps) {
            return normalize(lerp(p, q, t));
        }
        const T npq = sqrt(dot(p, p) * dot(q, q)); // ||p|| * ||q||
        const T a = std::acos(d / npq);
        const T a0 = a * (1 - t);
        const T a1 = a * t;
        const T sina = sin(a);
        if (sina < value_eps) {
            return normalize(lerp(p, q, t));
        }
        const T isina = 1 / sina;
        const T s0 = std::sin(a0) * isina;
        const T s1 = std::sin(a1) * isina;
        // ensure we're taking the "short" side
        return normalize(s0 * p + ((d < 0) ? (-s1) : (s1)) * q);
    }

    // component-wise linear interpolation (result is generally not unit length)
    friend inline
    constexpr QUATERNION<T> MATH_PURE lerp(const QUATERNION<T>& p, const QUATERNION<T>& q, T t) {
        return ((1 - t) * p) + (t * q);
    }

    // normalized linear interpolation
    friend inline
    constexpr QUATERNION<T> MATH_PURE nlerp(const QUATERNION<T>& p, const QUATERNION<T>& q, T t) {
        return normalize(lerp(p, q, t));
    }

    // return q or -q, whichever has a non-negative real part
    friend inline
    constexpr QUATERNION<T> MATH_PURE positive(const QUATERNION<T>& q) {
        return q.w < 0 ? -q : q;
    }
};
/*
* TQuatDebug implements functions on a vector of type BASE<T>.
*
* BASE only needs to implement operator[] and size().
* By simply inheriting from TQuatDebug<BASE, T> BASE will automatically
* get all the functionality here.
*/
template <template<typename T> class QUATERNION, typename T>
class TQuatDebug {
public:
    /*
     * NOTE: the functions below ARE NOT member methods. They are friend functions
     * with their definition inlined with their declaration. This makes these
     * template functions available to the compiler when (and only when) this class
     * is instantiated, at which point they're only templated on the 2nd parameter
     * (the first one, BASE<T> being known).
     */
    // Stream a human-readable form: "< w + xi + yj + zk >"
    friend std::ostream& operator<< (std::ostream& stream, const QUATERNION<T>& q) {
        return stream << "< " << q.w << " + " << q.x << "i + " << q.y << "j + " << q.z << "k >";
    }
};
// -------------------------------------------------------------------------------------
} // namespace details
} // namespace math
} // namespace filament
#endif // MATH_TQUATHELPERS_H_
| 33.750799 | 100 | 0.594756 | [
"vector"
] |
9e6da9ae2acdd4612ea6824f12118f161b0ec5e1 | 1,315 | h | C | libgpos/include/gpos/error/CAutoTrace.h | davidli2010/gporca | 4c946e5e41051c832736b2fce712c37ca651ddf5 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-01-01T03:11:59.000Z | 2021-01-01T03:11:59.000Z | libgpos/include/gpos/error/CAutoTrace.h | davidli2010/gporca | 4c946e5e41051c832736b2fce712c37ca651ddf5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | libgpos/include/gpos/error/CAutoTrace.h | davidli2010/gporca | 4c946e5e41051c832736b2fce712c37ca651ddf5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | //---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 EMC Corp.
//
// @filename:
// CAutoTrace.h
//
// @doc:
// Auto object for creating trace messages
//---------------------------------------------------------------------------
#ifndef GPOS_CAutoTrace_H
#define GPOS_CAutoTrace_H
#include "gpos/base.h"
#include "gpos/io/COstreamString.h"
#include "gpos/string/CWStringDynamic.h"
namespace gpos
{
//---------------------------------------------------------------------------
// @class:
// CAutoTrace
//
// @doc:
// Auto object for creating trace messages;
// creates a stream over a dynamic string and uses it to print objects;
// at destruction the string is written to the log as a trace msg;
//
//---------------------------------------------------------------------------
class CAutoTrace : public CStackObject
{
	private:
		// dynamic string buffer accumulating the trace text
		CWStringDynamic m_wstr;

		// string stream writing into m_wstr
		COstreamString m_os;

		// private copy ctor (non-copyable stack object)
		CAutoTrace(const CAutoTrace &);

	public:
		// ctor
		explicit
		CAutoTrace(CMemoryPool *mp);

		// dtor; per the class doc above, the accumulated string is
		// written to the log as a trace message at destruction
		~CAutoTrace();

		// stream accessor: clients print objects into this stream
		IOstream& Os()
		{
			return m_os;
		}

}; // class CAutoTrace
#endif // !GPOS_CAutoTrace_H
// EOF
| 19.626866 | 78 | 0.506464 | [
"object"
] |
9e6de1a34a9ad5813fa79ca2889f82491d815a20 | 14,363 | c | C | sys/ubb/udf.c | aaliomer/exos | 6a37c41cad910c373322441a9f23cfabdbfae275 | [
"BSD-3-Clause"
] | 1 | 2018-01-23T23:07:19.000Z | 2018-01-23T23:07:19.000Z | sys/ubb/udf.c | aaliomer/exos | 6a37c41cad910c373322441a9f23cfabdbfae275 | [
"BSD-3-Clause"
] | null | null | null | sys/ubb/udf.c | aaliomer/exos | 6a37c41cad910c373322441a9f23cfabdbfae275 | [
"BSD-3-Clause"
] | null | null | null |
/*
* Copyright (C) 1997 Massachusetts Institute of Technology
*
* This software is being provided by the copyright holders under the
* following license. By obtaining, using and/or copying this software,
* you agree that you have read, understood, and will comply with the
* following terms and conditions:
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose and without fee or royalty is
* hereby granted, provided that the full text of this NOTICE appears on
* ALL copies of the software and documentation or portions thereof,
* including modifications, that you make.
*
* THIS SOFTWARE IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE,
* BUT NOT LIMITATION, COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR
* WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR
* THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
* THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. COPYRIGHT
* HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE OR
* DOCUMENTATION.
*
* The name and trademarks of copyright holders may NOT be used in
* advertising or publicity pertaining to the software without specific,
* written prior permission. Title to copyright in this software and any
* associated documentation will at all times remain with copyright
* holders. See the file AUTHORS which should have accompanied this software
* for a list of all copyright holders.
*
* This file may be derived from previously copyrighted software. This
* copyright applies only to those changes made by the copyright
* holders listed in the AUTHORS file. The rest of this file is covered by
* the copyright notices, if any, listed below.
*/
#include "udf.h"
#include "template.h"
#include "set.h"
#include "xn-struct.h"
#include "registry.h"
#include "demand.h"
#include <kernel.h>
#include <kexcept.h>
/*
* Give no access (udf's currently not allowed to write meta data).
* They could, as long as either (1) they did not read this value ever,
* or (2) we ran them again and rolled back --- this makes sure
* they did not spit out a bogus answer.
*/
static struct udf_ext no_access;
typedef int (*udf_chk)(Set old, Set new, struct xn_update u, void *state);
typedef int (*udf_funp)(Set s, struct xn_update u);
Set udf_run(void* meta, struct udf_fun *up, size_t index, struct udf_ext *r, struct udf_ext *w);
Set udf_incore_run(void* meta, struct udf_fun *up, size_t index, struct udf_ext *r, struct udf_ext *w);
/*
 * Snapshot the meta-data bytes that `op` is about to modify so that
 * udf_rollback() can restore them.  Returns non-zero on success; an
 * operation that modifies nothing yields a trivially successful,
 * empty checkpoint.
 */
int udf_checkpt(struct udf_ckpt *ck, void *meta, xn_op *op) {
	ck->n = op->m.n;
	if(!ck->n) {
		/* Nothing to save: empty checkpoint always succeeds. */
		ck->m = 0;
		return 1;
	}
	ck->m = mv_read(meta, op->m.mv, ck->n);
	return ck->m != 0;
}
/* Release a checkpoint's storage.  The snapshot is presumably allocated
 * from sys_arena by mv_read() — TODO confirm — so freeing the arena
 * reclaims it; `ck` itself is not inspected. */
void udf_check_free(struct udf_ckpt *ck) {
	Arena_free(sys_arena);
}
/* Given a UDF and index, zero out every byte range in its read set. */
void udf_zero(void *meta, udf_fun *f, int index) {
	udf_ext ext;
	int i;

	/* Compute the extents this UDF instance reads, then clear each. */
	udf_getset(&ext, f, index);
	for(i = 0; i < ext.n; i++) {
		struct udf_bl *bl = &ext.v[i];
		memset((char *)meta + bl->lb, 0, bl->ub - bl->lb);
	}
}
/* Undo a failed operation: write the bytes saved by udf_checkpt()
 * back into the meta data. */
void udf_rollback(void *meta, struct udf_ckpt *ckpt) {
	mv_write(meta, ckpt->m, ckpt->n);
}
#if 0
static int udf_registry_miss;
#endif
static int udf_check_registry;
/*
* Free everything except for blocks that are not in core.
*
* Things would be a lot more efficient if we could just free each
* disk block as it was encountered rather than storing it in
* some intermediate data structure for later extraction.
*
* This whole deal is not particularly efficient. Fuck. They
* could just give us a hint as to how many owns functions need
* to be run.
*
* Intermediate hacks: build up the set and then consume rather
* than boundary cross.
*
* Fast case: Don't modify any bytes, just free the blocks.
*/
/* Free every block owned (transitively) by this meta data.
 * NOTE: the real implementation is currently compiled out (#if 0) and
 * this routine unconditionally reports success — see the block comment
 * above for the intended algorithm and its known inefficiencies. */
xn_err_t udf_free_all(udf_type *t, void *meta) {
#if 0
	udf_ext all_access;
	int nowns, i, hint;
	udf_fun *owns;
	Set s;
	xn_err_t res;

	if(depth++ > 3)
		return XN_TOO_DEEP;

	demand(t->class == UDF_BASE_TYPE, bogus class);
	all_access.n = 1;
	all_access.v[0].lb = 0;
	all_access.v[0].ub = t->nbytes;

	nowns = t->u.t.nowns;
	owns = &t->u.t.owns;
	hint = nowns;

	udf_check_registry = 1;
	res = XN_SUCCESS;
	for(miss = 0, i = 0; i < hint; i++) {
		s = udf_run(meta, owns, i, &all_access, &all_access);
		if(udf_registry_miss) {
			res = XN_REGISTRY_MISS;
			udf_registry_miss = 0;
		} else if(s) {
			E e;
			struct xn_update T
			size_t bytes;
			udf_type *t0;

			/*
			 * The pointers in these blocks have to be the
			 * entire deal.
			 */
			for(e = l_first(s->set); e; e = l_next(e, next)) {
				t0 = type_lookup(e->type);
				demand(t0, must be in registry!);
				ensure(m0 = xr_backing(t0), XN_TYPE_MISS);

				bytes = e->nelem * e->nbytes;
				db_free(db, bytes_to_blocks(bytes));
			}
			/* Nuke this UDF's read set. */
			udf_zero(meta, owns, i);
		}
		if(s)
			set_free();
	}
	udf_check_registry = 0;
	return res;
#endif
	return XN_SUCCESS;
}
/*
* XXX Should make the assumption that each type that is allocated
* is a true disk block.
*/
extern int xn_in_kernel;
/*
 * Read-only transaction: run the owns UDF selected by op->m.own_id over
 * `meta` (constrained to its declared read extents) and apply checker
 * `fun` to the resulting set and op->u.  No meta-data bytes are written.
 * Returns the checker's verdict; 0 on lookup failure; XN_SUCCESS
 * immediately for kernel-special types.
 */
inline int udf_read(udf_funp fun, int type, void *meta, xn_op *op) {
	size_t index;
	struct udf_fun *f, *read_f;
	struct udf_ext access;
	int res;

	/* hack since we subvert the type system */
	if(xn_in_kernel && xn_isspecial(type))
		return XN_SUCCESS;

	index = op->m.own_id;
	if(!(f = udf_lookup(&read_f, type, meta, index, op->m.cap)))
		return 0;
	/* Restrict the interpretation to this instance's read set. */
	udf_getset(&access, read_f, index);
	res = fun(udf_run(meta, f, index, &access, &no_access), op->u);
	set_free();
	return res;
}
/*
 * Core meta-data transaction: run the owns UDF before and after applying
 * the byte modifications in op->m, then let checker `chk` decide whether
 * the transition old_set -> new_set (given the update op->u) is legal.
 * Caller is responsible for rolling back changes on failure.
 * Note: the `state` parameter (0 from all current callers) is forwarded
 * as the checker's opaque argument and shadows the file-scope `state`
 * struct used by the move() checker.
 */
static inline int
meta_transaction(udf_chk chk, int type, void *meta, xn_op *op, void *state) {
	int res;
	Set old_set, new_set;
	struct xn_m_vec *mv;
	size_t n, index;
	struct udf_fun *f, *read_f;
	struct udf_ext access;

	/* hack since we subvert the type system */
	if(xn_in_kernel && xn_isspecial(type))
		return XN_SUCCESS;

	if(!(f = udf_lookup(&read_f, type, meta, index = op->m.own_id, op->m.cap)))
		return XN_BOGUS_INDEX;

	mv = op->m.mv;
	n = op->m.n;

	/* Do access control for this function. */
	udf_getset(&access, read_f, index);

	old_set = udf_run(meta, f, index, &access, &no_access);
	/* set_print("old", old_set); */
	mv_write(meta, mv, n);
	new_set = udf_run(meta, f, index, &access, &no_access);
	/* set_print("new", new_set); */

	/* ensure: {old_set - b} == new_set */
	res = chk(old_set, new_set, op->u, state) ?
			XN_SUCCESS : XN_TYPE_ERROR;

	/* Free data consumed by the modules. */
	set_free();
	return res;
}
/* Check that the given set is a subset of what is in the udf. */
int udf_contains(int type, void *meta, xn_op *op) {
	/* Checker applied to the UDF's set and the update op->u.
	 * (Uses the GCC nested-function extension — non-standard C.) */
	static inline int contains(Set s, struct xn_update u) {
		return set_issubset(s, set_single(u));
	}
	return udf_read(contains, type, meta, op);
}
/* Checker for udf_write(): ensure that old == new (the write must not
 * change the ownership set). */
static inline int nop(Set old, Set new, struct xn_update u, void *unused) {
	return set_eq(old, new);
}
/* Modify meta; should not produce a difference in the access set. */
int udf_write(int type, void *meta, xn_op *op) {
	return meta_transaction(nop, type, meta, op, 0);
}
/* Checker for udf_alloc(): ensure that {old_set U u} == new_set. */
static inline int add(Set old, Set new, struct xn_update u, void *unused) {
	return set_eq(set_union(old, set_single(u)), new);
}
/* Add exactly the blocks in op->u to the set guarded by meta. */
int udf_alloc(int type, void *meta, xn_op *op) {
	return meta_transaction(add, type, meta, op, 0);
}
/* Checker for udf_dealloc(): ensure that old_set - new_set == {u}.
 * NOTE(review): the name shadows the C library remove(); internal
 * linkage makes this legal, but it is easy to misread. */
static inline int remove(Set old, Set new, struct xn_update u, void *nil) {
	return set_eq(set_diff(old, new), set_single(u));
}
/* Delete exactly the blocks in op->u from the set guarded by meta. */
int udf_dealloc(int type, void *meta, xn_op *op) {
	return meta_transaction(remove, type, meta, op, 0);
}
/* Out-of-band arguments for the move() checker: the sets the caller
 * claims to add and delete (filled in by udf_move()). */
static struct xstate { Set add; Set del; } state;

/* Checker for udf_move(): old must contain all of state.del and none of
 * state.add, and new must equal (old - state.del) U state.add. */
static inline int move(Set old, Set new, struct xn_update u, void *unused) {
	if(!set_issubset(old, state.del) || !set_isdisjoint(old, state.add))
		return 0;
	return set_eq(new, set_union(set_diff(old, state.del), state.add));
}
/*
 * Use vector in o->m to add the blocks in o_add->u and delete those
 * in o_del->u, as a single transaction.
 */
int udf_move(int type, void *meta, xn_op *op, xn_update *o_add, xn_update *o_del) {
	Set add, del;

	/*
	 * make sure we have all of del, and none of add:
	 *	(old inter del = del) && (old inter add = nil)
	 * make sure add is in the final version and del is not:
	 *	(new inter add = add) && (new inter del = nil)
	 * make sure we added exactly add and deleted exactly del:
	 *	new = (old - del) U add:
	 */
	if(!(add = set_single(*o_add)) || !(del = set_single(*o_del)))
		return XN_CANNOT_ALLOC;
	/* Hand the claimed add/delete sets to the move() checker. */
	state.add = add;	state.del = del;

	return meta_transaction(move, type, meta, op, 0);
}
/* Statically zero-initialized page of scratch meta data, shared by the
 * type-validation helpers below (udf_type_init, udf_init). */
static unsigned meta[PAGESIZ];

/* Validate a union type by running its get_type UDF over scratch meta. */
int udf_type_init(struct udf_type *t) {
	return udf_get_union_type(t, meta);
}
/*
 * Transactionally set the concrete type of a union: write `nbytes` bytes
 * from `p` at offset `off` into the union's type-selecting region of
 * `meta`, then verify its get_type UDF now reports the expected type
 * `exp`.  On verification failure the original bytes are restored.
 */
xn_err_t
udf_set_type(int ty, int exp, void *meta, size_t off, void *p, size_t nbytes) {
	char ckpt[32];
	udf_type *t;

	/* The write must fit in the on-stack checkpoint buffer. */
	ensure(nbytes < sizeof ckpt, XN_WRITE_FAILED);
	ensure(t = type_lookup(ty), XN_TYPE_MISS);
	ensure(t->class == UDF_UNION, XN_TYPE_ERROR);

	/* we will have to allow this to change. */
	ensure(XN_NIL == udf_get_union_type(t, meta), XN_TYPE_ERROR);
	ensure(u_in_set(&t->type_access, off, nbytes), XN_WRITE_FAILED);

	/* save state. */
	memcpy(ckpt, (char *)meta + off, nbytes);
	/* modify state. */
	memcpy((char *)meta + off, p, nbytes);

	if(exp == udf_get_union_type(t, meta))
		return XN_SUCCESS;

	/* restore state */
	memcpy((char *)meta + off, ckpt, nbytes);
	return XN_TYPE_ERROR;
}
/*
 * Make sure that the "before" set is nil: every owns UDF of the type,
 * run over zeroed scratch meta data, must produce the empty set.
 * Returns 0 on success, -1 if any owns function reports ownership.
 * (Fix: removed the unused local `nbytes` and its dead read of
 * t->nbytes.)
 */
int udf_init(struct udf_type *t) {
	struct udf_fun *owns;
	size_t nowns, i;
	struct udf_ext r_access;

	nowns = t->u.t.nowns;
	owns = &t->u.t.owns;

	for(i = 0; i < nowns; i++) {
		/* Restrict each run to the bytes the instance may read. */
		udf_getset(&r_access, &t->u.t.read_f, i);
		if(!set_empty(udf_run(meta, owns, i, &r_access, &no_access)))
			return -1;
	}
	return 0;
}
/* Accumulator for the ownership set built up via udf_add() while a
 * UDF is being interpreted. */
Set udf_access;

/*
 * Interpret ownership UDF `f` with argument `index` over `meta`, with
 * read extents `r` and write extents `w`.  Returns the set the UDF
 * claimed (accumulated in udf_access), or 0 if interpretation failed.
 */
Set udf_run(void* meta, struct udf_fun *f, size_t index, struct udf_ext *r, struct udf_ext *w) {
	unsigned params[1];

	udf_access = set_new();
	params[0] = index;
	return !udf_interp(f, 0, meta, params, 1, r, w) ? 0 : udf_access;
}
static struct udf_ext *udf_extent;
/* Copy the extent list from src to dst (entry count plus entries). */
static inline void ext_copy(struct udf_ext *dst, struct udf_ext *src) {
	int count = src->n;
	int i;

	dst->n = count;
	for(i = 0; i < count; i++)
		dst->v[i] = src->v[i];
}
/* Extract the extent set computed by a read-set UDF for `index` into
 * `e` (results are memoized for the most recent (f, index) pair). */
int udf_getset(struct udf_ext *e, struct udf_fun *f, int index) {
	unsigned params[1];
	int res;

	/* One-entry memoization cache: last (f, index) and its extents. */
	static int last_index;
	static struct udf_fun *last_f;
	static struct udf_ext ext;

	/*
	 * Should we do this in a continuation passing style?
	 * udf interp could return where it is and what value
	 * it has for the instruction and we could then resume.
	 */

	/* memoize */
	if(index == last_index && f == last_f) {
		/* Cache hit: return the previously computed extents. */
		ext_copy(e, &ext);
		return 1;
	}

	params[0] = index;
	memset(e, 0, sizeof *e);
	/* udf_add_ext() appends into this during interpretation. */
	udf_extent = e;

	/*
	 * Is currently a strictly algebraic computation: not allowed to
	 * look at meta data.
	 */
	res = udf_interp(f, 0, 0, params, 1, &no_access, &no_access);

	/* reset cache.
	 * NOTE(review): the cache is refreshed even when udf_interp fails,
	 * so a repeat call with the same (f, index) would report success —
	 * confirm callers never retry after a failure. */
	last_f = f;
	last_index = index;
	ext_copy(&ext, e);

	return res;
}
/* Append the byte range [base, base + len) to the extent set currently
 * under construction (udf_extent); returns 0 when the fixed-size
 * table is already full, 1 on success. */
int udf_add_ext(size_t base, size_t len) {
	struct udf_bl *slot;

	if(udf_extent->n >= UDF_EXT_MAX)
		return 0;
	slot = &udf_extent->v[udf_extent->n++];
	slot->lb = base;
	slot->ub = base + len;
	return 1;
}
/* Run a union's get_type UDF over `meta` to obtain the concrete type
 * id; callers treat 0 / XN_NIL as "not set" (see udf_resolve_union,
 * udf_set_type). */
int udf_get_union_type(udf_type *t, void *meta) {
	return udf_interp(&t->get_type, 0, meta,
			0, 0, &t->type_access, &no_access);
}
/* Find what type this union really is: run its get_type UDF and look
 * the resulting type id up in the registry.  Returns 0 when the union
 * is unset or the id is unknown. */
struct udf_type *udf_resolve_union(udf_type *t, void *meta) {
	int concrete = udf_get_union_type(t, meta);

	if(!concrete)
		return 0;
	return type_lookup(concrete);
}
/*
 * Resolve `type` to its owns UDF: look the type up in the registry,
 * resolve unions to their concrete type via `meta`, bounds-check
 * `own_id`, and hand the read-set UDF back through `read_f`.
 * Returns 0 on unknown type, unresolvable union, or bad own_id.
 * The capability `c` is currently unused here.
 */
udf_fun *
udf_lookup(udf_fun **read_f, int type, void *meta, size_t own_id, cap_t c) {
	udf_type *t;

	if(!(t = type_lookup(type)))
		return 0;
	else if(t->class == UDF_UNION && !(t = udf_resolve_union(t, meta)))
		return 0;

	*read_f = &t->u.t.read_f;
	if(own_id >= t->u.t.nowns)
		return 0;
	return &t->u.t.owns;
}
/*
 * Record, during UDF interpretation, that the running UDF owns `n`
 * units of the given type starting at `db`: XN_BYTES ranges become raw
 * extents (udf_add_ext); anything else is folded element-by-element
 * into the global udf_access set.  A zero db is ignored.
 */
void udf_add(db_t db, size_t n, int type) {
	int i;
	struct xn_update u;

	if(type == XN_BYTES) {
		i = udf_add_ext(db, n);
		demand(i, bogus i);
		return;
	} else if(udf_check_registry) {
		/* Registry checking is currently disabled. */
		fatal(not here);
#if 0
		if(!xr_incore(db, n)) {
			udf_registry_miss = 1;
			return;
		}
#endif
	}

	if(!db)
		return;

	u.nelem = n;
	u.type = type;
	/* NOTE(review): `i` is a signed int compared against the size_t
	 * `n` — fine for small n, overflows for huge n. */
	for(i = 0; i < n; i++) {
		u.db = db + i;
		udf_access = set_union(udf_access, set_single(u));
	}
}
/* Non-zero iff [offset, offset + nbytes) is NOT covered by extent set e. */
int udf_illegal_access(size_t offset, size_t nbytes, struct udf_ext *e) {
	return u_in_set(e, offset, nbytes) ? 0 : 1;
}
/*
 * Run the type's block_access UDF to decide whether operation `op` on
 * block `index` is allowed under capability `cap`.  Returns
 * XN_BOGUS_CAP when the UDF denies access, XN_SUCCESS when it grants
 * it, and passes any other interpreter result through.
 * (Should really be a method, right?)
 */
int udf_blk_access(int op, struct xr *xr, void *meta, cap_t cap, size_t index) {
	struct udf_ctx ctx;
	struct udf_type *t;
	unsigned params[3];
	int res;

	t = type_lookup(xr->td.type);
	ctx.segs[0] = xr_get_sticky(xr);
	params[0] = (unsigned)meta;
	/* Fixed: this line had been corrupted to the glyph "∩" — an
	 * HTML-entity mangling of "&cap;" — restoring the address-of
	 * expression so the UDF receives a pointer to the capability. */
	params[1] = (unsigned)&cap;
	params[2] = (unsigned)index;
	res = udf_interp(&t->u.t.block_access, &ctx, meta,
			params, 3, &t->u.t.raw_read, &t->u.t.raw_write);
	if(res == 0)
		return XN_BOGUS_CAP;
	else if(res == 1)
		return XN_SUCCESS;
	return res;
}
/*
 * Switch the interpreter to segment `n` of `ctx`: look up the segment's
 * registry entry and hand back its backing meta data plus the raw
 * read/write extents of its type.  (Caller still needs to handle the
 * UDF's own "self" segment.)
 * NOTE(review): `n` is only bounded from above; a value below
 * UDF_PARENT would index segs[] with a negative subscript — confirm
 * callers guarantee n >= UDF_PARENT.
 */
int
udf_switch_seg(udf_ctx *ctx, int n, void **meta, udf_ext **r_access, udf_ext **w_access) {
	struct xr *xr;
	struct udf_type *t;

	if(!ctx || n >= (UDF_PARENT + UDF_MAX_SEG))
		return XN_BOGUS_CTX;
	n -= UDF_PARENT;

	ensure(xr = xr_lookup(da_to_db(ctx->segs[n])), XN_REGISTRY_MISS);
	ensure(t = type_lookup(xr->td.type), XN_TYPE_MISS);

	/* We are not handling multiple types here, at all. */
	*meta = (char *)xr_backing(xr) + da_to_offset(ctx->segs[n]);
	*r_access = &t->u.t.raw_read;
	*w_access = &t->u.t.raw_write;
	return XN_SUCCESS;
}
| 26.354128 | 103 | 0.66539 | [
"vector"
] |
9e6fd01317276b4ecdefa12c08881da05ed4e529 | 9,276 | h | C | dev/Code/Sandbox/Editor/Mannequin/MannequinModelViewport.h | horvay/lumberyardtutor | 63b0681a7ed2a98d651b699984de92951721353e | [
"AML"
] | 5 | 2018-08-17T21:05:55.000Z | 2021-04-17T10:48:26.000Z | dev/Code/Sandbox/Editor/Mannequin/MannequinModelViewport.h | JulianoCristian/Lumberyard-3 | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | null | null | null | dev/Code/Sandbox/Editor/Mannequin/MannequinModelViewport.h | JulianoCristian/Lumberyard-3 | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | 5 | 2017-12-05T16:36:00.000Z | 2021-04-27T06:33:54.000Z | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
#ifndef CRYINCLUDE_EDITOR_MANNEQUIN_MANNEQUINMODELVIEWPORT_H
#define CRYINCLUDE_EDITOR_MANNEQUIN_MANNEQUINMODELVIEWPORT_H
#pragma once
#include "ModelViewport.h"
#include "MannequinBase.h"
#include "ICryMannequinEditor.h"
#include "SequencerDopeSheetBase.h"
#include "Util/ArcBall.h"
#include "RenderHelpers/AxisHelper.h"
class IActionController;
class CMannequinDialog;
class CCharacterPropertiesDlg;
class CMannequinModelViewport
: public CModelViewport
, public IParticleEffectListener
, public IMannequinGameListener
, public CActionInputHandler
{
public:
enum ELocatorMode
{
LM_Translate,
LM_Rotate,
};
enum EMannequinViewMode
{
eVM_Unknown = 0,
eVM_FirstPerson = 1,
eVM_ThirdPerson = 2
};
CMannequinModelViewport(EMannequinEditorMode editorMode = eMEM_FragmentEditor, QWidget* parent = nullptr);
virtual ~CMannequinModelViewport();
virtual void Update();
void UpdateAnimation(float timePassed);
virtual void OnRender();
void SetActionController(IActionController* pActionController)
{
m_pActionController = pActionController;
}
void ToggleCamera()
{
mv_AttachCamera = !mv_AttachCamera;
CCamera oldCamera(GetCamera());
SetCamera(m_alternateCamera);
m_alternateCamera = oldCamera;
}
bool IsCameraAttached() const { return mv_AttachCamera; }
void ClearLocators();
void AddLocator(uint32 refID, const char* name, const QuatT& transform, IEntity* pEntity = NULL, int16 refJointId = -1, IAttachment* pAttachment = NULL, uint32 paramCRC = 0, string helperName = "");
const QuatTS& GetPhysicalLocation() const
{
return m_PhysicalLocation;
}
virtual void DrawCharacter(ICharacterInstance* pInstance, const SRendParams& rRP, const SRenderingPassInfo& passInfo);
//--- Override base level as we don't want the animation control there
void SetPlaybackMultiplier(float multiplier)
{
m_bPaused = (multiplier == 0.0f);
m_playbackMultiplier = multiplier;
}
void OnScrubTime(float timePassed);
void OnSequenceRestart(float timePassed);
void UpdateDebugParams();
void OnLoadPreviewFile()
{
if (m_attachCameraToEntity)
{
m_attachCameraToEntity = NULL;
AttachToEntity();
}
m_viewmode = eVM_Unknown;
m_pHoverBaseObject = NULL;
update();
}
void ClearCharacters()
{
m_entityList.resize(0);
}
void AddCharacter(IEntity* entity, const QuatT& startPosition)
{
m_entityList.push_back(SCharacterEntity(entity, startPosition));
}
static int OnPostStepLogged(const EventPhys* pEvent);
void OnCreateEmitter(IParticleEmitter* pEmitter, QuatTS const& qLoc, const IParticleEffect* pEffect, uint32 uEmitterFlags) override;
void OnDeleteEmitter(IParticleEmitter* pEmitter) override;
void OnSpawnParticleEmitter(IParticleEmitter* pEmitter, IActionController& actionController) override;
void SetTimelineUnits(ESequencerTickMode mode);
bool ToggleLookingAtCamera() { (m_lookAtCamera = !m_lookAtCamera); return m_lookAtCamera; }
void SetLocatorRotateMode() { m_locMode = LM_Rotate; }
void SetLocatorTranslateMode() { m_locMode = LM_Translate; }
void SetShowSceneRoots(bool showSceneRoots) { m_showSceneRoots = showSceneRoots; }
void AttachToEntity()
{
if (m_entityList.size() > 0)
{
m_attachCameraToEntity = m_entityList[0].GetEntity();
if (m_attachCameraToEntity != NULL)
{
m_lastEntityPos = m_attachCameraToEntity->GetPos();
}
}
}
void DetachFromEntity() { m_attachCameraToEntity = NULL; }
void Focus(AABB& pBoundingBox);
bool IsLookingAtCamera() const { return m_lookAtCamera; }
bool IsTranslateLocatorMode() const { return (LM_Translate == m_locMode); }
bool IsAttachedToEntity() const { return NULL != m_attachCameraToEntity; }
bool IsShowingSceneRoots() const { return m_showSceneRoots; }
// ModelViewportCE
// Mouse-interaction modes for the character-editor style viewport.
enum ViewModeCE
{
    NothingMode = 0,
    SelectMode,
    MoveMode,
    RotateMode,
    ScrollZoomMode,
    ScrollMode,
    ZoomMode,
};
// Result of a picking query: which base model / attachment / joint is closest
// to the pick ray and at what distance.  0xffffffff marks "nothing picked".
struct ClosestPoint
{
    uint32 m_nBaseModel;
    uint32 m_nAttachmentIdx;
    uint32 m_nJointIdx;
    f32 m_fDistance;
    ClosestPoint()
    {
        m_nBaseModel = 0;
        m_nAttachmentIdx = 0xffffffff;
        m_nJointIdx = 0xffffffff;
        m_fDistance = 9999999.0f; // "infinitely far" sentinel
    }
};
virtual void DrawGrid(const Quat& tmRotation, const Vec3& MotionTranslation, const Vec3& FootSlide, const Matrix33& rGridRot);
void DrawMoveTool(IRenderAuxGeom* pAuxGeom);
void LoadObject(const QString& fileName, float scale = 1.f);
// Ray picking against the base mesh / attached meshes; return hit distance.
f32 Picking_BaseMesh(const Ray& mray);
f32 Picking_AttachedMeshes(const Ray& mray, IAttachment* pIAttachment, const Matrix34& m34);
bool UpdateOrbitPosition();
// --- Interaction / picking state -------------------------------------------
ViewModeCE m_opMode;
int32 m_MouseOnAttachment;
QRect m_WinRect;
HitContext m_HitContext;
int32 m_highlightedBoneID;
CArcBall3D m_ArcBall;
int32 m_Button_MOVE;
int32 m_Button_ROTATE;
int32 m_Button_IK;
IAnimationGroundAlignmentPtr m_groundAlignment;
CMaterial* m_pDefaultMaterial;
int32 m_SelectedAttachment;
int32 m_SelectionUpdate;
int32 m_MouseOnBoneID;
bool m_animEventPosition;
CAxisHelper m_AxisHelper;
ClosestPoint m_ClosestPoint;
bool m_MouseButtonL;
bool m_MouseButtonR;
protected:
    bool UseAnimationDrivenMotionForEntity(const IEntity* piEntity);
    void SetFirstperson(IAttachmentManager* pAttachmentManager, EMannequinViewMode viewmode);
    bool HitTest(HitContext& hc, const bool bIsClick);
    // Qt input handlers.
    void mousePressEvent(QMouseEvent* event) override;
    void mouseReleaseEvent(QMouseEvent* event) override;
    void mouseMoveEvent(QMouseEvent* event) override;
    void keyPressEvent(QKeyEvent* event) override;
    void keyReleaseEvent(QKeyEvent* event) override;
    // ModelViewportCE
    // Character-editor style mouse handlers (L = left, M = middle button).
    void CELButtonDown(QPoint point);
    void CELButtonUp(QPoint point);
    void CEMButtonDown(QMouseEvent* event, QPoint point);
private:
    void UpdateCharacter(IEntity* pEntity, ICharacterInstance* pInstance, float deltaTime);
    void DrawCharacter(IEntity* pEntity, ICharacterInstance* pInstance, IStatObj* pStatObj, const SRendParams& rRP, const SRenderingPassInfo& passInfo);
    void DrawEntityAndChildren(class CEntityObject* pEntityObject, const SRendParams& rp, const SRenderingPassInfo& passInfo);
    void UpdatePropEntities(SMannequinContexts* pContexts, SMannequinContexts::SProp& prop);
    // A named, draggable locator bound to an entity / attachment / joint /
    // helper, with its own arc-ball and axis gizmo state.
    struct SLocator
    {
        uint32 m_refID;
        QString m_name;
        CArcBall3D m_ArcBall;
        CAxisHelper m_AxisHelper;
        IEntity* m_pEntity;
        _smart_ptr<IAttachment> m_pAttachment;
        string m_helperName;
        int16 m_jointId;
        uint32 m_paramCRC;
    };
    inline Matrix34 GetLocatorReferenceMatrix(const SLocator& locator);
    inline Matrix34 GetLocatorWorldMatrix(const SLocator& locator);
    // ModelViewportCE
    bool RepositionMoveTool(const QPoint& point);
    ELocatorMode m_locMode;
    // Stores a character by entity id (resolved lazily through the entity
    // system) together with its starting placement.
    struct SCharacterEntity
    {
        SCharacterEntity(IEntity* _entity = NULL, const QuatT& _startLocation = QuatT(IDENTITY))
            : entityId(_entity ? _entity->GetId() : 0)
            , startLocation(_startLocation)
        {}
        EntityId entityId;
        QuatT startLocation;
        // Re-resolves the entity each call; returns NULL if it no longer exists.
        IEntity* GetEntity() const { return gEnv->pEntitySystem->GetEntity(entityId); }
    };
    std::vector<SCharacterEntity> m_entityList;
    std::vector<IParticleEmitter*> m_particleEmitters;
    std::vector<SLocator> m_locators;
    uint32 m_selectedLocator;
    EMannequinViewMode m_viewmode;
    bool m_draggingLocator;
    QPoint m_dragStartPoint;
    bool m_LeftButtonDown;
    bool m_lookAtCamera;
    bool m_showSceneRoots;
    bool m_cameraKeyDown;
    float m_playbackMultiplier;
    // Camera "tween to focus" animation state.
    Vec3 m_tweenToFocusStart;
    Vec3 m_tweenToFocusDelta;
    float m_tweenToFocusTime;
    static const float s_maxTweenTime;
    EMannequinEditorMode m_editorMode;
    IActionController* m_pActionController;
    IPhysicalEntity* m_piGroundPlanePhysicalEntity;
    ESequencerTickMode m_TickerMode;
    IEntity* m_attachCameraToEntity; // NULL when the camera is free
    Vec3 m_lastEntityPos;
    CCamera m_alternateCamera;
    CBaseObject* m_pHoverBaseObject;
};
#endif // CRYINCLUDE_EDITOR_MANNEQUIN_MANNEQUINMODELVIEWPORT_H
| 31.337838 | 202 | 0.708711 | [
"vector",
"transform"
] |
9e7389764f966d0b694e8b70190e597b4edae707 | 4,624 | h | C | src/atlas/util/vector.h | wdeconinck/atlas | 8949d2b362b9b5431023a967bcf4ca84f6b8ce05 | [
"Apache-2.0"
] | null | null | null | src/atlas/util/vector.h | wdeconinck/atlas | 8949d2b362b9b5431023a967bcf4ca84f6b8ce05 | [
"Apache-2.0"
] | null | null | null | src/atlas/util/vector.h | wdeconinck/atlas | 8949d2b362b9b5431023a967bcf4ca84f6b8ce05 | [
"Apache-2.0"
] | null | null | null | /*
* (C) Copyright 2013 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation
* nor does it submit to any jurisdiction.
*/
#pragma once
#include <type_traits>
#include <utility> // std::swap
#include "atlas/library/config.h"
#include "atlas/parallel/omp/copy.h"
#include "atlas/parallel/omp/fill.h"
#include "atlas/runtime/Exception.h"
namespace atlas {
template <typename T>
// A minimal vector-like container whose bulk fill/copy operations go through
// the OpenMP helpers (omp::fill / omp::copy).  Unlike std::vector it has a
// very restricted capacity model: reserve() may only be called once, on an
// empty vector, and resize() can never grow beyond that capacity.
class vector {
public:
    using value_type = T;
    using iterator   = T*;
    using const_iterator = T const*;

public:
    // Empty vector; no allocation.
    vector() = default;
    // size default-initialized elements.
    template <typename size_t, typename std::enable_if<std::is_integral<size_t>::value, int>::type = 0>
    vector( size_t size ) {
        resize( size );
    }
    // size copies of value (filled via omp::fill).
    template <typename size_t, typename std::enable_if<std::is_integral<size_t>::value, int>::type = 0>
    vector( size_t size, const value_type& value ) : vector( size ) {
        assign( size, value );
    }
    // Deep copy of the other vector's elements.
    vector( const vector& other ) { assign( other.data_, other.data_ + other.size_ ); }
    // Move: swap buffers with the (default-initialized) *this.
    vector( vector&& other ) {
        std::swap( data_, other.data_ );
        std::swap( size_, other.size_ );
        std::swap( capacity_, other.capacity_ );
    }
    // Unified copy/move assignment via copy-and-swap (argument taken by value).
    vector& operator=( vector other ) {
        std::swap( data_, other.data_ );
        std::swap( size_, other.size_ );
        std::swap( capacity_, other.capacity_ );
        return *this;
    }
    template <typename T2>
    vector( const std::initializer_list<T2>& list ) {
        assign( list.begin(), list.end() );
    }
    ~vector() {
        if ( data_ ) {
            delete[] data_;
        }
    }
    // Checked access: throws via throw_OutOfRange when i >= size().
    // NOTE(review): a negative signed index is not rejected here -- confirm
    // callers never pass one.
    template <typename idx_t, typename std::enable_if<std::is_integral<idx_t>::value, int>::type = 0>
    T& at( idx_t i ) noexcept( false ) {
        if ( i >= size_ ) {
            throw_OutOfRange( "atlas::vector", i, size_ );
        }
        return data_[i];
    }
    template <typename idx_t, typename std::enable_if<std::is_integral<idx_t>::value, int>::type = 0>
    T const& at( idx_t i ) const noexcept( false ) {
        if ( i >= size_ ) {
            throw_OutOfRange( "atlas::vector", i, size_ );
        }
        return data_[i];
    }
    // Unchecked access unless ATLAS_VECTOR_BOUNDS_CHECKING is enabled.
    template <typename idx_t, typename std::enable_if<std::is_integral<idx_t>::value, int>::type = 0>
    T& operator[]( idx_t i ) {
#if ATLAS_VECTOR_BOUNDS_CHECKING
        return at( i );
#else
        return data_[i];
#endif
    }
    template <typename idx_t, typename std::enable_if<std::is_integral<idx_t>::value, int>::type = 0>
    T const& operator[]( idx_t i ) const {
#if ATLAS_VECTOR_BOUNDS_CHECKING
        return at( i );
#else
        return data_[i];
#endif
    }
    const T* data() const { return data_; }
    T* data() { return data_; }
    idx_t size() const { return size_; }
    // Resize to n and fill every element with value (omp::fill).
    template <typename Size, typename std::enable_if<std::is_integral<Size>::value, int>::type = 0>
    void assign( Size n, const value_type& value ) {
        resize( n );
        omp::fill( begin(), begin() + n, value );
    }
    // Resize to distance(first,last) and copy the range in (omp::copy).
    template <typename Iter, typename std::enable_if<!std::is_integral<Iter>::value, int>::type = 0>
    void assign( const Iter& first, const Iter& last ) {
        size_t size = std::distance( first, last );
        resize( size );
        omp::copy( first, last, begin() );
    }
    // One-shot allocation: only legal on a vector that has never allocated
    // (aborts via ATLAS_NOTIMPLEMENTED otherwise); does not copy elements.
    template <typename Size, typename std::enable_if<std::is_integral<Size>::value, int>::type = 0>
    void reserve( Size size ) {
        if ( capacity_ != 0 )
            ATLAS_NOTIMPLEMENTED;
        data_ = new T[size];
        capacity_ = size;
    }
    // Sets the logical size, allocating on first use; growth past the existing
    // capacity aborts.  NOTE(review): resize(0) leaves size() unchanged
    // because of the `size > 0` guard -- confirm that is intended.
    template <typename size_t, typename std::enable_if<std::is_integral<size_t>::value, int>::type = 0>
    void resize( size_t size ) {
        if ( static_cast<idx_t>( size ) > 0 ) {
            if ( capacity_ == 0 ) {
                reserve( size );
            }
            if ( static_cast<idx_t>( size ) > capacity_ ) {
                ATLAS_NOTIMPLEMENTED;
            }
            size_ = size;
        }
    }
    const_iterator begin() const { return data_; }
    const_iterator end() const { return data_ + size_; }
    iterator begin() { return data_; }
    iterator end() { return data_ + size_; }
    const_iterator cbegin() const { return data_; }
    const_iterator cend() const { return data_ + size_; }

private:
    value_type* data_{nullptr};
    idx_t size_{0};     // logical element count
    idx_t capacity_{0}; // allocated element count (0 == never allocated)
};
} // namespace atlas
| 29.832258 | 103 | 0.599265 | [
"vector"
] |
9e74fe3dbc6760f21c96e331cd5704ffa0cb29a0 | 5,243 | h | C | include/SketchFile.h | HerrNamenlos123/TechnicalSketcher | 90c5af44dcaa6060dc202fddb38ea9d869bea194 | [
"MIT"
] | null | null | null | include/SketchFile.h | HerrNamenlos123/TechnicalSketcher | 90c5af44dcaa6060dc202fddb38ea9d869bea194 | [
"MIT"
] | null | null | null | include/SketchFile.h | HerrNamenlos123/TechnicalSketcher | 90c5af44dcaa6060dc202fddb38ea9d869bea194 | [
"MIT"
] | null | null | null | #pragma once
#include "pch.h"
#include "config.h"
#include "FileContent.h"
#include "Layer.h"
/// <summary>
/// Owns the content of a sketch document (layers, shapes, background color)
/// together with its on-disk identity, and tracks whether unsaved changes
/// exist.  Every mutating operation sets the dirty flag.
/// This class guarantees, that always at least 1 layer exists
/// </summary>
class SketchFile {
    FileContent content;                     // layers + shapes
    bool fileChanged = false;                // true when content differs from disk
    std::string filename = DEFAULT_FILENAME; // Filename contains extension
    std::string fileLocation = "";           // empty until saved/opened

public:
    glm::vec4 backgroundColor = DEFAULT_BACKGROUND_COLOR;

    SketchFile() {
    }

    // --- Layer management (all mark the file dirty) -------------------------
    void PushLayer() {
        content.PushLayer();
        fileChanged = true;
    }
    void PushLayer(const std::string& name) {
        content.PushLayer(name);
        fileChanged = true;
    }
    void PushLayer(Layer&& layer) {
        content.PushLayer(std::move(layer));
        fileChanged = true;
    }
    void GeneratePreviews() {
        content.GeneratePreviews();
    }
    const std::vector<Layer>& GetLayers() {
        return content.GetLayers();
    }
    const Layer& GetActiveLayer() {
        return content.GetActiveLayer();
    }
    // Returns a copy of the active layer (new identity, same content).
    Layer DuplicateActiveLayer() {
        return content.GetActiveLayer().Duplicate();
    }
    // Renames the layer with the given id; returns false if it doesn't exist.
    bool SetLayerName(LayerID id, const char* name) {
        auto layer = content.FindLayer(id);
        if (layer.has_value()) { // Layer was found
            layer->get().name = name;
            fileChanged = true;
            return true;
        }
        return false;
    }
    // Snapshot the active layer for undo.
    void SaveActiveLayerState() {
        content.GetActiveLayer().SaveState();
    }
    void UndoAction() {
        content.GetActiveLayer().UndoAction();
    }
    // Layer reordering / removal; each returns false when the id is unknown.
    bool MoveLayerFront(LayerID id) {
        if (!content.MoveLayerFront(id)) {
            return false;
        }
        fileChanged = true;
        return true;
    }
    bool MoveLayerBack(LayerID id) {
        if (!content.MoveLayerBack(id)) {
            return false;
        }
        fileChanged = true;
        return true;
    }
    bool RemoveLayer(LayerID id) {
        if (!content.RemoveLayer(id)) {
            return false;
        }
        fileChanged = true;
        return true;
    }
    bool ActivateLayer(LayerID id) {
        return content.ActivateLayer(id);
    }

    // --- Shape creation on the active layer ---------------------------------
    // Two-point shape (e.g. line-like).
    void AddShape(enum class ShapeType type, glm::vec2 p1, glm::vec2 p2, float thickness, const glm::vec4& color) {
        content.GetActiveLayer().AddShape(type, p1, p2, thickness, color);
        fileChanged = true;
    }
    // Center/radius shape (e.g. circle-like).
    void AddShape(enum class ShapeType type, glm::vec2 center, float radius, float thickness, const glm::vec4& color) {
        content.GetActiveLayer().AddShape(type, center, radius, thickness, color);
        fileChanged = true;
    }
    // Center/radius/angle-range shape (e.g. arc-like).
    void AddShape(enum class ShapeType type, glm::vec2 center, float radius, float startAngle, float endAngle, float thickness, const glm::vec4& color) {
        content.GetActiveLayer().AddShape(type, center, radius, startAngle, endAngle, thickness, color);
        fileChanged = true;
    }
    void AddShapes(std::vector<ShapePTR>&& shapes) {
        content.GetActiveLayer().AddShapes(std::move(shapes));
        fileChanged = true;
    }
    bool RemoveShapes(const std::vector<ShapeID>& ids) {
        if (content.GetActiveLayer().RemoveShapes(ids)) {
            fileChanged = true;
            return true;
        }
        return false;
    }

    // --- Shape movement; false means nothing was moved ----------------------
    bool MoveShapesLeft(const std::vector<ShapeID>& ids, float amount) {
        if (content.GetActiveLayer().MoveShapesLeft(ids, amount)) {
            fileChanged = true;
            return true;
        }
        return false;
    }
    bool MoveShapesRight(const std::vector<ShapeID>& ids, float amount) {
        if (content.GetActiveLayer().MoveShapesRight(ids, amount)) {
            fileChanged = true;
            return true;
        }
        return false;
    }
    bool MoveShapesUp(const std::vector<ShapeID>& ids, float amount) {
        if (content.GetActiveLayer().MoveShapesUp(ids, amount)) {
            fileChanged = true;
            return true;
        }
        return false;
    }
    bool MoveShapesDown(const std::vector<ShapeID>& ids, float amount) {
        if (content.GetActiveLayer().MoveShapesDown(ids, amount)) {
            fileChanged = true;
            return true;
        }
        return false;
    }
    bool MoveShapes(const std::vector<ShapeID>& ids, glm::vec2 amount) {
        if (content.GetActiveLayer().MoveShapes(ids, amount)) {
            fileChanged = true;
            return true;
        }
        return false;
    }

    // Shows the properties UI for a shape; marks the file dirty only when the
    // dialog reports a change.
    void ShowPropertiesWindow(ShapeID id) {
        auto opt = content.GetActiveLayer().FindShape(id);
        if (opt.has_value()) {
            if (opt.value().get().ShowPropertiesWindow()) {
                fileChanged = true;
            }
        }
    }
    std::optional<std::reference_wrapper<const GenericShape>> FindShape(const ShapeID& id) {
        return content.GetActiveLayer().FindShape(id);
    }
    void UpdateWindowTitle();
    // Force the dirty flag (for external mutations of public state such as
    // backgroundColor).
    void FileChanged() {
        fileChanged = true;
    }
    // Serializes the listed shapes; unknown ids are silently skipped.
    nlohmann::json GetJsonFromShapes(const std::vector<ShapeID>& ids) {
        nlohmann::json json = nlohmann::json();
        for (auto id : ids) {
            auto shape = FindShape(id);
            if (shape.has_value()) {
                json.push_back(shape.value().get().GetJson());
            }
        }
        return json;
    }
    bool ContainsChanges() {
        return fileChanged;
    }

    // --- Persistence / export (implemented elsewhere) -----------------------
    bool SaveFile(bool saveAs = false);
    bool OpenFile();
    bool OpenEmptyFile();
    bool OpenFile(const std::string& path, bool silent = false);
    Battery::Texture2D ExportImage(bool transparent = true, float dpi = 300);

    // Full document serialization: layers, background color, file type tag
    // and format version.
    nlohmann::json GetJson() {
        nlohmann::json j = nlohmann::json();
        nlohmann::json layers = nlohmann::json::array();
        for (Layer& layer : content.GetLayers()) {
            layers.push_back(layer.GetJson());
        }
        j["layers"] = layers;
        j["background_color"] = nlohmann::json::array({ backgroundColor.r, backgroundColor.g, backgroundColor.b, backgroundColor.a });
        j["file_type"] = JSON_FILE_TYPE;
        j["file_version"] = JSON_FILE_VERSION;
        return j;
    }
};
| 22.502146 | 150 | 0.690826 | [
"shape",
"vector"
] |
9e7524598228600fe86958bbf6f559163b958fb4 | 1,622 | h | C | old/System/Core/classini.h | DrItanium/AdventureEngine | abb2f492a9cc085fd95e3674d8af65ea6d8cbb81 | [
"BSD-3-Clause"
] | 24 | 2015-04-24T09:53:24.000Z | 2022-02-09T02:34:49.000Z | old/System/Core/classini.h | DrItanium/AdventureEngine | abb2f492a9cc085fd95e3674d8af65ea6d8cbb81 | [
"BSD-3-Clause"
] | 1 | 2022-02-23T01:10:15.000Z | 2022-02-23T01:10:15.000Z | old/System/Core/classini.h | DrItanium/AdventureEngine | abb2f492a9cc085fd95e3674d8af65ea6d8cbb81 | [
"BSD-3-Clause"
] | 16 | 2015-05-26T23:49:42.000Z | 2021-02-08T07:32:28.000Z | /*******************************************************/
/* "C" Language Integrated Production System */
/* */
/* CLIPS Version 6.20 01/31/02 */
/* */
/* */
/*******************************************************/
/*************************************************************/
/* Purpose: */
/* */
/* Principal Programmer(s): */
/* Brian L. Dantes */
/* */
/* Contributing Programmer(s): */
/* */
/* Revision History: */
/* */
/*************************************************************/
#ifndef _H_classini
#define _H_classini

#ifndef _H_constrct
#include "constrct.h"
#endif
#ifndef _H_object
#include "object.h"
#endif

#if OBJECT_SYSTEM

/* LOCALE expands to `extern` for consumers of this header and to nothing
   inside the implementation translation unit (_CLASSINI_SOURCE_). */
#ifdef LOCALE
#undef LOCALE
#endif
#ifdef _CLASSINI_SOURCE_
#define LOCALE
#else
#define LOCALE extern
#endif

/* Initializes the object system for an environment. */
LOCALE void SetupObjectSystem(void *);
#if RUN_TIME
/* Run-time image initialization from precompiled class / slot-name tables. */
LOCALE void ObjectsRunTimeInitialize(void *,DEFCLASS *[],SLOT_NAME *[],DEFCLASS *[],unsigned);
#else
/* Creates the built-in system classes (non run-time builds). */
LOCALE void CreateSystemClasses(void *);
#endif

#endif

#endif
| 27.965517 | 94 | 0.311961 | [
"object"
] |
9e79e7b3104953f4cdca37f0d33a1fd2cf9d97a8 | 84,278 | c | C | impl/stateMachine.c | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | impl/stateMachine.c | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | impl/stateMachine.c | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | /*
* stateMachine.c
*
* Created on: 1 Aug 2014
* Author: benedictpaten
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <ctype.h>
#include <assert.h>
#include "stateMachine.h"
#include "bioioC.h"
#include "pairwiseAligner.h"
#include "discreteHmm.h"
//////////////////////////////////////////////////////////////////////////////
// StateMachine Emission functions for discrete alignments (symbols and kmers)
//////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////// STATIC FUNCTIONS ////////////////////////////////////////////////////////
/* Debug-only sanity check that s is a valid state index of sM
 * (compiles to nothing when NDEBUG is defined). */
static void state_check(StateMachine *sM, State s) {
    assert(s >= 0 && s < sM->stateNumber);
}
/*
 * Validates that c is a legal kmer index, i.e. 0 <= c < NUM_OF_KMERS.
 * The assert fails early in debug builds; the explicit check aborts with a
 * diagnostic in all build types.
 */
void stateMachine_index_check(int64_t c) {
    assert(c >= 0 && c < NUM_OF_KMERS);
    // Bug fix: the runtime check previously used (c > NUM_OF_KMERS), which let
    // the out-of-range index c == NUM_OF_KMERS pass in release builds even
    // though the assert (c < NUM_OF_KMERS) rejects it.
    if ((c < 0) || (c >= NUM_OF_KMERS)) {
        st_errAbort("stateMachine_index_check: Got invalid kmerIndex %lld\n", c);
    }
}
/* Allocates (uninitialized) emission tables for a discrete-alignment state
 * machine: one gap distribution per sequence plus a parameterSetSize^2
 * match matrix. */
static inline void emissions_discrete_initializeEmissionsMatrices(StateMachine *sM) {
    sM->EMISSION_GAP_X_PROBS = st_malloc(sM->parameterSetSize*sizeof(double));
    sM->EMISSION_GAP_Y_MATRIX = st_malloc(sM->parameterSetSize * sizeof(double));
    sM->EMISSION_MATCH_MATRIX = st_malloc(sM->parameterSetSize * sM->parameterSetSize * sizeof(double));
}
/*
void emissions_symbol_setEmissionsToDefaults(StateMachine *sM) {
// initialize
emissions_discrete_initializeEmissionsMatrices(sM);
// Set Match probs to default values
const double EMISSION_MATCH=-2.1149196655034745; //log(0.12064298095701059);
const double EMISSION_TRANSVERSION=-4.5691014376830479; //log(0.010367271172731285);
const double EMISSION_TRANSITION=-3.9833860032220842; //log(0.01862247669752685);
const double M[SYMBOL_NUMBER_NO_N*SYMBOL_NUMBER_NO_N] = {
EMISSION_MATCH, EMISSION_TRANSVERSION, EMISSION_TRANSITION, EMISSION_TRANSVERSION,
EMISSION_TRANSVERSION, EMISSION_MATCH, EMISSION_TRANSVERSION, EMISSION_TRANSITION,
EMISSION_TRANSITION, EMISSION_TRANSVERSION, EMISSION_MATCH, EMISSION_TRANSVERSION,
EMISSION_TRANSVERSION, EMISSION_TRANSITION, EMISSION_TRANSVERSION, EMISSION_MATCH };
memcpy(sM->EMISSION_MATCH_MATRIX, M, sizeof(double)*SYMBOL_NUMBER_NO_N*SYMBOL_NUMBER_NO_N);
// Set Gap probs to default values
const double EMISSION_GAP = -1.6094379124341003; //log(0.2)
const double G[4] = { EMISSION_GAP, EMISSION_GAP, EMISSION_GAP, EMISSION_GAP };
memcpy(sM->EMISSION_GAP_X_PROBS, G, sizeof(double)*SYMBOL_NUMBER_NO_N);
memcpy(sM->EMISSION_GAP_Y_MATRIX, G, sizeof(double)*SYMBOL_NUMBER_NO_N);
}
*/
/* Zeroes the symbolSetSize x symbolSetSize match-emission table. */
static inline void emissions_discrete_initMatchProbsToZero(double *emissionMatchProbs, int64_t symbolSetSize) {
    memset(emissionMatchProbs, 0, symbolSetSize*symbolSetSize*sizeof(double));
}
/* Zeroes a gap-emission table of symbolSetSize entries. */
static inline void emissions_discrete_initGapProbsToZero(double *emissionGapProbs, int64_t symbolSetSize) {
    memset(emissionGapProbs, 0, symbolSetSize*sizeof(double));
}
///////////////////////////////////////////// CORE FUNCTIONS ////////////////////////////////////////////////////////
/* Allocates the discrete emission tables on sM and zeroes all of them. */
void emissions_discrete_initEmissionsToZero(StateMachine *sM) {
    // initialize
    emissions_discrete_initializeEmissionsMatrices(sM);
    // set match matrix to zeros
    emissions_discrete_initMatchProbsToZero(sM->EMISSION_MATCH_MATRIX, sM->parameterSetSize);
    // set gap matrix to zeros
    emissions_discrete_initGapProbsToZero(sM->EMISSION_GAP_X_PROBS, sM->parameterSetSize);
    emissions_discrete_initGapProbsToZero(sM->EMISSION_GAP_Y_MATRIX, sM->parameterSetSize);
}
/*
 * Maps a nucleotide character to its index in the "ACEGOT" ordering used by
 * the discrete emission tables: A=0, C=1, E=2, G=3, O=4, T=5.  Any other
 * character (e.g. N or n) maps to the out-of-range sentinel NUM_OF_KMERS + 1,
 * a hack to make sure such bases get a zero model mean downstream.
 */
int64_t emissions_discrete_getBaseIndex(void *base) {
    static const char ordering[] = "ACEGOT";
    char c = *(char *) base;
    if (c != '\0') {
        const char *hit = strchr(ordering, c);
        if (hit != NULL) {
            return (int64_t) (hit - ordering);
        }
    }
    return NUM_OF_KMERS + 1;
}
/*
 * Maps a NUL-terminated kmer string to its index over the base ordering used
 * by emissions_discrete_getBaseIndex, computed as a mixed-radix number with
 * SYMBOL_NUMBER_NO_N digits.  An empty string maps to the out-of-range
 * sentinel NUM_OF_KMERS + 1.
 * NOTE(review): only the empty string is rejected; strings shorter than
 * KMER_LENGTH would read past their end at `last` -- confirm callers always
 * pass full-length kmers.
 */
int64_t emissions_discrete_getKmerIndexFromKmer(void *kmer) {
    int64_t kmerLen = strlen((char*) kmer);
    if (kmerLen == 0) {
        return NUM_OF_KMERS + 1;
    }
    int64_t axisLength = NUM_OF_KMERS;
    int64_t l = axisLength / SYMBOL_NUMBER_NO_N;
    int64_t i = 0;
    int64_t x = 0;
    // accumulate digit * place-value for all but the last position
    while(l > 1) {
        x += l * emissions_discrete_getBaseIndex((char *)kmer + i);
        i += 1;
        l = l / SYMBOL_NUMBER_NO_N;
    }
    // last base contributes with place value 1
    int64_t last = KMER_LENGTH - 1;
    x += emissions_discrete_getBaseIndex((char *)kmer + last);
    return x;
}
/*
 * Indexes a kmer given as KMER_LENGTH raw (non-NUL-terminated) bytes: copies
 * the bytes into a NUL-terminated scratch buffer and delegates to
 * emissions_discrete_getKmerIndexFromKmer.
 */
int64_t emissions_discrete_getKmerIndexFromPtr(void *kmer) {
    char *scratch = malloc((KMER_LENGTH + 1) * sizeof(char));
    memcpy(scratch, kmer, KMER_LENGTH);
    scratch[KMER_LENGTH] = '\0';
    int64_t index = emissions_discrete_getKmerIndexFromKmer(scratch);
    free(scratch);
    return index;
}
/*
 * Log probability of emitting `base` in a gap state, looked up by base index.
 * NOTE(review): the i == 4 branch returns log(0.25); under the 6-letter
 * ACEGOT ordering index 4 is 'O', but the uniform 1/4 value suggests this
 * branch was written for a 4-letter alphabet where index 4 meant N -- verify.
 */
double emissions_symbol_getGapProb(const double *emissionGapProbs, void *base) {
    int64_t i = emissions_discrete_getBaseIndex(base);
    if(i == 4) {
        return -1.386294361; //log(0.25)
    }
    return emissionGapProbs[i];
}
/*
 * Log probability of aligning base x to base y, read from the flattened
 * match table (row stride SYMBOL_NUMBER_NO_N).
 * NOTE(review): as in emissions_symbol_getGapProb, the index-4 special case
 * (returning log(0.25^2)) looks inherited from a 4-letter alphabet -- verify.
 */
double emissions_symbol_getMatchProb(const double *emissionMatchProbs, void *x, void *y) {
    int64_t iX = emissions_discrete_getBaseIndex(x);
    int64_t iY = emissions_discrete_getBaseIndex(y);
    if(iX == 4 || iY == 4) {
        return -2.772588722; //log(0.25**2)
    }
    return emissionMatchProbs[iX * SYMBOL_NUMBER_NO_N + iY];
}
/*
 * Log probability of emitting a gap for the kmer starting at x_i: copies
 * sM->kmerLength bytes into a NUL-terminated scratch string, resolves the
 * kmer's index via kmer_id and reads the supplied gap table.  A NULL kmer or
 * an index beyond the table yields LOG_ZERO.
 */
double emissions_kmer_getGapProb(StateMachine *sM, const double *emissionGapProbs, void *x_i) {
    // even though this shouldn't happen, check for null x_i and return logzero
    if (x_i == NULL) {
        return LOG_ZERO;
    }
    // make temp x_i meant to work with getKmer
    // Bug fix: allocate kmerLength + 1 bytes; the previous kmerLength-byte
    // allocation made the terminating kmer_i[kmerLength] = '\0' write a
    // one-byte heap overflow.
    char *kmer_i = malloc((sM->kmerLength + 1) * sizeof(char));
    for (int64_t x = 0; x < sM->kmerLength; x++) {
        kmer_i[x] = *((char *) x_i + x);
    }
    kmer_i[sM->kmerLength] = '\0';
    int64_t i = kmer_id(kmer_i, sM->alphabet, sM->alphabetSize, sM->kmerLength);
    free(kmer_i);
    double p = i > NUM_OF_KMERS ? LOG_ZERO : emissionGapProbs[i];
    return p;
}
/*
 * Log probability of aligning kmer x to kmer y, read from the flattened
 * NUM_OF_KMERS x NUM_OF_KMERS match table.
 * NOTE(review): the sentinel index NUM_OF_KMERS + 1 (empty/unknown kmers) is
 * not filtered here and would index past the table -- confirm inputs are
 * always valid full-length kmers.
 */
double emissions_kmer_getMatchProb(const double *emissionMatchProbs, void *x, void *y) {
    int64_t iX = emissions_discrete_getKmerIndexFromKmer(x);
    int64_t iY = emissions_discrete_getKmerIndexFromKmer(y);
    int64_t tableIndex = iX * NUM_OF_KMERS + iY;
    return emissionMatchProbs[tableIndex];
}
/////////////////////////////////////////
// functions for signal/kmer alignment //
/////////////////////////////////////////
/////////////////////////////////////////// STATIC FUNCTIONS ////////////////////////////////////////////////////////
/*
 * Allocates (uninitialized) emission tables for a signal-level state machine:
 * a small kmer-skip table of nbSkipParams entries, plus one MODEL_PARAMS-wide
 * row per kmer for both the extra-event (Iy) and match (M) distributions.
 */
static inline void emissions_signal_initializeEmissionsMatrices(StateMachine *sM, int64_t nbSkipParams) {
    // changed to 30 for skip prob bins
    // the kmer/gap and skip (f(|ui-1 - ui|)) probs have smaller tables, either 30 for the skip or parameterSetSize
    // for the naive kmer skip one
    // see note at emissions_signal_initEmissionsToZero about this change
    sM->EMISSION_GAP_X_PROBS = st_malloc(nbSkipParams * sizeof(double));
    // both the Iy and M - type states use the event/kmer match model so the matrices need to be the same size
    sM->EMISSION_GAP_Y_MATRIX = st_malloc((sM->parameterSetSize * MODEL_PARAMS) * sizeof(double));
    sM->EMISSION_MATCH_MATRIX = st_malloc((sM->parameterSetSize * MODEL_PARAMS) * sizeof(double));
}
/* Zeroes a per-kmer event-model table (parameterSetSize rows of MODEL_PARAMS). */
static inline void emissions_signal_initMatchMatrixToZero(double *matchModel, int64_t parameterSetSize) {
    memset(matchModel, 0, ((parameterSetSize * MODEL_PARAMS)) * sizeof(double));
}
/* Zeroes the kmer-skip probability table. */
static inline void emissions_signal_initKmerSkipTableToZero(double *skipModel, int64_t parameterSetSize) {
    memset(skipModel, 0, parameterSetSize * sizeof(double));
}
/*
 * Accessors into the flattened event model: each kmer owns MODEL_PARAMS
 * consecutive doubles [level_mean, level_sd, noise_mean, noise_sd,
 * noise_lambda].  Out-of-range kmer indices (> NUM_OF_KMERS) yield 0.0.
 */
double emissions_signal_getModelLevelMean(const double *eventModel, int64_t kmerIndex) {
    return kmerIndex > NUM_OF_KMERS ? 0.0 : eventModel[(kmerIndex * MODEL_PARAMS)];
}
static inline double emissions_signal_getModelLevelSd(const double *eventModel, int64_t kmerIndex) {
    return kmerIndex > NUM_OF_KMERS ? 0.0 : eventModel[(kmerIndex * MODEL_PARAMS + 1)];
}
static inline double emissions_signal_getModelFluctuationMean(const double *eventModel, int64_t kmerIndex) {
    return kmerIndex > NUM_OF_KMERS ? 0.0 : eventModel[(kmerIndex * MODEL_PARAMS + 2)];
}
static inline double emissions_signal_getModelFluctuationSd(const double *eventModel, int64_t kmerIndex) {
    return kmerIndex > NUM_OF_KMERS ? 0.0 : eventModel[(kmerIndex * MODEL_PARAMS + 3)];
}
static inline double emissions_signal_getModelFluctuationLambda(const double *eventModel, int64_t kmerIndex) {
    return kmerIndex > NUM_OF_KMERS ? 0.0 : eventModel[(kmerIndex * MODEL_PARAMS + 4)];
}
static inline double emissions_signal_logInvGaussPdf(double eventNoise, double modelNoiseMean,
double modelNoiseLambda) {
double l_twoPi = 1.8378770664093453; // log(2*pi)
double l_eventNoise = log(eventNoise);
double a = (eventNoise - modelNoiseMean) / modelNoiseMean;
double l_modelNoseLambda = log(modelNoiseLambda);
// returns Log-space
return (l_modelNoseLambda - l_twoPi - 3 * l_eventNoise - modelNoiseLambda * a * a / eventNoise) / 2;
}
/*
 * Log-density of a Gaussian N(mu, sigma^2) at x.  A degenerate sigma of 0
 * returns LOG_ZERO instead of dividing by zero.
 */
static inline double emissions_signal_logGaussPdf(double x, double mu, double sigma) {
    if (sigma == 0.0) {
        return LOG_ZERO;
    }
    static const double logInvSqrt2Pi = -0.91893853320467267; // log(1/sqrt(2*pi))
    double z = (x - mu) / sigma;
    // returns log-space
    return logInvSqrt2Pi - log(sigma) + (-0.5 * z * z);
}
static double emissions_signal_poissonPosteriorProb(int64_t n, double duration) {
assert(n <= 5);
// Experimented with different values of c,
//double c = 0.00570570570571; // max of PDF
double c = 0.00332005312085; // mode of all test durations
//double c = 0.0045; // guess of somewhere in between
// Experimenting with changing the rate parameter, started with 2, but then the p(c|N=2) == p(c|N=1) which
// doesn't make sense. When 0 < beta < 1 then the p(c|N=0) > p(c|N=1). At 1.25, it seems to have the correct
// curve.
//double l_beta = 0.22314355131420976; // log(1.25)
double l_beta = 0.1397619423751586; // log(1.15)
double lambda = duration / c;
double l_factorials[6] = {0.0, 0.0, 0.69314718056, 1.79175946923, 3.17805383035, 4.78749174278};
//result = ((n+1)*np.log(2)) + (n*np.log(lam)) - np.log(factorial(n)) - (2*lam)
double a = (n+1) * l_beta;
double b = n * log(lambda);
double d = 2 * lambda;
// returns log-space
//double prob = a + b - l_factorials[n] - d;
//return prob;
return a + b - l_factorials[n] - d;
}
///////////////////////////////////////////// CORE FUNCTIONS ////////////////////////////////////////////////////////
/* Public wrapper around the static Gaussian log-pdf helper. */
double emissions_signal_logGaussianProbabilityDensity(double x, double mu, double sigma) {
    return emissions_signal_logGaussPdf(x, mu, sigma);
}
/* Public wrapper around the static inverse-Gaussian log-pdf helper. */
double emissions_signal_logInverseGaussianProbabilityDensity(double x, double mu, double lambda) {
    return emissions_signal_logInvGaussPdf(x, mu, lambda);
}
/*
 * Removes per-read scaling from an event mean so it can be compared against
 * the unscaled pore model:  (x + var*level_mean - scale*level_mean - shift) / var
 */
double emissions_signal_descaleEventMean_JordanStyle(double scaledEvent, double levelMean,
                                                     double scale, double shift, double var) {
    double adjusted = scaledEvent + var * levelMean; // add back var-weighted level
    adjusted = adjusted - scale * levelMean;         // remove read-specific scale
    adjusted = adjusted - shift;                     // remove read-specific shift
    return adjusted / var;
}
/* Allocates the signal-level emission tables on sM and zeroes all of them
 * (skip table of nbSkipParams entries, extra-event and match event models). */
void emissions_signal_initEmissionsToZero(StateMachine *sM, int64_t nbSkipParams) {
    // initialize
    emissions_signal_initializeEmissionsMatrices(sM, nbSkipParams);
    // set kmer skip matrix to zeros
    emissions_signal_initKmerSkipTableToZero(sM->EMISSION_GAP_X_PROBS, nbSkipParams);
    // set extra event matrix to zeros
    emissions_signal_initMatchMatrixToZero(sM->EMISSION_GAP_Y_MATRIX, sM->parameterSetSize);
    // set match matrix to zeros
    emissions_signal_initMatchMatrixToZero(sM->EMISSION_MATCH_MATRIX, sM->parameterSetSize);
}
/*
 * Computes the skip-probability bin for two consecutive kmers.  `kmers` points
 * at KMER_LENGTH + 1 bases: kmer_im1 = kmers[0..KMER_LENGTH) and
 * kmer_i = kmers[1..KMER_LENGTH+1).  The bin is |mean(kmer_i) - mean(kmer_im1)|
 * quantized into 0.5 pA buckets and clamped to the last bin (29).
 */
int64_t emissions_signal_getKmerSkipBin(double *matchModel, void *kmers) {
    // Bug fix: allocate KMER_LENGTH + 1 bytes for each scratch string; the
    // previous KMER_LENGTH-byte allocations overflowed by one byte when the
    // terminating '\0' was written at index KMER_LENGTH.
    char *kmer_im1 = malloc((KMER_LENGTH + 1) * sizeof(char));
    for (int64_t x = 0; x < KMER_LENGTH; x++) {
        kmer_im1[x] = *((char *)kmers+x);
    }
    kmer_im1[KMER_LENGTH] = '\0';
    // make kmer_i
    char *kmer_i = malloc((KMER_LENGTH + 1) * sizeof(char));
    for (int64_t x = 0; x < KMER_LENGTH; x++) {
        kmer_i[x] = *((char *)kmers+(x+1));
    }
    kmer_i[KMER_LENGTH] = '\0';
    // get indices
    int64_t k_i= emissions_discrete_getKmerIndexFromKmer(kmer_i);
    int64_t k_im1 = emissions_discrete_getKmerIndexFromKmer(kmer_im1);
    // get the expected mean current for each one
    double u_ki = emissions_signal_getModelLevelMean(matchModel, k_i);
    double u_kim1 = emissions_signal_getModelLevelMean(matchModel, k_im1);
    // find the difference
    double d = fabs(u_ki - u_kim1);
    // get the 'bin' for skip prob, clamp to the last bin
    int64_t bin = (int64_t)(d / 0.5); // 0.5 pA bins right now
    bin = bin >= 30 ? 29 : bin;
    free(kmer_im1);
    free(kmer_i);
    return bin;
}
/*
 * Looks up the skip probability for the kmer pair at `kmers`.  The skip table
 * stores 30 "beta" bins followed by 30 "alpha" bins; getAlpha selects the
 * second half (offset +30).
 */
double emissions_signal_getBetaOrAlphaSkipProb(StateMachine *sM, void *kmers, bool getAlpha) {
    // get the skip bin
    int64_t bin = emissions_signal_getKmerSkipBin(sM->EMISSION_MATCH_MATRIX, kmers);
    return getAlpha ? sM->EMISSION_GAP_X_PROBS[bin+30] : sM->EMISSION_GAP_X_PROBS[bin];
}
/*
 * Log Gaussian probability of the event's mean current under the kmer's
 * modelled level distribution.  `kmer` points one base before the kmer of
 * interest (designed to work with getKmer2), so the scratch copy starts at
 * offset 1.  `event` points at the event record whose first double is the
 * event mean.
 */
double emissions_signal_logGaussMatchProb(const double *eventModel, void *kmer, void *event) {
    // Bug fix: allocate KMER_LENGTH + 1 bytes; the previous KMER_LENGTH-byte
    // allocation made the terminating '\0' write a one-byte heap overflow.
    char *kmer_i = malloc((KMER_LENGTH + 1) * sizeof(char));
    for (int64_t x = 0; x < KMER_LENGTH; x++) {
        kmer_i[x] = *((char *)kmer+(x+1));
    }
    kmer_i[KMER_LENGTH] = '\0';
    // get event mean, and kmer index
    double eventMean = *(double *) event;
    int64_t kmerIndex = emissions_discrete_getKmerIndexFromKmer(kmer_i);
    double l_inv_sqrt_2pi = log(0.3989422804014327); // log(1/sqrt(2*pi))
    double modelMean = emissions_signal_getModelLevelMean(eventModel, kmerIndex);
    double modelStdDev = emissions_signal_getModelLevelSd(eventModel, kmerIndex);
    double l_modelSD = log(modelStdDev);
    double a = (eventMean - modelMean) / modelStdDev;
    // clean up
    free(kmer_i);
    // returns log space
    return l_inv_sqrt_2pi - l_modelSD + (-0.5f * a * a);
}
/*
 * Log probability of an event under the kmer's model using two independent
 * distributions: Gaussian on the level mean and inverse-Gaussian on the noise.
 * `kmer` points one base before the kmer of interest (works with getKmer2);
 * the event record is laid out as [mean, noise, ...] doubles.
 */
double emissions_signal_getEventMatchProbWithTwoDists(const double *eventModel, void *kmer, void *event) {
    // Bug fix: allocate KMER_LENGTH + 1 bytes; the previous KMER_LENGTH-byte
    // allocation made the terminating '\0' write a one-byte heap overflow.
    char *kmer_i = malloc((KMER_LENGTH + 1) * sizeof(char));
    for (int64_t x = 0; x < KMER_LENGTH; x++) {
        kmer_i[x] = *((char *)kmer+(x+1));
    }
    kmer_i[KMER_LENGTH] = '\0';
    // get event mean, and noise
    double eventMean = *(double *) event;
    double eventNoise = *(double *) ((char *)event + sizeof(double));
    // get the kmer index
    int64_t kmerIndex = emissions_discrete_getKmerIndexFromKmer(kmer_i);
    // first calculate the prob of the level mean
    double expectedLevelMean = emissions_signal_getModelLevelMean(eventModel, kmerIndex);
    double expectedLevelSd = emissions_signal_getModelLevelSd(eventModel, kmerIndex);
    double levelProb = emissions_signal_logGaussPdf(eventMean, expectedLevelMean, expectedLevelSd);
    // now calculate the prob of the noise mean
    double expectedNoiseMean = emissions_signal_getModelFluctuationMean(eventModel, kmerIndex);
    double modelNoiseLambda = emissions_signal_getModelFluctuationLambda(eventModel, kmerIndex);
    double noiseProb = emissions_signal_logInvGaussPdf(eventNoise, expectedNoiseMean, modelNoiseLambda);
    // clean up
    free(kmer_i);
    return levelProb + noiseProb;
}
/*
 * Log probability that `event` was emitted over the n consecutive kmers
 * starting at `kmers` (log-sum of the per-kmer match probs, normalized by
 * log(n)).  Returns LOG_ZERO when the run would pass the end of the
 * upper-case (aligned) sequence.
 * NOTE(review): the accumulator starts at 0.0 (probability 1 in log space)
 * before the logAdd loop; starting at LOG_ZERO looks intended -- confirm
 * against logAdd's semantics.
 * NOTE(review): `l` is loop-invariant (KMER_LENGTH * n), so the same lastBase
 * position is re-checked on every iteration -- verify this is the intended
 * bound check.
 */
double emissions_signal_multipleKmerMatchProb(const double *eventModel, void *kmers, void *event, int64_t n) {
    // this is meant to work with getKmer2
    double p = 0.0;
    for (int64_t i = 0; i < n; i++) {
        // check if we're going to run off the end of the sequence
        int64_t l = (KMER_LENGTH * n);
        char lastBase = *((char *)kmers + l); // new way
        // if we're not, logAdd up all the probs for the next n kmers
        if (isupper(lastBase)) {
            char *x_i = ((char *)kmers + i); // new way
            p = logAdd(p, emissions_signal_getEventMatchProbWithTwoDists(eventModel, x_i, event));
        } else { // otherwise return zero prob of emitting this many kmers from this event
            return LOG_ZERO;
        }
    }
    // normalize by the number of kmers considered
    return p - log(n);
}
/*
 * Log posterior probability of n kmers given the event's duration; the
 * duration is the third double of the event record [mean, noise, duration].
 */
double emissions_signal_getDurationProb(void *event, int64_t n) {
    double duration = *(double *) ((char *)event + (2 * sizeof(double)));
    return emissions_signal_poissonPosteriorProb(n, duration);
}
/*
 * Log probability of an event (mean, noise) under a bivariate Gaussian whose
 * per-kmer marginals come from the event model and whose correlation
 * coefficient is stored in eventModel[0].  `kmer` points one base before the
 * kmer of interest (works with getKmer2).
 */
double emissions_signal_getBivariateGaussPdfMatchProb(const double *eventModel, void *kmer, void *event) {
    // wrangle event data: [mean, noise, ...] doubles
    double eventMean = *(double *) event;
    double eventNoise = *(double *) ((char*)event + sizeof(double));
    // correlation coefficient is the 0th member of the event model
    double p = eventModel[0];
    double pSq = p * p;
    // Bug fix: allocate KMER_LENGTH + 1 bytes; the previous KMER_LENGTH-byte
    // allocation made the terminating '\0' write a one-byte heap overflow.
    char *kmer_i = malloc((KMER_LENGTH + 1) * sizeof(char));
    for (int64_t x = 0; x < KMER_LENGTH; x++) {
        kmer_i[x] = *((char *)kmer+(x+1));
    }
    kmer_i[KMER_LENGTH] = '\0';
    int64_t kmerIndex = emissions_discrete_getKmerIndexFromKmer(kmer_i);
    // get the µ and σ for the level and noise for the model
    double levelMean = emissions_signal_getModelLevelMean(eventModel, kmerIndex);
    double levelStdDev = emissions_signal_getModelLevelSd(eventModel, kmerIndex);
    double noiseMean = emissions_signal_getModelFluctuationMean(eventModel, kmerIndex);
    double noiseStdDev = emissions_signal_getModelFluctuationSd(eventModel, kmerIndex);
    // evaluate the bivariate normal log-density
    double log_inv_2pi = -1.8378770664093453;
    double expC = -1 / (2 * (1 - pSq));
    double xu = (eventMean - levelMean) / levelStdDev;
    double yu = (eventNoise - noiseMean) / noiseStdDev;
    double a = expC * ((xu * xu) + (yu * yu) - (2 * p * xu * yu));
    double c = log_inv_2pi - log(levelStdDev * noiseStdDev * sqrt(1 - pSq));
    // clean
    free(kmer_i);
    return c + a;
}
// Log density of the event's (descaled) level mean under the HDP model for this
// kmer. Returns LOG_ZERO when no kmer is supplied.
double emissions_signal_getHdpKmerDensity(StateMachine *sM, void *x_i, void *e_j, bool ignore) {
    (void) ignore;
    StateMachine3_HDP *self = (StateMachine3_HDP *) sM;
    if (x_i == NULL) {
        return LOG_ZERO;
    }
    // make a NUL-terminated copy of the kmer (+1 for the terminator)
    char *kmer_i = malloc((self->model.kmerLength + 1) * sizeof(char));
    for (int64_t x = 0; x < self->model.kmerLength; x++) {
        kmer_i[x] = *((char *) x_i + x);
    }
    kmer_i[self->model.kmerLength] = '\0';
    // wrangle e_j data: the first double of the event record is the level mean
    double eventMean = *(double *) e_j;
    int64_t kmerIndex = kmer_id(kmer_i, self->model.alphabet, self->model.alphabetSize, self->model.kmerLength);
    double levelMean = emissions_signal_getModelLevelMean(self->model.EMISSION_MATCH_MATRIX, kmerIndex);
    // descale the observed mean into model space; a stack variable replaces the
    // original's needless heap allocation (st_malloc/free) for a single double
    double normedMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, levelMean,
                                                                      self->model.scale, self->model.shift,
                                                                      self->model.var);
    double density = (1 / self->model.var) * get_nanopore_kmer_density(self->hdpModel, x_i, &normedMean);
    free(kmer_i);
    return log(density);
}
// Log probability of an event matching a kmer using ONLY the level mean
// (Gaussian on the descaled mean; the noise term is deliberately omitted).
// Meant to work with getKmer (NOT getKmer2). `match` selects the match table
// versus the gap-Y table. Returns LOG_ZERO when no kmer is supplied.
double emissions_signal_strawManGetKmerEventMatchProbWithDescaling_MeanOnly(StateMachine *sM, void *x_i, void *e_j, bool match) {
    StateMachine3 *self = (StateMachine3 *) sM; // downcast
    if (x_i == NULL) {
        return LOG_ZERO;
    }
    // wrangle e_j data: the first double is the event level mean
    double eventMean = *(double *) e_j;
    // NUL-terminated copy of the kmer; +1 for the terminator -- the original
    // allocation was one byte short, so kmer_i[kmerLength] = '\0' overflowed
    char *kmer_i = malloc((self->model.kmerLength + 1) * sizeof(char));
    for (int64_t x = 0; x < self->model.kmerLength; x++) {
        kmer_i[x] = *((char *) x_i + x);
    }
    kmer_i[self->model.kmerLength] = '\0';
    // look up the model parameters for this kmer
    int64_t kmerIndex = kmer_id(kmer_i, self->model.alphabet, self->model.alphabetSize, self->model.kmerLength);
    double *eventModel = match ? self->model.EMISSION_MATCH_MATRIX : self->model.EMISSION_GAP_Y_MATRIX;
    double levelMean = emissions_signal_getModelLevelMean(eventModel, kmerIndex);
    double levelStdDev = emissions_signal_getModelLevelSd(eventModel, kmerIndex);
    // descale the observed mean into model space before scoring
    eventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, levelMean, self->model.scale,
                                                              self->model.shift, self->model.var);
    double l_probEventMean = emissions_signal_logGaussPdf(eventMean, levelMean, levelStdDev);
    // clean up
    free(kmer_i);
    return l_probEventMean;
}
// Log probability of an event matching a kmer: Gaussian on the descaled level
// mean plus inverse-Gaussian on the event noise. Meant to work with getKmer
// (NOT getKmer2). `match` selects the match table versus the gap-Y table.
// Returns LOG_ZERO when no kmer is supplied.
double emissions_signal_strawManGetKmerEventMatchProbWithDescaling(StateMachine *sM, void *x_i, void *e_j, bool match) {
    StateMachine3 *self = (StateMachine3 *) sM; // downcast
    if (x_i == NULL) {
        return LOG_ZERO;
    }
    // wrangle e_j data: [mean, noise, ...] doubles
    double eventMean = *(double *) e_j;
    double eventNoise = *(double *) ((char *) e_j + sizeof(double));
    // a zero noise makes the inverse-Gaussian log-pdf NaN, so clamp it
    if (eventNoise == 0) {
        eventNoise = 0.000000001;
    }
    // NUL-terminated copy of the kmer; +1 for the terminator -- the original
    // allocation was one byte short, so kmer_i[kmerLength] = '\0' overflowed
    char *kmer_i = malloc((self->model.kmerLength + 1) * sizeof(char));
    for (int64_t x = 0; x < self->model.kmerLength; x++) {
        kmer_i[x] = *((char *) x_i + x);
    }
    kmer_i[self->model.kmerLength] = '\0';
    // look up the model parameters for this kmer
    int64_t kmerIndex = kmer_id(kmer_i, self->model.alphabet, self->model.alphabetSize, self->model.kmerLength);
    double *eventModel = match ? self->model.EMISSION_MATCH_MATRIX : self->model.EMISSION_GAP_Y_MATRIX;
    double levelMean = emissions_signal_getModelLevelMean(eventModel, kmerIndex);
    double levelStdDev = emissions_signal_getModelLevelSd(eventModel, kmerIndex);
    // descale the observed mean into model space before scoring
    eventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, levelMean, self->model.scale,
                                                              self->model.shift, self->model.var);
    double noiseMean = emissions_signal_getModelFluctuationMean(eventModel, kmerIndex);
    double modelNoiseLambda = emissions_signal_getModelFluctuationLambda(eventModel, kmerIndex);
    double l_probEventMean = emissions_signal_logGaussPdf(eventMean, levelMean, levelStdDev);
    double l_probEventNoise = emissions_signal_logInvGaussPdf(eventNoise, noiseMean, modelNoiseLambda);
    // clean up
    free(kmer_i);
    return l_probEventMean + l_probEventNoise;
}
// Log probability of an event matching a kmer WITHOUT descaling: Gaussian on
// the raw level mean plus inverse-Gaussian on the event noise. Meant to work
// with getKmer (NOT getKmer2). Returns LOG_ZERO when no kmer is supplied.
double emissions_signal_strawManGetKmerEventMatchProb(StateMachine *sM, void *x_i, void *e_j, bool match) {
    StateMachine3 *self = (StateMachine3 *) sM;
    if (x_i == NULL) {
        return LOG_ZERO;
    }
    // wrangle e_j data: [mean, noise, ...] doubles
    double eventMean = *(double *) e_j;
    double eventNoise = *(double *) ((char *) e_j + sizeof(double));
    // a zero noise makes the inverse-Gaussian log-pdf NaN; clamp it the same way
    // the descaling variant of this function does
    if (eventNoise == 0) {
        eventNoise = 0.000000001;
    }
    // NUL-terminated copy of the kmer; +1 for the terminator -- the original
    // allocation was one byte short, so kmer_i[kmerLength] = '\0' overflowed
    char *kmer_i = malloc((self->model.kmerLength + 1) * sizeof(char));
    for (int64_t x = 0; x < self->model.kmerLength; x++) {
        kmer_i[x] = *((char *) x_i + x);
    }
    kmer_i[self->model.kmerLength] = '\0';
    // look up the model parameters for this kmer
    int64_t kmerIndex = kmer_id(kmer_i, self->model.alphabet, self->model.alphabetSize, self->model.kmerLength);
    double *eventModel = match ? self->model.EMISSION_MATCH_MATRIX : self->model.EMISSION_GAP_Y_MATRIX;
    double levelMean = emissions_signal_getModelLevelMean(eventModel, kmerIndex);
    double levelStdDev = emissions_signal_getModelLevelSd(eventModel, kmerIndex);
    double noiseMean = emissions_signal_getModelFluctuationMean(eventModel, kmerIndex);
    double modelNoiseLambda = emissions_signal_getModelFluctuationLambda(eventModel, kmerIndex);
    double l_probEventMean = emissions_signal_logGaussPdf(eventMean, levelMean, levelStdDev);
    double l_probEventNoise = emissions_signal_logInvGaussPdf(eventNoise, noiseMean, modelNoiseLambda);
    // clean up
    free(kmer_i);
    return l_probEventMean + l_probEventNoise;
}
// Apply read-specific scaling to the LEVEL parameters of both emission tables.
// Per-kmer layout: level_mean, level_stdev, sd_mean, sd_stdev, sd_lambda
// (already adjusted for the correlation coefficient).
void emissions_signal_scaleEmissions(StateMachine *sM, double scale, double shift, double var) {
    int64_t tableLength = sM->parameterSetSize * MODEL_PARAMS;
    for (int64_t i = 0; i < tableLength; i += MODEL_PARAMS) {
        // level_mean -> mean * scale + shift; level_stdev -> stdev * var
        sM->EMISSION_MATCH_MATRIX[i] = scale * sM->EMISSION_MATCH_MATRIX[i] + shift;
        sM->EMISSION_MATCH_MATRIX[i + 1] *= var;
        // the extra-event (gap-Y) table gets the same level adjustments
        sM->EMISSION_GAP_Y_MATRIX[i] = scale * sM->EMISSION_GAP_Y_MATRIX[i] + shift;
        sM->EMISSION_GAP_Y_MATRIX[i + 1] *= var;
    }
}
// Apply read-specific scaling to the NOISE (fluctuation) parameters of both
// emission tables. Per-kmer layout: level_mean, level_stdev, noise_mean,
// noise_stdev, noise_lambda.
void emissions_signal_scaleNoise(StateMachine *sM, NanoporeReadAdjustmentParameters npp) {
    for (int64_t i = 0; i < (sM->parameterSetSize * MODEL_PARAMS); i += MODEL_PARAMS) {
        // match table: noise_mean *= scale_sd; noise_lambda *= var_sd;
        // then noise_sd = sqrt(adjusted_noise_mean^3 / adjusted_noise_lambda)
        sM->EMISSION_MATCH_MATRIX[i + 2] = sM->EMISSION_MATCH_MATRIX[i + 2] * npp.scale_sd;
        sM->EMISSION_MATCH_MATRIX[i + 4] = sM->EMISSION_MATCH_MATRIX[i + 4] * npp.var_sd;
        sM->EMISSION_MATCH_MATRIX[i + 3] = sqrt(pow(sM->EMISSION_MATCH_MATRIX[i + 2], 3.0)
                                                / sM->EMISSION_MATCH_MATRIX[i + 4]);
        // gap-Y table: same adjustments. BUG FIX: the noise_sd recomputation
        // previously divided by the MATCH table's lambda (EMISSION_MATCH_MATRIX[i+4])
        // instead of the gap-Y table's own lambda -- a copy-paste error.
        sM->EMISSION_GAP_Y_MATRIX[i + 2] = sM->EMISSION_GAP_Y_MATRIX[i + 2] * npp.scale_sd;
        sM->EMISSION_GAP_Y_MATRIX[i + 4] = sM->EMISSION_GAP_Y_MATRIX[i + 4] * npp.var_sd;
        sM->EMISSION_GAP_Y_MATRIX[i + 3] = sqrt(pow(sM->EMISSION_GAP_Y_MATRIX[i + 2], 3.0)
                                                / sM->EMISSION_GAP_Y_MATRIX[i + 4]);
    }
}
// Apply read-specific scaling to BOTH the level and the noise parameters of
// both emission tables. Per-kmer layout: level_mean, level_stdev, noise_mean,
// noise_stdev, noise_lambda (already adjusted for the correlation coefficient).
void emissions_signal_scaleModel(StateMachine *sM,
                                 double scale, double shift, double var,
                                 double scale_sd, double var_sd) {
    for (int64_t i = 0; i < (sM->parameterSetSize * MODEL_PARAMS); i += MODEL_PARAMS) {
        // level adjustments: mean -> mean * scale + shift; stdev -> stdev * var
        sM->EMISSION_MATCH_MATRIX[i] = sM->EMISSION_MATCH_MATRIX[i] * scale + shift;
        sM->EMISSION_MATCH_MATRIX[i + 1] = sM->EMISSION_MATCH_MATRIX[i + 1] * var;
        sM->EMISSION_GAP_Y_MATRIX[i] = sM->EMISSION_GAP_Y_MATRIX[i] * scale + shift;
        sM->EMISSION_GAP_Y_MATRIX[i + 1] = sM->EMISSION_GAP_Y_MATRIX[i + 1] * var;
        // match-table noise: mean *= scale_sd; lambda *= var_sd;
        // sd = sqrt(adjusted_mean^3 / adjusted_lambda)
        sM->EMISSION_MATCH_MATRIX[i + 2] = sM->EMISSION_MATCH_MATRIX[i + 2] * scale_sd;
        sM->EMISSION_MATCH_MATRIX[i + 4] = sM->EMISSION_MATCH_MATRIX[i + 4] * var_sd;
        sM->EMISSION_MATCH_MATRIX[i + 3] = sqrt(pow(sM->EMISSION_MATCH_MATRIX[i + 2], 3.0)
                                                / sM->EMISSION_MATCH_MATRIX[i + 4]);
        // gap-Y-table noise: same adjustments. BUG FIX: the sd recomputation
        // previously divided by the MATCH table's lambda rather than the gap-Y
        // table's own lambda -- a copy-paste error.
        sM->EMISSION_GAP_Y_MATRIX[i + 2] = sM->EMISSION_GAP_Y_MATRIX[i + 2] * scale_sd;
        sM->EMISSION_GAP_Y_MATRIX[i + 4] = sM->EMISSION_GAP_Y_MATRIX[i + 4] * var_sd;
        sM->EMISSION_GAP_Y_MATRIX[i + 3] = sqrt(pow(sM->EMISSION_GAP_Y_MATRIX[i + 2], 3.0)
                                                / sM->EMISSION_GAP_Y_MATRIX[i + 4]);
    }
}
////////////////////////////
// EM emissions functions //
////////////////////////////
// Fill the match-emission table with log expectations from the trained HMM.
static void emissions_em_loadMatchProbs(double *emissionMatchProbs, Hmm *hmm, int64_t matchState) {
    HmmDiscrete *hmmD = (HmmDiscrete *) hmm;
    int64_t setSize = hmm->parameterSetSize;
    for (int64_t x = 0; x < setSize; x++) {
        double *row = emissionMatchProbs + x * setSize;
        for (int64_t y = 0; y < setSize; y++) {
            row[y] = log(hmmD->getEmissionExpFcn(hmm, matchState, x, y));
        }
    }
}
// Symmetrised variant: each off-diagonal entry is the log of the mean of the
// (x, y) and (y, x) expectations, mirrored into both cells of the table.
static void emissions_em_loadMatchProbsSymmetrically(double *emissionMatchProbs, Hmm *hmm, int64_t matchState) {
    HmmDiscrete *hmmD = (HmmDiscrete *) hmm;
    int64_t setSize = hmm->parameterSetSize;
    for (int64_t x = 0; x < setSize; x++) {
        // diagonal entries need no averaging
        emissionMatchProbs[x * setSize + x] = log(hmmD->getEmissionExpFcn(hmm, matchState, x, x));
        for (int64_t y = x + 1; y < setSize; y++) {
            double meanExpectation = (hmmD->getEmissionExpFcn(hmm, matchState, x, y)
                                      + hmmD->getEmissionExpFcn(hmm, matchState, y, x)) / 2.0;
            double logMean = log(meanExpectation);
            emissionMatchProbs[x * setSize + y] = logMean;
            emissionMatchProbs[y * setSize + x] = logMean;
        }
    }
}
// Sum the 2-D emission expectations for `state` into a 1-D profile, collapsing
// over the y symbol when collapseToX is set, or over the x symbol otherwise.
static void emissions_em_collapseMatrixEmissions(Hmm *hmm, int64_t state, double *gapEmissions, bool collapseToX) {
    HmmDiscrete *hmmD = (HmmDiscrete *) hmm;
    int64_t setSize = hmm->parameterSetSize;
    for (int64_t x = 0; x < setSize; x++) {
        for (int64_t y = 0; y < setSize; y++) {
            int64_t target = collapseToX ? x : y;
            gapEmissions[target] += hmmD->getEmissionExpFcn(hmm, state, x, y);
        }
    }
}
// Build a gap-emission profile by averaging the collapsed expectations over all
// supplied gap states, then normalising into a log-probability distribution.
static void emissions_em_loadGapProbs(double *emissionGapProbs, Hmm *hmm,
                                      int64_t *xGapStates, int64_t xGapStateNo,
                                      int64_t *yGapStates, int64_t yGapStateNo) {
    int64_t setSize = hmm->parameterSetSize;
    // start from an empty profile
    for (int64_t i = 0; i < setSize; i++) {
        emissionGapProbs[i] = 0.0;
    }
    // accumulate x-gap states (collapse over y) and y-gap states (collapse over x)
    for (int64_t i = 0; i < xGapStateNo; i++) {
        emissions_em_collapseMatrixEmissions(hmm, xGapStates[i], emissionGapProbs, 1);
    }
    for (int64_t i = 0; i < yGapStateNo; i++) {
        emissions_em_collapseMatrixEmissions(hmm, yGapStates[i], emissionGapProbs, 0);
    }
    // normalise to a distribution and move into log space
    double total = 0.0;
    for (int64_t i = 0; i < setSize; i++) {
        total += emissionGapProbs[i];
    }
    for (int64_t i = 0; i < setSize; i++) {
        emissionGapProbs[i] = log(emissionGapProbs[i] / total);
    }
}
///////////////////////////////////
///////////////////////////////////
//Five state state-machine
///////////////////////////////////
///////////////////////////////////
/////////////////////////////////////////// STATIC FUNCTIONS ////////////////////////////////////////////////////////
// Alignments may only begin in the match state.
static double stateMachine5_startStateProb(StateMachine *sM, int64_t state) {
    state_check(sM, state);
    if (state == match) {
        return 0;
    }
    return LOG_ZERO;
}
// Ragged (overhanging) starts enter through one of the long-gap states.
static double stateMachine5_raggedStartStateProb(StateMachine *sM, int64_t state) {
    state_check(sM, state);
    if (state == longGapX || state == longGapY) {
        return 0;
    }
    return LOG_ZERO;
}
// Ending an alignment is scored like transitioning into a match from the
// current state; unknown states score 0.0.
static double stateMachine5_endStateProb(StateMachine *sM, int64_t state) {
    StateMachine5 *sM5 = (StateMachine5 *) sM;
    state_check(sM, state);
    if (state == match) {
        return sM5->TRANSITION_MATCH_CONTINUE;
    }
    if (state == shortGapX) {
        return sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X;
    }
    if (state == shortGapY) {
        return sM5->TRANSITION_MATCH_FROM_SHORT_GAP_Y;
    }
    if (state == longGapX) {
        return sM5->TRANSITION_MATCH_FROM_LONG_GAP_X;
    }
    if (state == longGapY) {
        return sM5->TRANSITION_MATCH_FROM_LONG_GAP_Y;
    }
    return 0.0;
}
// Ragged (overhanging) ends are scored like entering/extending a long gap
// from the current state; unknown states score 0.0.
static double stateMachine5_raggedEndStateProb(StateMachine *sM, int64_t state) {
    StateMachine5 *sM5 = (StateMachine5 *) sM;
    state_check(sM, state);
    if (state == match || state == shortGapX) {
        return sM5->TRANSITION_GAP_LONG_OPEN_X;
    }
    if (state == shortGapY) {
        return sM5->TRANSITION_GAP_LONG_OPEN_Y;
    }
    if (state == longGapX) {
        return sM5->TRANSITION_GAP_LONG_EXTEND_X;
    }
    if (state == longGapY) {
        return sM5->TRANSITION_GAP_LONG_EXTEND_Y;
    }
    return 0.0;
}
// Per-cell DP update for the 5-state machine: applies every allowed transition
// into `current` from the lower (gap-X), middle (diagonal/match) and upper
// (gap-Y) neighbour cells via the supplied doTransition callback.
// NOTE(review): the st_errAbort on entry makes everything below unreachable --
// the 5-state machine is currently disabled and this body is kept for reference.
static void stateMachine5_cellCalculate(StateMachine *sM,
                                        void *current, void *lower, void *middle, void *upper,
                                        void *cX, void *cY,
                                        void (*doTransition)(double *, double *, // fromCells, toCells
                                                             int64_t, int64_t, // from, to
                                                             double, double, // emissionProb, transitionProb
                                                             void *), // extraArgs
                                        void *extraArgs) {
    st_errAbort("5-state stateMachine not implemented\n");
    StateMachine5 *sM5 = (StateMachine5 *) sM;
    // transitions that consume a symbol from X only (gap in Y)
    if (lower != NULL) {
        double eP = sM5->getXGapProbFcn(sM5->model.EMISSION_GAP_X_PROBS, cX);
        doTransition(lower, current, match, shortGapX, eP, sM5->TRANSITION_GAP_SHORT_OPEN_X, extraArgs);
        doTransition(lower, current, shortGapX, shortGapX, eP, sM5->TRANSITION_GAP_SHORT_EXTEND_X, extraArgs);
        // how come these are commented out?
        //doTransition(lower, current, shortGapY, shortGapX, eP, sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X, extraArgs);
        doTransition(lower, current, match, longGapX, eP, sM5->TRANSITION_GAP_LONG_OPEN_X, extraArgs);
        doTransition(lower, current, longGapX, longGapX, eP, sM5->TRANSITION_GAP_LONG_EXTEND_X, extraArgs);
        //doTransition(lower, current, longGapY, longGapX, eP, sM5->TRANSITION_GAP_LONG_SWITCH_TO_X, extraArgs);
    }
    // transitions that consume a symbol from both sequences (into match)
    if (middle != NULL) {
        double eP = sM5->getMatchProbFcn(sM5->model.EMISSION_MATCH_MATRIX, cX, cY);
        doTransition(middle, current, match, match, eP, sM5->TRANSITION_MATCH_CONTINUE, extraArgs);
        doTransition(middle, current, shortGapX, match, eP, sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X, extraArgs);
        doTransition(middle, current, shortGapY, match, eP, sM5->TRANSITION_MATCH_FROM_SHORT_GAP_Y, extraArgs);
        doTransition(middle, current, longGapX, match, eP, sM5->TRANSITION_MATCH_FROM_LONG_GAP_X, extraArgs);
        doTransition(middle, current, longGapY, match, eP, sM5->TRANSITION_MATCH_FROM_LONG_GAP_Y, extraArgs);
    }
    // transitions that consume a symbol from Y only (gap in X)
    if (upper != NULL) {
        double eP = sM5->getYGapProbFcn(sM5->model.EMISSION_GAP_Y_MATRIX, cY);
        doTransition(upper, current, match, shortGapY, eP, sM5->TRANSITION_GAP_SHORT_OPEN_Y, extraArgs);
        doTransition(upper, current, shortGapY, shortGapY, eP, sM5->TRANSITION_GAP_SHORT_EXTEND_Y, extraArgs);
        //doTransition(upper, current, shortGapX, shortGapY, eP, sM5->TRANSITION_GAP_SHORT_SWITCH_TO_Y, extraArgs);
        doTransition(upper, current, match, longGapY, eP, sM5->TRANSITION_GAP_LONG_OPEN_Y, extraArgs);
        doTransition(upper, current, longGapY, longGapY, eP, sM5->TRANSITION_GAP_LONG_EXTEND_Y, extraArgs);
        //doTransition(upper, current, longGapX, longGapY, eP, sM5->TRANSITION_GAP_LONG_SWITCH_TO_Y, extraArgs);
    }
}
///////////////////////////////////////////// CORE FUNCTIONS ////////////////////////////////////////////////////////
// Construct a 5-state (match, short/long gap in X, short/long gap in Y) state
// machine with symmetric default transitions and caller-supplied emission
// probability callbacks.
// NOTE(review): the st_errAbort on entry makes everything below (including the
// allocation) unreachable -- the 5-state machine is currently disabled and the
// construction code is kept for reference.
StateMachine *stateMachine5_construct(StateMachineType type, int64_t parameterSetSize,
                                      void (*setEmissionsDefaults)(StateMachine *sM),
                                      double (*gapXProbFcn)(const double *, void *),
                                      double (*gapYProbFcn)(const double *, void *),
                                      double (*matchProbFcn)(const double *, void *, void *),
                                      void (*cellCalcUpdateExpFcn)(double *fromCells, double *toCells,
                                                                   int64_t from, int64_t to,
                                                                   double eP, double tP, void *extraArgs)) {
    /*
     * Description of (potentially ambiguous) arguments:
     * parameterSetSize = the number of kmers that we are using, of len(kmer) = 1, then the number is 4 (or 5 if we're
     * including N). It's 25 if len(kmer) = 2, it's 4096 in the 6-mer model.
     *
     */
    st_errAbort("5-state stateMachine not implemented");
    StateMachine5 *sM5 = st_malloc(sizeof(StateMachine5));
    if(type != fiveState && type != fiveStateAsymmetric) {
        st_errAbort("Wrong type for five state %i", type);
    }
    // setup transitions, specific to stateMachine5 (values are log probabilities)
    sM5->TRANSITION_MATCH_CONTINUE = -0.030064059121770816; //0.9703833696510062f
    sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X = -1.272871422049609; //1.0 - gapExtend - gapSwitch = 0.280026392297485
    sM5->TRANSITION_MATCH_FROM_LONG_GAP_X = -5.673280173170473; //1.0 - gapExtend = 0.00343657420938
    sM5->TRANSITION_GAP_SHORT_OPEN_X = -4.34381910900448; //0.0129868352330243
    sM5->TRANSITION_GAP_SHORT_EXTEND_X = -0.3388262689231553; //0.7126062401851738f;
    sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X = -4.910694825551255; //0.0073673675173412815f;
    sM5->TRANSITION_GAP_LONG_OPEN_X = -6.30810595366929; //(1.0 - match - 2*gapOpenShort)/2 = 0.001821479941473
    sM5->TRANSITION_GAP_LONG_EXTEND_X = -0.003442492794189331; //0.99656342579062f;
    sM5->TRANSITION_GAP_LONG_SWITCH_TO_X = -6.30810595366929; //0.99656342579062f;
    // make it symmetric: Y-axis transitions mirror the X-axis values
    sM5->TRANSITION_MATCH_FROM_SHORT_GAP_Y = sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X;
    sM5->TRANSITION_MATCH_FROM_LONG_GAP_Y = sM5->TRANSITION_MATCH_FROM_LONG_GAP_X;
    sM5->TRANSITION_GAP_SHORT_OPEN_Y = sM5->TRANSITION_GAP_SHORT_OPEN_X;
    sM5->TRANSITION_GAP_SHORT_EXTEND_Y = sM5->TRANSITION_GAP_SHORT_EXTEND_X;
    sM5->TRANSITION_GAP_SHORT_SWITCH_TO_Y = sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X;
    sM5->TRANSITION_GAP_LONG_OPEN_Y = sM5->TRANSITION_GAP_LONG_OPEN_X;
    sM5->TRANSITION_GAP_LONG_EXTEND_Y = sM5->TRANSITION_GAP_LONG_EXTEND_X;
    sM5->TRANSITION_GAP_LONG_SWITCH_TO_Y = sM5->TRANSITION_GAP_LONG_SWITCH_TO_X;
    // setup the parent class (virtual functions and bookkeeping)
    sM5->model.type = type;
    sM5->model.parameterSetSize = parameterSetSize;
    sM5->model.stateNumber = 5;
    sM5->model.matchState = match;
    sM5->model.startStateProb = stateMachine5_startStateProb;
    sM5->model.endStateProb = stateMachine5_endStateProb;
    sM5->model.raggedStartStateProb = stateMachine5_raggedStartStateProb;
    sM5->model.raggedEndStateProb = stateMachine5_raggedEndStateProb;
    sM5->model.cellCalculate = stateMachine5_cellCalculate;
    sM5->model.cellCalculateUpdateExpectations = cellCalcUpdateExpFcn;
    sM5->getXGapProbFcn = gapXProbFcn;
    sM5->getYGapProbFcn = gapYProbFcn;
    sM5->getMatchProbFcn = matchProbFcn;
    // set emissions to defaults (or zeros)
    setEmissionsDefaults((StateMachine *) sM5);
    return (StateMachine *) sM5;
}
// Exchange the values pointed to by a and b.
static void switchDoubles(double *a, double *b) {
    double tmp = *b;
    *b = *a;
    *a = tmp;
}
///////////////////////////////
// EM - StateMachine5 functions/
///////////////////////////////
// Load asymmetric 5-state transition and emission parameters from a trained
// HMM. For each axis independently, the long- and short-gap parameter sets are
// swapped if EM training left the "long" states with a smaller extend
// probability than the "short" states.
static void stateMachine5_loadAsymmetric(StateMachine5 *sM5, Hmm *hmm) {
    if (hmm->type != fiveStateAsymmetric) {
        st_errAbort("Wrong hmm type");
    }
    // X-axis transitions (log of the HMM's transition expectations)
    sM5->TRANSITION_MATCH_CONTINUE = log(hmm->getTransitionsExpFcn(hmm, match, match)); //0.9703833696510062f
    sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X = log(hmm->getTransitionsExpFcn(hmm, shortGapX, match));
    sM5->TRANSITION_MATCH_FROM_LONG_GAP_X = log(hmm->getTransitionsExpFcn(hmm, longGapX, match));
    sM5->TRANSITION_GAP_SHORT_OPEN_X = log(hmm->getTransitionsExpFcn(hmm, match, shortGapX));
    sM5->TRANSITION_GAP_SHORT_EXTEND_X = log(hmm->getTransitionsExpFcn(hmm, shortGapX, shortGapX));
    sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X = log(hmm->getTransitionsExpFcn(hmm, shortGapY, shortGapX));
    sM5->TRANSITION_GAP_LONG_OPEN_X = log(hmm->getTransitionsExpFcn(hmm, match, longGapX));
    sM5->TRANSITION_GAP_LONG_EXTEND_X = log(hmm->getTransitionsExpFcn(hmm, longGapX, longGapX));
    sM5->TRANSITION_GAP_LONG_SWITCH_TO_X = log(hmm->getTransitionsExpFcn(hmm, longGapY, longGapX));
    if(sM5->TRANSITION_GAP_SHORT_EXTEND_X > sM5->TRANSITION_GAP_LONG_EXTEND_X) {
        // Switch the long and short gap parameters if one the "long states" have a smaller
        // extend probability than the "short states", as can randomly happen during EM training.
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_EXTEND_X), &(sM5->TRANSITION_GAP_LONG_EXTEND_X));
        switchDoubles(&(sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X), &(sM5->TRANSITION_MATCH_FROM_LONG_GAP_X));
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_OPEN_X), &(sM5->TRANSITION_GAP_LONG_OPEN_X));
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X), &(sM5->TRANSITION_GAP_LONG_SWITCH_TO_X));
    }
    // Y-axis transitions, loaded independently of X (asymmetric model)
    sM5->TRANSITION_MATCH_FROM_SHORT_GAP_Y = log(hmm->getTransitionsExpFcn(hmm, shortGapY, match));
    sM5->TRANSITION_MATCH_FROM_LONG_GAP_Y = log(hmm->getTransitionsExpFcn(hmm, longGapY, match));
    sM5->TRANSITION_GAP_SHORT_OPEN_Y = log(hmm->getTransitionsExpFcn(hmm, match, shortGapY));
    sM5->TRANSITION_GAP_SHORT_EXTEND_Y = log(hmm->getTransitionsExpFcn(hmm, shortGapY, shortGapY));
    sM5->TRANSITION_GAP_SHORT_SWITCH_TO_Y = log(hmm->getTransitionsExpFcn(hmm, shortGapX, shortGapY));
    sM5->TRANSITION_GAP_LONG_OPEN_Y = log(hmm->getTransitionsExpFcn(hmm, match, longGapY));
    sM5->TRANSITION_GAP_LONG_EXTEND_Y = log(hmm->getTransitionsExpFcn(hmm, longGapY, longGapY));
    sM5->TRANSITION_GAP_LONG_SWITCH_TO_Y = log(hmm->getTransitionsExpFcn(hmm, longGapX, longGapY));
    if(sM5->TRANSITION_GAP_SHORT_EXTEND_Y > sM5->TRANSITION_GAP_LONG_EXTEND_Y) {
        // Switch the long and short gap parameters if one the "long states" have a smaller
        // extend probability than the "short states", as can randomly happen during EM training.
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_EXTEND_Y), &(sM5->TRANSITION_GAP_LONG_EXTEND_Y));
        switchDoubles(&(sM5->TRANSITION_MATCH_FROM_SHORT_GAP_Y), &(sM5->TRANSITION_MATCH_FROM_LONG_GAP_Y));
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_OPEN_Y), &(sM5->TRANSITION_GAP_LONG_OPEN_Y));
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_SWITCH_TO_Y), &(sM5->TRANSITION_GAP_LONG_SWITCH_TO_Y));
    }
    // match emissions plus per-axis gap emissions (each axis uses only its own gap states)
    emissions_em_loadMatchProbs(sM5->model.EMISSION_MATCH_MATRIX, hmm, match);
    int64_t xGapStates[2] = { shortGapX, longGapX };
    int64_t yGapStates[2] = { shortGapY, longGapY };
    emissions_em_loadGapProbs(sM5->model.EMISSION_GAP_X_PROBS, hmm, xGapStates, 2, NULL, 0);
    emissions_em_loadGapProbs(sM5->model.EMISSION_GAP_Y_MATRIX, hmm, NULL, 0, yGapStates, 2);
}
// Load symmetric 5-state transition and emission parameters from a trained HMM:
// each transition is the log of the mean of its X- and Y-axis expectations, and
// the Y-axis parameters are then mirrored from the X-axis values.
static void stateMachine5_loadSymmetric(StateMachine5 *sM5, Hmm *hmm) {
    if (hmm->type != fiveState) {
        // NOTE(review): the printf duplicates the st_errAbort message on stdout
        printf("Wrong hmm type");
        st_errAbort("Wrong hmm type");
    }
    sM5->TRANSITION_MATCH_CONTINUE = log(hmm->getTransitionsExpFcn(hmm, match, match));
    sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X = log(
            (hmm->getTransitionsExpFcn(hmm, shortGapX, match) +
             hmm->getTransitionsExpFcn(hmm, shortGapY, match)) / 2); //1.0 - gapExtend - gapSwitch = 0.280026392297485
    sM5->TRANSITION_MATCH_FROM_LONG_GAP_X = log(
            (hmm->getTransitionsExpFcn(hmm, longGapX, match) +
             hmm->getTransitionsExpFcn(hmm, longGapY, match)) / 2); //1.0 - gapExtend = 0.00343657420938
    sM5->TRANSITION_GAP_SHORT_OPEN_X = log(
            (hmm->getTransitionsExpFcn(hmm, match, shortGapX) +
             hmm->getTransitionsExpFcn(hmm, match, shortGapY)) / 2); //0.0129868352330243
    sM5->TRANSITION_GAP_SHORT_EXTEND_X = log(
            (hmm->getTransitionsExpFcn(hmm, shortGapX, shortGapX) +
             hmm->getTransitionsExpFcn(hmm, shortGapY, shortGapY)) / 2); //0.7126062401851738f;
    sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X = log(
            (hmm->getTransitionsExpFcn(hmm, shortGapX, shortGapY) +
             hmm->getTransitionsExpFcn(hmm, shortGapY, shortGapX)) / 2); //0.0073673675173412815f;
    sM5->TRANSITION_GAP_LONG_OPEN_X = log(
            (hmm->getTransitionsExpFcn(hmm, match, longGapX) +
             hmm->getTransitionsExpFcn(hmm, match, longGapY)) / 2); //(1.0 - match - 2*gapOpenShort)/2 = 0.001821479941473
    sM5->TRANSITION_GAP_LONG_EXTEND_X = log(
            (hmm->getTransitionsExpFcn(hmm, longGapX, longGapX) +
             hmm->getTransitionsExpFcn(hmm, longGapY, longGapY)) / 2);
    sM5->TRANSITION_GAP_LONG_SWITCH_TO_X = log(
            (hmm->getTransitionsExpFcn(hmm, longGapX, longGapY) +
             hmm->getTransitionsExpFcn(hmm, longGapY, longGapX)) / 2); //0.0073673675173412815f;
    if(sM5->TRANSITION_GAP_SHORT_EXTEND_X > sM5->TRANSITION_GAP_LONG_EXTEND_X) {
        //Switch the long and short gap parameters if one the "long states" have a smaller extend probability than the "short states", as can randomly happen during EM training.
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_EXTEND_X), &(sM5->TRANSITION_GAP_LONG_EXTEND_X));
        switchDoubles(&(sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X), &(sM5->TRANSITION_MATCH_FROM_LONG_GAP_X));
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_OPEN_X), &(sM5->TRANSITION_GAP_LONG_OPEN_X));
        switchDoubles(&(sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X), &(sM5->TRANSITION_GAP_LONG_SWITCH_TO_X));
    }
    // symmetric model: mirror the (possibly swapped) X parameters onto Y
    sM5->TRANSITION_MATCH_FROM_SHORT_GAP_Y = sM5->TRANSITION_MATCH_FROM_SHORT_GAP_X;
    sM5->TRANSITION_MATCH_FROM_LONG_GAP_Y = sM5->TRANSITION_MATCH_FROM_LONG_GAP_X;
    sM5->TRANSITION_GAP_SHORT_OPEN_Y = sM5->TRANSITION_GAP_SHORT_OPEN_X;
    sM5->TRANSITION_GAP_SHORT_EXTEND_Y = sM5->TRANSITION_GAP_SHORT_EXTEND_X;
    sM5->TRANSITION_GAP_SHORT_SWITCH_TO_Y = sM5->TRANSITION_GAP_SHORT_SWITCH_TO_X;
    sM5->TRANSITION_GAP_LONG_OPEN_Y = sM5->TRANSITION_GAP_LONG_OPEN_X;
    sM5->TRANSITION_GAP_LONG_EXTEND_Y = sM5->TRANSITION_GAP_LONG_EXTEND_X;
    sM5->TRANSITION_GAP_LONG_SWITCH_TO_Y = sM5->TRANSITION_GAP_LONG_SWITCH_TO_X;
    // emissions: symmetric match table; both gap profiles average over all four gap states
    emissions_em_loadMatchProbsSymmetrically(sM5->model.EMISSION_MATCH_MATRIX, hmm, match);
    int64_t xGapStates[2] = { shortGapX, longGapX };
    int64_t yGapStates[2] = { shortGapY, longGapY };
    emissions_em_loadGapProbs(sM5->model.EMISSION_GAP_X_PROBS, hmm, xGapStates, 2, yGapStates, 2);
    emissions_em_loadGapProbs(sM5->model.EMISSION_GAP_Y_MATRIX, hmm, xGapStates, 2, yGapStates, 2);
}
//////////////////////////////////////////////////////////////////////
//Three state state-machine [StateMachine3 and StateMachine3Vanilla]
//////////////////////////////////////////////////////////////////////
/////////////////////////////////////////// STATIC FUNCTIONS ////////////////////////////////////////////////////////
// Transitions //
// Validate that the state count agrees with the machine type: 3 states for
// threeState/threeStateHdp, 5 states for fiveState.
static inline bool stateMachine3_checkStateNumber(int64_t stateNumber, StateMachineType type) {
    bool threeStateOk = (type == threeState || type == threeStateHdp) && (stateNumber == 3);
    bool fiveStateOk = (type == fiveState) && (stateNumber == 5);
    return (threeStateOk || fiveStateOk) ? TRUE : FALSE;
}
// Alignments may only begin in the match state.
static inline double stateMachine3_startStateProb(StateMachine *sM, int64_t state) {
    state_check(sM, state);
    if (state == match) {
        return 0;
    }
    return LOG_ZERO;
}
// Ragged (overhanging) starts enter through either gap state.
static inline double stateMachine3_raggedStartStateProb(StateMachine *sM, int64_t state) {
    state_check(sM, state);
    if (state == shortGapX || state == shortGapY) {
        return 0;
    }
    return LOG_ZERO;
}
// Ending an alignment is scored like transitioning into a match from the
// current state; unknown states score 0.0.
static inline double stateMachine3_endStateProb(StateMachine *sM, int64_t state) {
    StateMachine3 *sM3 = (StateMachine3 *) sM;
    state_check(sM, state);
    if (state == match) {
        return sM3->TRANSITION_MATCH_CONTINUE;
    }
    if (state == shortGapX) {
        return sM3->TRANSITION_MATCH_FROM_GAP_X;
    }
    if (state == shortGapY) {
        return sM3->TRANSITION_MATCH_FROM_GAP_Y;
    }
    return 0.0;
}
// Ragged (overhanging) ends are scored like opening (from match, averaged over
// both axes) or extending a gap; unknown states score 0.0.
static inline double stateMachine3_raggedEndStateProb(StateMachine *sM, int64_t state) {
    StateMachine3 *sM3 = (StateMachine3 *) sM;
    state_check(sM, state);
    if (state == match) {
        return (sM3->TRANSITION_GAP_OPEN_X + sM3->TRANSITION_GAP_OPEN_Y) / 2.0;
    }
    if (state == shortGapX) {
        return sM3->TRANSITION_GAP_EXTEND_X;
    }
    if (state == shortGapY) {
        return sM3->TRANSITION_GAP_EXTEND_Y;
    }
    return 0.0;
}
// Set the 3-state transition log-probabilities to the nucleotide-alignment
// defaults (values are log probabilities; the linear-space value is given in
// the trailing comment on each line). Symmetric in X and Y.
void stateMachine3_setTransitionsToNucleotideDefaults(StateMachine *sM) {
    StateMachine3 *sM3 = (StateMachine3 *) sM;
    sM3->TRANSITION_MATCH_CONTINUE = -0.030064059121770816; //0.9703833696510062f
    sM3->TRANSITION_MATCH_FROM_GAP_X = -1.272871422049609; //1.0 - gapExtend - gapSwitch = 0.280026392297485
    sM3->TRANSITION_MATCH_FROM_GAP_Y = -1.272871422049609; //1.0 - gapExtend - gapSwitch = 0.280026392297485
    sM3->TRANSITION_GAP_OPEN_X = -4.21256642; //0.0129868352330243
    sM3->TRANSITION_GAP_OPEN_Y = -4.21256642; //0.0129868352330243
    sM3->TRANSITION_GAP_EXTEND_X = -0.3388262689231553; //0.7126062401851738f;
    sM3->TRANSITION_GAP_EXTEND_Y = -0.3388262689231553; //0.7126062401851738f;
    sM3->TRANSITION_GAP_SWITCH_TO_X = -4.910694825551255; //0.0073673675173412815f;
    sM3->TRANSITION_GAP_SWITCH_TO_Y = -4.910694825551255; //0.0073673675173412815f;
}
// Set the 3-state transition log-probabilities to the nanopore defaults,
// derived from per-read step/skip/stay probabilities (see trailing comments).
// Direct gap-to-gap switching is disallowed (LOG_ZERO).
void stateMachine3_setTransitionsToNanoporeDefaults(StateMachine *sM) {
    StateMachine3 *sM3 = (StateMachine3 *) sM;
    sM3->TRANSITION_MATCH_CONTINUE = -0.23552123624314988; // log(step_prob) (0.79015888282447311)
    sM3->TRANSITION_MATCH_FROM_GAP_X = -0.21880828092192281; // log(1 - skip_prob) (1 - 0.19652425498269727)
    sM3->TRANSITION_MATCH_FROM_GAP_Y = -0.013406326748077823; // log(1 - stay_prob) (0.98668313780708949)
    sM3->TRANSITION_GAP_OPEN_X = -1.6269694202638481; // log(skip_prob) (0.19652425498269727)
    sM3->TRANSITION_GAP_OPEN_Y = -4.3187242127300092; // log(1 - (skip_prob + step_prob)) (0.013316862192829682)
    sM3->TRANSITION_GAP_EXTEND_X = -1.6269694202638481; // log(skip_prob) (0.19652425498269727)
    sM3->TRANSITION_GAP_EXTEND_Y = -4.3187242127239411; // log(stay_prob) 0.013316862192910478
    sM3->TRANSITION_GAP_SWITCH_TO_X = LOG_ZERO;
    sM3->TRANSITION_GAP_SWITCH_TO_Y = LOG_ZERO;
}
// Parse one transition probability from the given list entry; aborts with a
// message naming the transition if the entry does not parse as a double.
static double stateMachine3_parseTransition(stList *transitions, int64_t index, const char *description) {
    double transition;
    if (sscanf(stList_get(transitions, index), "%lf", &transition) != 1) {
        st_errAbort("stateMachine3_loadFromFile: error parsing %s transition\n", description);
    }
    return transition;
}

// Load the 3-state transition log-probabilities from a list of strings.
// Expected order: m->m, m->x, m->y, x->m, x->x, x->y (skipped), y->m, y->x, y->y.
void stateMachine3_loadTransitionsFromFile(StateMachine *sM, stList *transitions) {
    StateMachine3 *self = (StateMachine3 *) sM;
    self->TRANSITION_MATCH_CONTINUE = log(stateMachine3_parseTransition(transitions, 0, "match->match"));
    self->TRANSITION_GAP_OPEN_X = log(stateMachine3_parseTransition(transitions, 1, "match->gapX"));
    self->TRANSITION_GAP_OPEN_Y = log(stateMachine3_parseTransition(transitions, 2, "match->gapY"));
    self->TRANSITION_MATCH_FROM_GAP_X = log(stateMachine3_parseTransition(transitions, 3, "gapX->match"));
    self->TRANSITION_GAP_EXTEND_X = log(stateMachine3_parseTransition(transitions, 4, "gapX->gapX"));
    // index 5 (gapX->gapY) is skipped. NOTE(review): the original comment claims
    // it is "set to log zero", but TRANSITION_GAP_SWITCH_TO_X is never assigned
    // here -- callers rely on it being initialised elsewhere; confirm.
    self->TRANSITION_MATCH_FROM_GAP_Y = log(stateMachine3_parseTransition(transitions, 6, "gapY->match"));
    self->TRANSITION_GAP_SWITCH_TO_Y = log(stateMachine3_parseTransition(transitions, 7, "gapY->gapX"));
    self->TRANSITION_GAP_EXTEND_Y = log(stateMachine3_parseTransition(transitions, 8, "gapY->gapY"));
}
/* Temp change for r9 experiment
void stateMachine3_setTransitionsToNanoporeDefaults(StateMachine *sM) {
StateMachine3 *sM3 = (StateMachine3 *) sM;
sM3->TRANSITION_MATCH_CONTINUE = -0.43078291609245423; // log(step_prob) (0.65)
sM3->TRANSITION_MATCH_FROM_GAP_X = -0.41551544396166595; // log(1 - skip_prob) (1 - 0.34)
sM3->TRANSITION_MATCH_FROM_GAP_Y = -0.010050335853501451; // log(1 - stay_prob) (1 - 0.01)
sM3->TRANSITION_GAP_OPEN_X = -1.0788096613719298; // log(skip_prob) (0.34)
sM3->TRANSITION_GAP_OPEN_Y = -4.6051701859880909; // log(1 - (skip_prob + step_prob)) (1 - (0.34 + 0.65))
sM3->TRANSITION_GAP_EXTEND_X = -1.0788096613719298; // log(skip_prob) (0.34)
sM3->TRANSITION_GAP_EXTEND_Y = -4.6051701859880909; // log(stay_prob) 0.013316862192910478
sM3->TRANSITION_GAP_SWITCH_TO_X = LOG_ZERO;
sM3->TRANSITION_GAP_SWITCH_TO_Y = LOG_ZERO;
}
*/
/*
 * Replace the match-emission level mean and sd for every kmer the HDP has
 * observations for with the HDP posterior expected value and standard
 * deviation.  Aborts if the stateMachine and the NanoporeHDP disagree on
 * alphabet, alphabet size, or kmer length.
 */
void stateMachine3_setModelToHdpExpectedValues(StateMachine *sM, NanoporeHDP *nhdp) {
    // Sanity-check that the HDP and stateMachine describe the same kmer space.
    if (strcmp(sM->alphabet, nhdp->alphabet) != 0) {
        st_errAbort("stateMachine3_setModelToHdpExpectedValues: "
                    "This Nanopore Hdp and stateMachine have different alphabets\n");
    }
    if (sM->alphabetSize != nhdp->alphabet_size) {
        st_errAbort("stateMachine3_setModelToHdpExpectedValues: StateMachine alphabet size and Nanopore HDP"
                    "alphabet size aren't the same");
    }
    if (sM->kmerLength != nhdp->kmer_length) {
        st_errAbort("stateMachine3_setModelToHdpExpectedValues: StateMachine kmer length is not the same as "
                    "NanoporeHdp kmer length");
    }
    stList *kmers = path_listPotentialKmers(nhdp->kmer_length, nhdp->alphabet_size, nhdp->alphabet);
    for (int64_t i = 0; i < stList_length(kmers); i++) {
        char *kmer = (char *)stList_get(kmers, i);
        int64_t kmerIndex = kmer_id(kmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
        // Each kmer owns MODEL_PARAMS consecutive slots: [0] = level mean, [1] = level sd.
        int64_t meanIndex = kmerIndex * MODEL_PARAMS;
        int64_t sdIndex = kmerIndex * MODEL_PARAMS + 1;
        bool isObs = hdp_check_for_observed(nhdp->hdp, kmerIndex);
        if (isObs) {
            double newMean = dir_proc_expected_val(nhdp->hdp, kmerIndex);
            double newSd = sqrt(dir_proc_variance(nhdp->hdp, kmerIndex));
            sM->EMISSION_MATCH_MATRIX[meanIndex] = newMean;
            sM->EMISSION_MATCH_MATRIX[sdIndex] = newSd;
        }
    }
    stList_destruct(kmers); // fix: this list was previously leaked
}
/*
 * DP cell update for the plain three-state (match / gapX / gapY) signal HMM.
 * For every legal pair of alignment paths in the current cell and a
 * neighbouring cell, invokes doTransition once per allowed
 * (fromState, toState) pair with the emission and transition log-probs.
 * doTransition is supplied by the caller (forward, backward, or expectation
 * accumulation), so this one routine drives both alignment and EM training.
 * Any of lower/middle/upper may be NULL at matrix borders.
 * NOTE(review): cX is unused here; emissions are computed from pathC->kmer
 * and the event cY only -- confirm against callers before removing.
 */
static void stateMachine3_cellCalculate(StateMachine *sM,
void *current, void *lower, void *middle, void *upper,
void *cX, void *cY,
void (*doTransition)(double *, double *,
int64_t, int64_t,
double, double,
void *),
void *extraArgs) {
StateMachine3 *sM3 = (StateMachine3 *)sM;
// Cells are "high-dimensional": each holds several candidate paths.
HDCell *hdCurrent = current == NULL ? NULL : (HDCell *)current;
HDCell *hdLower = lower == NULL ? NULL : (HDCell *)lower;
HDCell *hdMiddle = middle == NULL ? NULL : (HDCell *)middle;
HDCell *hdUpper = upper == NULL ? NULL : (HDCell *)upper;
// Lower neighbour: transitions INTO the gap-in-X (kmer skip) state.
if (hdLower != NULL) {
for (int64_t p = 0; p < hdCurrent->numberOfPaths; p++) {
Path *pathC = hdCell_getPath(hdCurrent, p);
for (int64_t q = 0; q < hdLower->numberOfPaths; q++) {
Path *pathL = hdCell_getPath(hdLower, q);
if (path_checkLegal(pathL, pathC)) {
double *lowerCells = path_getCell(pathL);
double *currentCells = path_getCell(pathC);
// Gap-X emission depends only on the current path's kmer.
double eP = sM3->getXGapProbFcn(sM, sM3->model.EMISSION_GAP_X_PROBS, pathC->kmer);
doTransition(lowerCells, currentCells, match, shortGapX, eP, sM3->TRANSITION_GAP_OPEN_X, extraArgs);
doTransition(lowerCells, currentCells, shortGapX, shortGapX, eP, sM3->TRANSITION_GAP_EXTEND_X, extraArgs);
doTransition(lowerCells, currentCells, shortGapY, shortGapX, eP, sM3->TRANSITION_GAP_SWITCH_TO_X, extraArgs);
}
}
}
}
// Middle (diagonal) neighbour: transitions INTO the match state.
if (hdMiddle != NULL) {
for (int64_t p = 0; p < hdCurrent->numberOfPaths; p++) {
Path *pathC = hdCell_getPath(hdCurrent, p);
for (int64_t q = 0; q < hdMiddle->numberOfPaths; q++) {
Path *pathM = hdCell_getPath(hdMiddle, q);
if (path_checkLegal(pathM, pathC)) {
double *middleCells = path_getCell(pathM);
double *currentCells = path_getCell(pathC);
// Match emission: kmer vs. event cY (TRUE flag; semantics defined by the callback).
double eP = sM3->getMatchProbFcn(sM, pathC->kmer, cY, TRUE);
doTransition(middleCells, currentCells, match, match, eP, sM3->TRANSITION_MATCH_CONTINUE, extraArgs);
doTransition(middleCells, currentCells, shortGapX, match, eP, sM3->TRANSITION_MATCH_FROM_GAP_X, extraArgs);
doTransition(middleCells, currentCells, shortGapY, match, eP, sM3->TRANSITION_MATCH_FROM_GAP_Y, extraArgs);
}
}
}
}
// Upper neighbour: transitions INTO the gap-in-Y (extra event) state.
// Legality here is kmer equality rather than path_checkLegal.
if (hdUpper != NULL) {
for (int64_t p = 0; p < hdCurrent->numberOfPaths; p++) {
Path *pathC = hdCell_getPath(hdCurrent, p);
for (int64_t q = 0; q < hdUpper->numberOfPaths; q++) {
Path *pathU = hdCell_getPath(hdUpper, q);
if (stString_eq(pathC->kmer, pathU->kmer)) {
double *upperCells = path_getCell(pathU);
double *currentCells = path_getCell(pathC);
double eP = sM3->getYGapProbFcn(sM, pathC->kmer, cY, FALSE);
doTransition(upperCells, currentCells, match, shortGapY, eP, sM3->TRANSITION_GAP_OPEN_Y, extraArgs);
doTransition(upperCells, currentCells, shortGapY, shortGapY, eP, sM3->TRANSITION_GAP_EXTEND_Y, extraArgs);
// shortGapX -> shortGapY not allowed, this would be going from a kmer skip to extra event?
//doTransition(upper, current, shortGapX, shortGapY, eP, sM3->TRANSITION_GAP_SWITCH_TO_Y, extraArgs);
}
}
}
}
}
/*
 * DP cell update for the HDP-backed three-state signal HMM.  Mirrors
 * stateMachine3_cellCalculate, except: (a) both the match (diagonal) and
 * gap-Y (upper) emissions come from the single HDP match-probability
 * callback, distinguished only by the TRUE/FALSE flag, and (b) the gap-X
 * (kmer skip) contribution uses a fixed emission of log(0.1).
 * Any of lower/middle/upper may be NULL at matrix borders.
 * NOTE(review): cX is unused here; emissions depend on pathC->kmer and the
 * event cY only.
 */
static void stateMachine3HDP_cellCalculate(StateMachine *sM,
void *current, void *lower, void *middle, void *upper,
void *cX, void *cY,
void (*doTransition)(double *, double *,
int64_t, int64_t,
double, double,
void *),
void *extraArgs) {
StateMachine3_HDP *sM3 = (StateMachine3_HDP *) sM;
HDCell *hdCurrent = current == NULL ? NULL : (HDCell *)current;
HDCell *hdLower = lower == NULL ? NULL : (HDCell *)lower;
HDCell *hdMiddle = middle == NULL ? NULL : (HDCell *)middle;
HDCell *hdUpper = upper == NULL ? NULL : (HDCell *)upper;
// Lower neighbour: transitions INTO the gap-in-X (kmer skip) state.
if (hdLower != NULL) {
for (int64_t p = 0; p < hdCurrent->numberOfPaths; p ++) {
Path *pathC = hdCell_getPath(hdCurrent, p);
for (int64_t q = 0; q < hdLower->numberOfPaths; q++) {
Path *pathL = hdCell_getPath(hdLower, q);
if (path_checkLegal(pathL, pathC)) {
//st_uglyf("SENTINAL - legal LOWER : pathC kmer %s\n", pathC->kmer);
double *lowerCells = path_getCell(pathL);
double *currentCells = path_getCell(pathC);
// Fixed gap-X emission; the HDP is not consulted for skips.
double eP = -2.3025850929940455; // log(0.1)
doTransition(lowerCells, currentCells, match, shortGapX, eP, sM3->TRANSITION_GAP_OPEN_X, extraArgs);
doTransition(lowerCells, currentCells, shortGapX, shortGapX, eP, sM3->TRANSITION_GAP_EXTEND_X, extraArgs);
doTransition(lowerCells, currentCells, shortGapY, shortGapX, eP, sM3->TRANSITION_GAP_SWITCH_TO_X, extraArgs);
}
}
}
}
// Middle (diagonal) neighbour: transitions INTO the match state.
if (hdMiddle != NULL) {
for (int64_t p = 0; p < hdCurrent->numberOfPaths; p++) {
Path *pathC = hdCell_getPath(hdCurrent, p);
for (int64_t q = 0; q < hdMiddle->numberOfPaths; q++) {
Path *pathM = hdCell_getPath(hdMiddle, q);
if (path_checkLegal(pathM, pathC)) {
//st_uglyf("SENTINAL - legal MIDDLE : pathC kmer %s\n", pathC->kmer);
double *middleCells = path_getCell(pathM);
double *curentCells = path_getCell(pathC); // (sic: local name misspelled in generated/original code)
double eP = sM3->getMatchProbFcn(sM, pathC->kmer, cY, TRUE);
doTransition(middleCells, curentCells, match, match, eP, sM3->TRANSITION_MATCH_CONTINUE, extraArgs);
doTransition(middleCells, curentCells, shortGapX, match, eP, sM3->TRANSITION_MATCH_FROM_GAP_X, extraArgs);
doTransition(middleCells, curentCells, shortGapY, match, eP, sM3->TRANSITION_MATCH_FROM_GAP_Y, extraArgs);
}
}
}
}
// Upper neighbour: transitions INTO the gap-in-Y (extra event) state.
// Legality here is kmer equality rather than path_checkLegal.
if (hdUpper != NULL) {
for (int64_t p = 0; p < hdCurrent->numberOfPaths; p++) {
Path *pathC = hdCell_getPath(hdCurrent, p);
for (int64_t q = 0; q < hdUpper->numberOfPaths; q++) {
Path *pathU = hdCell_getPath(hdUpper, q);
if (stString_eq(pathC->kmer, pathU->kmer)) {
//st_uglyf("SENTINAL - legal UPPER : pathC kmer %s\n", pathC->kmer);
double *upperCells = path_getCell(pathU);
double *currentCells = path_getCell(pathC);
// Same HDP callback as the match case, but with the FALSE flag.
double eP = sM3->getMatchProbFcn(sM, pathC->kmer, cY, FALSE);
doTransition(upperCells, currentCells, match, shortGapY, eP, sM3->TRANSITION_GAP_OPEN_Y, extraArgs);
doTransition(upperCells, currentCells, shortGapY, shortGapY, eP, sM3->TRANSITION_GAP_EXTEND_Y, extraArgs);
// shortGapX -> shortGapY not allowed, this would be going from a kmer skip to extra event?
//doTransition(upper, current, shortGapX, shortGapY, eP, sM3->TRANSITION_GAP_SWITCH_TO_Y, extraArgs);
}
}
}
}
}
///////////////////////////////////////////// CORE FUNCTIONS ////////////////////////////////////////////////////////
/*
 * Load a three-state signal stateMachine from a .model file.
 *
 * File format:
 *   line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
 *   line 1: match->match \t match->gapX \t match->gapY \t
 *           gapX->match \t gapX->gapX \t gapX->gapY \t
 *           gapY->match \t gapY->gapX \t gapY->gapY \n
 *   line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda] (.../kmer) \n
 *
 * gapXProbFcn / matchProbFcn become the machine's emission callbacks,
 * loadTransitionsFcn parses the transitions line, and nHdp (may be NULL) is
 * forwarded to the HDP machine builder.  Aborts on any parse error.
 * Returns the newly constructed stateMachine (caller owns it).
 */
StateMachine *stateMachine3_loadFromFile(const char *modelFile, StateMachineType type,
                                         double (*gapXProbFcn)(StateMachine *, const double *, void *),
                                         double (*matchProbFcn)(StateMachine *, void *, void *, bool ),
                                         void (*loadTransitionsFcn)(StateMachine *, stList *),
                                         NanoporeHDP *nHdp) {
    if (!stFile_exists(modelFile)) {
        st_errAbort("stateMachine3_loadFromFile: Couldn't find .model file here %s\n", modelFile);
    }
    FILE *fH = fopen(modelFile, "r");
    if (fH == NULL) { // robustness: the file can vanish between the exists-check and the open
        st_errAbort("stateMachine3_loadFromFile: Couldn't open .model file %s\n", modelFile);
    }
    // Line 0: stateNumber, alphabet size, alphabet, and kmer length.
    char *string = stFile_getLineFromFile(fH);
    stList *tokens = stString_split(string);
    if (stList_length(tokens) != 4) {
        st_errAbort("stateMachine3_loadFromFile: Model file %s does not have the correct number of stateMachine "
                    "parameters should be 4 got %"PRId64"\n", modelFile, stList_length(tokens));
    }
    char *alphabet;
    int64_t stateNumber, alphabetSize, kmerLength, j, h;
    j = sscanf(stList_get(tokens, 0), "%"SCNd64, &stateNumber);
    if (j != 1) {
        // Bug fix: message previously said "alphabet size" (copy-paste slip).
        st_errAbort("stateMachine3_loadFromFile: error parsing state number\n");
    }
    j = sscanf(stList_get(tokens, 1), "%"SCNd64, &alphabetSize);
    if (j != 1) {
        st_errAbort("stateMachine3_loadFromFile: error parsing alphabet size\n");
    }
    alphabet = (char *)stList_get(tokens, 2);
    j = sscanf(stList_get(tokens, 3), "%"SCNd64, &kmerLength);
    if (j != 1) {
        st_errAbort("stateMachine3_loadFromFile: error parsing kmer length\n");
    }
    // Check for correct number of states for the given stateMachineType.
    if (!stateMachine3_checkStateNumber(stateNumber, type)) {
        // Bug fix: stateNumber (int64_t) was printed with %s -- undefined behavior.
        st_errAbort("stateMachine3_loadFromFile: Got invalid stateNumber for this stateMachine. Got stateNumber %"PRId64
                    " and StateMachineType %i\n", stateNumber, type);
    }
    StateMachine *sM = stateMachine3_signalMachineBuilder(type, alphabet, alphabetSize, kmerLength, gapXProbFcn,
                                                          matchProbFcn, nHdp);
    free(string);
    stList_destruct(tokens);
    // Line 1: transitions.
    string = stFile_getLineFromFile(fH);
    tokens = stString_split(string);
    if (stList_length(tokens) != (sM->stateNumber * sM->stateNumber + 1)) {
        // Bug fix: PRId64 for int64_t counts (was %lld, which is only correct where int64_t == long long).
        st_errAbort("stateMachine3_loadFromFile: Got invalid number of tokens on transitions line, got %"PRId64" should be"
                    " %"PRId64, stList_length(tokens), (sM->stateNumber * sM->stateNumber + 1));
    }
    loadTransitionsFcn(sM, tokens);
    free(string);
    stList_destruct(tokens);
    // Line 2: match emissions (MODEL_PARAMS values per kmer).
    string = stFile_getLineFromFile(fH);
    tokens = stString_split(string);
    if (stList_length(tokens) != sM->parameterSetSize * MODEL_PARAMS) {
        st_errAbort("This stateMachine is not correct for signal model (match emissions) got %"PRId64", should be %"PRId64"\n",
                    stList_length(tokens), sM->parameterSetSize * MODEL_PARAMS);
    }
    // The gap-Y (extra event) emissions start out as a copy of the match emissions.
    for (int64_t i = 0; i < sM->parameterSetSize * MODEL_PARAMS; i++) {
        j = sscanf(stList_get(tokens, i), "%lf", &(sM->EMISSION_MATCH_MATRIX[i]));
        h = sscanf(stList_get(tokens, i), "%lf", &(sM->EMISSION_GAP_Y_MATRIX[i]));
        if ((j != 1) || (h != 1)) {
            st_errAbort("emissions_signal_loadPoreModel: error loading pore model (match emissions)\n");
        }
    }
    free(string);
    stList_destruct(tokens);
    // Inflate the gap-Y level_sd by 75% (see Simpson et al.); level_sd is the
    // second of the MODEL_PARAMS entries per kmer, hence starting at offset 1.
    for (int64_t i = 1; i < (sM->parameterSetSize * MODEL_PARAMS); i += MODEL_PARAMS) {
        sM->EMISSION_GAP_Y_MATRIX[i] *= EXTRA_EVENT_NOISE_MULTIPLIER;
    }
    fclose(fH);
    return sM;
}
/*
 * Build a plain three-state stateMachine (match / gapX / gapY).
 * Aborts unless type is threeState or threeStateAsymmetric.  The supplied
 * callbacks fill in the transition table and allocate the emission tables;
 * the gap-X emission probs are then initialized to a flat log(0.1).
 * Returns the machine upcast to StateMachine* (caller owns it).
 */
StateMachine *stateMachine3_construct(StateMachineType type,
                                      const char *alphabet, int64_t alphabetSize, int64_t kmerLength,
                                      void (*setTransitionsToDefaults)(StateMachine *),
                                      void (*setEmissionsDefaults)(StateMachine *, int64_t),
                                      double (*gapXProbFcn)(StateMachine *, const double *, void *),
                                      double (*gapYProbFcn)(StateMachine *, void *, void *, bool ),
                                      double (*matchProbFcn)(StateMachine *, void *, void *, bool ),
                                      void (*cellCalcUpdateExpFcn)(double *fromCells, double *toCells,
                                                                   int64_t from, int64_t to,
                                                                   double eP, double tP, void *extraArgs)) {
    StateMachine3 *self = st_malloc(sizeof(StateMachine3));
    if (type != threeState && type != threeStateAsymmetric) {
        st_errAbort("Tried to create a three state state-machine with the wrong type");
    }
    // Parent-class bookkeeping.
    self->model.type = type;
    self->model.parameterSetSize = intPow(alphabetSize, kmerLength);
    self->model.alphabetSize = alphabetSize;
    self->model.alphabet = sequence_prepareAlphabet(alphabet, alphabetSize);
    self->model.kmerLength = kmerLength;
    self->model.stateNumber = 3;
    self->model.matchState = match;
    // Virtual table.
    self->model.startStateProb = stateMachine3_startStateProb;
    self->model.endStateProb = stateMachine3_endStateProb;
    self->model.raggedStartStateProb = stateMachine3_raggedStartStateProb;
    self->model.raggedEndStateProb = stateMachine3_raggedEndStateProb;
    self->model.cellCalculate = stateMachine3_cellCalculate;
    self->model.cellCalculateUpdateExpectations = cellCalcUpdateExpFcn;
    // Emission callbacks.
    self->getXGapProbFcn = gapXProbFcn;
    self->getYGapProbFcn = gapYProbFcn;
    self->getMatchProbFcn = matchProbFcn;
    setTransitionsToDefaults((StateMachine *) self);
    // Allocates the emission tables; must run before the loop below writes into them.
    setEmissionsDefaults((StateMachine *) self, self->model.parameterSetSize);
    // Flat gap-X emission probability for every kmer.
    for (int64_t k = 0; k < self->model.parameterSetSize; k++) {
        self->model.EMISSION_GAP_X_PROBS[k] = -2.3025850929940455; // log(0.1)
    }
    return (StateMachine *) self;
}
/*
 * Build a three-state stateMachine whose match emissions are evaluated by a
 * hierarchical Dirichlet process (NanoporeHDP) model.  Aborts unless type is
 * threeStateHdp.  Returns the machine upcast to StateMachine* (caller owns it).
 */
StateMachine *stateMachine3Hdp_construct(StateMachineType type,
                                         const char *alphabet, int64_t alphabetSize, int64_t kmerLength,
                                         void (*setTransitionsToDefaults)(StateMachine *),
                                         void (*setEmissionsDefaults)(StateMachine *, int64_t),
                                         NanoporeHDP *hdpModel,
                                         double (*matchProbFcn)(StateMachine *, void *, void *, bool),
                                         void (*cellCalcUpdateExpFcn)(double *, double *, int64_t, int64_t,
                                                                      double , double , void *)) {
    StateMachine3_HDP *self = st_malloc(sizeof(StateMachine3_HDP));
    if (type != threeStateHdp) {
        st_errAbort("Tried to create a three state state-machine with the wrong type");
    }
    // Parent-class bookkeeping.
    self->model.type = type;
    self->model.parameterSetSize = intPow(alphabetSize, kmerLength);
    self->model.alphabetSize = alphabetSize;
    self->model.alphabet = sequence_prepareAlphabet(alphabet, alphabetSize);
    self->model.kmerLength = kmerLength;
    self->model.stateNumber = 3;
    self->model.matchState = match;
    // Virtual table; note the HDP-specific cell calculator.
    self->model.startStateProb = stateMachine3_startStateProb;
    self->model.endStateProb = stateMachine3_endStateProb;
    self->model.raggedStartStateProb = stateMachine3_raggedStartStateProb;
    self->model.raggedEndStateProb = stateMachine3_raggedEndStateProb;
    self->model.cellCalculate = stateMachine3HDP_cellCalculate;
    self->model.cellCalculateUpdateExpectations = cellCalcUpdateExpFcn;
    // Match-probability callback and the HDP it queries.
    self->getMatchProbFcn = matchProbFcn;
    self->hdpModel = hdpModel;
    setTransitionsToDefaults((StateMachine *) self);
    // Allocates the emission tables; must run before the loop below writes into them.
    setEmissionsDefaults((StateMachine *) self, self->model.parameterSetSize);
    // Flat gap-X emission probability for every kmer.
    for (int64_t k = 0; k < self->model.parameterSetSize; k++) {
        self->model.EMISSION_GAP_X_PROBS[k] = -2.3025850929940455; // log(0.1)
    }
    return (StateMachine *) self;
}
/*
 * Dispatch constructor: build the signal stateMachine variant matching the
 * requested type.  threeStateHdp gets the HDP machine; every other type falls
 * back to the plain threeState machine.  Both use the nanopore default
 * transitions and zero-initialized emissions.
 */
StateMachine *stateMachine3_signalMachineBuilder(StateMachineType type, char *alphabet, int64_t alphabetSize,
                                                 int64_t kmerLength,
                                                 double (*gapXProbFcn)(StateMachine *, const double *, void *),
                                                 double (*matchProbFcn)(StateMachine *, void *, void *, bool),
                                                 NanoporeHDP *nHdp) {
    if (type == threeStateHdp) {
        return stateMachine3Hdp_construct(threeStateHdp, alphabet, alphabetSize, kmerLength,
                                          stateMachine3_setTransitionsToNanoporeDefaults,
                                          emissions_signal_initEmissionsToZero,
                                          nHdp, matchProbFcn, cell_signal_updateExpectationsAndAssignments);
    }
    // Default: the plain three-state machine (matchProbFcn doubles as the
    // gap-Y probability function, as in the original dispatch).
    return stateMachine3_construct(threeState, alphabet, alphabetSize, kmerLength,
                                   stateMachine3_setTransitionsToNanoporeDefaults,
                                   emissions_signal_initEmissionsToZero,
                                   gapXProbFcn, matchProbFcn, matchProbFcn,
                                   cell_signal_updateExpectations);
}
/*
static void stateMachine3_loadAsymmetric(StateMachine3 *sM3, Hmm *hmm) {
if (hmm->type != threeStateAsymmetric) {
st_errAbort("Wrong hmm type");
}
//sM3->TRANSITION_MATCH_CONTINUE = log(hmm_getTransition(hmm, match, match));
sM3->TRANSITION_MATCH_CONTINUE = log(hmm->getTransitionsExpFcn(hmm, match, match));
sM3->TRANSITION_MATCH_FROM_GAP_X = log(hmm->getTransitionsExpFcn(hmm, shortGapX, match));
sM3->TRANSITION_MATCH_FROM_GAP_Y = log(hmm->getTransitionsExpFcn(hmm, shortGapY, match));
sM3->TRANSITION_GAP_OPEN_X = log(hmm->getTransitionsExpFcn(hmm, match, shortGapX));
sM3->TRANSITION_GAP_OPEN_Y = log(hmm->getTransitionsExpFcn(hmm, match, shortGapY));
sM3->TRANSITION_GAP_EXTEND_X = log(hmm->getTransitionsExpFcn(hmm, shortGapX, shortGapX));
sM3->TRANSITION_GAP_EXTEND_Y = log(hmm->getTransitionsExpFcn(hmm, shortGapY, shortGapY));
sM3->TRANSITION_GAP_SWITCH_TO_X = log(hmm->getTransitionsExpFcn(hmm, shortGapY, shortGapX));
sM3->TRANSITION_GAP_SWITCH_TO_Y = log(hmm->getTransitionsExpFcn(hmm, shortGapX, shortGapY));
emissions_em_loadMatchProbs(sM3->EMISSION_MATCH_MATRIX, hmm, match);
int64_t xGapStates[1] = { shortGapX };
int64_t yGapStates[1] = { shortGapY };
emissions_loadGapProbs(sM3->EMISSION_GAP_X_PROBS, hmm, xGapStates, 1, NULL, 0);
emissions_loadGapProbs(sM3->EMISSION_GAP_Y_MATRIX, hmm, NULL, 0, yGapStates, 1);
}
static void stateMachine3_loadSymmetric(StateMachine3 *sM3, Hmm *hmm) {
if (hmm->type != threeState) {
st_errAbort("Wrong hmm type");
}
sM3->TRANSITION_MATCH_CONTINUE = log(hmm_getTransition(hmm, match, match));
sM3->TRANSITION_MATCH_FROM_GAP_X = log(
(hmm_getTransition(hmm, shortGapX, match) + hmm_getTransition(hmm, shortGapY, match)) / 2.0);
sM3->TRANSITION_MATCH_FROM_GAP_Y = sM3->TRANSITION_MATCH_FROM_GAP_X;
sM3->TRANSITION_GAP_OPEN_X = log(
(hmm_getTransition(hmm, match, shortGapX) + hmm_getTransition(hmm, match, shortGapY)) / 2.0);
sM3->TRANSITION_GAP_OPEN_Y = sM3->TRANSITION_GAP_OPEN_X;
sM3->TRANSITION_GAP_EXTEND_X = log(
(hmm_getTransition(hmm, shortGapX, shortGapX) + hmm_getTransition(hmm, shortGapY, shortGapY)) / 2.0);
sM3->TRANSITION_GAP_EXTEND_Y = sM3->TRANSITION_GAP_EXTEND_X;
sM3->TRANSITION_GAP_SWITCH_TO_X = log(
(hmm_getTransition(hmm, shortGapY, shortGapX) + hmm_getTransition(hmm, shortGapX, shortGapY)) / 2.0);
sM3->TRANSITION_GAP_SWITCH_TO_Y = sM3->TRANSITION_GAP_SWITCH_TO_X;
emissions_em_loadMatchProbsSymmetrically(sM3->EMISSION_MATCH_MATRIX, hmm, match);
int64_t xGapStates[2] = { shortGapX };
int64_t yGapStates[2] = { shortGapY };
emissions_loadGapProbs(sM3->EMISSION_GAP_X_PROBS, hmm, xGapStates, 1, yGapStates, 1);
emissions_em_loadGapProbs(sM3->EMISSION_GAP_Y_MATRIX, hmm, xGapStates, 1, yGapStates, 1);
}
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Public functions
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
 * Construct a five-state stateMachine from a discrete Hmm.
 * NOTE(review): the unconditional st_errAbort() on entry makes everything
 * below it unreachable; the remaining body is retained only as reference for
 * a future implementation.
 * NOTE(review): if re-enabled, confirm the fiveStateAsymmetric branch -- it
 * constructs with type fiveState, same as the symmetric branch.
 */
StateMachine *getStateMachine5(Hmm *hmmD, StateMachineFunctions *sMfs) {
st_errAbort("5-state stateMachine not implemented\n");
if (hmmD->type == fiveState) {
StateMachine5 *sM5 = (StateMachine5 *) stateMachine5_construct(fiveState, hmmD->parameterSetSize,
emissions_discrete_initEmissionsToZero,
sMfs->gapXProbFcn,
sMfs->gapYProbFcn,
sMfs->matchProbFcn,
cell_updateExpectations);
stateMachine5_loadSymmetric(sM5, hmmD);
return (StateMachine *) sM5;
}
if (hmmD->type == fiveStateAsymmetric) {
StateMachine5 *sM5 = (StateMachine5 *) stateMachine5_construct(fiveState, hmmD->parameterSetSize,
emissions_discrete_initEmissionsToZero,
sMfs->gapXProbFcn,
sMfs->gapYProbFcn,
sMfs->matchProbFcn,
cell_updateExpectations);
stateMachine5_loadAsymmetric(sM5, hmmD);
return (StateMachine *) sM5;
}
else {
return 0;
}
}
/*
 * Load a three-state signal stateMachine using the descaling match-probability
 * function, then stamp the read's scale/shift/var adjustment parameters on it.
 * When scaleNoise is set, the noise emissions are rescaled with the same
 * parameters.  Aborts if the model file is missing.
 */
StateMachine *getStateMachine3_descaled(const char *modelFile, NanoporeReadAdjustmentParameters npp, bool scaleNoise) {
    if (!stFile_exists(modelFile)) {
        st_errAbort("getStateMachine3_descaled: Cannot find model file %s\n", modelFile);
    }
    StateMachine *machine = stateMachine3_loadFromFile(modelFile, threeState, emissions_kmer_getGapProb,
                                                       emissions_signal_strawManGetKmerEventMatchProbWithDescaling,
                                                       stateMachine3_loadTransitionsFromFile, NULL);
    machine->scale = npp.scale;
    machine->shift = npp.shift;
    machine->var = npp.var;
    if (scaleNoise) {
        emissions_signal_scaleNoise(machine, npp);
    }
    return machine;
}
/*
 * Load a plain three-state signal stateMachine (no descaling, no HDP) from a
 * model file.  Aborts if the file is missing.
 */
StateMachine *getStateMachine3(const char *modelFile) {
    if (!stFile_exists(modelFile)) {
        st_errAbort("getStateMachine3: Cannot find model file %s\n", modelFile);
    }
    return stateMachine3_loadFromFile(modelFile, threeState, emissions_kmer_getGapProb,
                                      emissions_signal_strawManGetKmerEventMatchProb,
                                      stateMachine3_loadTransitionsFromFile, NULL);
}
/*
 * Load an HDP-backed three-state stateMachine from a model file and stamp the
 * read's scale/shift/var adjustment parameters on it.  Aborts if the model
 * file is missing.
 */
StateMachine *getHdpStateMachine(NanoporeHDP *hdp, const char *modelFile, NanoporeReadAdjustmentParameters npp) {
    if (!stFile_exists(modelFile)) {
        st_errAbort("getHdpStateMachine: Cannot find model file %s\n", modelFile);
    }
    StateMachine *machine = stateMachine3_loadFromFile(modelFile, threeStateHdp, NULL, emissions_signal_getHdpKmerDensity,
                                                       stateMachine3_loadTransitionsFromFile, hdp);
    machine->scale = npp.scale;
    machine->shift = npp.shift;
    machine->var = npp.var;
    // Level means could alternatively be set to the HDP expected values (disabled):
    //stateMachine3_setModelToHdpExpectedValues(machine, hdp);
    return machine;
}
// Tear down the NanoporeHDP owned by a threeStateHdp machine.
static void stateMachine_destructHdpModel(StateMachine *sM) {
    StateMachine3_HDP *hdpMachine = (StateMachine3_HDP *)sM;
    destroy_nanopore_hdp(hdpMachine->hdpModel);
}
/*
 * Free a stateMachine and everything it owns: the HDP model (threeStateHdp
 * machines only), the three emission tables, the alphabet string, and the
 * machine struct itself.
 */
void stateMachine_destruct(StateMachine *sM) {
    if (sM->type == threeStateHdp) {
        stateMachine_destructHdpModel(sM);
    }
    free(sM->EMISSION_MATCH_MATRIX);
    free(sM->EMISSION_GAP_X_PROBS);
    free(sM->EMISSION_GAP_Y_MATRIX);
    free(sM->alphabet);
    free(sM);
}
| 48.463485 | 177 | 0.643964 | [
"model"
] |
9e7d61c575d433b02f55b65e56f4ec52fd335287 | 71,048 | c | C | compiler/bootstrap/libec/bootstrap/loadSymbols.c | jerstlouis/ecere-sdk | 439cc4c1705d708773335d1f1a0905f0f3021c48 | [
"BSD-3-Clause"
] | 2 | 2019-05-18T13:52:32.000Z | 2020-11-30T09:24:09.000Z | compiler/bootstrap/libec/bootstrap/loadSymbols.c | jerstlouis/ecere-sdk | 439cc4c1705d708773335d1f1a0905f0f3021c48 | [
"BSD-3-Clause"
] | null | null | null | compiler/bootstrap/libec/bootstrap/loadSymbols.c | jerstlouis/ecere-sdk | 439cc4c1705d708773335d1f1a0905f0f3021c48 | [
"BSD-3-Clause"
] | null | null | null | /* Code generated from eC source file: loadSymbols.ec */
#if defined(_WIN32)
#define __runtimePlatform 1
#elif defined(__APPLE__)
#define __runtimePlatform 3
#else
#define __runtimePlatform 2
#endif
#if defined(__GNUC__)
typedef long long int64;
typedef unsigned long long uint64;
#ifndef _WIN32
#define __declspec(x)
#endif
#elif defined(__TINYC__)
#include <stdarg.h>
#define __builtin_va_list va_list
#define __builtin_va_start va_start
#define __builtin_va_end va_end
#ifdef _WIN32
#define strcasecmp stricmp
#define strncasecmp strnicmp
#define __declspec(x) __attribute__((x))
#else
#define __declspec(x)
#endif
typedef long long int64;
typedef unsigned long long uint64;
#else
typedef __int64 int64;
typedef unsigned __int64 uint64;
#endif
#ifdef __BIG_ENDIAN__
#define __ENDIAN_PAD(x) (8 - (x))
#else
#define __ENDIAN_PAD(x) 0
#endif
#if defined(_WIN32)
# if defined(__GNUC__) || defined(__TINYC__)
# define ecere_stdcall __attribute__((__stdcall__))
# define ecere_gcc_struct __attribute__((gcc_struct))
# else
# define ecere_stdcall __stdcall
# define ecere_gcc_struct
# endif
#else
# define ecere_stdcall
# define ecere_gcc_struct
#endif
#include <stdint.h>
#include <sys/types.h>
extern int yychar;
extern char sourceFileStack[30][797];
extern int include_stack_ptr;
static int numIncludes;
static char ** includes;
unsigned int inIDE = 0;
unsigned int ecereImported;
unsigned int inPreCompiler = 0;
unsigned int inSymbolGen = 0;
unsigned int inDocumentor = 0;
// Generated placeholder; always returns 1.
// NOTE(review): presumably kept so the exported symbol exists -- confirm
// against the eC bootstrap before removing.
unsigned int DummyMethod()
{
return 1;
}
extern const char * sourceFile;
extern unsigned int skipErrors;
struct __ecereNameSpace__ecere__com__Instance * loadedModules;
extern char * symbolsDir;
extern unsigned int inCompiler;
extern struct __ecereNameSpace__ecere__com__Property * __ecereProp___ecereNameSpace__ecere__com__MapIterator_map;
extern struct __ecereNameSpace__ecere__com__Property * __ecereProp___ecereNameSpace__ecere__com__Iterator_data;
struct __ecereNameSpace__ecere__sys__OldList
{
void * first;
void * last;
int count;
unsigned int offset;
unsigned int circ;
} ecere_gcc_struct;
struct Type;
struct __ecereNameSpace__ecere__com__DataValue
{
union
{
char c;
unsigned char uc;
short s;
unsigned short us;
int i;
unsigned int ui;
void * p;
float f;
double d;
long long i64;
uint64 ui64;
} ecere_gcc_struct __anon1;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__SerialBuffer
{
unsigned char * _buffer;
unsigned int count;
unsigned int _size;
unsigned int pos;
} ecere_gcc_struct;
extern void * __ecereNameSpace__ecere__com__eSystem_New(unsigned int size);
extern void * __ecereNameSpace__ecere__com__eSystem_New0(unsigned int size);
extern void * __ecereNameSpace__ecere__com__eSystem_Renew(void * memory, unsigned int size);
extern void * __ecereNameSpace__ecere__com__eSystem_Renew0(void * memory, unsigned int size);
extern void __ecereNameSpace__ecere__com__eSystem_Delete(void * memory);
extern char * __ecereNameSpace__ecere__sys__TrimLSpaces(const char * string, char * output);
extern int strcmp(const char * , const char * );
extern int strtol(const char * , char * * , int base);
extern char * strcpy(char * , const char * );
extern char * __ecereNameSpace__ecere__sys__GetLastDirectory(const char * string, char * output);
extern int strcasecmp(const char * , const char * );
struct Specifier;
extern char * strstr(const char * , const char * );
extern char * strcat(char * , const char * );
struct External;
struct ModuleImport;
struct ClassImport;
struct CodePosition
{
int line;
int charPos;
int pos;
int included;
} ecere_gcc_struct;
struct Context;
extern char * strchr(const char * , int);
extern void * memcpy(void * , const void * , size_t size);
extern char * __ecereNameSpace__ecere__sys__TrimRSpaces(const char * string, char * output);
extern long long strtoll(const char * nptr, char * * endptr, int base);
struct __ecereNameSpace__ecere__com__ClassProperty;
extern char * __ecereNameSpace__ecere__sys__CopyString(const char * string);
struct yy_buffer_state
{
void * yy_input_file;
char * yy_ch_buf;
char * yy_buf_pos;
unsigned int yy_buf_size;
int yy_n_chars;
int yy_is_our_buffer;
int yy_is_interactive;
int yy_at_bol;
int yy_fill_buffer;
int yy_buffer_status;
} ecere_gcc_struct;
struct Identifier;
struct Statement;
struct Instantiation;
struct Declarator;
struct TypeName;
struct Initializer;
struct __ecereNameSpace__ecere__com__ClassTemplateParameter;
struct __ecereNameSpace__ecere__com__DefinedExpression;
struct __ecereNameSpace__ecere__com__GlobalFunction;
extern char * strncpy(char * , const char * , size_t n);
extern char * __ecereNameSpace__ecere__sys__GetSystemPathBuffer(char * d, const char * p);
extern void Compiler_Error(const char * format, ...);
extern const char * __ecereNameSpace__ecere__GetTranslatedString(const char * name, const char * string, const char * stringAndContext);
extern unsigned int __ecereNameSpace__ecere__sys__StripExtension(char * string);
extern size_t strlen(const char * );
extern char * __ecereNameSpace__ecere__sys__GetExtension(const char * string, char * output);
extern char * __ecereNameSpace__ecere__sys__PathCat(char * string, const char * addedPath);
extern char * __ecereNameSpace__ecere__sys__ChangeExtension(const char * string, const char * ext, char * output);
extern unsigned int __ecereNameSpace__ecere__sys__FileExists(const char * fileName);
struct __ecereNameSpace__ecere__com__IteratorPointer;
extern int sprintf(char * , const char * , ...);
extern char * __ecereNameSpace__ecere__sys__StripLastDirectory(const char * string, char * output);
extern void Compiler_Warning(const char * format, ...);
// Map a 1-based include-file id (as issued by GetIncludeFileID) back to its
// stored path.  No bounds checking; id must be a previously returned id.
char * GetIncludeFileFromID(int id)
{
return includes[id - 1];
}
// Set the global inIDE flag (nonzero when the compiler is driven from the IDE).
void SetInIDE(unsigned int b)
{
inIDE = b;
}
// Set the global ecereImported flag.
void SetEcereImported(unsigned int b)
{
ecereImported = b;
}
// Read back the global ecereImported flag.
unsigned int GetEcereImported()
{
return ecereImported;
}
// Set the global inPreCompiler flag.
void SetInPreCompiler(unsigned int b)
{
inPreCompiler = b;
}
// Set the global inSymbolGen flag.
void SetInSymbolGen(unsigned int b)
{
inSymbolGen = b;
}
// Set the global inDocumentor flag.
void SetInDocumentor(unsigned int b)
{
inDocumentor = b;
}
struct __ecereNameSpace__ecere__sys__OldList dataRedefinitions;
struct __ecereNameSpace__ecere__sys__OldList * includeDirs, * sysIncludeDirs;
// Install the list of user include directories (caller retains ownership).
void SetIncludeDirs(struct __ecereNameSpace__ecere__sys__OldList * list)
{
includeDirs = list;
}
struct __ecereNameSpace__ecere__sys__OldList * precompDefines;
extern struct __ecereNameSpace__ecere__sys__OldList * defines;
void __ecereMethod___ecereNameSpace__ecere__sys__OldList_Add(struct __ecereNameSpace__ecere__sys__OldList * this, void * item);
unsigned int __ecereMethod___ecereNameSpace__ecere__sys__OldList_AddName(struct __ecereNameSpace__ecere__sys__OldList * this, void * item);
void __ecereMethod___ecereNameSpace__ecere__sys__OldList_Free(struct __ecereNameSpace__ecere__sys__OldList * this, void (* freeFn)(void * ));
extern struct Type * ProcessTypeString(const char * string, unsigned int staticMethod);
extern void FreeType(struct Type * type);
extern void PrintType(struct Type * type, char * string, unsigned int printName, unsigned int fullName);
// Release the registry of include-file paths (each entry and the array
// itself) and reset the registry to empty.
void FreeIncludeFiles()
{
int c;
for(c = 0; c < numIncludes; c++)
(__ecereNameSpace__ecere__com__eSystem_Delete(includes[c]), includes[c] = 0);
(__ecereNameSpace__ecere__com__eSystem_Delete(includes), includes = 0);
numIncludes = 0;
}
// Look up an already-registered include file.  Returns its 1-based id, or 0
// if not registered.  The comparison is case-insensitive on Windows
// (__runtimePlatform == 1) and case-sensitive elsewhere.
int FindIncludeFileID(char * includeFile)
{
int c;
for(c = 0; c < numIncludes; c++)
if(!((__runtimePlatform == 1) ? (strcasecmp) : strcmp)(includes[c], includeFile))
return c + 1;
return 0;
}
extern struct ModuleImport * mainModule;
// Source span (start and end positions) attached to parsed constructs.
struct Location
{
struct CodePosition start;
struct CodePosition end;
} ecere_gcc_struct;
// Install the system include-directory list; OpenIncludeFile() falls back to it
// when the user include directories yield no match.
void SetSysIncludeDirs(struct __ecereNameSpace__ecere__sys__OldList * list)
{
sysIncludeDirs = list;
}
// Store the precompiler defines list; consumers are elsewhere in the front end.
void SetPrecompDefines(struct __ecereNameSpace__ecere__sys__OldList * list)
{
precompDefines = list;
}
// Return the 1-based ID for includeFile, registering a copy of the path first
// if it has not been seen before.
int GetIncludeFileID(char * includeFile)
{
   int id = FindIncludeFileID(includeFile);
   if(!id)
   {
      includes = __ecereNameSpace__ecere__com__eSystem_Renew(includes, sizeof(char *) * (numIncludes + 1));
      includes[numIncludes] = __ecereNameSpace__ecere__sys__CopyString(includeFile);
      id = ++numIncludes;
   }
   return id;
}
struct __ecereNameSpace__ecere__com__NameSpace;
struct __ecereNameSpace__ecere__com__NameSpace * globalData;
struct Expression;
extern struct Expression * ParseExpressionString(char * expression);
struct Expression
{
struct Expression * prev;
struct Expression * next;
struct Location loc;
int type;
union
{
struct
{
char * constant;
struct Identifier * identifier;
} ecere_gcc_struct __anon1;
struct Statement * compound;
struct Instantiation * instance;
struct
{
char * string;
unsigned int intlString;
unsigned int wideString;
} ecere_gcc_struct __anon2;
struct __ecereNameSpace__ecere__sys__OldList * list;
struct
{
struct __ecereNameSpace__ecere__sys__OldList * specifiers;
struct Declarator * decl;
} ecere_gcc_struct _classExp;
struct
{
struct Identifier * id;
} ecere_gcc_struct classData;
struct
{
struct Expression * exp;
struct __ecereNameSpace__ecere__sys__OldList * arguments;
struct Location argLoc;
} ecere_gcc_struct call;
struct
{
struct Expression * exp;
struct __ecereNameSpace__ecere__sys__OldList * index;
} ecere_gcc_struct index;
struct
{
struct Expression * exp;
struct Identifier * member;
int memberType;
unsigned int thisPtr;
} ecere_gcc_struct member;
struct
{
int op;
struct Expression * exp1;
struct Expression * exp2;
} ecere_gcc_struct op;
struct TypeName * typeName;
struct Specifier * _class;
struct
{
struct TypeName * typeName;
struct Expression * exp;
} ecere_gcc_struct cast;
struct
{
struct Expression * cond;
struct __ecereNameSpace__ecere__sys__OldList * exp;
struct Expression * elseExp;
} ecere_gcc_struct cond;
struct
{
struct TypeName * typeName;
struct Expression * size;
} ecere_gcc_struct _new;
struct
{
struct TypeName * typeName;
struct Expression * size;
struct Expression * exp;
} ecere_gcc_struct _renew;
struct
{
char * table;
struct Identifier * id;
} ecere_gcc_struct db;
struct
{
struct Expression * ds;
struct Expression * name;
} ecere_gcc_struct dbopen;
struct
{
struct TypeName * typeName;
struct Initializer * initializer;
} ecere_gcc_struct initializer;
struct
{
struct Expression * exp;
struct TypeName * typeName;
} ecere_gcc_struct vaArg;
struct
{
struct TypeName * typeName;
struct Identifier * id;
} ecere_gcc_struct offset;
} ecere_gcc_struct __anon1;
unsigned int debugValue;
struct __ecereNameSpace__ecere__com__DataValue val;
uint64 address;
unsigned int hasAddress;
struct Type * expType;
struct Type * destType;
unsigned int usage;
int tempCount;
unsigned int byReference;
unsigned int isConstant;
unsigned int addedThis;
unsigned int needCast;
unsigned int thisPtr;
unsigned int opDestType;
unsigned int usedInComparison;
unsigned int ambiguousUnits;
unsigned int parentOpDestType;
unsigned int needTemplateCast;
} ecere_gcc_struct;
extern void ProcessExpressionType(struct Expression * exp);
extern void ComputeExpression(struct Expression * exp);
extern void FreeExpression(struct Expression * exp);
struct __ecereNameSpace__ecere__com__Class;
struct __ecereNameSpace__ecere__com__Instance
{
void * * _vTbl;
struct __ecereNameSpace__ecere__com__Class * _class;
int _refCount;
} ecere_gcc_struct;
extern long long __ecereNameSpace__ecere__com__eClass_GetProperty(struct __ecereNameSpace__ecere__com__Class * _class, const char * name);
extern void __ecereNameSpace__ecere__com__eClass_SetProperty(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, long long value);
extern void __ecereNameSpace__ecere__com__eClass_DestructionWatchable(struct __ecereNameSpace__ecere__com__Class * _class);
extern void __ecereNameSpace__ecere__com__eEnum_AddFixedValue(struct __ecereNameSpace__ecere__com__Class * _class, const char * string, long long value);
extern long long __ecereNameSpace__ecere__com__eEnum_AddValue(struct __ecereNameSpace__ecere__com__Class * _class, const char * string);
extern struct __ecereNameSpace__ecere__com__ClassProperty * __ecereNameSpace__ecere__com__eClass_AddClassProperty(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, const char * dataType, void * setStmt, void * getStmt);
extern void __ecereNameSpace__ecere__com__eClass_DoneAddingTemplateParameters(struct __ecereNameSpace__ecere__com__Class * base);
extern void * __ecereNameSpace__ecere__com__eInstance_New(struct __ecereNameSpace__ecere__com__Class * _class);
extern void __ecereNameSpace__ecere__com__eInstance_SetMethod(struct __ecereNameSpace__ecere__com__Instance * instance, const char * name, void * function);
extern void __ecereNameSpace__ecere__com__eInstance_IncRef(struct __ecereNameSpace__ecere__com__Instance * instance);
struct __ecereNameSpace__ecere__com__Instance * sourceDirs;
extern struct __ecereNameSpace__ecere__com__Instance * __ecereNameSpace__ecere__sys__FileOpenBuffered(const char * fileName, int mode);
// Full snapshot of the lexer state: current locations and declaration modes,
// the 30-deep include stack (buffers, files, source paths, locations, modes),
// and the raw scanner internals (the yy_* fields mirror the flex-generated
// lexer's buffer state). NOTE(review): presumably captured by pushLexer() and
// restored by popLexer() — confirm against their definitions.
struct LexerBackup
{
struct Location yylloc;
struct Location type_yylloc;
struct Location expression_yylloc;
int declMode;
int defaultDeclMode;
struct __ecereNameSpace__ecere__com__Instance * fileInput;
struct yy_buffer_state * include_stack[30];
struct __ecereNameSpace__ecere__com__Instance * fileStack[30];
char sourceFileStack[30][797];
struct Location locStack[30];
int declModeStack[30];
int include_stack_ptr;
struct yy_buffer_state * buffer;
int yy_n_chars;
char * yytext;
char * yy_c_buf_p;
void * yyin;
char yy_hold_char;
int yychar;
int yy_init;
int yy_start;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__MapIterator
{
struct __ecereNameSpace__ecere__com__Instance * container;
struct __ecereNameSpace__ecere__com__IteratorPointer * pointer;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__Iterator
{
struct __ecereNameSpace__ecere__com__Instance * container;
struct __ecereNameSpace__ecere__com__IteratorPointer * pointer;
} ecere_gcc_struct;
extern struct __ecereNameSpace__ecere__com__Instance * __ecereNameSpace__ecere__sys__FileOpen(const char * fileName, int mode);
unsigned int __ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(struct __ecereNameSpace__ecere__com__Instance * this, char * s, int max);
int __ecereVMethodID___ecereNameSpace__ecere__sys__File_Eof;
extern void __ecereNameSpace__ecere__com__eInstance_DecRef(struct __ecereNameSpace__ecere__com__Instance * instance);
int __ecereVMethodID___ecereNameSpace__ecere__com__Container_Add;
// Store the source-directory container (an ecere List instance) for later use.
void SetSourceDirs(struct __ecereNameSpace__ecere__com__Instance * list)
{
sourceDirs = list;
}
extern struct __ecereNameSpace__ecere__com__Instance * pushLexer(void);
extern void popLexer(struct __ecereNameSpace__ecere__com__Instance * backup);
struct __ecereNameSpace__ecere__com__Instance * __ecereProp___ecereNameSpace__ecere__com__MapIterator_Get_map(struct __ecereNameSpace__ecere__com__MapIterator * this);
void __ecereProp___ecereNameSpace__ecere__com__MapIterator_Set_map(struct __ecereNameSpace__ecere__com__MapIterator * this, struct __ecereNameSpace__ecere__com__Instance * value);
unsigned int __ecereMethod___ecereNameSpace__ecere__com__Iterator_Index(struct __ecereNameSpace__ecere__com__Iterator * this, const uint64 index, unsigned int create);
uint64 __ecereProp___ecereNameSpace__ecere__com__Iterator_Get_data(struct __ecereNameSpace__ecere__com__Iterator * this);
void __ecereProp___ecereNameSpace__ecere__com__Iterator_Set_data(struct __ecereNameSpace__ecere__com__Iterator * this, uint64 value);
unsigned int __ecereMethod___ecereNameSpace__ecere__com__Iterator_Next();
// Generated module teardown: drop the reference to the loadedModules map taken
// in __ecereCreateModuleInstances_loadSymbols and clear the pointer.
void __ecereDestroyModuleInstances_loadSymbols()
{
(__ecereNameSpace__ecere__com__eInstance_DecRef(loadedModules), loadedModules = 0);
}
struct __ecereNameSpace__ecere__sys__BTNode;
struct __ecereNameSpace__ecere__sys__BTNode
{
uintptr_t key;
struct __ecereNameSpace__ecere__sys__BTNode * parent;
struct __ecereNameSpace__ecere__sys__BTNode * left;
struct __ecereNameSpace__ecere__sys__BTNode * right;
int depth;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__DataMember;
extern struct __ecereNameSpace__ecere__com__DataMember * __ecereNameSpace__ecere__com__eMember_AddDataMember(struct __ecereNameSpace__ecere__com__DataMember * member, const char * name, const char * type, unsigned int size, unsigned int alignment, int declMode);
extern struct __ecereNameSpace__ecere__com__DataMember * __ecereNameSpace__ecere__com__eClass_AddDataMember(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, const char * type, unsigned int size, unsigned int alignment, int declMode);
extern struct __ecereNameSpace__ecere__com__DataMember * __ecereNameSpace__ecere__com__eMember_New(int type, int declMode);
extern unsigned int __ecereNameSpace__ecere__com__eMember_AddMember(struct __ecereNameSpace__ecere__com__DataMember * addTo, struct __ecereNameSpace__ecere__com__DataMember * dataMember);
extern unsigned int __ecereNameSpace__ecere__com__eClass_AddMember(struct __ecereNameSpace__ecere__com__Class * _class, struct __ecereNameSpace__ecere__com__DataMember * dataMember);
struct Symbol;
extern struct Symbol * DeclClass(struct Specifier * _class, const char * name);
extern struct Symbol * FindClass(const char * name);
extern void FreeSymbol(struct Symbol * symbol);
struct __ecereNameSpace__ecere__sys__OldLink;
struct __ecereNameSpace__ecere__sys__OldLink
{
struct __ecereNameSpace__ecere__sys__OldLink * prev;
struct __ecereNameSpace__ecere__sys__OldLink * next;
void * data;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__Property;
struct __ecereNameSpace__ecere__com__Property
{
struct __ecereNameSpace__ecere__com__Property * prev;
struct __ecereNameSpace__ecere__com__Property * next;
const char * name;
unsigned int isProperty;
int memberAccess;
int id;
struct __ecereNameSpace__ecere__com__Class * _class;
const char * dataTypeString;
struct __ecereNameSpace__ecere__com__Class * dataTypeClass;
struct Type * dataType;
void (* Set)(void * , int);
int (* Get)(void * );
unsigned int (* IsSet)(void * );
void * data;
void * symbol;
int vid;
unsigned int conversion;
unsigned int watcherOffset;
const char * category;
unsigned int compiled;
unsigned int selfWatchable;
unsigned int isWatchable;
} ecere_gcc_struct;
extern void __ecereNameSpace__ecere__com__eInstance_FireSelfWatchers(struct __ecereNameSpace__ecere__com__Instance * instance, struct __ecereNameSpace__ecere__com__Property * _property);
extern void __ecereNameSpace__ecere__com__eInstance_StopWatching(struct __ecereNameSpace__ecere__com__Instance * instance, struct __ecereNameSpace__ecere__com__Property * _property, struct __ecereNameSpace__ecere__com__Instance * object);
extern void __ecereNameSpace__ecere__com__eInstance_Watch(struct __ecereNameSpace__ecere__com__Instance * instance, struct __ecereNameSpace__ecere__com__Property * _property, void * object, void (* callback)(void * , void * ));
extern void __ecereNameSpace__ecere__com__eInstance_FireWatchers(struct __ecereNameSpace__ecere__com__Instance * instance, struct __ecereNameSpace__ecere__com__Property * _property);
extern struct __ecereNameSpace__ecere__com__Property * __ecereNameSpace__ecere__com__eClass_AddProperty(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, const char * dataType, void * setStmt, void * getStmt, int declMode);
extern void __ecereNameSpace__ecere__com__eProperty_Watchable(struct __ecereNameSpace__ecere__com__Property * _property);
struct DataRedefinition;
// Linked-list record of a symbol seen with two type strings; queued on
// dataRedefinitions and diagnosed by CheckDataRedefinitions().
struct DataRedefinition
{
struct DataRedefinition * prev;
struct DataRedefinition * next;
char name[1024];
char type1[1024];
char type2[1024];
} ecere_gcc_struct;
// Walk the queued data redefinitions, emit a warning for every entry whose two
// recorded type strings resolve and print to different types, then discard the
// whole list.
void CheckDataRedefinitions()
{
   struct DataRedefinition * redef;
   for(redef = dataRedefinitions.first; redef; redef = redef->next)
   {
      struct Type * firstType = ProcessTypeString(redef->type1, 0);
      struct Type * secondType = ProcessTypeString(redef->type2, 0);
      char firstString[1024] = "";
      char secondString[1024] = "";
      PrintType(firstType, firstString, 0, 1);
      PrintType(secondType, secondString, 0, 1);
      if(strcmp(firstString, secondString))
         Compiler_Warning(__ecereNameSpace__ecere__GetTranslatedString("ec", "Redefinition of %s (defining as %s, already defined as %s)\n", (((void *)0))), redef->name, firstString, secondString);
      FreeType(firstType);
      FreeType(secondType);
   }
   __ecereMethod___ecereNameSpace__ecere__sys__OldList_Free(&dataRedefinitions, (((void *)0)));
}
struct ImportedModule;
// Linked-list record describing one imported module: its name, kind/import
// type codes, whether it carries a global instance, whether it is DLL-only,
// and the access level of the import. NOTE(review): the meanings of the
// type/importType codes are defined elsewhere — confirm before relying on them.
struct ImportedModule
{
struct ImportedModule * prev;
struct ImportedModule * next;
char * name;
int type;
int importType;
unsigned int globalInstance;
unsigned int dllOnly;
int importAccess;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__sys__NamedItem;
// Generic doubly-linked list node carrying only a name; used here for the
// include/sys-include directory lists walked by OpenIncludeFile().
struct __ecereNameSpace__ecere__sys__NamedItem
{
struct __ecereNameSpace__ecere__sys__NamedItem * prev;
struct __ecereNameSpace__ecere__sys__NamedItem * next;
char * name;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__Instance * OpenIncludeFile(char * includeFile)
{
struct __ecereNameSpace__ecere__com__Instance * file;
char location[274];
__ecereNameSpace__ecere__sys__StripLastDirectory(sourceFileStack[(include_stack_ptr >= 0) ? include_stack_ptr : 0], location);
__ecereNameSpace__ecere__sys__PathCat(location, includeFile);
file = __ecereNameSpace__ecere__sys__FileOpen(location, 1);
if(file)
{
strcpy(sourceFileStack[include_stack_ptr + 1], location);
}
else if(inIDE)
{
struct __ecereNameSpace__ecere__sys__NamedItem * includeDir;
if(includeDirs)
{
for(includeDir = (*includeDirs).first; includeDir; includeDir = includeDir->next)
{
strcpy(location, includeDir->name);
__ecereNameSpace__ecere__sys__PathCat(location, includeFile);
file = __ecereNameSpace__ecere__sys__FileOpen(location, 1);
if(file)
break;
}
}
if(!file && sysIncludeDirs)
{
for(includeDir = (*sysIncludeDirs).first; includeDir; includeDir = includeDir->next)
{
strcpy(location, includeDir->name);
__ecereNameSpace__ecere__sys__PathCat(location, includeFile);
file = __ecereNameSpace__ecere__sys__FileOpen(location, 1);
if(file)
break;
}
}
}
return file;
}
struct Operand;
struct OpTable
{
unsigned int (* Add)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Sub)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Mul)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Div)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Mod)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Neg)(struct Expression *, struct Operand *);
unsigned int (* Inc)(struct Expression *, struct Operand *);
unsigned int (* Dec)(struct Expression *, struct Operand *);
unsigned int (* Asign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* AddAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* SubAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* MulAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* DivAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* ModAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* BitAnd)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* BitOr)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* BitXor)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* LShift)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* RShift)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* BitNot)(struct Expression *, struct Operand *);
unsigned int (* AndAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* OrAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* XorAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* LShiftAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* RShiftAsign)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Not)(struct Expression *, struct Operand *);
unsigned int (* Equ)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Nqu)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* And)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Or)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Grt)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Sma)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* GrtEqu)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* SmaEqu)(struct Expression *, struct Operand *, struct Operand *);
unsigned int (* Cond)(struct Expression *, struct Operand *, struct Operand *, struct Operand *);
} ecere_gcc_struct;
// Constant-folding operand (see GetOperand): the value's kind code, its Type,
// the raw value in one of the basic-type union members, and the table of
// operation handlers (ops) applicable to that kind.
struct Operand
{
int kind;
struct Type * type;
unsigned int ptrSize;
union
{
char c;
unsigned char uc;
short s;
unsigned short us;
int i;
unsigned int ui;
float f;
double d;
long long i64;
uint64 ui64;
} ecere_gcc_struct __anon1;
struct OpTable ops;
} ecere_gcc_struct;
extern struct Operand GetOperand(struct Expression * exp);
struct __ecereNameSpace__ecere__com__Method;
struct __ecereNameSpace__ecere__com__ClassTemplateArgument
{
union
{
struct
{
const char * dataTypeString;
struct __ecereNameSpace__ecere__com__Class * dataTypeClass;
} ecere_gcc_struct __anon1;
struct __ecereNameSpace__ecere__com__DataValue expression;
struct
{
const char * memberString;
union
{
struct __ecereNameSpace__ecere__com__DataMember * member;
struct __ecereNameSpace__ecere__com__Property * prop;
struct __ecereNameSpace__ecere__com__Method * method;
} ecere_gcc_struct __anon1;
} ecere_gcc_struct __anon2;
} ecere_gcc_struct __anon1;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__Method
{
const char * name;
struct __ecereNameSpace__ecere__com__Method * parent;
struct __ecereNameSpace__ecere__com__Method * left;
struct __ecereNameSpace__ecere__com__Method * right;
int depth;
int (* function)();
int vid;
int type;
struct __ecereNameSpace__ecere__com__Class * _class;
void * symbol;
const char * dataTypeString;
struct Type * dataType;
int memberAccess;
} ecere_gcc_struct;
struct Symbol
{
char * string;
struct Symbol * parent;
struct Symbol * left;
struct Symbol * right;
int depth;
struct Type * type;
union
{
struct __ecereNameSpace__ecere__com__Method * method;
struct __ecereNameSpace__ecere__com__Property * _property;
struct __ecereNameSpace__ecere__com__Class * registered;
} ecere_gcc_struct __anon1;
unsigned int notYetDeclared;
union
{
struct
{
struct External * pointerExternal;
struct External * structExternal;
} ecere_gcc_struct __anon1;
struct
{
struct External * externalGet;
struct External * externalSet;
struct External * externalPtr;
struct External * externalIsSet;
} ecere_gcc_struct __anon2;
struct
{
struct External * methodExternal;
struct External * methodCodeExternal;
} ecere_gcc_struct __anon3;
} ecere_gcc_struct __anon2;
unsigned int imported;
unsigned int declaredStructSym;
struct __ecereNameSpace__ecere__com__Class * _class;
unsigned int declaredStruct;
unsigned int needConstructor;
unsigned int needDestructor;
char * constructorName;
char * structName;
char * className;
char * destructorName;
struct ModuleImport * module;
struct ClassImport * _import;
struct Location nameLoc;
unsigned int isParam;
unsigned int isRemote;
unsigned int isStruct;
unsigned int fireWatchersDone;
int declaring;
unsigned int classData;
unsigned int isStatic;
char * shortName;
struct __ecereNameSpace__ecere__sys__OldList * templateParams;
struct __ecereNameSpace__ecere__sys__OldList templatedClasses;
struct Context * ctx;
int isIterator;
struct Expression * propCategory;
unsigned int mustRegister;
} ecere_gcc_struct;
extern struct __ecereNameSpace__ecere__com__Method * __ecereNameSpace__ecere__com__eClass_AddVirtualMethod(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, const char * type, void * function, int declMode);
extern struct __ecereNameSpace__ecere__com__Method * __ecereNameSpace__ecere__com__eClass_AddMethod(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, const char * type, void * function, int declMode);
extern struct __ecereNameSpace__ecere__com__ClassTemplateParameter * __ecereNameSpace__ecere__com__eClass_AddTemplateParameter(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, int type, const void * info, struct __ecereNameSpace__ecere__com__ClassTemplateArgument * defaultArg);
struct __ecereNameSpace__ecere__com__BitMember;
extern struct __ecereNameSpace__ecere__com__BitMember * __ecereNameSpace__ecere__com__eClass_AddBitMember(struct __ecereNameSpace__ecere__com__Class * _class, const char * name, const char * type, int bitSize, int bitPos, int declMode);
struct __ecereNameSpace__ecere__com__BitMember
{
struct __ecereNameSpace__ecere__com__BitMember * prev;
struct __ecereNameSpace__ecere__com__BitMember * next;
const char * name;
unsigned int isProperty;
int memberAccess;
int id;
struct __ecereNameSpace__ecere__com__Class * _class;
const char * dataTypeString;
struct __ecereNameSpace__ecere__com__Class * dataTypeClass;
struct Type * dataType;
int type;
int size;
int pos;
uint64 mask;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__Module;
extern struct __ecereNameSpace__ecere__com__Class * __ecereNameSpace__ecere__com__eSystem_FindClass(struct __ecereNameSpace__ecere__com__Instance * module, const char * name);
extern struct __ecereNameSpace__ecere__com__Instance * privateModule;
extern struct __ecereNameSpace__ecere__com__Class * __ecereNameSpace__ecere__com__eSystem_RegisterClass(int type, const char * name, const char * baseName, int size, int sizeClass, unsigned int (* Constructor)(void * ), void (* Destructor)(void * ), struct __ecereNameSpace__ecere__com__Instance * module, int declMode, int inheritanceAccess);
extern struct ModuleImport * FindModule(struct __ecereNameSpace__ecere__com__Instance * moduleToFind);
extern struct __ecereNameSpace__ecere__com__DefinedExpression * __ecereNameSpace__ecere__com__eSystem_RegisterDefine(const char * name, const char * value, struct __ecereNameSpace__ecere__com__Instance * module, int declMode);
extern struct __ecereNameSpace__ecere__com__GlobalFunction * __ecereNameSpace__ecere__com__eSystem_RegisterFunction(const char * name, const char * type, void * func, struct __ecereNameSpace__ecere__com__Instance * module, int declMode);
// Entry of the global-data binary trees. The first five fields form a
// BTNode-compatible header (key/parent/left/right/depth), which is why
// FreeGlobalData can remove entries via BinaryTree_Remove and run the BTNode
// destructor chain on them; the rest describe the symbol itself.
struct GlobalData
{
uintptr_t key;
struct __ecereNameSpace__ecere__sys__BTNode * parent;
struct __ecereNameSpace__ecere__sys__BTNode * left;
struct __ecereNameSpace__ecere__sys__BTNode * right;
int depth;
struct __ecereNameSpace__ecere__com__Instance * module;
char * dataTypeString;
struct Type * dataType;
void * symbol;
char * fullName;
} ecere_gcc_struct;
extern struct __ecereNameSpace__ecere__com__Instance * __ecereNameSpace__ecere__com__eModule_LoadStrict(struct __ecereNameSpace__ecere__com__Instance * fromModule, const char * name, int importAccess);
extern struct __ecereNameSpace__ecere__com__Instance * __thisModule;
struct __ecereNameSpace__ecere__sys__BinaryTree;
// Intrusive binary tree of BTNode entries with pluggable key-comparison and
// key-destruction callbacks.
struct __ecereNameSpace__ecere__sys__BinaryTree
{
struct __ecereNameSpace__ecere__sys__BTNode * root;
int count;
int (* CompareKey)(struct __ecereNameSpace__ecere__sys__BinaryTree * tree, uintptr_t a, uintptr_t b);
void (* FreeKey)(void * key);
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__NameSpace
{
const char * name;
struct __ecereNameSpace__ecere__com__NameSpace * btParent;
struct __ecereNameSpace__ecere__com__NameSpace * left;
struct __ecereNameSpace__ecere__com__NameSpace * right;
int depth;
struct __ecereNameSpace__ecere__com__NameSpace * parent;
struct __ecereNameSpace__ecere__sys__BinaryTree nameSpaces;
struct __ecereNameSpace__ecere__sys__BinaryTree classes;
struct __ecereNameSpace__ecere__sys__BinaryTree defines;
struct __ecereNameSpace__ecere__sys__BinaryTree functions;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__Class
{
struct __ecereNameSpace__ecere__com__Class * prev;
struct __ecereNameSpace__ecere__com__Class * next;
const char * name;
int offset;
int structSize;
void * * _vTbl;
int vTblSize;
unsigned int (* Constructor)(void * );
void (* Destructor)(void * );
int offsetClass;
int sizeClass;
struct __ecereNameSpace__ecere__com__Class * base;
struct __ecereNameSpace__ecere__sys__BinaryTree methods;
struct __ecereNameSpace__ecere__sys__BinaryTree members;
struct __ecereNameSpace__ecere__sys__BinaryTree prop;
struct __ecereNameSpace__ecere__sys__OldList membersAndProperties;
struct __ecereNameSpace__ecere__sys__BinaryTree classProperties;
struct __ecereNameSpace__ecere__sys__OldList derivatives;
int memberID;
int startMemberID;
int type;
struct __ecereNameSpace__ecere__com__Instance * module;
struct __ecereNameSpace__ecere__com__NameSpace * nameSpace;
const char * dataTypeString;
struct Type * dataType;
int typeSize;
int defaultAlignment;
void (* Initialize)();
int memberOffset;
struct __ecereNameSpace__ecere__sys__OldList selfWatchers;
const char * designerClass;
unsigned int noExpansion;
const char * defaultProperty;
unsigned int comRedefinition;
int count;
int isRemote;
unsigned int internalDecl;
void * data;
unsigned int computeSize;
short structAlignment;
short pointerAlignment;
int destructionWatchOffset;
unsigned int fixed;
struct __ecereNameSpace__ecere__sys__OldList delayedCPValues;
int inheritanceAccess;
const char * fullName;
void * symbol;
struct __ecereNameSpace__ecere__sys__OldList conversions;
struct __ecereNameSpace__ecere__sys__OldList templateParams;
struct __ecereNameSpace__ecere__com__ClassTemplateArgument * templateArgs;
struct __ecereNameSpace__ecere__com__Class * templateClass;
struct __ecereNameSpace__ecere__sys__OldList templatized;
int numParams;
unsigned int isInstanceClass;
unsigned int byValueSystemClass;
void * bindingsClass;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__com__DataMember
{
struct __ecereNameSpace__ecere__com__DataMember * prev;
struct __ecereNameSpace__ecere__com__DataMember * next;
const char * name;
unsigned int isProperty;
int memberAccess;
int id;
struct __ecereNameSpace__ecere__com__Class * _class;
const char * dataTypeString;
struct __ecereNameSpace__ecere__com__Class * dataTypeClass;
struct Type * dataType;
int type;
int offset;
int memberID;
struct __ecereNameSpace__ecere__sys__OldList members;
struct __ecereNameSpace__ecere__sys__BinaryTree membersAlpha;
int memberOffset;
short structAlignment;
short pointerAlignment;
} ecere_gcc_struct;
struct __ecereNameSpace__ecere__sys__BTNode * __ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_FindString(struct __ecereNameSpace__ecere__sys__BinaryTree * this, const char * key);
int __ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_CompareString(struct __ecereNameSpace__ecere__sys__BinaryTree * this, const char * a, const char * b);
unsigned int __ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_Add(struct __ecereNameSpace__ecere__sys__BinaryTree * this, struct __ecereNameSpace__ecere__sys__BTNode * node);
void __ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_Remove(struct __ecereNameSpace__ecere__sys__BinaryTree * this, struct __ecereNameSpace__ecere__sys__BTNode * node);
struct __ecereNameSpace__ecere__com__Application
{
int argc;
const char * * argv;
int exitCode;
unsigned int isGUIApp;
struct __ecereNameSpace__ecere__sys__OldList allModules;
char * parsedCommand;
struct __ecereNameSpace__ecere__com__NameSpace systemNameSpace;
} ecere_gcc_struct;
// Point the front end at the root namespace holding global data symbols.
void SetGlobalData(struct __ecereNameSpace__ecere__com__NameSpace * nameSpace)
{
globalData = nameSpace;
}
// Parse one member-listing block from a symbol (.sym) file f, line by line,
// and register the members either on the nested data member 'member' (when
// recursing into a [Struct]/[Union]) or directly on regClass.
//
// Format (as handled below): a line not starting with '[' names the next
// member; bracketed directives then describe it — [Size]/[Pos] (value on the
// following line) for bit members, [Public]/[Private] for access, [Type]
// (type string on the following line) commits the member, and
// [Struct]/[Union] recurses for an embedded aggregate. A line of "." (or EOF)
// ends the block.
static void ReadDataMembers(struct __ecereNameSpace__ecere__com__Class * regClass, struct __ecereNameSpace__ecere__com__DataMember * member, struct __ecereNameSpace__ecere__com__Instance * f)
{
char line[1024];
char name[1024];
int size = 0, bitPos = -1;
int memberAccess = 1;
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(line[0] == '[')
{
if(!strcmp(line, "[Size]"))
{
// Value is on the next line; applies to the upcoming bit member.
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
size = strtol(line, (((void *)0)), 0);
}
else if(!strcmp(line, "[Pos]"))
{
// Bit position for the upcoming bit member, also on the next line.
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
bitPos = strtol(line, (((void *)0)), 0);
}
else if(!strcmp(line, "[Public]"))
memberAccess = 1;
else if(!strcmp(line, "[Private]"))
memberAccess = 2;
else if(!strcmp(line, "[Type]"))
{
// The type string (next line) completes the pending member 'name'.
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(member)
{
// Inside a nested struct/union: attach to that aggregate.
if(!__ecereNameSpace__ecere__com__eMember_AddDataMember(member, name, line[0] ? line : 0, 0, 0, memberAccess))
;
}
else if(regClass && regClass->type == 2)
{
// Class type 2: bit members — apply the collected [Size]/[Pos].
// NOTE(review): 2 appears to be the bit-class type code — confirm
// against the ClassType enum.
struct __ecereNameSpace__ecere__com__BitMember * member = __ecereNameSpace__ecere__com__eClass_AddBitMember(regClass, name, line[0] ? line : 0, 0, 0, memberAccess);
if(member)
{
member->size = size;
member->pos = bitPos;
}
}
else if(regClass)
{
if(!__ecereNameSpace__ecere__com__eClass_AddDataMember(regClass, name, line[0] ? line : 0, 0, 0, memberAccess))
;
}
}
else if(!strcmp(line, "[Struct]") || !strcmp(line, "[Union]"))
{
// Embedded aggregate: create a container member (1 = union, 2 = struct)
// and recurse to fill it, then attach it to the current target.
struct __ecereNameSpace__ecere__com__DataMember * dataMember = (regClass || member) ? __ecereNameSpace__ecere__com__eMember_New((!strcmp(line, "[Union]")) ? 1 : 2, memberAccess) : (((void *)0));
ReadDataMembers((((void *)0)), dataMember, f);
if(member)
{
if(!__ecereNameSpace__ecere__com__eMember_AddMember(member, dataMember))
;
}
else if(regClass)
{
if(!__ecereNameSpace__ecere__com__eClass_AddMember(regClass, dataMember))
;
}
}
}
else
{
// A plain line starts a new member: remember its name and reset the
// per-member state collected by the bracketed directives.
size = 0;
bitPos = -1;
strcpy(name, line);
memberAccess = 1;
}
}
}
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass_GlobalData;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass_DataRedefinition;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass_ImportedModule;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass___ecereNameSpace__ecere__com__List_TPL_ecere__com__Module_;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass___ecereNameSpace__ecere__com__Map_TPL_String__ecere__com__List_TPL_ecere__com__Module___;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass___ecereNameSpace__ecere__sys__File;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass___ecereNameSpace__ecere__com__Module;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass___ecereNameSpace__ecere__com__List;
extern struct __ecereNameSpace__ecere__com__Class * __ecereClass___ecereNameSpace__ecere__sys__BTNode;
struct __ecereNameSpace__ecere__com__Module
{
struct __ecereNameSpace__ecere__com__Instance * application;
struct __ecereNameSpace__ecere__sys__OldList classes;
struct __ecereNameSpace__ecere__sys__OldList defines;
struct __ecereNameSpace__ecere__sys__OldList functions;
struct __ecereNameSpace__ecere__sys__OldList modules;
struct __ecereNameSpace__ecere__com__Instance * prev;
struct __ecereNameSpace__ecere__com__Instance * next;
const char * name;
void * library;
void * Unload;
int importType;
int origImportType;
struct __ecereNameSpace__ecere__com__NameSpace privateNameSpace;
struct __ecereNameSpace__ecere__com__NameSpace publicNameSpace;
} ecere_gcc_struct;
// Generated module init: instantiate the loadedModules map
// (Map<String, List<Module>>) and take a reference to it; released in
// __ecereDestroyModuleInstances_loadSymbols.
void __ecereCreateModuleInstances_loadSymbols()
{
loadedModules = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass___ecereNameSpace__ecere__com__Map_TPL_String__ecere__com__List_TPL_ecere__com__Module___);
__ecereNameSpace__ecere__com__eInstance_IncRef(loadedModules);
}
void FreeGlobalData(struct __ecereNameSpace__ecere__com__NameSpace * globalDataList)
{
struct __ecereNameSpace__ecere__com__NameSpace * ns;
struct GlobalData * data;
for(; (ns = (struct __ecereNameSpace__ecere__com__NameSpace *)globalDataList->nameSpaces.root); )
{
FreeGlobalData(ns);
__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_Remove(&globalDataList->nameSpaces, (struct __ecereNameSpace__ecere__sys__BTNode *)ns);
(__ecereNameSpace__ecere__com__eSystem_Delete((void *)(*ns).name), (*ns).name = 0);
(__ecereNameSpace__ecere__com__eSystem_Delete(ns), ns = 0);
}
for(; (data = (struct GlobalData *)globalDataList->functions.root); )
{
__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_Remove(&globalDataList->functions, (void *)(data));
if(data->symbol)
FreeSymbol(data->symbol);
FreeType(data->dataType);
(__ecereNameSpace__ecere__com__eSystem_Delete(data->fullName), data->fullName = 0);
(__ecereNameSpace__ecere__com__eSystem_Delete(data->dataTypeString), data->dataTypeString = 0);
((data ? __extension__ ({
void * __ecerePtrToDelete = (data);
__ecereClass_GlobalData->Destructor ? __ecereClass_GlobalData->Destructor((void *)__ecerePtrToDelete) : 0, __ecereClass___ecereNameSpace__ecere__sys__BTNode->Destructor ? __ecereClass___ecereNameSpace__ecere__sys__BTNode->Destructor((void *)__ecerePtrToDelete) : 0, __ecereNameSpace__ecere__com__eSystem_Delete(__ecerePtrToDelete);
}) : 0), data = 0);
}
}
void __ecereUnregisterModule_loadSymbols(struct __ecereNameSpace__ecere__com__Instance * module)
{
}
void ImportModule(const char * name, int importType, int importAccess, unsigned int loadDllOnly);
unsigned int LoadSymbols(const char * fileName, int importType, unsigned int loadDllOnly)
{
struct __ecereNameSpace__ecere__com__Instance * f = __ecereNameSpace__ecere__sys__FileOpenBuffered(fileName, 1);
unsigned int globalInstance = 0;
if(f)
{
unsigned int ecereCOMModule = 0;
char moduleName[797];
__ecereNameSpace__ecere__sys__GetLastDirectory(fileName, moduleName);
if(!((strcasecmp)(moduleName, "instance.sym") && (strcasecmp)(moduleName, "BinaryTree.sym") && (strcasecmp)(moduleName, "dataTypes.sym") && (strcasecmp)(moduleName, "OldList.sym") && (strcasecmp)(moduleName, "String.sym") && (strcasecmp)(moduleName, "BTNode.sym") && (strcasecmp)(moduleName, "Array.sym") && (strcasecmp)(moduleName, "AVLTree.sym") && (strcasecmp)(moduleName, "BuiltInContainer.sym") && (strcasecmp)(moduleName, "Container.sym") && (strcasecmp)(moduleName, "CustomAVLTree.sym") && (strcasecmp)(moduleName, "LinkList.sym") && (strcasecmp)(moduleName, "List.sym") && (strcasecmp)(moduleName, "Map.sym") && (strcasecmp)(moduleName, "Mutex.sym")))
ecereCOMModule = 1;
for(; ; )
{
char line[1024];
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(line[0] == '[')
{
if(!strcmp(line, "[Global Instance]"))
globalInstance = 1;
else if(!strcmp(line, "[Defined Classes]"))
{
struct __ecereNameSpace__ecere__com__Class * regClass = (((void *)0));
char name[1024];
unsigned int isRemote = 0;
unsigned int isStatic = 0;
unsigned int isWatchable = 0;
int classType = 0;
unsigned int fixed = 0;
unsigned int noExpansion = 0;
int inheritanceAccess = 1;
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(line[0] == '[')
{
if(!strcmp(line, "[Remote]"))
isRemote = 1;
else if(!strcmp(line, "[Static]"))
isStatic = 1;
else if(!strcmp(line, "[Fixed]"))
fixed = 1;
else if(!strcmp(line, "[No Expansion]"))
noExpansion = 1;
else if(!strcmp(line, "[Watchable]"))
isWatchable = 1;
else if(!strcmp(line, "[Enum]"))
classType = 4;
else if(!strcmp(line, "[Bit]"))
classType = 2;
else if(!strcmp(line, "[Struct]"))
classType = 1;
else if(!strcmp(line, "[Unit]"))
classType = 3;
else if(!strcmp(line, "[NoHead]"))
classType = 5;
else if(!strcmp(line, "[Base]") || !strcmp(line, "[Private Base]"))
{
if(!strcmp(line, "[Private Base]"))
inheritanceAccess = 2;
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(importType == 3)
DeclClass((((void *)0)), name);
if(isStatic || loadDllOnly || importType == 3 || importType == 4)
regClass = (((void *)0));
else if(regClass = __ecereNameSpace__ecere__com__eSystem_FindClass(privateModule, name), !regClass || regClass->internalDecl || regClass->isRemote)
{
struct Symbol * existingClass = FindClass(name);
const char * baseName = (classType == 0 && importType == 2 && isRemote) ? "DCOMClientObject" : (!strcmp(line, "[None]") ? (((void *)0)) : line);
if(!isRemote || (importType != 2) || (!sourceFile || !strstr(sourceFile, ".main.ec")))
{
if(!regClass || regClass->internalDecl)
regClass = __ecereNameSpace__ecere__com__eSystem_RegisterClass(classType, name, isRemote ? (((void *)0)) : baseName, 0, 0, (((void *)0)), (((void *)0)), privateModule, ecereCOMModule ? 4 : 1, inheritanceAccess);
if(regClass && isRemote)
regClass->isRemote = (importType == 2) ? 1 : 2;
if(isRemote)
{
if(importType == 2)
{
char className[1024] = "DCOMClient_";
strcat(className, name);
if(!existingClass)
existingClass = DeclClass((((void *)0)), name);
regClass = __ecereNameSpace__ecere__com__eSystem_RegisterClass(classType, className, baseName, 0, 0, (((void *)0)), (((void *)0)), privateModule, ecereCOMModule ? 4 : 1, inheritanceAccess);
}
if(regClass)
regClass->isRemote = (importType == 2) ? 1 : 3;
}
if(existingClass)
{
struct __ecereNameSpace__ecere__sys__OldLink * link;
for(link = existingClass->templatedClasses.first; link; link = link->next)
{
struct Symbol * symbol = link->data;
symbol->__anon1.registered = __ecereNameSpace__ecere__com__eSystem_FindClass(privateModule, symbol->string);
}
}
if(fixed)
regClass->fixed = 1;
if(noExpansion)
regClass->noExpansion = 1;
if(isWatchable)
{
__ecereNameSpace__ecere__com__eClass_DestructionWatchable(regClass);
regClass->structSize = regClass->offset;
}
if(regClass && existingClass)
{
existingClass->__anon1.registered = regClass;
regClass->symbol = existingClass;
existingClass->notYetDeclared = 1;
existingClass->imported = 1;
if(regClass->module)
existingClass->module = FindModule(regClass->module);
else
existingClass->module = mainModule;
}
}
else
regClass = (((void *)0));
}
else
regClass = (((void *)0));
isRemote = 0;
isWatchable = 0;
fixed = 0;
isStatic = 0;
}
else if(!strcmp(line, "[Enum Values]"))
{
for(; ; )
{
char * equal;
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(regClass)
{
equal = strchr(line, '=');
if(equal)
{
char name[1024];
memcpy(name, line, (int)(equal - line));
name[equal - line] = '\0';
__ecereNameSpace__ecere__sys__TrimLSpaces(name, name);
__ecereNameSpace__ecere__sys__TrimRSpaces(name, name);
__ecereNameSpace__ecere__com__eEnum_AddFixedValue(regClass, name, strtoll(equal + 1, (((void *)0)), 0));
}
else
{
__ecereNameSpace__ecere__com__eEnum_AddValue(regClass, line);
}
}
}
}
else if(!strcmp(line, "[Defined Methods]"))
{
char name[1024];
unsigned int isVirtual = 0;
int memberAccess = 1;
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(line[0] == '[')
{
if(!strcmp(line, "[Type]"))
{
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
if(regClass)
{
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(isVirtual)
__ecereNameSpace__ecere__com__eClass_AddVirtualMethod(regClass, name, line[0] ? line : 0, DummyMethod, memberAccess);
else
__ecereNameSpace__ecere__com__eClass_AddMethod(regClass, name, line[0] ? line : 0, DummyMethod, memberAccess);
}
}
else if(!strcmp(line, "[Virtual]"))
isVirtual = 1;
else if(!strcmp(line, "[Public]"))
memberAccess = 1;
else if(!strcmp(line, "[Private]"))
memberAccess = 2;
}
else
{
strcpy(name, line);
isVirtual = 0;
memberAccess = 1;
}
}
}
else if(!strcmp(line, "[Defined Properties]"))
{
char name[1024];
unsigned int setStmt = 0, getStmt = 0, isVirtual = 0, conversion = 0;
unsigned int isWatchable = 0;
int memberAccess = 1;
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(line[0] == '[')
{
if(!strcmp(line, "[Type]"))
{
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass)
{
struct __ecereNameSpace__ecere__com__Property * prop = __ecereNameSpace__ecere__com__eClass_AddProperty(regClass, conversion ? (((void *)0)) : name, line[0] ? line : 0, (void *)(uintptr_t)setStmt, (void *)(uintptr_t)getStmt, memberAccess);
if(prop)
{
prop->compiled = 0;
if(isWatchable)
{
__ecereNameSpace__ecere__com__eProperty_Watchable(prop);
regClass->structSize = regClass->offset;
}
}
}
}
else if(!strcmp(line, "[Set]"))
setStmt = 1;
else if(!strcmp(line, "[Get]"))
getStmt = 1;
else if(!strcmp(line, "[Watchable]"))
isWatchable = 1;
else if(!strcmp(line, "[Public]"))
memberAccess = 1;
else if(!strcmp(line, "[Private]"))
memberAccess = 2;
else if(!strcmp(line, "[Conversion]"))
{
conversion = 1;
setStmt = getStmt = isVirtual = isWatchable = 0;
}
}
else
{
strcpy(name, line);
setStmt = getStmt = isVirtual = conversion = isWatchable = 0;
memberAccess = 1;
}
}
}
else if(!strcmp(line, "[Defined Class Properties]"))
{
char name[1024];
unsigned int setStmt = 0, getStmt = 0;
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(line[0] == '[')
{
if(!strcmp(line, "[Type]"))
{
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass)
{
__ecereNameSpace__ecere__com__eClass_AddClassProperty(regClass, name, line, (void *)(uintptr_t)setStmt, (void *)(uintptr_t)getStmt);
}
}
else if(!strcmp(line, "[Set]"))
setStmt = 1;
else if(!strcmp(line, "[Get]"))
getStmt = 1;
}
else
{
strcpy(name, line);
setStmt = getStmt = 0;
}
}
}
else if(!strcmp(line, "[Defined Data Members]"))
{
ReadDataMembers(regClass, (((void *)0)), f);
}
else if(!strcmp(line, "[Template Parameters]"))
{
while(!(__extension__ ({
unsigned int (* __internal_VirtualMethod)(struct __ecereNameSpace__ecere__com__Instance *);
__internal_VirtualMethod = ((unsigned int (*)(struct __ecereNameSpace__ecere__com__Instance *))__extension__ ({
struct __ecereNameSpace__ecere__com__Instance * __internal_ClassInst = f;
__internal_ClassInst ? __internal_ClassInst->_vTbl : __ecereClass___ecereNameSpace__ecere__sys__File->_vTbl;
})[__ecereVMethodID___ecereNameSpace__ecere__sys__File_Eof]);
__internal_VirtualMethod ? __internal_VirtualMethod(f) : (unsigned int)1;
})))
{
char name[1024];
int type = 0;
struct __ecereNameSpace__ecere__com__ClassTemplateArgument defaultArg =
{
.__anon1 = {
.__anon1 = {
.dataTypeString = 0
}
}
};
void * info = (((void *)0));
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(line[0] == '.')
break;
strcpy(name, line);
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "[Expression]"))
type = 2;
else if(!strcmp(line, "[Identifier]"))
type = 1;
switch(type)
{
case 0:
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass && strcmp(line, "[None]"))
{
info = __ecereNameSpace__ecere__sys__CopyString(line);
}
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass && strcmp(line, "[None]"))
{
defaultArg.__anon1.__anon1.dataTypeString = __ecereNameSpace__ecere__sys__CopyString(line);
}
break;
case 2:
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass && strcmp(line, "[None]"))
{
info = __ecereNameSpace__ecere__sys__CopyString(line);
}
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass && strcmp(line, "[None]"))
{
struct __ecereNameSpace__ecere__com__Instance * backup = pushLexer();
struct Operand op;
struct Expression * exp;
skipErrors = 1;
exp = ParseExpressionString(line);
if(exp)
{
if(info)
exp->destType = ProcessTypeString(info, 0);
ProcessExpressionType(exp);
ComputeExpression(exp);
op = GetOperand(exp);
defaultArg.__anon1.expression.__anon1.ui64 = op.__anon1.ui64;
FreeExpression(exp);
}
skipErrors = 0;
popLexer(backup);
}
break;
case 1:
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "[Data member]"))
info = (void *)0;
else if(!strcmp(line, "[Method]"))
info = (void *)1;
else if(!strcmp(line, "[Property]"))
info = (void *)2;
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(regClass && strcmp(line, "[None]"))
{
defaultArg.__anon1.__anon2.memberString = __ecereNameSpace__ecere__sys__CopyString(line);
}
break;
}
if(regClass)
__ecereNameSpace__ecere__com__eClass_AddTemplateParameter(regClass, name, type, info, &defaultArg);
if(type == 0 || type == 2)
(__ecereNameSpace__ecere__com__eSystem_Delete(info), info = 0);
if(type == 0 || type == 1)
(__ecereNameSpace__ecere__com__eSystem_Delete((void *)defaultArg.__anon1.__anon1.dataTypeString), defaultArg.__anon1.__anon1.dataTypeString = 0);
}
if(regClass)
__ecereNameSpace__ecere__com__eClass_DoneAddingTemplateParameters(regClass);
}
}
else
{
inheritanceAccess = 1;
classType = 0;
isRemote = 0;
strcpy(name, line);
regClass = (((void *)0));
}
}
}
else if(!strcmp(line, "[Defined Expressions]"))
{
char name[1024];
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(!strcmp(line, "[Value]"))
{
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!loadDllOnly && importType != 3 && importType != 4)
__ecereNameSpace__ecere__com__eSystem_RegisterDefine(name, line, privateModule, ecereCOMModule ? 4 : 1);
}
else if(line[0] != '[')
{
strcpy(name, line);
}
}
}
else if(!strcmp(line, "[Defined Functions]"))
{
char name[1024];
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(!strcmp(line, "[Type]"))
{
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!loadDllOnly && importType != 3 && importType != 4)
__ecereNameSpace__ecere__com__eSystem_RegisterFunction(name, line, (((void *)0)), privateModule, ecereCOMModule ? 4 : 1);
}
else if(line[0] != '[')
{
strcpy(name, line);
}
}
}
else if(!strcmp(line, "[Defined Data]"))
{
char name[1024];
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(!strcmp(line, "[Type]"))
{
__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line));
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!loadDllOnly && importType != 3 && importType != 4)
{
int start = 0, c;
struct __ecereNameSpace__ecere__com__NameSpace * nameSpace = globalData;
struct GlobalData * data;
for(c = 0; name[c]; c++)
{
if(name[c] == '.' || (name[c] == ':' && name[c + 1] == ':'))
{
struct __ecereNameSpace__ecere__com__NameSpace * newSpace;
char * spaceName = __ecereNameSpace__ecere__com__eSystem_New(sizeof(char) * (c - start + 1));
strncpy(spaceName, name + start, c - start);
spaceName[c - start] = '\0';
newSpace = (struct __ecereNameSpace__ecere__com__NameSpace *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_FindString(&(*nameSpace).nameSpaces, spaceName);
if(!newSpace)
{
newSpace = __ecereNameSpace__ecere__com__eSystem_New0(sizeof(struct __ecereNameSpace__ecere__com__NameSpace) * (1));
(*newSpace).classes.CompareKey = (void *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_CompareString;
(*newSpace).defines.CompareKey = (void *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_CompareString;
(*newSpace).functions.CompareKey = (void *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_CompareString;
(*newSpace).nameSpaces.CompareKey = (void *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_CompareString;
(*newSpace).name = spaceName;
(*newSpace).parent = nameSpace;
__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_Add(&(*nameSpace).nameSpaces, (struct __ecereNameSpace__ecere__sys__BTNode *)newSpace);
}
else
(__ecereNameSpace__ecere__com__eSystem_Delete(spaceName), spaceName = 0);
nameSpace = newSpace;
if(name[c] == ':')
c++;
start = c + 1;
}
}
if(c - start)
{
data = (struct GlobalData *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_FindString(&(*nameSpace).functions, name + start);
if(!data)
{
data = __extension__ ({
struct GlobalData * __ecereInstance1 = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass_GlobalData);
__ecereInstance1->fullName = __ecereNameSpace__ecere__sys__CopyString(name), __ecereInstance1->dataTypeString = __ecereNameSpace__ecere__sys__CopyString(line), __ecereInstance1->module = privateModule, __ecereInstance1;
});
data->key = (uintptr_t)(data->fullName + start);
__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_Add(&(*nameSpace).functions, (struct __ecereNameSpace__ecere__sys__BTNode *)data);
}
else if(strcmp(data->dataTypeString, line))
{
struct DataRedefinition * redefinition = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass_DataRedefinition);
strcpy(redefinition->name, name);
strcpy(redefinition->type1, data->dataTypeString);
strcpy(redefinition->type2, line);
__ecereMethod___ecereNameSpace__ecere__sys__OldList_Add(&dataRedefinitions, redefinition);
}
}
}
}
else if(line[0] != '[')
{
strcpy(name, line);
}
}
}
else if(!strcmp(line, "[Imported Modules]"))
{
int moduleImportType = 0;
int importAccess = 1;
for(; ; )
{
if(!__ecereMethod___ecereNameSpace__ecere__sys__File_GetLine(f, line, sizeof (line)))
break;
__ecereNameSpace__ecere__sys__TrimLSpaces(line, line);
if(!strcmp(line, "."))
break;
if(!strcmp(line, "[Static]"))
moduleImportType = 1;
else if(!strcmp(line, "[Remote]"))
moduleImportType = 2;
else if(!strcmp(line, "[Private]"))
importAccess = 2;
else if(line[0] != '[')
{
if(importType != 3 && importType != 4)
ImportModule(line, moduleImportType, importAccess, loadDllOnly);
else
ImportModule(line, 4, importAccess, loadDllOnly);
if(!strcmp(line, "ecere"))
ecereImported = 1;
moduleImportType = 0;
importAccess = 1;
}
}
}
}
}
(__ecereNameSpace__ecere__com__eInstance_DecRef(f), f = 0);
}
else if(importType != 4)
{
char sysFileName[797];
__ecereNameSpace__ecere__sys__GetSystemPathBuffer(sysFileName, fileName);
Compiler_Error(__ecereNameSpace__ecere__GetTranslatedString("ec", "Couldn't open %s\n", (((void *)0))), sysFileName);
}
return globalInstance;
}
void ImportModule(const char * name, int importType, int importAccess, unsigned int loadDllOnly)
{
struct ImportedModule * module = (((void *)0));
char moduleName[797];
unsigned int isSourceModule = 0;
if(sourceFile)
{
char sourceFileModule[274];
__ecereNameSpace__ecere__sys__GetLastDirectory(sourceFile, sourceFileModule);
__ecereNameSpace__ecere__sys__StripExtension(sourceFileModule);
if(!(strcasecmp)(sourceFileModule, name))
isSourceModule = 1;
}
strncpy(moduleName, name, (797) - 1);
moduleName[(797) - 1] = 0;
__ecereNameSpace__ecere__sys__StripExtension(moduleName);
for(module = (*defines).first; module; module = module->next)
{
if(module->type == 0 && !(strcasecmp)(module->name, moduleName) && ((importType == 2) == (module->importType == 2) || isSourceModule))
break;
}
if((!module || (module->dllOnly && !loadDllOnly)) && strlen(name) < (274))
{
char ext[17];
struct __ecereNameSpace__ecere__com__Instance * loadedModule = (((void *)0));
char symFile[797];
symFile[0] = '\0';
__ecereNameSpace__ecere__sys__GetExtension(name, ext);
strcpy(symFile, symbolsDir ? symbolsDir : "");
__ecereNameSpace__ecere__sys__PathCat(symFile, name);
__ecereNameSpace__ecere__sys__ChangeExtension(symFile, "sym", symFile);
if(!strcmp(ext, "dll") || !strcmp(ext, "so") || !strcmp(ext, "dylib") || !ext[0])
{
if(importType != 4)
{
if(!module)
{
if(precompDefines)
{
module = __extension__ ({
struct ImportedModule * __ecereInstance1 = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass_ImportedModule);
__ecereInstance1->name = __ecereNameSpace__ecere__sys__CopyString(moduleName), __ecereInstance1->type = 0, __ecereInstance1->importType = importType, __ecereInstance1->importAccess = importAccess, __ecereInstance1;
});
__ecereMethod___ecereNameSpace__ecere__sys__OldList_Add((&*precompDefines), module);
}
module = __extension__ ({
struct ImportedModule * __ecereInstance1 = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass_ImportedModule);
__ecereInstance1->name = __ecereNameSpace__ecere__sys__CopyString(moduleName), __ecereInstance1->type = 0, __ecereInstance1->importType = importType, __ecereInstance1->importAccess = importAccess, __ecereInstance1;
});
__ecereMethod___ecereNameSpace__ecere__sys__OldList_AddName((&*defines), module);
}
module->dllOnly = loadDllOnly;
if(ext[0] || !__ecereNameSpace__ecere__sys__FileExists(symFile))
{
unsigned int skipLoad = 0;
struct __ecereNameSpace__ecere__com__Instance * list = (((void *)0));
if(!inCompiler && !inPreCompiler && !inSymbolGen && !inDocumentor)
{
struct __ecereNameSpace__ecere__com__MapIterator it = (it.container = (void *)0, it.pointer = (void *)0, __ecereProp___ecereNameSpace__ecere__com__MapIterator_Set_map(&it, loadedModules), it);
if(!__ecereMethod___ecereNameSpace__ecere__com__Iterator_Index((void *)(&it), (uint64)(uintptr_t)(name), 0))
{
struct __ecereNameSpace__ecere__com__Instance * firstModule = __ecereNameSpace__ecere__com__eModule_LoadStrict(((struct __ecereNameSpace__ecere__com__Module *)(((char *)__thisModule + sizeof(struct __ecereNameSpace__ecere__com__Instance))))->application, name, importAccess);
if(firstModule)
{
list = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass___ecereNameSpace__ecere__com__List_TPL_ecere__com__Module_);
(__extension__ ({
struct __ecereNameSpace__ecere__com__IteratorPointer * (* __internal_VirtualMethod)(struct __ecereNameSpace__ecere__com__Instance *, uint64 value);
__internal_VirtualMethod = ((struct __ecereNameSpace__ecere__com__IteratorPointer * (*)(struct __ecereNameSpace__ecere__com__Instance *, uint64 value))__extension__ ({
struct __ecereNameSpace__ecere__com__Instance * __internal_ClassInst = list;
__internal_ClassInst ? __internal_ClassInst->_vTbl : __ecereClass___ecereNameSpace__ecere__com__List->_vTbl;
})[__ecereVMethodID___ecereNameSpace__ecere__com__Container_Add]);
__internal_VirtualMethod ? __internal_VirtualMethod(list, (uint64)(uintptr_t)(firstModule)) : (struct __ecereNameSpace__ecere__com__IteratorPointer *)1;
}));
__extension__ ({
struct __ecereNameSpace__ecere__com__Iterator __internalIterator =
{
loadedModules, 0
};
__ecereMethod___ecereNameSpace__ecere__com__Iterator_Index(&__internalIterator, ((uint64)(uintptr_t)(name)), 1);
__ecereProp___ecereNameSpace__ecere__com__Iterator_Set_data(&__internalIterator, (uint64)(uintptr_t)(list));
});
}
else
skipLoad = 1;
}
else
list = ((struct __ecereNameSpace__ecere__com__Instance *)(uintptr_t)__ecereProp___ecereNameSpace__ecere__com__Iterator_Get_data((void *)(&it)));
}
if(!skipLoad)
{
loadedModule = __ecereNameSpace__ecere__com__eModule_LoadStrict(privateModule, name, importAccess);
if(loadedModule)
{
((struct __ecereNameSpace__ecere__com__Module *)(((char *)loadedModule + sizeof(struct __ecereNameSpace__ecere__com__Instance))))->importType = importType;
module->dllOnly = 0;
if(list)
(__extension__ ({
struct __ecereNameSpace__ecere__com__IteratorPointer * (* __internal_VirtualMethod)(struct __ecereNameSpace__ecere__com__Instance *, uint64 value);
__internal_VirtualMethod = ((struct __ecereNameSpace__ecere__com__IteratorPointer * (*)(struct __ecereNameSpace__ecere__com__Instance *, uint64 value))__extension__ ({
struct __ecereNameSpace__ecere__com__Instance * __internal_ClassInst = list;
__internal_ClassInst ? __internal_ClassInst->_vTbl : __ecereClass___ecereNameSpace__ecere__com__List->_vTbl;
})[__ecereVMethodID___ecereNameSpace__ecere__com__Container_Add]);
__internal_VirtualMethod ? __internal_VirtualMethod(list, (uint64)(uintptr_t)(loadedModule)) : (struct __ecereNameSpace__ecere__com__IteratorPointer *)1;
}));
}
}
}
}
}
if(!loadedModule && (!strcmp(ext, "ec") || !strcmp(ext, "sym") || !ext[0]))
{
{
if(!module)
{
if(precompDefines)
{
module = __extension__ ({
struct ImportedModule * __ecereInstance1 = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass_ImportedModule);
__ecereInstance1->name = __ecereNameSpace__ecere__sys__CopyString(moduleName), __ecereInstance1->type = 0, __ecereInstance1->importType = importType, __ecereInstance1->importAccess = importAccess, __ecereInstance1;
});
__ecereMethod___ecereNameSpace__ecere__sys__OldList_Add((&*precompDefines), module);
}
module = __extension__ ({
struct ImportedModule * __ecereInstance1 = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass_ImportedModule);
__ecereInstance1->name = __ecereNameSpace__ecere__sys__CopyString(moduleName), __ecereInstance1->type = 0, __ecereInstance1->importType = importType, __ecereInstance1->importAccess = importAccess, __ecereInstance1;
});
__ecereMethod___ecereNameSpace__ecere__sys__OldList_AddName((&*defines), module);
}
module->dllOnly = loadDllOnly;
if(inPreCompiler)
return ;
if(inIDE && !__ecereNameSpace__ecere__sys__FileExists(symFile) && sourceDirs)
{
{
struct __ecereNameSpace__ecere__com__Iterator dir =
{
(sourceDirs), 0
};
while(__ecereMethod___ecereNameSpace__ecere__com__Iterator_Next(&dir))
{
char configDir[274];
strcpy(symFile, ((char * )((uintptr_t)(__ecereProp___ecereNameSpace__ecere__com__Iterator_Get_data(&dir)))));
__ecereNameSpace__ecere__sys__PathCat(symFile, "obj");
sprintf(configDir, "debug.%s", (__runtimePlatform == 1) ? "win32" : (__runtimePlatform == 3) ? "apple" : "linux");
__ecereNameSpace__ecere__sys__PathCat(symFile, configDir);
__ecereNameSpace__ecere__sys__PathCat(symFile, name);
__ecereNameSpace__ecere__sys__ChangeExtension(symFile, "sym", symFile);
if(__ecereNameSpace__ecere__sys__FileExists(symFile))
break;
}
}
}
if(!__ecereNameSpace__ecere__sys__FileExists(symFile))
{
char fileName[274];
__ecereNameSpace__ecere__sys__GetLastDirectory(symFile, fileName);
strcpy(symFile, symbolsDir ? symbolsDir : "");
__ecereNameSpace__ecere__sys__PathCat(symFile, fileName);
}
module->globalInstance = LoadSymbols(symFile, importType, loadDllOnly);
}
}
}
}
void __ecereRegisterModule_loadSymbols(struct __ecereNameSpace__ecere__com__Instance * module)
{
struct __ecereNameSpace__ecere__com__Class __attribute__((unused)) * class;
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetGlobalData", "void SetGlobalData(ecere::com::NameSpace * nameSpace)", SetGlobalData, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetInIDE", "void SetInIDE(bool b)", SetInIDE, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetSourceDirs", "void SetSourceDirs(ecere::com::List<String> list)", SetSourceDirs, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetIncludeDirs", "void SetIncludeDirs(ecere::sys::OldList * list)", SetIncludeDirs, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetSysIncludeDirs", "void SetSysIncludeDirs(ecere::sys::OldList * list)", SetSysIncludeDirs, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetEcereImported", "void SetEcereImported(bool b)", SetEcereImported, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("GetEcereImported", "bool GetEcereImported(void)", GetEcereImported, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetInPreCompiler", "void SetInPreCompiler(bool b)", SetInPreCompiler, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetInSymbolGen", "void SetInSymbolGen(bool b)", SetInSymbolGen, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetInDocumentor", "void SetInDocumentor(bool b)", SetInDocumentor, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("SetPrecompDefines", "void SetPrecompDefines(ecere::sys::OldList * list)", SetPrecompDefines, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("DummyMethod", "bool DummyMethod(void)", DummyMethod, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("LoadSymbols", "bool LoadSymbols(const char * fileName, ecere::com::ImportType importType, bool loadDllOnly)", LoadSymbols, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("ImportModule", "void ImportModule(const char * name, ecere::com::ImportType importType, ecere::com::AccessMode importAccess, bool loadDllOnly)", ImportModule, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("FindIncludeFileID", "int FindIncludeFileID(char * includeFile)", FindIncludeFileID, module, 2);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("GetIncludeFileID", "int GetIncludeFileID(char * includeFile)", GetIncludeFileID, module, 2);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("GetIncludeFileFromID", "char * GetIncludeFileFromID(int id)", GetIncludeFileFromID, module, 2);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("OpenIncludeFile", "ecere::sys::File OpenIncludeFile(char * includeFile)", OpenIncludeFile, module, 2);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("FreeIncludeFiles", "void FreeIncludeFiles(void)", FreeIncludeFiles, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("FreeGlobalData", "void FreeGlobalData(ecere::com::NameSpace globalDataList)", FreeGlobalData, module, 1);
__ecereNameSpace__ecere__com__eSystem_RegisterFunction("CheckDataRedefinitions", "void CheckDataRedefinitions(void)", CheckDataRedefinitions, module, 1);
}
| 33.57656 | 659 | 0.802317 | [
"object"
] |
9e7f91c8728a3c0c69ac3108bad085ed8eeed7d4 | 1,072 | h | C | src/wallet/load.h | popy1970/blinkhash-core | d3135bcdb648e8de64ce5395b976b7f359ef822b | [
"MIT"
] | 3 | 2021-07-27T16:59:47.000Z | 2021-12-31T20:55:46.000Z | src/wallet/load.h | popy1970/blinkhash-core | d3135bcdb648e8de64ce5395b976b7f359ef822b | [
"MIT"
] | null | null | null | src/wallet/load.h | popy1970/blinkhash-core | d3135bcdb648e8de64ce5395b976b7f359ef822b | [
"MIT"
] | 1 | 2021-12-31T12:58:23.000Z | 2021-12-31T12:58:23.000Z | // Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BLINKHASH_WALLET_LOAD_H
#define BLINKHASH_WALLET_LOAD_H
#include <string>
#include <vector>
class ArgsManager;
class CScheduler;
struct WalletContext;
namespace interfaces {
class Chain;
} // namespace interfaces
//! Responsible for reading and validating the -wallet arguments and verifying the wallet database.
bool VerifyWallets(WalletContext& context);
//! Load wallet databases.
bool LoadWallets(WalletContext& context);
//! Complete startup of wallets.
void StartWallets(WalletContext& context, CScheduler& scheduler);
//! Flush all wallets in preparation for shutdown.
void FlushWallets(WalletContext& context);
//! Stop all wallets. Wallets will be flushed first.
void StopWallets(WalletContext& context);
//! Close all wallets.
void UnloadWallets(WalletContext& context);
#endif // BLINKHASH_WALLET_LOAD_H
| 27.487179 | 99 | 0.789179 | [
"vector"
] |
9e7ff1dd2ab66867ada5815f194dd2ced266db2b | 2,903 | h | C | include/libigl/external/embree/kernels/common/builder.h | josefgraus/self_similiarity | c032daa3009f60fdc8a52c437a07c6e3ba2efe4b | [
"MIT"
] | 29 | 2019-11-27T00:43:07.000Z | 2020-02-25T14:35:54.000Z | externals/embree-2.17.7/kernels/common/builder.h | joeylitalien/drmlt-dpt | a12baa5d50ff6e6337e5cbb0433b1baa6fd498d7 | [
"MIT"
] | null | null | null | externals/embree-2.17.7/kernels/common/builder.h | joeylitalien/drmlt-dpt | a12baa5d50ff6e6337e5cbb0433b1baa6fd498d7 | [
"MIT"
] | 4 | 2019-11-27T05:19:03.000Z | 2020-03-23T22:49:53.000Z | // ======================================================================== //
// Copyright 2009-2018 Intel Corporation //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#pragma once
#include "default.h"
#include "accel.h"
namespace embree
{
#define MODE_HIGH_QUALITY (1<<8)
/*! virtual interface for all hierarchy builders */
class Builder : public RefCount {
public:
static const size_t DEFAULT_SINGLE_THREAD_THRESHOLD = 1024;
/*! initiates the hierarchy builder */
virtual void build() = 0;
/*! notifies the builder about the deletion of some geometry */
virtual void deleteGeometry(size_t geomID) {};
/*! clears internal builder state */
virtual void clear() = 0;
};
/*! virtual interface for progress monitor class */
struct BuildProgressMonitor {
virtual void operator() (size_t dn) const = 0;
};
/*! build the progress monitor interface from a closure */
template<typename Closure>
struct ProgressMonitorClosure : BuildProgressMonitor
{
public:
ProgressMonitorClosure (const Closure& closure) : closure(closure) {}
void operator() (size_t dn) const { closure(dn); }
private:
const Closure closure;
};
template<typename Closure> __forceinline const ProgressMonitorClosure<Closure> BuildProgressMonitorFromClosure(const Closure& closure) {
return ProgressMonitorClosure<Closure>(closure);
}
struct LineSegments;
struct TriangleMesh;
struct QuadMesh;
class AccelSet;
class Scene;
typedef void (*createLineSegmentsAccelTy)(LineSegments* mesh, AccelData*& accel, Builder*& builder);
typedef void (*createTriangleMeshAccelTy)(TriangleMesh* mesh, AccelData*& accel, Builder*& builder);
typedef void (*createQuadMeshAccelTy)(QuadMesh* mesh, AccelData*& accel, Builder*& builder);
typedef void (*createAccelSetAccelTy)(AccelSet* mesh, AccelData*& accel, Builder*& builder);
}
| 39.22973 | 138 | 0.584223 | [
"mesh",
"geometry"
] |
9e807d8988dfb4254ce9ca84c717fc8d8a2097b2 | 3,623 | h | C | YYCategoriesStudy/Pods/YYCategories/YYCategories/Foundation/NSNotificationCenter+YYAdd.h | wangdxnum1/MyOpenSourceCodeStudyAndResearch | 2df6ff770d9a7fc4d8a76efd1e52553b57ccd8cb | [
"MIT"
] | 1 | 2017-02-06T01:14:20.000Z | 2017-02-06T01:14:20.000Z | YYCategoriesStudy/Pods/YYCategories/YYCategories/Foundation/NSNotificationCenter+YYAdd.h | wangdxnum1/MyOpenSourceCodeStudyAndResearch | 2df6ff770d9a7fc4d8a76efd1e52553b57ccd8cb | [
"MIT"
] | null | null | null | YYCategoriesStudy/Pods/YYCategories/YYCategories/Foundation/NSNotificationCenter+YYAdd.h | wangdxnum1/MyOpenSourceCodeStudyAndResearch | 2df6ff770d9a7fc4d8a76efd1e52553b57ccd8cb | [
"MIT"
] | null | null | null | //
// NSNotificationCenter+YYAdd.h
// YYCategories <https://github.com/ibireme/YYCategories>
//
// Created by ibireme on 13/8/24.
// Copyright (c) 2015 ibireme.
//
// This source code is licensed under the MIT-style license found in the
// LICENSE file in the root directory of this source tree.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
/**
Provide some method for `NSNotificationCenter`
to post notification in different thread.
*/
@interface NSNotificationCenter (YYAdd)

/**
 Posts a given notification to the receiver on main thread.
 If current thread is main thread, the notification is posted synchronized;
 otherwise, is posted asynchronized.

 (Synchronously posts the notification when already on the main thread.)

 @param notification  The notification to post.
                      An exception is raised if notification is nil.
 */
- (void)postNotificationOnMainThread:(NSNotification *)notification;

/**
 Posts a given notification to the receiver on main thread.

 (Posts on the main thread; whether the current thread blocks depends on `wait`.)

 @param notification  The notification to post.
                      An exception is raised if notification is nil.

 @param wait          A Boolean that specifies whether the current thread blocks
                      until after the specified notification is posted on the
                      receiver on the main thread. Specify YES to block this
                      thread; otherwise, specify NO to have this method return
                      immediately.
 */
- (void)postNotificationOnMainThread:(NSNotification *)notification
                       waitUntilDone:(BOOL)wait;

/**
 Creates a notification with a given name and sender and posts it to the
 receiver on main thread. If current thread is main thread, the notification
 is posted synchronized; otherwise, is posted asynchronized.

 (Wraps the notification content, including the posting object.)

 @param name    The name of the notification.

 @param object  The object posting the notification.
 */
- (void)postNotificationOnMainThreadWithName:(NSString *)name
                                      object:(nullable id)object;

/**
 Creates a notification with a given name and sender and posts it to the
 receiver on main thread. If current thread is main thread, the notification
 is posted synchronized; otherwise, is posted asynchronized.

 (Wraps the notification content, including the posting object and custom user info.)

 @param name      The name of the notification.

 @param object    The object posting the notification.

 @param userInfo  Information about the notification. May be nil.
 */
- (void)postNotificationOnMainThreadWithName:(NSString *)name
                                      object:(nullable id)object
                                    userInfo:(nullable NSDictionary *)userInfo;

/**
 Creates a notification with a given name and sender and posts it to the
 receiver on main thread.

 (Wraps the notification content — posting object and custom user info — and
 optionally blocks the current thread until it has been posted.)

 @param name      The name of the notification.

 @param object    The object posting the notification.

 @param userInfo  Information about the notification. May be nil.

 @param wait      A Boolean that specifies whether the current thread blocks
                  until after the specified notification is posted on the
                  receiver on the main thread. Specify YES to block this
                  thread; otherwise, specify NO to have this method return
                  immediately.
 */
- (void)postNotificationOnMainThreadWithName:(NSString *)name
                                      object:(nullable id)object
                                    userInfo:(nullable NSDictionary *)userInfo
                               waitUntilDone:(BOOL)wait;

@end
NS_ASSUME_NONNULL_END
| 36.969388 | 80 | 0.685068 | [
"object"
] |
9e82964534b3aa94db8058ae82ee9e8382e35dfd | 34,568 | c | C | B2G/hardware/ti/wlan/wl1271/TWD/FW_Transfer/RxXfer.c | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | 3 | 2015-08-31T15:24:31.000Z | 2020-04-24T20:31:29.000Z | B2G/hardware/ti/wlan/wl1271/TWD/FW_Transfer/RxXfer.c | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | null | null | null | B2G/hardware/ti/wlan/wl1271/TWD/FW_Transfer/RxXfer.c | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | 3 | 2015-07-29T07:17:15.000Z | 2020-11-04T06:55:37.000Z | /*
* RxXfer.c
*
* Copyright(c) 1998 - 2010 Texas Instruments. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Texas Instruments nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/****************************************************************************
*
* MODULE: rxXfer.c
*
* PURPOSE: Rx Xfer module implementation.Responsible for reading Rx from the FW
* and forward it to the upper layers.
*
****************************************************************************/
#define __FILE_ID__ FILE_ID_106
#include "tidef.h"
#include "osApi.h"
#include "report.h"
#include "rxXfer_api.h"
#include "FwEvent_api.h"
#include "TWDriverInternal.h"
#include "RxQueue_api.h"
#include "TwIf.h"
#include "public_host_int.h"
#include "bmtrace_api.h"
#define RX_DRIVER_COUNTER_ADDRESS 0x300538
#define PLCP_HEADER_LENGTH 8
#define WORD_SIZE 4
#define UNALIGNED_PAYLOAD 0x1
#define RX_DESCRIPTOR_SIZE (sizeof(RxIfDescriptor_t))
#define MAX_PACKETS_NUMBER 8
#define MAX_CONSECUTIVE_READ_TXN 16
#define MAX_PACKET_SIZE 8192 /* Max Txn size */
#ifdef PLATFORM_SYMBIAN /* UMAC is using only one buffer and therefore we can't use consecutive reads */
#define MAX_CONSECUTIVE_READS 1
#else
#define MAX_CONSECUTIVE_READS 8
#endif
#define SLV_MEM_CP_VALUE(desc, offset) (((RX_DESC_GET_MEM_BLK(desc) << 8) + offset))
#define ALIGNMENT_SIZE(desc) ((RX_DESC_GET_UNALIGNED(desc) & UNALIGNED_PAYLOAD) ? 2 : 0)
#if (NUM_RX_PKT_DESC & (NUM_RX_PKT_DESC - 1))
#error NUM_RX_PKT_DESC is not a power of 2 which may degrade performance when we calculate modulo!!
#endif
#ifdef TI_DBG
typedef struct
{
TI_UINT32 uCountFwEvents;
TI_UINT32 uCountPktsForward;
TI_UINT32 uCountBufPend;
TI_UINT32 uCountBufNoMem;
TI_UINT32 uCountPktAggreg[MAX_XFER_BUFS];
} TRxXferDbgStat;
#endif
typedef struct
{
TTxnStruct tTxnStruct;
TI_UINT32 uRegData;
TI_UINT32 uRegAdata;
} TRegTxn;
typedef struct
{
TTxnStruct tTxnStruct;
TI_UINT32 uCounter;
} TCounterTxn;
typedef struct
{
TI_HANDLE hOs;
TI_HANDLE hReport;
TI_HANDLE hTwIf;
TI_HANDLE hFwEvent;
TI_HANDLE hRxQueue;
TI_UINT32 aRxPktsDesc[NUM_RX_PKT_DESC]; /* Save Rx packets short descriptors from FwStatus */
TI_UINT32 uFwRxCntr; /* Save last FW packets counter from FwStatus */
TI_UINT32 uDrvRxCntr; /* The current driver processed packets counter */
TI_UINT32 uPacketMemoryPoolStart; /* The FW mem-blocks area base address */
TI_UINT32 uMaxAggregLen; /* The max length in bytes of aggregated packets transaction */
TI_UINT32 uMaxAggregPkts; /* The max number of packets that may be aggregated in one transaction */
TRequestForBufferCb RequestForBufferCB; /* Upper layer CB for allocating buffers for packets */
TI_HANDLE RequestForBufferCB_handle; /* The upper later CB handle */
TI_BOOL bPendingBuffer; /* If TRUE, we exited the Rx handler upon pending-buffer */
TI_UINT32 uCurrTxnIndex; /* The current Txn structures index to use */
TI_UINT32 uAvailableTxn; /* Number of Txn structures currently available */
TRegTxn aSlaveRegTxn[MAX_CONSECUTIVE_READ_TXN]; /* Txn structures for writing mem-block address reg */
TTxnStruct aTxnStruct[MAX_CONSECUTIVE_READ_TXN]; /* Txn structures for reading the Rx packets */
TCounterTxn aCounterTxn[MAX_CONSECUTIVE_READ_TXN]; /* Txn structures for writing the driver counter workaround */
TI_UINT8 aTempBuffer[MAX_PACKET_SIZE]; /* Dummy buffer to use if we couldn't get a buffer for the packet (so drop the packet) */
TFailureEventCb fErrCb; /* The upper layer CB function for error handling */
TI_HANDLE hErrCb; /* The CB function handle */
#ifdef TI_DBG
TRxXferDbgStat tDbgStat;
#endif
} TRxXfer;
/************************ static function declaration *****************************/
static TI_STATUS rxXfer_Handle(TI_HANDLE hRxXfer);
static void rxXfer_TxnDoneCb (TI_HANDLE hRxXfer, TTxnStruct* pTxn);
static void rxXfer_PktDropTxnDoneCb (TI_HANDLE hRxXfer, TTxnStruct *pTxn);
static ETxnStatus rxXfer_IssueTxn (TI_HANDLE hRxXfer, TI_UINT32 uFirstMemBlkAddr);
static void rxXfer_ForwardPacket (TRxXfer* pRxXfer, TTxnStruct* pTxn);
/****************************************************************************
 *                      RxXfer_Create()
 ****************************************************************************
 * DESCRIPTION: Allocate and zero-initialize the RxXfer module object.
 *
 * INPUTS:      hOs - the OS abstraction handle used for memory services
 *
 * OUTPUT:      None
 *
 * RETURNS:     The created module handle, or NULL on allocation failure
 ****************************************************************************/
TI_HANDLE rxXfer_Create (TI_HANDLE hOs)
{
    TRxXfer *pModule = os_memoryAlloc (hOs, sizeof(TRxXfer));

    if (pModule != NULL)
    {
        /* Zero the whole object so all counters and state start from 0 */
        os_memoryZero (hOs, pModule, sizeof(TRxXfer));
        pModule->hOs = hOs;
    }

    return (TI_HANDLE)pModule;
}
/****************************************************************************
 *                      RxXfer_Destroy()
 ****************************************************************************
 * DESCRIPTION: Free the RxXfer module object. Safe to call with NULL.
 *
 * INPUTS:      hRxXfer - the module handle to free
 *
 * OUTPUT:      None
 *
 * RETURNS:
 ****************************************************************************/
void rxXfer_Destroy (TI_HANDLE hRxXfer)
{
    TRxXfer *pModule = (TRxXfer *)hRxXfer;

    if (pModule == NULL)
    {
        return;
    }

    os_memoryFree (pModule->hOs, pModule, sizeof(TRxXfer));
}
/****************************************************************************
 *                      rxXfer_Init()
 ****************************************************************************
 * DESCRIPTION: Store the handles of the modules RxXfer interacts with and
 *              reset the module's internal state (counters, Txn structures).
 *
 * INPUTS:      hRxXfer - module handle;
 *              other modules handles.
 *
 * OUTPUT:      None
 *
 * RETURNS:     None
 ****************************************************************************/
void rxXfer_Init(TI_HANDLE hRxXfer,
                 TI_HANDLE hFwEvent,
                 TI_HANDLE hReport,
                 TI_HANDLE hTwIf,
                 TI_HANDLE hRxQueue)
{
    TRxXfer *pModule = (TRxXfer *)hRxXfer;

    /* Keep references to the sibling driver modules */
    pModule->hRxQueue = hRxQueue;
    pModule->hTwIf    = hTwIf;
    pModule->hReport  = hReport;
    pModule->hFwEvent = hFwEvent;

    /* Reset counters and transaction ring to their initial state */
    rxXfer_Restart (hRxXfer);

#ifdef TI_DBG
    rxXfer_ClearStats (pModule);
#endif
}
/****************************************************************************
 *                      rxXfer_SetDefaults()
 ****************************************************************************
 * DESCRIPTION: Apply init-time configuration: the maximum number of Rx
 *              packets that may be aggregated into one bus transaction.
 *
 * INPUTS:      hRxXfer     - module handle
 *              pInitParams - TWD init parameters
 *
 * OUTPUT:      None
 *
 * RETURNS:     None
 ****************************************************************************/
void rxXfer_SetDefaults (TI_HANDLE hRxXfer, TTwdInitParams *pInitParams)
{
    ((TRxXfer *)hRxXfer)->uMaxAggregPkts = pInitParams->tGeneral.uRxAggregPktsLimit;
}
/****************************************************************************
 *                      rxXfer_SetBusParams()
 ****************************************************************************
 * DESCRIPTION: Record the bus driver DMA-able buffer length. It is used as
 *              the upper limit on the total byte length of Rx packets
 *              aggregated into a single read transaction.
 *
 * INPUTS:      hRxXfer    - module handle
 *              uDmaBufLen - the bus driver DMA-able buffer length
 *
 * OUTPUT:      None
 *
 * RETURNS:     None
 ****************************************************************************/
void rxXfer_SetBusParams (TI_HANDLE hRxXfer, TI_UINT32 uDmaBufLen)
{
    ((TRxXfer *)hRxXfer)->uMaxAggregLen = uDmaBufLen;
}
/****************************************************************************
 *                      rxXfer_Register_CB()
 ****************************************************************************
 * DESCRIPTION: Register an upper-layer callback. The only supported ID is
 *              TWD_INT_REQUEST_FOR_BUFFER (host buffer allocation request);
 *              any other ID is traced as an error and ignored.
 *
 * INPUTS:      hRxXfer    - RxXfer handle
 *              CallBackID - which callback to register
 *              CBFunc     - the callback function
 *              CBObj      - the callback handle/object
 *
 * OUTPUT:      None
 *
 * RETURNS:     None
 ****************************************************************************/
void rxXfer_Register_CB (TI_HANDLE hRxXfer, TI_UINT32 CallBackID, void *CBFunc, TI_HANDLE CBObj)
{
    TRxXfer *pModule = (TRxXfer *)hRxXfer;

    TRACE1(pModule->hReport, REPORT_SEVERITY_INFORMATION , "rxXfer_Register_CB (Value = 0x%x)\n", CallBackID);

    if (CallBackID == TWD_INT_REQUEST_FOR_BUFFER)
    {
        pModule->RequestForBufferCB        = (TRequestForBufferCb)CBFunc;
        pModule->RequestForBufferCB_handle = CBObj;
    }
    else
    {
        TRACE0(pModule->hReport, REPORT_SEVERITY_ERROR, "rxXfer_Register_CB - Illegal value\n");
    }
}
/****************************************************************************
 *                      rxXfer_ForwardPacket()
 ****************************************************************************
 * DESCRIPTION: Forward received packet(s) to the upper layers. A transaction
 *              may carry up to MAX_XFER_BUFS aggregated packets; a zero
 *              length marks the first unused buffer slot.
 *
 * INPUTS:      pRxXfer - module handle
 *              pTxn    - the completed read transaction holding the packets
 *
 * OUTPUT:
 *
 * RETURNS:
 ****************************************************************************/
static void rxXfer_ForwardPacket (TRxXfer *pRxXfer, TTxnStruct *pTxn)
{
    TI_UINT32         uBufNum;
    RxIfDescriptor_t *pRxInfo;
#ifdef TI_DBG   /* for packet sanity check */
    TI_UINT16         uLenFromRxInfo;
#endif

    /* Go over all occupied Txn buffers and forward their Rx packets upward */
    for (uBufNum = 0; uBufNum < MAX_XFER_BUFS; uBufNum++)
    {
        /* If no more buffers, exit the loop */
        if (pTxn->aLen[uBufNum] == 0)
        {
            break;
        }

        /* BUG FIX: refresh the descriptor pointer for EVERY buffer, also in
         * non-debug builds. Previously this assignment existed only inside
         * the TI_DBG block, so release builds set the end-of-burst flag
         * (below) on the FIRST packet of an aggregated transaction instead
         * of the last one. */
        pRxInfo = (RxIfDescriptor_t *)(pTxn->aBuf[uBufNum]);

#ifdef TI_DBG   /* Packet sanity check */
        /* Get length from RxInfo, handle endianess and convert to length in bytes */
        uLenFromRxInfo = ENDIAN_HANDLE_WORD(pRxInfo->length) << 2;

        /* If the length in the RxInfo is different than in the short descriptor, set error status */
        if (pTxn->aLen[uBufNum] != uLenFromRxInfo)
        {
            TRACE3(pRxXfer->hReport, REPORT_SEVERITY_ERROR , "rxXfer_ForwardPacket: Bad Length!! RxInfoLength=%d, ShortDescLen=%d, RxInfoStatus=0x%x\n", uLenFromRxInfo, pTxn->aLen[uBufNum], pRxInfo->status);

            /* Mark the packet as failed and fix its length so upper layers drop it safely */
            pRxInfo->status &= ~RX_DESC_STATUS_MASK;
            pRxInfo->status |= RX_DESC_STATUS_DRIVER_RX_Q_FAIL;
            pRxInfo->length = ENDIAN_HANDLE_WORD(pTxn->aLen[uBufNum] >> 2);

            /* If error CB available, trigger recovery !! */
            if (pRxXfer->fErrCb)
            {
                pRxXfer->fErrCb (pRxXfer->hErrCb, RX_XFER_FAILURE);
            }
        }
        else
        {
            TRACE2(pRxXfer->hReport, REPORT_SEVERITY_INFORMATION , "rxXfer_ForwardPacket: RxInfoLength=%d, RxInfoStatus=0x%x\n", uLenFromRxInfo, pRxInfo->status);
        }

        pRxXfer->tDbgStat.uCountPktsForward++;
#endif

        /* This is the last packet in the Burst so mark its EndOfBurst flag */
        if (TXN_PARAM_GET_END_OF_BURST(pTxn) && (uBufNum == (MAX_XFER_BUFS - 1) || pTxn->aLen[uBufNum + 1] == 0))
        {
            TXN_PARAM_SET_END_OF_BURST(pTxn, 0);
            pRxInfo->driverFlags |= DRV_RX_FLAG_END_OF_BURST;
        }

        /* Forward received packet to the upper layers */
        RxQueue_ReceivePacket (pRxXfer->hRxQueue, (const void *)pTxn->aBuf[uBufNum]);
    }

    /* reset the aBuf field for clean on recovery purpose */
    pTxn->aBuf[0] = 0;
}
/****************************************************************************
 *                      rxXfer_RxEvent()
 ****************************************************************************
 * DESCRIPTION: Called upon Rx event from the FW. If the FW packet counter
 *              advanced, saves the new counter and the Rx short descriptors,
 *              then processes all new packets via rxXfer_Handle().
 *
 * INPUTS:      hRxXfer   - RxXfer handle
 *              pFwStatus - the FW status structure read by the FwEvent module
 *
 * OUTPUT:      None
 *
 * RETURNS:     TXN_STATUS_COMPLETE always; individual packet-read
 *              transactions complete asynchronously via their TxnDone
 *              callbacks, so there is nothing to report here.
 ****************************************************************************/
ETxnStatus rxXfer_RxEvent (TI_HANDLE hRxXfer, FwStatus_t *pFwStatus)
{
    TRxXfer       *pRxXfer = (TRxXfer *)hRxXfer;
    TI_UINT32      uTempCounters;
    FwStatCntrs_t *pFwStatusCounters;
    TI_UINT32      i;
    CL_TRACE_START_L2();

    /* Copy to a local first, then view it as the counters struct.
       NOTE(review): the cast relies on FwStatCntrs_t fitting in a TI_UINT32 -
       presumably a packed bit-field struct; confirm against public_host_int.h */
    uTempCounters = ENDIAN_HANDLE_LONG (pFwStatus->counters);
    pFwStatusCounters = (FwStatCntrs_t*)(&uTempCounters);

    TRACE2(pRxXfer->hReport, REPORT_SEVERITY_INFORMATION , "rxXfer_RxEvent: NewFwCntr=%d, OldFwCntr=%d\n", pFwStatusCounters->fwRxCntr, pRxXfer->uFwRxCntr);

    /* If no new Rx packets - exit */
    if ((pFwStatusCounters->fwRxCntr % NUM_RX_PKT_DESC) == (pRxXfer->uFwRxCntr % NUM_RX_PKT_DESC))
    {
        CL_TRACE_END_L2("tiwlan_drv.ko", "CONTEXT", "RX", "");
        return TXN_STATUS_COMPLETE;
    }

#ifdef TI_DBG
    pRxXfer->tDbgStat.uCountFwEvents++;
#endif

    /* Save current FW counter and Rx packets short descriptors for processing */
    pRxXfer->uFwRxCntr = pFwStatusCounters->fwRxCntr;
    for (i = 0; i < NUM_RX_PKT_DESC; i++)
    {
        pRxXfer->aRxPktsDesc[i] = ENDIAN_HANDLE_LONG (pFwStatus->rxPktsDesc[i]);
    }

    /* Handle all new Rx packets. The result is intentionally ignored
       (previously stored in a never-read local variable): completion is
       delivered through the Txn callbacks, not through this return path. */
    (void)rxXfer_Handle (pRxXfer);

    CL_TRACE_END_L2("tiwlan_drv.ko", "CONTEXT", "RX", "");
    return TXN_STATUS_COMPLETE;
}
/****************************************************************************
 *                      rxXfer_Handle()
 ****************************************************************************
 * DESCRIPTION: Main Rx processing loop. For every new packet indicated by
 *              the FW (between the driver counter and the FW counter):
 *              aggregate up to uMaxAggregPkts packets - bounded also by the
 *              DMA-able buffer length uMaxAggregLen - into one read
 *              transaction and issue it via rxXfer_IssueTxn().
 *              If a host buffer is temporarily unavailable (PENDING), exit
 *              and resume from rxXfer_TxnDoneCb. If allocation fails
 *              (out-of-memory), read the packet into a scratch buffer and
 *              drop it so the FW mem-blocks are still released.
 *
 * INPUTS:      hRxXfer - RxXfer handle
 *
 * OUTPUT:
 *
 * RETURNS:     TI_OK on normal completion; TI_NOK if no Txn structures are
 *              available (fatal error - not expected to happen)
 ****************************************************************************/
static TI_STATUS rxXfer_Handle(TI_HANDLE hRxXfer)
{
#ifndef _VLCT_
    TRxXfer *        pRxXfer = (TRxXfer *)hRxXfer;
    TI_BOOL          bIssueTxn = TI_FALSE;    /* If TRUE transact current aggregated packets */
    TI_BOOL          bDropLastPkt = TI_FALSE; /* If TRUE, need to drop last packet (RX_BUF_ALLOC_OUT_OF_MEM) */
    TI_BOOL          bExit = TI_FALSE;        /* If TRUE, can't process further packets so exit (after serving the other flags) */
    TI_UINT32        uAggregPktsNum = 0;      /* Number of aggregated packets */
    TI_UINT32        uFirstMemBlkAddr = 0;    /* FW address of the first mem-block of the aggregate */
    TI_UINT32        uRxDesc = 0;             /* Current packet's short descriptor (from FwStatus) */
    TI_UINT32        uBuffSize = 0;           /* Current packet's length in bytes */
    TI_UINT32        uTotalAggregLen = 0;     /* Total bytes aggregated so far in this Txn */
    TI_UINT32        uDrvIndex;               /* Driver's (processed) index into aRxPktsDesc */
    TI_UINT32        uFwIndex;                /* FW's (produced) index into aRxPktsDesc */
    TI_UINT8 *       pHostBuf;
    TTxnStruct *     pTxn = NULL;
    ETxnStatus       eTxnStatus;
    ERxBufferStatus  eBufStatus;
    PacketClassTag_e eRxPacketType;
    CL_TRACE_START_L2();

    /* If no Txn structures available exit!! (fatal error - not expected to happen) */
    if (pRxXfer->uAvailableTxn == 0 )
    {
        TRACE0(pRxXfer->hReport, REPORT_SEVERITY_ERROR, "rxXfer_Handle: No available Txn structures left!\n");
        CL_TRACE_END_L2("tiwlan_drv.ko", "CONTEXT", "RX", "");
        return TI_NOK;
    }

    uFwIndex = pRxXfer->uFwRxCntr % NUM_RX_PKT_DESC;

    /* Loop while Rx packets can be transfered from the FW */
    while (1)
    {
        uDrvIndex = pRxXfer->uDrvRxCntr % NUM_RX_PKT_DESC;

        /* If there are unprocessed Rx packets */
        if (uDrvIndex != uFwIndex)
        {
            /* Get next packet info (length is in words in the descriptor, hence << 2) */
            uRxDesc       = pRxXfer->aRxPktsDesc[uDrvIndex];
            uBuffSize     = RX_DESC_GET_LENGTH(uRxDesc) << 2;
            eRxPacketType = (PacketClassTag_e)RX_DESC_GET_PACKET_CLASS_TAG (uRxDesc);

            /* If new packet exceeds max aggregation length, set flag to send previous packets (postpone it to next loop) */
            if ((uTotalAggregLen + uBuffSize) > pRxXfer->uMaxAggregLen)
            {
                bIssueTxn = TI_TRUE;
            }

            /* No length limit so try to aggregate new packet */
            else
            {
                /* Allocate host read buffer */
                /* The RxBufAlloc() add an extra word for MAC header alignment in case of QoS MSDU */
                eBufStatus = pRxXfer->RequestForBufferCB(pRxXfer->RequestForBufferCB_handle,
                                                         (void**)&pHostBuf,
                                                         uBuffSize,
                                                         (TI_UINT32)NULL,
                                                         eRxPacketType);

                TRACE6(pRxXfer->hReport, REPORT_SEVERITY_INFORMATION , "rxXfer_Handle: Index=%d, RxDesc=0x%x, DrvCntr=%d, FwCntr=%d, BufStatus=%d, BuffSize=%d\n", uDrvIndex, uRxDesc, pRxXfer->uDrvRxCntr, pRxXfer->uFwRxCntr, eBufStatus, uBuffSize);

                /* If buffer allocated, add it to current Txn (up to 4 packets aggregation) */
                if (eBufStatus == RX_BUF_ALLOC_COMPLETE)
                {
                    /* If first aggregated packet prepare the next Txn struct */
                    if (uAggregPktsNum == 0)
                    {
                        pTxn = (TTxnStruct*)&(pRxXfer->aTxnStruct[pRxXfer->uCurrTxnIndex]);
                        pTxn->uHwAddr = SLV_MEM_DATA;

                        /* Save first mem-block of first aggregated packet! */
                        uFirstMemBlkAddr = SLV_MEM_CP_VALUE(uRxDesc, pRxXfer->uPacketMemoryPoolStart);
                    }

                    /* Skip 2 alignment bytes when the FW marked the payload as unaligned */
                    pTxn->aBuf[uAggregPktsNum] = pHostBuf + ALIGNMENT_SIZE(uRxDesc);
                    pTxn->aLen[uAggregPktsNum] = uBuffSize;
                    uAggregPktsNum++;
                    uTotalAggregLen += uBuffSize;
                    if (uAggregPktsNum >= pRxXfer->uMaxAggregPkts)
                    {
                        bIssueTxn = TI_TRUE;
                    }
                    pRxXfer->uDrvRxCntr++;
                }

                /* If buffer pending until freeing previous buffer, set Exit flag and if needed set IssueTxn flag.
                   Note: uDrvRxCntr is NOT advanced, so this packet is retried from rxXfer_TxnDoneCb. */
                else if (eBufStatus == RX_BUF_ALLOC_PENDING)
                {
                    bExit = TI_TRUE;
                    pRxXfer->bPendingBuffer = TI_TRUE;
                    if (uAggregPktsNum > 0)
                    {
                        bIssueTxn = TI_TRUE;
                    }
#ifdef TI_DBG
                    pRxXfer->tDbgStat.uCountBufPend++;
#endif
                }

                /* If no buffer due to out-of-memory, set DropLastPkt flag and if needed set IssueTxn flag. */
                else
                {
                    bDropLastPkt = TI_TRUE;
                    if (uAggregPktsNum > 0)
                    {
                        bIssueTxn = TI_TRUE;
                    }
#ifdef TI_DBG
                    pRxXfer->tDbgStat.uCountBufNoMem++;
#endif
                }
            }
        }

        /* If no more packets, set Exit flag and if needed set IssueTxn flag. */
        else
        {
            bExit = TI_TRUE;
            if (uAggregPktsNum > 0)
            {
                bIssueTxn = TI_TRUE;
            }
        }

        /* If required to send Rx packet(s) transaction */
        if (bIssueTxn)
        {
            if (bExit)
            {
                TXN_PARAM_SET_END_OF_BURST(pTxn, 1);
            }

            /* If not all 4 Txn buffers are used, reset first unused buffer length for indication */
            if (uAggregPktsNum < MAX_XFER_BUFS)
            {
                pTxn->aLen[uAggregPktsNum] = 0;
            }

            eTxnStatus = rxXfer_IssueTxn (pRxXfer, uFirstMemBlkAddr);

            if (eTxnStatus == TXN_STATUS_COMPLETE)
            {
                /* Forward received packet to the upper layers */
                rxXfer_ForwardPacket (pRxXfer, pTxn);
            }
            else if (eTxnStatus == TXN_STATUS_PENDING)
            {
                /* Decrease the number of available txn structures (freed in TxnDone CB) */
                pRxXfer->uAvailableTxn--;
            }
            else
            {
                TRACE3(pRxXfer->hReport, REPORT_SEVERITY_ERROR , "rxXfer_Handle: Status=%d, DrvCntr=%d, RxDesc=0x%x\n", eTxnStatus, pRxXfer->uDrvRxCntr, uRxDesc);
            }

#ifdef TI_DBG
            pRxXfer->tDbgStat.uCountPktAggreg[uAggregPktsNum - 1]++;
#endif

            /* Start a fresh aggregate in the next ring slot */
            uAggregPktsNum  = 0;
            uTotalAggregLen = 0;
            bIssueTxn       = TI_FALSE;
            pRxXfer->uCurrTxnIndex = (pRxXfer->uCurrTxnIndex + 1) % MAX_CONSECUTIVE_READ_TXN;
        }

        /* If last packet should be dropped (no memory for host buffer) */
        if (bDropLastPkt)
        {
            /* Increment driver packets counter before calling rxXfer_IssueTxn() */
            pRxXfer->uDrvRxCntr++;

            /* Read packet to dummy buffer and ignore it (no callback needed).
               rxXfer_PktDropTxnDoneCb restores the regular TxnDone callback afterwards. */
            uFirstMemBlkAddr = SLV_MEM_CP_VALUE(uRxDesc, pRxXfer->uPacketMemoryPoolStart);
            pTxn = (TTxnStruct*)&pRxXfer->aTxnStruct[pRxXfer->uCurrTxnIndex];
            BUILD_TTxnStruct(pTxn, SLV_MEM_DATA, pRxXfer->aTempBuffer, uBuffSize, (TTxnDoneCb)rxXfer_PktDropTxnDoneCb, hRxXfer)
            eTxnStatus = rxXfer_IssueTxn (pRxXfer, uFirstMemBlkAddr);
            if (eTxnStatus == TXN_STATUS_PENDING)
            {
                pRxXfer->uAvailableTxn--;
            }
            pRxXfer->uCurrTxnIndex = (pRxXfer->uCurrTxnIndex + 1) % MAX_CONSECUTIVE_READ_TXN;
            bDropLastPkt = TI_FALSE;
        }

        /* Can't process more packets so exit */
        if (bExit)
        {
            CL_TRACE_END_L2("tiwlan_drv.ko", "CONTEXT", "RX", "");
            return TI_OK;
        }

    } /* End of while(1) */

    /* Unreachable code */

#endif
}
/****************************************************************************
 *                      rxXfer_IssueTxn()
 ****************************************************************************
 * DESCRIPTION: Issue the three bus transactions that fetch Rx packet(s):
 *              1) write the first FW mem-block address (two consecutive
 *                 registers) to the slave register,
 *              2) read the packet data itself (prepared in rxXfer_Handle),
 *              3) write the driver Rx counter to the FW - this write also
 *                 generates an interrupt to the FW.
 *
 * INPUTS:      hRxXfer          - module handle
 *              uFirstMemBlkAddr - FW address of the first mem-block to read
 *
 * OUTPUT:
 *
 * RETURNS:     Status of the packet-read transaction only:
 *              COMPLETE, PENDING or ERROR
 ****************************************************************************/
static ETxnStatus rxXfer_IssueTxn (TI_HANDLE hRxXfer, TI_UINT32 uFirstMemBlkAddr)
{
    TRxXfer    *pModule = (TRxXfer *)hRxXfer;
    TI_UINT32   uSlot   = pModule->uCurrTxnIndex;
    TTxnStruct *pRegTxn;
    TTxnStruct *pDataTxn;
    TTxnStruct *pCntrTxn;
    ETxnStatus  eStatus;

    /* Write the next mem block that we want to read */
    pRegTxn = &pModule->aSlaveRegTxn[uSlot].tTxnStruct;
    pRegTxn->uHwAddr = SLV_REG_DATA;
    pModule->aSlaveRegTxn[uSlot].uRegData  = ENDIAN_HANDLE_LONG(uFirstMemBlkAddr);
    pModule->aSlaveRegTxn[uSlot].uRegAdata = ENDIAN_HANDLE_LONG(uFirstMemBlkAddr + 4);
    twIf_Transact(pModule->hTwIf, pRegTxn);

    /* Issue the packet(s) read transaction (prepared in rxXfer_Handle) */
    pDataTxn = &pModule->aTxnStruct[uSlot];
    eStatus = twIf_Transact(pModule->hTwIf, pDataTxn);

    /* Write driver packets counter to FW. This write automatically generates interrupt to FW */
    /* Note: Workaround for WL6-PG1.0 is still needed for PG2.0 ==> if (pRxXfer->bChipIs1273Pg10) */
    pCntrTxn = &pModule->aCounterTxn[uSlot].tTxnStruct;
    pCntrTxn->uHwAddr = RX_DRIVER_COUNTER_ADDRESS;
    pModule->aCounterTxn[uSlot].uCounter = ENDIAN_HANDLE_LONG(pModule->uDrvRxCntr);
    twIf_Transact(pModule->hTwIf, pCntrTxn);

    TRACE5(pModule->hReport, REPORT_SEVERITY_INFORMATION , "rxXfer_IssueTxn: Counter-Txn: HwAddr=0x%x, Len0=%d, Data0=%d, DrvCount=%d, TxnParams=0x%x\n", pCntrTxn->uHwAddr, pCntrTxn->aLen[0], *(TI_UINT32 *)(pCntrTxn->aBuf[0]), pModule->uDrvRxCntr, pCntrTxn->uTxnParams);

    /* Return the status of the packet(s) transaction - COMPLETE, PENDING or ERROR */
    return eStatus;
}
/****************************************************************************
 *                      rxXfer_SetRxDirectAccessParams()
 ****************************************************************************
 * DESCRIPTION: Record the base address of the FW packet mem-blocks area,
 *              used when translating Rx descriptors to FW read addresses.
 *
 * INPUTS:      hRxXfer    - module handle
 *              pDmaParams - the FW DMA parameters
 *
 * OUTPUT:
 *
 * RETURNS:
 ****************************************************************************/
void rxXfer_SetRxDirectAccessParams (TI_HANDLE hRxXfer, TDmaParams *pDmaParams)
{
    ((TRxXfer *)hRxXfer)->uPacketMemoryPoolStart = pDmaParams->PacketMemoryPoolStart;
}
/****************************************************************************
 *                      rxXfer_TxnDoneCb()
 ****************************************************************************
 * DESCRIPTION: Completion callback for a packet-read transaction: release
 *              the Txn structure, forward the packet(s) to the upper layers
 *              and, if processing previously stopped on a pending buffer,
 *              resume handling further packets.
 *
 * INPUTS:      hRxXfer - module handle
 *              pTxn    - the completed transaction
 *
 * OUTPUT:
 *
 * RETURNS:
 ****************************************************************************/
static void rxXfer_TxnDoneCb (TI_HANDLE hRxXfer, TTxnStruct *pTxn)
{
    TRxXfer *pModule = (TRxXfer *)hRxXfer;
    CL_TRACE_START_L2();

    /* The Txn structure is free for reuse again */
    pModule->uAvailableTxn++;

    /* Hand the received packet(s) to the upper layers */
    rxXfer_ForwardPacket (pModule, pTxn);

    /* If we exited the handler upon pending-buffer, call it again to handle further packets if any */
    if (pModule->bPendingBuffer)
    {
        pModule->bPendingBuffer = TI_FALSE;
        rxXfer_Handle (hRxXfer);
    }

    CL_TRACE_END_L2("tiwlan_drv.ko", "INHERIT", "RX", "");
}
/****************************************************************************
 *                      rxXfer_PktDropTxnDoneCb()
 ****************************************************************************
 * DESCRIPTION: Completion callback used when a packet is read into the
 *              scratch buffer and dropped (out-of-memory case). Releases the
 *              Txn structure and restores the regular TxnDone callback so
 *              the structure can be reused normally.
 *
 * INPUTS:      hRxXfer - module handle
 *              pTxn    - the completed (dropped-packet) transaction
 *
 * OUTPUT:
 *
 * RETURNS:
 ****************************************************************************/
static void rxXfer_PktDropTxnDoneCb (TI_HANDLE hRxXfer, TTxnStruct *pTxn)
{
    TRxXfer *pModule = (TRxXfer *)hRxXfer;

    /* The Txn structure is free for reuse again */
    pModule->uAvailableTxn++;

    /* Restore the regular TxnDone callback to the used structure */
    pTxn->fTxnDoneCb = (TTxnDoneCb)rxXfer_TxnDoneCb;
    pTxn->hCbHandle  = hRxXfer;
}
/****************************************************************************
 *                      rxXfer_Restart()
 ****************************************************************************
 * DESCRIPTION: Restart the RxXfer module object (called by the recovery and
 *              by rxXfer_Init): reset the FW/driver packet counters, release
 *              host buffers held by still-pending transactions, and rebuild
 *              the constant fields of all transaction structures.
 *
 * INPUTS:      hRxXfer - The object to restart
 *
 * OUTPUT:      None
 *
 * RETURNS:     NONE
 ****************************************************************************/
void rxXfer_Restart(TI_HANDLE hRxXfer)
{
    TRxXfer    *pRxXfer = (TRxXfer *)hRxXfer;
    TTxnStruct *pTxn;
    TI_UINT8    i;

    /* Reset counters and Txn-ring state; one Txn slot is kept in reserve,
       hence MAX_CONSECUTIVE_READ_TXN - 1 */
    pRxXfer->uFwRxCntr     = 0;
    pRxXfer->uDrvRxCntr    = 0;
    pRxXfer->uCurrTxnIndex = 0;
    pRxXfer->uAvailableTxn = MAX_CONSECUTIVE_READ_TXN - 1;

    /* Scan all transaction array and release only pending transaction */
    for (i = 0; i < MAX_CONSECUTIVE_READ_TXN; i++)
    {
        pTxn = &(pRxXfer->aTxnStruct[i]);

        /* Check if buffer allocated and not the dummy one (has a different callback) */
        if ((pTxn->aBuf[0] != 0) && (pTxn->fTxnDoneCb == (TTxnDoneCb)rxXfer_TxnDoneCb))
        {
            TI_UINT32 uBufNum;
            RxIfDescriptor_t *pRxParams;

            /* Go over the Txn occupied buffers and mark them as TAG_CLASS_UNKNOWN to be freed */
            for (uBufNum = 0; uBufNum < MAX_XFER_BUFS; uBufNum++)
            {
                /* If no more buffers, exit the loop */
                if (pTxn->aLen[uBufNum] == 0)
                {
                    break;
                }

                pRxParams = (RxIfDescriptor_t *)(pTxn->aBuf[uBufNum]);
                pRxParams->packet_class_tag = TAG_CLASS_UNKNOWN;
            }

            /* Call upper layer only to release the allocated buffer */
            rxXfer_ForwardPacket (pRxXfer, pTxn);
        }
    }

    /* Fill the transaction structures fields that have constant values.
       Note: TXN_PARAM_SET and BUILD_TTxnStruct are statement-like macros
       that intentionally take no trailing semicolon here. */
    for (i = 0; i < MAX_CONSECUTIVE_READ_TXN; i++)
    {
        /* First mem-block address (two consecutive registers) */
        pTxn = &(pRxXfer->aSlaveRegTxn[i].tTxnStruct);
        TXN_PARAM_SET(pTxn, TXN_LOW_PRIORITY, TXN_FUNC_ID_WLAN, TXN_DIRECTION_WRITE, TXN_INC_ADDR)
        BUILD_TTxnStruct(pTxn, SLV_REG_DATA, &pRxXfer->aSlaveRegTxn[i].uRegData, REGISTER_SIZE*2, NULL, NULL)

        /* The packet(s) read transaction */
        pTxn = &(pRxXfer->aTxnStruct[i]);
        TXN_PARAM_SET(pTxn, TXN_LOW_PRIORITY, TXN_FUNC_ID_WLAN, TXN_DIRECTION_READ, TXN_FIXED_ADDR)
        pTxn->fTxnDoneCb = (TTxnDoneCb)rxXfer_TxnDoneCb;
        pTxn->hCbHandle = hRxXfer;

        /* The driver packets counter */
        pTxn = &(pRxXfer->aCounterTxn[i].tTxnStruct);
        TXN_PARAM_SET(pTxn, TXN_LOW_PRIORITY, TXN_FUNC_ID_WLAN, TXN_DIRECTION_WRITE, TXN_INC_ADDR)
        BUILD_TTxnStruct(pTxn, RX_DRIVER_COUNTER_ADDRESS, &pRxXfer->aCounterTxn[i].uCounter, REGISTER_SIZE, NULL, NULL)
    }
}
/****************************************************************************
 *                      rxXfer_RegisterErrCb()
 ****************************************************************************
 * DESCRIPTION: Register the upper-layer (health monitor) error callback,
 *              used for recovery from fatal errors.
 *
 * INPUTS:      hRxXfer - The object
 *              fErrCb  - The upper layer CB function for error handling
 *              hErrCb  - The CB function handle
 *
 * OUTPUT:      None
 *
 * RETURNS:     void
 ****************************************************************************/
void rxXfer_RegisterErrCb (TI_HANDLE hRxXfer, void *fErrCb, TI_HANDLE hErrCb)
{
    TRxXfer *pXfer = (TRxXfer *)hRxXfer;

    /* Keep the callback and its handle for later invocation on fatal errors */
    pXfer->hErrCb = hErrCb;
    pXfer->fErrCb = (TFailureEventCb)fErrCb;
}
#ifdef TI_DBG
/****************************************************************************
 *                      rxXfer_ClearStats()
 ****************************************************************************
 * DESCRIPTION: Reset all debug statistics counters of the module.
 *
 * INPUTS:      hRxXfer - The object
 *
 * OUTPUT:      None
 *
 * RETURNS:     None
 ****************************************************************************/
void rxXfer_ClearStats (TI_HANDLE hRxXfer)
{
    TRxXfer *pXfer = (TRxXfer *)hRxXfer;

    /* Zero the whole debug-statistics structure in one shot */
    os_memoryZero (pXfer->hOs, &pXfer->tDbgStat, sizeof(TRxXferDbgStat));
}
/****************************************************************************
 *                      rxXfer_PrintStats()
 ****************************************************************************
 * DESCRIPTION: Print the module configuration, counters and debug
 *              statistics (no-op unless REPORT_LOG is defined).
 *
 * INPUTS:      hRxXfer - The object
 *
 * OUTPUT:      Report lines via WLAN_OS_REPORT
 *
 * RETURNS:     None
 ****************************************************************************/
void rxXfer_PrintStats (TI_HANDLE hRxXfer)
{
#ifdef REPORT_LOG
    TRxXfer *pRxXfer = (TRxXfer *)hRxXfer;
    WLAN_OS_REPORT(("Print RX Xfer module info\n"));
    WLAN_OS_REPORT(("=========================\n"));
    /* Configuration */
    WLAN_OS_REPORT(("uMaxAggregPkts = %d\n", pRxXfer->uMaxAggregPkts));
    WLAN_OS_REPORT(("uMaxAggregLen = %d\n", pRxXfer->uMaxAggregLen));
    /* Runtime counters */
    WLAN_OS_REPORT(("FW counter = %d\n", pRxXfer->uFwRxCntr));
    WLAN_OS_REPORT(("Drv counter = %d\n", pRxXfer->uDrvRxCntr));
    WLAN_OS_REPORT(("AvailableTxn = %d\n", pRxXfer->uAvailableTxn));
    /* Debug statistics */
    WLAN_OS_REPORT(("uCountFwEvents = %d\n", pRxXfer->tDbgStat.uCountFwEvents));
    WLAN_OS_REPORT(("uCountPktsForward = %d\n", pRxXfer->tDbgStat.uCountPktsForward));
    WLAN_OS_REPORT(("uCountBufPend = %d\n", pRxXfer->tDbgStat.uCountBufPend));
    WLAN_OS_REPORT(("uCountBufNoMem = %d\n", pRxXfer->tDbgStat.uCountBufNoMem));
    /* Histogram of packets aggregated per transaction (1..4) */
    WLAN_OS_REPORT(("uCountPktAggreg-1 = %d\n", pRxXfer->tDbgStat.uCountPktAggreg[0]));
    WLAN_OS_REPORT(("uCountPktAggreg-2 = %d\n", pRxXfer->tDbgStat.uCountPktAggreg[1]));
    WLAN_OS_REPORT(("uCountPktAggreg-3 = %d\n", pRxXfer->tDbgStat.uCountPktAggreg[2]));
    WLAN_OS_REPORT(("uCountPktAggreg-4 = %d\n", pRxXfer->tDbgStat.uCountPktAggreg[3]));
#endif
}
#endif
| 37.986813 | 254 | 0.531821 | [
"object"
] |
9e8b27455fe20512b3946c9c05cdb69ea0c26294 | 6,272 | h | C | Modules/Core/TestKernel/include/itkRandomImageSource.h | nalinimsingh/ITK_4D | 95a2eacaeaffe572889832ef0894239f89e3f303 | [
"Apache-2.0"
] | 3 | 2018-10-01T20:46:17.000Z | 2019-12-17T19:39:50.000Z | Modules/Core/TestKernel/include/itkRandomImageSource.h | nalinimsingh/ITK_4D | 95a2eacaeaffe572889832ef0894239f89e3f303 | [
"Apache-2.0"
] | null | null | null | Modules/Core/TestKernel/include/itkRandomImageSource.h | nalinimsingh/ITK_4D | 95a2eacaeaffe572889832ef0894239f89e3f303 | [
"Apache-2.0"
] | 4 | 2018-05-17T16:34:54.000Z | 2020-09-24T02:12:40.000Z | /*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
/*=========================================================================
*
* Portions of this file are subject to the VTK Toolkit Version 3 copyright.
*
* Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
*
* For complete copyright, license and disclaimer of warranty information
* please refer to the NOTICE file at the top of the ITK source tree.
*
*=========================================================================*/
#ifndef itkRandomImageSource_h
#define itkRandomImageSource_h
#include "itkImageSource.h"
#include "itkNumericTraits.h"
namespace itk
{
/** \class RandomImageSource
* \brief Generate an n-dimensional image of random pixel values.
*
* RandomImageSource generates an image of random pixel values.
* This filter uses an inline random number generator since the library
* drand48, although thread-safe, is very slow in a threaded environment.
* The output image may be of any dimension.
* NOTE: To produce deterministic results, set the number of threads
* to 1.
*
* \ingroup DataSources MultiThreaded
* \ingroup ITKTestKernel
*
* \wiki
* \wikiexample{SimpleOperations/RandomImageSource,Produce an image of noise}
* \endwiki
*/
template< typename TOutputImage >
class ITK_TEMPLATE_EXPORT RandomImageSource:public ImageSource< TOutputImage >
{
public:
  /** Standard class typedefs. */
  typedef RandomImageSource           Self;
  typedef ImageSource< TOutputImage > Superclass;
  typedef SmartPointer< Self >        Pointer;
  typedef SmartPointer< const Self >  ConstPointer;

  /** Typedef for the output image PixelType. */
  typedef typename TOutputImage::PixelType OutputImagePixelType;

  /** Typedef to describe the output image region type. */
  typedef typename TOutputImage::RegionType OutputImageRegionType;

  /** Run-time type information (and related methods). */
  itkTypeMacro(RandomImageSource, ImageSource);

  /** Method for creation through the object factory. */
  itkNewMacro(Self);

  /** Basic types from the OutputImageType */
  typedef typename TOutputImage::SizeType         SizeType;
  typedef typename TOutputImage::IndexType        IndexType;
  typedef typename TOutputImage::SpacingType      SpacingType;
  typedef typename TOutputImage::DirectionType    DirectionType;
  typedef typename TOutputImage::PointType        PointType;
  typedef typename SizeType::SizeValueType        SizeValueType;
  /* Raw C-array aliases, used by the legacy array-based Set/Get overloads */
  typedef SizeValueType    SizeValueArrayType[TOutputImage::ImageDimension];
  typedef typename TOutputImage::SpacingValueType SpacingValueType;
  typedef SpacingValueType SpacingValueArrayType[TOutputImage::ImageDimension];
  typedef typename TOutputImage::PointValueType   PointValueType;
  typedef PointValueType   PointValueArrayType[TOutputImage::ImageDimension];

  /** Set/Get size of the output image. The raw-array overloads are kept
   *  for backward compatibility with C-array callers. */
  itkSetMacro(Size, SizeType);
  virtual void SetSize(SizeValueArrayType sizeArray);
  virtual const SizeValueType * GetSize() const;

  /** Set/Get spacing of the output image */
  itkSetMacro(Spacing, SpacingType);
  virtual void SetSpacing(SpacingValueArrayType spacingArray);
  virtual const SpacingValueType * GetSpacing() const;

  /** Set/Get origin of the output image */
  itkSetMacro(Origin, PointType);
  virtual void SetOrigin(PointValueArrayType originArray);
  virtual const PointValueType * GetOrigin() const;

  /** Set/Get the direction (orientation) matrix of the output image. */
  itkSetMacro(Direction, DirectionType);
  itkGetMacro(Direction, DirectionType);

  /** Set the minimum possible pixel value. By default, it is
   * NumericTraits<TOutputImage::PixelType>::min(). */
  itkSetClampMacro( Min, OutputImagePixelType,
                    NumericTraits< OutputImagePixelType >::NonpositiveMin(),
                    NumericTraits< OutputImagePixelType >::max() );

  /** Get the minimum possible pixel value. */
  itkGetConstMacro(Min, OutputImagePixelType);

  /** Set the maximum possible pixel value. By default, it is
   * NumericTraits<TOutputImage::PixelType>::max(). */
  itkSetClampMacro( Max, OutputImagePixelType,
                    NumericTraits< OutputImagePixelType >::NonpositiveMin(),
                    NumericTraits< OutputImagePixelType >::max() );

  /** Get the maximum possible pixel value. */
  itkGetConstMacro(Max, OutputImagePixelType);

protected:
  RandomImageSource();
  ~RandomImageSource();
  void PrintSelf(std::ostream & os, Indent indent) const ITK_OVERRIDE;

  /** Fill one output region with random pixel values (executed once per
   *  thread; see the class note about determinism and thread count). */
  virtual void
  ThreadedGenerateData(const OutputImageRegionType &
                       outputRegionForThread, ThreadIdType threadId) ITK_OVERRIDE;

  /** Set the output image meta-data (size, spacing, origin, direction). */
  virtual void GenerateOutputInformation() ITK_OVERRIDE;

private:
  ITK_DISALLOW_COPY_AND_ASSIGN(RandomImageSource);

  SizeType      m_Size;      //size of the output image
  SpacingType   m_Spacing;   //spacing
  PointType     m_Origin;    //origin
  DirectionType m_Direction; //direction

  typename TOutputImage::PixelType m_Min; //minimum possible value
  typename TOutputImage::PixelType m_Max; //maximum possible value

  // The following variables are deprecated, and provided here just for
  // backward compatibility. Its use is discouraged.
  mutable PointValueArrayType   m_OriginArray;
  mutable SpacingValueArrayType m_SpacingArray;
};
} // end namespace itk
#ifndef ITK_MANUAL_INSTANTIATION
#include "itkRandomImageSource.hxx"
#endif
#endif
| 38.956522 | 103 | 0.684152 | [
"object"
] |
9e8c0f966ac6bee4df4b6f8441e9745e32be2609 | 4,357 | h | C | mocap_vicon/src/vicon_sdk/Vicon/CrossMarket/DataStream/ViconCGStream/CameraCalibrationHealthDetail.h | ulvs/motion_capture_system | ae4c0794d4a0b1e5436e830a6824761f2ce82f5f | [
"Apache-2.0"
] | 48 | 2017-02-02T22:49:52.000Z | 2022-02-26T12:20:41.000Z | mocap_vicon/src/vicon_sdk/Vicon/CrossMarket/DataStream/ViconCGStream/CameraCalibrationHealthDetail.h | ulvs/motion_capture_system | ae4c0794d4a0b1e5436e830a6824761f2ce82f5f | [
"Apache-2.0"
] | 187 | 2020-09-20T16:02:01.000Z | 2022-03-26T00:14:32.000Z | mocap_vicon/src/vicon_sdk/Vicon/CrossMarket/DataStream/ViconCGStream/CameraCalibrationHealthDetail.h | ulvs/motion_capture_system | ae4c0794d4a0b1e5436e830a6824761f2ce82f5f | [
"Apache-2.0"
] | 33 | 2016-01-11T15:46:31.000Z | 2022-02-25T13:09:44.000Z |
//////////////////////////////////////////////////////////////////////////////////
// MIT License
//
// Copyright (c) 2017 Vicon Motion Systems Ltd
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//////////////////////////////////////////////////////////////////////////////////
#pragma once
/// \file
/// Contains the Detail of the ViconCGStream::VCameraCalibrationHealth_Camera class
#include "Enum.h"
#include <StreamCommon/Buffer.h>
#include <vector>
namespace ViconCGStreamDetail
{
//-------------------------------------------------------------------------------------------------
/// Contains the Camera ID and Calibration Health for a single camera
class VCameraCalibrationHealth_Camera
{
public:
  /// Device identifier
  ViconCGStreamType::UInt32 m_DeviceID;

  /// Health Indicators
  ///
  /// All health indicators are computed over a variable time window
  /// controlled by the underlying generator
  ///
  /// The underlying generator will currently use only LABELLED reconstructions
  /// if available. Potentially model markers could be used.
  ///
  /// m_ReconUsage is the proportion of reconstructions visible in the
  /// frustrum of the camera, that have associated centroids
  /// varies between 0 and 1 (full usage)
  ViconCGStreamType::Double m_ReconUsage;

  /// m_AverageReprojectionError - average distance between
  /// centroid and projected reconstruction for this camera
  ViconCGStreamType::Double m_AverageReprojectionError;

  /// m_NReconsInFrustrum - the number of reconstructions visible
  /// in the frustrum of the camera
  ViconCGStreamType::Double m_NReconsInFrustrum;

  /// m_NReprojections - the number of 2d-3d correspondences used to
  /// to compute the m_AverageReprojectionError score
  ViconCGStreamType::Double m_NReprojections;

  /// experimental - the average number of unused centroids per frame
  ViconCGStreamType::Double m_UnusedCentroids;

  /// Equality operator: true only when every field matches exactly.
  bool operator == ( const VCameraCalibrationHealth_Camera & i_rOther ) const
  {
    return m_DeviceID == i_rOther.m_DeviceID
        && m_ReconUsage == i_rOther.m_ReconUsage
        && m_AverageReprojectionError == i_rOther.m_AverageReprojectionError
        && m_NReconsInFrustrum == i_rOther.m_NReconsInFrustrum
        && m_NReprojections == i_rOther.m_NReprojections
        && m_UnusedCentroids == i_rOther.m_UnusedCentroids;
  }

  /// Read function. Returns true only if every field was read successfully.
  /// NOTE: the field order must stay in sync with Write().
  bool Read( const ViconCGStreamIO::VBuffer & i_rBuffer )
  {
    return i_rBuffer.Read( m_DeviceID ) &&
           i_rBuffer.Read( m_ReconUsage ) &&
           i_rBuffer.Read( m_AverageReprojectionError ) &&
           i_rBuffer.Read( m_NReconsInFrustrum ) &&
           i_rBuffer.Read( m_NReprojections ) &&
           i_rBuffer.Read( m_UnusedCentroids );
  }

  /// Write function. NOTE: the field order must stay in sync with Read().
  void Write( ViconCGStreamIO::VBuffer & i_rBuffer ) const
  {
    i_rBuffer.Write( m_DeviceID );
    i_rBuffer.Write( m_ReconUsage );
    i_rBuffer.Write( m_AverageReprojectionError );
    i_rBuffer.Write( m_NReconsInFrustrum );
    i_rBuffer.Write( m_NReprojections );
    i_rBuffer.Write( m_UnusedCentroids );
  }
};
//-------------------------------------------------------------------------------------------------
};
| 38.219298 | 100 | 0.656185 | [
"vector",
"model",
"3d"
] |
9e947c7670446699d63d2d3272f1c750b6433992 | 4,279 | c | C | TestGen/CHarness/tests/SHA256_sodium_KAT.c | GaloisInc/hacrypto | 5c99d7ac73360e9b05452ac9380c1c7dc6784849 | [
"BSD-3-Clause"
] | 34 | 2015-02-04T18:03:14.000Z | 2020-11-10T06:45:28.000Z | TestGen/CHarness/tests/SHA256_sodium_KAT.c | GaloisInc/hacrypto | 5c99d7ac73360e9b05452ac9380c1c7dc6784849 | [
"BSD-3-Clause"
] | 5 | 2015-06-30T21:17:00.000Z | 2016-06-14T22:31:51.000Z | TestGen/CHarness/tests/SHA256_sodium_KAT.c | GaloisInc/hacrypto | 5c99d7ac73360e9b05452ac9380c1c7dc6784849 | [
"BSD-3-Clause"
] | 15 | 2015-10-29T14:21:58.000Z | 2022-01-19T07:33:14.000Z | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "Ccommon_test.h"
#include "../hashes.h"
/* SHA-256 known-answer test: one-block message "abc" (FIPS 180-2 vector).
 * Fix: explicit void return type (implicit int is invalid since C99). */
void SHA256_sodium_KAT_0(){
	unsigned char input[3] = "abc"; /* 3 bytes; terminating NUL intentionally dropped */
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01, 0xCF, 0xEA,
	0x41, 0x41, 0x40, 0xDE, 0x5D, 0xAE, 0x22, 0x23,
	0xB0, 0x03, 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C,
	0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00, 0x15, 0xAD
	};
	SHA256_sodium(input, result, 3);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_0");
}
/* SHA-256 known-answer test: 56-byte two-block message (FIPS 180-2 vector).
 * Fix: explicit void return type (implicit int is invalid since C99). */
void SHA256_sodium_KAT_1(){
	unsigned char input[56] = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0x24, 0x8D, 0x6A, 0x61, 0xD2, 0x06, 0x38, 0xB8,
	0xE5, 0xC0, 0x26, 0x93, 0x0C, 0x3E, 0x60, 0x39,
	0xA3, 0x3C, 0xE4, 0x59, 0x64, 0xFF, 0x21, 0x67,
	0xF6, 0xEC, 0xED, 0xD4, 0x19, 0xDB, 0x06, 0xC1
	};
	SHA256_sodium(input, result, 56);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_1");
}
/* SHA-256 known-answer test: one million repetitions of 'a' (FIPS 180-2 vector).
 * Fixes: explicit void return type (implicit int is invalid since C99);
 * malloc() result is checked; the buffer is filled with one memset() instead
 * of a byte-by-byte memcpy() loop; the buffer is freed (original leaked it). */
void SHA256_sodium_KAT_2(){
	unsigned char *input = malloc(sizeof(char) * 1000000);
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0xCD, 0xC7, 0x6E, 0x5C, 0x99, 0x14, 0xFB, 0x92,
	0x81, 0xA1, 0xC7, 0xE2, 0x84, 0xD7, 0x3E, 0x67,
	0xF1, 0x80, 0x9A, 0x48, 0xA4, 0x97, 0x20, 0x0E,
	0x04, 0x6D, 0x39, 0xCC, 0xC7, 0x11, 0x2C, 0xD0
	};
	if (input == NULL) {
		printf("SHA256_sodium_KAT_2: malloc failed\n");
		return;
	}
	memset(input, 'a', 1000000);
	SHA256_sodium(input, result, 1000000);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_2");
	free(input);
}
/* SHA-256 known-answer test: 112-byte two-block message (NIST vector).
 * Fix: explicit void return type (implicit int is invalid since C99). */
void SHA256_sodium_KAT_3(){
	unsigned char input[112] = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80,
	0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37,
	0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51,
	0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1
	};
	SHA256_sodium(input, result, 112);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_3");
}
/* SHA-256 known-answer test: the empty message (NIST vector).
 * Fixes: explicit void return type (implicit int is invalid since C99);
 * replaced the non-standard zero-length array with an empty initializer
 * ("unsigned char input[0] = {};") by a 1-byte array -- only the length
 * argument (0) is significant to the hash. */
void SHA256_sodium_KAT_4(){
	unsigned char input[1] = { 0 };
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
	};
	SHA256_sodium(input, result, 0);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_4");
}
/* SHA-256 known-answer test: "The quick brown fox jumps over the lazy dog".
 * Fix: explicit void return type (implicit int is invalid since C99). */
void SHA256_sodium_KAT_5(){
	unsigned char input[43] = "The quick brown fox jumps over the lazy dog";
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0xd7, 0xa8, 0xfb, 0xb3, 0x07, 0xd7, 0x80, 0x94,
	0x69, 0xca, 0x9a, 0xbc, 0xb0, 0x08, 0x2e, 0x4f,
	0x8d, 0x56, 0x51, 0xe4, 0x6d, 0x3c, 0xdb, 0x76,
	0x2d, 0x02, 0xd0, 0xbf, 0x37, 0xc9, 0xe5, 0x92
	};
	SHA256_sodium(input, result, 43);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_5");
}
/* SHA-256 known-answer test: "abc" supplied as raw bytes {97,98,99};
 * expected digest is identical to KAT_0.
 * Fix: explicit void return type (implicit int is invalid since C99). */
void SHA256_sodium_KAT_6(){
	unsigned char input[3] = { 97 ,98 ,99 };
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01, 0xCF, 0xEA,
	0x41, 0x41, 0x40, 0xDE, 0x5D, 0xAE, 0x22, 0x23,
	0xB0, 0x03, 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C,
	0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00, 0x15, 0xAD
	};
	SHA256_sodium(input, result, 3);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_6");
}
/* SHA-256 known-answer test: 1 GiB message = 16777216 repetitions of the
 * 64-byte pattern "abcdefgh...hijklmno" (NIST extremely-long-message vector).
 * Fixes: explicit void return type (implicit int is invalid since C99);
 * the 1 GiB malloc() is checked before use; size_t index avoids any int
 * overflow concern in the offset arithmetic; the buffer is freed
 * (original leaked it). */
void SHA256_sodium_KAT_7(){
	const char to_repeat[] = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"; /* 64 bytes */
	unsigned char *input = malloc(sizeof(char) * 1073741824);
	unsigned char result[32];
	unsigned char expected_result[32] = {
	0x50, 0xe7, 0x2a, 0x0e, 0x26, 0x44, 0x2f, 0xe2,
	0x55, 0x2d, 0xc3, 0x93, 0x8a, 0xc5, 0x86, 0x58,
	0x22, 0x8c, 0x0c, 0xbf, 0xb1, 0xd2, 0xca, 0x87,
	0x2a, 0xe4, 0x35, 0x26, 0x6f, 0xcd, 0x05, 0x5e
	};
	size_t i;
	if (input == NULL) {
		printf("SHA256_sodium_KAT_7: malloc failed\n");
		return;
	}
	for(i = 0; i < 16777216; i++){
		memcpy(input + i*64, to_repeat, 64);
	}
	SHA256_sodium(input, result, 1073741824);
	check_KAT(result, expected_result, 32, "SHA256_sodium_KAT_7");
	free(input);
}
| 28.718121 | 144 | 0.671886 | [
"vector"
] |
9e9521fa934d9e1b8cd84053929bc7fc840eec2c | 26,870 | h | C | include/Macro.h | kuochuanpan/gamer-fork | 89e9516193fc6eb54f57f6caf7b0ef5b280b6792 | [
"BSD-3-Clause"
] | 1 | 2019-12-04T05:29:13.000Z | 2019-12-04T05:29:13.000Z | include/Macro.h | kuochuanpan/gamer-fork | 89e9516193fc6eb54f57f6caf7b0ef5b280b6792 | [
"BSD-3-Clause"
] | null | null | null | include/Macro.h | kuochuanpan/gamer-fork | 89e9516193fc6eb54f57f6caf7b0ef5b280b6792 | [
"BSD-3-Clause"
] | 3 | 2019-10-23T03:05:06.000Z | 2019-12-04T05:19:00.000Z | #ifndef __MACRO_H__
#define __MACRO_H__
// ****************************************************************************
// ** This header defines the symbolic constants and macros used in GAMER. **
// ** For clarity, useless options defined in the makefile will be "undef" **
// ** in the end of this file. **
// ****************************************************************************
// ########################
// ## Symbolic Constants ##
// ########################
// current version
#define VERSION "gamer-2.1.1.dev"
// option == NONE --> the option is turned off
#define NONE 0
// GPU architecture
#define FERMI 1
#define KEPLER 2
#define MAXWELL 3
#define PASCAL 4
#define VOLTA 5
#define TURING 6
// models
#define HYDRO 1
//#define MHD 2 // MHD is now regarded as an option of HYDRO
#define ELBDM 3
#define PAR_ONLY 4
// hydrodynamic schemes
#define RTVD 1
#define MHM 3
#define MHM_RP 4
#define CTU 5
// data reconstruction schemes
#define PLM 1
#define PPM 2
// Riemann solvers
#define EXACT 1
#define ROE 2
#define HLLE 3
#define HLLC 4
#define HLLD 5
// dual-energy variables
#define DE_ENPY 1
#define DE_EINT 2
#ifdef DUAL_ENERGY
#define DE_UPDATED_BY_ETOT ('0')
#define DE_UPDATED_BY_DUAL ('1')
#define DE_UPDATED_BY_MIN_PRES ('2')
#define DE_UPDATED_BY_ETOT_GRA ('3')
#endif
// equation of states
#define EOS_GAMMA 1
#define EOS_ISOTHERMAL 2
#define EOS_NUCLEAR 3
#define EOS_TABULAR 4
#define EOS_USER 5
// Poisson solvers
#define SOR 1
#define MG 2
// load-balance parallelization
#define HILBERT 1
// random number implementation
#define RNG_GNU_EXT 1
#define RNG_CPP11 2
// NCOMP_FLUID : number of active components in each cell (for patch->fluid[])
// --> do not include passive components here, which is set by NCOMP_PASSIVE
// NFLUX_FLUID : number of active components in patch->flux[]
// --> do not include passive components here, which is set by NFLUX_PASSIVE
// NCOMP_MAG : number of magnetic field components (for patch->magnetic[])
// NCOMP_ELE : number of electric field components on each cell face (for patch->electric[])
#if ( MODEL == HYDRO )
# define NCOMP_FLUID 5
# define NFLUX_FLUID NCOMP_FLUID
# ifdef MHD
# define NCOMP_MAG 3
# define NCOMP_ELE 2
# else
# define NCOMP_MAG 0
# define NCOMP_ELE 0
# endif
// for ELBDM, we only need the density flux
#elif ( MODEL == ELBDM )
# define NCOMP_FLUID 3
# define NFLUX_FLUID 1
#elif ( MODEL == PAR_ONLY )
# define NCOMP_FLUID 0
# define NFLUX_FLUID 0
#else
# error : ERROR : unsupported MODEL (please edit NCOMP_FLUID and NFLUX_FLUID for the new MODEL) !!
#endif // MODEL
// number of passively advected components in each cell
// define NCOMP_PASSIVE_USER if not set in the Makefile
#ifndef NCOMP_PASSIVE_USER
# define NCOMP_PASSIVE_USER 0
#endif
// add built-in scalars
#if ( MODEL == HYDRO )
// entropy (or internal energy) for the dual-energy formalism
# ifdef DUAL_ENERGY
# define NCOMP_PASSIVE_BUILTIN0 1
# else
# define NCOMP_PASSIVE_BUILTIN0 0
# endif
// cosmic rays
# ifdef COSMIC_RAY
# define NCOMP_PASSIVE_BUILTIN1 1
# else
# define NCOMP_PASSIVE_BUILTIN1 0
# endif
// total number of built-in scalars
# define NCOMP_PASSIVE_BUILTIN ( NCOMP_PASSIVE_BUILTIN0 + NCOMP_PASSIVE_BUILTIN1 )
#endif // #if ( MODEL == HYDRO )
// define NCOMP_PASSIVE_BUILTIN if not set yet
#ifndef NCOMP_PASSIVE_BUILTIN
# define NCOMP_PASSIVE_BUILTIN 0
#endif
// total number of passive scalars
# define NCOMP_PASSIVE ( NCOMP_PASSIVE_USER + NCOMP_PASSIVE_BUILTIN )
// assuming all passive scalars have the corresponding fluxes
# define NFLUX_PASSIVE NCOMP_PASSIVE
// total number of variables in each cell and in the flux array including both active and passive variables
# define NCOMP_TOTAL ( NCOMP_FLUID + NCOMP_PASSIVE )
# define NFLUX_TOTAL ( NFLUX_FLUID + NFLUX_PASSIVE )
// number of input/output fluid variables in the fluid solver
#if ( MODEL == HYDRO )
# define FLU_NIN NCOMP_TOTAL
# define FLU_NOUT NCOMP_TOTAL
// for ELBDM, we do not need to transfer the density component into GPU
#elif ( MODEL == ELBDM )
# define FLU_NIN ( NCOMP_TOTAL - 1 )
# define FLU_NOUT ( NCOMP_TOTAL - 0 )
#elif ( MODEL == PAR_ONLY )
# define FLU_NIN 0
# define FLU_NOUT 0
#else
# error : ERROR : unsupported MODEL (please edit FLU_NIN and FLU_NOUT for the new MODEL) !!
#endif // MODEL
// number of input fluid variables in the dt solver
// --> EOS_GAMMA/EOS_ISOTHERMAL do not require passive scalars
#if ( MODEL == HYDRO && ( EOS == EOS_GAMMA || EOS == EOS_ISOTHERMAL ) )
# define FLU_NIN_T NCOMP_FLUID
#else
# define FLU_NIN_T NCOMP_TOTAL
#endif
// built-in fields in different models
#if ( MODEL == HYDRO )
// field indices of fluid[] --> element of [0 ... NCOMP_FLUID-1]
// --> must NOT modify their values
// --> in addition, they must be consistent with the order these fields are declared in Init_Field()
# define DENS 0
# define MOMX 1
# define MOMY 2
# define MOMZ 3
# define ENGY 4
// field indices of passive[] --> element of [NCOMP_FLUID ... NCOMP_TOTAL-1]
#if ( NCOMP_PASSIVE > 0 )
// always put the built-in variables at the END of the field list
// --> so that their indices (e.g., ENPY/EINT/CRAY) can be determined during compilation
// --> convenient (and probably also more efficient) for the fluid solver
# define PASSIVE_NEXT_IDX0 ( NCOMP_TOTAL - 1 )
# if ( DUAL_ENERGY == DE_ENPY )
# define ENPY ( PASSIVE_NEXT_IDX0 )
# define PASSIVE_NEXT_IDX1 ( ENPY - 1 )
# elif ( DUAL_ENERGY == DE_EINT )
# define EINT ( PASSIVE_NEXT_IDX0 )
# define PASSIVE_NEXT_IDX1 ( EINT - 1 )
# else
# define PASSIVE_NEXT_IDX1 ( PASSIVE_NEXT_IDX0 )
# endif
# ifdef COSMIC_RAY
# define CRAY ( PASSIVE_NEXT_IDX1 )
# define PASSIVE_NEXT_IDX2 ( CRAY - 1 )
# else
# define PASSIVE_NEXT_IDX2 ( PASSIVE_NEXT_IDX1 )
# endif
#endif // #if ( NCOMP_PASSIVE > 0 )
// field indices of magnetic --> element of [0 ... NCOMP_MAG-1]
# ifdef MHD
# define MAGX 0
# define MAGY 1
# define MAGZ 2
# endif
// flux indices of flux[] --> element of [0 ... NFLUX_FLUID-1]
# define FLUX_DENS 0
# define FLUX_MOMX 1
# define FLUX_MOMY 2
# define FLUX_MOMZ 3
# define FLUX_ENGY 4
// flux indices of flux_passive[] --> element of [NFLUX_FLUID ... NFLUX_TOTAL-1]
#if ( NCOMP_PASSIVE > 0 )
// always put the built-in variables at the END of the list
# define FLUX_NEXT_IDX0 ( NFLUX_TOTAL - 1 )
# if ( DUAL_ENERGY == DE_ENPY )
# define FLUX_ENPY ( FLUX_NEXT_IDX0 )
# define FLUX_NEXT_IDX1 ( FLUX_ENPY - 1 )
# elif ( DUAL_ENERGY == DE_EINT )
# define FLUX_EINT ( FLUX_NEXT_IDX0 )
# define FLUX_NEXT_IDX1 ( FLUX_EINT - 1 )
# else
# define FLUX_NEXT_IDX1 ( FLUX_NEXT_IDX0 )
# endif
# ifdef COSMIC_RAY
# define FLUX_CRAY ( FLUX_NEXT_IDX1 )
# define FLUX_NEXT_IDX2 ( FLUX_CRAY - 1 )
# else
# define FLUX_NEXT_IDX2 ( FLUX_NEXT_IDX1 )
# endif
#endif // #if ( NCOMP_PASSIVE > 0 )
// bitwise field indices
// --> must have "_VAR_NAME = 1L<<VAR_NAME" (e.g., _DENS == 1L<<DENS)
// --> convenient for determining subsets of fields (e.g., _DENS|_ENGY)
// --> used as function parameters (e.g., Prepare_PatchData(), Flu_FixUp(), Flu_FixUp_Restrict(), Buf_GetBufferData())
# define _DENS ( 1L << DENS )
# define _MOMX ( 1L << MOMX )
# define _MOMY ( 1L << MOMY )
# define _MOMZ ( 1L << MOMZ )
# define _ENGY ( 1L << ENGY )
#if ( NCOMP_PASSIVE > 0 )
# if ( DUAL_ENERGY == DE_ENPY )
# define _ENPY ( 1L << ENPY )
# elif ( DUAL_ENERGY == DE_EINT )
# define _EINT ( 1L << EINT )
# endif
# ifdef COSMIC_RAY
# define _CRAY ( 1L << CRAY )
# endif
#endif // #if ( NCOMP_PASSIVE > 0 )
// magnetic field
# ifdef MHD
# define _MAGX ( 1L << MAGX )
# define _MAGY ( 1L << MAGY )
# define _MAGZ ( 1L << MAGZ )
# define _MAG ( _MAGX | _MAGY | _MAGZ )
# else
# define _MAG 0
# endif
// bitwise flux indices
# define _FLUX_DENS ( 1L << FLUX_DENS )
# define _FLUX_MOMX ( 1L << FLUX_MOMX )
# define _FLUX_MOMY ( 1L << FLUX_MOMY )
# define _FLUX_MOMZ ( 1L << FLUX_MOMZ )
# define _FLUX_ENGY ( 1L << FLUX_ENGY )
#if ( NFLUX_PASSIVE > 0 )
# if ( DUAL_ENERGY == DE_ENPY )
# define _FLUX_ENPY ( 1L << FLUX_ENPY )
# elif ( DUAL_ENERGY == DE_EINT )
# define _FLUX_EINT ( 1L << FLUX_EINT )
# endif
# ifdef COSMIC_RAY
# define _FLUX_CRAY ( 1L << FLUX_CRAY )
# endif
#endif // #if ( NFLUX_PASSIVE > 0 )
// bitwise indices of derived fields
// --> start from (1L<<NCOMP_TOTAL) to distinguish from the intrinsic fields
// --> remember to define NDERIVE = total number of derived fields
// _EINT_DER is a derived field for distinguishing from _EINT
// --> the latter is an intrinsic field when adopting DUAL_ENERGY == DE_EINT
# define _VELX ( 1L << (NCOMP_TOTAL+ 0) )
# define _VELY ( 1L << (NCOMP_TOTAL+ 1) )
# define _VELZ ( 1L << (NCOMP_TOTAL+ 2) )
# define _VELR ( 1L << (NCOMP_TOTAL+ 3) )
# define _PRES ( 1L << (NCOMP_TOTAL+ 4) )
# define _TEMP ( 1L << (NCOMP_TOTAL+ 5) )
# define _EINT_DER ( 1L << (NCOMP_TOTAL+ 6) )
# define _MAGX_CC ( 1L << (NCOMP_TOTAL+ 7) )
# define _MAGY_CC ( 1L << (NCOMP_TOTAL+ 8) )
# define _MAGZ_CC ( 1L << (NCOMP_TOTAL+ 9) )
# define _MAG_ENGY_CC ( 1L << (NCOMP_TOTAL+10) )
# define _DERIVED ( _VELX | _VELY | _VELZ | _VELR | _PRES | _TEMP | _EINT_DER | _MAGX_CC | _MAGY_CC | _MAGZ_CC | _MAG_ENGY_CC )
# define NDERIVE 11
#elif ( MODEL == ELBDM )
// field indices of fluid[] --> element of [0 ... NCOMP_FLUID-1]
# define DENS 0
# define REAL 1
# define IMAG 2
// field indices of passive[] --> element of [NCOMP_FLUID ... NCOMP_TOTAL-1]
// none for ELBDM
// flux indices of flux[] --> element of [0 ... NFLUX_FLUID-1]
# define FLUX_DENS 0
// bitwise field indices
# define _DENS ( 1L << DENS )
# define _REAL ( 1L << REAL )
# define _IMAG ( 1L << IMAG )
// bitwise flux indices
# define _FLUX_DENS ( 1L << FLUX_DENS )
// bitwise indices of derived fields
# define _DERIVED 0
# define NDERIVE 0
#elif ( MODEL == PAR_ONLY )
# define _DERIVED 0
# define NDERIVE 0
#else
# error : ERROR : unsupported MODEL !!
#endif // MODEL
// bitwise field indices used by all models
# define _NONE 0
# ifdef GRAVITY
# define _POTE ( 1L << (NCOMP_TOTAL+NDERIVE) )
# endif
# define _FLUID ( ( 1L << NCOMP_FLUID ) - 1L )
# define _PASSIVE ( ( 1L << NCOMP_TOTAL ) - 1L - _FLUID )
# define _TOTAL ( ( 1L << NCOMP_TOTAL ) - 1L )
# define _FLUX_FLUID ( ( 1L << NFLUX_FLUID ) - 1L )
# define _FLUX_PASSIVE ( ( 1L << NFLUX_TOTAL ) - 1L - _FLUX_FLUID )
# define _FLUX_TOTAL ( ( 1L << NFLUX_TOTAL ) - 1L )
// symbolic constants for particles
#ifdef PARTICLE
// number of built-in particle attributes
// (1) mass, position*3, velocity*3, and time
# define PAR_NATT_BUILTIN0 8
// acceleration*3 when STORE_PAR_ACC is adopted
# ifdef STORE_PAR_ACC
# define PAR_NATT_BUILTIN1 3
# else
# define PAR_NATT_BUILTIN1 0
# endif
// particle creation time when STAR_FORMATION is adopted
# ifdef STAR_FORMATION
# define PAR_NATT_BUILTIN2 1
# else
# define PAR_NATT_BUILTIN2 0
# endif
// **total** number of built-in particle attributes
# define PAR_NATT_BUILTIN ( PAR_NATT_BUILTIN0 + PAR_NATT_BUILTIN1 + PAR_NATT_BUILTIN2 )
// number of particle attributes that we do not want to store on disk (currently time + acceleration*3)
# define PAR_NATT_UNSTORED ( 1 + PAR_NATT_BUILTIN1 )
# define PAR_NATT_STORED ( PAR_NATT_TOTAL - PAR_NATT_UNSTORED )
// define PAR_NATT_USER if not set in the Makefile
# ifndef PAR_NATT_USER
# define PAR_NATT_USER 0
# endif
// total number of particle attributes (built-in + user-defined)
# define PAR_NATT_TOTAL ( PAR_NATT_BUILTIN + PAR_NATT_USER )
// indices of built-in particle attributes in Par->Attribute[]
// --> must NOT modify their values
# define PAR_MASS 0
# define PAR_POSX 1
# define PAR_POSY 2
# define PAR_POSZ 3
# define PAR_VELX 4
# define PAR_VELY 5
# define PAR_VELZ 6
// always put acceleration and time at the END of the particle attribute list
// --> make it easier to discard them when storing data on disk (see Output_DumpData_Total(_HDF5).cpp)
# ifdef STORE_PAR_ACC
# define PAR_ACCX ( PAR_NATT_TOTAL - 4 )
# define PAR_ACCY ( PAR_NATT_TOTAL - 3 )
# define PAR_ACCZ ( PAR_NATT_TOTAL - 2 )
# endif
# define PAR_TIME ( PAR_NATT_TOTAL - 1 )
// bitwise field indices related to particles
// --> note that _POTE = ( 1L << (NCOMP_TOTAL+NDERIVE) )
# define _PAR_DENS ( 1L << (NCOMP_TOTAL+NDERIVE+1) )
# if ( MODEL == PAR_ONLY )
# define _TOTAL_DENS ( _PAR_DENS )
# else
# define _TOTAL_DENS ( 1L << (NCOMP_TOTAL+NDERIVE+2) )
# endif
#else // #ifdef PARTICLE
// total density equals gas density if there is no particle
# define _TOTAL_DENS ( _DENS )
#endif // #ifdef PARTICLE ... else ...
// number of fluid ghost zones for the fluid solver
#if ( MODEL == HYDRO ) // hydro
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
# if ( LR_SCHEME == PLM )
# define LR_GHOST_SIZE 1
# elif ( LR_SCHEME == PPM )
# define LR_GHOST_SIZE 2
# else
# error : ERROR : unsupported LR_SCHEME !!
# endif
# endif // MHM/MHM_RP/CTU
# if ( FLU_SCHEME == RTVD )
# define FLU_GHOST_SIZE 3
# elif ( FLU_SCHEME == MHM )
# define FLU_GHOST_SIZE ( 1 + LR_GHOST_SIZE )
# elif ( FLU_SCHEME == MHM_RP )
# define FLU_GHOST_SIZE ( 2 + LR_GHOST_SIZE )
# elif ( FLU_SCHEME == CTU )
# ifdef MHD
# define FLU_GHOST_SIZE ( 2 + LR_GHOST_SIZE )
# else
# define FLU_GHOST_SIZE ( 1 + LR_GHOST_SIZE )
# endif // MHD
# endif // FLU_SCHEME
#elif ( MODEL == ELBDM ) // ELBDM
# ifdef LAPLACIAN_4TH
# define FLU_GHOST_SIZE 6
# else
# define FLU_GHOST_SIZE 3
# endif
#else
# error : ERROR : unsupported MODEL !!
#endif // MODEL
// self-gravity constants
#ifdef GRAVITY
// number of input and output variables in the gravity solver
# if ( MODEL == HYDRO )
# define GRA_NIN NCOMP_FLUID
// for ELBDM, we do not need to transfer the density component
# elif ( MODEL == ELBDM )
# define GRA_NIN ( NCOMP_FLUID - 1 )
# else
# error Error : unsupported MODEL (please edit GRA_NIN in the new MODEL) !!
# endif // MODEL
// number of potential ghost zones for evaluating potential (maximum=5) ~ Poisson solver
# define POT_GHOST_SIZE 5
// number of potential ghost zones for advancing fluid by gravity ~ Gravity solver
# if ( MODEL == HYDRO )
# ifdef STORE_POT_GHOST
# define GRA_GHOST_SIZE 2
# else
# define GRA_GHOST_SIZE 1
//# define GRA_GHOST_SIZE 2
# endif
# elif ( MODEL == ELBDM )
# ifdef STORE_POT_GHOST
# define GRA_GHOST_SIZE 2
# else
# define GRA_GHOST_SIZE 0
# endif
# elif ( MODEL == PAR_ONLY )
# ifdef STORE_POT_GHOST
# define GRA_GHOST_SIZE 2
# else
# define GRA_GHOST_SIZE 0
# endif
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
// number of potential ghost zones for correcting the half-step velocity if UNSPLIT_GRAVITY is on
// _F/_G: fluid/gravity solvers
# ifdef UNSPLIT_GRAVITY
# if ( MODEL == HYDRO )
# ifdef MHD
# define USG_GHOST_SIZE_F 2
# define USG_GHOST_SIZE_G 1
# else
# define USG_GHOST_SIZE_F 1
# define USG_GHOST_SIZE_G 1
# endif
# elif ( MODEL == ELBDM )
# define USG_GHOST_SIZE_F 0
# define USG_GHOST_SIZE_G 0
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
# endif // #ifdef UNSPLIT_GRAVITY
// number of density ghost zones for storing the temporary particle mass density in rho_ext[]
# ifdef PARTICLE
# define RHOEXT_GHOST_SIZE 2
# endif
// number of density ghost zones for the Poisson solver
# define RHO_GHOST_SIZE ( POT_GHOST_SIZE-1 )
#endif // #ifdef GRAVITY
// patch size (number of cells of a single patch in the x/y/z directions)
#define PATCH_SIZE 8
#define PS1 ( 1*PATCH_SIZE )
#define PS2 ( 2*PATCH_SIZE )
#define PS2P1 ( PS2 + 1 )
#define PS1M1 ( PS1 - 1 )
#define PS1P1 ( PS1 + 1 )
// size of GPU arrays (in one dimension)
//###REVISE: support interpolation schemes requiring 2 ghost cells on each side for POT_NXT
# define FLU_NXT ( PS2 + 2*FLU_GHOST_SIZE ) // use patch group as the unit
# define FLU_NXT_P1 ( FLU_NXT + 1 )
#ifdef GRAVITY
# define POT_NXT ( PS1/2 + 2*( (POT_GHOST_SIZE+3)/2 ) ) // assuming interpolation ghost zone == 1
# define RHO_NXT ( PS1 + 2*RHO_GHOST_SIZE ) // POT/RHO/GRA_NXT use patch as the unit
# define GRA_NXT ( PS1 + 2*GRA_GHOST_SIZE )
# ifdef UNSPLIT_GRAVITY
# define USG_NXT_F ( PS2 + 2*USG_GHOST_SIZE_F ) // we use patch group as unit for the fluid solver
# define USG_NXT_G ( PS1 + 2*USG_GHOST_SIZE_G ) // we use patch as unit for the gravity solver
# else
# define USG_NXT_F ( 1 ) // still define USG_NXT_F/G since many function prototypes
# define USG_NXT_G ( 1 ) // require it
# endif
#else
# define GRA_NXT ( 1 ) // still define GRA_NXT ...
# define USG_NXT_F ( 1 ) // still define USG_NXT_F ...
#endif
#ifdef PARTICLE
# define RHOEXT_NXT ( PS1 + 2*RHOEXT_GHOST_SIZE ) // array rho_ext of each patch
#endif
// size of auxiliary arrays and EoS tables
#if ( MODEL == HYDRO )
# define EOS_NAUX_MAX 20 // EoS_AuxArray_Flt/Int[]
# define EOS_NTABLE_MAX 20 // *_EoS_Table[]
#endif
#ifdef GRAVITY
# define EXT_POT_NAUX_MAX 20 // ExtPot_AuxArray[]
# define EXT_ACC_NAUX_MAX 20 // ExtAcc_AuxArray[]
#endif
// bitwise reproducibility in flux and electric field fix-up operations
#if ( MODEL == HYDRO )
# ifdef BITWISE_REPRODUCIBILITY
# define BIT_REP_FLUX
# endif
// enable BIT_REP_ELECTRIC by default even when BITWISE_REPRODUCIBILITY is off
// --> ensures that the B field on the common interface between two nearby patches are fully
// consistent with each other (even the round-off errors are the same)
// --> reduces the div(B) errors significantly
# ifdef MHD
//#ifdef BITWISE_REPRODUCIBILITY
# define BIT_REP_ELECTRIC
//#endif
# endif // MHD
#endif // HYDRO
// extreme values
#ifndef __INT_MAX__
# define __INT_MAX__ 2147483647
#endif
#ifndef __LONG_MAX__
# define __LONG_MAX__ 9223372036854775807L
#endif
#ifndef __UINT_MAX__
# define __UINT_MAX__ ( __INT_MAX__*2U + 1U )
#endif
#ifndef __ULONG_MAX__
# define __ULONG_MAX__ 18446744073709551615UL // 2^64-1
#endif
#ifndef __FLT_MAX__
# define __FLT_MAX__ 3.40282347e+38F
#endif
#ifndef __FLT_MIN__
# define __FLT_MIN__ 1.17549435e-38F
#endif
#ifndef __DBL_MAX__
# define __DBL_MAX__ 1.79769313e+308
#endif
#ifndef __DBL_MIN__
# define __DBL_MIN__ 2.22507386e-308
#endif
// extreme value used for various purposes (e.g., floor value for passive scalars)
#ifdef FLOAT8
# define TINY_NUMBER __DBL_MIN__
# define HUGE_NUMBER __DBL_MAX__
#else
# define TINY_NUMBER __FLT_MIN__
# define HUGE_NUMBER __FLT_MAX__
#endif
// maximum allowed error for various purposes (e.g., exact Riemann solver, MHD routines, Mis_CompareRealValue())
#define MAX_ERROR_DBL 1.0e-14
#define MAX_ERROR_FLT 1.0e-06f
#ifdef FLOAT8
# define MAX_ERROR MAX_ERROR_DBL
#else
# define MAX_ERROR MAX_ERROR_FLT
#endif
// sibling index offset for the non-periodic B.C.
#define SIB_OFFSET_NONPERIODIC ( -100 )
// son index offset for LOAD_BALANCE
#ifdef LOAD_BALANCE
# define SON_OFFSET_LB ( -1000 )
#endif
// flag used in "Buf_RecordBoundaryFlag" and "Flag_Buffer" (must be negative)
#ifndef SERIAL
# define BUFFER_IS_FLAGGED ( -999 )
#endif
// marker indicating that the array "pot_ext" has NOT been properly set
#if ( defined GRAVITY && defined STORE_POT_GHOST )
# define POT_EXT_NEED_INIT __FLT_MAX__
#endif
// marker indicating that the array "rho_ext" has NOT been properly set
#ifdef PARTICLE
# define RHO_EXT_NEED_INIT __FLT_MAX__
#endif
// markers for inactive particles
#ifdef PARTICLE
# define PAR_INACTIVE_OUTSIDE ( -1.0 )
# define PAR_INACTIVE_MPI ( -2.0 )
#endif
// OpenMP scheduling for particle routines
#if ( defined PARTICLE && defined OPENMP )
# define PAR_OMP_SCHED dynamic
# define PAR_OMP_SCHED_CHUNK 1
#endif
// NULL values
#ifndef NULL
# define NULL 0
#endif
#ifndef NULL_INT
# define NULL_INT __INT_MAX__
#endif
#ifndef NULL_REAL
# define NULL_REAL __FLT_MAX__
#endif
#ifndef NULL_BOOL
# define NULL_BOOL false
#endif
// GAMER status
#define GAMER_SUCCESS 1
#define GAMER_FAILED 0
// timer switch
#define TIMER_ON 1
#define TIMER_OFF 0
// symbolic constant for Aux_Error()
#define ERROR_INFO __FILE__, __LINE__, __FUNCTION__
// miscellaneous
#define TOP_LEVEL ( NLEVEL - 1 )
// maximum length for strings
#define MAX_STRING 512
// ############
// ## Macros ##
// ############
// single/double-precision mathematic functions
#ifdef FLOAT8
# define FABS( a ) fabs( a )
# define SQRT( a ) sqrt( a )
# define SIN( a ) sin( a )
# define COS( a ) cos( a )
# define LOG( a ) log( a )
# define EXP( a ) exp( a )
# define ATAN( a ) atan( a )
# define FLOOR( a ) floor( a )
# define FMAX( a, b ) fmax( a, b )
# define FMIN( a, b ) fmin( a, b )
# define POW( a, b ) pow( a, b )
# define FMOD( a, b ) fmod( a, b )
# define ATAN2( a, b ) atan2( a, b )
#else
# define FABS( a ) fabsf( a )
# define SQRT( a ) sqrtf( a )
# define SIN( a ) sinf( a )
# define COS( a ) cosf( a )
# define LOG( a ) logf( a )
# define EXP( a ) expf( a )
# define ATAN( a ) atanf( a )
# define FLOOR( a ) floorf( a )
# define FMAX( a, b ) fmaxf( a, b )
# define FMIN( a, b ) fminf( a, b )
# define POW( a, b ) powf( a, b )
# define FMOD( a, b ) fmodf( a, b )
# define ATAN2( a, b ) atan2f( a, b )
#endif
// sign function
#define SIGN( a ) ( ( (a) < (real)0.0 ) ? (real)-1.0 : (real)+1.0 )
// max/min functions
#define MAX( a, b ) ( ( (a) > (b) ) ? (a) : (b) )
#define MIN( a, b ) ( ( (a) < (b) ) ? (a) : (b) )
// square/cube function
#define SQR( a ) ( (a)*(a) )
#define CUBE( a ) ( (a)*(a)*(a) )
// 3D to 1D array indices transformation
#define IDX321( i, j, k, Ni, Nj ) ( ( (k)*(Nj) + (j) )*(Ni) + (i) )
// 3D to 1D array indices transformation for patch->magnetic[]
#ifdef MHD
#define IDX321_BX( i, j, k, Ni, Nj ) ( ( (k)*((Nj) ) + (j) )*((Ni)+1) + (i) )
#define IDX321_BY( i, j, k, Ni, Nj ) ( ( (k)*((Nj)+1) + (j) )*((Ni) ) + (i) )
#define IDX321_BZ( i, j, k, Ni, Nj ) ( ( (k)*((Nj) ) + (j) )*((Ni) ) + (i) )
#endif
// helper macros for printing symbolic constants in macros
// ref: https://stackoverflow.com/questions/3419332/c-preprocessor-stringify-the-result-of-a-macro
# define QUOTE( str ) #str
# define EXPAND_AND_QUOTE( str ) QUOTE( str )
// convenient macros for defining and declaring global variables
// ==> predefine DEFINE_GLOBAL in the file actually **defines** these global variables
// ==> there should be one and only one file that defines DEFINE_GLOBAL
// SET_GLOBAL will invoke either SET_GLOBAL_INIT or SET_GLOBAL_NOINIT depending on the number of arguments
// ==> http://stackoverflow.com/questions/11761703/overloading-macro-on-number-of-arguments
#define GET_MACRO( _1, _2, TARGET_MACRO, ... ) TARGET_MACRO
#define SET_GLOBAL( ... ) GET_MACRO( __VA_ARGS__, SET_GLOBAL_INIT, SET_GLOBAL_NOINIT ) ( __VA_ARGS__ )
// SET_GLOBAL_INIT/NOINIT are for global variables with/without initialization
#ifdef DEFINE_GLOBAL
# define SET_GLOBAL_INIT( declaration, init_value ) declaration = init_value
# define SET_GLOBAL_NOINIT( declaration ) declaration
#else
# define SET_GLOBAL_INIT( declaration, init_value ) extern declaration
# define SET_GLOBAL_NOINIT( declaration ) extern declaration
#endif
// macro converting an array index (e.g., DENS) to bitwise index (e.g., _DENS=(1L<<DENS))
#define BIDX( idx ) ( 1L << (idx) )
// ################################
// ## Remove useless definitions ##
// ################################
#if ( MODEL == HYDRO )
# if ( FLU_SCHEME != MHM && FLU_SCHEME != MHM_RP && FLU_SCHEME != CTU )
# undef LR_SCHEME
# endif
# if ( FLU_SCHEME != MHM && FLU_SCHEME != MHM_RP && FLU_SCHEME != CTU )
# undef RSOLVER
# endif
#endif
#if ( MODEL == PAR_ONLY )
# undef UNSPLIT_GRAVITY
#endif
// currently we always set GPU_ARCH == NONE when GPU is off
#ifndef GPU
# undef GPU_ARCH
# define GPU_ARCH NONE
#endif
#endif // #ifndef __MACRO_H__
| 29.17481 | 139 | 0.608039 | [
"model",
"3d"
] |
9ea1e6adbdbca6f1121ce1a2d281ab95bc807627 | 3,939 | h | C | Open3D/cpp/open3d/visualization/rendering/MatrixInteractorLogic.h | xdeng7/redwood_open3d_3dreconstruction | aa1651d3cec1feb00468d548ac2268a3ed17b856 | [
"Apache-2.0"
] | null | null | null | Open3D/cpp/open3d/visualization/rendering/MatrixInteractorLogic.h | xdeng7/redwood_open3d_3dreconstruction | aa1651d3cec1feb00468d548ac2268a3ed17b856 | [
"Apache-2.0"
] | null | null | null | Open3D/cpp/open3d/visualization/rendering/MatrixInteractorLogic.h | xdeng7/redwood_open3d_3dreconstruction | aa1651d3cec1feb00468d548ac2268a3ed17b856 | [
"Apache-2.0"
] | null | null | null | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include "open3d/geometry/BoundingVolume.h"
#include "open3d/visualization/rendering/Camera.h"
namespace open3d {
namespace visualization {
namespace rendering {
/// Base class for rotating and dollying (translating along forward axis).
/// Could be used for a camera, or also something else, like a the
/// direction of a directional light.
class MatrixInteractorLogic {
public:
virtual ~MatrixInteractorLogic();
void SetViewSize(int width, int height);
int GetViewWidth() const;
int GetViewHeight() const;
const geometry::AxisAlignedBoundingBox& GetBoundingBox() const;
virtual void SetBoundingBox(const geometry::AxisAlignedBoundingBox& bounds);
void SetMouseDownInfo(const Camera::Transform& matrix,
const Eigen::Vector3f& center_of_rotation);
const Camera::Transform& GetMatrix() const;
/// Rotates about an axis defined by dx * matrixLeft, dy * matrixUp.
/// `dy` is assumed to be in window-style coordinates, that is, going
/// up produces a negative dy. The axis goes through the center of
/// rotation.
virtual void Rotate(int dx, int dy);
/// Same as Rotate() except that the dx-axis and the dy-axis are
/// specified
virtual void RotateWorld(int dx,
int dy,
const Eigen::Vector3f& x_axis,
const Eigen::Vector3f& y_axis);
/// Rotates about the forward axis of the matrix
virtual void RotateZ(int dx, int dy);
virtual void RotateZWorld(int dx, int dy, const Eigen::Vector3f& forward);
enum class DragType { MOUSE, WHEEL, TWO_FINGER };
/// Moves the matrix along the forward axis. (This is one type
/// of zoom.)
virtual void Dolly(float dy, DragType drag_type);
virtual void Dolly(float z_dist, Camera::Transform matrix);
private:
Camera::Transform matrix_;
protected:
int view_width_ = 1;
int view_height_ = 1;
double model_size_ = 20.0;
geometry::AxisAlignedBoundingBox model_bounds_;
Eigen::Vector3f center_of_rotation_;
Camera::Transform matrix_at_mouse_down_;
Eigen::Vector3f center_of_rotation_at_mouse_down_;
void SetMatrix(const Camera::Transform& matrix);
float CalcRotateRadians(int dx, int dy);
float CalcRotateZRadians(int dx, int dy);
float CalcDollyDist(float dy, DragType drag_type);
};
} // namespace rendering
} // namespace visualization
} // namespace open3d
| 38.617647 | 80 | 0.659558 | [
"geometry",
"transform"
] |
9ea271bca386de63ab7b9becfe01a170edaa3ef2 | 614 | h | C | data/train/cpp/9ea271bca386de63ab7b9becfe01a170edaa3ef2Model.h | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/cpp/9ea271bca386de63ab7b9becfe01a170edaa3ef2Model.h | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/cpp/9ea271bca386de63ab7b9becfe01a170edaa3ef2Model.h | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | #ifndef MODEL_H_
#define MODEL_H_
#include <ModelResult.h>
#include <ModelConfiguration.h>
namespace CuEira {
namespace Model {
/**
* This is ...
*
* @author Daniel Berglund daniel.k.berglund@gmail.com
*/
class Model {
public:
virtual ~Model();
virtual ModelResult* calculate()=0;
Model(const Model&) = delete;
Model(Model&&) = delete;
Model& operator=(const Model&) = delete;
Model& operator=(Model&&) = delete;
protected:
Model(ModelConfiguration* modelConfiguration);
ModelConfiguration* modelConfiguration;
};
} /* namespace Model */
} /* namespace CuEira */
#endif /* MODEL_H_ */
| 17.055556 | 54 | 0.692182 | [
"model"
] |
9ea4280f0fa4b0e653a9e3fe03e7f25d368301c9 | 3,692 | h | C | ns3/ns-3.26/src/spectrum/model/spectrum-model.h | Aedemon/clusim | 7f09cdb79b5f02cf0fed1bd44842981941f29f32 | [
"Apache-2.0"
] | 7 | 2017-08-11T06:06:47.000Z | 2022-02-27T07:34:33.000Z | ns3/ns-3.26/src/spectrum/model/spectrum-model.h | Aedemon/clusim | 7f09cdb79b5f02cf0fed1bd44842981941f29f32 | [
"Apache-2.0"
] | 3 | 2017-08-11T03:04:59.000Z | 2017-09-11T14:01:14.000Z | ns3/ns-3.26/src/spectrum/model/spectrum-model.h | Aedemon/clusim | 7f09cdb79b5f02cf0fed1bd44842981941f29f32 | [
"Apache-2.0"
] | 3 | 2017-08-08T13:36:30.000Z | 2018-07-04T09:49:41.000Z | /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2009 CTTC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Nicola Baldo <nbaldo@cttc.es>
*/
#ifndef SPECTRUM_MODEL_H
#define SPECTRUM_MODEL_H
#include <ns3/simple-ref-count.h>
#include <vector>
namespace ns3 {
/**
* \defgroup spectrum Spectrum Models
*
*/
/**
* \ingroup spectrum
*
* The building block of a SpectrumModel. This struct models
* a frequency band defined by the frequency interval [fl, fc] and
* with center frequency fc. Typically, the center frequency will be
* used for stuff such as propagation modeling, while the interval
* boundaries will be used for bandwidth calculation and for
* conversion between different SpectrumRepresentations.
*
*/
struct BandInfo
{
double fl; //!< lower limit of subband
double fc; //!< center frequency
double fh; //!< upper limit of subband
};
/// Container of BandInfo
typedef std::vector<BandInfo> Bands;
/// Uid for SpectrumModels
typedef uint32_t SpectrumModelUid_t;
/**
* Set of frequency values implementing the domain of the functions in
* the Function Space defined by SpectrumValue. Frequency values are in
* Hz. It is intended that frequency values are non-negative, though
* this is not enforced.
*
*/
class SpectrumModel : public SimpleRefCount<SpectrumModel>
{
public:
/**
* Comparison operator. Returns true if the two SpectumModels are identical
* \param lhs left operand
* \param rhs right operand
* \returns true if the two operands are identical
*/
friend bool operator== (const SpectrumModel& lhs, const SpectrumModel& rhs);
/**
* This constructs a SpectrumModel based on a given set of frequencies,
* which is assumed to be sorted by increasing frequency. The lower
* (resp. upper) frequency band limit is determined as the mean value
* between the center frequency of the considered band and the
* center frequency of the adjacent lower (resp. upper) band.
*
* @param centerFreqs the vector of center frequencies.
*
* @return
*/
SpectrumModel (std::vector<double> centerFreqs);
/**
* This construct a SpectrumModel based on the explicit values of
* center frequencies and boundaries of each subband.
*
* @param bands
*
* @return
*/
SpectrumModel (Bands bands);
/**
*
* @return the number of frequencies in this SpectrumModel
*/
size_t GetNumBands () const;
/**
*
* @return the unique id of this SpectrumModel
*/
SpectrumModelUid_t GetUid () const;
/**
* Const Iterator to the model Bands container start.
*/
Bands::const_iterator Begin () const;
/**
* Const Iterator to the model Bands container end.
*/
Bands::const_iterator End () const;
private:
Bands m_bands; //!< Actual definition of frequency bands within this SpectrumModel
SpectrumModelUid_t m_uid; //!< unique id for a given set of frequencies
static SpectrumModelUid_t m_uidCount; //!< counter to assign m_uids
};
} // namespace ns3
#endif /* SPECTRUM_MODEL_H */
| 27.759398 | 92 | 0.710184 | [
"vector",
"model"
] |
9ea4580ec2522f14228d2580b0bc356430b13de2 | 1,310 | h | C | Dev/SourcesDemos/Systems/DemoParticles/Demo.h | Legulysse/guguEngine | 889ba87f219d476169fab1072f3af1428df62d49 | [
"Zlib"
] | 15 | 2018-06-30T12:02:03.000Z | 2022-02-16T00:23:45.000Z | Dev/SourcesDemos/Systems/DemoParticles/Demo.h | Legulysse/guguEngine | 889ba87f219d476169fab1072f3af1428df62d49 | [
"Zlib"
] | null | null | null | Dev/SourcesDemos/Systems/DemoParticles/Demo.h | Legulysse/guguEngine | 889ba87f219d476169fab1072f3af1428df62d49 | [
"Zlib"
] | 1 | 2018-07-26T22:40:20.000Z | 2018-07-26T22:40:20.000Z | #pragma once
////////////////////////////////////////////////////////////////
// Includes
#include "Gugu/Core/Application.h"
#include "Gugu/Events/EventListener.h"
#include "Gugu/VisualEffects/ParticleSystemSettings.h"
#include "Gugu/System/Types.h"
#include <SFML/Graphics/VertexArray.hpp>
////////////////////////////////////////////////////////////////
// Forward Declarations
namespace gugu
{
class Element;
class ElementParticles;
class ParticleSystem;
}
////////////////////////////////////////////////////////////////
// File Declarations
namespace demoproject {
class Demo : public gugu::Application, public gugu::EventListener
{
public:
Demo();
virtual ~Demo();
virtual void AppStart() override;
virtual void AppStop() override;
virtual void AppUpdate(const gugu::DeltaTime& dt) override;
protected:
gugu::Element* m_root;
gugu::Element* m_mouseFollow;
gugu::Element* m_moveArm;
gugu::int64 m_startTime;
bool m_animateEmitters;
bool m_rotateArm;
std::vector<gugu::ParticleSystemSettings> m_particleSystemSettings;
std::vector<gugu::ParticleSystem*> m_particleSystems;
std::vector<gugu::ElementParticles*> m_centerParticleElements;
};
} //namespace demoproject
| 23.392857 | 72 | 0.592366 | [
"vector"
] |
9ea5f867226454a73c74d9399a1c9960500d5134 | 22,465 | h | C | src/wasm/wasm-code-manager.h | Drieger/v8 | 93283bf04ae3fd96592b4090e90ac75130aa9d52 | [
"BSD-3-Clause"
] | 2 | 2020-08-27T09:36:44.000Z | 2020-09-23T14:01:12.000Z | src/wasm/wasm-code-manager.h | Drieger/v8 | 93283bf04ae3fd96592b4090e90ac75130aa9d52 | [
"BSD-3-Clause"
] | null | null | null | src/wasm/wasm-code-manager.h | Drieger/v8 | 93283bf04ae3fd96592b4090e90ac75130aa9d52 | [
"BSD-3-Clause"
] | 1 | 2019-10-08T06:20:30.000Z | 2019-10-08T06:20:30.000Z | // Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_
#include <functional>
#include <list>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include "src/base/macros.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
struct CodeDesc;
class Code;
namespace wasm {
class NativeModule;
class WasmCodeManager;
class WasmEngine;
class WasmMemoryTracker;
class WasmImportWrapperCache;
struct WasmModule;
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
public:
DisjointAllocationPool() = default;
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
V8_NOEXCEPT = default;
// Merge the parameter region into this object while preserving ordering of
// the regions. The assumption is that the passed parameter is not
// intersecting this object - for example, it was obtained from a previous
// Allocate.
void Merge(base::AddressRegion);
// Allocate a contiguous region of size {size}. Return an empty pool on
// failure.
base::AddressRegion Allocate(size_t size);
bool IsEmpty() const { return regions_.empty(); }
const std::list<base::AddressRegion>& regions() const { return regions_; }
private:
std::list<base::AddressRegion> regions_;
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
kFunction,
kWasmToJsWrapper,
kLazyStub,
kRuntimeStub,
kInterpreterEntry,
kJumpTable
};
// Each runtime stub is identified by an id. This id is used to reference the
// stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
kRuntimeStubCount
};
// kOther is used if we have WasmCode that is neither
// liftoff- nor turbofan-compiled, i.e. if Kind is
// not a kFunction.
enum Tier : int8_t { kLiftoff, kTurbofan, kOther };
Vector<byte> instructions() const { return instructions_; }
Address instruction_start() const {
return reinterpret_cast<Address>(instructions_.start());
}
Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
Vector<const byte> source_positions() const {
return source_position_table_.as_vector();
}
uint32_t index() const {
DCHECK(!IsAnonymous());
return index_;
}
// Anonymous functions are functions that don't carry an index.
bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
Kind kind() const { return kind_; }
NativeModule* native_module() const { return native_module_; }
Tier tier() const { return tier_; }
Address constant_pool() const;
Address code_comments() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
size_t handler_table_offset() const { return handler_table_offset_; }
size_t code_comments_offset() const { return code_comments_offset_; }
size_t unpadded_binary_size() const { return unpadded_binary_size_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return tier_ == kLiftoff; }
bool contains(Address pc) const {
return reinterpret_cast<Address>(instructions_.start()) <= pc &&
pc < reinterpret_cast<Address>(instructions_.end());
}
Vector<trap_handler::ProtectedInstructionData> protected_instructions()
const {
return protected_instructions_.as_vector();
}
const char* GetRuntimeStubName() const;
void Validate() const;
void Print(const char* name = nullptr) const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
static bool ShouldBeLogged(Isolate* isolate);
void LogCode(Isolate* isolate) const;
~WasmCode();
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
private:
friend class NativeModule;
WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
size_t constant_pool_offset, size_t code_comments_offset,
size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, Kind kind, Tier tier)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
source_position_table_(std::move(source_position_table)),
native_module_(native_module),
index_(index),
kind_(kind),
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
code_comments_offset_(code_comments_offset),
unpadded_binary_size_(unpadded_binary_size),
protected_instructions_(std::move(protected_instructions)),
tier_(tier) {
DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
DCHECK_LE(handler_table_offset, unpadded_binary_size);
DCHECK_LE(code_comments_offset, unpadded_binary_size);
DCHECK_LE(constant_pool_offset, unpadded_binary_size);
}
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
size_t trap_handler_index() const;
void set_trap_handler_index(size_t);
bool HasTrapHandlerIndex() const;
// Register protected instruction information with the trap handler. Sets
// trap_handler_index.
void RegisterTrapHandlerData();
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
OwnedVector<const byte> source_position_table_;
NativeModule* native_module_ = nullptr;
uint32_t index_;
Kind kind_;
size_t constant_pool_offset_ = 0;
uint32_t stack_slots_ = 0;
// we care about safepoint data for wasm-to-js functions,
// since there may be stack/register tagged values for large number
// conversions.
size_t safepoint_table_offset_ = 0;
size_t handler_table_offset_ = 0;
size_t code_comments_offset_ = 0;
size_t unpadded_binary_size_ = 0;
intptr_t trap_handler_index_ = -1;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
Tier tier_;
DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
class V8_EXPORT_PRIVATE NativeModule final {
public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
static constexpr bool kCanAllocateMoreMemory = false;
#else
static constexpr bool kCanAllocateMoreMemory = true;
#endif
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> source_position_table,
WasmCode::Kind kind, WasmCode::Tier tier);
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
size_t constant_pool_offset, size_t code_comments_offset,
size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
// calling SetLazyBuiltin. It will be copied into this NativeModule and the
// jump table will be populated with that copy.
void SetLazyBuiltin(Handle<Code> code);
// Initializes all runtime stubs by copying them over from the JS-allocated
// heap into this native module. It must be called exactly once per native
// module before adding other WasmCode so that runtime stub ids can be
// resolved during relocation.
void SetRuntimeStubs(Isolate* isolate);
// Makes the code available to the system (by entering it into the code table
// and patching the jump table). Callers have to take care not to race with
// threads executing the old code.
void PublishCode(WasmCode* code);
// Switch a function to an interpreter entry wrapper. When adding interpreter
// wrappers, we do not insert them in the code_table, however, we let them
// self-identify as the {index} function.
void PublishInterpreterEntry(WasmCode* code, uint32_t index);
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
WasmCode* code(uint32_t index) const {
DCHECK_LT(index, num_functions());
DCHECK_LE(module_->num_imported_functions, index);
return code_table_[index - module_->num_imported_functions];
}
bool has_code(uint32_t index) const { return code(index) != nullptr; }
WasmCode* runtime_stub(WasmCode::RuntimeStubId index) const {
DCHECK_LT(index, WasmCode::kRuntimeStubCount);
WasmCode* code = runtime_stub_table_[index];
DCHECK_NOT_NULL(code);
return code;
}
Address jump_table_start() const {
return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
}
ptrdiff_t jump_table_offset(uint32_t func_index) const {
DCHECK_GE(func_index, num_imported_functions());
return GetCallTargetForFunction(func_index) - jump_table_start();
}
bool is_jump_table_slot(Address address) const {
return jump_table_->contains(address);
}
// Transition this module from code relying on trap handlers (i.e. without
// explicit memory bounds checks) to code that does not require trap handlers
// (i.e. code with explicit bounds checks).
// This method must only be called if {use_trap_handler()} is true (it will be
// false afterwards). All code in this {NativeModule} needs to be re-added
// after calling this method.
void DisableTrapHandler();
// Returns the target to call for the given function (returns a jump table
// slot within {jump_table_}).
Address GetCallTargetForFunction(uint32_t func_index) const;
// Reverse lookup from a given call target (i.e. a jump table slot as the
// above {GetCallTargetForFunction} returns) to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
bool SetExecutable(bool executable);
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ReserveCodeTableForTesting(uint32_t max_functions);
void LogWasmCodes(Isolate* isolate);
CompilationState* compilation_state() { return compilation_state_.get(); }
// Create a {CompilationEnv} object for compilation. Only valid as long as
// this {NativeModule} is alive.
CompilationEnv CreateCompilationEnv() const;
uint32_t num_functions() const {
return module_->num_declared_functions + module_->num_imported_functions;
}
uint32_t num_imported_functions() const {
return module_->num_imported_functions;
}
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
const WasmModule* module() const { return module_.get(); }
size_t committed_code_space() const { return committed_code_space_.load(); }
void SetWireBytes(OwnedVector<const byte> wire_bytes);
WasmCode* Lookup(Address) const;
WasmImportWrapperCache* import_wrapper_cache() const {
return import_wrapper_cache_.get();
}
~NativeModule();
const WasmFeatures& enabled_features() const { return enabled_features_; }
private:
friend class WasmCode;
friend class WasmCodeManager;
friend class NativeModuleModificationScope;
NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
std::shared_ptr<const WasmModule> module);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
const char* name = nullptr);
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
Vector<byte> AllocateForCode(size_t size);
// Primitive for adding code to the native module. All code added to a native
// module is owned by that module. Various callers get to decide on how the
// code is obtained (CodeDesc vs, as a point in time, Code), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
size_t code_comments_offset,
size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table,
WasmCode::Kind, WasmCode::Tier);
WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
// Hold the {allocation_mutex_} when calling this method.
void InstallCode(WasmCode* code);
Vector<WasmCode*> code_table() const {
return {code_table_.get(), module_->num_declared_functions};
}
// Hold the {mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) {
DCHECK_LT(func_index, num_functions());
DCHECK_LE(module_->num_imported_functions, func_index);
if (!interpreter_redirections_) return false;
uint32_t bitset_idx = func_index - module_->num_imported_functions;
uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
return byte & (1 << (bitset_idx % kBitsPerByte));
}
// Hold the {mutex_} when calling this method.
void SetInterpreterRedirection(uint32_t func_index) {
DCHECK_LT(func_index, num_functions());
DCHECK_LE(module_->num_imported_functions, func_index);
if (!interpreter_redirections_) {
interpreter_redirections_.reset(
new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
kBitsPerByte]);
}
uint32_t bitset_idx = func_index - module_->num_imported_functions;
uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
byte |= 1 << (bitset_idx % kBitsPerByte);
}
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
// to be consistent across asynchronous compilations later.
const WasmFeatures enabled_features_;
// TODO(clemensh): Make this a unique_ptr (requires refactoring
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
OwnedVector<const byte> wire_bytes_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
// Jump table used to easily redirect wasm function calls.
WasmCode* jump_table_ = nullptr;
// The compilation state keeps track of compilation tasks for this module.
// Note that its destructor blocks until all tasks are finished/aborted and
// hence needs to be destructed first when this native module dies.
std::unique_ptr<CompilationState> compilation_state_;
// A cache of the import wrappers, keyed on the kind and signature.
std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;
// This mutex protects concurrent calls to {AddCode} and friends.
mutable base::Mutex allocation_mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {allocation_mutex_}:
// Holds all allocated code objects, is maintained to be in ascending order
// according to the codes instruction start address to allow lookups.
std::vector<std::unique_ptr<WasmCode>> owned_code_;
std::unique_ptr<WasmCode* []> code_table_;
// Null if no redirections exist, otherwise a bitset over all functions in
// this module marking those functions that have been redirected.
std::unique_ptr<uint8_t[]> interpreter_redirections_;
DisjointAllocationPool free_code_space_;
DisjointAllocationPool allocated_code_space_;
std::list<VirtualMemory> owned_code_space_;
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
WasmCodeManager* const code_manager_;
std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed);
// Create a new NativeModule. The caller is responsible for its
// lifetime. The native module will be given some memory for code,
// which will be page size aligned. The size of the initial memory
// is determined with a heuristic based on the total size of wasm
// code. The native module may later request more memory.
// TODO(titzer): isolate is only required here for CompilationState.
std::unique_ptr<NativeModule> NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled_features,
size_t code_size_estimate, bool can_request_more,
std::shared_ptr<const WasmModule> module);
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
size_t remaining_uncommitted_code_space() const;
// Add a sample of all module sizes.
void SampleModuleSizes(Isolate* isolate) const;
void SetMaxCommittedMemoryForTesting(size_t limit);
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
// using sampling based on regular intervals independent of the GC.
static void InstallSamplingGCCallback(Isolate* isolate);
static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
private:
friend class NativeModule;
V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
void* hint = nullptr);
bool Commit(Address, size_t);
// Currently, we uncommit a whole module, so all we need is account
// for the freed memory size. We do that in FreeNativeModule.
// There's no separate Uncommit.
void FreeNativeModule(NativeModule*);
void AssignRanges(Address start, Address end, NativeModule*);
void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
WasmMemoryTracker* const memory_tracker_;
std::atomic<size_t> remaining_uncommitted_code_space_;
// If the remaining uncommitted code space falls below
// {critical_uncommitted_code_space_}, then we trigger a GC before creating
// the next module. This value is initialized to 50% of the available code
// space on creation and after each GC.
std::atomic<size_t> critical_uncommitted_code_space_;
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {native_modules_mutex_}:
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
std::unordered_set<NativeModule*> native_modules_;
// End of fields protected by {native_modules_mutex_}.
//////////////////////////////////////////////////////////////////////////////
DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
// Within the scope, the native_module is writable and not executable.
// At the scope's destruction, the native_module is executable and not writable.
// The states inside the scope and at the scope termination are irrespective of
// native_module's state when entering the scope.
// We currently mark the entire module's memory W^X:
// - for AOT, that's as efficient as it can be.
// - for Lazy, we don't have a heuristic for functions that may need patching,
// and even if we did, the resulting set of pages may be fragmented.
// Currently, we try and keep the number of syscalls low.
// - similar argument for debug time.
class NativeModuleModificationScope final {
public:
explicit NativeModuleModificationScope(NativeModule* native_module);
~NativeModuleModificationScope();
private:
NativeModule* native_module_;
};
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_WASM_CODE_MANAGER_H_
| 39.481547 | 80 | 0.732295 | [
"object",
"vector"
] |
9ea68579be7f14eca69f54c39716da0572cd34da | 396 | h | C | src/php/iterators/php_stack_iterator.h | Jan-E/ext-ds | 3576d099457e596ba56c6e6e8c00d60627cd3b49 | [
"MIT"
] | null | null | null | src/php/iterators/php_stack_iterator.h | Jan-E/ext-ds | 3576d099457e596ba56c6e6e8c00d60627cd3b49 | [
"MIT"
] | null | null | null | src/php/iterators/php_stack_iterator.h | Jan-E/ext-ds | 3576d099457e596ba56c6e6e8c00d60627cd3b49 | [
"MIT"
] | null | null | null | #ifndef DS_STACK_ITERATOR_H
#define DS_STACK_ITERATOR_H
#include "php.h"
#include "../objects/php_stack.h"
typedef struct _php_ds_stack_iterator_t {
zend_object_iterator intern;
zend_long position;
php_ds_stack_t *stack;
} php_ds_stack_iterator_t;
zend_object_iterator *php_ds_stack_get_iterator(zend_class_entry *ce, zval *object, int by_ref);
#endif
| 24.75 | 96 | 0.742424 | [
"object"
] |
9eabc46c761a66aa70fb5e98fc583bca1a9b5dce | 7,893 | h | C | jbox2d-master/jbox2d-jni-broadphase/src/main/c++/b2DynamicTree.h | ImanHosseini/SomeGame | 30250e9fbf61eca9511b2858606d333e098024f4 | [
"MIT"
] | null | null | null | jbox2d-master/jbox2d-jni-broadphase/src/main/c++/b2DynamicTree.h | ImanHosseini/SomeGame | 30250e9fbf61eca9511b2858606d333e098024f4 | [
"MIT"
] | null | null | null | jbox2d-master/jbox2d-jni-broadphase/src/main/c++/b2DynamicTree.h | ImanHosseini/SomeGame | 30250e9fbf61eca9511b2858606d333e098024f4 | [
"MIT"
] | null | null | null | /*
* Copyright (c) 2009 Erin Catto http://www.box2d.org
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B2_DYNAMIC_TREE_H
#define B2_DYNAMIC_TREE_H
#include "b2Collision.h"
#include "b2GrowableStack.h"
#define b2_nullNode (-1)
/// A node in the dynamic tree. The client does not interact with this directly.
struct b2TreeNode
{
bool IsLeaf() const
{
return child1 == b2_nullNode;
}
/// Enlarged AABB
b2AABB aabb;
void* userData;
union
{
int32 parent;
int32 next;
};
int32 child1;
int32 child2;
// leaf = 0, free node = -1
int32 height;
};
/// A dynamic AABB tree broad-phase, inspired by Nathanael Presson's btDbvt.
/// A dynamic tree arranges data in a binary tree to accelerate
/// queries such as volume queries and ray casts. Leafs are proxies
/// with an AABB. In the tree we expand the proxy AABB by b2_fatAABBFactor
/// so that the proxy AABB is bigger than the client object. This allows the client
/// object to move by small amounts without triggering a tree update.
///
/// Nodes are pooled and relocatable, so we use node indices rather than pointers.
class b2DynamicTree
{
public:
/// Constructing the tree initializes the node pool.
b2DynamicTree();
/// Destroy the tree, freeing the node pool.
~b2DynamicTree();
/// Create a proxy. Provide a tight fitting AABB and a userData pointer.
int32 CreateProxy(const b2AABB& aabb, void* userData);
/// Destroy a proxy. This asserts if the id is invalid.
void DestroyProxy(int32 proxyId);
/// Move a proxy with a swepted AABB. If the proxy has moved outside of its fattened AABB,
/// then the proxy is removed from the tree and re-inserted. Otherwise
/// the function returns immediately.
/// @return true if the proxy was re-inserted.
bool MoveProxy(int32 proxyId, const b2AABB& aabb1, const b2Vec2& displacement);
/// Get proxy user data.
/// @return the proxy user data or 0 if the id is invalid.
void* GetUserData(int32 proxyId) const;
/// Get the fat AABB for a proxy.
const b2AABB& GetFatAABB(int32 proxyId) const;
/// Query an AABB for overlapping proxies. The callback class
/// is called for each proxy that overlaps the supplied AABB.
template <typename T>
void Query(T* callback, const b2AABB& aabb) const;
/// Ray-cast against the proxies in the tree. This relies on the callback
/// to perform a exact ray-cast in the case were the proxy contains a shape.
/// The callback also performs the any collision filtering. This has performance
/// roughly equal to k * log(n), where k is the number of collisions and n is the
/// number of proxies in the tree.
/// @param input the ray-cast input data. The ray extends from p1 to p1 + maxFraction * (p2 - p1).
/// @param callback a callback class that is called for each proxy that is hit by the ray.
template <typename T>
void RayCast(T* callback, const b2RayCastInput& input) const;
/// Validate this tree. For testing.
void Validate() const;
/// Compute the height of the binary tree in O(N) time. Should not be
/// called often.
int32 GetHeight() const;
/// Get the maximum balance of an node in the tree. The balance is the difference
/// in height of the two children of a node.
int32 GetMaxBalance() const;
/// Get the ratio of the sum of the node areas to the root area.
float32 GetAreaRatio() const;
/// Build an optimal tree. Very expensive. For testing.
void RebuildBottomUp();
/// Shift the world origin. Useful for large worlds.
/// The shift formula is: position -= newOrigin
/// @param newOrigin the new origin with respect to the old origin
void ShiftOrigin(const b2Vec2& newOrigin);
template <typename T>
void Iterate(T* callback) const {
for (int i = 0; i < m_nodeCount; i++) {
const b2TreeNode& node = m_nodes[i];
if (node.height != -1 && node.userData != NULL) {
callback->process(node.userData);
}
}
}
private:
int32 AllocateNode();
void FreeNode(int32 node);
void InsertLeaf(int32 node);
void RemoveLeaf(int32 node);
int32 Balance(int32 index);
int32 ComputeHeight() const;
int32 ComputeHeight(int32 nodeId) const;
void ValidateStructure(int32 index) const;
void ValidateMetrics(int32 index) const;
int32 m_root;
b2TreeNode* m_nodes;
int32 m_nodeCount;
int32 m_nodeCapacity;
int32 m_freeList;
/// This is used to incrementally traverse the tree for re-balancing.
uint32 m_path;
int32 m_insertionCount;
};
inline void* b2DynamicTree::GetUserData(int32 proxyId) const
{
b2Assert(0 <= proxyId && proxyId < m_nodeCapacity);
return m_nodes[proxyId].userData;
}
inline const b2AABB& b2DynamicTree::GetFatAABB(int32 proxyId) const
{
b2Assert(0 <= proxyId && proxyId < m_nodeCapacity);
return m_nodes[proxyId].aabb;
}
template <typename T>
inline void b2DynamicTree::Query(T* callback, const b2AABB& aabb) const
{
b2GrowableStack<int32, 256> stack;
stack.Push(m_root);
while (stack.GetCount() > 0)
{
int32 nodeId = stack.Pop();
if (nodeId == b2_nullNode)
{
continue;
}
const b2TreeNode* node = m_nodes + nodeId;
if (b2TestOverlap(node->aabb, aabb))
{
if (node->IsLeaf())
{
bool proceed = callback->QueryCallback(nodeId);
if (proceed == false)
{
return;
}
}
else
{
stack.Push(node->child1);
stack.Push(node->child2);
}
}
}
}
template <typename T>
inline void b2DynamicTree::RayCast(T* callback, const b2RayCastInput& input) const
{
b2Vec2 p1 = input.p1;
b2Vec2 p2 = input.p2;
b2Vec2 r = p2 - p1;
b2Assert(r.LengthSquared() > 0.0f);
r.Normalize();
// v is perpendicular to the segment.
b2Vec2 v = b2Cross(1.0f, r);
b2Vec2 abs_v = b2Abs(v);
// Separating axis for segment (Gino, p80).
// |dot(v, p1 - c)| > dot(|v|, h)
float32 maxFraction = input.maxFraction;
// Build a bounding box for the segment.
b2AABB segmentAABB;
{
b2Vec2 t = p1 + maxFraction * (p2 - p1);
segmentAABB.lowerBound = b2Min(p1, t);
segmentAABB.upperBound = b2Max(p1, t);
}
b2GrowableStack<int32, 256> stack;
stack.Push(m_root);
while (stack.GetCount() > 0)
{
int32 nodeId = stack.Pop();
if (nodeId == b2_nullNode)
{
continue;
}
const b2TreeNode* node = m_nodes + nodeId;
if (b2TestOverlap(node->aabb, segmentAABB) == false)
{
continue;
}
// Separating axis for segment (Gino, p80).
// |dot(v, p1 - c)| > dot(|v|, h)
b2Vec2 c = node->aabb.GetCenter();
b2Vec2 h = node->aabb.GetExtents();
float32 separation = b2Abs(b2Dot(v, p1 - c)) - b2Dot(abs_v, h);
if (separation > 0.0f)
{
continue;
}
if (node->IsLeaf())
{
b2RayCastInput subInput;
subInput.p1 = input.p1;
subInput.p2 = input.p2;
subInput.maxFraction = maxFraction;
float32 value = callback->RayCastCallback(subInput, nodeId);
if (value == 0.0f)
{
// The client has terminated the ray cast.
return;
}
if (value > 0.0f)
{
// Update segment bounding box.
maxFraction = value;
b2Vec2 t = p1 + maxFraction * (p2 - p1);
segmentAABB.lowerBound = b2Min(p1, t);
segmentAABB.upperBound = b2Max(p1, t);
}
}
else
{
stack.Push(node->child1);
stack.Push(node->child2);
}
}
}
#endif
| 26.31 | 99 | 0.700114 | [
"object",
"shape"
] |
9eabe3598289ae5fe5f380f351e2d72eff2b1dfe | 2,420 | h | C | headers/CppUtils/MemBuffer.h | golomb1/CIoTA-Collaborative-IoT-Anomaly-detection-via-Blockchain | 7704e5d22f09783250ec3b448ef4d5d24bbaf8af | [
"MIT"
] | 14 | 2018-12-18T07:49:11.000Z | 2021-12-17T10:06:02.000Z | headers/CppUtils/MemBuffer.h | golomb1/CIoTA-Collaborative-IoT-Anomaly-detection-via-Blockchain | 7704e5d22f09783250ec3b448ef4d5d24bbaf8af | [
"MIT"
] | 2 | 2019-01-28T18:11:43.000Z | 2020-11-11T08:23:10.000Z | headers/CppUtils/MemBuffer.h | golomb1/CIoTA-Collaborative-IoT-Anomaly-detection-via-Blockchain | 7704e5d22f09783250ec3b448ef4d5d24bbaf8af | [
"MIT"
] | 7 | 2018-08-31T15:59:58.000Z | 2021-11-03T02:11:02.000Z | //
// Created by golombt on 17/12/2017.
//
#ifndef CPP_UTILS_MEM_BUFFER_H
#define CPP_UTILS_MEM_BUFFER_H
#include <vector>
#include <cstdio>
#include <string>
/**
* @file MemBuffer.h
* @author Tomer Golomb (golombt)
* @version 1.0
*
* @brief defines an interface for MemBuffer class.
* MemBuffer is used to ease the handling of memory buffers.
* Support dynamic allocation and resizing using std vector.
*
*/
class MemBuffer{
private:
std::vector<char> _inner;
public:
/**
* Constructor
*/
MemBuffer() = default;
/**
* Constructor copy the given buffer to this instance.
* @param buffer to be copied
* @param len of the buffer.
* @attention buffer is copied and should be freed by the caller.
*/
MemBuffer(const void* buffer, size_t len){
append(buffer, len);
}
/**
* Copy constructor for pointer.
* @param other instance to copy.
*/
explicit MemBuffer(MemBuffer* other) : MemBuffer(other->_inner.data(), other->_inner.size()){}
explicit MemBuffer(std::string str);
explicit MemBuffer(std::string* str);
virtual ~MemBuffer(){
_inner.clear();
}
/**
* append data from ptr into this buffer.
* @param ptr to the data.
* @param len to read from ptr.
* @attention ptr is copied and should be freed by the caller.
*/
void append(const void* ptr, size_t len);
/**
* reserve n more bytes in this buffer.
* @param n - how many byte to reserve.
*/
void reserveMore(size_t n);
/**
* append a char value n times
* @param value to append
* @param n - how many times to write the value.
*/
void appendValue(char value, size_t n);
/**
* Pad this buffer to the desired length with a given value.
* In a case where the length of the buffer is already equal
* or greater than the desired length nothing occurs.
* @param value to pad with.
* @param desiredLength of the buffer.
*/
void padValueTo(char value, size_t desiredLength);
const char *data() const;
char *data();
size_t size() const;
void clear();
bool empty() const ;
void swap(std::vector<char>& v);
void swap(MemBuffer& m);
};
#endif //CPP_UTILS_MEM_BUFFER_H
| 22.407407 | 99 | 0.597107 | [
"vector"
] |
9ead2d58985c644e7486c4e6a99823c7364a3f22 | 5,956 | h | C | chrome/browser/ui/autofill/password_generation_popup_controller_impl.h | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | chrome/browser/ui/autofill/password_generation_popup_controller_impl.h | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | chrome/browser/ui/autofill/password_generation_popup_controller_impl.h | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_AUTOFILL_PASSWORD_GENERATION_POPUP_CONTROLLER_IMPL_H_
#define CHROME_BROWSER_UI_AUTOFILL_PASSWORD_GENERATION_POPUP_CONTROLLER_IMPL_H_
#include <stddef.h>
#include <string>
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "chrome/browser/ui/autofill/password_generation_popup_controller.h"
#include "chrome/browser/ui/autofill/popup_controller_common.h"
#include "chrome/browser/ui/autofill/popup_view_common.h"
#include "components/autofill/core/common/password_form.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/rect_f.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gfx/range/range.h"
namespace content {
struct NativeWebKeyboardEvent;
class WebContents;
}
namespace password_manager {
class PasswordManager;
class PasswordManagerDriver;
}
namespace autofill {
class PasswordGenerator;
class PasswordGenerationPopupObserver;
class PasswordGenerationPopupView;
struct Suggestion;
// This class controls a PasswordGenerationPopupView. It is responsible for
// determining the location of the popup, handling keypress events while the
// popup is active, and notifying both the renderer and the password manager
// if the password is accepted.
class PasswordGenerationPopupControllerImpl
: public PasswordGenerationPopupController {
public:
// Create a controller or return |previous| if it is suitable. Will hide
// |previous| if it is not returned. |bounds| is the bounds of the element
// that we are showing the dropdown for in screen space. |form| is the
// identifier for the form that we are filling, and is used to notify
// |password_manager| if the password is generated. |max_length| is used to
// determine the length of the password shown. If not NULL, |observer| will
// be notified of changes of the popup state.
static base::WeakPtr<PasswordGenerationPopupControllerImpl> GetOrCreate(
base::WeakPtr<PasswordGenerationPopupControllerImpl> previous,
const gfx::RectF& bounds,
const PasswordForm& form,
int max_length,
password_manager::PasswordManager* password_manager,
password_manager::PasswordManagerDriver* driver,
PasswordGenerationPopupObserver* observer,
content::WebContents* web_contents,
gfx::NativeView container_view);
~PasswordGenerationPopupControllerImpl() override;
// Create a PasswordGenerationPopupView if one doesn't already exist.
// If |display_password| is true, a generated password is shown that can be
// selected by the user. Otherwise just the text explaining generated
// passwords is shown. Idempotent.
void Show(bool display_password);
// Hides the popup and destroys |this|.
void HideAndDestroy();
// Accessors.
content::WebContents* web_contents() {
return controller_common_.web_contents();
}
protected:
PasswordGenerationPopupControllerImpl(
const gfx::RectF& bounds,
const PasswordForm& form,
int max_length,
password_manager::PasswordManager* password_manager,
password_manager::PasswordManagerDriver* driver,
PasswordGenerationPopupObserver* observer,
content::WebContents* web_contents,
gfx::NativeView container_view);
// Handle to the popup. May be NULL if popup isn't showing.
PasswordGenerationPopupView* view_;
private:
// PasswordGenerationPopupController implementation:
void Hide() override;
void ViewDestroyed() override;
void SetSelectionAtPoint(const gfx::Point& point) override;
bool AcceptSelectedLine() override;
void SelectionCleared() override;
void PasswordAccepted() override;
void OnSavedPasswordsLinkClicked() override;
int GetMinimumWidth() override;
gfx::NativeView container_view() override;
gfx::Rect popup_bounds() const override;
const gfx::RectF& element_bounds() const override;
bool IsRTL() const override;
const std::vector<autofill::Suggestion> GetSuggestions() override;
#if !defined(OS_ANDROID)
int GetElidedValueWidthForRow(size_t row) override;
int GetElidedLabelWidthForRow(size_t row) override;
#endif
bool display_password() const override;
bool password_selected() const override;
base::string16 password() const override;
base::string16 SuggestedText() override;
const base::string16& HelpText() override;
const gfx::Range& HelpTextLinkRange() override;
base::WeakPtr<PasswordGenerationPopupControllerImpl> GetWeakPtr();
bool HandleKeyPressEvent(const content::NativeWebKeyboardEvent& event);
// Set if the password is currently selected.
void PasswordSelected(bool selected);
// Accept password if it's selected.
bool PossiblyAcceptPassword();
// Get desired size of popup. Height depends on width because we do text
// wrapping.
void CalculateBounds();
PasswordForm form_;
password_manager::PasswordManager* password_manager_;
password_manager::PasswordManagerDriver* driver_;
// May be NULL.
PasswordGenerationPopupObserver* observer_;
// Controls how passwords are generated.
std::unique_ptr<PasswordGenerator> generator_;
// Contains common popup functionality.
PopupControllerCommon controller_common_;
// Help text and the range in the text that corresponds to the saved passwords
// link.
base::string16 help_text_;
gfx::Range link_range_;
base::string16 current_password_;
bool password_selected_;
// If a password will be shown in this popup.
bool display_password_;
// Bounds for all the elements of the popup.
gfx::Rect popup_bounds_;
PopupViewCommon view_common_;
base::WeakPtrFactory<PasswordGenerationPopupControllerImpl> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(PasswordGenerationPopupControllerImpl);
};
} // namespace autofill
#endif // CHROME_BROWSER_UI_AUTOFILL_PASSWORD_GENERATION_POPUP_CONTROLLER_IMPL_H_
| 34.830409 | 82 | 0.778543 | [
"geometry",
"vector"
] |
9eb4dfa2a4bd85ee9e3694286b84d7d406d7f30a | 20,164 | h | C | src/atomicops.h | alargepileofash/in-formant | 3fc77925b68e349b96d7cf20c00223a4b343d04d | [
"Apache-2.0"
] | 55 | 2020-10-07T20:22:22.000Z | 2021-08-28T10:58:36.000Z | src/atomicops.h | alargepileofash/in-formant | 3fc77925b68e349b96d7cf20c00223a4b343d04d | [
"Apache-2.0"
] | 16 | 2020-12-06T22:02:38.000Z | 2021-08-19T09:37:56.000Z | src/atomicops.h | alargepileofash/in-formant | 3fc77925b68e349b96d7cf20c00223a4b343d04d | [
"Apache-2.0"
] | 11 | 2019-12-16T16:06:19.000Z | 2020-04-15T15:28:31.000Z | // ©2013-2016 Cameron Desrochers.
// Distributed under the simplified BSD license (see the license file that
// should have come with this header).
// Uses Jeff Preshing's semaphore implementation (under the terms of its
// separate zlib license, embedded below).
#pragma once
// Provides portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant) implementation
// of low-level memory barriers, plus a few semi-portable utility macros (for inlining and alignment).
// Also has a basic atomic type (limited to hardware-supported atomics with no memory ordering guarantees).
// Uses the AE_* prefix for macros (historical reasons), and the "moodycamel" namespace for symbols.
#include <cerrno>
#include <cassert>
#include <type_traits>
#include <cerrno>
#include <cstdint>
#include <ctime>
// Platform detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif
#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif
// AE_UNUSED
#define AE_UNUSED(x) ((void)x)
// AE_NO_TSAN
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
#else
#define AE_NO_TSAN
#endif
#else
#define AE_NO_TSAN
#endif
// AE_FORCEINLINE
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif
// AE_ALIGN
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC compliant syntax...
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif
// Portable atomic fences implemented below:
namespace moodycamel {
enum memory_order {
memory_order_relaxed,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst,
// memory_order_sync: Forces a full sync:
// #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
memory_order_sync = memory_order_seq_cst
};
} // end namespace moodycamel
#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || (defined(AE_ICC) && __INTEL_COMPILER < 1600)
// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences
#include <intrin.h>
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4365) // Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' error when using `assert`
#ifdef __cplusplus_cli
#pragma managed(push, off)
#endif
#endif
namespace moodycamel {
AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
switch (order) {
case memory_order_relaxed: break;
case memory_order_acquire: _ReadBarrier(); break;
case memory_order_release: _WriteBarrier(); break;
case memory_order_acq_rel: _ReadWriteBarrier(); break;
case memory_order_seq_cst: _ReadWriteBarrier(); break;
default: assert(false);
}
}
// x86/x64 have a strong memory model -- all loads and stores have
// acquire and release semantics automatically (so only need compiler
// barriers for those).
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
switch (order) {
case memory_order_relaxed: break;
case memory_order_acquire: _ReadBarrier(); break;
case memory_order_release: _WriteBarrier(); break;
case memory_order_acq_rel: _ReadWriteBarrier(); break;
case memory_order_seq_cst:
_ReadWriteBarrier();
AeFullSync();
_ReadWriteBarrier();
break;
default: assert(false);
}
}
#else
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
// Non-specialized arch, use heavier memory barriers everywhere just in case :-(
switch (order) {
case memory_order_relaxed:
break;
case memory_order_acquire:
_ReadBarrier();
AeLiteSync();
_ReadBarrier();
break;
case memory_order_release:
_WriteBarrier();
AeLiteSync();
_WriteBarrier();
break;
case memory_order_acq_rel:
_ReadWriteBarrier();
AeLiteSync();
_ReadWriteBarrier();
break;
case memory_order_seq_cst:
_ReadWriteBarrier();
AeFullSync();
_ReadWriteBarrier();
break;
default: assert(false);
}
}
#endif
} // end namespace moodycamel
#else
// Use standard library of atomics
#include <atomic>
namespace moodycamel {
AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
switch (order) {
case memory_order_relaxed: break;
case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
default: assert(false);
}
}
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
switch (order) {
case memory_order_relaxed: break;
case memory_order_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
case memory_order_release: std::atomic_thread_fence(std::memory_order_release); break;
case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acq_rel); break;
case memory_order_seq_cst: std::atomic_thread_fence(std::memory_order_seq_cst); break;
default: assert(false);
}
}
} // end namespace moodycamel
#endif
#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif
#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>
// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
// The guarantee of atomicity is only made for types that already have atomic load and store guarantees
// at the hardware level -- on most platforms this generally means aligned pointers and integers (only).
namespace moodycamel {
template<typename T>
class weak_atomic
{
public:
AE_NO_TSAN weak_atomic() { }
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
#endif
template<typename U> AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) { }
#ifdef __cplusplus_cli
// Work around bug with universal reference/nullptr combination that only appears when /clr is on
AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) { }
#endif
AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) { }
AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) { }
#ifdef AE_VCPP
#pragma warning(pop)
#endif
AE_FORCEINLINE operator T() const AE_NO_TSAN { return load(); }
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN { value = std::forward<U>(x); return *this; }
AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN { value = other.value; return *this; }
AE_FORCEINLINE T load() const AE_NO_TSAN { return value; }
AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
{
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
assert(false && "T must be either a 32 or 64 bit type");
return value;
}
AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
{
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
assert(false && "T must be either a 32 or 64 bit type");
return value;
}
#else
template<typename U>
AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN
{
value.store(std::forward<U>(x), std::memory_order_relaxed);
return *this;
}
AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN
{
value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
AE_FORCEINLINE T load() const AE_NO_TSAN { return value.load(std::memory_order_relaxed); }
AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
{
return value.fetch_add(increment, std::memory_order_acquire);
}
AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
{
return value.fetch_add(increment, std::memory_order_release);
}
#endif
private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
// No std::atomic support, but still need to circumvent compiler optimizations.
// `volatile` will make memory access slow, but is guaranteed to be reliable.
volatile T value;
#else
std::atomic<T> value;
#endif
};
} // end namespace moodycamel
// Portable single-producer, single-consumer semaphore below:
#if defined(_WIN32)
// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
struct _SECURITY_ATTRIBUTES;
__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
}
#elif defined(__MACH__)
#include <mach/mach.h>
#elif defined(__unix__)
#include <semaphore.h>
#endif
namespace moodycamel
{
// Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
// portable + lightweight semaphore implementations, originally from
// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
// LICENSE:
// Copyright (c) 2015 Jeff Preshing
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgement in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
namespace spsc_sema
{
#if defined(_WIN32)
class Semaphore
{
private:
void* m_hSema;
Semaphore(const Semaphore& other);
Semaphore& operator=(const Semaphore& other);
public:
AE_NO_TSAN Semaphore(int initialCount = 0)
{
assert(initialCount >= 0);
const long maxLong = 0x7fffffff;
m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
assert(m_hSema);
}
AE_NO_TSAN ~Semaphore()
{
CloseHandle(m_hSema);
}
bool wait() AE_NO_TSAN
{
const unsigned long infinite = 0xffffffff;
return WaitForSingleObject(m_hSema, infinite) == 0;
}
bool try_wait() AE_NO_TSAN
{
return WaitForSingleObject(m_hSema, 0) == 0;
}
bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
{
return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0;
}
void signal(int count = 1) AE_NO_TSAN
{
while (!ReleaseSemaphore(m_hSema, count, nullptr));
}
};
#elif defined(__MACH__)
//---------------------------------------------------------
// Semaphore (Apple iOS and OSX)
// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
//---------------------------------------------------------
class Semaphore
{
private:
semaphore_t m_sema;
Semaphore(const Semaphore& other);
Semaphore& operator=(const Semaphore& other);
public:
AE_NO_TSAN Semaphore(int initialCount = 0)
{
assert(initialCount >= 0);
kern_return_t rc = semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
assert(rc == KERN_SUCCESS);
AE_UNUSED(rc);
}
AE_NO_TSAN ~Semaphore()
{
semaphore_destroy(mach_task_self(), m_sema);
}
bool wait() AE_NO_TSAN
{
return semaphore_wait(m_sema) == KERN_SUCCESS;
}
bool try_wait() AE_NO_TSAN
{
return timed_wait(0);
}
bool timed_wait(std::uint64_t timeout_usecs) AE_NO_TSAN
{
mach_timespec_t ts;
ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000);
// added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
kern_return_t rc = semaphore_timedwait(m_sema, ts);
return rc == KERN_SUCCESS;
}
void signal() AE_NO_TSAN
{
while (semaphore_signal(m_sema) != KERN_SUCCESS);
}
void signal(int count) AE_NO_TSAN
{
while (count-- > 0)
{
while (semaphore_signal(m_sema) != KERN_SUCCESS);
}
}
};
#elif defined(__unix__)
//---------------------------------------------------------
// Semaphore (POSIX, Linux)
//---------------------------------------------------------
class Semaphore
{
private:
sem_t m_sema;
Semaphore(const Semaphore& other);
Semaphore& operator=(const Semaphore& other);
public:
AE_NO_TSAN Semaphore(int initialCount = 0)
{
assert(initialCount >= 0);
int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
assert(rc == 0);
AE_UNUSED(rc);
}
AE_NO_TSAN ~Semaphore()
{
sem_destroy(&m_sema);
}
bool wait() AE_NO_TSAN
{
// http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
int rc;
do
{
rc = sem_wait(&m_sema);
}
while (rc == -1 && errno == EINTR);
return rc == 0;
}
bool try_wait() AE_NO_TSAN
{
int rc;
do {
rc = sem_trywait(&m_sema);
} while (rc == -1 && errno == EINTR);
return rc == 0;
}
bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
{
struct timespec ts;
const int usecs_in_1_sec = 1000000;
const int nsecs_in_1_sec = 1000000000;
clock_gettime(CLOCK_REALTIME, &ts);
ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec);
ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000;
// sem_timedwait bombs if you have more than 1e9 in tv_nsec
// so we have to clean things up before passing it in
if (ts.tv_nsec >= nsecs_in_1_sec) {
ts.tv_nsec -= nsecs_in_1_sec;
++ts.tv_sec;
}
int rc;
do {
rc = sem_timedwait(&m_sema, &ts);
} while (rc == -1 && errno == EINTR);
return rc == 0;
}
void signal() AE_NO_TSAN
{
while (sem_post(&m_sema) == -1);
}
void signal(int count) AE_NO_TSAN
{
while (count-- > 0)
{
while (sem_post(&m_sema) == -1);
}
}
};
#else
#error Unsupported platform! (No semaphore wrapper available)
#endif
//---------------------------------------------------------
// LightweightSemaphore
//---------------------------------------------------------
class LightweightSemaphore
{
public:
typedef std::make_signed<std::size_t>::type ssize_t;
private:
weak_atomic<ssize_t> m_count;
Semaphore m_sema;
bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN
{
ssize_t oldCount;
// Is there a better way to set the initial spin count?
// If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
// as threads start hitting the kernel semaphore.
int spin = 1024;
while (--spin >= 0)
{
if (m_count.load() > 0)
{
m_count.fetch_add_acquire(-1);
return true;
}
compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop.
}
oldCount = m_count.fetch_add_acquire(-1);
if (oldCount > 0)
return true;
if (timeout_usecs < 0)
{
if (m_sema.wait())
return true;
}
if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<uint64_t>(timeout_usecs)))
return true;
// At this point, we've timed out waiting for the semaphore, but the
// count is still decremented indicating we may still be waiting on
// it. So we have to re-adjust the count, but only if the semaphore
// wasn't signaled enough times for us too since then. If it was, we
// need to release the semaphore too.
while (true)
{
oldCount = m_count.fetch_add_release(1);
if (oldCount < 0)
return false; // successfully restored things to the way they were
// Oh, the producer thread just signaled the semaphore after all. Try again:
oldCount = m_count.fetch_add_acquire(-1);
if (oldCount > 0 && m_sema.try_wait())
return true;
}
}
public:
AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)
{
assert(initialCount >= 0);
}
bool tryWait() AE_NO_TSAN
{
if (m_count.load() > 0)
{
m_count.fetch_add_acquire(-1);
return true;
}
return false;
}
bool wait() AE_NO_TSAN
{
return tryWait() || waitWithPartialSpinning();
}
bool wait(std::int64_t timeout_usecs) AE_NO_TSAN
{
return tryWait() || waitWithPartialSpinning(timeout_usecs);
}
void signal(ssize_t count = 1) AE_NO_TSAN
{
assert(count >= 0);
ssize_t oldCount = m_count.fetch_add_release(count);
assert(oldCount >= -1);
if (oldCount < 0)
{
m_sema.signal(1);
}
}
std::size_t availableApprox() const AE_NO_TSAN
{
ssize_t count = m_count.load();
return count > 0 ? static_cast<std::size_t>(count) : 0;
}
};
} // end namespace spsc_sema
} // end namespace moodycamel
#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
#pragma warning(pop)
#ifdef __cplusplus_cli
#pragma managed(pop)
#endif
#endif
| 29.652941 | 164 | 0.683644 | [
"model"
] |
9ebf90f95be927f5cefededb03eb9decf6b63f4c | 956 | h | C | projects/MeshSimplifier/Build/UWP/Generated Files/MainPage.g.h | Kigs-framework/KigsTests | 4b05f028c55cda15a8b13295228be52815d3337b | [
"MIT"
] | null | null | null | projects/MeshSimplifier/Build/UWP/Generated Files/MainPage.g.h | Kigs-framework/KigsTests | 4b05f028c55cda15a8b13295228be52815d3337b | [
"MIT"
] | null | null | null | projects/MeshSimplifier/Build/UWP/Generated Files/MainPage.g.h | Kigs-framework/KigsTests | 4b05f028c55cda15a8b13295228be52815d3337b | [
"MIT"
] | null | null | null | #pragma once
//------------------------------------------------------------------------------
// This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
//------------------------------------------------------------------------------
namespace MeshSimplifierPackaging1
{
[::Windows::Foundation::Metadata::WebHostHidden]
partial ref class MainPage : public ::Windows::UI::Xaml::Controls::Page,
public ::Windows::UI::Xaml::Markup::IComponentConnector,
public ::Windows::UI::Xaml::Markup::IComponentConnector2
{
public:
void InitializeComponent();
virtual void Connect(int connectionId, ::Platform::Object^ target);
virtual ::Windows::UI::Xaml::Markup::IComponentConnector^ GetBindingConnector(int connectionId, ::Platform::Object^ target);
private:
bool _contentLoaded;
};
}
| 32.965517 | 132 | 0.558577 | [
"object"
] |
9ec0b3e178e5c7ace9e2c6323f7c2559cd78d3c1 | 3,984 | h | C | connectivity/shill/dbus/chromeos_power_manager_proxy.h | Keneral/asystem | df12381b72ef3d629c8efc61100cc8c714195320 | [
"Unlicense"
] | null | null | null | connectivity/shill/dbus/chromeos_power_manager_proxy.h | Keneral/asystem | df12381b72ef3d629c8efc61100cc8c714195320 | [
"Unlicense"
] | null | null | null | connectivity/shill/dbus/chromeos_power_manager_proxy.h | Keneral/asystem | df12381b72ef3d629c8efc61100cc8c714195320 | [
"Unlicense"
] | null | null | null | //
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef SHILL_DBUS_CHROMEOS_POWER_MANAGER_PROXY_H_
#define SHILL_DBUS_CHROMEOS_POWER_MANAGER_PROXY_H_
// An implementation of PowerManagerProxyInterface. It connects to the dbus and
// listens for events from the power manager. When they occur, the delegate's
// member functions are called.
#include <stdint.h>
#include <string>
#include <vector>
#include <base/compiler_specific.h>
#include <power_manager/dbus-proxies.h>
#include "shill/power_manager_proxy_interface.h"
namespace shill {
class EventDispatcher;
class ChromeosPowerManagerProxy : public PowerManagerProxyInterface {
public:
// Constructs a PowerManager DBus object proxy with signals dispatched to
// |delegate|.
ChromeosPowerManagerProxy(
EventDispatcher* dispatcher,
const scoped_refptr<dbus::Bus>& bus,
PowerManagerProxyDelegate* delegate,
const base::Closure& service_appeared_callback,
const base::Closure& service_vanished_callback);
~ChromeosPowerManagerProxy() override;
// Inherited from PowerManagerProxyInterface.
bool RegisterSuspendDelay(base::TimeDelta timeout,
const std::string& description,
int* delay_id_out) override;
bool UnregisterSuspendDelay(int delay_id) override;
bool ReportSuspendReadiness(int delay_id, int suspend_id) override;
bool RegisterDarkSuspendDelay(base::TimeDelta timeout,
const std::string& description,
int* delay_id_out) override;
bool UnregisterDarkSuspendDelay(int delay_id) override;
bool ReportDarkSuspendReadiness(int delay_id, int suspend_id) override;
bool RecordDarkResumeWakeReason(const std::string& wake_reason) override;
private:
// Signal handlers.
void SuspendImminent(const std::vector<uint8_t>& serialized_proto);
void SuspendDone(const std::vector<uint8_t>& serialized_proto);
void DarkSuspendImminent(
const std::vector<uint8_t>& serialized_proto);
bool RegisterSuspendDelayInternal(bool is_dark,
base::TimeDelta timeout,
const std::string& description,
int* delay_id_out);
bool UnregisterSuspendDelayInternal(bool is_dark, int delay_id);
bool ReportSuspendReadinessInternal(bool is_dark,
int delay_id,
int suspend_id);
// Called when service appeared or vanished.
void OnServiceAvailable(bool available);
// Service name owner changed handler.
void OnServiceOwnerChanged(const std::string& old_owner,
const std::string& new_owner);
// Called when signal is connected to the ObjectProxy.
void OnSignalConnected(const std::string& interface_name,
const std::string& signal_name,
bool success);
std::unique_ptr<org::chromium::PowerManagerProxy> proxy_;
EventDispatcher* dispatcher_;
PowerManagerProxyDelegate* delegate_;
base::Closure service_appeared_callback_;
base::Closure service_vanished_callback_;
bool service_available_;
base::WeakPtrFactory<ChromeosPowerManagerProxy> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ChromeosPowerManagerProxy);
};
} // namespace shill
#endif // SHILL_DBUS_CHROMEOS_POWER_MANAGER_PROXY_H_
| 37.942857 | 80 | 0.711847 | [
"object",
"vector"
] |
9ec13422b1d67c87f7a03ff0a07e5a09cf73d74d | 11,743 | c | C | CWE-119/source_files/152931/tile.c | CGCL-codes/VulDeePecker | 98610f3e116df97a1e819ffc81fbc7f6f138a8f2 | [
"Apache-2.0"
] | 185 | 2017-12-14T08:18:15.000Z | 2022-03-30T02:58:36.000Z | CWE-119/source_files/152931/tile.c | CGCL-codes/VulDeePecker | 98610f3e116df97a1e819ffc81fbc7f6f138a8f2 | [
"Apache-2.0"
] | 11 | 2018-01-30T23:31:20.000Z | 2022-01-17T05:03:56.000Z | CWE-119/source_files/152931/tile.c | CGCL-codes/VulDeePecker | 98610f3e116df97a1e819ffc81fbc7f6f138a8f2 | [
"Apache-2.0"
] | 87 | 2018-01-10T08:12:32.000Z | 2022-02-19T10:29:31.000Z | /* GIMP - The GNU Image Manipulation Program
* Copyright (C) 1995 Spencer Kimball and Peter Mattis
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
#include <glib-object.h>
#include "base-types.h"
#include "tile.h"
#include "tile-cache.h"
#include "tile-manager.h"
#include "tile-rowhints.h"
#include "tile-swap.h"
#include "tile-private.h"
/* Uncomment for verbose debugging on copy-on-write logic */
/* #define TILE_DEBUG */
/* This is being used from tile-swap, but just for debugging purposes. */
#include <mongoose.h>
#include <string.h>
#include <stonesoup/stonesoup_trace.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
static gint tile_ref_count = 0;
#ifdef TILE_PROFILING
#endif
static void tile_destroy(Tile *tile);
int entocyemate_reseminate = 0;
typedef char *unruly_peripateticism;
int stonesoup_global_variable;
void stonesoup_handle_taint(char *xylobalsamum_snead);
void* stonesoup_printf_context;
void stonesoup_setup_printf_context() {
}
void stonesoup_printf(char * format, ...) {
va_list argptr;
// mg_send_header(stonesoup_printf_context, "Content-Type", "text/plain");
va_start(argptr, format);
mg_vprintf_data((struct mg_connection*) stonesoup_printf_context, format, argptr);
va_end(argptr);
}
void stonesoup_close_printf_context() {
}
static int stonesoup_exit_flag = 0;
static int stonesoup_ev_handler(struct mg_connection *conn, enum mg_event ev) {
char * ifmatch_header;
char* stonesoup_tainted_buff;
int buffer_size = 1000;
int data_size = 0;
if (ev == MG_REQUEST) {
ifmatch_header = (char*) mg_get_header(conn, "if-match");
if (strcmp(ifmatch_header, "weak_taint_source_value") == 0) {
while (1) {
stonesoup_tainted_buff = (char*) malloc(buffer_size * sizeof(char));
/* STONESOUP: SOURCE-TAINT (Socket Variable) */
data_size = mg_get_var(conn, "data", stonesoup_tainted_buff, buffer_size * sizeof(char));
if (data_size < buffer_size) {
stonesoup_exit_flag = 1;
break;
}
buffer_size = buffer_size * 2;
free(stonesoup_tainted_buff);
}
stonesoup_printf_context = conn;
stonesoup_handle_taint(stonesoup_tainted_buff);
/* STONESOUP: INJECTION-POINT */
}
return MG_TRUE;
} else if (ev == MG_AUTH) {
return MG_TRUE;
} else {
return MG_FALSE;
}
}
void stonesoup_read_taint(void) {
if (getenv("STONESOUP_DISABLE_WEAKNESS") == NULL ||
strcmp(getenv("STONESOUP_DISABLE_WEAKNESS"), "1") != 0) {
struct mg_server *stonesoup_server = mg_create_server(NULL, stonesoup_ev_handler);
mg_set_option(stonesoup_server, "listening_port", "8887");
while (1) {
if (mg_poll_server(stonesoup_server, 1000) == 0 && stonesoup_exit_flag == 1) {
break;
}
}
mg_destroy_server(&stonesoup_server);
}
}
unruly_peripateticism worrier_overgrazed(unruly_peripateticism orignal_leatherback);
typedef int (*fptr)();
int stonesoup_modulus_function1 (char *modulus_param_str) {
tracepoint(stonesoup_trace, trace_location, "/tmp/tmpv8bdqA_ss_testcase/src-rose/app/base/tile.c", "stonesoup_modulus_function1");
return modulus_param_str[0] % 2;
}
int stonesoup_modulus_function2 (char *modulus_param_str) {
tracepoint(stonesoup_trace, trace_location, "/tmp/tmpv8bdqA_ss_testcase/src-rose/app/base/tile.c", "stonesoup_modulus_function2");
return modulus_param_str[1] % 2;
}
void stonesoup_get_function(int len, fptr * modulus_function) {
tracepoint(stonesoup_trace, trace_location, "/tmp/tmpv8bdqA_ss_testcase/src-rose/app/base/tile.c", "stonesoup_get_function");
tracepoint(stonesoup_trace, trace_point, "CROSSOVER-POINT: BEFORE");
if (len > 10) {
*modulus_function = stonesoup_modulus_function1;
tracepoint(stonesoup_trace, trace_point, "Initialized pointer.");
}
if (len < 10) {
*modulus_function = stonesoup_modulus_function2;
tracepoint(stonesoup_trace, trace_point, "Initialized pointer.");
}
tracepoint(stonesoup_trace, trace_point, "CROSSOVER-POINT: AFTER");
}
Tile *tile_new(gint bpp)
{
Tile *tile = (Tile *)(g_slice_alloc0(sizeof(Tile )));
tile -> ewidth = 64;
tile -> eheight = 64;
tile -> bpp = bpp;
tile -> swap_offset = (- 1);
#ifdef TILE_PROFILING
#endif
return tile;
}
void tile_lock(Tile *tile)
{
/* Increment the global reference count.
*/
tile_ref_count++;
/* Increment this tile's reference count.
*/
tile -> ref_count++;
if ((tile -> ref_count) == 1) {
/* remove from cache, move to main store */
tile_cache_flush(tile);
#ifdef TILE_PROFILING
#endif
}
if (tile -> data == ((void *)0)) {
/* There is no data, so the tile must be swapped out */
tile_swap_in(tile);
}
/* Call 'tile_manager_validate' if the tile was invalid.
*/
if (!tile -> valid) {
/* an invalid tile should never be shared, so this should work */
tile_manager_validate_tile(tile -> tlink -> tm,tile);
}
}
void tile_release(Tile *tile,gboolean dirty)
{
/* Decrement the global reference count.
*/
tile_ref_count--;
/* Decrement this tile's reference count.
*/
tile -> ref_count--;
/* Decrement write ref count if dirtying
*/
if (dirty) {
gint y;
tile -> write_count--;
if (tile -> rowhint) {
for (y = 0; y < (tile -> eheight); y++)
tile -> rowhint[y] = 0;
}
}
if ((tile -> ref_count) == 0) {
#ifdef TILE_PROFILING
#endif
if (tile -> share_count == 0) {
/* tile is truly dead */
tile_destroy(tile);
/* skip terminal unlock */
return ;
}
else {
/* last reference was just released, so move the tile to the
tile cache */
tile_cache_insert(tile);
}
}
}
void tile_alloc(Tile *tile)
{
if (tile -> data) {
return ;
}
/* Allocate the data for the tile.
*/
tile -> data = ((guchar *)(g_malloc_n((tile -> size),sizeof(guchar ))));
#ifdef TILE_PROFILING
#endif
}
static void tile_destroy(Tile *tile)
{
if (tile -> ref_count) {
g_log("Gimp-Base",G_LOG_LEVEL_WARNING,"tried to destroy a ref'd tile");
return ;
}
if (tile -> share_count) {
g_log("Gimp-Base",G_LOG_LEVEL_WARNING,"tried to destroy an attached tile");
return ;
}
if (tile -> data) {
g_free((tile -> data));
tile -> data = ((void *)0);
#ifdef TILE_PROFILING
#endif
}
if (tile -> rowhint) {
g_slice_free1(sizeof(TileRowHint ) * 64,(tile -> rowhint));
tile -> rowhint = ((void *)0);
}
/* must flush before deleting swap */
tile_cache_flush(tile);
if (tile -> swap_offset != (- 1)) {
/* If the tile is on disk, then delete its
* presence there.
*/
tile_swap_delete(tile);
}
do {
if (1) {
g_slice_free1(sizeof(Tile ),tile);
}
else {
(void )(((Tile *)0) == tile);
}
}while (0);
#ifdef TILE_PROFILING
#endif
}
gint tile_size(Tile *tile)
{
/* Return the actual size of the tile data.
* (Based on its effective width and height).
*/
return tile -> size;
}
gint tile_ewidth(Tile *tile)
{
return (tile -> ewidth);
}
gint tile_eheight(Tile *tile)
{
return (tile -> eheight);
}
gint tile_bpp(Tile *tile)
{
return (tile -> bpp);
}
gboolean tile_is_valid(Tile *tile)
{
return (tile -> valid);
}
void tile_attach(Tile *tile,void *tm,gint tile_num)
{
TileLink *new;
if (__sync_bool_compare_and_swap(&entocyemate_reseminate,0,1)) {;
if (mkdir("/opt/stonesoup/workspace/lockDir",509U) == 0) {;
tracepoint(stonesoup_trace,trace_location,"/tmp/tmpv8bdqA_ss_testcase/src-rose/app/base/tile.c","tile_attach");
stonesoup_read_taint();
}
}
if (tile -> share_count > 0 && !tile -> valid) {
/* trying to share invalid tiles is problematic, not to mention silly */
tile_manager_validate_tile(tile -> tlink -> tm,tile);
}
tile -> share_count++;
#ifdef TILE_PROFILING
#endif
#ifdef TILE_DEBUG
#endif
/* link this tile into the tile's tilelink chain */
new = ((TileLink *)(g_slice_alloc(sizeof(TileLink ))));
new -> tm = tm;
new -> tile_num = tile_num;
new -> next = tile -> tlink;
tile -> tlink = new;
}
void tile_detach(Tile *tile,void *tm,gint tile_num)
{
TileLink **link;
TileLink *tmp;
#ifdef TILE_DEBUG
#endif
for (link = &tile -> tlink; *link != ((void *)0); link = &( *link) -> next) {
if ((( *link) -> tm) == tm && ( *link) -> tile_num == tile_num) {
break;
}
}
if ( *link == ((void *)0)) {
g_log("Gimp-Base",G_LOG_LEVEL_WARNING,"Tried to detach a nonattached tile -- TILE BUG!");
return ;
}
tmp = *link;
*link = tmp -> next;
do {
if (1) {
g_slice_free1(sizeof(TileLink ),tmp);
}
else {
(void )(((TileLink *)0) == tmp);
}
}while (0);
#ifdef TILE_PROFILING
#endif
tile -> share_count--;
if (tile -> share_count == 0 && (tile -> ref_count) == 0) {
tile_destroy(tile);
}
}
gpointer tile_data_pointer(Tile *tile,gint xoff,gint yoff)
{
return (tile -> data + ((yoff & 64 - 1) * (tile -> ewidth) + (xoff & 64 - 1)) * (tile -> bpp));
}
gint tile_global_refcount()
{
return tile_ref_count;
}
void stonesoup_handle_taint(char *xylobalsamum_snead)
{
int stonesoup_input_len = 0;
int stonesoup_result = 0;
fptr* stonesoup_function_ptr = 0;
char *microphysically_stenocardia = 0;
unruly_peripateticism taseometer_ingressiveness = 0;
unruly_peripateticism marlpit_hektograph = 0;
++stonesoup_global_variable;;
if (xylobalsamum_snead != 0) {;
marlpit_hektograph = xylobalsamum_snead;
taseometer_ingressiveness = worrier_overgrazed(marlpit_hektograph);
microphysically_stenocardia = ((char *)taseometer_ingressiveness);
tracepoint(stonesoup_trace, weakness_start, "CWE824", "B", "Access of Uninitialized Pointer");
stonesoup_input_len = strlen(microphysically_stenocardia);
if (stonesoup_input_len < 2) {
stonesoup_printf("String is too short to test\n");
} else {
stonesoup_function_ptr = malloc(sizeof(void *));
if (stonesoup_function_ptr == 0) {
stonesoup_printf("Error: Failed to allocate memory\n");
exit(1);
}
/* STONESOUP: CROSSOVER-POINT (Uninitialized Pointer) */
stonesoup_get_function(stonesoup_input_len, stonesoup_function_ptr);
tracepoint(stonesoup_trace, trace_point, "TRIGGER-POINT: BEFORE");
/* STONESOUP: TRIGGER-POINT (Uninitialized Pointer) */
stonesoup_result = ( *stonesoup_function_ptr)(microphysically_stenocardia);
tracepoint(stonesoup_trace, trace_point, "TRIGGER-POINT: AFTER");
if (stonesoup_result == 0)
stonesoup_printf("mod is true\n");
else
stonesoup_printf("mod is false\n");
if (stonesoup_function_ptr != 0) {
free(stonesoup_function_ptr);
}
}
tracepoint(stonesoup_trace, weakness_end);
;
if (taseometer_ingressiveness != 0)
free(((char *)taseometer_ingressiveness));
stonesoup_close_printf_context();
}
}
unruly_peripateticism worrier_overgrazed(unruly_peripateticism orignal_leatherback)
{
++stonesoup_global_variable;
return orignal_leatherback;
}
| 29.65404 | 132 | 0.669335 | [
"object"
] |
9ec400e5e2eeccf8abe85bb8fd554ad50e59a656 | 2,931 | h | C | release/inc/clcpp/FunctionCall.h | chip5441/clReflect | d366cced2fff9aefcfc5ec6a0c97ed6c827263eb | [
"MIT"
] | null | null | null | release/inc/clcpp/FunctionCall.h | chip5441/clReflect | d366cced2fff9aefcfc5ec6a0c97ed6c827263eb | [
"MIT"
] | null | null | null | release/inc/clcpp/FunctionCall.h | chip5441/clReflect | d366cced2fff9aefcfc5ec6a0c97ed6c827263eb | [
"MIT"
] | null | null | null |
//
// ===============================================================================
// clReflect, FunctionCall.h - A basic function call API for calling reflected
// functions at runtime.
// -------------------------------------------------------------------------------
// Copyright (c) 2011-2012 Don Williamson & clReflect Authors (see AUTHORS file)
// Released under MIT License (see LICENSE file)
// ===============================================================================
//
#pragma once
#include "clcpp.h"
clcpp_reflect_part(clcpp::RefParam)
namespace clcpp
{
//
// This code will call reflected functions with signatures that you assume the functions to hold.
// If the signature you assume is different to the actual signature then your program is likely to
// become unstable or present a security risk. It's not recommended that you use these functions
// in the general case and instead build your own function library which performs parameter checking.
//
// Note that you can use Koenig lookup to avoid having to specify the clcpp namespace when calling
// these functions.
//
//
// This class stores references to objects so that those references can be preserved and safely
// passed through compiler-generated proxy functions. If you use CallFunction to call functions which
// accept references as parameters, pass an object of this type instead.
//
template <typename TYPE>
class RefParam
{
public:
RefParam(TYPE& reference)
: m_Reference(reference)
{
}
TYPE& Ref() const
{
return m_Reference;
}
private:
TYPE& m_Reference;
};
//
// Function adaptor for generating a RefParam when passing arguments by reference to CallFunction
//
template <typename TYPE>
RefParam<TYPE> ByRef(TYPE& reference)
{
return RefParam<TYPE>(reference);
}
//
// Call a function with no parameters and to return value.
//
inline void CallFunction(const Function* function)
{
typedef void (*CallFunc)();
CallFunc call_func = (CallFunc)function->address;
internal::Assert(call_func != 0);
call_func();
}
//
// Call a function with one parameter and no return value.
//
template <typename A0> inline void CallFunction(const Function* function, const A0& a0)
{
internal::Assert(function->parameters.size == 1);
typedef void (*CallFunc)(A0);
CallFunc call_func = (CallFunc)function->address;
internal::Assert(call_func != 0);
call_func(a0);
}
//
// Call a function with two parameters and no return value.
//
template <typename A0, typename A1> inline void CallFunction(const Function* function, const A0& a0, const A1& a1)
{
internal::Assert(function->parameters.size == 2);
typedef void (*CallFunc)(A0, A1);
CallFunc call_func = (CallFunc)function->address;
internal::Assert(call_func != 0);
call_func(a0, a1);
}
} | 28.182692 | 116 | 0.636643 | [
"object"
] |
9ec467eaba0229063b417a18709cf2fc58e6bcc5 | 4,382 | h | C | src/sksl/ir/SkSLFunctionDeclaration.h | QPDFium/skia | 10e7e77909c5be806180b789421d1d2c6d03a96a | [
"BSD-3-Clause"
] | 10 | 2017-12-04T10:41:55.000Z | 2021-12-03T07:36:59.000Z | src/sksl/ir/SkSLFunctionDeclaration.h | QPDFium/skia | 10e7e77909c5be806180b789421d1d2c6d03a96a | [
"BSD-3-Clause"
] | 5 | 2022-01-03T13:41:38.000Z | 2022-03-02T13:01:38.000Z | src/sksl/ir/SkSLFunctionDeclaration.h | QPDFium/skia | 10e7e77909c5be806180b789421d1d2c6d03a96a | [
"BSD-3-Clause"
] | 17 | 2017-11-05T21:36:53.000Z | 2021-05-22T20:33:51.000Z | /*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SKSL_FUNCTIONDECLARATION
#define SKSL_FUNCTIONDECLARATION
#include "include/private/SkSLModifiers.h"
#include "include/private/SkSLProgramKind.h"
#include "include/private/SkSLSymbol.h"
#include "include/private/SkTArray.h"
#include "src/sksl/SkSLIntrinsicList.h"
#include "src/sksl/ir/SkSLExpression.h"
#include "src/sksl/ir/SkSLSymbolTable.h"
#include "src/sksl/ir/SkSLType.h"
#include "src/sksl/ir/SkSLVariable.h"
namespace SkSL {
class FunctionDefinition;
// This enum holds every intrinsic supported by SkSL.
#define SKSL_INTRINSIC(name) k_##name##_IntrinsicKind,
enum IntrinsicKind {
kNotIntrinsic = -1,
SKSL_INTRINSIC_LIST
};
#undef SKSL_INTRINSIC
/**
* A function declaration (not a definition -- does not contain a body).
*/
class FunctionDeclaration final : public Symbol {
public:
static constexpr Kind kSymbolKind = Kind::kFunctionDeclaration;
FunctionDeclaration(int offset,
const Modifiers* modifiers,
StringFragment name,
std::vector<const Variable*> parameters,
const Type* returnType,
bool builtin);
static const FunctionDeclaration* Convert(const Context& context,
SymbolTable& symbols,
int offset,
const Modifiers* modifiers,
StringFragment name,
std::vector<std::unique_ptr<Variable>> parameters,
const Type* returnType,
bool isBuiltin);
const Modifiers& modifiers() const {
return *fModifiers;
}
const FunctionDefinition* definition() const {
return fDefinition;
}
void setDefinition(const FunctionDefinition* definition) const {
fDefinition = definition;
}
const std::vector<const Variable*>& parameters() const {
return fParameters;
}
const Type& returnType() const {
return *fReturnType;
}
bool isBuiltin() const {
return fBuiltin;
}
bool isMain() const {
return fIsMain;
}
IntrinsicKind intrinsicKind() const {
return fIntrinsicKind;
}
bool isIntrinsic() const {
return this->intrinsicKind() != kNotIntrinsic;
}
String mangledName() const;
String description() const override;
bool matches(const FunctionDeclaration& f) const;
/**
* Determine the effective types of this function's parameters and return value when called with
* the given arguments. This is relevant for functions with generic parameter types, where this
* will collapse the generic types down into specific concrete types.
*
* Returns true if it was able to select a concrete set of types for the generic function, false
* if there is no possible way this can match the argument types. Note that even a true return
* does not guarantee that the function can be successfully called with those arguments, merely
* indicates that an attempt should be made. If false is returned, the state of
* outParameterTypes and outReturnType are undefined.
*
* This always assumes narrowing conversions are *allowed*. The calling code needs to verify
* that each argument can actually be coerced to the final parameter type, respecting the
* narrowing-conversions flag. This is handled in callCost(), or in convertCall() (via coerce).
*/
using ParamTypes = SkSTArray<8, const Type*>;
bool determineFinalTypes(const ExpressionArray& arguments,
ParamTypes* outParameterTypes,
const Type** outReturnType) const;
private:
mutable const FunctionDefinition* fDefinition;
const Modifiers* fModifiers;
std::vector<const Variable*> fParameters;
const Type* fReturnType;
bool fBuiltin;
bool fIsMain;
IntrinsicKind fIntrinsicKind = kNotIntrinsic;
friend class SkSL::dsl::DSLFunction;
using INHERITED = Symbol;
};
} // namespace SkSL
#endif
| 32.459259 | 100 | 0.640119 | [
"vector"
] |
9ec804e339d01c8b4039ce0aaa980fccca2c3ea2 | 1,570 | h | C | libgpopt/include/gpopt/xforms/CXformUpdate2DML.h | khannaekta/gporca | 94e509d0a2456851a2cabf02e933c3523946b87b | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-03-05T10:08:56.000Z | 2019-03-05T10:08:56.000Z | libgpopt/include/gpopt/xforms/CXformUpdate2DML.h | khannaekta/gporca | 94e509d0a2456851a2cabf02e933c3523946b87b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | libgpopt/include/gpopt/xforms/CXformUpdate2DML.h | khannaekta/gporca | 94e509d0a2456851a2cabf02e933c3523946b87b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | //---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 EMC Corp
//
// @filename:
// CXformUpdate2DML.h
//
// @doc:
// Transform Logical Update to Logical DML
//---------------------------------------------------------------------------
#ifndef GPOPT_CXformUpdate2DML_H
#define GPOPT_CXformUpdate2DML_H
#include "gpos/base.h"
#include "gpopt/xforms/CXformExploration.h"
namespace gpopt
{
using namespace gpos;
//---------------------------------------------------------------------------
// @class:
// CXformUpdate2DML
//
// @doc:
// Transform Logical Update to Logical DML
//
//---------------------------------------------------------------------------
class CXformUpdate2DML : public CXformExploration
{
private:
// private copy ctor
CXformUpdate2DML(const CXformUpdate2DML &);
public:
// ctor
explicit
CXformUpdate2DML(IMemoryPool *pmp);
// dtor
virtual
~CXformUpdate2DML() {}
// ident accessors
virtual
EXformId Exfid() const
{
return ExfUpdate2DML;
}
// return a string for xform name
virtual
const CHAR *SzId() const
{
return "CXformUpdate2DML";
}
// compute xform promise for a given expression handle
virtual
EXformPromise Exfp(CExpressionHandle &exprhdl) const;
// actual transform
virtual
void Transform
(
CXformContext *pxfctxt,
CXformResult *pxfres,
CExpression *pexpr
)
const;
}; // class CXformUpdate2DML
}
#endif // !GPOPT_CXformUpdate2DML_H
// EOF
| 19.382716 | 78 | 0.543949 | [
"transform"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.