hexsha
stringlengths
40
40
size
int64
19
11.4M
ext
stringclasses
13 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
270
max_stars_repo_name
stringlengths
5
110
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
9
max_stars_count
float64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
270
max_issues_repo_name
stringlengths
5
116
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
9
max_issues_count
float64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
270
max_forks_repo_name
stringlengths
5
116
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
9
max_forks_count
float64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
19
11.4M
avg_line_length
float64
1.93
229k
max_line_length
int64
12
688k
alphanum_fraction
float64
0.07
0.99
matches
listlengths
1
10
a81433d50ba2f8d038ecf05bac2907dee88bfa12
11,724
cpp
C++
routing/features_road_graph.cpp
pastk/organicmaps
0b2b090090cfb535a4a8415b066e5cba8a7b1b7b
[ "Apache-2.0" ]
null
null
null
routing/features_road_graph.cpp
pastk/organicmaps
0b2b090090cfb535a4a8415b066e5cba8a7b1b7b
[ "Apache-2.0" ]
null
null
null
routing/features_road_graph.cpp
pastk/organicmaps
0b2b090090cfb535a4a8415b066e5cba8a7b1b7b
[ "Apache-2.0" ]
null
null
null
#include "routing/features_road_graph.hpp"
#include "routing/routing_helpers.hpp"
#include "routing/nearest_edge_finder.hpp"
#include "routing/route.hpp"

#include "routing_common/vehicle_model.hpp"

#include "indexer/classificator.hpp"
#include "indexer/data_source.hpp"
#include "indexer/ftypes_matcher.hpp"
#include "indexer/scales.hpp"

#include "coding/point_coding.hpp"

#include "geometry/distance_on_sphere.hpp"

#include "base/logging.hpp"
#include "base/macros.hpp"

#include <limits>

using namespace std;

namespace routing
{
namespace
{
// Cache contains 2 ^ kPowOfTwoForFeatureCacheSize elements.
uint32_t constexpr kPowOfTwoForFeatureCacheSize = 10;
double constexpr kMwmRoadCrossingRadiusMeters = 2.0;
// Sentinel passed to GetCachedRoadInfo() by callers that only need geometry,
// not speed. Cached entries created this way keep the sentinel speed.
auto constexpr kInvalidSpeedKMPH = numeric_limits<double>::max();
}  // namespace

double GetRoadCrossingRadiusMeters() { return kMwmRoadCrossingRadiusMeters; }

// Keeps an mwm handle alive and lazily provides the altitude loader for it.
FeaturesRoadGraph::Value::Value(DataSource const & dataSource, MwmSet::MwmHandle handle)
  : m_mwmHandle(move(handle))
{
  if (!m_mwmHandle.IsAlive())
    return;
  m_altitudeLoader = make_unique<feature::AltitudeLoader>(dataSource, m_mwmHandle.GetId());
}

FeaturesRoadGraph::CrossCountryVehicleModel::CrossCountryVehicleModel(
    shared_ptr<VehicleModelFactoryInterface> vehicleModelFactory)
  : m_vehicleModelFactory(vehicleModelFactory)
  , m_maxSpeed(m_vehicleModelFactory->GetVehicleModel()->GetMaxWeightSpeed())
  , m_offroadSpeedKMpH(m_vehicleModelFactory->GetVehicleModel()->GetOffroadSpeed())
{
}

SpeedKMpH FeaturesRoadGraph::CrossCountryVehicleModel::GetSpeed(
    FeatureType & f, SpeedParams const & speedParams) const
{
  return GetVehicleModel(f.GetID())->GetSpeed(f, speedParams);
}

std::optional<HighwayType> FeaturesRoadGraph::CrossCountryVehicleModel::GetHighwayType(
    FeatureType & f) const
{
  return GetVehicleModel(f.GetID())->GetHighwayType(f);
}

SpeedKMpH const & FeaturesRoadGraph::CrossCountryVehicleModel::GetOffroadSpeed() const
{
  return m_offroadSpeedKMpH;
}

bool FeaturesRoadGraph::CrossCountryVehicleModel::IsOneWay(FeatureType & f) const
{
  return GetVehicleModel(f.GetID())->IsOneWay(f);
}

bool FeaturesRoadGraph::CrossCountryVehicleModel::IsRoad(FeatureType & f) const
{
  return GetVehicleModel(f.GetID())->IsRoad(f);
}

bool FeaturesRoadGraph::CrossCountryVehicleModel::IsPassThroughAllowed(FeatureType & f) const
{
  return GetVehicleModel(f.GetID())->IsPassThroughAllowed(f);
}

// Returns the country-specific vehicle model for the mwm that owns |featureId|,
// creating and caching it on first use.
VehicleModelInterface * FeaturesRoadGraph::CrossCountryVehicleModel::GetVehicleModel(
    FeatureID const & featureId) const
{
  auto itr = m_cache.find(featureId.m_mwmId);
  if (itr != m_cache.end())
    return itr->second.get();

  // Fix: the model was previously declared |auto const|, which silently turned
  // the move() below into a copy (performance-move-const-arg).
  auto vehicleModel = m_vehicleModelFactory->GetVehicleModelForCountry(
      featureId.m_mwmId.GetInfo()->GetCountryName());

  ASSERT(vehicleModel, ());
  ASSERT_EQUAL(m_maxSpeed, vehicleModel->GetMaxWeightSpeed(), ());

  itr = m_cache.emplace(featureId.m_mwmId, move(vehicleModel)).first;
  return itr->second.get();
}

void FeaturesRoadGraph::CrossCountryVehicleModel::Clear() { m_cache.clear(); }

// Returns a (possibly freshly default-constructed) cache slot for |featureId|;
// |found| tells the caller whether the slot already holds valid data.
IRoadGraph::RoadInfo & FeaturesRoadGraph::RoadInfoCache::Find(FeatureID const & featureId,
                                                              bool & found)
{
  std::lock_guard lock(m_mutexCache);
  auto res = m_cache.emplace(featureId.m_mwmId, TMwmFeatureCache());
  if (res.second)
    res.first->second.Init(kPowOfTwoForFeatureCacheSize);
  return res.first->second.Find(featureId.m_index, found);
}

void FeaturesRoadGraph::RoadInfoCache::Clear()
{
  std::lock_guard lock(m_mutexCache);
  m_cache.clear();
}

FeaturesRoadGraph::FeaturesRoadGraph(DataSource const & dataSource, IRoadGraph::Mode mode,
                                     shared_ptr<VehicleModelFactoryInterface> vehicleModelFactory)
  : m_dataSource(dataSource), m_mode(mode), m_vehicleModel(vehicleModelFactory)
{
}

int FeaturesRoadGraph::GetStreetReadScale() { return scales::GetUpperScale(); }

// Functor for DataSource::ForEachInRect(): forwards every road feature's
// cached geometry to an ICrossEdgesLoader.
class CrossFeaturesLoader
{
public:
  CrossFeaturesLoader(FeaturesRoadGraph const & graph, IRoadGraph::ICrossEdgesLoader & edgesLoader)
    : m_graph(graph), m_edgesLoader(edgesLoader)
  {
  }

  void operator()(FeatureType & ft)
  {
    if (!m_graph.IsRoad(ft))
      return;

    FeatureID const & featureId = ft.GetID();
    IRoadGraph::RoadInfo const & roadInfo =
        m_graph.GetCachedRoadInfo(featureId, ft, kInvalidSpeedKMPH);
    CHECK_EQUAL(roadInfo.m_speedKMPH, kInvalidSpeedKMPH, ());
    m_edgesLoader(featureId, roadInfo.m_junctions, roadInfo.m_bidirectional);
  }

private:
  FeaturesRoadGraph const & m_graph;
  IRoadGraph::ICrossEdgesLoader & m_edgesLoader;
};

IRoadGraph::RoadInfo FeaturesRoadGraph::GetRoadInfo(FeatureID const & featureId,
                                                    SpeedParams const & speedParams) const
{
  RoadInfo const & ri = GetCachedRoadInfo(featureId, speedParams);
  ASSERT_GREATER(ri.m_speedKMPH, 0.0, ());
  return ri;
}

double FeaturesRoadGraph::GetSpeedKMpH(FeatureID const & featureId,
                                       SpeedParams const & speedParams) const
{
  double const speedKMPH = GetCachedRoadInfo(featureId, speedParams).m_speedKMPH;
  ASSERT_GREATER(speedKMPH, 0.0, ());
  return speedKMPH;
}

double FeaturesRoadGraph::GetMaxSpeedKMpH() const { return m_vehicleModel.GetMaxWeightSpeed(); }

void FeaturesRoadGraph::ForEachFeatureClosestToCross(m2::PointD const & cross,
                                                     ICrossEdgesLoader & edgesLoader) const
{
  CrossFeaturesLoader featuresLoader(*this, edgesLoader);
  m2::RectD const rect = mercator::RectByCenterXYAndSizeInMeters(cross, kMwmRoadCrossingRadiusMeters);
  m_dataSource.ForEachInRect(featuresLoader, rect, GetStreetReadScale());
}

void FeaturesRoadGraph::FindClosestEdges(
    m2::RectD const & rect, uint32_t count,
    vector<pair<Edge, geometry::PointWithAltitude>> & vicinities) const
{
  NearestEdgeFinder finder(rect.Center(), nullptr /* IsEdgeProjGood */);
  auto const f = [&finder, this](FeatureType & ft) {
    if (!m_vehicleModel.IsRoad(ft))
      return;

    FeatureID const & featureId = ft.GetID();
    IRoadGraph::RoadInfo const & roadInfo = GetCachedRoadInfo(featureId, ft, kInvalidSpeedKMPH);
    finder.AddInformationSource(IRoadGraph::FullRoadInfo(featureId, roadInfo));
  };
  m_dataSource.ForEachInRect(f, rect, GetStreetReadScale());

  finder.MakeResult(vicinities, count);
}

vector<IRoadGraph::FullRoadInfo> FeaturesRoadGraph::FindRoads(
    m2::RectD const & rect, IsGoodFeatureFn const & isGoodFeature) const
{
  vector<IRoadGraph::FullRoadInfo> roads;
  auto const f = [&roads, &isGoodFeature, &rect, this](FeatureType & ft) {
    if (!m_vehicleModel.IsRoad(ft))
      return;

    FeatureID const & featureId = ft.GetID();
    if (isGoodFeature && !isGoodFeature(featureId))
      return;

    // DataSource::ForEachInRect() gives not only features inside |rect| but some other features
    // which lie close to the rect. Removes all the features which don't cross |rect|.
    auto const & roadInfo = GetCachedRoadInfo(featureId, ft, kInvalidSpeedKMPH);
    if (!RectCoversPolyline(roadInfo.m_junctions, rect))
      return;

    roads.emplace_back(featureId, roadInfo);
  };
  m_dataSource.ForEachInRect(f, rect, GetStreetReadScale());
  return roads;
}

void FeaturesRoadGraph::GetFeatureTypes(FeatureID const & featureId,
                                        feature::TypesHolder & types) const
{
  FeaturesLoaderGuard loader(m_dataSource, featureId.m_mwmId);
  auto ft = loader.GetFeatureByIndex(featureId.m_index);
  if (!ft)
    return;

  ASSERT_EQUAL(ft->GetGeomType(), feature::GeomType::Line, ());
  types = feature::TypesHolder(*ft);
}

// Finds the types of a point feature located (within kMwmPointAccuracy) at the
// junction; |types| stays empty when no such point feature exists nearby.
void FeaturesRoadGraph::GetJunctionTypes(geometry::PointWithAltitude const & junction,
                                         feature::TypesHolder & types) const
{
  types = feature::TypesHolder();

  m2::PointD const & cross = junction.GetPoint();
  auto const f = [&types, &cross](FeatureType & ft) {
    if (!types.Empty())
      return;  // First match wins.
    if (ft.GetGeomType() != feature::GeomType::Point)
      return;
    if (!base::AlmostEqualAbs(ft.GetCenter(), cross, kMwmPointAccuracy))
      return;

    feature::TypesHolder typesHolder(ft);
    if (!typesHolder.Empty())
      types = typesHolder;
  };

  m2::RectD const rect = mercator::RectByCenterXYAndSizeInMeters(cross, kMwmRoadCrossingRadiusMeters);
  m_dataSource.ForEachInRect(f, rect, GetStreetReadScale());
}

IRoadGraph::Mode FeaturesRoadGraph::GetMode() const { return m_mode; }

void FeaturesRoadGraph::ClearState()
{
  m_cache.Clear();
  m_vehicleModel.Clear();
  m_mwmLocks.clear();
}

bool FeaturesRoadGraph::IsRoad(FeatureType & ft) const { return m_vehicleModel.IsRoad(ft); }

IRoadGraph::PointWithAltitudeVec FeaturesRoadGraph::GetRoadGeom(FeatureType & ft) const
{
  FeatureID const & featureId = ft.GetID();
  IRoadGraph::RoadInfo const & roadInfo = GetCachedRoadInfo(featureId, ft, kInvalidSpeedKMPH);
  CHECK_EQUAL(roadInfo.m_speedKMPH, kInvalidSpeedKMPH, ());
  return roadInfo.m_junctions;
}

bool FeaturesRoadGraph::IsOneWay(FeatureType & ft) const { return m_vehicleModel.IsOneWay(ft); }

double FeaturesRoadGraph::GetSpeedKMpHFromFt(FeatureType & ft,
                                             SpeedParams const & speedParams) const
{
  return m_vehicleModel.GetSpeed(ft, speedParams).m_weight;
}

// Fills |ri| with directionality, speed and altitude-annotated geometry for |ft|.
// The owning mwm is locked for the graph's lifetime so the geometry stays valid.
void FeaturesRoadGraph::ExtractRoadInfo(FeatureID const & featureId, FeatureType & ft,
                                        double speedKMpH, RoadInfo & ri) const
{
  ri.m_speedKMPH = speedKMpH;
  Value const & value = LockMwm(featureId.m_mwmId);
  if (!value.IsAlive())
    return;

  ri.m_bidirectional = !IsOneWay(ft);
  ft.ParseGeometry(FeatureType::BEST_GEOMETRY);

  size_t const pointsCount = ft.GetPointsCount();

  geometry::Altitudes altitudes;
  if (value.m_altitudeLoader)
  {
    altitudes = value.m_altitudeLoader->GetAltitudes(featureId.m_index, ft.GetPointsCount());
  }
  else
  {
    // Value::IsAlive() passed above, so the loader should always exist.
    ASSERT(false, ());
    altitudes = geometry::Altitudes(ft.GetPointsCount(), geometry::kDefaultAltitudeMeters);
  }

  CHECK_EQUAL(altitudes.size(), pointsCount,
              ("altitudeLoader->GetAltitudes(", featureId.m_index,
               "...) returns wrong altitudes:", altitudes));

  ri.m_junctions.resize(pointsCount);
  for (size_t i = 0; i < pointsCount; ++i)
    ri.m_junctions[i] = geometry::PointWithAltitude(ft.GetPoint(i), altitudes[i]);
}

IRoadGraph::RoadInfo const & FeaturesRoadGraph::GetCachedRoadInfo(
    FeatureID const & featureId, SpeedParams const & speedParams) const
{
  bool found = false;
  RoadInfo & ri = m_cache.Find(featureId, found);

  if (found)
    return ri;

  FeaturesLoaderGuard loader(m_dataSource, featureId.m_mwmId);
  auto ft = loader.GetFeatureByIndex(featureId.m_index);
  if (!ft)
    return ri;

  ASSERT_EQUAL(ft->GetGeomType(), feature::GeomType::Line, ());

  ExtractRoadInfo(featureId, *ft, GetSpeedKMpHFromFt(*ft, speedParams), ri);
  return ri;
}

IRoadGraph::RoadInfo const & FeaturesRoadGraph::GetCachedRoadInfo(FeatureID const & featureId,
                                                                  FeatureType & ft,
                                                                  double speedKMPH) const
{
  bool found = false;
  RoadInfo & ri = m_cache.Find(featureId, found);
  if (found)
    return ri;

  // ft must be set.
  ASSERT_EQUAL(featureId, ft.GetID(), ());
  ExtractRoadInfo(featureId, ft, speedKMPH, ri);
  return ri;
}

// Pins |mwmId| for the graph's lifetime so cached geometry never dangles.
FeaturesRoadGraph::Value const & FeaturesRoadGraph::LockMwm(MwmSet::MwmId const & mwmId) const
{
  ASSERT(mwmId.IsAlive(), ());

  auto const itr = m_mwmLocks.find(mwmId);
  if (itr != m_mwmLocks.end())
    return itr->second;

  // Fix: |mwmId| is a const reference, so the previous move(mwmId) was a
  // misleading no-op that copied anyway.
  return m_mwmLocks.emplace(mwmId, Value(m_dataSource, m_dataSource.GetMwmHandleById(mwmId)))
      .first->second;
}
}  // namespace routing
30.852632
113
0.72262
[ "geometry", "vector" ]
a81da0e911a20932d55d3846643d6136c23f6685
1,034
hpp
C++
src/hittable_list.hpp
TonyZYT2000/MyRayTracer
96996feeac065fc5c8a44a06f617ce3a8c3d584e
[ "MIT" ]
1
2020-09-22T20:57:52.000Z
2020-09-22T20:57:52.000Z
src/hittable_list.hpp
TonyZYT2000/MyRayTracer
96996feeac065fc5c8a44a06f617ce3a8c3d584e
[ "MIT" ]
null
null
null
src/hittable_list.hpp
TonyZYT2000/MyRayTracer
96996feeac065fc5c8a44a06f617ce3a8c3d584e
[ "MIT" ]
null
null
null
#ifndef HITTABLE_LIST_HPP #define HITTABLE_LIST_HPP #include <memory> #include <vector> #include "hittable.hpp" using std::make_shared; using std::shared_ptr; class hittable_list : public hittable { private: std::vector<shared_ptr<hittable>> objects; public: hittable_list() {} hittable_list(shared_ptr<hittable> object) { add(object); } void clear() { objects.clear(); } void add(shared_ptr<hittable> object) { objects.push_back(object); } virtual bool hit(const ray& r, double t_min, double t_max, hit_record& record) const override; }; bool hittable_list::hit(const ray& r, double t_min, double t_max, hit_record& record) const { hit_record temp_rec; bool hit_flag = false; double closest = t_max; for (const auto& object : objects) { if (object->hit(r, t_min, closest, temp_rec)) { hit_flag = true; closest = temp_rec.t; record = temp_rec; } } return hit_flag; } #endif
22.977778
72
0.634429
[ "object", "vector" ]
a81ebf6ed94e1ea49ab747d3b8bdf36191672dd6
500
hpp
C++
framework/shape.hpp
balu94/programmiersprachen-raytracer
3720c7d24f42b3b09412fd1ff365cd0288932455
[ "MIT" ]
null
null
null
framework/shape.hpp
balu94/programmiersprachen-raytracer
3720c7d24f42b3b09412fd1ff365cd0288932455
[ "MIT" ]
null
null
null
framework/shape.hpp
balu94/programmiersprachen-raytracer
3720c7d24f42b3b09412fd1ff365cd0288932455
[ "MIT" ]
null
null
null
#ifndef BUW_SHAPE_HPP #define BUW_SHAPE_HPP #include <string> #include "color.hpp" class Shape { public: Shape(); Shape(std::string const& name); virtual ~Shape(); virtual double area() const = 0; virtual double volume() const = 0; std::string const& name() const; Color const& color() const; virtual std::ostream& print(std::ostream& os) const; private: std::string name_; Color color_; }; std::ostream& operator <<(std::ostream& os, Shape const& s); #endif
19.230769
60
0.66
[ "shape" ]
a821e9694e97fbb3205a42aebcab9143e3da94fa
1,916
cpp
C++
BashuOJ-Code/3540.cpp
magicgh/algorithm-contest-code
c21a90b11f73535c61e6363a4305b74cff24a85b
[ "MIT" ]
null
null
null
BashuOJ-Code/3540.cpp
magicgh/algorithm-contest-code
c21a90b11f73535c61e6363a4305b74cff24a85b
[ "MIT" ]
null
null
null
BashuOJ-Code/3540.cpp
magicgh/algorithm-contest-code
c21a90b11f73535c61e6363a4305b74cff24a85b
[ "MIT" ]
null
null
null
#include<iostream>
#include<cstdio>
#include<cmath>
#include<iomanip>
#include<algorithm>
#include<cstring>
#include<cstdlib>
#include<cctype>   // fix: isdigit() was used without its header
#include<queue>
#include<vector>
#include<stack>
// Fix: the `register` storage class was removed in C++17, so the original
// `#define ri register int` no longer compiles under -std=c++17.
#define ri int
#define ll long long
using namespace std;

// Sqrt-decomposition solution for online range-mode queries: for each query
// [l, r], output the smallest value with the maximum occurrence count.
// Queries are forced online by mixing the previous answer into the bounds.
const int MAXN=40005;
const int INF=0x7fffffff/2;
int n,m,len,part;
int num[MAXN];        // sorted unique values, for decompressing answers
int val[MAXN];        // compressed value at each position (order-preserving)
int block[MAXN];      // block index of each position
int Cnt[MAXN];        // scratch occurrence counters used by Init()
int f[205][205];      // f[i][j]: mode (compressed) of blocks i..j, ties -> smaller value
vector<int>a[MAXN];   // increasing positions of each compressed value

// Fast manual integer reader; handles an optional leading '-'.
inline int GetInt()
{
    int num=0,bj=1;
    char c=getchar();
    while(!isdigit(c))bj=(bj==-1||c=='-')?-1:1,c=getchar();
    while(isdigit(c))num=num*10+c-'0',c=getchar();
    return num*bj;
}

// Precomputes f[x][b] for every block b >= x: the mode of all positions
// covered by blocks x..b, with ties broken toward the smaller value.
inline void Init(int x)
{
    memset(Cnt,0,sizeof(Cnt));
    int cnt=0,ans=0;
    for(ri i=(x-1)*part+1;i<=n;i++)
    {
        int belong=block[i];
        Cnt[val[i]]++;
        if(Cnt[val[i]]>cnt||(cnt==Cnt[val[i]]&&ans>val[i]))
            cnt=Cnt[val[i]],ans=val[i];
        f[x][belong]=ans;  // running mode once block `belong` is fully counted
    }
}

// Occurrences of compressed value v within positions [l, r] via binary search
// on its sorted position list.
inline int Binary(int l,int r,int v)
{
    return upper_bound(a[v].begin(),a[v].end(),r)-lower_bound(a[v].begin(),a[v].end(),l);
}

// Mode of [l, r]: seed with the precomputed mode of the fully covered interior
// blocks, then challenge it with every element of the two partial edge blocks.
inline int Query(int l,int r)
{
    int ans=f[block[l]+1][block[r]-1];  // 0 (empty list) when no interior block
    int cnt=Binary(l,r,ans);
    for(ri i=l;i<=min(block[l]*part,r);i++)
    {
        int Count=Binary(l,r,val[i]);
        if(Count>cnt||(cnt==Count&&ans>val[i]))
            cnt=Count,ans=val[i];
    }
    if(block[l]==block[r])return num[ans];  // both ends in one block: done
    for(ri i=(block[r]-1)*part+1;i<=r;i++)
    {
        int Count=Binary(l,r,val[i]);
        if(Count>cnt||(cnt==Count&&ans>val[i]))
            cnt=Count,ans=val[i];
    }
    return num[ans];
}

int main()
{
    n=GetInt(),m=GetInt(),part=sqrt(n);
    for(ri i=1;i<=n;i++)
        val[i]=num[i]=GetInt();
    // Coordinate-compress the values (compression preserves ordering, so
    // smaller compressed index == smaller original value).
    sort(num+1,num+n+1);
    len=unique(num+1,num+n+1)-(num+1);
    for(ri i=1;i<=n;i++)
    {
        int x=lower_bound(num+1,num+len+1,val[i])-num;
        val[i]=x;
        a[x].push_back(i);
    }
    for(ri i=1;i<=n;i++)block[i]=(i-1)/part+1;
    for(ri i=1;i<=block[n];i++)Init(i);
    int x=0;  // previous answer, used to decode the online queries
    for(ri i=1;i<=m;i++)
    {
        int u=GetInt(),v=GetInt();
        int l=(u+x-1)%n+1,r=(v+x-1)%n+1;
        if(l>r)swap(l,r);
        printf("%d\n",x=Query(l,r));
    }
    return 0;
}
21.772727
86
0.604384
[ "vector" ]
a826783dfb4f78700a197630b05f38b8cc61d51f
599
cpp
C++
STL/Adapter/bind2nd_3.cpp
liangjisheng/C-Cpp
8b33ba1f43580a7bdded8bb4ce3d92983ccedb81
[ "MIT" ]
5
2019-09-17T09:12:15.000Z
2021-05-29T10:54:39.000Z
STL/Adapter/bind2nd_3.cpp
liangjisheng/C-Cpp
8b33ba1f43580a7bdded8bb4ce3d92983ccedb81
[ "MIT" ]
null
null
null
STL/Adapter/bind2nd_3.cpp
liangjisheng/C-Cpp
8b33ba1f43580a7bdded8bb4ce3d92983ccedb81
[ "MIT" ]
2
2021-07-26T06:36:12.000Z
2022-01-23T15:20:30.000Z
#include"iostream" #include"tchar.h" #include"vector" #include"algorithm" using namespace std; void print(int n) { cout<<n<<' '; } int _tmain(int argc,_TCHAR* argv[]) { vector<int> vec; vec.push_back(6); vec.push_back(9); vec.push_back(78); vec.push_back(60); vec.push_back(63); vec.push_back(45); vec.push_back(234); vec.push_back(1); cout<<"Data:"<<endl; for_each(vec.begin(),vec.end(),print); cout<<endl; // 找出大于60的数据个数 int nCount=count_if(vec.begin(),vec.end(), bind2nd(greater<int>(),60)); cout<<"data greater 60 nCount=" <<nCount<<endl; system("pause"); return 0; }
18.71875
48
0.66611
[ "vector" ]
a829374e58c50cac402e51fa461113a26ed080d4
5,240
cc
C++
cephalon/src/world/world.cc
alanjian85/cephalon
22db3fd91ea1d626fdb63a28cfa1c74e65c94484
[ "MIT" ]
2
2022-03-12T12:33:18.000Z
2022-03-17T08:07:31.000Z
cephalon/src/world/world.cc
alanjian85/cephalon
22db3fd91ea1d626fdb63a28cfa1c74e65c94484
[ "MIT" ]
null
null
null
cephalon/src/world/world.cc
alanjian85/cephalon
22db3fd91ea1d626fdb63a28cfa1c74e65c94484
[ "MIT" ]
null
null
null
#include "world.h"

using namespace cephalon;

#include <thread>

#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

#include "terrains/terrain.h"

// Opens the block database at |save_path| and creates the worker pools
// (4 threads each) used for asynchronous chunk loading and mesh rebuilding.
// No terrain generator is selected yet; see setTerrain().
World::World(const char* save_path)
    : load_thread_pool_(4),
      rebuild_thread_pool_(4),
      database_(save_path)
{
    terrain_ = nullptr;
}

// Blocks until all queued load/rebuild tasks have drained before destruction.
World::~World() {
    load_thread_pool_.join();
    rebuild_thread_pool_.join();
}

// Selects the terrain generator by its registered name.
void World::setTerrain(const std::string& name) {
    terrain_ = Terrain::getTerrain(name);
}

// Forwards the seed to the current terrain generator.
// NOTE(review): assumes setTerrain() was called first — terrain_ starts null.
void World::setSeed(unsigned seed) {
    terrain_->setSeed(seed);
}

// Persists the block edit, applies it to the loaded chunk (if any), and marks
// each neighboring chunk whose mesh borders the edited cell as dirty, based on
// the neighbor flags reported by Chunk::setBlock().
void World::setBlock(glm::ivec3 pos, const Block& block) {
    database_.insertBlock(pos, block.getName().c_str());
    auto region = getRegion(pos);
    std::shared_lock lock(mutex_);
    auto it = chunks_.find(region);
    if (it != chunks_.cend()) {
        auto flags = it->second->setBlock(getOffset(pos), block);
        if ((flags & NeighborChunk::kLeft) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2(-1,  0), true);
        if ((flags & NeighborChunk::kRight) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2( 1,  0), true);
        if ((flags & NeighborChunk::kDown) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2( 0, -1), true);
        if ((flags & NeighborChunk::kUp) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2( 0,  1), true);
        if ((flags & NeighborChunk::kLowerLeft) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2(-1, -1), true);
        if ((flags & NeighborChunk::kUpperLeft) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2(-1,  1), true);
        if ((flags & NeighborChunk::kLowerRight) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2( 1, -1), true);
        if ((flags & NeighborChunk::kUpperRight) != NeighborChunk::kNone)
            setChunkDirty(region + glm::ivec2( 1,  1), true);
    }
}

// Returns the block at |pos|, or nullptr when its chunk is not loaded.
const Block* World::getBlock(glm::ivec3 pos) const {
    std::shared_lock lock(mutex_);
    auto it = chunks_.find(getRegion(pos));
    if (it != chunks_.cend())
        return &it->second->getBlock(getOffset(pos));
    return nullptr;
}

// Unloads chunks that drifted outside the view distance and schedules
// asynchronous loading for newly entered ones, capped at
// Config::chunkLoadLimit new chunks per call.
void World::update(glm::vec3 player_pos) {
    auto player_region = getRegion(glm::ivec3(player_pos));
    std::lock_guard lock(mutex_);  // exclusive: the chunk map is mutated below
    for (auto i = chunks_.begin(); i != chunks_.end();) {
        auto& [region, chunk] = *i;
        if (glm::distance(glm::vec2(region), glm::vec2(player_region)) > Config::viewDistance + 1) {
            chunks_.erase(i++);
        } else {
            ++i;
        }
    }
    int load_count = 0;
    for (auto x = player_region.x - Config::viewDistance - 1; x <= player_region.x + Config::viewDistance + 1; ++x) {
        for (auto y = player_region.y - Config::viewDistance - 1; y <= player_region.y + Config::viewDistance + 1; ++y) {
            glm::ivec2 region(x, y);
            if (load_count < Config::chunkLoadLimit &&
                glm::distance(glm::vec2(region), glm::vec2(player_region)) <= Config::viewDistance + 1)
            {
                auto [it, created] = chunks_.emplace(region, std::make_shared<Chunk>(*this, region));
                if (created) {
                    auto chunk = it->second;
                    // Generate and load off-thread; afterwards dirty the 3x3
                    // neighborhood so adjacent meshes pick up the new chunk.
                    // The lambda shares ownership of the chunk, keeping it
                    // alive even if it is unloaded before the task runs.
                    boost::asio::post(load_thread_pool_, [this, chunk = std::move(chunk)]() {
                        terrain_->genChunk(*chunk);
                        database_.loadChunk(*chunk);
                        for (int x = -1; x <= 1; ++x) {
                            for (int y = -1; y <= 1; ++y) {
                                setChunkDirty(chunk->getRegion() + glm::ivec2(x, y), true);
                            }
                        }
                    });
                    ++load_count;
                }
            }
        }
    }
}

// Draws every chunk in the camera frustum. Dirty chunks near the camera
// (within the surrounding 3x3 regions) are rebuilt synchronously so the
// player never sees a stale mesh up close; farther ones rebuild off-thread.
void World::render(PerspectiveCamera cam) {
    std::shared_lock lock(mutex_);
    for (auto& [region, chunk] : chunks_) {
        if (chunk->inbound(cam)) {
            if (chunk->isDirty()) {
                auto diff = getRegion(cam.pos) - region;
                if (diff.x >= -1 && diff.x <= 1 && diff.y >= -1 && diff.y <= 1) {
                    chunk->rebuild();
                } else {
                    // Copy the shared_ptr so the task owns the chunk.
                    auto new_chunk = chunk;
                    boost::asio::post(rebuild_thread_pool_, [chunk = std::move(new_chunk)]() {
                        chunk->rebuild();
                    });
                }
                chunk->setDirty(false);
            }
            chunk->render(cam);
        }
    }
}

// Casts the camera ray against every in-frustum chunk, tightening |dmax| to
// the nearest hit so far. On success writes the hit face into |dir| and the
// world-space block position into |pos|.
bool World::intersect(PerspectiveCamera cam, Direction& dir, glm::ivec3& pos) const {
    std::shared_lock lock(mutex_);
    bool intersected = false;
    auto dmax = static_cast<float>(Config::destroyDistance);
    for (auto& [region, chunk] : chunks_) {
        glm::ivec3 offset;
        if (chunk->inbound(cam) &&
            chunk->intersect(Ray(cam.pos, cam.dir), cam.near, dmax, dir, offset, dmax)) {
            intersected = true;
            pos = getPosition(chunk->getRegion(), offset);
        }
    }
    return intersected;
}

// Flags the chunk at |region| for a mesh rebuild; no-op when it is not loaded.
// NOTE(review): callers are expected to already hold mutex_ — this method
// takes no lock of its own.
void World::setChunkDirty(glm::ivec2 region, bool dirty) {
    auto it = chunks_.find(region);
    if (it != chunks_.cend())
        it->second->setDirty(dirty);
}
36.388889
144
0.550573
[ "render" ]
a82a4d7456932352e603c59f246f5f5d2be0e05d
22,627
cpp
C++
LighthouseTracking.cpp
kimsama/openvr_vive-trcker_exp
06269117a5e5e09b24155bad12b881836ece42d7
[ "MIT" ]
1
2020-07-01T07:37:29.000Z
2020-07-01T07:37:29.000Z
LighthouseTracking.cpp
kimsama/openvr_vive-trcker_exp
06269117a5e5e09b24155bad12b881836ece42d7
[ "MIT" ]
1
2020-06-30T09:47:34.000Z
2020-06-30T09:47:34.000Z
LighthouseTracking.cpp
kimsama/openvr_vive-trcker_exp
06269117a5e5e09b24155bad12b881836ece42d7
[ "MIT" ]
null
null
null
// The main file for dealing with VR specifically. See LighthouseTracking.h for descriptions of each function in the class. #define _CRT_SECURE_NO_WARNINGS #include "LighthouseTracking.h" // Destructor for the LighthouseTracking object LighthouseTracking::~LighthouseTracking() { if (vr_pointer != NULL) { // VR Shutdown: https://github.com/ValveSoftware/openvr/wiki/API-Documentation#initialization-and-cleanup VR_Shutdown(); vr_pointer = NULL; } } // Constructor for the LighthouseTracking object LighthouseTracking::LighthouseTracking(InitFlags f) { flags = f; coordsBuf = new char[1024]; trackBuf = new char[1024]; rotBuf = new char[1024]; trackers = new TrackerData[16]; // Definition of the init error EVRInitError eError = VRInitError_None; /* VR_Init ( arg1: Pointer to EVRInitError type (enum defined in openvr.h) arg2: Must be of type EVRApplicationType The type of VR Applicaion. This example uses the SteamVR instance that is already running. Because of this, the init function will fail if SteamVR is not already running. Other EVRApplicationTypes include: * VRApplication_Scene - "A 3D application that will be drawing an environment."" * VRApplication_Overlay - "An application that only interacts with overlays or the dashboard."" * VRApplication_Utility */ vr_pointer = VR_Init(&eError, VRApplication_Background); // If the init failed because of an error if (eError != VRInitError_None) { vr_pointer = NULL; printf("Unable to init VR runtime: %s \n", VR_GetVRInitErrorAsEnglishDescription(eError)); exit(EXIT_FAILURE); } //If the init didn't fail, init the Cylinder object array cylinders = new Cylinder*[MAX_CYLINDERS]; for(int i = 0 ; i < MAX_CYLINDERS; i++) { cylinders[i] = new Cylinder(); } } bool LighthouseTracking::RunProcedure() { // Define a VREvent VREvent_t event; if(vr_pointer->PollNextEvent(&event, sizeof(event))) { /* ProcessVREvent is a function defined in this module. 
It returns false if the function determines the type of error to be fatal or signal some kind of quit. */ if (!ProcessVREvent(event)) { // If ProcessVREvent determined that OpenVR quit, print quit message printf("\nEVENT--(OpenVR) service quit"); return false; } } // ParseTrackingFrame() is where the tracking and vibration code starts ParseTrackingFrame(); return true; } bool LighthouseTracking::ProcessVREvent(const VREvent_t & event) { char* buf = new char[100]; bool ret = true; switch (event.eventType) { case VREvent_TrackedDeviceActivated: sprintf(buf, "\nEVENT--(OpenVR) Device : %d attached", event.trackedDeviceIndex); break; case VREvent_TrackedDeviceDeactivated: sprintf(buf, "\nEVENT--(OpenVR) Device : %d detached", event.trackedDeviceIndex); break; case VREvent_TrackedDeviceUpdated: sprintf(buf, "\nEVENT--(OpenVR) Device : %d updated", event.trackedDeviceIndex); break; case VREvent_DashboardActivated: sprintf(buf, "\nEVENT--(OpenVR) Dashboard activated"); break; case VREvent_DashboardDeactivated: sprintf(buf, "\nEVENT--(OpenVR) Dashboard deactivated"); break; case VREvent_ChaperoneDataHasChanged: sprintf(buf, "\nEVENT--(OpenVR) Chaperone data has changed"); break; case VREvent_ChaperoneSettingsHaveChanged: sprintf(buf, "\nEVENT--(OpenVR) Chaperone settings have changed"); break; case VREvent_ChaperoneUniverseHasChanged: sprintf(buf, "\nEVENT--(OpenVR) Chaperone universe has changed"); break; //case VREvent_ApplicationTransitionStarted: // sprintf(buf, "\nEVENT--(OpenVR) Application Transition: Transition has started"); //break; //case VREvent_ApplicationTransitionNewAppStarted: // sprintf(buf, "\nEVENT--(OpenVR) Application transition: New app has started"); //break; case VREvent_Quit: { sprintf(buf, "\nEVENT--(OpenVR) Received SteamVR Quit (%d%s", VREvent_Quit, ")"); ret = false; } break; case VREvent_ProcessQuit: { sprintf(buf, "\nEVENT--(OpenVR) SteamVR Quit Process (%d%s", VREvent_ProcessQuit, ")"); ret = false; } break; //case 
VREvent_QuitAborted_UserPrompt: //{ // sprintf(buf, "\nEVENT--(OpenVR) SteamVR Quit Aborted UserPrompt (%d%s", VREvent_QuitAborted_UserPrompt, ")"); // ret = false; //} //break; case VREvent_QuitAcknowledged: { sprintf(buf, "\nEVENT--(OpenVR) SteamVR Quit Acknowledged (%d%s", VREvent_QuitAcknowledged, ")"); ret = false; } break; case VREvent_TrackedDeviceRoleChanged: sprintf(buf, "\nEVENT--(OpenVR) TrackedDeviceRoleChanged: %d", event.trackedDeviceIndex); break; case VREvent_TrackedDeviceUserInteractionStarted: sprintf(buf, "\nEVENT--(OpenVR) TrackedDeviceUserInteractionStarted: %d", event.trackedDeviceIndex); break; default: if (event.eventType >= 200 && event.eventType <= 203) //Button events range from 200-203 dealWithButtonEvent(event); else sprintf(buf, "\nEVENT--(OpenVR) Event: %d", event.eventType); // Check entire event list starts on line #452: https://github.com/ValveSoftware/openvr/blob/master/headers/openvr.h } if(flags.printEvents) printf("%s",buf); return ret; } //This method deals exclusively with button events void LighthouseTracking::dealWithButtonEvent(VREvent_t event) { int controllerIndex; //The index of the controllers[] array that corresponds with the controller that had a buttonEvent for (int i = 0; i < 2; i++) //Iterates across the array of controllers { ControllerData* pController = &(controllers[i]); if(flags.printBEvents && event.trackedDeviceIndex == pController->deviceId) //prints the event data to the terminal printf("\nBUTTON-E--index=%d deviceId=%d hand=%d button=%d event=%d",i,pController->deviceId,pController->hand,event.data.controller.button,event.eventType); if(pController->deviceId == event.trackedDeviceIndex) //This tests to see if the current controller from the loop is the same from the event controllerIndex = i; } ControllerData* pC = &(controllers[controllerIndex]); //The pointer to the ControllerData struct if (event.data.controller.button == k_EButton_ApplicationMenu //Test if the ApplicationButton was pressed && 
event.eventType == VREvent_ButtonUnpress) //Test if the button is being released (the action happens on release, not press) { inDrawingMode = !inDrawingMode; doRumbleNow = true; } if(inDrawingMode) switch( event.data.controller.button ) { case k_EButton_Grip: //If it is the grip button that was... switch(event.eventType) { case VREvent_ButtonPress: // ...pressed... if(cpMillis() - gripMillis > 500) // ...and it's been half a second since the grip was last released... cylinders[cylinderIndex]->s1[1] = pC->pos.v[1]; //...then set the cylinder's y 1 to the controllers y coordinate. break; case VREvent_ButtonUnpress: // ...released... if(cpMillis() - gripMillis > 500) // ...and it's been half a second since the grip was last released... cylinders[cylinderIndex]->s2[1] = pC->pos.v[1]; //...then set the cylinder's y 2 to the controllers y coordinate. else // ...and it' hasn't been half a second since the grip was last released... { if(cylinders[cylinderIndex]->s1[1] > pC->pos.v[1]) // ...if the controller's position is **below** the starting position... cylinders[cylinderIndex]->s2[1] = -std::numeric_limits<float>::max(); // ...set the cylinder's y 2 to negative infinity. else // ...if the controller's position is **above** the starting position... cylinders[cylinderIndex]->s2[1] = std::numeric_limits<float>::max(); // ...set the cylinder's y 2 to positive infinity. } cylinders[cylinderIndex]->init(); gripMillis = cpMillis(); break; } break; case k_EButton_SteamVR_Trigger: switch(event.eventType) { case VREvent_ButtonPress: //If the trigger was pressed... cylinders[cylinderIndex]->s1[0] = pC->pos.v[0]; //Set the cylinder's x 1 to the controller's x cylinders[cylinderIndex]->s1[2] = pC->pos.v[2]; //Set the cylinder's z 1 to the controller's z break; case VREvent_ButtonUnpress://If the trigger was released... 
cylinders[cylinderIndex]->s2[0] = pC->pos.v[0]; //Set the cylinder's x 2 to the controller's x cylinders[cylinderIndex]->s2[2] = pC->pos.v[2]; //Set the cylinder's z 2 to the controller's z cylinders[cylinderIndex]->init(); break; } break; case k_EButton_SteamVR_Touchpad: switch(event.eventType) { case VREvent_ButtonPress: break; case VREvent_ButtonUnpress://If the touchpad was just pressed if(std::abs(pC->padX) > std::abs(pC->padY)) //Tests if the left or right of the pad was pressed { if (pC->padX < 0 && cylinderIndex != 0) //If left side of pad was pressed and there is a previous cylinder cylinderIndex = cylinderIndex-1; //Switch index to previous cylinder else if (pC->padX > 0 && cylinderIndex < MAX_CYLINDERS) //If the right side of the pad was pressed cylinderIndex = cylinderIndex+1; //Switch the index to the next cylinder doRumbleNow = true; } else //If the top/bottom of the pad was pressed { if (pC->padY > 0) //If the top was pressed doRumbleNow = true; else if (pC->padY < 0) //If the bottom was pressed, reset the current cylinder cylinders[cylinderIndex] = new Cylinder(); } break; } break; } } HmdVector3_t LighthouseTracking::GetPosition(HmdMatrix34_t matrix) { HmdVector3_t vector; vector.v[0] = matrix.m[0][3]; vector.v[1] = matrix.m[1][3]; vector.v[2] = matrix.m[2][3]; return vector; } long lastPRCall = 0; HmdQuaternion_t LighthouseTracking::GetRotation(HmdMatrix34_t matrix) { HmdQuaternion_t q; q.w = sqrt(fmax(0, 1 + matrix.m[0][0] + matrix.m[1][1] + matrix.m[2][2])) / 2; q.x = sqrt(fmax(0, 1 + matrix.m[0][0] - matrix.m[1][1] - matrix.m[2][2])) / 2; q.y = sqrt(fmax(0, 1 - matrix.m[0][0] + matrix.m[1][1] - matrix.m[2][2])) / 2; q.z = sqrt(fmax(0, 1 - matrix.m[0][0] - matrix.m[1][1] + matrix.m[2][2])) / 2; q.x = copysign(q.x, matrix.m[2][1] - matrix.m[1][2]); q.y = copysign(q.y, matrix.m[0][2] - matrix.m[2][0]); q.z = copysign(q.z, matrix.m[1][0] - matrix.m[0][1]); return q; } HmdQuaternion_t LighthouseTracking::ProcessRotation(HmdQuaternion_t quat) { 
HmdQuaternion_t out; out.w = 2 * acos(quat.w); out.x = quat.x / sin(out.w/2); out.y = quat.y / sin(out.w/2); out.z = quat.z / sin(out.w/2); printf("\nPROCESSED w:%.3f x:%.3f y:%.3f z:%.3f",out.w,out.x,out.y,out.z); return out; } void LighthouseTracking::iterateAssignIds() { //Un-assigns the deviceIds and hands of controllers. If they are truely connected, will be re-assigned later in this function controllers[0].deviceId = -1; controllers[1].deviceId = -1; controllers[0].hand = -1; controllers[1].hand = -1; int numTrackersInitialized = 0; int numControllersInitialized = 0; for (unsigned int i = 0; i < k_unMaxTrackedDeviceCount; i++) // Iterates across all of the potential device indicies { if (!vr_pointer->IsTrackedDeviceConnected(i)) continue; //Doesn't use the id if the device isn't connected //vr_pointer points to the VRSystem that was in init'ed in the constructor. ETrackedDeviceClass trackedDeviceClass = vr_pointer->GetTrackedDeviceClass(i); //Finding the type of device if (trackedDeviceClass == ETrackedDeviceClass::TrackedDeviceClass_HMD) { hmdDeviceId = i; if(flags.printSetIds) printf("\nSETID--Assigned hmdDeviceId=%d",hmdDeviceId); } else if (trackedDeviceClass == ETrackedDeviceClass::TrackedDeviceClass_Controller && numControllersInitialized < 2) { ControllerData* pC = &(controllers[numControllersInitialized]); int sHand = -1; ETrackedControllerRole role = vr_pointer->GetControllerRoleForTrackedDeviceIndex(i); if (role == TrackedControllerRole_Invalid) //Invalid hand is actually very common, always need to test for invalid hand (lighthouses have lost tracking) sHand = 0; else if (role == TrackedControllerRole_LeftHand) sHand = 1; else if (role == TrackedControllerRole_RightHand) sHand = 2; pC->hand = sHand; pC->deviceId = i; //Used to get/store property ids for the xy of the pad and the analog reading of the trigger for(int x=0; x<k_unControllerStateAxisCount; x++ ) { int prop = vr_pointer->GetInt32TrackedDeviceProperty(pC->deviceId, 
(ETrackedDeviceProperty)(Prop_Axis0Type_Int32 + x)); if( prop==k_eControllerAxis_Trigger ) pC->idtrigger = x; else if( prop==k_eControllerAxis_TrackPad ) pC->idpad = x; } if(flags.printSetIds) printf("\nSETID--Assigned controllers[%d] .hand=%d .deviceId=%d .idtrigger=%d .idpad=%d",numControllersInitialized,sHand, i , pC->idtrigger, pC->idpad); numControllersInitialized++; //Increment this count so that the other controller gets initialized after initializing this one } else if(trackedDeviceClass == ETrackedDeviceClass::TrackedDeviceClass_GenericTracker) { TrackerData* pT = &(trackers[numTrackersInitialized]); pT->deviceId = i; if(flags.printSetIds) printf("\nSETID--Assigned tracker[%d] .deviceId=%d",numTrackersInitialized,pT->deviceId); numTrackersInitialized++; } } } void LighthouseTracking::setHands() { for (int z =0; z < 2; z++) { ControllerData* pC = &(controllers[z]); if (pC->deviceId < 0 || !vr_pointer->IsTrackedDeviceConnected(pC->deviceId)) continue; int sHand = -1; //Invalid hand is actually very common, always need to test for invalid hand (lighthouses have lost tracking) ETrackedControllerRole role = vr_pointer->GetControllerRoleForTrackedDeviceIndex(pC->deviceId); if (role == TrackedControllerRole_Invalid) sHand = 0; else if (role == TrackedControllerRole_LeftHand) sHand = 1; else if (role == TrackedControllerRole_RightHand) sHand = 2; pC->hand = sHand; } } void LighthouseTracking::ParseTrackingFrame() { //Runs the iterateAssignIds() method if... 
if(hmdDeviceId < 0 || // HMD id not yet initialized controllers[0].deviceId < 0 || // One of the controllers not yet initialized controllers[1].deviceId < 0 || controllers[0].deviceId == controllers[1].deviceId || //Both controllerData structs store the same deviceId controllers[0].hand == controllers[1].hand || //Both controllerData structs are the same hand (cpMillis() / 60000) > minuteCount) //It has been a minute since last init time { minuteCount = (cpMillis() / 60000); iterateAssignIds(); } HMDCoords(); ControllerCoords(); TrackerCoords(); if(flags.printCoords) printf("\nCOORDS-- %s",coordsBuf); if(flags.printTrack) printf("\nTRACK-- %s",trackBuf); if(flags.printRotation) printf("\nROT-- %s",rotBuf); } void LighthouseTracking::HMDCoords() { if (!vr_pointer->IsTrackedDeviceConnected(hmdDeviceId)) return; //TrackedDevicePose_t struct is a OpenVR struct. See line 180 in the openvr.h header. TrackedDevicePose_t trackedDevicePose; HmdVector3_t position; HmdQuaternion_t rot; //if (vr_pointer->IsInputFocusCapturedByAnotherProcess()) // printf( "\nINFO--Input Focus by Another Process"); vr_pointer->GetDeviceToAbsoluteTrackingPose(TrackingUniverseStanding, 0, &trackedDevicePose, 1); position = GetPosition(trackedDevicePose.mDeviceToAbsoluteTracking); rot = GetRotation(trackedDevicePose.mDeviceToAbsoluteTracking); sprintf(coordsBuf,"HMD %-28.28s", getPoseXYZString(trackedDevicePose,0)); sprintf(trackBuf,"HMD: %-25.25s %-7.7s " , getEnglishTrackingResultForPose(trackedDevicePose) , getEnglishPoseValidity(trackedDevicePose)); sprintf(rotBuf,"HMD: qw:%.2f qx:%.2f qy:%.2f qz:%.2f",rot.w,rot.x,rot.y,rot.z); } void LighthouseTracking::ControllerCoords() { setHands(); if(doRumbleNow) { rumbleMsOffset = cpMillis(); doRumbleNow = false; } TrackedDevicePose_t trackedDevicePose; VRControllerState_t controllerState; HmdQuaternion_t rot; //Arrays to contain information about the results of the button state sprintf call // so that the button state information can be printed all on 
one line for both controllers char** bufs = new char*[2]; bool* isOk = new bool[2]; //Stores the number of times 150ms have elapsed (loops with the % operator because // the "cylinder count" rumbling starts when indexN is one). int indexN = ((cpMillis()-rumbleMsOffset)/150)%(125); //Loops for each ControllerData struct for(int i = 0; i < 2; i++) { isOk[i] = false; char* buf = new char[100]; ControllerData* pC = &(controllers[i]); if (pC->deviceId < 0 || !vr_pointer->IsTrackedDeviceConnected(pC->deviceId) || pC->hand </*= Allow printing coordinates for invalid hand? Yes.*/ 0) continue; vr_pointer->GetControllerStateWithPose(TrackingUniverseStanding, pC->deviceId, &controllerState, sizeof(controllerState), &trackedDevicePose); pC->pos = GetPosition(trackedDevicePose.mDeviceToAbsoluteTracking); rot = GetRotation(trackedDevicePose.mDeviceToAbsoluteTracking); char handString[6]; if (pC->hand == 1) sprintf(handString, "LEFT"); else if (pC->hand == 2) sprintf(handString, "RIGHT"); else if(pC->hand == 0) sprintf(handString, "INVALID"); pC->isValid =trackedDevicePose.bPoseIsValid; sprintf(coordsBuf,"%s %s: %-28.28s",coordsBuf, handString, getPoseXYZString(trackedDevicePose,pC->hand)); sprintf(trackBuf,"%s %s: %-25.25s %-7.7s" , trackBuf, handString, getEnglishTrackingResultForPose(trackedDevicePose), getEnglishPoseValidity(trackedDevicePose)); sprintf(rotBuf,"%s %s qw:%.2f qx:%.2f qy:%.2f qz:%.2f",rotBuf,handString,rot.w,rot.x,rot.y,rot.z); int t = pC->idtrigger; int p = pC->idpad; //This is the call to get analog button data from the controllers pC->trigVal = controllerState.rAxis[t].x; pC->padX = controllerState.rAxis[p].x; pC->padY = controllerState.rAxis[p].y; sprintf(buf,"hand=%s handid=%d trigger=%f padx=%f pady=%f", handString, pC->hand , pC->trigVal , pC->padX , pC->padY); bufs[i] = buf; isOk[i] = true; //The following block controlls the rumbling of the controllers if(!inDrawingMode) //Will iterate across all cylinders if in sensing mode for(int x = 0; x < 
MAX_CYLINDERS; x++) { Cylinder* currCy = cylinders[x]; if(currCy->hasInit && currCy->isInside(pC->pos.v[0],pC->pos.v[1],pC->pos.v[2])) vr_pointer->TriggerHapticPulse(pC->deviceId,pC->idpad,500); //Vibrates if the controller is colliding with the cylinder bounds } if (inDrawingMode && indexN % 3 == 0 && indexN < (cylinderIndex+1)*3) //Vibrates the current cylinderIndex every thirty seconds or so vr_pointer->TriggerHapticPulse(pC->deviceId,pC->idpad,300); // see the definition of indexN above before the for loop } if(flags.printAnalog && isOk[0] == true) { printf("\nANALOG-- %s", bufs[0]); if(isOk[1] == true) { printf(" %s", bufs[1]); } } } void LighthouseTracking::TrackerCoords() { TrackedDevicePose_t trackedDevicePose; VRControllerState_t controllerState; HmdQuaternion_t rot; for(int i = 0; i < 16; i++) { TrackerData* pT = &(trackers[i]); if (pT->deviceId < 0 || !vr_pointer->IsTrackedDeviceConnected(pT->deviceId) ) continue; vr_pointer->GetControllerStateWithPose(TrackingUniverseStanding, pT->deviceId, &controllerState, sizeof(controllerState), &trackedDevicePose); pT->pos = GetPosition(trackedDevicePose.mDeviceToAbsoluteTracking); rot = GetRotation(trackedDevicePose.mDeviceToAbsoluteTracking); pT->isValid =trackedDevicePose.bPoseIsValid; sprintf(coordsBuf,"%s T%d: %-28.28s",coordsBuf, i, getPoseXYZString(trackedDevicePose,0)); sprintf(trackBuf,"%s T%d: %-25.25s %-7.7s" , trackBuf, i, getEnglishTrackingResultForPose(trackedDevicePose), getEnglishPoseValidity(trackedDevicePose)); sprintf(rotBuf,"%s T%d: qw:%.2f qx:%.2f qy:%.2f qz:%.2f",rotBuf,i,rot.w,rot.x,rot.y,rot.z); } } char* LighthouseTracking::getEnglishTrackingResultForPose(TrackedDevicePose_t pose) { char* buf = new char[50]; switch (pose.eTrackingResult) { case vr::ETrackingResult::TrackingResult_Uninitialized: sprintf(buf, "Invalid tracking result"); break; case vr::ETrackingResult::TrackingResult_Calibrating_InProgress: sprintf(buf, "Calibrating in progress"); break; case 
vr::ETrackingResult::TrackingResult_Calibrating_OutOfRange: sprintf(buf, "Calibrating Out of range"); break; case vr::ETrackingResult::TrackingResult_Running_OK: sprintf(buf, "Running OK"); break; case vr::ETrackingResult::TrackingResult_Running_OutOfRange: sprintf(buf, "WARNING: Running Out of Range"); break; default: sprintf(buf, "Default"); break; } return buf; } char* LighthouseTracking::getEnglishPoseValidity(TrackedDevicePose_t pose) { char* buf = new char[50]; if(pose.bPoseIsValid) sprintf(buf, "Valid"); else sprintf(buf, "Invalid"); return buf; } char* LighthouseTracking::getPoseXYZString(TrackedDevicePose_t pose, int hand) { HmdVector3_t pos = GetPosition(pose.mDeviceToAbsoluteTracking); char* cB = new char[50]; if(pose.bPoseIsValid) sprintf(cB, "x:%.3f y:%.3f z:%.3f",pos.v[0], pos.v[1], pos.v[2]); else sprintf(cB, " INVALID"); if(flags.pipeCoords) for(int i = 0; i < 3; i++) if(pose.bPoseIsValid) printf("%.5f\n",pos.v[i]); else printf("invalid\n",pos.v[i]); return cB; }
36.495161
165
0.667565
[ "object", "vector", "3d" ]
a82c5833958498ad42c34ee1684f06e19761faa6
1,420
cpp
C++
src/Addon.cpp
FabianTerhorst/Curium
ad0e7aa50ac11b8fa922da614100b0470931bc35
[ "CC0-1.0" ]
1
2016-05-26T22:47:38.000Z
2016-05-26T22:47:38.000Z
src/Addon.cpp
FabianTerhorst/Curium
ad0e7aa50ac11b8fa922da614100b0470931bc35
[ "CC0-1.0" ]
null
null
null
src/Addon.cpp
FabianTerhorst/Curium
ad0e7aa50ac11b8fa922da614100b0470931bc35
[ "CC0-1.0" ]
null
null
null
#include <windows.h> #include <tlhelp32.h> #include <node.h> #include "process.h" using namespace v8; void GetProcessByName(const FunctionCallbackInfo<Value>& args) { Isolate* isolate = Isolate::GetCurrent(); HandleScope scope(isolate); if (args.Length() < 1) { isolate->ThrowException(Exception::TypeError( String::NewFromUtf8(isolate, "Wrong number of arguments"))); return; } if (!args[0]->IsString()) { isolate->ThrowException(Exception::TypeError( String::NewFromUtf8(isolate, "Wrong arguments"))); return; } String::Utf8Value procName(args[0]->ToString()); HANDLE hdl = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);; PROCESSENTRY32 pe32; pe32.dwSize = sizeof(PROCESSENTRY32); BOOL rval; for (rval = Process32First(hdl, &pe32); rval == TRUE && strcmpi(pe32.szExeFile, *procName) != 0; rval = Process32Next(hdl, &pe32)); if (hdl != INVALID_HANDLE_VALUE) { CloseHandle(hdl); } Local<Number> num; if (rval == TRUE) { num = Number::New(isolate, pe32.th32ProcessID); } else { num = Number::New(isolate, 0); } args.GetReturnValue().Set(num); } void InitAll(Handle<Object> exports) { Process::Init(exports); NODE_SET_METHOD(exports, "getProcessByName", GetProcessByName); } NODE_MODULE(rpm, InitAll)
21.515152
72
0.624648
[ "object" ]
a82cfcf16318e1ab37a88ddcf6321e41061f09ba
57,217
cpp
C++
SRC/recorder/PVDRecorder.cpp
steva44/OpenSees
417c3be117992a108c6bbbcf5c9b63806b9362ab
[ "TCL" ]
8
2019-03-05T16:25:10.000Z
2020-04-17T14:12:03.000Z
SRC/recorder/PVDRecorder.cpp
steva44/OpenSees
417c3be117992a108c6bbbcf5c9b63806b9362ab
[ "TCL" ]
null
null
null
SRC/recorder/PVDRecorder.cpp
steva44/OpenSees
417c3be117992a108c6bbbcf5c9b63806b9362ab
[ "TCL" ]
3
2019-09-21T03:11:11.000Z
2020-01-19T07:29:37.000Z
/* ****************************************************************** ** ** OpenSees - Open System for Earthquake Engineering Simulation ** ** Pacific Earthquake Engineering Research Center ** ** ** ** ** ** (C) Copyright 1999, The Regents of the University of California ** ** All Rights Reserved. ** ** ** ** Commercial use of this program without express permission of the ** ** University of California, Berkeley, is strictly prohibited. See ** ** file 'COPYRIGHT' in main directory for information on usage and ** ** redistribution, and for a DISCLAIMER OF ALL WARRANTIES. ** ** ** ** Developed by: ** ** Frank McKenna (fmckenna@ce.berkeley.edu) ** ** Gregory L. Fenves (fenves@ce.berkeley.edu) ** ** Filip C. Filippou (filippou@ce.berkeley.edu) ** ** ** ** ****************************************************************** */ // $Revision: 1.0 $ // $Date: 2015-11-12 $ // Save all data into paraview format #include "PVDRecorder.h" #include <sstream> #include <elementAPI.h> #include <OPS_Globals.h> #include <Domain.h> #include <Element.h> #include <ElementIter.h> #include <Node.h> #include <Pressure_Constraint.h> #include <Pressure_ConstraintIter.h> #include <Matrix.h> #include <classTags.h> #include <NodeIter.h> #include "PFEMElement/BackgroundDef.h" #include "PFEMElement/Particle.h" #include "PFEMElement/ParticleGroup.h" std::map<int,PVDRecorder::VtkType> PVDRecorder::vtktypes; void* OPS_PVDRecorder() { int numdata = OPS_GetNumRemainingInputArgs(); if(numdata < 1) { opserr<<"WARNING: insufficient number of arguments\n"; return 0; } // filename const char* name = OPS_GetString(); // plotting options numdata = OPS_GetNumRemainingInputArgs(); int indent=2; int precision = 10; PVDRecorder::NodeData nodedata; std::vector<PVDRecorder::EleData> eledata; double dT = 0.0; while(numdata > 0) { const char* type = OPS_GetString(); if(strcmp(type, "disp") == 0) { nodedata.disp = true; } else if(strcmp(type, "vel") == 0) { nodedata.vel = true; } else if(strcmp(type, "accel") == 0) { 
nodedata.accel = true; } else if(strcmp(type, "incrDisp") == 0) { nodedata.incrdisp = true; } else if(strcmp(type, "reaction") == 0) { nodedata.reaction = true; } else if(strcmp(type, "pressure") == 0) { nodedata.pressure = true; } else if(strcmp(type, "unbalancedLoad") == 0) { nodedata.unbalanced = true; } else if(strcmp(type, "mass") == 0) { nodedata.mass = true; } else if(strcmp(type, "eigen") == 0) { numdata = OPS_GetNumRemainingInputArgs(); if(numdata < 1) { opserr<<"WARNING: eigen needs 'numEigenvector'\n"; return 0; } numdata = 1; if(OPS_GetIntInput(&numdata,&nodedata.numeigen) < 0) { opserr << "WARNING: failed to read numeigen\n"; return 0; } } else if(strcmp(type, "-precision") == 0) { numdata = OPS_GetNumRemainingInputArgs(); if(numdata < 1) { opserr<<"WARNING: needs precision \n"; return 0; } numdata = 1; if(OPS_GetIntInput(&numdata,&precision) < 0) { opserr << "WARNING: failed to read precision\n"; return 0; } } else if(strcmp(type, "eleResponse") == 0) { numdata = OPS_GetNumRemainingInputArgs(); if(numdata < 1) { opserr<<"WANRING: elementResponse needs 'argc','argv'\n"; return 0; } PVDRecorder::EleData edata; numdata = OPS_GetNumRemainingInputArgs(); edata.resize(numdata); for(int i=0; i<numdata; i++) { edata[i] = OPS_GetString(); } eledata.push_back(edata); } else if(strcmp(type, "-dT") == 0) { numdata = OPS_GetNumRemainingInputArgs(); if(numdata < 1) { opserr<<"WARNING: needs dT \n"; return 0; } numdata = 1; if(OPS_GetDoubleInput(&numdata,&dT) < 0) { opserr << "WARNING: failed to read dT\n"; return 0; } if (dT < 0) dT = 0; } numdata = OPS_GetNumRemainingInputArgs(); } // create recorder return new PVDRecorder(name,nodedata,eledata,indent,precision,dT); } PVDRecorder::PVDRecorder(const char *name, const NodeData& ndata, const std::vector<EleData>& edata, int ind, int pre, double dt) :Recorder(RECORDER_TAGS_PVDRecorder), indentsize(ind), precision(pre), indentlevel(0), pathname(), basename(), timestep(), timeparts(), theFile(), quota('\"'), parts(), 
nodedata(ndata), eledata(edata), theDomain(0), partnum(), dT(dt), nextTime(0.0) { PVDRecorder::setVTKType(); getfilename(name); } PVDRecorder::PVDRecorder() :Recorder(RECORDER_TAGS_PVDRecorder) { } PVDRecorder::~PVDRecorder() { } // PVD // part 0 - all nodes // part 1 - all particles // part n - element type n int PVDRecorder::record(int ctag, double timestamp) { if (dT>0 && nextTime>timestamp) { return 0; } if (dT > 0) { nextTime = timestamp+dT; } if(precision==0) return 0; // get current time timestep.push_back(timestamp); // save vtu file if(vtu() < 0) return -1; // save pvd file if(pvd() < 0) return -1; return 0; } int PVDRecorder::restart() { timestep.clear(); timeparts.clear(); return 0; } int PVDRecorder::domainChanged() { return 0; } int PVDRecorder::setDomain(Domain& domain) { theDomain = &domain; return 0; } int PVDRecorder::pvd() { // open pvd file theFile.close(); std::string pvdname = pathname+basename+".pvd"; theFile.open(pvdname.c_str(), std::ios::trunc|std::ios::out); if(theFile.fail()) { opserr<<"WARNING: Failed to open file "<<pvdname.c_str()<<"\n"; return -1; } theFile.precision(precision); theFile << std::scientific; // header theFile<<"<?xml version="<<quota<<"1.0"<<quota<<"?>\n"; theFile<<"<VTKFile type="<<quota<<"Collection"<<quota; theFile<<" compressor="<<quota<<"vtkZLibDataCompressor"<<quota; theFile<<">\n"; // collection this->incrLevel(); this->indent(); theFile<<"<Collection>\n"; // all data files this->incrLevel(); for(int i=0; i<(int)timestep.size(); i++) { double t = timestep[i]; const ID& partno = timeparts[i]; for(int j=0; j<partno.Size(); j++) { this->indent(); theFile<<"<DataSet timestep="<<quota<<t<<quota; theFile<<" group="<<quota<<quota; theFile<<" part="<<quota<<partno(j)<<quota; theFile<<" file="<<quota<<basename.c_str(); theFile<<"/"<<basename.c_str()<<"_T"<<t<<"_P"; theFile<<partno(j)<<".vtu"<<quota; theFile<<"/>\n"; } } // end colloection this->decrLevel(); this->indent(); theFile<<"</Collection>\n"; // end VTKFile 
this->decrLevel(); this->indent(); theFile<<"</VTKFile>\n"; theFile.close(); return 0; } int PVDRecorder::vtu() { if (theDomain == 0) { opserr << "WARNING: failed to get domain -- PVDRecorder::vtu\n"; return -1; } // get node ndf NodeIter& theNodes = theDomain->getNodes(); Node* theNode = 0; int nodendf = 0; while ((theNode = theNodes()) != 0) { if(nodendf < theNode->getNumberDOF()) { nodendf = theNode->getNumberDOF(); } } if (nodendf < 3) { nodendf = 3; } // get parts this->getParts(); // get background mesh VInt gtags; TaggedObjectIter& meshes = OPS_getAllMesh(); Mesh* mesh = 0; while((mesh = dynamic_cast<Mesh*>(meshes())) != 0) { ParticleGroup* group = dynamic_cast<ParticleGroup*>(mesh); if (group == 0) { continue; } gtags.push_back(group->getTag()); } // part 0: all nodes ID partno(0, (int)parts.size()+(int)gtags.size()+1); partno[0] = 0; if (this->savePart0(nodendf) < 0) { return -1; } // particle parts for (int i=0; i<(int)gtags.size(); ++i) { partno[1+i] = 1+i; if (this->savePartParticle(1+i, gtags[i],nodendf) < 0) { return -1; } } // save other parts // int index = 1; for(std::map<int,ID>::iterator it=parts.begin(); it!=parts.end(); it++) { // int& no = partnum[it->first]; // if (no == 0) { // no = (int)partnum.size(); // } int no = partno.Size(); partno[no] = no; if(this->savePart(no,it->first,nodendf) < 0) return -1; } timeparts.push_back(partno); // clear parts parts.clear(); return 0; } void PVDRecorder::getParts() { if (theDomain == 0) { opserr<<"WARNING: setDomain has not been called -- PVDRecorder\n"; return; } ElementIter* eiter = &(theDomain->getElements()); Element* theEle = 0; while((theEle = (*eiter)()) != 0) { int ctag = theEle->getClassTag(); int etag = theEle->getTag(); parts[ctag].insert(etag); } } int PVDRecorder::savePart0(int nodendf) { if (theDomain == 0) { opserr<<"WARNING: setDomain has not been called -- PVDRecorder\n"; return -1; } // get time and part std::stringstream ss; ss.precision(precision); ss << std::scientific; ss << 0 << ' 
' << timestep.back(); std::string stime, spart; ss >> spart >> stime; // open file theFile.close(); std::string vtuname = pathname+basename+"/"+basename+"_T"+stime+"_P"+spart+".vtu"; theFile.open(vtuname.c_str(), std::ios::trunc|std::ios::out); if(theFile.fail()) { opserr<<"WARNING: Failed to open file "<<vtuname.c_str()<<"\n"; return -1; } theFile.precision(precision); theFile << std::scientific; // header theFile<<"<?xml version="<<quota<<"1.0"<<quota<<"?>\n"; theFile<<"<VTKFile type="<<quota<<"UnstructuredGrid"<<quota; theFile<<" version="<<quota<<"1.0"<<quota; theFile<<" byte_order="<<quota<<"LittleEndian"<<quota; theFile<<" compressor="<<quota<<"vtkZLibDataCompressor"<<quota; theFile<<">\n"; this->incrLevel(); this->indent(); theFile<<"<UnstructuredGrid>\n"; // get pressure nodes ID ptags(0,theDomain->getNumPCs()); Pressure_ConstraintIter& thePCs = theDomain->getPCs(); Pressure_Constraint* thePC = 0; while ((thePC = thePCs()) != 0) { Node* pnode = thePC->getPressureNode(); if (pnode != 0) { ptags.insert(pnode->getTag()); } } // get all nodes except pressure nodes std::vector<Node*> nodes; NodeIter& theNodes = theDomain->getNodes(); Node* theNode = 0; while ((theNode = theNodes()) != 0) { int nd = theNode->getTag(); if (ptags.getLocationOrdered(nd) < 0) { nodes.push_back(theNode); } } // Piece this->incrLevel(); this->indent(); theFile<<"<Piece NumberOfPoints="<<quota<<(int)nodes.size()<<quota; theFile<<" NumberOfCells="<<quota<<1<<quota<<">\n"; // points this->incrLevel(); this->indent(); theFile<<"<Points>\n"; // points header this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Points"<<quota; theFile<<" NumberOfComponents="<<quota<<3<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; // points coordinates this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& crds = nodes[i]->getCrds(); this->indent(); for(int j=0; j<3; j++) { if(j < crds.Size()) { theFile<<crds(j)<<' 
'; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } // points footer this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; this->decrLevel(); this->indent(); theFile<<"</Points>\n"; // cells this->indent(); theFile<<"<Cells>\n"; // connectivity this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"connectivity"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { this->indent(); theFile<<i<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // offsets this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"offsets"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); this->indent(); theFile<<(int)nodes.size()<<std::endl; this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // types this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"types"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); this->indent(); theFile<<VTK_POLY_VERTEX<<std::endl; this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // cells footer this->decrLevel(); this->indent(); theFile<<"</Cells>\n"; // point data this->indent(); theFile<<"<PointData>\n"; // node tags this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"NodeTag"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { this->indent(); theFile<<nodes[i]->getTag()<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // node velocity if(nodedata.vel) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Velocity"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; 
this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& vel = nodes[i]->getTrialVel(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node displacement if(nodedata.disp) { // all displacement // this->indent(); // theFile<<"<DataArray type="<<quota<<"Float32"<<quota; // theFile<<" Name="<<quota<<"AllDisplacement"<<quota; // theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; // theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; // this->incrLevel(); // for(int i=0; i<(int)nodes.size(); i++) { // const Vector& vel = nodes[i]->getTrialDisp(); // this->indent(); // for(int j=0; j<nodendf; j++) { // if(j < vel.Size()) { // theFile<<vel(j)<<' '; // } else { // theFile<<0.0<<' '; // } // } // theFile<<std::endl; // } // this->decrLevel(); // this->indent(); // theFile<<"</DataArray>\n"; // displacement this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Displacement"<<quota; theFile<<" NumberOfComponents="<<quota<<3<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& vel = nodes[i]->getTrialDisp(); this->indent(); for(int j=0; j<3; j++) { if(j < vel.Size() && j < nodes[i]->getCrds().Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node incr displacement if(nodedata.incrdisp) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"IncrDisplacement"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& vel = nodes[i]->getIncrDisp(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < 
vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node acceleration if(nodedata.accel) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Acceleration"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& vel = nodes[i]->getTrialAccel(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node pressure if(nodedata.pressure) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Pressure"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { double pressure = 0.0; Pressure_Constraint* thePC = theDomain->getPressure_Constraint(nodes[i]->getTag()); if(thePC != 0) { pressure = thePC->getPressure(); } this->indent(); theFile<<pressure<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node reaction if(nodedata.reaction) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Reaction"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& vel = nodes[i]->getReaction(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node unbalanced load if(nodedata.unbalanced) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" 
Name="<<quota<<"UnbalancedLoad"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Vector& vel = nodes[i]->getUnbalancedLoad(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node mass if(nodedata.mass) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"NodeMass"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Matrix& mat = nodes[i]->getMass(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < mat.noRows()) { theFile<<mat(j,j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node eigen vector for(int k=0; k<nodedata.numeigen; k++) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"EigenVector"<<k+1<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)nodes.size(); i++) { const Matrix& eigens = nodes[i]->getEigenvectors(); if(k >= eigens.noCols()) { opserr<<"WARNING: eigenvector "<<k+1<<" is too large\n"; return -1; } this->indent(); for(int j=0; j<nodendf; j++) { if(j < eigens.noRows()) { theFile<<eigens(j,k)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // point data footer this->decrLevel(); this->indent(); theFile<<"</PointData>\n"; // cell data this->indent(); theFile<<"<CellData>\n"; // element tags this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; 
theFile<<" Name="<<quota<<"ElementTag"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); this->indent(); theFile<<0<<std::endl; this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // cell data footer this->decrLevel(); this->indent(); theFile<<"</CellData>\n"; // footer this->decrLevel(); this->indent(); theFile<<"</Piece>\n"; this->decrLevel(); this->indent(); theFile<<"</UnstructuredGrid>\n"; this->decrLevel(); this->indent(); theFile<<"</VTKFile>\n"; theFile.close(); return 0; } int PVDRecorder::savePartParticle(int pno, int bgtag, int nodendf) { if (theDomain == 0) { opserr<<"WARNING: setDomain has not been called -- PVDRecorder\n"; return -1; } // get time and part std::stringstream ss; ss.precision(precision); ss << std::scientific; ss << pno << ' ' << timestep.back(); std::string stime, spart; ss >> spart >> stime; // open file theFile.close(); std::string vtuname = pathname+basename+"/"+basename+"_T"+stime+"_P"+spart+".vtu"; theFile.open(vtuname.c_str(), std::ios::trunc|std::ios::out); if(theFile.fail()) { opserr<<"WARNING: Failed to open file "<<vtuname.c_str()<<"\n"; return -1; } theFile.precision(precision); theFile << std::scientific; // header theFile<<"<?xml version="<<quota<<"1.0"<<quota<<"?>\n"; theFile<<"<VTKFile type="<<quota<<"UnstructuredGrid"<<quota; theFile<<" version="<<quota<<"1.0"<<quota; theFile<<" byte_order="<<quota<<"LittleEndian"<<quota; theFile<<" compressor="<<quota<<"vtkZLibDataCompressor"<<quota; theFile<<">\n"; this->incrLevel(); this->indent(); theFile<<"<UnstructuredGrid>\n"; // get particles in group VParticle particles; ParticleGroup* group = dynamic_cast<ParticleGroup*>(OPS_getMesh(bgtag)); if (group == 0) { opserr << "WARNING: particle group "<<bgtag<<"doesn't exist\n"; return -1; } for(int j=0; j<group->numParticles(); j++) { Particle* p = group->getParticle(j); if(p == 0) continue; particles.push_back(p); } // Piece this->incrLevel(); this->indent(); theFile<<"<Piece 
NumberOfPoints="<<quota<<(int)particles.size()<<quota; theFile<<" NumberOfCells="<<quota<<1<<quota<<">\n"; // points this->incrLevel(); this->indent(); theFile<<"<Points>\n"; // points header this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Points"<<quota; theFile<<" NumberOfComponents="<<quota<<3<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; // points coordinates this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { const VDouble& crds = particles[i]->getCrds(); this->indent(); for(int j=0; j<3; j++) { if(j < (int)crds.size()) { theFile<<crds[j]<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } // points footer this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; this->decrLevel(); this->indent(); theFile<<"</Points>\n"; // cells this->indent(); theFile<<"<Cells>\n"; // connectivity this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"connectivity"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); theFile<<i<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // offsets this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"offsets"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); this->indent(); theFile<<(int)particles.size()<<std::endl; this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // types this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"types"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); this->indent(); theFile<<VTK_POLY_VERTEX<<std::endl; this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // cells footer this->decrLevel(); this->indent(); theFile<<"</Cells>\n"; // point data this->indent(); 
theFile<<"<PointData>\n"; // node tags this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"NodeTag"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); theFile<<particles[i]->getTag()<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // node velocity if(nodedata.vel) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Velocity"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { const VDouble& vel = particles[i]->getVel(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < (int)vel.size()) { theFile<<vel[j]<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node displacement if(nodedata.disp) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Displacement"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node incr displacement if(nodedata.incrdisp) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"IncrDisplacement"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node acceleration 
if(nodedata.accel) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Acceleration"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node pressure if(nodedata.pressure) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Pressure"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { double pressure = particles[i]->getPressure(); this->indent(); theFile<<pressure<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node reaction if(nodedata.reaction) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Reaction"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node unbalanced load if(nodedata.unbalanced) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"UnbalancedLoad"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node mass if(nodedata.mass) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" 
Name="<<quota<<"NodeMass"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node eigen vector for(int k=0; k<nodedata.numeigen; k++) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"EigenVector"<<k+1<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<(int)particles.size(); i++) { this->indent(); for(int j=0; j<nodendf; j++) { theFile<<0.0<<' '; } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // point data footer this->decrLevel(); this->indent(); theFile<<"</PointData>\n"; // cell data this->indent(); theFile<<"<CellData>\n"; // element tags this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"ElementTag"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); this->indent(); theFile<<0<<std::endl; this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // cell data footer this->decrLevel(); this->indent(); theFile<<"</CellData>\n"; // footer this->decrLevel(); this->indent(); theFile<<"</Piece>\n"; this->decrLevel(); this->indent(); theFile<<"</UnstructuredGrid>\n"; this->decrLevel(); this->indent(); theFile<<"</VTKFile>\n"; theFile.close(); return 0; } int PVDRecorder::savePart(int partno, int ctag, int nodendf) { if (theDomain == 0) { opserr<<"WARNING: setDomain has not been called -- PVDRecorder\n"; return -1; } // get time and part std::stringstream ss; ss.precision(precision); ss << std::scientific; ss << partno << ' ' << timestep.back(); std::string stime, spart; ss >> spart >> stime; // open file 
theFile.close(); std::string vtuname = pathname+basename+"/"+basename+"_T"+stime+"_P"+spart+".vtu"; theFile.open(vtuname.c_str(), std::ios::trunc|std::ios::out); if(theFile.fail()) { opserr<<"WARNING: Failed to open file "<<vtuname.c_str()<<"\n"; return -1; } theFile.precision(precision); theFile << std::scientific; // header theFile<<"<?xml version="<<quota<<"1.0"<<quota<<"?>\n"; theFile<<"<VTKFile type="<<quota<<"UnstructuredGrid"<<quota; theFile<<" version="<<quota<<"1.0"<<quota; theFile<<" byte_order="<<quota<<"LittleEndian"<<quota; theFile<<" compressor="<<quota<<"vtkZLibDataCompressor"<<quota; theFile<<">\n"; this->incrLevel(); this->indent(); theFile<<"<UnstructuredGrid>\n"; // get nodes const ID& eletags = parts[ctag]; ID ndtags(0,eletags.Size()*3); std::vector<Element*> eles(eletags.Size()); int numelenodes = 0; int increlenodes = 1; for(int i=0; i<eletags.Size(); i++) { eles[i] = theDomain->getElement(eletags(i)); if (eles[i] == 0) { opserr<<"WARNING: element "<<eletags(i)<<" is not defined--pvdRecorder\n"; return -1; } const ID& elenodes = eles[i]->getExternalNodes(); if(numelenodes == 0) { numelenodes = elenodes.Size(); if(ctag==ELE_TAG_PFEMElement2D|| ctag==ELE_TAG_PFEMElement2DCompressible|| ctag==ELE_TAG_PFEMElement2DBubble|| ctag==ELE_TAG_PFEMElement2Dmini || ctag==ELE_TAG_MINI || ctag==ELE_TAG_PFEMElement2DQuasi) { numelenodes = 3; increlenodes = 2; } else if (ctag==ELE_TAG_TaylorHood2D) { numelenodes = 6; increlenodes = 1; } else if (ctag==ELE_TAG_PFEMElement3DBubble) { numelenodes = 4; increlenodes = 2; } } for(int j=0; j<numelenodes; j++) { ndtags.insert(elenodes(j*increlenodes)); } } // Piece this->incrLevel(); this->indent(); theFile<<"<Piece NumberOfPoints="<<quota<<ndtags.Size()<<quota; theFile<<" NumberOfCells="<<quota<<eletags.Size()<<quota<<">\n"; // points this->incrLevel(); this->indent(); theFile<<"<Points>\n"; // points header this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" 
Name="<<quota<<"Points"<<quota; theFile<<" NumberOfComponents="<<quota<<3<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; // points coordinates this->incrLevel(); std::vector<Node*> nodes(ndtags.Size()); for(int i=0; i<ndtags.Size(); i++) { nodes[i] = theDomain->getNode(ndtags(i)); if(nodes[i] == 0) { opserr<<"WARNIG: Node "<<ndtags(i)<<" is not defined -- pvdRecorder\n"; return -1; } const Vector& crds = nodes[i]->getCrds(); this->indent(); for(int j=0; j<3; j++) { if(j < crds.Size()) { theFile<<crds(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } // points footer this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; this->decrLevel(); this->indent(); theFile<<"</Points>\n"; // cells this->indent(); theFile<<"<Cells>\n"; // connectivity this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"connectivity"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<eletags.Size(); i++) { const ID& elenodes = eles[i]->getExternalNodes(); this->indent(); if (ctag==ELE_TAG_TaylorHood2D) { // for 2nd order element, the order of mid nodes // is different to VTK int vtkOrder[] = {0,1,2,5,3,4}; for(int j=0; j<numelenodes; j++) { theFile<<ndtags.getLocationOrdered(elenodes(vtkOrder[j]*increlenodes))<<' '; } } else { for(int j=0; j<numelenodes; j++) { theFile<<ndtags.getLocationOrdered(elenodes(j*increlenodes))<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // offsets this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"offsets"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); int offset = numelenodes; for(int i=0; i<eletags.Size(); i++) { this->indent(); theFile<<offset<<std::endl; offset += numelenodes; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // types this->indent(); theFile<<"<DataArray 
type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"types"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); int type = vtktypes[ctag]; if (type == 0) { opserr<<"WARNING: the element type cannot be assigned a VTK type\n"; return -1; } for(int i=0; i<eletags.Size(); i++) { this->indent(); theFile<<type<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // cells footer this->decrLevel(); this->indent(); theFile<<"</Cells>\n"; // point data this->indent(); theFile<<"<PointData>\n"; // node tags this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"NodeTag"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { this->indent(); theFile<<ndtags(i)<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // node velocity if(nodedata.vel) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Velocity"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Vector& vel = nodes[i]->getTrialVel(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node displacement if(nodedata.disp) { // all displacement // this->indent(); // theFile<<"<DataArray type="<<quota<<"Float32"<<quota; // theFile<<" Name="<<quota<<"AllDisplacement"<<quota; // theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; // theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; // this->incrLevel(); // for(int i=0; i<ndtags.Size(); i++) { // const Vector& vel = nodes[i]->getTrialDisp(); // this->indent(); // for(int j=0; j<nodendf; j++) { // if(j < vel.Size()) { // theFile<<vel(j)<<' '; // } else 
{ // theFile<<0.0<<' '; // } // } // theFile<<std::endl; // } // this->decrLevel(); // this->indent(); // theFile<<"</DataArray>\n"; // displacement this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Displacement"<<quota; theFile<<" NumberOfComponents="<<quota<<3<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Vector& vel = nodes[i]->getTrialDisp(); this->indent(); for(int j=0; j<3; j++) { if(j < vel.Size() && j < nodes[i]->getCrds().Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node incr displacement if(nodedata.incrdisp) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"IncrDisplacement"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Vector& vel = nodes[i]->getIncrDisp(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node acceleration if(nodedata.accel) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Acceleration"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Vector& vel = nodes[i]->getTrialAccel(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node pressure if(nodedata.pressure) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" 
Name="<<quota<<"Pressure"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { double pressure = 0.0; Pressure_Constraint* thePC = theDomain->getPressure_Constraint(ndtags(i)); if(thePC != 0) { pressure = thePC->getPressure(); } this->indent(); theFile<<pressure<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node reaction if(nodedata.reaction) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"Reaction"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Vector& vel = nodes[i]->getReaction(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node unbalanced load if(nodedata.unbalanced) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"UnbalancedLoad"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Vector& vel = nodes[i]->getUnbalancedLoad(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < vel.Size()) { theFile<<vel(j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node mass if(nodedata.mass) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"NodeMass"<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Matrix& mat = nodes[i]->getMass(); this->indent(); for(int j=0; j<nodendf; j++) { if(j < mat.noRows()) { 
theFile<<mat(j,j)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // node eigen vector for(int k=0; k<nodedata.numeigen; k++) { this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<"EigenVector"<<k+1<<quota; theFile<<" NumberOfComponents="<<quota<<nodendf<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<ndtags.Size(); i++) { const Matrix& eigens = nodes[i]->getEigenvectors(); if(k >= eigens.noCols()) { opserr<<"WARNING: eigenvector "<<k+1<<" is too large\n"; return -1; } this->indent(); for(int j=0; j<nodendf; j++) { if(j < eigens.noRows()) { theFile<<eigens(j,k)<<' '; } else { theFile<<0.0<<' '; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // point data footer this->decrLevel(); this->indent(); theFile<<"</PointData>\n"; // cell data this->indent(); theFile<<"<CellData>\n"; // element tags this->incrLevel(); this->indent(); theFile<<"<DataArray type="<<quota<<"Int32"<<quota; theFile<<" Name="<<quota<<"ElementTag"<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int i=0; i<eletags.Size(); i++) { this->indent(); theFile<<eletags(i)<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; // element response for(int i=0; i<(int)eledata.size(); i++) { if(eletags.Size() == 0) break; // check data int argc = (int)eledata[i].size(); if(argc == 0) continue; std::vector<const char*> argv(argc); for(int j=0; j<argc; j++) { argv[j] = eledata[i][j].c_str(); } const Vector* data =theDomain->getElementResponse(eletags(0),&(argv[0]),argc); if(data==0) continue; int eressize = data->Size(); if(eressize == 0) continue; // save data this->indent(); theFile<<"<DataArray type="<<quota<<"Float32"<<quota; theFile<<" Name="<<quota<<eles[0]->getClassType(); for(int j=0; j<argc; j++) { theFile<<argv[j]; } theFile<<quota; theFile<<" 
NumberOfComponents="<<quota<<eressize<<quota; theFile<<" format="<<quota<<"ascii"<<quota<<">\n"; this->incrLevel(); for(int j=0; j<eletags.Size(); j++) { data=theDomain->getElementResponse(eletags(j),&(argv[0]),argc); if(data==0) { opserr<<"WARNING: can't get response for element "<<eletags(j)<<"\n"; return -1; } this->indent(); for(int k=0; k<eressize; k++) { if (k>=data->Size()) { theFile<<0.0<<" "; } else { theFile<<(*data)(k)<<" "; } } theFile<<std::endl; } this->decrLevel(); this->indent(); theFile<<"</DataArray>\n"; } // cell data footer this->decrLevel(); this->indent(); theFile<<"</CellData>\n"; // footer this->decrLevel(); this->indent(); theFile<<"</Piece>\n"; this->decrLevel(); this->indent(); theFile<<"</UnstructuredGrid>\n"; this->decrLevel(); this->indent(); theFile<<"</VTKFile>\n"; theFile.close(); return 0; } void PVDRecorder::indent() { for(int i=0; i<indentlevel*indentsize; i++) { theFile<<' '; } } int PVDRecorder::sendSelf(int commitTag, Channel &theChannel) { return 0; } int PVDRecorder::recvSelf(int commitTag, Channel &theChannel, FEM_ObjectBroker &theBroker) { return 0; } void PVDRecorder::setVTKType() { if (vtktypes.empty() == false) { return; } vtktypes[ELE_TAG_Subdomain] = VTK_POLY_VERTEX; vtktypes[ELEMENT_TAGS_WrapperElement] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ElasticBeam2d] = VTK_LINE; vtktypes[ELE_TAG_ModElasticBeam2d] = VTK_LINE; vtktypes[ELE_TAG_ElasticBeam3d] = VTK_LINE; vtktypes[ELE_TAG_Beam2d] = VTK_LINE; vtktypes[ELE_TAG_beam2d02] = VTK_LINE; vtktypes[ELE_TAG_beam2d03] = VTK_LINE; vtktypes[ELE_TAG_beam2d04] = VTK_LINE; vtktypes[ELE_TAG_beam3d01] = VTK_LINE; vtktypes[ELE_TAG_beam3d02] = VTK_LINE; vtktypes[ELE_TAG_Truss] = VTK_LINE; vtktypes[ELE_TAG_TrussSection] = VTK_LINE; vtktypes[ELE_TAG_CorotTruss] = VTK_LINE; vtktypes[ELE_TAG_CorotTrussSection] = VTK_LINE; vtktypes[ELE_TAG_fElmt05] = VTK_LINE; vtktypes[ELE_TAG_fElmt02] = VTK_LINE; vtktypes[ELE_TAG_MyTruss] = VTK_LINE; vtktypes[ELE_TAG_ZeroLength] = VTK_POLY_VERTEX; 
vtktypes[ELE_TAG_ZeroLengthSection] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthND] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthContact2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthContact3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthContactASDimplex] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthContactNTS2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthInterface2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_CoupledZeroLength] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ZeroLengthRocking] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_NLBeamColumn2d] = VTK_LINE; vtktypes[ELE_TAG_NLBeamColumn3d] = VTK_LINE; vtktypes[ELE_TAG_LargeDispBeamColumn3d] = VTK_LINE; vtktypes[ELE_TAG_FourNodeQuad] = VTK_QUAD; vtktypes[ELE_TAG_FourNodeQuad3d] = VTK_QUAD; vtktypes[ELE_TAG_Tri31] = VTK_TRIANGLE; vtktypes[ELE_TAG_SixNodeTri] = VTK_TRIANGLE; vtktypes[ELE_TAG_BeamWithHinges2d] = VTK_LINE; vtktypes[ELE_TAG_BeamWithHinges3d] = VTK_LINE; vtktypes[ELE_TAG_EightNodeBrick] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_TwentyNodeBrick] = VTK_QUADRATIC_HEXAHEDRON; vtktypes[ELE_TAG_EightNodeBrick_u_p_U] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_TwentyNodeBrick_u_p_U] = VTK_QUADRATIC_HEXAHEDRON; vtktypes[ELE_TAG_FourNodeQuadUP] = VTK_QUAD; vtktypes[ELE_TAG_TotalLagrangianFD20NodeBrick] = VTK_QUADRATIC_HEXAHEDRON; vtktypes[ELE_TAG_TotalLagrangianFD8NodeBrick] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_EightNode_LDBrick_u_p] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_EightNode_Brick_u_p] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_TwentySevenNodeBrick] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_BrickUP] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_Nine_Four_Node_QuadUP] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_Twenty_Eight_Node_BrickUP] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_Twenty_Node_Brick] = VTK_QUADRATIC_HEXAHEDRON; vtktypes[ELE_TAG_BBarFourNodeQuadUP] = VTK_QUAD; vtktypes[ELE_TAG_BBarBrickUP] = VTK_QUAD; vtktypes[ELE_TAG_PlateMITC4] = VTK_QUAD; vtktypes[ELE_TAG_ShellMITC4] = VTK_QUAD; vtktypes[ELE_TAG_ShellMITC9] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ASDShellQ4] = 
VTK_QUAD; vtktypes[ELE_TAG_ASDShellT3] = VTK_TRIANGLE; vtktypes[ELE_TAG_Plate1] = VTK_QUAD; vtktypes[ELE_TAG_Brick] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_BbarBrick] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_FLBrick] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_EnhancedQuad] = VTK_QUAD; vtktypes[ELE_TAG_ConstantPressureVolumeQuad] = VTK_QUAD; vtktypes[ELE_TAG_NineNodeMixedQuad] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_NineNodeQuad] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_EightNodeQuad] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_DispBeamColumn2d] = VTK_LINE; vtktypes[ELE_TAG_TimoshenkoBeamColumn2d] = VTK_LINE; vtktypes[ELE_TAG_DispBeamColumn3d] = VTK_LINE; vtktypes[ELE_TAG_DispBeamColumnWarping3d] = VTK_LINE; vtktypes[ELE_TAG_HingedBeam2d] = VTK_LINE; vtktypes[ELE_TAG_HingedBeam3d] = VTK_LINE; vtktypes[ELE_TAG_TwoPointHingedBeam2d] = VTK_LINE; vtktypes[ELE_TAG_TwoPointHingedBeam3d] = VTK_LINE; vtktypes[ELE_TAG_OnePointHingedBeam2d] = VTK_LINE; vtktypes[ELE_TAG_OnePointHingedBeam3d] = VTK_LINE; vtktypes[ELE_TAG_BeamColumnJoint2d] = VTK_QUAD; vtktypes[ELE_TAG_BeamColumnJoint3d] = VTK_QUAD; vtktypes[ELE_TAG_ForceBeamColumn2d] = VTK_LINE; vtktypes[ELE_TAG_ForceBeamColumnWarping2d] = VTK_LINE; vtktypes[ELE_TAG_ForceBeamColumn3d] = VTK_LINE; vtktypes[ELE_TAG_ElasticForceBeamColumn2d] = VTK_LINE; vtktypes[ELE_TAG_ElasticForceBeamColumnWarping2d] = VTK_LINE; vtktypes[ELE_TAG_ElasticForceBeamColumn3d] = VTK_LINE; vtktypes[ELE_TAG_ForceBeamColumnCBDI2d] = VTK_LINE; vtktypes[ELE_TAG_ForceBeamColumnCBDI3d] = VTK_LINE; vtktypes[ELE_TAG_DispBeamColumn2dInt] = VTK_LINE; vtktypes[ELE_TAG_InternalSpring] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_SimpleJoint2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_Joint2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_Joint3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ElastomericBearingPlasticity3d] = VTK_LINE; vtktypes[ELE_TAG_ElastomericBearingPlasticity2d] = VTK_LINE; vtktypes[ELE_TAG_TwoNodeLink] = VTK_LINE; vtktypes[ELE_TAG_ActuatorCorot] = VTK_LINE; vtktypes[ELE_TAG_Actuator] = VTK_LINE; 
vtktypes[ELE_TAG_Adapter] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ElastomericBearingBoucWen2d] = VTK_LINE; vtktypes[ELE_TAG_ElastomericBearingBoucWen3d] = VTK_LINE; vtktypes[ELE_TAG_FlatSliderSimple2d] = VTK_LINE; vtktypes[ELE_TAG_FlatSliderSimple3d] = VTK_LINE; vtktypes[ELE_TAG_FlatSlider2d] = VTK_LINE; vtktypes[ELE_TAG_FlatSlider3d] = VTK_LINE; vtktypes[ELE_TAG_SingleFPSimple2d] = VTK_LINE; vtktypes[ELE_TAG_SingleFPSimple3d] = VTK_LINE; vtktypes[ELE_TAG_SingleFP2d] = VTK_LINE; vtktypes[ELE_TAG_SingleFP3d] = VTK_LINE; vtktypes[ELE_TAG_DoubleFPSimple2d] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_DoubleFPSimple3d] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_DoubleFP2d] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_DoubleFP3d] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_TripleFPSimple2d] = VTK_LINE; vtktypes[ELE_TAG_TripleFPSimple3d] = VTK_LINE; vtktypes[ELE_TAG_TripleFP2d] = VTK_LINE; vtktypes[ELE_TAG_TripleFP3d] = VTK_LINE; vtktypes[ELE_TAG_MultiFP2d] = VTK_LINE; vtktypes[ELE_TAG_MultiFP3d] = VTK_LINE; vtktypes[ELE_TAG_GenericClient] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_GenericCopy] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_PY_MACRO2D] = VTK_LINE; vtktypes[ELE_TAG_SimpleContact2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_SimpleContact3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_BeamContact3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_SurfaceLoad] = VTK_QUAD; vtktypes[ELE_TAG_BeamContact2D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_BeamEndContact3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_SSPquad] = VTK_QUAD; vtktypes[ELE_TAG_SSPquadUP] = VTK_QUAD; vtktypes[ELE_TAG_SSPbrick] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_SSPbrickUP] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_BeamContact2Dp] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_BeamContact3Dp] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_BeamEndContact3Dp] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_Quad4FiberOverlay] = VTK_QUAD; vtktypes[ELE_TAG_Brick8FiberOverlay] = VTK_HEXAHEDRON; vtktypes[ELE_TAG_QuadBeamEmbedContact] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_DispBeamColumn2dThermal] = VTK_LINE; vtktypes[ELE_TAG_TPB1D] = 
VTK_LINE; vtktypes[ELE_TAG_TFP_Bearing] = VTK_LINE; vtktypes[ELE_TAG_TFP_Bearing2d] = VTK_LINE; vtktypes[ELE_TAG_TripleFrictionPendulum] = VTK_LINE; vtktypes[ELE_TAG_PFEMElement2D] = VTK_TRIANGLE; vtktypes[ELE_TAG_FourNodeQuad02] = VTK_QUAD; vtktypes[ELE_TAG_cont2d01] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_cont2d02] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_CST] = VTK_TRIANGLE; vtktypes[ELE_TAG_Truss2] = VTK_LINE; vtktypes[ELE_TAG_CorotTruss2] = VTK_LINE; vtktypes[ELE_Tag_ZeroLengthImpact3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_PFEMElement3D] = VTK_TETRA; vtktypes[ELE_TAG_PFEMElement2DCompressible] = VTK_TRIANGLE; vtktypes[ELE_TAG_PFEMElement2DBubble] = VTK_TRIANGLE; vtktypes[ELE_TAG_PFEMElement2Dmini] = VTK_TRIANGLE; vtktypes[ELE_TAG_ElasticTimoshenkoBeam2d] = VTK_LINE; vtktypes[ELE_TAG_ElasticTimoshenkoBeam3d] = VTK_LINE; vtktypes[ELE_TAG_ElastomericBearingUFRP2d] = VTK_LINE; vtktypes[ELE_TAG_ElastomericBearingUFRP3d] = VTK_LINE; vtktypes[ELE_TAG_RJWatsonEQS2d] = VTK_LINE; vtktypes[ELE_TAG_RJWatsonEQS3d] = VTK_LINE; vtktypes[ELE_TAG_HDR] = VTK_LINE; vtktypes[ELE_TAG_ElastomericX] = VTK_LINE; vtktypes[ELE_TAG_LeadRubberX] = VTK_LINE; vtktypes[ELE_TAG_PileToe3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_N4BiaxialTruss] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_ShellDKGQ] = VTK_QUAD; vtktypes[ELE_TAG_ShellNLDKGQ] = VTK_QUAD; vtktypes[ELE_TAG_MultipleShearSpring] = VTK_LINE; vtktypes[ELE_TAG_MultipleNormalSpring] = VTK_LINE; vtktypes[ELE_TAG_KikuchiBearing] = VTK_LINE; vtktypes[ELE_TAG_YamamotoBiaxialHDR] = VTK_LINE; vtktypes[ELE_TAG_MVLEM] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_SFI_MVLEM] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_MVLEM_3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_SFI_MVLEM_3D] = VTK_POLY_VERTEX; vtktypes[ELE_TAG_PFEMElement2DFIC] = VTK_TRIANGLE; vtktypes[ELE_TAG_TaylorHood2D] = VTK_QUADRATIC_TRIANGLE; vtktypes[ELE_TAG_PFEMElement2DQuasi] = VTK_TRIANGLE; vtktypes[ELE_TAG_MINI] = VTK_TRIANGLE; vtktypes[ELE_TAG_CatenaryCable] = VTK_LINE; vtktypes[ELE_TAG_FourNodeTetrahedron] = VTK_TETRA; 
vtktypes[ELE_TAG_PFEMElement3DBubble] = VTK_TETRA; vtktypes[ELE_TAG_TriSurfaceLoad] = VTK_TRIANGLE; vtktypes[ELE_TAG_ShellANDeS] = VTK_TRIANGLE; vtktypes[ELE_TAG_ShellDKGT] = VTK_TRIANGLE; vtktypes[ELE_TAG_ShellNLDKGT] = VTK_TRIANGLE; vtktypes[ELE_TAG_PFEMContact2D] = VTK_TRIANGLE; vtktypes[ELE_TAG_InertiaTruss] = VTK_LINE; vtktypes[ELE_TAG_ASDAbsorbingBoundary2D] = VTK_QUAD; vtktypes[ELE_TAG_ASDAbsorbingBoundary3D] = VTK_HEXAHEDRON; } void PVDRecorder::getfilename(const char* name) { // use string std::string fname(name); // no slash at all std::size_t found = fname.find_last_of("/\\"); if (found == std::string::npos) { pathname = "./"; basename = fname; return; } // remove trailing slash if (found == fname.length()-1) { fname = fname.substr(0,fname.length()-1); found = fname.find_last_of("/\\"); } // only trailing slash if(found == std::string::npos) { pathname = "./"; basename = fname; return; } // more slash pathname = fname.substr(0,found+1); basename = fname.substr(found+1); }
28.35332
88
0.606708
[ "mesh", "vector" ]
a8300716c56277e8a7c202a9d4f51ca05fddac8e
18,157
cpp
C++
src/core/geometry/qgspoint.cpp
dyna-mis/Hilabeling
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
[ "MIT" ]
null
null
null
src/core/geometry/qgspoint.cpp
dyna-mis/Hilabeling
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
[ "MIT" ]
null
null
null
src/core/geometry/qgspoint.cpp
dyna-mis/Hilabeling
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
[ "MIT" ]
1
2021-12-25T08:40:30.000Z
2021-12-25T08:40:30.000Z
/*************************************************************************** qgspointv2.cpp -------------- begin : September 2014 copyright : (C) 2014 by Marco Hugentobler email : marco at sourcepole dot ch ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgspoint.h" #include "qgsapplication.h" #include "qgscoordinatetransform.h" #include "qgsgeometryutils.h" #include "qgsmaptopixel.h" #include "qgswkbptr.h" #include <cmath> #include <QPainter> #include <QRegularExpression> /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. 
* See details in QEP #17 ****************************************************************************/ QgsPoint::QgsPoint( double x, double y, double z, double m, QgsWkbTypes::Type wkbType ) : mX( x ) , mY( y ) , mZ( z ) , mM( m ) { if ( wkbType != QgsWkbTypes::Unknown ) { Q_ASSERT( QgsWkbTypes::flatType( wkbType ) == QgsWkbTypes::Point ); mWkbType = wkbType; } else if ( std::isnan( z ) ) { if ( std::isnan( m ) ) mWkbType = QgsWkbTypes::Point; else mWkbType = QgsWkbTypes::PointM; } else if ( std::isnan( m ) ) mWkbType = QgsWkbTypes::PointZ; else mWkbType = QgsWkbTypes::PointZM; } QgsPoint::QgsPoint( const QgsPointXY &p ) : mX( p.x() ) , mY( p.y() ) , mZ( std::numeric_limits<double>::quiet_NaN() ) , mM( std::numeric_limits<double>::quiet_NaN() ) { mWkbType = QgsWkbTypes::Point; } QgsPoint::QgsPoint( QPointF p ) : mX( p.x() ) , mY( p.y() ) , mZ( std::numeric_limits<double>::quiet_NaN() ) , mM( std::numeric_limits<double>::quiet_NaN() ) { mWkbType = QgsWkbTypes::Point; } QgsPoint::QgsPoint( QgsWkbTypes::Type wkbType, double x, double y, double z, double m ) : mX( x ) , mY( y ) , mZ( QgsWkbTypes::hasZ( wkbType ) ? z : std::numeric_limits<double>::quiet_NaN() ) , mM( QgsWkbTypes::hasM( wkbType ) ? m : std::numeric_limits<double>::quiet_NaN() ) { Q_ASSERT( QgsWkbTypes::flatType( wkbType ) == QgsWkbTypes::Point ); mWkbType = wkbType; } /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. 
* See details in QEP #17 ****************************************************************************/ QgsPoint *QgsPoint::clone() const { return new QgsPoint( *this ); } QgsPoint *QgsPoint::snappedToGrid( double hSpacing, double vSpacing, double dSpacing, double mSpacing ) const { // helper function auto gridifyValue = []( double value, double spacing, bool extraCondition = true ) -> double { if ( spacing > 0 && extraCondition ) return std::round( value / spacing ) * spacing; else return value; }; // Get the new values auto x = gridifyValue( mX, hSpacing ); auto y = gridifyValue( mY, vSpacing ); auto z = gridifyValue( mZ, dSpacing, QgsWkbTypes::hasZ( mWkbType ) ); auto m = gridifyValue( mM, mSpacing, QgsWkbTypes::hasM( mWkbType ) ); // return the new object return new QgsPoint( mWkbType, x, y, z, m ); } bool QgsPoint::removeDuplicateNodes( double, bool ) { return false; } bool QgsPoint::fromWkb( QgsConstWkbPtr &wkbPtr ) { QgsWkbTypes::Type type = wkbPtr.readHeader(); if ( QgsWkbTypes::flatType( type ) != QgsWkbTypes::Point ) { clear(); return false; } mWkbType = type; wkbPtr >> mX; wkbPtr >> mY; if ( is3D() ) wkbPtr >> mZ; if ( isMeasure() ) wkbPtr >> mM; clearCache(); return true; } /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. 
* See details in QEP #17 ****************************************************************************/ bool QgsPoint::fromWkt( const QString &wkt ) { clear(); QPair<QgsWkbTypes::Type, QString> parts = QgsGeometryUtils::wktReadBlock( wkt ); if ( QgsWkbTypes::flatType( parts.first ) != QgsWkbTypes::Point ) return false; mWkbType = parts.first; QRegularExpression rx( QStringLiteral( "\\s" ) ); QStringList coordinates = parts.second.split( rx, QString::SkipEmptyParts ); if ( coordinates.size() < 2 ) { clear(); return false; } else if ( coordinates.size() == 3 && !is3D() && !isMeasure() ) { // 3 dimensional coordinates, but not specifically marked as such. We allow this // anyway and upgrade geometry to have Z dimension mWkbType = QgsWkbTypes::addZ( mWkbType ); } else if ( coordinates.size() >= 4 && ( !is3D() || !isMeasure() ) ) { // 4 (or more) dimensional coordinates, but not specifically marked as such. We allow this // anyway and upgrade geometry to have Z&M dimensions mWkbType = QgsWkbTypes::addZ( mWkbType ); mWkbType = QgsWkbTypes::addM( mWkbType ); } int idx = 0; mX = coordinates[idx++].toDouble(); mY = coordinates[idx++].toDouble(); if ( is3D() && coordinates.length() > 2 ) mZ = coordinates[idx++].toDouble(); if ( isMeasure() && coordinates.length() > 2 + is3D() ) mM = coordinates[idx++].toDouble(); return true; } /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. 
* See details in QEP #17 ****************************************************************************/ QByteArray QgsPoint::asWkb() const { int binarySize = sizeof( char ) + sizeof( quint32 ); binarySize += ( 2 + is3D() + isMeasure() ) * sizeof( double ); QByteArray wkbArray; wkbArray.resize( binarySize ); QgsWkbPtr wkb( wkbArray ); wkb << static_cast<char>( QgsApplication::endian() ); wkb << static_cast<quint32>( wkbType() ); wkb << mX << mY; if ( is3D() ) { wkb << mZ; } if ( isMeasure() ) { wkb << mM; } return wkbArray; } QString QgsPoint::asWkt( int precision ) const { QString wkt = wktTypeStr() + QLatin1String( " (" ); wkt += qgsDoubleToString( mX, precision ) + ' ' + qgsDoubleToString( mY, precision ); if ( is3D() ) wkt += ' ' + qgsDoubleToString( mZ, precision ); if ( isMeasure() ) wkt += ' ' + qgsDoubleToString( mM, precision ); wkt += ')'; return wkt; } QDomElement QgsPoint::asGml2( QDomDocument &doc, int precision, const QString &ns, const QgsAbstractGeometry::AxisOrder axisOrder ) const { QDomElement elemPoint = doc.createElementNS( ns, QStringLiteral( "Point" ) ); QDomElement elemCoordinates = doc.createElementNS( ns, QStringLiteral( "coordinates" ) ); // coordinate separator QString cs = QStringLiteral( "," ); // tupel separator QString ts = QStringLiteral( " " ); elemCoordinates.setAttribute( QStringLiteral( "cs" ), cs ); elemCoordinates.setAttribute( QStringLiteral( "ts" ), ts ); QString strCoordinates; if ( axisOrder == QgsAbstractGeometry::AxisOrder::XY ) strCoordinates = qgsDoubleToString( mX, precision ) + cs + qgsDoubleToString( mY, precision ); else strCoordinates = qgsDoubleToString( mY, precision ) + cs + qgsDoubleToString( mX, precision ); elemCoordinates.appendChild( doc.createTextNode( strCoordinates ) ); elemPoint.appendChild( elemCoordinates ); return elemPoint; } QDomElement QgsPoint::asGml3( QDomDocument &doc, int precision, const QString &ns, const QgsAbstractGeometry::AxisOrder axisOrder ) const { QDomElement elemPoint = 
doc.createElementNS( ns, QStringLiteral( "Point" ) ); QDomElement elemPosList = doc.createElementNS( ns, QStringLiteral( "pos" ) ); elemPosList.setAttribute( QStringLiteral( "srsDimension" ), is3D() ? 3 : 2 ); QString strCoordinates; if ( axisOrder == QgsAbstractGeometry::AxisOrder::XY ) strCoordinates = qgsDoubleToString( mX, precision ) + ' ' + qgsDoubleToString( mY, precision ); else strCoordinates = qgsDoubleToString( mY, precision ) + ' ' + qgsDoubleToString( mX, precision ); if ( is3D() ) strCoordinates += ' ' + qgsDoubleToString( mZ, precision ); elemPosList.appendChild( doc.createTextNode( strCoordinates ) ); elemPoint.appendChild( elemPosList ); return elemPoint; } /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. * See details in QEP #17 ****************************************************************************/ QString QgsPoint::asJson( int precision ) const { return "{\"type\": \"Point\", \"coordinates\": [" + qgsDoubleToString( mX, precision ) + QLatin1String( ", " ) + qgsDoubleToString( mY, precision ) + QLatin1String( "]}" ); } void QgsPoint::draw( QPainter &p ) const { p.drawRect( QRectF( mX - 2, mY - 2, 4, 4 ) ); } void QgsPoint::clear() { mX = mY = 0.; if ( is3D() ) mZ = 0.; else mZ = std::numeric_limits<double>::quiet_NaN(); if ( isMeasure() ) mM = 0.; else mM = std::numeric_limits<double>::quiet_NaN(); clearCache(); } void QgsPoint::transform( const QgsCoordinateTransform &ct, QgsCoordinateTransform::TransformDirection d, bool transformZ ) { clearCache(); if ( transformZ ) { ct.transformInPlace( mX, mY, mZ, d ); } else { double z = 0.0; ct.transformInPlace( mX, mY, z, d ); } } QgsCoordinateSequence QgsPoint::coordinateSequence() const { QgsCoordinateSequence cs; cs.append( QgsRingSequence() ); cs.back().append( QgsPointSequence() << QgsPoint( *this ) ); return cs; } int QgsPoint::nCoordinates() const { return 1; } int 
QgsPoint::vertexNumberFromVertexId( QgsVertexId id ) const { if ( id.vertex != 0 ) return -1; else return 0; } QgsAbstractGeometry *QgsPoint::boundary() const { return nullptr; } bool QgsPoint::isValid( QString &, int ) const { return true; } bool QgsPoint::insertVertex( QgsVertexId position, const QgsPoint &vertex ) { Q_UNUSED( position ); Q_UNUSED( vertex ); return false; } /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. * See details in QEP #17 ****************************************************************************/ bool QgsPoint::moveVertex( QgsVertexId position, const QgsPoint &newPos ) { Q_UNUSED( position ); clearCache(); mX = newPos.mX; mY = newPos.mY; if ( is3D() && newPos.is3D() ) { mZ = newPos.mZ; } if ( isMeasure() && newPos.isMeasure() ) { mM = newPos.mM; } return true; } bool QgsPoint::deleteVertex( QgsVertexId position ) { Q_UNUSED( position ); return false; } double QgsPoint::closestSegment( const QgsPoint &pt, QgsPoint &segmentPt, QgsVertexId &vertexAfter, int *leftOf, double epsilon ) const { Q_UNUSED( pt ); Q_UNUSED( segmentPt ); Q_UNUSED( vertexAfter ); if ( leftOf ) *leftOf = 0; Q_UNUSED( epsilon ); return -1; // no segments - return error } bool QgsPoint::nextVertex( QgsVertexId &id, QgsPoint &vertex ) const { if ( id.vertex < 0 ) { id.vertex = 0; if ( id.part < 0 ) { id.part = 0; } if ( id.ring < 0 ) { id.ring = 0; } vertex = *this; return true; } else { return false; } } void QgsPoint::adjacentVertices( QgsVertexId, QgsVertexId &previousVertex, QgsVertexId &nextVertex ) const { previousVertex = QgsVertexId(); nextVertex = QgsVertexId(); } double QgsPoint::vertexAngle( QgsVertexId vertex ) const { Q_UNUSED( vertex ); return 0.0; } int QgsPoint::vertexCount( int, int ) const { return 1; } int QgsPoint::ringCount( int ) const { return 1; } int QgsPoint::partCount() const { return 1; } QgsPoint QgsPoint::vertexAt( 
QgsVertexId ) const { return *this; } QgsPoint *QgsPoint::toCurveType() const { return clone(); } double QgsPoint::segmentLength( QgsVertexId ) const { return 0.0; } /*************************************************************************** * This class is considered CRITICAL and any change MUST be accompanied with * full unit tests. * See details in QEP #17 ****************************************************************************/ bool QgsPoint::addZValue( double zValue ) { if ( QgsWkbTypes::hasZ( mWkbType ) ) return false; mWkbType = QgsWkbTypes::addZ( mWkbType ); mZ = zValue; clearCache(); return true; } bool QgsPoint::addMValue( double mValue ) { if ( QgsWkbTypes::hasM( mWkbType ) ) return false; mWkbType = QgsWkbTypes::addM( mWkbType ); mM = mValue; clearCache(); return true; } void QgsPoint::transform( const QTransform &t, double zTranslate, double zScale, double mTranslate, double mScale ) { clearCache(); qreal x, y; t.map( mX, mY, &x, &y ); mX = x; mY = y; if ( is3D() ) { mZ = mZ * zScale + zTranslate; } if ( isMeasure() ) { mM = mM * mScale + mTranslate; } } bool QgsPoint::dropZValue() { if ( !is3D() ) return false; mWkbType = QgsWkbTypes::dropZ( mWkbType ); mZ = std::numeric_limits<double>::quiet_NaN(); clearCache(); return true; } bool QgsPoint::dropMValue() { if ( !isMeasure() ) return false; mWkbType = QgsWkbTypes::dropM( mWkbType ); mM = std::numeric_limits<double>::quiet_NaN(); clearCache(); return true; } void QgsPoint::swapXy() { std::swap( mX, mY ); clearCache(); } bool QgsPoint::convertTo( QgsWkbTypes::Type type ) { if ( type == mWkbType ) return true; clearCache(); switch ( type ) { case QgsWkbTypes::Point: mZ = std::numeric_limits<double>::quiet_NaN(); mM = std::numeric_limits<double>::quiet_NaN(); mWkbType = type; return true; case QgsWkbTypes::PointZ: case QgsWkbTypes::Point25D: mM = std::numeric_limits<double>::quiet_NaN(); mWkbType = type; return true; case QgsWkbTypes::PointM: mZ = std::numeric_limits<double>::quiet_NaN(); mWkbType = 
type; return true; case QgsWkbTypes::PointZM: mWkbType = type; return true; default: break; } return false; } void QgsPoint::filterVertices( const std::function<bool ( const QgsPoint & )> & ) { // no meaning for points } void QgsPoint::transformVertices( const std::function<QgsPoint( const QgsPoint & )> &transform ) { QgsPoint res = transform( *this ); mX = res.x(); mY = res.y(); if ( is3D() ) mZ = res.z(); if ( isMeasure() ) mM = res.m(); clearCache(); } double QgsPoint::distance3D( double x, double y, double z ) const { double zDistSquared = 0.0; if ( is3D() || !std::isnan( z ) ) zDistSquared = ( mZ - z ) * ( mZ - z ); return std::sqrt( ( mX - x ) * ( mX - x ) + ( mY - y ) * ( mY - y ) + zDistSquared ); } double QgsPoint::distance3D( const QgsPoint &other ) const { double zDistSquared = 0.0; if ( is3D() || other.is3D() ) zDistSquared = ( mZ - other.z() ) * ( mZ - other.z() ); return std::sqrt( ( mX - other.x() ) * ( mX - other.x() ) + ( mY - other.y() ) * ( mY - other.y() ) + zDistSquared ); } double QgsPoint::distanceSquared3D( double x, double y, double z ) const { double zDistSquared = 0.0; if ( is3D() || !std::isnan( z ) ) zDistSquared = ( mZ - z ) * ( mZ - z ); return ( mX - x ) * ( mX - x ) + ( mY - y ) * ( mY - y ) + zDistSquared; } double QgsPoint::distanceSquared3D( const QgsPoint &other ) const { double zDistSquared = 0.0; if ( is3D() || other.is3D() ) zDistSquared = ( mZ - other.z() ) * ( mZ - other.z() ); return ( mX - other.x() ) * ( mX - other.x() ) + ( mY - other.y() ) * ( mY - other.y() ) + zDistSquared; } double QgsPoint::azimuth( const QgsPoint &other ) const { double dx = other.x() - mX; double dy = other.y() - mY; return ( std::atan2( dx, dy ) * 180.0 / M_PI ); } double QgsPoint::inclination( const QgsPoint &other ) const { double distance = distance3D( other ); if ( qgsDoubleNear( distance, 0.0 ) ) { return 90.0; } double dz = other.z() - mZ; return ( std::acos( dz / distance ) * 180.0 / M_PI ); } QgsPoint QgsPoint::project( double distance, 
double azimuth, double inclination ) const { QgsWkbTypes::Type pType = mWkbType; double radsXy = azimuth * M_PI / 180.0; double dx = 0.0, dy = 0.0, dz = 0.0; inclination = std::fmod( inclination, 360.0 ); if ( !qgsDoubleNear( inclination, 90.0 ) ) pType = QgsWkbTypes::addZ( pType ); if ( !is3D() && qgsDoubleNear( inclination, 90.0 ) ) { dx = distance * std::sin( radsXy ); dy = distance * std::cos( radsXy ); } else { double radsZ = inclination * M_PI / 180.0; dx = distance * std::sin( radsZ ) * std::sin( radsXy ); dy = distance * std::sin( radsZ ) * std::cos( radsXy ); dz = distance * std::cos( radsZ ); } return QgsPoint( mX + dx, mY + dy, mZ + dz, mM, pType ); } bool QgsPoint::isEmpty() const { return false; } QgsRectangle QgsPoint::boundingBox() const { return QgsRectangle( mX, mY, mX, mY ); } QString QgsPoint::geometryType() const { return QStringLiteral( "Point" ); } int QgsPoint::dimension() const { return 0; } int QgsPoint::childCount() const { return 1; } QgsPoint QgsPoint::childPoint( int index ) const { Q_ASSERT( index == 0 ); return *this; } QgsPoint *QgsPoint::createEmptyWithSameType() const { double nan = std::numeric_limits<double>::quiet_NaN(); return new QgsPoint( nan, nan, nan, nan, mWkbType ); }
25.537271
137
0.584127
[ "geometry", "object", "transform" ]
a83a686bc780f48ee6cf5714d21cb76cea5c8ed7
1,588
hpp
C++
source/quantum-script-extension-socket.hpp
g-stefan/quantum-script-extension-socket
6297acf5fb8a3f0e651b6a8e8f7a8ff6e5f147f2
[ "MIT", "Unlicense" ]
null
null
null
source/quantum-script-extension-socket.hpp
g-stefan/quantum-script-extension-socket
6297acf5fb8a3f0e651b6a8e8f7a8ff6e5f147f2
[ "MIT", "Unlicense" ]
null
null
null
source/quantum-script-extension-socket.hpp
g-stefan/quantum-script-extension-socket
6297acf5fb8a3f0e651b6a8e8f7a8ff6e5f147f2
[ "MIT", "Unlicense" ]
null
null
null
// // Quantum Script Extension Socket // // Copyright (c) 2020-2021 Grigore Stefan <g_stefan@yahoo.com> // Created by Grigore Stefan <g_stefan@yahoo.com> // // MIT License (MIT) <http://opensource.org/licenses/MIT> // #ifndef QUANTUM_SCRIPT_EXTENSION_SOCKET_HPP #define QUANTUM_SCRIPT_EXTENSION_SOCKET_HPP #ifndef QUANTUM_SCRIPT_HPP #include "quantum-script.hpp" #endif #ifndef QUANTUM_SCRIPT_EXTENSION_SOCKET__EXPORT_HPP #include "quantum-script-extension-socket--export.hpp" #endif #ifndef QUANTUM_SCRIPT_EXTENSION_SOCKET_COPYRIGHT_HPP #include "quantum-script-extension-socket-copyright.hpp" #endif #ifndef QUANTUM_SCRIPT_EXTENSION_SOCKET_LICENSE_HPP #include "quantum-script-extension-socket-license.hpp" #endif #ifndef QUANTUM_SCRIPT_EXTENSION_SOCKET_VERSION_HPP #include "quantum-script-extension-socket-version.hpp" #endif namespace Quantum { namespace Script { namespace Extension { namespace Socket { using namespace Quantum::Script; class SocketContext: public Object { XYO_DISALLOW_COPY_ASSIGN_MOVE(SocketContext); public: Symbol symbolFunctionSocket; TPointerX<Prototype> prototypeSocket; QUANTUM_SCRIPT_EXTENSION_SOCKET_EXPORT SocketContext(); }; QUANTUM_SCRIPT_EXTENSION_SOCKET_EXPORT SocketContext *getContext(); QUANTUM_SCRIPT_EXTENSION_SOCKET_EXPORT void initExecutive(Executive *executive, void *extensionId); QUANTUM_SCRIPT_EXTENSION_SOCKET_EXPORT void registerInternalExtension(Executive *executive); }; }; }; }; #endif
25.206349
104
0.758816
[ "object" ]
a83abacf6a726b893250af9fa1980dcffba54bdd
8,568
cpp
C++
core/src/Module/Control/stubs/Stub_generic_status_stableResults.cpp
ConnectedVision/ConnectedVision
210e49205ca50f73584178b6cedb298a74cea798
[ "MIT" ]
3
2017-08-12T18:14:00.000Z
2018-11-19T09:15:35.000Z
core/src/Module/Control/stubs/Stub_generic_status_stableResults.cpp
ConnectedVision/ConnectedVision
210e49205ca50f73584178b6cedb298a74cea798
[ "MIT" ]
null
null
null
core/src/Module/Control/stubs/Stub_generic_status_stableResults.cpp
ConnectedVision/ConnectedVision
210e49205ca50f73584178b6cedb298a74cea798
[ "MIT" ]
1
2018-11-09T15:57:13.000Z
2018-11-09T15:57:13.000Z
/** * Connected Vision - https://github.com/ConnectedVision * MIT License */ // auto-generated header by CodeFromTemplate - Connected Vision - https://github.com/ConnectedVision // CodeFromTemplate Version: 0.3 alpha // stubs/Stub_generic_status_stableResults.cpp // NEVER TOUCH this file! #include <utility> #include <rapidjson/document.h> #include <rapidjson/prettywriter.h> #include <rapidjson/stringbuffer.h> #include <rapidjson/error/en.h> #include <boost/make_shared.hpp> #include "Stub_generic_status_stableResults.h" #include "../Class_generic_status_stableResults.h" // --> Do NOT EDIT <-- namespace ConnectedVision { // --> Do NOT EDIT <-- /* copy constructors */ Stub_generic_status_stableResults::Stub_generic_status_stableResults(const Stub_generic_status_stableResults& other) { // TODO: other.readLock // pinID if ( other.pinID ) pinID = boost::make_shared<std::string>(*other.pinID); // indexStart indexStart = other.indexStart; // indexEnd indexEnd = other.indexEnd; // timestampStart timestampStart = other.timestampStart; // timestampEnd timestampEnd = other.timestampEnd; } // --> Do NOT EDIT <-- /* copy assignment operator */ Stub_generic_status_stableResults& Stub_generic_status_stableResults::operator =(const Stub_generic_status_stableResults& other) { Stub_generic_status_stableResults tmp(other); // re-use copy-constructor *this = std::move(tmp); // re-use move-assignment return *this; } // --> Do NOT EDIT <-- /* mopy assignment operator */ Stub_generic_status_stableResults& Stub_generic_status_stableResults::operator =(Stub_generic_status_stableResults&& other) noexcept { // pinID std::swap(pinID, other.pinID); // indexStart std::swap(indexStart, other.indexStart); // indexEnd std::swap(indexEnd, other.indexEnd); // timestampStart std::swap(timestampStart, other.timestampStart); // timestampEnd std::swap(timestampEnd, other.timestampEnd); return *this; } // --> Do NOT EDIT <-- /* default constructors */ 
Stub_generic_status_stableResults::Stub_generic_status_stableResults() { clear(); } // --> Do NOT EDIT <-- Stub_generic_status_stableResults::Stub_generic_status_stableResults(const rapidjson::Value& value) { clear(); parseJson( value ); } // --> Do NOT EDIT <-- Stub_generic_status_stableResults::Stub_generic_status_stableResults(const std::string& str) { clear(); parseJson( str ); } // --> Do NOT EDIT <-- Stub_generic_status_stableResults::~Stub_generic_status_stableResults() { } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::clear() { this->pinID.reset( new std::string("") ); this->indexStart = static_cast<int64_t>(0); this->indexEnd = static_cast<int64_t>(0); this->timestampStart = 0; this->timestampEnd = 0; } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::parseJson(const char *str) { // ignore empty data if ( str[0] == 0 ) return; // parse data rapidjson::Document document; if (document.Parse<0>(str).HasParseError()) { std::string context; size_t off = document.GetErrorOffset(); size_t i, line_start = 0, context_start = 0; int num_line = 1; for ( i = 0; (i < off) && str[i]; i++ ) { if ( str[i] == '\n' ) { line_start = i+1; context_start = i+1; num_line++; } if ( str[i] == '{' || str[i] == '[' ) { context_start = i; } } for ( i = context_start; str[i]; i++ ) { if ( str[i] == '\n' || str[i] == '\r' ) break; context += str[i]; if ( str[i] == '}' || str[i] == ']' ) break; } throw ConnectedVision::runtime_error( std::string("parse error of JSON Object: ") + rapidjson::GetParseError_En(document.GetParseError()) + std::string(" at line ") + ConnectedVision::intToStr( num_line ) + ": " + context); } parseJson(document); } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::parseJson(const rapidjson::Value& value) { clear(); if ( !value.IsObject() ) throw ConnectedVision::runtime_error( "no JSON Object"); // pinID if ((value.HasMember("pinID")) && value["pinID"].IsString()) { set_pinID( boost::shared_ptr<std::string>( new 
std::string( value["pinID"].GetString() ) ) ); } else throw ConnectedVision::runtime_error( "required member is missing: 'pinID'"); // indexStart if ((value.HasMember("indexStart")) && value["indexStart"].IsInt64()) { set_indexStart( value["indexStart"].GetInt64() ); } // indexEnd if ((value.HasMember("indexEnd")) && value["indexEnd"].IsInt64()) { set_indexEnd( value["indexEnd"].GetInt64() ); } else throw ConnectedVision::runtime_error( "required member is missing: 'indexEnd'"); // timestampStart if ((value.HasMember("timestampStart")) && value["timestampStart"].IsInt64()) { set_timestampStart( value["timestampStart"].GetInt64() ); } // timestampEnd if ((value.HasMember("timestampEnd")) && value["timestampEnd"].IsInt64()) { set_timestampEnd( value["timestampEnd"].GetInt64() ); } else throw ConnectedVision::runtime_error( "required member is missing: 'timestampEnd'"); } // --> Do NOT EDIT <-- std::string Stub_generic_status_stableResults::toJsonStr() const { rapidjson::StringBuffer s; rapidjson::Document doc; doc.SetObject(); this->toJson(doc, doc.GetAllocator()); rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(s); doc.Accept(writer); return std::string(s.GetString()); } // --> Do NOT EDIT <-- std::string Stub_generic_status_stableResults::toJson() const { rapidjson::StringBuffer s; rapidjson::Document doc; doc.SetObject(); this->toJson(doc, doc.GetAllocator()); rapidjson::Writer<rapidjson::StringBuffer> writer(s); doc.Accept(writer); return std::string(s.GetString()); } // --> Do NOT EDIT <-- rapidjson::Value& Stub_generic_status_stableResults::toJson(rapidjson::Value& node, rapidjson::Value::AllocatorType& allocator) const { { // pinID node.AddMember("pinID", rapidjson::Value().SetString( get_pinID()->c_str(), allocator), allocator); } { // indexStart node.AddMember("indexStart", rapidjson::Value().SetInt64( get_indexStart() ), allocator); } { // indexEnd node.AddMember("indexEnd", rapidjson::Value().SetInt64( get_indexEnd() ), allocator); } { // 
timestampStart node.AddMember("timestampStart", rapidjson::Value().SetInt64( get_timestampStart() ), allocator); } { // timestampEnd node.AddMember("timestampEnd", rapidjson::Value().SetInt64( get_timestampEnd() ), allocator); } return node; } // --> Do NOT EDIT <-- boost::shared_ptr<std::string> Stub_generic_status_stableResults::get_pinID() const { return( this->pinID ); } // --> Do NOT EDIT <-- const boost::shared_ptr<const std::string> Stub_generic_status_stableResults::getconst_pinID() const { return( boost::static_pointer_cast<const std::string>(this->pinID) ); } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::set_pinID(boost::shared_ptr<std::string> value) { this->pinID = value; } // --> Do NOT EDIT <-- int64_t Stub_generic_status_stableResults::get_indexStart() const { return( this->indexStart ); } // --> Do NOT EDIT <-- const int64_t Stub_generic_status_stableResults::getconst_indexStart() const { return( this->indexStart ); } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::set_indexStart(int64_t value) { this->indexStart = value; } // --> Do NOT EDIT <-- int64_t Stub_generic_status_stableResults::get_indexEnd() const { return( this->indexEnd ); } // --> Do NOT EDIT <-- const int64_t Stub_generic_status_stableResults::getconst_indexEnd() const { return( this->indexEnd ); } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::set_indexEnd(int64_t value) { this->indexEnd = value; } // --> Do NOT EDIT <-- ConnectedVision::timestamp_t Stub_generic_status_stableResults::get_timestampStart() const { return( this->timestampStart ); } // --> Do NOT EDIT <-- const ConnectedVision::timestamp_t Stub_generic_status_stableResults::getconst_timestampStart() const { return( this->timestampStart ); } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::set_timestampStart(ConnectedVision::timestamp_t value) { this->timestampStart = value; } // --> Do NOT EDIT <-- ConnectedVision::timestamp_t 
Stub_generic_status_stableResults::get_timestampEnd() const { return( this->timestampEnd ); } // --> Do NOT EDIT <-- const ConnectedVision::timestamp_t Stub_generic_status_stableResults::getconst_timestampEnd() const { return( this->timestampEnd ); } // --> Do NOT EDIT <-- void Stub_generic_status_stableResults::set_timestampEnd(ConnectedVision::timestamp_t value) { this->timestampEnd = value; } } // namespace ConnectedVision
26.775
225
0.710317
[ "object" ]
a83c8974d8c31e1b0a5df049a70a906232834c19
1,472
hpp
C++
2nd/ass/parser.hpp
keitaroskmt/cpuex2020_1
33cdc033a6184286232f4ce7729030708213b6f8
[ "BSD-3-Clause-Attribution", "FSFAP" ]
null
null
null
2nd/ass/parser.hpp
keitaroskmt/cpuex2020_1
33cdc033a6184286232f4ce7729030708213b6f8
[ "BSD-3-Clause-Attribution", "FSFAP" ]
null
null
null
2nd/ass/parser.hpp
keitaroskmt/cpuex2020_1
33cdc033a6184286232f4ce7729030708213b6f8
[ "BSD-3-Clause-Attribution", "FSFAP" ]
3
2020-10-08T04:16:58.000Z
2021-10-11T13:19:30.000Z
#pragma once #include <iostream> #include <regex> #include <map> #include <fstream> #include <string> #include <cassert> using namespace std; class Parser { public: Parser(string); int parse_file(); // void preprocess_file(fstream &); void print_label(); void print_code(); int total_num; // vector<string> preprocess_buffer; // vector<string> buffer_tmp; map<string, int> label_map; map<int, vector<string>> code_map; protected: string file_name; int data_num; int current_num; void parse_code(string); int get_linenum_by_label(string); }; const map<string, string> inst_format = { {"add", "R"}, {"sub", "R"}, {"and", "R"}, {"or", "R"}, {"nor", "R"}, {"sll", "R"}, {"srl", "R"}, {"jr", "R"}, {"jalr", "R"}, {"j", "J"}, {"jal", "J"}, {"beq", "I"}, {"bne", "I"}, {"blt", "I"}, {"addi", "I"}, {"ori", "I"}, {"lui", "I"}, {"lw", "I"}, {"sw", "I"}, {"in", "I"}, {"fin", "I"}, {"out", "I"}, {"beqi", "II"}, {"blti", "II"}, {"fadd", "FR"}, {"fsub", "FR"}, {"fmul", "FR"}, {"fdiv", "FR"}, {"fneg", "FR"}, {"fabs", "FR"}, {"fsqrt", "FR"}, {"fmov", "FR"}, {"fbeq", "FI"}, {"fbne", "FI"}, {"fblt", "FI"}, {"flw", "FI"}, {"fsw", "FI"}, {"ftoi", "FI"}, {"itof", "FI"}, {"floor", "FI"}, };
19.116883
44
0.429348
[ "vector" ]
a841a90ada3ab99d516fd1ee0330a06a034eb347
599
cpp
C++
codelib/sorting/heapsort.cpp
TissueRoll/admu-progvar-notebook
efd1c48872d40aeabe2b03af7b986bb831c062b1
[ "MIT" ]
null
null
null
codelib/sorting/heapsort.cpp
TissueRoll/admu-progvar-notebook
efd1c48872d40aeabe2b03af7b986bb831c062b1
[ "MIT" ]
null
null
null
codelib/sorting/heapsort.cpp
TissueRoll/admu-progvar-notebook
efd1c48872d40aeabe2b03af7b986bb831c062b1
[ "MIT" ]
null
null
null
#include <iostream> #include <vector> void sink(std::vector<int> &ar, int k, int N) { while (2*k + 1 < N) { int j = 2*k + 1; if (j + 1 < N and ar[j] < ar[j+1]) j++; if (ar[k] > ar[j]) break; std::swap(ar[k], ar[j]); k = j; } } void heapsort(std::vector<int> &ar, int N) { for (int k = (N-1) / 2; k >= 0; --k) sink(ar, k, N-1); while (N) { std::swap(ar[0], ar[--N]); sink(ar, 0, N); } } int main() { std::vector<int> ar = {5, 2, 6, 1, 7, 4, 3}; heapsort(ar, 7); for (int x : ar) std::cout<<x<<" "; std::cout<<"\n"; return 0; }
17.114286
47
0.447412
[ "vector" ]
61b58ab5ece8a7bf5ac37b137978492d9bc31fc9
488
hpp
C++
src/systems/entity_component_system/components/model_component.hpp
jotask/SurvivalVoxel
d80347309b893750f79d86f9fc00aafd3804783d
[ "MIT" ]
null
null
null
src/systems/entity_component_system/components/model_component.hpp
jotask/SurvivalVoxel
d80347309b893750f79d86f9fc00aafd3804783d
[ "MIT" ]
null
null
null
src/systems/entity_component_system/components/model_component.hpp
jotask/SurvivalVoxel
d80347309b893750f79d86f9fc00aafd3804783d
[ "MIT" ]
null
null
null
#pragma once #include "systems/entity_component_system/components/component.hpp" #include <glad/glad.h> #include <glm/glm.hpp> #include <string> #include <vector> #include <map> namespace aiko { class Entity; class Model; class ModelComponent : public Component { public: ModelComponent(Entity* entity, Model* model); virtual ~ModelComponent() = default; virtual void render() override; private: Model* m_model; }; }
14.787879
67
0.653689
[ "render", "vector", "model" ]
61b97fe5b907119a22e2891d3a04b978e4fdf347
783
cpp
C++
LeetCode-27.cpp
therainmak3r/dirty-laundry
39e295e9390b62830bef53282cdcb63716efac45
[ "MIT" ]
20
2015-12-22T14:14:59.000Z
2019-10-25T12:14:23.000Z
LeetCode-27.cpp
therainmak3r/dirty-laundry
39e295e9390b62830bef53282cdcb63716efac45
[ "MIT" ]
null
null
null
LeetCode-27.cpp
therainmak3r/dirty-laundry
39e295e9390b62830bef53282cdcb63716efac45
[ "MIT" ]
2
2016-06-27T13:34:08.000Z
2018-10-02T20:36:54.000Z
#include <iostream> #include <vector> #include <cstring> #include <cmath> #include <queue> using namespace std; class Solution { public: int removeElement(int A[], int n, int elem) { queue<int> q; for (int i = 0; i < n; i++) { if (A[i] == elem) q.push(i); } int i = n - 1; while (q.empty() == 0) { if (i == -1 || i < q.front()) break; if (A[i] == elem) i--; else { A[q.front()] = A[i]; q.pop(); i--; } } return i + 1; } }; int main() { int a[] = {4, 5}; Solution obj; int ans = obj.removeElement(a, 2, 5); cout << "Ans is " << ans << endl; return 0; }
17.4
48
0.393359
[ "vector" ]
61bb736b4af2da2537f37f0fe1b56889eeae62d7
3,472
hpp
C++
ThirdParty-mod/java2cpp/android/util/FloatMath.hpp
kakashidinho/HQEngine
8125b290afa7c62db6cc6eac14e964d8138c7fd0
[ "MIT" ]
1
2019-04-03T01:53:28.000Z
2019-04-03T01:53:28.000Z
ThirdParty-mod/java2cpp/android/util/FloatMath.hpp
kakashidinho/HQEngine
8125b290afa7c62db6cc6eac14e964d8138c7fd0
[ "MIT" ]
null
null
null
ThirdParty-mod/java2cpp/android/util/FloatMath.hpp
kakashidinho/HQEngine
8125b290afa7c62db6cc6eac14e964d8138c7fd0
[ "MIT" ]
null
null
null
/*================================================================================ code generated by: java2cpp author: Zoran Angelov, mailto://baldzar@gmail.com class: android.util.FloatMath ================================================================================*/ #ifndef J2CPP_INCLUDE_IMPLEMENTATION #ifndef J2CPP_ANDROID_UTIL_FLOATMATH_HPP_DECL #define J2CPP_ANDROID_UTIL_FLOATMATH_HPP_DECL namespace j2cpp { namespace java { namespace lang { class Object; } } } #include <java/lang/Object.hpp> namespace j2cpp { namespace android { namespace util { class FloatMath; class FloatMath : public object<FloatMath> { public: J2CPP_DECLARE_CLASS J2CPP_DECLARE_METHOD(0) J2CPP_DECLARE_METHOD(1) J2CPP_DECLARE_METHOD(2) J2CPP_DECLARE_METHOD(3) J2CPP_DECLARE_METHOD(4) J2CPP_DECLARE_METHOD(5) explicit FloatMath(jobject jobj) : object<FloatMath>(jobj) { } operator local_ref<java::lang::Object>() const; static jfloat floor(jfloat); static jfloat ceil(jfloat); static jfloat sin(jfloat); static jfloat cos(jfloat); static jfloat sqrt(jfloat); }; //class FloatMath } //namespace util } //namespace android } //namespace j2cpp #endif //J2CPP_ANDROID_UTIL_FLOATMATH_HPP_DECL #else //J2CPP_INCLUDE_IMPLEMENTATION #ifndef J2CPP_ANDROID_UTIL_FLOATMATH_HPP_IMPL #define J2CPP_ANDROID_UTIL_FLOATMATH_HPP_IMPL namespace j2cpp { android::util::FloatMath::operator local_ref<java::lang::Object>() const { return local_ref<java::lang::Object>(get_jobject()); } jfloat android::util::FloatMath::floor(jfloat a0) { return call_static_method< android::util::FloatMath::J2CPP_CLASS_NAME, android::util::FloatMath::J2CPP_METHOD_NAME(1), android::util::FloatMath::J2CPP_METHOD_SIGNATURE(1), jfloat >(a0); } jfloat android::util::FloatMath::ceil(jfloat a0) { return call_static_method< android::util::FloatMath::J2CPP_CLASS_NAME, android::util::FloatMath::J2CPP_METHOD_NAME(2), android::util::FloatMath::J2CPP_METHOD_SIGNATURE(2), jfloat >(a0); } jfloat android::util::FloatMath::sin(jfloat a0) { return 
call_static_method< android::util::FloatMath::J2CPP_CLASS_NAME, android::util::FloatMath::J2CPP_METHOD_NAME(3), android::util::FloatMath::J2CPP_METHOD_SIGNATURE(3), jfloat >(a0); } jfloat android::util::FloatMath::cos(jfloat a0) { return call_static_method< android::util::FloatMath::J2CPP_CLASS_NAME, android::util::FloatMath::J2CPP_METHOD_NAME(4), android::util::FloatMath::J2CPP_METHOD_SIGNATURE(4), jfloat >(a0); } jfloat android::util::FloatMath::sqrt(jfloat a0) { return call_static_method< android::util::FloatMath::J2CPP_CLASS_NAME, android::util::FloatMath::J2CPP_METHOD_NAME(5), android::util::FloatMath::J2CPP_METHOD_SIGNATURE(5), jfloat >(a0); } J2CPP_DEFINE_CLASS(android::util::FloatMath,"android/util/FloatMath") J2CPP_DEFINE_METHOD(android::util::FloatMath,0,"<init>","()V") J2CPP_DEFINE_METHOD(android::util::FloatMath,1,"floor","(F)F") J2CPP_DEFINE_METHOD(android::util::FloatMath,2,"ceil","(F)F") J2CPP_DEFINE_METHOD(android::util::FloatMath,3,"sin","(F)F") J2CPP_DEFINE_METHOD(android::util::FloatMath,4,"cos","(F)F") J2CPP_DEFINE_METHOD(android::util::FloatMath,5,"sqrt","(F)F") } //namespace j2cpp #endif //J2CPP_ANDROID_UTIL_FLOATMATH_HPP_IMPL #endif //J2CPP_INCLUDE_IMPLEMENTATION
24.8
83
0.690956
[ "object" ]
61bd8727139e0463f061786cae7ad996ebc6d1eb
17,838
cpp
C++
src/imaging/ossimMaxMosaic.cpp
martidi/ossim
44268fa9d7fc5a3038642e702e85ccd339a4ff9f
[ "MIT" ]
null
null
null
src/imaging/ossimMaxMosaic.cpp
martidi/ossim
44268fa9d7fc5a3038642e702e85ccd339a4ff9f
[ "MIT" ]
null
null
null
src/imaging/ossimMaxMosaic.cpp
martidi/ossim
44268fa9d7fc5a3038642e702e85ccd339a4ff9f
[ "MIT" ]
1
2018-10-11T11:36:16.000Z
2018-10-11T11:36:16.000Z
//******************************************************************* // Copyright (C) 2005 SANZ Inc. // // License: LGPL // // See LICENSE.txt file in the top level directory for more details. // // Author: Kenneth Melero (kmelero@sanz.com) // // Description: This combiner is designed to "float" the maximum pixel value // of all inputs to top of the mosaic output. // //************************************************************************* // $Id: ossimMaxMosaic.cpp 23257 2015-04-13 16:57:14Z dburken $ #include <ossim/imaging/ossimMaxMosaic.h> #include <ossim/imaging/ossimImageData.h> #include <ossim/imaging/ossimImageDataFactory.h> #include <ossim/base/ossimTrace.h> static const ossimTrace traceDebug("ossimMaxMosaic:debug"); using namespace std; RTTI_DEF1(ossimMaxMosaic, "ossimMaxMosaic", ossimImageCombiner) ossimMaxMosaic::ossimMaxMosaic() :ossimImageCombiner(), theTile(NULL) { } ossimMaxMosaic::ossimMaxMosaic(ossimConnectableObject::ConnectableObjectList& inputSources) : ossimImageCombiner(inputSources), theTile(NULL) { } ossimMaxMosaic::~ossimMaxMosaic() { } ossimRefPtr<ossimImageData> ossimMaxMosaic::getTile( const ossimIrect& tileRect, ossim_uint32 resLevel) { long size = getNumberOfInputs(); ossim_uint32 layerIdx = 0; // If there is only one in the mosaic then just return it. if(size == 1) { return getNextTile(layerIdx, 0, tileRect, resLevel); } ossimIpt origin = tileRect.ul(); ossim_uint32 w = tileRect.width(); ossim_uint32 h = tileRect.height(); if(!theTile.valid()) { // First time through... allocate(); // If we still don't have a buffer then we will leave. 
if(!theTile.valid()) { return ossimRefPtr<ossimImageData>(); } } ossim_uint32 tileW = theTile->getWidth(); ossim_uint32 tileH = theTile->getHeight(); if((w != tileW)|| (h != tileH)) { theTile->setWidth(w); theTile->setHeight(h); if((w*h)!=(tileW*tileH)) { theTile->initialize(); } } theTile->setOrigin(origin); //--- // General Note: // // Note: I will not check for disabled or enabled since we have // no clear way to handle this within a mosaic. The default will be // to do a simple a A over B type mosaic. Derived classes should // check for the enabled and disabled and always // use this default implementation if they are disabled. //--- theTile->setOrigin(origin); theTile->makeBlank(); switch(theTile->getScalarType()) { case OSSIM_UCHAR: { if(!hasDifferentInputs()) { return combine(static_cast<ossim_uint8>(0), tileRect, resLevel); } else { return combineNorm(static_cast<ossim_uint8>(0), tileRect, resLevel); } } case OSSIM_SINT8: { if(!hasDifferentInputs()) { return combine(static_cast<ossim_sint8>(0), tileRect, resLevel); } else { return combineNorm(static_cast<ossim_sint8>(0), tileRect, resLevel); } } case OSSIM_FLOAT: case OSSIM_NORMALIZED_FLOAT: { if(!hasDifferentInputs()) { return combine(static_cast<float>(0), tileRect, resLevel); } else { return combineNorm(static_cast<float>(0), tileRect, resLevel); } } case OSSIM_USHORT16: case OSSIM_USHORT11: { if(!hasDifferentInputs()) { return combine(static_cast<ossim_uint16>(0), tileRect, resLevel); } else { return combineNorm(static_cast<ossim_uint16>(0), tileRect, resLevel); } } case OSSIM_SSHORT16: { if(!hasDifferentInputs()) { return combine(static_cast<ossim_sint16>(0), tileRect, resLevel); } else { return combineNorm(static_cast<ossim_sint16>(0), tileRect, resLevel); } } case OSSIM_SINT32: { if(!hasDifferentInputs()) { return combine(static_cast<ossim_sint32>(0), tileRect, resLevel); } else { return combineNorm(static_cast<ossim_sint32>(0), tileRect, resLevel); } } case OSSIM_UINT32: { if(!hasDifferentInputs()) 
{ return combine(static_cast<ossim_uint32>(0), tileRect, resLevel); } else { return combineNorm(static_cast<ossim_uint32>(0), tileRect, resLevel); } } case OSSIM_DOUBLE: case OSSIM_NORMALIZED_DOUBLE: { if(!hasDifferentInputs()) { return combine(static_cast<double>(0), tileRect, resLevel); } else { return combineNorm(static_cast<double>(0), tileRect, resLevel); } } case OSSIM_SCALAR_UNKNOWN: default: { ossimNotify(ossimNotifyLevel_WARN) << "Scalar type = " << theTile->getScalarType() << " Not supported by ossimMaxMosaic" << endl; } } return ossimRefPtr<ossimImageData>(); } void ossimMaxMosaic::initialize() { ossimImageCombiner::initialize(); theTile = NULL; } void ossimMaxMosaic::allocate() { theTile = NULL; if( (getNumberOfInputs() > 0) && getInput(0) ) { theTile = ossimImageDataFactory::instance()->create(this, this); theTile->initialize(); } } bool ossimMaxMosaic::saveState(ossimKeywordlist& kwl, const char* prefix)const { return ossimImageCombiner::saveState(kwl, prefix); } bool ossimMaxMosaic::loadState(const ossimKeywordlist& kwl, const char* prefix) { return ossimImageCombiner::loadState(kwl, prefix); } template <class T> ossimRefPtr<ossimImageData> ossimMaxMosaic::combineNorm( T,// dummy template variable const ossimIrect& tileRect, ossim_uint32 resLevel) { ossim_uint32 layerIdx = 0; ossimRefPtr<ossimImageData> destination = theTile; ossimRefPtr<ossimImageData> currentImageData = getNextNormTile(layerIdx, 0, tileRect, resLevel); if(!currentImageData) { return currentImageData; } std::vector<float*> srcBands(theLargestNumberOfInputBands); std::vector<float> srcBandsNullPix(theLargestNumberOfInputBands); std::vector<T*> destBands(theLargestNumberOfInputBands); std::vector<T> destBandsNullPix(theLargestNumberOfInputBands); std::vector<T> destBandsMinPix(theLargestNumberOfInputBands); std::vector<T> destBandsMaxPix(theLargestNumberOfInputBands); //float** srcBands = new float*[theLargestNumberOfInputBands]; //float* srcBandsNullPix = new 
float[theLargestNumberOfInputBands]; //T** destBands = new T*[theLargestNumberOfInputBands]; //T* destBandsNullPix = new T[theLargestNumberOfInputBands]; //T* destBandsMinPix = new T[theLargestNumberOfInputBands]; //T* destBandsMaxPix = new T[theLargestNumberOfInputBands]; ossim_uint32 band; ossim_uint32 upperBound = destination->getWidth()*destination->getHeight(); ossim_uint32 minNumberOfBands = currentImageData->getNumberOfBands(); for(band = 0; band < minNumberOfBands; ++band) { srcBands[band] = static_cast<float*>(currentImageData->getBuf(band)); srcBandsNullPix[band] = static_cast<float>(currentImageData->getNullPix(band)); destBands[band] = static_cast<T*>(theTile->getBuf(band)); destBandsNullPix[band] = static_cast<T>(theTile->getNullPix(band)); destBandsMinPix[band] = static_cast<T>(theTile->getMinPix(band)); destBandsMaxPix[band] = static_cast<T>(theTile->getMaxPix(band)); } // if the src is smaller than the destination in number // of bands we will just duplicate the last band. for(;band < theLargestNumberOfInputBands; ++band) { srcBands[band] = static_cast<float*>(srcBands[minNumberOfBands - 1]); srcBandsNullPix[band] = static_cast<float>(currentImageData->getNullPix(minNumberOfBands - 1)); destBands[band] = static_cast<T*>(theTile->getBuf(band)); destBandsNullPix[band] = static_cast<T>(theTile->getNullPix(band)); destBandsMinPix[band] = static_cast<T>(theTile->getMinPix(band)); destBandsMaxPix[band] = static_cast<T>(theTile->getMaxPix(band)); } // most of the time we will not overlap so let's // copy the first tile into destination and check later. 
// ossim_uint32 tempBandIdx = 0; for(band = 0; band < theTile->getNumberOfBands();++band) { if(band < currentImageData->getNumberOfBands()) { theTile->copyNormalizedBufferToTile(band, (float*)currentImageData->getBuf(band)); ++tempBandIdx; } else { if(tempBandIdx) { theTile->copyNormalizedBufferToTile(band, (float*)currentImageData->getBuf(tempBandIdx-1)); } } } destination->validate(); currentImageData = getNextNormTile(layerIdx, tileRect, resLevel); while(currentImageData.valid()) { ossim_uint32 minNumberOfBands = currentImageData->getNumberOfBands(); ossimDataObjectStatus currentStatus = currentImageData->getDataObjectStatus(); ossimDataObjectStatus destinationStatus = destination->getDataObjectStatus(); if(destinationStatus == OSSIM_FULL) { return destination; } for(band = 0; band < minNumberOfBands; ++band) { srcBands[band] = static_cast<float*>(currentImageData->getBuf(band)); srcBandsNullPix[band] = static_cast<float>(currentImageData->getNullPix(band)); } // if the src is smaller than the destination in number // of bands we will just duplicate the last band. 
for(;band < theLargestNumberOfInputBands; ++band) { srcBands[band] = srcBands[minNumberOfBands - 1]; srcBandsNullPix[band] = static_cast<T>(currentImageData->getNullPix(minNumberOfBands - 1)); } if((destinationStatus == OSSIM_EMPTY)&& (currentStatus != OSSIM_EMPTY)&& (currentStatus != OSSIM_NULL)) { ossim_uint32 upperBound = destination->getWidth()*destination->getHeight(); for(band=0; band < theLargestNumberOfInputBands; ++band) { float delta = destBandsMaxPix[band] - destBandsMinPix[band]; float minP = destBandsMinPix[band]; for(ossim_uint32 offset = 0; offset < upperBound; ++offset) { destBands[band][offset] = (T)( minP + delta*srcBands[band][offset]); } } } else if((destinationStatus == OSSIM_PARTIAL)&& (currentStatus != OSSIM_EMPTY)&& (currentStatus != OSSIM_NULL)) { for(band = 0; band < theLargestNumberOfInputBands; ++band) { float delta = destBandsMaxPix[band] - destBandsMinPix[band]; float minP = destBandsMinPix[band]; for(ossim_uint32 offset = 0; offset < upperBound; ++offset) { if(destBands[band][offset] == destBandsNullPix[band]) { destBands[band][offset] = (T)(minP + delta*srcBands[band][offset]); } } } } destination->validate(); currentImageData = getNextNormTile(layerIdx, tileRect, resLevel); } // Cleanup... 
// delete [] srcBands; // delete [] srcBandsNullPix; // delete [] destBands; // delete [] destBandsNullPix; // delete [] destBandsMinPix; // delete [] destBandsMaxPix; return destination; } template <class T> ossimRefPtr<ossimImageData> ossimMaxMosaic::combine( T,// dummy template variable const ossimIrect& tileRect, ossim_uint32 resLevel) { ossim_uint32 layerIdx = 0; ossimRefPtr<ossimImageData> destination = theTile; ossimRefPtr<ossimImageData> currentImageData = getNextTile(layerIdx, 0, tileRect, resLevel); if(!currentImageData) { return currentImageData; } T** srcBands = new T*[theLargestNumberOfInputBands]; T* srcBandsNullPix = new T[theLargestNumberOfInputBands]; T** destBands = new T*[theLargestNumberOfInputBands]; T* destBandsNullPix = new T[theLargestNumberOfInputBands]; ossim_uint32 band; ossim_uint32 upperBound = destination->getWidth()*destination->getHeight(); ossim_uint32 bandIndex = 0; ossim_uint32 offset=0; ossim_uint32 minNumberOfBands = currentImageData->getNumberOfBands(); for(band = 0; band < minNumberOfBands; ++band) { srcBands[band] = static_cast<T*>(currentImageData->getBuf(band)); destBands[band] = static_cast<T*>(theTile->getBuf(band)); srcBandsNullPix[band] = static_cast<T>(currentImageData->getNullPix(band)); destBandsNullPix[band] = static_cast<T>(theTile->getNullPix(band)); } // if the src is smaller than the destination in number // of bands we will just duplicate the last band. for(;band < theLargestNumberOfInputBands; ++band) { srcBands[band] = static_cast<T*>(srcBands[minNumberOfBands - 1]); destBands[band] = static_cast<T*>(theTile->getBuf(band)); srcBandsNullPix[band] = static_cast<T>(currentImageData->getNullPix(minNumberOfBands - 1)); destBandsNullPix[band] = static_cast<T>(theTile->getNullPix(band)); } // most of the time we will not overlap so let's // copy the first tile into destination and check later. 
// for(band = 0; band < theTile->getNumberOfBands();++band) { T* destBand = destBands[band]; T* srcBand = srcBands[band]; if(destBand&&srcBand) { for(offset = 0; offset < upperBound;++offset) { *destBand = *srcBand; ++srcBand; ++destBand; } } } destination->setDataObjectStatus(currentImageData->getDataObjectStatus()); currentImageData = getNextTile(layerIdx, tileRect, resLevel); while(currentImageData.valid()) { ossim_uint32 minNumberOfBands = currentImageData->getNumberOfBands(); ossimDataObjectStatus currentStatus = currentImageData->getDataObjectStatus(); ossimDataObjectStatus destinationStatus = destination->getDataObjectStatus(); for(band = 0; band < minNumberOfBands; ++band) { srcBands[band] = static_cast<T*>(currentImageData->getBuf(band)); srcBandsNullPix[band] = static_cast<T>(currentImageData->getNullPix(band)); } // if the src is smaller than the destination in number // of bands we will just duplicate the last band. for(;band < theLargestNumberOfInputBands; ++band) { srcBands[band] = srcBands[minNumberOfBands - 1]; srcBandsNullPix[band] = static_cast<T>(currentImageData->getNullPix(minNumberOfBands - 1)); } if((destinationStatus == OSSIM_PARTIAL)&& (currentStatus != OSSIM_EMPTY)&& (currentStatus != OSSIM_NULL)) { for(bandIndex = 0; bandIndex < theLargestNumberOfInputBands; ++bandIndex) { for(ossim_uint32 offset = 0; offset < upperBound; ++offset) { if(srcBands[bandIndex][offset] > destBands[bandIndex][offset]) { destBands[bandIndex][offset] = srcBands[bandIndex][offset]; } } } } else { ossim_uint32 upperBound = destination->getWidth()*destination->getHeight(); for(ossim_uint32 band=0; band < theLargestNumberOfInputBands; ++band) { for(ossim_uint32 offset = 0; offset < upperBound; ++offset) { if(srcBands[band][offset] > destBands[band][offset]) { destBands[band][offset] = srcBands[band][offset]; } } } } destination->validate(); currentImageData = getNextTile(layerIdx,tileRect, resLevel); } // Cleanup... 
delete [] srcBands; delete [] srcBandsNullPix; delete [] destBands; delete [] destBandsNullPix; return destination; } ossimString ossimMaxMosaic::getShortName()const { return ossimString("ossimMaxMosaic"); } ossimString ossimMaxMosaic::getLongName()const { return ossimString("Max Mosaic"); } ossimString ossimMaxMosaic::getDescription()const { return ossimString("Combiner which puts maximum dn value on image."); }
32.082734
101
0.581455
[ "vector" ]
61cb0f0ec592c236a6b1afe2ae3008357dd9baba
1,027
cpp
C++
topic_wise/stacksAndqueues/ContainerWithMostWater.cpp
archit-1997/LeetCode
7c0f74da0836d3b0855f09bae8960f81a384f3f3
[ "MIT" ]
1
2021-01-27T16:37:36.000Z
2021-01-27T16:37:36.000Z
topic_wise/stacksAndqueues/ContainerWithMostWater.cpp
archit-1997/LeetCode
7c0f74da0836d3b0855f09bae8960f81a384f3f3
[ "MIT" ]
null
null
null
topic_wise/stacksAndqueues/ContainerWithMostWater.cpp
archit-1997/LeetCode
7c0f74da0836d3b0855f09bae8960f81a384f3f3
[ "MIT" ]
null
null
null
/*11. Container With Most Water Medium 2520 393 Favorite Share Given n non-negative integers a1, a2, ..., an , where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water. Note: You may not slant the container and n is at least 2. The above vertical lines are represented by array [1,8,6,2,5,4,8,3,7]. In this case, the max area of water (blue section) the container can contain is 49. Example: Input: [1,8,6,2,5,4,8,3,7] Output: 49*/ class Solution { public: int maxArea(vector<int> &height) { int l = 0, r = height.size() - 1; int ans = INT_MIN; while (l < r) { int area = (r - l) * min(height[l], height[r]); ans = max(area, ans); if (height[l] < height[r]) l++; else r--; } return ans; } };
19.018519
81
0.596884
[ "vector" ]
61cbe75ba6ba40c0351a8e169ea97f5f1e0628ff
18,850
cc
C++
src/ServerTrackerTest.cc
taschik/ramcloud
6ef2e1cd61111995881d54bda6f9296b4777b928
[ "0BSD" ]
1
2016-01-18T12:41:28.000Z
2016-01-18T12:41:28.000Z
src/ServerTrackerTest.cc
taschik/ramcloud
6ef2e1cd61111995881d54bda6f9296b4777b928
[ "0BSD" ]
null
null
null
src/ServerTrackerTest.cc
taschik/ramcloud
6ef2e1cd61111995881d54bda6f9296b4777b928
[ "0BSD" ]
null
null
null
/* Copyright (c) 2011-2012 Stanford University * * Permission to use, copy, modify, and distribute this software for any purpose * with or without fee is hereby granted, provided that the above copyright * notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF * CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "TestUtil.h" #include "FailSession.h" #include "TestLog.h" #include "ServerList.h" #include "ServerListBuilder.h" #include "ServerTracker.h" #include "ShortMacros.h" namespace RAMCloud { struct CountCallback : public ServerTracker<int>::Callback { CountCallback() : callbacksFired() {} void trackerChangesEnqueued() { ++callbacksFired; } int callbacksFired; }; class ServerTrackerTest : public ::testing::Test { public: ServerTrackerTest() : context() , callback() , sl(&context) , tr(&context) , trcb(&context, &callback) { } Context context; CountCallback callback; ServerList sl; ServerTracker<int> tr; ServerTracker<int> trcb; DISALLOW_COPY_AND_ASSIGN(ServerTrackerTest); }; TEST_F(ServerTrackerTest, constructors) { EXPECT_EQ(0U, tr.serverList.size()); EXPECT_FALSE(tr.changes.hasChanges()); EXPECT_FALSE(tr.eventCallback); EXPECT_EQ(static_cast<uint32_t>(-1), tr.lastRemovedIndex); EXPECT_EQ(0U, trcb.serverList.size()); EXPECT_FALSE(trcb.changes.hasChanges()); EXPECT_TRUE(trcb.eventCallback); EXPECT_TRUE(&callback == trcb.eventCallback); EXPECT_EQ(static_cast<uint32_t>(-1), trcb.lastRemovedIndex); } TEST_F(ServerTrackerTest, destructor) { EXPECT_EQ(2U, sl.trackers.size()); ServerTracker<int>* tr2 = new 
ServerTracker<int>(&context); EXPECT_EQ(3U, sl.trackers.size()); delete tr2; EXPECT_EQ(2U, sl.trackers.size()); } TEST_F(ServerTrackerTest, enqueueChange) { EXPECT_EQ(0U, tr.serverList.size()); EXPECT_EQ(0U, tr.changes.changes.size()); tr.enqueueChange(ServerDetails(ServerId(2, 0), ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_EQ(0U, tr.serverList.size()); // No vector resize before getChange()! EXPECT_EQ(1U, tr.changes.changes.size()); // Ensure nothing was actually added to the lists. for (size_t i = 0; i < tr.serverList.size(); i++) { EXPECT_FALSE(tr.serverList[i].server.serverId.isValid()); EXPECT_TRUE(tr.serverList[i].pointer == NULL); } for (size_t i = 0; i < trcb.serverList.size(); i++) { EXPECT_FALSE(trcb.serverList[i].server.serverId.isValid()); EXPECT_TRUE(trcb.serverList[i].pointer == NULL); } } struct EnsureBothHaveChangesCallback : public ServerTracker<int>::Callback { typedef ServerTracker<int> Tr; EnsureBothHaveChangesCallback() : tr1() , tr2() , ok() {} void trackerChangesEnqueued() { if (!tr1 || !tr2) return; ServerChangeEvent event; ServerDetails details; // Make sure the tr2 callback hasn't been fired yet. EXPECT_EQ(0, static_cast<CountCallback*>(tr2->eventCallback)->callbacksFired); // Ensure that both trackers have the change enqueued. ok = tr1->getChange(details, event) && tr2->getChange(details, event); // Also asserted in the unit test to ensure the code gets run. 
EXPECT_TRUE(ok); } Tr* tr1; Tr* tr2; bool ok; DISALLOW_COPY_AND_ASSIGN(EnsureBothHaveChangesCallback); }; TEST_F(ServerTrackerTest, fireCallback) { callback.callbacksFired = 0; ProtoBuf::ServerList wholeList; ServerListBuilder{wholeList} ({WireFormat::MASTER_SERVICE}, *ServerId(1, 0), "mock:host=one", 101, 0) ({WireFormat::BACKUP_SERVICE}, *ServerId(2, 0), "mock:host=two", 102, 0, ServerStatus::CRASHED); wholeList.set_version_number(1u); sl.applyServerList(wholeList); EXPECT_EQ(1, callback.callbacksFired); ProtoBuf::ServerList update; ServerListBuilder{update} ({WireFormat::MASTER_SERVICE}, *ServerId(1, 5), "mock:host=oneBeta", 101); update.set_version_number(2); update.set_type(ProtoBuf::ServerList_Type_UPDATE); TestLog::Enable _; sl.applyServerList(update); EXPECT_EQ(2, callback.callbacksFired); ServerChangeEvent event; ServerDetails details; // Ensure that all trackers have changes enqueued // before any of the trackers receives notification. EnsureBothHaveChangesCallback orderCheckCb; ServerTracker<int> tr1(&context, &orderCheckCb); CountCallback countCb; ServerTracker<int> tr2(&context, &countCb); orderCheckCb.tr1 = &tr1; orderCheckCb.tr2 = &tr2; while (tr1.getChange(details, event)); // clear out both queues while (tr2.getChange(details, event)); countCb.callbacksFired = 0; ProtoBuf::ServerList update2; ServerListBuilder{update2} ({WireFormat::MASTER_SERVICE}, *ServerId(1, 6), "mock:host=oneBeta", 101); update2.set_version_number(3); update2.set_type(ProtoBuf::ServerList_Type_UPDATE); sl.applyServerList(update2); // Make sure the normal cb got called. EXPECT_EQ(1, countCb.callbacksFired); // Make sure the second cb got called. // If it returns true then that means tr1 and tr2 both had the add // enqueued even though only tr1 had its callback fired yet. 
EXPECT_TRUE(orderCheckCb.ok); } TEST_F(ServerTrackerTest, hasChanges) { EXPECT_FALSE(tr.hasChanges()); tr.enqueueChange(ServerDetails(ServerId(2, 0), ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_TRUE(tr.hasChanges()); } static bool getChangeFilter(string s) { return (s == "getChange"); } TEST_F(ServerTrackerTest, getChange) { TestLog::Enable _(&getChangeFilter); ServerDetails server; ServerChangeEvent event; // Add EXPECT_FALSE(tr.getChange(server, event)); EXPECT_EQ(0U, tr.serverList.size()); tr.enqueueChange(ServerDetails(ServerId(2, 0), "Prophylaxis", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_EQ(0U, tr.serverList.size()); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_EQ(3U, tr.serverList.size()); EXPECT_EQ(ServerId(2, 0), server.serverId); EXPECT_EQ("Prophylaxis", server.serviceLocator); EXPECT_TRUE(server.services.has(WireFormat::BACKUP_SERVICE)); EXPECT_FALSE(server.services.has(WireFormat::MASTER_SERVICE)); EXPECT_EQ(ServerChangeEvent::SERVER_ADDED, event); EXPECT_FALSE(tr.getChange(server, event)); EXPECT_EQ(ServerId(2, 0), tr.serverList[2].server.serverId); EXPECT_TRUE(tr.serverList[2].pointer == NULL); // Crashed tr.enqueueChange(ServerDetails(ServerId(2, 0), ServerStatus::CRASHED), ServerChangeEvent::SERVER_CRASHED); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_EQ(ServerId(2, 0), server.serverId); EXPECT_EQ(ServerChangeEvent::SERVER_CRASHED, event); EXPECT_EQ(ServerStatus::CRASHED, tr.getServerDetails({2, 0})->status); // Remove tr[ServerId(2, 0)] = reinterpret_cast<int*>(57); tr.enqueueChange(ServerDetails(ServerId(2, 0), ServerStatus::REMOVE), ServerChangeEvent::SERVER_REMOVED); EXPECT_EQ(reinterpret_cast<void*>(57), tr[ServerId(2, 0)]); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_EQ(ServerId(2, 0), server.serverId); EXPECT_EQ(ServerChangeEvent::SERVER_REMOVED, event); EXPECT_EQ(2U, tr.lastRemovedIndex); tr.testing_avoidGetChangeAssertion = true; 
EXPECT_FALSE(tr.getChange(server, event)); EXPECT_EQ("getChange: User of this ServerTracker did not NULL out previous " "pointer for index 2 (ServerId 2.0)!", TestLog::get()); EXPECT_FALSE(tr.serverList[2].server.serverId.isValid()); EXPECT_EQ("", tr.serverList[2].server.serviceLocator); EXPECT_EQ(0u, tr.serverList[2].server.services.serialize()); EXPECT_TRUE(tr.serverList[2].pointer == NULL); EXPECT_EQ(static_cast<uint32_t>(-1), tr.lastRemovedIndex); } TEST_F(ServerTrackerTest, getRandomServerIdWithService) { Logger::get().setLogLevels(SILENT_LOG_LEVEL); ServerDetails server; ServerChangeEvent event; EXPECT_FALSE(tr.getRandomServerIdWithService( WireFormat::MASTER_SERVICE).isValid()); tr.enqueueChange(ServerDetails(ServerId(0, 1), "", {WireFormat::MASTER_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_FALSE(tr.getRandomServerIdWithService( WireFormat::MASTER_SERVICE).isValid()); EXPECT_TRUE(tr.getChange(server, event)); for (int i = 0; i < 10; i++) { // Ensure asking for a specific service filters properly. // Should find one with low order bit set. EXPECT_EQ(ServerId(0, 1), tr.getRandomServerIdWithService(WireFormat::MASTER_SERVICE)); // No host available with this service bit set. EXPECT_EQ(ServerId(), tr.getRandomServerIdWithService(WireFormat::BACKUP_SERVICE)); } tr.enqueueChange(ServerDetails(ServerId(1, 1), "", {WireFormat::MASTER_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_TRUE(tr.getChange(server, event)); bool firstSeen = false; bool secondSeen = false; for (int i = 0; i < 100; i++) { ServerId id = tr.getRandomServerIdWithService( WireFormat::MASTER_SERVICE); EXPECT_TRUE(id == ServerId(0, 1) || id == ServerId(1, 1)); if (id == ServerId(0, 1)) firstSeen = true; if (id == ServerId(1, 1)) secondSeen = true; } EXPECT_TRUE(firstSeen); EXPECT_TRUE(secondSeen); // Ensure looping over empty list terminates. 
tr.enqueueChange(ServerDetails(ServerId(0, 1), ServerStatus::REMOVE), ServerChangeEvent::SERVER_REMOVED); tr.enqueueChange(ServerDetails(ServerId(1, 1), ServerStatus::REMOVE), ServerChangeEvent::SERVER_REMOVED); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_FALSE(tr.getRandomServerIdWithService( {WireFormat::MASTER_SERVICE}).isValid()); } TEST_F(ServerTrackerTest, getRandomServerIdWithService_evenDistribution) { Logger::get().setLogLevels(SILENT_LOG_LEVEL); ServerDetails server; ServerChangeEvent event; tr.enqueueChange(ServerDetails(ServerId(1, 0), "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); tr.enqueueChange(ServerDetails(ServerId(2, 0), "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); tr.enqueueChange(ServerDetails(ServerId(3, 0), "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_FALSE(tr.getChange(server, event)); ASSERT_EQ(3u, tr.size()); std::vector<uint32_t> counts(tr.size(), 0); for (int i = 0; i < 10000; ++i) { ServerId id = tr.getRandomServerIdWithService(WireFormat::BACKUP_SERVICE); counts[id.indexNumber() - 1]++; } // Check to make sure the most-significant digit is what we expect: // Each backup should be returned about 1/3 of the time (3333 times). 
foreach (uint32_t count, counts) { LOG(ERROR, "%u", count); EXPECT_EQ(3u, count / 1000); } } TEST_F(ServerTrackerTest, getLocator) { EXPECT_THROW(tr.getLocator(ServerId(1, 0)), Exception); tr.enqueueChange(ServerDetails(ServerId(1, 1), "mock:", {WireFormat::MASTER_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); ServerDetails server; ServerChangeEvent event; EXPECT_TRUE(tr.getChange(server, event)); EXPECT_THROW(tr.getLocator(ServerId(2, 0)), Exception); EXPECT_EQ("mock:", tr.getLocator(ServerId(1, 1))); } TEST_F(ServerTrackerTest, getServerDetails) { EXPECT_THROW(tr.getLocator(ServerId(1, 0)), Exception); ServerDetails details(ServerId(1, 1), "mock:", {WireFormat::MASTER_SERVICE}, 100, ServerStatus::UP); tr.enqueueChange(details, ServerChangeEvent::SERVER_ADDED); ServerDetails server; ServerChangeEvent event; EXPECT_TRUE(tr.getChange(server, event)); EXPECT_THROW(tr.getLocator(ServerId(2, 0)), Exception); EXPECT_EQ(details.services.serialize(), tr.getServerDetails(ServerId(1, 1))->services.serialize()); details.status = ServerStatus::CRASHED; tr.enqueueChange(details, ServerChangeEvent::SERVER_CRASHED); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_EQ(details.services.serialize(), tr.getServerDetails(ServerId(1, 1))->services.serialize()); EXPECT_EQ(details.status, tr.getServerDetails(ServerId(1, 1))->status); } TEST_F(ServerTrackerTest, indexOperator) { TestLog::Enable _; // suck up getChange WARNING ServerDetails server; ServerChangeEvent event; EXPECT_THROW(tr[ServerId(0, 0)], Exception); tr.enqueueChange(ServerDetails(ServerId(0, 0), ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_TRUE(tr.getChange(server, event)); tr[ServerId(0, 0)] = reinterpret_cast<int*>(45); EXPECT_THROW(tr[ServerId(0, 1)], Exception); EXPECT_EQ(reinterpret_cast<int*>(45), tr[ServerId(0, 0)]); EXPECT_THROW(tr[ServerId(0, 1)], Exception); tr.enqueueChange(ServerDetails(ServerId(0, 0), ServerStatus::REMOVE), ServerChangeEvent::SERVER_REMOVED); 
EXPECT_TRUE(tr.getChange(server, event)); EXPECT_NO_THROW(tr[ServerId(0, 0)]); EXPECT_NE(static_cast<int*>(NULL), tr.serverList[0].pointer); tr.testing_avoidGetChangeAssertion = true; EXPECT_FALSE(tr.getChange(server, event)); EXPECT_THROW(tr[ServerId(0, 0)], Exception); EXPECT_EQ(static_cast<int*>(NULL), tr.serverList[0].pointer); } TEST_F(ServerTrackerTest, size) { ServerDetails server; ServerChangeEvent event; EXPECT_EQ(0U, tr.size()); tr.enqueueChange(ServerDetails(ServerId(0, 0), ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); EXPECT_EQ(0U, tr.size()); tr.getChange(server, event); EXPECT_EQ(1U, tr.size()); tr.enqueueChange(ServerDetails(ServerId(0, 0), ServerStatus::REMOVE), ServerChangeEvent::SERVER_REMOVED); EXPECT_EQ(1U, tr.size()); tr.getChange(server, event); EXPECT_EQ(0U, tr.size()); } TEST_F(ServerTrackerTest, toString) { EXPECT_EQ("", tr.toString()); tr.enqueueChange(ServerDetails(ServerId(1, 0), "mock:", {WireFormat::MASTER_SERVICE}, 100, ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); ServerDetails server; ServerChangeEvent event; EXPECT_EQ("", tr.toString()); EXPECT_TRUE(tr.getChange(server, event)); EXPECT_EQ( "server 1.0 at mock: with MASTER_SERVICE is UP\n", tr.toString()); } TEST_F(ServerTrackerTest, getServersWithService) { tr.enqueueChange({{1, 0}, "", {WireFormat::MASTER_SERVICE}, 100, ServerStatus::UP}, ServerChangeEvent::SERVER_ADDED); tr.enqueueChange({{2, 0}, "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP}, ServerChangeEvent::SERVER_ADDED); tr.enqueueChange({{3, 0}, "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP}, ServerChangeEvent::SERVER_ADDED); tr.enqueueChange({{3, 0}, "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::CRASHED}, ServerChangeEvent::SERVER_CRASHED); tr.enqueueChange({{4, 0}, "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::UP}, ServerChangeEvent::SERVER_ADDED); tr.enqueueChange({{4, 0}, "", {WireFormat::BACKUP_SERVICE}, 100, ServerStatus::REMOVE}, ServerChangeEvent::SERVER_REMOVED); 
ServerDetails server; ServerChangeEvent event; while (tr.getChange(server, event)); auto servers = tr.getServersWithService(WireFormat::MASTER_SERVICE); ASSERT_EQ(1lu, servers.size()); EXPECT_EQ(ServerId(1, 0), servers[0]); servers = tr.getServersWithService(WireFormat::BACKUP_SERVICE); ASSERT_EQ(1lu, servers.size()); EXPECT_EQ(ServerId(2, 0), servers[0]); } TEST_F(ServerTrackerTest, ChangeQueue_addChange) { EXPECT_EQ(0U, tr.changes.changes.size()); auto details = ServerDetails(ServerId(5, 4), ServerStatus::UP); tr.changes.addChange(details, ServerChangeEvent::SERVER_ADDED); EXPECT_EQ(1U, tr.changes.changes.size()); EXPECT_EQ(ServerId(5, 4), tr.changes.changes.front().server.serverId); EXPECT_EQ(ServerChangeEvent::SERVER_ADDED, tr.changes.changes.front().event); } TEST_F(ServerTrackerTest, ChangeQueue_getChange) { EXPECT_THROW(tr.changes.getChange(), Exception); tr.changes.addChange(ServerDetails(ServerId(5, 4), ServerStatus::UP), ServerChangeEvent::SERVER_ADDED); ServerTracker<int>::ServerChange change = tr.changes.getChange(); EXPECT_EQ(0U, tr.changes.changes.size()); EXPECT_EQ(ServerId(5, 4), change.server.serverId); EXPECT_EQ(ServerChangeEvent::SERVER_ADDED, change.event); EXPECT_THROW(tr.changes.getChange(), Exception); } TEST_F(ServerTrackerTest, ChangeQueue_hasChanges) { } TEST_F(ServerTrackerTest, setParent) { EXPECT_EQ(&sl, tr.parent); tr.setParent(NULL); EXPECT_EQ(static_cast<AbstractServerList*>(NULL), tr.parent); tr.setParent(&sl); EXPECT_EQ(&sl, tr.parent); } } // namespace RAMCloud
39.026915
80
0.65931
[ "vector" ]
61cf7001f061e9f96157e387e42a7df34caa185d
8,858
hpp
C++
src/openvslam/util/KDTree.hpp
Patrixe/openvslam
8ec940dc4498e25bebd541939b8c25801f789f6f
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
1
2021-03-11T10:12:31.000Z
2021-03-11T10:12:31.000Z
src/openvslam/util/KDTree.hpp
Patrixe/openvslam
8ec940dc4498e25bebd541939b8c25801f789f6f
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
null
null
null
src/openvslam/util/KDTree.hpp
Patrixe/openvslam
8ec940dc4498e25bebd541939b8c25801f789f6f
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
1
2021-03-10T09:05:47.000Z
2021-03-10T09:05:47.000Z
#pragma once #include <vector> #include <numeric> #include <algorithm> #include <exception> #include <functional> namespace kdt { /** @brief k-d tree class. */ template <class PointT> class KDTree { public: /** @brief The constructors. */ KDTree() : root_(nullptr) {}; KDTree(const std::vector<PointT>& points) : root_(nullptr) { build(points); } /** @brief The destructor. */ ~KDTree() { clear(); } /** @brief Re-builds k-d tree. */ void build(const std::vector<PointT>& points) { clear(); points_ = points; std::vector<int> indices(points.size()); std::iota(std::begin(indices), std::end(indices), 0); root_ = buildRecursive(indices.data(), (int)points.size(), 0); } /** @brief Clears k-d tree. */ void clear() { clearRecursive(root_); root_ = nullptr; points_.clear(); } /** @brief Validates k-d tree. */ bool validate() const { try { validateRecursive(root_, 0); } catch (const Exception&) { return false; } return true; } /** @brief Searches the nearest neighbor. */ int nnSearch(const PointT& query, double* minDist = nullptr) const { int guess; double _minDist = std::numeric_limits<double>::max(); nnSearchRecursive(query, root_, &guess, &_minDist); if (minDist) *minDist = _minDist; return guess; } /** @brief Searches k-nearest neighbors. */ std::vector<int> knnSearch(const PointT& query, int k) const { KnnQueue queue(k); knnSearchRecursive(query, root_, queue, k); std::vector<int> indices(queue.size()); for (size_t i = 0; i < queue.size(); i++) indices[i] = queue[i].second; return indices; } /** @brief Searches neighbors within radius. */ std::vector<int> radiusSearch(const PointT& query, double radius) const { std::vector<int> indices; radiusSearchRecursive(query, root_, indices, radius); return indices; } private: /** @brief k-d tree node. */ struct Node { int idx; //!< index to the original point Node* next[2]; //!< pointers to the child nodes int axis; //!< dimension's axis Node() : idx(-1), axis(-1) { next[0] = next[1] = nullptr; } }; /** @brief k-d tree exception. 
*/ class Exception : public std::exception { using std::exception::exception; }; /** @brief Bounded priority queue. */ template <class T, class Compare = std::less<T>> class BoundedPriorityQueue { public: BoundedPriorityQueue() = delete; BoundedPriorityQueue(size_t bound) : bound_(bound) { elements_.reserve(bound + 1); }; void push(const T& val) { auto it = std::find_if(std::begin(elements_), std::end(elements_), [&](const T& element){ return Compare()(val, element); }); elements_.insert(it, val); if (elements_.size() > bound_) elements_.resize(bound_); } const T& back() const { return elements_.back(); }; const T& operator[](size_t index) const { return elements_[index]; } size_t size() const { return elements_.size(); } private: size_t bound_; std::vector<T> elements_; }; /** @brief Priority queue of <distance, index> pair. */ using KnnQueue = BoundedPriorityQueue<std::pair<double, int>>; /** @brief Builds k-d tree recursively. */ Node* buildRecursive(int* indices, int npoints, int depth) { if (npoints <= 0) return nullptr; const int axis = depth % PointT::DIM; const int mid = (npoints - 1) / 2; std::nth_element(indices, indices + mid, indices + npoints, [&](int lhs, int rhs) { return points_[lhs][axis] < points_[rhs][axis]; }); Node* node = new Node(); node->idx = indices[mid]; node->axis = axis; node->next[0] = buildRecursive(indices, mid, depth + 1); node->next[1] = buildRecursive(indices + mid + 1, npoints - mid - 1, depth + 1); return node; } /** @brief Clears k-d tree recursively. */ void clearRecursive(Node* node) { if (node == nullptr) return; if (node->next[0]) clearRecursive(node->next[0]); if (node->next[1]) clearRecursive(node->next[1]); delete node; } /** @brief Validates k-d tree recursively. 
*/ void validateRecursive(const Node* node, int depth) const { if (node == nullptr) return; const int axis = node->axis; const Node* node0 = node->next[0]; const Node* node1 = node->next[1]; if (node0 && node1) { if (points_[node->idx][axis] < points_[node0->idx][axis]) throw Exception(); if (points_[node->idx][axis] > points_[node1->idx][axis]) throw Exception(); } if (node0) validateRecursive(node0, depth + 1); if (node1) validateRecursive(node1, depth + 1); } static double distance(const PointT& p, const PointT& q) { double dist = 0; for (size_t i = 0; i < PointT::DIM; i++) dist += (p[i] - q[i]) * (p[i] - q[i]); return sqrt(dist); } /** @brief Searches the nearest neighbor recursively. */ void nnSearchRecursive(const PointT& query, const Node* node, int *guess, double *minDist) const { if (node == nullptr) return; const PointT& train = points_[node->idx]; const double dist = distance(query, train); if (dist < *minDist) { *minDist = dist; *guess = node->idx; } const int axis = node->axis; const int dir = query[axis] < train[axis] ? 0 : 1; nnSearchRecursive(query, node->next[dir], guess, minDist); const double diff = fabs(query[axis] - train[axis]); if (diff < *minDist) nnSearchRecursive(query, node->next[!dir], guess, minDist); } /** @brief Searches k-nearest neighbors recursively. */ void knnSearchRecursive(const PointT& query, const Node* node, KnnQueue& queue, int k) const { if (node == nullptr) return; const PointT& train = points_[node->idx]; const double dist = distance(query, train); queue.push(std::make_pair(dist, node->idx)); const int axis = node->axis; const int dir = query[axis] < train[axis] ? 0 : 1; knnSearchRecursive(query, node->next[dir], queue, k); const double diff = fabs(query[axis] - train[axis]); if ((int)queue.size() < k || diff < queue.back().first) knnSearchRecursive(query, node->next[!dir], queue, k); } /** @brief Searches neighbors within radius. 
*/ void radiusSearchRecursive(const PointT& query, const Node* node, std::vector<int>& indices, double radius) const { if (node == nullptr) return; const PointT& train = points_[node->idx]; const double dist = distance(query, train); if (dist < radius) indices.push_back(node->idx); const int axis = node->axis; const int dir = query[axis] < train[axis] ? 0 : 1; radiusSearchRecursive(query, node->next[dir], indices, radius); const double diff = fabs(query[axis] - train[axis]); if (diff < radius) radiusSearchRecursive(query, node->next[!dir], indices, radius); } Node* root_; //!< root node std::vector<PointT> points_; //!< points }; } // kdt
29.331126
121
0.489953
[ "vector" ]
61d8d6ac252bc0281c904aa3674d6d44c5188770
9,539
cpp
C++
MVJ_Engine_base/ModuleScene.cpp
expelthegrace/ThomasTheEngine
d570c9746725e3f8232753799cce90cdc47a4b48
[ "Unlicense" ]
null
null
null
MVJ_Engine_base/ModuleScene.cpp
expelthegrace/ThomasTheEngine
d570c9746725e3f8232753799cce90cdc47a4b48
[ "Unlicense" ]
null
null
null
MVJ_Engine_base/ModuleScene.cpp
expelthegrace/ThomasTheEngine
d570c9746725e3f8232753799cce90cdc47a4b48
[ "Unlicense" ]
null
null
null
#include "Brofiler.h" #include "ModuleScene.h" #include "GameObject.h" #include "Application.h" #include "ModuleRender.h" #include "ModuleMenu.h" #include <queue> #include "ComponentTransform.h" #include "ComponentBB.h" #include "ComponentCamera.h" #include "ComponentMaterial.h" #include <vector> #include "Quadtree.h" #include "JSONManager.h" #include <string> #include "debugdraw.h" #include "ModuleDebugDraw.h" using namespace std; ModuleScene::ModuleScene() { } ModuleScene::~ModuleScene() { } update_status ModuleScene::Update() { BROFILER_CATEGORY("Component Updates", Profiler::Color::Orchid); //if (App->input->mouse_buttons[SDL_BUTTON_LEFT - 1] == KEY_DOWN) mouseClick(App->input->mouse_position.x, App->input->mouse_position.y); if (App->input->keyboard[SDL_SCANCODE_DELETE]) DeleteSelected(); ROOT->Update(); if (showQuad) quadTree->Draw(); return UPDATE_CONTINUE; } void ModuleScene::NewGOSelected(GameObject* newGO) { if (GO_selected != nullptr) GO_selected->selected = false; GO_selected = newGO; newGO->SelectGO(true); } void ModuleScene::NewGameObject(char* name) { if (name == "") name = "Untittled"; GameObject* newGo = new GameObject(name, true, GO_selected); GO_selected->children.push_back(newGo); gameObjects[newGo->UID] = newGo; } GameObject* ModuleScene::CreateModel(char* name, GameObject* parent, char * path) { GameObject* GO = App->renderer->CreateModel(path); GO->name = name; GO->parent = parent; if (parent == nullptr) GO->parent = ROOT; GO->parent->children.push_back(GO); gameObjects[GO->UID] = GO; return GO; } void ModuleScene::DeleteSelected() { if (GO_selected != nullptr && GO_selected != ROOT) { quadTree->RemoveAndMerge(GO_selected); gameObjects.erase(GO_selected->UID); GO_selected->RemoveFromParent(); delete GO_selected; NewGOSelected(ROOT); } } void ModuleScene::DuplicateSelected() { if (GO_selected != nullptr && GO_selected != ROOT) { } } bool ModuleScene::Init() { ROOT = new GameObject("ROOT", true, nullptr); GO_selected = ROOT; float quadTreeSize 
= 20.0f * App->GameScale; quadTree = new Quadtree(nullptr, float3(-quadTreeSize), float3 (quadTreeSize), 5, 8); LoadScene(); // GameObject* casa1 = CreateModel("Casa1", ROOT, "BakerHouse.fbx"); // quadTree->Insert(casa1); // // //GameObject* casa2 = CreateModel("Casa2", ROOT, "BakerHouse.fbx"); // //quadTree->Insert(casa2); // // //GameObject* casa3 = CreateModel("Casa3", ROOT, "BakerHouse.fbx"); // //quadTree->Insert(casa3); // // /*GameObject* bunny1 = CreateModel("Bunny1", ROOT, "Assets/Zombunny.fbx"); // quadTree->Insert(bunny1); //*/ // // // GameObject* camObject = new GameObject("ObjectCamera", true, ROOT); // gameObjects[camObject->UID] = camObject; // ComponentCamera* camComp = new ComponentCamera(camObject); // camObject->AddComponent(camComp); // // GameObject* lightObject = new GameObject("ObjectLight", true, ROOT); // gameObjects[lightObject->UID] = lightObject; // lightObject->CreateComponent(LIGHT); return true; } void ModuleScene::DragInputManager(char * path) { string pathString = path; for (int i = 0; i < pathString.length(); i++) { if (pathString[i] == '\\') pathString[i] = '/'; } uint pos_slash = pathString.find_last_of('/'); uint pos_dot = pathString.find_last_of('.'); string extension = pathString.substr(pos_dot + 1); string name = pathString.substr(pos_slash + 1, pos_dot - pos_slash - 1); if (extension == "fbx" || extension == "FBX" || extension == "obj" || extension == "OBJ") { char * newName = new char[name.size() + 1]; strcpy(newName, name.c_str()); CreateModel(newName, App->scene->ROOT, path); } else if (extension == "png" || extension == "dds" || extension == "tga") { if (GO_selected->material != nullptr) GO_selected->material->LoadTexture(path); } } /**Find game object by name **/ GameObject* ModuleScene::FindByName(char * name) { std::queue<GameObject*> GOqueue; GOqueue.push(ROOT); while (!GOqueue.empty()) { GameObject* GOactual = GOqueue.front(); GOqueue.pop(); if (GOactual->name == name) return GOactual; for (int i = 0; i < 
GOactual->children.size(); ++i) GOqueue.push(GOactual->children[i]); } return nullptr; } GameObject* ModuleScene::getGOByID(unsigned uid) { GameObject* ret = gameObjects[uid]; if (ret == nullptr) { gameObjects.erase(uid); return ROOT; } return ret; } void ModuleScene::ClearScene() { quadTree->Clear(); for (int i = 0; i < ROOT->children.size(); ++i) RELEASE( ROOT->children[i]); ROOT->children.clear(); gameObjects.clear(); GO_selected = ROOT; mainCamera = nullptr; mainLight = nullptr; } void ModuleScene::mouseClick(int mouseXi, int mouseYi) { float mouseX = (float)mouseXi; float mouseY = (float)mouseYi; math::float2 viewportTopLeft(0.f, 0.f); //math::float2 viewportSize(App->camera->screenWidth - 2 * App->menu->columnWidth, App->camera->screenHeight - App->menu->consoleHeight - 20); math::float2 windowSize(App->camera->editorWidth, App->camera->editorHeight); float endpointX = App->menu->columnWidth + App->camera->screenWidth - 2 * App->menu->columnWidth; float endpointY = 20.0f + App->camera->screenHeight - App->menu->consoleHeight - 20; if (mouseX > App->menu->columnWidth && mouseX < (endpointX) && mouseY > 20.0f && mouseY < endpointY) { float sy, sx, ty, tx; /*sy = (1 + 1) / (-viewportSize.y); sx = (1 + 1) / (viewportSize.x); ty = (-(viewportSize.y + viewportTopLeft.y) - viewportTopLeft.y) / (-viewportSize.y); tx = (-(viewportSize.x + viewportTopLeft.x) - viewportTopLeft.x) / (viewportSize.x);*/ sy = (1 + 1) / (-windowSize.y); sx = (1 + 1) / (windowSize.x); ty = (-(windowSize.y + viewportTopLeft.y) - viewportTopLeft.y) / (-windowSize.y); tx = (-(windowSize.x + viewportTopLeft.x) - viewportTopLeft.x) / (windowSize.x); float normX = sx * (mouseX)+tx - 0.01; float normY = sy * (mouseY)+ty + 0.06; //float normX = -(1.0f - (float(mouseX - App->camera->editorWidth) * 2.0f) / sceneCamera->screenWidth); //float normY = 1.0f - (float(mouseY - App->renderer->sceneViewportY) * 2.0f) / sceneCamera->screenHeight; ray = App->camera->frustum.UnProjectLineSegment(normX, normY); 
//future implementation: make quadtree work fully and use it to make this algorithm more efficient //now we check collisions map<unsigned int, GameObject*>::iterator it; std::vector<GameObject*> collisions; for (it = gameObjects.begin(); it != gameObjects.end(); ++it) { if (it->second->BB->Aabb->IsFinite() && ray.Intersects(*(it->second->BB->Aabb))) { collisions.push_back(it->second); } } GameObject* picked = nullptr; if (collisions.size() > 0) picked = collisions[0]; for (int i = 0; i < collisions.size(); ++i) { picked = closestToCam(picked, collisions[i]); } //now that we have the picked object, make it selected if (picked != nullptr) { NewGOSelected(picked); } for (int i = 0; i < 10; ++i) { for (int j = 0; j < 10; ++j) { float newStartX = App->scene->ray.a.x + 0.001*i * App->GameScale; float newStartY = App->scene->ray.a.y + 0.001*j* App->GameScale; float newEndX = App->scene->ray.b.x + 0.001*i* App->GameScale; float newEndY = App->scene->ray.b.y + 0.001*j* App->GameScale; math:float3 newStart(newStartX, newStartY, App->scene->ray.a.z); math::float3 newEnd(newEndX, newEndY, App->scene->ray.b.z); dd::line(newStart, newEnd, math::float3(0.8, 0.3, 0.3)); } } } } GameObject* ModuleScene::closestToCam(GameObject* go1, GameObject* go2) { if (go1->BB->Aabb->Distance(App->camera->frustum.pos) < go2->BB->Aabb->Distance(App->camera->frustum.pos)) return go1; else return go2; } void ModuleScene::SaveScene() { JSON_File* scene = App->JSON_manager->openWriteFile(scenePath); JSON_Value* gameObjectsJSON = scene->createValue(); gameObjectsJSON->convertToArray(); for (int i = 0; i < ROOT->children.size(); ++i) ROOT->children[i]->Save(gameObjectsJSON); scene->addValue("GameObjects", gameObjectsJSON); //Scene Properties JSON_Value* sceneProperties = scene->createValue(); sceneProperties->addFloat("GameScale", App->GameScale); scene->addValue("Scene_properties", sceneProperties); scene->Write(); App->JSON_manager->closeFile(scene); } void ModuleScene::LoadScene() { ClearScene(); 
JSON_File* sceneJSON = App->JSON_manager->openReadFile(scenePath); if (sceneJSON == nullptr) { char* b = new char[50]; sprintf(b, "-- ERROR: %s not found, scene not loaded -- \n", scenePath); App->menu->console.AddLog(b); } else { JSON_Value* gameObjectsJSON = sceneJSON->getValue("GameObjects"); App->GameScale = sceneJSON->getValue("Scene_properties")->getFloat("GameScale"); App->GameScaleIni = App->GameScale; float quadTreeSize = 20.0f * App->GameScale; quadTree->Resize(float3(-quadTreeSize), float3(quadTreeSize)); if (gameObjectsJSON->getRapidJSONValue()->IsArray()) { for (int i = 0; i < gameObjectsJSON->getRapidJSONValue()->Size(); i++) { GameObject* GO = new GameObject(); unsigned UIDTemp = GO->Load(gameObjectsJSON->getValueFromArray(i)); gameObjects[UIDTemp] = GO; } for (std::map<unsigned, GameObject*>::iterator it = gameObjects.begin(); it != gameObjects.end(); ++it) { GameObject * parentAux = getGOByID(it->second->parentUID); it->second->parent = parentAux; parentAux->AddChild(it->second); } } App->camera->UpdateFrustum(); char* b = new char[50]; sprintf(b, "-- %s loaded --\n", App->scene->scenePath); App->menu->console.AddLog(b); } }
28.645646
143
0.676801
[ "object", "vector" ]
61dc66fc1e861f22d3591a8e430942b213168bd7
7,602
cpp
C++
sslpod/src/v20190605/model/DescribeDomainsRequest.cpp
suluner/tencentcloud-sdk-cpp
a56c73cc3f488c4d1e10755704107bb15c5e000d
[ "Apache-2.0" ]
null
null
null
sslpod/src/v20190605/model/DescribeDomainsRequest.cpp
suluner/tencentcloud-sdk-cpp
a56c73cc3f488c4d1e10755704107bb15c5e000d
[ "Apache-2.0" ]
null
null
null
sslpod/src/v20190605/model/DescribeDomainsRequest.cpp
suluner/tencentcloud-sdk-cpp
a56c73cc3f488c4d1e10755704107bb15c5e000d
[ "Apache-2.0" ]
null
null
null
/* * Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tencentcloud/sslpod/v20190605/model/DescribeDomainsRequest.h> #include <tencentcloud/core/utils/rapidjson/document.h> #include <tencentcloud/core/utils/rapidjson/writer.h> #include <tencentcloud/core/utils/rapidjson/stringbuffer.h> using namespace TencentCloud::Sslpod::V20190605::Model; using namespace std; DescribeDomainsRequest::DescribeDomainsRequest() : m_offsetHasBeenSet(false), m_limitHasBeenSet(false), m_searchTypeHasBeenSet(false), m_tagHasBeenSet(false), m_gradeHasBeenSet(false), m_brandHasBeenSet(false), m_codeHasBeenSet(false), m_hashHasBeenSet(false), m_itemHasBeenSet(false), m_statusHasBeenSet(false), m_domainHasBeenSet(false) { } string DescribeDomainsRequest::ToJsonString() const { rapidjson::Document d; d.SetObject(); rapidjson::Document::AllocatorType& allocator = d.GetAllocator(); if (m_offsetHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Offset"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, m_offset, allocator); } if (m_limitHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Limit"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, m_limit, allocator); } if (m_searchTypeHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "SearchType"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, 
rapidjson::Value(m_searchType.c_str(), allocator).Move(), allocator); } if (m_tagHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Tag"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_tag.c_str(), allocator).Move(), allocator); } if (m_gradeHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Grade"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_grade.c_str(), allocator).Move(), allocator); } if (m_brandHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Brand"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_brand.c_str(), allocator).Move(), allocator); } if (m_codeHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Code"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_code.c_str(), allocator).Move(), allocator); } if (m_hashHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Hash"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_hash.c_str(), allocator).Move(), allocator); } if (m_itemHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Item"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_item.c_str(), allocator).Move(), allocator); } if (m_statusHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Status"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_status.c_str(), allocator).Move(), allocator); } if (m_domainHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Domain"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_domain.c_str(), allocator).Move(), allocator); } rapidjson::StringBuffer buffer; rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); d.Accept(writer); return buffer.GetString(); } int64_t DescribeDomainsRequest::GetOffset() 
const { return m_offset; } void DescribeDomainsRequest::SetOffset(const int64_t& _offset) { m_offset = _offset; m_offsetHasBeenSet = true; } bool DescribeDomainsRequest::OffsetHasBeenSet() const { return m_offsetHasBeenSet; } int64_t DescribeDomainsRequest::GetLimit() const { return m_limit; } void DescribeDomainsRequest::SetLimit(const int64_t& _limit) { m_limit = _limit; m_limitHasBeenSet = true; } bool DescribeDomainsRequest::LimitHasBeenSet() const { return m_limitHasBeenSet; } string DescribeDomainsRequest::GetSearchType() const { return m_searchType; } void DescribeDomainsRequest::SetSearchType(const string& _searchType) { m_searchType = _searchType; m_searchTypeHasBeenSet = true; } bool DescribeDomainsRequest::SearchTypeHasBeenSet() const { return m_searchTypeHasBeenSet; } string DescribeDomainsRequest::GetTag() const { return m_tag; } void DescribeDomainsRequest::SetTag(const string& _tag) { m_tag = _tag; m_tagHasBeenSet = true; } bool DescribeDomainsRequest::TagHasBeenSet() const { return m_tagHasBeenSet; } string DescribeDomainsRequest::GetGrade() const { return m_grade; } void DescribeDomainsRequest::SetGrade(const string& _grade) { m_grade = _grade; m_gradeHasBeenSet = true; } bool DescribeDomainsRequest::GradeHasBeenSet() const { return m_gradeHasBeenSet; } string DescribeDomainsRequest::GetBrand() const { return m_brand; } void DescribeDomainsRequest::SetBrand(const string& _brand) { m_brand = _brand; m_brandHasBeenSet = true; } bool DescribeDomainsRequest::BrandHasBeenSet() const { return m_brandHasBeenSet; } string DescribeDomainsRequest::GetCode() const { return m_code; } void DescribeDomainsRequest::SetCode(const string& _code) { m_code = _code; m_codeHasBeenSet = true; } bool DescribeDomainsRequest::CodeHasBeenSet() const { return m_codeHasBeenSet; } string DescribeDomainsRequest::GetHash() const { return m_hash; } void DescribeDomainsRequest::SetHash(const string& _hash) { m_hash = _hash; m_hashHasBeenSet = true; } bool 
DescribeDomainsRequest::HashHasBeenSet() const { return m_hashHasBeenSet; } string DescribeDomainsRequest::GetItem() const { return m_item; } void DescribeDomainsRequest::SetItem(const string& _item) { m_item = _item; m_itemHasBeenSet = true; } bool DescribeDomainsRequest::ItemHasBeenSet() const { return m_itemHasBeenSet; } string DescribeDomainsRequest::GetStatus() const { return m_status; } void DescribeDomainsRequest::SetStatus(const string& _status) { m_status = _status; m_statusHasBeenSet = true; } bool DescribeDomainsRequest::StatusHasBeenSet() const { return m_statusHasBeenSet; } string DescribeDomainsRequest::GetDomain() const { return m_domain; } void DescribeDomainsRequest::SetDomain(const string& _domain) { m_domain = _domain; m_domainHasBeenSet = true; } bool DescribeDomainsRequest::DomainHasBeenSet() const { return m_domainHasBeenSet; }
23.75625
95
0.700342
[ "model" ]
61de417fdd3cf40e606a803b92f55419aeaf76aa
1,009
hpp
C++
goldfilter/vendor/v8pp/v8pp/factory.hpp
orinocoz/dripcap
096f464e8855da9882fbf0ec3294ff6d7e329dc9
[ "MIT" ]
5
2019-12-20T05:48:26.000Z
2021-10-13T12:32:50.000Z
paperfilter/vendor/v8pp/v8pp/factory.hpp
sbilly/dripcap
895af8cc8f2a0b1881df73f0a1df19f78c1c47d7
[ "MIT" ]
null
null
null
paperfilter/vendor/v8pp/v8pp/factory.hpp
sbilly/dripcap
895af8cc8f2a0b1881df73f0a1df19f78c1c47d7
[ "MIT" ]
2
2020-03-07T11:40:38.000Z
2022-01-24T22:37:40.000Z
// // Copyright (c) 2013-2016 Pavel Medvedev. All rights reserved. // // This file is part of v8pp (https://github.com/pmed/v8pp) project. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef V8PP_FACTORY_HPP_INCLUDED #define V8PP_FACTORY_HPP_INCLUDED #include <utility> #include <v8.h> namespace v8pp { // Factory that calls C++ constructor template<typename T> struct factory { static size_t const object_size = sizeof(T); template<typename ...Args> static T* create(v8::Isolate* isolate, Args... args) { T* object = new T(std::forward<Args>(args)...); isolate->AdjustAmountOfExternalAllocatedMemory(static_cast<int64_t>(object_size)); return object; } static void destroy(v8::Isolate* isolate, T* object) { delete object; isolate->AdjustAmountOfExternalAllocatedMemory(-static_cast<int64_t>(object_size)); } }; } //namespace v8pp #endif // V8PP_FACTORY_HPP_INCLUDED
24.02381
85
0.739346
[ "object" ]
61e318bcbdece515e35d38d2125b2f128c241fff
3,013
hpp
C++
include/graph/topological_sort.hpp
naskya/cp-library-cpp
e045f244060af4588a4a62cb150f07940002c448
[ "CC0-1.0" ]
null
null
null
include/graph/topological_sort.hpp
naskya/cp-library-cpp
e045f244060af4588a4a62cb150f07940002c448
[ "CC0-1.0" ]
null
null
null
include/graph/topological_sort.hpp
naskya/cp-library-cpp
e045f244060af4588a4a62cb150f07940002c448
[ "CC0-1.0" ]
null
null
null
//! @file topologocal_sort.hpp #ifndef CP_LIBRARY_TOPOLOGICAL_SORT_HPP #define CP_LIBRARY_TOPOLOGICAL_SORT_HPP #include <iostream> #include <queue> #include <type_traits> #include <vector> namespace lib { namespace internal::topological_sort_hpp { template <typename Comp> constexpr auto invert_compare_function = [](auto lhs, auto rhs) -> bool { static_assert(std::is_same_v<decltype(lhs), decltype(rhs)>); static_assert(std::is_same_v<decltype(Comp {}(rhs, lhs)), bool>); return Comp {}(rhs, lhs); }; template <typename Comp> [[nodiscard]] auto queue() { if constexpr (std::is_void_v<Comp>) { return std::queue<int>(); } else { return std::priority_queue<int, std::vector<int>, decltype(invert_compare_function<Comp>)>(invert_compare_function<Comp>); } } template <typename Container> [[nodiscard]] std::vector<int> in_degree(const Container& adjacency_list) { const int vertices = static_cast<int>(std::size(adjacency_list)); std::vector<int> res(vertices); for (int from = 0; from < vertices; ++from) { for (const auto to : adjacency_list[from]) { ++res[to]; } } return res; } } // namespace internal::topological_sort_hpp //! @brief Sort the vertices in the given directed graph in topological order. //! @tparam Comp Compare function (e.g. std::less<void>) //! @tparam Container Container type (deduced from parameter) //! @param adjacency_list Graph in the adjacency list format (i.e. adjacency_list[i] = {nodes adjacent to node i}) //! @return List of the vertices (std::vector<int>) sorted in topological order. //! @note If a compare function is specified, the result will be further sorted maintaining topological order. //! @note The length of the result will be less than the number of the vertices if the given graph has a cycle. //! @note time complexity: O(V + E) if a compare function is not specified //! 
@note time complexity: O(V log V + E) if a compare function is specified template <typename Comp = void, typename Container> [[nodiscard]] std::vector<int> topological_sort(const Container& adjacency_list) { const int vertices = static_cast<int>(std::size(adjacency_list)); std::vector<int> in_degree = internal::topological_sort_hpp::in_degree(adjacency_list); auto q = internal::topological_sort_hpp::queue<Comp>(); for (int i = 0; i < vertices; ++i) { if (in_degree[i] == 0) { q.emplace(i); } } std::vector<int> res; res.reserve(vertices); while (!q.empty()) { int from; if constexpr (std::is_void_v<Comp>) { from = q.front(); } else { from = q.top(); } q.pop(); res.emplace_back(from); for (const int to : adjacency_list[from]) { --in_degree[to]; if (in_degree[to] == 0) { q.emplace(to); } } } return res; } } // namespace lib #endif // CP_LIBRARY_TOPOLOGICAL_SORT_HPP
31.715789
114
0.65317
[ "vector" ]
61e5eb3f63a616befe426e0c30958d6626669923
6,800
cpp
C++
clients/client-ui/qt/src/client_ui_api.cpp
TGAC/grassroots-api
9692dcb428fc7a2a93b22a8510abff05deda1234
[ "Apache-2.0" ]
2
2017-12-30T14:39:48.000Z
2019-11-20T23:49:38.000Z
clients/client-ui/qt/src/client_ui_api.cpp
TGAC/grassroots-api
9692dcb428fc7a2a93b22a8510abff05deda1234
[ "Apache-2.0" ]
null
null
null
clients/client-ui/qt/src/client_ui_api.cpp
TGAC/grassroots-api
9692dcb428fc7a2a93b22a8510abff05deda1234
[ "Apache-2.0" ]
null
null
null
/* ** Copyright 2014-2016 The Earlham Institute ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. ** You may obtain a copy of the License at ** ** http://www.apache.org/licenses/LICENSE-2.0 ** ** Unless required by applicable law or agreed to in writing, software ** distributed under the License is distributed on an "AS IS" BASIS, ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ** See the License for the specific language governing permissions and ** limitations under the License. */ #include <QHBoxLayout> #include <QWidget> #include <QMessageBox> #include <QApplication> #include <QDialog> #include <QStyleFactory> #include <QDialogButtonBox> #include <QPushButton> #include "client_ui_api.h" #include "prefs_widget.h" #include "results_widget.h" #include "main_window.h" #include "memory_allocations.h" #include "string_utils.h" #include "results_window.h" #include "qt_client_data.h" #include "progress_window.h" #include "json_util.h" #include "viewer_widget.h" #ifdef _DEBUG #define CLIENT_UI_API_DEBUG (DEBUG_FINE) #else #define CLIENT_UI_API_DEBUG (DEBUG_NONE) #endif static int s_dummy_argc = 1; static QTClientData *AllocateQTClientData (void); static void FreeQTClientData (QTClientData *qt_data_p); static const char *GetQTClientName (ClientData *client_data_p); static const char *GetQTClientDescription (ClientData *client_data_p); static json_t *RunQTClient (ClientData *client_data_p); static int AddServiceToQTClient (ClientData *client_p, const char * const service_name_s, const char * const service_description_s, const char * const service_info_uri_s, const char * const service_icon_uri_s, const json_t * const provider_p, ParameterSet *params_p); static json_t *DisplayResultsInQTClient (ClientData *client_data_p, json_t *response_p); Client *GetClient (Connection *connection_p) { Client *client_p = NULL; QTClientData *data_p = AllocateQTClientData (); 
if (data_p) { client_p = (Client *) AllocMemory (sizeof (Client)); if (client_p) { InitialiseClient (client_p, GetQTClientName, GetQTClientDescription, RunQTClient, DisplayResultsInQTClient, AddServiceToQTClient, ReleaseClient, reinterpret_cast <ClientData *> (data_p), connection_p); } else { FreeMemory (data_p); } } return client_p; } bool ReleaseClient (Client *client_p) { QTClientData *qt_data_p = reinterpret_cast <QTClientData *> (client_p -> cl_data_p); FreeQTClientData (qt_data_p); FreeMemory (client_p); return true; } static QTClientData *AllocateQTClientData (void) { QTClientData *data_p = (QTClientData *) AllocMemory (sizeof (QTClientData)); if (data_p) { /* * Before Qt widgets can be created a valid QApplication * must be created. This requires a valid argc, argv pair * that remain in scope for the entire lifetime of the QApplication * object, In addition, argc must be greater than zero and argv must * contain at least one valid character string. */ data_p -> qcd_dummy_arg_s = CopyToNewString ("Grassroots Client", 0, false); if (data_p -> qcd_dummy_arg_s) { /* * Ubuntu 12.04 has some theme bugs with various styles giving messages such as * * (client:1574): Gtk-CRITICAL **: IA__gtk_widget_style_get: assertion * `GTK_IS_WIDGET (widget)' failed * * The solution is to use a theme that isn't broken on Ubuntu such as Plastique. 
*/ QStyle *style_p = QStyleFactory :: create ("Fusion"); QApplication :: setStyle (style_p); qDebug() << QStyleFactory::keys(); data_p -> qcd_app_p = new QApplication (s_dummy_argc, & (data_p -> qcd_dummy_arg_s)); data_p -> qcd_window_p = new MainWindow (data_p); data_p -> qcd_window_p -> setWindowIcon (QIcon ("images/cog")); QObject :: connect (data_p -> qcd_window_p, &MainWindow :: Closed, data_p -> qcd_app_p, &QApplication :: quit); data_p -> qcd_results_widgets_p = new QLinkedList <ResultsWindow *>; data_p -> qcd_progress_p = new ProgressWindow (data_p -> qcd_window_p, data_p); data_p -> qcd_viewer_widgets_p = new QLinkedList <ViewerWidget *>; data_p -> qcd_init_flag = false; } else { FreeMemory (data_p); data_p = NULL; } } return data_p; } static void FreeQTClientData (QTClientData *qt_data_p) { delete (qt_data_p -> qcd_window_p); FreeCopiedString (qt_data_p -> qcd_dummy_arg_s); while (! (qt_data_p -> qcd_viewer_widgets_p -> isEmpty ())) { ViewerWidget *widget_p = qt_data_p -> qcd_viewer_widgets_p -> first (); qt_data_p -> qcd_viewer_widgets_p -> removeFirst (); widget_p -> close (); delete widget_p; } delete (qt_data_p -> qcd_viewer_widgets_p); delete (qt_data_p -> qcd_progress_p); while (! (qt_data_p -> qcd_results_widgets_p -> isEmpty ())) { ResultsWindow *widget_p = qt_data_p -> qcd_results_widgets_p -> first (); qt_data_p -> qcd_results_widgets_p -> removeFirst (); widget_p -> close (); delete widget_p; } delete (qt_data_p -> qcd_results_widgets_p); delete (qt_data_p -> qcd_app_p); FreeMemory (qt_data_p); } static const char *GetQTClientName (ClientData *client_data_p) { return "Qt-based Grassroots client"; } static const char *GetQTClientDescription (ClientData *client_data_p) { return "A Qt-based Grassroots client user interface"; } static json_t *RunQTClient (ClientData *client_data_p) { QTClientData *qt_data_p = reinterpret_cast <QTClientData *> (client_data_p); json_t *res_p = NULL; qt_data_p -> qcd_window_p -> show (); if (! 
(qt_data_p -> qcd_init_flag)) { qt_data_p -> qcd_init_flag = true; int res = qt_data_p -> qcd_app_p -> exec (); } return res_p; } static int AddServiceToQTClient (ClientData *client_data_p, const char * const service_name_s, const char * const service_description_s, const char * const service_info_uri_s, const char * const service_icon_uri_s, const json_t * const provider_p, ParameterSet *params_p) { int res = 0; QTClientData *qt_data_p = reinterpret_cast <QTClientData *> (client_data_p); qt_data_p -> qcd_window_p -> CreateAndAddServicePage (service_name_s, service_description_s, service_info_uri_s, service_icon_uri_s, provider_p, params_p); return res; } static json_t *DisplayResultsInQTClient (ClientData *client_data_p, json_t *response_p) { json_t *res_p = NULL; #if CLIENT_UI_API_DEBUG >= DL_FINE PrintJSONToLog (STM_LEVEL_FINE, __FILE__, __LINE__, response_p, "response:\n"); #endif QTClientData *qt_data_p = reinterpret_cast <QTClientData *> (client_data_p); if (! (qt_data_p -> qcd_init_flag)) { qt_data_p -> qcd_init_flag = true; qt_data_p -> qcd_app_p -> exec (); } return res_p; }
27.755102
271
0.725294
[ "object" ]
61e6eb0f78f0ffec5b6121e89c5ea9741bb088ff
4,409
hpp
C++
src/libraries/core/db/dbRegistrator/dbRegistrator.hpp
MrAwesomeRocks/caelus-cml
55b6dc5ba47d0e95c07412d9446ac72ac11d7fd7
[ "mpich2" ]
null
null
null
src/libraries/core/db/dbRegistrator/dbRegistrator.hpp
MrAwesomeRocks/caelus-cml
55b6dc5ba47d0e95c07412d9446ac72ac11d7fd7
[ "mpich2" ]
null
null
null
src/libraries/core/db/dbRegistrator/dbRegistrator.hpp
MrAwesomeRocks/caelus-cml
55b6dc5ba47d0e95c07412d9446ac72ac11d7fd7
[ "mpich2" ]
null
null
null
/*---------------------------------------------------------------------------*\ Copyright (C) 2011 David L. F. Gaden Copyright (C) 2015-2018 Applied CCM ------------------------------------------------------------------------------- License This file is part of CAELUS. CAELUS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. CAELUS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with CAELUS. If not, see <http://www.gnu.org/licenses/>. Class CML::dbRegistrator Description A wrapper class that allows you to retrieve non-IOobjects through the object registry. Performs no input or output, simply for a const reference retrieval. Syntax: @verbatim // Your non-IOobject, e.g. Type = scalar Type myObject; // Register it to the object registry dbRegistrator<scalar> myIOobject ( IOobject ( "lookupName", instance, // can be anything [local,] // (optional) can be anything registry, // can be anything IOobject::NO_READ, // *must* be NO_READ IOobject::NO_WRITE // *must* be NO_WRITE ), myObject ); // (Now don't let myObject and myIOobject go out of scope) // In another part of your solver, e.g. in a custom boundary condition: Type const& myConstObject = registry.lookupObject<dbRegistrator<Type> > ( "lookupName" )(); // Now you have a local const reference to your non-IOobject @endverbatim SourceFiles dbRegistrator.C Author David L. F. 
Gaden Modifications Aleksandar Jemcov \*---------------------------------------------------------------------------*/ #ifndef dbRegistrator_HPP #define dbRegistrator_HPP #include "regIOobject.hpp" namespace CML { template <typename Type> class dbRegistrator : public regIOobject { Type const* typePtr_; public: // Construct from IOobject - pointer set to null dbRegistrator(IOobject const& io); // Construct from IOobject - pointer initialized dbRegistrator(IOobject const& io, Type const& reference); virtual ~dbRegistrator(); //- A "do nothing" writeData function, required by regIOobject virtual bool writeData(Ostream&) const; //- Return the pointer as a reference Type const& operator()() const; //- Set the pointer void set(Type const& reference); }; } template <typename Type> CML::dbRegistrator<Type>::dbRegistrator(IOobject const& io ) : regIOobject(io) { if ( (io.readOpt() != IOobject::NO_READ) || (io.writeOpt() != IOobject::NO_WRITE) ) { FatalErrorInFunction << "dbRegistrator can only be NO_READ, NO_WRITE." << abort(FatalError); } } template <typename Type> CML::dbRegistrator<Type>::dbRegistrator ( IOobject const& io, Type const& reference ) : regIOobject(io), typePtr_(& reference) { if ( (io.readOpt() != IOobject::NO_READ) || (io.writeOpt() != IOobject::NO_WRITE) ) { FatalErrorInFunction << "dbRegistrator can only be NO_READ, NO_WRITE." << abort(FatalError); } } template <typename Type> CML::dbRegistrator<Type>::~dbRegistrator() {} template<typename Type> bool CML::dbRegistrator<Type>::writeData(Ostream& os) const { // do nothing return os.good(); } template <typename Type> Type const& CML::dbRegistrator<Type>::operator()() const { if (!typePtr_) { FatalErrorInFunction << "Attempting to derefence a null typePtr - use dbRegistrator::set" << "first." << abort(FatalError); } return * typePtr_; } template <typename Type> void CML::dbRegistrator<Type>::set ( Type const& reference ) { typePtr_ = &reference; } #endif
24.631285
80
0.593785
[ "object" ]
61eec0b988abb0fa10c4082dbe548662c7521946
1,534
cpp
C++
code/geometry/halfplane.cpp
VerasThiago/icpc-notebook
e09e4f1cb34a21ae52a246c463f2130ee83c87d6
[ "MIT" ]
13
2019-04-28T14:18:10.000Z
2021-08-19T12:13:26.000Z
code/geometry/halfplane.cpp
raphasramos/competitive-programming
749b6726bd9d517d9143af7e9236d3e5e8cef49b
[ "MIT" ]
null
null
null
code/geometry/halfplane.cpp
raphasramos/competitive-programming
749b6726bd9d517d9143af7e9236d3e5e8cef49b
[ "MIT" ]
6
2019-07-31T02:47:36.000Z
2020-10-12T01:46:23.000Z
const double eps = 1e-8; typedef pair<long double, long double> pi; bool z(long double x){ return fabs(x) < eps; } struct line{ long double a, b, c; bool operator<(const line &l)const{ bool flag1 = pi(a, b) > pi(0, 0); bool flag2 = pi(l.a, l.b) > pi(0, 0); if(flag1 != flag2) return flag1 > flag2; long double t = ccw(pi(0, 0), pi(a, b), pi(l.a, l.b)); return z(t) ? c * hypot(l.a, l.b) < l.c * hypot(a, b) : t > 0; } pi slope(){ return pi(a, b); } }; pi cross(line a, line b){ long double det = a.a * b.b - b.a * a.b; return pi((a.c * b.b - a.b * b.c) / det, (a.a * b.c - a.c * b.a) / det); } bool bad(line a, line b, line c){ if(ccw(pi(0, 0), a.slope(), b.slope()) <= 0) return false; pi crs = cross(a, b); return crs.first * c.a + crs.second * c.b >= c.c; } bool solve(vector<line> v, vector<pi> &solution){ // ax + by <= c; sort(v.begin(), v.end()); deque<line> dq; for(auto &i : v){ if(!dq.empty() && z(ccw(pi(0, 0), dq.back().slope(), i.slope()))) continue; while(dq.size() >= 2 && bad(dq[dq.size()-2], dq.back(), i)) dq.pop_back(); while(dq.size() >= 2 && bad(i, dq[0], dq[1])) dq.pop_front(); dq.push_back(i); } while(dq.size() > 2 && bad(dq[dq.size()-2], dq.back(), dq[0])) dq.pop_back(); while(dq.size() > 2 && bad(dq.back(), dq[0], dq[1])) dq.pop_front(); vector<pi> tmp; for(int i=0; i<dq.size(); i++){ line cur = dq[i], nxt = dq[(i+1)%dq.size()]; if(ccw(pi(0, 0), cur.slope(), nxt.slope()) <= eps) return false; tmp.push_back(cross(cur, nxt)); } solution = tmp; return true; }
34.863636
78
0.553455
[ "vector" ]
61f7dc115acf65ce0f94be716e314daa1eff5706
12,299
cc
C++
src/systems/hydrodynamics/Hydrodynamics.cc
EricCousineau-TRI/ign-gazebo
4e40b36943ebdf02ed73874eb9b9630ec4771974
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/systems/hydrodynamics/Hydrodynamics.cc
EricCousineau-TRI/ign-gazebo
4e40b36943ebdf02ed73874eb9b9630ec4771974
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/systems/hydrodynamics/Hydrodynamics.cc
EricCousineau-TRI/ign-gazebo
4e40b36943ebdf02ed73874eb9b9630ec4771974
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
/* * Copyright (C) 2021 Open Source Robotics Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <string> #include <Eigen/Eigen> #include <ignition/plugin/Register.hh> #include "ignition/gazebo/components/AngularVelocity.hh" #include "ignition/gazebo/components/LinearVelocity.hh" #include "ignition/gazebo/components/Pose.hh" #include "ignition/gazebo/components/World.hh" #include "ignition/gazebo/Link.hh" #include "ignition/gazebo/Model.hh" #include "ignition/gazebo/System.hh" #include "ignition/gazebo/Util.hh" #include "Hydrodynamics.hh" using namespace ignition; using namespace gazebo; using namespace systems; /// \brief Private Hydrodynamics data class. class ignition::gazebo::systems::HydrodynamicsPrivateData { /// \brief Values to set via Plugin Parameters. /// Plugin Parameter: Added mass in surge, X_\dot{u}. public: double paramXdotU; /// \brief Plugin Parameter: Added mass in sway, Y_\dot{v}. public: double paramYdotV; /// \brief Plugin Parameter: Added mass in heave, Z_\dot{w}. public: double paramZdotW; /// \brief Plugin Parameter: Added mass in roll, K_\dot{p}. public: double paramKdotP; /// \brief Plugin Parameter: Added mass in pitch, M_\dot{q}. public: double paramMdotQ; /// \brief Plugin Parameter: Added mass in yaw, N_\dot{r}. public: double paramNdotR; /// \brief Plugin Parameter: Linear drag in surge. public: double paramXu; /// \brief Plugin Parameter: Quadratic drag in surge. 
public: double paramXuu; /// \brief Plugin Parameter: Linear drag in sway. public: double paramYv; /// \brief Plugin Parameter: Quadratic drag in sway. public: double paramYvv; /// \brief Plugin Parameter: Linear drag in heave. public: double paramZw; /// \brief Plugin Parameter: Quadratic drag in heave. public: double paramZww; /// \brief Plugin Parameter: Linear drag in roll. public: double paramKp; /// \brief Plugin Parameter: Quadratic drag in roll. public: double paramKpp; /// \brief Plugin Parameter: Linear drag in pitch. public: double paramMq; /// \brief Plugin Parameter: Quadratic drag in pitch. public: double paramMqq; /// \brief Plugin Parameter: Linear drag in yaw. public: double paramNr; /// \brief Plugin Parameter: Quadratic drag in yaw. public: double paramNrr; /// \brief Water density [kg/m^3]. public: double waterDensity; /// \brief Added mass of vehicle; /// See: https://en.wikipedia.org/wiki/Added_mass Eigen::MatrixXd Ma; /// \brief Previous state. public: Eigen::VectorXd prevState; /// Link entity public: ignition::gazebo::Entity linkEntity; }; ///////////////////////////////////////////////// void AddAngularVelocityComponent( const ignition::gazebo::Entity &_entity, ignition::gazebo::EntityComponentManager &_ecm) { if (!_ecm.Component<ignition::gazebo::components::AngularVelocity>(_entity)) { _ecm.CreateComponent(_entity, ignition::gazebo::components::AngularVelocity()); } // Create an angular velocity component if one is not present. 
if (!_ecm.Component<ignition::gazebo::components::WorldAngularVelocity>( _entity)) { _ecm.CreateComponent(_entity, ignition::gazebo::components::WorldAngularVelocity()); } } ///////////////////////////////////////////////// void AddWorldPose( const ignition::gazebo::Entity &_entity, ignition::gazebo::EntityComponentManager &_ecm) { if (!_ecm.Component<ignition::gazebo::components::WorldPose>(_entity)) { _ecm.CreateComponent(_entity, ignition::gazebo::components::WorldPose()); } } ///////////////////////////////////////////////// void AddWorldLinearVelocity( const ignition::gazebo::Entity &_entity, ignition::gazebo::EntityComponentManager &_ecm) { if (!_ecm.Component<ignition::gazebo::components::WorldLinearVelocity>( _entity)) { _ecm.CreateComponent(_entity, ignition::gazebo::components::WorldLinearVelocity()); } } ///////////////////////////////////////////////// double SdfParamDouble( const std::shared_ptr<const sdf::Element> &_sdf, const std::string& _field, double _default) { return _sdf->Get<double>(_field, _default).first; } ///////////////////////////////////////////////// Hydrodynamics::Hydrodynamics() { this->dataPtr = std::make_unique<HydrodynamicsPrivateData>(); } ///////////////////////////////////////////////// Hydrodynamics::~Hydrodynamics() { // Do nothing } ///////////////////////////////////////////////// void Hydrodynamics::Configure( const ignition::gazebo::Entity &_entity, const std::shared_ptr<const sdf::Element> &_sdf, ignition::gazebo::EntityComponentManager &_ecm, ignition::gazebo::EventManager &/*_eventMgr*/ ) { this->dataPtr->waterDensity = SdfParamDouble(_sdf, "waterDensity", 998); this->dataPtr->paramXdotU = SdfParamDouble(_sdf, "xDotU" , 5); this->dataPtr->paramYdotV = SdfParamDouble(_sdf, "yDotV" , 5); this->dataPtr->paramZdotW = SdfParamDouble(_sdf, "zDotW" , 0.1); this->dataPtr->paramKdotP = SdfParamDouble(_sdf, "kDotP" , 0.1); this->dataPtr->paramMdotQ = SdfParamDouble(_sdf, "mDotQ" , 0.1); this->dataPtr->paramNdotR = 
SdfParamDouble(_sdf, "nDotR" , 1); this->dataPtr->paramXu = SdfParamDouble(_sdf, "xU" , 20); this->dataPtr->paramXuu = SdfParamDouble(_sdf, "xUU" , 0); this->dataPtr->paramYv = SdfParamDouble(_sdf, "yV" , 20); this->dataPtr->paramYvv = SdfParamDouble(_sdf, "yVV" , 0); this->dataPtr->paramZw = SdfParamDouble(_sdf, "zW" , 20); this->dataPtr->paramZww = SdfParamDouble(_sdf, "zWW" , 0); this->dataPtr->paramKp = SdfParamDouble(_sdf, "kP" , 20); this->dataPtr->paramKpp = SdfParamDouble(_sdf, "kPP" , 0); this->dataPtr->paramMq = SdfParamDouble(_sdf, "mQ" , 20); this->dataPtr->paramMqq = SdfParamDouble(_sdf, "mQQ" , 0); this->dataPtr->paramNr = SdfParamDouble(_sdf, "nR" , 20); this->dataPtr->paramNrr = SdfParamDouble(_sdf, "nRR" , 0); // Create model object, to access convenient functions auto model = ignition::gazebo::Model(_entity); if(!_sdf->HasElement("link_name")) { ignerr << "You musk specify a <link_name> for the hydrodynamic" << " plugin to act upon"; return; } auto linkName = _sdf->Get<std::string>("link_name"); this->dataPtr->linkEntity = model.LinkByName(_ecm, linkName); if(!_ecm.HasEntity(this->dataPtr->linkEntity)) { ignerr << "Link name" << linkName << "does not exist"; return; } this->dataPtr->prevState = Eigen::VectorXd::Zero(6); AddWorldPose(this->dataPtr->linkEntity, _ecm); AddAngularVelocityComponent(this->dataPtr->linkEntity, _ecm); AddWorldLinearVelocity(this->dataPtr->linkEntity, _ecm); // Added mass according to Fossen's equations (p 37) this->dataPtr->Ma = Eigen::MatrixXd::Zero(6, 6); this->dataPtr->Ma(0, 0) = this->dataPtr->paramXdotU; this->dataPtr->Ma(1, 1) = this->dataPtr->paramYdotV; this->dataPtr->Ma(2, 2) = this->dataPtr->paramZdotW; this->dataPtr->Ma(3, 3) = this->dataPtr->paramKdotP; this->dataPtr->Ma(4, 4) = this->dataPtr->paramMdotQ; this->dataPtr->Ma(5, 5) = this->dataPtr->paramNdotR; } ///////////////////////////////////////////////// void Hydrodynamics::PreUpdate( const ignition::gazebo::UpdateInfo &_info, 
ignition::gazebo::EntityComponentManager &_ecm) { if (_info.paused) return; // These variables follow Fossen's scheme in "Guidance and Control // of Ocean Vehicles." The `state` vector contains the ship's current velocity // in the formate [x_vel, y_vel, z_vel, roll_vel, pitch_vel, yaw_vel]. // `stateDot` consists of the first derivative in time of the state vector. // `Cmat` corresponds to the Centripetal matrix // `Dmat` is the drag matrix // `Ma` is the added mass. Eigen::VectorXd stateDot = Eigen::VectorXd(6); Eigen::VectorXd state = Eigen::VectorXd(6); Eigen::MatrixXd Cmat = Eigen::MatrixXd::Zero(6, 6); Eigen::MatrixXd Dmat = Eigen::MatrixXd::Zero(6, 6); // Get vehicle state ignition::gazebo::Link baseLink(this->dataPtr->linkEntity); auto linearVelocity = _ecm.Component<components::WorldLinearVelocity>(this->dataPtr->linkEntity); auto rotationalVelocity = baseLink.WorldAngularVelocity(_ecm); if (!linearVelocity) { ignerr << "no linear vel" <<"\n"; return; } // Transform state to local frame auto pose = baseLink.WorldPose(_ecm); // Since we are transforming angular and linear velocity we only care about // rotation auto localLinearVelocity = pose->Rot().Inverse() * linearVelocity->Data(); auto localRotationalVelocity = pose->Rot().Inverse() * *rotationalVelocity; state(0) = localLinearVelocity.X(); state(1) = localLinearVelocity.Y(); state(2) = localLinearVelocity.Z(); state(3) = localRotationalVelocity.X(); state(4) = localRotationalVelocity.Y(); state(5) = localRotationalVelocity.Z(); auto dt = static_cast<double>(_info.dt.count())/1e9; stateDot = (state - this->dataPtr->prevState)/dt; this->dataPtr->prevState = state; // The added mass const Eigen::VectorXd kAmassVec = this->dataPtr->Ma * stateDot; // Coriolis and Centripetal forces for under water vehicles (Fossen P. 
37) // Note: this is significantly different from VRX because we need to account // for the under water vehicle's additional DOF Cmat(0, 4) = - this->dataPtr->paramZdotW * state(2); Cmat(0, 5) = - this->dataPtr->paramYdotV * state(1); Cmat(1, 3) = this->dataPtr->paramZdotW * state(2); Cmat(1, 5) = - this->dataPtr->paramXdotU * state(0); Cmat(2, 3) = - this->dataPtr->paramYdotV * state(1); Cmat(2, 4) = this->dataPtr->paramXdotU * state(0); Cmat(3, 1) = - this->dataPtr->paramZdotW * state(2); Cmat(3, 2) = this->dataPtr->paramYdotV * state(1); Cmat(3, 4) = - this->dataPtr->paramNdotR * state(5); Cmat(3, 5) = this->dataPtr->paramMdotQ * state(4); Cmat(4, 0) = this->dataPtr->paramZdotW * state(2); Cmat(4, 2) = - this->dataPtr->paramXdotU * state(0); Cmat(4, 3) = this->dataPtr->paramNdotR * state(5); Cmat(4, 5) = - this->dataPtr->paramKdotP * state(3); Cmat(5, 0) = this->dataPtr->paramZdotW * state(2); Cmat(5, 1) = this->dataPtr->paramXdotU * state(0); Cmat(5, 3) = - this->dataPtr->paramMdotQ * state(4); Cmat(5, 4) = this->dataPtr->paramKdotP * state(3); const Eigen::VectorXd kCmatVec = - Cmat * state; // Damping forces (Fossen P. 
43) Dmat(1, 1) = - this->dataPtr->paramYv - this->dataPtr->paramYvv * abs(state(1)); Dmat(0, 0) = - this->dataPtr->paramXu - this->dataPtr->paramXuu * abs(state(0)); Dmat(2, 2) = - this->dataPtr->paramZw - this->dataPtr->paramZww * abs(state(2)); Dmat(3, 3) = - this->dataPtr->paramKp - this->dataPtr->paramKpp * abs(state(3)); Dmat(4, 4) = - this->dataPtr->paramMq - this->dataPtr->paramMqq * abs(state(4)); Dmat(5, 5) = - this->dataPtr->paramNr - this->dataPtr->paramNrr * abs(state(5)); const Eigen::VectorXd kDvec = Dmat * state; const Eigen::VectorXd kTotalWrench = kAmassVec + kDvec + kCmatVec; ignition::math::Vector3d totalForce(-kTotalWrench(0), -kTotalWrench(1), -kTotalWrench(2)); ignition::math::Vector3d totalTorque(-kTotalWrench(3), -kTotalWrench(4), -kTotalWrench(5)); baseLink.AddWorldWrench( _ecm, pose->Rot()*(totalForce), pose->Rot()*totalTorque); } IGNITION_ADD_PLUGIN( Hydrodynamics, System, Hydrodynamics::ISystemConfigure, Hydrodynamics::ISystemPreUpdate ) IGNITION_ADD_PLUGIN_ALIAS( Hydrodynamics, "ignition::gazebo::systems::Hydrodynamics")
34.84136
80
0.65607
[ "object", "vector", "model", "transform" ]
61fcd9eae488d4894478c21f4cde5797db9e8416
6,068
cpp
C++
Classes/Utils/DataHandler.cpp
acros/CocosAmEditor
749f5125652eefa6087b67b5044afdc4091142b4
[ "MIT" ]
8
2017-02-06T08:24:02.000Z
2021-06-15T04:23:59.000Z
Classes/Utils/DataHandler.cpp
acros/CocosAmEditor
749f5125652eefa6087b67b5044afdc4091142b4
[ "MIT" ]
null
null
null
Classes/Utils/DataHandler.cpp
acros/CocosAmEditor
749f5125652eefa6087b67b5044afdc4091142b4
[ "MIT" ]
2
2017-08-05T08:32:59.000Z
2019-03-09T03:21:30.000Z
#include "DataHandler.h" #include "json/reader.h" #include "json/document.h" #include "json/writer.h" #include "json/stringbuffer.h" #include <fstream> USING_NS_CC; const float DataHandler::sFrameRate = 30.f; const std::string DataHandler::s_DefaultAnim = "Default Animation"; std::string DataHandler::_DataFileName = "config.json"; bool DataHandler::_InSerializing = false; ResourceDataList DataHandler::s_AnimFileData; ResourceDataList* DataHandler::deserializeFromFile(const std::string& filePath) { _DataFileName = filePath; std::string contentStr = FileUtils::getInstance()->getStringFromFile(filePath); rapidjson::Document doc; doc.Parse<0>(contentStr.c_str()); if ( doc.HasParseError() || !doc.HasMember("data")) return nullptr; s_AnimFileData.clear(); rapidjson::Value& na = doc["data"]; int nodeSize = na.Size(); for (int i = 0; i < nodeSize; ++i) { rapidjson::Value& nodeValue = na[i]; //Parse node context EntityData t; if (nodeValue.HasMember("model")){ t.modelFile = nodeValue["model"].GetString(); } if (nodeValue.HasMember("name")) { t.name = nodeValue["name"].GetString(); } if (nodeValue.HasMember("tex")){ t.texFile = nodeValue["tex"].GetString(); } if (nodeValue.HasMember("anim")){ t.animFile = nodeValue["anim"].GetString(); }else{ t.animFile = t.modelFile; } // if (nodeValue.HasMember("sec") && nodeValue["sec"].IsArray()){ // rapidjson::Value& secValue = nodeValue["sec"][0u]; // for (auto itr = secValue.MemberonBegin(); itr != secValue.MemberonEnd(); ++itr){ // ResourceData::AnimFrames secFrame; // secFrame.name = itr->name.GetString(); // if (itr->value.IsArray()){ // secFrame.start = itr->value[0u].GetInt(); // secFrame.end = itr->value[1].GetInt(); // } // t.animList.push_back(secFrame); // } // } if (nodeValue.HasMember("sec") && nodeValue["sec"].IsObject()){ rapidjson::Value& secValue = nodeValue["sec"]; for (auto itr = secValue.MemberBegin(); itr != secValue.MemberEnd(); ++itr){ EntityData::AnimFrames secFrame; secFrame.name = itr->name.GetString(); if 
(itr->value.IsArray()){ secFrame.start = itr->value[0u].GetInt(); secFrame.end = itr->value[1].GetInt(); } t.animList.push_back(secFrame); } } s_AnimFileData.push_back(t); } return &s_AnimFileData; } EntityData::AnimFrames* DataHandler::findAnim(const std::string& modelName, const std::string& animName) { auto itr = s_AnimFileData.begin(); for (; itr != s_AnimFileData.end(); ++itr) { if (itr->name == modelName) break; } if (itr != s_AnimFileData.end()) { for (auto animItr = itr->animList.begin(); animItr != itr->animList.end(); ++animItr){ if (animItr->name == animName) return &(*animItr); } } return nullptr; } EntityData* DataHandler::findViewDate(const std::string& modelName) { for (auto itr = s_AnimFileData.begin(); itr != s_AnimFileData.end(); ++itr) { if (itr->name == modelName) return &(*itr); } return nullptr; } EntityData* DataHandler::loadNewModel(const std::string& filePath, const std::string& animPath, const std::string& tex /*= ""*/) { EntityData newData; newData.name = filePath; FileUtils::getInstance()->addSearchPath("data/" + filePath); if (FileUtils::getInstance()->isFileExist(filePath)){ newData.modelFile = filePath; } else if (FileUtils::getInstance()->isFileExist(filePath + ".c3t")){ newData.modelFile = filePath + ".c3t"; } else if (FileUtils::getInstance()->isFileExist(filePath + ".c3b")){ newData.modelFile = filePath + ".c3b"; } if (!tex.empty()){ newData.texFile = tex; } if (newData.modelFile.empty()) return nullptr; if (animPath.empty()) newData.animFile = newData.modelFile; else newData.animFile = animPath; s_AnimFileData.push_back(newData); return &(s_AnimFileData.back()); } bool DataHandler::serializeToFile() { if (_InSerializing) return false; _InSerializing = true; ////////////////////////////////////////////////////////////////////////// //Serialize to json rapidjson::Document Doc; Doc.SetObject(); rapidjson::Document::AllocatorType& allocator = Doc.GetAllocator(); rapidjson::Value vElem(rapidjson::kArrayType); for (rapidjson::SizeType 
i = 0; i < s_AnimFileData.size(); ++i) { typedef rapidjson::Value::StringRefType rjStr; rapidjson::Value vElemItem(rapidjson::kObjectType); vElemItem.AddMember(rjStr("name"), rjStr(s_AnimFileData.at(i).name.c_str()), allocator); vElemItem.AddMember(rjStr("model"), rjStr(s_AnimFileData.at(i).modelFile.c_str()), allocator); if (!s_AnimFileData.at(i).texFile.empty()){ vElemItem.AddMember(rjStr("tex"), rjStr(s_AnimFileData.at(i).texFile.c_str()), allocator); } if (!s_AnimFileData.at(i).animFile.empty()){ vElemItem.AddMember(rjStr("anim"), rjStr(s_AnimFileData.at(i).modelFile.c_str()), allocator); } if (s_AnimFileData.at(i).animList.size() > 0){ rapidjson::Value vAnimElemItem(rapidjson::kObjectType); for (auto& animElement : s_AnimFileData.at(i).animList){ rapidjson::Value vAnimFrameSec(rapidjson::kArrayType); vAnimFrameSec.PushBack(animElement.start, allocator); vAnimFrameSec.PushBack(animElement.end, allocator); vAnimElemItem.AddMember(rjStr(animElement.name.c_str()), vAnimFrameSec, allocator); } vElemItem.AddMember("sec", vAnimElemItem, allocator); } vElem.PushBack(vElemItem, allocator); } Doc.AddMember("data", vElem, allocator); rapidjson::StringBuffer buffer; rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); Doc.Accept(writer); std::string strJson(buffer.GetString(), buffer.GetSize()); auto outputFile = FileUtils::getInstance()->fullPathForFilename(_DataFileName); // std::string filepath = (CCFileUtils::sharedFileUtils()->getWritablePath() + "test.json"); std::ofstream outfile; outfile.open(outputFile.c_str()); if (outfile.fail()) { return false; } outfile << strJson; outfile.close(); ////////////////////////////////////////////////////////////////////////// _InSerializing = false; return true; }
27.089286
128
0.671721
[ "model" ]
61fdbac6e1d2bbeaf0cd3ce14d6a691c7a5bf4ff
12,488
cpp
C++
libraries/SoftwareSerial/src/SoftwareSerial.cpp
cami/Arduino_Core_STM32
508e5975a3853e555d17c5698442ffcd1103e3e3
[ "Apache-2.0" ]
3
2019-08-31T14:44:38.000Z
2021-07-07T11:41:09.000Z
libraries/SoftwareSerial/src/SoftwareSerial.cpp
cami/Arduino_Core_STM32
508e5975a3853e555d17c5698442ffcd1103e3e3
[ "Apache-2.0" ]
1
2020-02-10T12:23:53.000Z
2020-02-11T00:48:17.000Z
libraries/SoftwareSerial/src/SoftwareSerial.cpp
cami/Arduino_Core_STM32
508e5975a3853e555d17c5698442ffcd1103e3e3
[ "Apache-2.0" ]
1
2019-08-31T14:44:50.000Z
2019-08-31T14:44:50.000Z
/* * SoftwareSerial.cpp (formerly NewSoftSerial.cpp) * * Multi-instance software serial library for Arduino/Wiring * -- Interrupt-driven receive and other improvements by ladyada * (http://ladyada.net) * -- Tuning, circular buffer, derivation from class Print/Stream, * multi-instance support, porting to 8MHz processors, * various optimizations, PROGMEM delay tables, inverse logic and * direct port writing by Mikal Hart (http://www.arduiniana.org) * -- Pin change interrupt macros by Paul Stoffregen (http://www.pjrc.com) * -- 20MHz processor support by Garrett Mace (http://www.macetech.com) * -- ATmega1280/2560 support by Brett Hagman (http://www.roguerobotics.com/) * -- STM32 support by Armin van der Togt * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * The latest version of this library can always be found at * http://arduiniana.org. */ // // Includes // #include "SoftwareSerial.h" #define OVERSAMPLE 3 // in RX, Timer will generate interruption OVERSAMPLE time during a bit. Thus OVERSAMPLE ticks in a bit. (interrupt not synchonized with edge). // defined in bit-periods #define HALFDUPLEX_SWITCH_DELAY 5 // It's best to define TIMER_SERIAL in variant.h. 
If not defined, we choose one here // The order is based on (lack of) features and compare channels, we choose the simplest available // because we only need an update interrupt #if !defined(TIMER_SERIAL) #if defined (TIM18_BASE) #define TIMER_SERIAL TIM18 #elif defined (TIM7_BASE) #define TIMER_SERIAL TIM7 #elif defined (TIM6_BASE) #define TIMER_SERIAL TIM6 #elif defined (TIM22_BASE) #define TIMER_SERIAL TIM22 #elif defined (TIM21_BASE) #define TIMER_SERIAL TIM21 #elif defined (TIM17_BASE) #define TIMER_SERIAL TIM17 #elif defined (TIM16_BASE) #define TIMER_SERIAL TIM16 #elif defined (TIM15_BASE) #define TIMER_SERIAL TIM15 #elif defined (TIM14_BASE) #define TIMER_SERIAL TIM14 #elif defined (TIM13_BASE) #define TIMER_SERIAL TIM13 #elif defined (TIM11_BASE) #define TIMER_SERIAL TIM11 #elif defined (TIM10_BASE) #define TIMER_SERIAL TIM10 #elif defined (TIM12_BASE) #define TIMER_SERIAL TIM12 #elif defined (TIM19_BASE) #define TIMER_SERIAL TIM19 #elif defined (TIM9_BASE) #define TIMER_SERIAL TIM9 #elif defined (TIM5_BASE) #define TIMER_SERIAL TIM5 #elif defined (TIM4_BASE) #define TIMER_SERIAL TIM4 #elif defined (TIM3_BASE) #define TIMER_SERIAL TIM3 #elif defined (TIM2_BASE) #define TIMER_SERIAL TIM2 #elif defined (TIM20_BASE) #define TIMER_SERIAL TIM20 #elif defined (TIM8_BASE) #define TIMER_SERIAL TIM8 #elif defined (TIM1_BASE) #define TIMER_SERIAL TIM1 #else #error No suitable timer found for SoftwareSerial, define TIMER_SERIAL in variant.h #endif #endif // // Statics // HardwareTimer SoftwareSerial::timer(TIMER_SERIAL); SoftwareSerial *SoftwareSerial::active_listener = nullptr; SoftwareSerial *volatile SoftwareSerial::active_out = nullptr; SoftwareSerial *volatile SoftwareSerial::active_in = nullptr; int32_t SoftwareSerial::tx_tick_cnt = 0; // OVERSAMPLE ticks needed for a bit int32_t volatile SoftwareSerial::rx_tick_cnt = 0; // OVERSAMPLE ticks needed for a bit uint32_t SoftwareSerial::tx_buffer = 0; int32_t SoftwareSerial::tx_bit_cnt = 0; uint32_t 
SoftwareSerial::rx_buffer = 0; int32_t SoftwareSerial::rx_bit_cnt = -1; // rx_bit_cnt = -1 : waiting for start bit uint32_t SoftwareSerial::cur_speed = 0; // // Private methods // void SoftwareSerial::setSpeed(uint32_t speed) { if (speed != cur_speed) { timer.pause(); if (speed != 0) { // Disable the timer uint32_t clock_rate, cmp_value; // Get timer clock clock_rate = timer.getTimerClkFreq(); int pre = 1; // Calculate prescale an compare value do { cmp_value = clock_rate / (speed * OVERSAMPLE); if (cmp_value >= UINT16_MAX) { clock_rate = clock_rate / 2; pre *= 2; } } while (cmp_value >= UINT16_MAX); timer.setPrescaleFactor(pre); timer.setOverflow(cmp_value); timer.setCount(0); timer.attachInterrupt(&handleInterrupt); timer.resume(); } else { timer.detachInterrupt(); } cur_speed = speed; } } // This function sets the current object as the "listening" // one and returns true if it replaces another bool SoftwareSerial::listen() { if (active_listener != this) { // wait for any transmit to complete as we may change speed while (active_out); active_listener->stopListening(); rx_tick_cnt = 1; // 1 : next interrupt will decrease rx_tick_cnt to 0 which means RX pin level will be considered. rx_bit_cnt = -1; // rx_bit_cnt = -1 : waiting for start bit setSpeed(_speed); active_listener = this; if (!_half_duplex) { active_in = this; } return true; } return false; } // Stop listening. Returns true if we were actually listening. 
bool SoftwareSerial::stopListening() { if (active_listener == this) { // wait for any output to complete while (active_out); if (_half_duplex) { setRXTX(false); } active_listener = nullptr; active_in = nullptr; // turn off ints setSpeed(0); return true; } return false; } inline void SoftwareSerial::setTX() { if (_inverse_logic) { LL_GPIO_ResetOutputPin(_transmitPinPort, _transmitPinNumber); } else { LL_GPIO_SetOutputPin(_transmitPinPort, _transmitPinNumber); } pinMode(_transmitPin, OUTPUT); } inline void SoftwareSerial::setRX() { pinMode(_receivePin, _inverse_logic ? INPUT_PULLDOWN : INPUT_PULLUP); // pullup for normal logic! } inline void SoftwareSerial::setRXTX(bool input) { if (_half_duplex) { if (input) { if (active_in != this) { setRX(); rx_bit_cnt = -1; // rx_bit_cnt = -1 : waiting for start bit rx_tick_cnt = 2; // 2 : next interrupt will be discarded. 2 interrupts required to consider RX pin level active_in = this; } } else { if (active_in == this) { setTX(); active_in = nullptr; } } } } inline void SoftwareSerial::send() { if (--tx_tick_cnt <= 0) { // if tx_tick_cnt > 0 interrupt is discarded. Only when tx_tick_cnt reach 0 we set TX pin. 
if (tx_bit_cnt++ < 10) { // tx_bit_cnt < 10 transmission is not fiisehed (10 = 1 start +8 bits + 1 stop) // send data (including start and stop bits) if (tx_buffer & 1) { LL_GPIO_SetOutputPin(_transmitPinPort, _transmitPinNumber); } else { LL_GPIO_ResetOutputPin(_transmitPinPort, _transmitPinNumber); } tx_buffer >>= 1; tx_tick_cnt = OVERSAMPLE; // Wait OVERSAMPLE tick to send next bit } else { // Transmission finished tx_tick_cnt = 1; if (_output_pending) { active_out = nullptr; // When in half-duplex mode, wait for HALFDUPLEX_SWITCH_DELAY bit-periods after the byte has // been transmitted before allowing the switch to RX mode } else if (tx_bit_cnt > 10 + OVERSAMPLE * HALFDUPLEX_SWITCH_DELAY) { if (_half_duplex && active_listener == this) { setRXTX(true); } active_out = nullptr; } } } } // // The receive routine called by the interrupt handler // inline void SoftwareSerial::recv() { if (--rx_tick_cnt <= 0) { // if rx_tick_cnt > 0 interrupt is discarded. Only when rx_tick_cnt reach 0 RX pin is considered bool inbit = LL_GPIO_IsInputPinSet(_receivePinPort, _receivePinNumber) ^ _inverse_logic; if (rx_bit_cnt == -1) { // rx_bit_cnt = -1 : waiting for start bit if (!inbit) { // got start bit rx_bit_cnt = 0; // rx_bit_cnt == 0 : start bit received rx_tick_cnt = OVERSAMPLE + 1; // Wait 1 bit (OVERSAMPLE ticks) + 1 tick in order to sample RX pin in the middle of the edge (and not too close to the edge) rx_buffer = 0; } else { rx_tick_cnt = 1; // Waiting for start bit, but we don't get right level. 
Wait for next Interrupt to ckech RX pin level } } else if (rx_bit_cnt >= 8) { // rx_bit_cnt >= 8 : waiting for stop bit if (inbit) { // stop bit read complete add to buffer uint8_t next = (_receive_buffer_tail + 1) % _SS_MAX_RX_BUFF; if (next != _receive_buffer_head) { // save new data in buffer: tail points to where byte goes _receive_buffer[_receive_buffer_tail] = rx_buffer; // save new byte _receive_buffer_tail = next; } else { // rx_bit_cnt = x with x = [0..7] correspond to new bit x received _buffer_overflow = true; } } // Full trame received. Resart wainting for sart bit at next interrupt rx_tick_cnt = 1; rx_bit_cnt = -1; } else { // data bits rx_buffer >>= 1; if (inbit) { rx_buffer |= 0x80; } rx_bit_cnt++; // Preprare for next bit rx_tick_cnt = OVERSAMPLE; // Wait OVERSAMPLE ticks before sampling next bit } } } // // Interrupt handling // /* static */ inline void SoftwareSerial::handleInterrupt(HardwareTimer *timer) { UNUSED(timer); if (active_in) { active_in->recv(); } if (active_out) { active_out->send(); } } // // Constructor // SoftwareSerial::SoftwareSerial(uint16_t receivePin, uint16_t transmitPin, bool inverse_logic /* = false */) : _receivePin(receivePin), _transmitPin(transmitPin), _receivePinPort(digitalPinToPort(receivePin)), _receivePinNumber(STM_LL_GPIO_PIN(digitalPinToPinName(receivePin))), _transmitPinPort(digitalPinToPort(transmitPin)), _transmitPinNumber(STM_LL_GPIO_PIN(digitalPinToPinName(transmitPin))), _speed(0), _buffer_overflow(false), _inverse_logic(inverse_logic), _half_duplex(receivePin == transmitPin), _output_pending(0), _receive_buffer_tail(0), _receive_buffer_head(0) { if ((receivePin < NUM_DIGITAL_PINS) || (transmitPin < NUM_DIGITAL_PINS)) { /* Enable GPIO clock for tx and rx pin*/ set_GPIO_Port_Clock(STM_PORT(digitalPinToPinName(transmitPin))); set_GPIO_Port_Clock(STM_PORT(digitalPinToPinName(receivePin))); } else { _Error_Handler("ERROR: invalid pin number\n", -1); } } // // Destructor // SoftwareSerial::~SoftwareSerial() { 
end(); } // // Public methods // void SoftwareSerial::begin(long speed) { #ifdef FORCE_BAUD_RATE speed = FORCE_BAUD_RATE; #endif _speed = speed; if (!_half_duplex) { setTX(); setRX(); listen(); } else { setTX(); } } void SoftwareSerial::end() { stopListening(); } // Read data from buffer int SoftwareSerial::read() { // Empty buffer? if (_receive_buffer_head == _receive_buffer_tail) { return -1; } // Read from "head" uint8_t d = _receive_buffer[_receive_buffer_head]; // grab next byte _receive_buffer_head = (_receive_buffer_head + 1) % _SS_MAX_RX_BUFF; return d; } int SoftwareSerial::available() { return (_receive_buffer_tail + _SS_MAX_RX_BUFF - _receive_buffer_head) % _SS_MAX_RX_BUFF; } size_t SoftwareSerial::write(uint8_t b) { // wait for previous transmit to complete _output_pending = 1; while (active_out) ; // add start and stop bits. tx_buffer = b << 1 | 0x200; if (_inverse_logic) { tx_buffer = ~tx_buffer; } tx_bit_cnt = 0; tx_tick_cnt = OVERSAMPLE; setSpeed(_speed); if (_half_duplex) { setRXTX(false); } _output_pending = 0; // make us active active_out = this; return 1; } void SoftwareSerial::flush() { noInterrupts(); _receive_buffer_head = _receive_buffer_tail = 0; interrupts(); } int SoftwareSerial::peek() { // Empty buffer? if (_receive_buffer_head == _receive_buffer_tail) { return -1; } // Read from "head" return _receive_buffer[_receive_buffer_head]; } void SoftwareSerial::setInterruptPriority(uint32_t preemptPriority, uint32_t subPriority) { timer.setInterruptPriority(preemptPriority, subPriority); }
29.383529
164
0.694907
[ "object" ]
11026602b01c7338ee98612ae451eb50865a08e3
10,188
cpp
C++
src/neural_network/layer/LayerFactory.cpp
MatthieuHernandez/StraightforwardNeuralNetwork
e0b99a80bb1b3f76dcb08134aa0f1bc3e6b705d7
[ "Apache-2.0" ]
14
2019-08-29T07:20:19.000Z
2022-03-22T12:51:02.000Z
src/neural_network/layer/LayerFactory.cpp
MatthieuHernandez/StraightforwardNeuralNetwork
e0b99a80bb1b3f76dcb08134aa0f1bc3e6b705d7
[ "Apache-2.0" ]
7
2020-08-07T11:08:45.000Z
2021-05-08T17:11:12.000Z
src/neural_network/layer/LayerFactory.cpp
MatthieuHernandez/StraightforwardNeuralNetwork
e0b99a80bb1b3f76dcb08134aa0f1bc3e6b705d7
[ "Apache-2.0" ]
3
2020-08-07T10:53:52.000Z
2021-02-16T22:13:22.000Z
#include "LayerFactory.hpp" #include "../../tools/ExtendedExpection.hpp" #include "FullyConnected.hpp" #include "Recurrence.hpp" #include "GruLayer.hpp" #include "Convolution1D.hpp" #include "Convolution2D.hpp" #include "LocallyConnected1D.hpp" #include "LocallyConnected2D.hpp" #include "MaxPooling1D.hpp" #include "MaxPooling2D.hpp" using namespace std; using namespace snn; using namespace internal; inline int computeNumberOfInputs(vector<int>& shapeOfInput) { int numberOfInputs = 1; for (auto size : shapeOfInput) numberOfInputs *= size; return numberOfInputs; } inline int computeNumberOfOutputsForMaxPooling1D(int sizeOfMatrix, vector<int>& shapeOfInput) { const int rest = shapeOfInput[0] % sizeOfMatrix == 0 ? 0 : 1; return ((shapeOfInput[0] / sizeOfMatrix) + rest); } inline int computeNumberOfOutputsForMaxPooling2D(int sizeOfMatrix, vector<int>& shapeOfInput) { const int restX = shapeOfInput[0] % sizeOfMatrix == 0 ? 0 : 1; const int restY = shapeOfInput[1] % sizeOfMatrix == 0 ? 0 : 1; return ((shapeOfInput[0] / sizeOfMatrix) + restX) * ((shapeOfInput[1] / sizeOfMatrix) + restY); } inline int computeNumberOfNeuronsForLocallyConnected1D(int numberOfLocallyConnected, int sizeOfLocalMatrix, vector<int>& shapeOfInput) { const int rest = shapeOfInput[0] % sizeOfLocalMatrix == 0 ? 0 : 1; return numberOfLocallyConnected * ((shapeOfInput[0] / sizeOfLocalMatrix) + rest); } inline int computeNumberOfNeuronsForLocallyConnected2D(int numberOfLocallyConnected, int sizeOfLocalMatrix, vector<int>& shapeOfInput) { const int restX = shapeOfInput[0] % sizeOfLocalMatrix == 0 ? 0 : 1; const int restY = shapeOfInput[1] % sizeOfLocalMatrix == 0 ? 
0 : 1; return numberOfLocallyConnected * ((shapeOfInput[0] / sizeOfLocalMatrix) + restX) * ((shapeOfInput[1] / sizeOfLocalMatrix) + restY); } inline int computeNumberOfNeuronsForConvolution1D(int numberOfConvolution, int sizeOfConvolutionMatrix, vector<int>& shapeOfInput) { return numberOfConvolution * (shapeOfInput[0] - (sizeOfConvolutionMatrix - 1)); } inline int computeNumberOfNeuronsForConvolution2D(int numberOfConvolution, int sizeOfConvolutionMatrix, vector<int>& shapeOfInput) { return numberOfConvolution * (shapeOfInput[0] - (sizeOfConvolutionMatrix - 1)) * (shapeOfInput[1] - ( sizeOfConvolutionMatrix - 1)); } inline unique_ptr<BaseLayer> LayerFactory::build(LayerModel& model, vector<int>& shapeOfInput, shared_ptr<NeuralNetworkOptimizer> optimizer) { model.numberOfInputs = computeNumberOfInputs(shapeOfInput); if (shapeOfInput.empty()) throw InvalidArchitectureException("Input of layer has size of 0."); if (model.numberOfInputs > 1000000) throw InvalidArchitectureException("Layer is too big."); switch (model.type) { case fullyConnected: if (model.numberOfInputs <= 0) throw InvalidArchitectureException("Input of layer has size of 0."); model.neuron.numberOfInputs = model.numberOfInputs; model.neuron.numberOfWeights = model.neuron.numberOfInputs; model.numberOfOutputs = model.numberOfNeurons; return make_unique<FullyConnected>(model, optimizer); case recurrence: model.neuron.numberOfInputs = model.numberOfInputs; model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1; model.numberOfOutputs = model.numberOfNeurons; return make_unique<Recurrence>(model, optimizer); case gruLayer: model.neuron.numberOfInputs = model.numberOfInputs; model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1; model.numberOfOutputs = model.numberOfNeurons; return make_unique<GruLayer>(model, optimizer); case maxPooling: if (shapeOfInput.size() == 1) { shapeOfInput.push_back(1); } if (shapeOfInput.size() == 2) { if (model.sizeOfFilerMatrix > shapeOfInput[0]) { throw 
InvalidArchitectureException("Matrix of max pooling layer is too big."); } model.shapeOfInput = shapeOfInput; model.numberOfOutputs = computeNumberOfOutputsForMaxPooling1D(model.sizeOfFilerMatrix, model.shapeOfInput); return make_unique<MaxPooling1D>(model); } if (shapeOfInput.size() == 3) { if (model.sizeOfFilerMatrix > shapeOfInput[0] || model.sizeOfFilerMatrix > shapeOfInput[1]) { throw InvalidArchitectureException("Matrix of max pooling layer is too big."); } model.shapeOfInput = shapeOfInput; model.numberOfOutputs = computeNumberOfOutputsForMaxPooling2D(model.sizeOfFilerMatrix, model.shapeOfInput); return make_unique<MaxPooling2D>(model); } if (shapeOfInput.size() > 3) throw InvalidArchitectureException("Input with 3 dimensions or higher is not managed."); break; case locallyConnected: if (shapeOfInput.size() == 1) { shapeOfInput.push_back(1); } if (shapeOfInput.size() == 2) { if (model.sizeOfFilerMatrix > shapeOfInput[0]) { throw InvalidArchitectureException("Matrix of locally connected layer is too big."); } model.shapeOfInput = shapeOfInput; model.numberOfNeurons = computeNumberOfNeuronsForLocallyConnected1D( model.numberOfFilters, model.sizeOfFilerMatrix, model.shapeOfInput); model.neuron.numberOfInputs = model.sizeOfFilerMatrix * model.shapeOfInput[1]; model.neuron.numberOfWeights = model.neuron.numberOfInputs; model.numberOfOutputs = model.numberOfNeurons; return make_unique<LocallyConnected1D>(model, optimizer); } if (shapeOfInput.size() == 3) { if (model.sizeOfFilerMatrix > shapeOfInput[0] || model.sizeOfFilerMatrix > shapeOfInput[1]) { throw InvalidArchitectureException("Matrix of locally connected layer is too big."); } model.shapeOfInput = shapeOfInput; model.numberOfNeurons = computeNumberOfNeuronsForLocallyConnected2D( model.numberOfFilters, model.sizeOfFilerMatrix, model.shapeOfInput); model.neuron.numberOfInputs = model.sizeOfFilerMatrix * model.sizeOfFilerMatrix * model.shapeOfInput[2]; model.neuron.numberOfWeights = 
model.neuron.numberOfInputs; model.numberOfOutputs = model.numberOfNeurons; return make_unique<LocallyConnected2D>(model, optimizer); } if (shapeOfInput.size() > 3) throw InvalidArchitectureException("Input with 3 dimensions or higher is not managed."); break; case convolution: if (shapeOfInput.size() == 1) { shapeOfInput.push_back(1); } if (shapeOfInput.size() == 2) { if (model.sizeOfFilerMatrix > shapeOfInput[0]) { throw InvalidArchitectureException("Convolution matrix is too big."); } model.shapeOfInput = shapeOfInput; model.numberOfNeurons = computeNumberOfNeuronsForConvolution1D( model.numberOfFilters, model.sizeOfFilerMatrix, model.shapeOfInput); model.neuron.numberOfInputs = model.sizeOfFilerMatrix * model.shapeOfInput[1]; model.neuron.numberOfWeights = model.neuron.numberOfInputs; model.numberOfOutputs = model.numberOfNeurons; return make_unique<Convolution1D>(model, optimizer); } if (shapeOfInput.size() == 3) { if (model.sizeOfFilerMatrix > shapeOfInput[0] || model.sizeOfFilerMatrix > shapeOfInput[1]) { throw InvalidArchitectureException("Convolution matrix is too big."); } model.shapeOfInput = shapeOfInput; model.numberOfNeurons = computeNumberOfNeuronsForConvolution2D( model.numberOfFilters, model.sizeOfFilerMatrix, model.shapeOfInput); model.neuron.numberOfInputs = model.sizeOfFilerMatrix * model.sizeOfFilerMatrix * model.shapeOfInput[2]; model.neuron.numberOfWeights = model.neuron.numberOfInputs; model.numberOfOutputs = model.numberOfNeurons; return make_unique<Convolution2D>(model, optimizer); } if (shapeOfInput.size() > 3) throw InvalidArchitectureException("Input with 3 dimensions or higher is not managed."); break; case input: throw InvalidArchitectureException("Input LayerModel should be in first position."); default: throw InvalidArchitectureException("Layer type is not implemented."); } throw InvalidArchitectureException("The layer factory fail to build layer."); } void LayerFactory::build(vector<unique_ptr<BaseLayer>>& layers, 
vector<LayerModel>& models, shared_ptr<NeuralNetworkOptimizer> optimizer) { if (models.size() > 1000) throw InvalidArchitectureException("Too much layers."); if (models.empty() || models[0].type != input) throw InvalidArchitectureException("First LayerModel must be a Input type LayerModel."); if (models.size() < 2) throw InvalidArchitectureException("Neural Network must have at least 1 layer."); int numberOfInputs = 1; for (auto size : models[0].shapeOfInput) numberOfInputs *= size; if (numberOfInputs > 2073600) throw InvalidArchitectureException("Layer is too big."); auto& currentShapeOfInput = models[0].shapeOfInput; for (size_t i = 1; i < models.size(); ++i) { layers.push_back(build(models[i], currentShapeOfInput, optimizer)); currentShapeOfInput = layers.back()->getShapeOfOutput(); } }
40.110236
119
0.655575
[ "vector", "model" ]
1104e0a2111445592eaa1660ff57ad07aa1f8f17
247,892
cpp
C++
javaStructures/jdk-master/src/hotspot/share/classfile/classFileParser.cpp
IThawk/learnCode
0ac843d28b193eaab33fb33692f18361d71c7331
[ "MIT" ]
1
2020-12-26T04:52:15.000Z
2020-12-26T04:52:15.000Z
javaStructures/jdk-master/src/hotspot/share/classfile/classFileParser.cpp
IThawk/learnCode
0ac843d28b193eaab33fb33692f18361d71c7331
[ "MIT" ]
1
2020-12-26T04:57:19.000Z
2020-12-26T04:57:19.000Z
javaStructures/jdk-master/src/hotspot/share/classfile/classFileParser.cpp
IThawk/learnCode
0ac843d28b193eaab33fb33692f18361d71c7331
[ "MIT" ]
1
2021-12-06T01:13:18.000Z
2021-12-06T01:13:18.000Z
/* * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
* */ #include "precompiled.hpp" #include "jvm.h" #include "classfile/classFileParser.hpp" #include "classfile/classFileStream.hpp" #include "classfile/classLoader.hpp" #include "classfile/classLoaderData.inline.hpp" #include "classfile/classLoadInfo.hpp" #include "classfile/defaultMethods.hpp" #include "classfile/fieldLayoutBuilder.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/moduleEntry.hpp" #include "classfile/packageEntry.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/verificationType.hpp" #include "classfile/verifier.hpp" #include "classfile/vmClasses.hpp" #include "classfile/vmSymbols.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/allocation.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/annotations.hpp" #include "oops/constantPool.inline.hpp" #include "oops/fieldStreams.inline.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/klassVtable.hpp" #include "oops/metadata.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/recordComponent.hpp" #include "oops/symbol.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/os.hpp" #include "runtime/perfData.hpp" #include "runtime/reflection.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/signature.hpp" #include "runtime/timer.hpp" #include "services/classLoadingService.hpp" #include "services/threadService.hpp" #include "utilities/align.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/copy.hpp" #include "utilities/formatBuffer.hpp" #include 
"utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" #include "utilities/resourceHash.hpp" #include "utilities/utf8.hpp" #if INCLUDE_CDS #include "classfile/systemDictionaryShared.hpp" #endif #if INCLUDE_JFR #include "jfr/support/jfrTraceIdExtension.hpp" #endif // We generally try to create the oops directly when parsing, rather than // allocating temporary data structures and copying the bytes twice. A // temporary area is only needed when parsing utf8 entries in the constant // pool and when parsing line number tables. // We add assert in debug mode when class format is not checked. #define JAVA_CLASSFILE_MAGIC 0xCAFEBABE #define JAVA_MIN_SUPPORTED_VERSION 45 #define JAVA_PREVIEW_MINOR_VERSION 65535 // Used for two backward compatibility reasons: // - to check for new additions to the class file format in JDK1.5 // - to check for bug fixes in the format checker in JDK1.5 #define JAVA_1_5_VERSION 49 // Used for backward compatibility reasons: // - to check for javac bug fixes that happened after 1.5 // - also used as the max version when running in jdk6 #define JAVA_6_VERSION 50 // Used for backward compatibility reasons: // - to disallow argument and require ACC_STATIC for <clinit> methods #define JAVA_7_VERSION 51 // Extension method support. 
#define JAVA_8_VERSION 52 #define JAVA_9_VERSION 53 #define JAVA_10_VERSION 54 #define JAVA_11_VERSION 55 #define JAVA_12_VERSION 56 #define JAVA_13_VERSION 57 #define JAVA_14_VERSION 58 #define JAVA_15_VERSION 59 #define JAVA_16_VERSION 60 #define JAVA_17_VERSION 61 #define JAVA_18_VERSION 62 void ClassFileParser::set_class_bad_constant_seen(short bad_constant) { assert((bad_constant == JVM_CONSTANT_Module || bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION, "Unexpected bad constant pool entry"); if (_bad_constant_seen == 0) _bad_constant_seen = bad_constant; } void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const stream, ConstantPool* cp, const int length, TRAPS) { assert(stream != NULL, "invariant"); assert(cp != NULL, "invariant"); // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize // this function (_current can be allocated in a register, with scalar // replacement of aggregates). The _current pointer is copied back to // stream() when this function returns. DON'T call another method within // this method that uses stream(). const ClassFileStream cfs1 = *stream; const ClassFileStream* const cfs = &cfs1; assert(cfs->allocated_on_stack(), "should be local"); debug_only(const u1* const old_current = stream->current();) // Used for batching symbol allocations. const char* names[SymbolTable::symbol_alloc_batch_size]; int lengths[SymbolTable::symbol_alloc_batch_size]; int indices[SymbolTable::symbol_alloc_batch_size]; unsigned int hashValues[SymbolTable::symbol_alloc_batch_size]; int names_count = 0; // parsing Index 0 is unused for (int index = 1; index < length; index++) { // Each of the following case guarantees one more byte in the stream // for the following tag or the access_flags following constant pool, // so we don't need bounds-check for reading tag. 
    // Read the tag byte and dispatch; each case first calls guarantee_more()
    // for its fixed-size payload, then reads it with the *_fast accessors.
    const u1 tag = cfs->get_u1_fast();
    switch (tag) {
      case JVM_CONSTANT_Class : {
        cfs->guarantee_more(3, CHECK);  // name_index, tag/access_flags
        const u2 name_index = cfs->get_u2_fast();
        cp->klass_index_at_put(index, name_index);
        break;
      }
      case JVM_CONSTANT_Fieldref: {
        cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
        const u2 class_index = cfs->get_u2_fast();
        const u2 name_and_type_index = cfs->get_u2_fast();
        cp->field_at_put(index, class_index, name_and_type_index);
        break;
      }
      case JVM_CONSTANT_Methodref: {
        cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
        const u2 class_index = cfs->get_u2_fast();
        const u2 name_and_type_index = cfs->get_u2_fast();
        cp->method_at_put(index, class_index, name_and_type_index);
        break;
      }
      case JVM_CONSTANT_InterfaceMethodref: {
        cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
        const u2 class_index = cfs->get_u2_fast();
        const u2 name_and_type_index = cfs->get_u2_fast();
        cp->interface_method_at_put(index, class_index, name_and_type_index);
        break;
      }
      case JVM_CONSTANT_String : {
        cfs->guarantee_more(3, CHECK);  // string_index, tag/access_flags
        const u2 string_index = cfs->get_u2_fast();
        cp->string_index_at_put(index, string_index);
        break;
      }
      case JVM_CONSTANT_MethodHandle :
      case JVM_CONSTANT_MethodType: {
        // These tags are only legal at or above the invokedynamic class file version.
        if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
          classfile_parse_error(
            "Class file version does not support constant tag %u in class file %s",
            tag, THREAD);
          return;
        }
        if (tag == JVM_CONSTANT_MethodHandle) {
          cfs->guarantee_more(4, CHECK);  // ref_kind, method_index, tag/access_flags
          const u1 ref_kind = cfs->get_u1_fast();
          const u2 method_index = cfs->get_u2_fast();
          cp->method_handle_index_at_put(index, ref_kind, method_index);
        } else if (tag == JVM_CONSTANT_MethodType) {
          cfs->guarantee_more(3, CHECK);  // signature_index, tag/access_flags
          const u2 signature_index = cfs->get_u2_fast();
          cp->method_type_index_at_put(index, signature_index);
        } else {
          ShouldNotReachHere();
        }
        break;
      }
      case JVM_CONSTANT_Dynamic : {
        if (_major_version < Verifier::DYNAMICCONSTANT_MAJOR_VERSION) {
          classfile_parse_error(
            "Class file version does not support constant tag %u in class file %s",
            tag, THREAD);
          return;
        }
        cfs->guarantee_more(5, CHECK);  // bsm_index, nt, tag/access_flags
        const u2 bootstrap_specifier_index = cfs->get_u2_fast();
        const u2 name_and_type_index = cfs->get_u2_fast();
        if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) {
          _max_bootstrap_specifier_index = (int) bootstrap_specifier_index;  // collect for later
        }
        cp->dynamic_constant_at_put(index, bootstrap_specifier_index, name_and_type_index);
        break;
      }
      case JVM_CONSTANT_InvokeDynamic : {
        if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
          classfile_parse_error(
            "Class file version does not support constant tag %u in class file %s",
            tag, THREAD);
          return;
        }
        cfs->guarantee_more(5, CHECK);  // bsm_index, nt, tag/access_flags
        const u2 bootstrap_specifier_index = cfs->get_u2_fast();
        const u2 name_and_type_index = cfs->get_u2_fast();
        if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) {
          _max_bootstrap_specifier_index = (int) bootstrap_specifier_index;  // collect for later
        }
        cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
        break;
      }
      case JVM_CONSTANT_Integer: {
        cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
        const u4 bytes = cfs->get_u4_fast();
        cp->int_at_put(index, (jint)bytes);
        break;
      }
      case JVM_CONSTANT_Float: {
        cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
        const u4 bytes = cfs->get_u4_fast();
        cp->float_at_put(index, *(jfloat*)&bytes);
        break;
      }
      case JVM_CONSTANT_Long: {
        // A mangled type might cause you to overrun allocated memory
        guarantee_property(index + 1 < length,
                           "Invalid constant pool entry %u in class file %s",
                           index, CHECK);
        cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
        const u8 bytes = cfs->get_u8_fast();
        cp->long_at_put(index, bytes);
        index++;  // Skip entry following eigth-byte constant, see JVM book p. 98
        break;
      }
      case JVM_CONSTANT_Double: {
        // A mangled type might cause you to overrun allocated memory
        guarantee_property(index+1 < length,
                           "Invalid constant pool entry %u in class file %s",
                           index, CHECK);
        cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
        const u8 bytes = cfs->get_u8_fast();
        cp->double_at_put(index, *(jdouble*)&bytes);
        index++;  // Skip entry following eigth-byte constant, see JVM book p. 98
        break;
      }
      case JVM_CONSTANT_NameAndType: {
        cfs->guarantee_more(5, CHECK);  // name_index, signature_index, tag/access_flags
        const u2 name_index = cfs->get_u2_fast();
        const u2 signature_index = cfs->get_u2_fast();
        cp->name_and_type_at_put(index, name_index, signature_index);
        break;
      }
      case JVM_CONSTANT_Utf8 : {
        cfs->guarantee_more(2, CHECK);  // utf8_length
        u2 utf8_length = cfs->get_u2_fast();
        const u1* utf8_buffer = cfs->current();
        assert(utf8_buffer != NULL, "null utf8 buffer");
        // Got utf8 string, guarantee utf8_length+1 bytes, set stream position forward.
        cfs->guarantee_more(utf8_length+1, CHECK);  // utf8 string, tag/access_flags
        cfs->skip_u1_fast(utf8_length);

        // Before storing the symbol, make sure it's legal
        if (_need_verify) {
          verify_legal_utf8(utf8_buffer, utf8_length, CHECK);
        }

        unsigned int hash;
        Symbol* const result = SymbolTable::lookup_only((const char*)utf8_buffer,
                                                        utf8_length,
                                                        hash);
        if (result == NULL) {
          // Not interned yet: queue for batched allocation; flush when the
          // batch buffer fills up.
          names[names_count] = (const char*)utf8_buffer;
          lengths[names_count] = utf8_length;
          indices[names_count] = index;
          hashValues[names_count++] = hash;
          if (names_count == SymbolTable::symbol_alloc_batch_size) {
            SymbolTable::new_symbols(_loader_data,
                                     constantPoolHandle(THREAD, cp),
                                     names_count,
                                     names,
                                     lengths,
                                     indices,
                                     hashValues);
            names_count = 0;
          }
        } else {
          cp->symbol_at_put(index, result);
        }
        break;
      }
      case JVM_CONSTANT_Module:
      case JVM_CONSTANT_Package: {
        // Record that an error occurred in these two cases but keep parsing so
        // that ACC_Module can be checked for in the access_flags. Need to
        // throw NoClassDefFoundError in that case.
        if (_major_version >= JAVA_9_VERSION) {
          cfs->guarantee_more(3, CHECK);
          cfs->get_u2_fast();
          set_class_bad_constant_seen(tag);
          break;
        }
        // Pre-JDK9 class files: deliberate fall-through into default so the
        // tag is reported as unknown.
      }
      default: {
        classfile_parse_error("Unknown constant tag %u in class file %s",
                              tag, THREAD);
        return;
      }
    } // end of switch(tag)
  } // end of for

  // Allocate the remaining symbols
  if (names_count > 0) {
    SymbolTable::new_symbols(_loader_data,
                             constantPoolHandle(THREAD, cp),
                             names_count,
                             names,
                             lengths,
                             indices,
                             hashValues);
  }

  // Copy _current pointer of local copy back to stream.
  assert(stream->current() == old_current, "non-exclusive use of stream");
  stream->set_current(cfs1.current());
}

// Returns true iff 'index' is a usable constant pool index (index 0 is reserved).
static inline bool valid_cp_range(int index, int length) {
  return (index > 0 && index < length);
}

// Returns the Utf8 symbol at 'index', or NULL if the index is out of range or
// the entry is not a Utf8 constant.
static inline Symbol* check_symbol_at(const ConstantPool* cp, int index) {
  assert(cp != NULL, "invariant");
  if (valid_cp_range(index, cp->length()) && cp->tag_at(index).is_utf8()) {
    return cp->symbol_at(index);
  }
  return NULL;
}

#ifdef ASSERT
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
// Debug-build helpers: turn a failed assert_property check into a fatal()
// that includes the name of the class being parsed.
void ClassFileParser::report_assert_property_failure(const char* msg, TRAPS) const {
  ResourceMark rm(THREAD);
  fatal(msg, _class_name->as_C_string());
}

void ClassFileParser::report_assert_property_failure(const char* msg,
                                                     int index,
                                                     TRAPS) const {
  ResourceMark rm(THREAD);
  fatal(msg, index, _class_name->as_C_string());
}
PRAGMA_DIAG_POP
#endif

// Parses and validates the whole constant pool: first reads the raw entries
// (parse_constant_pool_entries), then runs two verification passes over them.
void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
                                          ConstantPool* const cp,
                                          const int length,
                                          TRAPS) {
  assert(cp != NULL, "invariant");
  assert(stream != NULL, "invariant");

  // parsing constant pool entries
  parse_constant_pool_entries(stream, cp, length, CHECK);
  if (class_bad_constant_seen() != 0) {
    // a bad CP entry has been detected previously so stop parsing and just return.
    return;
  }

  int index = 1;  // declared outside of loops for portability
  int num_klasses = 0;

  // first verification pass - validate cross references
  // and fixup class and string constants
  for (index = 1; index < length; index++) {          // Index 0 is unused
    const jbyte tag = cp->tag_at(index).value();
    switch (tag) {
      case JVM_CONSTANT_Class: {
        ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
        break;
      }
      case JVM_CONSTANT_Fieldref:
        // fall through
      case JVM_CONSTANT_Methodref:
        // fall through
      case JVM_CONSTANT_InterfaceMethodref: {
        if (!_need_verify) break;
        // Member refs must point at a valid class entry and a NameAndType entry.
        const int klass_ref_index = cp->klass_ref_index_at(index);
        const int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
        check_property(valid_klass_reference_at(klass_ref_index),
                       "Invalid constant pool index %u in class file %s",
                       klass_ref_index, CHECK);
        check_property(valid_cp_range(name_and_type_ref_index, length) &&
                       cp->tag_at(name_and_type_ref_index).is_name_and_type(),
                       "Invalid constant pool index %u in class file %s",
                       name_and_type_ref_index, CHECK);
        break;
      }
      case JVM_CONSTANT_String: {
        ShouldNotReachHere();     // Only JVM_CONSTANT_StringIndex should be present
        break;
      }
      case JVM_CONSTANT_Integer:
        break;
      case JVM_CONSTANT_Float:
        break;
      case JVM_CONSTANT_Long:
      case JVM_CONSTANT_Double: {
        // 8-byte constants occupy two slots; the second must be unused.
        index++;
        check_property(
          (index < length && cp->tag_at(index).is_invalid()),
          "Improper constant pool long/double index %u in class file %s",
          index, CHECK);
        break;
      }
      case JVM_CONSTANT_NameAndType: {
        if (!_need_verify) break;
        const int name_ref_index = cp->name_ref_index_at(index);
        const int signature_ref_index = cp->signature_ref_index_at(index);
        check_property(valid_symbol_at(name_ref_index),
                       "Invalid constant pool index %u in class file %s",
                       name_ref_index, CHECK);
        check_property(valid_symbol_at(signature_ref_index),
                       "Invalid constant pool index %u in class file %s",
                       signature_ref_index, CHECK);
        break;
      }
      case JVM_CONSTANT_Utf8:
        break;
      case JVM_CONSTANT_UnresolvedClass:         // fall-through
      case JVM_CONSTANT_UnresolvedClassInError: {
        ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
        break;
      }
      case JVM_CONSTANT_ClassIndex: {
        // Fix up to an unresolved-class entry, assigning a resolved-klass slot.
        const int class_index = cp->klass_index_at(index);
        check_property(valid_symbol_at(class_index),
                       "Invalid constant pool index %u in class file %s",
                       class_index, CHECK);
        cp->unresolved_klass_at_put(index, class_index, num_klasses++);
        break;
      }
      case JVM_CONSTANT_StringIndex: {
        // Fix up to an unresolved-string entry pointing at the Utf8 symbol.
        const int string_index = cp->string_index_at(index);
        check_property(valid_symbol_at(string_index),
                       "Invalid constant pool index %u in class file %s",
                       string_index, CHECK);
        Symbol* const sym = cp->symbol_at(string_index);
        cp->unresolved_string_at_put(index, sym);
        break;
      }
      case JVM_CONSTANT_MethodHandle: {
        const int ref_index = cp->method_handle_index_at(index);
        check_property(valid_cp_range(ref_index, length),
                       "Invalid constant pool index %u in class file %s",
                       ref_index, CHECK);
        const constantTag tag = cp->tag_at(ref_index);
        const int ref_kind = cp->method_handle_ref_kind_at(index);

        // The referenced entry's kind must match the ref_kind (JVMS 4.4.8).
        switch (ref_kind) {
          case JVM_REF_getField:
          case JVM_REF_getStatic:
          case JVM_REF_putField:
          case JVM_REF_putStatic: {
            check_property(
              tag.is_field(),
              "Invalid constant pool index %u in class file %s (not a field)",
              ref_index, CHECK);
            break;
          }
          case JVM_REF_invokeVirtual:
          case JVM_REF_newInvokeSpecial: {
            check_property(
              tag.is_method(),
              "Invalid constant pool index %u in class file %s (not a method)",
              ref_index, CHECK);
            break;
          }
          case JVM_REF_invokeStatic:
          case JVM_REF_invokeSpecial: {
            // Interface methods are allowed here from class file version 52 on.
            check_property(
              tag.is_method() ||
              ((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()),
              "Invalid constant pool index %u in class file %s (not a method)",
              ref_index, CHECK);
            break;
          }
          case JVM_REF_invokeInterface: {
            check_property(
              tag.is_interface_method(),
              "Invalid constant pool index %u in class file %s (not an interface method)",
              ref_index, CHECK);
            break;
          }
          default: {
            classfile_parse_error(
              "Bad method handle kind at constant pool index %u in class file %s",
              index, THREAD);
            return;
          }
        } // switch(refkind)
        // Keep the ref_index unchanged. It will be indirected at link-time.
        break;
      } // case MethodHandle
      case JVM_CONSTANT_MethodType: {
        const int ref_index = cp->method_type_index_at(index);
        check_property(valid_symbol_at(ref_index),
                       "Invalid constant pool index %u in class file %s",
                       ref_index, CHECK);
        break;
      }
      case JVM_CONSTANT_Dynamic: {
        const int name_and_type_ref_index =
          cp->bootstrap_name_and_type_ref_index_at(index);

        check_property(valid_cp_range(name_and_type_ref_index, length) &&
                       cp->tag_at(name_and_type_ref_index).is_name_and_type(),
                       "Invalid constant pool index %u in class file %s",
                       name_and_type_ref_index, CHECK);
        // bootstrap specifier index must be checked later,
        // when BootstrapMethods attr is available

        // Mark the constant pool as having a CONSTANT_Dynamic_info structure
        cp->set_has_dynamic_constant();
        break;
      }
      case JVM_CONSTANT_InvokeDynamic: {
        const int name_and_type_ref_index =
          cp->bootstrap_name_and_type_ref_index_at(index);

        check_property(valid_cp_range(name_and_type_ref_index, length) &&
                       cp->tag_at(name_and_type_ref_index).is_name_and_type(),
                       "Invalid constant pool index %u in class file %s",
                       name_and_type_ref_index, CHECK);
        // bootstrap specifier index must be checked later,
        // when BootstrapMethods attr is available
        break;
      }
      default: {
        fatal("bad constant pool tag value %u", cp->tag_at(index).value());
        ShouldNotReachHere();
        break;
      }
    } // switch(tag)
  } // end of for

  cp->allocate_resolved_klasses(_loader_data, num_klasses, CHECK);

  if (!_need_verify) {
    return;
  }

  // second verification pass - checks the strings are of the right format.
  // but not yet to the other entries
  for (index = 1; index < length; index++) {
    const jbyte tag = cp->tag_at(index).value();
    switch (tag) {
      case JVM_CONSTANT_UnresolvedClass: {
        const Symbol* const class_name = cp->klass_name_at(index);
        // check the name
        verify_legal_class_name(class_name, CHECK);
        break;
      }
      case JVM_CONSTANT_NameAndType: {
        if (_need_verify) {
          const int sig_index = cp->signature_ref_index_at(index);
          const int name_index = cp->name_ref_index_at(index);
          const Symbol* const name = cp->symbol_at(name_index);
          const Symbol* const sig = cp->symbol_at(sig_index);
          guarantee_property(sig->utf8_length() != 0,
            "Illegal zero length constant pool entry at %d in class %s",
            sig_index, CHECK);
          guarantee_property(name->utf8_length() != 0,
            "Illegal zero length constant pool entry at %d in class %s",
            name_index, CHECK);

          if (Signature::is_method(sig)) {
            // Format check method name and signature
            verify_legal_method_name(name, CHECK);
            verify_legal_method_signature(name, sig, CHECK);
          } else {
            // Format check field name and signature
            verify_legal_field_name(name, CHECK);
            verify_legal_field_signature(name, sig, CHECK);
          }
        }
        break;
      }
      case JVM_CONSTANT_Dynamic: {
        const int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
        // already verified to be utf8
        const int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
        // already verified to be utf8
        const int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index);
        const Symbol* const name = cp->symbol_at(name_ref_index);
        const Symbol* const signature = cp->symbol_at(signature_ref_index);
        if (_need_verify) {
          // CONSTANT_Dynamic's name and signature are verified above, when iterating NameAndType_info.
          // Need only to be sure signature is the right type.
          if (Signature::is_method(signature)) {
            throwIllegalSignature("CONSTANT_Dynamic", name, signature, CHECK);
          }
        }
        break;
      }
      case JVM_CONSTANT_InvokeDynamic:
      case JVM_CONSTANT_Fieldref:
      case JVM_CONSTANT_Methodref:
      case JVM_CONSTANT_InterfaceMethodref: {
        const int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
        // already verified to be utf8
        const int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
        // already verified to be utf8
        const int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index);
        const Symbol* const name = cp->symbol_at(name_ref_index);
        const Symbol* const signature = cp->symbol_at(signature_ref_index);
        if (tag == JVM_CONSTANT_Fieldref) {
          if (_need_verify) {
            // Field name and signature are verified above, when iterating NameAndType_info.
            // Need only to be sure signature is non-zero length and the right type.
            if (Signature::is_method(signature)) {
              throwIllegalSignature("Field", name, signature, CHECK);
            }
          }
        } else {
          if (_need_verify) {
            // Method name and signature are verified above, when iterating NameAndType_info.
            // Need only to be sure signature is non-zero length and the right type.
            if (!Signature::is_method(signature)) {
              throwIllegalSignature("Method", name, signature, CHECK);
            }
          }
          // 4509014: If a class method name begins with '<', it must be "<init>"
          const unsigned int name_len = name->utf8_length();
          if (tag == JVM_CONSTANT_Methodref &&
              name_len != 0 &&
              name->char_at(0) == JVM_SIGNATURE_SPECIAL &&
              name != vmSymbols::object_initializer_name()) {
            classfile_parse_error(
              "Bad method name at constant pool index %u in class file %s",
              name_ref_index, THREAD);
            return;
          }
        }
        break;
      }
      case JVM_CONSTANT_MethodHandle: {
        const int ref_index = cp->method_handle_index_at(index);
        const int ref_kind = cp->method_handle_ref_kind_at(index);
        switch (ref_kind) {
          case JVM_REF_invokeVirtual:
          case JVM_REF_invokeStatic:
          case JVM_REF_invokeSpecial:
          case JVM_REF_newInvokeSpecial: {
            const int name_and_type_ref_index =
              cp->name_and_type_ref_index_at(ref_index);
            const int name_ref_index =
              cp->name_ref_index_at(name_and_type_ref_index);
            const Symbol* const name = cp->symbol_at(name_ref_index);
            // Only REF_newInvokeSpecial may (and must) reference <init>.
            if (ref_kind == JVM_REF_newInvokeSpecial) {
              if (name != vmSymbols::object_initializer_name()) {
                classfile_parse_error(
                  "Bad constructor name at constant pool index %u in class file %s",
                  name_ref_index, THREAD);
                return;
              }
            } else {
              if (name == vmSymbols::object_initializer_name()) {
                classfile_parse_error(
                  "Bad method name at constant pool index %u in class file %s",
                  name_ref_index, THREAD);
                return;
              }
            }
            break;
          }
          // Other ref_kinds are already fully checked in previous pass.
        } // switch(ref_kind)
        break;
      }
      case JVM_CONSTANT_MethodType: {
        const Symbol* const no_name = vmSymbols::type_name(); // place holder
        const Symbol* const signature = cp->method_type_signature_at(index);
        verify_legal_method_signature(no_name, signature, CHECK);
        break;
      }
      case JVM_CONSTANT_Utf8: {
        assert(cp->symbol_at(index)->refcount() != 0, "count corrupted");
      }
    } // switch(tag)
  } // end of for
}

// Hash-table entry used to detect duplicate (name, signature) pairs while
// parsing; entries are resource-allocated and chained per bucket.
class NameSigHash: public ResourceObj {
 public:
  const Symbol* _name;  // name
  const Symbol* _sig;   // signature
  NameSigHash*  _next;  // Next entry in hash table
};

static const int HASH_ROW_SIZE = 256;

// Bucket index computed from the two symbol pointers (symbols are interned,
// so pointer identity is sufficient).
static unsigned int hash(const Symbol* name, const Symbol* sig) {
  unsigned int raw_hash = 0;
  raw_hash += ((unsigned int)(uintptr_t)name) >> (LogHeapWordSize + 2);
  raw_hash += ((unsigned int)(uintptr_t)sig) >> LogHeapWordSize;

  return (raw_hash + (unsigned int)(uintptr_t)name) % HASH_ROW_SIZE;
}

static void initialize_hashtable(NameSigHash** table) {
  memset((void*)table, 0, sizeof(NameSigHash*) * HASH_ROW_SIZE);
}

// Return false if the name/sig combination is found in table.
// Return true if no duplicate is found. And name/sig is added as a new entry in table.
// The old format checker uses heap sort to find duplicates.
// NOTE: caller should guarantee that GC doesn't happen during the life cycle
// of table since we don't expect Symbol*'s to move.
static bool put_after_lookup(const Symbol* name, const Symbol* sig, NameSigHash** table) {
  assert(name != NULL, "name in constant pool is NULL");

  // First lookup for duplicates
  int index = hash(name, sig);
  NameSigHash* entry = table[index];
  while (entry != NULL) {
    if (entry->_name == name && entry->_sig == sig) {
      return false;
    }
    entry = entry->_next;
  }

  // No duplicate is found, allocate a new entry and fill it.
  entry = new NameSigHash();
  entry->_name = name;
  entry->_sig = sig;

  // Insert into hash table
  entry->_next = table[index];
  table[index] = entry;

  return true;
}

// Side-effects: populates the _local_interfaces field
void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
                                       const int itfs_len,
                                       ConstantPool* const cp,
                                       bool* const has_nonstatic_concrete_methods,
                                       TRAPS) {
  assert(stream != NULL, "invariant");
  assert(cp != NULL, "invariant");
  assert(has_nonstatic_concrete_methods != NULL, "invariant");

  if (itfs_len == 0) {
    _local_interfaces = Universe::the_empty_instance_klass_array();
  } else {
    assert(itfs_len > 0, "only called for len>0");
    _local_interfaces = MetadataFactory::new_array<InstanceKlass*>(_loader_data, itfs_len, NULL, CHECK);

    int index;
    for (index = 0; index < itfs_len; index++) {
      const u2 interface_index = stream->get_u2(CHECK);
      Klass* interf;
      check_property(
        valid_klass_reference_at(interface_index),
        "Interface name has bad constant pool index %u in class file %s",
        interface_index, CHECK);
      if (cp->tag_at(interface_index).is_klass()) {
        // Already resolved (e.g. by an earlier iteration); reuse it.
        interf = cp->resolved_klass_at(interface_index);
      } else {
        Symbol* const unresolved_klass = cp->klass_name_at(interface_index);

        // Don't need to check legal name because it's checked when parsing constant pool.
        // But need to make sure it's not an array type.
        guarantee_property(unresolved_klass->char_at(0) != JVM_SIGNATURE_ARRAY,
                           "Bad interface name in class file %s", CHECK);

        // Call resolve_super so class circularity is checked
        interf = SystemDictionary::resolve_super_or_fail(
                                                  _class_name,
                                                  unresolved_klass,
                                                  Handle(THREAD, _loader_data->class_loader()),
                                                  _protection_domain,
                                                  false,
                                                  CHECK);
      }

      if (!interf->is_interface()) {
        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                  err_msg("class %s can not implement %s, because it is not an interface (%s)",
                          _class_name->as_klass_external_name(),
                          interf->external_name(),
                          interf->class_in_module_of_loader()));
      }

      if (InstanceKlass::cast(interf)->has_nonstatic_concrete_methods()) {
        *has_nonstatic_concrete_methods = true;
      }
      _local_interfaces->at_put(index, InstanceKlass::cast(interf));
    }

    if (!_need_verify || itfs_len <= 1) {
      return;
    }

    // Check if there's any duplicates in interfaces
    ResourceMark rm(THREAD);
    NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD,
                                                                 NameSigHash*,
                                                                 HASH_ROW_SIZE);
    initialize_hashtable(interface_names);
    bool dup = false;
    const Symbol* name = NULL;
    {
      // Symbol pointers must stay fixed while they are table keys (see
      // put_after_lookup), hence the no-safepoint scope in debug builds.
      debug_only(NoSafepointVerifier nsv;)
      for (index = 0; index < itfs_len; index++) {
        const InstanceKlass* const k = _local_interfaces->at(index);
        name = k->name();
        // If no duplicates, add (name, NULL) in hashtable interface_names.
        if (!put_after_lookup(name, NULL, interface_names)) {
          dup = true;
          break;
        }
      }
    }
    if (dup) {
      classfile_parse_error("Duplicate interface name \"%s\" in class file %s",
                            name->as_C_string(), THREAD);
    }
  }
}

// Checks that a ConstantValue attribute's constant pool entry has the type
// required by the field's signature (JVMS 4.7.2).
void ClassFileParser::verify_constantvalue(const ConstantPool* const cp,
                                           int constantvalue_index,
                                           int signature_index,
                                           TRAPS) const {
  // Make sure the constant pool entry is of a type appropriate to this field
  guarantee_property(
    (constantvalue_index > 0 &&
      constantvalue_index < cp->length()),
    "Bad initial value index %u in ConstantValue attribute in class file %s",
    constantvalue_index, CHECK);

  const constantTag value_type = cp->tag_at(constantvalue_index);
  switch(cp->basic_type_for_signature_at(signature_index)) {
    case T_LONG: {
      guarantee_property(value_type.is_long(),
                         "Inconsistent constant value type in class file %s",
                         CHECK);
      break;
    }
    case T_FLOAT: {
      guarantee_property(value_type.is_float(),
                         "Inconsistent constant value type in class file %s",
                         CHECK);
      break;
    }
    case T_DOUBLE: {
      guarantee_property(value_type.is_double(),
                         "Inconsistent constant value type in class file %s",
                         CHECK);
      break;
    }
    case T_BYTE:
    case T_CHAR:
    case T_SHORT:
    case T_BOOLEAN:
    case T_INT: {
      // All sub-int integral types use a CONSTANT_Integer entry.
      guarantee_property(value_type.is_int(),
                         "Inconsistent constant value type in class file %s",
                         CHECK);
      break;
    }
    case T_OBJECT: {
      // Only String-typed fields may carry an object ConstantValue.
      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
                         && value_type.is_string()),
                         "Bad string initial value in class file %s",
                         CHECK);
      break;
    }
    default: {
      classfile_parse_error("Unable to set initial value %u in class file %s",
                            constantvalue_index,
                            THREAD);
    }
  }
}

// Collects which VM-significant annotations were seen on a field, method or
// class; annotations are recorded as bits in _annotations_present.
class AnnotationCollector : public ResourceObj{
public:
  enum Location { _in_field, _in_method, _in_class };
  enum ID {
    _unknown = 0,
    _method_CallerSensitive,
    _method_ForceInline,
    _method_DontInline,
    _method_InjectedProfile,
    _method_LambdaForm_Compiled,
    _method_Hidden,
    _method_Scoped,
    _method_IntrinsicCandidate,
    _jdk_internal_vm_annotation_Contended,
    _field_Stable,
    _jdk_internal_vm_annotation_ReservedStackAccess,
    _jdk_internal_ValueBased,
    _annotation_LIMIT
  };
  const Location _location;    // where these annotations appeared (field/method/class)
  int _annotations_present;    // bit set indexed by ID
  u2 _contended_group;         // @Contended group tag (cp index of group name, 0 = default)

  AnnotationCollector(Location location)
    : _location(location), _annotations_present(0), _contended_group(0)
  {
    // All IDs must fit in the _annotations_present bit set.
    assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
  }
  // If this annotation name has an ID, report it (or _none).
  ID annotation_index(const ClassLoaderData* loader_data, const Symbol* name, bool can_access_vm_annotations);
  // Set the annotation name:
  void set_annotation(ID id) {
    assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
    _annotations_present |= nth_bit((int)id);
  }

  void remove_annotation(ID id) {
    assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
    _annotations_present &= ~nth_bit((int)id);
  }

  // Report if the annotation is present.
  bool has_any_annotations() const { return _annotations_present != 0; }
  bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }

  void set_contended_group(u2 group) { _contended_group = group; }
  u2 contended_group() const { return _contended_group; }

  bool is_contended() const { return has_annotation(_jdk_internal_vm_annotation_Contended); }

  void set_stable(bool stable) { set_annotation(_field_Stable); }
  bool is_stable() const { return has_annotation(_field_Stable); }
};

// This class also doubles as a holder for metadata cleanup.
class ClassFileParser::FieldAnnotationCollector : public AnnotationCollector {
private:
  ClassLoaderData* _loader_data;
  AnnotationArray* _field_annotations;
  AnnotationArray* _field_type_annotations;
public:
  FieldAnnotationCollector(ClassLoaderData* loader_data) :
    AnnotationCollector(_in_field),
    _loader_data(loader_data),
    _field_annotations(NULL),
    _field_type_annotations(NULL) {}
  ~FieldAnnotationCollector();
  void apply_to(FieldInfo* f);
  AnnotationArray* field_annotations()      { return _field_annotations; }
  AnnotationArray* field_type_annotations() { return _field_type_annotations; }

  void set_field_annotations(AnnotationArray* a)      { _field_annotations = a; }
  void set_field_type_annotations(AnnotationArray* a) { _field_type_annotations = a; }
};

class MethodAnnotationCollector : public AnnotationCollector{
public:
  MethodAnnotationCollector() : AnnotationCollector(_in_method) { }
  void apply_to(const methodHandle& m);
};

class ClassFileParser::ClassAnnotationCollector : public AnnotationCollector{
public:
  ClassAnnotationCollector() : AnnotationCollector(_in_class) { }
  void apply_to(InstanceKlass* ik);
};


static int skip_annotation_value(const u1*, int, int); // fwd decl

// Safely increment index by val if does not pass limit
#define SAFE_ADD(index, limit, val) \
if (index >= limit - val) return limit; \
index += val;

// Skip an annotation.  Return >=limit if there is any problem.
static int skip_annotation(const u1* buffer, int limit, int index) {
  assert(buffer != NULL, "invariant");
  // annotation := atype:u2 do(nmem:u2) {member:u2 value}
  // value := switch (tag:u1) { ... }
  SAFE_ADD(index, limit, 4); // skip atype and read nmem
  int nmem = Bytes::get_Java_u2((address)buffer + index - 2);
  while (--nmem >= 0 && index < limit) {
    SAFE_ADD(index, limit, 2); // skip member
    index = skip_annotation_value(buffer, limit, index);
  }
  return index;
}

// Skip an annotation value.  Return >=limit if there is any problem.
static int skip_annotation_value(const u1* buffer, int limit, int index) {
  assert(buffer != NULL, "invariant");
  // value := switch (tag:u1) {
  //   case B, C, I, S, Z, D, F, J, c: con:u2;
  //   case e: e_class:u2 e_name:u2;
  //   case s: s_con:u2;
  //   case [: do(nval:u2) {value};
  //   case @: annotation;
  //   case s: s_con:u2;
  // }
  SAFE_ADD(index, limit, 1); // read tag
  const u1 tag = buffer[index - 1];
  switch (tag) {
    case 'B':
    case 'C':
    case 'I':
    case 'S':
    case 'Z':
    case 'D':
    case 'F':
    case 'J':
    case 'c':
    case 's':
      SAFE_ADD(index, limit, 2); // skip con or s_con
      break;
    case 'e':
      SAFE_ADD(index, limit, 4); // skip e_class, e_name
      break;
    case '[':
    {
      // Array value: recurse over each element.
      SAFE_ADD(index, limit, 2); // read nval
      int nval = Bytes::get_Java_u2((address)buffer + index - 2);
      while (--nval >= 0 && index < limit) {
        index = skip_annotation_value(buffer, limit, index);
      }
    }
    break;
    case '@':
      // Nested annotation value.
      index = skip_annotation(buffer, limit, index);
      break;
    default:
      return limit; // bad tag byte
  }
  return index;
}

// Sift through annotations, looking for those significant to the VM:
static void parse_annotations(const ConstantPool* const cp,
                              const u1* buffer, int limit,
                              AnnotationCollector* coll,
                              ClassLoaderData* loader_data,
                              const bool can_access_vm_annotations) {

  assert(cp != NULL, "invariant");
  assert(buffer != NULL, "invariant");
  assert(coll != NULL, "invariant");
  assert(loader_data != NULL, "invariant");

  // annotations := do(nann:u2) {annotation}
  int index = 2; // read nann
  if (index >= limit)  return;
  int nann = Bytes::get_Java_u2((address)buffer + index - 2);
  enum {  // initial annotation layout
    atype_off = 0,      // utf8 such as 'Ljava/lang/annotation/Retention;'
    count_off = 2,      // u2   such as 1 (one value)
    member_off = 4,     // utf8 such as 'value'
    tag_off = 6,        // u1   such as 'c' (type) or 'e' (enum)
    e_tag_val = 'e',
    e_type_off = 7,     // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
    e_con_off = 9,      // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
    e_size = 11,        // end of 'e' annotation
    c_tag_val = 'c',    // payload is type
    c_con_off = 7,      // utf8 payload, such as 'I'
    c_size = 9,         // end of 'c' annotation
    s_tag_val = 's',    // payload is String
    s_con_off = 7,      // utf8 payload, such as 'Ljava/lang/String;'
    s_size = 9,
    min_size = 6        // smallest possible size (zero members)
  };
  // Cannot add min_size to index in case of overflow MAX_INT
  while ((--nann) >= 0 && (index - 2 <= limit - min_size)) {
    int index0 = index;
    index = skip_annotation(buffer, limit, index);
    const u1* const abase = buffer + index0;
    const int atype = Bytes::get_Java_u2((address)abase + atype_off);
    const int count = Bytes::get_Java_u2((address)abase + count_off);
    const Symbol* const aname = check_symbol_at(cp, atype);
    if (aname == NULL)  break;  // invalid annotation name
    const Symbol* member = NULL;
    if (count >= 1) {
      const int member_index = Bytes::get_Java_u2((address)abase + member_off);
      member = check_symbol_at(cp, member_index);
      if (member == NULL)  break;  // invalid member name
    }

    // Here is where parsing particular annotations will take place.
    AnnotationCollector::ID id = coll->annotation_index(loader_data, aname, can_access_vm_annotations);
    if (AnnotationCollector::_unknown == id)  continue;
    coll->set_annotation(id);

    if (AnnotationCollector::_jdk_internal_vm_annotation_Contended == id) {
      // @Contended can optionally specify the contention group.
      //
      // Contended group defines the equivalence class over the fields:
      // the fields within the same contended group are not treated distinct.
      // The only exception is default group, which does not incur the
      // equivalence. Naturally, contention group for classes is meaningless.
      //
      // While the contention group is specified as String, annotation
      // values are already interned, and we might as well use the constant
      // pool index as the group tag.
      //
      u2 group_index = 0; // default contended group
      if (count == 1
        && s_size == (index - index0)  // match size
        && s_tag_val == *(abase + tag_off)
        && member == vmSymbols::value_name()) {
        group_index = Bytes::get_Java_u2((address)abase + s_con_off);
        // An empty group name means the default group.
        if (cp->symbol_at(group_index)->utf8_length() == 0) {
          group_index = 0; // default contended group
        }
      }
      coll->set_contended_group(group_index);
    }
  }
}

// Parse attributes for a field.
void ClassFileParser::parse_field_attributes(const ClassFileStream* const cfs,
                                             u2 attributes_count,
                                             bool is_static,
                                             u2 signature_index,
                                             u2* const constantvalue_index_addr,
                                             bool* const is_synthetic_addr,
                                             u2* const generic_signature_index_addr,
                                             ClassFileParser::FieldAnnotationCollector* parsed_annotations,
                                             TRAPS) {
  assert(cfs != NULL, "invariant");
  assert(constantvalue_index_addr != NULL, "invariant");
  assert(is_synthetic_addr != NULL, "invariant");
  assert(generic_signature_index_addr != NULL, "invariant");
  assert(parsed_annotations != NULL, "invariant");
  assert(attributes_count > 0, "attributes_count should be greater than 0");

  u2 constantvalue_index = 0;
  u2 generic_signature_index = 0;
  bool is_synthetic = false;
  const u1* runtime_visible_annotations = NULL;
  int runtime_visible_annotations_length = 0;
  const u1* runtime_invisible_annotations = NULL;
  int runtime_invisible_annotations_length = 0;
  const u1* runtime_visible_type_annotations = NULL;
  int runtime_visible_type_annotations_length = 0;
  const u1* runtime_invisible_type_annotations = NULL;
  int runtime_invisible_type_annotations_length = 0;
  bool runtime_invisible_annotations_exists = false;
  bool runtime_invisible_type_annotations_exists = false;
  const ConstantPool* const cp = _cp;

  while (attributes_count--) {
    cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
    const u2 attribute_name_index = cfs->get_u2_fast();
    const u4 attribute_length = cfs->get_u4_fast();
    check_property(valid_symbol_at(attribute_name_index),
                   "Invalid field attribute index %u in class file %s",
                   attribute_name_index, CHECK);

    const Symbol* const attribute_name = cp->symbol_at(attribute_name_index);
    if (is_static && attribute_name == vmSymbols::tag_constant_value()) {
      // ignore if non-static
      if (constantvalue_index != 0) {
        classfile_parse_error("Duplicate ConstantValue attribute in class file %s", THREAD);
        return;
      }
      check_property(
        attribute_length == 2,
        "Invalid ConstantValue field attribute length %u in class file %s",
        attribute_length, CHECK);

      constantvalue_index = cfs->get_u2(CHECK);
      if (_need_verify) {
        verify_constantvalue(cp, constantvalue_index, signature_index, CHECK);
      }
    } else if (attribute_name == vmSymbols::tag_synthetic()) {
      if (attribute_length != 0) {
        classfile_parse_error(
          "Invalid Synthetic field attribute length %u in class file %s",
          attribute_length, THREAD);
        return;
      }
      is_synthetic = true;
    } else if (attribute_name == vmSymbols::tag_deprecated()) { // 4276120
      if (attribute_length != 0) {
        classfile_parse_error(
          "Invalid Deprecated field attribute length %u in class file %s",
          attribute_length, THREAD);
        return;
      }
    } else if (_major_version >= JAVA_1_5_VERSION) {
      if (attribute_name == vmSymbols::tag_signature()) {
        if (generic_signature_index != 0) {
          classfile_parse_error(
            "Multiple Signature attributes for field in class file %s", THREAD);
          return;
        }
        if (attribute_length != 2) {
          classfile_parse_error(
            "Wrong size %u for field's Signature attribute in class file %s",
            attribute_length, THREAD);
          return;
        }
        generic_signature_index = parse_generic_signature_attribute(cfs, CHECK);
      } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
        if (runtime_visible_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleAnnotations attributes for field in class file %s", THREAD);
          return;
        }
        runtime_visible_annotations_length = attribute_length;
        runtime_visible_annotations = cfs->current();
        assert(runtime_visible_annotations != NULL, "null visible annotations");
        cfs->guarantee_more(runtime_visible_annotations_length, CHECK);
parse_annotations(cp, runtime_visible_annotations, runtime_visible_annotations_length, parsed_annotations, _loader_data, _can_access_vm_annotations); cfs->skip_u1_fast(runtime_visible_annotations_length); } else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) { if (runtime_invisible_annotations_exists) { classfile_parse_error( "Multiple RuntimeInvisibleAnnotations attributes for field in class file %s", THREAD); return; } runtime_invisible_annotations_exists = true; if (PreserveAllAnnotations) { runtime_invisible_annotations_length = attribute_length; runtime_invisible_annotations = cfs->current(); assert(runtime_invisible_annotations != NULL, "null invisible annotations"); } cfs->skip_u1(attribute_length, CHECK); } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) { if (runtime_visible_type_annotations != NULL) { classfile_parse_error( "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", THREAD); return; } runtime_visible_type_annotations_length = attribute_length; runtime_visible_type_annotations = cfs->current(); assert(runtime_visible_type_annotations != NULL, "null visible type annotations"); cfs->skip_u1(runtime_visible_type_annotations_length, CHECK); } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) { if (runtime_invisible_type_annotations_exists) { classfile_parse_error( "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", THREAD); return; } else { runtime_invisible_type_annotations_exists = true; } if (PreserveAllAnnotations) { runtime_invisible_type_annotations_length = attribute_length; runtime_invisible_type_annotations = cfs->current(); assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); } cfs->skip_u1(attribute_length, CHECK); } else { cfs->skip_u1(attribute_length, CHECK); // Skip unknown attributes } } else { cfs->skip_u1(attribute_length, CHECK); // Skip unknown attributes } } 
*constantvalue_index_addr = constantvalue_index; *is_synthetic_addr = is_synthetic; *generic_signature_index_addr = generic_signature_index; AnnotationArray* a = assemble_annotations(runtime_visible_annotations, runtime_visible_annotations_length, runtime_invisible_annotations, runtime_invisible_annotations_length, CHECK); parsed_annotations->set_field_annotations(a); a = assemble_annotations(runtime_visible_type_annotations, runtime_visible_type_annotations_length, runtime_invisible_type_annotations, runtime_invisible_type_annotations_length, CHECK); parsed_annotations->set_field_type_annotations(a); return; } // Field allocation types. Used for computing field offsets. enum FieldAllocationType { STATIC_OOP, // Oops STATIC_BYTE, // Boolean, Byte, char STATIC_SHORT, // shorts STATIC_WORD, // ints STATIC_DOUBLE, // aligned long or double NONSTATIC_OOP, NONSTATIC_BYTE, NONSTATIC_SHORT, NONSTATIC_WORD, NONSTATIC_DOUBLE, MAX_FIELD_ALLOCATION_TYPE, BAD_ALLOCATION_TYPE = -1 }; static FieldAllocationType _basic_type_to_atype[2 * (T_CONFLICT + 1)] = { BAD_ALLOCATION_TYPE, // 0 BAD_ALLOCATION_TYPE, // 1 BAD_ALLOCATION_TYPE, // 2 BAD_ALLOCATION_TYPE, // 3 NONSTATIC_BYTE , // T_BOOLEAN = 4, NONSTATIC_SHORT, // T_CHAR = 5, NONSTATIC_WORD, // T_FLOAT = 6, NONSTATIC_DOUBLE, // T_DOUBLE = 7, NONSTATIC_BYTE, // T_BYTE = 8, NONSTATIC_SHORT, // T_SHORT = 9, NONSTATIC_WORD, // T_INT = 10, NONSTATIC_DOUBLE, // T_LONG = 11, NONSTATIC_OOP, // T_OBJECT = 12, NONSTATIC_OOP, // T_ARRAY = 13, BAD_ALLOCATION_TYPE, // T_VOID = 14, BAD_ALLOCATION_TYPE, // T_ADDRESS = 15, BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16, BAD_ALLOCATION_TYPE, // T_METADATA = 17, BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18, BAD_ALLOCATION_TYPE, // T_CONFLICT = 19, BAD_ALLOCATION_TYPE, // 0 BAD_ALLOCATION_TYPE, // 1 BAD_ALLOCATION_TYPE, // 2 BAD_ALLOCATION_TYPE, // 3 STATIC_BYTE , // T_BOOLEAN = 4, STATIC_SHORT, // T_CHAR = 5, STATIC_WORD, // T_FLOAT = 6, STATIC_DOUBLE, // T_DOUBLE = 7, STATIC_BYTE, // T_BYTE = 8, 
STATIC_SHORT, // T_SHORT = 9, STATIC_WORD, // T_INT = 10, STATIC_DOUBLE, // T_LONG = 11, STATIC_OOP, // T_OBJECT = 12, STATIC_OOP, // T_ARRAY = 13, BAD_ALLOCATION_TYPE, // T_VOID = 14, BAD_ALLOCATION_TYPE, // T_ADDRESS = 15, BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16, BAD_ALLOCATION_TYPE, // T_METADATA = 17, BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18, BAD_ALLOCATION_TYPE, // T_CONFLICT = 19, }; static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type) { assert(type >= T_BOOLEAN && type < T_VOID, "only allowable values"); FieldAllocationType result = _basic_type_to_atype[type + (is_static ? (T_CONFLICT + 1) : 0)]; assert(result != BAD_ALLOCATION_TYPE, "bad type"); return result; } class ClassFileParser::FieldAllocationCount : public ResourceObj { public: u2 count[MAX_FIELD_ALLOCATION_TYPE]; FieldAllocationCount() { for (int i = 0; i < MAX_FIELD_ALLOCATION_TYPE; i++) { count[i] = 0; } } void update(bool is_static, BasicType type) { FieldAllocationType atype = basic_type_to_atype(is_static, type); if (atype != BAD_ALLOCATION_TYPE) { // Make sure there is no overflow with injected fields. 
assert(count[atype] < 0xFFFF, "More than 65535 fields"); count[atype]++; } } }; // Side-effects: populates the _fields, _fields_annotations, // _fields_type_annotations fields void ClassFileParser::parse_fields(const ClassFileStream* const cfs, bool is_interface, FieldAllocationCount* const fac, ConstantPool* cp, const int cp_size, u2* const java_fields_count_ptr, TRAPS) { assert(cfs != NULL, "invariant"); assert(fac != NULL, "invariant"); assert(cp != NULL, "invariant"); assert(java_fields_count_ptr != NULL, "invariant"); assert(NULL == _fields, "invariant"); assert(NULL == _fields_annotations, "invariant"); assert(NULL == _fields_type_annotations, "invariant"); cfs->guarantee_more(2, CHECK); // length const u2 length = cfs->get_u2_fast(); *java_fields_count_ptr = length; int num_injected = 0; const InjectedField* const injected = JavaClasses::get_injected(_class_name, &num_injected); const int total_fields = length + num_injected; // The field array starts with tuples of shorts // [access, name index, sig index, initial value index, byte offset]. // A generic signature slot only exists for field with generic // signature attribute. And the access flag is set with // JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE for that field. The generic // signature slots are at the end of the field array and after all // other fields data. // // f1: [access, name index, sig index, initial value index, low_offset, high_offset] // f2: [access, name index, sig index, initial value index, low_offset, high_offset] // ... // fn: [access, name index, sig index, initial value index, low_offset, high_offset] // [generic signature index] // [generic signature index] // ... // // Allocate a temporary resource array for field data. For each field, // a slot is reserved in the temporary array for the generic signature // index. After parsing all fields, the data are copied to a permanent // array and any unused slots will be discarded. 
ResourceMark rm(THREAD); u2* const fa = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, u2, total_fields * (FieldInfo::field_slots + 1)); // The generic signature slots start after all other fields' data. int generic_signature_slot = total_fields * FieldInfo::field_slots; int num_generic_signature = 0; for (int n = 0; n < length; n++) { // access_flags, name_index, descriptor_index, attributes_count cfs->guarantee_more(8, CHECK); AccessFlags access_flags; const jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS; verify_legal_field_modifiers(flags, is_interface, CHECK); access_flags.set_flags(flags); const u2 name_index = cfs->get_u2_fast(); check_property(valid_symbol_at(name_index), "Invalid constant pool index %u for field name in class file %s", name_index, CHECK); const Symbol* const name = cp->symbol_at(name_index); verify_legal_field_name(name, CHECK); const u2 signature_index = cfs->get_u2_fast(); check_property(valid_symbol_at(signature_index), "Invalid constant pool index %u for field signature in class file %s", signature_index, CHECK); const Symbol* const sig = cp->symbol_at(signature_index); verify_legal_field_signature(name, sig, CHECK); u2 constantvalue_index = 0; bool is_synthetic = false; u2 generic_signature_index = 0; const bool is_static = access_flags.is_static(); FieldAnnotationCollector parsed_annotations(_loader_data); const u2 attributes_count = cfs->get_u2_fast(); if (attributes_count > 0) { parse_field_attributes(cfs, attributes_count, is_static, signature_index, &constantvalue_index, &is_synthetic, &generic_signature_index, &parsed_annotations, CHECK); if (parsed_annotations.field_annotations() != NULL) { if (_fields_annotations == NULL) { _fields_annotations = MetadataFactory::new_array<AnnotationArray*>( _loader_data, length, NULL, CHECK); } _fields_annotations->at_put(n, parsed_annotations.field_annotations()); parsed_annotations.set_field_annotations(NULL); } if (parsed_annotations.field_type_annotations() != NULL) { if 
(_fields_type_annotations == NULL) { _fields_type_annotations = MetadataFactory::new_array<AnnotationArray*>(_loader_data, length, NULL, CHECK); } _fields_type_annotations->at_put(n, parsed_annotations.field_type_annotations()); parsed_annotations.set_field_type_annotations(NULL); } if (is_synthetic) { access_flags.set_is_synthetic(); } if (generic_signature_index != 0) { access_flags.set_field_has_generic_signature(); fa[generic_signature_slot] = generic_signature_index; generic_signature_slot ++; num_generic_signature ++; } } FieldInfo* const field = FieldInfo::from_field_array(fa, n); field->initialize(access_flags.as_short(), name_index, signature_index, constantvalue_index); const BasicType type = cp->basic_type_for_signature_at(signature_index); // Update FieldAllocationCount for this kind of field fac->update(is_static, type); // After field is initialized with type, we can augment it with aux info if (parsed_annotations.has_any_annotations()) { parsed_annotations.apply_to(field); if (field->is_contended()) { _has_contended_fields = true; } } } int index = length; if (num_injected != 0) { for (int n = 0; n < num_injected; n++) { // Check for duplicates if (injected[n].may_be_java) { const Symbol* const name = injected[n].name(); const Symbol* const signature = injected[n].signature(); bool duplicate = false; for (int i = 0; i < length; i++) { const FieldInfo* const f = FieldInfo::from_field_array(fa, i); if (name == cp->symbol_at(f->name_index()) && signature == cp->symbol_at(f->signature_index())) { // Symbol is desclared in Java so skip this one duplicate = true; break; } } if (duplicate) { // These will be removed from the field array at the end continue; } } // Injected field FieldInfo* const field = FieldInfo::from_field_array(fa, index); field->initialize((u2)JVM_ACC_FIELD_INTERNAL, (u2)(injected[n].name_index), (u2)(injected[n].signature_index), 0); const BasicType type = Signature::basic_type(injected[n].signature()); // Update FieldAllocationCount 
for this kind of field fac->update(false, type); index++; } } assert(NULL == _fields, "invariant"); _fields = MetadataFactory::new_array<u2>(_loader_data, index * FieldInfo::field_slots + num_generic_signature, CHECK); // Sometimes injected fields already exist in the Java source so // the fields array could be too long. In that case the // fields array is trimed. Also unused slots that were reserved // for generic signature indexes are discarded. { int i = 0; for (; i < index * FieldInfo::field_slots; i++) { _fields->at_put(i, fa[i]); } for (int j = total_fields * FieldInfo::field_slots; j < generic_signature_slot; j++) { _fields->at_put(i++, fa[j]); } assert(_fields->length() == i, ""); } if (_need_verify && length > 1) { // Check duplicated fields ResourceMark rm(THREAD); NameSigHash** names_and_sigs = NEW_RESOURCE_ARRAY_IN_THREAD( THREAD, NameSigHash*, HASH_ROW_SIZE); initialize_hashtable(names_and_sigs); bool dup = false; const Symbol* name = NULL; const Symbol* sig = NULL; { debug_only(NoSafepointVerifier nsv;) for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) { name = fs.name(); sig = fs.signature(); // If no duplicates, add name/signature in hashtable names_and_sigs. if (!put_after_lookup(name, sig, names_and_sigs)) { dup = true; break; } } } if (dup) { classfile_parse_error("Duplicate field name \"%s\" with signature \"%s\" in class file %s", name->as_C_string(), sig->as_klass_external_name(), THREAD); } } } const ClassFileParser::unsafe_u2* ClassFileParser::parse_exception_table(const ClassFileStream* const cfs, u4 code_length, u4 exception_table_length, TRAPS) { assert(cfs != NULL, "invariant"); const unsafe_u2* const exception_table_start = cfs->current(); assert(exception_table_start != NULL, "null exception table"); cfs->guarantee_more(8 * exception_table_length, CHECK_NULL); // start_pc, // end_pc, // handler_pc, // catch_type_index // Will check legal target after parsing code array in verifier. 
if (_need_verify) { for (unsigned int i = 0; i < exception_table_length; i++) { const u2 start_pc = cfs->get_u2_fast(); const u2 end_pc = cfs->get_u2_fast(); const u2 handler_pc = cfs->get_u2_fast(); const u2 catch_type_index = cfs->get_u2_fast(); guarantee_property((start_pc < end_pc) && (end_pc <= code_length), "Illegal exception table range in class file %s", CHECK_NULL); guarantee_property(handler_pc < code_length, "Illegal exception table handler in class file %s", CHECK_NULL); if (catch_type_index != 0) { guarantee_property(valid_klass_reference_at(catch_type_index), "Catch type in exception table has bad constant type in class file %s", CHECK_NULL); } } } else { cfs->skip_u2_fast(exception_table_length * 4); } return exception_table_start; } void ClassFileParser::parse_linenumber_table(u4 code_attribute_length, u4 code_length, CompressedLineNumberWriteStream**const write_stream, TRAPS) { const ClassFileStream* const cfs = _stream; unsigned int num_entries = cfs->get_u2(CHECK); // Each entry is a u2 start_pc, and a u2 line_number const unsigned int length_in_bytes = num_entries * (sizeof(u2) * 2); // Verify line number attribute and table length check_property( code_attribute_length == sizeof(u2) + length_in_bytes, "LineNumberTable attribute has wrong length in class file %s", CHECK); cfs->guarantee_more(length_in_bytes, CHECK); if ((*write_stream) == NULL) { if (length_in_bytes > fixed_buffer_size) { (*write_stream) = new CompressedLineNumberWriteStream(length_in_bytes); } else { (*write_stream) = new CompressedLineNumberWriteStream( _linenumbertable_buffer, fixed_buffer_size); } } while (num_entries-- > 0) { const u2 bci = cfs->get_u2_fast(); // start_pc const u2 line = cfs->get_u2_fast(); // line_number guarantee_property(bci < code_length, "Invalid pc in LineNumberTable in class file %s", CHECK); (*write_stream)->write_pair(bci, line); } } class LVT_Hash : public AllStatic { public: static bool equals(LocalVariableTableElement const& e0, 
LocalVariableTableElement const& e1) { /* * 3-tuple start_bci/length/slot has to be unique key, * so the following comparison seems to be redundant: * && elem->name_cp_index == entry->_elem->name_cp_index */ return (e0.start_bci == e1.start_bci && e0.length == e1.length && e0.name_cp_index == e1.name_cp_index && e0.slot == e1.slot); } static unsigned int hash(LocalVariableTableElement const& e0) { unsigned int raw_hash = e0.start_bci; raw_hash = e0.length + raw_hash * 37; raw_hash = e0.name_cp_index + raw_hash * 37; raw_hash = e0.slot + raw_hash * 37; return raw_hash; } }; // Class file LocalVariableTable elements. class Classfile_LVT_Element { public: u2 start_bci; u2 length; u2 name_cp_index; u2 descriptor_cp_index; u2 slot; }; static void copy_lvt_element(const Classfile_LVT_Element* const src, LocalVariableTableElement* const lvt) { lvt->start_bci = Bytes::get_Java_u2((u1*) &src->start_bci); lvt->length = Bytes::get_Java_u2((u1*) &src->length); lvt->name_cp_index = Bytes::get_Java_u2((u1*) &src->name_cp_index); lvt->descriptor_cp_index = Bytes::get_Java_u2((u1*) &src->descriptor_cp_index); lvt->signature_cp_index = 0; lvt->slot = Bytes::get_Java_u2((u1*) &src->slot); } // Function is used to parse both attributes: // LocalVariableTable (LVT) and LocalVariableTypeTable (LVTT) const ClassFileParser::unsafe_u2* ClassFileParser::parse_localvariable_table(const ClassFileStream* cfs, u4 code_length, u2 max_locals, u4 code_attribute_length, u2* const localvariable_table_length, bool isLVTT, TRAPS) { const char* const tbl_name = (isLVTT) ? 
"LocalVariableTypeTable" : "LocalVariableTable"; *localvariable_table_length = cfs->get_u2(CHECK_NULL); const unsigned int size = (*localvariable_table_length) * sizeof(Classfile_LVT_Element) / sizeof(u2); const ConstantPool* const cp = _cp; // Verify local variable table attribute has right length if (_need_verify) { guarantee_property(code_attribute_length == (sizeof(*localvariable_table_length) + size * sizeof(u2)), "%s has wrong length in class file %s", tbl_name, CHECK_NULL); } const unsafe_u2* const localvariable_table_start = cfs->current(); assert(localvariable_table_start != NULL, "null local variable table"); if (!_need_verify) { cfs->skip_u2_fast(size); } else { cfs->guarantee_more(size * 2, CHECK_NULL); for(int i = 0; i < (*localvariable_table_length); i++) { const u2 start_pc = cfs->get_u2_fast(); const u2 length = cfs->get_u2_fast(); const u2 name_index = cfs->get_u2_fast(); const u2 descriptor_index = cfs->get_u2_fast(); const u2 index = cfs->get_u2_fast(); // Assign to a u4 to avoid overflow const u4 end_pc = (u4)start_pc + (u4)length; if (start_pc >= code_length) { classfile_parse_error( "Invalid start_pc %u in %s in class file %s", start_pc, tbl_name, THREAD); return NULL; } if (end_pc > code_length) { classfile_parse_error( "Invalid length %u in %s in class file %s", length, tbl_name, THREAD); return NULL; } const int cp_size = cp->length(); guarantee_property(valid_symbol_at(name_index), "Name index %u in %s has bad constant type in class file %s", name_index, tbl_name, CHECK_NULL); guarantee_property(valid_symbol_at(descriptor_index), "Signature index %u in %s has bad constant type in class file %s", descriptor_index, tbl_name, CHECK_NULL); const Symbol* const name = cp->symbol_at(name_index); const Symbol* const sig = cp->symbol_at(descriptor_index); verify_legal_field_name(name, CHECK_NULL); u2 extra_slot = 0; if (!isLVTT) { verify_legal_field_signature(name, sig, CHECK_NULL); // 4894874: check special cases for double and long local 
variables if (sig == vmSymbols::type_signature(T_DOUBLE) || sig == vmSymbols::type_signature(T_LONG)) { extra_slot = 1; } } guarantee_property((index + extra_slot) < max_locals, "Invalid index %u in %s in class file %s", index, tbl_name, CHECK_NULL); } } return localvariable_table_start; } static const u1* parse_stackmap_table(const ClassFileStream* const cfs, u4 code_attribute_length, bool need_verify, TRAPS) { assert(cfs != NULL, "invariant"); if (0 == code_attribute_length) { return NULL; } const u1* const stackmap_table_start = cfs->current(); assert(stackmap_table_start != NULL, "null stackmap table"); // check code_attribute_length first cfs->skip_u1(code_attribute_length, CHECK_NULL); if (!need_verify && !DumpSharedSpaces) { return NULL; } return stackmap_table_start; } const ClassFileParser::unsafe_u2* ClassFileParser::parse_checked_exceptions(const ClassFileStream* const cfs, u2* const checked_exceptions_length, u4 method_attribute_length, TRAPS) { assert(cfs != NULL, "invariant"); assert(checked_exceptions_length != NULL, "invariant"); cfs->guarantee_more(2, CHECK_NULL); // checked_exceptions_length *checked_exceptions_length = cfs->get_u2_fast(); const unsigned int size = (*checked_exceptions_length) * sizeof(CheckedExceptionElement) / sizeof(u2); const unsafe_u2* const checked_exceptions_start = cfs->current(); assert(checked_exceptions_start != NULL, "null checked exceptions"); if (!_need_verify) { cfs->skip_u2_fast(size); } else { // Verify each value in the checked exception table u2 checked_exception; const u2 len = *checked_exceptions_length; cfs->guarantee_more(2 * len, CHECK_NULL); for (int i = 0; i < len; i++) { checked_exception = cfs->get_u2_fast(); check_property( valid_klass_reference_at(checked_exception), "Exception name has bad type at constant pool %u in class file %s", checked_exception, CHECK_NULL); } } // check exceptions attribute length if (_need_verify) { guarantee_property(method_attribute_length == 
(sizeof(*checked_exceptions_length) + sizeof(u2) * size), "Exceptions attribute has wrong length in class file %s", CHECK_NULL); } return checked_exceptions_start; } void ClassFileParser::throwIllegalSignature(const char* type, const Symbol* name, const Symbol* sig, TRAPS) const { assert(name != NULL, "invariant"); assert(sig != NULL, "invariant"); ResourceMark rm(THREAD); Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(), "%s \"%s\" in class %s has illegal signature \"%s\"", type, name->as_C_string(), _class_name->as_C_string(), sig->as_C_string()); } AnnotationCollector::ID AnnotationCollector::annotation_index(const ClassLoaderData* loader_data, const Symbol* name, const bool can_access_vm_annotations) { const vmSymbolID sid = vmSymbols::find_sid(name); // Privileged code can use all annotations. Other code silently drops some. const bool privileged = loader_data->is_boot_class_loader_data() || loader_data->is_platform_class_loader_data() || can_access_vm_annotations; switch (sid) { case VM_SYMBOL_ENUM_NAME(reflect_CallerSensitive_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_CallerSensitive; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ForceInline_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_ForceInline; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_DontInline_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_DontInline; } case VM_SYMBOL_ENUM_NAME(java_lang_invoke_InjectedProfile_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_InjectedProfile; } case VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature): { 
if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_LambdaForm_Compiled; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Hidden_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_Hidden; } case VM_SYMBOL_ENUM_NAME(jdk_internal_misc_Scoped_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_Scoped; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_IntrinsicCandidate_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_IntrinsicCandidate; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Stable_signature): { if (_location != _in_field) break; // only allow for fields if (!privileged) break; // only allow in privileged code return _field_Stable; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): { if (_location != _in_field && _location != _in_class) { break; // only allow for fields and classes } if (!EnableContended || (RestrictContended && !privileged)) { break; // honor privileges } return _jdk_internal_vm_annotation_Contended; } case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ReservedStackAccess_signature): { if (_location != _in_method) break; // only allow for methods if (RestrictReservedStack && !privileged) break; // honor privileges return _jdk_internal_vm_annotation_ReservedStackAccess; } case VM_SYMBOL_ENUM_NAME(jdk_internal_ValueBased_signature): { if (_location != _in_class) break; // only allow for classes if (!privileged) break; // only allow in priviledged code return _jdk_internal_ValueBased; } default: { break; } } return AnnotationCollector::_unknown; } void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) { if 
(is_contended()) f->set_contended_group(contended_group()); if (is_stable()) f->set_stable(true); } ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() { // If there's an error deallocate metadata for field annotations MetadataFactory::free_array<u1>(_loader_data, _field_annotations); MetadataFactory::free_array<u1>(_loader_data, _field_type_annotations); } void MethodAnnotationCollector::apply_to(const methodHandle& m) { if (has_annotation(_method_CallerSensitive)) m->set_caller_sensitive(true); if (has_annotation(_method_ForceInline)) m->set_force_inline(true); if (has_annotation(_method_DontInline)) m->set_dont_inline(true); if (has_annotation(_method_InjectedProfile)) m->set_has_injected_profile(true); if (has_annotation(_method_LambdaForm_Compiled) && m->intrinsic_id() == vmIntrinsics::_none) m->set_intrinsic_id(vmIntrinsics::_compiledLambdaForm); if (has_annotation(_method_Hidden)) m->set_hidden(true); if (has_annotation(_method_Scoped)) m->set_scoped(true); if (has_annotation(_method_IntrinsicCandidate) && !m->is_synthetic()) m->set_intrinsic_candidate(true); if (has_annotation(_jdk_internal_vm_annotation_ReservedStackAccess)) m->set_has_reserved_stack_access(true); } void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) { assert(ik != NULL, "invariant"); if (has_annotation(_jdk_internal_vm_annotation_Contended)) { ik->set_is_contended(is_contended()); } if (has_annotation(_jdk_internal_ValueBased)) { ik->set_has_value_based_class_annotation(); if (DiagnoseSyncOnValueBasedClasses) { ik->set_is_value_based(); ik->set_prototype_header(markWord::prototype()); } } } #define MAX_ARGS_SIZE 255 #define MAX_CODE_SIZE 65535 #define INITIAL_MAX_LVT_NUMBER 256 /* Copy class file LVT's/LVTT's into the HotSpot internal LVT. * * Rules for LVT's and LVTT's are: * - There can be any number of LVT's and LVTT's. * - If there are n LVT's, it is the same as if there was just * one LVT containing all the entries from the n LVT's. 
 * - There may be no more than one LVT entry per local variable.
 *   Two LVT entries are 'equal' if these fields are the same:
 *   start_pc, length, name, slot
 * - There may be no more than one LVTT entry per each LVT entry.
 *   Each LVTT entry has to match some LVT entry.
 * - HotSpot internal LVT keeps natural ordering of class file LVT entries.
 */
void ClassFileParser::copy_localvariable_table(const ConstMethod* cm,
                                               int lvt_cnt,
                                               u2* const localvariable_table_length,
                                               const unsafe_u2** const localvariable_table_start,
                                               int lvtt_cnt,
                                               u2* const localvariable_type_table_length,
                                               const unsafe_u2** const localvariable_type_table_start,
                                               TRAPS) {
  ResourceMark rm(THREAD);

  // Hash table keyed on (start_pc, length, name, slot) so duplicate LVT
  // entries and LVTT->LVT matching can both be detected in O(1) per entry.
  typedef ResourceHashtable<LocalVariableTableElement, LocalVariableTableElement*,
                            &LVT_Hash::hash, &LVT_Hash::equals> LVT_HashTable;

  LVT_HashTable* const table = new LVT_HashTable();

  // To fill LocalVariableTable in
  const Classfile_LVT_Element* cf_lvt;
  LocalVariableTableElement* lvt = cm->localvariable_table_start();

  // Pass 1: copy every class-file LVT entry into the ConstMethod's internal
  // LVT (preserving class-file order) and register it in the hash table.
  for (int tbl_no = 0; tbl_no < lvt_cnt; tbl_no++) {
    cf_lvt = (Classfile_LVT_Element *) localvariable_table_start[tbl_no];
    for (int idx = 0; idx < localvariable_table_length[tbl_no]; idx++, lvt++) {
      copy_lvt_element(&cf_lvt[idx], lvt);
      // If no duplicates, add LVT elem in hashtable.
      // put() returning false means an equal entry already exists; this is
      // only an error when verifying a >= 49.0 (Java 5) class file.
      if (table->put(*lvt, lvt) == false
          && _need_verify
          && _major_version >= JAVA_1_5_VERSION) {
        classfile_parse_error("Duplicated LocalVariableTable attribute "
                              "entry for '%s' in class file %s",
                               _cp->symbol_at(lvt->name_cp_index)->as_utf8(),
                               THREAD);
        return;
      }
    }
  }

  // To merge LocalVariableTable and LocalVariableTypeTable
  const Classfile_LVT_Element* cf_lvtt;
  LocalVariableTableElement lvtt_elem;

  // Pass 2: for each LVTT entry find its matching LVT entry via the hash
  // table and graft the generic signature onto it.
  for (int tbl_no = 0; tbl_no < lvtt_cnt; tbl_no++) {
    cf_lvtt = (Classfile_LVT_Element *) localvariable_type_table_start[tbl_no];
    for (int idx = 0; idx < localvariable_type_table_length[tbl_no]; idx++) {
      copy_lvt_element(&cf_lvtt[idx], &lvtt_elem);
      LocalVariableTableElement** entry = table->get(lvtt_elem);
      if (entry == NULL) {
        // LVTT entry with no corresponding LVT entry: error only when verifying.
        if (_need_verify) {
          classfile_parse_error("LVTT entry for '%s' in class file %s "
                                "does not match any LVT entry",
                                 _cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
                                 THREAD);
          return;
        }
      } else if ((*entry)->signature_cp_index != 0 && _need_verify) {
        // A second LVTT entry mapped onto the same LVT entry.
        classfile_parse_error("Duplicated LocalVariableTypeTable attribute "
                              "entry for '%s' in class file %s",
                               _cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
                               THREAD);
        return;
      } else {
        // to add generic signatures into LocalVariableTable
        (*entry)->signature_cp_index = lvtt_elem.descriptor_cp_index;
      }
    }
  }
}

// Copies the parsed method annotation byte arrays (visible + invisible pairs
// are assembled into single AnnotationArrays) onto the ConstMethod.  Each
// category is skipped entirely when both of its lengths are zero.
void ClassFileParser::copy_method_annotations(ConstMethod* cm,
                                       const u1* runtime_visible_annotations,
                                       int runtime_visible_annotations_length,
                                       const u1* runtime_invisible_annotations,
                                       int runtime_invisible_annotations_length,
                                       const u1* runtime_visible_parameter_annotations,
                                       int runtime_visible_parameter_annotations_length,
                                       const u1* runtime_invisible_parameter_annotations,
                                       int runtime_invisible_parameter_annotations_length,
                                       const u1* runtime_visible_type_annotations,
                                       int runtime_visible_type_annotations_length,
                                       const u1* runtime_invisible_type_annotations,
                                       int runtime_invisible_type_annotations_length,
                                       const u1* annotation_default,
                                       int annotation_default_length,
                                       TRAPS) {
  AnnotationArray* a;

  if (runtime_visible_annotations_length +
      runtime_invisible_annotations_length > 0) {
    a = assemble_annotations(runtime_visible_annotations,
                             runtime_visible_annotations_length,
                             runtime_invisible_annotations,
                             runtime_invisible_annotations_length,
                             CHECK);
    cm->set_method_annotations(a);
  }

  if (runtime_visible_parameter_annotations_length +
      runtime_invisible_parameter_annotations_length > 0) {
    a = assemble_annotations(runtime_visible_parameter_annotations,
                             runtime_visible_parameter_annotations_length,
                             runtime_invisible_parameter_annotations,
                             runtime_invisible_parameter_annotations_length,
                             CHECK);
    cm->set_parameter_annotations(a);
  }

  if (annotation_default_length > 0) {
    // AnnotationDefault has no invisible counterpart, hence NULL/0.
    a = assemble_annotations(annotation_default,
                             annotation_default_length,
                             NULL,
                             0,
                             CHECK);
    cm->set_default_annotations(a);
  }

  if (runtime_visible_type_annotations_length +
      runtime_invisible_type_annotations_length > 0) {
    a = assemble_annotations(runtime_visible_type_annotations,
                             runtime_visible_type_annotations_length,
                             runtime_invisible_type_annotations,
                             runtime_invisible_type_annotations_length,
                             CHECK);
    cm->set_type_annotations(a);
  }
}

// Note: the parse_method below is big and clunky because all parsing of the code and exceptions
// attribute is inlined. This is cumbersome to avoid since we inline most of the parts in the
// Method* to save footprint, so we only know the size of the resulting Method* when the
// entire method attribute is parsed.
//
// The promoted_flags parameter is used to pass relevant access_flags
// from the method back up to the containing klass. These flag values
// are added to klass's access_flags.
// Parses one method_info structure from the class file stream and returns a
// fully populated Method*.  On malformed input, reports a classfile parse
// error (via classfile_parse_error / check_property / guarantee_property,
// which raise the pending exception through the TRAPS machinery) and returns
// NULL.  The stream is left positioned just past this method_info.
Method* ClassFileParser::parse_method(const ClassFileStream* const cfs,
                                      bool is_interface,
                                      const ConstantPool* cp,
                                      AccessFlags* const promoted_flags,
                                      TRAPS) {
  assert(cfs != NULL, "invariant");
  assert(cp != NULL, "invariant");
  assert(promoted_flags != NULL, "invariant");

  ResourceMark rm(THREAD);
  // Parse fixed parts:
  // access_flags, name_index, descriptor_index, attributes_count
  cfs->guarantee_more(8, CHECK_NULL);

  int flags = cfs->get_u2_fast();
  const u2 name_index = cfs->get_u2_fast();
  const int cp_size = cp->length();
  check_property(
    valid_symbol_at(name_index),
    "Illegal constant pool index %u for method name in class file %s",
    name_index, CHECK_NULL);

  const Symbol* const name = cp->symbol_at(name_index);
  verify_legal_method_name(name, CHECK_NULL);

  const u2 signature_index = cfs->get_u2_fast();
  guarantee_property(
    valid_symbol_at(signature_index),
    "Illegal constant pool index %u for method signature in class file %s",
    signature_index, CHECK_NULL);
  const Symbol* const signature = cp->symbol_at(signature_index);

  if (name == vmSymbols::class_initializer_name()) {
    // We ignore the other access flags for a valid class initializer.
    // (JVM Spec 2nd ed., chapter 4.6)
    if (_major_version < 51) { // backward compatibility
      flags = JVM_ACC_STATIC;
    } else if ((flags & JVM_ACC_STATIC) == JVM_ACC_STATIC) {
      // For >= 51 keep only ACC_STATIC (plus ACC_STRICT up to Java 16,
      // where strictfp was still meaningful).
      flags &= JVM_ACC_STATIC | (_major_version <= JAVA_16_VERSION ? JVM_ACC_STRICT : 0);
    } else {
      classfile_parse_error("Method <clinit> is not static in class file %s", THREAD);
      return NULL;
    }
  } else {
    verify_legal_method_modifiers(flags, is_interface, name, CHECK_NULL);
  }

  if (name == vmSymbols::object_initializer_name() && is_interface) {
    classfile_parse_error("Interface cannot have a method named <init>, class file %s", THREAD);
    return NULL;
  }

  int args_size = -1;  // only used when _need_verify is true
  if (_need_verify) {
    // +1 for the implicit 'this' slot of non-static methods.
    args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) +
                 verify_legal_method_signature(name, signature, CHECK_NULL);
    if (args_size > MAX_ARGS_SIZE) {
      classfile_parse_error("Too many arguments in method signature in class file %s", THREAD);
      return NULL;
    }
  }

  AccessFlags access_flags(flags & JVM_RECOGNIZED_METHOD_MODIFIERS);

  // Default values for code and exceptions attribute elements
  u2 max_stack = 0;
  u2 max_locals = 0;
  u4 code_length = 0;
  const u1* code_start = 0;
  u2 exception_table_length = 0;
  const unsafe_u2* exception_table_start = NULL; // (potentially unaligned) pointer to array of u2 elements
  Array<int>* exception_handlers = Universe::the_empty_int_array();
  u2 checked_exceptions_length = 0;
  const unsafe_u2* checked_exceptions_start = NULL; // (potentially unaligned) pointer to array of u2 elements
  CompressedLineNumberWriteStream* linenumber_table = NULL;
  int linenumber_table_length = 0;
  int total_lvt_length = 0;
  u2 lvt_cnt = 0;
  u2 lvtt_cnt = 0;
  bool lvt_allocated = false;
  u2 max_lvt_cnt = INITIAL_MAX_LVT_NUMBER;
  u2 max_lvtt_cnt = INITIAL_MAX_LVT_NUMBER;
  u2* localvariable_table_length = NULL;
  const unsafe_u2** localvariable_table_start = NULL; // (potentially unaligned) pointer to array of LVT attributes
  u2* localvariable_type_table_length = NULL;
  const unsafe_u2** localvariable_type_table_start = NULL; // (potentially unaligned) pointer to LVTT attributes
  int method_parameters_length = -1;
  const u1* method_parameters_data = NULL;
  bool method_parameters_seen = false;
  bool parsed_code_attribute = false;
  bool parsed_checked_exceptions_attribute = false;
  bool parsed_stackmap_attribute = false;
  // stackmap attribute - JDK1.5
  const u1* stackmap_data = NULL;
  int stackmap_data_length = 0;
  u2 generic_signature_index = 0;
  MethodAnnotationCollector parsed_annotations;
  const u1* runtime_visible_annotations = NULL;
  int runtime_visible_annotations_length = 0;
  const u1* runtime_invisible_annotations = NULL;
  int runtime_invisible_annotations_length = 0;
  const u1* runtime_visible_parameter_annotations = NULL;
  int runtime_visible_parameter_annotations_length = 0;
  const u1* runtime_invisible_parameter_annotations = NULL;
  int runtime_invisible_parameter_annotations_length = 0;
  const u1* runtime_visible_type_annotations = NULL;
  int runtime_visible_type_annotations_length = 0;
  const u1* runtime_invisible_type_annotations = NULL;
  int runtime_invisible_type_annotations_length = 0;
  bool runtime_invisible_annotations_exists = false;
  bool runtime_invisible_type_annotations_exists = false;
  bool runtime_invisible_parameter_annotations_exists = false;
  const u1* annotation_default = NULL;
  int annotation_default_length = 0;

  // Parse code and exceptions attribute
  u2 method_attributes_count = cfs->get_u2_fast();
  while (method_attributes_count--) {
    cfs->guarantee_more(6, CHECK_NULL);  // method_attribute_name_index, method_attribute_length
    const u2 method_attribute_name_index = cfs->get_u2_fast();
    const u4 method_attribute_length = cfs->get_u4_fast();
    check_property(
      valid_symbol_at(method_attribute_name_index),
      "Invalid method attribute name index %u in class file %s",
      method_attribute_name_index, CHECK_NULL);

    const Symbol* const method_attribute_name = cp->symbol_at(method_attribute_name_index);
    if (method_attribute_name == vmSymbols::tag_code()) {
      // Parse Code attribute
      if (_need_verify) {
        guarantee_property(
            !access_flags.is_native() && !access_flags.is_abstract(),
                        "Code attribute in native or abstract methods in class file %s",
                         CHECK_NULL);
      }
      if (parsed_code_attribute) {
        classfile_parse_error("Multiple Code attributes in class file %s", THREAD);
        return NULL;
      }
      parsed_code_attribute = true;

      // Stack size, locals size, and code size
      cfs->guarantee_more(8, CHECK_NULL);
      max_stack = cfs->get_u2_fast();
      max_locals = cfs->get_u2_fast();
      code_length = cfs->get_u4_fast();
      if (_need_verify) {
        guarantee_property(args_size <= max_locals,
                           "Arguments can't fit into locals in class file %s",
                           CHECK_NULL);
        guarantee_property(code_length > 0 && code_length <= MAX_CODE_SIZE,
                           "Invalid method Code length %u in class file %s",
                           code_length, CHECK_NULL);
      }
      // Code pointer
      code_start = cfs->current();
      assert(code_start != NULL, "null code start");
      cfs->guarantee_more(code_length, CHECK_NULL);
      cfs->skip_u1_fast(code_length);

      // Exception handler table
      cfs->guarantee_more(2, CHECK_NULL);  // exception_table_length
      exception_table_length = cfs->get_u2_fast();
      if (exception_table_length > 0) {
        exception_table_start = parse_exception_table(cfs,
                                                      code_length,
                                                      exception_table_length,
                                                      CHECK_NULL);
      }

      // Parse additional attributes in code attribute
      cfs->guarantee_more(2, CHECK_NULL);  // code_attributes_count
      u2 code_attributes_count = cfs->get_u2_fast();

      // Running total of bytes the Code attribute should occupy; checked
      // against the declared method_attribute_length below when verifying.
      unsigned int calculated_attribute_length = 0;

      calculated_attribute_length =
          sizeof(max_stack) + sizeof(max_locals) + sizeof(code_length);
      calculated_attribute_length +=
        code_length +
        sizeof(exception_table_length) +
        sizeof(code_attributes_count) +
        exception_table_length *
            ( sizeof(u2) +   // start_pc
              sizeof(u2) +   // end_pc
              sizeof(u2) +   // handler_pc
              sizeof(u2) );  // catch_type_index

      while (code_attributes_count--) {
        cfs->guarantee_more(6, CHECK_NULL);  // code_attribute_name_index, code_attribute_length
        const u2 code_attribute_name_index = cfs->get_u2_fast();
        const u4 code_attribute_length = cfs->get_u4_fast();
        calculated_attribute_length += code_attribute_length +
                                       sizeof(code_attribute_name_index) +
                                       sizeof(code_attribute_length);
        check_property(valid_symbol_at(code_attribute_name_index),
                       "Invalid code attribute name index %u in class file %s",
                       code_attribute_name_index,
                       CHECK_NULL);
        if (LoadLineNumberTables &&
            cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
          // Parse and compress line number table
          parse_linenumber_table(code_attribute_length,
                                 code_length,
                                 &linenumber_table,
                                 CHECK_NULL);

        } else if (LoadLocalVariableTables &&
                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
          // Parse local variable table
          // The four LVT/LVTT arrays are allocated lazily on first use and
          // grown by doubling when full.
          if (!lvt_allocated) {
            localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
            localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, const unsafe_u2*, INITIAL_MAX_LVT_NUMBER);
            localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
            localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, const unsafe_u2*, INITIAL_MAX_LVT_NUMBER);
            lvt_allocated = true;
          }
          if (lvt_cnt == max_lvt_cnt) {
            max_lvt_cnt <<= 1;
            localvariable_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt);
            localvariable_table_start  = REALLOC_RESOURCE_ARRAY(const unsafe_u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
          }
          localvariable_table_start[lvt_cnt] =
            parse_localvariable_table(cfs,
                                      code_length,
                                      max_locals,
                                      code_attribute_length,
                                      &localvariable_table_length[lvt_cnt],
                                      false,    // is not LVTT
                                      CHECK_NULL);
          total_lvt_length += localvariable_table_length[lvt_cnt];
          lvt_cnt++;
        } else if (LoadLocalVariableTypeTables &&
                   _major_version >= JAVA_1_5_VERSION &&
                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
          if (!lvt_allocated) {
            localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
            localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, const unsafe_u2*, INITIAL_MAX_LVT_NUMBER);
            localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
            localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
              THREAD, const unsafe_u2*, INITIAL_MAX_LVT_NUMBER);
            lvt_allocated = true;
          }
          // Parse local variable type table
          if (lvtt_cnt == max_lvtt_cnt) {
            max_lvtt_cnt <<= 1;
            localvariable_type_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt);
            localvariable_type_table_start  = REALLOC_RESOURCE_ARRAY(const unsafe_u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
          }
          localvariable_type_table_start[lvtt_cnt] =
            parse_localvariable_table(cfs,
                                      code_length,
                                      max_locals,
                                      code_attribute_length,
                                      &localvariable_type_table_length[lvtt_cnt],
                                      true,     // is LVTT
                                      CHECK_NULL);
          lvtt_cnt++;
        } else if (_major_version >= Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION &&
                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
          // Stack map is only needed by the new verifier in JDK1.5.
          if (parsed_stackmap_attribute) {
            classfile_parse_error("Multiple StackMapTable attributes in class file %s", THREAD);
            return NULL;
          }
          stackmap_data = parse_stackmap_table(cfs, code_attribute_length, _need_verify, CHECK_NULL);
          stackmap_data_length = code_attribute_length;
          parsed_stackmap_attribute = true;
        } else {
          // Skip unknown attributes
          cfs->skip_u1(code_attribute_length, CHECK_NULL);
        }
      }
      // check method attribute length
      if (_need_verify) {
        guarantee_property(method_attribute_length == calculated_attribute_length,
                           "Code segment has wrong length in class file %s",
                           CHECK_NULL);
      }
    } else if (method_attribute_name == vmSymbols::tag_exceptions()) {
      // Parse Exceptions attribute
      if (parsed_checked_exceptions_attribute) {
        classfile_parse_error("Multiple Exceptions attributes in class file %s", THREAD);
        return NULL;
      }
      parsed_checked_exceptions_attribute = true;
      checked_exceptions_start =
            parse_checked_exceptions(cfs,
                                     &checked_exceptions_length,
                                     method_attribute_length,
                                     CHECK_NULL);
    } else if (method_attribute_name == vmSymbols::tag_method_parameters()) {
      // reject multiple method parameters
      if (method_parameters_seen) {
        classfile_parse_error("Multiple MethodParameters attributes in class file %s", THREAD);
        return NULL;
      }
      method_parameters_seen = true;
      method_parameters_length = cfs->get_u1_fast();
      // Each entry is name_index (u2) + flags (u2) = 4 bytes, +1 for the count byte.
      const u2 real_length = (method_parameters_length * 4u) + 1u;
      if (method_attribute_length != real_length) {
        classfile_parse_error(
          "Invalid MethodParameters method attribute length %u in class file",
          method_attribute_length, THREAD);
        return NULL;
      }
      method_parameters_data = cfs->current();
      // Skip both u2 fields of every entry; the data is copied out below
      // after the Method* is allocated.
      cfs->skip_u2_fast(method_parameters_length);
      cfs->skip_u2_fast(method_parameters_length);
      // ignore this attribute if it cannot be reflected
      if (!vmClasses::Parameter_klass_loaded())
        method_parameters_length = -1;
    } else if (method_attribute_name == vmSymbols::tag_synthetic()) {
      if (method_attribute_length != 0) {
        classfile_parse_error(
          "Invalid Synthetic method attribute length %u in class file %s",
          method_attribute_length, THREAD);
        return NULL;
      }
      // Should we check that there hasn't already been a synthetic attribute?
      access_flags.set_is_synthetic();
    } else if (method_attribute_name == vmSymbols::tag_deprecated()) { // 4276120
      if (method_attribute_length != 0) {
        classfile_parse_error(
          "Invalid Deprecated method attribute length %u in class file %s",
          method_attribute_length, THREAD);
        return NULL;
      }
    } else if (_major_version >= JAVA_1_5_VERSION) {
      if (method_attribute_name == vmSymbols::tag_signature()) {
        if (generic_signature_index != 0) {
          classfile_parse_error(
            "Multiple Signature attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        if (method_attribute_length != 2) {
          classfile_parse_error(
            "Invalid Signature attribute length %u in class file %s",
            method_attribute_length, THREAD);
          return NULL;
        }
        generic_signature_index = parse_generic_signature_attribute(cfs, CHECK_NULL);
      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
        if (runtime_visible_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleAnnotations attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        runtime_visible_annotations_length = method_attribute_length;
        runtime_visible_annotations = cfs->current();
        assert(runtime_visible_annotations != NULL, "null visible annotations");
        cfs->guarantee_more(runtime_visible_annotations_length, CHECK_NULL);
        // Visible annotations are scanned eagerly so VM-significant ones
        // (collected into parsed_annotations) can be applied to the Method*.
        parse_annotations(cp,
                          runtime_visible_annotations,
                          runtime_visible_annotations_length,
                          &parsed_annotations,
                          _loader_data,
                          _can_access_vm_annotations);
        cfs->skip_u1_fast(runtime_visible_annotations_length);
      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
        if (runtime_invisible_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleAnnotations attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        runtime_invisible_annotations_exists = true;
        // Invisible annotations are retained only under -XX:+PreserveAllAnnotations.
        if (PreserveAllAnnotations) {
          runtime_invisible_annotations_length = method_attribute_length;
          runtime_invisible_annotations = cfs->current();
          assert(runtime_invisible_annotations != NULL, "null invisible annotations");
        }
        cfs->skip_u1(method_attribute_length, CHECK_NULL);
      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_parameter_annotations()) {
        if (runtime_visible_parameter_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleParameterAnnotations attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        runtime_visible_parameter_annotations_length = method_attribute_length;
        runtime_visible_parameter_annotations = cfs->current();
        assert(runtime_visible_parameter_annotations != NULL, "null visible parameter annotations");
        cfs->skip_u1(runtime_visible_parameter_annotations_length, CHECK_NULL);
      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_parameter_annotations()) {
        if (runtime_invisible_parameter_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleParameterAnnotations attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        runtime_invisible_parameter_annotations_exists = true;
        if (PreserveAllAnnotations) {
          runtime_invisible_parameter_annotations_length = method_attribute_length;
          runtime_invisible_parameter_annotations = cfs->current();
          assert(runtime_invisible_parameter_annotations != NULL, "null invisible parameter annotations");
        }
        cfs->skip_u1(method_attribute_length, CHECK_NULL);
      } else if (method_attribute_name == vmSymbols::tag_annotation_default()) {
        if (annotation_default != NULL) {
          classfile_parse_error(
            "Multiple AnnotationDefault attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        annotation_default_length = method_attribute_length;
        annotation_default = cfs->current();
        assert(annotation_default != NULL, "null annotation default");
        cfs->skip_u1(annotation_default_length, CHECK_NULL);
      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
        if (runtime_visible_type_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
            THREAD);
          return NULL;
        }
        runtime_visible_type_annotations_length = method_attribute_length;
        runtime_visible_type_annotations = cfs->current();
        assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
        // No need for the VM to parse Type annotations
        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_NULL);
      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
        if (runtime_invisible_type_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
            THREAD);
          return NULL;
        } else {
          runtime_invisible_type_annotations_exists = true;
        }
        if (PreserveAllAnnotations) {
          runtime_invisible_type_annotations_length = method_attribute_length;
          runtime_invisible_type_annotations = cfs->current();
          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
        }
        cfs->skip_u1(method_attribute_length, CHECK_NULL);
      } else {
        // Skip unknown attributes
        cfs->skip_u1(method_attribute_length, CHECK_NULL);
      }
    } else {
      // Skip unknown attributes
      cfs->skip_u1(method_attribute_length, CHECK_NULL);
    }
  }

  if (linenumber_table != NULL) {
    linenumber_table->write_terminator();
    linenumber_table_length = linenumber_table->position();
  }

  // Make sure there's at least one Code attribute in non-native/non-abstract method
  if (_need_verify) {
    guarantee_property(access_flags.is_native() ||
                       access_flags.is_abstract() ||
                       parsed_code_attribute,
                       "Absent Code attribute in method that is not native or abstract in class file %s",
                       CHECK_NULL);
  }

  // All sizing information for a Method* is finally available, now create it
  InlineTableSizes sizes(
      total_lvt_length,
      linenumber_table_length,
      exception_table_length,
      checked_exceptions_length,
      method_parameters_length,
      generic_signature_index,
      runtime_visible_annotations_length +
           runtime_invisible_annotations_length,
      runtime_visible_parameter_annotations_length +
           runtime_invisible_parameter_annotations_length,
      runtime_visible_type_annotations_length +
           runtime_invisible_type_annotations_length,
      annotation_default_length,
      0);

  Method* const m = Method::allocate(_loader_data,
                                     code_length,
                                     access_flags,
                                     &sizes,
                                     ConstMethod::NORMAL,
                                     CHECK_NULL);

  ClassLoadingService::add_class_method_size(m->size()*wordSize);

  // Fill in information from fixed part (access_flags already set)
  m->set_constants(_cp);
  m->set_name_index(name_index);
  m->set_signature_index(signature_index);
  m->compute_from_signature(cp->symbol_at(signature_index));
  assert(args_size < 0 || args_size == m->size_of_parameters(), "");

  // Fill in code attribute information
  m->set_max_stack(max_stack);
  m->set_max_locals(max_locals);
  if (stackmap_data != NULL) {
    m->constMethod()->copy_stackmap_data(_loader_data,
                                         (u1*)stackmap_data,
                                         stackmap_data_length,
                                         CHECK_NULL);
  }

  // Copy byte codes
  m->set_code((u1*)code_start);

  // Copy line number table
  if (linenumber_table != NULL) {
    memcpy(m->compressed_linenumber_table(),
           linenumber_table->buffer(),
           linenumber_table_length);
  }

  // Copy exception table
  if (exception_table_length > 0) {
    Copy::conjoint_swap_if_needed<Endian::JAVA>(exception_table_start,
                                                m->exception_table_start(),
                                                exception_table_length * sizeof(ExceptionTableElement),
                                                sizeof(u2));
  }

  // Copy method parameters
  if (method_parameters_length > 0) {
    MethodParametersElement* elem = m->constMethod()->method_parameters_start();
    for (int i = 0; i < method_parameters_length; i++) {
      elem[i].name_cp_index = Bytes::get_Java_u2((address)method_parameters_data);
      method_parameters_data += 2;
      elem[i].flags = Bytes::get_Java_u2((address)method_parameters_data);
      method_parameters_data += 2;
    }
  }

  // Copy checked exceptions
  if (checked_exceptions_length > 0) {
    Copy::conjoint_swap_if_needed<Endian::JAVA>(checked_exceptions_start,
                                                m->checked_exceptions_start(),
                                                checked_exceptions_length * sizeof(CheckedExceptionElement),
                                                sizeof(u2));
  }

  // Copy class file LVT's/LVTT's into the HotSpot internal LVT.
  if (total_lvt_length > 0) {
    promoted_flags->set_has_localvariable_table();
    copy_localvariable_table(m->constMethod(),
                             lvt_cnt,
                             localvariable_table_length,
                             localvariable_table_start,
                             lvtt_cnt,
                             localvariable_type_table_length,
                             localvariable_type_table_start,
                             CHECK_NULL);
  }

  if (parsed_annotations.has_any_annotations())
    parsed_annotations.apply_to(methodHandle(THREAD, m));

  if (is_hidden()) { // Mark methods in hidden classes as 'hidden'.
    m->set_hidden(true);
  }

  // Copy annotations
  copy_method_annotations(m->constMethod(),
                          runtime_visible_annotations,
                          runtime_visible_annotations_length,
                          runtime_invisible_annotations,
                          runtime_invisible_annotations_length,
                          runtime_visible_parameter_annotations,
                          runtime_visible_parameter_annotations_length,
                          runtime_invisible_parameter_annotations,
                          runtime_invisible_parameter_annotations_length,
                          runtime_visible_type_annotations,
                          runtime_visible_type_annotations_length,
                          runtime_invisible_type_annotations,
                          runtime_invisible_type_annotations_length,
                          annotation_default,
                          annotation_default_length,
                          CHECK_NULL);

  // Record finalizer / vanilla-constructor facts for the enclosing klass.
  if (name == vmSymbols::finalize_method_name() &&
      signature == vmSymbols::void_method_signature()) {
    if (m->is_empty_method()) {
      _has_empty_finalizer = true;
    } else {
      _has_finalizer = true;
    }
  }
  if (name == vmSymbols::object_initializer_name() &&
      signature == vmSymbols::void_method_signature() &&
      m->is_vanilla_constructor()) {
    _has_vanilla_constructor = true;
  }

  NOT_PRODUCT(m->verify());
  return m;
}

// The promoted_flags parameter is used to pass relevant access_flags
// from the methods back up to the containing klass.
// These flag values are added to klass's access_flags.
// Side-effects: populates the _methods field in the parser
void ClassFileParser::parse_methods(const ClassFileStream* const cfs,
                                    bool is_interface,
                                    AccessFlags* promoted_flags,
                                    bool* has_final_method,
                                    bool* declares_nonstatic_concrete_methods,
                                    TRAPS) {
  assert(cfs != NULL, "invariant");
  assert(promoted_flags != NULL, "invariant");
  assert(has_final_method != NULL, "invariant");
  assert(declares_nonstatic_concrete_methods != NULL, "invariant");

  assert(NULL == _methods, "invariant");

  cfs->guarantee_more(2, CHECK);  // length
  const u2 length = cfs->get_u2_fast();
  if (length == 0) {
    _methods = Universe::the_empty_method_array();
  } else {
    _methods = MetadataFactory::new_array<Method*>(_loader_data,
                                                   length,
                                                   NULL,
                                                   CHECK);

    for (int index = 0; index < length; index++) {
      Method* method = parse_method(cfs,
                                    is_interface,
                                    _cp,
                                    promoted_flags,
                                    CHECK);

      if (method->is_final()) {
        *has_final_method = true;
      }
      // declares_nonstatic_concrete_methods: declares concrete instance methods, any access flags
      // used for interface initialization, and default method inheritance analysis
      if (is_interface && !(*declares_nonstatic_concrete_methods)
        && !method->is_abstract() && !method->is_static()) {
        *declares_nonstatic_concrete_methods = true;
      }
      _methods->at_put(index, method);
    }

    if (_need_verify && length > 1) {
      // Check duplicated methods
      ResourceMark rm(THREAD);
      NameSigHash** names_and_sigs = NEW_RESOURCE_ARRAY_IN_THREAD(
        THREAD, NameSigHash*, HASH_ROW_SIZE);
      initialize_hashtable(names_and_sigs);
      bool dup = false;
      const Symbol* name = NULL;
      const Symbol* sig = NULL;
      {
        // No safepoints allowed while the raw Symbol* pointers are live.
        debug_only(NoSafepointVerifier nsv;)
        for (int i = 0; i < length; i++) {
          const Method* const m = _methods->at(i);
          name = m->name();
          sig = m->signature();
          // If no duplicates, add name/signature in hashtable names_and_sigs.
          if (!put_after_lookup(name, sig, names_and_sigs)) {
            dup = true;
            break;
          }
        }
      }
      if (dup) {
        classfile_parse_error("Duplicate method name \"%s\" with signature \"%s\" in class file %s",
                               name->as_C_string(), sig->as_klass_external_name(),
                               THREAD);
      }
    }
  }
}

// Sorts the method array in place for fast lookups.  Returns the original
// class-file ordering as an intArray when JVMTI or archive dumping needs it,
// otherwise NULL.
static const intArray* sort_methods(Array<Method*>* methods) {
  const int length = methods->length();
  // If JVMTI original method ordering or sharing is enabled we have to
  // remember the original class file ordering.
  // We temporarily use the vtable_index field in the Method* to store the
  // class file index, so we can read in after calling qsort.
  // Put the method ordering in the shared archive.
  if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
    for (int index = 0; index < length; index++) {
      Method* const m = methods->at(index);
      assert(!m->valid_vtable_index(), "vtable index should not be set");
      m->set_vtable_index(index);
    }
  }
  // Sort method array by ascending method name (for faster lookups & vtable construction)
  // Note that the ordering is not alphabetical, see Symbol::fast_compare
  Method::sort_methods(methods);

  intArray* method_ordering = NULL;
  // If JVMTI original method ordering or sharing is enabled construct int
  // array remembering the original ordering
  if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
    method_ordering = new intArray(length, length, -1);
    for (int index = 0; index < length; index++) {
      Method* const m = methods->at(index);
      // Recover the class-file index stashed in vtable_index above, then
      // reset vtable_index to its normal "unset" sentinel.
      const int old_index = m->vtable_index();
      assert(old_index >= 0 && old_index < length, "invalid method index");
      method_ordering->at_put(index, old_index);
      m->set_vtable_index(Method::invalid_vtable_index);
    }
  }
  return method_ordering;
}

// Parse generic_signature attribute for methods and fields
u2 ClassFileParser::parse_generic_signature_attribute(const ClassFileStream* const cfs,
                                                      TRAPS) {
  assert(cfs != NULL, "invariant");

  cfs->guarantee_more(2, CHECK_0);  // generic_signature_index
  const u2 generic_signature_index = cfs->get_u2_fast();
  check_property(
    valid_symbol_at(generic_signature_index),
    "Invalid Signature attribute at constant pool index %u in class file %s",
    generic_signature_index, CHECK_0);
  return generic_signature_index;
}

// Reads the SourceFile attribute payload (a single constant-pool index) and
// records it on the parser.
void ClassFileParser::parse_classfile_sourcefile_attribute(const ClassFileStream* const cfs, TRAPS) {
  assert(cfs != NULL, "invariant");

  cfs->guarantee_more(2, CHECK);  // sourcefile_index
  const u2 sourcefile_index = cfs->get_u2_fast();
  check_property(
    valid_symbol_at(sourcefile_index),
    "Invalid SourceFile attribute at constant pool index %u in class file %s",
    sourcefile_index, CHECK);
  set_class_sourcefile_index(sourcefile_index);
}

// Copies the SourceDebugExtension payload (length bytes of UTF-8) into a
// NUL-terminated resource buffer, but only when JVMTI can retrieve it.
// Always advances the stream past the attribute.
void ClassFileParser::parse_classfile_source_debug_extension_attribute(const ClassFileStream* const cfs,
                                                                       int length,
                                                                       TRAPS) {
  assert(cfs != NULL, "invariant");

  const u1* const sde_buffer = cfs->current();
  assert(sde_buffer != NULL, "null sde buffer");

  // Don't bother storing it if there is no way to retrieve it
  if (JvmtiExport::can_get_source_debug_extension()) {
    assert((length+1) > length, "Overflow checking");
    u1* const sde = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, u1, length+1);
    for (int i = 0; i < length; i++) {
      sde[i] = sde_buffer[i];
    }
    sde[length] = '\0';
    set_class_sde_buffer((const char*)sde, length);
  }
  // Got utf8 string, set stream position forward
  cfs->skip_u1(length, CHECK);
}

// Inner classes can be static, private or protected (classic VM does this)
#define RECOGNIZED_INNER_CLASS_MODIFIERS ( JVM_RECOGNIZED_CLASS_MODIFIERS | \
                                           JVM_ACC_PRIVATE |                \
                                           JVM_ACC_PROTECTED |              \
                                           JVM_ACC_STATIC                   \
                                         )

// Find index of the InnerClasses entry for the specified inner_class_info_index.
// Return -1 if none is found.
static int inner_classes_find_index(const Array<u2>* inner_classes, int inner, const ConstantPool* cp, int length) { Symbol* cp_klass_name = cp->klass_name_at(inner); for (int idx = 0; idx < length; idx += InstanceKlass::inner_class_next_offset) { int idx_inner = inner_classes->at(idx + InstanceKlass::inner_class_inner_class_info_offset); if (cp->klass_name_at(idx_inner) == cp_klass_name) { return idx; } } return -1; } // Return the outer_class_info_index for the InnerClasses entry containing the // specified inner_class_info_index. Return -1 if no InnerClasses entry is found. static int inner_classes_jump_to_outer(const Array<u2>* inner_classes, int inner, const ConstantPool* cp, int length) { if (inner == 0) return -1; int idx = inner_classes_find_index(inner_classes, inner, cp, length); if (idx == -1) return -1; int result = inner_classes->at(idx + InstanceKlass::inner_class_outer_class_info_offset); return result; } // Return true if circularity is found, false if no circularity is found. // Use Floyd's cycle finding algorithm. static bool inner_classes_check_loop_through_outer(const Array<u2>* inner_classes, int idx, const ConstantPool* cp, int length) { int slow = inner_classes->at(idx + InstanceKlass::inner_class_inner_class_info_offset); int fast = inner_classes->at(idx + InstanceKlass::inner_class_outer_class_info_offset); while (fast != -1 && fast != 0) { if (slow != 0 && (cp->klass_name_at(slow) == cp->klass_name_at(fast))) { return true; // found a circularity } fast = inner_classes_jump_to_outer(inner_classes, fast, cp, length); if (fast == -1) return false; fast = inner_classes_jump_to_outer(inner_classes, fast, cp, length); if (fast == -1) return false; slow = inner_classes_jump_to_outer(inner_classes, slow, cp, length); assert(slow != -1, "sanity check"); } return false; } // Loop through each InnerClasses entry checking for circularities and duplications // with other entries. If duplicate entries are found then throw CFE. 
// Otherwise, return true if a circularity or entries with duplicate
// inner_class_info_indexes are found.
bool ClassFileParser::check_inner_classes_circularity(const ConstantPool* cp, int length, TRAPS) {
  // Loop through each InnerClasses entry.
  for (int idx = 0; idx < length; idx += InstanceKlass::inner_class_next_offset) {
    // Return true if there are circular entries.
    if (inner_classes_check_loop_through_outer(_inner_classes, idx, cp, length)) {
      return true;
    }
    // Check if there are duplicate entries or entries with the same inner_class_info_index.
    for (int y = idx + InstanceKlass::inner_class_next_offset; y < length;
         y += InstanceKlass::inner_class_next_offset) {

      // To maintain compatibility, throw an exception if duplicate inner classes
      // entries are found.  All four u2 slots of the tuples must differ somewhere.
      guarantee_property((_inner_classes->at(idx) != _inner_classes->at(y) ||
                          _inner_classes->at(idx+1) != _inner_classes->at(y+1) ||
                          _inner_classes->at(idx+2) != _inner_classes->at(y+2) ||
                          _inner_classes->at(idx+3) != _inner_classes->at(y+3)),
                         "Duplicate entry in InnerClasses attribute in class file %s",
                         CHECK_(true));
      // Return true if there are two entries with the same inner_class_info_index.
      if (_inner_classes->at(y) == _inner_classes->at(idx)) {
        return true;
      }
    }
  }
  return false;
}

// Return number of classes in the inner classes attribute table
u2 ClassFileParser::parse_classfile_inner_classes_attribute(const ClassFileStream* const cfs,
                                                            const ConstantPool* cp,
                                                            const u1* const inner_classes_attribute_start,
                                                            bool parsed_enclosingmethod_attribute,
                                                            u2 enclosing_method_class_index,
                                                            u2 enclosing_method_method_index,
                                                            TRAPS) {
  // Remember the stream position so it can be restored after re-reading the
  // attribute from its saved start.
  const u1* const current_mark = cfs->current();
  u2 length = 0;
  if (inner_classes_attribute_start != NULL) {
    cfs->set_current(inner_classes_attribute_start);
    cfs->guarantee_more(2, CHECK_0);  // length
    length = cfs->get_u2_fast();
  }

  // 4-tuples of shorts of inner classes data and 2 shorts of enclosing
  // method data:
  //   [inner_class_info_index,
  //    outer_class_info_index,
  //    inner_name_index,
  //    inner_class_access_flags,
  //    ...
  //    enclosing_method_class_index,
  //    enclosing_method_method_index]
  const int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
  Array<u2>* inner_classes = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
  _inner_classes = inner_classes;

  int index = 0;
  cfs->guarantee_more(8 * length, CHECK_0);  // 4-tuples of u2
  for (int n = 0; n < length; n++) {
    // Inner class index
    const u2 inner_class_info_index = cfs->get_u2_fast();
    check_property(
      valid_klass_reference_at(inner_class_info_index),
      "inner_class_info_index %u has bad constant type in class file %s",
      inner_class_info_index, CHECK_0);
    // Outer class index (0 means "not a member of any class")
    const u2 outer_class_info_index = cfs->get_u2_fast();
    check_property(
      outer_class_info_index == 0 ||
        valid_klass_reference_at(outer_class_info_index),
      "outer_class_info_index %u has bad constant type in class file %s",
      outer_class_info_index, CHECK_0);
    // Inner class name (0 means anonymous)
    const u2 inner_name_index = cfs->get_u2_fast();
    check_property(
      inner_name_index == 0 || valid_symbol_at(inner_name_index),
      "inner_name_index %u has bad constant type in class file %s",
      inner_name_index, CHECK_0);
    if (_need_verify) {
      guarantee_property(inner_class_info_index != outer_class_info_index,
                         "Class is both outer and inner class in class file %s", CHECK_0);
    }
    // Access flags
    jint flags;
    // JVM_ACC_MODULE is defined in JDK-9 and later.
    if (_major_version >= JAVA_9_VERSION) {
      flags = cfs->get_u2_fast() & (RECOGNIZED_INNER_CLASS_MODIFIERS | JVM_ACC_MODULE);
    } else {
      flags = cfs->get_u2_fast() & RECOGNIZED_INNER_CLASS_MODIFIERS;
    }
    if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
      // Set abstract bit for old class files for backward compatibility
      flags |= JVM_ACC_ABSTRACT;
    }
    verify_legal_class_modifiers(flags, CHECK_0);
    AccessFlags inner_access_flags(flags);

    inner_classes->at_put(index++, inner_class_info_index);
    inner_classes->at_put(index++, outer_class_info_index);
    inner_classes->at_put(index++, inner_name_index);
    inner_classes->at_put(index++, inner_access_flags.as_short());
  }

  // 4347400: make sure there's no duplicate entry in the classes array
  // Also, check for circular entries.
  bool has_circularity = false;
  if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
    has_circularity = check_inner_classes_circularity(cp, length * 4, CHECK_0);
    if (has_circularity) {
      // If circularity check failed then ignore InnerClasses attribute.
      MetadataFactory::free_array<u2>(_loader_data, _inner_classes);
      index = 0;
      if (parsed_enclosingmethod_attribute) {
        // Keep room for the two EnclosingMethod shorts.
        inner_classes = MetadataFactory::new_array<u2>(_loader_data, 2, CHECK_0);
        _inner_classes = inner_classes;
      } else {
        _inner_classes = Universe::the_empty_short_array();
      }
    }
  }
  // Set EnclosingMethod class and method indexes.
  if (parsed_enclosingmethod_attribute) {
    inner_classes->at_put(index++, enclosing_method_class_index);
    inner_classes->at_put(index++, enclosing_method_method_index);
  }
  assert(index == size || has_circularity, "wrong size");

  // Restore buffer's current position.
  cfs->set_current(current_mark);

  return length;
}

// Re-parse the saved NestMembers attribute into _nest_members (an array of
// constant pool class indexes).  Returns the number of member classes.
u2 ClassFileParser::parse_classfile_nest_members_attribute(const ClassFileStream* const cfs,
                                                           const u1* const nest_members_attribute_start,
                                                           TRAPS) {
  const u1* const current_mark = cfs->current();
  u2 length = 0;
  if (nest_members_attribute_start != NULL) {
    cfs->set_current(nest_members_attribute_start);
    cfs->guarantee_more(2, CHECK_0);  // length
    length = cfs->get_u2_fast();
  }
  const int size = length;
  Array<u2>* const nest_members = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
  _nest_members = nest_members;

  int index = 0;
  cfs->guarantee_more(2 * length, CHECK_0);
  for (int n = 0; n < length; n++) {
    const u2 class_info_index = cfs->get_u2_fast();
    check_property(
      valid_klass_reference_at(class_info_index),
      "Nest member class_info_index %u has bad constant type in class file %s",
      class_info_index, CHECK_0);
    nest_members->at_put(index++, class_info_index);
  }
  assert(index == size, "wrong size");

  // Restore buffer's current position.
  cfs->set_current(current_mark);

  return length;
}

// Re-parse the saved PermittedSubclasses attribute into _permitted_subclasses
// (an array of constant pool class indexes).  Returns the subclass count.
u2 ClassFileParser::parse_classfile_permitted_subclasses_attribute(const ClassFileStream* const cfs,
                                                                   const u1* const permitted_subclasses_attribute_start,
                                                                   TRAPS) {
  const u1* const current_mark = cfs->current();
  u2 length = 0;
  if (permitted_subclasses_attribute_start != NULL) {
    cfs->set_current(permitted_subclasses_attribute_start);
    cfs->guarantee_more(2, CHECK_0);  // length
    length = cfs->get_u2_fast();
  }
  const int size = length;
  Array<u2>* const permitted_subclasses = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
  _permitted_subclasses = permitted_subclasses;

  if (length > 0) {
    int index = 0;
    cfs->guarantee_more(2 * length, CHECK_0);
    for (int n = 0; n < length; n++) {
      const u2 class_info_index = cfs->get_u2_fast();
      check_property(
        valid_klass_reference_at(class_info_index),
        "Permitted subclass class_info_index %u has bad constant type in class file %s",
        class_info_index, CHECK_0);
      permitted_subclasses->at_put(index++, class_info_index);
    }
    assert(index == size, "wrong size");
  }

  // Restore buffer's current position.
  cfs->set_current(current_mark);

  return length;
}

// Record {
//   u2 attribute_name_index;
//   u4 attribute_length;
//   u2 components_count;
//   component_info components[components_count];
// }
// component_info {
//   u2 name_index;
//   u2 descriptor_index
//   u2 attributes_count;
//   attribute_info_attributes[attributes_count];
// }
u2 ClassFileParser::parse_classfile_record_attribute(const ClassFileStream* const cfs,
                                                     const ConstantPool* cp,
                                                     const u1* const record_attribute_start,
                                                     TRAPS) {
  const u1* const current_mark = cfs->current();
  int components_count = 0;
  // Running byte count of everything parsed; the caller compares it against
  // the declared attribute_length.
  unsigned int calculate_attr_size = 0;
  if (record_attribute_start != NULL) {
    cfs->set_current(record_attribute_start);
    cfs->guarantee_more(2, CHECK_0);  // num of components
    components_count = (int)cfs->get_u2_fast();
    calculate_attr_size = 2;
  }

  Array<RecordComponent*>* const record_components =
    MetadataFactory::new_array<RecordComponent*>(_loader_data, components_count, NULL, CHECK_0);
  _record_components = record_components;

  for (int x = 0; x < components_count; x++) {
    cfs->guarantee_more(6, CHECK_0);  // name_index, descriptor_index, attributes_count

    const u2 name_index = cfs->get_u2_fast();
    check_property(valid_symbol_at(name_index),
      "Invalid constant pool index %u for name in Record attribute in class file %s",
      name_index, CHECK_0);
    const Symbol* const name = cp->symbol_at(name_index);
    verify_legal_field_name(name, CHECK_0);

    const u2 descriptor_index = cfs->get_u2_fast();
    check_property(valid_symbol_at(descriptor_index),
      "Invalid constant pool index %u for descriptor in Record attribute in class file %s",
      descriptor_index, CHECK_0);
    const Symbol* const descr = cp->symbol_at(descriptor_index);
    verify_legal_field_signature(name, descr, CHECK_0);

    const u2 attributes_count = cfs->get_u2_fast();
    calculate_attr_size += 6;
    u2 generic_sig_index = 0;
    const u1* runtime_visible_annotations = NULL;
    int
    runtime_visible_annotations_length = 0;
    const u1* runtime_invisible_annotations = NULL;
    int runtime_invisible_annotations_length = 0;
    bool runtime_invisible_annotations_exists = false;
    const u1* runtime_visible_type_annotations = NULL;
    int runtime_visible_type_annotations_length = 0;
    const u1* runtime_invisible_type_annotations = NULL;
    int runtime_invisible_type_annotations_length = 0;
    bool runtime_invisible_type_annotations_exists = false;

    // Expected attributes for record components are Signature, Runtime(In)VisibleAnnotations,
    // and Runtime(In)VisibleTypeAnnotations.  Other attributes are ignored.
    for (int y = 0; y < attributes_count; y++) {
      cfs->guarantee_more(6, CHECK_0);  // attribute_name_index, attribute_length
      const u2 attribute_name_index = cfs->get_u2_fast();
      const u4 attribute_length = cfs->get_u4_fast();
      calculate_attr_size += 6;
      check_property(
        valid_symbol_at(attribute_name_index),
        "Invalid Record attribute name index %u in class file %s",
        attribute_name_index, CHECK_0);

      const Symbol* const attribute_name = cp->symbol_at(attribute_name_index);
      if (attribute_name == vmSymbols::tag_signature()) {
        if (generic_sig_index != 0) {
          classfile_parse_error(
            "Multiple Signature attributes for Record component in class file %s",
            THREAD);
          return 0;
        }
        if (attribute_length != 2) {
          classfile_parse_error(
            "Invalid Signature attribute length %u in Record component in class file %s",
            attribute_length, THREAD);
          return 0;
        }
        generic_sig_index = parse_generic_signature_attribute(cfs, CHECK_0);
      } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
        if (runtime_visible_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleAnnotations attributes for Record component in class file %s",
            THREAD);
          return 0;
        }
        runtime_visible_annotations_length = attribute_length;
        runtime_visible_annotations = cfs->current();

        assert(runtime_visible_annotations != NULL, "null record component visible annotation");
        cfs->guarantee_more(runtime_visible_annotations_length, CHECK_0);
        cfs->skip_u1_fast(runtime_visible_annotations_length);
      } else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
        if (runtime_invisible_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleAnnotations attributes for Record component in class file %s",
            THREAD);
          return 0;
        }
        runtime_invisible_annotations_exists = true;
        // Invisible annotations are retained only when PreserveAllAnnotations is set.
        if (PreserveAllAnnotations) {
          runtime_invisible_annotations_length = attribute_length;
          runtime_invisible_annotations = cfs->current();
          assert(runtime_invisible_annotations != NULL, "null record component invisible annotation");
        }
        cfs->skip_u1(attribute_length, CHECK_0);
      } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
        if (runtime_visible_type_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleTypeAnnotations attributes for Record component in class file %s",
            THREAD);
          return 0;
        }
        runtime_visible_type_annotations_length = attribute_length;
        runtime_visible_type_annotations = cfs->current();

        assert(runtime_visible_type_annotations != NULL, "null record component visible type annotation");
        cfs->guarantee_more(runtime_visible_type_annotations_length, CHECK_0);
        cfs->skip_u1_fast(runtime_visible_type_annotations_length);
      } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
        if (runtime_invisible_type_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleTypeAnnotations attributes for Record component in class file %s",
            THREAD);
          return 0;
        }
        runtime_invisible_type_annotations_exists = true;
        if (PreserveAllAnnotations) {
          runtime_invisible_type_annotations_length = attribute_length;
          runtime_invisible_type_annotations = cfs->current();
          assert(runtime_invisible_type_annotations != NULL, "null record component invisible type annotation");
        }
        cfs->skip_u1(attribute_length, CHECK_0);
      } else {
        // Skip unknown attributes
        cfs->skip_u1(attribute_length, CHECK_0);
      }
      calculate_attr_size += attribute_length;
    }  // End of attributes For loop

    AnnotationArray* annotations = assemble_annotations(runtime_visible_annotations,
                                                        runtime_visible_annotations_length,
                                                        runtime_invisible_annotations,
                                                        runtime_invisible_annotations_length,
                                                        CHECK_0);
    AnnotationArray* type_annotations = assemble_annotations(runtime_visible_type_annotations,
                                                             runtime_visible_type_annotations_length,
                                                             runtime_invisible_type_annotations,
                                                             runtime_invisible_type_annotations_length,
                                                             CHECK_0);

    RecordComponent* record_component =
      RecordComponent::allocate(_loader_data, name_index, descriptor_index,
                                attributes_count, generic_sig_index,
                                annotations, type_annotations, CHECK_0);
    record_components->at_put(x, record_component);
  }  // End of component processing loop

  // Restore buffer's current position.
  cfs->set_current(current_mark);
  return calculate_attr_size;
}

// The Synthetic attribute carries no payload; just set the flag.
void ClassFileParser::parse_classfile_synthetic_attribute() {
  set_class_synthetic_flag(true);
}

// Validate and record the class-level Signature attribute's CP index.
void ClassFileParser::parse_classfile_signature_attribute(const ClassFileStream* const cfs, TRAPS) {
  assert(cfs != NULL, "invariant");

  const u2 signature_index = cfs->get_u2(CHECK);
  check_property(
    valid_symbol_at(signature_index),
    "Invalid constant pool index %u in Signature attribute in class file %s",
    signature_index, CHECK);
  set_class_generic_signature_index(signature_index);
}

// Parse the BootstrapMethods attribute and store the bootstrap specifiers as
// the constant pool's operand array.
void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFileStream* const cfs,
                                                                  ConstantPool* cp,
                                                                  u4 attribute_byte_length,
                                                                  TRAPS) {
  assert(cfs != NULL, "invariant");
  assert(cp != NULL, "invariant");

  const u1* const current_start = cfs->current();

  guarantee_property(attribute_byte_length >= sizeof(u2),
                     "Invalid BootstrapMethods attribute length %u in class file %s",
                     attribute_byte_length,
                     CHECK);

  cfs->guarantee_more(attribute_byte_length, CHECK);

  const int attribute_array_length = cfs->get_u2_fast();

  // Every bootstrap specifier index referenced from the constant pool must
  // actually exist in this attribute.
  guarantee_property(_max_bootstrap_specifier_index < attribute_array_length,
                     "Short length on BootstrapMethods in class file %s",
                     CHECK);

  // The attribute contains a counted array of counted tuples of shorts,
  // representing bootstrap specifiers:
  //    length*{bootstrap_method_index, argument_count*{argument_index}}
  const int operand_count = (attribute_byte_length - sizeof(u2)) / sizeof(u2);
  // operand_count = number of shorts in attr, except for leading length

  // The attribute is copied into a short[] array.
  // The array begins with a series of short[2] pairs, one for each tuple.
  const int index_size = (attribute_array_length * 2);

  Array<u2>* const operands =
    MetadataFactory::new_array<u2>(_loader_data, index_size + operand_count, CHECK);

  // Eagerly assign operands so they will be deallocated with the constant
  // pool if there is an error.
  cp->set_operands(operands);

  int operand_fill_index = index_size;
  const int cp_size = cp->length();

  for (int n = 0; n < attribute_array_length; n++) {
    // Store a 32-bit offset into the header of the operand array.
    ConstantPool::operand_offset_at_put(operands, n, operand_fill_index);

    // Read a bootstrap specifier.
    cfs->guarantee_more(sizeof(u2) * 2, CHECK);  // bsm, argc
    const u2 bootstrap_method_index = cfs->get_u2_fast();
    const u2 argument_count = cfs->get_u2_fast();
    check_property(
      valid_cp_range(bootstrap_method_index, cp_size) &&
      cp->tag_at(bootstrap_method_index).is_method_handle(),
      "bootstrap_method_index %u has bad constant type in class file %s",
      bootstrap_method_index,
      CHECK);

    guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(),
                       "Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s",
                       CHECK);

    operands->at_put(operand_fill_index++, bootstrap_method_index);
    operands->at_put(operand_fill_index++, argument_count);

    cfs->guarantee_more(sizeof(u2) * argument_count, CHECK);  // argv[argc]
    for (int j = 0; j < argument_count; j++) {
      const u2 argument_index = cfs->get_u2_fast();
      check_property(
        valid_cp_range(argument_index, cp_size) &&
        cp->tag_at(argument_index).is_loadable_constant(),
        "argument_index %u has bad constant type in class file %s",
        argument_index,
        CHECK);
      operands->at_put(operand_fill_index++, argument_index);
    }
  }
  // The whole attribute must have been consumed exactly.
  guarantee_property(current_start + attribute_byte_length == cfs->current(),
                     "Bad length on BootstrapMethods in class file %s",
                     CHECK);
}

// Parse all class-level attributes.  Attributes whose payload is needed later
// (InnerClasses, NestMembers, Record, PermittedSubclasses) are only located and
// skipped here, then re-parsed from the saved start pointers at the end.
void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cfs,
                                                 ConstantPool* cp,
                                                 ClassFileParser::ClassAnnotationCollector* parsed_annotations,
                                                 TRAPS) {
  assert(cfs != NULL, "invariant");
  assert(cp != NULL, "invariant");
  assert(parsed_annotations != NULL, "invariant");

  // Set inner classes attribute to default sentinel
  _inner_classes = Universe::the_empty_short_array();
  // Set nest members attribute to default sentinel
  _nest_members = Universe::the_empty_short_array();
  // Set _permitted_subclasses attribute to default sentinel
  _permitted_subclasses = Universe::the_empty_short_array();
  cfs->guarantee_more(2, CHECK);  // attributes_count
  u2 attributes_count = cfs->get_u2_fast();
  bool parsed_sourcefile_attribute = false;
  bool
  parsed_innerclasses_attribute = false;
  bool parsed_nest_members_attribute = false;
  bool parsed_permitted_subclasses_attribute = false;
  bool parsed_nest_host_attribute = false;
  bool parsed_record_attribute = false;
  bool parsed_enclosingmethod_attribute = false;
  bool parsed_bootstrap_methods_attribute = false;
  const u1* runtime_visible_annotations = NULL;
  int runtime_visible_annotations_length = 0;
  const u1* runtime_invisible_annotations = NULL;
  int runtime_invisible_annotations_length = 0;
  const u1* runtime_visible_type_annotations = NULL;
  int runtime_visible_type_annotations_length = 0;
  const u1* runtime_invisible_type_annotations = NULL;
  int runtime_invisible_type_annotations_length = 0;
  bool runtime_invisible_type_annotations_exists = false;
  bool runtime_invisible_annotations_exists = false;
  bool parsed_source_debug_ext_annotations_exist = false;
  const u1* inner_classes_attribute_start = NULL;
  u4  inner_classes_attribute_length = 0;
  u2  enclosing_method_class_index = 0;
  u2  enclosing_method_method_index = 0;
  const u1* nest_members_attribute_start = NULL;
  u4  nest_members_attribute_length = 0;
  const u1* record_attribute_start = NULL;
  u4  record_attribute_length = 0;
  const u1* permitted_subclasses_attribute_start = NULL;
  u4  permitted_subclasses_attribute_length = 0;

  // Iterate over attributes
  while (attributes_count--) {
    cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
    const u2 attribute_name_index = cfs->get_u2_fast();
    const u4 attribute_length = cfs->get_u4_fast();
    check_property(
      valid_symbol_at(attribute_name_index),
      "Attribute name has bad constant pool index %u in class file %s",
      attribute_name_index, CHECK);
    const Symbol* const tag = cp->symbol_at(attribute_name_index);
    if (tag == vmSymbols::tag_source_file()) {
      // Check for SourceFile tag
      if (_need_verify) {
        guarantee_property(attribute_length == 2, "Wrong SourceFile attribute length in class file %s", CHECK);
      }
      if (parsed_sourcefile_attribute) {
        classfile_parse_error("Multiple SourceFile attributes in class file %s", THREAD);
        return;
      } else {
        parsed_sourcefile_attribute = true;
      }
      parse_classfile_sourcefile_attribute(cfs, CHECK);
    } else if (tag == vmSymbols::tag_source_debug_extension()) {
      // Check for SourceDebugExtension tag
      if (parsed_source_debug_ext_annotations_exist) {
        classfile_parse_error(
          "Multiple SourceDebugExtension attributes in class file %s", THREAD);
        return;
      }
      parsed_source_debug_ext_annotations_exist = true;
      parse_classfile_source_debug_extension_attribute(cfs, (int)attribute_length, CHECK);
    } else if (tag == vmSymbols::tag_inner_classes()) {
      // Check for InnerClasses tag
      if (parsed_innerclasses_attribute) {
        classfile_parse_error("Multiple InnerClasses attributes in class file %s", THREAD);
        return;
      } else {
        parsed_innerclasses_attribute = true;
      }
      // Remember the location; the attribute is re-parsed after this loop.
      inner_classes_attribute_start = cfs->current();
      inner_classes_attribute_length = attribute_length;
      cfs->skip_u1(inner_classes_attribute_length, CHECK);
    } else if (tag == vmSymbols::tag_synthetic()) {
      // Check for Synthetic tag
      // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec
      if (attribute_length != 0) {
        classfile_parse_error(
          "Invalid Synthetic classfile attribute length %u in class file %s",
          attribute_length, THREAD);
        return;
      }
      parse_classfile_synthetic_attribute();
    } else if (tag == vmSymbols::tag_deprecated()) {
      // Check for Deprecated tag - 4276120
      if (attribute_length != 0) {
        classfile_parse_error(
          "Invalid Deprecated classfile attribute length %u in class file %s",
          attribute_length, THREAD);
        return;
      }
    } else if (_major_version >= JAVA_1_5_VERSION) {
      if (tag == vmSymbols::tag_signature()) {
        if (_generic_signature_index != 0) {
          classfile_parse_error(
            "Multiple Signature attributes in class file %s", THREAD);
          return;
        }
        if (attribute_length != 2) {
          classfile_parse_error(
            "Wrong Signature attribute length %u in class file %s",
            attribute_length, THREAD);
          return;
        }
        parse_classfile_signature_attribute(cfs, CHECK);
      } else if (tag == vmSymbols::tag_runtime_visible_annotations()) {
        if (runtime_visible_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleAnnotations attributes in class file %s", THREAD);
          return;
        }
        runtime_visible_annotations_length = attribute_length;
        runtime_visible_annotations = cfs->current();
        assert(runtime_visible_annotations != NULL, "null visible annotations");
        cfs->guarantee_more(runtime_visible_annotations_length, CHECK);
        parse_annotations(cp,
                          runtime_visible_annotations,
                          runtime_visible_annotations_length,
                          parsed_annotations,
                          _loader_data,
                          _can_access_vm_annotations);
        cfs->skip_u1_fast(runtime_visible_annotations_length);
      } else if (tag == vmSymbols::tag_runtime_invisible_annotations()) {
        if (runtime_invisible_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleAnnotations attributes in class file %s", THREAD);
          return;
        }
        runtime_invisible_annotations_exists = true;
        if (PreserveAllAnnotations) {
          runtime_invisible_annotations_length = attribute_length;
          runtime_invisible_annotations = cfs->current();
          assert(runtime_invisible_annotations != NULL, "null invisible annotations");
        }
        cfs->skip_u1(attribute_length, CHECK);
      } else if (tag == vmSymbols::tag_enclosing_method()) {
        if (parsed_enclosingmethod_attribute) {
          classfile_parse_error("Multiple EnclosingMethod attributes in class file %s", THREAD);
          return;
        } else {
          parsed_enclosingmethod_attribute = true;
        }
        guarantee_property(attribute_length == 4, "Wrong EnclosingMethod attribute length %u in class file %s", attribute_length, CHECK);
        cfs->guarantee_more(4, CHECK);  // class_index, method_index
        enclosing_method_class_index  = cfs->get_u2_fast();
        enclosing_method_method_index = cfs->get_u2_fast();
        if (enclosing_method_class_index == 0) {
          classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", THREAD);
          return;
        }
        // Validate the constant pool indices and types
        check_property(valid_klass_reference_at(enclosing_method_class_index),
          "Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
        if (enclosing_method_method_index != 0 &&
            (!cp->is_within_bounds(enclosing_method_method_index) ||
             !cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
          classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", THREAD);
          return;
        }
      } else if (tag == vmSymbols::tag_bootstrap_methods() &&
                 _major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
        if (parsed_bootstrap_methods_attribute) {
          classfile_parse_error("Multiple BootstrapMethods attributes in class file %s", THREAD);
          return;
        }
        parsed_bootstrap_methods_attribute = true;
        parse_classfile_bootstrap_methods_attribute(cfs, cp, attribute_length, CHECK);
      } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
        if (runtime_visible_type_annotations != NULL) {
          classfile_parse_error(
            "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", THREAD);
          return;
        }
        runtime_visible_type_annotations_length = attribute_length;
        runtime_visible_type_annotations = cfs->current();
        assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
        // No need for the VM to parse Type annotations
        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
      } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
        if (runtime_invisible_type_annotations_exists) {
          classfile_parse_error(
            "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", THREAD);
          return;
        } else {
          runtime_invisible_type_annotations_exists = true;
        }
        if (PreserveAllAnnotations) {
          runtime_invisible_type_annotations_length = attribute_length;
          runtime_invisible_type_annotations = cfs->current();
          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
        }
        cfs->skip_u1(attribute_length, CHECK);
      } else if (_major_version >= JAVA_11_VERSION) {
        if (tag == vmSymbols::tag_nest_members()) {
          // Check for NestMembers tag
          if (parsed_nest_members_attribute) {
            classfile_parse_error("Multiple NestMembers attributes in class file %s", THREAD);
            return;
          } else {
            parsed_nest_members_attribute = true;
          }
          // NestHost and NestMembers are mutually exclusive.
          if (parsed_nest_host_attribute) {
            classfile_parse_error("Conflicting NestHost and NestMembers attributes in class file %s", THREAD);
            return;
          }
          nest_members_attribute_start = cfs->current();
          nest_members_attribute_length = attribute_length;
          cfs->skip_u1(nest_members_attribute_length, CHECK);
        } else if (tag == vmSymbols::tag_nest_host()) {
          if (parsed_nest_host_attribute) {
            classfile_parse_error("Multiple NestHost attributes in class file %s", THREAD);
            return;
          } else {
            parsed_nest_host_attribute = true;
          }
          if (parsed_nest_members_attribute) {
            classfile_parse_error("Conflicting NestMembers and NestHost attributes in class file %s", THREAD);
            return;
          }
          if (_need_verify) {
            guarantee_property(attribute_length == 2, "Wrong NestHost attribute length in class file %s", CHECK);
          }
          cfs->guarantee_more(2, CHECK);
          u2 class_info_index = cfs->get_u2_fast();
          check_property(
            valid_klass_reference_at(class_info_index),
            "Nest-host class_info_index %u has bad constant type in class file %s",
            class_info_index, CHECK);
          _nest_host = class_info_index;
        } else if (_major_version >= JAVA_16_VERSION) {
          if (tag == vmSymbols::tag_record()) {
            if (parsed_record_attribute) {
              classfile_parse_error("Multiple Record attributes in class file %s", THREAD);
              return;
            }
            parsed_record_attribute = true;
            record_attribute_start = cfs->current();
            record_attribute_length = attribute_length;
          } else if (_major_version >= JAVA_17_VERSION) {
            if (tag == vmSymbols::tag_permitted_subclasses()) {
              if (parsed_permitted_subclasses_attribute) {
                // NOTE(review): every other classfile_parse_error call in this
                // method passes THREAD and then returns; these two pass CHECK
                // instead (the explicit return below is then unreachable when
                // an exception is pending) — confirm intended / unify.
                classfile_parse_error("Multiple PermittedSubclasses attributes in class file %s", CHECK);
                return;
              }
              // Classes marked ACC_FINAL cannot have a PermittedSubclasses attribute.
              if (_access_flags.is_final()) {
                classfile_parse_error("PermittedSubclasses attribute in final class file %s", CHECK);
                return;
              }
              parsed_permitted_subclasses_attribute = true;
              permitted_subclasses_attribute_start = cfs->current();
              permitted_subclasses_attribute_length = attribute_length;
            }
          }
          // Skip attribute_length for any attribute where major_version >= JAVA_17_VERSION
          cfs->skip_u1(attribute_length, CHECK);
        } else {
          // Unknown attribute
          cfs->skip_u1(attribute_length, CHECK);
        }
      } else {
        // Unknown attribute
        cfs->skip_u1(attribute_length, CHECK);
      }
    } else {
      // Unknown attribute
      cfs->skip_u1(attribute_length, CHECK);
    }
  }
  _class_annotations = assemble_annotations(runtime_visible_annotations,
                                            runtime_visible_annotations_length,
                                            runtime_invisible_annotations,
                                            runtime_invisible_annotations_length,
                                            CHECK);
  _class_type_annotations = assemble_annotations(runtime_visible_type_annotations,
                                                 runtime_visible_type_annotations_length,
                                                 runtime_invisible_type_annotations,
                                                 runtime_invisible_type_annotations_length,
                                                 CHECK);

  if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
    const u2 num_of_classes = parse_classfile_inner_classes_attribute(
                            cfs,
                            cp,
                            inner_classes_attribute_start,
                            parsed_innerclasses_attribute,
enclosing_method_class_index, enclosing_method_method_index, CHECK); if (parsed_innerclasses_attribute && _need_verify && _major_version >= JAVA_1_5_VERSION) { guarantee_property( inner_classes_attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes, "Wrong InnerClasses attribute length in class file %s", CHECK); } } if (parsed_nest_members_attribute) { const u2 num_of_classes = parse_classfile_nest_members_attribute( cfs, nest_members_attribute_start, CHECK); if (_need_verify) { guarantee_property( nest_members_attribute_length == sizeof(num_of_classes) + sizeof(u2) * num_of_classes, "Wrong NestMembers attribute length in class file %s", CHECK); } } if (parsed_record_attribute) { const unsigned int calculated_attr_length = parse_classfile_record_attribute( cfs, cp, record_attribute_start, CHECK); if (_need_verify) { guarantee_property(record_attribute_length == calculated_attr_length, "Record attribute has wrong length in class file %s", CHECK); } } if (parsed_permitted_subclasses_attribute) { const u2 num_subclasses = parse_classfile_permitted_subclasses_attribute( cfs, permitted_subclasses_attribute_start, CHECK); if (_need_verify) { guarantee_property( permitted_subclasses_attribute_length == sizeof(num_subclasses) + sizeof(u2) * num_subclasses, "Wrong PermittedSubclasses attribute length in class file %s", CHECK); } } if (_max_bootstrap_specifier_index >= 0) { guarantee_property(parsed_bootstrap_methods_attribute, "Missing BootstrapMethods attribute in class file %s", CHECK); } } void ClassFileParser::apply_parsed_class_attributes(InstanceKlass* k) { assert(k != NULL, "invariant"); if (_synthetic_flag) k->set_is_synthetic(); if (_sourcefile_index != 0) { k->set_source_file_name_index(_sourcefile_index); } if (_generic_signature_index != 0) { k->set_generic_signature_index(_generic_signature_index); } if (_sde_buffer != NULL) { k->set_source_debug_extension(_sde_buffer, _sde_length); } } // Create the Annotations object that will // hold 
// the annotations array for the Klass.
// Bundles the four annotation byte arrays parsed so far into a single
// Annotations metadata object in _combined_annotations; leaves
// _combined_annotations untouched (NULL) when there is nothing to record.
void ClassFileParser::create_combined_annotations(TRAPS) {
  if (_class_annotations == NULL &&
      _class_type_annotations == NULL &&
      _fields_annotations == NULL &&
      _fields_type_annotations == NULL) {
    // Don't create the Annotations object unnecessarily.
    return;
  }

  Annotations* const annotations = Annotations::allocate(_loader_data, CHECK);
  annotations->set_class_annotations(_class_annotations);
  annotations->set_class_type_annotations(_class_type_annotations);
  annotations->set_fields_annotations(_fields_annotations);
  annotations->set_fields_type_annotations(_fields_type_annotations);

  // This is the Annotations object that will be
  // assigned to InstanceKlass being constructed.
  _combined_annotations = annotations;

  // The annotation arrays below have been transferred to
  // _combined_annotations so these fields can now be cleared.
  _class_annotations       = NULL;
  _class_type_annotations  = NULL;
  _fields_annotations      = NULL;
  _fields_type_annotations = NULL;
}

// Transfer ownership of metadata allocated to the InstanceKlass.
void ClassFileParser::apply_parsed_class_metadata(
                                            InstanceKlass* this_klass,
                                            int java_fields_count) {
  assert(this_klass != NULL, "invariant");

  _cp->set_pool_holder(this_klass);
  this_klass->set_constants(_cp);
  this_klass->set_fields(_fields, java_fields_count);
  this_klass->set_methods(_methods);
  this_klass->set_inner_classes(_inner_classes);
  this_klass->set_nest_members(_nest_members);
  this_klass->set_nest_host_index(_nest_host);
  this_klass->set_annotations(_combined_annotations);
  this_klass->set_permitted_subclasses(_permitted_subclasses);
  this_klass->set_record_components(_record_components);
  // Delay the setting of _local_interfaces and _transitive_interfaces until after
  // initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could
  // be shared with _transitive_interfaces and _transitive_interfaces may be shared with
  // its _super. If an OOM occurs while loading the current klass, its _super field
  // may not have been set. When GC tries to free the klass, the _transitive_interfaces
  // may be deallocated mistakenly in InstanceKlass::deallocate_interfaces(). Subsequent
  // dereferences to the deallocated _transitive_interfaces will result in a crash.

  // Clear out these fields so they don't get deallocated by the destructor
  clear_class_metadata();
}

// Concatenates the runtime-visible and runtime-invisible annotation bytes
// (visible bytes first) into one newly allocated AnnotationArray.
// Returns NULL when neither input array is present.
AnnotationArray* ClassFileParser::assemble_annotations(const u1* const runtime_visible_annotations,
                                                       int runtime_visible_annotations_length,
                                                       const u1* const runtime_invisible_annotations,
                                                       int runtime_invisible_annotations_length,
                                                       TRAPS) {
  AnnotationArray* annotations = NULL;
  if (runtime_visible_annotations != NULL ||
      runtime_invisible_annotations != NULL) {
    annotations = MetadataFactory::new_array<u1>(_loader_data,
                                          runtime_visible_annotations_length +
                                          runtime_invisible_annotations_length,
                                          CHECK_(annotations));
    if (runtime_visible_annotations != NULL) {
      for (int i = 0; i < runtime_visible_annotations_length; i++) {
        annotations->at_put(i, runtime_visible_annotations[i]);
      }
    }
    if (runtime_invisible_annotations != NULL) {
      // Invisible annotation bytes are appended after the visible ones.
      for (int i = 0; i < runtime_invisible_annotations_length; i++) {
        int append = runtime_visible_annotations_length+i;
        annotations->at_put(append, runtime_invisible_annotations[i]);
      }
    }
  }
  return annotations;
}

// Validates the super_class constant-pool index and returns the resolved
// superclass (NULL only for java.lang.Object, which has index 0).
// With need_verify, rejects array classes posing as a superclass.
const InstanceKlass* ClassFileParser::parse_super_class(ConstantPool* const cp,
                                                        const int super_class_index,
                                                        const bool need_verify,
                                                        TRAPS) {
  assert(cp != NULL, "invariant");
  const InstanceKlass* super_klass = NULL;

  if (super_class_index == 0) {
    // Only java.lang.Object may have no superclass.
    check_property(_class_name == vmSymbols::java_lang_Object(),
                   "Invalid superclass index %u in class file %s",
                   super_class_index,
                   CHECK_NULL);
  } else {
    check_property(valid_klass_reference_at(super_class_index),
                   "Invalid superclass index %u in class file %s",
                   super_class_index,
                   CHECK_NULL);
    // The class name should be legal because it is checked when parsing constant pool.
    // However, make sure it is not an array type.
    bool is_array = false;
    if (cp->tag_at(super_class_index).is_klass()) {
      super_klass = InstanceKlass::cast(cp->resolved_klass_at(super_class_index));
      if (need_verify)
        is_array = super_klass->is_array_klass();
    } else if (need_verify) {
      // Not yet resolved: inspect the name; a leading '[' marks an array descriptor.
      is_array = (cp->klass_name_at(super_class_index)->char_at(0) == JVM_SIGNATURE_ARRAY);
    }
    if (need_verify) {
      guarantee_property(!is_array,
                         "Bad superclass name in class file %s", CHECK_NULL);
    }
  }
  return super_klass;
}

// Resource-area builder for at most max_blocks oop map blocks;
// blocks are zero-initialized.
OopMapBlocksBuilder::OopMapBlocksBuilder(unsigned int max_blocks) {
  _max_nonstatic_oop_maps = max_blocks;
  _nonstatic_oop_map_count = 0;
  if (max_blocks == 0) {
    _nonstatic_oop_maps = NULL;
  } else {
    _nonstatic_oop_maps =
        NEW_RESOURCE_ARRAY(OopMapBlock, _max_nonstatic_oop_maps);
    memset(_nonstatic_oop_maps, 0, sizeof(OopMapBlock) * max_blocks);
  }
}

// Returns the most recently used block; requires at least one block.
OopMapBlock* OopMapBlocksBuilder::last_oop_map() const {
  assert(_nonstatic_oop_map_count > 0, "Has no oop maps");
  return _nonstatic_oop_maps + (_nonstatic_oop_map_count - 1);
}

// addition of super oop maps
void OopMapBlocksBuilder::initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks) {
  assert(nof_blocks && _nonstatic_oop_map_count == 0 &&
         nof_blocks <= _max_nonstatic_oop_maps, "invariant");

  memcpy(_nonstatic_oop_maps, blocks, sizeof(OopMapBlock) * nof_blocks);
  _nonstatic_oop_map_count += nof_blocks;
}

// collection of oops
void OopMapBlocksBuilder::add(int offset, int count) {
  if (_nonstatic_oop_map_count == 0) {
    _nonstatic_oop_map_count++;
  }
  OopMapBlock* nonstatic_oop_map = last_oop_map();
  if (nonstatic_oop_map->count() == 0) {  // Unused map, set it up
    nonstatic_oop_map->set_offset(offset);
    nonstatic_oop_map->set_count(count);
  } else if (nonstatic_oop_map->is_contiguous(offset)) { // contiguous, add
    nonstatic_oop_map->increment_count(count);
  } else { // Need a new one...
    _nonstatic_oop_map_count++;
    assert(_nonstatic_oop_map_count <= _max_nonstatic_oop_maps, "range check");
    nonstatic_oop_map = last_oop_map();
    nonstatic_oop_map->set_offset(offset);
    nonstatic_oop_map->set_count(count);
  }
}

// general purpose copy, e.g. into allocated instanceKlass
void OopMapBlocksBuilder::copy(OopMapBlock* dst) {
  if (_nonstatic_oop_map_count != 0) {
    memcpy(dst, _nonstatic_oop_maps, sizeof(OopMapBlock) * _nonstatic_oop_map_count);
  }
}

// Sort and compact adjacent blocks
void OopMapBlocksBuilder::compact() {
  if (_nonstatic_oop_map_count <= 1) {
    return;
  }
  /*
   * Since field layout sneeks in oops before values, we will be able to condense
   * blocks. There is potential to compact between super, own refs and values
   * containing refs.
   *
   * Currently compaction is slightly limited due to values being 8 byte aligned.
   * This may well change: FixMe if it doesn't, the code below is fairly general purpose
   * and maybe it doesn't need to be.
   */
  qsort(_nonstatic_oop_maps, _nonstatic_oop_map_count, sizeof(OopMapBlock),
        (_sort_Fn)OopMapBlock::compare_offset);
  if (_nonstatic_oop_map_count < 2) {
    return;
  }

  // Make a temp copy, and iterate through and copy back into the original
  ResourceMark rm;
  OopMapBlock* oop_maps_copy =
      NEW_RESOURCE_ARRAY(OopMapBlock, _nonstatic_oop_map_count);
  OopMapBlock* oop_maps_copy_end = oop_maps_copy + _nonstatic_oop_map_count;
  copy(oop_maps_copy);
  OopMapBlock* nonstatic_oop_map = _nonstatic_oop_maps;
  unsigned int new_count = 1;
  oop_maps_copy++;
  while(oop_maps_copy < oop_maps_copy_end) {
    assert(nonstatic_oop_map->offset() < oop_maps_copy->offset(), "invariant");
    if (nonstatic_oop_map->is_contiguous(oop_maps_copy->offset())) {
      // Merge the adjacent source block into the current destination block.
      nonstatic_oop_map->increment_count(oop_maps_copy->count());
    } else {
      nonstatic_oop_map++;
      new_count++;
      nonstatic_oop_map->set_offset(oop_maps_copy->offset());
      nonstatic_oop_map->set_count(oop_maps_copy->count());
    }
    oop_maps_copy++;
  }
  assert(new_count <= _nonstatic_oop_map_count, "end up with more maps after compact() ?");
  _nonstatic_oop_map_count = new_count;
}

// Debug/diagnostic dump of the collected oop map blocks.
void OopMapBlocksBuilder::print_on(outputStream* st) const {
  st->print_cr(" OopMapBlocks: %3d /%3d", _nonstatic_oop_map_count,
               _max_nonstatic_oop_maps);
  if (_nonstatic_oop_map_count > 0) {
    OopMapBlock* map = _nonstatic_oop_maps;
    OopMapBlock* last_map = last_oop_map();
    assert(map <= last_map, "Last less than first");
    while (map <= last_map) {
      st->print_cr(" Offset: %3d -%3d Count: %3d", map->offset(),
                   map->offset() + map->offset_span() - heapOopSize, map->count());
      map++;
    }
  }
}

void OopMapBlocksBuilder::print_value_on(outputStream* st) const {
  print_on(st);
}

// Derives per-klass flags that callers use for fast paths: finalizer
// registration, cloneability, vanilla default constructor, and whether
// fast-path allocation must be forbidden via the layout helper.
void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
  assert(ik != NULL, "invariant");

  const Klass* const super = ik->super();

  // Check if this klass has an empty finalize method (i.e. one with return bytecode only),
  // in which case we don't have to register objects as finalizable
  if (!_has_empty_finalizer) {
    if (_has_finalizer ||
        (super != NULL && super->has_finalizer())) {
      ik->set_has_finalizer();
    }
  }

#ifdef ASSERT
  bool f = false;
  const Method* const m = ik->lookup_method(vmSymbols::finalize_method_name(),
                                            vmSymbols::void_method_signature());
  if (m != NULL && !m->is_empty_method()) {
      f = true;
  }

  // Spec doesn't prevent agent from redefinition of empty finalizer.
  // Despite the fact that it's generally bad idea and redefined finalizer
  // will not work as expected we shouldn't abort vm in this case
  if (!ik->has_redefined_this_or_super()) {
    assert(ik->has_finalizer() == f, "inconsistent has_finalizer");
  }
#endif

  // Check if this klass supports the java.lang.Cloneable interface
  if (vmClasses::Cloneable_klass_loaded()) {
    if (ik->is_subtype_of(vmClasses::Cloneable_klass())) {
      ik->set_is_cloneable();
    }
  }

  // Check if this klass has a vanilla default constructor
  if (super == NULL) {
    // java.lang.Object has empty default constructor
    ik->set_has_vanilla_constructor();
  } else {
    if (super->has_vanilla_constructor() &&
        _has_vanilla_constructor) {
      ik->set_has_vanilla_constructor();
    }
#ifdef ASSERT
    bool v = false;
    if (super->has_vanilla_constructor()) {
      const Method* const constructor =
        ik->find_method(vmSymbols::object_initializer_name(),
                        vmSymbols::void_method_signature());
      if (constructor != NULL && constructor->is_vanilla_constructor()) {
        v = true;
      }
    }
    assert(v == ik->has_vanilla_constructor(), "inconsistent has_vanilla_constructor");
#endif
  }

  // If it cannot be fast-path allocated, set a bit in the layout helper.
  // See documentation of InstanceKlass::can_be_fastpath_allocated().
  assert(ik->size_helper() > 0, "layout_helper is initialized");
  if ((!RegisterFinalizersAtInit && ik->has_finalizer())
      || ik->is_abstract() || ik->is_interface()
      || (ik->name() == vmSymbols::java_lang_Class() && ik->class_loader() == NULL)
      || ik->size_helper() >= FastAllocateSizeLimit) {
    // Forbid fast-path allocation.
    const jint lh = Klass::instance_layout_helper(ik->size_helper(), true);
    ik->set_layout_helper(lh);
  }
}

// utility methods for appending an array with check for duplicates

static void append_interfaces(GrowableArray<InstanceKlass*>* result,
                              const Array<InstanceKlass*>* const ifs) {
  // iterate over new interfaces
  for (int i = 0; i < ifs->length(); i++) {
    InstanceKlass* const e = ifs->at(i);
    assert(e->is_klass() && e->is_interface(), "just checking");
    // add new interface
    result->append_if_missing(e);
  }
}

// Builds the transitive closure of implemented interfaces (super's
// transitive set + each local interface's transitive set + the local
// interfaces themselves), sharing an existing array whenever possible
// instead of allocating a new one.
static Array<InstanceKlass*>* compute_transitive_interfaces(const InstanceKlass* super,
                                                            Array<InstanceKlass*>* local_ifs,
                                                            ClassLoaderData* loader_data,
                                                            TRAPS) {
  assert(local_ifs != NULL, "invariant");
  assert(loader_data != NULL, "invariant");

  // Compute maximum size for transitive interfaces
  int max_transitive_size = 0;
  int super_size = 0;
  // Add superclass transitive interfaces size
  if (super != NULL) {
    super_size = super->transitive_interfaces()->length();
    max_transitive_size += super_size;
  }
  // Add local interfaces' super interfaces
  const int local_size = local_ifs->length();
  for (int i = 0; i < local_size; i++) {
    InstanceKlass* const l = local_ifs->at(i);
    max_transitive_size += l->transitive_interfaces()->length();
  }
  // Finally add local interfaces
  max_transitive_size += local_size;
  // Construct array
  if (max_transitive_size == 0) {
    // no interfaces, use canonicalized array
    return Universe::the_empty_instance_klass_array();
  } else if (max_transitive_size == super_size) {
    // no new local interfaces added, share superklass' transitive interface array
    return super->transitive_interfaces();
  } else if (max_transitive_size == local_size) {
    // only local interfaces added, share local interface array
    return local_ifs;
  } else {
    ResourceMark rm;
    GrowableArray<InstanceKlass*>* const result = new GrowableArray<InstanceKlass*>(max_transitive_size);

    // Copy down from superclass
    if (super != NULL) {
      append_interfaces(result, super->transitive_interfaces());
    }

    // Copy down from local interfaces' superinterfaces
    for (int i = 0; i < local_size; i++) {
      InstanceKlass* const l = local_ifs->at(i);
      append_interfaces(result, l->transitive_interfaces());
    }
    // Finally add local interfaces
    append_interfaces(result, local_ifs);

    // length will be less than the max_transitive_size if duplicates were removed
    const int length = result->length();
    assert(length <= max_transitive_size, "just checking");
    Array<InstanceKlass*>* const new_result =
      MetadataFactory::new_array<InstanceKlass*>(loader_data, length, CHECK_NULL);
    for (int i = 0; i < length; i++) {
      InstanceKlass* const e = result->at(i);
      assert(e != NULL, "just checking");
      new_result->at_put(i, e);
    }
    return new_result;
  }
}

// Throws IncompatibleClassChangeError / IllegalAccessError if the parsed
// class may not legally extend its superclass (final, sealed without
// permission, jdk.internal.reflect restrictions, or module access rules).
void ClassFileParser::check_super_class_access(const InstanceKlass* this_klass, TRAPS) {
  assert(this_klass != NULL, "invariant");
  const Klass* const super = this_klass->super();

  if (super != NULL) {
    const InstanceKlass* super_ik = InstanceKlass::cast(super);

    if (super->is_final()) {
      classfile_icce_error("class %s cannot inherit from final class %s", super_ik, THREAD);
      return;
    }

    if (super_ik->is_sealed() && !super_ik->has_as_permitted_subclass(this_klass)) {
      classfile_icce_error("class %s cannot inherit from sealed class %s", super_ik, THREAD);
      return;
    }

    // If the loader is not the boot loader then throw an exception if its
    // superclass is in package jdk.internal.reflect and its loader is not a
    // special reflection class loader
    if (!this_klass->class_loader_data()->is_the_null_class_loader_data()) {
      PackageEntry* super_package = super->package();
      if (super_package != NULL &&
          super_package->name()->fast_compare(vmSymbols::jdk_internal_reflect()) == 0 &&
          !java_lang_ClassLoader::is_reflection_class_loader(this_klass->class_loader())) {
        ResourceMark rm(THREAD);
        Exceptions::fthrow(
          THREAD_AND_LOCATION,
          vmSymbols::java_lang_IllegalAccessError(),
          "class %s loaded by %s cannot access jdk/internal/reflect superclass %s",
          this_klass->external_name(),
          this_klass->class_loader_data()->loader_name_and_id(),
          super->external_name());
        return;
      }
    }

    Reflection::VerifyClassAccessResults vca_result =
      Reflection::verify_class_access(this_klass, InstanceKlass::cast(super), false);
    if (vca_result != Reflection::ACCESS_OK) {
      ResourceMark rm(THREAD);
      char* msg = Reflection::verify_class_access_msg(this_klass,
                                                      InstanceKlass::cast(super),
                                                      vca_result);
      if (msg == NULL) {
        bool same_module = (this_klass->module() == super->module());
        Exceptions::fthrow(
          THREAD_AND_LOCATION,
          vmSymbols::java_lang_IllegalAccessError(),
          "class %s cannot access its %ssuperclass %s (%s%s%s)",
          this_klass->external_name(),
          super->is_abstract() ? "abstract " : "",
          super->external_name(),
          (same_module) ? this_klass->joint_in_module_of_loader(super) : this_klass->class_in_module_of_loader(),
          (same_module) ? "" : "; ",
          (same_module) ? "" : super->class_in_module_of_loader());
      } else {
        // Add additional message content.
        Exceptions::fthrow(
          THREAD_AND_LOCATION,
          vmSymbols::java_lang_IllegalAccessError(),
          "superclass access check failed: %s",
          msg);
      }
    }
  }
}

// Throws IncompatibleClassChangeError / IllegalAccessError if any directly
// implemented interface is sealed without permission, or inaccessible under
// module access rules.
void ClassFileParser::check_super_interface_access(const InstanceKlass* this_klass, TRAPS) {
  assert(this_klass != NULL, "invariant");
  const Array<InstanceKlass*>* const local_interfaces = this_klass->local_interfaces();
  const int lng = local_interfaces->length();
  for (int i = lng - 1; i >= 0; i--) {
    InstanceKlass* const k = local_interfaces->at(i);
    assert (k != NULL && k->is_interface(), "invalid interface");

    if (k->is_sealed() && !k->has_as_permitted_subclass(this_klass)) {
      classfile_icce_error(this_klass->is_interface() ?
                             "class %s cannot extend sealed interface %s" :
                             "class %s cannot implement sealed interface %s",
                           k, THREAD);
      return;
    }

    Reflection::VerifyClassAccessResults vca_result =
      Reflection::verify_class_access(this_klass, k, false);
    if (vca_result != Reflection::ACCESS_OK) {
      ResourceMark rm(THREAD);
      char* msg = Reflection::verify_class_access_msg(this_klass,
                                                      k,
                                                      vca_result);
      if (msg == NULL) {
        bool same_module = (this_klass->module() == k->module());
        Exceptions::fthrow(
          THREAD_AND_LOCATION,
          vmSymbols::java_lang_IllegalAccessError(),
          "class %s cannot access its superinterface %s (%s%s%s)",
          this_klass->external_name(),
          k->external_name(),
          (same_module) ? this_klass->joint_in_module_of_loader(k) : this_klass->class_in_module_of_loader(),
          (same_module) ? "" : "; ",
          (same_module) ? "" : k->class_in_module_of_loader());
      } else {
        // Add additional message content.
        Exceptions::fthrow(
          THREAD_AND_LOCATION,
          vmSymbols::java_lang_IllegalAccessError(),
          "superinterface check failed: %s",
          msg);
      }
    }
  }
}

// Throws IncompatibleClassChangeError if any method of this_klass overrides
// an accessible final method declared in a superclass.
static void check_final_method_override(const InstanceKlass* this_klass, TRAPS) {
  assert(this_klass != NULL, "invariant");
  const Array<Method*>* const methods = this_klass->methods();
  const int num_methods = methods->length();

  // go thru each method and check if it overrides a final method
  for (int index = 0; index < num_methods; index++) {
    const Method* const m = methods->at(index);

    // skip private, static, and <init> methods
    if ((!m->is_private() && !m->is_static()) &&
        (m->name() != vmSymbols::object_initializer_name())) {

      const Symbol* const name = m->name();
      const Symbol* const signature = m->signature();
      const Klass* k = this_klass->super();
      const Method* super_m = NULL;
      while (k != NULL) {
        // skip supers that don't have final methods.
        if (k->has_final_method()) {
          // lookup a matching method in the super class hierarchy
          super_m = InstanceKlass::cast(k)->lookup_method(name, signature);
          if (super_m == NULL) {
            break; // didn't find any match; get out
          }

          if (super_m->is_final() && !super_m->is_static() &&
              !super_m->access_flags().is_private()) {
            // matching method in super is final, and not static or private
            bool can_access = Reflection::verify_member_access(this_klass,
                                                               super_m->method_holder(),
                                                               super_m->method_holder(),
                                                               super_m->access_flags(),
                                                              false, false, CHECK);
            if (can_access) {
              // this class can access super final method and therefore override
              ResourceMark rm(THREAD);
              THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                        err_msg("class %s overrides final method %s.%s%s",
                                this_klass->external_name(),
                                super_m->method_holder()->external_name(),
                                name->as_C_string(),
                                signature->as_C_string()));
            }
          }

          // continue to look from super_m's holder's super.
          k = super_m->method_holder()->super();
          continue;
        }

        k = k->super();
      }
    }
  }
}

// assumes that this_klass is an interface
static void check_illegal_static_method(const InstanceKlass* this_klass, TRAPS) {
  assert(this_klass != NULL, "invariant");
  assert(this_klass->is_interface(), "not an interface");
  const Array<Method*>* methods = this_klass->methods();
  const int num_methods = methods->length();

  for (int index = 0; index < num_methods; index++) {
    const Method* const m = methods->at(index);
    // if m is static and not the init method, throw a verify error
    if ((m->is_static()) && (m->name() != vmSymbols::class_initializer_name())) {
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbols::java_lang_VerifyError(),
        "Illegal static method %s in interface %s",
        m->name()->as_C_string(),
        this_klass->external_name()
      );
      return;
    }
  }
}

// utility methods for format checking

// Rejects illegal class-level access_flags combinations per JVMS 4.1
// (ACC_MODULE pseudo-class, abstract+final, interface constraints, etc.).
void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const {
  const bool is_module = (flags & JVM_ACC_MODULE) != 0;
  assert(_major_version >= JAVA_9_VERSION || !is_module, "JVM_ACC_MODULE should not be set");
  if (is_module) {
    ResourceMark rm(THREAD);
    Exceptions::fthrow(
      THREAD_AND_LOCATION,
      vmSymbols::java_lang_NoClassDefFoundError(),
      "%s is not a class because access_flag ACC_MODULE is set",
      _class_name->as_C_string());
    return;
  }

  if (!_need_verify) { return; }

  const bool is_interface  = (flags & JVM_ACC_INTERFACE)  != 0;
  const bool is_abstract   = (flags & JVM_ACC_ABSTRACT)   != 0;
  const bool is_final      = (flags & JVM_ACC_FINAL)      != 0;
  const bool is_super      = (flags & JVM_ACC_SUPER)      != 0;
  const bool is_enum       = (flags & JVM_ACC_ENUM)       != 0;
  const bool is_annotation = (flags & JVM_ACC_ANNOTATION) != 0;
  const bool major_gte_1_5 = _major_version >= JAVA_1_5_VERSION;
  const bool major_gte_14  = _major_version >= JAVA_14_VERSION;

  if ((is_abstract && is_final) ||
      (is_interface && !is_abstract) ||
      (is_interface && major_gte_1_5 && (is_super || is_enum)) ||
      (!is_interface && major_gte_1_5 && is_annotation)) {
    ResourceMark rm(THREAD);
    Exceptions::fthrow(
      THREAD_AND_LOCATION,
      vmSymbols::java_lang_ClassFormatError(),
      "Illegal class modifiers in class %s: 0x%X",
      _class_name->as_C_string(), flags
    );
    return;
  }
}

// True when more than one of public/protected/private is set.
static bool has_illegal_visibility(jint flags) {
  const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
  const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
  const bool is_private   = (flags & JVM_ACC_PRIVATE)   != 0;

  return ((is_public && is_protected) ||
          (is_public && is_private) ||
          (is_protected && is_private));
}

// A legal major_version.minor_version must be one of the following:
//
//  Major_version >= 45 and major_version < 56, any minor_version.
//  Major_version >= 56 and major_version <= JVM_CLASSFILE_MAJOR_VERSION and minor_version = 0.
//  Major_version = JVM_CLASSFILE_MAJOR_VERSION and minor_version = 65535 and --enable-preview is present.
//
// Throws UnsupportedClassVersionError for versions outside the supported
// range, and enforces the preview-feature minor-version (65535) rules.
void ClassFileParser::verify_class_version(u2 major, u2 minor, Symbol* class_name, TRAPS){
  ResourceMark rm(THREAD);
  const u2 max_version = JVM_CLASSFILE_MAJOR_VERSION;
  if (major < JAVA_MIN_SUPPORTED_VERSION) {
    classfile_ucve_error("%s (class file version %u.%u) was compiled with an invalid major version",
                         class_name, major, minor, THREAD);
    return;
  }

  if (major > max_version) {
    Exceptions::fthrow(
      THREAD_AND_LOCATION,
      vmSymbols::java_lang_UnsupportedClassVersionError(),
      "%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
      "this version of the Java Runtime only recognizes class file versions up to %u.0",
      class_name->as_C_string(), major, minor, JVM_CLASSFILE_MAJOR_VERSION);
    return;
  }

  if (major < JAVA_12_VERSION || minor == 0) {
    // Pre-12 class files may carry any minor version; otherwise only 0
    // (checked below) or the preview marker is legal.
    return;
  }

  if (minor == JAVA_PREVIEW_MINOR_VERSION) {
    if (major != max_version) {
      // Preview classes must match the runtime's own class file version.
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbols::java_lang_UnsupportedClassVersionError(),
        "%s (class file version %u.%u) was compiled with preview features that are unsupported. "
        "This version of the Java Runtime only recognizes preview features for class file version %u.%u",
        class_name->as_C_string(), major, minor, JVM_CLASSFILE_MAJOR_VERSION, JAVA_PREVIEW_MINOR_VERSION);
      return;
    }

    if (!Arguments::enable_preview()) {
      classfile_ucve_error("Preview features are not enabled for %s (class file version %u.%u). "
                           "Try running with '--enable-preview'",
                           class_name, major, minor, THREAD);
      return;
    }

  } else { // minor != JAVA_PREVIEW_MINOR_VERSION
    classfile_ucve_error("%s (class file version %u.%u) was compiled with an invalid non-zero minor version",
                         class_name, major, minor, THREAD);
  }
}

// Rejects illegal field access_flags combinations per JVMS 4.5
// (interface fields must be public static final; final+volatile illegal;
// at most one visibility flag).
void ClassFileParser::verify_legal_field_modifiers(jint flags,
                                                   bool is_interface,
                                                   TRAPS) const {
  if (!_need_verify) { return; }

  const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
  const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
  const bool is_private   = (flags & JVM_ACC_PRIVATE)   != 0;
  const bool is_static    = (flags & JVM_ACC_STATIC)    != 0;
  const bool is_final     = (flags & JVM_ACC_FINAL)     != 0;
  const bool is_volatile  = (flags & JVM_ACC_VOLATILE)  != 0;
  const bool is_transient = (flags & JVM_ACC_TRANSIENT) != 0;
  const bool is_enum      = (flags & JVM_ACC_ENUM)      != 0;
  const bool major_gte_1_5 = _major_version >= JAVA_1_5_VERSION;

  bool is_illegal = false;

  if (is_interface) {
    if (!is_public || !is_static || !is_final || is_private ||
        is_protected || is_volatile || is_transient ||
        (major_gte_1_5 && is_enum)) {
      is_illegal = true;
    }
  } else { // not interface
    if (has_illegal_visibility(flags) || (is_final && is_volatile)) {
      is_illegal = true;
    }
  }

  if (is_illegal) {
    ResourceMark rm(THREAD);
    Exceptions::fthrow(
      THREAD_AND_LOCATION,
      vmSymbols::java_lang_ClassFormatError(),
      "Illegal field modifiers in class %s: 0x%X",
      _class_name->as_C_string(), flags);
    return;
  }
}

// Rejects illegal method access_flags combinations per JVMS 4.6; the legal
// combinations depend on class file version, whether the holder is an
// interface, and whether the method is <init>.
void ClassFileParser::verify_legal_method_modifiers(jint flags,
                                                    bool is_interface,
                                                    const Symbol* name,
                                                    TRAPS) const {
  if (!_need_verify) { return; }

  const bool is_public       = (flags & JVM_ACC_PUBLIC)       != 0;
  const bool is_private      = (flags & JVM_ACC_PRIVATE)      != 0;
  const bool is_static       = (flags & JVM_ACC_STATIC)       != 0;
  const bool is_final        = (flags & JVM_ACC_FINAL)        != 0;
  const bool is_native       = (flags & JVM_ACC_NATIVE)       != 0;
  const bool is_abstract     = (flags & JVM_ACC_ABSTRACT)     != 0;
  const bool is_bridge       = (flags & JVM_ACC_BRIDGE)       != 0;
  const bool is_strict       = (flags & JVM_ACC_STRICT)       != 0;
  const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
  const bool is_protected    = (flags & JVM_ACC_PROTECTED)    != 0;
  const bool major_gte_1_5   = _major_version >= JAVA_1_5_VERSION;
  const bool major_gte_8     = _major_version >= JAVA_8_VERSION;
  const bool major_gte_17    = _major_version >= JAVA_17_VERSION;
  const bool is_initializer  = (name == vmSymbols::object_initializer_name());

  bool is_illegal = false;

  if (is_interface) {
    if (major_gte_8) {
      // Class file version is JAVA_8_VERSION or later Methods of
      // interfaces may set any of the flags except ACC_PROTECTED,
      // ACC_FINAL, ACC_NATIVE, and ACC_SYNCHRONIZED; they must
      // have exactly one of the ACC_PUBLIC or ACC_PRIVATE flags set.
      if ((is_public == is_private) || /* Only one of private and public should be true - XNOR */
          (is_native || is_protected || is_final || is_synchronized) ||
          // If a specific method of a class or interface has its
          // ACC_ABSTRACT flag set, it must not have any of its
          // ACC_FINAL, ACC_NATIVE, ACC_PRIVATE, ACC_STATIC,
          // ACC_STRICT, or ACC_SYNCHRONIZED flags set.  No need to
          // check for ACC_FINAL, ACC_NATIVE or ACC_SYNCHRONIZED as
          // those flags are illegal irrespective of ACC_ABSTRACT being set or not.
          (is_abstract && (is_private || is_static || (!major_gte_17 && is_strict)))) {
        is_illegal = true;
      }
    } else if (major_gte_1_5) {
      // Class file version in the interval [JAVA_1_5_VERSION, JAVA_8_VERSION)
      if (!is_public || is_private || is_protected || is_static || is_final ||
          is_synchronized || is_native || !is_abstract || is_strict) {
        is_illegal = true;
      }
    } else {
      // Class file version is pre-JAVA_1_5_VERSION
      if (!is_public || is_static || is_final || is_native || !is_abstract) {
        is_illegal = true;
      }
    }
  } else { // not interface
    if (has_illegal_visibility(flags)) {
      is_illegal = true;
    } else {
      if (is_initializer) {
        if (is_static || is_final || is_synchronized || is_native ||
            is_abstract || (major_gte_1_5 && is_bridge)) {
          is_illegal = true;
        }
      } else { // not initializer
        if (is_abstract) {
          if ((is_final || is_native || is_private || is_static ||
              (major_gte_1_5 && (is_synchronized || (!major_gte_17 && is_strict))))) {
            is_illegal = true;
          }
        }
      }
    }
  }

  if (is_illegal) {
    ResourceMark rm(THREAD);
    Exceptions::fthrow(
      THREAD_AND_LOCATION,
      vmSymbols::java_lang_ClassFormatError(),
      "Method %s in class %s has illegal modifiers: 0x%X",
      name->as_C_string(), _class_name->as_C_string(), flags);
    return;
  }
}

// Raises ClassFormatError when the buffer is not legal (modified) UTF-8;
// pre-48.0 class files get the historically lenient check.
void ClassFileParser::verify_legal_utf8(const unsigned char* buffer,
                                        int length,
                                        TRAPS) const {
  assert(_need_verify, "only called when _need_verify is true");
  if (!UTF8::is_legal_utf8(buffer, length, _major_version <= 47)) {
    classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", THREAD);
  }
}

// Unqualified names may not contain the characters '.', ';', '[', or '/'.
// In class names, '/' separates unqualified names.  This is verified in this function also.
// Method names also may not contain the characters '<' or '>', unless <init>
// or <clinit>.  Note that method names may not be <init> or <clinit> in this
// method.  Because these names have been checked as special cases before
// calling this method in verify_legal_method_name.
// // This method is also called from the modular system APIs in modules.cpp // to verify the validity of module and package names. bool ClassFileParser::verify_unqualified_name(const char* name, unsigned int length, int type) { if (length == 0) return false; // Must have at least one char. for (const char* p = name; p != name + length; p++) { switch(*p) { case JVM_SIGNATURE_DOT: case JVM_SIGNATURE_ENDCLASS: case JVM_SIGNATURE_ARRAY: // do not permit '.', ';', or '[' return false; case JVM_SIGNATURE_SLASH: // check for '//' or leading or trailing '/' which are not legal // unqualified name must not be empty if (type == ClassFileParser::LegalClass) { if (p == name || p+1 >= name+length || *(p+1) == JVM_SIGNATURE_SLASH) { return false; } } else { return false; // do not permit '/' unless it's class name } break; case JVM_SIGNATURE_SPECIAL: case JVM_SIGNATURE_ENDSPECIAL: // do not permit '<' or '>' in method names if (type == ClassFileParser::LegalMethod) { return false; } } } return true; } // Take pointer to a UTF8 byte string (not NUL-terminated). // Skip over the longest part of the string that could // be taken as a fieldname. Allow '/' if slash_ok is true. // Return a pointer to just past the fieldname. // Return NULL if no fieldname at all was found, or in the case of slash_ok // being true, we saw consecutive slashes (meaning we were looking for a // qualified path but found something that was badly-formed). 
// Scans `name` and returns a pointer just past the longest legal
// field-name prefix, or NULL on a malformed name.  ASCII identifier
// characters are handled inline; any non-ASCII character is decoded from
// UTF-8 and checked by upcalling java.lang.Character, so this can run
// arbitrary Java code and must be used only where that is safe.
static const char* skip_over_field_name(const char* const name,
                                        bool slash_ok,
                                        unsigned int length) {
  const char* p;
  jboolean last_is_slash = false;
  jboolean not_first_ch = false;

  for (p = name; p != name + length; not_first_ch = true) {
    const char* old_p = p;
    jchar ch = *p;
    if (ch < 128) {
      p++;
      // quick check for ascii
      if ((ch >= 'a' && ch <= 'z') ||
        (ch >= 'A' && ch <= 'Z') ||
        (ch == '_' || ch == '$') ||
        (not_first_ch && ch >= '0' && ch <= '9')) {
        last_is_slash = false;
        continue;
      }
      if (slash_ok && ch == JVM_SIGNATURE_SLASH) {
        if (last_is_slash) {
          return NULL;  // Don't permit consecutive slashes
        }
        last_is_slash = true;
        continue;
      }
    }
    else {
      jint unicode_ch;
      char* tmp_p = UTF8::next_character(p, &unicode_ch);
      p = tmp_p;
      last_is_slash = false;
      // Check if ch is Java identifier start or is Java identifier part
      // 4672820: call java.lang.Character methods directly without generating separate tables.
      EXCEPTION_MARK;
      // return value
      JavaValue result(T_BOOLEAN);
      // Set up the arguments to isJavaIdentifierStart or isJavaIdentifierPart
      JavaCallArguments args;
      args.push_int(unicode_ch);

      if (not_first_ch) {
        // public static boolean isJavaIdentifierPart(char ch);
        JavaCalls::call_static(&result,
          vmClasses::Character_klass(),
          vmSymbols::isJavaIdentifierPart_name(),
          vmSymbols::int_bool_signature(),
          &args,
          THREAD);
      }
      else {
        // public static boolean isJavaIdentifierStart(char ch);
        JavaCalls::call_static(&result,
          vmClasses::Character_klass(),
          vmSymbols::isJavaIdentifierStart_name(),
          vmSymbols::int_bool_signature(),
          &args,
          THREAD);
      }
      if (HAS_PENDING_EXCEPTION) {
        // Treat an exception from the upcall as "not an identifier char".
        CLEAR_PENDING_EXCEPTION;
        return NULL;
      }
      if(result.get_jboolean()) {
        continue;
      }
    }
    // Illegal character: stop just before it; valid only if we got past the
    // first character.
    return (not_first_ch) ? old_p : NULL;
  }
  return (not_first_ch) ? p : NULL;
}

// Take pointer to a UTF8 byte string (not NUL-terminated).
// Skip over the longest part of the string that could
// be taken as a field signature. Allow "void" if void_ok.
// Return a pointer to just past the signature.
// Return NULL if no legal signature is found.
// Parses one field descriptor (JVMS 4.3.2) at `signature`, returning a
// pointer just past it, or NULL if it is malformed.  Array descriptors
// are limited to 255 dimensions; class descriptors are validated against
// the (version-dependent) unqualified-name rules.
const char* ClassFileParser::skip_over_field_signature(const char* signature,
                                                       bool void_ok,
                                                       unsigned int length,
                                                       TRAPS) const {
  unsigned int array_dim = 0;
  while (length > 0) {
    switch (signature[0]) {
    case JVM_SIGNATURE_VOID:
      if (!void_ok) { return NULL; }
      // fall through: 'V' is handled like the other primitive descriptors
      // once void has been allowed.
    case JVM_SIGNATURE_BOOLEAN:
    case JVM_SIGNATURE_BYTE:
    case JVM_SIGNATURE_CHAR:
    case JVM_SIGNATURE_SHORT:
    case JVM_SIGNATURE_INT:
    case JVM_SIGNATURE_FLOAT:
    case JVM_SIGNATURE_LONG:
    case JVM_SIGNATURE_DOUBLE:
      return signature + 1;
    case JVM_SIGNATURE_CLASS: {
      if (_major_version < JAVA_1_5_VERSION) {
        // Skip over the class name if one is there
        const char* const p = skip_over_field_name(signature + 1, true, --length);

        // The next character better be a semicolon
        if (p && (p - signature) > 1 && p[0] == JVM_SIGNATURE_ENDCLASS) {
          return p + 1;
        }
      }
      else {
        // Skip leading 'L' and ignore first appearance of ';'
        signature++;
        const char* c = (const char*) memchr(signature, JVM_SIGNATURE_ENDCLASS, length - 1);
        // Format check signature
        if (c != NULL) {
          int newlen = c - (char*) signature;
          bool legal = verify_unqualified_name(signature, newlen, LegalClass);
          if (!legal) {
            classfile_parse_error("Class name is empty or contains illegal character "
                                  "in descriptor in class file %s",
                                  THREAD);
            return NULL;
          }
          return signature + newlen + 1;
        }
      }
      return NULL;
    }
    case JVM_SIGNATURE_ARRAY:
      array_dim++;
      if (array_dim > 255) {
        // 4277370: array descriptor is valid only if it represents 255 or fewer dimensions.
        classfile_parse_error("Array type descriptor has more than 255 dimensions in class file %s", THREAD);
        return NULL;
      }
      // The rest of what's there better be a legal signature
      signature++;
      length--;
      void_ok = false;
      break;
    default:
      return NULL;
    }
  }
  return NULL;
}

// Checks if name is a legal class name.
void ClassFileParser::verify_legal_class_name(const Symbol* name, TRAPS) const { if (!_need_verify || _relax_verify) { return; } assert(name->refcount() > 0, "symbol must be kept alive"); char* bytes = (char*)name->bytes(); unsigned int length = name->utf8_length(); bool legal = false; if (length > 0) { const char* p; if (bytes[0] == JVM_SIGNATURE_ARRAY) { p = skip_over_field_signature(bytes, false, length, CHECK); legal = (p != NULL) && ((p - bytes) == (int)length); } else if (_major_version < JAVA_1_5_VERSION) { if (bytes[0] != JVM_SIGNATURE_SPECIAL) { p = skip_over_field_name(bytes, true, length); legal = (p != NULL) && ((p - bytes) == (int)length); } } else { // 4900761: relax the constraints based on JSR202 spec // Class names may be drawn from the entire Unicode character set. // Identifiers between '/' must be unqualified names. // The utf8 string has been verified when parsing cpool entries. legal = verify_unqualified_name(bytes, length, LegalClass); } } if (!legal) { ResourceMark rm(THREAD); assert(_class_name != NULL, "invariant"); Exceptions::fthrow( THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(), "Illegal class name \"%.*s\" in class file %s", length, bytes, _class_name->as_C_string() ); return; } } // Checks if name is a legal field name. 
void ClassFileParser::verify_legal_field_name(const Symbol* name, TRAPS) const { if (!_need_verify || _relax_verify) { return; } char* bytes = (char*)name->bytes(); unsigned int length = name->utf8_length(); bool legal = false; if (length > 0) { if (_major_version < JAVA_1_5_VERSION) { if (bytes[0] != JVM_SIGNATURE_SPECIAL) { const char* p = skip_over_field_name(bytes, false, length); legal = (p != NULL) && ((p - bytes) == (int)length); } } else { // 4881221: relax the constraints based on JSR202 spec legal = verify_unqualified_name(bytes, length, LegalField); } } if (!legal) { ResourceMark rm(THREAD); assert(_class_name != NULL, "invariant"); Exceptions::fthrow( THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(), "Illegal field name \"%.*s\" in class %s", length, bytes, _class_name->as_C_string() ); return; } } // Checks if name is a legal method name. void ClassFileParser::verify_legal_method_name(const Symbol* name, TRAPS) const { if (!_need_verify || _relax_verify) { return; } assert(name != NULL, "method name is null"); char* bytes = (char*)name->bytes(); unsigned int length = name->utf8_length(); bool legal = false; if (length > 0) { if (bytes[0] == JVM_SIGNATURE_SPECIAL) { if (name == vmSymbols::object_initializer_name() || name == vmSymbols::class_initializer_name()) { legal = true; } } else if (_major_version < JAVA_1_5_VERSION) { const char* p; p = skip_over_field_name(bytes, false, length); legal = (p != NULL) && ((p - bytes) == (int)length); } else { // 4881221: relax the constraints based on JSR202 spec legal = verify_unqualified_name(bytes, length, LegalMethod); } } if (!legal) { ResourceMark rm(THREAD); assert(_class_name != NULL, "invariant"); Exceptions::fthrow( THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(), "Illegal method name \"%.*s\" in class %s", length, bytes, _class_name->as_C_string() ); return; } } // Checks if signature is a legal field signature. 
void ClassFileParser::verify_legal_field_signature(const Symbol* name,
                                                   const Symbol* signature,
                                                   TRAPS) const {
  if (!_need_verify) { return; }

  const char* const bytes = (const char* const)signature->bytes();
  const unsigned int length = signature->utf8_length();
  const char* const p = skip_over_field_signature(bytes, false, length, CHECK);

  // The descriptor is legal only if the scan succeeded and consumed the
  // entire symbol.
  if (p == NULL || (p - bytes) != (int)length) {
    throwIllegalSignature("Field", name, signature, CHECK);
  }
}

// Checks if signature is a legal method signature.
// Returns number of parameters
int ClassFileParser::verify_legal_method_signature(const Symbol* name,
                                                   const Symbol* signature,
                                                   TRAPS) const {
  if (!_need_verify) {
    // make sure caller's args_size will be less than 0 even for non-static
    // method so it will be recomputed in compute_size_of_parameters().
    return -2;
  }

  // Class initializers cannot have args for class format version >= 51.
  if (name == vmSymbols::class_initializer_name() &&
      signature != vmSymbols::void_method_signature() &&
      _major_version >= JAVA_7_VERSION) {
    throwIllegalSignature("Method", name, signature, CHECK_0);
    return 0;
  }

  unsigned int args_size = 0;
  const char* p = (const char*)signature->bytes();
  unsigned int length = signature->utf8_length();
  const char* nextp;

  // The first character must be a '('
  if ((length > 0) && (*p++ == JVM_SIGNATURE_FUNC)) {
    length--;
    // Skip over legal field signatures
    nextp = skip_over_field_signature(p, false, length, CHECK_0);
    while ((length > 0) && (nextp != NULL)) {
      args_size++;
      // Longs and doubles occupy two argument slots.
      if (p[0] == 'J' || p[0] == 'D') {
        args_size++;
      }
      length -= nextp - p;
      p = nextp;
      nextp = skip_over_field_signature(p, false, length, CHECK_0);
    }
    // The first non-signature thing better be a ')'
    if ((length > 0) && (*p++ == JVM_SIGNATURE_ENDFUNC)) {
      length--;
      if (name->utf8_length() > 0 && name->char_at(0) == JVM_SIGNATURE_SPECIAL) {
        // All internal methods must return void
        if ((length == 1) && (p[0] == JVM_SIGNATURE_VOID)) {
          return args_size;
        }
      } else {
        // Now we better just have a return value
        nextp = skip_over_field_signature(p, true, length, CHECK_0);
        if (nextp && ((int)length == (nextp - p))) {
          return args_size;
        }
      }
    }
  }
  // Report error
  throwIllegalSignature("Method", name, signature, CHECK_0);
  return 0;
}

// Accessors for field-layout results computed earlier during parsing;
// _field_info must already be populated.

int ClassFileParser::static_field_size() const {
  assert(_field_info != NULL, "invariant");
  return _field_info->_static_field_size;
}

int ClassFileParser::total_oop_map_count() const {
  assert(_field_info != NULL, "invariant");
  return _field_info->oop_map_blocks->_nonstatic_oop_map_count;
}

jint ClassFileParser::layout_size() const {
  assert(_field_info != NULL, "invariant");
  return _field_info->_instance_size;
}

// Assigns intrinsic IDs to the methods of ik and, under -XX:+CheckIntrinsics,
// cross-checks the intrinsic tables against the methods the class declares.
static void check_methods_for_intrinsics(const InstanceKlass* ik,
                                         const Array<Method*>* methods) {
  assert(ik != NULL, "invariant");
  assert(methods != NULL, "invariant");

  // Set up Method*::intrinsic_id as soon as we know the names of methods.
  // (We used to do this lazily, but now we query it in Rewriter,
  // which is eagerly done for every method, so we might as well do it now,
  // when everything is fresh in memory.)
  const vmSymbolID klass_id = Method::klass_id_for_intrinsics(ik);

  if (klass_id != vmSymbolID::NO_SID) {
    for (int j = 0; j < methods->length(); ++j) {
      Method* method = methods->at(j);
      method->init_intrinsic_id(klass_id);

      if (CheckIntrinsics) {
        // Check if an intrinsic is defined for method 'method',
        // but the method is not annotated with @IntrinsicCandidate.
        if (method->intrinsic_id() != vmIntrinsics::_none &&
            !method->intrinsic_candidate()) {
          tty->print("Compiler intrinsic is defined for method [%s], "
                     "but the method is not annotated with @IntrinsicCandidate.%s",
                     method->name_and_sig_as_C_string(),
                     NOT_DEBUG(" Method will not be inlined.") DEBUG_ONLY(" Exiting.")
                     );
          tty->cr();
          DEBUG_ONLY(vm_exit(1));
        }
        // Check is the method 'method' is annotated with @IntrinsicCandidate,
        // but there is no intrinsic available for it.
        if (method->intrinsic_candidate() &&
            method->intrinsic_id() == vmIntrinsics::_none) {
          tty->print("Method [%s] is annotated with @IntrinsicCandidate, "
                     "but no compiler intrinsic is defined for the method.%s",
                     method->name_and_sig_as_C_string(),
                     NOT_DEBUG("") DEBUG_ONLY(" Exiting.")
                     );
          tty->cr();
          DEBUG_ONLY(vm_exit(1));
        }
      }
    } // end for

#ifdef ASSERT
    if (CheckIntrinsics) {
      // Check for orphan methods in the current class. A method m
      // of a class C is orphan if an intrinsic is defined for method m,
      // but class C does not declare m.
      // The check is potentially expensive, therefore it is available
      // only in debug builds.
      for (auto id : EnumRange<vmIntrinsicID>{}) {
        if (vmIntrinsics::_compiledLambdaForm == id) {
          // The _compiledLamdbdaForm intrinsic is a special marker for bytecode
          // generated for the JVM from a LambdaForm and therefore no method
          // is defined for it.
          continue;
        }
        if (vmIntrinsics::_blackhole == id) {
          // The _blackhole intrinsic is a special marker. No explicit method
          // is defined for it.
          continue;
        }

        if (vmIntrinsics::class_for(id) == klass_id) {
          // Check if the current class contains a method with the same
          // name, flags, signature.
          bool match = false;
          for (int j = 0; j < methods->length(); ++j) {
            const Method* method = methods->at(j);
            if (method->intrinsic_id() == id) {
              match = true;
              break;
            }
          }

          if (!match) {
            char buf[1000];
            tty->print("Compiler intrinsic is defined for method [%s], "
                       "but the method is not available in class [%s].%s",
                       vmIntrinsics::short_name_as_C_string(id, buf, sizeof(buf)),
                       ik->name()->as_C_string(),
                       NOT_DEBUG("") DEBUG_ONLY(" Exiting.")
                       );
            tty->cr();
            DEBUG_ONLY(vm_exit(1));
          }
        }
      } // end for
    } // CheckIntrinsics
#endif // ASSERT
  }
}

// Returns the (possibly cached) InstanceKlass built from the parsed stream,
// allocating and filling it in on first call.
InstanceKlass* ClassFileParser::create_instance_klass(bool changed_by_loadhook,
                                                      const ClassInstanceInfo& cl_inst_info,
                                                      TRAPS) {
  if (_klass != NULL) {
    return _klass;
  }
  InstanceKlass* const ik =
    InstanceKlass::allocate_instance_klass(*this, CHECK_NULL);

  if (is_hidden()) {
    mangle_hidden_class_name(ik);
  }

  fill_instance_klass(ik, changed_by_loadhook, cl_inst_info, CHECK_NULL);

  assert(_klass == ik, "invariant");

  return ik;
}

void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
                                          bool changed_by_loadhook,
                                          const ClassInstanceInfo& cl_inst_info,
                                          TRAPS) {
  assert(ik != NULL, "invariant");

  // Set name and CLD before adding to CLD
  ik->set_class_loader_data(_loader_data);
  ik->set_name(_class_name);

  // Add all classes to our internal class loader list here,
  // including classes in the bootstrap (NULL) class loader.
const bool publicize = !is_internal(); _loader_data->add_class(ik, publicize); set_klass_to_deallocate(ik); assert(_field_info != NULL, "invariant"); assert(ik->static_field_size() == _field_info->_static_field_size, "sanity"); assert(ik->nonstatic_oop_map_count() == _field_info->oop_map_blocks->_nonstatic_oop_map_count, "sanity"); assert(ik->is_instance_klass(), "sanity"); assert(ik->size_helper() == _field_info->_instance_size, "sanity"); // Fill in information already parsed ik->set_should_verify_class(_need_verify); // Not yet: supers are done below to support the new subtype-checking fields ik->set_nonstatic_field_size(_field_info->_nonstatic_field_size); ik->set_has_nonstatic_fields(_field_info->_has_nonstatic_fields); assert(_fac != NULL, "invariant"); ik->set_static_oop_field_count(_fac->count[STATIC_OOP]); // this transfers ownership of a lot of arrays from // the parser onto the InstanceKlass* apply_parsed_class_metadata(ik, _java_fields_count); // can only set dynamic nest-host after static nest information is set if (cl_inst_info.dynamic_nest_host() != NULL) { ik->set_nest_host(cl_inst_info.dynamic_nest_host()); } // note that is not safe to use the fields in the parser from this point on assert(NULL == _cp, "invariant"); assert(NULL == _fields, "invariant"); assert(NULL == _methods, "invariant"); assert(NULL == _inner_classes, "invariant"); assert(NULL == _nest_members, "invariant"); assert(NULL == _combined_annotations, "invariant"); assert(NULL == _record_components, "invariant"); assert(NULL == _permitted_subclasses, "invariant"); if (_has_final_method) { ik->set_has_final_method(); } ik->copy_method_ordering(_method_ordering, CHECK); // The InstanceKlass::_methods_jmethod_ids cache // is managed on the assumption that the initial cache // size is equal to the number of methods in the class. If // that changes, then InstanceKlass::idnum_can_increment() // has to be changed accordingly. 
ik->set_initial_method_idnum(ik->methods()->length()); ik->set_this_class_index(_this_class_index); if (_is_hidden) { // _this_class_index is a CONSTANT_Class entry that refers to this // hidden class itself. If this class needs to refer to its own methods // or fields, it would use a CONSTANT_MethodRef, etc, which would reference // _this_class_index. However, because this class is hidden (it's // not stored in SystemDictionary), _this_class_index cannot be resolved // with ConstantPool::klass_at_impl, which does a SystemDictionary lookup. // Therefore, we must eagerly resolve _this_class_index now. ik->constants()->klass_at_put(_this_class_index, ik); } ik->set_minor_version(_minor_version); ik->set_major_version(_major_version); ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods); ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods); if (_is_hidden) { ik->set_is_hidden(); } // Set PackageEntry for this_klass oop cl = ik->class_loader(); Handle clh = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(cl)); ClassLoaderData* cld = ClassLoaderData::class_loader_data_or_null(clh()); ik->set_package(cld, NULL, CHECK); const Array<Method*>* const methods = ik->methods(); assert(methods != NULL, "invariant"); const int methods_len = methods->length(); check_methods_for_intrinsics(ik, methods); // Fill in field values obtained by parse_classfile_attributes if (_parsed_annotations->has_any_annotations()) { _parsed_annotations->apply_to(ik); } apply_parsed_class_attributes(ik); // Miranda methods if ((_num_miranda_methods > 0) || // if this class introduced new miranda methods or (_super_klass != NULL && _super_klass->has_miranda_methods()) // super class exists and this class inherited miranda methods ) { ik->set_has_miranda_methods(); // then set a flag } // Fill in information needed to compute superclasses. 
ik->initialize_supers(const_cast<InstanceKlass*>(_super_klass), _transitive_interfaces, CHECK); ik->set_transitive_interfaces(_transitive_interfaces); ik->set_local_interfaces(_local_interfaces); _transitive_interfaces = NULL; _local_interfaces = NULL; // Initialize itable offset tables klassItable::setup_itable_offset_table(ik); // Compute transitive closure of interfaces this class implements // Do final class setup OopMapBlocksBuilder* oop_map_blocks = _field_info->oop_map_blocks; if (oop_map_blocks->_nonstatic_oop_map_count > 0) { oop_map_blocks->copy(ik->start_of_nonstatic_oop_maps()); } if (_has_contended_fields || _parsed_annotations->is_contended() || ( _super_klass != NULL && _super_klass->has_contended_annotations())) { ik->set_has_contended_annotations(true); } // Fill in has_finalizer, has_vanilla_constructor, and layout_helper set_precomputed_flags(ik); // check if this class can access its super class check_super_class_access(ik, CHECK); // check if this class can access its superinterfaces check_super_interface_access(ik, CHECK); // check if this class overrides any final method check_final_method_override(ik, CHECK); // reject static interface methods prior to Java 8 if (ik->is_interface() && _major_version < JAVA_8_VERSION) { check_illegal_static_method(ik, CHECK); } // Obtain this_klass' module entry ModuleEntry* module_entry = ik->module(); assert(module_entry != NULL, "module_entry should always be set"); // Obtain java.lang.Module Handle module_handle(THREAD, module_entry->module()); // Allocate mirror and initialize static fields // The create_mirror() call will also call compute_modifiers() java_lang_Class::create_mirror(ik, Handle(THREAD, _loader_data->class_loader()), module_handle, _protection_domain, cl_inst_info.class_data(), CHECK); assert(_all_mirandas != NULL, "invariant"); // Generate any default methods - default methods are public interface methods // that have a default implementation. This is new with Java 8. 
if (_has_nonstatic_concrete_methods) { DefaultMethods::generate_default_methods(ik, _all_mirandas, CHECK); } // Add read edges to the unnamed modules of the bootstrap and app class loaders. if (changed_by_loadhook && !module_handle.is_null() && module_entry->is_named() && !module_entry->has_default_read_edges()) { if (!module_entry->set_has_default_read_edges()) { // We won a potential race JvmtiExport::add_default_read_edges(module_handle, THREAD); } } ClassLoadingService::notify_class_loaded(ik, false /* not shared class */); if (!is_internal()) { ik->print_class_load_logging(_loader_data, module_entry, _stream); if (ik->minor_version() == JAVA_PREVIEW_MINOR_VERSION && ik->major_version() == JVM_CLASSFILE_MAJOR_VERSION && log_is_enabled(Info, class, preview)) { ResourceMark rm; log_info(class, preview)("Loading class %s that depends on preview features (class file version %d.65535)", ik->external_name(), JVM_CLASSFILE_MAJOR_VERSION); } if (log_is_enabled(Debug, class, resolve)) { ResourceMark rm; // print out the superclass. const char * from = ik->external_name(); if (ik->java_super() != NULL) { log_debug(class, resolve)("%s %s (super)", from, ik->java_super()->external_name()); } // print out each of the interface classes referred to by this class. const Array<InstanceKlass*>* const local_interfaces = ik->local_interfaces(); if (local_interfaces != NULL) { const int length = local_interfaces->length(); for (int i = 0; i < length; i++) { const InstanceKlass* const k = local_interfaces->at(i); const char * to = k->external_name(); log_debug(class, resolve)("%s %s (interface)", from, to); } } } } JFR_ONLY(INIT_ID(ik);) // If we reach here, all is well. // Now remove the InstanceKlass* from the _klass_to_deallocate field // in order for it to not be destroyed in the ClassFileParser destructor. 
set_klass_to_deallocate(NULL); // it's official set_klass(ik); debug_only(ik->verify();) } void ClassFileParser::update_class_name(Symbol* new_class_name) { // Decrement the refcount in the old name, since we're clobbering it. _class_name->decrement_refcount(); _class_name = new_class_name; // Increment the refcount of the new name. // Now the ClassFileParser owns this name and will decrement in // the destructor. _class_name->increment_refcount(); } static bool relax_format_check_for(ClassLoaderData* loader_data) { bool trusted = loader_data->is_boot_class_loader_data() || loader_data->is_platform_class_loader_data(); bool need_verify = // verifyAll (BytecodeVerificationLocal && BytecodeVerificationRemote) || // verifyRemote (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted); return !need_verify; } ClassFileParser::ClassFileParser(ClassFileStream* stream, Symbol* name, ClassLoaderData* loader_data, const ClassLoadInfo* cl_info, Publicity pub_level, TRAPS) : _stream(stream), _class_name(NULL), _loader_data(loader_data), _is_hidden(cl_info->is_hidden()), _can_access_vm_annotations(cl_info->can_access_vm_annotations()), _orig_cp_size(0), _super_klass(), _cp(NULL), _fields(NULL), _methods(NULL), _inner_classes(NULL), _nest_members(NULL), _nest_host(0), _permitted_subclasses(NULL), _record_components(NULL), _local_interfaces(NULL), _transitive_interfaces(NULL), _combined_annotations(NULL), _class_annotations(NULL), _class_type_annotations(NULL), _fields_annotations(NULL), _fields_type_annotations(NULL), _klass(NULL), _klass_to_deallocate(NULL), _parsed_annotations(NULL), _fac(NULL), _field_info(NULL), _method_ordering(NULL), _all_mirandas(NULL), _vtable_size(0), _itable_size(0), _num_miranda_methods(0), _rt(REF_NONE), _protection_domain(cl_info->protection_domain()), _access_flags(), _pub_level(pub_level), _bad_constant_seen(0), _synthetic_flag(false), _sde_length(false), _sde_buffer(NULL), _sourcefile_index(0), _generic_signature_index(0), 
_major_version(0), _minor_version(0), _this_class_index(0), _super_class_index(0), _itfs_len(0), _java_fields_count(0), _need_verify(false), _relax_verify(false), _has_nonstatic_concrete_methods(false), _declares_nonstatic_concrete_methods(false), _has_final_method(false), _has_contended_fields(false), _has_finalizer(false), _has_empty_finalizer(false), _has_vanilla_constructor(false), _max_bootstrap_specifier_index(-1) { _class_name = name != NULL ? name : vmSymbols::unknown_class_name(); _class_name->increment_refcount(); assert(_loader_data != NULL, "invariant"); assert(stream != NULL, "invariant"); assert(_stream != NULL, "invariant"); assert(_stream->buffer() == _stream->current(), "invariant"); assert(_class_name != NULL, "invariant"); assert(0 == _access_flags.as_int(), "invariant"); // Figure out whether we can skip format checking (matching classic VM behavior) if (DumpSharedSpaces) { // verify == true means it's a 'remote' class (i.e., non-boot class) // Verification decision is based on BytecodeVerificationRemote flag // for those classes. _need_verify = (stream->need_verify()) ? BytecodeVerificationRemote : BytecodeVerificationLocal; } else { _need_verify = Verifier::should_verify_for(_loader_data->class_loader(), stream->need_verify()); } // synch back verification state to stream stream->set_verify(_need_verify); // Check if verification needs to be relaxed for this class file // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376) _relax_verify = relax_format_check_for(_loader_data); parse_stream(stream, CHECK); post_process_parsed_stream(stream, _cp, CHECK); } void ClassFileParser::clear_class_metadata() { // metadata created before the instance klass is created. Must be // deallocated if classfile parsing returns an error. 
_cp = NULL; _fields = NULL; _methods = NULL; _inner_classes = NULL; _nest_members = NULL; _permitted_subclasses = NULL; _combined_annotations = NULL; _class_annotations = _class_type_annotations = NULL; _fields_annotations = _fields_type_annotations = NULL; _record_components = NULL; } // Destructor to clean up ClassFileParser::~ClassFileParser() { _class_name->decrement_refcount(); if (_cp != NULL) { MetadataFactory::free_metadata(_loader_data, _cp); } if (_fields != NULL) { MetadataFactory::free_array<u2>(_loader_data, _fields); } if (_methods != NULL) { // Free methods InstanceKlass::deallocate_methods(_loader_data, _methods); } // beware of the Universe::empty_blah_array!! if (_inner_classes != NULL && _inner_classes != Universe::the_empty_short_array()) { MetadataFactory::free_array<u2>(_loader_data, _inner_classes); } if (_nest_members != NULL && _nest_members != Universe::the_empty_short_array()) { MetadataFactory::free_array<u2>(_loader_data, _nest_members); } if (_record_components != NULL) { InstanceKlass::deallocate_record_components(_loader_data, _record_components); } if (_permitted_subclasses != NULL && _permitted_subclasses != Universe::the_empty_short_array()) { MetadataFactory::free_array<u2>(_loader_data, _permitted_subclasses); } // Free interfaces InstanceKlass::deallocate_interfaces(_loader_data, _super_klass, _local_interfaces, _transitive_interfaces); if (_combined_annotations != NULL) { // After all annotations arrays have been created, they are installed into the // Annotations object that will be assigned to the InstanceKlass being created. // Deallocate the Annotations object and the installed annotations arrays. _combined_annotations->deallocate_contents(_loader_data); // If the _combined_annotations pointer is non-NULL, // then the other annotations fields should have been cleared. 
assert(_class_annotations == NULL, "Should have been cleared"); assert(_class_type_annotations == NULL, "Should have been cleared"); assert(_fields_annotations == NULL, "Should have been cleared"); assert(_fields_type_annotations == NULL, "Should have been cleared"); } else { // If the annotations arrays were not installed into the Annotations object, // then they have to be deallocated explicitly. MetadataFactory::free_array<u1>(_loader_data, _class_annotations); MetadataFactory::free_array<u1>(_loader_data, _class_type_annotations); Annotations::free_contents(_loader_data, _fields_annotations); Annotations::free_contents(_loader_data, _fields_type_annotations); } clear_class_metadata(); _transitive_interfaces = NULL; _local_interfaces = NULL; // deallocate the klass if already created. Don't directly deallocate, but add // to the deallocate list so that the klass is removed from the CLD::_klasses list // at a safepoint. if (_klass_to_deallocate != NULL) { _loader_data->add_to_deallocate_list(_klass_to_deallocate); } } void ClassFileParser::parse_stream(const ClassFileStream* const stream, TRAPS) { assert(stream != NULL, "invariant"); assert(_class_name != NULL, "invariant"); // BEGIN STREAM PARSING stream->guarantee_more(8, CHECK); // magic, major, minor // Magic value const u4 magic = stream->get_u4_fast(); guarantee_property(magic == JAVA_CLASSFILE_MAGIC, "Incompatible magic value %u in class file %s", magic, CHECK); // Version numbers _minor_version = stream->get_u2_fast(); _major_version = stream->get_u2_fast(); // Check version numbers - we check this even with verifier off verify_class_version(_major_version, _minor_version, _class_name, CHECK); stream->guarantee_more(3, CHECK); // length, first cp tag u2 cp_size = stream->get_u2_fast(); guarantee_property( cp_size >= 1, "Illegal constant pool size %u in class file %s", cp_size, CHECK); _orig_cp_size = cp_size; if (is_hidden()) { // Add a slot for hidden class name. 
cp_size++; } _cp = ConstantPool::allocate(_loader_data, cp_size, CHECK); ConstantPool* const cp = _cp; parse_constant_pool(stream, cp, _orig_cp_size, CHECK); assert(cp_size == (const u2)cp->length(), "invariant"); // ACCESS FLAGS stream->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len // Access flags jint flags; // JVM_ACC_MODULE is defined in JDK-9 and later. if (_major_version >= JAVA_9_VERSION) { flags = stream->get_u2_fast() & (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_MODULE); } else { flags = stream->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS; } if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { // Set abstract bit for old class files for backward compatibility flags |= JVM_ACC_ABSTRACT; } verify_legal_class_modifiers(flags, CHECK); short bad_constant = class_bad_constant_seen(); if (bad_constant != 0) { // Do not throw CFE until after the access_flags are checked because if // ACC_MODULE is set in the access flags, then NCDFE must be thrown, not CFE. classfile_parse_error("Unknown constant tag %u in class file %s", bad_constant, THREAD); return; } _access_flags.set_flags(flags); // This class and superclass _this_class_index = stream->get_u2_fast(); check_property( valid_cp_range(_this_class_index, cp_size) && cp->tag_at(_this_class_index).is_unresolved_klass(), "Invalid this class index %u in constant pool in class file %s", _this_class_index, CHECK); Symbol* const class_name_in_cp = cp->klass_name_at(_this_class_index); assert(class_name_in_cp != NULL, "class_name can't be null"); // Don't need to check whether this class name is legal or not. // It has been checked when constant pool is parsed. // However, make sure it is not an array type. 
if (_need_verify) { guarantee_property(class_name_in_cp->char_at(0) != JVM_SIGNATURE_ARRAY, "Bad class name in class file %s", CHECK); } #ifdef ASSERT // Basic sanity checks if (_is_hidden) { assert(_class_name != vmSymbols::unknown_class_name(), "hidden classes should have a special name"); } #endif // Update the _class_name as needed depending on whether this is a named, un-named, or hidden class. if (_is_hidden) { assert(_class_name != NULL, "Unexpected null _class_name"); #ifdef ASSERT if (_need_verify) { verify_legal_class_name(_class_name, CHECK); } #endif } else { // Check if name in class file matches given name if (_class_name != class_name_in_cp) { if (_class_name != vmSymbols::unknown_class_name()) { ResourceMark rm(THREAD); Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_NoClassDefFoundError(), "%s (wrong name: %s)", class_name_in_cp->as_C_string(), _class_name->as_C_string() ); return; } else { // The class name was not known by the caller so we set it from // the value in the CP. update_class_name(class_name_in_cp); } // else nothing to do: the expected class name matches what is in the CP } } // Verification prevents us from creating names with dots in them, this // asserts that that's the case. 
assert(is_internal_format(_class_name), "external class name format used internally"); if (!is_internal()) { LogTarget(Debug, class, preorder) lt; if (lt.is_enabled()){ ResourceMark rm(THREAD); LogStream ls(lt); ls.print("%s", _class_name->as_klass_external_name()); if (stream->source() != NULL) { ls.print(" source: %s", stream->source()); } ls.cr(); } } // SUPERKLASS _super_class_index = stream->get_u2_fast(); _super_klass = parse_super_class(cp, _super_class_index, _need_verify, CHECK); // Interfaces _itfs_len = stream->get_u2_fast(); parse_interfaces(stream, _itfs_len, cp, &_has_nonstatic_concrete_methods, CHECK); assert(_local_interfaces != NULL, "invariant"); // Fields (offsets are filled in later) _fac = new FieldAllocationCount(); parse_fields(stream, _access_flags.is_interface(), _fac, cp, cp_size, &_java_fields_count, CHECK); assert(_fields != NULL, "invariant"); // Methods AccessFlags promoted_flags; parse_methods(stream, _access_flags.is_interface(), &promoted_flags, &_has_final_method, &_declares_nonstatic_concrete_methods, CHECK); assert(_methods != NULL, "invariant"); // promote flags from parse_methods() to the klass' flags _access_flags.add_promoted_flags(promoted_flags.as_int()); if (_declares_nonstatic_concrete_methods) { _has_nonstatic_concrete_methods = true; } // Additional attributes/annotations _parsed_annotations = new ClassAnnotationCollector(); parse_classfile_attributes(stream, cp, _parsed_annotations, CHECK); assert(_inner_classes != NULL, "invariant"); // Finalize the Annotations metadata object, // now that all annotation arrays have been created. create_combined_annotations(CHECK); // Make sure this is the end of class file stream guarantee_property(stream->at_eos(), "Extra bytes at the end of class file %s", CHECK); // all bytes in stream read and parsed } void ClassFileParser::mangle_hidden_class_name(InstanceKlass* const ik) { ResourceMark rm; // Construct hidden name from _class_name, "+", and &ik. 
Note that we can't // use a '/' because that confuses finding the class's package. Also, can't // use an illegal char such as ';' because that causes serialization issues // and issues with hidden classes that create their own hidden classes. char addr_buf[20]; if (DumpSharedSpaces) { // We want stable names for the archived hidden classes (only for static // archive for now). Spaces under default_SharedBaseAddress() will be // occupied by the archive at run time, so we know that no dynamically // loaded InstanceKlass will be placed under there. static volatile size_t counter = 0; Atomic::cmpxchg(&counter, (size_t)0, Arguments::default_SharedBaseAddress()); // initialize it size_t new_id = Atomic::add(&counter, (size_t)1); jio_snprintf(addr_buf, 20, SIZE_FORMAT_HEX, new_id); } else { jio_snprintf(addr_buf, 20, INTPTR_FORMAT, p2i(ik)); } size_t new_name_len = _class_name->utf8_length() + 2 + strlen(addr_buf); char* new_name = NEW_RESOURCE_ARRAY(char, new_name_len); jio_snprintf(new_name, new_name_len, "%s+%s", _class_name->as_C_string(), addr_buf); update_class_name(SymbolTable::new_symbol(new_name)); // Add a Utf8 entry containing the hidden name. assert(_class_name != NULL, "Unexpected null _class_name"); int hidden_index = _orig_cp_size; // this is an extra slot we added _cp->symbol_at_put(hidden_index, _class_name); // Update this_class_index's slot in the constant pool with the new Utf8 entry. // We have to update the resolved_klass_index and the name_index together // so extract the existing resolved_klass_index first. 
CPKlassSlot cp_klass_slot = _cp->klass_slot_at(_this_class_index); int resolved_klass_index = cp_klass_slot.resolved_klass_index(); _cp->unresolved_klass_at_put(_this_class_index, hidden_index, resolved_klass_index); assert(_cp->klass_slot_at(_this_class_index).name_index() == _orig_cp_size, "Bad name_index"); } void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const stream, ConstantPool* cp, TRAPS) { assert(stream != NULL, "invariant"); assert(stream->at_eos(), "invariant"); assert(cp != NULL, "invariant"); assert(_loader_data != NULL, "invariant"); if (_class_name == vmSymbols::java_lang_Object()) { check_property(_local_interfaces == Universe::the_empty_instance_klass_array(), "java.lang.Object cannot implement an interface in class file %s", CHECK); } // We check super class after class file is parsed and format is checked if (_super_class_index > 0 && NULL == _super_klass) { Symbol* const super_class_name = cp->klass_name_at(_super_class_index); if (_access_flags.is_interface()) { // Before attempting to resolve the superclass, check for class format // errors not checked yet. 
guarantee_property(super_class_name == vmSymbols::java_lang_Object(), "Interfaces must have java.lang.Object as superclass in class file %s", CHECK); } Handle loader(THREAD, _loader_data->class_loader()); _super_klass = (const InstanceKlass*) SystemDictionary::resolve_super_or_fail(_class_name, super_class_name, loader, _protection_domain, true, CHECK); } if (_super_klass != NULL) { if (_super_klass->has_nonstatic_concrete_methods()) { _has_nonstatic_concrete_methods = true; } if (_super_klass->is_interface()) { classfile_icce_error("class %s has interface %s as super class", _super_klass, THREAD); return; } } // Compute the transitive list of all unique interfaces implemented by this class _transitive_interfaces = compute_transitive_interfaces(_super_klass, _local_interfaces, _loader_data, CHECK); assert(_transitive_interfaces != NULL, "invariant"); // sort methods _method_ordering = sort_methods(_methods); _all_mirandas = new GrowableArray<Method*>(20); Handle loader(THREAD, _loader_data->class_loader()); klassVtable::compute_vtable_size_and_num_mirandas(&_vtable_size, &_num_miranda_methods, _all_mirandas, _super_klass, _methods, _access_flags, _major_version, loader, _class_name, _local_interfaces); // Size of Java itable (in words) _itable_size = _access_flags.is_interface() ? 0 : klassItable::compute_itable_size(_transitive_interfaces); assert(_fac != NULL, "invariant"); assert(_parsed_annotations != NULL, "invariant"); _field_info = new FieldLayoutInfo(); FieldLayoutBuilder lb(class_name(), super_klass(), _cp, _fields, _parsed_annotations->is_contended(), _field_info); lb.build_layout(); // Compute reference typ _rt = (NULL ==_super_klass) ? 
REF_NONE : _super_klass->reference_type(); } void ClassFileParser::set_klass(InstanceKlass* klass) { #ifdef ASSERT if (klass != NULL) { assert(NULL == _klass, "leaking?"); } #endif _klass = klass; } void ClassFileParser::set_klass_to_deallocate(InstanceKlass* klass) { #ifdef ASSERT if (klass != NULL) { assert(NULL == _klass_to_deallocate, "leaking?"); } #endif _klass_to_deallocate = klass; } // Caller responsible for ResourceMark // clone stream with rewound position const ClassFileStream* ClassFileParser::clone_stream() const { assert(_stream != NULL, "invariant"); return _stream->clone(); } // ---------------------------------------------------------------------------- // debugging #ifdef ASSERT // return true if class_name contains no '.' (internal format is '/') bool ClassFileParser::is_internal_format(Symbol* class_name) { if (class_name != NULL) { ResourceMark rm; char* name = class_name->as_C_string(); return strchr(name, JVM_SIGNATURE_DOT) == NULL; } else { return true; } } #endif
40.85234
143
0.633804
[ "object", "3d" ]
1105ff5a8f765df381bff9682baa20e96df12ec5
947
cpp
C++
Cpp/inheritance/int.cpp
Catamondium/scratch
c67dc76e11a66fc5cf0fe142f161791223ca6861
[ "Unlicense" ]
null
null
null
Cpp/inheritance/int.cpp
Catamondium/scratch
c67dc76e11a66fc5cf0fe142f161791223ca6861
[ "Unlicense" ]
null
null
null
Cpp/inheritance/int.cpp
Catamondium/scratch
c67dc76e11a66fc5cf0fe142f161791223ca6861
[ "Unlicense" ]
null
null
null
#include <iostream> #include <vector> // Abstract class // Uninstantiable // May provide impl, interface only in this case struct Nameable { // 'virtual' gives derived types priority virtual std::string getName() = 0; virtual void setName(std::string) = 0; }; // Must implement Nameable to be instantiable struct S : Nameable { std::string name; S(std::string name) : name(name){}; std::string getName() { return "S: " + name; }; void setName(std::string other) { name = other; }; }; struct A : Nameable { std::string name; A(std::string name) : name(name){}; std::string getName() { return "A: " + name; }; void setName(std::string other) { name = other; }; }; int main() { S s{"Timmy"}; A a{"Ben"}; // Nameable can't be allocated, but can be a casted pointer std::vector<Nameable *> nameables = {&s, &a}; for (auto &n : nameables) { // getName() is available by inheritance std::cout << n->getName() << std::endl; } }
21.522727
60
0.645195
[ "vector" ]
1106ef48961d6a22a8281579145c61b4d169f511
912
cc
C++
lecture_code/essential_operations/Vector_move_assignment.cc
ahurta92/ams562-notes
e66baa1e50654e125902651f388d45cb32c81f00
[ "MIT" ]
1
2021-09-01T19:09:54.000Z
2021-09-01T19:09:54.000Z
lecture_code/essential_operations/Vector_move_assignment.cc
ahurta92/ams562-notes
e66baa1e50654e125902651f388d45cb32c81f00
[ "MIT" ]
null
null
null
lecture_code/essential_operations/Vector_move_assignment.cc
ahurta92/ams562-notes
e66baa1e50654e125902651f388d45cb32c81f00
[ "MIT" ]
1
2021-11-30T19:26:02.000Z
2021-11-30T19:26:02.000Z
#include "Vector_assignment.h" Vector::Vector(const Vector &a) // copy constructor : elem{new double[a.sz]} // allocate space for elements , sz{a.sz} { for (int i = 0; i != sz; ++i) elem[i] = a.elem[i]; // copy each element } Vector &Vector::operator=(const Vector &a) { std::cout << "called copy assignment" << std::endl; double *p = new double[a.sz]; for (int i = 0; i != a.sz; ++i) { p[i] = a.elem[i]; } delete[] elem; // delete resource this is pointing to elem = p; sz = a.sz; return *this; // predefined in a member function // points to the object for which the member function is called } Vector::Vector(Vector &&a) : elem{a.elem}, sz{a.sz} { // grab the elements std::cout << "called move constructor" << std::endl; a.elem = nullptr; // no a has no elements a.sz = 0; } Vector &Vector::operator=(Vector &&a) {} // move assignment
27.636364
79
0.598684
[ "object", "vector" ]
110f06acabd46ec4d75e46f39a8bf7268a36d5db
3,825
cpp
C++
src/bonjour/BonjourServiceRegister.cpp
JoelTroch/mumble
cdaff31b00a713b2a259d9c6a4c81bf7496a592e
[ "BSD-3-Clause" ]
1
2015-02-27T15:40:11.000Z
2015-02-27T15:40:11.000Z
src/bonjour/BonjourServiceRegister.cpp
JoelTroch/mumble
cdaff31b00a713b2a259d9c6a4c81bf7496a592e
[ "BSD-3-Clause" ]
null
null
null
src/bonjour/BonjourServiceRegister.cpp
JoelTroch/mumble
cdaff31b00a713b2a259d9c6a4c81bf7496a592e
[ "BSD-3-Clause" ]
null
null
null
/* Copyright (c) 2007, Trenton Schulz Copyright (c) 2009-2011, Stefan Hacker Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "murmur_pch.h" #include "BonjourServiceRegister.h" BonjourServiceRegister::BonjourServiceRegister(QObject *p) : QObject(p), dnssref(0), bonjourSocket(0) { } BonjourServiceRegister::~BonjourServiceRegister() { if (dnssref) { DNSServiceRefDeallocate(dnssref); dnssref = 0; } } void BonjourServiceRegister::registerService(const BonjourRecord &record, quint16 servicePort) { if (dnssref) { qWarning("Warning: Already registered a service for this object, aborting new register"); return; } quint16 bigEndianPort = servicePort; #if Q_BYTE_ORDER == Q_LITTLE_ENDIAN { bigEndianPort = static_cast<quint16>(((servicePort & 0x00ff) << 8) | ((servicePort & 0xff00) >> 8)); } #endif DNSServiceErrorType err = DNSServiceRegister(&dnssref, 0, 0, record.serviceName.isEmpty() ? 0 : record.serviceName.toUtf8().constData(), record.registeredType.toUtf8().constData(), record.replyDomain.isEmpty() ? 0 : record.replyDomain.toUtf8().constData(), 0, bigEndianPort, 0, 0, bonjourRegisterService, this); if (err != kDNSServiceErr_NoError) { emit error(err); } else { int sockfd = DNSServiceRefSockFD(dnssref); if (sockfd == -1) { emit error(kDNSServiceErr_Invalid); } else { bonjourSocket = new QSocketNotifier(sockfd, QSocketNotifier::Read, this); connect(bonjourSocket, SIGNAL(activated(int)), this, SLOT(bonjourSocketReadyRead())); } } } void BonjourServiceRegister::bonjourSocketReadyRead() { DNSServiceErrorType err = DNSServiceProcessResult(dnssref); if (err != kDNSServiceErr_NoError) emit error(err); } void DNSSD_API BonjourServiceRegister::bonjourRegisterService(DNSServiceRef, DNSServiceFlags, DNSServiceErrorType errorCode, const char *name, const char *regtype, const char *domain, void *data) { BonjourServiceRegister *serviceRegister = static_cast<BonjourServiceRegister *>(data); if (errorCode != kDNSServiceErr_NoError) { emit serviceRegister->error(errorCode); } else { serviceRegister->finalRecord = BonjourRecord(QString::fromUtf8(name), QString::fromUtf8(regtype), 
QString::fromUtf8(domain)); emit serviceRegister->serviceRegistered(serviceRegister->finalRecord); } }
38.636364
103
0.722092
[ "object" ]
11104bfb18d0c0e4be5a4b9a414235c38c00e3d9
1,522
cpp
C++
src/ShortestPath.cpp
mbychawski/traffic-simulator
ef576cb9b2083e9e1cb8671356032d90dcfa42aa
[ "BSD-3-Clause" ]
null
null
null
src/ShortestPath.cpp
mbychawski/traffic-simulator
ef576cb9b2083e9e1cb8671356032d90dcfa42aa
[ "BSD-3-Clause" ]
null
null
null
src/ShortestPath.cpp
mbychawski/traffic-simulator
ef576cb9b2083e9e1cb8671356032d90dcfa42aa
[ "BSD-3-Clause" ]
null
null
null
#include "ShortestPath.h" #include "Exceptions.h" #include <boost/graph/dijkstra_shortest_paths.hpp> #include <utility> using std::pair; ShortestPath::ShortestPath(Graph* g_) : g(g_) {} ShortestPath::~ShortestPath() { //delete g; } Vertex ShortestPath::findVertex(int id_) { VertexIterator it, end; for(boost::tie(it, end) = boost::vertices(*g); it != end; ++it) { if(getId(it) == id_) return *it; } throw MapException("Wrong vertex id!"); } int ShortestPath::getId(VertexIterator it) { return ((*g)[*(it)]).id; } int ShortestPath::getId(Vertex v) { return ((*g)[v]).id; } vector<int> ShortestPath::getPath(int from_, int to_) { Vertex from = findVertex(from_); Vertex to = findVertex(to_); vector<Vertex> predecessors(boost::num_vertices(*g)); vector<Weight> distances(boost::num_vertices(*g)); IndexMap indexMap; PredecessorMap predecessorMap(&predecessors[0], indexMap); DistanceMap distanceMap(&distances[0], indexMap); boost::dijkstra_shortest_paths(*g, from, boost::distance_map(distanceMap).predecessor_map(predecessorMap)); vector<int> path; Vertex v = to; path.push_back(getId(to)); for(Vertex u = predecessorMap[v]; u != v; v = u, u = predecessorMap[v]) { if(getId(u) != from_) { path.push_back(getId(u)); } } std::reverse(path.begin(), path.end()); path.erase(path.begin()); // usuwamy pierwszy element bo ze spawnu pojedzie prosto do niego return path; }
25.366667
111
0.647832
[ "vector" ]
111f81f2f82ff692eb6b102ee5a015bbf143ec8d
13,251
cpp
C++
ovpn-cli/ovpn-dco-cli.cpp
kostyanf14/ovpn-dco-win
c1bd516fa148e7ccde730ff681c519111082a7d4
[ "MIT" ]
16
2021-04-24T00:40:12.000Z
2022-03-28T09:55:25.000Z
ovpn-cli/ovpn-dco-cli.cpp
kostyanf14/ovpn-dco-win
c1bd516fa148e7ccde730ff681c519111082a7d4
[ "MIT" ]
5
2021-08-10T10:38:26.000Z
2022-01-18T06:22:51.000Z
ovpn-cli/ovpn-dco-cli.cpp
kostyanf14/ovpn-dco-win
c1bd516fa148e7ccde730ff681c519111082a7d4
[ "MIT" ]
12
2021-04-15T06:24:33.000Z
2022-03-23T19:20:08.000Z
/* * ovpn-dco-win OpenVPN protocol accelerator for Windows * * Copyright (C) 2020-2021 OpenVPN Inc <sales@openvpn.net> * * Author: Lev Stipakov <lev@openvpn.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <Winsock2.h> #include <Ws2tcpip.h> #include <iostream> #include <vector> #include <cstdlib> #include <iphlpapi.h> #include <NdisGuid.h> #include <asio.hpp> #include "..\uapi\ovpn-dco.h" struct Transport { bool tcp; bool ipv6; std::string local_ip; int local_port; std::string remote_ip; int remote_port; }; struct Tun { std::string vpn_ip; std::string vpn_netmask; std::string vpn_gw; }; class OvpnCli { public: OvpnCli(const std::string dev_name, const Transport& transport, const Tun& tun, const std::string& key_file, int key_direction, asio::io_context& io_context) : io_context_(io_context) { HANDLE h = CreateFileA(dev_name.c_str(), GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_SYSTEM | FILE_FLAG_OVERLAPPED, NULL); if (h == INVALID_HANDLE_VALUE) { std::cerr << "CreateFileA(" << dev_name << ") failed with code " << GetLastError() << std::endl; throw std::exception(); } handle_ = std::make_unique<asio::windows::stream_handle>(io_context, h); new_peer(transport, [self = this, key_file, key_direction, tun]() { self->peer_added(key_file, key_direction, tun); }); } private: void peer_added(const std::string& key_file, int key_direction, const Tun& 
tun) { queue_read_(); timer_ = std::make_unique<asio::steady_timer>(io_context_, duration_); timer_->async_wait([this](const asio::error_code& error) { tick_(); }); setup_crypto(key_file, key_direction); setup_keepalive(); start_vpn(); setup_tun(tun); } void start_vpn() { DWORD bytes_returned = 0; if (!DeviceIoControl(handle_->native_handle(), OVPN_IOCTL_START_VPN, NULL, 0, NULL, NULL, &bytes_returned, NULL)) { std::cerr << "DeviceIoControl(OVPN_IOCTL_START_VPN) failed with code " << GetLastError() << std::endl; throw std::exception(); } } void setup_keepalive() { OVPN_SET_PEER peer; peer.KeepaliveInterval = 10; peer.KeepaliveTimeout = 300; DWORD bytes_returned = 0; if (!DeviceIoControl(handle_->native_handle(), OVPN_IOCTL_SET_PEER, &peer, sizeof(peer), NULL, NULL, &bytes_returned, NULL)) { std::cerr << "DeviceIoControl(OVPN_IOCTL_SET_PEER) failed with code " << GetLastError() << std::endl; throw std::exception(); } } // read key_file, get crypto keys/nonce and pass them to the driver void setup_crypto(const std::string& key_file, int key_direction) { if (key_file.length() == 0) { OVPN_CRYPTO_DATA crypto_data; ZeroMemory(&crypto_data, sizeof(crypto_data)); crypto_data.CipherAlg = OVPN_CIPHER_ALG::OVPN_CIPHER_ALG_NONE; DWORD bytes_returned = 0; if (!DeviceIoControl(handle_->native_handle(), OVPN_IOCTL_NEW_KEY, &crypto_data, sizeof(crypto_data), NULL, NULL, &bytes_returned, NULL)) { std::cerr << "DeviceIoControl(OVPN_IOCTL_NEW_KEY) failed with code " << GetLastError() << std::endl; throw std::exception(); } return; } HANDLE h = CreateFileA(key_file.c_str(), GENERIC_READ, 0, NULL, OPEN_EXISTING, 0, NULL); if (h == INVALID_HANDLE_VALUE) { std::cerr << "CreateFileA(" << key_file.c_str() << ") failed with code " << GetLastError() << std::endl; throw std::exception(); } constexpr int max_key_size = 4096; std::unique_ptr<char> buf_b64(new char[max_key_size]); DWORD bytes_read; if (!ReadFile(h, buf_b64.get(), max_key_size, &bytes_read, NULL)) { std::cerr << "ReadFile() 
failed with code " << GetLastError() << std::endl; throw std::exception(); } // base64 decode crypto keys/nonce DWORD bytes_required = 0; if (!CryptStringToBinaryA(buf_b64.get(), bytes_read, CRYPT_STRING_BASE64, NULL, &bytes_required, NULL, NULL)) { if (GetLastError() != ERROR_MORE_DATA) { std::cerr << "ReadFile() failed with code " << GetLastError() << std::endl; throw std::exception(); } } std::unique_ptr<BYTE> buf(new BYTE[bytes_required]); if (!CryptStringToBinaryA(buf_b64.get(), bytes_read, CRYPT_STRING_BASE64, buf.get(), &bytes_required, NULL, NULL)) { std::cerr << "ReadFile() failed with code " << GetLastError() << std::endl; throw std::exception(); } OVPN_CRYPTO_DATA crypto_data; ZeroMemory(&crypto_data, sizeof(crypto_data)); constexpr int key_len = sizeof(crypto_data.Encrypt.Key); if (key_direction) { CopyMemory(crypto_data.Encrypt.Key, buf.get() + key_len, key_len); CopyMemory(crypto_data.Decrypt.Key, buf.get(), key_len); } else { CopyMemory(crypto_data.Encrypt.Key, buf.get(), key_len); CopyMemory(crypto_data.Decrypt.Key, buf.get() + key_len, key_len); } crypto_data.Encrypt.KeyLen = key_len; // hardcode 256bit key size crypto_data.Decrypt.KeyLen = key_len; // hardcode 256bit key size constexpr int nonce_tail_len = sizeof(crypto_data.Encrypt.NonceTail); // for test purposes decrypt and encrypt nonces are same CopyMemory(crypto_data.Encrypt.NonceTail, buf.get() + key_len * 2, nonce_tail_len); CopyMemory(crypto_data.Decrypt.NonceTail, buf.get() + key_len * 2, nonce_tail_len); crypto_data.CipherAlg = OVPN_CIPHER_ALG::OVPN_CIPHER_ALG_AES_GCM; DWORD bytes_returned = 0; if (!DeviceIoControl(handle_->native_handle(), OVPN_IOCTL_NEW_KEY, &crypto_data, sizeof(crypto_data), NULL, NULL, &bytes_returned, NULL)) { std::cerr << "DeviceIoControl(OVPN_IOCTL_NEW_KEY) failed with code " << GetLastError() << std::endl; throw std::exception(); } } void setup_tun(const Tun& tun) { std::ostringstream ss; auto index = get_adapter_index(); if (index == 0) { std::cerr << "Cannot 
get ovpn-dco adapter index" << std::endl; throw std::exception(); } ss << "netsh interface ip set address " << index << " static " << tun.vpn_ip << " " << tun.vpn_netmask << " " << tun.vpn_ip; std::string cmd = ss.str(); std::cout << cmd; std::system(cmd.c_str()); ss.str(""); ss.clear(); // decrease MTU so that payload and openvpn header wouldn't exceed physical network adapter MTU ss << "netsh interface ipv4 set subinterface " << index << " mtu = 1428"; cmd = ss.str(); std::cout << cmd; std::system(cmd.c_str()); ss.str(""); ss.clear(); // decrease MTU so that payload and openvpn header wouldn't exceed physical network adapter MTU ss << "netsh interface ipv6 set subinterface " << index << " mtu = 1428"; cmd = ss.str(); std::cout << cmd; std::system(cmd.c_str()); } void tick_() { //std::ostringstream os; //os << "hello, world " << index_ ++; //send_(os.str()); OVPN_STATS stats; DWORD bytes_returned; if (!DeviceIoControl(handle_->native_handle(), OVPN_IOCTL_GET_STATS, NULL, 0, &stats, sizeof(OVPN_STATS), &bytes_returned, NULL)) { std::cerr << "DeviceIoControl(OVPN_IOCTL_GET_STATS) failed with code " << GetLastError() << std::endl; throw std::exception(); } std::cout << "lostInCtrlPkts: " << stats.LostInControlPackets << "\nlostInDataPkts: " << stats.LostInDataPackets << "\nlostOutCtrlPkts: " << stats.LostOutControlPackets << "\nlostOutDataPkts: " << stats.LostOutDataPackets << "\nrcvdCtrlPkts: " << stats.ReceivedControlPackets << "\nrcvdDataPkts: " << stats.ReceivedDataPackets << "\nsentCtrlPkts: " << stats.SentControlPackets << "\nsentDataPkts: " << stats.SentDataPackets << "\ntransportBytesSent: " << stats.TransportBytesSent << "\ntransportBytesReceived: " << stats.TransportBytesReceived << "\ntunBytesSent: " << stats.TunBytesSent << "\ntunBytesReceived: " << stats.TunBytesReceived << "\n\n"; timer_->expires_at(timer_->expires_at() + duration_); timer_->async_wait([this](const asio::error_code& error) { tick_(); }); send_("hello, world!"); }; void queue_read_() { 
handle_->async_read_some(asio::buffer(buf, sizeof(buf)), [this](const asio::error_code& error, std::size_t bytes_transferred) { if (!error) { handle_read_(bytes_transferred); } else { std::cerr << "error " << error << " reading" << std::endl; } }); } void handle_read_(size_t len) { //std::cout << "Received: " << buf << std::endl; //send_(std::string(buf)); queue_read_(); } void send_(const std::string& str) { handle_->write_some(asio::buffer(str.c_str(), str.length())); } template <class C> void new_peer(const Transport& transport, C callback) { OVPN_NEW_PEER peer = {}; ADDRESS_FAMILY af = transport.ipv6 ? AF_INET6 : AF_INET; if (af == AF_INET6) { peer.Local.Addr6.sin6_family = af; inet_pton(af, transport.local_ip.c_str(), &(peer.Local.Addr6.sin6_addr)); peer.Local.Addr6.sin6_port = htons(transport.local_port); peer.Remote.Addr4.sin_family = af; inet_pton(af, transport.remote_ip.c_str(), &(peer.Remote.Addr6.sin6_addr)); peer.Remote.Addr6.sin6_port = htons(transport.remote_port); } else { peer.Local.Addr4.sin_family = af; inet_pton(af, transport.local_ip.c_str(), &(peer.Local.Addr4.sin_addr)); peer.Local.Addr4.sin_port = htons(transport.local_port); peer.Remote.Addr4.sin_family = transport.ipv6 ? AF_INET6 : AF_INET; inet_pton(AF_INET, transport.remote_ip.c_str(), &(peer.Remote.Addr4.sin_addr)); peer.Remote.Addr4.sin_port = htons(transport.remote_port); } peer.Proto = transport.tcp ? 
OVPN_PROTO_TCP : OVPN_PROTO_UDP; asio::windows::overlapped_ptr ov{ io_context_, [callback, peer](const asio::error_code& ec, std::size_t len) { if (!ec) { if (peer.Proto == OVPN_PROTO_TCP) std::cout << "TCP connected" << std::endl; callback(); } else { if (peer.Proto == OVPN_PROTO_TCP) std::cerr << "TCP connection error: " << ec.message() << std::endl; throw std::exception(); } } }; BOOL res = DeviceIoControl(handle_->native_handle(), OVPN_IOCTL_NEW_PEER, &peer, sizeof(peer), NULL, 0, NULL, ov.get()); if (!res) { DWORD err = GetLastError(); if (err == ERROR_IO_PENDING) { ov.release(); } else { asio::error_code errCode(err, asio::system_category()); ov.complete(errCode, 0); } } else { callback(); } } public: static ULONG get_adapter_index() { ULONG index = 0; HKEY adapter_key; RegOpenKeyExW(HKEY_LOCAL_MACHINE, ADAPTER_KEY, 0, KEY_READ, &adapter_key); int i = 0; while (true) { wchar_t enum_name[256]; DWORD len = _countof(enum_name); LONG status = RegEnumKeyExW(adapter_key, i, enum_name, &len, NULL, NULL, NULL, NULL); if (status == ERROR_NO_MORE_ITEMS) { break; } wchar_t unit_string[256]; HKEY unit_key; swprintf(unit_string, _countof(unit_string), L"%s\\%s", ADAPTER_KEY, enum_name); RegOpenKeyExW(HKEY_LOCAL_MACHINE, unit_string, 0, KEY_READ, &unit_key); WCHAR component_id_string[] = L"ComponentId"; WCHAR component_id[256]; DWORD data_type; len = sizeof(component_id); RegQueryValueExW(unit_key, component_id_string, NULL, &data_type, (LPBYTE)component_id, &len); if (wcscmp(L"ovpn-dco", component_id) == 0) { WCHAR net_cfg_instance_id_string[] = L"NetCfgInstanceId"; WCHAR net_cfg_instance_id[256]; len = sizeof(net_cfg_instance_id); RegQueryValueExW(unit_key, net_cfg_instance_id_string, NULL, &data_type, (LPBYTE)net_cfg_instance_id, &len); WCHAR buf[256]; swprintf(buf, _countof(buf), L"\\DEVICE_TCPIP_%s", net_cfg_instance_id); GetAdapterIndex(buf, &index); RegCloseKey(unit_key); break; } ++i; } RegCloseKey(adapter_key); return index; } 
std::unique_ptr<asio::windows::stream_handle> handle_; asio::io_context& io_context_; std::unique_ptr<asio::steady_timer> timer_; std::chrono::milliseconds duration_ = std::chrono::milliseconds(1000); char buf[4096]; int index_ = 0; static inline const wchar_t* ADAPTER_KEY = L"SYSTEM\\CurrentControlSet\\Control\\Class\\{4D36E972-E325-11CE-BFC1-08002BE10318}"; }; int main(int argc, char **argv) { if (argc < 10) { std::cout << "Usage: ovpn-dco-cli.exe <tcp|udp> <i4|i6> <local-ip> <local-port> <remote-ip> <remote-port> <vpn-ip> <vpn-netmask> <vpn-gw> <key-file> <key-direction>"; return 1; } asio::io_context io_context; bool tcp = std::strcmp(argv[1], "tcp") == 0; bool ipv6 = std::strcmp(argv[2], "i6") == 0; Transport transport{ tcp, ipv6, argv[3], std::atoi(argv[4]), argv[5], std::atoi(argv[6]) }; Tun tun{ argv[7], argv[8], argv[9] }; std::string key_file; if (argc > 10) key_file = argv[10]; int key_direction = 0; if (argc > 11) key_direction = std::atoi(argv[11]) > 0 ? 1 : 0; OvpnCli cli("\\\\.\\ovpn-dco", transport, tun, key_file, key_direction, io_context); io_context.run(); return 0; }
31.55
168
0.685231
[ "vector" ]
11208bc0ca6e8648a70bc79cacac269be74ab9f9
98,247
cpp
C++
vpr/src/route/route_timing.cpp
lpawelcz/vtr-verilog-to-routing
116f30cb8e8f791fb00816b1f8df0f5772bc84fa
[ "MIT" ]
null
null
null
vpr/src/route/route_timing.cpp
lpawelcz/vtr-verilog-to-routing
116f30cb8e8f791fb00816b1f8df0f5772bc84fa
[ "MIT" ]
null
null
null
vpr/src/route/route_timing.cpp
lpawelcz/vtr-verilog-to-routing
116f30cb8e8f791fb00816b1f8df0f5772bc84fa
[ "MIT" ]
null
null
null
#include <cstdio> #include <ctime> #include <cmath> #include <vector> #include <unordered_map> #include <algorithm> #include <iostream> #include "vtr_assert.h" #include "vtr_log.h" #include "vtr_time.h" #include "vpr_utils.h" #include "vpr_types.h" #include "vpr_error.h" #include "globals.h" #include "route_export.h" #include "route_common.h" #include "route_tree_timing.h" #include "route_timing.h" #include "net_delay.h" #include "stats.h" #include "echo_files.h" #include "draw.h" #include "breakpoint.h" #include "move_utils.h" #include "rr_graph.h" #include "routing_predictor.h" #include "VprTimingGraphResolver.h" // all functions in profiling:: namespace, which are only activated if PROFILE is defined #include "route_profiling.h" #include "timing_info.h" #include "timing_util.h" #include "route_budgets.h" #include "binary_heap.h" #include "bucket.h" #include "connection_router.h" #include "tatum/TimingReporter.hpp" #include "overuse_report.h" #define CONGESTED_SLOPE_VAL -0.04 enum class RouterCongestionMode { NORMAL, CONFLICTED }; //identifies the two breakpoint types in routing typedef enum router_breakpoint_type { BP_ROUTE_ITER, BP_NET_ID } bp_router_type; struct RoutingMetrics { size_t used_wirelength = 0; float sWNS = std::numeric_limits<float>::quiet_NaN(); float sTNS = std::numeric_limits<float>::quiet_NaN(); float hWNS = std::numeric_limits<float>::quiet_NaN(); float hTNS = std::numeric_limits<float>::quiet_NaN(); tatum::TimingPathInfo critical_path; }; /* * File-scope variables */ /** * @brief Run-time flag to control when router debug information is printed * Note only enables debug output if compiled with VTR_ENABLE_DEBUG_LOGGING defined * f_router_debug is used to stop the router when a breakpoint is reached. When a breakpoint is reached, this flag is set to true. * * In addition f_router_debug is used to print additional debug information during routing, for instance lookahead expected costs * information. 
*/ bool f_router_debug = false; //Count the number of times the router has failed static int num_routing_failed = 0; /******************** Subroutines local to route_timing.c ********************/ template<typename ConnectionRouter> static bool timing_driven_route_sink( ConnectionRouter& router, ClusterNetId net_id, unsigned itarget, int target_pin, const t_conn_cost_params cost_params, const t_router_opts& router_opts, t_rt_node* rt_root, t_rt_node** rt_node_of_sink, SpatialRouteTreeLookup& spatial_rt_lookup, RouterStats& router_stats, route_budgets& budgeting_inf, const RoutingPredictor& routing_predictor); template<typename ConnectionRouter> static bool timing_driven_pre_route_to_clock_root( ConnectionRouter& router, ClusterNetId net_id, int sink_node, const t_conn_cost_params cost_params, int high_fanout_threshold, t_rt_node* rt_root, SpatialRouteTreeLookup& spatial_rt_lookup, RouterStats& router_stats); void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* node); static t_rt_node* setup_routing_resources(int itry, ClusterNetId net_id, unsigned num_sinks, int min_incremental_reroute_fanout, CBRR& incremental_rerouting_res, t_rt_node** rt_node_of_sink, const t_router_opts& router_opts, bool ripup_high_fanout_nets); static bool timing_driven_check_net_delays(ClbNetPinsMatrix<float>& net_delay); void increase_short_path_crit_if_congested(std::vector<ClusterNetId>& rerouted_nets, route_budgets& budgeting_inf, int itry); static bool should_route_net(ClusterNetId net_id, CBRR& connections_inf, bool if_force_reroute); static bool early_exit_heuristic(const t_router_opts& router_opts, const WirelengthInfo& wirelength_info); static bool check_hold(const t_router_opts& router_opts, float worst_neg_slack); struct more_sinks_than { inline bool operator()(const ClusterNetId net_index1, const ClusterNetId net_index2) { auto& cluster_ctx = g_vpr_ctx.clustering(); return cluster_ctx.clb_nlist.net_sinks(net_index1).size() > 
cluster_ctx.clb_nlist.net_sinks(net_index2).size(); } }; static size_t calculate_wirelength_available(); static WirelengthInfo calculate_wirelength_info(size_t available_wirelength); static void print_route_status_header(); static void print_route_status(int itry, double elapsed_sec, float pres_fac, int num_bb_updated, const RouterStats& router_stats, const OveruseInfo& overuse_info, const WirelengthInfo& wirelength_info, std::shared_ptr<const SetupHoldTimingInfo> timing_info, float est_success_iteration); static void print_overused_nodes_status(const t_router_opts& router_opts, const OveruseInfo& overuse_info); static void print_router_criticality_histogram(const SetupTimingInfo& timing_info, const ClusteredPinAtomPinsLookup& netlist_pin_lookup); static bool is_high_fanout(int fanout, int fanout_threshold); static size_t dynamic_update_bounding_boxes(const std::vector<ClusterNetId>& nets, int high_fanout_threshold); static t_bb calc_current_bb(const t_trace* head); static bool is_better_quality_routing(const vtr::vector<ClusterNetId, t_traceback>& best_routing, const RoutingMetrics& best_routing_metrics, const WirelengthInfo& wirelength_info, std::shared_ptr<const SetupHoldTimingInfo> timing_info); static bool early_reconvergence_exit_heuristic(const t_router_opts& router_opts, int itry_since_last_convergence, std::shared_ptr<const SetupHoldTimingInfo> timing_info, const RoutingMetrics& best_routing_metrics); static void generate_route_timing_reports(const t_router_opts& router_opts, const t_analysis_opts& analysis_opts, const SetupTimingInfo& timing_info, const RoutingDelayCalculator& delay_calc); static void prune_unused_non_configurable_nets(CBRR& connections_inf); static void init_net_delay_from_lookahead(const RouterLookahead& router_lookahead, ClbNetPinsMatrix<float>& net_delay); #ifndef NO_GRAPHICS void update_router_info_and_check_bp(bp_router_type type, int net_id); #endif // The reason that try_timing_driven_route_tmpl (and descendents) are being // 
templated over is because using a virtual interface instead fully templating // the router results in a 5% runtime increase. // // The reason to template over the router in general is to enable runtime // selection of core router algorithm's, specifically the router heap. template<typename ConnectionRouter> static bool try_timing_driven_route_tmpl(const t_router_opts& router_opts, const t_analysis_opts& analysis_opts, const std::vector<t_segment_inf>& segment_inf, ClbNetPinsMatrix<float>& net_delay, const ClusteredPinAtomPinsLookup& netlist_pin_lookup, std::shared_ptr<SetupHoldTimingInfo> timing_info, std::shared_ptr<RoutingDelayCalculator> delay_calc, ScreenUpdatePriority first_iteration_priority); /************************ Subroutine definitions *****************************/ bool try_timing_driven_route(const t_router_opts& router_opts, const t_analysis_opts& analysis_opts, const std::vector<t_segment_inf>& segment_inf, ClbNetPinsMatrix<float>& net_delay, const ClusteredPinAtomPinsLookup& netlist_pin_lookup, std::shared_ptr<SetupHoldTimingInfo> timing_info, std::shared_ptr<RoutingDelayCalculator> delay_calc, ScreenUpdatePriority first_iteration_priority) { switch (router_opts.router_heap) { case e_heap_type::BINARY_HEAP: return try_timing_driven_route_tmpl<ConnectionRouter<BinaryHeap>>( router_opts, analysis_opts, segment_inf, net_delay, netlist_pin_lookup, timing_info, delay_calc, first_iteration_priority); break; case e_heap_type::BUCKET_HEAP_APPROXIMATION: return try_timing_driven_route_tmpl<ConnectionRouter<Bucket>>( router_opts, analysis_opts, segment_inf, net_delay, netlist_pin_lookup, timing_info, delay_calc, first_iteration_priority); default: VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Unknown heap type %d", router_opts.router_heap); } } template<typename ConnectionRouter> bool try_timing_driven_route_tmpl(const t_router_opts& router_opts, const t_analysis_opts& analysis_opts, const std::vector<t_segment_inf>& segment_inf, ClbNetPinsMatrix<float>& net_delay, 
const ClusteredPinAtomPinsLookup& netlist_pin_lookup,
                                  std::shared_ptr<SetupHoldTimingInfo> timing_info,
                                  std::shared_ptr<RoutingDelayCalculator> delay_calc,
                                  ScreenUpdatePriority first_iteration_priority) {
    /* Timing-driven routing algorithm.  The timing graph (includes slack)      *
     * must have already been allocated, and net_delay must have been allocated. *
     * Returns true if the routing succeeds, false otherwise.                    */
    const auto& device_ctx = g_vpr_ctx.device();
    const auto& atom_ctx = g_vpr_ctx.atom();
    const auto& cluster_ctx = g_vpr_ctx.clustering();
    auto& route_ctx = g_vpr_ctx.mutable_routing();

    //Initially, the router runs normally trying to reduce congestion while
    //balancing other metrics (timing, wirelength, run-time etc.)
    RouterCongestionMode router_congestion_mode = RouterCongestionMode::NORMAL;

    //Initialize and properly size the lookups for profiling
    profiling::profiling_initialization(get_max_pins_per_net());

    //sort so net with most sinks is routed first.
    auto sorted_nets = std::vector<ClusterNetId>(cluster_ctx.clb_nlist.nets().begin(), cluster_ctx.clb_nlist.nets().end());
    std::sort(sorted_nets.begin(), sorted_nets.end(), more_sinks_than());

    /*
     * Configure the routing predictor
     */
    RoutingPredictor routing_predictor;
    float abort_iteration_threshold = std::numeric_limits<float>::infinity(); //Default no early abort
    if (router_opts.routing_failure_predictor == SAFE) {
        abort_iteration_threshold = ROUTING_PREDICTOR_ITERATION_ABORT_FACTOR_SAFE * router_opts.max_router_iterations;
    } else if (router_opts.routing_failure_predictor == AGGRESSIVE) {
        abort_iteration_threshold = ROUTING_PREDICTOR_ITERATION_ABORT_FACTOR_AGGRESSIVE * router_opts.max_router_iterations;
    } else {
        VTR_ASSERT_MSG(router_opts.routing_failure_predictor == OFF, "Unrecognized routing failure predictor setting");
    }

    float high_effort_congestion_mode_iteration_threshold = router_opts.congested_routing_iteration_threshold_frac * router_opts.max_router_iterations;

    /* Set delay of ignored signals to zero. Non-ignored net delays are set by
     * update_net_delays_from_route_tree() inside timing_driven_route_net(),
     * which is only called for non-ignored nets. */
    for (auto net_id : cluster_ctx.clb_nlist.nets()) {
        if (cluster_ctx.clb_nlist.net_is_ignored(net_id)) {
            for (unsigned int ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ++ipin) {
                net_delay[net_id][ipin] = 0.;
            }
        }
    }

    CBRR connections_inf{};
    route_budgets budgeting_inf;

    const auto* router_lookahead = get_cached_router_lookahead(
        router_opts.lookahead_type,
        router_opts.write_router_lookahead,
        router_opts.read_router_lookahead,
        segment_inf);

    /*
     * Routing parameters
     */
    float pres_fac = update_pres_fac(router_opts.first_iter_pres_fac); /* Typically 0 -> ignore cong. */
    int bb_fac = router_opts.bb_factor;

    //When routing conflicts are detected the bounding boxes are scaled
    //by BB_SCALE_FACTOR every BB_SCALE_ITER_COUNT iterations
    constexpr float BB_SCALE_FACTOR = 2;
    constexpr int BB_SCALE_ITER_COUNT = 5;

    size_t available_wirelength = calculate_wirelength_available();

    /*
     * Routing status and metrics
     */
    bool routing_is_successful = false;
    WirelengthInfo wirelength_info;
    OveruseInfo overuse_info(device_ctx.rr_nodes.size());
    tatum::TimingPathInfo critical_path;
    int itry; //Routing iteration number
    int itry_conflicted_mode = 0;

    /*
     * Best result so far
     */
    vtr::vector<ClusterNetId, t_traceback> best_routing;
    t_clb_opins_used best_clb_opins_used_locally;
    RoutingMetrics best_routing_metrics;
    int legal_convergence_count = 0;
    std::vector<int> scratch;

    ConnectionRouter router(
        device_ctx.grid,
        *router_lookahead,
        device_ctx.rr_nodes,
        &device_ctx.rr_graph,
        device_ctx.rr_rc_data,
        device_ctx.rr_switch_inf,
        route_ctx.rr_node_route_inf);

    // Make sure template type ConnectionRouter is a ConnectionRouterInterface.
    static_assert(std::is_base_of<ConnectionRouterInterface, ConnectionRouter>::value, "ConnectionRouter must implement the ConnectionRouterInterface");

    /*
     * On the first routing iteration ignore congestion to get reasonable net
     * delay estimates. Set criticalities to 1 when timing analysis is on to
     * optimize timing, and to 0 when timing analysis is off to optimize routability.
     *
     * Subsequent iterations use the net delays from the previous iteration.
     */
    std::shared_ptr<SetupHoldTimingInfo> route_timing_info;
    {
        vtr::ScopedStartFinishTimer init_timing_timer("Initializing router criticalities");
        if (timing_info) {
            if (router_opts.initial_timing == e_router_initial_timing::ALL_CRITICAL) {
                //First routing iteration, make all nets critical for a min-delay routing
                route_timing_info = make_constant_timing_info(1.);
            } else {
                VTR_ASSERT(router_opts.initial_timing == e_router_initial_timing::LOOKAHEAD);
                {
                    //Estimate initial connection delays from the router lookahead
                    init_net_delay_from_lookahead(*router_lookahead, net_delay);

                    //Run STA to get estimated criticalities
                    timing_info->update();
                }
                route_timing_info = timing_info;
            }
        } else {
            //Not timing driven, force criticality to zero for a routability-driven routing
            route_timing_info = make_constant_timing_info(0.);
        }
        VTR_LOG("Initial Net Connection Criticality Histogram:\n");
        print_router_criticality_histogram(*route_timing_info, netlist_pin_lookup);
    }

    std::unique_ptr<ClusteredPinTimingInvalidator> pin_timing_invalidator;
    if (timing_info) {
        pin_timing_invalidator = std::make_unique<ClusteredPinTimingInvalidator>(cluster_ctx.clb_nlist,
                                                                                 netlist_pin_lookup,
                                                                                 atom_ctx.nlist,
                                                                                 atom_ctx.lookup,
                                                                                 *timing_info->timing_graph());
    }

    RouterStats router_stats;
    timing_driven_route_structs route_structs;
    float prev_iter_cumm_time = 0;
    vtr::Timer iteration_timer;
    int num_net_bounding_boxes_updated = 0;
    int itry_since_last_convergence = -1;

    // This heap is used for reserve_locally_used_opins.
    BinaryHeap small_heap;
    small_heap.init_heap(device_ctx.grid);

    // When RCV is enabled the router will not stop unless negative hold slack is 0
    // In some cases this isn't doable, due to global nets or intracluster routing issues
    // In these cases RCV will finish early if it goes RCV_FINISH_EARLY_COUNTDOWN iterations without detecting resolvable negative hold slack
    // Increasing this will make the router fail occasionally, decreasing will sometimes not let all hold violations be resolved
    constexpr int RCV_FINISH_EARLY_COUNTDOWN = 15;

    int rcv_finished_count = RCV_FINISH_EARLY_COUNTDOWN;

    print_route_status_header();
    for (itry = 1; itry <= router_opts.max_router_iterations; ++itry) {
        RouterStats router_iteration_stats;
        std::vector<ClusterNetId> rerouted_nets;

        /* Reset "is_routed" and "is_fixed" flags to indicate nets not pre-routed (yet) */
        for (auto net_id : cluster_ctx.clb_nlist.nets()) {
            route_ctx.net_status.set_is_routed(net_id, false);
            route_ctx.net_status.set_is_fixed(net_id, false);
        }

        if (itry_since_last_convergence >= 0) {
            ++itry_since_last_convergence;
        }

        // Calculate this once and pass it into net routing to check if should reroute for hold
        float worst_negative_slack = 0;
        if (budgeting_inf.if_set()) {
            worst_negative_slack = timing_info->hold_total_negative_slack();
        }

        /*
         * Route each net
         */
        for (auto net_id : sorted_nets) {
            bool was_rerouted = false;
            bool is_routable = try_timing_driven_route_net(
                router,
                net_id,
                itry,
                pres_fac,
                router_opts,
                connections_inf,
                router_iteration_stats,
                route_structs.pin_criticality,
                route_structs.rt_node_of_sink,
                net_delay,
                netlist_pin_lookup,
                route_timing_info,
                pin_timing_invalidator.get(),
                budgeting_inf,
                was_rerouted,
                worst_negative_slack,
                routing_predictor);

            if (!is_routable) {
                return (false); //Impossible to route
            }
            if (was_rerouted) {
                rerouted_nets.push_back(net_id);
#ifndef NO_GRAPHICS
                update_router_info_and_check_bp(BP_NET_ID, size_t(net_id));
#endif
            }
        }

        // Make sure any CLB OPINs used up by subblocks being hooked directly to them are reserved for that purpose
        bool rip_up_local_opins = (itry == 1 ? false : true);
        reserve_locally_used_opins(&small_heap, pres_fac, router_opts.acc_fac, rip_up_local_opins);

        /*
         * Calculate metrics for the current routing
         */
        bool routing_is_feasible = feasible_routing();
        float est_success_iteration = routing_predictor.estimate_success_iteration();

        //Update resource costs and overuse info
        if (itry == 1) {
            pathfinder_update_acc_cost_and_overuse_info(0., overuse_info); /* Acc_fac=0 for first iter. */
        } else {
            pathfinder_update_acc_cost_and_overuse_info(router_opts.acc_fac, overuse_info);
        }

        wirelength_info = calculate_wirelength_info(available_wirelength);
        routing_predictor.add_iteration_overuse(itry, overuse_info.overused_nodes);

        if (timing_info) {
            //Update timing based on the new routing
            //Note that the net delays have already been updated by timing_driven_route_net
            timing_info->update();
            timing_info->set_warn_unconstrained(false); //Don't warn again about unconstrained nodes again during routing
            pin_timing_invalidator->reset();

            //Use the real timing analysis criticalities for subsequent routing iterations
            // 'route_timing_info' is what is actually passed into the net/connection routers,
            // and for the 1st iteration may not be the actual STA results (e.g. all criticalities set to 1)
            route_timing_info = timing_info;

            critical_path = timing_info->least_slack_critical_path();

            VTR_ASSERT_SAFE(timing_driven_check_net_delays(net_delay));

            if (itry == 1) {
                generate_route_timing_reports(router_opts, analysis_opts, *timing_info, *delay_calc);
            }
        }

        float iter_cumm_time = iteration_timer.elapsed_sec();
        float iter_elapsed_time = iter_cumm_time - prev_iter_cumm_time;

        //Output progress
        print_route_status(itry, iter_elapsed_time, pres_fac, num_net_bounding_boxes_updated, router_iteration_stats, overuse_info, wirelength_info, timing_info, est_success_iteration);

        prev_iter_cumm_time = iter_cumm_time;

        //Update graphics
        if (itry == 1) {
            update_screen(first_iteration_priority, "Routing...", ROUTING, timing_info);
        } else {
            update_screen(ScreenUpdatePriority::MINOR, "Routing...", ROUTING, timing_info);
        }

        if (router_opts.save_routing_per_iteration) {
            std::string filename = vtr::string_fmt("iteration_%03d.route", itry);
            print_route(nullptr, filename.c_str());
        }

        //Update router stats (total)
        router_stats.connections_routed += router_iteration_stats.connections_routed;
        router_stats.nets_routed += router_iteration_stats.nets_routed;
        router_stats.heap_pushes += router_iteration_stats.heap_pushes;
        router_stats.heap_pops += router_iteration_stats.heap_pops;

        /*
         * Are we finished?
         */
        if (is_iteration_complete(routing_is_feasible, router_opts, itry, timing_info, rcv_finished_count == 0)) {
            auto& router_ctx = g_vpr_ctx.routing();

            if (is_better_quality_routing(best_routing, best_routing_metrics, wirelength_info, timing_info)) {
                //Save routing
                best_routing = router_ctx.trace;
                best_clb_opins_used_locally = router_ctx.clb_opins_used_locally;

                routing_is_successful = true;

                //Update best metrics
                if (timing_info) {
                    timing_driven_check_net_delays(net_delay);

                    best_routing_metrics.sTNS = timing_info->setup_total_negative_slack();
                    best_routing_metrics.sWNS = timing_info->setup_worst_negative_slack();
                    best_routing_metrics.hTNS = timing_info->hold_total_negative_slack();
                    best_routing_metrics.hWNS = timing_info->hold_worst_negative_slack();
                    best_routing_metrics.critical_path = critical_path;
                }
                best_routing_metrics.used_wirelength = wirelength_info.used_wirelength();
            }

            //Decrease pres_fac so that critical connections will take more direct routes
            //Note that we use first_iter_pres_fac here (typically zero), and switch to
            //use initial_pres_fac on the next iteration.
            pres_fac = update_pres_fac(router_opts.first_iter_pres_fac);

            //Reduce timing tolerances to re-route more delay-suboptimal signals
            connections_inf.set_connection_criticality_tolerance(0.7);
            connections_inf.set_connection_delay_tolerance(1.01);

            ++legal_convergence_count;
            itry_since_last_convergence = 0;

            VTR_ASSERT(routing_is_successful);
        }

        if (itry_since_last_convergence == 1) {
            //We used first_iter_pres_fac when we started routing again
            //after the first routing convergence. Since that is often zero,
            //we want to set pres_fac to a reasonable (i.e. typically non-zero)
            //value afterwards -- so it grows when multiplied by pres_fac_mult
            pres_fac = update_pres_fac(router_opts.initial_pres_fac);
        }

        //Have we converged the maximum number of times, did not make any changes, or does it seem
        //unlikely additional convergences will improve QoR?
        if (legal_convergence_count >= router_opts.max_convergence_count
            || router_iteration_stats.connections_routed == 0
            || early_reconvergence_exit_heuristic(router_opts, itry_since_last_convergence, timing_info, best_routing_metrics)) {
#ifndef NO_GRAPHICS
            update_router_info_and_check_bp(BP_ROUTE_ITER, -1);
#endif
            break; //Done routing
        }

        /*
         * Abort checks: Should we give-up because this routing problem is unlikely to converge to a legal routing?
         */
        if (itry == 1 && early_exit_heuristic(router_opts, wirelength_info)) {
#ifndef NO_GRAPHICS
            update_router_info_and_check_bp(BP_ROUTE_ITER, -1);
#endif
            //Abort
            break;
        }

        //Estimate at what iteration we will converge to a legal routing
        if (overuse_info.overused_nodes > ROUTING_PREDICTOR_MIN_ABSOLUTE_OVERUSE_THRESHOLD) {
            //Only consider aborting if we have a significant number of overused resources
            if (!std::isnan(est_success_iteration) && est_success_iteration > abort_iteration_threshold && router_opts.routing_budgets_algorithm != YOYO) {
                VTR_LOG("Routing aborted, the predicted iteration for a successful route (%.1f) is too high.\n", est_success_iteration);
#ifndef NO_GRAPHICS
                update_router_info_and_check_bp(BP_ROUTE_ITER, -1);
#endif
                break; //Abort
            }
        }

        if (itry == 1 && router_opts.exit_after_first_routing_iteration) {
            VTR_LOG("Exiting after first routing iteration as requested\n");
#ifndef NO_GRAPHICS
            update_router_info_and_check_bp(BP_ROUTE_ITER, -1);
#endif
            break;
        }

        /*
         * Prepare for the next iteration
         */
        if (router_opts.route_bb_update == e_route_bb_update::DYNAMIC) {
            num_net_bounding_boxes_updated = dynamic_update_bounding_boxes(rerouted_nets, router_opts.high_fanout_threshold);
        }

        if (itry >= high_effort_congestion_mode_iteration_threshold) {
            //We are approaching the maximum number of routing iterations,
            //and still do not have a legal routing. Switch to a mode which
            //focuses more on attempting to resolve routing conflicts.
            router_congestion_mode = RouterCongestionMode::CONFLICTED;
        }

        //Update pres_fac
        if (itry == 1) {
            pres_fac = update_pres_fac(router_opts.initial_pres_fac);
        } else {
            pres_fac *= router_opts.pres_fac_mult;

            /* Avoid overflow for high iteration counts, even if acc_cost is big */
            pres_fac = update_pres_fac(std::min(pres_fac, static_cast<float>(HUGE_POSITIVE_FLOAT / 1e5)));

            // Increase short path criticality if it's having a hard time resolving hold violations due to congestion
            if (budgeting_inf.if_set()) {
                bool rcv_finished = false;

                /* This constant represents how much extra delay the budget increaser adds to the minimum and maximum delay budgets
                 * Experimentally this value delivers fast hold slack resolution, while not overwhelming the router
                 * Increasing this will make it resolve hold faster, but could result in lower circuit quality */
                constexpr float budget_increase_factor = 300e-12;

                if (itry > 5 && worst_negative_slack != 0) rcv_finished = budgeting_inf.increase_min_budgets_if_struggling(budget_increase_factor, timing_info, worst_negative_slack, netlist_pin_lookup);
                if (rcv_finished)
                    rcv_finished_count--;
                else
                    rcv_finished_count = RCV_FINISH_EARLY_COUNTDOWN;
            }
        }

        if (router_congestion_mode == RouterCongestionMode::CONFLICTED) {
            //The design appears to have routing conflicts which are difficult to resolve:
            //  1) Don't re-route legal connections due to delay. This allows
            //     the router to focus on the actual conflicts
            //  2) Increase the net bounding boxes. This potentially allows
            //     the router to route around otherwise congested regions
            //     (at the cost of high run-time).

            //Increase the size of the net bounding boxes to give the router more
            //freedom to find alternate paths.
            //
            //In the case of routing conflicts there are multiple connections competing
            //for the same resources which can not resolve the congestion themselves.
            //In normal routing mode we try to keep the bounding boxes small to minimize
            //run-time, but this can limits how far signals can detour (i.e. they can't
            //route outside the bounding box), which can cause conflicts to oscillate back
            //and forth without resolving.
            //
            //By scaling the bounding boxes here, we slowly increase the router's search
            //space in hopes of it allowing signals to move further out of the way to
            //alleviate the conflicts.
            if (itry_conflicted_mode % BB_SCALE_ITER_COUNT == 0) {
                //We scale the bounding boxes by BB_SCALE_FACTOR,
                //every BB_SCALE_ITER_COUNT iterations. This ensures
                //that we give the router some time (BB_SCALE_ITER_COUNT) to try
                //resolve/negotiate congestion at the new BB factor.
                //
                //Note that we increase the BB factor slowly to try and minimize
                //the bounding box size (since larger bounding boxes slow the router down).
                auto& grid = g_vpr_ctx.device().grid;
                int max_grid_dim = std::max(grid.width(), grid.height());

                //Scale by BB_SCALE_FACTOR but clip to grid size to avoid overflow
                bb_fac = std::min<int>(max_grid_dim, bb_fac * BB_SCALE_FACTOR);

                route_ctx.route_bb = load_route_bb(bb_fac);
            }

            ++itry_conflicted_mode;
        }

        if (timing_info) {
            if (should_setup_lower_bound_connection_delays(itry, router_opts)) {
                // first iteration sets up the lower bound connection delays since only timing is optimized for
                connections_inf.set_stable_critical_path_delay(critical_path.delay());
                connections_inf.set_lower_bound_connection_delays(net_delay);

                //load budgets using information from uncongested delay information
                budgeting_inf.load_route_budgets(net_delay, timing_info, netlist_pin_lookup, router_opts);
                /*for debugging purposes*/
                // if (budgeting_inf.if_set()) {
                //     budgeting_inf.print_route_budget(std::string("route_budgets_") + std::to_string(itry) + ".txt", net_delay);
                // }

                if (router_opts.routing_budgets_algorithm == YOYO) router.set_rcv_enabled(true);

            } else {
                bool stable_routing_configuration = true;

                /*
                 * Determine if any connection need to be forcibly re-routed due to timing
                 */

                //Yes, if explicitly enabled
                bool should_ripup_for_delay = (router_opts.incr_reroute_delay_ripup == e_incr_reroute_delay_ripup::ON);

                //Or, if things are not too congested
                should_ripup_for_delay |= (router_opts.incr_reroute_delay_ripup == e_incr_reroute_delay_ripup::AUTO
                                           && router_congestion_mode == RouterCongestionMode::NORMAL);

                if (should_ripup_for_delay) {
                    if (connections_inf.critical_path_delay_grew_significantly(critical_path.delay())) {
                        // only need to forcibly reroute if critical path grew significantly
                        stable_routing_configuration = connections_inf.forcibly_reroute_connections(router_opts.max_criticality,
                                                                                                    timing_info,
                                                                                                    netlist_pin_lookup,
                                                                                                    net_delay);
                    }
                }

                // not stable if any connection needs to be forcibly rerouted
                if (stable_routing_configuration) {
                    connections_inf.set_stable_critical_path_delay(critical_path.delay());
                }
            }
        } else {
            /* If timing analysis is not enabled, make sure that the criticalities and the
             * net_delays stay as 0 so that wirelength can be optimized. */
            for (auto net_id : cluster_ctx.clb_nlist.nets()) {
                for (unsigned int ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ++ipin) {
                    net_delay[net_id][ipin] = 0.;
                }
            }
        }

        if (router_opts.congestion_analysis) profiling::congestion_analysis();
        if (router_opts.fanout_analysis) profiling::time_on_fanout_analysis();
        // profiling::time_on_criticality_analysis();
    }

    if (routing_is_successful) {
        VTR_LOG("Restoring best routing\n");

        auto& router_ctx = g_vpr_ctx.mutable_routing();

        /* Restore congestion from best route */
        for (auto net_id : cluster_ctx.clb_nlist.nets()) {
            pathfinder_update_path_occupancy(route_ctx.trace[net_id].head, -1);
            pathfinder_update_path_occupancy(best_routing[net_id].head, 1);
        }
        router_ctx.trace = best_routing;
        router_ctx.clb_opins_used_locally = best_clb_opins_used_locally;

        prune_unused_non_configurable_nets(connections_inf);

        if (timing_info) {
            VTR_LOG("Critical path: %g ns\n", 1e9 * best_routing_metrics.critical_path.delay());
        }

        VTR_LOG("Successfully routed after %d routing iterations.\n", itry);
    } else {
        VTR_LOG("Routing failed.\n");

        //If the routing fails, print the overused info
        print_overused_nodes_status(router_opts, overuse_info);

        ++num_routing_failed;

#ifdef VTR_ENABLE_DEBUG_LOGGING
        if (f_router_debug) print_invalid_routing_info();
#endif
    }

    VTR_LOG("Final Net Connection Criticality Histogram:\n");
    print_router_criticality_histogram(*route_timing_info, netlist_pin_lookup);

    VTR_LOG(
        "Router Stats: total_nets_routed: %zu total_connections_routed: %zu total_heap_pushes: %zu total_heap_pops: %zu\n",
        router_stats.nets_routed, router_stats.connections_routed, router_stats.heap_pushes, router_stats.heap_pops);

    return routing_is_successful;
}

//Attempts to route a single net within the current routing iteration.
//Returns false only if the net is unroutable even ignoring congestion
//(i.e. the rr_graph is disconnected); 'was_rerouted' is set when the
//net's routing was actually (re)computed this iteration.
template<typename ConnectionRouter>
bool try_timing_driven_route_net(ConnectionRouter& router,
                                 ClusterNetId net_id,
                                 int itry,
                                 float pres_fac,
                                 const t_router_opts& router_opts,
                                 CBRR& connections_inf,
                                 RouterStats& router_stats,
                                 float* pin_criticality,
                                 t_rt_node** rt_node_of_sink,
                                 ClbNetPinsMatrix<float>& net_delay,
                                 const ClusteredPinAtomPinsLookup& netlist_pin_lookup,
                                 std::shared_ptr<SetupHoldTimingInfo> timing_info,
                                 ClusteredPinTimingInvalidator* pin_timing_invalidator,
                                 route_budgets& budgeting_inf,
                                 bool& was_rerouted,
                                 float worst_negative_slack,
                                 const RoutingPredictor& routing_predictor) {
    auto& cluster_ctx = g_vpr_ctx.clustering();
    auto& route_ctx = g_vpr_ctx.mutable_routing();

    bool is_routed = false;

    connections_inf.prepare_routing_for_net(net_id);

    //Only force a hold-driven reroute when budgets are in use and there is
    //actually negative hold slack to resolve.
    bool reroute_for_hold = false;
    if (budgeting_inf.if_set()) {
        reroute_for_hold = (budgeting_inf.get_should_reroute(net_id));
        reroute_for_hold &= worst_negative_slack != 0;
    }

    if (route_ctx.net_status.is_fixed(net_id)) { /* Skip pre-routed nets. */
        is_routed = true;
    } else if (cluster_ctx.clb_nlist.net_is_ignored(net_id)) { /* Skip ignored nets.
*/ is_routed = true; } else if (!(reroute_for_hold) && should_route_net(net_id, connections_inf, true) == false) { is_routed = true; } else { // track time spent vs fanout profiling::net_fanout_start(); is_routed = timing_driven_route_net(router, net_id, itry, pres_fac, router_opts, connections_inf, router_stats, pin_criticality, rt_node_of_sink, net_delay[net_id].data(), netlist_pin_lookup, timing_info, pin_timing_invalidator, budgeting_inf, worst_negative_slack, routing_predictor); profiling::net_fanout_end(cluster_ctx.clb_nlist.net_sinks(net_id).size()); /* Impossible to route? (disconnected rr_graph) */ if (is_routed) { route_ctx.net_status.set_is_routed(net_id, true); } else { VTR_LOG("Routing failed for net %d\n", net_id); } was_rerouted = true; //Flag to record whether routing was actually changed } return (is_routed); } /* * NOTE: * Suggest using a timing_driven_route_structs struct. Memory is managed for you */ void alloc_timing_driven_route_structs(float** pin_criticality_ptr, int** sink_order_ptr, t_rt_node*** rt_node_of_sink_ptr) { /* Allocates all the structures needed only by the timing-driven router. */ int max_sinks = std::max(get_max_pins_per_net() - 1, 0); *pin_criticality_ptr = new float[max_sinks] - 1; /* First sink is pin #1. */ *sink_order_ptr = new int[max_sinks] - 1; *rt_node_of_sink_ptr = new t_rt_node*[max_sinks] - 1; alloc_route_tree_timing_structs(); } /* * NOTE: * Suggest using a timing_driven_route_structs struct. Memory is managed for you */ void free_timing_driven_route_structs(float* pin_criticality, int* sink_order, t_rt_node** rt_node_of_sink) { /* Frees all the structures needed only by the timing-driven router. */ // coverity[offset_free : Intentional] delete[](pin_criticality + 1); /* Starts at index 1. 
*/
    // coverity[offset_free : Intentional]
    delete[](sink_order + 1);
    // coverity[offset_free : Intentional]
    delete[](rt_node_of_sink + 1);

    free_route_tree_timing_structs();
}

//Constructor: allocates the per-net routing scratch arrays (pin criticality,
//sink order, and route-tree node of each sink).
timing_driven_route_structs::timing_driven_route_structs() {
    alloc_timing_driven_route_structs(&pin_criticality,
                                      &sink_order,
                                      &rt_node_of_sink);
}

//Destructor: releases the arrays allocated by the constructor.
timing_driven_route_structs::~timing_driven_route_structs() {
    free_timing_driven_route_structs(pin_criticality,
                                     sink_order,
                                     rt_node_of_sink);
}

//With budgets enabled (and once past iteration 9): update per-net congestion
//bookkeeping and raise the short-path criticality of each rerouted net, to
//help resolve hold violations caused by congestion.
void increase_short_path_crit_if_congested(std::vector<ClusterNetId>& rerouted_nets,
                                           route_budgets& budgeting_inf,
                                           int itry) {
    if (budgeting_inf.if_set() && itry > 9) {
        for (auto net_id : rerouted_nets) {
            if (budgeting_inf.get_should_reroute(net_id)) {
                budgeting_inf.update_congestion_times(net_id);
            } else {
                budgeting_inf.not_congested_this_iteration(net_id);
            }
            budgeting_inf.increase_short_crit(net_id, 4);
        }
    }
}

//Returns the maximum pin count over all non-ignored clustered nets
//(used to size the router's per-net scratch structures).
int get_max_pins_per_net() {
    int max_pins_per_net = 0;
    auto& cluster_ctx = g_vpr_ctx.clustering();
    for (auto net_id : cluster_ctx.clb_nlist.nets()) {
        if (!cluster_ctx.clb_nlist.net_is_ignored(net_id))
            max_pins_per_net = std::max(max_pins_per_net, (int)cluster_ctx.clb_nlist.net_pins(net_id).size());
    }
    return (max_pins_per_net);
}

//Comparator ordering sink pin indices by descending pre-computed criticality.
struct Criticality_comp {
    const float* criticality;

    Criticality_comp(const float* calculated_criticalities)
        : criticality{calculated_criticalities} {
    }

    bool operator()(int a, int b) const {
        return criticality[a] > criticality[b];
    }
};

template<typename ConnectionRouter>
bool timing_driven_route_net(ConnectionRouter& router,
                             ClusterNetId net_id,
                             int itry,
                             float pres_fac,
                             const t_router_opts& router_opts,
                             CBRR& connections_inf,
                             RouterStats& router_stats,
                             float* pin_criticality,
                             t_rt_node** rt_node_of_sink,
                             float* net_delay,
                             const ClusteredPinAtomPinsLookup& netlist_pin_lookup,
                             std::shared_ptr<SetupHoldTimingInfo> timing_info,
                             ClusteredPinTimingInvalidator* pin_timing_invalidator,
                             route_budgets& budgeting_inf,
                             float worst_neg_slack,
                             const RoutingPredictor& routing_predictor) {
    /*
Returns true as long as found some way to hook up this net, even if that
     * way resulted in overuse of resources (congestion). If there is no way
     * to route this net, even ignoring congestion, it returns false. In this
     * case the rr_graph is disconnected and you can give up. */
    auto& cluster_ctx = g_vpr_ctx.clustering();
    auto& device_ctx = g_vpr_ctx.device();
    const auto& rr_graph = device_ctx.rr_graph;
    auto& route_ctx = g_vpr_ctx.routing();

    unsigned int num_sinks = cluster_ctx.clb_nlist.net_sinks(net_id).size();

    VTR_LOGV_DEBUG(f_router_debug, "Routing Net %zu (%zu sinks)\n", size_t(net_id), num_sinks);

    //Rebuild the partial route tree for this net (may reuse part of the
    //previous iteration's routing for incremental rerouting).
    t_rt_node* rt_root;
    rt_root = setup_routing_resources(itry, net_id, num_sinks, router_opts.min_incremental_reroute_fanout, connections_inf, rt_node_of_sink, router_opts, check_hold(router_opts, worst_neg_slack));

    bool high_fanout = is_high_fanout(num_sinks, router_opts.high_fanout_threshold);

    SpatialRouteTreeLookup spatial_route_tree_lookup;
    if (high_fanout) {
        spatial_route_tree_lookup = build_route_tree_spatial_lookup(net_id, rt_root);
    }

    // after this point the route tree is correct
    // remaining_targets from this point on are the **pin indices** that have yet to be routed
    auto& remaining_targets = connections_inf.get_remaining_targets();

    // calculate criticality of remaining target pins
    for (int ipin : remaining_targets) {
        if (timing_info) {
            auto clb_pin = cluster_ctx.clb_nlist.net_pin(net_id, ipin);
            if (!route_ctx.is_clock_net[net_id]) {
                pin_criticality[ipin] = calculate_clb_net_pin_criticality(*timing_info, netlist_pin_lookup, clb_pin);
            } else {
                // Use max_criticality for clock nets.
                //
                // calculate_clb_net_pin_criticality likely doesn't generate
                // good values for clock nets.
                //
                // This will cause them to use min delay paths rather than
                // avoid congestion. As a future enhancement, the clock nets
                // should likely route for min slew, but that is a larger
                // change.
                pin_criticality[ipin] = router_opts.max_criticality;
            }

            /* Pin criticality is between 0 and 1.
             * Shift it downwards by 1 - max_criticality (max_criticality is 0.99 by default,
             * so shift down by 0.01) and cut off at 0. This means that all pins with small
             * criticalities (<0.01) get criticality 0 and are ignored entirely, and everything
             * else becomes a bit less critical. This effect becomes more pronounced if
             * max_criticality is set lower. */
            // VTR_ASSERT(pin_criticality[ipin] > -0.01 && pin_criticality[ipin] < 1.01);
            pin_criticality[ipin] = std::max(pin_criticality[ipin] - (1.0 - router_opts.max_criticality), 0.0);

            /* Take pin criticality to some power (1 by default). */
            pin_criticality[ipin] = std::pow(pin_criticality[ipin], router_opts.criticality_exp);

            /* Cut off pin criticality at max_criticality. */
            pin_criticality[ipin] = std::min(pin_criticality[ipin], router_opts.max_criticality);
        } else {
            //No timing info, implies we want a min delay routing, so use criticality of 1.
            pin_criticality[ipin] = 1.;
        }
    }

    // compare the criticality of different sink nodes
    sort(begin(remaining_targets), end(remaining_targets), Criticality_comp{pin_criticality});

    /* Update base costs according to fanout and criticality rules */
    update_rr_base_costs(num_sinks);

    t_conn_delay_budget conn_delay_budget;
    t_conn_cost_params cost_params;
    cost_params.astar_fac = router_opts.astar_fac;
    cost_params.bend_cost = router_opts.bend_cost;
    cost_params.pres_fac = pres_fac;
    cost_params.delay_budget = ((budgeting_inf.if_set()) ? &conn_delay_budget : nullptr);

    // Pre-route to clock source for clock nets (marked as global nets)
    if (cluster_ctx.clb_nlist.net_is_global(net_id) && router_opts.two_stage_clock_routing) {
        //VTR_ASSERT(router_opts.clock_modeling == DEDICATED_NETWORK);
        int sink_node = device_ctx.virtual_clock_network_root_idx;

        enable_router_debug(router_opts, net_id, sink_node, itry, &router);

        VTR_LOGV_DEBUG(f_router_debug, "Pre-routing global net %zu\n", size_t(net_id));

        // Set to the max timing criticality which should intern minimize clock insertion
        // delay by selecting a direct route from the clock source to the virtual sink
        cost_params.criticality = router_opts.max_criticality;

        if (!timing_driven_pre_route_to_clock_root(
                router,
                net_id,
                sink_node,
                cost_params,
                router_opts.high_fanout_threshold,
                rt_root,
                spatial_route_tree_lookup,
                router_stats)) {
            return false;
        }
    }

    if (budgeting_inf.if_set()) {
        budgeting_inf.set_should_reroute(net_id, false);
    }

    // explore in order of decreasing criticality (no longer need sink_order array)
    for (unsigned itarget = 0; itarget < remaining_targets.size(); ++itarget) {
        int target_pin = remaining_targets[itarget];

        int sink_rr = route_ctx.net_rr_terminals[net_id][target_pin];

        enable_router_debug(router_opts, net_id, sink_rr, itry, &router);

        VTR_LOGV_DEBUG(f_router_debug, "Routing Net %zu (%zu sinks)\n", size_t(net_id), num_sinks);

        cost_params.criticality = pin_criticality[target_pin];

        //Per-connection delay budgets (only meaningful when budgeting is enabled)
        if (budgeting_inf.if_set()) {
            conn_delay_budget.max_delay = budgeting_inf.get_max_delay_budget(net_id, target_pin);
            conn_delay_budget.target_delay = budgeting_inf.get_delay_target(net_id, target_pin);
            conn_delay_budget.min_delay = budgeting_inf.get_min_delay_budget(net_id, target_pin);
            conn_delay_budget.short_path_criticality = budgeting_inf.get_crit_short_path(net_id, target_pin);
            conn_delay_budget.routing_budgets_algorithm = router_opts.routing_budgets_algorithm;
        }

        profiling::conn_start();

        // build a branch in the route tree to the target
        if (!timing_driven_route_sink(router, net_id, itarget, target_pin, cost_params, router_opts, rt_root, rt_node_of_sink, spatial_route_tree_lookup, router_stats, budgeting_inf, routing_predictor))
            return false;

        profiling::conn_finish(route_ctx.net_rr_terminals[net_id][0],
                               sink_rr,
                               pin_criticality[target_pin]);

        ++router_stats.connections_routed;
    } // finished all sinks

    ++router_stats.nets_routed;
    profiling::net_finish();

    /* For later timing analysis. */

    // may have to update timing delay of the previously legally reached sinks since downstream capacitance could be changed
    update_net_delays_from_route_tree(net_delay, rt_node_of_sink, net_id, timing_info.get(), pin_timing_invalidator);

    if (router_opts.update_lower_bound_delays) {
        for (int ipin : remaining_targets) {
            connections_inf.update_lower_bound_connection_delay(net_id, ipin, net_delay[ipin]);
        }
    }

    if (!cluster_ctx.clb_nlist.net_is_ignored(net_id)) {
        for (unsigned ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ++ipin) {
            if (net_delay[ipin] == 0) { // should be SOURCE->OPIN->IPIN->SINK
                VTR_ASSERT(rr_graph.node_type(RRNodeId(rt_node_of_sink[ipin]->parent_node->parent_node->inode)) == OPIN);
            }
        }
    }

    VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= rr_graph.node_capacity(RRNodeId(rt_root->inode)), "SOURCE should never be congested");

    // route tree is not kept persistent since building it from the traceback the next iteration takes almost 0 time
    VTR_LOGV_DEBUG(f_router_debug, "Routed Net %zu (%zu sinks)\n", size_t(net_id), num_sinks);
    free_route_tree(rt_root);
    router.empty_rcv_route_tree_set();
    return (true);
}

//Routes the net's partial route tree to the virtual clock network root
//'sink_node', then (below) strips that virtual sink back out of the
//traceback/route tree so the real sinks can be routed from the clock trunk.
template<typename ConnectionRouter>
static bool timing_driven_pre_route_to_clock_root(
    ConnectionRouter& router,
    ClusterNetId net_id,
    int sink_node,
    const t_conn_cost_params cost_params,
    int high_fanout_threshold,
    t_rt_node* rt_root,
    SpatialRouteTreeLookup& spatial_rt_lookup,
    RouterStats& router_stats) {
    auto& route_ctx = g_vpr_ctx.mutable_routing();
    auto& cluster_ctx =
g_vpr_ctx.clustering();
    auto& m_route_ctx = g_vpr_ctx.mutable_routing();

    // High-fanout nets keep a spatial route-tree lookup so only nearby routing is pushed to the heap.
    bool high_fanout = is_high_fanout(cluster_ctx.clb_nlist.net_sinks(net_id).size(), high_fanout_threshold);

    VTR_LOGV_DEBUG(f_router_debug, "Net %zu pre-route to (%s)\n", size_t(net_id), describe_rr_node(sink_node).c_str());

    profiling::sink_criticality_start();

    // Traceback and route tree must agree before we extend the routing.
    VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));

    t_bb bounding_box = route_ctx.route_bb[net_id];

    router.clear_modified_rr_node_info();

    // Search from the existing route tree to the virtual clock sink.
    bool found_path;
    t_heap cheapest;
    std::tie(found_path, cheapest) = router.timing_driven_route_connection_from_route_tree(
        rt_root,
        sink_node,
        cost_params,
        bounding_box,
        router_stats);

    // TODO: Parts of the rest of this function are repetitive to code in timing_driven_route_sink. Should refactor.
    if (!found_path) {
        ClusterBlockId src_block = cluster_ctx.clb_nlist.net_driver_block(net_id);
        VTR_LOG("Failed to route connection from '%s' to '%s' for net '%s' (#%zu)\n",
                cluster_ctx.clb_nlist.block_name(src_block).c_str(),
                describe_rr_node(sink_node).c_str(),
                cluster_ctx.clb_nlist.net_name(net_id).c_str(),
                size_t(net_id));
        if (f_router_debug) {
            update_screen(ScreenUpdatePriority::MAJOR, "Unable to route connection.", ROUTING, nullptr);
        }
        return false;
    }

    profiling::sink_criticality_end(cost_params.criticality);

    /* NB: In the code below I keep two records of the partial routing: the  *
     * traceback and the route_tree.  The route_tree enables fast            *
     * recomputation of the Elmore delay to each node in the partial routing.*
     * The traceback lets me reuse all the routines written for              *
     * breadth-first routing, which all take a traceback structure as input. */

    /* This is a special pre-route to a sink that does not correspond to any *
     * netlist pin, but which can be reached from the global clock root      *
     * drive points. Therefore, we can set the net pin index of the sink     *
     * node to OPEN (meaning illegal) as it is not meaningful for this sink. */
    t_trace* new_route_start_tptr = update_traceback(&cheapest, OPEN, net_id);
    VTR_ASSERT_DEBUG(validate_traceback(route_ctx.trace[net_id].head));

    update_route_tree(&cheapest, OPEN, ((high_fanout) ? &spatial_rt_lookup : nullptr));
    VTR_ASSERT_DEBUG(verify_route_tree(rt_root));
    VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));
    VTR_ASSERT_DEBUG(!high_fanout || validate_route_tree_spatial_lookup(rt_root, spatial_rt_lookup));

    if (f_router_debug) {
        std::string msg = vtr::string_fmt("Routed Net %zu connection to RR node %d successfully", size_t(net_id), sink_node);
        update_screen(ScreenUpdatePriority::MAJOR, msg.c_str(), ROUTING, nullptr);
    }

    // Commit the new branch's occupancy into the pathfinder congestion counts.
    pathfinder_update_path_occupancy(new_route_start_tptr, 1);

    // need to guarantee ALL nodes' path costs are HUGE_POSITIVE_FLOAT at the start of routing to a sink
    // do this by resetting all the path_costs that have been touched while routing to the current sink
    router.reset_path_costs();

    // Post route trace back and route tree clean up:
    // - remove sink from trace back and route tree
    // - fix routing for all nodes leading to the sink
    // - free up virtual sink occupancy
    disable_expansion_and_remove_sink_from_route_tree_nodes(rt_root);
    VTR_LOGV_DEBUG(f_router_debug, "Traceback tail before update %d \n", route_ctx.trace[net_id].tail->index);
    drop_traceback_tail(net_id);
    VTR_LOGV_DEBUG(f_router_debug, "Updated traceback ptrs: %d %d \n", route_ctx.trace[net_id].head->index, route_ctx.trace[net_id].tail->index);
    // The virtual clock sink carries no real occupancy; release it.
    m_route_ctx.rr_node_route_inf[sink_node].set_occ(0);

    // routed to a sink successfully
    return true;
}

// Routes one connection (net_id -> target_pin) by growing a branch from the
// existing route tree; updates traceback, pathfinder occupancy and
// rr_node_route_inf. Returns false if no path is found.
template<typename ConnectionRouter>
static bool timing_driven_route_sink(
    ConnectionRouter& router,
    ClusterNetId net_id,
    unsigned itarget,
    int target_pin,
    const t_conn_cost_params cost_params,
    const t_router_opts& router_opts,
    t_rt_node* rt_root,
    t_rt_node** rt_node_of_sink,
    SpatialRouteTreeLookup& spatial_rt_lookup,
    RouterStats& router_stats,
    route_budgets& budgeting_inf,
    const
RoutingPredictor& routing_predictor) {
    /* Build a path from the existing route tree rooted at rt_root to the target_node
     * add this branch to the existing route tree and update pathfinder costs and rr_node_route_inf to reflect this */
    auto& route_ctx = g_vpr_ctx.mutable_routing();
    auto& cluster_ctx = g_vpr_ctx.clustering();

    profiling::sink_criticality_start();

    int sink_node = route_ctx.net_rr_terminals[net_id][target_pin];

    VTR_LOGV_DEBUG(f_router_debug, "Net %zu Target %d (%s)\n", size_t(net_id), itarget, describe_rr_node(sink_node).c_str());

    VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));

    router.clear_modified_rr_node_info();

    bool found_path;
    t_heap cheapest;
    t_bb bounding_box = route_ctx.route_bb[net_id];

    bool net_is_global = cluster_ctx.clb_nlist.net_is_global(net_id);
    bool high_fanout = is_high_fanout(cluster_ctx.clb_nlist.net_sinks(net_id).size(), router_opts.high_fanout_threshold);
    constexpr float HIGH_FANOUT_CRITICALITY_THRESHOLD = 0.9;
    bool sink_critical = (cost_params.criticality > HIGH_FANOUT_CRITICALITY_THRESHOLD);
    bool net_is_clock = route_ctx.is_clock_net[net_id] != 0;

    //We normally route high fanout nets by only adding spatially close-by routing to the heap (reduces run-time).
    //However, if the current sink is 'critical' from a timing perspective, we put the entire route tree back onto
    //the heap to ensure it has more flexibility to find the best path.
    if (high_fanout && !sink_critical && !net_is_global && !net_is_clock && -routing_predictor.get_slope() > router_opts.high_fanout_max_slope) {
        std::tie(found_path, cheapest) = router.timing_driven_route_connection_from_route_tree_high_fanout(rt_root,
                                                                                                           sink_node,
                                                                                                           cost_params,
                                                                                                           bounding_box,
                                                                                                           spatial_rt_lookup,
                                                                                                           router_stats);
    } else {
        std::tie(found_path, cheapest) = router.timing_driven_route_connection_from_route_tree(rt_root,
                                                                                               sink_node,
                                                                                               cost_params,
                                                                                               bounding_box,
                                                                                               router_stats);
    }

    if (!found_path) {
        ClusterBlockId src_block = cluster_ctx.clb_nlist.net_driver_block(net_id);
        ClusterBlockId sink_block = cluster_ctx.clb_nlist.pin_block(*(cluster_ctx.clb_nlist.net_pins(net_id).begin() + target_pin));
        VTR_LOG("Failed to route connection from '%s' to '%s' for net '%s' (#%zu)\n",
                cluster_ctx.clb_nlist.block_name(src_block).c_str(),
                cluster_ctx.clb_nlist.block_name(sink_block).c_str(),
                cluster_ctx.clb_nlist.net_name(net_id).c_str(),
                size_t(net_id));
        if (f_router_debug) {
            update_screen(ScreenUpdatePriority::MAJOR, "Unable to route connection.", ROUTING, nullptr);
        }
        return false;
    }

    profiling::sink_criticality_end(cost_params.criticality);

    /* NB: In the code below I keep two records of the partial routing: the  *
     * traceback and the route_tree.  The route_tree enables fast            *
     * recomputation of the Elmore delay to each node in the partial routing.*
     * The traceback lets me reuse all the routines written for              *
     * breadth-first routing, which all take a traceback structure as input. */

    int inode = cheapest.index;
    route_ctx.rr_node_route_inf[inode].target_flag--; /* Connected to this SINK. */
    t_trace* new_route_start_tptr = update_traceback(&cheapest, target_pin, net_id);
    VTR_ASSERT_DEBUG(validate_traceback(route_ctx.trace[net_id].head));

    // Record the new sink's route tree node so delays can be updated later.
    rt_node_of_sink[target_pin] = update_route_tree(&cheapest, target_pin, ((high_fanout) ? &spatial_rt_lookup : nullptr));
    VTR_ASSERT_DEBUG(verify_route_tree(rt_root));
    VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));
    VTR_ASSERT_DEBUG(!high_fanout || validate_route_tree_spatial_lookup(rt_root, spatial_rt_lookup));

    if (f_router_debug) {
        std::string msg = vtr::string_fmt("Routed Net %zu connection %d to RR node %d successfully", size_t(net_id), itarget, sink_node);
        update_screen(ScreenUpdatePriority::MAJOR, msg.c_str(), ROUTING, nullptr);
    }

    // RCV budgeting: if the achieved delay undershoots the minimum budget,
    // flag the net for rerouting.
    if (budgeting_inf.if_set() && cheapest.path_data != nullptr && cost_params.delay_budget) {
        if (cheapest.path_data->backward_delay < cost_params.delay_budget->min_delay) {
            budgeting_inf.set_should_reroute(net_id, true);
        }
    }

    // Commit the new branch's occupancy into the pathfinder congestion counts.
    pathfinder_update_path_occupancy(new_route_start_tptr, 1);

    // need to guarantee ALL nodes' path costs are HUGE_POSITIVE_FLOAT at the start of routing to a sink
    // do this by resetting all the path_costs that have been touched while routing to the current sink
    router.reset_path_costs();

    // routed to a sink successfully
    return true;
}

static t_rt_node* setup_routing_resources(int itry,
                                          ClusterNetId net_id,
                                          unsigned num_sinks,
                                          int min_incremental_reroute_fanout,
                                          CBRR& connections_inf,
                                          t_rt_node** rt_node_of_sink,
                                          const t_router_opts& router_opts,
                                          bool ripup_high_fanout_nets) {
    /* Build and return a partial route tree from the legal connections from last iteration.
* along the way do:
     *   update pathfinder costs to be accurate to the partial route tree
     *   update the net's traceback to be accurate to the partial route tree
     *   find and store the pins that still need to be reached in incremental_rerouting_resources.remaining_targets
     *   find and store the rt nodes that have been reached in incremental_rerouting_resources.reached_rt_sinks
     *   mark the rr_node sinks as targets to be reached */
    auto& route_ctx = g_vpr_ctx.routing();

    t_rt_node* rt_root;

    // for nets below a certain size (min_incremental_reroute_fanout), rip up any old routing
    // otherwise, we incrementally reroute by reusing legal parts of the previous iteration
    // convert the previous iteration's traceback into the starting route tree for this iteration
    if ((int)num_sinks < min_incremental_reroute_fanout || itry == 1 || ripup_high_fanout_nets) {
        profiling::net_rerouted();

        // rip up the whole net: retract its congestion and drop its traceback
        pathfinder_update_path_occupancy(route_ctx.trace[net_id].head, -1);
        free_traceback(net_id);

        rt_root = init_route_tree_to_source(net_id);
        for (unsigned int sink_pin = 1; sink_pin <= num_sinks; ++sink_pin)
            connections_inf.toreach_rr_sink(sink_pin);
        // since all connections will be rerouted for this net, clear all of net's forced reroute flags
        connections_inf.clear_force_reroute_for_net();

        // when we don't prune the tree, we also don't know the sink node indices
        // thus we'll use functions that act on pin indices like mark_ends instead
        // of their versions that act on node indices directly like mark_remaining_ends
        mark_ends(net_id);
    } else {
        auto& reached_rt_sinks = connections_inf.get_reached_rt_sinks();
        auto& remaining_targets = connections_inf.get_remaining_targets();

        profiling::net_rebuild_start();

        // convert the previous iteration's traceback into a route tree
        rt_root = traceback_to_route_tree(net_id);

        //Sanity check that route tree and traceback are equivalent before pruning
        VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));

        // check for edge correctness
        VTR_ASSERT_SAFE(is_valid_skeleton_tree(rt_root));
        // Skip this check if RCV is enabled, as RCV can use another method to cause reroutes
        VTR_ASSERT_SAFE(should_route_net(net_id, connections_inf, true) || router_opts.routing_budgets_algorithm == YOYO);

        //Prune the branches of the tree that don't legally lead to sinks
        rt_root = prune_route_tree(rt_root, connections_inf);

        //Now that the tree has been pruned, we can free the old traceback
        // NOTE: this must happen *after* pruning since it changes the
        //       recorded congestion
        pathfinder_update_path_occupancy(route_ctx.trace[net_id].head, -1);
        free_traceback(net_id);

        if (rt_root) { //Partially pruned
            profiling::route_tree_preserved();

            //Since we have a valid partial routing (to at least one SINK)
            //we need to make sure the traceback is synchronized to the route tree
            traceback_from_route_tree(net_id, rt_root, reached_rt_sinks.size());

            //Sanity check the traceback for self-consistency
            VTR_ASSERT_DEBUG(validate_traceback(route_ctx.trace[net_id].head));

            //Sanity check that route tree and traceback are equivalent after pruning
            VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));

            // put the updated occupancies of the route tree nodes back into pathfinder
            pathfinder_update_path_occupancy(route_ctx.trace[net_id].head, 1);
        } else { //Fully destroyed
            profiling::route_tree_pruned();

            //Initialize only to source
            rt_root = init_route_tree_to_source(net_id);

            //NOTE: We leave the traceback uninitialized, so update_traceback()
            //      will correctly add the SOURCE node when the branch to
            //      the first SINK is found.
            VTR_ASSERT(route_ctx.trace[net_id].head == nullptr);
            VTR_ASSERT(route_ctx.trace[net_id].tail == nullptr);
            VTR_ASSERT(route_ctx.trace_nodes[net_id].empty());
        }

        //Update R/C
        load_new_subtree_R_upstream(rt_root);
        load_new_subtree_C_downstream(rt_root);

        // every sink is either already reached or still to be routed
        VTR_ASSERT(reached_rt_sinks.size() + remaining_targets.size() == num_sinks);

        //Record current routing
        add_route_tree_to_rr_node_lookup(rt_root);

        // give lookup on the reached sinks
        for (t_rt_node* sink_node : reached_rt_sinks) {
            rt_node_of_sink[sink_node->net_pin_index] = sink_node;
        }

        profiling::net_rebuild_end(num_sinks, remaining_targets.size());

        // check for R_upstream C_downstream and edge correctness
        VTR_ASSERT_SAFE(is_valid_route_tree(rt_root));
        // congestion should've been pruned away
        VTR_ASSERT_SAFE(is_uncongested_route_tree(rt_root));

        // mark remaining ends
        mark_remaining_ends(net_id, remaining_targets);

        // still need to calculate the tree's time delay (0 Tarrival means from SOURCE)
        load_route_tree_Tdel(rt_root, 0);

        // mark the lookup (rr_node_route_inf) for existing tree elements as NO_PREVIOUS so add_to_path stops when it reaches one of them
        load_route_tree_rr_route_inf(rt_root);
    }

    // completed constructing the partial route tree and updated all other data structures to match
    return rt_root;
}

void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* rt_node) {
    /* Remove sink in route tree and mark all nodes
     * leading to the sink as unexpandable.
*/
    auto& device_ctx = g_vpr_ctx.device();
    const auto& rr_graph = device_ctx.rr_graph;
    t_rt_node* child_node;
    t_linked_rt_edge* linked_rt_edge;
    linked_rt_edge = rt_node->u.child_list;

    // Depth-first walk: drop the SINK leaf when found, and mark every node on
    // the way as not re-expandable so future searches won't expand through it.
    while (linked_rt_edge != nullptr) {
        child_node = linked_rt_edge->child;

        if (rr_graph.node_type(RRNodeId(child_node->inode)) == SINK) {
            VTR_LOGV_DEBUG(f_router_debug, "Removing sink %d from route tree\n", child_node->inode);
            // NOTE(review): detaches the child by clearing both union members
            // before freeing the SINK node; the edge struct itself is not
            // freed here — presumably reclaimed elsewhere, confirm upstream.
            rt_node->u.child_list = nullptr;
            rt_node->u.next = nullptr;
            free(child_node);
            break;
        } else {
            rt_node->re_expand = false;
            VTR_LOGV_DEBUG(f_router_debug, "unexpanding: %d in route tree\n", rt_node->inode);
        }
        disable_expansion_and_remove_sink_from_route_tree_nodes(child_node);
        linked_rt_edge = linked_rt_edge->next;
    }
}

void update_rr_base_costs(int fanout) {
    /* Changes the base costs of different types of rr_nodes according to the *
     * criticality, fanout, etc. of the current net being routed (net_id).    */
    auto& device_ctx = g_vpr_ctx.mutable_device();

    float factor;
    size_t index;

    /* Other reasonable values for factor include fanout and 1 */
    factor = sqrt(fanout);

    for (index = CHANX_COST_INDEX_START; index < device_ctx.rr_indexed_data.size(); index++) {
        if (device_ctx.rr_indexed_data[RRIndexedDataId(index)].T_quadratic > 0.) { /* pass transistor */
            device_ctx.rr_indexed_data[RRIndexedDataId(index)].base_cost = device_ctx.rr_indexed_data[RRIndexedDataId(index)].saved_base_cost * factor;
        } else {
            device_ctx.rr_indexed_data[RRIndexedDataId(index)].base_cost = device_ctx.rr_indexed_data[RRIndexedDataId(index)].saved_base_cost;
        }
    }
}

static bool timing_driven_check_net_delays(ClbNetPinsMatrix<float>& net_delay) {
    constexpr float ERROR_TOL = 0.0001;

    /* Checks that the net delays computed incrementally during timing driven *
     * routing match those computed from scratch by the net_delay.c module.   */
*/ auto& cluster_ctx = g_vpr_ctx.clustering(); unsigned int ipin; ClbNetPinsMatrix<float> net_delay_check = make_net_pins_matrix<float>(cluster_ctx.clb_nlist); load_net_delay_from_routing(net_delay_check); for (auto net_id : cluster_ctx.clb_nlist.nets()) { for (ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ipin++) { if (net_delay_check[net_id][ipin] == 0.) { /* Should be only GLOBAL nets */ if (fabs(net_delay[net_id][ipin]) > ERROR_TOL) { VPR_ERROR(VPR_ERROR_ROUTE, "in timing_driven_check_net_delays: net %lu pin %d.\n" "\tIncremental calc. net_delay is %g, but from scratch net delay is %g.\n", size_t(net_id), ipin, net_delay[net_id][ipin], net_delay_check[net_id][ipin]); } } else { float error = fabs(1.0 - net_delay[net_id][ipin] / net_delay_check[net_id][ipin]); if (error > ERROR_TOL) { VPR_ERROR(VPR_ERROR_ROUTE, "in timing_driven_check_net_delays: net %d pin %lu.\n" "\tIncremental calc. net_delay is %g, but from scratch net delay is %g.\n", size_t(net_id), ipin, net_delay[net_id][ipin], net_delay_check[net_id][ipin]); } } } } return true; } /* Detect if net should be routed or not */ static bool should_route_net(ClusterNetId net_id, CBRR& connections_inf, bool if_force_reroute) { auto& route_ctx = g_vpr_ctx.routing(); auto& device_ctx = g_vpr_ctx.device(); const auto& rr_graph = device_ctx.rr_graph; t_trace* tptr = route_ctx.trace[net_id].head; if (tptr == nullptr) { /* No routing yet. */ return true; } for (;;) { int inode = tptr->index; int occ = route_ctx.rr_node_route_inf[inode].occ(); int capacity = rr_graph.node_capacity(RRNodeId(inode)); if (occ > capacity) { return true; /* overuse detected */ } if (tptr->iswitch == OPEN) { //End of a branch // even if net is fully routed, not complete if parts of it should get ripped up (EXPERIMENTAL) if (if_force_reroute) { if (connections_inf.should_force_reroute_connection(inode)) { return true; } } tptr = tptr->next; /* Skip next segment (duplicate of original branch node). 
*/ if (tptr == nullptr) break; } tptr = tptr->next; } /* End while loop -- did an entire traceback. */ VTR_ASSERT(connections_inf.get_remaining_targets().empty()); return false; /* Current route has no overuse */ } static bool early_exit_heuristic(const t_router_opts& router_opts, const WirelengthInfo& wirelength_info) { /* Early exit code for cases where it is obvious that a successful route will not be found * Heuristic: If total wirelength used in first routing iteration is X% of total available wirelength, exit */ if (wirelength_info.used_wirelength_ratio() > router_opts.init_wirelength_abort_threshold) { VTR_LOG("Wire length usage ratio %g exceeds limit of %g, fail routing.\n", wirelength_info.used_wirelength_ratio(), router_opts.init_wirelength_abort_threshold); return true; } return false; } static bool check_hold(const t_router_opts& router_opts, float worst_neg_slack) { /* When RCV is enabled, it's necessary to be able to completely ripup high fanout nets if there is still negative hold slack * Normally the router will prune the illegal branches of high fanout nets, this will bypass this */ if (router_opts.routing_budgets_algorithm != YOYO) { return false; } else if (worst_neg_slack != 0) { return true; } return false; } static size_t calculate_wirelength_available() { auto& device_ctx = g_vpr_ctx.device(); const auto& rr_graph = device_ctx.rr_graph; size_t available_wirelength = 0; // But really what's happening is that this for loop iterates over every node and determines the available wirelength for (const RRNodeId& rr_id : device_ctx.rr_graph.nodes()) { const t_rr_type channel_type = rr_graph.node_type(rr_id); if (channel_type == CHANX || channel_type == CHANY) { available_wirelength += rr_graph.node_capacity(rr_id) * rr_graph.node_length(rr_id); } } return available_wirelength; } static WirelengthInfo calculate_wirelength_info(size_t available_wirelength) { auto& cluster_ctx = g_vpr_ctx.clustering(); size_t used_wirelength = 0; 
VTR_ASSERT(available_wirelength > 0);

    // Sum wirelength over all routable (non-global, has-sinks) nets.
    for (auto net_id : cluster_ctx.clb_nlist.nets()) {
        if (!cluster_ctx.clb_nlist.net_is_ignored(net_id)
            && cluster_ctx.clb_nlist.net_sinks(net_id).size() != 0) { /* Globals don't count. */
            int bends, wirelength, segments;
            get_num_bends_and_length(net_id, &bends, &wirelength, &segments);

            used_wirelength += wirelength;
        }
    }

    return WirelengthInfo(available_wirelength, used_wirelength);
}

// Prints the column headers matched by print_route_status() below.
static void print_route_status_header() {
    VTR_LOG("---- ------ ------- ---- ------- ------- ------- ----------------- --------------- -------- ---------- ---------- ---------- ---------- --------\n");
    VTR_LOG("Iter Time pres BBs Heap Re-Rtd Re-Rtd Overused RR Nodes Wirelength CPD sTNS sWNS hTNS hWNS Est Succ\n");
    VTR_LOG(" (sec) fac Updt push Nets Conns (ns) (ns) (ns) (ns) (ns) Iter\n");
    VTR_LOG("---- ------ ------- ---- ------- ------- ------- ----------------- --------------- -------- ---------- ---------- ---------- ---------- --------\n");
}

// Emits one per-iteration status row; field widths must stay in sync with
// print_route_status_header(). timing_info may be null (non-timing-driven),
// in which case the timing columns print "N/A".
static void print_route_status(int itry, double elapsed_sec, float pres_fac, int num_bb_updated, const RouterStats& router_stats, const OveruseInfo& overuse_info, const WirelengthInfo& wirelength_info, std::shared_ptr<const SetupHoldTimingInfo> timing_info, float est_success_iteration) {
    //Iteration
    VTR_LOG("%4d", itry);

    //Elapsed Time
    VTR_LOG(" %6.1f", elapsed_sec);

    //pres_fac
    constexpr int PRES_FAC_DIGITS = 7;
    constexpr int PRES_FAC_SCI_PRECISION = 1;
    pretty_print_float(" ", pres_fac, PRES_FAC_DIGITS, PRES_FAC_SCI_PRECISION);
    //VTR_LOG(" %5.1f", pres_fac);

    //Number of bounding boxes updated
    VTR_LOG(" %4d", num_bb_updated);

    //Heap push/pop
    constexpr int HEAP_OP_DIGITS = 7;
    constexpr int HEAP_OP_SCI_PRECISION = 2;
    pretty_print_uint(" ", router_stats.heap_pushes, HEAP_OP_DIGITS, HEAP_OP_SCI_PRECISION);
    VTR_ASSERT(router_stats.heap_pops <= router_stats.heap_pushes);

    //Rerouted nets
    constexpr int NET_ROUTED_DIGITS = 7;
    constexpr int NET_ROUTED_SCI_PRECISION = 2;
    pretty_print_uint(" ", router_stats.nets_routed, NET_ROUTED_DIGITS, NET_ROUTED_SCI_PRECISION);

    //Rerouted connections
    constexpr int CONN_ROUTED_DIGITS = 7;
    constexpr int CONN_ROUTED_SCI_PRECISION = 2;
    pretty_print_uint(" ", router_stats.connections_routed, CONN_ROUTED_DIGITS, CONN_ROUTED_SCI_PRECISION);

    //Overused RR nodes
    constexpr int OVERUSE_DIGITS = 7;
    constexpr int OVERUSE_SCI_PRECISION = 2;
    pretty_print_uint(" ", overuse_info.overused_nodes, OVERUSE_DIGITS, OVERUSE_SCI_PRECISION);
    VTR_LOG(" (%6.3f%%)", overuse_info.overused_node_ratio() * 100);

    //Wirelength
    constexpr int WL_DIGITS = 7;
    constexpr int WL_SCI_PRECISION = 2;
    pretty_print_uint(" ", wirelength_info.used_wirelength(), WL_DIGITS, WL_SCI_PRECISION);
    VTR_LOG(" (%4.1f%%)", wirelength_info.used_wirelength_ratio() * 100);

    //CPD
    if (timing_info) {
        float cpd = timing_info->least_slack_critical_path().delay();
        VTR_LOG(" %#8.3f", 1e9 * cpd); // seconds -> ns
    } else {
        VTR_LOG(" %8s", "N/A");
    }

    //sTNS
    if (timing_info) {
        float sTNS = timing_info->setup_total_negative_slack();
        VTR_LOG(" % #10.4g", 1e9 * sTNS);
    } else {
        VTR_LOG(" %10s", "N/A");
    }

    //sWNS
    if (timing_info) {
        float sWNS = timing_info->setup_worst_negative_slack();
        VTR_LOG(" % #10.3f", 1e9 * sWNS);
    } else {
        VTR_LOG(" %10s", "N/A");
    }

    //hTNS
    if (timing_info) {
        float hTNS = timing_info->hold_total_negative_slack();
        VTR_LOG(" % #10.4g", 1e9 * hTNS);
    } else {
        VTR_LOG(" %10s", "N/A");
    }

    //hWNS
    if (timing_info) {
        float hWNS = timing_info->hold_worst_negative_slack();
        VTR_LOG(" % #10.3f", 1e9 * hWNS);
    } else {
        VTR_LOG(" %10s", "N/A");
    }

    //Estimated success iteration
    if (std::isnan(est_success_iteration)) {
        VTR_LOG(" %8s", "N/A");
    } else {
        VTR_LOG(" %8.0f", est_success_iteration);
    }

    VTR_LOG("\n");

    fflush(stdout);
}

static void print_overused_nodes_status(const t_router_opts& router_opts, const OveruseInfo& overuse_info) {
    //Print the index of this routing failure
    VTR_LOG("\nFailed routing attempt #%d\n", num_routing_failed);

    size_t num_overused = overuse_info.overused_nodes;
    size_t max_logged_overused_rr_nodes =
router_opts.max_logged_overused_rr_nodes; //Overused nodes info logging upper limit VTR_LOG("Total number of overused nodes: %d\n", num_overused); if (num_overused > max_logged_overused_rr_nodes) { VTR_LOG("Total number of overused nodes is larger than the logging limit (%d).\n", max_logged_overused_rr_nodes); VTR_LOG("Displaying the first %d entries.\n", max_logged_overused_rr_nodes); } log_overused_nodes_status(max_logged_overused_rr_nodes); VTR_LOG("\n"); } static void print_router_criticality_histogram(const SetupTimingInfo& timing_info, const ClusteredPinAtomPinsLookup& netlist_pin_lookup) { print_histogram(create_criticality_histogram(timing_info, netlist_pin_lookup, 10)); } //Returns true if the specified net fanout is classified as high fanout static bool is_high_fanout(int fanout, int fanout_threshold) { if (fanout_threshold < 0 || fanout < fanout_threshold) return false; return true; } //In heavily congested designs a static bounding box (BB) can //become problematic for routability (it effectively enforces a //hard blockage restricting where a net can route). // //For instance, the router will try to route non-critical connections //away from congested regions, but may end up hitting the edge of the //bounding box. Limiting how far out-of-the-way it can be routed, and //preventing congestion from resolving. // //To alleviate this, we dynamically expand net bounding boxes if the net's //*current* routing uses RR nodes 'close' to the edge of it's bounding box. // //The result is that connections trying to move out of the way and hitting //their BB will have their bounding boxes will expand slowly in that direction. //This helps spread out regions of heavy congestion (over several routing //iterations). // //By growing the BBs slowly and only as needed we minimize the size of the BBs. //This helps keep the router's graph search fast. // //Typically, only a small minority of nets (typically > 10%) have their BBs updated //each routing iteration. 
// Expands the router bounding box of each net in updated_nets whose current
// routing comes close to (or crosses) its BB edge. Returns the number of nets
// whose BB was grown. High-fanout nets are excluded (they use per-target BBs).
static size_t dynamic_update_bounding_boxes(const std::vector<ClusterNetId>& updated_nets, int high_fanout_threshold) {
    auto& device_ctx = g_vpr_ctx.device();
    auto& cluster_ctx = g_vpr_ctx.clustering();
    auto& route_ctx = g_vpr_ctx.mutable_routing();

    auto& clb_nlist = cluster_ctx.clb_nlist;
    auto& grid = device_ctx.grid;

    //Controls how close a net's routing needs to be to it's bounding box
    //before the bounding box is expanded.
    //
    //A value of zero indicates that the routing needs to be at the bounding box
    //edge
    constexpr int DYNAMIC_BB_DELTA_THRESHOLD = 0;

    //Walk through each net, calculating the bounding box of its current routing,
    //and then increase the router's bounding box if the two are close together

    int grid_xmax = grid.width() - 1;
    int grid_ymax = grid.height() - 1;

    size_t num_bb_updated = 0;

    for (ClusterNetId net : updated_nets) {
        t_trace* routing_head = route_ctx.trace[net].head;

        if (routing_head == nullptr) continue; //Skip if no routing

        //We do not adjust the bounding boxes of high fanout nets, since they
        //use different bounding boxes based on the target location.
        //
        //This ensures that the delta values calculated below are always non-negative
        if (is_high_fanout(clb_nlist.net_sinks(net).size(), high_fanout_threshold)) continue;

        t_bb curr_bb = calc_current_bb(routing_head);

        t_bb& router_bb = route_ctx.route_bb[net];

        //Calculate the distances between the net's used RR nodes and
        //the router's bounding box
        int delta_xmin = curr_bb.xmin - router_bb.xmin;
        int delta_xmax = router_bb.xmax - curr_bb.xmax;
        int delta_ymin = curr_bb.ymin - router_bb.ymin;
        int delta_ymax = router_bb.ymax - curr_bb.ymax;

        //Note that if the net uses non-configurable switches it's routing
        //may end-up outside the bounding boxes, so the delta values may be
        //negative. The code below will expand the bounding box in those
        //cases.

        //Expand each dimension by one if within DYNAMIC_BB_DELTA_THRESHOLD threshold
        bool updated_bb = false;
        if (delta_xmin <= DYNAMIC_BB_DELTA_THRESHOLD && router_bb.xmin > 0) {
            --router_bb.xmin;
            updated_bb = true;
        }

        if (delta_ymin <= DYNAMIC_BB_DELTA_THRESHOLD && router_bb.ymin > 0) {
            --router_bb.ymin;
            updated_bb = true;
        }

        if (delta_xmax <= DYNAMIC_BB_DELTA_THRESHOLD && router_bb.xmax < grid_xmax) {
            ++router_bb.xmax;
            updated_bb = true;
        }

        if (delta_ymax <= DYNAMIC_BB_DELTA_THRESHOLD && router_bb.ymax < grid_ymax) {
            ++router_bb.ymax;
            updated_bb = true;
        }

        if (updated_bb) {
            ++num_bb_updated;
            //VTR_LOG("Expanded net %6zu router BB to (%d,%d)x(%d,%d) based on net RR node BB (%d,%d)x(%d,%d)\n", size_t(net),
            //router_bb.xmin, router_bb.ymin, router_bb.xmax, router_bb.ymax,
            //curr_bb.xmin, curr_bb.ymin, curr_bb.xmax, curr_bb.ymax);
        }
    }
    return num_bb_updated;
}

//Returns the bounding box of a net's used routing resources
static t_bb calc_current_bb(const t_trace* head) {
    auto& device_ctx = g_vpr_ctx.device();
    const auto& rr_graph = device_ctx.rr_graph;
    auto& grid = device_ctx.grid;

    t_bb bb;
    // Start from an inverted (empty) box so the first node initializes it.
    bb.xmin = grid.width() - 1;
    bb.ymin = grid.height() - 1;
    bb.xmax = 0;
    bb.ymax = 0;

    for (const t_trace* elem = head; elem != nullptr; elem = elem->next) {
        const t_rr_node& node = device_ctx.rr_nodes[elem->index];
        //The router interprets RR nodes which cross the boundary as being
// Only those which are *strictly* outside the box are excluded, hence we use
        // the node's xhigh/yhigh for xmin/xmax, and xlow/ylow for xmax/ymax calculations
        bb.xmin = std::min<int>(bb.xmin, rr_graph.node_xhigh(node.id()));
        bb.ymin = std::min<int>(bb.ymin, rr_graph.node_yhigh(node.id()));

        bb.xmax = std::max<int>(bb.xmax, rr_graph.node_xlow(node.id()));
        bb.ymax = std::max<int>(bb.ymax, rr_graph.node_ylow(node.id()));
    }

    VTR_ASSERT(bb.xmin <= bb.xmax);
    VTR_ASSERT(bb.ymin <= bb.ymax);

    return bb;
}

// Turns detailed router debug output on/off for the current net/sink/iteration
// according to the router_debug_* options. A value of -1 (net), < 0 (sink) or
// < 0 (iteration) acts as a wildcard matching everything.
void enable_router_debug(
    const t_router_opts& router_opts,
    ClusterNetId net,
    int sink_rr,
    int router_iteration,
    ConnectionRouterInterface* router) {
    bool active_net_debug = (router_opts.router_debug_net >= -1);
    bool active_sink_debug = (router_opts.router_debug_sink_rr >= 0);
    bool active_iteration_debug = (router_opts.router_debug_iteration >= 0);

    bool match_net = (ClusterNetId(router_opts.router_debug_net) == net || router_opts.router_debug_net == -1);
    bool match_sink = (router_opts.router_debug_sink_rr == sink_rr || router_opts.router_debug_sink_rr < 0);
    bool match_iteration = (router_opts.router_debug_iteration == router_iteration || router_opts.router_debug_iteration < 0);

    // Debug is enabled only when every *active* filter matches.
    f_router_debug = active_net_debug || active_sink_debug || active_iteration_debug;
    router->set_router_debug(f_router_debug);

    if (active_net_debug) f_router_debug &= match_net;
    if (active_sink_debug) f_router_debug &= match_sink;
    if (active_iteration_debug) f_router_debug &= match_iteration;

#ifndef VTR_ENABLE_DEBUG_LOGGING
    VTR_LOGV_WARN(f_router_debug, "Limited router debug output provided since compiled without VTR_ENABLE_DEBUG_LOGGING defined\n");
#endif
}

bool is_iteration_complete(bool routing_is_feasible, const t_router_opts& router_opts, int itry, std::shared_ptr<const SetupHoldTimingInfo> timing_info, bool rcv_finished) {
    //This function checks if a routing iteration has completed.
//When VPR is run normally, we check if routing_budgets_algorithm is disabled, and if the routing is legal //With the introduction of yoyo budgeting algorithm, we must check if there are no hold violations //in addition to routing being legal and the correct budgeting algorithm being set. if (routing_is_feasible) { if (router_opts.routing_budgets_algorithm != YOYO) { return true; } else if (router_opts.routing_budgets_algorithm == YOYO && (timing_info->hold_worst_negative_slack() == 0 || rcv_finished) && itry != 1) { return true; } } return false; } bool should_setup_lower_bound_connection_delays(int itry, const t_router_opts& /*router_opts*/) { /* Checks to see if router should (re)calculate route budgets * It's currently set to only calculate after the first routing iteration */ if (itry == 1) return true; return false; } static bool is_better_quality_routing(const vtr::vector<ClusterNetId, t_traceback>& best_routing, const RoutingMetrics& best_routing_metrics, const WirelengthInfo& wirelength_info, std::shared_ptr<const SetupHoldTimingInfo> timing_info) { if (best_routing.empty()) { return true; //First legal routing } //Rank first based on sWNS, followed by other timing metrics if (timing_info) { if (timing_info->setup_worst_negative_slack() > best_routing_metrics.sWNS) { return true; } else if (timing_info->setup_worst_negative_slack() < best_routing_metrics.sWNS) { return false; } if (timing_info->setup_total_negative_slack() > best_routing_metrics.sTNS) { return true; } else if (timing_info->setup_total_negative_slack() < best_routing_metrics.sTNS) { return false; } if (timing_info->hold_worst_negative_slack() > best_routing_metrics.hWNS) { return true; } else if (timing_info->hold_worst_negative_slack() > best_routing_metrics.hWNS) { return false; } if (timing_info->hold_total_negative_slack() > best_routing_metrics.hTNS) { return true; } else if (timing_info->hold_total_negative_slack() > best_routing_metrics.hTNS) { return false; } } //Finally, wirelength 
tie breaker return wirelength_info.used_wirelength() < best_routing_metrics.used_wirelength; } static bool early_reconvergence_exit_heuristic(const t_router_opts& router_opts, int itry_since_last_convergence, std::shared_ptr<const SetupHoldTimingInfo> timing_info, const RoutingMetrics& best_routing_metrics) { //Give-up on reconvergent routing if the CPD improvement after the //first iteration since convergence is small, compared to the best //CPD seen so far if (itry_since_last_convergence == 1) { float cpd_ratio = timing_info->setup_worst_negative_slack() / best_routing_metrics.sWNS; //Give up if we see less than a 1% CPD improvement, //after reducing pres_fac. Typically larger initial //improvements are needed to see an actual improvement //in final legal routing quality. if (cpd_ratio >= router_opts.reconvergence_cpd_threshold) { VTR_LOG("Giving up routing since additional routing convergences seem unlikely to improve quality (CPD ratio: %g)\n", cpd_ratio); return true; //Potential CPD improvement is small, don't spend run-time trying to improve it } } return false; //Don't give up } static void generate_route_timing_reports(const t_router_opts& router_opts, const t_analysis_opts& analysis_opts, const SetupTimingInfo& timing_info, const RoutingDelayCalculator& delay_calc) { auto& timing_ctx = g_vpr_ctx.timing(); auto& atom_ctx = g_vpr_ctx.atom(); VprTimingGraphResolver resolver(atom_ctx.nlist, atom_ctx.lookup, *timing_ctx.graph, delay_calc); resolver.set_detail_level(analysis_opts.timing_report_detail); tatum::TimingReporter timing_reporter(resolver, *timing_ctx.graph, *timing_ctx.constraints); timing_reporter.report_timing_setup(router_opts.first_iteration_timing_report_file, *timing_info.setup_analyzer(), analysis_opts.timing_report_npaths); } // If a route is ripped up during routing, non-configurable sets are left // behind. As a result, the final routing may have stubs at // non-configurable sets. 
This function tracks non-configurable set usage, // and if the sets are unused, prunes them. static void prune_unused_non_configurable_nets(CBRR& connections_inf) { auto& device_ctx = g_vpr_ctx.device(); auto& cluster_ctx = g_vpr_ctx.clustering(); auto& route_ctx = g_vpr_ctx.routing(); std::vector<int> non_config_node_set_usage(device_ctx.rr_non_config_node_sets.size(), 0); for (auto net_id : cluster_ctx.clb_nlist.nets()) { connections_inf.prepare_routing_for_net(net_id); connections_inf.clear_force_reroute_for_net(); std::fill(non_config_node_set_usage.begin(), non_config_node_set_usage.end(), 0); t_rt_node* rt_root = traceback_to_route_tree(net_id, &non_config_node_set_usage); if (rt_root == nullptr) { continue; } //Sanity check that route tree and traceback are equivalent before pruning VTR_ASSERT(verify_traceback_route_tree_equivalent( route_ctx.trace[net_id].head, rt_root)); // check for edge correctness VTR_ASSERT_SAFE(is_valid_skeleton_tree(rt_root)); //Prune the branches of the tree that don't legally lead to sinks rt_root = prune_route_tree(rt_root, connections_inf, &non_config_node_set_usage); // Free old traceback. free_traceback(net_id); // Update traceback with pruned tree. 
auto& reached_rt_sinks = connections_inf.get_reached_rt_sinks(); traceback_from_route_tree(net_id, rt_root, reached_rt_sinks.size()); VTR_ASSERT(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root)); free_route_tree(rt_root); } } //Initializes net_delay based on best-case delay estimates from the router lookahead static void init_net_delay_from_lookahead(const RouterLookahead& router_lookahead, ClbNetPinsMatrix<float>& net_delay) { auto& cluster_ctx = g_vpr_ctx.clustering(); auto& route_ctx = g_vpr_ctx.routing(); t_conn_cost_params cost_params; cost_params.criticality = 1.; //Ensures lookahead returns delay value for (auto net_id : cluster_ctx.clb_nlist.nets()) { if (cluster_ctx.clb_nlist.net_is_ignored(net_id)) continue; int source_rr = route_ctx.net_rr_terminals[net_id][0]; for (size_t ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ++ipin) { int sink_rr = route_ctx.net_rr_terminals[net_id][ipin]; float est_delay = router_lookahead.get_expected_cost(RRNodeId(source_rr), RRNodeId(sink_rr), cost_params, /*R_upstream=*/0.); VTR_ASSERT(std::isfinite(est_delay) && est_delay < std::numeric_limits<float>::max()); net_delay[net_id][ipin] = est_delay; } } } #ifndef NO_GRAPHICS //updates router iteration information and checks for router iteration and net id breakpoints //stops after the specified router iteration or net id is encountered void update_router_info_and_check_bp(bp_router_type type, int net_id) { t_draw_state* draw_state = get_draw_state_vars(); if (draw_state->list_of_breakpoints.size() != 0) { if (type == BP_ROUTE_ITER) get_bp_state_globals()->get_glob_breakpoint_state()->router_iter++; else if (type == BP_NET_ID) get_bp_state_globals()->get_glob_breakpoint_state()->route_net_id = net_id; f_router_debug = check_for_breakpoints(false); if (f_router_debug) { breakpoint_info_window(get_bp_state_globals()->get_glob_breakpoint_state()->bp_description, *get_bp_state_globals()->get_glob_breakpoint_state(), false); 
update_screen(ScreenUpdatePriority::MAJOR, "Breakpoint Encountered", ROUTING, nullptr); } } } #endif
45.129536
288
0.638462
[ "vector" ]
1121d216f280ab2492a72dc275b2822b356359b8
8,816
cpp
C++
openstudiocore/src/model/test/CoilHeatingDesuperheater_GTest.cpp
zhouchong90/OpenStudio
f8570cb8297547b5e9cc80fde539240d8f7b9c24
[ "BSL-1.0", "blessing" ]
null
null
null
openstudiocore/src/model/test/CoilHeatingDesuperheater_GTest.cpp
zhouchong90/OpenStudio
f8570cb8297547b5e9cc80fde539240d8f7b9c24
[ "BSL-1.0", "blessing" ]
null
null
null
openstudiocore/src/model/test/CoilHeatingDesuperheater_GTest.cpp
zhouchong90/OpenStudio
f8570cb8297547b5e9cc80fde539240d8f7b9c24
[ "BSL-1.0", "blessing" ]
null
null
null
/********************************************************************** * Copyright (c) 2008-2014, Alliance for Sustainable Energy. * All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********************************************************************/ #include <gtest/gtest.h> #include "ModelFixture.hpp" #include "../CoilHeatingDesuperheater.hpp" #include "../CoilHeatingDesuperheater_Impl.hpp" #include "../RefrigerationCondenserAirCooled.hpp" #include "../RefrigerationCondenserAirCooled_Impl.hpp" #include "../ScheduleCompact.hpp" #include "../ScheduleCompact_Impl.hpp" #include "../AirLoopHVAC.hpp" #include "../PlantLoop.hpp" #include "../Node.hpp" #include "../Node_Impl.hpp" #include "../AirLoopHVACZoneSplitter.hpp" #include "../AirLoopHVACOutdoorAirSystem.hpp" #include "../ControllerOutdoorAir.hpp" using namespace openstudio; using namespace openstudio::model; //Test construction of Coil:Heating:Desuperheater TEST_F(ModelFixture, CoilHeatingDesuperheater_DefaultConstructor) { ::testing::FLAGS_gtest_death_test_style = "threadsafe"; ASSERT_EXIT ( { Model model; CoilHeatingDesuperheater testObject = CoilHeatingDesuperheater(model); exit(0); } , ::testing::ExitedWithCode(0), ""); } //Test removal of Coil:Heating:Desuperheater TEST_F(ModelFixture, CoilHeatingDesuperheater_Remove) { 
Model model; CoilHeatingDesuperheater testObject = CoilHeatingDesuperheater(model); RefrigerationCondenserAirCooled condenser = RefrigerationCondenserAirCooled(model); testObject.setHeatingSource(condenser); std::vector<CoilHeatingDesuperheater> coilHeatingDesuperheaters = model.getModelObjects<CoilHeatingDesuperheater>(); EXPECT_EQ(1, coilHeatingDesuperheaters.size()); std::vector<RefrigerationCondenserAirCooled> refrigerationCondensers = model.getModelObjects<RefrigerationCondenserAirCooled>(); EXPECT_EQ(1, refrigerationCondensers.size()); testObject.remove(); coilHeatingDesuperheaters = model.getModelObjects<CoilHeatingDesuperheater>(); EXPECT_EQ(0, coilHeatingDesuperheaters.size()); refrigerationCondensers = model.getModelObjects<RefrigerationCondenserAirCooled>(); EXPECT_EQ(1, refrigerationCondensers.size()); } //Test the methods that set and get the fields TEST_F(ModelFixture, CoilHeatingDesuperheater_set_get) { Model model; CoilHeatingDesuperheater desuperheater(model); desuperheater.setHeatReclaimRecoveryEfficiency(999.0); desuperheater.setParasiticElectricLoad(999.0); EXPECT_DOUBLE_EQ(desuperheater.heatReclaimRecoveryEfficiency(),999.0); EXPECT_DOUBLE_EQ(desuperheater.parasiticElectricLoad(),999.0); } TEST_F(ModelFixture,CoilHeatingDesuperheater_addToNode) { Model m; CoilHeatingDesuperheater testObject(m); AirLoopHVAC airLoop(m); ControllerOutdoorAir controllerOutdoorAir(m); AirLoopHVACOutdoorAirSystem outdoorAirSystem(m,controllerOutdoorAir); Node supplyOutletNode = airLoop.supplyOutletNode(); outdoorAirSystem.addToNode(supplyOutletNode); EXPECT_TRUE(testObject.addToNode(supplyOutletNode)); EXPECT_EQ( (unsigned)5, airLoop.supplyComponents().size() ); Node inletNode = airLoop.zoneSplitter().lastOutletModelObject()->cast<Node>(); EXPECT_FALSE(testObject.addToNode(inletNode)); EXPECT_EQ((unsigned)5, airLoop.demandComponents().size()); PlantLoop plantLoop(m); supplyOutletNode = plantLoop.supplyOutletNode(); 
EXPECT_FALSE(testObject.addToNode(supplyOutletNode)); EXPECT_EQ( (unsigned)5, plantLoop.supplyComponents().size() ); Node demandOutletNode = plantLoop.demandOutletNode(); EXPECT_FALSE(testObject.addToNode(demandOutletNode)); EXPECT_EQ( (unsigned)5, plantLoop.demandComponents().size() ); CoilHeatingDesuperheater testObject2(m); if( boost::optional<Node> OANode = outdoorAirSystem.outboardOANode() ) { EXPECT_FALSE(testObject2.addToNode(*OANode)); EXPECT_EQ( (unsigned)5, airLoop.supplyComponents().size() ); EXPECT_EQ( (unsigned)1, outdoorAirSystem.oaComponents().size() ); } CoilHeatingDesuperheater testObject3(m); if( boost::optional<Node> reliefNode = outdoorAirSystem.outboardReliefNode() ) { EXPECT_FALSE(testObject3.addToNode(*reliefNode)); EXPECT_EQ( (unsigned)5, airLoop.supplyComponents().size() ); EXPECT_EQ( (unsigned)1, outdoorAirSystem.reliefComponents().size() ); } CoilHeatingDesuperheater testObjectClone = testObject.clone(m).cast<CoilHeatingDesuperheater>(); supplyOutletNode = airLoop.supplyOutletNode(); EXPECT_TRUE(testObjectClone.addToNode(supplyOutletNode)); EXPECT_EQ( (unsigned)7, airLoop.supplyComponents().size() ); } TEST_F(ModelFixture, CoilHeatingDesuperheater_AddBranchForHVACComponent_AirLoop) { Model model; CoilHeatingDesuperheater desuperheater(model); AirLoopHVAC airLoop = AirLoopHVAC(model); EXPECT_FALSE(airLoop.addBranchForHVACComponent(desuperheater)); } TEST_F(ModelFixture, CoilHeatingDesuperheater_AddDemandBranchForComponent_PlantLoop) { Model model; CoilHeatingDesuperheater desuperheater(model); PlantLoop plantLoop = PlantLoop(model); EXPECT_FALSE(plantLoop.addDemandBranchForComponent(desuperheater)); } TEST_F(ModelFixture, CoilHeatingDesuperheater_AddSupplyBranchForComponent_PlantLoop) { Model model; CoilHeatingDesuperheater desuperheater(model); PlantLoop plantLoop = PlantLoop(model); EXPECT_FALSE(plantLoop.addSupplyBranchForComponent(desuperheater)); } //Test clone model with default data TEST_F(ModelFixture, 
CoilHeatingDesuperheater_CloneModelWithDefaultData) { Model model; CoilHeatingDesuperheater testObject = CoilHeatingDesuperheater(model); CoilHeatingDesuperheater testObjectClone = testObject.clone(model).cast<CoilHeatingDesuperheater>(); EXPECT_DOUBLE_EQ(testObjectClone.heatReclaimRecoveryEfficiency(),0.8); EXPECT_DOUBLE_EQ(testObjectClone.parasiticElectricLoad(),0.0); } //Test clone model with custom data TEST_F(ModelFixture, CoilHeatingDesuperheater_CloneModelWithCustomData) { Model model; CoilHeatingDesuperheater testObject = CoilHeatingDesuperheater(model); RefrigerationCondenserAirCooled condenser = RefrigerationCondenserAirCooled(model); ScheduleCompact alwaysOn = ScheduleCompact(model); testObject.setHeatReclaimRecoveryEfficiency(0.5); testObject.setParasiticElectricLoad(1.0); testObject.setHeatingSource(condenser); testObject.setAvailabilitySchedule(alwaysOn); CoilHeatingDesuperheater testObjectClone = testObject.clone(model).cast<CoilHeatingDesuperheater>(); EXPECT_DOUBLE_EQ(testObjectClone.heatReclaimRecoveryEfficiency(),0.5); EXPECT_DOUBLE_EQ(testObjectClone.parasiticElectricLoad(),1.0); EXPECT_TRUE(testObjectClone.availabilitySchedule()); EXPECT_EQ(alwaysOn, testObjectClone.availabilitySchedule().get()); EXPECT_FALSE(testObjectClone.heatingSource()); } //Test clone two model with custom data TEST_F(ModelFixture, CoilHeatingDesuperheater_CloneTwoModelWithCustomData) { Model model; CoilHeatingDesuperheater testObject = CoilHeatingDesuperheater(model); RefrigerationCondenserAirCooled condenser = RefrigerationCondenserAirCooled(model); ScheduleCompact alwaysOn = ScheduleCompact(model); testObject.setHeatReclaimRecoveryEfficiency(0.5); testObject.setParasiticElectricLoad(1.0); testObject.setHeatingSource(condenser); testObject.setAvailabilitySchedule(alwaysOn); CoilHeatingDesuperheater testObjectClone = testObject.clone(model).cast<CoilHeatingDesuperheater>(); Model model2; CoilHeatingDesuperheater testObjectClone2 = 
testObject.clone(model2).cast<CoilHeatingDesuperheater>(); EXPECT_DOUBLE_EQ(testObjectClone2.heatReclaimRecoveryEfficiency(),0.5); EXPECT_DOUBLE_EQ(testObjectClone2.parasiticElectricLoad(),1.0); EXPECT_TRUE(testObjectClone2.availabilitySchedule()); EXPECT_NE(alwaysOn, testObjectClone2.availabilitySchedule().get()); EXPECT_FALSE(testObjectClone2.heatingSource()); EXPECT_NE(testObjectClone2, testObjectClone); EXPECT_NE(testObjectClone, testObject); }
37.675214
132
0.773593
[ "vector", "model" ]
1122c58f53cb85180b01945bf4b67ca354a0fc8b
691
cpp
C++
src/UI/Label.cpp
Nickswoboda/Aegis
60e0e770892bdcb378686508ca455f6b5d522e33
[ "MIT" ]
null
null
null
src/UI/Label.cpp
Nickswoboda/Aegis
60e0e770892bdcb378686508ca455f6b5d522e33
[ "MIT" ]
null
null
null
src/UI/Label.cpp
Nickswoboda/Aegis
60e0e770892bdcb378686508ca455f6b5d522e33
[ "MIT" ]
null
null
null
#include "Label.h" #include "../Renderer/Renderer.h" namespace Aegis{ Label::Label(const std::string& text, Vec2 pos, Vec4 color) :color_(color), text_(text) { auto size = font_->GetStringPixelSize(text); rect_ = {pos.x, pos.y, size.x, size.y}; }; void Label::Render() const { if(visible_){ Renderer2D::SetFont(font_); DrawText(text_, rect_.pos, color_); } } void Label::SetFont(std::shared_ptr<Aegis::Font> font) { font_ = font; SetSize(font_->GetStringPixelSize(text_)); } void Label::SetText(const std::string& text) { text_ = text; SetSize(font_->GetStringPixelSize(text)); } const std::string& Label::GetText() const { return text_; } }
17.275
60
0.66136
[ "render" ]
1127304038f1a7ebbd6471912458f53c574d914e
3,262
cc
C++
kmsp11/operation/ecdsa.cc
bdhess/kms-integrations
ed71842310687b174b79cda58fcbec09b32873c2
[ "Apache-2.0" ]
null
null
null
kmsp11/operation/ecdsa.cc
bdhess/kms-integrations
ed71842310687b174b79cda58fcbec09b32873c2
[ "Apache-2.0" ]
null
null
null
kmsp11/operation/ecdsa.cc
bdhess/kms-integrations
ed71842310687b174b79cda58fcbec09b32873c2
[ "Apache-2.0" ]
1
2022-02-07T11:29:09.000Z
2022-02-07T11:29:09.000Z
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "kmsp11/operation/ecdsa.h" #include <string_view> #include "kmsp11/operation/crypter_interfaces.h" #include "kmsp11/operation/preconditions.h" #include "kmsp11/util/crypto_utils.h" #include "kmsp11/util/errors.h" #include "kmsp11/util/status_macros.h" namespace kmsp11 { absl::StatusOr<std::unique_ptr<SignerInterface>> EcdsaSigner::New( std::shared_ptr<Object> key, const CK_MECHANISM* mechanism) { RETURN_IF_ERROR( CheckKeyPreconditions(CKK_EC, CKO_PRIVATE_KEY, CKM_ECDSA, key.get())); RETURN_IF_ERROR(EnsureNoParameters(mechanism)); ASSIGN_OR_RETURN(std::string_view key_der, key->attributes().Value(CKA_PUBLIC_KEY_INFO)); ASSIGN_OR_RETURN(bssl::UniquePtr<EVP_PKEY> parsed_key, ParseX509PublicKeyDer(key_der)); return std::unique_ptr<SignerInterface>(new EcdsaSigner( key, bssl::UniquePtr<EC_KEY>(EVP_PKEY_get1_EC_KEY(parsed_key.get())))); } size_t EcdsaSigner::signature_length() { return EcdsaSigLengthP1363(EC_KEY_get0_group(key_.get())); } absl::Status EcdsaSigner::CopySignature(std::string_view src, absl::Span<uint8_t> dest) { ASSIGN_OR_RETURN(std::vector<uint8_t> p1363_sig, EcdsaSigAsn1ToP1363(src, EC_KEY_get0_group(key_.get()))); if (p1363_sig.size() != signature_length()) { return NewInternalError( absl::StrFormat("unexpected signature length (got %d, want %d)", p1363_sig.size(), signature_length()), SOURCE_LOCATION); } std::copy(p1363_sig.begin(), p1363_sig.end(), dest.data()); return 
absl::OkStatus(); } absl::StatusOr<std::unique_ptr<VerifierInterface>> EcdsaVerifier::New( std::shared_ptr<Object> key, const CK_MECHANISM* mechanism) { RETURN_IF_ERROR( CheckKeyPreconditions(CKK_EC, CKO_PUBLIC_KEY, CKM_ECDSA, key.get())); RETURN_IF_ERROR(EnsureNoParameters(mechanism)); ASSIGN_OR_RETURN(std::string_view key_der, key->attributes().Value(CKA_PUBLIC_KEY_INFO)); ASSIGN_OR_RETURN(bssl::UniquePtr<EVP_PKEY> parsed_key, ParseX509PublicKeyDer(key_der)); return std::unique_ptr<VerifierInterface>(new EcdsaVerifier( key, bssl::UniquePtr<EC_KEY>(EVP_PKEY_get1_EC_KEY(parsed_key.get())))); } absl::Status EcdsaVerifier::Verify(KmsClient* client, absl::Span<const uint8_t> digest, absl::Span<const uint8_t> signature) { ASSIGN_OR_RETURN(const EVP_MD* md, DigestForMechanism(*object_->algorithm().digest_mechanism)); return EcdsaVerifyP1363(key_.get(), md, digest, signature); } } // namespace kmsp11
38.833333
79
0.70049
[ "object", "vector" ]
112bf8b994b4f88f2da0aadbfeb663da92ab1b2e
2,278
cpp
C++
CodeForces/codeforces766E.cpp
bilibiliShen/CodeBank
49a69b2b2c3603bf105140a9d924946ed3193457
[ "MIT" ]
1
2017-08-19T16:02:15.000Z
2017-08-19T16:02:15.000Z
CodeForces/codeforces766E.cpp
bilibiliShen/CodeBank
49a69b2b2c3603bf105140a9d924946ed3193457
[ "MIT" ]
null
null
null
CodeForces/codeforces766E.cpp
bilibiliShen/CodeBank
49a69b2b2c3603bf105140a9d924946ed3193457
[ "MIT" ]
1
2018-01-05T23:37:23.000Z
2018-01-05T23:37:23.000Z
// <!--encoding UTF-8 UTF-8编码--!> /***************************************************************************** * ----Stay Hungry Stay Foolish---- * * @author : Shen * * @name : Codeforces Round #396 (Div. 2) E * *****************************************************************************/ #include <bits/stdc++.h> using namespace std; typedef long long int64; template<class T>inline bool updateMin(T& a, T b){ return a > b ? a = b, 1: 0; } template<class T>inline bool updateMax(T& a, T b){ return a < b ? a = b, 1: 0; } inline int nextInt() { int x; scanf("%d", &x); return x; } inline int64 nextI64() { int64 d; cin >> d; return d; } inline char nextChr() { scanf(" "); return getchar(); } inline string nextStr() { string s; cin >> s; return s; } inline double nextDbf() { double x; scanf("%lf", &x); return x; } inline int64 nextlld() { int64 d; scanf("%lld", &d); return d; } inline int64 next64d() { int64 d; scanf("%I64d",&d); return d; } /*//Computational Geometry #include <complex> #define x real() #define y imag() typedef complex<double> point; */ const int MAXN = 100005; vector<int> a, edge[MAXN]; using point = pair<int, int>; inline point operator+(const point& a, const point& b) { return { a.first + b.first, a.second + b.second }; } point dfs(int now, int prev, int hoge, int64& cnt) { point ret = { 0, 0 }; int flag = (a[now] >> hoge) & 1; for (int next : edge[now]) { if (next == prev) continue; auto tmp = dfs(next, now, hoge, cnt); if (flag) { cnt += ret.first * tmp.first; cnt += ret.second * tmp.second; } else { cnt += ret.first * tmp.second; cnt += ret.second * tmp.first; } ret = ret + tmp; } ret.first += 1; if (flag) swap(ret.first, ret.second); cnt += ret.second; return ret; } int main() { int n = nextInt(); for (int i = 0; i < n; i++) a.push_back(nextInt()); for (int i = 1; i < n; i++) { int u = nextInt() - 1; int v = nextInt() - 1; edge[u].push_back(v); edge[v].push_back(u); } int64 ans = 0, cnt = 0; for (int i = 0; i < 20; i++) { cnt = 0; dfs(0, 0, i, 
cnt); ans += (1 << i) * cnt; } cout << ans << endl; return 0; }
26.8
80
0.506146
[ "geometry", "vector" ]
112e54f8c36fdef59ae9a7e8ab8ba54c104832a3
4,476
cpp
C++
PauloDerAlchemist_Original/Paulo_der_Alchemist/src/machine.cpp
Assertores/PauloDerAlchemist
a3a6e92a9a747ac19adead9bf2acd5999630d9ba
[ "MIT" ]
1
2020-03-20T15:59:47.000Z
2020-03-20T15:59:47.000Z
PauloDerAlchemist_Original/Paulo_der_Alchemist/src/machine.cpp
Assertores/PauloDerAlchemist
a3a6e92a9a747ac19adead9bf2acd5999630d9ba
[ "MIT" ]
null
null
null
PauloDerAlchemist_Original/Paulo_der_Alchemist/src/machine.cpp
Assertores/PauloDerAlchemist
a3a6e92a9a747ac19adead9bf2acd5999630d9ba
[ "MIT" ]
null
null
null
#include "machine.h" Machine::Machine(InfMach Var){ OutPrimeIndex = (int)Var.PrimeUsage; OutSecondaryIndex = (int)Var.SecondaryUsage; InPrimeIndex = (int)Var.Prime.second; InSecondaryIndex = (int)Var.Secondary.second; ComIndex.first = Var.Prime.first; ComIndex.second = Var.Secondary.first; Ratio = 50; LastProcess.Prime.first = Var.Prime.first; LastProcess.Prime.second = 0; LastProcess.Secondary.first = Var.Secondary.first; LastProcess.Secondary.second = 0; LastProcess.PrimeUsage = 0; LastProcess.SecondaryUsage = 0; lastEfficiency = 0; on = true; Container t1; Container t2; t1.Amount = t2.Amount = 50; t1.Content = Var.Prime.first; t2.Content = Var.Secondary.first; process(t1, t2); } InfMach Machine::process(Container Prime, Container Secondary){ ComIndex.first = Prime.Content; ComIndex.second = Secondary.Content; Combination *Com = &ComAtlas[ComIndex]; InfMach ret; if(Prime.Amount == 0 || Secondary.Amount == 0 || Ratio <= 0 || Ratio >= 100 || !on){ ret.PrimeUsage = 0; ret.SecondaryUsage = 0; ret.Prime.second = 0; ret.Secondary.second = 0; LastProcess = ret; return ret; } float Efficiency; float Out; if((float)Com->XMax < ((float)Ratio/100)*16){ Efficiency = -0.5*(Com->YMax+1)*cos((((Ratio/100)*16 - 16) * M_PI)/(Com->XMax - 16))+0.5*(Com->YMax+1); }else{ Efficiency = -0.5*(Com->YMax+1)*cos(((Ratio/100)*16 * M_PI)/Com->XMax)+0.5*(Com->YMax+1); } lastEfficiency = Efficiency; Out = (exp2(-(((Ratio/100)*16 - Com->XCurve) * ((Ratio/100)*16 - Com->XCurve))/(10*Com->DevCurve))*Com->AmpCurve+Com->YCurve)/2 + 8; ret.Prime.first = Com->OutPrime; ret.Secondary.first = Com->OutSecondary; ret.PrimeUsage = Efficiency/16 * MaxOutput * Ratio/100; ret.SecondaryUsage = (Efficiency/16 * MaxOutput) - ret.PrimeUsage; float usage = (float)Prime.Amount/ret.PrimeUsage; if ((float)Secondary.Amount/ret.SecondaryUsage < usage){ usage = (float)Secondary.Amount/ret.SecondaryUsage; } if (usage > 1){ usage = 1; } #ifndef NDEBUG std::cout << Prime.Amount << "/" << ret.PrimeUsage << " (" << 
(float)Prime.Amount/ret.PrimeUsage << ") >? " << Secondary.Amount << "/" << ret.SecondaryUsage << " (" << (float)Secondary.Amount/ret.SecondaryUsage << ")" << std::endl; std::cout << "usage = " << usage << std::endl; #endif ret.PrimeUsage *= usage; ret.SecondaryUsage *= usage; ret.Prime.second = (Out * Efficiency)/256 * MaxOutput * usage; ret.Secondary.second =((16-Out) * Efficiency)/256 * MaxOutput * usage; LastProcess = ret; return ret; } void Machine::ChangeRatio(int New){ if (New < 0){ Ratio = 0; return; } if (New > 100){ Ratio = 100; return; } Ratio = New; } void Machine::Render(unsigned int ContainerSize){ Combination *Com = &ComAtlas[ComIndex]; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } std::cout << UR << LOR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LOR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LU; std::cout << " State: "; if(on) std::cout << "on"; else std::cout << "off"; std::cout << std::endl; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } std::cout << OU << std::left << std::setw(11) << GetName(ComIndex.first) << std::setw(11) << GetName(ComIndex.second) << OU << std::endl; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } std::cout << OU << std::left << std::setw(11) << LastProcess.PrimeUsage << std::setw(11) << LastProcess.SecondaryUsage << OU << std::endl; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } char buf[10]; std::sprintf(buf, "%.2f%%", Ratio); std::cout << OU << std::left << std::setw(11) << buf << std::setw(11) << lastEfficiency << OU << std::endl; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } std::cout << OU << std::left << std::setw(11) << LastProcess.Prime.second << std::setw(11) << LastProcess.Secondary.second << OU << std::endl; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } std::cout << OU << std::left << std::setw(11) << 
GetName(Com->OutPrime) << std::setw(11) << GetName(Com->OutSecondary) << OU << std::endl; for (unsigned int i = 0; i < ContainerSize; i++){ std::cout << OU << " "; } std::cout << OR << LUR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LUR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LR << LO << std::endl; }
33.155556
232
0.612824
[ "render" ]
113325a7b42810b3c01c7aa3e7eccedfa148fbc3
17,420
cpp
C++
Source/MaestroForce.cpp
tsw25/MAESTROeX
5c87ed46e64478c0cd8df9e82e0c06c10fdfffed
[ "BSD-3-Clause" ]
null
null
null
Source/MaestroForce.cpp
tsw25/MAESTROeX
5c87ed46e64478c0cd8df9e82e0c06c10fdfffed
[ "BSD-3-Clause" ]
null
null
null
Source/MaestroForce.cpp
tsw25/MAESTROeX
5c87ed46e64478c0cd8df9e82e0c06c10fdfffed
[ "BSD-3-Clause" ]
null
null
null
#include <Maestro.H> using namespace amrex; void Maestro::MakeVelForce (Vector<MultiFab>& vel_force, const Vector<std::array< MultiFab, AMREX_SPACEDIM > >& uedge, const Vector<MultiFab>& rho, const RealVector& rho0, const RealVector& grav_cell, const Vector<MultiFab>& w0_force_cart, int do_add_utilde_force) { // timer for profiling BL_PROFILE_VAR("Maestro::MakeVelForce()",MakeVelForce); Vector<MultiFab> gradw0_cart(finest_level+1); Vector<MultiFab> grav_cart(finest_level+1); Vector<MultiFab> rho0_cart(finest_level+1); for (int lev=0; lev<=finest_level; ++lev) { gradw0_cart[lev].define(grids[lev], dmap[lev], 1, 1); gradw0_cart[lev].setVal(0.); grav_cart[lev].define(grids[lev], dmap[lev], AMREX_SPACEDIM, 1); grav_cart[lev].setVal(0.); rho0_cart[lev].define(grids[lev], dmap[lev], 1, 1); rho0_cart[lev].setVal(0.); } RealVector gradw0( (max_radial_level+1)*nr_fine ); gradw0.shrink_to_fit(); if (use_exact_base_state || average_base_state) { std::fill(gradw0.begin(), gradw0.end(), 0.); } else { compute_grad_phi_rad(w0.dataPtr(), gradw0.dataPtr()); } Put1dArrayOnCart(gradw0,gradw0_cart,0,0,bcs_u,0,1); Put1dArrayOnCart(rho0,rho0_cart,0,0,bcs_s,Rho); Put1dArrayOnCart(grav_cell,grav_cart,0,1,bcs_f,0); for (int lev=0; lev<=finest_level; ++lev) { // get references to the MultiFabs at level lev MultiFab& vel_force_mf = vel_force[lev]; const MultiFab& gpi_mf = gpi[lev]; const MultiFab& rho_mf = rho[lev]; const MultiFab& uedge_mf = uedge[lev][0]; const MultiFab& vedge_mf = uedge[lev][1]; #if (AMREX_SPACEDIM == 3) const MultiFab& wedge_mf = uedge[lev][2]; #endif const MultiFab& w0_mf = w0_cart[lev]; const MultiFab& gradw0_mf = gradw0_cart[lev]; const MultiFab& normal_mf = normal[lev]; const MultiFab& w0force_mf = w0_force_cart[lev]; const MultiFab& grav_mf = grav_cart[lev]; const MultiFab& rho0_mf = rho0_cart[lev]; // Loop over boxes (make sure mfi takes a cell-centered multifab as an argument) #ifdef _OPENMP #pragma omp parallel #endif for ( MFIter mfi(vel_force_mf, true); 
mfi.isValid(); ++mfi ) { // Get the index space of the valid region const Box& tileBox = mfi.tilebox(); const Box& domainBox = geom[lev].Domain(); const Real* dx = geom[lev].CellSize(); // call fortran subroutine // use macros in AMReX_ArrayLim.H to pass in each FAB's data, // lo/hi coordinates (including ghost cells), and/or the # of components // We will also pass "validBox", which specifies the "valid" region. if (spherical == 0) { #pragma gpu box(tileBox) make_vel_force(AMREX_INT_ANYD(tileBox.loVect()), AMREX_INT_ANYD(tileBox.hiVect()), BL_TO_FORTRAN_ANYD(vel_force_mf[mfi]), BL_TO_FORTRAN_ANYD(gpi_mf[mfi]), BL_TO_FORTRAN_N_ANYD(rho_mf[mfi],Rho), BL_TO_FORTRAN_ANYD(uedge_mf[mfi]), BL_TO_FORTRAN_ANYD(vedge_mf[mfi]), #if (AMREX_SPACEDIM == 3) BL_TO_FORTRAN_ANYD(wedge_mf[mfi]), #endif BL_TO_FORTRAN_ANYD(w0_mf[mfi]), w0_mf.nComp(), BL_TO_FORTRAN_ANYD(w0force_mf[mfi]), BL_TO_FORTRAN_ANYD(rho0_mf[mfi]), BL_TO_FORTRAN_ANYD(grav_mf[mfi]), AMREX_REAL_ANYD(dx), AMREX_INT_ANYD(domainBox.hiVect()), do_add_utilde_force); } else { #if (AMREX_SPACEDIM == 3) #pragma gpu box(tileBox) make_vel_force_sphr(AMREX_INT_ANYD(tileBox.loVect()), AMREX_INT_ANYD(tileBox.hiVect()), BL_TO_FORTRAN_ANYD(vel_force_mf[mfi]), BL_TO_FORTRAN_ANYD(gpi_mf[mfi]), BL_TO_FORTRAN_N_ANYD(rho_mf[mfi],Rho), BL_TO_FORTRAN_ANYD(uedge_mf[mfi]), BL_TO_FORTRAN_ANYD(vedge_mf[mfi]), BL_TO_FORTRAN_ANYD(wedge_mf[mfi]), BL_TO_FORTRAN_ANYD(normal_mf[mfi]), BL_TO_FORTRAN_ANYD(w0_mf[mfi]), BL_TO_FORTRAN_ANYD(gradw0_mf[mfi]), BL_TO_FORTRAN_ANYD(w0force_mf[mfi]), BL_TO_FORTRAN_ANYD(rho0_mf[mfi]), BL_TO_FORTRAN_ANYD(grav_mf[mfi]), AMREX_REAL_ANYD(dx), AMREX_INT_ANYD(domainBox.hiVect()), do_add_utilde_force); #endif } } } // average fine data onto coarser cells AverageDown(vel_force,0,AMREX_SPACEDIM); // note - we need to reconsider the bcs type here // it matches fortran MAESTRO but is that correct? 
FillPatch(t_old, vel_force, vel_force, vel_force, 0, 0, AMREX_SPACEDIM, 0, bcs_u, 1); } void Maestro::ModifyScalForce(Vector<MultiFab>& scal_force, const Vector<MultiFab>& state, const Vector<std::array< MultiFab, AMREX_SPACEDIM > >& umac, const RealVector& s0, const RealVector& s0_edge, const Vector<MultiFab>& s0_cart, int comp, const Vector<BCRec>& bcs, int fullform) { // timer for profiling BL_PROFILE_VAR("Maestro::ModifyScalForce()",ModifyScalForce); #ifdef AMREX_USE_CUDA auto not_launched = Gpu::notInLaunchRegion(); // turn on GPU if (not_launched) Gpu::setLaunchRegion(true); #endif Vector<MultiFab> s0_edge_cart(finest_level+1); for (int lev=0; lev<=finest_level; ++lev) { s0_edge_cart[lev].define(grids[lev], dmap[lev], 1, 1); } if (spherical == 0) { Put1dArrayOnCart(s0_edge,s0_edge_cart,0,0,bcs_f,0); } RealVector divu; Vector<MultiFab> divu_cart(finest_level+1); if (spherical == 1) { divu.resize(nr_fine); std::fill(divu.begin(), divu.end(), 0.); if (!use_exact_base_state) { for (int r=0; r<nr_fine-1; ++r) { Real dr = r_edge_loc[r+1] - r_edge_loc[r]; divu[r] = (r_edge_loc[r+1]*r_edge_loc[r+1] * w0[r+1] - r_edge_loc[r]*r_edge_loc[r] * w0[r]) / (dr * r_cc_loc[r]*r_cc_loc[r]); } } for (int lev=0; lev<=finest_level; ++lev) { divu_cart[lev].define(grids[lev], dmap[lev], 1, 0); divu_cart[lev].setVal(0.); } Put1dArrayOnCart(divu,divu_cart,0,0,bcs_u,0); } for (int lev=0; lev<=finest_level; ++lev) { // Get the index space and grid spacing of the domain const Box& domainBox = geom[lev].Domain(); const Real* dx = geom[lev].CellSize(); // get references to the MultiFabs at level lev MultiFab& scal_force_mf = scal_force[lev]; const MultiFab& state_mf = state[lev]; const MultiFab& umac_mf = umac[lev][0]; const MultiFab& vmac_mf = umac[lev][1]; const MultiFab& s0_mf = s0_cart[lev]; const MultiFab& s0_edge_mf = s0_edge_cart[lev]; const MultiFab& w0_mf = w0_cart[lev]; #if (AMREX_SPACEDIM == 3) const MultiFab& wmac_mf = umac[lev][2]; const MultiFab& divu_mf = divu_cart[lev]; 
#endif // loop over boxes (make sure mfi takes a cell-centered multifab as an argument) #ifdef _OPENMP #pragma omp parallel #endif for ( MFIter mfi(scal_force_mf, true); mfi.isValid(); ++mfi ) { // Get the index space of the valid region const Box& tileBox = mfi.tilebox(); // call fortran subroutine // use macros in AMReX_ArrayLim.H to pass in each FAB's data, // lo/hi coordinates (including ghost cells), and/or the # of components // We will also pass "validBox", which specifies the "valid" region. if (spherical == 1) { #if (AMREX_SPACEDIM == 3) #pragma gpu box(tileBox) modify_scal_force_sphr(AMREX_INT_ANYD(tileBox.loVect()), AMREX_INT_ANYD(tileBox.hiVect()), AMREX_INT_ANYD(domainBox.loVect()), AMREX_INT_ANYD(domainBox.hiVect()), scal_force_mf[mfi].dataPtr(comp), AMREX_INT_ANYD(scal_force_mf[mfi].loVect()), AMREX_INT_ANYD(scal_force_mf[mfi].hiVect()), state_mf[mfi].dataPtr(comp), AMREX_INT_ANYD(state_mf[mfi].loVect()), AMREX_INT_ANYD(state_mf[mfi].hiVect()), BL_TO_FORTRAN_ANYD(umac_mf[mfi]), BL_TO_FORTRAN_ANYD(vmac_mf[mfi]), BL_TO_FORTRAN_ANYD(wmac_mf[mfi]), BL_TO_FORTRAN_ANYD(s0_mf[mfi]), AMREX_REAL_ANYD(dx), fullform, BL_TO_FORTRAN_ANYD(divu_mf[mfi])); #else Abort("ModifyScalForce: Spherical is not valid for DIM < 3"); #endif } else { #pragma gpu box(tileBox) modify_scal_force(AMREX_INT_ANYD(tileBox.loVect()), AMREX_INT_ANYD(tileBox.hiVect()), lev, scal_force_mf[mfi].dataPtr(comp), AMREX_INT_ANYD(scal_force_mf[mfi].loVect()), AMREX_INT_ANYD(scal_force_mf[mfi].hiVect()), state_mf[mfi].dataPtr(comp), AMREX_INT_ANYD(state_mf[mfi].loVect()), AMREX_INT_ANYD(state_mf[mfi].hiVect()), BL_TO_FORTRAN_ANYD(umac_mf[mfi]), BL_TO_FORTRAN_ANYD(vmac_mf[mfi]), #if (AMREX_SPACEDIM == 3) BL_TO_FORTRAN_ANYD(wmac_mf[mfi]), #endif BL_TO_FORTRAN_ANYD(s0_mf[mfi]), BL_TO_FORTRAN_ANYD(s0_edge_mf[mfi]), BL_TO_FORTRAN_ANYD(w0_mf[mfi]), AMREX_REAL_ANYD(dx), fullform); } } } // average fine data onto coarser cells AverageDown(scal_force,comp,1); // fill ghost cells FillPatch(t_old, 
scal_force, scal_force, scal_force, comp, comp, 1, 0, bcs_f); #ifdef AMREX_USE_CUDA // turn off GPU if (not_launched) Gpu::setLaunchRegion(false); #endif } void Maestro::MakeRhoHForce(Vector<MultiFab>& scal_force, int is_prediction, const Vector<MultiFab>& thermal, const Vector<std::array< MultiFab, AMREX_SPACEDIM > >& umac, int add_thermal, const int &which_step) { // timer for profiling BL_PROFILE_VAR("Maestro::MakeRhoHForce()",MakeRhoHForce); #ifdef AMREX_USE_CUDA auto not_launched = Gpu::notInLaunchRegion(); // turn on GPU if (not_launched) Gpu::setLaunchRegion(true); #endif // if we are doing the prediction, then it only makes sense to be in // this routine if the quantity we are predicting is rhoh', h, or rhoh if (is_prediction == 1 && !(enthalpy_pred_type == predict_rhohprime || enthalpy_pred_type == predict_h || enthalpy_pred_type == predict_rhoh) ) { Abort("ERROR: should only call mkrhohforce when predicting rhoh', h, or rhoh"); } RealVector rho0( (max_radial_level+1)*nr_fine ); RealVector p0( (max_radial_level+1)*nr_fine ); RealVector grav( (max_radial_level+1)*nr_fine ); rho0.shrink_to_fit(); p0.shrink_to_fit(); grav.shrink_to_fit(); if (which_step == 1) { rho0 = rho0_old; p0 = p0_old; } else { for(int i=0; i<rho0.size(); ++i) { rho0[i] = 0.5*(rho0_old[i]+rho0_new[i]); p0[i] = 0.5*( p0_old[i]+ p0_new[i]); } } Vector<MultiFab> p0_cart(finest_level+1); Vector<MultiFab> psi_cart(finest_level+1); Vector<MultiFab> grav_cart(finest_level+1); Vector<MultiFab> rho0_cart(finest_level+1); Vector< std::array< MultiFab, AMREX_SPACEDIM > > p0mac(finest_level+1); for (int lev=0; lev<=finest_level; ++lev) { p0_cart[lev].define(grids[lev], dmap[lev], 1, 1); psi_cart[lev].define(grids[lev], dmap[lev], 1, 1); grav_cart[lev].define(grids[lev], dmap[lev], 1, 1); rho0_cart[lev].define(grids[lev], dmap[lev], 1, 1); AMREX_D_TERM(p0mac[lev][0].define(convert(grids[lev],nodal_flag_x), dmap[lev], 1, 1); , p0mac[lev][1].define(convert(grids[lev],nodal_flag_y), dmap[lev], 1, 1); , 
p0mac[lev][2].define(convert(grids[lev],nodal_flag_z), dmap[lev], 1, 1); ); psi_cart[lev].setVal(0.); grav_cart[lev].setVal(0.); rho0_cart[lev].setVal(0.); p0_cart[lev].setVal(0.); } Put1dArrayOnCart(p0, p0_cart, 0, 0, bcs_f, 0); if (spherical == 1) { MakeS0mac(p0, p0mac); } Put1dArrayOnCart(psi,psi_cart,0,0,bcs_f,0); Put1dArrayOnCart(rho0,rho0_cart,0,0,bcs_s,Rho); make_grav_cell(grav.dataPtr(), rho0.dataPtr(), r_cc_loc.dataPtr(), r_edge_loc.dataPtr()); Put1dArrayOnCart(grav,grav_cart,0,0,bcs_f,0); for (int lev=0; lev<=finest_level; ++lev) { // get references to the MultiFabs at level lev MultiFab& scal_force_mf = scal_force[lev]; const MultiFab& umac_mf = umac[lev][0]; const MultiFab& vmac_mf = umac[lev][1]; #if (AMREX_SPACEDIM == 3) const MultiFab& wmac_mf = umac[lev][2]; #endif const MultiFab& p0cart_mf = p0_cart[lev]; const MultiFab& p0macx_mf = p0mac[lev][0]; const MultiFab& p0macy_mf = p0mac[lev][1]; #if (AMREX_SPACEDIM == 3) const MultiFab& p0macz_mf = p0mac[lev][2]; #endif const MultiFab& thermal_mf = thermal[lev]; const MultiFab& psi_mf = psi_cart[lev]; const MultiFab& grav_mf = grav_cart[lev]; const MultiFab& rho0_mf = rho0_cart[lev]; // loop over boxes (make sure mfi takes a cell-centered multifab as an argument) #ifdef _OPENMP #pragma omp parallel #endif for ( MFIter mfi(scal_force_mf, true); mfi.isValid(); ++mfi ) { // Get the index space of the valid region const Box& tileBox = mfi.tilebox(); const Box& domainBox = geom[lev].Domain(); const Real* dx = geom[lev].CellSize(); // call fortran subroutine // use macros in AMReX_ArrayLim.H to pass in each FAB's data, // lo/hi coordinates (including ghost cells), and/or the # of components // We will also pass "validBox", which specifies the "valid" region. 
// if use_exact_base_state or average_base_state, // psi is set to dpdt in advance subroutine #pragma gpu box(tileBox) mkrhohforce(AMREX_INT_ANYD(tileBox.loVect()), AMREX_INT_ANYD(tileBox.hiVect()), lev, scal_force_mf[mfi].dataPtr(RhoH), AMREX_INT_ANYD(scal_force_mf[mfi].loVect()), AMREX_INT_ANYD(scal_force_mf[mfi].hiVect()), BL_TO_FORTRAN_ANYD(umac_mf[mfi]), BL_TO_FORTRAN_ANYD(vmac_mf[mfi]), #if (AMREX_SPACEDIM == 3) BL_TO_FORTRAN_ANYD(wmac_mf[mfi]), #endif BL_TO_FORTRAN_ANYD(thermal_mf[mfi]), BL_TO_FORTRAN_ANYD(grav_mf[mfi]), BL_TO_FORTRAN_ANYD(rho0_mf[mfi]), BL_TO_FORTRAN_ANYD(p0cart_mf[mfi]), BL_TO_FORTRAN_ANYD(p0macx_mf[mfi]), BL_TO_FORTRAN_ANYD(p0macy_mf[mfi]), #if (AMREX_SPACEDIM == 3) BL_TO_FORTRAN_ANYD(p0macz_mf[mfi]), #endif BL_TO_FORTRAN_ANYD(psi_mf[mfi]), AMREX_REAL_ANYD(dx), AMREX_INT_ANYD(domainBox.hiVect()), is_prediction, add_thermal); } } // average down and fill ghost cells AverageDown(scal_force,RhoH,1); FillPatch(t_old,scal_force,scal_force,scal_force,RhoH,RhoH,1,0,bcs_f); #ifdef AMREX_USE_CUDA // turn off GPU if (not_launched) Gpu::setLaunchRegion(false); #endif }
40.988235
128
0.547245
[ "vector" ]
1135b62ea21843f5bbab110689d978f826c86d10
650
hpp
C++
src/Core/Containers.hpp
alourencodev/CoffeeShader
5e06ec40dfc627d577f7bf710271f6b9267c8395
[ "MIT" ]
null
null
null
src/Core/Containers.hpp
alourencodev/CoffeeShader
5e06ec40dfc627d577f7bf710271f6b9267c8395
[ "MIT" ]
null
null
null
src/Core/Containers.hpp
alourencodev/CoffeeShader
5e06ec40dfc627d577f7bf710271f6b9267c8395
[ "MIT" ]
null
null
null
#ifndef COFFEE_UTILS_CONTAINERS_HPP #define COFFEE_UTILS_CONTAINERS_HPP #include <vector> namespace coffee { template<typename T, size_t columns, size_t rows> class matrix { public: T *data() const { return &_data[0][0]; } std::array<T, columns> &operator[](std::size_t index) { return _data[index]; } T &at(std::size_t column, std::size_t row) { return _data[column][row]; } private: std::array<std::array<T, columns>, rows> _data; }; namespace utils { template<typename T> void unorderedRemove(std::vector<T> &v, const typename std::vector<T>::iterator &it) { *it = std::move(v.back()); v.pop_back(); } } } #endif
18.571429
84
0.676923
[ "vector" ]
113604841c696516ec6e5f4e717613d5470ffa62
2,424
hpp
C++
modules/common/include/v4r/common/impl/RandomNumbers.hpp
ToMadoRe/v4r
7cb817e05cb9d99cb2f68db009c27d7144d07f09
[ "MIT" ]
17
2015-11-16T14:21:10.000Z
2020-11-09T02:57:33.000Z
modules/common/include/v4r/common/impl/RandomNumbers.hpp
ToMadoRe/v4r
7cb817e05cb9d99cb2f68db009c27d7144d07f09
[ "MIT" ]
35
2015-07-27T15:04:43.000Z
2019-08-22T10:52:35.000Z
modules/common/include/v4r/common/impl/RandomNumbers.hpp
ToMadoRe/v4r
7cb817e05cb9d99cb2f68db009c27d7144d07f09
[ "MIT" ]
18
2015-08-06T09:26:27.000Z
2020-09-03T01:31:00.000Z
/** * $Id$ * * Software License Agreement (GNU General Public License) * * Copyright (C) 2015: * * Johann Prankl, prankl@acin.tuwien.ac.at * Aitor Aldoma, aldoma@acin.tuwien.ac.at * * Automation and Control Institute * Vienna University of Technology * Gusshausstraße 25-29 * 1170 Vienn, Austria * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * @author Johann Prankl, Aitor Aldoma * */ #ifndef V4R_RANDOM_NUMBERS_HPP #define V4R_RANDOM_NUMBERS_HPP namespace v4r { inline bool contains(const std::vector<int> &idx, int num) { for (unsigned i=0; i<idx.size(); i++) if (idx[i]==num) return true; return false; } /** * @brief Returns a pseudo random number in [0.0, 1.0] */ inline float frand() { return rand()/((float)RAND_MAX + 1.); } inline float expPdf(float lambda) { float dum; do dum = frand(); while (dum == 0.); return -log(dum)/lambda; } /** * expSelect */ inline int expSelect(int max) { int i; /* we want 99% probability of getting with expdev() a number smaller max * this requires a lambda of the exponential distribution: * lambda = -log(0.01)/max; (-log(0.01) = 4.6) */ float lambda = 4.6/(float)max; do i = (int)(expPdf(lambda)); while(i > max); return i; } /** * getRandIdx */ inline void getRandIdx(int size, int num, std::vector<int> &idx) { int temp; idx.clear(); for (int i=0; i<num; i++) { do{ temp = rand()%size; }while(contains(idx,temp)); idx.push_back(temp); } } /** * getExpRandIdx */ 
inline void getExpRandIdx(int size, int num, std::vector<int> &idx) { int temp; idx.clear(); for (int i=0; i<num; i++) { do{ temp = expSelect(size); }while(contains(idx,temp)); idx.push_back(temp); } } } //--END-- #endif
19.707317
74
0.641089
[ "vector" ]
113609aed8d9a4b608cfee9cc49a2d697534ee20
1,071
cpp
C++
562-Longest_Line_of_Consecutive_One_in_Matrix.cpp
elsdrium/LeetCode-practice
a3b1fa5dd200155a636d36cd570e2454f7194e10
[ "MIT" ]
null
null
null
562-Longest_Line_of_Consecutive_One_in_Matrix.cpp
elsdrium/LeetCode-practice
a3b1fa5dd200155a636d36cd570e2454f7194e10
[ "MIT" ]
null
null
null
562-Longest_Line_of_Consecutive_One_in_Matrix.cpp
elsdrium/LeetCode-practice
a3b1fa5dd200155a636d36cd570e2454f7194e10
[ "MIT" ]
null
null
null
class Solution { public: int longestLine(vector<vector<int>>& M) { if ( M.empty() ) return 0; vector<int> vertical(M[0].size()); vector<vector<int>> diagonal(M.size(), vector<int>(M[0].size())), antidiagonal(M.size(), vector<int>(M[0].size())); int len = 0; for ( int i=0; i!=M.size(); ++i ) { for ( int j=0, h=0; j!=M[0].size(); ++j ) { if ( M[i][j] ) { if ( ++h > len ) len = h; if ( ++vertical[j] > len ) len = vertical[j]; diagonal[i][j] = (i>0 && j>0) ? diagonal[i-1][j-1] + 1 : 1; if ( diagonal[i][j] > len ) len = diagonal[i][j]; antidiagonal[i][j] = (i>0 && j<M[0].size()-1) ? antidiagonal[i-1][j+1] + 1 : 1; if ( antidiagonal[i][j] > len ) len = antidiagonal[i][j]; } else { h = 0; vertical[j] = 0; } } } return len; } };
38.25
99
0.37535
[ "vector" ]
1136ee23ba44f5a67209c3102c7bf3ff7089216a
2,311
cpp
C++
image_test.cpp
block8437/GameEngine
e4b452c33781566e55e9339efee9441ddb924f58
[ "MIT" ]
null
null
null
image_test.cpp
block8437/GameEngine
e4b452c33781566e55e9339efee9441ddb924f58
[ "MIT" ]
1
2017-04-05T00:58:21.000Z
2017-04-05T00:58:21.000Z
image_test.cpp
block8437/GameEngine
e4b452c33781566e55e9339efee9441ddb924f58
[ "MIT" ]
null
null
null
#include "image_test.h" inline bool exists_test1 (const std::string& name) { if (FILE *file = fopen(name.c_str(), "r")) { fclose(file); return true; } else { return false; } } namespace GameEngine { Image::Image() { } Image::Image(const char* filename) { //printf("%s\n", exists_test1(filename) ? "true" : "false"); int n; unsigned char* data = stbi_load(filename, &width, &height, &n, 4); if (data == NULL) { printf("ERROR LOADING IMAGE"); return; } glGenTextures(1, &textureID); glBindTexture(GL_TEXTURE_2D, textureID); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height,0, GL_RGBA, GL_UNSIGNED_BYTE, data); //printf("TexID: %i\nValid: %s\nError: %s\n", textureID, glIsTexture(textureID) ? "true" : "false", gluErrorString(glGetError())); //printf("W/H: (%d, %d)\n", width, height); //printf("%d %d %d %d\n",*(data+30),*(data+31),*(data+32),*(data+33)); stbi_image_free(data); } Image::~Image() { } void Image::render(int x, int y, float angle, float scale) { if( textureID != 0 ) { //printf("%i , %i\n", x, y); glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glColor4f(1.0,1.0,1.0, 1.0); glTranslatef( x, y, 0.0f ); glRotatef(angle, 0.0f, 0.0f, 1.0f); glEnable(GL_TEXTURE_2D); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); //glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); glBindTexture( GL_TEXTURE_2D, textureID ); glBegin( GL_QUADS ); glTexCoord2f( 0.0f, 0.0f ); glVertex2f( 0.0f, 0.0f ); glTexCoord2f( 1.0f, 0.0f ); glVertex2f( width*scale, 0.0f ); glTexCoord2f( 1.0f, 1.0f ); glVertex2f( width*scale, height*scale ); glTexCoord2f( 0.0f, 1.0f ); glVertex2f( 0.0f, height*scale ); glEnd(); //printf("TexID: %i\nValid: %s\nError: %s\n", textureID, glIsTexture(textureID) ? 
"true" : "false", gluErrorString(glGetError())); glDisable(GL_TEXTURE_2D); glDisable(GL_BLEND); glPopMatrix(); } } }
33.985294
133
0.647772
[ "render" ]
1141a2432bdc52fa0486411a0aaefa21a5d412c5
11,086
cpp
C++
src/nbla/function/generic/batch_normalization.cpp
kodavatimahendra/nnabla
72009f670af075f17ffca9c809b07d48cca30bd9
[ "Apache-2.0" ]
null
null
null
src/nbla/function/generic/batch_normalization.cpp
kodavatimahendra/nnabla
72009f670af075f17ffca9c809b07d48cca30bd9
[ "Apache-2.0" ]
null
null
null
src/nbla/function/generic/batch_normalization.cpp
kodavatimahendra/nnabla
72009f670af075f17ffca9c809b07d48cca30bd9
[ "Apache-2.0" ]
null
null
null
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/function/batch_normalization.hpp> #include <nbla/variable.hpp> #include <algorithm> #include <cmath> #include <limits> namespace nbla { NBLA_REGISTER_FUNCTION_SOURCE(BatchNormalization, const vector<int> &, float, float, bool); template <typename T> void BatchNormalization<T>::setup_impl(const Variables &inputs, const Variables &outputs) { // Check axes NBLA_CHECK(axes_.size() == 1, error_code::not_implemented, "Specifying axis more than one is not supported so far.") // Check and parse shapes Shape_t shape_i = inputs[0]->shape(); Size_t size = inputs[0]->size(); Size_t size_axis = inputs[0]->size(axes_[0]); size0_ = size / size_axis; // Batch size. size1_ = shape_i[axes_[0]]; // Size of specified axis. size2_ = size / size0_ / size1_; // Size of rest. size12_ = size1_ * size2_; size02_ = size0_ * size2_; NBLA_CHECK(size0_ * size1_ * size2_ == size, error_code::unclassified, "An error occurred during setup BatchNormalization function."); // Verify mean, var, beta and gamma dims. Shape_t shape_b = inputs[1]->shape(); Shape_t shape_g = inputs[2]->shape(); Shape_t shape_m = inputs[3]->shape(); Shape_t shape_v = inputs[4]->shape(); // Verify mean, var, beta and gamma shapes. 
Shape_t shape_check(shape_i.size(), 1); shape_check[axes_[0]] = shape_i[axes_[0]]; NBLA_CHECK(shape_check == shape_b, error_code::value, "Shape of beta(inputs[1]) does not match. " "beta: (%s) != expected: (%s).", string_join(shape_b, string(", ")).c_str(), string_join(shape_check, string(", ")).c_str()); NBLA_CHECK(shape_check == shape_g, error_code::value, "Shape of gamma(inputs[2]) does not match. " "gamma: (%s) != expected: (%s).", string_join(shape_g, string(", ")).c_str(), string_join(shape_check, string(", ")).c_str()); NBLA_CHECK(shape_check == shape_m, error_code::value, "Shape of mean(inputs[3]) does not match. " "mean: (%s) != expected: (%s).", string_join(shape_m, string(", ")).c_str(), string_join(shape_check, string(", ")).c_str()); NBLA_CHECK(shape_check == shape_v, error_code::value, "Shape of var(inputs[4]) does not match. " "var: (%s) != expected: (%s).", string_join(shape_v, string(", ")).c_str(), string_join(shape_check, string(", ")).c_str()); // Check num of inputs and outputs. size_t noutputs = outputs.size(); if (!batch_stat_) { NBLA_CHECK( noutputs == 1, error_code::value, "If batch_stat_ is false, it cannot output batch mean and variance."); } NBLA_CHECK(noutputs == 1 || noutputs == 3, error_code::value, "Number of outputs must be 1 or 3."); // Reshape outputs and/or temporary buffers. outputs[0]->reshape(shape_i, true); if (noutputs == 3) { outputs[1]->reshape(shape_b, true); // batch mean outputs[2]->reshape(shape_g, true); // batch var } else { mean_.reshape(shape_b, true); // batch mean var_.reshape(shape_g, true); // batch var } } template <class T> void BatchNormalization<T>::forward_impl(const Variables &inputs, const Variables &outputs) { if (batch_stat_) { // Training mode. forward_impl_batch(inputs, outputs); } else { // Testing mode. 
forward_impl_global(inputs, outputs); } } template <class T> void BatchNormalization<T>::forward_impl_batch(const Variables &inputs, const Variables &outputs) { // Check whether it outputs batch mean and var. Variable *batch_mean = &mean_; Variable *batch_var = &var_; if (outputs.size() == 3) { batch_mean = outputs[1]; batch_var = outputs[2]; } // Inputs const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); const T *beta = inputs[1]->get_data_pointer<T>(this->ctx_); const T *gamma = inputs[2]->get_data_pointer<T>(this->ctx_); // Output T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_, true); T *m = batch_mean->cast_data_and_get_pointer<T>(this->ctx_, true); // batch mean T *v = batch_var->cast_data_and_get_pointer<T>(this->ctx_, true); // batch varf // Inputs/Outputs T *rm = inputs[3]->cast_data_and_get_pointer<T>(this->ctx_); // running mean T *rv = inputs[4]->cast_data_and_get_pointer<T>(this->ctx_); // running var // Main loop for (int i1 = 0; i1 < size1_; ++i1) { // Mean and variance calculation and their moving ones. // Batch mean and var m[i1] = 0; v[i1] = 0; for (int i02 = 0; i02 < size02_; ++i02) { const int i0 = i02 / size2_; const int i2 = i02 % size2_; const int i = i0 * size12_ + i1 * size2_ + i2; const T value = x[i]; m[i1] += value; v[i1] += value * value; } m[i1] /= size02_; v[i1] = v[i1] / size02_ - m[i1] * m[i1]; // Moving mean and var rm[i1] = decay_rate_ * rm[i1] + (1 - decay_rate_) * m[i1]; rv[i1] = decay_rate_ * rv[i1] + (1 - decay_rate_) * v[i1] * size02_ / (size02_ - 1); // v[i1] = 1 / std::sqrt(v[i1] + (T)eps_); // Subtract mean and divide by std, and apply beta and gamma. 
for (int i02 = 0; i02 < size02_; ++i02) { const int i0 = i02 / size2_; const int i2 = i02 % size2_; const int i = i0 * size12_ + i1 * size2_ + i2; const T stdvar = std::sqrt(v[i1] + (T)eps_); y[i] = (x[i] - m[i1]) * gamma[i1] / stdvar + beta[i1]; } } } template <class T> void BatchNormalization<T>::forward_impl_global(const Variables &inputs, const Variables &outputs) { // Inputs const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); const T *beta = inputs[1]->get_data_pointer<T>(this->ctx_); const T *gamma = inputs[2]->get_data_pointer<T>(this->ctx_); const T *rm = inputs[3]->get_data_pointer<T>(this->ctx_); // running mean const T *rv = inputs[4]->get_data_pointer<T>(this->ctx_); // running var // Output T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_, true); // Subtract mean and divide by std, and apply beta and gamma. for (int i1 = 0; i1 < size1_; ++i1) { for (int i02 = 0; i02 < size02_; ++i02) { const int i0 = i02 / size2_; const int i2 = i02 % size2_; const int i = i0 * size12_ + i1 * size2_ + i2; const T mean = rm[i1]; const T stdvar = std::sqrt(rv[i1] + (T)eps_); y[i] = (x[i] - mean) * gamma[i1] / stdvar + beta[i1]; } } } template <class T> void BatchNormalization<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (batch_stat_) { // Training mode. backward_impl_batch(inputs, outputs, propagate_down, accum); } else { // Testing mode. NBLA_ERROR(error_code::not_implemented, ""); } } template <class T> void BatchNormalization<T>::backward_impl_batch( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1] || propagate_down[2])) { return; } // Check whether it outputs batch mean/var. Variable *batch_mean = &mean_; Variable *batch_var = &var_; if (outputs.size() == 3) { batch_mean = outputs[1]; batch_var = outputs[2]; } // Commont inputs wrt. gradient. 
const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_); const T *m = batch_mean->get_data_pointer<T>(this->ctx_); const T *v = batch_var->get_data_pointer<T>(this->ctx_); const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); // Gradient wrt. x. if (propagate_down[0]) { T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_, !accum[0]); const T *g = inputs[2]->get_data_pointer<T>(this->ctx_); const T *dm = nullptr; const T *dv = nullptr; if (outputs.size() == 3) { dm = batch_mean->get_grad_pointer<T>(this->ctx_); dv = batch_var->get_grad_pointer<T>(this->ctx_); } for (int i1 = 0; i1 < size1_; ++i1) { // Compute gradient wrt mean and var respectively T dvar = 0; T dmean = 0; T tmp = 0; for (int i02 = 0; i02 < size02_; ++i02) { const int i0 = i02 / size2_; const int i2 = i02 % size2_; const int i = i0 * size12_ + i1 * size2_ + i2; const T dxh = dy[i] * g[i1]; // Grad of x hat. const T cx = x[i] - m[i1]; // x - mean dvar += dxh * cx; dmean += dxh; tmp += cx; } // dm and dv are set if batch mean and var are used following functions // in computation graph. dvar = dvar * (T)-0.5 * std::pow(v[i1] + (T)eps_, (T)-1.5) + (dv ? dv[i1] : (T)0); dmean = dmean * (-1 / std::sqrt(v[i1] + (T)eps_)) + dvar * (-2) * tmp / (size02_) + (dm ? dm[i1] : (T)0); // Compute gradient wrt x. 
for (int i02 = 0; i02 < size02_; ++i02) { const int i0 = i02 / size2_; const int i2 = i02 % size2_; const int i = i0 * size12_ + i1 * size2_ + i2; const T grad = dy[i] * g[i1] / std::sqrt(v[i1] + (T)eps_) + dvar * 2 * (x[i] - m[i1]) / (size02_) + dmean / (size02_); if (accum[0]) dx[i] += grad; else dx[i] = grad; } } } if (propagate_down[1] || propagate_down[2]) { // beta and gamma NBLA_CHECK(propagate_down[1] && propagate_down[2], error_code::value, "'need_grad' of beta and gamma must be the same."); T *db = inputs[1]->cast_grad_and_get_pointer<T>(this->ctx_, !accum[1]); T *dg = inputs[2]->cast_grad_and_get_pointer<T>(this->ctx_, !accum[2]); for (int i1 = 0; i1 < size1_; ++i1) { T dbv = accum[1] ? db[i1] : (T)0; T dgv = accum[2] ? dg[i1] : (T)0; for (int i02 = 0; i02 < size02_; ++i02) { const int i0 = i02 / size2_; const int i2 = i02 % size2_; const int i = i0 * size12_ + i1 * size2_ + i2; dbv += dy[i]; dgv += dy[i] * (x[i] - m[i1]) / std::sqrt(v[i1] + (T)eps_); } db[i1] = dbv; dg[i1] = dgv; } } } }
38.227586
79
0.583078
[ "shape", "vector" ]
1146ba64c149ca23c0d68a9004508897b72b718e
4,982
cpp
C++
one/fake/arcus/game/parsing.cpp
i3D-net/ONE-GameHosting-SDK
060473173136b2c8d9bc43aaad0eb487870dc115
[ "BSD-3-Clause" ]
6
2020-07-03T09:18:04.000Z
2021-01-07T17:50:06.000Z
one/fake/arcus/game/parsing.cpp
i3D-net/ONE-GameHosting-SDK
060473173136b2c8d9bc43aaad0eb487870dc115
[ "BSD-3-Clause" ]
null
null
null
one/fake/arcus/game/parsing.cpp
i3D-net/ONE-GameHosting-SDK
060473173136b2c8d9bc43aaad0eb487870dc115
[ "BSD-3-Clause" ]
null
null
null
#include <one/fake/arcus/game/parsing.h> #include <one/fake/arcus/game/log.h> namespace one_integration { bool Parsing::extract_key_value_payload( const OneArrayPtr array, std::function<bool(const size_t total_number_of_keys, const std::string &, const std::string &)> callback) { if (callback == nullptr) { L_ERROR("callback is nullptr"); return false; } if (array == nullptr) { L_ERROR("array is nullptr"); return false; } bool empty = true; auto err = one_array_is_empty(array, &empty); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } if (empty) { L_ERROR("array is empty"); return false; } unsigned number_of_keys = 0; err = one_array_size(array, &number_of_keys); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } OneObjectPtr pair = nullptr; err = one_object_create(&pair); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } for (unsigned int pos = 0; pos < number_of_keys; ++pos) { err = one_array_val_object(array, pos, pair); if (one_is_error(err)) { L_ERROR(one_error_text(err)); one_object_destroy(pair); return false; } if (!extract_key_value_pair(pair, _key, _value)) { L_ERROR(one_error_text(err)); one_object_destroy(pair); return false; } if (!callback(number_of_keys, _key.data(), _value.data())) { L_ERROR("callback unable to extract key value pair"); one_object_destroy(pair); return false; } } one_object_destroy(pair); return true; } bool Parsing::extract_key_value_pair( const OneObjectPtr pair, std::array<char, codec::key_max_size_null_terminated()> &key, std::array<char, codec::value_max_size_null_terminated()> &value) { if (pair == nullptr) { L_ERROR("pair is nullptr"); return false; } unsigned int key_size = 0; auto err = one_object_val_string_size(pair, "key", &key_size); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } // Because buffer must add the '\0' explicitly. 
if (codec::key_max_size() < key_size + 1) { L_ERROR("key size is bigger than max size(" + std::to_string(codec::key_max_size()) + ")"); return false; } err = one_object_val_string(pair, "key", key.data(), codec::key_max_size()); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } // Ensure that the string is `\0` terminated. _key[key_size] = '\0'; unsigned int value_size = 0; err = one_object_val_string_size(pair, "value", &value_size); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } // Because buffer must add the '\0' explicitly. if (codec::value_max_size() < value_size + 1) { L_ERROR("value size is bigger than max size(" + std::to_string(codec::value_max_size()) + ")"); return false; } err = one_object_val_string(pair, "value", value.data(), codec::value_max_size()); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } // Ensure that the string is `\0` terminated. _value[value_size] = '\0'; return true; } bool Parsing::extract_string(const OneObjectPtr object, const char *key, std::function<bool(const std::string &)> callback) { if (object == nullptr) { L_ERROR("object is nullptr"); return false; } if (key == nullptr) { L_ERROR("key is nullptr"); return false; } unsigned int size = 0; auto err = one_object_val_string_size(object, key, &size); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } // Because buffer must add the '\0' explicitly. if (_string_buffer.size() < size + 1) { L_ERROR("string buffer size too small."); return false; } err = one_object_val_string(object, key, _string_buffer.data(), _string_buffer.size()); if (one_is_error(err)) { L_ERROR(one_error_text(err)); return false; } // Ensure that the string is `\0` terminated. 
_string_buffer[size] = '\0'; if (!callback(_string_buffer.data())) { L_ERROR("callback unable to extract string"); return false; } return true; } std::array<char, codec::key_max_size_null_terminated()> Parsing::_key = {}; std::array<char, codec::value_max_size_null_terminated()> Parsing::_value = {}; std::array<char, codec::string_buffer_max_size()> Parsing::_string_buffer = {}; } // namespace one_integration
28.306818
90
0.593737
[ "object" ]
1147ec5cb79db95b0ba07dc065075436d3b11ac5
2,725
hpp
C++
RobWorkSim/src/rwsim/log/SimulatorStatistics.hpp
ZLW07/RobWork
e713881f809d866b9a0749eeb15f6763e64044b3
[ "Apache-2.0" ]
1
2021-12-29T14:16:27.000Z
2021-12-29T14:16:27.000Z
RobWorkSim/src/rwsim/log/SimulatorStatistics.hpp
ZLW07/RobWork
e713881f809d866b9a0749eeb15f6763e64044b3
[ "Apache-2.0" ]
null
null
null
RobWorkSim/src/rwsim/log/SimulatorStatistics.hpp
ZLW07/RobWork
e713881f809d866b9a0749eeb15f6763e64044b3
[ "Apache-2.0" ]
null
null
null
/******************************************************************************** * Copyright 2015 The Robotics Group, The Maersk Mc-Kinney Moller Institute, * Faculty of Engineering, University of Southern Denmark * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ********************************************************************************/ #ifndef RWSIM_LOG_SIMULATORSTATISTICS_HPP_ #define RWSIM_LOG_SIMULATORSTATISTICS_HPP_ /** * @file SimulatorStatistics.hpp * * \copydoc rwsim::log::SimulatorStatistics */ #include <rw/core/Ptr.hpp> #include <map> #include <string> #include <vector> namespace rwsim { namespace log { //! @addtogroup rwsim_log //! @{ /** * @brief Statistics utility for automatic generation of data set based on a SimulatorLogScope. */ class SimulatorStatistics { public: //! Smart pointer type of SimulatorStatistics typedef rw::core::Ptr< SimulatorStatistics > Ptr; //! Type of a DataSeries collection (each series is a name and a list of numbers) typedef std::map< std::string, std::vector< double > > DataSeries; /** * @brief Create a new statistics on the given SimulatorLogScope. * @note Please use the SimulatorLogScope::getStatistics function instead. * @note All children log scopes will have their statistics updated or added. * @param log [in/out] the log to create statistics based on. */ SimulatorStatistics (const class SimulatorLogScope* log); //! @brief Destructor. virtual ~SimulatorStatistics (); //! @brief Generate statistics. void update (); //! 
@brief Check if any statistics was generated. bool hasData () const; /** * @brief Get the found statistics. * @return a collection of data series. */ const DataSeries& getSeries () const; private: const DataSeries& getPropagated () const; private: const class SimulatorLogScope* _log; DataSeries _singleValues; DataSeries _multipleValues; }; //! @} }} // namespace rwsim::log #endif /* RWSIM_LOG_SIMULATORSTATISTICS_HPP_ */
32.440476
99
0.629358
[ "vector" ]
11488d3942caac121841e29f858d3b6a298429b1
5,540
cpp
C++
mapping/src/octomap/build_octomap.cpp
sameeptandon/sail-car-log
0ee3d598bb09d389bcbd2ebf73cd4b2411e796be
[ "BSD-2-Clause" ]
1
2021-02-24T03:11:13.000Z
2021-02-24T03:11:13.000Z
mapping/src/octomap/build_octomap.cpp
sameeptandon/sail-car-log
0ee3d598bb09d389bcbd2ebf73cd4b2411e796be
[ "BSD-2-Clause" ]
null
null
null
mapping/src/octomap/build_octomap.cpp
sameeptandon/sail-car-log
0ee3d598bb09d389bcbd2ebf73cd4b2411e796be
[ "BSD-2-Clause" ]
3
2015-03-18T14:36:04.000Z
2018-07-04T02:57:24.000Z
#include <string> #include <boost/filesystem.hpp> #include <boost/foreach.hpp> #include <boost/program_options.hpp> #include <boost/progress.hpp> #include "point_defs.h" #include <pcl/io/pcd_io.h> #include <pcl/common/transforms.h> #include <octomap/octomap.h> #include <octomap/Pointcloud.h> #include <octomap/OccupancyOcTreeBase.h> #include "utils/path_utils.h" #include "utils/cloud_utils.h" #include "utils/hdf_utils.h" #include "parameters.h" namespace po = boost::program_options; namespace fs = boost::filesystem; struct Options { bool single; bool debug; po::options_description desc; }; int options(int ac, char ** av, Options& opts) { // Declare the supported options. po::options_description desc = opts.desc; desc.add_options()("help", "Produce help message."); desc.add_options() ("single", po::bool_switch(&opts.single)->default_value(false), "export octomaps from individual scans") ("debug", po::bool_switch(&opts.debug)->default_value(false), "debug flag") ; po::variables_map vm; po::store(po::parse_command_line(ac, av, desc), vm); if (vm.count("help")) { std::cout << desc << std::endl; return 1; } po::notify(vm); return 0; } template <typename PointT> void pcl_to_octomap(boost::shared_ptr<pcl::PointCloud<PointT> > src_cloud, octomap::Pointcloud& octomap_cloud) { octomap_cloud.clear(); octomap::point3d octopt; BOOST_FOREACH(PointT pt, src_cloud->points) { octopt(0) = pt.x; octopt(1) = pt.y; octopt(2) = pt.z; octomap_cloud.push_back(octopt); } } int main(int argc, char** argv) { params().initialize(); // Set up paths Options opts; if (options(argc, argv, opts)) return 1; int count = params().count; std::string transforms_dir = params().h5_dir; std::vector<std::string> pcd_paths; // Read transforms std::vector<std::string> transform_paths; get_range_files(params().pcd_downsampled_dir, 0, 1, count, "%1%.pcd", pcd_paths); get_range_files(transforms_dir, 0, 1, count, "%1%.transform", transform_paths); assert(pcd_paths.size() == transform_paths.size()); // 
Transformations octomap::point3d sensor_origin; MatrixXfRowMajor transform; Eigen::Vector3f init_pos; // Build the octomap boost::progress_display show_progress(pcd_paths.size()); boost::shared_ptr<octomap::OcTree> octree(new octomap::OcTree(params().octree_res)); if (opts.single) { // TODO PARAM octree->setProbHit(1.0); octree->setProbMiss(0.4); octree->setClampingThresMax(1.0); octree->setClampingThresMin(0.0); } else { octree->setProbHit(params().prob_hit); octree->setProbMiss(params().prob_miss); octree->setClampingThresMax(params().clamping_thres_max); octree->setClampingThresMin(params().clamping_thres_min); } boost::shared_ptr<octomap::OcTree> octree_centered(new octomap::OcTree(*octree)); for (int k = 0; k < pcd_paths.size(); k++) { // Load point cloud and transforms std::string pcd_path = pcd_paths[k]; std::string transform_path = transform_paths[k]; if (!fs::exists(transform_path)) { std::cout << transform_path << " does not exist, quitting" << std::endl; break; } H5::H5File transform_file(transform_path, H5F_ACC_RDONLY); load_hdf_dataset(transform_file, "transform", transform, H5::PredType::NATIVE_FLOAT); Eigen::Matrix4f transform_copy(transform); Eigen::Vector4f imu_origin = transform_copy.block<4, 1>(0, 3); //Eigen::Vector4f lidar_origin = params().T_from_i_to_l * imu_origin; sensor_origin(0) = imu_origin(0); sensor_origin(1) = imu_origin(1); sensor_origin(2) = imu_origin(2); pcl::PointCloud<pcl::PointXYZ>::Ptr src_cloud(new pcl::PointCloud<pcl::PointXYZ>()); //std::cout << "Reading " << pcd_path << std::endl; load_cloud(pcd_path, src_cloud); octomap::Pointcloud octomap_cloud; pcl_to_octomap(src_cloud, octomap_cloud); if (opts.debug) std::cout << "cloud size:" << octomap_cloud.size() << std::endl; octree->insertPointCloud(octomap_cloud, sensor_origin); // Following is for octovis so the map is close to centered pcl::PointCloud<pcl::PointXYZ>::Ptr centered_cloud(new pcl::PointCloud<pcl::PointXYZ>()); if (k == 0) init_pos = transform_copy.block<3, 1>(0, 
3); sensor_origin(0) -= init_pos(0); sensor_origin(1) -= init_pos(1); sensor_origin(2) -= init_pos(2); Eigen::Matrix4f T = Eigen::Matrix4f::Identity(); T(0, 3) -= init_pos(0); T(1, 3) -= init_pos(1); T(2, 3) -= init_pos(2); pcl::transformPointCloud(*src_cloud, *centered_cloud, T); pcl_to_octomap(centered_cloud, octomap_cloud); octree_centered->insertPointCloud(octomap_cloud, sensor_origin); if (opts.single) { // Write individual octomaps and clear octree->write(params().octomap_single_files[k]); octree->clear(); octree_centered->clear(); } ++show_progress; } if (!opts.single) { std::cout << "Writing octree of size " << octree->size() << std::endl; octree->write(params().octomap_file); octree_centered->write(params().centered_octomap_file); } return 0; }
28.265306
110
0.636282
[ "vector", "transform" ]
1148b837002970ee85f72f57fcb9a306c606babe
10,229
cc
C++
cryptohome/auth_factor_vault_keyset_converter_unittest.cc
ascii33/platform2
b78891020724e9ff26b11ca89c2a53f949e99748
[ "BSD-3-Clause" ]
null
null
null
cryptohome/auth_factor_vault_keyset_converter_unittest.cc
ascii33/platform2
b78891020724e9ff26b11ca89c2a53f949e99748
[ "BSD-3-Clause" ]
null
null
null
cryptohome/auth_factor_vault_keyset_converter_unittest.cc
ascii33/platform2
b78891020724e9ff26b11ca89c2a53f949e99748
[ "BSD-3-Clause" ]
null
null
null
// Copyright 2022 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "cryptohome/auth_factor_vault_keyset_converter.h" #include <stdint.h> #include <map> #include <memory> #include <vector> #include <base/check.h> #include <brillo/cryptohome.h> #include <cryptohome/proto_bindings/UserDataAuth.pb.h> #include <cryptohome/proto_bindings/auth_factor.pb.h> #include <cryptohome/proto_bindings/rpc.pb.h> #include <gmock/gmock.h> #include <gtest/gtest.h> #include <libhwsec-foundation/crypto/secure_blob_util.h> #include "cryptohome/auth_factor/auth_factor.h" #include "cryptohome/auth_factor/auth_factor_label.h" #include "cryptohome/auth_factor/auth_factor_type.h" #include "cryptohome/credentials.h" #include "cryptohome/crypto.h" #include "cryptohome/filesystem_layout.h" #include "cryptohome/key_objects.h" #include "cryptohome/keyset_management.h" #include "cryptohome/mock_crypto.h" #include "cryptohome/mock_platform.h" #include "cryptohome/mock_tpm.h" #include "cryptohome/vault_keyset.h" #include "cryptohome/vault_keyset.pb.h" using ::testing::NiceMock; namespace { constexpr char kUsername[] = "user"; constexpr char kPinLabel[] = "pin"; constexpr char kLabel[] = "label0"; constexpr char kLabel1[] = "label1"; constexpr char kLabel2[] = "label2"; constexpr char kUserPassword[] = "user_pass"; constexpr char kFirstIndice[] = "0"; constexpr char kSecondIndice[] = "1"; constexpr char kThirdIndice[] = "2"; } // namespace namespace cryptohome { class AuthFactorVaultKeysetConverterTest : public ::testing::Test { public: AuthFactorVaultKeysetConverterTest() : crypto_(&platform_) {} ~AuthFactorVaultKeysetConverterTest() override {} // Not copyable or movable AuthFactorVaultKeysetConverterTest( const AuthFactorVaultKeysetConverterTest&) = delete; AuthFactorVaultKeysetConverterTest& operator=( const AuthFactorVaultKeysetConverterTest&) = delete; 
AuthFactorVaultKeysetConverterTest(AuthFactorVaultKeysetConverterTest&&) = delete; AuthFactorVaultKeysetConverterTest& operator=( AuthFactorVaultKeysetConverterTest&&) = delete; void SetUp() override { // Setup salt for brillo functions. keyset_management_ = std::make_unique<KeysetManagement>( &platform_, &crypto_, std::make_unique<VaultKeysetFactory>()); converter_ = std::make_unique<AuthFactorVaultKeysetConverter>( keyset_management_.get()); file_system_keyset_ = FileSystemKeyset::CreateRandom(); AddUser(kUserPassword); PrepareDirectoryStructure(); } protected: NiceMock<MockPlatform> platform_; Crypto crypto_; FileSystemKeyset file_system_keyset_; std::unique_ptr<KeysetManagement> keyset_management_; std::unique_ptr<AuthFactorVaultKeysetConverter> converter_; struct UserInfo { std::string name; std::string obfuscated; brillo::SecureBlob passkey; Credentials credentials; base::FilePath homedir_path; base::FilePath user_path; }; UserInfo user; void AddUser(const char* password) { std::string obfuscated = brillo::cryptohome::home::SanitizeUserName(kUsername); brillo::SecureBlob passkey(password); Credentials credentials(kUsername, passkey); user = {kUsername, obfuscated, passkey, credentials, UserPath(obfuscated), brillo::cryptohome::home::GetHashedUserPath(obfuscated)}; } void PrepareDirectoryStructure() { ASSERT_TRUE(platform_.CreateDirectory(ShadowRoot())); ASSERT_TRUE(platform_.CreateDirectory( brillo::cryptohome::home::GetUserPathPrefix())); // We only need the homedir path, not the vault/mount paths. 
ASSERT_TRUE(platform_.CreateDirectory(user.homedir_path)); } KeyData SetKeyData(const std::string& label) { KeyData key_data; key_data.set_label(label); return key_data; } void KeysetSetUpWithKeyData(const KeyData& key_data, const std::string& indice) { VaultKeyset vk; vk.Initialize(&platform_, &crypto_); vk.CreateFromFileSystemKeyset(file_system_keyset_); vk.SetKeyData(key_data); user.credentials.set_key_data(key_data); ASSERT_TRUE(vk.Encrypt(user.passkey, user.obfuscated)); ASSERT_TRUE( vk.Save(user.homedir_path.Append(kKeyFile).AddExtension(indice))); } }; // Test that VaultKeysetsToAuthFactors return correct error when there is // no VaultKeyset on the disk. TEST_F(AuthFactorVaultKeysetConverterTest, ConvertToAuthFactorFailWhenListEmpty) { std::map<std::string, std::unique_ptr<AuthFactor>> label_to_auth_factor; EXPECT_EQ( user_data_auth::CRYPTOHOME_ERROR_KEY_NOT_FOUND, converter_->VaultKeysetsToAuthFactors(kUsername, label_to_auth_factor)); EXPECT_TRUE(label_to_auth_factor.empty()); } // Test that VaultKeysetsToAuthFactors lists the existing VaultKeyset on // the disk. TEST_F(AuthFactorVaultKeysetConverterTest, ConvertToAuthFactorListSuccess) { KeysetSetUpWithKeyData(SetKeyData(kLabel), kFirstIndice); std::map<std::string, std::unique_ptr<AuthFactor>> label_to_auth_factor; EXPECT_EQ( user_data_auth::CRYPTOHOME_ERROR_NOT_SET, converter_->VaultKeysetsToAuthFactors(kUsername, label_to_auth_factor)); EXPECT_FALSE(label_to_auth_factor.empty()); EXPECT_EQ(kLabel, label_to_auth_factor[kLabel]->label()); EXPECT_EQ(AuthFactorType::kPassword, label_to_auth_factor[kLabel]->type()); } // Test that VaultKeysetsToAuthFactors lists all the VaultKeysets in the // disk. 
TEST_F(AuthFactorVaultKeysetConverterTest, ConvertToAuthFactorListMultipleVaultKeysetsSuccess) { KeysetSetUpWithKeyData(SetKeyData(kLabel), kFirstIndice); KeysetSetUpWithKeyData(SetKeyData(kLabel1), kSecondIndice); KeysetSetUpWithKeyData(SetKeyData(kLabel2), kThirdIndice); std::map<std::string, std::unique_ptr<AuthFactor>> label_to_auth_factor; EXPECT_EQ( user_data_auth::CRYPTOHOME_ERROR_NOT_SET, converter_->VaultKeysetsToAuthFactors(kUsername, label_to_auth_factor)); EXPECT_EQ(3, label_to_auth_factor.size()); EXPECT_EQ(kLabel, label_to_auth_factor[kLabel]->label()); EXPECT_EQ(AuthFactorType::kPassword, label_to_auth_factor[kLabel]->type()); EXPECT_EQ(kLabel1, label_to_auth_factor[kLabel1]->label()); EXPECT_EQ(AuthFactorType::kPassword, label_to_auth_factor[kLabel1]->type()); EXPECT_EQ(kLabel2, label_to_auth_factor[kLabel2]->label()); EXPECT_EQ(AuthFactorType::kPassword, label_to_auth_factor[kLabel2]->type()); } // Test that PopulateKeyDataForVK returns correct KeyData for the given // label. TEST_F(AuthFactorVaultKeysetConverterTest, ConvertToVaultKeysetDataSuccess) { KeyData test_key_data = SetKeyData(kLabel); KeysetSetUpWithKeyData(test_key_data, kFirstIndice); KeyData key_data; std::string auth_factor_label = kLabel; EXPECT_EQ( user_data_auth::CRYPTOHOME_ERROR_NOT_SET, converter_->PopulateKeyDataForVK(kUsername, auth_factor_label, key_data)); EXPECT_EQ(kLabel, key_data.label()); } // Test that PopulateKeyDataForVK fails to return KeyData for a wrong given // label. TEST_F(AuthFactorVaultKeysetConverterTest, ConvertToVaultKeysetDataFail) { KeyData test_key_data = SetKeyData(kLabel); KeysetSetUpWithKeyData(test_key_data, kFirstIndice); KeyData key_data; std::string auth_factor_label = kLabel1; EXPECT_EQ( user_data_auth::CRYPTOHOME_ERROR_KEY_NOT_FOUND, converter_->PopulateKeyDataForVK(kUsername, auth_factor_label, key_data)); } // Test that AuthFactorToKeyData generates correct KeyData for the given // password label and type. 
TEST_F(AuthFactorVaultKeysetConverterTest, GenerateKeyDataPassword) { KeyData key_data = SetKeyData(kLabel); key_data.set_type(KeyData::KEY_TYPE_PASSWORD); KeyData test_key_data; std::string auth_factor_label = kLabel; AuthFactorType auth_factor_type = AuthFactorType::kPassword; EXPECT_EQ(user_data_auth::CRYPTOHOME_ERROR_NOT_SET, converter_->AuthFactorToKeyData(auth_factor_label, auth_factor_type, test_key_data)); EXPECT_EQ(key_data.label(), test_key_data.label()); EXPECT_EQ(key_data.type(), test_key_data.type()); EXPECT_FALSE(test_key_data.policy().low_entropy_credential()); } // Test that AuthFactorToKeyData generates correct KeyData for the given // password label and type. TEST_F(AuthFactorVaultKeysetConverterTest, GenerateKeyDataPin) { KeyData key_data = SetKeyData(kPinLabel); key_data.set_type(KeyData::KEY_TYPE_PASSWORD); key_data.mutable_policy()->set_low_entropy_credential(true); KeyData test_key_data; std::string auth_factor_label = kPinLabel; AuthFactorType auth_factor_type = AuthFactorType::kPin; EXPECT_EQ(user_data_auth::CRYPTOHOME_ERROR_NOT_SET, converter_->AuthFactorToKeyData(auth_factor_label, auth_factor_type, test_key_data)); EXPECT_EQ(key_data.label(), test_key_data.label()); EXPECT_EQ(key_data.type(), test_key_data.type()); EXPECT_TRUE(test_key_data.policy().low_entropy_credential()); } // Test that VaultKeysetToAuthFactor returns correct AuthFactor for the given // label. TEST_F(AuthFactorVaultKeysetConverterTest, VaultKeysetToAuthFactorSuccess) { KeyData test_key_data = SetKeyData(kLabel); KeysetSetUpWithKeyData(test_key_data, kFirstIndice); KeyData key_data; std::string auth_factor_label = kLabel; std::unique_ptr<AuthFactor> auth_factor = converter_->VaultKeysetToAuthFactor(kUsername, auth_factor_label); EXPECT_NE(nullptr, auth_factor); EXPECT_EQ(kLabel, auth_factor->label()); EXPECT_EQ(AuthFactorType::kPassword, auth_factor->type()); } // Test that VaultKeysetToAuthFactor fails to return AuthFactor for a wrong // given label. 
TEST_F(AuthFactorVaultKeysetConverterTest, VaultKeysetToAuthFactorFail) { KeyData test_key_data = SetKeyData(kLabel); KeysetSetUpWithKeyData(test_key_data, kFirstIndice); KeyData key_data; std::string auth_factor_label = kLabel1; std::unique_ptr<AuthFactor> auth_factor = converter_->VaultKeysetToAuthFactor(kUsername, auth_factor_label); EXPECT_EQ(nullptr, auth_factor); } } // namespace cryptohome
35.891228
80
0.766351
[ "vector" ]
114ad10d09b44fc21904835aa06c31928b36ee17
5,674
cpp
C++
medgpc/src/kernel/c_kernel_SE.cpp
bee-hive/MedGP
596a24ca519900507cce42cb4e2061319cef801e
[ "BSD-3-Clause" ]
25
2018-03-18T18:09:03.000Z
2022-02-24T07:47:33.000Z
medgpc/src/kernel/c_kernel_SE.cpp
bee-hive/MedGP
596a24ca519900507cce42cb4e2061319cef801e
[ "BSD-3-Clause" ]
3
2021-04-12T16:11:00.000Z
2021-04-12T16:26:17.000Z
medgpc/src/kernel/c_kernel_SE.cpp
bee-hive/MedGP
596a24ca519900507cce42cb4e2061319cef801e
[ "BSD-3-Clause" ]
4
2019-04-27T23:18:26.000Z
2021-12-03T20:19:09.000Z
/* ------------------------------------------------------------------------- This is the function file for top kernel class. All other kernels should inherit this class. ------------------------------------------------------------------------- */ #include <iostream> #include <vector> #include <math.h> #include <mkl.h> #include <omp.h> #include "kernel/c_kernel_SE.h" #include "util/global_settings.h" using namespace std; c_kernel_SE::c_kernel_SE(){ kernel_name = "c_kernel_SE"; kernel_hyp_num = 2; } c_kernel_SE::c_kernel_SE(const vector<int> &input_param){ kernel_name = "c_kernel_SE"; kernel_hyp_num = 2; } c_kernel_SE::c_kernel_SE( const vector<int> &input_param, const vector<double> &input_hyp ){ kernel_name = "c_kernel_SE"; kernel_hyp_num = 2; set_kernel_param(input_param); set_kernel_hyp(input_hyp); if(int(input_hyp.size()) != kernel_hyp_num){ cout << "ERROR: mismatch # of hyperparameters! "; cout << "Get " << int(input_hyp.size()) << ", but expect " << kernel_hyp_num << endl; exit(0); } } void c_kernel_SE::set_kernel_param(const vector<int> &input_param){ kernel_param = input_param; } void c_kernel_SE::set_kernel_hyp(const vector<double> &input_hyp){ kernel_hyp = input_hyp; for(int i = 0; i < int(input_hyp.size()); i++){ kernel_hyp[i] = exp(kernel_hyp[i]); } } void c_kernel_SE::compute_self_diag_matrix( const vector<int> &meta, const vector<float> &x, float *&diag_gram_matrix ){ int dim; int i; float diag_value; dim = int(x.size()); diag_value = pow(kernel_hyp[1], 2.0); #pragma omp parallel for private(i) firstprivate(dim, diag_value) for(i = 0; i < dim; i++){ diag_gram_matrix[i] = diag_value; } } void c_kernel_SE::compute_self_gram_matrix( const vector<int> &meta, const vector<float> &x, float *&self_gram_matrix ){ int dim = int(x.size()); int i, j; compute_scale_squared_dist(kernel_hyp[0], x, x, self_gram_matrix); for(i = 0; i < dim; i++){ #pragma omp parallel for private(j) firstprivate(dim, i) for(j = i; j < dim; j++){ self_gram_matrix[ (i * dim) + j ] = 
pow(kernel_hyp[1], 2.0)*exp(-0.5*self_gram_matrix[ (i * dim) + j ]); self_gram_matrix[ (j * dim) + i ] = self_gram_matrix[ (i * dim) + j ]; } } } void c_kernel_SE::compute_self_gradients( const vector<int> &meta, const vector<float> &x, const float *chol_Q, vector<double> &gradients ){ int dim = int(x.size()); int i, j, index; float *rsq, *map_matrix; double derivative; map_matrix = new float[dim*dim]; rsq = new float[dim*dim]; compute_scale_squared_dist(kernel_hyp[0], x, x, rsq); gradients.clear(); gradients.resize(kernel_hyp_num, 0.0); for(int hyp_index = 0; hyp_index < kernel_hyp_num; hyp_index++){ if(hyp_index == 0){ for(i = 0; i < dim; i++){ #pragma omp parallel for private(j, index, derivative) firstprivate(dim, i) for(j = i; j < dim; j++){ index = (i * dim) + j; derivative = pow(kernel_hyp[1], 2.)*exp(-0.5*rsq[index]); derivative *= (rsq[index]); map_matrix[index] = float(derivative); map_matrix[ (j * dim) + i ] = float(derivative); } } } else if(hyp_index == 1){ for(i = 0; i < dim; i++){ #pragma omp parallel for private(j, index, derivative) firstprivate(dim, i) for(j = i; j < dim; j++){ index = (i * dim) + j; derivative = 2.0*pow(kernel_hyp[1], 2.0)*exp(-0.5*rsq[index]); map_matrix[index] = float(derivative); map_matrix[ (j * dim) + i ] = float(derivative); } } } gradients[hyp_index] = cblas_dsdot((dim*dim), chol_Q, 1, map_matrix, 1); gradients[hyp_index] = gradients[hyp_index]/2.0; } delete[] rsq; delete[] map_matrix; } void c_kernel_SE::compute_cross_gram_matrix( const vector<int> &meta, const vector<int> &meta2, const vector<float> &x, const vector<float> &x2, float *&cross_gram_matrix ){ int dim1, dim2; int i, j; dim1 = int(x.size()); dim2 = int(x2.size()); compute_scale_squared_dist(kernel_hyp[0], x, x2, cross_gram_matrix); for(i = 0; i < dim1; i++){ #pragma omp parallel for private(j) firstprivate(dim1, dim2, i) for(j = 0; j < dim2; j++){ cross_gram_matrix[ (i * dim2) + j ] = pow(kernel_hyp[1], 2.)*exp(-0.5*cross_gram_matrix[(i * dim2) + j ]); } } }
33.976048
118
0.470568
[ "vector" ]
114cb1bbc0b477448fc66ee125fb83ece3db6d3a
2,042
cpp
C++
scan.cpp
5cript/icmp-watchdog
7c655008575a0951721f14f7a8dd56b40fe8a3af
[ "MIT" ]
1
2016-06-17T15:42:09.000Z
2016-06-17T15:42:09.000Z
scan.cpp
5cript/icmp-watchdog
7c655008575a0951721f14f7a8dd56b40fe8a3af
[ "MIT" ]
null
null
null
scan.cpp
5cript/icmp-watchdog
7c655008575a0951721f14f7a8dd56b40fe8a3af
[ "MIT" ]
1
2021-07-15T07:30:56.000Z
2021-07-15T07:30:56.000Z
#include "scan.hpp" #include "net_help.h" #include <iostream> //##################################################################################################################### NetworkScanner::NetworkScanner(std::string localAddress) : localAddress_(std::move(localAddress)) { } //--------------------------------------------------------------------------------------------------------------------- std::vector <std::future <std::string>> NetworkScanner::icmpEchoScan() const { std::vector <std::future <std::string>> result; std::string base = getAddressBase(); for (std::size_t i = 1; i != 255; ++i) { std::string addr = base + std::to_string(i); result.emplace_back( std::async(std::launch::async, [this, addr, i]() -> std::string { auto echoResult = ICMPEcho(addr.c_str()); if (!echoResult) return {}; auto actual = echoResult.get(); if (actual.answerReceived && actual.status == 0) return addr; else return {}; }) ); } return result; } //--------------------------------------------------------------------------------------------------------------------- std::string NetworkScanner::getAddressBase() const { return localAddress_.substr(0, localAddress_.find_last_of('.') + 1); } //--------------------------------------------------------------------------------------------------------------------- std::vector <std::string> NetworkScanner::getLocalAddresses() { return ::getLocalAddresses(); } //--------------------------------------------------------------------------------------------------------------------- std::string NetworkScanner::getHostName(std::string const& ip) { return ::getHostName(ip.c_str()); } //#####################################################################################################################
36.464286
120
0.357493
[ "vector" ]
1155c5027da43518130b706a18b1ee6ac162673e
1,984
cc
C++
cpp/24/24a.cc
ckennelly/advent-of-code-2021
2aed5b665ee0ad58a62d08499e562d39bc3d52cd
[ "Apache-2.0" ]
3
2021-12-01T21:05:04.000Z
2021-12-17T05:18:14.000Z
cpp/24/24a.cc
ckennelly/advent-of-code-2021
2aed5b665ee0ad58a62d08499e562d39bc3d52cd
[ "Apache-2.0" ]
null
null
null
cpp/24/24a.cc
ckennelly/advent-of-code-2021
2aed5b665ee0ad58a62d08499e562d39bc3d52cd
[ "Apache-2.0" ]
null
null
null
#include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <cstring> #include <iostream> #include <memory> #include <regex> #include <sstream> #include <string> #include <vector> #include "24/common.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/numbers.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/str_replace.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "absl/types/variant.h" using namespace aoc2021_24; int main(int argc, char** argv) { const std::string input = readAll(); const auto program = ParseProgram(input); const auto summary = ParseSummary(input); const auto relationships = FindRelationships(summary); char digits[14]; memset(digits, 0, sizeof(digits)); auto convertDigits = [&]() { long r = 0; for (int i = 0; i < 14; i++) { r *= 10; r += long(digits[i]); } return r; }; int found_count = 0; for (int i = 0; i < 14; i++) { auto it = relationships.find(i); if (it == relationships.end()) { // We'll revisit when doing the matched pair. continue; } const int a0 = it->first; const int a1 = std::get<0>(it->second); assert(a0 != a1); const long r = std::get<1>(it->second); assert(digits[a0] == 0); assert(digits[a1] == 0); const int amin = std::min(a0, a1); const int amax = std::max(a0, a1); bool found = false; int jmin, jmax; for (jmin = 9; jmin >= 1; jmin--) { jmax = jmin - r; if (jmax < 1 || jmax > 9) { continue; } found = true; break; } assert(found); digits[amin] = jmin; digits[amax] = jmax; found_count += 2; } assert(found_count == 14); // Verify assert(RunProgram(program, absl::MakeSpan(digits)) == 0); absl::PrintF("%d\n", convertDigits()); }
22.804598
59
0.615927
[ "vector" ]
1155f935996b3a94dfa7e7abe0bb6d1a72f42fe0
1,366
hpp
C++
packages/motionPlanning/include/int/MP_AbstractAction.hpp
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
2
2021-01-15T13:27:19.000Z
2021-08-04T08:40:52.000Z
packages/motionPlanning/include/int/MP_AbstractAction.hpp
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
null
null
null
packages/motionPlanning/include/int/MP_AbstractAction.hpp
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
5
2018-05-01T10:39:31.000Z
2022-03-25T03:02:35.000Z
// Copyright 2019-2020 Erik Kouters (Falcons) // SPDX-License-Identifier: Apache-2.0 /* * cAbstractAction.hpp * * Created on: Nov 17, 2017 * Author: Jan Feitsma */ #ifndef MP_ABSTRACTACTION_HPP_ #define MP_ABSTRACTACTION_HPP_ #include <vector> #include <string> #include <boost/lexical_cast.hpp> #include "adapters/MP_RTDBOutputAdapter.hpp" #include "int/cInterfaces.hpp" #include "int/cTimer.hpp" #include "tracing.hpp" #include "FalconsRtDB2.hpp" // actionResult class MP_AbstractAction { public: MP_AbstractAction(); virtual ~MP_AbstractAction(); void setParameters(std::vector<std::string> const &params); virtual actionResultTypeEnum execute() = 0; void connect(cInterfaces *interfaces = NULL); float elapsed(); // time in seconds // shared convenience functions void stopMoving(); actionResultTypeEnum setMotionSetpointAndCalculate(actionTypeEnum action, Position2D const &target, motionTypeEnum motionType, bool autostop = true); ConfigMotionPlanning getConfig(); void setConfig(ConfigMotionPlanning config); protected: std::vector<std::string> _params; bool _isConnected; ConfigMotionPlanning _config; MP_WorldModelInterface *_wm; PathPlanningClient *_pp; MP_RTDBOutputAdapter *_rtdbOutput; cTimer _actionTimer; }; #endif /* MP_ABSTRACTACTION_HPP_ */
24.836364
153
0.735725
[ "vector" ]
115b6607fb073e011a295dc8b7302efef7aae97a
13,010
cpp
C++
V6 Out-Of & On Equilibrium/V6.9 Wolff Algorithm/Arbitrary alpha/wolffTemps.cpp
mattborghi/Thesis
ca186a812f7aa0cefbd5848684d1e3cc36fecaae
[ "MIT" ]
1
2021-10-15T09:02:43.000Z
2021-10-15T09:02:43.000Z
V6 Out-Of & On Equilibrium/V6.9 Wolff Algorithm/Arbitrary alpha/wolffTemps.cpp
mattborghi/Thesis
ca186a812f7aa0cefbd5848684d1e3cc36fecaae
[ "MIT" ]
null
null
null
V6 Out-Of & On Equilibrium/V6.9 Wolff Algorithm/Arbitrary alpha/wolffTemps.cpp
mattborghi/Thesis
ca186a812f7aa0cefbd5848684d1e3cc36fecaae
[ "MIT" ]
null
null
null
// Wolff cluster algorithm for the 2-D Ising Model #include <cmath> #include <cstdlib> #include <iostream> #include <fstream> #include <list> #include "rng.h" using namespace std; double J = +1; // ferromagnetic coupling int Lx, Ly; // number of spins in x and y int N; // number of spins int **s; // the spins double T,Tmax,Tmin,Tstep; // temperature double H = 0; // magnetic field int steps; // number of Monte Carlo steps int idum = -123456789; // seed double alpha[3][3]; double alphaud; void initializeAlpha () { alpha [0][0] = 1; alpha [2][2] = 1; alpha [0][2] = alpha [2][0] = alphaud; } void initialize ( ) { s = new int* [Lx]; for (int i = 0; i < Lx; i++) s[i] = new int [Ly]; for (int i = 0; i < Lx; i++) for (int j = 0; j < Ly; j++) s[i][j] = ran2(idum) < 0.5 ? +1 : -1; // hot start steps = 0; } bool **cluster; // cluster[i][j] = true if i,j belongs double addProbability; // 1 - e^(-2J/kT) void initializeClusterVariables() { // allocate 2-D array for spin cluster labels cluster = new bool* [Lx]; for (int i = 0; i < Lx; i++) cluster[i] = new bool [Ly]; // compute the probability to add a like spin to the cluster addProbability = 1 - exp(-J*(alphaud + 1)/double(T)); } // declare functions to implement Wolff algorithm void growCluster(int i, int j, int clusterSpin); void tryAdd(int i, int j, int clusterSpin); void oneMonteCarloStep() { //cout << "Entered\n" ; // no cluster defined so clear the cluster array for (int i = 0; i < Lx; i++) for (int j = 0; j < Lx; j++) cluster[i][j] = false; //cout << "Cluster value: " << cluster[0][0] << endl; // choose a random spin and grow a cluster int i = int(ran2(idum) * Lx); //cout << "Posx: " << ran2(idum) << endl; int j = int(ran2(idum) * Ly); //cout << "current Pos:" << i << ":" << j << "value" << s[i][j] << endl; growCluster(i, j, s[i][j]); ++steps; } void growCluster(int i, int j, int clusterSpin) { // mark the spin as belonging to the cluster and flip it cluster[i][j] = true; s[i][j] = -s[i][j]; // find the indices of the 4 
neighbors // assuming periodic boundary conditions int iPrev = i == 0 ? Lx-1 : i-1; int iNext = i == Lx-1 ? 0 : i+1; int jPrev = j == 0 ? Ly-1 : j-1; int jNext = j == Ly-1 ? 0 : j+1; // if the neighbor spin does not belong to the // cluster, then try to add it to the cluster if (!cluster[iPrev][j]) tryAdd(iPrev, j, clusterSpin); if (!cluster[iNext][j]) tryAdd(iNext, j, clusterSpin); if (!cluster[i][jPrev]) tryAdd(i, jPrev, clusterSpin); if (!cluster[i][jNext]) tryAdd(i, jNext, clusterSpin); } void tryAdd(int i, int j, int clusterSpin) { if (s[i][j] == clusterSpin) if (ran2(idum) < addProbability){ //printf("Agregado..\n"); growCluster(i, j, clusterSpin); } } // variables to measure chi and its error estimate double chi; // current susceptibility per spin double chiSum; // accumulate chi values double chiSqdSum; // accumulate chi^2 values int nChi; // number of values accumulated double chiAbs; // energy variables double E; // current energy double Esum; // sum of energy double E2sum; // sum of energy squared // magnetization variables double M; double MabsSum; double M2sum; double M4sum; // anti magnet variables double aM,aMabsSum; double aM2sum,aM4sum; // variables to measure the heat capacity and its error estimate double heatCap; // current heat capactiy per spin double heatCapSum; // accumulate chi values int nheatCap; // number of values accumulated // variables to measure autocorrelation time int nSave = 10; // number of values to save double cChiSum; // accumulate list<double> chiSave; // the saved values double *cChi; // correlation sums int nCorr; // number of values accumulated // variables to estimate fluctuations by blocking int stepsPerBlock = 1000; // suggested in Wolff paper double chiBlock; // used to calculate block average double chiBlockSum; // accumulate block <chi> values double chiBlockSqdSum; // accumulate block <chi>^2 values int stepInBlock; // number of steps in current block int blocks; // number of blocks void initializeObservables() { 
// Initialize chi observables chiSum = chiSqdSum = 0; nChi = 0; chiBlock = chiBlockSum = chiBlockSqdSum = 0; stepInBlock = blocks = 0; cChiSum = 0; cChi = new double [nSave + 1]; for (int i = 0; i <= nSave; i++) cChi[i] = 0; nCorr = 0; // Initialize heat capacity observables heatCapSum = 0; nheatCap = 0; // Initialize energy observables Esum = E2sum = 0; // Initialize magnetization observables MabsSum = M2sum = M4sum = 0; // initialize anti magnet obser aMabsSum = aM2sum = aM4sum = 0; } void measureObservables() { // observables are derived from the magnetic moment // & energy derived observables E = 0; M = 0; int black = 0, white = 0; double magnetBlack = 0, magnetWhite=0; for (int i = 0; i < Lx; i++){ //printf("\n"); for (int j = 0; j < Ly; j++){ // Magnetization M += s[i][j]; /*if (s[i][j] == 1) printf("*"); else printf(" "); */ // PBC // find the indices of the 4 neighbors // assuming periodic boundary conditions int iPrev = i == 0 ? Lx-1 : i-1; int iNext = i == Lx-1 ? 0 : i+1; int jPrev = j == 0 ? Ly-1 : j-1; int jNext = j == Ly-1 ? 
0 : j+1; // Energy E += - J * s[i][j] * ( alpha[s[i][j]+1][s[iPrev][j]+1]* s[iPrev][j] + alpha[s[i][j]+1][s[iNext][j]+1]*s[iNext][j] +\ alpha[s[i][j]+1][s[i][jPrev]+1]*s[i][jPrev] + alpha[s[i][j]+1][s[i][jNext]+1]*s[i][jNext] ); if ( ( (i%2 == 0) && (j%2 == 0) ) || ( (i%2 != 0) && (j%2 != 0) ) ) { //printf("%d %d\n",i,j ); magnetBlack += s[i][j]; black++; //printf("black: %d %d %d %d\n",black,i,j,s[i][j] ); } else{ magnetWhite += s[i][j]; white++; //printf("white: %d %d %d %d\n",white,i,j,s[i][j] ); } //end if } } //Divide energy by two E = E/double(2); chi = M * double(M) / double(N); // <M^2> // accumulate values chiSum += chi; chiSqdSum += chi * chi; ++nChi; Esum += E; E2sum += E*E; MabsSum += abs(M); M2sum += M*M; M4sum += M*M*M*M; // anti magnet quantities magnetBlack = magnetBlack / double(black); magnetWhite = magnetWhite / double(white); aM = abs(magnetBlack - magnetWhite) / double(2); //printf("\nnew black: %f white : %f aM: %f\n", magnetBlack, magnetWhite,aM); aMabsSum += aM; aM2sum += aM*aM; aM4sum += aM*aM*aM*aM; //printf("%f %f %f %f\n",aM,aMabsSum,aM2sum,aM4sum ); //cin.get(); // --------- ERRORS ----------- // accumulate correlation values if (chiSave.size() == nSave) { cChiSum += chi; cChi[0] += chi * chi; ++nCorr; list<double>::const_iterator iter = chiSave.begin(); for (int i = 1; i <= nSave; i++) cChi[i] += *iter++ * chi; chiSave.pop_back(); // remove oldest saved chi value } chiSave.push_front(chi); // add current chi value // accumulate block values chiBlock += chi; ++stepInBlock; if (stepInBlock == stepsPerBlock) { chiBlock /= stepInBlock; chiBlockSum += chiBlock; chiBlockSqdSum += chiBlock * chiBlock; ++blocks; stepInBlock = 0; chiBlock = 0; } } // averages of observables double chiAve; // average susceptibility per spin double Eave; // average energy per spin double heatCapAve; // average heat capacity per spin double MabsAve; // average absolute magnetization per spin double cumAve; // average cumulant double chiAbsAve; // average abs 
susceptibility per spin double aMabsAve; double achiAbsAve; double acumAve; double chiError; // Monte Carlo error estimate double chiStdDev; // Standard deviation error from blocking double tauChi; // autocorrelation time double tauEffective; // effective autocorrelation time void computeAverages() { // average susceptibility per spin chiAve = chiSum / nChi / T; Eave = Esum / nChi / N; heatCapAve = ( E2sum/nChi - pow((Esum/nChi),2) )/double(N)/pow(T,2); // beta^2*(<E^2> - <E>^2)/N MabsAve = MabsSum / nChi / double(N); chiAbsAve = ( M2sum/nChi - pow((MabsSum/nChi),2) ) / double(N)/T; cumAve = 1 - ( M4sum/nChi / (3*pow(M2sum/nChi,2)) ); // 1 - <M^4>/(3*<M^2>^2) // anti magnet aMabsAve = aMabsSum / nChi; // double(N); achiAbsAve = ( aM2sum/nChi - pow((aMabsSum/nChi),2) )/T; // double(N)/T; acumAve = 1 - ( aM4sum/nChi / (3*pow(aM2sum/nChi,2)) ); // Monte Carlo error estimate chiError = chiSqdSum / nChi; chiError = sqrt(chiError - chiAve * chiAve); chiError /= sqrt(double(nChi)); // exponential correlation time tauChi = 0; double cAve = cChiSum / nCorr; double c0 = cChi[0] / nCorr - cAve * cAve; for (int i = 1; i <= nSave; i++) { double c = (cChi[i] / nCorr - cAve * cAve) / c0; if (c > 0.01) { tauChi += -i/log(c); } else { tauChi /= (i - 1); break; } if (i == nSave) tauChi /= nSave; } // standard deviation from blocking double chiBlockAve = chiBlockSum / blocks; chiStdDev = chiBlockSqdSum / blocks; chiStdDev = sqrt(chiStdDev - chiBlockAve * chiBlockAve); chiStdDev /= sqrt(double(blocks)); // effective autocorrelation time tauEffective = chiStdDev / chiError; tauEffective *= tauEffective / 2; } int main() { cout << " Two-dimensional Ising Model - Wolff Cluster Algorithm\n" << " -----------------------------------------------------\n" << " Enter number of spins L in each direction: "; cin >> Lx; Ly = Lx; N = Lx * Ly; cout << " Enter MAXIMUM temperature T: "; cin >> Tmax; cout << " Enter MINIMUM temperature T: "; cin >> Tmin; cout << " Enter STEP temperatures: "; cin >> 
Tstep; cout << " Enter number of Monte Carlo steps: "; int MCSteps; cin >> MCSteps; cout << " Enter alphaud a: "; cin >> alphaud; initializeAlpha(); int thermSteps = MCSteps / 5; int numSteps = int( (Tmax - Tmin )/Tstep); for (int t = 0; t <= numSteps; ++t) { T = Tmax - t*Tstep; cout << "current temperature: " << T << endl; //cout << s[19][19] << "\n"; initialize(); initializeClusterVariables(); /*cout << " Performing " << thermSteps << " thermalization steps ..." << flush;*/ for (int i = 0; i < thermSteps; i++) oneMonteCarloStep(); //cout << " done\n Performing production steps ..." << flush; initializeObservables(); for (int i = 0; i < MCSteps; i++) { oneMonteCarloStep(); measureObservables(); } //cout << " done" << endl; computeAverages(); /*cout << "\n Average chi per spin = " << chiAve << "\n Monte Carlo error estimate = " << chiError << "\n Autocorrelation time tau = " << tauChi << "\n Std. Dev. using blocking = " << chiStdDev << "\n Effective tau = " << tauEffective << "\n Average energy per spin = " << Eave << "\n Average heat cap per spin = " << heatCapAve << "\nAverage abs magnet per spin = " << MabsAve << "\n Average abs chi per spin = " << chiAbsAve << "\n Average cumulant = " << cumAve << endl; */ ofstream file("Output.dat", ofstream::app); // append file << Lx << '\t' << alphaud << '\t' << T << '\t' << chiAve << '\t' << chiError << '\t' << tauChi << '\t' << chiStdDev << '\t' << tauEffective \ << '\t' << Eave << '\t' << heatCapAve << '\t' << MabsAve << '\t' << chiAbsAve << '\t' << cumAve \ << '\t' << aMabsAve << '\t' << achiAbsAve << '\t' << acumAve<< '\n'; file.close(); } }
33.704663
153
0.526211
[ "model" ]
115d34978d1e675281075827ad2ccc0b410e62e6
11,283
cpp
C++
3dpplayer/3dpplayer.cpp
hypernumbernet/3dp-player
2ab338384cf3d2173db1dfb12bf9f5c725c0e80f
[ "MIT" ]
2
2017-03-11T14:48:16.000Z
2018-11-22T21:34:35.000Z
3dpplayer/3dpplayer.cpp
hypernumbernet/3dp-player
2ab338384cf3d2173db1dfb12bf9f5c725c0e80f
[ "MIT" ]
null
null
null
3dpplayer/3dpplayer.cpp
hypernumbernet/3dp-player
2ab338384cf3d2173db1dfb12bf9f5c725c0e80f
[ "MIT" ]
null
null
null
// @author Tomohito Inoue <hypernumbernet@users.noreply.github.com> #include "stdafx.h" using namespace my_math; //再描画時間間隔(ミリ秒) //コメントアウトすると、待ち時間なし #define RECALL_TIME 16 //#define RECALL_TIME 1000 // 視点位置初期値 #define EYE_RADIUS_START 8 //球のパラメータ #define SPHERE_RADIUS 0.01 #define SPHERE_SLICES 8 #define SPHERE_STACKS 4 #define NUM_COLOR 6 //データファイル関連 #define VERSION 1 #define HEADER_SIZE 256 //using namespace math; //粒子数 int nParticle; Vector3<GLdouble> *locat; HANDLE hFile; DWORD wReadSize; GLfloat black[] = { 0.0, 0.0, 0.0, 0.0 }; GLfloat white[] = { 1.0, 1.0, 1.0, 1.0 }; GLfloat ambient_set[][4] = { { 0.5, 0.2, 0.2, 1.0 },//red { 0.2, 0.2, 0.5, 1.0 },//blue { 0.2, 0.5, 0.2, 1.0 },//green { 0.4, 0.4, 0.2, 1.0 },//yellow { 0.4, 0.2, 0.4, 1.0 }, { 0.2, 0.4, 0.4, 1.0 } }; GLfloat diffuse_set[][4] = { { 0.8, 0.0, 0.0, 1.0 }, { 0.0, 0.0, 0.8, 1.0 }, { 0.0, 0.8, 0.0, 1.0 }, { 0.8, 0.8, 0.0, 1.0 }, { 0.8, 0.0, 0.8, 1.0 }, { 0.0, 0.8, 0.8, 1.0 } }; GLfloat specular[] = {0.0}; GLfloat shininess[] = {1.0}; GLfloat emission_set[][4] = { { 0.8, 0.2, 0.2, 1.0 },//red { 0.6, 0.0, 0.0, 1.0 }, { 0.8, 0.8, 0.2, 1.0 },//yellow { 0.5, 0.5, 0.8, 1.0 },//blue { 0.2, 0.2, 0.8, 1.0 }, { 0.8, 0.8, 0.8, 1.0 } }; GLfloat light0_ambient[] = { 0, 0, 0, 0 }; GLfloat light0_diffuse[] = { 0, 0, 0, 0 }; GLfloat light0_specular[] = { 0, 0, 0, 0 }; GLfloat light0_position[] = { 0, 0, 0, 0 }; GLfloat light0_spot_direction[] = { 0, 0, 0 }; GLfloat light1_ambient[] = { 1.0 , 1.0 , 1.0, 1.0}; GLfloat light1_diffuse[] = { 0.8 , 0.8 , 0.8, 1.0}; GLfloat light1_specular[] = { 0.5 , 0.5 , 0.5, 1.0}; GLfloat light1_position[] = { 9.0 , 9.0 , 5.0, 1.0}; GLfloat light1_spot_direction[] = { 0.0 , 0.0 , 0.0}; GLint mouse_down_x, mouse_down_y; GLdouble eye_theta, eye_phi, eye_radius; GLuint xyzLines; GLuint xyzGrid; int xyzLinesDisplay = 0; bool play = true; int colorSet = 0; void keyboard(unsigned char key, int x, int y) { switch(key) { //Grid line case 'g' : ++ xyzLinesDisplay; if (xyzLinesDisplay == 3) 
xyzLinesDisplay = 0; break; //Pause/Play case 'p' : play = ! play; break; //Back one frame case 'b' : wReadSize = SetFilePointer(hFile, -nParticle * 48, NULL, FILE_CURRENT); if (wReadSize == INVALID_SET_FILE_POINTER) SetFilePointer(hFile, HEADER_SIZE, NULL, FILE_BEGIN); ReadFile(hFile , locat , nParticle * 24 , &wReadSize , NULL); break; //Forward one frame case 'f' : ReadFile(hFile , locat , nParticle * 24 , &wReadSize , NULL); break; //jump Start case 's' : SetFilePointer(hFile, HEADER_SIZE, NULL, FILE_BEGIN); ReadFile(hFile , locat , nParticle * 24 , &wReadSize , NULL); break; case 'c' : ++ colorSet; if (colorSet == 2) colorSet = 0; break; } } void skeyboard(int key, int x, int y) { switch(key) { case GLUT_KEY_PAGE_UP : eye_radius -= 0.2; break; case GLUT_KEY_PAGE_DOWN : eye_radius += 0.2; break; case GLUT_KEY_LEFT : eye_theta -= 0.4; break; case GLUT_KEY_RIGHT : eye_theta += 0.4; break; case GLUT_KEY_UP : eye_phi += 0.4; break; case GLUT_KEY_DOWN : eye_phi -= 0.4; break; case GLUT_KEY_F1 : MessageBox ( NULL, TEXT("[Mouse Drag] : Rotation\n\n[Page Up/Down] : Zoom In/Out\n[Up/Down/Right/Left] : Rotation\n\ng : Grid line\np : Pause/Play\nb : play Back\nf : play Forward\ns : jump Start\nc : change Color set"), TEXT("Help"), MB_OK ); break; } } void mouse(int button, int state, int x, int y) { if (button == GLUT_LEFT_BUTTON) { if (state == GLUT_DOWN) { mouse_down_x = x; mouse_down_y = y; } } } void motion(int x, int y) { eye_theta -= (x - mouse_down_x); eye_phi += (y - mouse_down_y); mouse_down_x = x; mouse_down_y = y; } void gridline(double i, double j, double k, double di, double dj, double dk) { Quaternion<double> q1, q2; Vector3<double> v1; double r = 32.0; int count = 0; q1.i0 = 16.0; q1.i1 = i; q1.i2 = j; q1.i3 = k; q1.Normalize(); q2 = Quaternion<double>::Exp(di, dj, dk); for (count = 0; count < 2; ++count) { v1 = q1.LnV3() * r; glVertex3d(v1.x, v1.y, v1.z); q1 *= q2; v1 = q1.LnV3() * r; glVertex3d(v1.x, v1.y, v1.z); } q1.i0 = 16.0; q1.i1 = i; q1.i2 = j; 
q1.i3 = k; q1.Normalize(); q2 = Quaternion<double>::Exp(-di, -dj, -dk); for (count = 0; count < 2; ++count) { v1 = q1.LnV3() * r; glVertex3d(v1.x, v1.y, v1.z); q1 *= q2; v1 = q1.LnV3() * r; glVertex3d(v1.x, v1.y, v1.z); } } void scene(void) { double j, k; xyzLines = glGenLists(1); glNewList(xyzLines, GL_COMPILE); glColor3d(1.0, 0.0, 0.0); glBegin(GL_LINES); glVertex3d( 0.0, 0.0, 0.0); glVertex3d(10.0, 0.0, 0.0); glEnd(); glColor3d(0.0, 1.0, 0.0); glBegin(GL_LINES); glVertex3d( 0.0, 0.0, 0.0); glVertex3d( 0.0, 10.0, 0.0); glEnd(); glColor3d(0.0, 0.0, 1.0); glBegin(GL_LINES); glVertex3d( 0.0, 0.0, 0.0); glVertex3d( 0.0, 0.0, 10.0); glEnd(); glEndList(); xyzGrid = glGenLists(1); glNewList(xyzGrid, GL_COMPILE); glColor3d(1.0, 0.0, 0.0); glBegin(GL_LINES); k = 0; for (j = -3.0; j < 4.0; j++) gridline(0, j, k, 0.1, 0.0, 0.0); j = 0; for (k = -3.0; k < 4.0; k++) gridline(0, j, k, 0.1, 0.0, 0.0); glEnd(); glColor3d(0.0, 1.0, 0.0); glBegin(GL_LINES); k = 0; for (j = -3.0; j < 4.0; j++) gridline(j, 0, k, 0.0, 0.1, 0.0); j = 0; for (k = -3.0; k < 4.0; k++) gridline(j, 0, k, 0.0, 0.1, 0.0); glEnd(); glColor3d(0.0, 0.0, 1.0); glBegin(GL_LINES); k = 0; for (j = -3.0; j < 4.0; j++) gridline(j, k, 0, 0.0, 0.0, 0.1); j = 0; for (k = -3.0; k < 4.0; k++) gridline(j, k, 0, 0.0, 0.0, 0.1); glEnd(); glEndList(); } void display(void) { if (play) ReadFile(hFile , locat , nParticle * 24 , &wReadSize , NULL); if (eye_phi >= 90.0) { eye_phi = 89.9999; } else if (eye_phi <= -90.0) { eye_phi = -89.9999; } GLdouble theta = DegreeToRadian(eye_theta); GLdouble phi = DegreeToRadian(eye_phi ); int i; int j; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); gluLookAt ( eye_radius * sin(theta) * cos(phi), eye_radius * sin(phi), eye_radius * cos(theta) * cos(phi), 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 ); if (xyzLinesDisplay == 1) glCallList(xyzLines); else if(xyzLinesDisplay == 2) glCallList(xyzGrid); glEnable(GL_LIGHTING); if (colorSet == 0) { glEnable(GL_LIGHT0); glDisable(GL_LIGHT1); 
glMaterialfv(GL_FRONT, GL_AMBIENT, white); glMaterialfv(GL_FRONT, GL_DIFFUSE, white); glMaterialfv(GL_FRONT, GL_SPECULAR , black); glMaterialfv(GL_FRONT, GL_SHININESS, black); } else if (colorSet == 1) { glEnable(GL_LIGHT1); glDisable(GL_LIGHT0); glLightfv(GL_LIGHT1, GL_POSITION , light1_position ); glLightfv(GL_LIGHT1, GL_SPOT_DIRECTION, light1_spot_direction); glMaterialfv(GL_FRONT, GL_SPECULAR , specular ); glMaterialfv(GL_FRONT, GL_SHININESS, shininess); glMaterialfv(GL_FRONT, GL_EMISSION , black ); } if (wReadSize == nParticle * 24) { for (i = 0; i < nParticle; i++) { glPushMatrix(); glTranslated(locat[i].x, locat[i].y, locat[i].z); j = i % NUM_COLOR; if (colorSet == 0) glMaterialfv(GL_FRONT, GL_EMISSION, emission_set[j]); else if (colorSet == 1) { glMaterialfv(GL_FRONT, GL_AMBIENT, ambient_set[j]); glMaterialfv(GL_FRONT, GL_DIFFUSE, diffuse_set[j]); } glutSolidSphere(SPHERE_RADIUS, SPHERE_SLICES, SPHERE_STACKS); glPopMatrix(); } } glDisable(GL_LIGHTING); glutSwapBuffers(); } void reshape(int width, int height) { GLdouble ratio = (GLdouble)height / (GLdouble)width; glViewport(0, 0, width, height); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-1.0, 1.0, -ratio, ratio, 1.0, 100.0); glMatrixMode(GL_MODELVIEW); } #ifdef RECALL_TIME void timer(int value) { glutTimerFunc(RECALL_TIME, timer, 0); glutPostRedisplay(); } #else void idle() { glutPostRedisplay(); } #endif void init(void) { //int i, j; //初期視点位置 eye_theta = 30.0; eye_phi = 15.0; eye_radius = EYE_RADIUS_START; glClearColor(0.0, 0.0, 0.0, 0.0); glEnable(GL_DEPTH_TEST); glShadeModel(GL_FLAT); glEnable(GL_CULL_FACE); glCullFace(GL_BACK); glLightfv(GL_LIGHT0, GL_AMBIENT , light0_ambient ); glLightfv(GL_LIGHT0, GL_DIFFUSE , light0_diffuse ); glLightfv(GL_LIGHT0, GL_SPECULAR , light0_specular ); glLightfv(GL_LIGHT0, GL_POSITION , light0_position ); glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light0_spot_direction); glEnable(GL_LIGHT0); glLightfv(GL_LIGHT1, GL_AMBIENT , light1_ambient ); 
glLightfv(GL_LIGHT1, GL_DIFFUSE , light1_diffuse ); glLightfv(GL_LIGHT1, GL_SPECULAR , light1_specular ); glLightfv(GL_LIGHT1, GL_POSITION , light1_position ); glLightfv(GL_LIGHT1, GL_SPOT_DIRECTION, light1_spot_direction); //glEnable(GL_LIGHT1); glDisable(GL_LIGHTING); } //WinMainで始めないと、コンソール窓が開いてしまう。 int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR lpCmdLine, int nCmdShow) //int _tmain(int argc, _TCHAR* argv[]) { //コマンドライン引数を得る。UnicodeはWinMainから取得しにくい。 int nArgs; LPWSTR *lplpszArgs; lplpszArgs = CommandLineToArgvW(GetCommandLineW(), &nArgs); if (nArgs < 2) { MessageBox(NULL, TEXT("データファイルをドロップしてください。"), TEXT("エラー"), MB_OK); return 1; } hFile = CreateFileW ( //TEXT("out.dat") , //lpCmdLine, lplpszArgs[1] , GENERIC_READ , 0 , NULL , OPEN_EXISTING , FILE_FLAG_SEQUENTIAL_SCAN/*FILE_ATTRIBUTE_NORMAL*/ , NULL ); if (hFile == INVALID_HANDLE_VALUE) { MessageBox(NULL, TEXT("ファイルを開けませんでした"), TEXT("エラー"), MB_OK); //MessageBox(NULL, GetCommandLine(), TEXT("エラー"), MB_OK); return 1; } int header[HEADER_SIZE / 4]; ReadFile(hFile , header , HEADER_SIZE , &wReadSize , NULL); if (wReadSize != HEADER_SIZE) { MessageBox(NULL, TEXT("ヘッダ読み込みエラー"), TEXT("エラー"), MB_OK); return 1; } nParticle = header[0]; locat = new Vector3<double> [nParticle]; //glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(640, 480); glutInitWindowPosition(0, 0); glutCreateWindow("3D Particles Player (F1 for HELP)"); glutDisplayFunc (display ); glutReshapeFunc (reshape ); glutMotionFunc (motion ); glutKeyboardFunc(keyboard ); glutSpecialFunc (skeyboard); glutMouseFunc (mouse ); init(); scene(); #ifdef RECALL_TIME glutTimerFunc(RECALL_TIME, timer, 0); #else glutIdleFunc(idle); #endif glutMainLoop(); //これ以下は実行されない。 //delete [] locat; //CloseHandle(hFile); return 0; }
22.21063
206
0.587698
[ "3d" ]
11656a487c82bb6e90ceb4f480ab54bb94a97130
5,452
cc
C++
flare/fiber/fiber.cc
AriCheng/flare
b2c84588fe4ac52f0875791d22284d7e063fd057
[ "CC-BY-3.0", "BSD-2-Clause", "BSD-3-Clause" ]
868
2021-05-28T04:00:22.000Z
2022-03-31T08:57:14.000Z
flare/fiber/fiber.cc
AriCheng/flare
b2c84588fe4ac52f0875791d22284d7e063fd057
[ "CC-BY-3.0", "BSD-2-Clause", "BSD-3-Clause" ]
33
2021-05-28T08:44:47.000Z
2021-09-26T13:09:21.000Z
flare/fiber/fiber.cc
AriCheng/flare
b2c84588fe4ac52f0875791d22284d7e063fd057
[ "CC-BY-3.0", "BSD-2-Clause", "BSD-3-Clause" ]
122
2021-05-28T08:22:23.000Z
2022-03-29T09:52:09.000Z
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this // file except in compliance with the License. You may obtain a copy of the // License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations under // the License. #include "flare/fiber/fiber.h" #include <utility> #include <vector> #include "flare/base/likely.h" #include "flare/base/random.h" #include "flare/fiber/detail/fiber_entity.h" #include "flare/fiber/detail/scheduling_group.h" #include "flare/fiber/detail/waitable.h" #include "flare/fiber/execution_context.h" #include "flare/fiber/runtime.h" namespace flare { namespace { fiber::detail::SchedulingGroup* GetSchedulingGroup(std::size_t id) { if (FLARE_LIKELY(id == Fiber::kNearestSchedulingGroup)) { return fiber::detail::NearestSchedulingGroup(); } else if (id == Fiber::kUnspecifiedSchedulingGroup) { return fiber::detail::GetSchedulingGroup( Random<std::size_t>(0, fiber::GetSchedulingGroupCount() - 1)); } else { return fiber::detail::GetSchedulingGroup(id); } } } // namespace Fiber::Fiber() = default; Fiber::~Fiber() { FLARE_CHECK(!joinable(), "You need to call either `join()` or `detach()` prior to destroy " "a fiber."); } Fiber::Fiber(const Attributes& attr, Function<void()>&& start) { // Choose a scheduling group for running this fiber. auto sg = GetSchedulingGroup(attr.scheduling_group); FLARE_CHECK(sg, "No scheduling group is available?"); if (attr.execution_context) { // Caller specified an execution context, so we should wrap `start` to run // in the execution context. 
// // `ec` holds a reference to `attr.execution_context`, it's released when // `start` returns. start = [start = std::move(start), ec = RefPtr(ref_ptr, attr.execution_context)] { ec->Execute(start); }; } // `desc` will cease to exist once `start` returns. We don't own it. auto desc = fiber::detail::NewFiberDesc(); desc->start_proc = std::move(start); desc->scheduling_group_local = attr.scheduling_group_local; desc->system_fiber = attr.system_fiber; // If `join()` is called, we'll sleep on this. desc->exit_barrier = object_pool::GetRefCounted<fiber::detail::ExitBarrier>(); join_impl_ = desc->exit_barrier; // Schedule the fiber. if (attr.launch_policy == fiber::Launch::Post) { sg->StartFiber(desc); } else { sg->SwitchTo(fiber::detail::GetCurrentFiberEntity(), fiber::detail::InstantiateFiberEntity(sg, desc)); } } void Fiber::detach() { FLARE_CHECK(joinable(), "The fiber is in detached state."); join_impl_ = nullptr; } void Fiber::join() { FLARE_CHECK(joinable(), "The fiber is in detached state."); join_impl_->Wait(); join_impl_.Reset(); } bool Fiber::joinable() const { return !!join_impl_; } Fiber::Fiber(Fiber&&) noexcept = default; Fiber& Fiber::operator=(Fiber&&) noexcept = default; void StartFiberFromPthread(Function<void()>&& start_proc) { fiber::internal::StartFiberDetached(std::move(start_proc)); } namespace fiber::internal { void StartFiberDetached(Function<void()>&& start_proc) { auto desc = detail::NewFiberDesc(); desc->start_proc = std::move(start_proc); FLARE_CHECK(!desc->exit_barrier); desc->scheduling_group_local = false; desc->system_fiber = false; fiber::detail::NearestSchedulingGroup()->StartFiber(desc); } void StartSystemFiberDetached(Function<void()>&& start_proc) { auto desc = detail::NewFiberDesc(); desc->start_proc = std::move(start_proc); FLARE_CHECK(!desc->exit_barrier); desc->scheduling_group_local = false; desc->system_fiber = true; fiber::detail::NearestSchedulingGroup()->StartFiber(desc); } void StartFiberDetached(Fiber::Attributes&& attrs, 
Function<void()>&& start_proc) { auto sg = GetSchedulingGroup(attrs.scheduling_group); if (attrs.execution_context) { start_proc = [start_proc = std::move(start_proc), ec = RefPtr(ref_ptr, std::move(attrs.execution_context))] { ec->Execute(start_proc); }; } auto desc = detail::NewFiberDesc(); desc->start_proc = std::move(start_proc); FLARE_CHECK(!desc->exit_barrier); desc->scheduling_group_local = attrs.scheduling_group_local; desc->system_fiber = attrs.system_fiber; if (attrs.launch_policy == fiber::Launch::Post) { sg->StartFiber(desc); } else { sg->SwitchTo(fiber::detail::GetCurrentFiberEntity(), detail::InstantiateFiberEntity(sg, desc)); } } void BatchStartFiberDetached(std::vector<Function<void()>>&& start_procs) { std::vector<fiber::detail::FiberDesc*> descs; for (auto&& e : start_procs) { auto desc = fiber::detail::NewFiberDesc(); desc->start_proc = std::move(e); FLARE_CHECK(!desc->exit_barrier); desc->scheduling_group_local = false; desc->system_fiber = false; descs.push_back(desc); } fiber::detail::NearestSchedulingGroup()->StartFibers( descs.data(), descs.data() + descs.size()); } } // namespace fiber::internal } // namespace flare
31.514451
80
0.693324
[ "vector" ]
1170ac4f10c2a7fb31a337429dcf472c403aa979
4,326
cpp
C++
lib/djvCore/OSUnix.cpp
belzecue/DJV
94fb63a2f56cc0c41ab5d518ef9f2e0590c295c0
[ "BSD-3-Clause" ]
456
2018-10-06T00:07:14.000Z
2022-03-31T06:14:22.000Z
lib/djvCore/OSUnix.cpp
belzecue/DJV
94fb63a2f56cc0c41ab5d518ef9f2e0590c295c0
[ "BSD-3-Clause" ]
438
2018-10-31T15:05:51.000Z
2022-03-31T09:01:24.000Z
lib/djvCore/OSUnix.cpp
belzecue/DJV
94fb63a2f56cc0c41ab5d518ef9f2e0590c295c0
[ "BSD-3-Clause" ]
54
2018-10-29T10:18:36.000Z
2022-03-23T06:56:11.000Z
// SPDX-License-Identifier: BSD-3-Clause // Copyright (c) 2020 Darby Johnston // All rights reserved. #include <djvCore/OS.h> #include <djvCore/Error.h> #include <djvCore/String.h> #include <djvCore/StringFormat.h> #if defined(DJV_PLATFORM_MACOS) #include <ApplicationServices/ApplicationServices.h> #include <CoreFoundation/CFBundle.h> #include <CoreServices/CoreServices.h> #endif // DJV_PLATFORM_MACOS #include <sstream> #include <sys/ioctl.h> #if defined(DJV_PLATFORM_MACOS) #include <sys/types.h> #include <sys/sysctl.h> #else // DJV_PLATFORM_MACOS #include <sys/sysinfo.h> #endif // DJV_PLATFORM_MACOS #include <sys/utsname.h> #include <pwd.h> #include <unistd.h> //#pragma optimize("", off) namespace djv { namespace Core { namespace OS { std::string getInformation() { std::string out; ::utsname info; uname(&info); std::stringstream s; s << info.sysname << " " << info.release << " " << info.machine; out = s.str(); return out; } size_t getRAMSize() { size_t out = 0; #if defined(DJV_PLATFORM_MACOS) int name[2] = { CTL_HW, HW_MEMSIZE }; u_int namelen = sizeof(name) / sizeof(name[0]); uint64_t size = 0; size_t len = sizeof(size); if (0 == sysctl(name, namelen, &size, &len, NULL, 0)) { out = static_cast<size_t>(size); } #else // DJV_PLATFORM_MACOS struct sysinfo info; if (0 == sysinfo(&info)) { out = info.totalram; } #endif // DJV_PLATFORM_MACOS return out; } int getTerminalWidth() { int out = 80; struct winsize ws; ws.ws_col = 0; if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == -1) { ws.ws_col = 80; } out = ws.ws_col; return out; } bool getEnv(const std::string& name, std::string& out) { if (const char* p = ::getenv(name.c_str())) { out = std::string(p); return true; } return false; } bool setEnv(const std::string& name, const std::string& value) { return ::setenv(name.c_str(), value.c_str(), 1) == 0; } bool clearEnv(const std::string& name) { return ::unsetenv(name.c_str()) == 0; } std::string getUserName() { std::string out; if (struct passwd* buf = ::getpwuid(::getuid())) 
{ out = std::string(buf->pw_name); } return out; } void openURL(const std::string& value) { #if defined(DJV_PLATFORM_MACOS) CFURLRef url = CFURLCreateWithBytes( NULL, (UInt8*)value.c_str(), value.size(), kCFStringEncodingASCII, NULL); LSOpenCFURLRef(url, 0); CFRelease(url); #else // DJV_PLATFORM_MACOS std::stringstream ss; ss << "xdg-open" << " " << value; int r = system(ss.str().c_str()); if (r != 0) { std::vector<std::string> messages; //! \todo How can we translate this? messages.push_back(String::Format("{0}: {1}"). arg(value). arg(DJV_TEXT("error_url_cannot_open"))); messages.push_back(String::Format(DJV_TEXT("error_url_code")). arg(value)); throw std::runtime_error(String::join(messages, ' ')); } #endif // DJV_PLATFORM_MACOS } } // namespace OS } // namespace Core } // namespace djv
29.630137
82
0.452381
[ "vector" ]
11717304e9b73517e84e0852bb103870892393e6
15,813
cpp
C++
examples/RenderManagerD3DPresentExample3D.cpp
sensics/OSVR-RenderManager
56f9db6279945a52327ffeac138b3ff81cf37438
[ "Apache-2.0" ]
68
2016-02-16T13:40:01.000Z
2022-03-28T19:27:53.000Z
examples/RenderManagerD3DPresentExample3D.cpp
sensics/OSVR-RenderManager
56f9db6279945a52327ffeac138b3ff81cf37438
[ "Apache-2.0" ]
230
2016-02-16T13:41:56.000Z
2021-06-27T12:13:33.000Z
examples/RenderManagerD3DPresentExample3D.cpp
sensics/OSVR-RenderManager
56f9db6279945a52327ffeac138b3ff81cf37438
[ "Apache-2.0" ]
51
2016-02-20T15:37:37.000Z
2022-03-21T07:52:40.000Z
/** @file @brief Example program that uses the OSVR direct-to-display interface and D3D to render a scene with low latency. @date 2015 @author Russ Taylor <russ@sensics.com> <http://sensics.com/osvr> */ // Copyright 2015 Sensics, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Internal Includes #include <osvr/ClientKit/Context.h> #include <osvr/ClientKit/Interface.h> #include <osvr/RenderKit/RenderManager.h> // Library/third-party includes #include <windows.h> #include <initguid.h> #include <d3d11.h> #include <wrl.h> #include <DirectXMath.h> // Standard includes #include <iostream> #include <string> #include <stdlib.h> // For exit() #include <chrono> // This must come after we include <d3d11.h> so its pointer types are defined. #include <osvr/RenderKit/GraphicsLibraryD3D11.h> using namespace DirectX; #include "D3DCube.h" #include "D3DSimpleShader.h" // Set to true when it is time for the application to quit. // Handlers below that set it to true when the user causes // any of a variety of events so that we shut down the system // cleanly. This only works on Windows, but so does D3D... static bool quit = false; static Cube roomCube(5.0f, true); static SimpleShader simpleShader; #ifdef _WIN32 // Note: On Windows, this runs in a different thread from // the main application. static BOOL CtrlHandler(DWORD fdwCtrlType) { switch (fdwCtrlType) { // Handle the CTRL-C signal. case CTRL_C_EVENT: // CTRL-CLOSE: confirm that the user wants to exit. 
case CTRL_CLOSE_EVENT: case CTRL_BREAK_EVENT: case CTRL_LOGOFF_EVENT: case CTRL_SHUTDOWN_EVENT: quit = true; return TRUE; default: return FALSE; } } #endif // This callback sets a boolean value whose pointer is passed in to // the state of the button that was pressed. This lets the callback // be used to handle any button press that just needs to update state. void myButtonCallback(void* userdata, const OSVR_TimeValue* /*timestamp*/, const OSVR_ButtonReport* report) { bool* result = static_cast<bool*>(userdata); *result = (report->state != 0); } // Callbacks to draw things in world space, left-hand space, and right-hand // space. void RenderView( const osvr::renderkit::RenderInfo& renderInfo //< Info needed to render , ID3D11RenderTargetView* renderTargetView, ID3D11DepthStencilView* depthStencilView) { // Make sure our pointers are filled in correctly. if (renderInfo.library.D3D11 == nullptr) { std::cerr << "SetupDisplay: No D3D11 GraphicsLibrary, this should not happen" << std::endl; return; } auto context = renderInfo.library.D3D11->context; auto device = renderInfo.library.D3D11->device; float projectionD3D[16]; float viewD3D[16]; XMMATRIX identity = XMMatrixIdentity(); // Set up to render to the textures for this eye context->OMSetRenderTargets(1, &renderTargetView, depthStencilView); // Set up the viewport we're going to draw into. 
CD3D11_VIEWPORT viewport(static_cast<float>(renderInfo.viewport.left), static_cast<float>(renderInfo.viewport.lower), static_cast<float>(renderInfo.viewport.width), static_cast<float>(renderInfo.viewport.height)); context->RSSetViewports(1, &viewport); // Make a grey background FLOAT colorRgba[4] = {0.3f, 0.3f, 0.3f, 1.0f}; context->ClearRenderTargetView(renderTargetView, colorRgba); context->ClearDepthStencilView( depthStencilView, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0); osvr::renderkit::OSVR_PoseState_to_D3D(viewD3D, renderInfo.pose); osvr::renderkit::OSVR_Projection_to_D3D(projectionD3D, renderInfo.projection); XMMATRIX xm_projectionD3D(projectionD3D), xm_viewD3D(viewD3D); // draw room simpleShader.use(device, context, xm_projectionD3D, xm_viewD3D, identity); roomCube.draw(device, context); } void Usage(std::string name) { std::cerr << "Usage: " << name << std::endl; exit(-1); } int main(int argc, char* argv[]) { // Parse the command line int realParams = 0; for (int i = 1; i < argc; i++) { if (argv[i][0] == '-') { Usage(argv[0]); } else switch (++realParams) { case 1: default: Usage(argv[0]); } } if (realParams != 0) { Usage(argv[0]); } // Get an OSVR client context to use to access the devices // that we need. osvr::clientkit::ClientContext context( "osvr.RenderManager.D3DPresentExample3D"); // Construct button devices and connect them to a callback // that will set the "quit" variable to true when it is // pressed. Use button "1" on the left-hand or // right-hand controller. osvr::clientkit::Interface leftButton1 = context.getInterface("/controller/left/1"); leftButton1.registerCallback(&myButtonCallback, &quit); osvr::clientkit::Interface rightButton1 = context.getInterface("/controller/right/1"); rightButton1.registerCallback(&myButtonCallback, &quit); // Open Direct3D and set up the context for rendering to // an HMD. Do this using the OSVR RenderManager interface, // which maps to the nVidia or other vendor direct mode // to reduce the latency. 
osvr::renderkit::RenderManager* render = osvr::renderkit::createRenderManager(context.get(), "Direct3D11"); if ((render == nullptr) || (!render->doingOkay())) { std::cerr << "Could not create RenderManager" << std::endl; return 1; } // Set up a handler to cause us to exit cleanly. #ifdef _WIN32 SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE); #endif // Open the display and make sure this worked. osvr::renderkit::RenderManager::OpenResults ret = render->OpenDisplay(); if (ret.status == osvr::renderkit::RenderManager::OpenStatus::FAILURE) { std::cerr << "Could not open display" << std::endl; return 2; } if (ret.library.D3D11 == nullptr) { std::cerr << "Attempted to run a Direct3D11 program with a config file " << "that specified a different rendering library." << std::endl; return 3; } // Do a call to get the information we need to construct our // color and depth render-to-texture buffers. std::vector<osvr::renderkit::RenderInfo> renderInfo; context.update(); renderInfo = render->GetRenderInfo(); // Set up the vector of textures to render to and any framebuffer // we need to group them. std::vector<osvr::renderkit::RenderBuffer> renderBuffers; std::vector<ID3D11Texture2D*> depthStencilTextures; std::vector<ID3D11DepthStencilView*> depthStencilViews; HRESULT hr; for (size_t i = 0; i < renderInfo.size(); i++) { // The color buffer for this eye. We need to put this into // a generic structure for the Present function, but we only need // to fill in the Direct3D portion. // Note that this texture format must be RGBA and unsigned byte, // so that we can present it to Direct3D for DirectMode. ID3D11Texture2D* D3DTexture = nullptr; unsigned width = static_cast<int>(renderInfo[i].viewport.width); unsigned height = static_cast<int>(renderInfo[i].viewport.height); // Initialize a new render target texture description. 
D3D11_TEXTURE2D_DESC textureDesc = {}; textureDesc.Width = width; textureDesc.Height = height; textureDesc.MipLevels = 1; textureDesc.ArraySize = 1; textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; textureDesc.SampleDesc.Count = 1; textureDesc.SampleDesc.Quality = 0; textureDesc.Usage = D3D11_USAGE_DEFAULT; // We need it to be both a render target and a shader resource textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE; textureDesc.CPUAccessFlags = 0; textureDesc.MiscFlags = 0; // Create a new render target texture to use. hr = renderInfo[i].library.D3D11->device->CreateTexture2D( &textureDesc, nullptr, &D3DTexture); if (FAILED(hr)) { std::cerr << "Can't create texture for eye " << i << std::endl; return -1; } // Fill in the resource view for your render texture buffer here D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc = {}; // This must match what was created in the texture to be rendered renderTargetViewDesc.Format = textureDesc.Format; renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D; renderTargetViewDesc.Texture2D.MipSlice = 0; // Create the render target view. ID3D11RenderTargetView* renderTargetView; //< Pointer to our render target view hr = renderInfo[i].library.D3D11->device->CreateRenderTargetView( D3DTexture, &renderTargetViewDesc, &renderTargetView); if (FAILED(hr)) { std::cerr << "Could not create render target for eye " << i << std::endl; return -2; } // Push the filled-in RenderBuffer onto the vector. osvr::renderkit::RenderBufferD3D11* rbD3D = new osvr::renderkit::RenderBufferD3D11; rbD3D->colorBuffer = D3DTexture; rbD3D->colorBufferView = renderTargetView; osvr::renderkit::RenderBuffer rb; rb.D3D11 = rbD3D; renderBuffers.push_back(rb); //================================================================== // Create a depth buffer // Make the depth/stencil texture. 
D3D11_TEXTURE2D_DESC textureDescription = {}; textureDescription.SampleDesc.Count = 1; textureDescription.SampleDesc.Quality = 0; textureDescription.Usage = D3D11_USAGE_DEFAULT; textureDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL; textureDescription.Width = width; textureDescription.Height = height; textureDescription.MipLevels = 1; textureDescription.ArraySize = 1; textureDescription.CPUAccessFlags = 0; textureDescription.MiscFlags = 0; /// @todo Make this a parameter textureDescription.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; ID3D11Texture2D* depthStencilBuffer; hr = renderInfo[i].library.D3D11->device->CreateTexture2D( &textureDescription, NULL, &depthStencilBuffer); if (FAILED(hr)) { std::cerr << "Could not create depth/stencil texture for eye " << i << std::endl; return -4; } depthStencilTextures.push_back(depthStencilBuffer); // Create the depth/stencil view description D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDescription = {}; depthStencilViewDescription.Format = textureDescription.Format; depthStencilViewDescription.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D; depthStencilViewDescription.Texture2D.MipSlice = 0; ID3D11DepthStencilView* depthStencilView; hr = renderInfo[i].library.D3D11->device->CreateDepthStencilView( depthStencilBuffer, &depthStencilViewDescription, &depthStencilView); if (FAILED(hr)) { std::cerr << "Could not create depth/stencil view for eye " << i << std::endl; return -5; } depthStencilViews.push_back(depthStencilView); } // Create depth stencil state. // Describe how depth and stencil tests should be performed. 
D3D11_DEPTH_STENCIL_DESC depthStencilDescription = {}; depthStencilDescription.DepthEnable = true; depthStencilDescription.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL; depthStencilDescription.DepthFunc = D3D11_COMPARISON_LESS; depthStencilDescription.StencilEnable = true; depthStencilDescription.StencilReadMask = 0xFF; depthStencilDescription.StencilWriteMask = 0xFF; // Front-facing stencil operations depthStencilDescription.FrontFace.StencilFailOp = D3D11_STENCIL_OP_KEEP; depthStencilDescription.FrontFace.StencilDepthFailOp = D3D11_STENCIL_OP_INCR; depthStencilDescription.FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP; depthStencilDescription.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS; // Back-facing stencil operations depthStencilDescription.BackFace.StencilFailOp = D3D11_STENCIL_OP_KEEP; depthStencilDescription.BackFace.StencilDepthFailOp = D3D11_STENCIL_OP_DECR; depthStencilDescription.BackFace.StencilPassOp = D3D11_STENCIL_OP_KEEP; depthStencilDescription.BackFace.StencilFunc = D3D11_COMPARISON_ALWAYS; ID3D11DepthStencilState* depthStencilState; hr = renderInfo[0].library.D3D11->device->CreateDepthStencilState( &depthStencilDescription, &depthStencilState); if (FAILED(hr)) { std::cerr << "Could not create depth/stencil state" << std::endl; return -3; } // Register our constructed buffers so that we can use them for // presentation. if (!render->RegisterRenderBuffers(renderBuffers)) { std::cerr << "RegisterRenderBuffers() returned false, cannot continue" << std::endl; quit = true; } // Timing of frame rates size_t count = 0; std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); // Continue rendering until it is time to quit. while (!quit) { // Update the context so we get our callbacks called and // update tracker state. context.update(); renderInfo = render->GetRenderInfo(); // Render into each buffer using the specified information. 
for (size_t i = 0; i < renderInfo.size(); i++) { renderInfo[i].library.D3D11->context->OMSetDepthStencilState( depthStencilState, 1); RenderView(renderInfo[i], renderBuffers[i].D3D11->colorBufferView, depthStencilViews[i]); } // Send the rendered results to the screen if (!render->PresentRenderBuffers(renderBuffers, renderInfo)) { std::cerr << "PresentRenderBuffers() returned false, maybe because " "it was asked to quit" << std::endl; quit = true; } // Timing information end = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_sec = end - start; if (elapsed_sec.count() >= 2) { std::chrono::duration<double, std::micro> elapsed_usec = end - start; double usec = elapsed_usec.count(); std::cout << "Rendering at " << count / (usec * 1e-6) << " fps" << std::endl; start = end; count = 0; } count++; } // Clean up after ourselves. // @todo // Close the Renderer interface cleanly. delete render; return 0; }
37.65
80
0.654904
[ "render", "vector" ]
1175e5642304b7ad2c63c014d46bfe1597629b32
4,125
cpp
C++
bin/cutsite_trimming_mHiC.cpp
ITBE-Lab/mHiC-Ranking
3c2cd443fb7a594378db89faa04c3180fa3d0897
[ "MIT" ]
17
2018-04-24T03:36:59.000Z
2021-12-25T22:18:41.000Z
bin/cutsite_trimming_mHiC.cpp
ITBE-Lab/mHiC-Ranking
3c2cd443fb7a594378db89faa04c3180fa3d0897
[ "MIT" ]
13
2018-05-03T15:59:48.000Z
2021-12-29T16:03:35.000Z
bin/cutsite_trimming_mHiC.cpp
ITBE-Lab/mHiC-Ranking
3c2cd443fb7a594378db89faa04c3180fa3d0897
[ "MIT" ]
6
2018-01-12T16:40:43.000Z
2021-12-09T06:45:48.000Z
// HiC-Pro // Copyright 2015 Institut Curie // Author(s): Nicolas Servant // Contact: nicolas.servant@curie.fr // This software is distributed without any guarantee under the terms of the BSD-3 licence // Ye modified June 15, 2017 // g++ -std=c++0x -o cutsite_trimming cutsite_trimming.cpp //./cutsite_trimming -fastq fastq -cutsite AAGCTAGCTT # for HindIII #include <iostream> // std::cout #include <stdlib.h> #include <string.h> #include <vector> #include <fstream> static const char* prog; static int usage(int ret=1) { std::cerr << "usage: " << prog << " --fastq FASTQFILE --cutsite CUTSITE --out OUTFILE [--rmuntrim] \n"; std::cerr << "usage: " << prog << " --help\n"; return ret; } static int get_options(int argc, char* argv[], std::string& fastqFile, std::vector<std::string>& cutSites, std::string& output, bool& rmuntrim) { prog = argv[0]; if (argc == 1){ exit(usage()); } for (int ac = 1; ac < argc; ++ac) { const char* opt = argv[ac]; if (*opt == '-') { if (!strcmp(opt, "--fastq")) { fastqFile = std::string(argv[++ac]); } else if (!strcmp(opt, "--cutsite")) { std::string cutSitesSequence; cutSitesSequence = std::string(argv[++ac]); //int strLen = cutSitesSequence.size(); size_t pos = cutSitesSequence.find(","); size_t begin = 0; while(pos != std::string::npos){ cutSites.push_back(cutSitesSequence.substr(begin, pos - begin)); begin = pos + 1; pos = cutSitesSequence.find(",", begin + 1); } cutSites.push_back(cutSitesSequence.substr(begin, pos)); } else if (!strcmp(opt, "--out")) { output = std::string(argv[++ac]); } else if (!strcmp(opt, "--rmuntrim")) { rmuntrim = true; } }else { std::cerr << prog << ": unknown option " << opt << std::endl; return usage(); } } return 0; } static int trim_fastq(std::string& fastqFile, std::vector<std::string>& cutSites, std::string& outFile, bool& rmuntrim) { int trim_count=0; std::string ID; std::ifstream ifs (fastqFile); std::ofstream ofs (outFile); if (ifs.is_open()){ while (getline(ifs, ID)) { std::string seq; std::string dummy; 
std::string qual; //Ye int strLen = std::string(cutSites[0]).size(); //printf("Let print %s, and the length is %d \n", cutSites[0].c_str(), strLen); getline(ifs, seq); getline(ifs, dummy); getline(ifs, qual); bool find_pos = false; size_t pos = std::string::npos; for (std::vector<std::string>::iterator it = cutSites.begin(); it != cutSites.end(); ++it){ size_t tmp_pos = seq.find(*it); if (tmp_pos != std::string::npos) { // If find_pos is alread True, there is a problem (there are two cut // sites in the same read).) if (find_pos == true){ if(tmp_pos < pos) { pos = tmp_pos; } } else { find_pos = true; pos = tmp_pos; } } } //Ye if (pos != std::string::npos) { trim_count++; ofs << ID << '\n'; ofs << seq.substr(0, pos + strLen/2) << '\n'; ofs << "+\n"; ofs << qual.substr(0, pos + strLen/2) << '\n'; } else { if (!rmuntrim){ ofs << ID << '\n'; ofs << seq << '\n'; ofs << "+\n"; ofs << qual << '\n'; } } find_pos = false; } }else{ std::cerr << "Error : Cannot open file : " << fastqFile; } return trim_count; } int main(int argc, char* argv[]) { std::string fastqFile; std::vector<std::string> cutSites; std::string outFile; bool rmuntrim = false; int ret = get_options(argc, argv, fastqFile, cutSites, outFile, rmuntrim); printf("##Fastq file: %s\n", fastqFile.c_str()); printf("##Restriction sites:\n"); for(std::vector<std::string>::iterator it = cutSites.begin(); it != cutSites.end(); ++it){ std::cout << *it << std::endl; } printf("##Output File: %s\n", outFile.c_str()); if (fastqFile.empty() || cutSites.size() == 0 || outFile.empty()){ usage(); exit(ret); } int trim_count=trim_fastq(fastqFile, cutSites, outFile, rmuntrim); printf("\n##Trimmed reads: %d\n", trim_count); return(0); }
25.152439
105
0.584485
[ "vector" ]
117d60c979bf9deb59476f1002cc8a1bc654effd
2,731
cpp
C++
src/renderer/camera.cpp
kevinmkchin/opengl-sdl-cpp
4ac9a7b397fb4fb13f40c669ed782ad51824b064
[ "MIT" ]
null
null
null
src/renderer/camera.cpp
kevinmkchin/opengl-sdl-cpp
4ac9a7b397fb4fb13f40c669ed782ad51824b064
[ "MIT" ]
null
null
null
src/renderer/camera.cpp
kevinmkchin/opengl-sdl-cpp
4ac9a7b397fb4fb13f40c669ed782ad51824b064
[ "MIT" ]
null
null
null
#include "camera.h" #include "../core/timer.h" #include "../core/input.h" #include "deferred_renderer.h" #include "../debugging/console.h" #include "../game_statics.h" void camera_t::update_camera() { float dt = timer::delta_time; i32 g_mouse_delta_x = game_statics::the_input->g_mouse_delta_x; i32 g_mouse_delta_y = game_statics::the_input->g_mouse_delta_y; if(kc_abs(g_mouse_delta_x) < 50.f && kc_abs(g_mouse_delta_y) < 50.f) // don't move if mouse delta is too big to be normal { rotation.y /*yaw*/ -= g_mouse_delta_x * turnspeed; rotation.z /*pitch*/ -= g_mouse_delta_y * turnspeed; } if(rotation.z > 89.f) { rotation.z = 89.f; } if(rotation.z < -89.f) { rotation.z = -89.f; } // Calcuate direction, right, and up vectors calculated_direction = orientation_to_direction(euler_to_quat(rotation*KC_DEG2RAD)); calculated_direction = normalize(calculated_direction); calculated_right = normalize(cross(calculated_direction, world_up)); // right vector is cross product of direction and up direction of world calculated_up = normalize(cross(calculated_right, calculated_direction)); // up vector is cross product of right vector and direction const u8* keystate = game_statics::the_input->g_keystate; if(console_is_hidden()) { // Check Inputs if (keystate[SDL_SCANCODE_W]) { position += calculated_direction * movespeed * dt; } if (keystate[SDL_SCANCODE_A]) { position += -calculated_right * movespeed * dt; } if (keystate[SDL_SCANCODE_S]) { position += -calculated_direction * movespeed * dt; } if (keystate[SDL_SCANCODE_D]) { position += calculated_right * movespeed * dt; } if (keystate[SDL_SCANCODE_Q]) { position.y += -movespeed * dt; } if (keystate[SDL_SCANCODE_E]) { position.y += movespeed * dt; } } } void camera_t::calculate_perspective_matrix() { float fov = 90.f; vec2i display_buffer_size = game_statics::the_renderer->get_buffer_size(); float aspect_ratio = (float)display_buffer_size.x / (float)display_buffer_size.y; #if 0 matrix_perspective = projection_matrix_orthographic(-30.0f, 
30.0f, -30.0f, 30.0f, 0.1f, 100.f); #else matrix_perspective = projection_matrix_perspective((fov/2.f)*KC_DEG2RAD, aspect_ratio, nearclip, farclip); #endif } /** Returns a view matrix using the given camera as the observer */ void camera_t::calculate_view_matrix() { matrix_view = view_matrix_look_at(position,position + calculated_direction, calculated_up); }
32.511905
144
0.65617
[ "vector" ]
117d98a25861c01a91d0cc5baee7eb7125f2abaa
1,162
cc
C++
sample.cc
r-lyeh-archived/bourne
c7d081c57a76d600917a18f613a1c5802b3910f0
[ "Zlib" ]
3
2018-10-12T11:20:08.000Z
2021-08-19T01:50:19.000Z
sample.cc
r-lyeh/medea
c7d081c57a76d600917a18f613a1c5802b3910f0
[ "Zlib" ]
null
null
null
sample.cc
r-lyeh/medea
c7d081c57a76d600917a18f613a1c5802b3910f0
[ "Zlib" ]
null
null
null
#include "bourne.hpp" #include <cassert> #include <iostream> #include <vector> #include <string> #include <unordered_map> struct phones { std::string country; std::vector<int> phonelist; std::string details; }; BOURNE_DEFINE( phones &it, (it.country, it.phonelist, it.details) ); int main() { // first-class objects are automatically serialized std::unordered_map< std::string, std::vector< std::string > > contacts = { { "homer", {"marge", "lisa", "bart", "maggie" } }, { "marge", {"homer", "lisa", "bart", "maggie" } }, { "lisa", {"marge", "homer", "bart", "maggie" } }, { "bart", {"marge", "lisa", "homer", "maggie" } }, { "maggie", {"marge", "lisa", "bart", "homer" } } }, built; std::cout << bourne::to_json( contacts ) << std::endl; // saving/loading showcase std::string json = bourne::to_json( contacts ); bourne::from_json( built, json ); assert( built == contacts ); // custom objects require a thin BOURNE_DEFINE() wrapper phones list = { "uk", {123,456}, "just a few contacts" }; std::cout << bourne::to_json( list ) << std::endl; }
31.405405
78
0.580895
[ "vector" ]
1185b00fc0dc61fc0366ab46afa179eb90d46307
5,767
cpp
C++
ngraph/test/backend/adaptive_avg_pool.in.cpp
uikilin100/openvino
afc5191b8c75b1de4adc8cb07c6269b52882ddfe
[ "Apache-2.0" ]
1
2021-03-16T17:40:26.000Z
2021-03-16T17:40:26.000Z
ngraph/test/backend/adaptive_avg_pool.in.cpp
uikilin100/openvino
afc5191b8c75b1de4adc8cb07c6269b52882ddfe
[ "Apache-2.0" ]
42
2020-11-23T08:09:57.000Z
2022-02-21T13:03:34.000Z
ngraph/test/backend/adaptive_avg_pool.in.cpp
tsocha/openvino
3081fac7581933568b496a3c4e744d1cee481619
[ "Apache-2.0" ]
4
2021-04-02T08:48:38.000Z
2021-07-01T06:59:02.000Z
// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" #include "util/engine/test_engines.hpp" #include "util/test_case.hpp" #include "util/test_control.hpp" using namespace std; using namespace ngraph; static string s_manifest = "${MANIFEST}"; using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, adaptive_avg_pool_1d) { auto data = make_shared<op::Parameter>(element::f32, Shape{2, 3, 7}); auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{1}, {3}); auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, output_shape); auto fun = make_shared<Function>(OutputVector{adaptive_pool}, ParameterVector{data}); std::vector<float> inputs{0, 4, 1, 3, -2, -5, -2, -2, 1, -3, 1, -3, -4, 0, -2, 1, -1, -2, 3, -1, -3, -1, -2, 3, 4, -3, -4, 1, 2, 0, -4, -5, -2, -2, -3, 2, 3, 1, -5, 2, -4, -2}; std::vector<float> expected_result{1.66666663, 0.66666669, -3., -1.33333337, -1.66666663, -2.33333325, -0.66666669, 0., -0.33333334, 0., 1.33333337, -2., -0.66666669, -3.66666675, -2.33333325, 2., -0.66666669, -1.33333337}; auto test_case = test::TestCase<TestEngine>(fun); test_case.add_input<float>(Shape{2, 3, 7}, inputs); test_case.add_expected_output<float>(Shape{2, 3, 3}, expected_result); test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, adaptive_avg_pool_2d) { auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 7, 10}); auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {3, 3}); auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, output_shape); auto fun = make_shared<Function>(OutputVector{adaptive_pool}, ParameterVector{data}); std::vector<float> inputs{ -2, -3, -4, 3, -5, 4, 0, -4, -2, -4, -5, 0, -3, 0, -2, 0, 0, -5, -4, -1, 3, -1, 0, -1, 0, -2, 0, 4, 1, 4, 0, -1, -4, 2, -2, -5, -1, -1, -2, 1, 2, -2, -1, 2, 0, -1, 0, -5, 4, 4, 3, 0, -4, -4, -4, -2, 0, 1, -2, -1, 4, -2, -4, 1, -1, -3, -4, -1, 
1, -4, -2, -4, -5, 0, -4, 3, 4, -5, -4, -2, 0, 2, -4, -3, 3, -1, 1, -4, -5, 4, 2, -5, 2, -3, 0, 4, 3, 3, 1, 2, -1, -4, 1, -3, -3, -2, 3, 4, -2, -5, 1, 4, 4, -2, 2, 1, -5, -2, -5, 1, 1, -2, -3, -3, -1, -5, 1, -3, -5, -3, -4, -1, 4, -3, 4, -1, 4, 3, 1, 4, -2, -4, -4, 4, -3, 4, 2, -3, -2, 4, -3, 0, 1, -4, 4, 4, 0, 3, -1, 3, 3, -5, 0, 3, -3, 1, -2, 4, -5, -5, 1, 0, -1, 0, -3, -2, 0, -3, 3, -2, -2, 0, -3, 4, -1, 2, -2, 2, -3, -1, -4, -2, 0, 2, 0, 2, 0, -3, 4, 3, -5, -3, -5, 1, -5, -3, -5, 4, -3, 3}; std::vector<float> expected_result{-1.08333337, -0.25000000, -0.91666669, -0.08333334, -0.66666669, 0.75000000, -0.41666666, -1.33333337, -0.58333331, -1.66666663, 0.58333331, -0.16666667, -0.33333334, -0.41666666, -0.16666667, -0.33333334, -0.66666669, -0.75000000, -0.91666669, 0.83333331, -0.16666667, 0., -0.25000000, -1.16666663, -1.41666663, -0.41666666, -0.08333334}; auto test_case = test::TestCase<TestEngine>(fun); test_case.add_input<float>(Shape{1, 3, 7, 10}, inputs); test_case.add_expected_output<float>(Shape{1, 3, 3, 3}, expected_result); test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, adaptive_avg_pool_3d) { auto data = make_shared<op::Parameter>(element::f32, Shape{2, 2, 3, 3, 3}); auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {2, 2, 2}); auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, output_shape); auto fun = make_shared<Function>(OutputVector{adaptive_pool}, ParameterVector{data}); std::vector<float> inputs{ -5, 1, -3, -4, 4, -4, 3, -3, -1, 0, 0, -2, -4, 2, 0, -4, -5, -2, -4, -4, 0, -2, 3, -3, 4, -1, -4, -1, -1, -5, 4, -1, -2, -3, 0, 4, -1, -5, -4, 1, 1, 4, -5, -5, -5, 4, -3, -3, -3, 4, 0, -3, -5, 1, 4, 2, 1, -5, -5, 1, 0, -4, -1, 2, -4, -2, 4, 3, 1, -3, -3, -2, -4, -3, -3, 3, -1, 1, 2, 2, -4, -5, -4, 1, 3, -4, -1, 2, 4, -5, 0, 1, -2, 0, 0, -2, 3, -2, -5, -3, -5, -2, -1, 3, -2, 4, 3, -3}; std::vector<float> expected_result{-0.750, -0.250, -1.375, -1.125, -1.125, -0.500, -0.875, -1.250, -0.375, -1.625, -1., -0.500, 
-0.250, -0.750, -1.875, -0.625, 0.125, -0.375, -1.625, -1.250, 0., -1., 0.875, -0.375, -1.125, -1.375, 0.750, -1.875, -0.625, -1.125, 1.250, -1.}; auto test_case = test::TestCase<TestEngine>(fun); test_case.add_input<float>(Shape{2, 2, 3, 3, 3}, inputs); test_case.add_expected_output<float>(Shape{2, 2, 2, 2, 2}, expected_result); test_case.run(); }
51.491071
115
0.444252
[ "shape", "vector" ]
1189e5a364e261b31b3f7f69a7215444c4964222
3,096
cpp
C++
Unique Pointer Legacy Glue/source/superGlue.cpp
PaletzTheWise/Experiments
a67a550f23ff9b2c615d6c8fef5d4015395d347f
[ "Unlicense" ]
null
null
null
Unique Pointer Legacy Glue/source/superGlue.cpp
PaletzTheWise/Experiments
a67a550f23ff9b2c615d6c8fef5d4015395d347f
[ "Unlicense" ]
null
null
null
Unique Pointer Legacy Glue/source/superGlue.cpp
PaletzTheWise/Experiments
a67a550f23ff9b2c615d6c8fef5d4015395d347f
[ "Unlicense" ]
null
null
null
#include "SmartPointerAdapter.h" #include "OutputPointer.h" #include "Destroyable.h" // Final scenario where both glues are combined. // Assuming there is a third party function to create an object that we cannot upgrade to unique pointer. static void createDestroyableThirdParty(Destroyable *& destroyableOut) { TRACE; destroyableOut = new Destroyable; } // Then a legacy function initializes the created object static bool initDestroyableLegacy(Destroyable *& destroyableOut) { TRACE; createDestroyableThirdParty( destroyableOut ); if (!destroyableOut->init()) { destroyableOut = nullptr; return false; } return true; } // And another legacy function initializes it a bit more static bool initMoreDestroyableLegacy(Destroyable *& destroyableOut) { TRACE; bool rc = initDestroyableLegacy(destroyableOut); if (!destroyableOut->init()) { destroyableOut = nullptr; return false; } return true; } // And finally yet another legacy function uses the init more function. static bool legacy() { TRACE; Destroyable * ptr = NULL; // define all variables at the top for more legacy feel :) bool rc = initMoreDestroyableLegacy(ptr); rc = rc && ptr->doStuff(); if (ptr != NULL) { ptr->destroy(); } return rc; } // We can improve init function using the OutputPointer to keep it compatible with all call sites. // However, the third party create function won't e compatible with OutputPointer, so we have to use adaptSmartPointer(). // Fortunately, even though OutputPointer is not a smart pointer in the strictest sense, it still does fulfill the requirements of // SmartPointerAdapter. 
static bool initDestroyableBetter(OutputPointer<Destroyable, DestroyDeleter> destroyableOut) { TRACE; createDestroyableThirdParty(adaptSmartPointer(destroyableOut)); if (!destroyableOut->init()) { destroyableOut = nullptr; return false; } return true; } static bool initMoreDestroyableBetter(OutputPointer<Destroyable, DestroyDeleter> destroyableOut) { TRACE; if (!initDestroyableBetter(destroyableOut)) { return false; } if (!destroyableOut->init()) { destroyableOut = nullptr; return false; } return true; } // Now we can improve the legacy method to use unique_ptr. static bool better() { TRACE; unique_ptr<Destroyable, DestroyDeleter> ptr; return ( initMoreDestroyableBetter(ptr) && ptr->doStuff() ); } Destroyable * createDestroyableNewLike() { return new Destroyable(); } unique_ptr<Destroyable, DestroyDeleter> createDestroyableUniqueLike() { return unique_ptr<Destroyable, DestroyDeleter>(new Destroyable); } void superGlue() { legacy(); better(); unique_ptr<Destroyable, DestroyDeleter> b( createDestroyableNewLike() ); b = unique_ptr<Destroyable, DestroyDeleter>( createDestroyableNewLike() ); Destroyable * c = createDestroyableNewLike(); unique_ptr<Destroyable, DestroyDeleter> a(createDestroyableUniqueLike()); b = unique_ptr<Destroyable, DestroyDeleter>(createDestroyableUniqueLike()); unique_ptr<Destroyable, DestroyDeleter> a(createDestroyableUniqueLike()); b = createDestroyableUniqueLike(); }
22.59854
130
0.758721
[ "object" ]
118d01dda86937763cce67f10c95a9baf798556b
506
cpp
C++
leetcode/0268_missing_number.cpp
jacquerie/leetcode
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
[ "MIT" ]
3
2018-05-10T09:56:49.000Z
2020-11-07T18:09:42.000Z
leetcode/0268_missing_number.cpp
jacquerie/leetcode
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
[ "MIT" ]
null
null
null
leetcode/0268_missing_number.cpp
jacquerie/leetcode
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
[ "MIT" ]
null
null
null
// Copyright (c) 2018 Jacopo Notarstefano #include <cassert> #include <numeric> #include <vector> using namespace std; class Solution { public: int missingNumber(vector<int>& nums) { int expected_sum = (nums.size() * (nums.size() + 1)) / 2; int actual_sum = accumulate(nums.begin(), nums.end(), 0); return expected_sum - actual_sum; } }; int main() { auto solution = Solution(); vector<int> nums = {3, 0, 1}; assert(2 == solution.missingNumber(nums)); }
19.461538
65
0.620553
[ "vector" ]
118dc67b415d72bf9da263c5e84a31cad5c8376b
36,463
cpp
C++
src/blend2d/gradient.cpp
Daxaker/blend2d
e8128b663d40f5a590f7690868be419d19c4f933
[ "Zlib" ]
63
2016-04-06T19:30:32.000Z
2018-09-28T10:56:54.000Z
src/blend2d/gradient.cpp
Daxaker/blend2d
e8128b663d40f5a590f7690868be419d19c4f933
[ "Zlib" ]
1
2018-07-29T17:31:30.000Z
2018-10-08T18:32:07.000Z
src/blend2d/gradient.cpp
Daxaker/blend2d
e8128b663d40f5a590f7690868be419d19c4f933
[ "Zlib" ]
null
null
null
// This file is part of Blend2D project <https://blend2d.com> // // See blend2d.h or LICENSE.md for license and copyright information // SPDX-License-Identifier: Zlib #include "api-build_p.h" #include "array_p.h" #include "math_p.h" #include "format_p.h" #include "gradient_p.h" #include "rgba_p.h" #include "runtime_p.h" #include "tables_p.h" #include "pixelops/funcs_p.h" #include "support/algorithm_p.h" #include "support/intops_p.h" #include "support/ptrops_p.h" #include "threading/atomic_p.h" namespace BLGradientPrivate { // BLGradient - Globals // ==================== static BLObjectEthernalImpl<BLGradientPrivateImpl> defaultImpl; static constexpr const double noValues[BL_GRADIENT_VALUE_MAX_VALUE + 1] = { 0.0 }; static constexpr const BLMatrix2D noMatrix(1.0, 0.0, 0.0, 1.0, 0.0, 0.0); // BLGradient - Tables // =================== struct BLGradientValueCountTableGen { static constexpr uint8_t value(size_t i) noexcept { return i == BL_GRADIENT_TYPE_LINEAR ? uint8_t(sizeof(BLLinearGradientValues ) / sizeof(double)) : i == BL_GRADIENT_TYPE_RADIAL ? uint8_t(sizeof(BLRadialGradientValues ) / sizeof(double)) : i == BL_GRADIENT_TYPE_CONICAL ? 
uint8_t(sizeof(BLConicalGradientValues) / sizeof(double)) : uint8_t(0); } }; static constexpr const auto valueCountTable = blMakeLookupTable<uint8_t, BL_GRADIENT_TYPE_MAX_VALUE + 1, BLGradientValueCountTableGen>(); // BLGradient - Internals & Utilities // ================================== static BL_INLINE constexpr BLObjectImplSize implSizeFromCapacity(size_t n) noexcept { return BLObjectImplSize(sizeof(BLGradientPrivateImpl) + n * sizeof(BLGradientStop)); } static BL_INLINE constexpr size_t capacityFromImplSize(BLObjectImplSize implSize) noexcept { return (implSize.value() - sizeof(BLGradientPrivateImpl)) / sizeof(BLGradientStop); } static BL_INLINE bool isMutable(const BLGradientCore* self) noexcept { const size_t* refCountPtr = blObjectImplGetRefCountPtr(self->_d.impl); return *refCountPtr == 1; } static BL_INLINE size_t getSize(const BLGradientCore* self) noexcept { return getImpl(self)->size; } static BL_INLINE size_t getCapacity(const BLGradientCore* self) noexcept { return getImpl(self)->capacity; } static BL_INLINE BLGradientStop* getStops(const BLGradientCore* self) noexcept { return getImpl(self)->stops; } static constexpr size_t BL_GRADIENT_IMPL_INITIAL_SIZE = BLIntOps::alignUp(implSizeFromCapacity(2).value(), BL_OBJECT_IMPL_ALIGNMENT); // BLGradient - Analysis // ===================== static BL_INLINE uint32_t analyzeStopArray(const BLGradientStop* stops, size_t n) noexcept { uint32_t result = BL_DATA_ANALYSIS_CONFORMING; uint32_t wasSame = false; double prev = -1.0; for (size_t i = 0; i < n; i++) { double offset = stops[i].offset; if (!((offset >= 0.0) & (offset <= 1.0))) return BL_DATA_ANALYSIS_INVALID_VALUE; uint32_t isSame = (offset == prev); result |= (offset < prev); result |= isSame & wasSame; wasSame = isSame; prev = offset; } return result; } // BLGradient - Stop Matcher // ========================= struct GradientStopMatcher { double offset; BL_INLINE GradientStopMatcher(double offset) noexcept : offset(offset) {} }; static BL_INLINE bool 
operator==(const BLGradientStop& a, const GradientStopMatcher& b) noexcept { return a.offset == b.offset; } static BL_INLINE bool operator<=(const BLGradientStop& a, const GradientStopMatcher& b) noexcept { return a.offset <= b.offset; } // BLGradient - AltStop // ==================== // Alternative representation of `BLGradientStop` that is used to sort unknown stop array that is either unsorted or // may contain more than 2 stops that have the same offset. The `index` member is actually an index to the original // stop array. struct GradientStopAlt { double offset; union { intptr_t index; uint64_t rgba; }; }; BL_STATIC_ASSERT(sizeof(GradientStopAlt) == sizeof(BLGradientStop)); // BLGradient - Utilities // ====================== static BL_INLINE void initValues(double* dst, const double* src, size_t n) noexcept { size_t i; BL_NOUNROLL for (i = 0; i < n; i++) dst[i] = src[i]; BL_NOUNROLL while (i <= BL_GRADIENT_VALUE_MAX_VALUE) dst[i++] = 0.0; } static BL_INLINE void moveStops(BLGradientStop* dst, const BLGradientStop* src, size_t n) noexcept { memmove(dst, src, n * sizeof(BLGradientStop)); } static BL_INLINE size_t copyStops(BLGradientStop* dst, const BLGradientStop* src, size_t n) noexcept { for (size_t i = 0; i < n; i++) dst[i] = src[i]; return n; } static BL_NOINLINE size_t copyUnsafeStops(BLGradientStop* dst, const BLGradientStop* src, size_t n, uint32_t analysis) noexcept { BL_ASSERT(analysis == BL_DATA_ANALYSIS_CONFORMING || analysis == BL_DATA_ANALYSIS_NON_CONFORMING); if (analysis == BL_DATA_ANALYSIS_CONFORMING) return copyStops(dst, src, n); size_t i; // First copy source stops into the destination and index them. GradientStopAlt* stops = reinterpret_cast<GradientStopAlt*>(dst); for (i = 0; i < n; i++) { stops[i].offset = src[i].offset; stops[i].index = intptr_t(i); } // Now sort the stops and use both `offset` and `index` as a comparator. After the sort is done we will have // preserved the order of all stops that have the same `offset`. 
BLAlgorithm::quickSort(stops, n, [](const GradientStopAlt& a, const GradientStopAlt& b) noexcept -> intptr_t { intptr_t result = 0; if (a.offset < b.offset) result = -1; if (a.offset > b.offset) result = 1; return result ? result : a.index - b.index; }); // Now assign rgba value to the stop and remove all duplicates. If there are 3 or more consecutive stops we // remove all except the first/second to make sharp transitions possible. size_t j = 0; double prev1 = -1.0; // Dummy, cannot be within [0..1] range. double prev2 = -1.0; for (i = 0; i < n - 1; i++) { double offset = stops[i].offset; BLRgba64 rgba = src[size_t(stops[i].index)].rgba; j -= size_t((prev1 == prev2) & (prev2 == offset)); stops[j].offset = offset; stops[j].rgba = rgba.value; j++; prev1 = prev2; prev2 = offset; } // Returns the final number of stops kept. Could be the same as `n` or less. return j; } static BL_INLINE BLGradientLUT* copyMaybeNullLUT(BLGradientLUT* lut) noexcept { return lut ? lut->incRef() : nullptr; } // Cache invalidation means to remove the cached lut tables from `impl`. Since modification always means to either // create a copy of it or to modify a unique instance (not shared) it also means that we don't have to worry about // atomic operations here. static BL_INLINE BLResult invalidateLUTCache(BLGradientPrivateImpl* impl) noexcept { BLGradientLUT* lut32 = impl->lut32; if (lut32) { impl->lut32 = nullptr; lut32->release(); } impl->info32.packed = 0; return BL_SUCCESS; } BLGradientInfo ensureInfo32(BLGradientPrivateImpl* impl) noexcept { BLGradientInfo info; info.packed = impl->info32.packed; constexpr uint32_t FLAG_ALPHA_NOT_ONE = 0x1; // Has alpha that is not 1.0. constexpr uint32_t FLAG_ALPHA_NOT_ZERO = 0x2; // Has alpha that is not 0.0. constexpr uint32_t FLAG_TRANSITION = 0x4; // Has transition. 
if (info.packed == 0) { const BLGradientStop* stops = impl->stops; size_t stopCount = impl->size; if (stopCount != 0) { uint32_t flags = 0; uint64_t prev = stops[0].rgba.value & 0xFF00FF00FF00FF00u; uint32_t lutSize = 0; if (prev < 0xFF00000000000000u) flags |= FLAG_ALPHA_NOT_ONE; if (prev > 0x00FFFFFFFFFFFFFFu) flags |= FLAG_ALPHA_NOT_ZERO; for (size_t i = 1; i < stopCount; i++) { uint64_t value = stops[i].rgba.value & 0xFF00FF00FF00FF00u; if (value == prev) continue; flags |= FLAG_TRANSITION; if (value < 0xFF00000000000000u) flags |= FLAG_ALPHA_NOT_ONE; if (value > 0x00FFFFFFFFFFFFFFu) flags |= FLAG_ALPHA_NOT_ZERO; prev = value; } // If all alpha values are zero then we consider this to be without transition, because the whole transition // would result in transparent black. if (!(flags & FLAG_ALPHA_NOT_ZERO)) flags &= ~FLAG_TRANSITION; if (!(flags & FLAG_TRANSITION)) { // Minimal LUT size for no transition. The engine should always convert such style into solid fill, so such // LUT should never be used by the renderer. lutSize = 256; } else { // TODO: This is kinda adhoc, it would be much better if we base the calculation on both stops and their // offsets and estimate how big the ideal table should be. switch (stopCount) { case 1: { lutSize = 256; break; } case 2: { // 2 stops at endpoints only require 256 entries, more stops will use 512. double delta = stops[1].offset - stops[0].offset; lutSize = (delta >= 0.998) ? 256 : 512; break; } case 3: { lutSize = (stops[0].offset <= 0.002 && stops[1].offset == 0.5 && stops[2].offset >= 0.998) ? 512 : 1024; break; } default: { lutSize = 1024; break; } } } info.solid = uint8_t(flags & FLAG_TRANSITION ? 0 : 1); info.format = uint8_t(flags & FLAG_ALPHA_NOT_ONE) ? uint8_t(BL_FORMAT_PRGB32) : uint8_t(BL_FORMAT_FRGB32); info.lutSize = uint16_t(lutSize); // Update the info. It doesn't have to be atomic. 
impl->info32.packed = info.packed; } } return info; } BLGradientLUT* ensureLut32(BLGradientPrivateImpl* impl) noexcept { BLGradientLUT* lut = impl->lut32; if (lut) return lut; BLGradientInfo info = ensureInfo32(impl); const BLGradientStop* stops = impl->stops; uint32_t lutSize = info.lutSize; if (!lutSize) return nullptr; lut = BLGradientLUT::alloc(lutSize, 4); if (BL_UNLIKELY(!lut)) return nullptr; BLPixelOps::funcs.interpolate_prgb32(lut->data<uint32_t>(), lutSize, stops, impl->size); // We must drop this LUT if another thread created it meanwhile. BLGradientLUT* expected = nullptr; if (!blAtomicCompareExchange(&impl->lut32, &expected, lut)) { BL_ASSERT(expected != nullptr); BLGradientLUT::destroy(lut); lut = expected; } return lut; } // BLGradient - Alloc & Free Impl // ============================== static BLGradientPrivateImpl* allocImpl( BLGradientCore* self, BLObjectImplSize implSize, BLGradientType type, const void* values, BLExtendMode extendMode, BLMatrix2DType mType, const BLMatrix2D* m) noexcept { BL_ASSERT(type <= BL_GRADIENT_TYPE_MAX_VALUE); BL_ASSERT(mType <= BL_MATRIX2D_TYPE_MAX_VALUE); BL_ASSERT(extendMode <= BL_EXTEND_MODE_SIMPLE_MAX_VALUE); BLGradientPrivateImpl* impl = blObjectDetailAllocImplT<BLGradientPrivateImpl>(self, BLObjectInfo::packType(BL_OBJECT_TYPE_GRADIENT), implSize, &implSize); if (BL_UNLIKELY(!impl)) return nullptr; impl->stops = BLPtrOps::offset<BLGradientStop>(impl, sizeof(BLGradientPrivateImpl)); impl->size = 0; impl->capacity = capacityFromImplSize(implSize); impl->gradientType = uint8_t(type); impl->extendMode = uint8_t(extendMode); impl->matrixType = uint8_t(mType); impl->reserved[0] = 0; impl->matrix = *m; initValues(impl->values, static_cast<const double*>(values), valueCountTable[type]); impl->lut32 = nullptr; impl->info32.packed = 0; return impl; } BLResult freeImpl(BLGradientPrivateImpl* impl, BLObjectInfo info) noexcept { invalidateLUTCache(impl); return blObjectImplFreeInline(impl, info); } // BLGradient - Deep Copy & 
Mutation // ================================= static BL_NOINLINE BLResult deepCopy(BLGradientCore* self, const BLGradientCore* other, bool copyCache) noexcept { const BLGradientPrivateImpl* otherI = getImpl(other); BLGradientCore newO; BLGradientPrivateImpl* newI = allocImpl(&newO, implSizeFromCapacity(otherI->capacity), (BLGradientType)otherI->gradientType, otherI->values, (BLExtendMode)otherI->extendMode, (BLMatrix2DType)otherI->matrixType, &otherI->matrix); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); newI->size = copyStops(newI->stops, otherI->stops, otherI->size); if (copyCache) { newI->lut32 = copyMaybeNullLUT(otherI->lut32); newI->info32.packed = otherI->info32.packed; } return replaceInstance(self, &newO); } static BL_INLINE BLResult makeMutable(BLGradientCore* self, bool copyCache) noexcept { // NOTE: `copyCache` should be a constant so its handling should have zero cost. if (!isMutable(self)) return deepCopy(self, self, copyCache); if (!copyCache) return invalidateLUTCache(getImpl(self)); return BL_SUCCESS; } } // {BLGradientPrivate} // BLGradient - API - Init & Destroy // ================================= BL_API_IMPL BLResult blGradientInit(BLGradientCore* self) noexcept { using namespace BLGradientPrivate; self->_d = blObjectDefaults[BL_OBJECT_TYPE_GRADIENT]._d; return BL_SUCCESS; } BL_API_IMPL BLResult blGradientInitMove(BLGradientCore* self, BLGradientCore* other) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self != other); BL_ASSERT(other->_d.isGradient()); self->_d = other->_d; other->_d = blObjectDefaults[BL_OBJECT_TYPE_GRADIENT]._d; return BL_SUCCESS; } BL_API_IMPL BLResult blGradientInitWeak(BLGradientCore* self, const BLGradientCore* other) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self != other); BL_ASSERT(other->_d.isGradient()); return blObjectPrivateInitWeakTagged(self, other); } BL_API_IMPL BLResult blGradientInitAs(BLGradientCore* self, BLGradientType type, const void* values, BLExtendMode 
extendMode, const BLGradientStop* stops, size_t n, const BLMatrix2D* m) noexcept { using namespace BLGradientPrivate; self->_d = blObjectDefaults[BL_OBJECT_TYPE_GRADIENT]._d; return blGradientCreate(self, type, values, extendMode, stops, n, m); } BL_API_IMPL BLResult blGradientDestroy(BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return releaseInstance(self); } // BLGradient - API - Reset // ======================== BL_API_IMPL BLResult blGradientReset(BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return replaceInstance(self, static_cast<BLGradientCore*>(&blObjectDefaults[BL_OBJECT_TYPE_GRADIENT])); } // BLGradient - API - Assign // ========================= BL_API_IMPL BLResult blGradientAssignMove(BLGradientCore* self, BLGradientCore* other) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); BL_ASSERT(other->_d.isGradient()); BLGradientCore tmp = *other; other->_d = blObjectDefaults[BL_OBJECT_TYPE_GRADIENT]._d; return replaceInstance(self, &tmp); } BL_API_IMPL BLResult blGradientAssignWeak(BLGradientCore* self, const BLGradientCore* other) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); BL_ASSERT(other->_d.isGradient()); blObjectPrivateAddRefTagged(other); return replaceInstance(self, other); } BL_API_IMPL BLResult blGradientCreate(BLGradientCore* self, BLGradientType type, const void* values, BLExtendMode extendMode, const BLGradientStop* stops, size_t n, const BLMatrix2D* m) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY((uint32_t(type) > BL_GRADIENT_TYPE_MAX_VALUE) | (uint32_t(extendMode) > BL_EXTEND_MODE_SIMPLE_MAX_VALUE))) return blTraceError(BL_ERROR_INVALID_VALUE); if (!values) values = noValues; BLMatrix2DType mType = BL_MATRIX2D_TYPE_IDENTITY; if (!m) m = &noMatrix; else mType = m->type(); uint32_t analysis = 
BL_DATA_ANALYSIS_CONFORMING; if (n) { if (BL_UNLIKELY(stops == nullptr)) return blTraceError(BL_ERROR_INVALID_VALUE); analysis = analyzeStopArray(stops, n); if (BL_UNLIKELY(analysis >= BL_DATA_ANALYSIS_INVALID_VALUE)) return blTraceError(BL_ERROR_INVALID_VALUE); } size_t immutableMsk = BLIntOps::bitMaskFromBool<size_t>(!isMutable(self)); if ((n | immutableMsk) > getCapacity(self)) { BLObjectImplSize implSize = blMax(implSizeFromCapacity(n), BLObjectImplSize(BL_GRADIENT_IMPL_INITIAL_SIZE)); BLGradientCore newO; BLGradientPrivateImpl* newI = allocImpl(&newO, implSize, type, values, extendMode, mType, m); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); newI->size = copyUnsafeStops(newI->stops, stops, n, analysis); return replaceInstance(self, &newO); } else { BLGradientPrivateImpl* selfI = getImpl(self); selfI->gradientType = uint8_t(type); selfI->extendMode = uint8_t(extendMode); selfI->matrixType = uint8_t(mType); selfI->matrix.reset(*m); initValues(selfI->values, static_cast<const double*>(values), valueCountTable[type]); selfI->size = copyUnsafeStops(selfI->stops, stops, n, analysis); return invalidateLUTCache(selfI); } } // BLGradient - API - Storage // ========================== BL_API_IMPL BLResult blGradientShrink(BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); BLGradientPrivateImpl* selfI = getImpl(self); BLObjectImplSize currentSize = implSizeFromCapacity(selfI->capacity); BLObjectImplSize fittingSize = implSizeFromCapacity(selfI->size); if (currentSize - fittingSize < BL_OBJECT_IMPL_ALIGNMENT) return BL_SUCCESS; BLGradientCore newO; BLGradientPrivateImpl* newI = allocImpl(&newO, fittingSize, (BLGradientType)selfI->gradientType, selfI->values, (BLExtendMode)selfI->extendMode, (BLMatrix2DType)selfI->matrixType, &selfI->matrix); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); newI->size = copyStops(newI->stops, selfI->stops, selfI->size); newI->lut32 = 
copyMaybeNullLUT(selfI->lut32); return replaceInstance(self, &newO); } BL_API_IMPL BLResult blGradientReserve(BLGradientCore* self, size_t n) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); BLGradientPrivateImpl* selfI = getImpl(self); size_t immutableMsk = BLIntOps::bitMaskFromBool<size_t>(!isMutable(self)); if ((n | immutableMsk) > selfI->capacity) { BLGradientCore newO; BLObjectImplSize implSize = blMax(implSizeFromCapacity(n), BLObjectImplSize(BL_GRADIENT_IMPL_INITIAL_SIZE)); BLGradientPrivateImpl* newI = allocImpl(&newO, implSize, (BLGradientType)selfI->gradientType, selfI->values, (BLExtendMode)selfI->extendMode, (BLMatrix2DType)selfI->matrixType, &selfI->matrix); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); newI->size = copyStops(newI->stops, selfI->stops, selfI->size); newI->lut32 = copyMaybeNullLUT(selfI->lut32); return replaceInstance(self, &newO); } else { return BL_SUCCESS; } } // BLGradient - API - Accessors // ============================ BL_API_IMPL BLGradientType blGradientGetType(const BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); const BLGradientPrivateImpl* selfI = getImpl(self); return (BLGradientType)selfI->gradientType; } BL_API_IMPL BLResult blGradientSetType(BLGradientCore* self, BLGradientType type) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(uint32_t(type) > BL_GRADIENT_TYPE_MAX_VALUE)) return blTraceError(BL_ERROR_INVALID_VALUE); BL_PROPAGATE(makeMutable(self, true)); BLGradientPrivateImpl* selfI = getImpl(self); selfI->gradientType = uint8_t(type); return BL_SUCCESS; } BL_API_IMPL BLExtendMode blGradientGetExtendMode(const BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); const BLGradientPrivateImpl* selfI = getImpl(self); return (BLExtendMode)selfI->extendMode; } BL_API_IMPL BLResult blGradientSetExtendMode(BLGradientCore* 
self, BLExtendMode extendMode) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(extendMode > BL_EXTEND_MODE_SIMPLE_MAX_VALUE)) return blTraceError(BL_ERROR_INVALID_VALUE); BL_PROPAGATE(makeMutable(self, true)); BLGradientPrivateImpl* selfI = getImpl(self); selfI->extendMode = uint8_t(extendMode); return BL_SUCCESS; } BL_API_IMPL double blGradientGetValue(const BLGradientCore* self, size_t index) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(index > BL_GRADIENT_VALUE_MAX_VALUE)) return blNaN<double>(); const BLGradientPrivateImpl* selfI = getImpl(self); return selfI->values[index]; } BL_API_IMPL BLResult blGradientSetValue(BLGradientCore* self, size_t index, double value) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(index > BL_GRADIENT_VALUE_MAX_VALUE)) return blTraceError(BL_ERROR_INVALID_VALUE); BL_PROPAGATE(makeMutable(self, true)); BLGradientPrivateImpl* selfI = getImpl(self); selfI->values[index] = value; return BL_SUCCESS; } BL_API_IMPL BLResult blGradientSetValues(BLGradientCore* self, size_t index, const double* values, size_t valueCount) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(index > BL_GRADIENT_VALUE_MAX_VALUE || valueCount > BL_GRADIENT_VALUE_MAX_VALUE + 1 - index)) return blTraceError(BL_ERROR_INVALID_VALUE); if (BL_UNLIKELY(!valueCount)) return BL_SUCCESS; BL_PROPAGATE(makeMutable(self, true)); BLGradientPrivateImpl* selfI = getImpl(self); double* dst = selfI->values + index; for (size_t i = 0; i < valueCount; i++) dst[i] = values[i]; return BL_SUCCESS; } // BLGradient - API - Stops // ======================== BL_API_IMPL size_t blGradientGetSize(const BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return getSize(self); } BL_API_IMPL size_t blGradientGetCapacity(const BLGradientCore* self) noexcept 
{ using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return getCapacity(self); } BL_API_IMPL const BLGradientStop* blGradientGetStops(const BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return getStops(self); } BL_API_IMPL BLResult blGradientResetStops(BLGradientCore* self) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (!getSize(self)) return BL_SUCCESS; BLGradientPrivateImpl* selfI = getImpl(self); if (!isMutable(self)) { BLGradientCore newO; BLGradientPrivateImpl* newI = allocImpl(&newO, BLObjectImplSize(BL_GRADIENT_IMPL_INITIAL_SIZE), (BLGradientType)selfI->gradientType, selfI->values, (BLExtendMode)selfI->extendMode, (BLMatrix2DType)selfI->matrixType, &selfI->matrix); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); return replaceInstance(self, &newO); } else { selfI->size = 0; return invalidateLUTCache(selfI); } } BL_API_IMPL BLResult blGradientAssignStops(BLGradientCore* self, const BLGradientStop* stops, size_t n) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (n == 0) return blGradientResetStops(self); BLGradientPrivateImpl* selfI = getImpl(self); size_t immutableMsk = BLIntOps::bitMaskFromBool<size_t>(!isMutable(self)); uint32_t analysis = analyzeStopArray(stops, n); if (BL_UNLIKELY(analysis >= BL_DATA_ANALYSIS_INVALID_VALUE)) return blTraceError(BL_ERROR_INVALID_VALUE); if ((n | immutableMsk) > selfI->capacity) { BLGradientCore newO; BLObjectImplSize implSize = blMax(implSizeFromCapacity(n), BLObjectImplSize(BL_GRADIENT_IMPL_INITIAL_SIZE)); BLGradientPrivateImpl* newI = allocImpl( &newO, implSize, (BLGradientType)selfI->gradientType, selfI->values, (BLExtendMode)selfI->extendMode, (BLMatrix2DType)selfI->matrixType, &selfI->matrix); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); newI->size = copyUnsafeStops(newI->stops, stops, n, analysis); return replaceInstance(self, 
&newO); } else { selfI->size = copyUnsafeStops(selfI->stops, stops, n, analysis); return invalidateLUTCache(selfI); } } BL_API_IMPL BLResult blGradientAddStopRgba32(BLGradientCore* self, double offset, uint32_t rgba32) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return blGradientAddStopRgba64(self, offset, BLRgbaPrivate::rgba64FromRgba32(rgba32)); } BL_API_IMPL BLResult blGradientAddStopRgba64(BLGradientCore* self, double offset, uint64_t rgba64) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(!(offset >= 0.0 && offset <= 1.0))) return blTraceError(BL_ERROR_INVALID_VALUE); BLGradientPrivateImpl* selfI = getImpl(self); BLGradientStop* stops = selfI->stops; size_t i = 0; size_t n = selfI->size; if (n && offset >= stops[0].offset) { i = BLAlgorithm::binarySearchClosestLast(stops, n, GradientStopMatcher(offset)); // If there are two stops that have the same offset then we would replace the second one. This is supported // and it would make a sharp transition. if (i > 0 && stops[i - 1].offset == offset) return blGradientReplaceStopRgba64(self, i, offset, rgba64); // Insert a new stop after `i`. i++; } // If we are here it means that we are going to insert a stop at `i`. All other cases were handled at this point // so focus on generic insert, which could be just a special case of append operation, but we don't really care. 
size_t immutableMsk = BLIntOps::bitMaskFromBool<size_t>(!isMutable(self)); if ((n | immutableMsk) >= selfI->capacity) { BLGradientCore newO; BLObjectImplSize implSize = blObjectExpandImplSize(implSizeFromCapacity(n + 1)); BLGradientPrivateImpl* newI = allocImpl(&newO, implSize, (BLGradientType)selfI->gradientType, selfI->values, (BLExtendMode)selfI->extendMode, (BLMatrix2DType)selfI->matrixType, &selfI->matrix); if (BL_UNLIKELY(!newI)) return blTraceError(BL_ERROR_OUT_OF_MEMORY); BLGradientStop* newStops = newI->stops; copyStops(newStops, stops, i); newStops[i].reset(offset, BLRgba64(rgba64)); copyStops(newStops + i + 1, stops + i, n - i); newI->size = n + 1; return replaceInstance(self, &newO); } else { moveStops(stops + i + 1, stops + i, n - i); stops[i].reset(offset, BLRgba64(rgba64)); selfI->size = n + 1; return invalidateLUTCache(selfI); } } BL_API_IMPL BLResult blGradientRemoveStop(BLGradientCore* self, size_t index) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return blGradientRemoveStopsByIndex(self, index, index + 1); } BL_API_IMPL BLResult blGradientRemoveStopByOffset(BLGradientCore* self, double offset, uint32_t all) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(!(offset >= 0.0 && offset <= 1.0))) return blTraceError(BL_ERROR_INVALID_VALUE); size_t size = getSize(self); const BLGradientStop* stops = getStops(self); for (size_t a = 0; a < size; a++) { if (stops[a].offset > offset) break; if (stops[a].offset == offset) { size_t b = a + 1; if (all) { while (b < size) { if (stops[b].offset != offset) break; b++; } } return blGradientRemoveStopsByIndex(self, a, b); } } return BL_SUCCESS; } BL_API_IMPL BLResult blGradientRemoveStopsByIndex(BLGradientCore* self, size_t rStart, size_t rEnd) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); size_t size = getSize(self); size_t index = rStart; size_t end = blMin(rEnd, size); if (BL_UNLIKELY(index > 
size || end < index)) return blTraceError(BL_ERROR_INVALID_VALUE); if (BL_UNLIKELY(index == end)) return BL_SUCCESS; BLGradientPrivateImpl* selfI = getImpl(self); BLGradientStop* stops = selfI->stops; size_t removedCount = end - index; size_t shiftedCount = size - end; size_t afterCount = size - removedCount; if (!isMutable(self)) { BLGradientCore newO; BLGradientPrivateImpl* newI = allocImpl(&newO, implSizeFromCapacity(afterCount), (BLGradientType)selfI->gradientType, selfI->values, (BLExtendMode)selfI->extendMode, (BLMatrix2DType)selfI->matrixType, &selfI->matrix); BLGradientStop* newStops = newI->stops; copyStops(newStops, stops, index); copyStops(newStops + index, stops + end, shiftedCount); return replaceInstance(self, &newO); } else { moveStops(stops + index, stops + end, shiftedCount); selfI->size = afterCount; return invalidateLUTCache(selfI); } } BL_API_IMPL BLResult blGradientRemoveStopsByOffset(BLGradientCore* self, double offsetMin, double offsetMax) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(offsetMax < offsetMin)) return blTraceError(BL_ERROR_INVALID_VALUE); if (!getSize(self)) return BL_SUCCESS; BLGradientPrivateImpl* selfI = getImpl(self); const BLGradientStop* stops = selfI->stops; size_t size = selfI->size; size_t a, b; for (a = 0; a < size && stops[a].offset < offsetMin; a++) continue; for (b = a; b < size && stops[b].offset <= offsetMax; b++) continue; if (a >= b) return BL_SUCCESS; return blGradientRemoveStopsByIndex(self, a, b); } BL_API_IMPL BLResult blGradientReplaceStopRgba32(BLGradientCore* self, size_t index, double offset, uint32_t rgba32) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); return blGradientReplaceStopRgba64(self, index, offset, BLRgbaPrivate::rgba64FromRgba32(rgba32)); } BL_API_IMPL BLResult blGradientReplaceStopRgba64(BLGradientCore* self, size_t index, double offset, uint64_t rgba64) noexcept { using namespace BLGradientPrivate; 
BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(index >= getSize(self))) return blTraceError(BL_ERROR_INVALID_VALUE); BL_PROPAGATE(makeMutable(self, false)); BLGradientPrivateImpl* selfI = getImpl(self); BLGradientStop* stops = selfI->stops; if (stops[index].offset == offset) { stops[index].rgba.value = rgba64; return BL_SUCCESS; } else { BL_PROPAGATE(blGradientRemoveStop(self, index)); return blGradientAddStopRgba64(self, offset, rgba64); } } BL_API_IMPL size_t blGradientIndexOfStop(const BLGradientCore* self, double offset) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); const BLGradientPrivateImpl* selfI = getImpl(self); const BLGradientStop* stops = selfI->stops; size_t n = selfI->size; if (!n) return SIZE_MAX; size_t i = BLAlgorithm::binarySearch(stops, n, GradientStopMatcher(offset)); if (i == SIZE_MAX) return SIZE_MAX; if (i > 0 && stops[i - 1].offset == offset) i--; return i; } // BLGradient - API - Matrix // ========================= BL_API_IMPL BLResult blGradientApplyMatrixOp(BLGradientCore* self, BLMatrix2DOp opType, const void* opData) noexcept { using namespace BLGradientPrivate; BL_ASSERT(self->_d.isGradient()); if (BL_UNLIKELY(uint32_t(opType) > BL_MATRIX2D_OP_MAX_VALUE)) return blTraceError(BL_ERROR_INVALID_VALUE); BLGradientPrivateImpl* selfI = getImpl(self); if (opType == 0 && selfI->matrixType == BL_MATRIX2D_TYPE_IDENTITY) return BL_SUCCESS; BL_PROPAGATE(makeMutable(self, true)); selfI = getImpl(self); blMatrix2DApplyOp(&selfI->matrix, opType, opData); selfI->matrixType = uint8_t(selfI->matrix.type()); return BL_SUCCESS; } // BLGradient - API - Equals // ========================= BL_API_IMPL bool blGradientEquals(const BLGradientCore* a, const BLGradientCore* b) noexcept { using namespace BLGradientPrivate; BL_ASSERT(a->_d.isGradient()); BL_ASSERT(b->_d.isGradient()); const BLGradientPrivateImpl* aI = getImpl(a); const BLGradientPrivateImpl* bI = getImpl(b); if (aI == bI) return true; size_t size = aI->size; 
bool eq = (aI->gradientType == bI->gradientType) & (aI->extendMode == bI->extendMode ) & (aI->matrixType == bI->matrixType ) & (aI->matrix == bI->matrix ) & (size == bI->size ) ; return eq && memcmp(aI->stops, bI->stops, size * sizeof(BLGradientStop)) == 0; } // BLGradient - Runtime Registration // ================================= void blGradientRtInit(BLRuntimeContext* rt) noexcept { blUnused(rt); BLGradientPrivate::defaultImpl.impl->matrix.reset(); blObjectDefaults[BL_OBJECT_TYPE_GRADIENT]._d.initDynamic( BL_OBJECT_TYPE_GRADIENT, BLObjectInfo{BL_OBJECT_INFO_IMMUTABLE_FLAG}, &BLGradientPrivate::defaultImpl.impl); } // BLGradient - Tests // ================== #if defined(BL_TEST) UNIT(gradient) { INFO("Dynamic memory allocation strategy"); { BLGradient g; size_t kNumItems = 10000000; size_t capacity = g.capacity(); for (size_t i = 0; i < kNumItems; i++) { g.addStop(double(i) / double(kNumItems), BLRgba32(0xFFFFFFFF)); if (capacity != g.capacity()) { size_t implSize = BLGradientPrivate::implSizeFromCapacity(g.capacity()).value(); INFO("Capacity increased from %zu to %zu [ImplSize=%zu]\n", capacity, g.capacity(), implSize); capacity = g.capacity(); } } } INFO("Gradient - Linear values"); { BLGradient g(BLLinearGradientValues(0.0, 0.5, 1.0, 1.5)); EXPECT_EQ(g.type(), BL_GRADIENT_TYPE_LINEAR); EXPECT_EQ(g.x0(), 0.0); EXPECT_EQ(g.y0(), 0.5); EXPECT_EQ(g.x1(), 1.0); EXPECT_EQ(g.y1(), 1.5); g.setX0(0.15); g.setY0(0.85); g.setX1(0.75); g.setY1(0.25); EXPECT_EQ(g.x0(), 0.15); EXPECT_EQ(g.y0(), 0.85); EXPECT_EQ(g.x1(), 0.75); EXPECT_EQ(g.y1(), 0.25); } INFO("Gradient - Radial values"); { BLGradient g(BLRadialGradientValues(1.0, 1.5, 0.0, 0.5, 500.0)); EXPECT_EQ(g.type(), BL_GRADIENT_TYPE_RADIAL); EXPECT_EQ(g.x0(), 1.0); EXPECT_EQ(g.y0(), 1.5); EXPECT_EQ(g.x1(), 0.0); EXPECT_EQ(g.y1(), 0.5); EXPECT_EQ(g.r0(), 500.0); g.setR0(150.0); EXPECT_EQ(g.r0(), 150.0); } INFO("Gradient - Conical values"); { BLGradient g(BLConicalGradientValues(1.0, 1.5, 0.1)); EXPECT_EQ(g.type(), 
BL_GRADIENT_TYPE_CONICAL); EXPECT_EQ(g.x0(), 1.0); EXPECT_EQ(g.y0(), 1.5); EXPECT_EQ(g.angle(), 0.1); } INFO("Gradient - Stops"); { BLGradient g; g.addStop(0.0, BLRgba32(0x00000000u)); EXPECT_EQ(g.size(), 1u); EXPECT_EQ(g.stopAt(0).rgba.value, 0x0000000000000000u); g.addStop(1.0, BLRgba32(0xFF000000u)); EXPECT_EQ(g.size(), 2u); EXPECT_EQ(g.stopAt(1).rgba.value, 0xFFFF000000000000u); g.addStop(0.5, BLRgba32(0xFFFF0000u)); EXPECT_EQ(g.size(), 3u); EXPECT_EQ(g.stopAt(1).rgba.value, 0xFFFFFFFF00000000u); g.addStop(0.5, BLRgba32(0xFFFFFF00u)); EXPECT_EQ(g.size(), 4u); EXPECT_EQ(g.stopAt(2).rgba.value, 0xFFFFFFFFFFFF0000u); g.removeStopByOffset(0.5, true); EXPECT_EQ(g.size(), 2u); EXPECT_EQ(g.stopAt(0).rgba.value, 0x0000000000000000u); EXPECT_EQ(g.stopAt(1).rgba.value, 0xFFFF000000000000u); g.addStop(0.5, BLRgba32(0x80000000u)); EXPECT_EQ(g.size(), 3u); EXPECT_EQ(g.stopAt(1).rgba.value, 0x8080000000000000u); // Check whether copy-on-write works as expected. BLGradient copy(g); EXPECT_EQ(copy.size(), 3u); g.addStop(0.5, BLRgba32(0xCC000000u)); EXPECT_EQ(copy.size(), 3u); EXPECT_EQ(g.size(), 4u); EXPECT_EQ(g.stopAt(0).rgba.value, 0x0000000000000000u); EXPECT_EQ(g.stopAt(1).rgba.value, 0x8080000000000000u); EXPECT_EQ(g.stopAt(2).rgba.value, 0xCCCC000000000000u); EXPECT_EQ(g.stopAt(3).rgba.value, 0xFFFF000000000000u); g.resetStops(); EXPECT_EQ(g.size(), 0u); } } #endif
30.512971
196
0.69731
[ "solid" ]
118e15b4de21058b852357a13166034320a23645
7,053
cpp
C++
judger/uoj_judger/builtin/judger/judger.cpp
vfleaking/UOJ-System
c34a6406dd556568b06605c2988667eca73af253
[ "MIT" ]
null
null
null
judger/uoj_judger/builtin/judger/judger.cpp
vfleaking/UOJ-System
c34a6406dd556568b06605c2988667eca73af253
[ "MIT" ]
null
null
null
judger/uoj_judger/builtin/judger/judger.cpp
vfleaking/UOJ-System
c34a6406dd556568b06605c2988667eca73af253
[ "MIT" ]
null
null
null
#include "uoj_judger.h" struct SubtaskInfo { bool passed; int score; SubtaskInfo() { } SubtaskInfo(const bool &_p, const int &_s) : passed(_p), score(_s){} }; void ordinary_test() { int n = conf_int("n_tests", 10); int m = conf_int("n_ex_tests", 0); int nT = conf_int("n_subtasks", 0); if (!conf_is("submit_answer", "on")) { report_judge_status_f("Compiling"); RunCompilerResult c_ret = !conf_is("with_implementer", "on") ? compile("answer") : compile_with_implementer("answer"); if (!c_ret.succeeded) { end_judge_compile_error(c_ret); } } bool passed = true; if (nT == 0) { for (int i = 1; i <= n; i++) { report_judge_status_f("Judging Test #%d", i); PointInfo po = test_point("answer", i); if (po.scr != 100) { passed = false; } po.scr = scale_score(po.scr, conf_int("point_score", i, 100 / n)); add_point_info(po); } } else if (nT == 1 && conf_str("subtask_type", 1, "packed") == "packed") { for (int i = 1; i <= n; i++) { report_judge_status_f("Judging Test #%d", i); PointInfo po = test_point("answer", i); if (po.scr != 100) { passed = false; po.scr = i == 1 ? 0 : -100; add_point_info(po); break; } else { po.scr = i == 1 ? 
100 : 0; add_point_info(po); } } } else { map<int, SubtaskInfo> subtasks; map<int,int> minScore; for (int t = 1; t <= nT; t++) { string subtaskType = conf_str("subtask_type", t, "packed"); int startI = conf_int("subtask_end", t - 1, 0) + 1; int endI = conf_int("subtask_end", t, 0); vector<PointInfo> points; minScore[t] = 100; vector<int> dependences; if (conf_str("subtask_dependence", t, "none") == "many") { string cur = "subtask_dependence_" + vtos(t); int p = 1; while (conf_int(cur, p, 0) != 0) { dependences.push_back(conf_int(cur, p, 0)); p++; } } else if (conf_int("subtask_dependence", t, 0) != 0) { dependences.push_back(conf_int("subtask_dependence", t, 0)); } bool skipped = false; for (vector<int>::iterator it = dependences.begin(); it != dependences.end(); it++) { if (subtaskType == "packed") { if (!subtasks[*it].passed) { skipped = true; break; } } else if (subtaskType == "min") { minScore[t] = min(minScore[t], minScore[*it]); } } if (skipped) { add_subtask_info(t, 0, "Skipped", points); continue; } int tfull = conf_int("subtask_score", t, 100 / nT); int tscore = scale_score(minScore[t], tfull); string info = "Accepted"; for (int i = startI; i <= endI; i++) { report_judge_status_f("Judging Test #%d of Subtask #%d", i, t); PointInfo po = test_point("answer", i); if (subtaskType == "packed") { if (po.scr != 100) { passed = false; po.scr = i == startI ? 0 : -tfull; tscore = 0; points.push_back(po); info = po.info; break; } else { po.scr = i == startI ? 
tfull : 0; tscore = tfull; points.push_back(po); } } else if (subtaskType == "min") { minScore[t] = min(minScore[t], po.scr); if (po.scr != 100) { passed = false; } po.scr = scale_score(po.scr, tfull); if (po.scr <= tscore) { tscore = po.scr; points.push_back(po); info = po.info; } else { points.push_back(po); } } } subtasks[t] = SubtaskInfo(info == "Accepted", tscore); add_subtask_info(t, tscore, info, points); } } if (conf_is("submit_answer", "on") || !passed) { end_judge_ok(); } tot_score = 100; for (int i = 1; i <= m; i++) { report_judge_status_f("Judging Extra Test #%d", i); PointInfo po = test_point("answer", -i); if (po.scr != 100) { po.num = -1; po.info = "Extra Test Failed : " + po.info + " on " + vtos(i); po.scr = -3; add_point_info(po); end_judge_ok(); } } if (m != 0) { PointInfo po(-1, 0, -1, -1, "Extra Test Passed", "", "", ""); add_point_info(po); } end_judge_ok(); } void hack_test() { if (conf_is("submit_answer", "on")) { end_judge_judgement_failed("Hack is not supported in this problem."); } else { RunCompilerResult c_ret = !conf_is("with_implementer", "on") ? 
compile("answer") : compile_with_implementer("answer"); if (!c_ret.succeeded) { end_judge_compile_error(c_ret); } TestPointConfig tpc; tpc.input_file_name = work_path + "/hack_input.txt"; tpc.output_file_name = work_path + "/pro_output.txt"; tpc.answer_file_name = work_path + "/std_output.txt"; PointInfo po = test_hack_point("answer", tpc); add_point_info(po); end_judge_ok(); } } void sample_test() { if (conf_is("submit_answer", "on")) { int n = conf_int("n_tests", 10); for (int i = 1; i <= n; i++) { report_judge_status_f("Judging Test #%d", i); if (conf_is("check_existence_only_in_sample_test", "on")) { TestPointConfig tpc = TestPointConfig(); tpc.auto_complete(i); string usrout = file_preview(tpc.output_file_name); if (usrout == "") { add_point_info(PointInfo(i, 0, -1, -1, "default", file_preview(tpc.input_file_name), usrout, "wrong answer empty file\n")); } else { PointInfo po = PointInfo(i, 100, -1, -1, "default", file_preview(tpc.input_file_name), usrout, "ok nonempty file\n"); po.scr = scale_score(po.scr, conf_int("point_score", i, 100 / n)); add_point_info(po); } } else { PointInfo po = test_point("answer", i); if (po.scr != 0) { po.info = "Accepted"; po.scr = 100; } po.scr = scale_score(po.scr, conf_int("point_score", i, 100 / n)); po.res = "no comment"; add_point_info(po); } } end_judge_ok(); } else { report_judge_status_f("Compiling"); RunCompilerResult c_ret = !conf_is("with_implementer", "on") ? 
compile("answer") : compile_with_implementer("answer"); if (!c_ret.succeeded) { end_judge_compile_error(c_ret); } int n = conf_int("n_sample_tests", 0); bool passed = true; for (int i = 1; i <= n; i++) { report_judge_status_f("Judging Sample Test #%d", i); PointInfo po = test_point("answer", -i); po.num = i; if (po.scr != 100) { passed = false; } po.scr = scale_score(po.scr, 100 / n); add_point_info(po); } if (passed) { tot_score = 100; } end_judge_ok(); } } void custom_test() { if (conf_is("submit_answer", "on")) { end_judge_judgement_failed("Custom test is not supported in this problem."); } else { report_judge_status_f("Compiling"); RunCompilerResult c_ret = !conf_is("with_implementer", "on") ? compile("answer") : compile_with_implementer("answer"); if (!c_ret.succeeded) { end_judge_compile_error(c_ret); } report_judge_status_f("Judging"); add_custom_test_info(ordinary_custom_test("answer")); end_judge_ok(); } } int main(int argc, char **argv) { judger_init(argc, argv); if (conf_is("test_new_hack_only", "on")) { hack_test(); } else if (conf_is("test_sample_only", "on")) { sample_test(); } else if (conf_is("custom_test", "on")) { custom_test(); } else { ordinary_test(); } }
26.919847
120
0.601446
[ "vector" ]
118ea19a7e7337b4e7ffec6280efeb03d0634160
1,532
cpp
C++
clang/test/CodeGenSYCL/union-kernel-param.cpp
rarutyun/llvm
76fa6b3bcade074bdedef740001c4528e1aa08a8
[ "Apache-2.0" ]
null
null
null
clang/test/CodeGenSYCL/union-kernel-param.cpp
rarutyun/llvm
76fa6b3bcade074bdedef740001c4528e1aa08a8
[ "Apache-2.0" ]
null
null
null
clang/test/CodeGenSYCL/union-kernel-param.cpp
rarutyun/llvm
76fa6b3bcade074bdedef740001c4528e1aa08a8
[ "Apache-2.0" ]
null
null
null
// RUN: %clang_cc1 -fsycl -fsycl-is-device -triple spir64-unknown-unknown-sycldevice -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s // This test checks a kernel argument that is union with both array and non-array fields. #include "Inputs/sycl.hpp" using namespace cl::sycl; union MyUnion { int FldInt; char FldChar; float FldArr[3]; }; template <typename name, typename Func> __attribute__((sycl_kernel)) void a_kernel(Func kernelFunc) { kernelFunc(); } int main() { MyUnion obj; a_kernel<class kernel_A>( [=]() { float local = obj.FldArr[2]; }); } // CHECK kernel_A parameters // CHECK: define spir_kernel void @{{.*}}kernel_A(%union.{{.*}}.MyUnion* byval(%union.{{.*}}.MyUnion) align 4 [[MEM_ARG:%[a-zA-Z0-9_]+]]) // Check lambda object alloca // CHECK: [[LOCAL_OBJECT:%0]] = alloca %"class.{{.*}}.anon", align 4 // CHECK: [[L_STRUCT_ADDR:%[a-zA-Z0-9_]+]] = getelementptr inbounds %"class.{{.*}}.anon", %"class.{{.*}}.anon"* [[LOCAL_OBJECT]], i32 0, i32 0 // CHECK: [[MEMCPY_DST:%[0-9a-zA-Z_]+]] = bitcast %union.{{.*}}MyUnion* [[L_STRUCT_ADDR]] to i8* // CHECK: [[MEMCPY_SRC:%[0-9a-zA-Z_]+]] = bitcast %union.{{.*}}MyUnion* [[MEM_ARG]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[MEMCPY_DST]], i8* align 4 [[MEMCPY_SRC]], i64 12, i1 false) // CHECK: [[ACC_CAST1:%[0-9]+]] = addrspacecast %"class.{{.*}}.anon"* [[LOCAL_OBJECT]] to %"class.{{.*}}.anon" addrspace(4)* // CHECK: call spir_func void @{{.*}}(%"class.{{.*}}.anon" addrspace(4)* [[ACC_CAST1]])
36.47619
142
0.634465
[ "object" ]
11977ba5c3cef9d4f8d2ed05a3040d69091c0a28
2,212
cpp
C++
ch07/QtActivitySampleKotlin/activitycalling.cpp
argama147/qt-android
ca9e5a7d5b35bd9d81e6d12455b8c9d296e2e880
[ "MIT" ]
null
null
null
ch07/QtActivitySampleKotlin/activitycalling.cpp
argama147/qt-android
ca9e5a7d5b35bd9d81e6d12455b8c9d296e2e880
[ "MIT" ]
null
null
null
ch07/QtActivitySampleKotlin/activitycalling.cpp
argama147/qt-android
ca9e5a7d5b35bd9d81e6d12455b8c9d296e2e880
[ "MIT" ]
null
null
null
#include "activitycalling.h" #include <QtAndroidExtras> #include <QtDebug> #include <QString> #include <QException> ActivityCalling::ActivityCalling(QObject *parent) : QObject(parent), m_handle("android/content/Intent", "()V") { m_result_receiver.setActivityCalling(this); } void ActivityCalling::connectQml(QObject *root) { m_root = root; QQuickItem *startButton = root->findChild<QQuickItem *>("startActivityButton"); connect(startButton, SIGNAL(clicked()), this, SLOT(startActivitySlot())); } void ActivityCalling::setResult(const QString &text) { qDebug() << __FUNCTION__; if (m_root != nullptr) { QObject *resultText = m_root->findChild<QObject*>("resultText"); resultText->setProperty("text", text); } } void ActivityCalling::putExtra(const QString &key, const QByteArray &data, QAndroidJniObject handle) { QAndroidJniExceptionCleaner cleaner; QAndroidJniEnvironment env; jbyteArray array = env->NewByteArray(data.size()); env->SetByteArrayRegion(array, 0, data.length(), reinterpret_cast<const jbyte*>(data.constData())); handle.callObjectMethod("putExtra", "(Ljava/lang/String;[B)Landroid/content/Intent;", QAndroidJniObject::fromString(key).object(), array); env->DeleteLocalRef(array); } void ActivityCalling::startActivitySlot() { qDebug() << __FUNCTION__; auto intent = QAndroidIntent(QtAndroid::androidActivity().object(), "com.eugo.example.qtactivitysample.SubActivity"); QString key = "MainActivityData"; QString strData = "MainActivityData"; // QByteArray array; // array.insert(0, "test"); QVariant variantData(strData); putExtra(key, strData.toUtf8(), intent.handle()); // intent.putExtra(key, variantData); // variantData.setValue(strData); // try { // intent.putExtra(key, variantData); // } catch (QException e) { // qDebug() << e.what(); // } QtAndroid::startActivity(intent, 1, &m_result_receiver); } void ActivityCalling::updateQml(const QString &msg) { QObject *resultText = m_root->findChild<QObject*>("resultText"); resultText->setProperty("text", msg); }
30.722222
103
0.678571
[ "object" ]
119b8dd8ba6333c62827cd8b76d66120770c594c
9,301
cxx
C++
src/Cxx/Annotation/MultiLineText.cxx
cvandijck/VTKExamples
b6bb89414522afc1467be8a1f0089a37d0c16883
[ "Apache-2.0" ]
309
2017-05-21T09:07:19.000Z
2022-03-15T09:18:55.000Z
src/Cxx/Annotation/MultiLineText.cxx
yijianmingliu/VTKExamples
dc8aac47c4384f9a2de9facbdd1ab3249f62ec99
[ "Apache-2.0" ]
379
2017-05-21T09:06:43.000Z
2021-03-29T20:30:50.000Z
src/Cxx/Annotation/MultiLineText.cxx
yijianmingliu/VTKExamples
dc8aac47c4384f9a2de9facbdd1ab3249f62ec99
[ "Apache-2.0" ]
170
2017-05-17T14:47:41.000Z
2022-03-31T13:16:26.000Z
// This example demonstrates the use of multiline 2D text using // vtkTextMappers. It shows several justifications as well as // single-line and multiple-line text inputs. #include <vtkSmartPointer.h> #include <vtkNamedColors.h> #include <vtkActor2D.h> #include <vtkCamera.h> #include <vtkCellArray.h> #include <vtkCoordinate.h> #include <vtkPoints.h> #include <vtkPolyData.h> #include <vtkPolyDataMapper2D.h> #include <vtkProperty2D.h> #include <vtkRenderWindow.h> #include <vtkRenderWindowInteractor.h> #include <vtkRenderer.h> #include <vtkTextMapper.h> #include <vtkTextProperty.h> int main (int, char*[]) { int font_size = 24;; // Create the text mappers and the associated Actor2Ds. // The font and text properties (except justification) are the same for // each single line mapper. Let's create a common text property object vtkSmartPointer<vtkTextProperty> singleLineTextProp = vtkSmartPointer<vtkTextProperty>::New(); singleLineTextProp->SetFontSize(font_size); singleLineTextProp->SetFontFamilyToArial(); singleLineTextProp->BoldOff(); singleLineTextProp->ItalicOff(); singleLineTextProp->ShadowOff(); // The font and text properties (except justification) are the same for // each multi line mapper. Let's create a common text property object vtkSmartPointer<vtkTextProperty> multiLineTextProp = vtkSmartPointer<vtkTextProperty>::New(); multiLineTextProp->ShallowCopy(singleLineTextProp); multiLineTextProp->BoldOn(); multiLineTextProp->ItalicOn(); multiLineTextProp->ShadowOn(); multiLineTextProp->SetLineSpacing(0.8); vtkSmartPointer<vtkNamedColors> colors = vtkSmartPointer<vtkNamedColors>::New(); // The text is on a single line and bottom-justified. 
vtkSmartPointer<vtkTextMapper> singleLineTextB = vtkSmartPointer<vtkTextMapper>::New(); singleLineTextB->SetInput("Single line (bottom)"); vtkTextProperty *tprop = singleLineTextB->GetTextProperty(); tprop->ShallowCopy(singleLineTextProp);; tprop->SetVerticalJustificationToBottom(); tprop->SetColor(colors->GetColor3d("Tomato").GetData()); vtkSmartPointer<vtkActor2D> singleLineTextActorB = vtkSmartPointer<vtkActor2D>::New(); singleLineTextActorB->SetMapper(singleLineTextB); singleLineTextActorB->GetPositionCoordinate()->SetCoordinateSystemToNormalizedDisplay(); singleLineTextActorB->GetPositionCoordinate()->SetValue(0.05, 0.85); // The text is on a single line and center-justified (vertical // justification). vtkSmartPointer<vtkTextMapper> singleLineTextC = vtkSmartPointer<vtkTextMapper>::New(); singleLineTextC->SetInput("Single line (centered)"); tprop = singleLineTextC->GetTextProperty(); tprop->ShallowCopy(singleLineTextProp); tprop->SetVerticalJustificationToCentered(); tprop->SetColor(colors->GetColor3d("DarkGreen").GetData()); vtkSmartPointer<vtkActor2D> singleLineTextActorC = vtkSmartPointer<vtkActor2D>::New(); singleLineTextActorC->SetMapper(singleLineTextC); singleLineTextActorC->GetPositionCoordinate()->SetCoordinateSystemToNormalizedDisplay(); singleLineTextActorC->GetPositionCoordinate()->SetValue(0.05, 0.75); // The text is on a single line and top-justified. 
vtkSmartPointer<vtkTextMapper> singleLineTextT = vtkSmartPointer<vtkTextMapper>::New(); singleLineTextT->SetInput("Single line (top)"); tprop = singleLineTextT->GetTextProperty(); tprop->ShallowCopy(singleLineTextProp); tprop->SetVerticalJustificationToTop(); tprop->SetColor(colors->GetColor3d("Peacock").GetData()); vtkSmartPointer<vtkActor2D> singleLineTextActorT = vtkSmartPointer<vtkActor2D>::New(); singleLineTextActorT->SetMapper(singleLineTextT); singleLineTextActorT->GetPositionCoordinate()->SetCoordinateSystemToNormalizedDisplay(); singleLineTextActorT->GetPositionCoordinate()->SetValue(0.05, 0.65); // The text is on multiple lines and left- and top-justified. vtkSmartPointer<vtkTextMapper> textMapperL = vtkSmartPointer<vtkTextMapper>::New(); textMapperL->SetInput("This is\nmulti-line\ntext output\n(left-top)"); tprop = textMapperL->GetTextProperty(); tprop->ShallowCopy(multiLineTextProp); tprop->SetJustificationToLeft(); tprop->SetVerticalJustificationToTop(); tprop->SetColor(colors->GetColor3d("Tomato").GetData()); vtkSmartPointer<vtkActor2D> textActorL = vtkSmartPointer<vtkActor2D>::New(); textActorL->SetMapper(textMapperL); textActorL->GetPositionCoordinate()->SetCoordinateSystemToNormalizedDisplay(); textActorL->GetPositionCoordinate()->SetValue(0.05, 0.5); // The text is on multiple lines and center-justified (both horizontal and // vertical). 
vtkSmartPointer<vtkTextMapper> textMapperC = vtkSmartPointer<vtkTextMapper>::New(); textMapperC->SetInput("This is\nmulti-line\ntext output\n(centered)"); tprop = textMapperC->GetTextProperty(); tprop->ShallowCopy(multiLineTextProp); tprop->SetJustificationToCentered(); tprop->SetVerticalJustificationToCentered(); tprop->SetColor(colors->GetColor3d("DarkGreen").GetData()); vtkSmartPointer<vtkActor2D> textActorC = vtkSmartPointer<vtkActor2D>::New(); textActorC->SetMapper(textMapperC); textActorC->GetPositionCoordinate()->SetCoordinateSystemToNormalizedDisplay(); textActorC->GetPositionCoordinate()->SetValue(0.5, 0.5); // The text is on multiple lines and right- and bottom-justified. vtkSmartPointer<vtkTextMapper> textMapperR = vtkSmartPointer<vtkTextMapper>::New(); textMapperR->SetInput("This is\nmulti-line\ntext output\n(right-bottom)"); tprop = textMapperR->GetTextProperty(); tprop->ShallowCopy(multiLineTextProp); tprop->SetJustificationToRight(); tprop->SetVerticalJustificationToBottom(); tprop->SetColor(colors->GetColor3d("Peacock").GetData()); vtkSmartPointer<vtkActor2D> textActorR = vtkSmartPointer<vtkActor2D>::New(); textActorR->SetMapper(textMapperR); textActorR->GetPositionCoordinate()->SetCoordinateSystemToNormalizedDisplay(); textActorR->GetPositionCoordinate()->SetValue(0.95, 0.5); // Draw the grid to demonstrate the placement of the text. // Set up the necessary points. 
vtkSmartPointer<vtkPoints> Pts = vtkSmartPointer<vtkPoints>::New(); Pts->InsertNextPoint(0.05, 0.0, 0.0); Pts->InsertNextPoint(0.05, 1.0, 0.0); Pts->InsertNextPoint(0.5, 0.0, 0.0); Pts->InsertNextPoint(0.5, 1.0, 0.0); Pts->InsertNextPoint(0.95, 0.0, 0.0); Pts->InsertNextPoint(0.95, 1.0, 0.0); Pts->InsertNextPoint(0.0, 0.5, 0.0); Pts->InsertNextPoint(1.0, 0.5, 0.0); Pts->InsertNextPoint(0.00, 0.85, 0.0); Pts->InsertNextPoint(0.50, 0.85, 0.0); Pts->InsertNextPoint(0.00, 0.75, 0.0); Pts->InsertNextPoint(0.50, 0.75, 0.0); Pts->InsertNextPoint(0.00, 0.65, 0.0); Pts->InsertNextPoint(0.50, 0.65, 0.0); // Set up the lines that use these points. vtkSmartPointer<vtkCellArray> Lines = vtkSmartPointer<vtkCellArray>::New(); Lines->InsertNextCell(2); Lines->InsertCellPoint(0); Lines->InsertCellPoint(1); Lines->InsertNextCell(2); Lines->InsertCellPoint(2); Lines->InsertCellPoint(3); Lines->InsertNextCell(2); Lines->InsertCellPoint(4); Lines->InsertCellPoint(5); Lines->InsertNextCell(2); Lines->InsertCellPoint(6); Lines->InsertCellPoint(7); Lines->InsertNextCell(2); Lines->InsertCellPoint(8); Lines->InsertCellPoint(9); Lines->InsertNextCell(2); Lines->InsertCellPoint(10); Lines->InsertCellPoint(11); Lines->InsertNextCell(2); Lines->InsertCellPoint(12); Lines->InsertCellPoint(13); // Create a grid that uses these points and lines. vtkSmartPointer<vtkPolyData> Grid = vtkSmartPointer<vtkPolyData>::New(); Grid->SetPoints(Pts); Grid->SetLines(Lines); // Set up the coordinate system. vtkSmartPointer<vtkCoordinate> normCoords = vtkSmartPointer<vtkCoordinate>::New(); normCoords->SetCoordinateSystemToNormalizedViewport(); // Set up the mapper and actor (2D) for the grid. 
vtkSmartPointer<vtkPolyDataMapper2D> mapper = vtkSmartPointer<vtkPolyDataMapper2D>::New(); mapper->SetInputData(Grid); mapper->SetTransformCoordinate(normCoords); vtkSmartPointer<vtkActor2D> gridActor = vtkSmartPointer<vtkActor2D>::New(); gridActor->SetMapper(mapper); gridActor->GetProperty()->SetColor(colors->GetColor3d("DimGray").GetData()); // Create the Renderer, RenderWindow, and RenderWindowInteractor vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New(); vtkSmartPointer<vtkRenderWindow> renderWindow = vtkSmartPointer<vtkRenderWindow>::New(); renderWindow->AddRenderer(renderer); vtkSmartPointer<vtkRenderWindowInteractor> interactor = vtkSmartPointer<vtkRenderWindowInteractor>::New(); interactor->SetRenderWindow(renderWindow); // Add the actors to the renderer; set the background and size; zoom in // closer to the image; render renderer->AddActor2D(textActorL); renderer->AddActor2D(textActorC); renderer->AddActor2D(textActorR); renderer->AddActor2D(singleLineTextActorB); renderer->AddActor2D(singleLineTextActorC); renderer->AddActor2D(singleLineTextActorT); renderer->AddActor2D(gridActor); renderer->SetBackground(colors->GetColor3d("Silver").GetData()); renderWindow->SetSize(640, 480); renderer->GetActiveCamera()->Zoom(1.5); interactor->Initialize(); renderWindow->Render(); interactor->Start(); return EXIT_SUCCESS; }
38.118852
90
0.760671
[ "render", "object" ]
119de6616b9307946a7fb37d266f89bff5fa318e
5,483
cpp
C++
src/render/r_model.cpp
thefishlive/VkFPS
42d66070f95285bef144300bd14c9927f30d32fe
[ "MIT" ]
null
null
null
src/render/r_model.cpp
thefishlive/VkFPS
42d66070f95285bef144300bd14c9927f30d32fe
[ "MIT" ]
null
null
null
src/render/r_model.cpp
thefishlive/VkFPS
42d66070f95285bef144300bd14c9927f30d32fe
[ "MIT" ]
null
null
null
/****************************************************************************** * Copyright 2017 James Fitzpatrick <james_fitzpatrick@outlook.com> * * * * Permission is hereby granted, free of charge, to any person obtaining a * * copy of this software and associated documentation files (the "Software"), * * to deal in the Software without restriction, including without limitation * * the rights to use, copy, modify, merge, publish, distribute, sublicense, * * and/or sell copies of the Software, and to permit persons to whom the * * Software is furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in * * all copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * * DEALINGS IN THE SOFTWARE. 
* ******************************************************************************/ #include "r_model.h" #include <fstream> #include <iostream> #include <map> #include <sstream> #define GLM_ENABLE_EXPERIMENTAL #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/quaternion.hpp> #include <glm/gtx/string_cast.hpp> #include "r_camera.h" #include "r_material.h" #include "u_debug.h" #include "u_defines.h" #include "u_io.h" Model::Model(std::shared_ptr<GraphicsDevice>& device, std::shared_ptr<GraphicsDevmem>& devmem, std::shared_ptr<Renderer>& renderer, std::vector<Vertex>& verticies, std::vector<std::unique_ptr<Material>>& materials, std::vector<std::vector<uint32_t>>& indicies) : device(device), devmem(devmem), renderer(renderer), position(0, 0, 0) { /* * transfer data from indicies + materials to material_data */ size_t index = 0; for (unsigned int i = 0; i < materials.size(); i++) { material_data.push_back( std::make_unique<MaterialData>( materials[i], indicies[i], index ) ); index += indicies[i].size(); } this->index_count = (uint32_t) index; vk::BufferCreateInfo vbuf_create_info( vk::BufferCreateFlags(0), verticies.size() * sizeof(Vertex), vk::BufferUsageFlagBits::eVertexBuffer, vk::SharingMode::eExclusive ); VmaAllocationCreateInfo vbuf_alloc_info{}; vbuf_alloc_info.flags = 0; vbuf_alloc_info.usage = VMA_MEMORY_USAGE_GPU_ONLY; vbuf_alloc_info.pUserData = STRING_TO_DATA("Vertex Buffer"); vertex_buffer = devmem->create_buffer(vbuf_create_info, vbuf_alloc_info); void *data; vertex_buffer->map_memory(&data); memcpy(data, verticies.data(), verticies.size() * sizeof(Vertex)); vertex_buffer->unmap_memory(); vertex_buffer->commit_memory(); vk::BufferCreateInfo ibuf_create_info( vk::BufferCreateFlags(0), sizeof(uint32_t) * index_count, vk::BufferUsageFlagBits::eIndexBuffer, vk::SharingMode::eExclusive ); VmaAllocationCreateInfo ibuf_alloc_info{}; ibuf_alloc_info.flags = 0; ibuf_alloc_info.usage = VMA_MEMORY_USAGE_GPU_ONLY; ibuf_alloc_info.pUserData = 
STRING_TO_DATA("Index Buffer"); index_buffer = devmem->create_buffer(ibuf_create_info, ibuf_alloc_info); index_buffer->map_memory(&data); for (const auto & material : material_data) { memcpy(data, material->indicies.data(), material->indicies.size() * sizeof(uint32_t)); } index_buffer->unmap_memory(); index_buffer->commit_memory(); command_buffers = renderer->alloc_render_command_buffers(); this->invalidate_recording(); } Model::~Model() { } void Model::invalidate_recording() { for (auto i = 0; i < command_buffers.size(); i++) { renderer->start_secondary_command_buffer(command_buffers[i], i, 0); std::vector<vk::Buffer> vbufs{ vertex_buffer->buffer }; std::vector<VkDeviceSize> voffsets{ 0 }; command_buffers[i].bindVertexBuffers(0, (uint32_t)vbufs.size(), vbufs.data(), voffsets.data()); command_buffers[i].bindIndexBuffer(index_buffer->buffer, 0, vk::IndexType::eUint32); glm::mat4 translation = glm::translate(glm::mat4(1), position); glm::mat4 rotation = glm::toMat4(this->rotation); VertexShaderData shader_data(translation); for (const auto & data : material_data) { data->material->bind_material(command_buffers[i]); data->material->push_shader_data(command_buffers[i], 0, vk::ShaderStageFlagBits::eVertex, sizeof(VertexShaderData), &shader_data); command_buffers[i].drawIndexed((uint32_t)data->indicies.size(), 1, data->start_index, 0, 0); } renderer->end_secondary_command_buffer(command_buffers[i]); } } void Model::render(vk::CommandBuffer cmd, uint32_t image) const { cmd.executeCommands(this->command_buffers[image]); }
37.29932
260
0.644538
[ "render", "vector", "model" ]
11a0b38daaf0abc49409760461d5636f2de4f2ef
6,876
cpp
C++
lib/AL_USDMaya/AL/usdmaya/Global.cpp
PaulDoessel/AL_USDMaya
912071b304748073299be08ba46a670390eee346
[ "Apache-2.0" ]
1
2018-08-30T13:46:41.000Z
2018-08-30T13:46:41.000Z
lib/AL_USDMaya/AL/usdmaya/Global.cpp
PaulDoessel/AL_USDMaya
912071b304748073299be08ba46a670390eee346
[ "Apache-2.0" ]
null
null
null
lib/AL_USDMaya/AL/usdmaya/Global.cpp
PaulDoessel/AL_USDMaya
912071b304748073299be08ba46a670390eee346
[ "Apache-2.0" ]
null
null
null
// // Copyright 2017 Animal Logic // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.// // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include "AL/usdmaya/Global.h" #include "AL/usdmaya/StageCache.h" #include "AL/usdmaya/DebugCodes.h" #include "AL/usdmaya/nodes/Layer.h" #include "AL/usdmaya/nodes/ProxyShape.h" #include "AL/usdmaya/nodes/Transform.h" #include "AL/usdmaya/nodes/TransformationMatrix.h" #include <pxr/base/plug/registry.h> #include <pxr/base/tf/getenv.h> #include <pxr/base/tf/stringUtils.h> #include <pxr/usd/usdUtils/stageCache.h> #include "maya/MGlobal.h" #include "maya/MFnDependencyNode.h" #include "maya/MItDependencyNodes.h" #include <iostream> #ifndef AL_USDMAYA_LOCATION_NAME #define AL_USDMAYA_LOCATION_NAME "AL_USDMAYA_LOCATION" #endif namespace AL { namespace usdmaya { //---------------------------------------------------------------------------------------------------------------------- MCallbackId Global::m_preSave; MCallbackId Global::m_postSave; MCallbackId Global::m_preOpen; MCallbackId Global::m_postOpen; MCallbackId Global::m_fileNew; //---------------------------------------------------------------------------------------------------------------------- static void onFileNew(void*) { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("onFileNew\n"); // These should both clear the caches, however they don't actually do anything of the sort. Puzzled. 
UsdUtilsStageCache::Get().Clear(); StageCache::Clear(); } //---------------------------------------------------------------------------------------------------------------------- static void preFileOpen(void*) { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("preFileOpen\n"); } //---------------------------------------------------------------------------------------------------------------------- static void postFileOpen(void*) { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("postFileOpen\n"); MFnDependencyNode fn; { MItDependencyNodes iter(MFn::kPluginShape); for(; !iter.isDone(); iter.next()) { fn.setObject(iter.item()); if(fn.typeId() == nodes::ProxyShape::kTypeId) { // execute a pull on each proxy shape to ensure that each one has a valid USD stage! nodes::ProxyShape* proxy = (nodes::ProxyShape*)fn.userNode(); auto stage = proxy->getUsdStage(); proxy->deserialiseTranslatorContext(); proxy->findTaggedPrims(); proxy->constructGLImagingEngine(); proxy->deserialiseTransformRefs(); auto layer = proxy->getLayer(); if(layer) { layer->setLayerAndClearAttribute(stage->GetSessionLayer()); } } } } { MItDependencyNodes iter(MFn::kPluginDependNode); for(; !iter.isDone(); iter.next()) { fn.setObject(iter.item()); if(fn.typeId() == nodes::Layer::kTypeId) { // now go and fix up each of the layer nodes in the scene nodes::Layer* layerPtr = (nodes::Layer*)fn.userNode(); MPlug plug = layerPtr->nameOnLoadPlug(); MString path = plug.asString(); if(path.length() && path.substring(0, 3) != "anon") { SdfLayerHandle layer = SdfLayer::FindOrOpen(path.asChar()); LAYER_HANDLE_CHECK(layer); layerPtr->setLayerAndClearAttribute(layer); } else { } } } } { MItDependencyNodes iter(MFn::kPluginTransformNode); for(; !iter.isDone(); iter.next()) { fn.setObject(iter.item()); if(fn.typeId() == nodes::Transform::kTypeId) { // ensure all of the transforms are referring to the correct prim nodes::Transform* tmPtr = (nodes::Transform*)fn.userNode(); tmPtr->transform()->initialiseToPrim(true, tmPtr); } } } } 
//---------------------------------------------------------------------------------------------------------------------- static void preFileSave(void*) { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("preFileSave\n"); // currently, if we have selected a shape in the usd proxy shape, a series of transforms will have been created. // Ideally we don't want these transient nodes to be stored in the Maya file, so make sure we unselect prior to a file // save (which should call another set of callbacks and delete those transient nodes. This should leave us with just // those AL::usdmaya::nodes::Transform nodes that are created because they are required, or have been requested). MGlobal::clearSelectionList(); } //---------------------------------------------------------------------------------------------------------------------- static void postFileSave(void*) { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("postFileSave\n"); } //---------------------------------------------------------------------------------------------------------------------- void Global::onPluginLoad() { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("Registering callbacks\n"); m_fileNew = MSceneMessage::addCallback(MSceneMessage::kAfterNew, onFileNew); m_preSave = MSceneMessage::addCallback(MSceneMessage::kBeforeSave, preFileSave); m_postSave = MSceneMessage::addCallback(MSceneMessage::kAfterSave, postFileSave); m_preOpen = MSceneMessage::addCallback(MSceneMessage::kBeforeOpen, preFileOpen); m_postOpen = MSceneMessage::addCallback(MSceneMessage::kAfterOpen, postFileOpen); TF_DEBUG(ALUSDMAYA_EVENTS).Msg("Registering USD plugins\n"); // Let USD know about the additional plugins std::string pluginLocation(TfStringCatPaths(TfGetenv(AL_USDMAYA_LOCATION_NAME), "share/usd/plugins")); PlugRegistry::GetInstance().RegisterPlugins(pluginLocation); // For callback initialization for stage cache callback, it will be done via proxy node attribute change. 
} //---------------------------------------------------------------------------------------------------------------------- void Global::onPluginUnload() { TF_DEBUG(ALUSDMAYA_EVENTS).Msg("Removing callbacks\n"); MSceneMessage::removeCallback(m_fileNew); MSceneMessage::removeCallback(m_preSave); MSceneMessage::removeCallback(m_postSave); MSceneMessage::removeCallback(m_preOpen); MSceneMessage::removeCallback(m_postOpen); StageCache::removeCallbacks(); } //---------------------------------------------------------------------------------------------------------------------- } // usdmaya } // al //----------------------------------------------------------------------------------------------------------------------
37.369565
120
0.582606
[ "shape", "transform" ]
11a124822212985126f88d37cf1c278e63952dfe
13,420
cpp
C++
NanoTest/Source/Benchmarks/BSharedMutex.cpp
refnum/Nano
dceb0907061f7845d8a3c662f309ca164e932e6f
[ "BSD-3-Clause" ]
23
2019-11-12T09:31:11.000Z
2021-09-13T08:59:37.000Z
NanoTest/Source/Benchmarks/BSharedMutex.cpp
refnum/nano
dceb0907061f7845d8a3c662f309ca164e932e6f
[ "BSD-3-Clause" ]
1
2020-10-30T09:54:12.000Z
2020-10-30T09:54:12.000Z
NanoTest/Source/Benchmarks/BSharedMutex.cpp
refnum/Nano
dceb0907061f7845d8a3c662f309ca164e932e6f
[ "BSD-3-Clause" ]
3
2015-09-08T11:00:02.000Z
2017-09-11T05:42:30.000Z
/* NAME: BSharedMutex.cpp DESCRIPTION: NSharedMutex benchmark. COPYRIGHT: Copyright (c) 2006-2021, refNum Software All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
___________________________________________________________________________ */ //============================================================================= // Includes //----------------------------------------------------------------------------- // Nano #include "NData.h" #include "NDataCompressor.h" #include "NFunction.h" #include "NMachine.h" #include "NRandom.h" #include "NSharedMutex.h" #include "NTestFixture.h" #include "NThread.h" // System #include <shared_mutex> #if !NN_TARGET_WINDOWS #include <pthread.h> #endif // !NN_TARGET_WINDOWS //============================================================================= // Internal Macros //----------------------------------------------------------------------------- #define NN_BENCHMARK_SHAREDMUTEX 0 #define NN_BENCHMARK_SHAREDMUTEX_ALL 0 NN_DIAGNOSTIC_IGNORE_CLANG("-Wformat-extra-args"); NN_DIAGNOSTIC_IGNORE_CLANG("-Wunused-template"); //============================================================================= // Internal Constants //----------------------------------------------------------------------------- static constexpr size_t kSizeSeeds = 100; static constexpr size_t kSizeBlock = 1 * kNKilobyte; //============================================================================= // Internal Types //----------------------------------------------------------------------------- template<typename T> struct WorkState { T theLock; size_t theLoad; NVectorUInt64 theSeeds; std::atomic_bool startWork; size_t percentRead; }; //============================================================================= // Internal Class Declaration //----------------------------------------------------------------------------- template<typename T> class Work { public: Work(WorkState<T>& theState) : mData(kSizeBlock, nullptr, NDataSource::None) , mRandom(1) , mState(theState) { } void Execute() { while (!mState.startWork) { NThread::Switch(); } for (size_t n = 0; n < mState.theLoad; n++) { if (mRandom.NextUInt8(0, 100) <= 
mState.percentRead) { ExecuteRead(); } else { ExecuteWrite(); } } } private: void ExecuteRead() { mState.theLock.LockShared(); for (const auto theSeed : mState.theSeeds) { mRandom.SetSeed(theSeed); mRandom.NextData(mData.GetSize(), mData.GetMutableData()); (void) NDataCompressor::Compress(NCompression::ZLib, mData); } mState.theLock.UnlockShared(); } void ExecuteWrite() { mState.theLock.Lock(); for (auto& theSeed : mState.theSeeds) { theSeed = NRandom::GetUInt64(); } mState.theLock.Unlock(); } private: NData mData; NRandom mRandom; WorkState<T>& mState; }; //============================================================================= // Internal Class Declaration //----------------------------------------------------------------------------- class LockNull { public: static constexpr const char* Name = "null"; void Lock() { } void Unlock() { } void LockShared() { } void UnlockShared() { } private: }; //============================================================================= // Internal Class Declaration //----------------------------------------------------------------------------- class LockNSharedMutex { public: static constexpr const char* Name = "NSharedMutex"; void Lock() { bool didLock = mLock.Lock(); NN_REQUIRE(didLock); } void Unlock() { mLock.Unlock(); } void LockShared() { bool didLock = mLock.LockShared(); NN_REQUIRE(didLock); } void UnlockShared() { mLock.UnlockShared(); } private: NSharedMutex mLock; }; //============================================================================= // Internal Class Declaration //----------------------------------------------------------------------------- class LockStdMutex { public: static constexpr const char* Name = "std::mutex"; void Lock() { mLock.lock(); } void Unlock() { mLock.unlock(); } void LockShared() { mLock.lock(); } void UnlockShared() { mLock.unlock(); } private: std::mutex mLock; }; //============================================================================= // Internal Class Declaration 
//----------------------------------------------------------------------------- class LockStdSharedMutex { public: static constexpr const char* Name = "std::shared_mutex"; void Lock() { mLock.lock(); } void Unlock() { mLock.unlock(); } void LockShared() { mLock.lock_shared(); } void UnlockShared() { mLock.unlock_shared(); } private: std::shared_mutex mLock; }; #if NN_TARGET_WINDOWS //============================================================================= // Internal Class Declaration //----------------------------------------------------------------------------- class LockSRWLock { public: static constexpr const char* Name = "SRWLock"; LockSRWLock() { InitializeSRWLock(&mLock); } void Lock() { AcquireSRWLockExclusive(&mLock); } void Unlock() { ReleaseSRWLockExclusive(&mLock); } void LockShared() { AcquireSRWLockShared(&mLock); } void UnlockShared() { ReleaseSRWLockShared(&mLock); } private: SRWLOCK mLock; }; #endif // NN_TARGET_WINDOWS #if !NN_TARGET_WINDOWS //============================================================================= // Internal Class Declaration //----------------------------------------------------------------------------- class LockPThread { public: static constexpr const char* Name = "pthread_rwlock"; LockPThread() { int sysErr = pthread_rwlock_init(&mLock, nullptr); NN_REQUIRE_NOT_ERR(sysErr); } ~LockPThread() { int sysErr = pthread_rwlock_destroy(&mLock); NN_REQUIRE_NOT_ERR(sysErr); } void Lock() { int sysErr = pthread_rwlock_wrlock(&mLock); NN_REQUIRE_NOT_ERR(sysErr); } void Unlock() { int sysErr = pthread_rwlock_unlock(&mLock); NN_REQUIRE_NOT_ERR(sysErr); } void LockShared() { int sysErr = pthread_rwlock_rdlock(&mLock); NN_REQUIRE_NOT_ERR(sysErr); } void UnlockShared() { int sysErr = pthread_rwlock_unlock(&mLock); NN_REQUIRE_NOT_ERR(sysErr); } private: pthread_rwlock_t mLock; }; #endif // !NN_TARGET_WINDOWS //============================================================================= // ExecuteWork : Execute the work. 
//----------------------------------------------------------------------------- template<typename T> static void ExecuteWork(size_t numThreads, size_t percentRead) { // Prepare the state WorkState<T> theState; theState.theLoad = 20; theState.theSeeds.resize(kSizeSeeds, 999); theState.startWork = false; theState.percentRead = percentRead; // Perform the work BENCHMARK_ADVANCED(T::Name)(Catch::Benchmark::Chronometer theMeter) { std::vector<NUniqueThread> theThreads; for (size_t n = 0; n < numThreads; n++) { theThreads.push_back(std::move(NThread::Create("BSharedMutex", [&]() { Work<T> theWork(theState); theWork.Execute(); }))); } theMeter.measure( [&] { theState.startWork = true; for (auto& theThread : theThreads) { theThread->WaitForCompletion(); } }); }; } //============================================================================= // PerformBenchmark : Perform a benchmark. //----------------------------------------------------------------------------- static void PerformBenchmark(size_t numThreads, size_t percentRead) { // Perform the benchmark if constexpr (NN_BENCHMARK_SHAREDMUTEX || NN_BENCHMARK_SHAREDMUTEX_ALL) { ExecuteWork<LockStdSharedMutex>(numThreads, percentRead); ExecuteWork<LockNSharedMutex>(numThreads, percentRead); if constexpr (NN_BENCHMARK_SHAREDMUTEX_ALL) { ExecuteWork<LockNull>(numThreads, percentRead); ExecuteWork<LockStdMutex>(numThreads, percentRead); } #if NN_TARGET_WINDOWS ExecuteWork<LockSRWLock>(numThreads, percentRead); #else ExecuteWork<LockPThread>(numThreads, percentRead); #endif // NN_TARGET_WINDOWS } } //============================================================================= // Fixture //----------------------------------------------------------------------------- NANO_FIXTURE(BSharedMutex){}; #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, 
"2Threads_10PercentRead") { // Perform the test PerformBenchmark(2, 10); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "2Threads_50PercentRead") { // Perform the test PerformBenchmark(2, 50); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "2Threads_90PercentRead") { // Perform the test PerformBenchmark(2, 90); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "8Threads_10PercentRead") { // Perform the test PerformBenchmark(8, 10); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "8Threads_50PercentRead") { // Perform the test PerformBenchmark(8, 50); } #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "8Threads_90PercentRead") { // Perform the test PerformBenchmark(8, 90); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, 
"16Threads_10PercentRead") { // Perform the test PerformBenchmark(16, 10); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "16Threads_50PercentRead") { // Perform the test PerformBenchmark(16, 50); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL #if NN_BENCHMARK_SHAREDMUTEX_ALL //============================================================================= // Test Case //----------------------------------------------------------------------------- NANO_TEST(BSharedMutex, "16Threads_90PercentRead") { // Perform the test PerformBenchmark(16, 90); } #endif // NN_BENCHMARK_SHAREDMUTEX_ALL
21.472
79
0.498957
[ "vector" ]
11a2497b3e0e4693ee054ae524f0ae0ca45291ca
39,352
hxx
C++
src/freertos_drivers/common/TCAN4550Can.hxx
balazsracz/openmrn
338f5dcbafeff6d171b2787b291d1904f2c45965
[ "BSD-2-Clause" ]
34
2015-05-23T03:57:56.000Z
2022-03-27T03:48:48.000Z
src/freertos_drivers/common/TCAN4550Can.hxx
balazsracz/openmrn
338f5dcbafeff6d171b2787b291d1904f2c45965
[ "BSD-2-Clause" ]
214
2015-07-05T05:06:55.000Z
2022-02-06T14:53:14.000Z
src/freertos_drivers/common/TCAN4550Can.hxx
balazsracz/openmrn
338f5dcbafeff6d171b2787b291d1904f2c45965
[ "BSD-2-Clause" ]
38
2015-08-28T05:32:07.000Z
2021-07-06T16:47:23.000Z
/** @copyright * Copyright (c) 2020 Stuart W Baker * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @file TCAN4550Can.hxx * This file implements the CAN driver for the TCAN4550 CAN Controller. * * @author Stuart W. Baker * @date 26 February 2020 */ #ifndef _FREERTOS_DRIVERS_COMMON_TCAN4550CAN_HXX_ #define _FREERTOS_DRIVERS_COMMON_TCAN4550CAN_HXX_ #include "Can.hxx" #include "SPI.hxx" #include "os/OS.hxx" #include "utils/Atomic.hxx" #include "can_ioctl.h" #define TCAN4550_DEBUG 0 /// Specification of CAN driver for the TCAN4550. /// @todo The TCAN4550 uses the Bosch MCAN IP. If we end up supporting other /// devices that also use this IP, then some of the generic MCAN related /// content can be factored out into a common location. 
class TCAN4550Can : public Can, public OSThread, private Atomic { public: /// Constructor. /// @param name name of this device instance in the file system /// @param interrupt_enable callback to enable the interrupt /// @param interrupt_disable callback to disable the interrupt TCAN4550Can(const char *name, void (*interrupt_enable)(), void (*interrupt_disable)()) : Can(name, 0, 0) , OSThread() , interruptEnable_(interrupt_enable) , interruptDisable_(interrupt_disable) , spiFd_(-1) , spi_(nullptr) , sem_() , mcanInterruptEnable_() , txCompleteMask_(0) , state_(CAN_STATE_STOPPED) , txPending_(false) , rxPending_(false) { } /// Destructor. ~TCAN4550Can() { } /// Initialize CAN device settings. Typically called in hw_postinit(), not /// hw_preinit() or hw_init(). /// @param spi_name spi interface that the TCAN4550Can is on /// @param freq frequency in Hz that the TCAN4550 clock runs at /// @param baud target baud rate in Hz /// @param rx_timeout_bits timeout in CAN bit periods for rx interrupt void init(const char *spi_name, uint32_t freq, uint32_t baud, uint16_t rx_timeout_bits); /// Handle an interrupt. Called by user provided interrupt handler. __attribute__((optimize("-O3"))) void interrupt_handler() { int woken = false; interruptDisable_(); sem_.post_from_isr(&woken); os_isr_exit_yield_test(woken); } /// Return a mutex that can be used by another SPI driver instance sharing /// the same bus as its bus lock. /// @return a reference to a mutex that can be used as a bus lock OSMutex *get_spi_bus_lock() { return &lock_; } private: /// maximum SPI clock speed in Hz static constexpr uint32_t SPI_MAX_SPEED_HZ = 18000000; /// size in words of the MRAM memory static constexpr size_t MRAM_SIZE_WORDS = (2 * 1024) / 4; // ---- Memory layout ---- // // +-----------------------+ // | RX FIFO 0, buf 0 | 0x0000 // | ... | // | RX FIFO 0, buf 63 | 0x03F0 // +-----------------------+ // | TX Event 0 (FIFO) | 0x0400 // | ... 
| // | TX Event 15 (FIFO) | 0x047F // +-----------------------+ // | TX Buf 0 | 0x0480 // | ... | // | TX Buf 15 | 0x057F // +-----------------------+ // | TX Buf 16 (FIFO) | 0x0580 // | ... | // | TX Buf 31 (FIFO) | 0x067F // +-----------------------+ // | Unused | 0x0680 // +-----------------------+ /// size in elements for the RX FIFO static constexpr uint32_t RX_FIFO_SIZE = 64; /// size in elements for the TX event FIFO static constexpr uint32_t TX_EVENT_FIFO_SIZE = 16; /// size in elements for the dedicated TX buffers static constexpr uint32_t TX_DEDICATED_BUFFER_COUNT = 16; /// size in elements for the TX FIFO static constexpr uint32_t TX_FIFO_SIZE = 16; /// mask of all the TX buffers used in the TX FIFO static constexpr uint32_t TX_FIFO_BUFFERS_MASK = 0xFFFF0000; /// start address of RX FIFO 0 in MRAM static constexpr uint16_t RX_FIFO_0_MRAM_ADDR = 0x0000; /// start address of TX Event FIFO in MRAM static constexpr uint16_t TX_EVENT_FIFO_MRAM_ADDR = 0x0400; /// start address of TX BUFFERS in MRAM static constexpr uint16_t TX_BUFFERS_MRAM_ADDR = 0x0480; /// start address of TX FIFO in MRAM static constexpr uint16_t TX_FIFO_BUFFERS_MRAM_ADDR = 0x0580; /// Offset of the MRAM address over SPI static constexpr uint16_t MRAM_ADDR_OFFSET = 0x8000; /// SPI Registers, word addressing, not byte addressing. /// This means that the values here need to be multiplied by 4 to get the /// actual address. 
enum Registers : uint16_t { DEVICE_IDL = 0x0, ///< device ID "TCAN" DEVICE_IDH, ///< device ID "4550" REVISION, ///< silicon revision STATUS, ///< status MODE = 0x200, ///< modes of operation and pin configurations TIMESTAMP_PRESCALER, ///< timestamp presacaler TEST, ///< read and write test registers, scratchpad ECC, ///< ECC error detection and testing INTERRUPT_STATUS = 0x208, ///< interrupt and diagnostic flags MCAN_INTERRUPT_STATUS, ///< interrupt flags related to MCAN core INTERRUPT_ENABLE = 0x20C, ///< interrupt and diagnostic flags CREL = 0x400, ///< core release ENDN, ///< endianess CUST, ///< customer DBTP, ///< data bit timing and prescaler TEST2, ///< test RWD, ///< RAM watchdog CCCR, ///< CC control NBTP, ///< nominal bit timing and prescaler TSCC, ///< timestamp counter configuration TSCV, ///< timestamp counter value TOCC, ///< timeout counter configuration TOCV, ///< timeout counter value RSVD1, ///< reserved RSVD2, ///< reserved RSVD3, ///< reserved RSVD4, ///< reserved ECR, ///< error count PSR, ///< protocol status TDCR, ///< transmitter delay compensation RSVD5, ///< reserved IR, ///< interrupt status IE, ///< interrupt enable ILS, ///< interrupt line select ILE, ///< interrupt line enable RSVD6, ///< reserved RSVD7, ///< reserved RSVD8, ///< reserved RSVD9, ///< reserved RSVD10, ///< reserved RSVD11, ///< reserved RSVD12, ///< reserved RSVD13, ///< reserved GFC, ///< global filter configuration SIDFC, ///< standard ID filter configuration XIDFC, ///< extended ID filter configuration RSVD14, ///< reserved XIDAM, ///< extended ID and mask HPMS, ///< high prioirty message status NDAT1, ///< new data 1 NDAT2, ///< new data 2 RXF0C, ///< RX FIFO 0 configuration RXF0S, ///< RX FIFO 0 status RXF0A, ///< RX FIFO 0 Acknowledge RXBC, ///< RX buffer configuration RXF1C, ///< RX FIFO 1 configuration RXF1S, ///< RX FIFO 1 status RXF1A, ///< RX FIFO 1 acknowledge RXESC, ///< RX buffer/FIFO element size configuration TXBC, ///< TX buffer configuration TXFQS, 
///< TX FIFO/queue status TXESC, ///< TX buffer element size configuration TXBRP, ///< TX buffer request pending TXBAR, ///< TX buffer add request TXBCR, ///< TX buffer cancellation request TXBTO, ///< TX buffer transmission occurred TXBCF, ///< TX buffer cancellation finished TXBTIE, ///< TX buffer transmission interrupt enable TXBCIE, ///< TX buffer cancellation finished interrupt enable RSVD15, ///< reserved RSVD16, ///< reserved TXEFC, ///< TX event FIFO configuration TXEFS, ///< TX event FIFO status TXEFA, ///< TX event FIFO acknowledge RSVD17, ///< reserved MRAM = 0x2000, ///< MRAM offset }; // Check alignment static_assert(TXEFS * 4 == 0x10F4, "register enum misaligned"); static_assert(ILE * 4 == 0x105C, "register enum misaligned"); static_assert(MCAN_INTERRUPT_STATUS * 4 == 0x0824, "register enum misaligned"); enum Command : uint8_t { WRITE = 0x61, ///< write one or more addresses READ = 0x41, ///< read one or more addresses }; /// Mode register definition struct Mode { /// Constructor. Sets the reset value. 
Mode() : data(0xC8000468) { } union { uint32_t data; ///< raw word value struct { uint32_t testModeConfig : 1; ///< test mode configuration uint32_t sweDis : 1; ///< sleep wake error disable uint32_t reset : 1; ///< device reset uint32_t wdEnable : 1; ///< watchdog enable uint32_t reserved1 : 2; ///< reserved uint32_t modeSel : 2; ///< mode of operation select uint32_t nWkrqConfig : 1; ///< nWKRQ pin function uint32_t inhDisable : 1; ///< INH pin disable uint32_t gpio1GpoConfig : 2; ///< GPIO1 output function select uint32_t reserved2 : 1; ///< reserved uint32_t failSafeEnable : 1; ///< fail safe mode enable uint32_t gpio1Config : 2; ///< GPIO1 pin function select uint32_t wdAction : 2; ///< selected watchdog action uint32_t wdBitSet : 1; ///< write a '1' to reset timer uint32_t nWkrqVoltage : 1; ///< nWKRQ pin GPO buffer voltage uint32_t reserved3 : 1; ///< reserved uint32_t testModeEn : 1; ///< test mode enable uint32_t gpo2Config : 2; ///< GPO2 pin configuration uint32_t reserved4 : 3; ///< reserved uint32_t clkRef : 1; ///< CLKIN/crystal freq reference uint32_t wdTimer : 2; ///< watchdog timer uint32_t wakeConfig : 2; ///< Wake pin configuration }; }; }; /// Data bit timing and prescaler register definition struct Dbtp { /// Constructor. 
/// @param dsjw data (re)synchronization jump width /// @param dtseg2 data time segment before sample point /// @param dtseg1 data time segment after sample point /// @param dbrp data bit rate prescaler /// @param tdc trasmitter delay compensation Dbtp(uint32_t dsjw, uint32_t dtseg2, uint32_t dtseg1, uint32_t dbrp, uint32_t tdc) : dsjw(dsjw) , dtseg2(dtseg2) , dtseg1(dtseg1) , dbrp(dbrp) , tdc(tdc) { } union { uint32_t data; ///< raw word value struct { uint32_t dsjw : 4; ///< data (re)synchronization jump width uint32_t dtseg2 : 4; ///< data time segment before sample uint32_t dtseg1 : 5; ///< data time segment after sample uint32_t reserved1 : 3; ///< reserved uint32_t dbrp : 5; ///< data bit rate prescaler uint32_t reserved2 : 2; ///< reserved uint32_t tdc : 1; ///< trasmitter delay compensation uint32_t reserved3 : 8; ///< reserved }; }; }; /// CC control register definition struct Cccr { /// Constructor. Sets the reset value. Cccr() : data(0x00000001) { } union { uint32_t data; ///< raw word value struct { uint32_t init : 1; ///< initialzation uint32_t cce : 1; ///< configuration change enable uint32_t asm_ : 1; ///< restricted operation mode uint32_t csa : 1; ///< clock stop acknowledge uint32_t csr : 1; ///< clock stop request uint32_t mon : 1; ///< bus monitorying mode is disabled uint32_t dar : 1; ///< disable automatic retransmission uint32_t test : 1; ///< test mode enable uint32_t fdoe : 1; ///< FD operation enable uint32_t brse : 1; ///< bit rate switch enable uint32_t rsvd1 : 2; ///< reserved uint32_t pxhd : 1; ///< protocol exception handling disable uint32_t efbi : 1; ///< edge filtering during bus integration uint32_t txp : 1; ///< transmitter pause uint32_t niso : 1; ///< non ISO operation uint32_t rsvd2 : 16; ///< reserved }; }; }; /// Nominal bit timing & prescaler register definition struct Nbtp { /// Constructor. 
/// @param dsjw data (re)synchronization jump width /// @param dtseg2 data time segment before sample point /// @param dtseg1 data time segment after sample point /// @param dbrp data bit rate prescaler /// @param tdc trasmitter delay compensation Nbtp(uint32_t sjw, uint32_t tseg2, uint32_t tseg1, uint32_t brp) : ntseg2(tseg2) , ntseg1(tseg1) , nbrp(brp) , nsjw(sjw) { } union { uint32_t data; ///< raw word value struct { uint32_t ntseg2 : 7; ///< time segment before sample uint32_t rsvd1 : 1; ///< reserved uint32_t ntseg1 : 8; ///< time segment after sample uint32_t nbrp : 9; ///< bit rate prescaler uint32_t nsjw : 7; ///< re-synchronization jump width }; }; }; /// Timestamp counter configuration register definition struct Tscc { /// Constructor. Sets the reset value. Tscc() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t tss : 2; ///< timestamp select uint32_t rsvd1 : 14; ///< reserved uint32_t tcp : 4; ///< timestamp counter prescaler uint32_t rsvd2 : 12; ///< reserved }; }; }; /// Timestamp counter value register definition struct Tscv { union { uint32_t data; ///< raw word value struct { uint32_t tsc : 16; ///< timestamp counter uint32_t rsvd : 16; ///< reserved }; }; }; /// Timeout counter configuration register definition struct Tocc { /// Constructor. Sets the reset value. 
Tocc() : data(0xFFFF0000) { } union { uint32_t data; ///< raw word value struct { uint32_t etoc : 1; ///< enable timeout counter uint32_t tos : 2; ///< timeout select uint32_t rsvd1 : 5; ///< reserved uint32_t rsvd2 : 8; ///< reserved uint32_t top : 16; ///< timeout period }; }; }; /// Timeout counter value register definition struct Tocv { union { uint32_t data; ///< raw word value struct { uint32_t toc : 16; ///< timeout counter uint32_t rsvd : 16; ///< reserved }; }; }; /// Protocol status register definition struct Psr { union { uint32_t data; ///< raw word value struct { uint32_t lec : 3; ///< last error code uint32_t act : 2; ///< activity uint32_t ep : 1; ///< error passive uint32_t ew : 1; ///< warning status uint32_t bo : 1; ///< bus-off status uint32_t dlec : 3; ///< data phase last error code uint32_t resi : 1; ///< ESI of last received CAN FD message uint32_t rbrs : 1; ///< BRS of last received CAN FD message uint32_t rfdf : 1; ///< received a CAN FD message uint32_t pxe : 1; ///< protocol exception event uint32_t rsvd1 : 1; ///< reserved uint32_t tdcv : 7; ///< transmitter delauy compenation value uint32_t rsvd2 : 1; ///< reserved uint32_t rsvd3 : 8; ///< reserved }; }; }; /// RX FIFO x configuraation register definition struct Rxfxc { /// Constructor. Sets the reset value. 
Rxfxc() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t fsa : 16; ///< RX FIFO start address uint32_t fs : 7; ///< RX FIFO size uint32_t rsvd : 1; ///< reserved uint32_t fwm : 7; ///< RX FIFO high water mark uint32_t fom : 1; ///< RX FIFO operation mode }; }; }; /// RX FIFO x status register definition struct Rxfxs { union { uint32_t data; ///< raw word value struct { uint32_t ffl : 7; ///< RX FIFO fill level uint32_t rsvd1 : 1; ///< reserved uint32_t fgi : 6; ///< RX FIFO get index uint32_t rsvd2 : 2; ///< reserved uint32_t fpi : 6; ///< RX FIFO put index uint32_t rsvd3 : 2; uint32_t ff : 1; ///< RX FIFO full uint32_t rfl : 1; ///< RX FIFO message lost uint32_t rsvd4 : 6; ///< reserved }; }; }; /// RX FIFO x acknowledge register definition struct Rxfxa { /// Constructor. Sets the reset value. Rxfxa() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t fai : 6; ///< RX FIFO acknowledge index uint32_t rsvd : 26; ///< reserved }; }; }; /// TX Buffer configuraation register definition struct Txbc { /// Constructor. Sets the reset value. 
Txbc() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t tbsa : 16; ///< TX buffers start address uint32_t ndtb : 6; ///< number of dediated transmit buffers uint32_t rsvd1 : 2; ///< reserved uint32_t tfqs : 6; ///< TX FIFO/queue size uint32_t tfqm : 1; ///< TX FIFO/queue mode uint32_t rsvd2 : 1; ///< reserved }; }; }; /// TX FIFO/queue status register definition struct Txfqs { union { uint32_t data; ///< raw word value struct { uint32_t tffl : 6; ///< TX FIFO free level uint32_t rsvd1 : 2; ///< reserved uint32_t tfgi : 5; ///< TX FIFO/queue get index uint32_t rsvd2 : 3; ///< reserved uint32_t tfqpi : 5; ///< TX FIFO/queue put index uint32_t tfqf : 1; ///< TX FIFO/queue full uint32_t rsvd3 : 2; ///< reserved uint32_t rsvd4 : 8; ///< reserved }; }; }; /// TX buffer element size configurataion register definition struct Txesc { /// Constructor. Sets the reset value. Txesc() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t tbds : 3; ///< TX buffer data field size uint32_t rsvd : 29; ///< reserved }; }; }; /// TX event FIFO configuration register definition struct Txefc { /// Constructor. Sets the reset value. 
Txefc() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t efsa : 16; ///< event FIFO start address uint32_t efs : 6; ///< event FIFO size uint32_t rsvd1 : 2; ///< reserved uint32_t efwm : 6; ///< event FIFO watermark uint32_t rsvd2 : 2; ///< reserved }; }; }; /// TX event FIFO status register definition struct Txefs { union { uint32_t data; ///< raw word value struct { uint32_t effl : 6; ///< event FIFO fill level uint32_t rsvd1 : 2; ///< reserved uint32_t efgi : 5; ///< event FIFO get index uint32_t rsvd2 : 3; ///< reserved uint32_t efpi : 5; ///< event FIFO put index uint32_t rsvd3 : 3; ///< reserved uint32_t eff : 1; ///< event FIFO full uint32_t tefl : 1; ///< TX event FIFO element lost uint32_t rsvd4 : 6; ///< reserved }; }; }; /// TX event FIFO acknowledge register definition struct Txefa { /// Constructor. Sets the reset value. Txefa() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t efai : 5; ///< TX event FIFO acknowledge index uint32_t rsvd : 27; ///< reserved }; }; }; /// TCAN4550 interrupt registers (INTERRUPT_ENABLE/STATUS) struct Interrupt { /// Constructor. Sets the reset value. 
Interrupt() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t vtwd : 1; ///< global voltage, temp or wdto uint32_t mcanint : 1; ///< M_CAN global interrupt uint32_t rsvd1 : 1; ///< reserved uint32_t spierr : 1; ///< SPI error uint32_t rsvd2 : 1; ///< reserved uint32_t canerr : 1; ///< CAN eror uint32_t wkrq : 1; ///< wake request uint32_t globalerr : 1; ///< global error (any fault) uint32_t candom : 1; ///< CAN stuck dominant uint32_t rsvd3 : 1; ///< reserved uint32_t canslnt : 1; ///< CAN silent uint32_t rsvd4 : 2; ///< reserved uint32_t wkerr : 1; ///< wake error uint32_t lwu : 1; ///< local wake up uint32_t canint : 1; ///< CAN bus wake up interrupt uint32_t eccerr : 1; ///< uncorrectable ECC error detected uint32_t rsvd5 : 1; ///< reserved uint32_t wdto : 1; ///< watchdog timeout uint32_t tsd : 1; ///< thermal shutdown uint32_t pwron : 1; ///< power on uint32_t uvio : 1; ///< under voltage VIO uint32_t uvsup : 1; ///< under voltage VSUP and UVCCOUT uint32_t sms : 1; ///< sleep mode status uint32_t rsvd6 : 7; ///< reserved uint32_t canbusnom : 1; ///< CAN bus normal }; }; }; /// MCAN interrupt registers (IR, IE, and ILS) definition struct MCANInterrupt { /// Constructor. Sets the reset value. 
MCANInterrupt() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t rf0n : 1; ///< RX FIFO 0 new message uint32_t rf0w : 1; ///< RX FIFO 0 watermark reached uint32_t rf0f : 1; ///< RX FIFO 0 full uint32_t rf0l : 1; ///< RX FIFO 0 message lost uint32_t rf1n : 1; ///< RX FIFO 1 new message uint32_t rf1w : 1; ///< RX FIFO 1 watermark reached uint32_t rf1f : 1; ///< RX FIFO 1 full uint32_t rf1l : 1; ///< RX FIFO 1 message lost uint32_t hpm : 1; ///< high priority message uint32_t tc : 1; ///< transmission completed uint32_t tcf : 1; ///< transmission cancellation finished uint32_t tfe : 1; ///< TX FIFO empty uint32_t tefn : 1; ///< TX event FIFO new entry uint32_t tefw : 1; ///< TX event FIFO watermark reached uint32_t teff : 1; ///< TX event FIFO full uint32_t tefl : 1; ///< TX event FIFO event lost uint32_t tsw : 1; ///< timestamp wraparound uint32_t mraf : 1; ///< message RAM access failure uint32_t too : 1; ///< timeout occurred uint32_t drx : 1; ///< message stored to dedicated RX buffer uint32_t bec : 1; ///< bit error corrected uint32_t beu : 1; ///< bit error uncorrected uint32_t elo : 1; ///< error logging overflow uint32_t ep : 1; ///< error passive uint32_t ew : 1; ///< warning status uint32_t bo : 1; ///< bus-off status uint32_t wdi : 1; ///< watchdog uint32_t pea : 1; ///< protocol error in arbitration phase uint32_t ped : 1; ///< protocol error in data phase uint32_t ara : 1; ///< access to reserved address uint32_t rsvd : 2; ///< reserved }; }; }; /// MCAN interrupt line enable register definition struct Ile { /// Constructor. Sets the reset value. 
Ile() : data(0x00000000) { } union { uint32_t data; ///< raw word value struct { uint32_t eint0 : 1; ///< enable interrupt line 0 uint32_t eint1 : 1; ///< enable interrupt line 1 uint32_t rsvd : 30; ///< reserved }; }; }; /// Buad rate table entry struct TCAN4550Baud { uint32_t freq; ///< incoming frequency uint32_t baud; ///< target baud rate Nbtp nbtp; ///< data bit timing and prescaler }; /// SPI message for read/write commands struct SPIMessage { union { uint64_t payload64; ///< raw payload as 64-bit value uint32_t payload32[2]; ///< raw paylaod as 32-bit array uint8_t payload[8]; ///< raw payload struct { uint8_t length; ///< length in words uint8_t addrL; ///< register address LSB uint8_t addrH; ///< register address MSB union { uint8_t cmd; ///< command uint8_t status; ///< bits 0..7 of INTERRUPT_STATUS }; uint32_t data; ///< data word }; }; }; /// MRAM SPI message for read/write commands struct MRAMSPIMessage { union { uint32_t payload32; ///< raw paylaod as 32-bit value uint8_t payload[4]; ///< raw payload struct { uint8_t length; ///< length in words uint8_t addrL; ///< register address LSB uint8_t addrH; ///< register address MSB union { uint8_t cmd; ///< command uint8_t status; ///< bits 0..7 of INTERRUPT_STATUS }; }; }; }; /// RX Buffer structure struct MRAMRXBuffer { uint32_t id : 29; ///< CAN identifier uint32_t rtr : 1; ///< remote transmission request uint32_t xtd : 1; ///< extended identifier uint32_t esi : 1; ///< error state indicator uint32_t rxts : 16; ///< receive timestamp uint32_t dlc : 4; ///< data length code uint32_t brs : 1; ///< bit rate switch uint32_t fdf : 1; ///< FD format uint32_t rsvd : 2; ///< reserved uint32_t fidx : 7; ///< filter index that message mached if ANMF = 0 uint32_t anmf : 1; ///< accepted non-matching frame of filter element union { uint64_t data64; ///< data payload (64-bit) uint32_t data32[2]; ///< data payload (0 - 1 word) uint16_t data16[4]; ///< data payload (0 - 3 half word) uint8_t data[8]; ///< data payload 
(0 - 8 byte) }; }; /// TX Buffer structure struct MRAMTXBuffer { uint32_t id : 29; ///< CAN identifier uint32_t rtr : 1; ///< remote transmission request uint32_t xtd : 1; ///< extended identifier uint32_t esi : 1; ///< error state indicator uint32_t rsvd1 : 16; ///< reserved uint32_t dlc : 4; ///< data length code uint32_t brs : 1; ///< bit rate switch uint32_t fdf : 1; ///< FD format uint32_t rsvd2 : 1; ///< reserved uint32_t efc : 1; ///< event FIFO control uint32_t mm : 8; ///< message marker union { uint64_t data64; ///< data payload 64-bit uint32_t data32[2]; ///< data payload (0 - 1 word) uint16_t data16[4]; ///< data payload (0 - 3 half word) }; }; /// TX Event FIFO Element structure struct MRAMTXEventFIFOElement { uint32_t id : 29; ///< CAN identifier uint32_t rtr : 1; ///< remote transmission request uint32_t xtd : 1; ///< extended identifier uint32_t esi : 1; ///< error state indicator uint32_t txts : 16; ///< transmit timestamp uint32_t dlc : 4; ///< data length code uint32_t brs : 1; ///< bit rate switch uint32_t fdf : 1; ///< FD format uint32_t et : 2; ///< event type uint32_t mm : 8; ///< message marker }; /// Structure for writing multiple TX buffers in one SPI transaction. struct MRAMTXBufferMultiWrite { static_assert(sizeof(MRAMSPIMessage) == sizeof(uint32_t), "unexpected MRAMSPIMessage size"); uint32_t padding; ///< padding for 8-byte alignment MRAMSPIMessage header; ///< message header MRAMTXBuffer txBuffers[TX_FIFO_SIZE]; ///< buffer payload }; /// Called after disable. void flush_buffers() override; /// Read from a file or device. /// @param file file reference for this device /// @param buf location to place read data /// @param count number of bytes to read /// @return number of bytes read upon success, -1 upon failure with errno /// containing the cause ssize_t read(File *file, void *buf, size_t count) override; /// Write to a file or device. 
/// @param file file reference for this device /// @param buf location to find write data /// @param count number of bytes to write /// @return number of bytes written upon success, -1 upon failure with errno /// containing the cause ssize_t write(File *file, const void *buf, size_t count) override; /// Request an ioctl transaction. /// @param file file reference for this device /// @param key ioctl key /// @param data key data /// @return >= 0 upon success, -errno upon failure int ioctl(File *file, unsigned long int key, unsigned long data) override; /// Device select method. Default impementation returns true. /// @param file reference to the file /// @param mode FREAD for read active, FWRITE for write active, 0 for /// exceptions /// @return true if active, false if inactive bool select(File* file, int mode) override; /// User entry point for the created thread. /// @return exit status void *entry() override; void enable() override; ///< function to enable device void disable() override; ///< function to disable device /// Function to try and transmit a message. void tx_msg() override { // unused in this implementation } /// Read from a SPI register. /// @param address address to read from /// @return data read __attribute__((optimize("-O3"))) uint32_t register_read(Registers address) { SPIMessage msg; msg.cmd = READ; msg.addrH = address >> 6; msg.addrL = (address << 2) & 0xFF; msg.length = 1; spi_ioc_transfer xfer; xfer.tx_buf = (unsigned long)(&msg); xfer.rx_buf = (unsigned long)(&msg); xfer.len = sizeof(msg); spi_->transfer_with_cs_assert_polled(&xfer); #if TCAN4550_DEBUG HASSERT((msg.status & 0x8) == 0); #endif return msg.data; } /// Write to a SPI register. 
/// @param address address to write to /// @param data data to write __attribute__((optimize("-O3"))) void register_write(Registers address, uint32_t data) { SPIMessage msg; msg.cmd = WRITE; msg.addrH = address >> 6; msg.addrL = (address << 2) & 0xFF; msg.length = 1; msg.data = data; spi_ioc_transfer xfer; xfer.tx_buf = (unsigned long)(&msg); xfer.rx_buf = (unsigned long)(&msg); xfer.len = sizeof(msg); spi_->transfer_with_cs_assert_polled(&xfer); #if TCAN4550_DEBUG HASSERT((msg.status & 0x8) == 0); #endif } /// Read one or more RX buffers. /// @param offset word offset in the MRAM to read from /// @param buf location to read into /// @param count number of buffers to read __attribute__((optimize("-O3"))) void rxbuf_read(uint16_t offset, MRAMRXBuffer *buf, size_t count) { uint16_t address = offset + MRAM_ADDR_OFFSET; SPIMessage msg; msg.cmd = READ; msg.addrH = address >> 8; msg.addrL = address & 0xFF; msg.length = count * (sizeof(MRAMRXBuffer) / 4); spi_ioc_transfer xfer[2]; xfer[0].tx_buf = (unsigned long)(&msg); xfer[0].rx_buf = (unsigned long)(&msg); xfer[0].len = 4; //sizeof(SPIMessage); xfer[1].tx_buf = (unsigned long)(nullptr); xfer[1].rx_buf = (unsigned long)(buf); xfer[1].len = count * sizeof(MRAMRXBuffer); spi_->transfer_with_cs_assert_polled(xfer, 2); #if TCAN4550_DEBUG HASSERT((msg.status & 0x8) == 0); #endif } /// Write one or more TX buffers. 
/// @param offset word offset in the MRAM to write to /// @param buf location to write from /// @param count number of buffers to write __attribute__((optimize("-O3"))) void txbuf_write(uint16_t offset, MRAMTXBufferMultiWrite *buf, size_t count) { static_assert(sizeof(MRAMTXBuffer) == 16, "Unexpected MRAMTXBuffer size"); uint16_t address = offset + MRAM_ADDR_OFFSET; buf->header.cmd = WRITE; buf->header.addrH = address >> 8; buf->header.addrL = address & 0xFF; buf->header.length = count * (sizeof(MRAMTXBuffer) / 4); spi_ioc_transfer xfer; xfer.tx_buf = (unsigned long)&buf->header; xfer.rx_buf = (unsigned long)&buf->header; xfer.len = sizeof(buf->header) + (count * sizeof(MRAMTXBuffer)); spi_->transfer_with_cs_assert_polled(&xfer); #if TCAN4550_DEBUG HASSERT((buf->header.status & 0x8) == 0); #endif } void (*interruptEnable_)(); ///< enable interrupt callback void (*interruptDisable_)(); ///< disable interrupt callback int spiFd_; ///< SPI bus that accesses TCAN4550 SPI *spi_; ///< pointer to a SPI object instance OSSem sem_; ///< semaphore for posting events MCANInterrupt mcanInterruptEnable_; ///< shadow for the interrupt enable uint32_t txCompleteMask_; ///< shadow for the transmit complete buffer mask uint8_t state_; ///< present bus state uint8_t txPending_ : 1; ///< waiting on a TX active event uint8_t rxPending_ : 1; ///< waiting on a RX active event /// Allocating this buffer here avoids having to put it on the /// TCAN4550Can::write() caller's stack. MRAMTXBufferMultiWrite txBufferMultiWrite_ __attribute__((aligned(8))); #if TCAN4550_DEBUG volatile uint32_t regs_[64]; ///< debug copy of TCAN4550 registers volatile uint32_t status_; volatile uint32_t enable_; volatile uint32_t spiStatus_; #endif /// baud rate settings table static const TCAN4550Baud BAUD_TABLE[]; /// Default Constructor. TCAN4550Can(); DISALLOW_COPY_AND_ASSIGN(TCAN4550Can); }; #endif // _FREERTOS_DRIVERS_COMMON_TCAN4550CAN_HXX_
34.189401
80
0.525971
[ "object" ]
11a34f81ec866c8b343928a8d86378b095743467
12,643
cpp
C++
src/lib/operators/join_nested_loop.cpp
IanJamesMcKay/InMemoryDB
a267d9522926eca9add2ad4512f8ce352daac879
[ "MIT" ]
1
2021-04-14T11:16:52.000Z
2021-04-14T11:16:52.000Z
src/lib/operators/join_nested_loop.cpp
IanJamesMcKay/InMemoryDB
a267d9522926eca9add2ad4512f8ce352daac879
[ "MIT" ]
null
null
null
src/lib/operators/join_nested_loop.cpp
IanJamesMcKay/InMemoryDB
a267d9522926eca9add2ad4512f8ce352daac879
[ "MIT" ]
1
2020-11-30T13:11:04.000Z
2020-11-30T13:11:04.000Z
#include "join_nested_loop.hpp" #include <map> #include <memory> #include <numeric> #include <set> #include <string> #include <utility> #include <vector> #include "resolve_type.hpp" #include "storage/column_iterables/any_column_iterable.hpp" #include "storage/create_iterable_from_column.hpp" #include "type_comparison.hpp" #include "utils/assert.hpp" #include "utils/performance_warning.hpp" namespace opossum { /* * This is a Nested Loop Join implementation completely based on iterables. * It supports all current join and predicate conditions, as well as NULL values. * Because this is a Nested Loop Join, the performance is going to be far inferior to JoinHash and JoinSortMerge, * so only use this for testing or benchmarking purposes. */ JoinNestedLoop::JoinNestedLoop(const std::shared_ptr<const AbstractOperator>& left, const std::shared_ptr<const AbstractOperator>& right, const JoinMode mode, const ColumnIDPair& column_ids, const PredicateCondition predicate_condition) : AbstractJoinOperator(OperatorType::JoinNestedLoop, left, right, mode, column_ids, predicate_condition) {} const std::string JoinNestedLoop::name() const { return "JoinNestedLoop"; } std::shared_ptr<AbstractOperator> JoinNestedLoop::_on_recreate( const std::vector<AllParameterVariant>& args, const std::shared_ptr<AbstractOperator>& recreated_input_left, const std::shared_ptr<AbstractOperator>& recreated_input_right) const { return std::make_shared<JoinNestedLoop>(recreated_input_left, recreated_input_right, _mode, _column_ids, _predicate_condition); } std::shared_ptr<const Table> JoinNestedLoop::_on_execute() { PerformanceWarning("Nested Loop Join used"); _create_table_structure(); _perform_join(); return _output_table; } void JoinNestedLoop::_create_table_structure() { _left_in_table = _input_left->get_output(); _right_in_table = _input_right->get_output(); _left_column_id = _column_ids.first; _right_column_id = _column_ids.second; const bool left_may_produce_null = (_mode == JoinMode::Right || _mode 
== JoinMode::Outer); const bool right_may_produce_null = (_mode == JoinMode::Left || _mode == JoinMode::Outer); TableColumnDefinitions output_column_definitions; // Preparing output table by adding columns from left table for (ColumnID column_id{0}; column_id < _left_in_table->column_count(); ++column_id) { const auto nullable = (left_may_produce_null || _left_in_table->column_is_nullable(column_id)); output_column_definitions.emplace_back(_left_in_table->column_name(column_id), _left_in_table->column_data_type(column_id), nullable); } // Preparing output table by adding columns from right table for (ColumnID column_id{0}; column_id < _right_in_table->column_count(); ++column_id) { const auto nullable = (right_may_produce_null || _right_in_table->column_is_nullable(column_id)); output_column_definitions.emplace_back(_right_in_table->column_name(column_id), _right_in_table->column_data_type(column_id), nullable); } _output_table = std::make_shared<Table>(output_column_definitions, TableType::References); } void JoinNestedLoop::_process_match(RowID left_row_id, RowID right_row_id, JoinNestedLoop::JoinParams& params) { params.pos_list_left.emplace_back(left_row_id); params.pos_list_right.emplace_back(right_row_id); if (params.track_left_matches) { params.left_matches[left_row_id.chunk_offset] = true; } if (params.track_right_matches) { params.right_matches[right_row_id.chunk_offset] = true; } } // inner join loop that joins two columns via their iterators template <typename BinaryFunctor, typename LeftIterator, typename RightIterator> void JoinNestedLoop::_join_two_typed_columns(const BinaryFunctor& func, LeftIterator left_it, LeftIterator left_end, RightIterator right_begin, RightIterator right_end, const ChunkID chunk_id_left, const ChunkID chunk_id_right, JoinNestedLoop::JoinParams& params) { for (; left_it != left_end; ++left_it) { const auto left_value = *left_it; if (left_value.is_null()) continue; for (auto right_it = right_begin; right_it != right_end; 
++right_it) { const auto right_value = *right_it; if (right_value.is_null()) continue; if (func(left_value.value(), right_value.value())) { _process_match(RowID{chunk_id_left, left_value.chunk_offset()}, RowID{chunk_id_right, right_value.chunk_offset()}, params); } } } } void JoinNestedLoop::_join_two_untyped_columns(const std::shared_ptr<const BaseColumn>& column_left, const std::shared_ptr<const BaseColumn>& column_right, const ChunkID chunk_id_left, const ChunkID chunk_id_right, JoinNestedLoop::JoinParams& params) { resolve_data_and_column_type(*column_left, [&](auto left_type, auto& typed_left_column) { resolve_data_and_column_type(*column_right, [&](auto right_type, auto& typed_right_column) { using LeftType = typename decltype(left_type)::type; using RightType = typename decltype(right_type)::type; // make sure that we do not compile invalid versions of these lambdas constexpr auto LEFT_IS_STRING_COLUMN = (std::is_same<LeftType, std::string>{}); constexpr auto RIGHT_IS_STRING_COLUMN = (std::is_same<RightType, std::string>{}); constexpr auto NEITHER_IS_STRING_COLUMN = !LEFT_IS_STRING_COLUMN && !RIGHT_IS_STRING_COLUMN; constexpr auto BOTH_ARE_STRING_COLUMNS = LEFT_IS_STRING_COLUMN && RIGHT_IS_STRING_COLUMN; // clang-format off if constexpr (NEITHER_IS_STRING_COLUMN || BOTH_ARE_STRING_COLUMNS) { auto iterable_left = create_iterable_from_column<LeftType>(typed_left_column); auto iterable_right = create_iterable_from_column<RightType>(typed_right_column); iterable_left.with_iterators([&](auto left_it, auto left_end) { iterable_right.with_iterators([&](auto right_it, auto right_end) { with_comparator(params.predicate_condition, [&](auto comparator) { _join_two_typed_columns(comparator, left_it, left_end, right_it, right_end, chunk_id_left, chunk_id_right, params); }); }); }); } // clang-format on }); }); } void JoinNestedLoop::_perform_join() { auto left_table = _left_in_table; auto right_table = _right_in_table; auto left_column_id = _left_column_id; auto 
right_column_id = _right_column_id; if (_mode == JoinMode::Right) { // for Right Outer we swap the tables so we have the outer on the "left" left_table = _right_in_table; right_table = _left_in_table; left_column_id = _right_column_id; right_column_id = _left_column_id; } _pos_list_left = std::make_shared<PosList>(); _pos_list_right = std::make_shared<PosList>(); _is_outer_join = (_mode == JoinMode::Left || _mode == JoinMode::Right || _mode == JoinMode::Outer); // Scan all chunks from left input _right_matches.resize(right_table->chunk_count()); for (ChunkID chunk_id_left = ChunkID{0}; chunk_id_left < left_table->chunk_count(); ++chunk_id_left) { auto column_left = left_table->get_chunk(chunk_id_left)->get_column(left_column_id); // for Outer joins, remember matches on the left side std::vector<bool> left_matches; if (_is_outer_join) { left_matches.resize(column_left->size()); } // Scan all chunks for right input for (ChunkID chunk_id_right = ChunkID{0}; chunk_id_right < right_table->chunk_count(); ++chunk_id_right) { const auto column_right = right_table->get_chunk(chunk_id_right)->get_column(right_column_id); _right_matches[chunk_id_right].resize(column_right->size()); const auto track_right_matches = (_mode == JoinMode::Outer); JoinParams params{*_pos_list_left, *_pos_list_right, left_matches, _right_matches[chunk_id_right], _is_outer_join, track_right_matches, _mode, _predicate_condition}; _join_two_untyped_columns(column_left, column_right, chunk_id_left, chunk_id_right, params); } if (_is_outer_join) { // add unmatched rows on the left for Left and Full Outer joins for (ChunkOffset chunk_offset{0}; chunk_offset < left_matches.size(); ++chunk_offset) { if (!left_matches[chunk_offset]) { _pos_list_left->emplace_back(RowID{chunk_id_left, chunk_offset}); _pos_list_right->emplace_back(NULL_ROW_ID); } } } } // For Full Outer we need to add all unmatched rows for the right side. 
// Unmatched rows on the left side are already added in the main loop above if (_mode == JoinMode::Outer) { for (ChunkID chunk_id_right = ChunkID{0}; chunk_id_right < right_table->chunk_count(); ++chunk_id_right) { const auto column_right = right_table->get_chunk(chunk_id_right)->get_column(right_column_id); resolve_data_and_column_type(*column_right, [&](auto right_type, auto& typed_right_column) { using RightType = typename decltype(right_type)::type; auto iterable_right = create_iterable_from_column<RightType>(typed_right_column); iterable_right.for_each([&](const auto& right_value) { const auto row_id = RowID{chunk_id_right, right_value.chunk_offset()}; if (!_right_matches[chunk_id_right][row_id.chunk_offset]) { _pos_list_left->emplace_back(NULL_ROW_ID); _pos_list_right->emplace_back(row_id); } }); }); } } // write output chunks ChunkColumns columns; if (_mode == JoinMode::Right) { _write_output_chunks(columns, right_table, _pos_list_right); _write_output_chunks(columns, left_table, _pos_list_left); } else { _write_output_chunks(columns, left_table, _pos_list_left); _write_output_chunks(columns, right_table, _pos_list_right); } _output_table->append_chunk(columns); } void JoinNestedLoop::_write_output_chunks(ChunkColumns& columns, const std::shared_ptr<const Table>& input_table, const std::shared_ptr<PosList>& pos_list) { // Add columns from table to output chunk for (ColumnID column_id{0}; column_id < input_table->column_count(); ++column_id) { std::shared_ptr<BaseColumn> column; if (input_table->type() == TableType::References) { if (input_table->chunk_count() > 0) { auto new_pos_list = std::make_shared<PosList>(); // de-reference to the correct RowID so the output can be used in a Multi Join for (const auto row : *pos_list) { if (row.is_null()) { new_pos_list->push_back(NULL_ROW_ID); } else { auto reference_column = std::static_pointer_cast<const ReferenceColumn>( input_table->get_chunk(row.chunk_id)->get_column(column_id)); 
new_pos_list->push_back(reference_column->pos_list()->at(row.chunk_offset)); } } auto reference_column = std::static_pointer_cast<const ReferenceColumn>(input_table->get_chunk(ChunkID{0})->get_column(column_id)); column = std::make_shared<ReferenceColumn>(reference_column->referenced_table(), reference_column->referenced_column_id(), new_pos_list); } else { // If there are no Chunks in the input_table, we can't deduce the Table that input_table is referencING to // pos_list will contain only NULL_ROW_IDs anyway, so it doesn't matter which Table the ReferenceColumn that // we output is referencing. HACK, but works fine: we create a dummy table and let the ReferenceColumn ref // it. const auto dummy_table = Table::create_dummy_table(input_table->column_definitions()); column = std::make_shared<ReferenceColumn>(dummy_table, column_id, pos_list); } } else { column = std::make_shared<ReferenceColumn>(input_table, column_id, pos_list); } columns.push_back(column); } } void JoinNestedLoop::_on_cleanup() { _output_table.reset(); _left_in_table.reset(); _right_in_table.reset(); _pos_list_left.reset(); _pos_list_right.reset(); _right_matches.clear(); } } // namespace opossum
43.150171
119
0.696907
[ "vector" ]
11a573c900b79db7d80c7fac9e98341636b546c0
5,942
hpp
C++
include/vapor/unique_ptr_cache.hpp
sgpearse/VAPOR
12d4ed2e914ff3f6b59989a33a88d7399f45c41b
[ "BSD-3-Clause" ]
120
2017-07-31T08:40:34.000Z
2022-03-24T03:57:35.000Z
include/vapor/unique_ptr_cache.hpp
sgpearse/VAPOR
12d4ed2e914ff3f6b59989a33a88d7399f45c41b
[ "BSD-3-Clause" ]
2,215
2017-06-21T20:47:30.000Z
2022-03-31T22:41:38.000Z
include/vapor/unique_ptr_cache.hpp
sgpearse/VAPOR
12d4ed2e914ff3f6b59989a33a88d7399f45c41b
[ "BSD-3-Clause" ]
48
2017-08-02T22:56:40.000Z
2022-02-12T13:44:51.000Z
//----------------------------------------------------------------------------- // This is an implementation of a least-recently-used (LRU) cache that keeps // unique pointers pointing to big structures (e.g., grids, quadtrees). // // This cache has two execution policies: // 1) only insertion counts as `recently used`, and // 2) both insertion and query count as `recently used`. // // Given this design, this cache is expected to keep the ownership of these // structures once they're put in the cache, and all other codes will not // need to manage these structures. // // All structures stored in this cache are const qualified, so once a // structure is put in this cache, there is no more modification to this structure. // // Caveat: A cache keeps things that it is asked to keep, which in this case are pointers. // This implementation guarantees that pointers and the objects that they point to // are not altered while in the cache, and are properly destroyed when evicted. // The cache guarantees no more than that. // // Tip: This cache should be initialized sufficiently big so that a returned // pointer won't be evicted while that pointer is in use. // In other words, users need to know how many new insersions are going to happen // while a queried pointer is in use, and initialize the cache to be at least // that big. // // To use an example, a `unique_ptr_cache` is initialized to hold N objects. // A queried pointer `ptr` is valid at the time of query, and will remain valid until // another (N-1) unique individual objects being inserted/queried. // At that point, the immediate next insertion of another unique object (the N-th) // will evict `ptr`, and the object it points to is destroyed, and `prt` is no longer valid. // // Revision: (8/13/2020) it uses std::array<> instead of std::list<> to achieve // the highest performance with small to medium cache sizes. // Revision: (8/13/2020) it uses mutexes to achieve thread safety. 
// Revision: (9/29/2020) it uses std::vector<> instead of std::array<> so that the cache size
// can be set dynamically at construction time.
//
// Author : Samuel Li
// Date : 9/26/2019
// Revision : 8/13/2020, 9/29/2020
//-----------------------------------------------------------------------------

#ifndef UNIQUE_PTR_CACHE_H
#define UNIQUE_PTR_CACHE_H

#include <cstddef>      // size_t
#include <utility>      // std::pair<>
#include <memory>       // std::unique_ptr<>
#include <mutex>
#include <algorithm>
#include <vector>

namespace VAPoR {

//
// Note : Key must support == operator
//
// An LRU cache owning const-qualified `BigObj` instances through unique pointers.
// The most recently used element is kept at the front of `_element_vector`;
// eviction removes the back element.
//
template<typename Key, typename BigObj> class unique_ptr_cache final {
public:
    // Constructor.
    // `capacity` : maximum number of elements this cache holds.
    // `query`    : when true, a successful query() also counts as `recently used`
    //              (the hit is rotated to the front); when false, only insert()
    //              affects the eviction order.
    unique_ptr_cache(size_t capacity, bool query) : _capacity(capacity), _query_shuffle(query) { _element_vector.reserve(_capacity); }

    // Note: because this cache is intended to be used to keep unique pointers,
    // we don't want to allow any type of copy constructors, so delete them.
    unique_ptr_cache(const unique_ptr_cache &) = delete;
    unique_ptr_cache(const unique_ptr_cache &&) = delete;
    unique_ptr_cache &operator=(const unique_ptr_cache &) = delete;
    unique_ptr_cache &operator=(const unique_ptr_cache &&) = delete;

    auto capacity() const -> size_t { return _capacity; }

    auto size() const -> size_t { return _element_vector.size(); }

    // Removes (and destroys) every cached object.
    // Fix: clear() mutates the vector, so it must hold the mutex like insert() does.
    void clear()
    {
        const std::lock_guard<std::mutex> lock_gd(_element_vector_mutex);
        _element_vector.clear();
    }

    auto empty() const -> bool { return _element_vector.empty(); }

    auto full() const -> bool { return (_element_vector.size() >= _capacity); }

    //
    // Major action function.
    // If the key exists, it returns the unique pointer associated with the key.
    // If the key does not exist, it returns the unique_ptr version of a nullptr.
    //
    auto query(const Key &key) -> const std::unique_ptr<const BigObj> &
    {
        // BUG FIX: the previous code created the lock_guard as the sole statement
        // of an `if`, so the guard was destroyed immediately and the mutex was
        // never held while the vector was searched and rotated. A deferred
        // unique_lock keeps the lock alive for the remainder of this call.
        std::unique_lock<std::mutex> lock_gd(_element_vector_mutex, std::defer_lock);
        if (_query_shuffle) lock_gd.lock();

        auto it = std::find_if(_element_vector.begin(), _element_vector.end(),
                               [&key](element_type &e) { return e.first == key; });

        if (it == _element_vector.end()) {
            // This key does not exist
            return _local_nullptr;
        } else {
            // This key does exist
            if (_query_shuffle) {
                // Move the hit to the front so it becomes the most recently used.
                std::rotate(_element_vector.begin(), it, it + 1);
                return _element_vector.front().second;
            } else
                return it->second;
        }
    }

    // Takes ownership of `ptr`. A new key is inserted at the front (evicting the
    // back element when full); an existing key has its object replaced and is
    // moved to the front.
    void insert(Key key, const BigObj *ptr)
    {
        const std::lock_guard<std::mutex> lock_gd(_element_vector_mutex);

        // BUG FIX: a zero-capacity cache used to call pop_back() on an empty
        // vector (undefined behavior). It cannot store anything, but must still
        // take ownership of `ptr` so the object is not leaked.
        if (_capacity == 0) {
            std::unique_ptr<const BigObj> tmp(ptr);
            return;
        }

        auto it = std::find_if(_element_vector.begin(), _element_vector.end(),
                               [&key](element_type &e) { return e.first == key; });

        if (it == _element_vector.end()) {
            // This key does not exist
            if (_element_vector.size() >= _capacity) _element_vector.pop_back();    // Evict the last element
            std::unique_ptr<const BigObj> tmp(ptr);
            _element_vector.emplace(_element_vector.begin(), std::move(key), std::move(tmp));
        } else {
            // This key does exist.
            it->second.reset(ptr);
            std::rotate(_element_vector.begin(), it, it + 1);
        }
    }

private:
    using element_type = std::pair<Key, std::unique_ptr<const BigObj>>;

    const size_t                        _capacity;        // fixed at construction
    const bool                          _query_shuffle;   // does query() reorder?
    std::vector<element_type>           _element_vector;  // front == most recently used
    const std::unique_ptr<const BigObj> _local_nullptr = {nullptr};
    std::mutex                          _element_vector_mutex;
};

}    // namespace VAPoR

#endif
45.015152
134
0.633625
[ "object", "vector" ]
11a919bb88ca42f1e3d8061a1f0a8e5f205ea1fe
32,155
cpp
C++
monet/lm_ops/conv.cpp
stjordanis/MONeT-1
98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af
[ "MIT" ]
161
2020-10-28T02:21:50.000Z
2022-03-11T05:06:16.000Z
monet/lm_ops/conv.cpp
stjordanis/MONeT-1
98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af
[ "MIT" ]
4
2020-10-28T02:27:43.000Z
2021-03-31T00:04:43.000Z
monet/lm_ops/conv.cpp
stjordanis/MONeT-1
98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af
[ "MIT" ]
15
2020-10-28T02:32:12.000Z
2021-12-23T13:20:23.000Z
#include <torch/extension.h>
#include <mutex>
#include <unordered_map>

#include <ATen/cudnn/cudnn-wrapper.h>
#include <ATen/cudnn/Descriptors.h>
#include <ATen/cudnn/Types.h>
#include <ATen/cudnn/Utils.h>

using namespace at;
using namespace at::native;

constexpr int max_dim = 3;

// Dimension indices for NCHW-style tensors.
constexpr int input_batch_size_dim = 0;  // also grad_input
constexpr int input_channels_dim = 1;
constexpr int output_batch_size_dim = 0;  // also grad_output
constexpr int output_channels_dim = 1;
constexpr int weight_output_channels_dim = 0;
constexpr int weight_input_channels_dim = 1;

// Lookup tables mapping a small integer `alg_type` (as received from Python)
// to a concrete cuDNN algorithm enum; see the *_2 wrapper functions below.
static const std::array<cudnnConvolutionFwdAlgo_t, 8> fwd_algos = {
    CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
    CUDNN_CONVOLUTION_FWD_ALGO_FFT,
    CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING,
    CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
    CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM,
    CUDNN_CONVOLUTION_FWD_ALGO_DIRECT,
    CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD,
    CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
};

static const std::array<cudnnConvolutionBwdDataAlgo_t, 6> bwd_algos = {
    CUDNN_CONVOLUTION_BWD_DATA_ALGO_0,
    CUDNN_CONVOLUTION_BWD_DATA_ALGO_1,
    CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT,
    CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING,
    CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD,
    CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
};

static const std::array<cudnnConvolutionBwdFilterAlgo_t, 6> bwd_w_algos = {
    CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0,
    CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1,
    CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT,
    CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3,
    CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED,
    CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING,
};

// Decide whether the NHWC (channels-last) path may be used for this
// input/weight pair.
static inline bool cudnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
  // disable NHWC for float64 input.
  if (input.scalar_type() == at::kDouble || weight.scalar_type() == at::kDouble) {
    return false;
  }
  // NHWC requires cuDNN >= 7.6.3 and at least one operand already suggesting
  // the channels-last memory format.
  return (CUDNN_VERSION >= 7603) &&
      ((input.suggest_memory_format() == at::MemoryFormat::ChannelsLast) ||
       (weight.suggest_memory_format() == at::MemoryFormat::ChannelsLast));
}

// User-defined literal: `1_TiB` == 2^40 bytes.
constexpr size_t operator "" _TiB(unsigned long long n) {
  return size_t(n) * 1024 * 1024 * 1024 * 1024;
}

// Validates that `args` (padding/stride/dilation) has exactly `expected_size`
// entries and that none of them is negative; raises otherwise.
static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
{
  TORCH_CHECK(args.size() <= expected_size,
              "Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",
              expected_size, " (while checking arguments for ", c, ")");
  TORCH_CHECK(args.size() >= expected_size,
              "Not enough ", arg_name, " values (", args.size(), ") supplied, expecting ",
              expected_size, " (while checking arguments for ", c, ")");

  // NOTE(review): the lambda narrows the IntArrayRef's int64_t elements to
  // int; fine for realistic padding/stride values but worth confirming.
  auto num_negative_values = std::count_if(args.begin(), args.end(), [](int x){return x < 0;});
  if (num_negative_values > 0){
    std::stringstream ss;
    ss << arg_name << " should be greater than zero but got (";
    std::copy(args.begin(), args.end() - 1, std::ostream_iterator<int>(ss,", "));
    ss << args.back() << ")" << " (while checking arguments for " << c << ")";
    AT_ERROR(ss.str());
  }
}

// Sanity-checks the geometry of an input/weight/output triple for a grouped
// convolution (dimension counts, channel counts, matching ranks).
static void convolution_shape_check(
    CheckedFrom c,
    const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
    IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
{
  check_args(c, padding, input->dim() - 2, "padding");
  check_args(c, stride, padding.size(), "stride");
  check_args(c, dilation, padding.size(), "dilation");

  // Input
  checkDimRange(c, input, 3, 6 /* exclusive */);
  checkSize(c, input, input_channels_dim, weight->size(1) * groups);

  // Weight
  checkSameDim(c, input, weight);

  // TODO: check that output->size() matches output_sizes
  // TODO: check that weight matches output->sizes()
  checkSameDim(c, input, output);
}

// Computes the output shape of a (possibly dilated) convolution:
// out = (in + 2*pad - dilated_kernel) / stride + 1 per spatial dim.
static inline std::vector<int64_t> conv_output_size(
    IntArrayRef input_size, IntArrayRef weight_size,
    IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
) {
  // ASSERT(input_size.size() > 2)
  // ASSERT(input_size.size() == weight_size.size())
  bool has_dilation = dilation.size() > 0;
  auto dim = input_size.size();
  std::vector<int64_t> output_size(dim);
  output_size[0] = input_size[input_batch_size_dim];
  output_size[1] = weight_size[weight_output_channels_dim];
  for (size_t d = 2; d < dim; ++d) {
    auto dilation_ = has_dilation ? dilation[d - 2] : 1;
    auto kernel = dilation_ * (weight_size[d] - 1) + 1;
    output_size[d] = (input_size[d] + (2 * padding[d - 2]) - kernel) / stride[d - 2] + 1;
  }
  return output_size;
}

// Inverse of conv_output_size: given an output shape, computes the input shape
// of the corresponding transposed convolution (output_padding resolves the
// ambiguity introduced by integer division in the forward formula).
static inline std::vector<int64_t> conv_input_size(
    IntArrayRef output_size, IntArrayRef weight_size,
    IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
  // ASSERT(output_size.size() > 2)
  // ASSERT(output_size.size() == weight_size.size())
  auto dim = output_size.size();
  std::vector<int64_t> input_size(dim);
  input_size[0] = output_size[output_batch_size_dim];
  input_size[1] = weight_size[weight_input_channels_dim] * groups;
  for (size_t d = 2; d < dim; ++d) {
    int kernel = dilation[d - 2] * (weight_size[d] - 1) + 1;
    input_size[d] = (output_size[d] - 1) * stride[d - 2] - (2 * padding[d - 2]) +
                     kernel + output_padding[d - 2];
  }
  return input_size;
}

// Plain (non-checkpointed) forward conv: delegates to ATen's cuDNN binding
// with benchmark=true, deterministic=false. Note the (stride, padding)
// parameter order here differs from the (padding, stride) order expected by
// torch::cudnn_convolution; the call below swaps them accordingly.
torch::Tensor forward_normal(const torch::Tensor& input, const torch::Tensor& weight,
                             torch::IntArrayRef stride, torch::IntArrayRef padding,
                             torch::IntArrayRef dilation, int64_t groups) {
  // torch::NoGradGuard no_grad_guard;
  static torch::Tensor undefined;
  // return torch::conv2d(input, weight, undefined, stride, padding, dilation, groups);
  return torch::cudnn_convolution(input, weight, undefined, padding, stride, dilation, groups, true, false); //benchmark, deterministic
}

// Plain backward-by-input: delegates to ATen (benchmark=true, deterministic=false).
torch::Tensor backward_input_normal(torch::IntArrayRef input_sizes, const torch::Tensor& grad_output_t,
                                    const torch::Tensor& weight, torch::IntArrayRef stride,
                                    torch::IntArrayRef padding, torch::IntArrayRef dilation, int64_t groups) {
  // torch::NoGradGuard no_grad_guard;
  // torch::Tensor grad_output = grad_output_t.contiguous(weight.suggest_memory_format());
  return torch::cudnn_convolution_backward_input(input_sizes, grad_output_t, weight, padding, stride, dilation, groups, true, false);
}

// Plain backward-by-weight: delegates to ATen (benchmark=true, deterministic=false).
torch::Tensor backward_weight_normal(torch::IntArrayRef weight_sizes, const torch::Tensor& grad_output_t,
                                     const torch::Tensor& input, torch::IntArrayRef stride,
                                     torch::IntArrayRef padding, torch::IntArrayRef dilation, int64_t groups) {
  // torch::NoGradGuard no_grad_guard;
  // torch::Tensor grad_output = grad_output_t.contiguous(input.suggest_memory_format());
  return torch::cudnn_convolution_backward_weight(weight_sizes, grad_output_t, input, padding, stride, dilation, groups, true, false);
}

// This POD struct is used to let us easily compute hashes of the
// parameters
struct ConvolutionParams
{
  int input_size[2 + max_dim];
  int input_stride[2 + max_dim];
  int weight_size[2 + max_dim];
  int padding[max_dim];
  int stride[max_dim];
  int dilation[max_dim];
  int64_t groups;
  // NB: transposed purposely omitted: transposed just swaps
  // forward and backward, so you can reuse the benchmark entry,
};

// NB: This can't be a constructor, because then ConvolutionParams
// would not be a POD anymore.
// TODO: Use TensorGeometry here instead of the entire Tensor, which we
// don't actually need.  (OTOH: We can always pass in
// grad_input/grad_output, so this is not very pressing)
void setConvolutionParams(
    ConvolutionParams* params,
    const at::Tensor& input, const at::Tensor& weight,
    IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {

  memset(params, 0, sizeof(ConvolutionParams));
  // ASSERT(weight.dim() == input.dim())
  for (int i = 0; i != input.dim(); ++i) {
    params->input_size[i] = (int) input.size(i);
    params->input_stride[i] = (int) input.stride(i);
    params->weight_size[i] = (int) weight.size(i);
  }
  // ASSERT(padding.size() == stride.size())
  // ASSERT(padding.size() == dilation.size())
  for (size_t i = 0; i != padding.size(); ++i) {
    params->padding[i] = padding[i];
    params->stride[i] = stride[i];
    params->dilation[i] = dilation[i];
  }
  // In principle, we shouldn't parametrize by groups for legacy
  // CuDNN, but it doesn't seem worth the effort to actually do this.
  params->groups = groups;
}

// Convenience struct for passing around descriptors and data
// pointers
// NOTE(review): in `const Tensor& input, output, weight;` the `&` binds only
// to `input`; `output` and `weight` are const Tensor copies (cheap refcounted
// handles, so behavior is unaffected) — presumably references were intended.
struct ConvolutionArgs {
  cudnnHandle_t handle;
  ConvolutionParams params;
  TensorDescriptor idesc, odesc;
  FilterDescriptor wdesc;
  const Tensor& input, output, weight;
  ConvolutionDescriptor cdesc;

  ConvolutionArgs(const Tensor& input, const Tensor& output, const Tensor& weight)
      : input(input), output(output), weight(weight) {
  }
};

inline Tensor allocate_workspace(size_t size, const Tensor &other) {
  // Sometimes cuDNN returns a workspace size > 2^63, this could make the allocation of
  // workspace fail with some 64bit indexing error instead of an OOM error. In such case,
  // we manually fail with OOM.
  TORCH_CHECK_WITH(CUDAOutOfMemoryError, size < 1_TiB, "Not enough memory for workspace!");
  return at::empty({static_cast<int64_t>(size)}, other.options().dtype(kByte));
}

// ---------------------------------------------------------------------
//
// Splitting to 32bit
//
// ---------------------------------------------------------------------

// cuDNN uses 32-bit indexing internally; when either tensor exceeds
// INT_MAX elements, run `func_32bit` over batch-dimension slices instead.
template <typename func_t, typename algo_t>
static inline void split_batch_dim_to_32bit_out(
    const at::Tensor& output,
    const at::Tensor& input,
    const at::Tensor& weight,
    IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
    algo_t algo, int64_t max_worksize, func_t func_32bit) {
  constexpr int64_t int_max = std::numeric_limits<int>::max();
  const int64_t ni = input.numel();
  const int64_t no = output.numel();
  // Assume the shape of the tensor is (N, C, D1, D2, ...)
  // if N * C * D1 * D2 * ... <= int_max, then no need to split at all
  if (ni <= int_max && no <= int_max) {
    func_32bit(output, input, weight, padding, stride, dilation, groups, algo);
    return;
  }
  // else, if C * D1 * D2 * ... <= int_max, then we just need to split across the N dimension
  //
  // Here we use a simple heuristic to determine the size of each split
  // We don't max out the 2^31 address space because this number is super
  // large and very likely to get an OOM.
  int64_t n = output.size(0);
  int64_t max_inner_size = std::max<int64_t>(ni, no) / n;
  int64_t split_size = std::max<int64_t>(max_worksize / max_inner_size, 1L);
  int64_t num_splits = (n + split_size - 1) / split_size;
  if (split_size * max_inner_size < int_max) {
    for (int64_t i = 0; i < num_splits; i++) {
      int64_t start = split_size * i;
      int64_t split_size_ = std::min<int64_t>(split_size, n - start);
      Tensor input_ = input.narrow(0, start, split_size_);
      Tensor output_ = output.narrow(0, start, split_size_);
      func_32bit(output_, input_, weight, padding, stride, dilation, groups, algo);
    }
    return;
  }

  // If control flow reaches here, this means even splitting N is not enough, then things starts to become complicated:
  // For example, for conv2d, the following questions need to be considered.
  // - Is the memory layout NCHW or NHWC ?
  // - If the conv is NCHW -> NC'H'W', then should we
  //   - split only NC?
  //   - split only N'C'?
  //   - split both?
  // - If the conv is NHWC, then we need to split across H, we need to be very careful about the boundary condition
  //   to make sure that the boundary is handled correctly.
  // - If we decide to make these splits, is the memory contiguous? Do we need to copy the memory?
  // Considering the complexity of this issue, it is better not to use cuDNN for this case
  TORCH_INTERNAL_ASSERT(false, "This case should not be dispatched to cuDNN.");
}

// ---------------------------------------------------------------------
//
// Convolution forward / Transposed convolution backward
//
// ---------------------------------------------------------------------

// The raw API directly invokes CuDNN and does not emulate support
// for group convolution on old versions of CuDNN.
//
// There are a few reasons this should never be directly exposed
// via ATen:
//
//    - It takes output as a parameter (this should be computed!)
// - It doesn't do input checking // - It doesn't resize output (it is assumed to be correctly sized) // void raw_cudnn_convolution_forward_out_32bit( const Tensor& output, const Tensor& input, const Tensor& weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionFwdAlgo_t algo) { auto dataType = CUDNN_DATA_FLOAT; ConvolutionArgs args{ input, output, weight }; args.handle = getCudnnHandle(); setConvolutionParams(&args.params, input, weight, padding, stride, dilation, groups); args.idesc.set(input); args.wdesc.set(weight, 0, input.suggest_memory_format()==at::MemoryFormat::ChannelsLast); args.odesc.set(output); args.cdesc.set(dataType, input.dim() - 2, args.params.padding, args.params.stride, args.params.dilation, args.params.groups); size_t workspaceSize; cudnnGetConvolutionForwardWorkspaceSize( args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), args.odesc.desc(), algo, &workspaceSize); Tensor workspace = allocate_workspace(workspaceSize, input); // update convDesc mathType since cudnn 7.4+ now requires both algo + mathType to figure out // whether to use Tensor core kernels or not // See Note [behavior of cudnnFind and cudnnGet] AT_CUDNN_CHECK(cudnnSetConvolutionMathType(args.cdesc.mut_desc(), CUDNN_DEFAULT_MATH)); Constant one(dataType, 1); Constant zero(dataType, 0); AT_CUDNN_CHECK(cudnnConvolutionForward( args.handle, &one, args.idesc.desc(), input.data_ptr(), args.wdesc.desc(), weight.data_ptr(), args.cdesc.desc(), algo, workspace.data_ptr(), workspaceSize, &zero, args.odesc.desc(), output.data_ptr())); } void raw_cudnn_convolution_forward_out( const Tensor& output, const Tensor& input, const Tensor& weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionFwdAlgo_t algo) { split_batch_dim_to_32bit_out(output, input, weight, padding, stride, dilation, groups, algo, 1024 * 1024 * 256, raw_cudnn_convolution_forward_out_32bit); } Tensor 
cudnn_convolution_forward( CheckedFrom c, const TensorArg& input, const TensorArg& weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionFwdAlgo_t algo) { checkAllSameType(c, {input, weight}); checkAllSameGPU(c, {input, weight}); auto layout = cudnn_conv_use_channels_last(*input, *weight) ? at::MemoryFormat::ChannelsLast : at::MemoryFormat::Contiguous; auto output_t = at::empty( conv_output_size(input->sizes(), weight->sizes(), padding, stride, dilation), input->options(), layout); if (output_t.numel() == 0) { return output_t; } // Avoid ambiguity of "output" when this is being used as backwards TensorArg output{ output_t, "result", 0 }; convolution_shape_check(c, input, weight, output, padding, stride, dilation, groups); // See #4500 Tensor weight_contig = weight->contiguous(layout); // Make sure that NC11 strides follow formula weight_contig.resize_(weight_contig.sizes(), layout); Tensor input_contig = input->contiguous(layout); input_contig.resize_(input_contig.sizes(), layout); raw_cudnn_convolution_forward_out( *output, input_contig, weight_contig, padding, stride, dilation, groups, algo); return *output; } // same as cudnn_convolution_transpose_backward_input_2 Tensor cudnn_convolution_2( const Tensor& input_t, const Tensor& weight_t, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, int alg_type) { const cudnnConvolutionFwdAlgo_t algo = (0 <= alg_type && alg_type < fwd_algos.size()) ? 
fwd_algos[alg_type] : CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; TensorArg input { input_t, "input", 1 }, weight { weight_t, "weight", 2 }; auto output_t = cudnn_convolution_forward( "cudnn_convolution", input, weight, padding, stride, dilation, groups, algo); return output_t; } // --------------------------------------------------------------------- // // Convolution backward / Transposed convolution forward // // --------------------------------------------------------------------- void raw_cudnn_convolution_backward_input_out_32bit( const at::Tensor& grad_input, const at::Tensor& grad_output, const at::Tensor& weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdDataAlgo_t algo) { auto dataType = CUDNN_DATA_FLOAT; ConvolutionArgs args{ grad_input, grad_output, weight }; args.handle = getCudnnHandle(); setConvolutionParams(&args.params, grad_input, weight, padding, stride, dilation, groups); args.idesc.set(grad_input); args.wdesc.set(weight, 0, grad_output.suggest_memory_format()==at::MemoryFormat::ChannelsLast); args.odesc.set(grad_output); args.cdesc.set(dataType, grad_output.dim() - 2, args.params.padding, args.params.stride, args.params.dilation, args.params.groups); size_t workspaceSize; AT_CUDNN_CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize( args.handle, args.wdesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.idesc.desc(), algo, &workspaceSize)); Tensor workspace = allocate_workspace(workspaceSize, grad_output); // update convDesc mathType since cudnn 7.4+ now requires both algo + mathType to figure out // whether to use Tensor core kernels or not // See Note [behavior of cudnnFind and cudnnGet] AT_CUDNN_CHECK(cudnnSetConvolutionMathType(args.cdesc.mut_desc(), CUDNN_DEFAULT_MATH)); Constant one(dataType, 1); Constant zero(dataType, 0); AT_CUDNN_CHECK(cudnnConvolutionBackwardData( args.handle, &one, args.wdesc.desc(), weight.data_ptr(), args.odesc.desc(), grad_output.data_ptr(), 
args.cdesc.desc(), algo, workspace.data_ptr(), workspaceSize, &zero, args.idesc.desc(), grad_input.data_ptr())); } void raw_cudnn_convolution_backward_input_out( const at::Tensor& grad_input, const at::Tensor& grad_output, const at::Tensor& weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdDataAlgo_t algo) { split_batch_dim_to_32bit_out(grad_input, grad_output, weight, padding, stride, dilation, groups, algo, 1024 * 1024 * 128, raw_cudnn_convolution_backward_input_out_32bit); } // NOTE [ Backward vs transpose convolutions ] // // Backward and transpose are algorithmically equivalent, but they // compute their geometry differently. In a backwards, you knew what // the original size of the input tensor was, so you can cache that // geometry and fill it directly. In transposed convolution, it is // more conventional to not explicitly specify the output (previously // input) size, and compute it. This, however, leaves a degree of // freedom; this degree of freedom is resolved using the // output_padding parameter. Both of these interfaces are equivalent, // but they are differently convenient depending on the use case. Tensor cudnn_convolution_backward_input( CheckedFrom c, IntArrayRef input_size, const TensorArg& grad_output, const TensorArg& weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdDataAlgo_t algo) { checkAllSameType(c, {grad_output, weight}); checkAllSameGPU(c, {grad_output, weight}); auto layout = cudnn_conv_use_channels_last(*grad_output, *weight) ? 
at::MemoryFormat::ChannelsLast : at::MemoryFormat::Contiguous; auto grad_input_t = at::empty(input_size, grad_output->options(), layout); // Avoid "grad_input" when this is being used as transposed convolution TensorArg grad_input{ grad_input_t, "result", 0 }; convolution_shape_check(c, grad_input, weight, grad_output, padding, stride, dilation, groups); // See #4500 Tensor weight_contig = weight->contiguous(layout); // Make sure that NC11 strides follow formula weight_contig.resize_(weight_contig.sizes(), layout); Tensor grad_output_contig = grad_output->contiguous(layout); grad_output_contig.resize_(grad_output_contig.sizes(), layout); raw_cudnn_convolution_backward_input_out( *grad_input, grad_output_contig, weight_contig, padding, stride, dilation, groups, algo); return *grad_input; } Tensor cudnn_convolution_backward_input_2( IntArrayRef input_size, const Tensor& grad_output_t, const Tensor& weight_t, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, int alg_type) { const cudnnConvolutionBwdDataAlgo_t algo = (0 <= alg_type && alg_type < bwd_algos.size()) ? 
bwd_algos[alg_type] : CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; TensorArg grad_output{ grad_output_t, "grad_output", 1 }, weight{ weight_t, "weight", 2 }; return cudnn_convolution_backward_input( "cudnn_convolution_backward_input", input_size, grad_output, weight, padding, stride, dilation, groups, algo); } Tensor cudnn_convolution_transpose_forward( CheckedFrom c, const TensorArg& grad_output, const TensorArg& weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdDataAlgo_t algo) { auto input_size = conv_input_size(grad_output->sizes(), weight->sizes(), padding, output_padding, stride, dilation, groups); return cudnn_convolution_backward_input(c, input_size, grad_output, weight, padding, stride, dilation, groups, algo); } Tensor cudnn_convolution_transpose_2( const Tensor& input_t, const Tensor& weight_t, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, int alg_type) { const cudnnConvolutionBwdDataAlgo_t algo = (0 <= alg_type && alg_type < bwd_algos.size()) ? 
bwd_algos[alg_type] : CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; TensorArg input { input_t, "input", 1 }, weight { weight_t, "weight", 2 }; CheckedFrom c = "cudnn_convolution_transpose"; auto output_t = cudnn_convolution_transpose_forward( c, input, weight, padding, output_padding, stride, dilation, groups, algo); return output_t; } // --------------------------------------------------------------------- // // Convolution backward (weight) // // --------------------------------------------------------------------- void raw_cudnn_convolution_backward_weight_out_32bit( const Tensor& grad_weight, const Tensor& grad_output, const Tensor& input, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdFilterAlgo_t algo) { auto dataType = CUDNN_DATA_FLOAT; ConvolutionArgs args{ input, grad_output, grad_weight }; args.handle = getCudnnHandle(); setConvolutionParams(&args.params, input, grad_weight, padding, stride, dilation, groups); args.idesc.set(input); args.wdesc.set(grad_weight, 0, input.suggest_memory_format()==at::MemoryFormat::ChannelsLast); args.odesc.set(grad_output); args.cdesc.set(dataType, input.dim() - 2, args.params.padding, args.params.stride, args.params.dilation, args.params.groups); size_t workspaceSize; cudnnGetConvolutionBackwardFilterWorkspaceSize( args.handle, args.idesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.wdesc.desc(), algo, &workspaceSize); Tensor workspace = allocate_workspace(workspaceSize, input); // update convDesc mathType since cudnn 7.4+ now requires both algo + mathType to figure out // whether to use Tensor core kernels or not // See Note [behavior of cudnnFind and cudnnGet] AT_CUDNN_CHECK(cudnnSetConvolutionMathType(args.cdesc.mut_desc(), CUDNN_DEFAULT_MATH)); Constant one(dataType, 1); Constant zero(dataType, 0); AT_CUDNN_CHECK(cudnnConvolutionBackwardFilter( args.handle, &one, args.idesc.desc(), input.data_ptr(), args.odesc.desc(), grad_output.data_ptr(), args.cdesc.desc(), algo, 
workspace.data_ptr(), workspaceSize, &zero, args.wdesc.desc(), grad_weight.data_ptr())); } void raw_cudnn_convolution_backward_weight_out( const Tensor& grad_weight, const Tensor& grad_output, const Tensor& input, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdFilterAlgo_t algo) { constexpr int64_t int_max = std::numeric_limits<int>::max(); const int64_t ni = input.numel(); const int64_t no = grad_output.numel(); // Assume the shape of the tensor is (N, C, D1, D2, ...) // if N * C * D1 * D2 * ... <= int_max, then no need to split at all if (ni <= int_max && no <= int_max) { raw_cudnn_convolution_backward_weight_out_32bit(grad_weight, grad_output, input, padding, stride, dilation, groups, algo); return; } // else, if C * D1 * D2 * ... <= int_max, then we just need to split across the N dimension // // Here we use a simple heuristics to determine the size of each split // We don't max out the 2^31 address space because this number is super // large and very likely to get an OOM. int64_t n = grad_output.size(0); int64_t max_inner_size = std::max<int64_t>(ni, no) / n; int64_t split_size = std::max<int64_t>(1024 * 1024 * 512 / max_inner_size, 1L); int64_t num_splits = (n + split_size - 1) / split_size; if (split_size * max_inner_size < int_max) { for (int64_t i = 0; i < num_splits; i++) { int64_t start = split_size * i; int64_t split_size_ = std::min<int64_t>(split_size, n - start); Tensor input_ = input.narrow(0, start, split_size_); Tensor grad_output_ = grad_output.narrow(0, start, split_size_); Tensor grad_weight_ = at::empty_like(grad_weight); raw_cudnn_convolution_backward_weight_out_32bit(grad_weight_, grad_output_, input_, padding, stride, dilation, groups, algo); grad_weight.add_(grad_weight_); } return; } // If control flow reaches here, this means even splitting N is not enough, then things starts to become complicated: // For example, for conv2d, there following questions needs to be considered. 
// - Is the memory layout NCHW or NHWC ? // - If the conv is NCHW -> NC'H'W', then should we // - split only NC? // - split only N'C'? // - split both? // - If the conv is NHWC, then we need to split across H, we need to be very careful about the boundary condition // to make sure that the boundary is handled correctly. // - If we decide to make these splits, is the memory contiguous? Do we need to copy the memory? // Considering the complexity of this issue, it is better not to use cuDNN for this case TORCH_INTERNAL_ASSERT(false, "This case should not be dispatched to cuDNN."); } Tensor cudnn_convolution_backward_weight( CheckedFrom c, IntArrayRef weight_size, const Tensor& grad_output_t, const Tensor& input_t, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, cudnnConvolutionBwdFilterAlgo_t algo) { auto layout = cudnn_conv_use_channels_last(input_t, grad_output_t) ? at::MemoryFormat::ChannelsLast : at::MemoryFormat::Contiguous; Tensor grad_output_contig_t = grad_output_t.contiguous(layout); // Make sure that NC11 strides follow formula grad_output_contig_t.resize_(grad_output_contig_t.sizes(), layout); TensorArg grad_output_contig{ grad_output_contig_t, "grad_output", 1 }; Tensor input_contig_t = input_t.contiguous(layout); input_contig_t.resize_(input_contig_t.sizes(), layout); TensorArg input{ input_contig_t, "input", 2}; checkAllSameType(c, {grad_output_contig, input}); checkAllSameGPU(c, {grad_output_contig, input}); auto grad_weight_t = at::empty(weight_size, grad_output_contig->options(), layout); // For uniformity with everything else, although it seems grad_weight // would be unambiguous too. 
TensorArg grad_weight{ grad_weight_t, "result", 0 }; convolution_shape_check(c, input, grad_weight, grad_output_contig, padding, stride, dilation, groups); raw_cudnn_convolution_backward_weight_out( *grad_weight, *grad_output_contig, *input, padding, stride, dilation, groups, algo); return grad_weight_t; } Tensor cudnn_convolution_backward_weight_2( IntArrayRef weight_size, const Tensor& grad_output_t, const Tensor& input_t, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, int alg_type) { const cudnnConvolutionBwdFilterAlgo_t algo = (0 <= alg_type && alg_type < bwd_w_algos.size()) ? bwd_w_algos[alg_type] : CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; return cudnn_convolution_backward_weight( "cudnn_convolution_backward_weight", weight_size, grad_output_t, input_t, padding, stride, dilation, groups, algo); } Tensor cudnn_convolution_transpose_backward_weight_2( IntArrayRef weight_size, const Tensor& grad_output_t, const Tensor& input_t, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, int alg_type) { const cudnnConvolutionBwdFilterAlgo_t algo = (0 <= alg_type && alg_type < bwd_w_algos.size()) ? 
bwd_w_algos[alg_type] : CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; return cudnn_convolution_backward_weight( "cudnn_convolution_backward_weight", weight_size, input_t, grad_output_t, padding, stride, dilation, groups, algo); } Tensor convolution_main( const Tensor& input, const Tensor& weight, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { if (bias.dim() == 0) { return at::convolution(input, weight, Tensor(), stride, padding, dilation, transposed, output_padding, groups); } else { return at::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); } } std::tuple<Tensor,Tensor> backward_depthwise( const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask ) { return torch::thnn_conv_depthwise2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("n_fwd_algos", [](){return fwd_algos.size();}); m.def("n_bwd_ip_algos", [](){return bwd_algos.size();}); m.def("n_bwd_wt_algos", [](){return bwd_w_algos.size();}); m.def("cudnn_convolution", &cudnn_convolution_2); m.def("cudnn_convolution_backward_input", &cudnn_convolution_backward_input_2); m.def("cudnn_convolution_backward_weight", &cudnn_convolution_backward_weight_2); m.def("cudnn_convolution_transpose", &cudnn_convolution_transpose_2); m.def("cudnn_convolution_transpose_backward_input", &cudnn_convolution_2); m.def("cudnn_convolution_transpose_backward_weight", &cudnn_convolution_transpose_backward_weight_2); m.def("forward_normal", &forward_normal, "Conv forward"); m.def("backward_input_normal", &backward_input_normal, "Conv backward input"); m.def("backward_weight_normal", &backward_weight_normal, "Conv backward weight"); m.def("convolution_main", &convolution_main, 
"First conv abstraction"); m.def("backward_depthwise", &backward_depthwise, "Conv backward thnn depthwise"); }
43.103217
172
0.71345
[ "geometry", "shape", "vector" ]
11ac176f69b8f8d3299e3b809f74fc083055d187
4,358
cpp
C++
Engine/Engine/gamemodel.cpp
Elephly/Snatch
09a19de4932af80ca4c5def1683cf1bc1b271a47
[ "MIT" ]
null
null
null
Engine/Engine/gamemodel.cpp
Elephly/Snatch
09a19de4932af80ca4c5def1683cf1bc1b271a47
[ "MIT" ]
null
null
null
Engine/Engine/gamemodel.cpp
Elephly/Snatch
09a19de4932af80ca4c5def1683cf1bc1b271a47
[ "MIT" ]
null
null
null
#include "gamemodel.h" #include "modelclass.h" GameModel::GameModel() { render = true; //initialize all matrices to identity matrix XMStoreFloat4x4(&m_orientRotateMatrix, XMMatrixIdentity()); XMStoreFloat4x4(&m_orientTranslateMatrix, XMMatrixIdentity()); XMStoreFloat4x4(&m_dimensionScaleMatrix, XMMatrixIdentity()); XMStoreFloat4x4(&m_worldRotateMatrix, XMMatrixIdentity()); XMStoreFloat4x4(&m_worldTranslateMatrix, XMMatrixIdentity()); } GameModel::~GameModel(void) { Shutdown(); } void GameModel::Shutdown() { } bool GameModel::InitializeVertexModels(ID3D11Device* d3dDevice){ //subclasses who have vertices are expected to overide this method return false; } bool GameModel::initializeTextures(ID3D11Device* device){ //subclasses who have textures are expected to overide this method return false; } ID3D11ShaderResourceView* GameModel::GetTexture(){ //subclasses that have textures are expected to overide this method return 0; } ID3D11ShaderResourceView* GameModel::GetTexture(int i){ //subclasses that have textures are expected to overide this method return 0; } //ModelClass* GameModel::GetVertexModel() {return m_VertexModel; } XMFLOAT4X4 GameModel::GetWorldMatrix(){ //Build the world matrix to give to the graphis system XMFLOAT4X4 worldMatrix; XMStoreFloat4x4(&worldMatrix, XMLoadFloat4x4(&m_orientRotateMatrix) * XMLoadFloat4x4(&m_orientTranslateMatrix) * XMLoadFloat4x4(&m_worldRotateMatrix) * XMLoadFloat4x4(&m_worldTranslateMatrix) ); return worldMatrix; } bool GameModel::Render(ID3D11DeviceContext* deviceContext, XMFLOAT4X4 viewMatrix, XMFLOAT4X4 projectionMatrix, ColorShaderClass* colorShader, TextureShaderClass* textureShader){ //Render the model on the device context using the colorShader or textureShader as appropriate return false; //subclasses must implement this method } XMFLOAT4X4 GameModel::GetWorldRotateMatrix(){ return m_worldRotateMatrix; } void GameModel::orientRotateX(float radianAngle){ // orientationMatrix *= Matrix.CreateRotationY(ry); 
XMStoreFloat4x4(&m_orientRotateMatrix, XMLoadFloat4x4(&m_orientRotateMatrix) * XMMatrixRotationX(radianAngle)); } void GameModel::orientRotateY(float radianAngle){ // orientationMatrix *= Matrix.CreateRotationY(ry); XMStoreFloat4x4(&m_orientRotateMatrix, XMLoadFloat4x4(&m_orientRotateMatrix) * XMMatrixRotationY(radianAngle)); } void GameModel::orientRotateZ(float radianAngle){ // orientationMatrix *= Matrix.CreateRotationY(ry); XMStoreFloat4x4(&m_orientRotateMatrix, XMLoadFloat4x4(&m_orientRotateMatrix) * XMMatrixRotationZ(radianAngle)); } void GameModel::orientTranslate(float deltaX, float deltaY, float deltaZ){ XMStoreFloat4x4(&m_orientTranslateMatrix, XMLoadFloat4x4(&m_orientTranslateMatrix) * XMMatrixTranslation(deltaX, deltaY, deltaZ)); } void GameModel::worldRotateX(float radianAngle){ // orientationMatrix *= Matrix.CreateRotationY(ry); XMStoreFloat4x4(&m_worldRotateMatrix, XMLoadFloat4x4(&m_worldRotateMatrix) * XMMatrixRotationX(radianAngle)); } void GameModel::worldRotateY(float radianAngle){ // orientationMatrix *= Matrix.CreateRotationY(ry); XMStoreFloat4x4(&m_worldRotateMatrix, XMLoadFloat4x4(&m_worldRotateMatrix) * XMMatrixRotationY(radianAngle)); } void GameModel::worldRotateZ(float radianAngle){ // orientationMatrix *= Matrix.CreateRotationY(ry); XMStoreFloat4x4(&m_worldRotateMatrix, XMLoadFloat4x4(&m_worldRotateMatrix) * XMMatrixRotationZ(radianAngle)); } void GameModel::worldTranslate(float deltaX, float deltaY, float deltaZ){ XMStoreFloat4x4(&m_worldTranslateMatrix, XMLoadFloat4x4(&m_worldTranslateMatrix) * XMMatrixTranslation(deltaX, deltaY, deltaZ)); } //User Control Moved Methods void GameModel::MoveLeft() { worldTranslate(-TRANSLATION_INCREMENT, 0.0f, 0.0f); } void GameModel::MoveRight() { worldTranslate(TRANSLATION_INCREMENT, 0, 0); } void GameModel::MoveUp() { worldTranslate(0.0f, TRANSLATION_INCREMENT, 0.0f); } void GameModel::MoveDown() { worldTranslate(0.0f, -TRANSLATION_INCREMENT, 0.0f); } void GameModel::RotateLeft() { 
orientRotateY(-XM_PIDIV4*ROTATION_SPEED); } void GameModel::RotateRight() { orientRotateY(XM_PIDIV4*ROTATION_SPEED); } bool GameModel::getRenderVal() { return render; } void GameModel::setRenderVal(bool b) { render = b; }
24.902857
178
0.781551
[ "render", "model" ]
11adde8081bcdd2add2ac1a550b9c63d6b6366be
919
hpp
C++
src/org/apache/poi/ss/formula/functions/DGet.hpp
pebble2015/cpoi
6dcc0c5e13e3e722b4ef9fd0baffbf62bf71ead6
[ "Apache-2.0" ]
null
null
null
src/org/apache/poi/ss/formula/functions/DGet.hpp
pebble2015/cpoi
6dcc0c5e13e3e722b4ef9fd0baffbf62bf71ead6
[ "Apache-2.0" ]
null
null
null
src/org/apache/poi/ss/formula/functions/DGet.hpp
pebble2015/cpoi
6dcc0c5e13e3e722b4ef9fd0baffbf62bf71ead6
[ "Apache-2.0" ]
null
null
null
// Generated from /POI/java/org/apache/poi/ss/formula/functions/DGet.java #pragma once #include <fwd-POI.hpp> #include <org/apache/poi/ss/formula/eval/fwd-POI.hpp> #include <org/apache/poi/ss/formula/functions/fwd-POI.hpp> #include <java/lang/Object.hpp> #include <org/apache/poi/ss/formula/functions/IDStarAlgorithm.hpp> struct default_init_tag; class poi::ss::formula::functions::DGet final : public virtual ::java::lang::Object , public IDStarAlgorithm { public: typedef ::java::lang::Object super; private: ::poi::ss::formula::eval::ValueEval* result { }; public: bool processMatch(::poi::ss::formula::eval::ValueEval* eval) override; ::poi::ss::formula::eval::ValueEval* getResult() override; // Generated DGet(); protected: DGet(const ::default_init_tag&); public: static ::java::lang::Class *class_(); private: virtual ::java::lang::Class* getClass0(); };
22.975
74
0.696409
[ "object" ]
11ae2ae831e9f19a7d1d1226ac48c3b62c1c6a86
61,552
cc
C++
src/main/util/conversions.cc
dohse/aerospike-client-nodejs
907743eba6987bfe8a4ecefb8cb88f7f72c545f4
[ "Apache-2.0" ]
null
null
null
src/main/util/conversions.cc
dohse/aerospike-client-nodejs
907743eba6987bfe8a4ecefb8cb88f7f72c545f4
[ "Apache-2.0" ]
null
null
null
src/main/util/conversions.cc
dohse/aerospike-client-nodejs
907743eba6987bfe8a4ecefb8cb88f7f72c545f4
[ "Apache-2.0" ]
null
null
null
/******************************************************************************* * Copyright 2013 Aerospike Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
******************************************************************************/ #include <node.h> #include <node_buffer.h> #include <v8.h> #include <cstdlib> #include <unistd.h> #include <inttypes.h> extern "C" { #include <aerospike/aerospike.h> #include <aerospike/aerospike_key.h> #include <aerospike/as_config.h> #include <aerospike/as_key.h> #include <aerospike/as_record.h> #include <aerospike/as_record_iterator.h> #include <aerospike/aerospike_batch.h> #include <aerospike/aerospike_scan.h> #include <aerospike/as_arraylist.h> #include <aerospike/as_arraylist_iterator.h> #include <aerospike/as_boolean.h> #include <aerospike/as_hashmap.h> #include <aerospike/as_hashmap_iterator.h> #include <aerospike/as_pair.h> #include <aerospike/as_scan.h> #include <aerospike/as_map.h> #include <aerospike/as_nil.h> #include <aerospike/as_stringmap.h> #include <citrusleaf/alloc.h> } #include "client.h" #include "conversions.h" #include "log.h" #include "enums.h" #include "async.h" using namespace node; using namespace v8; /******************************************************************************* * FUNCTIONS ******************************************************************************/ int config_from_jsobject(as_config * config, Local<Object> obj, LogInfo * log) { Local<Value> hosts = obj->Get(String::NewSymbol("hosts")); if(hosts->IsArray()) { Local<Array> hostlist = Local<Array>::Cast(hosts); for ( uint32_t i=0; i<hostlist->Length(); i++) { Local<Value> addr = hostlist->Get(i)->ToObject()->Get(String::NewSymbol("addr")); Local<Value> port = hostlist->Get(i)->ToObject()->Get(String::NewSymbol("port")); if ( addr->IsString() ) { config->hosts[i].addr = strdup(*String::Utf8Value(addr)); as_v8_detail(log,"host[%d].addr = \"%s\"", i, config->hosts[i].addr); } else { as_v8_error(log, "host[%d].addr should be an string", i); return AS_NODE_PARAM_ERR; } if ( port->IsNumber() ) { config->hosts[i].port = V8INTEGER_TO_CINTEGER(port); as_v8_detail(log,"host[%d].port = %d", i, 
config->hosts[i].port); } else { as_v8_error(log, "host[%d].port should be an integer", i); return AS_NODE_PARAM_ERR; } } } else{ as_v8_error(log, "Host list has to be an array"); return AS_NODE_PARAM_ERR; } if ( obj->Has(String::NewSymbol("policies"))){ Local<Value> policy_val = obj->Get(String::NewSymbol("policies")); if ( policy_val->IsObject() ){ Local<Object> policies = policy_val->ToObject(); if (policies->Has(String::NewSymbol("timeout"))) { Local<Value> v8timeout = policies->Get(String::NewSymbol("timeout")); config->policies.timeout = V8INTEGER_TO_CINTEGER(v8timeout); } if ( policies->Has(String::NewSymbol("read") )){ Local<Value> readpolicy = policies->Get(String::NewSymbol("read")); if ( readpolicy_from_jsobject(&config->policies.read, readpolicy->ToObject(), log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } } if ( policies->Has(String::NewSymbol("write"))){ Local<Value> writepolicy = policies->Get(String::NewSymbol("write")); if( writepolicy_from_jsobject(&config->policies.write, writepolicy->ToObject(), log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } } if ( policies->Has(String::NewSymbol("remove"))){ Local<Value> removepolicy = policies->Get(String::NewSymbol("remove")); if( removepolicy_from_jsobject(&config->policies.remove, removepolicy->ToObject(), log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } } if ( policies->Has(String::NewSymbol("batch"))){ Local<Value> batchpolicy = policies->Get(String::NewSymbol("batch")); if( batchpolicy_from_jsobject(&config->policies.batch, batchpolicy->ToObject(), log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } } if ( policies->Has(String::NewSymbol("operate"))){ Local<Value> operatepolicy = policies->Get(String::NewSymbol("operate")); if( operatepolicy_from_jsobject(&config->policies.operate, operatepolicy->ToObject(), log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } } if ( policies->Has(String::NewSymbol("info"))){ Local<Value> infopolicy = 
policies->Get(String::NewSymbol("info")); if( infopolicy_from_jsobject(&config->policies.info, infopolicy->ToObject(), log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } } } as_v8_debug(log, "Parsing global policies : Done"); } // stores information about mod-lua userpath and systempath. bool syspath_set = false; bool usrpath_set = false; // If modlua path is passed in config object, set those values here if( obj->Has(String::NewSymbol("modlua"))) { Handle<Object> modlua = obj->Get(String::NewSymbol("modlua"))->ToObject(); if ( modlua->Has(String::NewSymbol("systemPath"))) { Local<Value> v8syspath = modlua->Get(String::NewSymbol("systemPath")); strcpy(config->lua.system_path, *String::Utf8Value(v8syspath)); as_v8_debug(log, "The system path in the config is %s ", config->lua.system_path); syspath_set = true; } if( modlua->Has(String::NewSymbol("userPath"))) { Local<Value> v8usrpath = modlua->Get(String::NewSymbol("userPath")); strcpy(config->lua.user_path, *String::Utf8Value(v8usrpath)); as_v8_debug(log, "The user path in the config is %s ", config->lua.user_path); usrpath_set = true; } } // Modlua system and user path is not passed in a config object. // Set them to default values here. 
if(!syspath_set) { #ifdef __linux char const * syspath = "./node_modules/aerospike/aerospike-client-c/package/opt/aerospike/client/sys/udf/lua/"; #elif __APPLE__ char const * syspath = "./node_modules/aerospike/aerospike-client-c/package/usr/local/aerospike/client/sys/udf/lua/"; #endif int rc = access(syspath, R_OK); if(rc == 0) { strcpy(config->lua.system_path, syspath); } else { #ifdef __linux char const * syspath = "./aerospike-client-c/package/opt/aerospike/client/sys/udf/lua/"; #elif __APPLE__ char const * syspath = "./aerospike-client-c/package/usr/local/aerospike/client/sys/udf/lua/"; #endif rc = access(syspath, R_OK); if ( rc== 0) { strcpy(config->lua.system_path, syspath); } else { as_v8_debug(log,"Could not find a valid LUA system path %s", syspath); } } } if(!usrpath_set) { #ifdef __linux char const * usrpath = "./node_modules/aerospike/aerospike-client-c/package/opt/aerospike/client/usr/udf/lua/"; #elif __APPLE__ char const * usrpath = "./node_modules/aerospike/aerospike-client-c/package/usr/local/aerospike/client/usr/udf/lua/"; #endif int rc = access(usrpath, R_OK); if ( rc == 0) { strcpy(config->lua.user_path, usrpath); } else { #ifdef __linux char const * usrpath = "./aerospike-client-c/package/opt/aerospike/client/usr/udf/lua"; #elif __APPLE__ char const * usrpath = "./aerospike-client-c/package/usr/local/aerospike/client/usr/udf/lua"; #endif rc = access(usrpath, R_OK); if( rc == 0) { strcpy(config->lua.user_path, usrpath); } else { as_v8_debug(log, "Could not find valid LUA user path %s", usrpath); } } } return AS_NODE_PARAM_OK; } int host_from_jsobject( Local<Object> obj, char **addr, uint16_t * port, LogInfo * log) { if (obj->Has(String::New("addr")) ) { Local<Value> addrVal = obj->Get(String::NewSymbol("addr")); if ( addrVal->IsString() ) { *addr = (char*) malloc (HOST_ADDRESS_SIZE); strcpy(*addr, *String::Utf8Value(addrVal->ToString())); as_v8_detail(log, "host addr : %s", (*addr)); } else { return AS_NODE_PARAM_ERR; } } if ( 
obj->Has(String::New("port")) ){ Local<Value> portVal = obj->Get(String::NewSymbol("port")); if ( portVal->IsNumber() ) { *port = V8INTEGER_TO_CINTEGER(portVal); } else { return AS_NODE_PARAM_ERR; } } return AS_NODE_PARAM_OK; } int log_from_jsobject( LogInfo * log, Local<Object> obj) { int rc = AS_NODE_PARAM_OK; int level = log->severity; int fd = log->fd; if ( obj->IsObject() ) { Local<Object> v8_log = obj->ToObject(); // `level` is optional if ( rc == AS_NODE_PARAM_OK && v8_log->Has(String::New("level")) ) { Local<Value> v8_log_level = v8_log->Get(String::NewSymbol("level")); if ( v8_log_level->IsNumber() ){ level = (as_log_level) V8INTEGER_TO_CINTEGER(v8_log_level); } else if ( v8_log_level->IsNull() || v8_log_level->IsUndefined() ){ // `null` and `undefined` imply the value should not change. } else { // Any other value is a bad parameter rc = AS_NODE_PARAM_ERR; } } // `file` is optional if ( rc == AS_NODE_PARAM_OK && v8_log->Has(String::NewSymbol("file"))) { Local<Value> v8_file = obj->Get(String::NewSymbol("file")); if ( v8_file->IsNumber() ) { fd = V8INTEGER_TO_CINTEGER(v8_file); } else if (v8_file->IsNull() || v8_file->IsUndefined()){ // `null` and `undefined` imply the value should not change. } else { // Any other value is a bad parameter rc = AS_NODE_PARAM_ERR; } } } else { // The value should be an object. Otherwise it should fail. rc = AS_NODE_PARAM_ERR; } // Only if no error occurred do we set the log values. 
if ( rc == AS_NODE_PARAM_OK ) { log->severity = (as_log_level) level; log->fd = fd; } return AS_NODE_PARAM_OK; } as_val* asval_clone( as_val* val, LogInfo* log) { as_val_t t = as_val_type( (as_val*)val); as_val* clone_val = NULL; switch(t) { case AS_NIL: { clone_val = (as_val*) &as_nil; break; } case AS_BOOLEAN: { as_boolean *bool_val = as_boolean_fromval(val); as_boolean *clone_bool = as_boolean_new(bool_val->value); if( clone_bool == NULL) { as_v8_error(log, "cloning a boolean value failed"); } clone_val = as_boolean_toval(clone_bool); break; } case AS_INTEGER: { as_integer* int_val = as_integer_fromval( val ); int64_t ival = as_integer_get( int_val); as_v8_detail(log, "Cloning Integer value %d", ival); as_integer* clone_int = as_integer_new(ival); if(clone_int == NULL) { as_v8_error(log, "Cloning integer failed"); } clone_val = as_integer_toval(clone_int); break; } case AS_STRING: { as_string* str_val = as_string_fromval( val ); char* strval = as_string_get( str_val); as_v8_detail(log, "Cloning String value %s", strval); char* clone_str = (char*) cf_strdup( strval); if(clone_str == NULL) { as_v8_error(log, "cloning string failed"); } as_string* clone_as = as_string_new(clone_str, true); if(clone_as == NULL) { as_v8_error(log, "cloning string failed"); } clone_val = as_string_toval( clone_as); break; } case AS_BYTES: { as_bytes* bytes_val = as_bytes_fromval( val); size_t size = as_bytes_size(bytes_val); uint8_t *bytes = (uint8_t*) cf_malloc(size); memcpy(bytes, as_bytes_get(bytes_val), size); as_v8_detail(log, "Cloning Blob value %u ", bytes); clone_val = as_bytes_toval(as_bytes_new_wrap( bytes, size, true)); break; } case AS_LIST: { as_arraylist* list = (as_arraylist*) as_list_fromval( val); clone_val = as_list_toval( (as_list*)as_arraylist_new(as_arraylist_size(list), list->block_size)); as_arraylist_iterator it; as_arraylist_iterator_init( &it, list); int index = 0; as_v8_detail(log, "Cloning a list value of size %d ", as_arraylist_size(list)); while( 
as_arraylist_iterator_has_next( &it)) { as_val* arr_element = (as_val*) as_arraylist_iterator_next( &it); as_val* clone_element = asval_clone( arr_element, log); as_arraylist_set((as_arraylist*) clone_val, index++, clone_element); } as_v8_detail(log, "Cloning a list SUCCESS"); break; } case AS_MAP: { as_hashmap* map = (as_hashmap*) as_map_fromval(val); clone_val = as_map_toval( (as_map*)as_hashmap_new(as_hashmap_size(map))); as_hashmap_iterator it; as_hashmap_iterator_init( &it, map); while( as_hashmap_iterator_has_next( &it )) { as_pair* pair = (as_pair*) as_hashmap_iterator_next( &it); as_val* orig_key = as_pair_1(pair); as_val* orig_val = as_pair_2(pair); as_val* clone_key = asval_clone( orig_key, log); as_val* clone_mapval = asval_clone( orig_val, log); as_hashmap_set( (as_hashmap*) clone_val, clone_key, clone_mapval); } as_v8_detail( log, "Cloning a map SUCCESS"); break; } default: as_v8_error( log, "as_val received is UNKNOWN type"); break; } return clone_val; } bool key_clone(const as_key* src, as_key** dest, LogInfo * log, bool alloc_key) { if(src == NULL || dest== NULL ) { as_v8_info(log, "Parameter error : NULL in source/destination"); return false; } as_v8_detail(log, "Cloning the key"); as_key_value* val = src->valuep; if(val != NULL) { as_key_value* clone_val = (as_key_value*) asval_clone( (as_val*) val, log); if( alloc_key) { *dest = as_key_new_value( src->ns, src->set, (as_key_value*) clone_val); } else { as_key_init_value(*dest, src->ns, src->set, (as_key_value*) clone_val); } } else if( src->digest.init == true) { if( alloc_key) { *dest = as_key_new_digest( src->ns, src->set, src->digest.value); } else { as_key_init_digest(*dest, src->ns, src->set, src->digest.value); } } else { as_v8_detail(log, "Key has neither value nor digest "); } return true; } bool record_clone(const as_record* src, as_record** dest, LogInfo * log) { if(src == NULL || dest == NULL) { return false; } as_v8_detail( log, "Cloning the record"); (*dest)->ttl = src->ttl; 
(*dest)->gen = src->gen; as_record_iterator it; as_record_iterator_init(&it, src); while (as_record_iterator_has_next(&it)) { as_bin * bin = as_record_iterator_next(&it); as_bin_value * val = as_bin_get_value(bin); as_bin_value* clone_val = (as_bin_value*) asval_clone( (as_val*) val, log); as_v8_detail(log, "Bin Name: %s", as_bin_get_name(bin)); as_record_set( *dest, as_bin_get_name(bin), clone_val); } as_key* src_key = (as_key*) &src->key; as_key* dest_key = (as_key*) &(*dest)->key; if(src_key != NULL) { //clone the key but do not malloc the key structure, // use the structure available inside record structure. key_clone( src_key, &dest_key, log, false); } return true; } Handle<Object> error_to_jsobject(as_error * error, LogInfo * log) { HANDLESCOPE; Local<Object> err = Object::New(); if (error == NULL) { as_v8_info(log, "error(C structure) object is NULL, node.js error object cannot be constructed"); return scope.Close(err); } err->Set(String::NewSymbol("code"), Integer::New(error->code)); err->Set(String::NewSymbol("message"), error->message[0] != '\0' ? String::NewSymbol(error->message) : Null() ); err->Set(String::NewSymbol("func"), error->func ? String::NewSymbol(error->func) : Null() ); err->Set(String::NewSymbol("file"), error->file ? String::NewSymbol(error->file) : Null() ); err->Set(String::NewSymbol("line"), error->line ? 
Integer::New(error->line) : Null() ); return scope.Close(err); } Handle<Value> val_to_jsvalue(as_val * val, LogInfo * log ) { HANDLESCOPE; if ( val == NULL) { as_v8_debug(log, "value = NULL"); return scope.Close(Undefined()); } switch ( as_val_type(val) ) { case AS_NIL: { as_v8_detail(log,"value is of type as_null"); return scope.Close(Null()); } case AS_INTEGER : { as_integer * ival = as_integer_fromval(val); if ( ival ) { int64_t data = as_integer_getorelse(ival, -1); as_v8_detail(log, "value = %d ", data); return scope.Close(Number::New((double)data)); } } case AS_STRING : { as_string * sval = as_string_fromval(val); if ( sval ) { char * data = as_string_getorelse(sval, NULL); as_v8_detail(log, "value = \"%s\"", data); return scope.Close(String::NewSymbol(data)); } } case AS_BYTES : { as_bytes * bval = as_bytes_fromval(val); if ( bval ) { uint8_t * data = as_bytes_getorelse(bval, NULL); uint32_t size = as_bytes_size(bval); as_v8_detail(log, "value = <%x %x %x%s>", size > 0 ? data[0] : 0, size > 1 ? data[1] : 0, size > 2 ? data[2] : 0, size > 3 ? " ..." 
: "" ); // this constructor actually copies data into the new Buffer node::Buffer *buff = node::Buffer::New((char *) data, size); return scope.Close(buff->handle_); } } case AS_LIST : { as_arraylist* listval = (as_arraylist*) as_list_fromval(val); int size = as_arraylist_size(listval); Local<Array> jsarray = Array::New(size); for ( int i = 0; i < size; i++ ) { as_val * arr_val = as_arraylist_get(listval, i); Handle<Value> jsval = val_to_jsvalue(arr_val, log); jsarray->Set(i, jsval); } return scope.Close(jsarray); } case AS_MAP : { Local<Object> jsobj = Object::New(); as_hashmap* map = (as_hashmap*) as_map_fromval(val); as_hashmap_iterator it; as_hashmap_iterator_init(&it, map); while ( as_hashmap_iterator_has_next(&it) ) { as_pair *p = (as_pair*) as_hashmap_iterator_next(&it); as_val* key = as_pair_1(p); as_val* val = as_pair_2(p); jsobj->Set(val_to_jsvalue(key, log), val_to_jsvalue(val, log)); } return scope.Close(jsobj); } default: break; } return scope.Close(Undefined()); } Handle<Object> recordbins_to_jsobject(const as_record * record, LogInfo * log ) { HANDLESCOPE; Local<Object> bins ; if (record == NULL) { as_v8_debug( log, "Record ( C structure) is NULL, cannot form node.js record object"); return scope.Close(bins); } bins = Object::New(); as_record_iterator it; as_record_iterator_init(&it, record); while ( as_record_iterator_has_next(&it) ) { as_bin * bin = as_record_iterator_next(&it); char * name = as_bin_get_name(bin); as_val * val = (as_val *) as_bin_get_value(bin); Handle<Value> obj = val_to_jsvalue(val, log ); bins->Set(String::NewSymbol(name), obj); as_v8_detail(log, "Setting binname %s ", name); } return scope.Close(bins); } Handle<Object> recordmeta_to_jsobject(const as_record * record, LogInfo * log) { HANDLESCOPE; Local<Object> meta; if(record == NULL) { as_v8_debug( log, "Record ( C structure) is NULL, cannot form node.js metadata object"); return scope.Close(meta); } meta = Object::New(); meta->Set(String::NewSymbol("ttl"), 
Number::New((double)record->ttl)); as_v8_detail(log, "TTL of the record %d", record->ttl); meta->Set(String::NewSymbol("gen"), Integer::New(record->gen)); as_v8_detail(log, "Gen of the record %d", record->gen); return scope.Close(meta); } Handle<Object> record_to_jsobject(const as_record * record, const as_key * key, LogInfo * log ) { HANDLESCOPE; Handle<Object> okey; if ( record == NULL ) { as_v8_debug( log, "Record ( C structure) is NULL, cannot form node.js record object"); return scope.Close(okey); } okey = key_to_jsobject(key ? key : &record->key, log); Handle<Object> bins = recordbins_to_jsobject(record, log ); Handle<Object> meta = recordmeta_to_jsobject(record, log); Local<Object> rec = Object::New(); rec->Set(String::NewSymbol("key"), okey); rec->Set(String::NewSymbol("meta"), meta); rec->Set(String::NewSymbol("bins"), bins); return scope.Close(rec); } //Forward references; int extract_blob_from_jsobject( Local<Object> obj, uint8_t **data, int *len, LogInfo * log ); as_val* asval_from_jsobject( Local<Value> obj, LogInfo * log) { if(obj->IsNull()){ as_v8_detail(log, "The as_val is NULL"); return (as_val*) &as_nil; } else if(obj->IsUndefined()) { // asval_from_jsobject is called recursively. // If a bin value is undefined, it should be handled by the caller of // this function gracefully. // If an entry in a map/list is undefined the corresponding entry becomes null. 
as_v8_detail(log, "Object passed is undefined"); return (as_val*) &as_nil; } else if(obj->IsBoolean()) { as_v8_error(log, "Boolean datatype is not supported"); return NULL; } else if(obj->IsString()){ String::Utf8Value v(obj); as_string *str = as_string_new(strdup(*v), true); return (as_val*) str; } else if(obj->IsNumber()){ as_integer *num = as_integer_new(obj->NumberValue()); return (as_val*) num; } else if(obj->ToObject()->GetIndexedPropertiesExternalArrayDataType() == kExternalUnsignedByteArray) { int size ; uint8_t* data ; if (extract_blob_from_jsobject(obj->ToObject(), &data, &size, log) != AS_NODE_PARAM_OK) { as_v8_error(log, "Extractingb blob from a js object failed"); return NULL; } as_bytes *bytes = as_bytes_new_wrap( data, size, true); return (as_val*) bytes; } else if(obj->IsArray()){ Local<Array> js_list = Local<Array>::Cast(obj); as_arraylist *list = as_arraylist_new( js_list->Length(), 0); if (list == NULL) { as_v8_error(log, "List allocation failed"); return NULL; } for ( uint32_t i = 0; i < js_list->Length(); i++ ) { Local<Value> val = js_list->Get(i); as_val* asval = asval_from_jsobject(val, log); as_arraylist_append(list, asval); } return (as_val*) list; } else { const Local<Array> props = obj->ToObject()->GetOwnPropertyNames(); const uint32_t count = props->Length(); as_hashmap *map = as_hashmap_new(count); if( map == NULL){ as_v8_error(log, "Map allocation failed"); return NULL; } for ( uint32_t i = 0; i < count; i++) { const Local<Value> name = props->Get(i); const Local<Value> value = obj->ToObject()->Get(name); String::Utf8Value n(name); as_val* val = asval_from_jsobject(value, log); as_stringmap_set((as_map*) map, *n, val); } return (as_val*) map; } return NULL; } int recordbins_from_jsobject(as_record * rec, Local<Object> obj, LogInfo * log) { HANDLESCOPE; const Local<Array> props = obj->GetOwnPropertyNames(); const uint32_t count = props->Length(); as_record_init(rec, count); for ( uint32_t i = 0; i < count; i++ ) { const Local<Value> 
name = props->Get(i); const Local<Value> value = obj->Get(name); // A bin can be undefined, or an entry inside a CDT(list, map) // can be an undefined value. // If a bin is undefined, it must error out at the earliest. if( value->IsUndefined()) { as_v8_error(log, "Bin value passed for bin %s is undefined", *String::Utf8Value(name)); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } String::Utf8Value n(name); as_val* val = asval_from_jsobject( value, log); if( val == NULL) { scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } switch(as_val_type(val)){ case AS_INTEGER: as_record_set_integer(rec, *n, (as_integer*)val); break; case AS_STRING: as_record_set_string(rec, *n, (as_string*)val); break; case AS_BYTES: as_record_set_bytes(rec, *n, (as_bytes*) val); break; case AS_LIST: as_record_set_list(rec, *n, (as_list*) val); break; case AS_MAP: as_record_set_map(rec, *n, (as_map*) val); break; case AS_NIL: as_record_set_nil(rec, *n); default: break; } } scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int recordmeta_from_jsobject(as_record * rec, Local<Object> obj, LogInfo * log) { HANDLESCOPE; setTTL( obj, &rec->ttl, log); setGeneration( obj, &rec->gen, log); scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int extract_blob_from_jsobject( Local<Object> obj, uint8_t **data, int *len, LogInfo * log) { if (obj->GetIndexedPropertiesExternalArrayDataType() != kExternalUnsignedByteArray ) { as_v8_error(log, "The binary data is not of the type UnsignedBytes"); return AS_NODE_PARAM_ERR; } (*len) = obj->GetIndexedPropertiesExternalArrayDataLength(); (*data) = (uint8_t*) cf_malloc(sizeof(uint8_t) * (*len)); memcpy((*data), static_cast<uint8_t*>(obj->GetIndexedPropertiesExternalArrayData()), (*len)); return AS_NODE_PARAM_OK; } // Clone the as_val into a new val. And push the cloned value // into the queue. When the queue size reaches 1/20th of total queue size // send an async signal to v8 thread to process the records in the queue. 
// This is common function used by both scan and query. // scan populates only as_val of type record. // In case of query it can be record - in case of query without aggregation // In query aggregation, the value can be any as_val. bool async_queue_populate(const as_val* val, AsyncCallbackData * data) { if(data->result_q == NULL) { // in case result_q is not initialized, return from the callback. // But this should never happen. as_v8_error(data->log,"Internal Error: Queue not initialized"); return false; } // if the record queue is full sleep for n microseconds. if( cf_queue_sz(data->result_q) > data->max_q_size) { // why 20 - no reason right now. usleep(20); } as_val_t type = as_val_type(val); switch(type) { case AS_REC: { as_record* p_rec = as_record_fromval(val); as_record* rec = NULL; if( !p_rec) { as_v8_error(data->log, "record returned in the callback is NULL"); return false; } uint16_t numbins = as_record_numbins(p_rec); rec = as_record_new(numbins); // clone the record into Asyncdata structure here. // as_val is freed up after the callback. 
We need to retain a copy of this // as_val until we pass this structure to nodejs record_clone( p_rec, &rec, data->log); as_val* clone_rec = as_record_toval(rec); if( cf_queue_sz( data->result_q) >= data->max_q_size) { sleep(1); } cf_queue_push( data->result_q, &clone_rec); data->signal_interval++; break; } case AS_NIL: case AS_BOOLEAN: case AS_INTEGER: case AS_STRING: case AS_BYTES: case AS_LIST: case AS_MAP: { as_val* clone = asval_clone((as_val*) val, data->log); if( cf_queue_sz( data->result_q) >= data->max_q_size) { sleep(1); } cf_queue_push( data->result_q, &clone); data->signal_interval++; break; } default: as_v8_debug(data->log, "Query returned - unrecognizable type"); break; } int async_signal_sz = (data->max_q_size)/20; if ( data->signal_interval% async_signal_sz == 0) { data->signal_interval = 0; data->async_handle.data = data; async_send( &data->async_handle); } return true; } void async_queue_process(AsyncCallbackData * data) { int rv; as_val * val = NULL; // Pop each record from the queue and invoke the node callback with this record. while(data->result_q && cf_queue_sz(data->result_q) > 0) { if (cf_queue_sz(data->result_q) > data->max_q_size) { } if(data->signal_interval%10000 == 0) { v8::HeapStatistics h = v8::HeapStatistics(); v8::V8::GetHeapStatistics(&h); //printf(" heap size from v8 %d \n", h.used_heap_size()); } rv = cf_queue_pop( data->result_q, &val, CF_QUEUE_FOREVER); if( rv == CF_QUEUE_OK) { if(as_val_type(val) == AS_REC) { as_record* record = as_record_fromval(val); Handle<Value> cbargs[3] = { recordbins_to_jsobject(record, data->log), recordmeta_to_jsobject(record, data->log), key_to_jsobject(&record->key, data->log)}; as_record_destroy(record); data->data_cb->Call(Context::GetCurrent()->Global(), 3, cbargs); } else { Handle<Value> cbargs[1] = { val_to_jsvalue(val, data->log)}; as_val_destroy(val); data->data_cb->Call(Context::GetCurrent()->Global(), 1, cbargs); } } } return; } // Callback that gets invoked when an async signal is sent. 
void async_callback(uv_async_t * handle, int status) { AsyncCallbackData * data = reinterpret_cast<AsyncCallbackData *>(handle->data); if (data == NULL && data->result_q == NULL) { as_v8_error(data->log, "Internal error: data or result q is not initialized"); return; } async_queue_process(data); return; } int setTTL ( Local<Object> obj, uint32_t *ttl, LogInfo * log) { if ( obj->Has(String::NewSymbol("ttl"))) { Local<Value> v8ttl = obj->Get(String::NewSymbol("ttl")) ; if ( v8ttl->IsNumber() ) { (*ttl) = (uint32_t) V8INTEGER_TO_CINTEGER(v8ttl); } else { return AS_NODE_PARAM_ERR; } } return AS_NODE_PARAM_OK; } int setTimeOut( Local<Object> obj, uint32_t *timeout, LogInfo * log ) { HANDLESCOPE; if ( obj->Has(String::NewSymbol("timeout")) ) { Local<Value> v8timeout = obj->Get(String::NewSymbol("timeout")) ; if ( v8timeout->IsNumber() ) { (*timeout) = (uint32_t) V8INTEGER_TO_CINTEGER(v8timeout); as_v8_detail(log, "timeout value %d", *timeout); } else { as_v8_error(log, "timeout should be an integer"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } } scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int setGeneration( Local<Object> obj, uint16_t * generation, LogInfo * log ) { if ( obj->Has(String::NewSymbol("gen")) ) { Local<Value> v8gen = obj->Get(String::NewSymbol("gen")); if ( v8gen->IsNumber() ) { (*generation) = (uint16_t) V8INTEGER_TO_CINTEGER(v8gen); as_v8_detail(log, "Generation value %d ", (*generation)); } else { as_v8_error(log, "Generation should be an integer"); return AS_NODE_PARAM_ERR; } } return AS_NODE_PARAM_OK; } int setPolicyGeneric(Local<Object> obj, const char *policyname, int *policyEnumValue, LogInfo * log ) { HANDLESCOPE; if ( obj->Has(String::NewSymbol(policyname)) ) { Local<Value> policy = obj->Get(String::NewSymbol(policyname)); // Check if node layer is passing a legal integer value if (policy->IsNumber()) { *policyEnumValue = V8INTEGER_TO_CINTEGER(policy); } else { as_v8_error(log, "value for %s policy must be an integer", 
policyname); //Something other than expected type which is Number scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } } // The policyEnumValue will/should be inited to the default value by the caller // So, do not change anything if we get an non-integer from node layer scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int setKeyPolicy( Local<Object> obj, as_policy_key *keypolicy, LogInfo * log) { HANDLESCOPE; if (setPolicyGeneric(obj, "key", (int *) keypolicy, log) != AS_NODE_PARAM_OK) { scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } as_v8_detail(log, "Key policy is set to %d", *keypolicy); scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int setGenPolicy( Local<Object> obj, as_policy_gen * genpolicy, LogInfo * log) { if ( setPolicyGeneric(obj, "gen", (int *) genpolicy, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "Generation policy is set to %d", *genpolicy); return AS_NODE_PARAM_OK; } int setRetryPolicy( Local<Object> obj, as_policy_retry * retrypolicy, LogInfo * log) { if (setPolicyGeneric(obj, "retry", (int *) retrypolicy, log) != AS_NODE_PARAM_OK ) { return AS_NODE_PARAM_OK; } as_v8_detail(log, "Retry Policy is set to %d", *retrypolicy); return AS_NODE_PARAM_OK; } int setExistsPolicy( Local<Object> obj, as_policy_exists * existspolicy, LogInfo * log) { if ( setPolicyGeneric(obj, "exists", (int *) existspolicy, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "Exists policy is set to %d", *existspolicy); return AS_NODE_PARAM_OK; } int infopolicy_from_jsobject( as_policy_info * policy, Local<Object> obj, LogInfo * log) { if ( obj->IsUndefined() || obj->IsNull()) { return AS_NODE_PARAM_ERR; } as_policy_info_init(policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( obj->Has(String::NewSymbol("send_as_is")) ) { Local<Value> v8send_as_is = obj->Get(String::NewSymbol("send_as_is")); if ( v8send_as_is->IsBoolean() ) { policy->send_as_is = (bool) 
v8send_as_is->ToBoolean()->Value(); as_v8_detail(log,"info policy send_as_is is set to %s", policy->send_as_is ? "true":"false"); } else { as_v8_error(log, "send_as_is should be a boolean object"); return AS_NODE_PARAM_ERR; } } if ( obj->Has(String::NewSymbol("check_bounds")) ) { Local<Value> v8check_bounds = obj->Get(String::NewSymbol("check_bounds")); if ( v8check_bounds->IsBoolean() ) { policy->check_bounds = (bool) v8check_bounds->ToBoolean()->Value(); as_v8_detail(log, "info policy check bounds is set to %s", policy->check_bounds ? "true" : "false"); } else { as_v8_error(log, "check_bounds should be a boolean object"); return AS_NODE_PARAM_ERR; } } return AS_NODE_PARAM_OK; } int operatepolicy_from_jsobject( as_policy_operate * policy, Local<Object> obj, LogInfo * log) { HANDLESCOPE; as_policy_operate_init( policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setGenPolicy( obj, &policy->gen, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setRetryPolicy( obj, &policy->retry, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setKeyPolicy( obj, &policy->key, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int batchpolicy_from_jsobject( as_policy_batch * policy, Local<Object> obj, LogInfo * log) { HANDLESCOPE; as_policy_batch_init(policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int removepolicy_from_jsobject( as_policy_remove * policy, Local<Object> obj, LogInfo * log) { HANDLESCOPE; as_policy_remove_init(policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setGeneration( obj, &policy->generation, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setRetryPolicy( obj, &policy->retry, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setKeyPolicy( obj, &policy->key, log) 
!= AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int readpolicy_from_jsobject( as_policy_read * policy, Local<Object> obj, LogInfo * log) { HANDLESCOPE; as_policy_read_init( policy ); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setKeyPolicy( obj, &policy->key, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; as_v8_detail(log, "Parsing read policy : success"); scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int writepolicy_from_jsobject( as_policy_write * policy, Local<Object> obj, LogInfo * log) { as_policy_write_init( policy ); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setGenPolicy( obj, &policy->gen, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setRetryPolicy( obj, &policy->retry, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setKeyPolicy( obj, &policy->key, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setExistsPolicy( obj, &policy->exists, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; as_v8_detail(log, "Parsing write policy : success"); return AS_NODE_PARAM_OK; } int applypolicy_from_jsobject( as_policy_apply * policy, Local<Object> obj, LogInfo* log) { HANDLESCOPE; as_policy_apply_init( policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( setKeyPolicy( obj, &policy->key, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; as_v8_detail( log, "Parsing apply policy : success"); scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int querypolicy_from_jsobject( as_policy_query* policy, Local<Object> obj, LogInfo* log) { HANDLESCOPE; as_policy_query_init( policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; as_v8_detail( log, "Parsing query policy : success"); scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int scanpolicy_from_jsobject( as_policy_scan * 
policy, Local<Object> obj, LogInfo* log) { as_policy_scan_init( policy); if ( setTimeOut( obj, &policy->timeout, log) != AS_NODE_PARAM_OK) return AS_NODE_PARAM_ERR; if ( obj->Has(String::NewSymbol("failOnClusterChange")) ) { Local<Value> failOnClusterChange = obj->Get(String::NewSymbol("failOnClusterChange")); if ( failOnClusterChange->IsBoolean() ) { policy->fail_on_cluster_change = (bool) failOnClusterChange->ToBoolean()->Value(); as_v8_detail(log,"scan policy fail on cluster change is set to %s", policy->fail_on_cluster_change ? "true":"false"); } else { as_v8_error(log, "failOnClusterChange should be a boolean object"); return AS_NODE_PARAM_ERR; } } as_v8_detail( log, "Parsing scan policy : success"); return AS_NODE_PARAM_OK; } Handle<Object> key_to_jsobject(const as_key * key, LogInfo * log) { HANDLESCOPE; Local<Object> obj; if (key == NULL) { return scope.Close(obj); } obj = Object::New(); if ( key->ns && strlen(key->ns) > 0 ) { as_v8_debug(log, "key.ns = \"%s\"", key->ns); obj->Set(String::NewSymbol("ns"), String::NewSymbol(key->ns)); } if ( key->set && strlen(key->set) > 0 ) { as_v8_debug(log, "key.set = \"%s\"", key->set); obj->Set(String::NewSymbol("set"), String::NewSymbol(key->set)); } if ( key->valuep ) { as_val * val = (as_val *) key->valuep; as_val_t type = as_val_type(val); switch(type) { case AS_INTEGER: { as_integer * ival = as_integer_fromval(val); as_v8_debug(log, "key.key = %d", as_integer_get(ival)); obj->Set(String::NewSymbol("key"), Number::New(as_integer_get(ival))); break; } case AS_STRING: { as_string * sval = as_string_fromval(val); as_v8_debug(log, "key.key = \"%s\"", as_string_get(sval)); obj->Set(String::NewSymbol("key"), String::NewSymbol(as_string_get(sval))); break; } case AS_BYTES: { as_bytes * bval = as_bytes_fromval(val); if ( bval ) { int size = as_bytes_size(bval); as_v8_debug(log,"key.key = \"%u\"", bval->value); Buffer * buf = Buffer::New(size); memcpy(node::Buffer::Data(buf), bval->value, size); 
obj->Set(String::NewSymbol("key"), buf->handle_); break; } } default: break; } } if(key->digest.init == true) { Buffer * buf = Buffer::New(AS_DIGEST_VALUE_SIZE); memcpy(Buffer::Data(buf), key->digest.value, AS_DIGEST_VALUE_SIZE); obj->Set(String::NewSymbol("digest"), buf->handle_); } return scope.Close(obj); } Handle<Object> scaninfo_to_jsobject( const as_scan_info * info, LogInfo * log) { HANDLESCOPE; Local<Object> scaninfo; if(info == NULL) { as_v8_debug( log, "Scan Info ( C structure) is NULL, cannot form node.js scanInfo object"); return scope.Close(scaninfo); } scaninfo = Object::New(); scaninfo->Set(String::NewSymbol("progressPct"), Integer::New(info->progress_pct)); as_v8_detail(log, "Progress pct of the scan %d", info->progress_pct); scaninfo->Set(String::NewSymbol("recordScanned"), Number::New(info->records_scanned)); as_v8_detail(log, "Number of records scanned so far %d", info->records_scanned); scaninfo->Set(String::NewSymbol("status"), Integer::New(info->status)); return scope.Close(scaninfo); } int key_from_jsobject(as_key * key, Local<Object> obj, LogInfo * log) { // Every v8 object has be declared/accessed inside a scope, and the // scope has to be closed to avoid memory leak. // Open a scope HANDLESCOPE; as_namespace ns = {'\0'}; as_set set = {'\0'}; // All the v8 local variables have to declared before any of the goto // statements. V8 demands that. if(obj->IsNull()) { goto ReturnError; } // get the namespace if ( obj->Has(String::NewSymbol("ns")) ) { Local<Value> ns_obj = obj->Get(String::NewSymbol("ns")); if ( ns_obj->IsString() ) { strncpy(ns, *String::Utf8Value(ns_obj), AS_NAMESPACE_MAX_SIZE); as_v8_detail(log, "key.ns = \"%s\"", ns); if ( strlen(ns) == 0 ) { goto ReturnError; } } else { goto ReturnError; } } else { goto ReturnError; } // get the set if ( obj->Has(String::NewSymbol("set")) ) { Local<Value> set_obj = obj->Get(String::NewSymbol("set")); //check if set is string or a null value. 
if ( set_obj->IsString() ) { strncpy(set, *String::Utf8Value(set_obj), AS_SET_MAX_SIZE); as_v8_detail(log,"key.set = \"%s\"", set); if ( strlen(set) == 0 ) { as_v8_debug(log, "Set passed is empty string"); } } // null value for set is valid in a key. Any value other than null and string is not // acceptable for set else if( !set_obj->IsNull()){ goto ReturnError; } } // get the value if ( obj->Has(String::NewSymbol("key")) ) { Local<Value> val_obj = obj->Get(String::NewSymbol("key")); if(val_obj->IsNull()) { goto ReturnError; } if ( val_obj->IsString() ) { char * value = strdup(*String::Utf8Value(val_obj)); as_key_init(key, ns, set, value); as_v8_detail(log, "key.key = \"%s\"", value); ((as_string *) key->valuep)->free = true; goto ReturnOk; } else if ( val_obj->IsNumber() ) { int64_t value = V8INTEGER_TO_CINTEGER(val_obj); as_key_init_int64(key, ns, set, value); as_v8_detail(log, "key.key = %d", value); goto ReturnOk; } else if ( val_obj->IsObject() ) { Local<Object> obj = val_obj->ToObject(); int size ; uint8_t* data ; if (extract_blob_from_jsobject(obj, &data, &size, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_key_init_rawp(key, ns, set, data, size, true); as_v8_detail(log, "key.key = <%x %x %x%s>", size > 0 ? data[0] : 0, size > 1 ? data[1] : 0, size > 2 ? data[2] : 0, size > 3 ? " ..." : "" ); } } else { goto ReturnError; } // close the scope, so that garbage collector can collect the v8 variables. 
ReturnOk: scope.Close(Undefined()); return AS_NODE_PARAM_OK; ReturnError: scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } int key_from_jsarray(as_key * key, Local<Array> arr, LogInfo * log) { HANDLESCOPE; as_namespace ns = { '\0' }; as_set set = { '\0' }; Local<Value> ns_obj = arr->Get(0); Local<Value> set_obj = arr->Get(1); Local<Value> val_obj = arr->Get(2); if ( arr->Length() != 3 ) { goto Ret_Err; } if ( ns_obj->IsString() ) { strncpy(ns, *String::Utf8Value(ns_obj), AS_NAMESPACE_MAX_SIZE); } else { goto Ret_Err; } if ( strlen(ns) == 0 ) { goto Ret_Err; } if ( set_obj->IsString() ) { strncpy(set, *String::Utf8Value(set_obj), AS_SET_MAX_SIZE); } else { goto Ret_Err; } if ( strlen(set) == 0 ) { goto Ret_Err; } if ( val_obj->IsString() ) { char * value = strdup(*String::Utf8Value(val_obj)); as_key_init(key, ns, set, value); ((as_string *) key->valuep)->free = true; goto Ret_Ok; } else if ( val_obj->IsNumber() ) { int64_t value = V8INTEGER_TO_CINTEGER(val_obj); as_key_init_int64(key, ns, set, value); goto Ret_Ok; } Ret_Ok: scope.Close(Undefined()); return AS_NODE_PARAM_OK; Ret_Err: scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } int batch_from_jsarray(as_batch *batch, Local<Array> arr, LogInfo * log) { HANDLESCOPE; uint32_t capacity = arr->Length(); if(capacity > 0) { as_batch_init(batch, capacity); } else { scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } for ( uint32_t i=0; i < capacity; i++) { Local<Object> key = arr->Get(i)->ToObject(); key_from_jsobject(as_batch_keyat(batch, i), key, log); } scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int asarray_from_jsarray( as_arraylist** udfargs, Local<Array> arr, LogInfo * log) { uint32_t capacity = arr->Length(); if ( capacity <= 0) { capacity = 0; } as_v8_detail(log, "Capacity of the asarray to be initialized %d", capacity); if ( *udfargs != NULL) { as_arraylist_init( *udfargs, capacity, 0); } else { *udfargs = as_arraylist_new( capacity, 0); } for ( uint32_t i = 0; i < capacity; i++) { as_val* 
val = asval_from_jsobject( arr->Get(i), log); as_arraylist_append(*udfargs, val); } return AS_NODE_PARAM_OK; } int udfargs_from_jsobject( char** filename, char** funcname, as_arraylist** args, Local<Object> obj, LogInfo * log) { HANDLESCOPE; if(obj->IsNull()) { as_v8_error(log, "Object passed is NULL"); return AS_NODE_PARAM_ERR; } // Extract UDF module name if( obj->Has(String::NewSymbol("module"))) { Local<Value> module = obj->Get( String::NewSymbol("module")); int size = 0; if( module->IsString()) { size = module->ToString()->Length()+1; if( *filename == NULL) { *filename = (char*) cf_malloc(sizeof(char) * size); } strcpy( *filename, *String::Utf8Value(module) ); as_v8_detail(log, "Filename in the udf args is set to %s", *filename); } else { as_v8_error(log, "UDF module name should be string"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } } else { as_v8_error(log, "UDF module name should be passed to execute UDF"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } // Extract UDF function name if( obj->Has(String::NewSymbol("funcname"))) { Local<Value> v8_funcname = obj->Get( String::NewSymbol("funcname")); if ( v8_funcname->IsString()) { if( *funcname == NULL) { int size = v8_funcname->ToString()->Length(); *funcname = (char*) cf_malloc( sizeof(char) * size); } strcpy( *funcname, *String::Utf8Value( v8_funcname)); as_v8_detail(log, "The function name in the UDF args set to %s ", *funcname); } else { as_v8_error(log, "UDF function name should be string"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } } else { as_v8_error(log, "UDF function name should be passed to execute UDF"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } // Is it fair to expect an array always. For a single argument UDF invocation // should we relax. // Extract UDF arglist as_arraylist if( obj->Has( String::NewSymbol("args"))) { Local<Value> arglist = obj->Get( String::NewSymbol("args")); if ( ! 
arglist->IsArray()){ as_v8_error(log, "UDF args should be an array"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } asarray_from_jsarray( args, Local<Array>::Cast(arglist), log); as_v8_detail(log, "Parsing UDF args -- done !!!"); scope.Close(Undefined()); return AS_NODE_PARAM_OK; } else { // no argument case. Initialize array with 0 elements and invoke UDF. if (*args != NULL) { as_arraylist_init(*args, 0, 0); } scope.Close(Undefined()); return AS_NODE_PARAM_OK; } scope.Close(Undefined()); return AS_NODE_PARAM_OK; } int GetBinName( char** binName, Local<Object> obj, LogInfo * log) { if ( obj->Has(String::NewSymbol("bin"))) { Local<Value> val = obj->Get(String::NewSymbol("bin")); if ( !val->IsString()) { as_v8_error(log, "Type error in bin_name(bin should be string"); return AS_NODE_PARAM_ERR; } (*binName) = strdup(*String::Utf8Value(val)); return AS_NODE_PARAM_OK; } else { return AS_NODE_PARAM_ERR; } } Local<Value> GetBinValue( Local<Object> obj, LogInfo * log) { HANDLESCOPE; Local<Value> val = obj->Get(String::NewSymbol("value")); return scope.Close(val); } int populate_write_op ( as_operations * op, Local<Object> obj, LogInfo * log) { if ( op == NULL ) { as_v8_debug(log, "operation (C structure) passed is NULL, can't parse the V8 object"); return AS_NODE_PARAM_ERR; } char* binName; if ( GetBinName(&binName, obj, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "write operation on bin : %s", binName); Local<Value> v8val = GetBinValue(obj, log); if ( v8val->IsNumber() ) { int64_t val = v8val->NumberValue(); as_v8_detail(log, "integer value to be written %d", val); as_operations_add_write_int64(op, binName, val); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else if ( v8val->IsString() ) { char* binVal = strdup(*String::Utf8Value(v8val)); as_v8_detail(log, "String value to be written %s", binVal); as_operations_add_write_str(op, binName, binVal); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else if 
( v8val->IsObject() ) { Local<Object> binObj = v8val->ToObject(); int len ; uint8_t* data ; if ( extract_blob_from_jsobject(binObj, &data, &len, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "Blob value to be written %u ", data); as_operations_add_write_rawp(op, binName, data, len, true); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else { as_v8_debug(log, "Type error in write operation"); return AS_NODE_PARAM_ERR; } } int populate_read_op( as_operations * ops, Local<Object> obj, LogInfo * log) { if ( ops == NULL ) { as_v8_debug(log, "operation (C structure) passed is NULL, can't parse the v8 object"); return AS_NODE_PARAM_ERR; } char* binName; if ( GetBinName(&binName, obj, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "Read operation on bin :%s", binName); as_operations_add_read(ops, binName); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } int populate_incr_op ( as_operations * ops, Local<Object> obj, LogInfo * log) { if ( ops == NULL ) { as_v8_debug(log, "operation (C structure) passed is NULL, can't parse the v8 object"); return AS_NODE_PARAM_ERR; } char* binName; if ( GetBinName(&binName, obj, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "Incr operation on bin :%s", binName); Local<Value> v8val = GetBinValue(obj, log); if ( v8val->IsNumber()) { int64_t binValue = v8val->NumberValue(); as_v8_detail(log, "value to be incremented %d", binValue); as_operations_add_incr( ops, binName, binValue); if (binName != NULL) free (binName); return AS_NODE_PARAM_OK; } else { as_v8_debug(log, "Type error in incr operation"); return AS_NODE_PARAM_ERR; } } int populate_prepend_op( as_operations* ops, Local<Object> obj, LogInfo * log) { if ( ops == NULL ) { as_v8_debug(log, "operation (C structure) passed is NULL, can't parse the v8 object"); return AS_NODE_PARAM_ERR; } char* binName; if ( GetBinName(&binName, obj, log) != AS_NODE_PARAM_OK) { return 
AS_NODE_PARAM_ERR; } as_v8_detail(log, "prepend operation on bin :%s", binName); Local<Value> v8val = GetBinValue(obj, log); if ( v8val->IsString() ) { char* binVal = strdup(*String::Utf8Value(v8val)); as_v8_detail(log, "prepending string %s", binVal); as_operations_add_prepend_strp(ops, binName, binVal, true); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else if ( v8val->IsObject() ) { Local<Object> binObj = v8val->ToObject(); int len ; uint8_t* data ; if (extract_blob_from_jsobject(binObj, &data, &len, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "prepending raw bytes %u", data); as_operations_add_prepend_rawp(ops, binName, data, len, true); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else { as_v8_debug(log, "Type error in prepend operation"); return AS_NODE_PARAM_ERR; } } int populate_append_op( as_operations * ops, Local<Object> obj, LogInfo * log) { if ( ops == NULL ) { as_v8_debug(log, "operation (C structure) passed is NULL, can't parse the v8 object"); return AS_NODE_PARAM_ERR; } char* binName; if ( GetBinName(&binName, obj, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "append operation on bin :%s", binName); Local<Value> v8val = GetBinValue(obj, log); if ( v8val->IsString() ) { char* binVal = strdup(*String::Utf8Value(v8val)); as_v8_detail(log, "appending string %s", binVal); as_operations_add_append_strp(ops, binName, binVal,true); if ( binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else if ( v8val->IsObject() ) { Local<Object> binObj = v8val->ToObject(); int len ; uint8_t* data ; if (extract_blob_from_jsobject(binObj, &data, &len, log) != AS_NODE_PARAM_OK) { return AS_NODE_PARAM_ERR; } as_v8_detail(log, "appending raw bytes %u", data); as_operations_add_append_rawp(ops, binName, data, len, true); if (binName != NULL) free(binName); return AS_NODE_PARAM_OK; } else { as_v8_debug(log, "Type error in append operation"); return AS_NODE_PARAM_ERR; } } 
int populate_touch_op( as_operations* ops, LogInfo * log) { if ( ops == NULL) { as_v8_debug(log, "operation (C structure) passed is NULL, can't parse the v8 object"); return AS_NODE_PARAM_ERR; } as_operations_add_touch(ops); as_v8_debug(log, "Touch operation is set"); return AS_NODE_PARAM_OK; } int operations_from_jsarray( as_operations * ops, Local<Array> arr, LogInfo * log) { HANDLESCOPE; uint32_t capacity = arr->Length(); as_v8_detail(log, "no op operations in the array %d", capacity); if ( capacity > 0 ) { as_operations_init( ops, capacity ); } else { scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } for ( uint32_t i = 0; i < capacity; i++ ) { Local<Object> obj = arr->Get(i)->ToObject(); setTTL(obj, &ops->ttl, log); Local<Value> v8op = obj->Get(String::NewSymbol("operation")); if ( v8op->IsNumber() ) { as_operator op = (as_operator) v8op->ToInteger()->Value(); switch ( op ) { case AS_OPERATOR_WRITE: { populate_write_op(ops, obj, log); break; } case AS_OPERATOR_READ: { populate_read_op(ops, obj, log); break; } case AS_OPERATOR_INCR: { populate_incr_op(ops, obj, log); break; } case AS_OPERATOR_PREPEND: { populate_prepend_op(ops, obj, log); break; } case AS_OPERATOR_APPEND: { populate_append_op(ops, obj, log); break; } case AS_OPERATOR_TOUCH: { populate_touch_op(ops, log); break; } default : as_v8_info(log, "Operation Type not supported by the API"); scope.Close(Undefined()); return AS_NODE_PARAM_ERR; } } } scope.Close(Undefined()); return AS_NODE_PARAM_OK; }
33.145934
129
0.600825
[ "object" ]
11b2436566002ebc45f592061a6ab5cedb0e41d5
42,396
cpp
C++
fe-terrain/src/terrain.cpp
yongbinkwon/FE-terrain
c00e89c13d984f5d04aa63d8ba43d81745b77445
[ "MIT" ]
null
null
null
fe-terrain/src/terrain.cpp
yongbinkwon/FE-terrain
c00e89c13d984f5d04aa63d8ba43d81745b77445
[ "MIT" ]
null
null
null
fe-terrain/src/terrain.cpp
yongbinkwon/FE-terrain
c00e89c13d984f5d04aa63d8ba43d81745b77445
[ "MIT" ]
null
null
null
#define _USE_MATH_DEFINES #include "terrain.hpp" #include <cmath> #include <iostream> glm::vec3 gradient(GLfloat height) { glm::vec3 blendMap; //rgb where r is the 0 texture, g is 1 texture and b 2 texture if(height >= 0.75f) { blendMap = glm::vec3(0.0f, 0.0f, 1.0f); } else if(height >= 0.5f) { GLfloat linfunc = 4.0f*(height-0.5f); blendMap = glm::vec3(0.0f, 1.0f-linfunc, linfunc); } else if(height > 0.25f) { GLfloat linfunc = 4.0f*(height-0.25f); blendMap = glm::vec3(1.0f-linfunc, linfunc, 0.0f); } else { blendMap = glm::vec3(1.0f, 0.0f, 0.0f); } return blendMap; } glm::vec2 smoothening_gradient(GLfloat step) { float normalized_step = step/32.0f; glm::vec2 gradient = glm::vec2(1.0f-normalized_step, normalized_step); return gradient; } std::vector<unsigned char> noiseMap(unsigned int seed) { FastNoise noise; noise.SetNoiseType(FastNoise::SimplexFractal); noise.SetFractalOctaves(5); noise.SetFractalGain(0.7f); noise.SetFrequency(0.03f); noise.SetFractalLacunarity(3.0f); noise.SetSeed(seed); std::vector<unsigned char> pixels; for(unsigned int y=0; y<128; y++) { for(unsigned int x=0; x<128; x++) { //unsigned char noise_val = 255*((0.75*(noise.GetNoise(x, y)+1)/2)+0.25); unsigned char noise_val = 255*((0.4*(noise.GetNoise(x, y)+1)/2)+0.6); pixels.push_back(noise_val); pixels.push_back(noise_val); pixels.push_back(noise_val); pixels.push_back(255); } } return pixels; } glm::vec3 calculate_normal(Vertex A, Vertex B, Vertex C) { glm::vec3 surface_normal; GLfloat BA_x = B.x - A.x; GLfloat BA_y = B.y - A.y; GLfloat BA_z = B.z - A.z; GLfloat CA_x = C.x - A.x; GLfloat CA_y = C.y - A.y; GLfloat CA_z = C.z - A.z; surface_normal = glm::vec3((BA_y*CA_z - BA_z*CA_y), (BA_z*CA_x - BA_x*CA_z), (BA_x*CA_y - BA_y*CA_x)); return surface_normal; } //unitsize is how many coordinates each height/width unit refers to //0=mountain 1=plains HeightMap generatePerlinNoiseMap(unsigned int width, unsigned int height, unsigned int xoffset, unsigned int zoffset, GLuint quadCount, GLfloat unitsize, 
unsigned int seed, unsigned int terraintype) { std::vector<Vertex> vertices; std::vector<GLfloat> buffer; GLfloat scalingFactor; glm::vec3 tex1; glm::vec3 tex2; glm::vec3 tex3; if(terraintype == 0) { scalingFactor = 40.0f*unitsize; tex1 = glm::vec3(0.0f, 0.4f, 0.0f); tex2 = glm::vec3(0.45f, 0.45f, 0.45f); tex3 = glm::vec3(1.0f, 1.0f, 1.0f); } else { scalingFactor = 10.0f*unitsize; tex1 = glm::vec3(0.4f, 0.35f, 0.0f); tex2 = glm::vec3(0.0f, 0.4f, 0.0f); tex3 = glm::vec3(0.0f, 0.75f, 0.0f); } GLuint quadNumber = quadCount; FastNoise noise; noise.SetNoiseType(FastNoise::SimplexFractal); noise.SetFractalOctaves(5); noise.SetFractalGain(0.4f); noise.SetSeed(seed); std::vector<GLuint> indices; std::vector<Triangle> triangles; glm::vec3 vertex_normal = glm::vec3(0.0f, 0.0f, 0.0f); //vertices for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)zoffset*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(0); adjacent_triangles.push_back(1); } else if(x==width) { adjacent_triangles.push_back(2*width - 1); } else { adjacent_triangles.push_back(2*x - 1); adjacent_triangles.push_back(2*x); adjacent_triangles.push_back(2*x + 1); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); } for(unsigned int z=1; z<height; z++) { for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)(z+zoffset)*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(2*width*(z-1)); adjacent_triangles.push_back(2*width*z); adjacent_triangles.push_back(2*width*z + 1); } else if(x==width) { adjacent_triangles.push_back(2*(width*(z-1) + (width-1))); adjacent_triangles.push_back(2*(width*(z-1) + 
(width-1)) + 1); adjacent_triangles.push_back(2*(width*z + width) - 1); } else { adjacent_triangles.push_back(2*(width*(z-1) + (x-1))); adjacent_triangles.push_back(2*(width*(z-1) + (x-1)) + 1); adjacent_triangles.push_back(2*(width*(z-1) + (x-1)) + 2); adjacent_triangles.push_back(2*(width*z + (x-1)) + 1); adjacent_triangles.push_back(2*(width*z + x)); adjacent_triangles.push_back(2*(width*z + x) + 1); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); } } for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)(height+zoffset)*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(2*(width*(height-1))); } else if(x==width) { adjacent_triangles.push_back(2*(width*(height-1) + (width-1))); adjacent_triangles.push_back(2*(width*(height-1) + (width-1)) + 1); } else { adjacent_triangles.push_back(2*(width*(height-1) + (x-1))); adjacent_triangles.push_back(2*(width*(height-1) + (x-1)) + 1); adjacent_triangles.push_back(2*(width*(height-1) + (x-1)) + 2); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); } //triangles for(unsigned int z=0; z<height; z++) { for(unsigned int x=0; x<width; x++) { Vertex botleft = vertices.at((width+1)*z + x); Vertex botright = vertices.at((width+1)*z + (x+1)); Vertex topright = vertices.at((width+1)*(z+1) + (x+1)); Vertex topleft = vertices.at((width+1)*(z+1) + x); Triangle triangle1 = { topright, topleft, botleft, calculate_normal(topright, topleft, botleft), }; Triangle triangle2 = { botleft, botright, topright, calculate_normal(botleft, botright, topright), }; triangles.push_back(triangle1); triangles.push_back(triangle2); } } for(unsigned int z=0; z<height; z++) { for(unsigned int x=0; x<width; x++) { GLuint noiseNum = std::rand() % 3; //odd is 
flip, even is not flip unsigned int u_flip = std::rand(); unsigned int v_flip = std::rand(); Triangle triangle1 = triangles.at(2*(width*z + x)); Triangle triangle2 = triangles.at(2*(width*z + x) + 1); //topright Vertex topright = triangle2.C; //std::cout << "(" << topright.x << ", " << topright.z << ")"; //xyz buffer.push_back(topright.x); buffer.push_back(topright.y); buffer.push_back(topright.z); /* test.push_back(topright.x); test.push_back(topright.y); test.push_back(topright.z); */ //uv buffer.push_back((1 + u_flip)%2); buffer.push_back((1 + v_flip)%2); //blendmap glm::vec3 blendMap = gradient(topright.y/scalingFactor); glm::vec3 color = blendMap.x*tex1 + blendMap.y*tex2 + blendMap.z*tex3; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(topright.vertex_normal.x, 2) + std::pow(topright.vertex_normal.y, 2) + std::pow(topright.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<topright.adjacent_triangles.size(); i++) { topright.vertex_normal.x += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.x; topright.vertex_normal.y += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.y; topright.vertex_normal.z += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(topright.vertex_normal.x); buffer.push_back(topright.vertex_normal.y); buffer.push_back(topright.vertex_normal.z); /* test.push_back(topright.vertex_normal.at(0) + topright.x); test.push_back(topright.vertex_normal.at(1) + topright.y); test.push_back(topright.vertex_normal.at(2) + topright.z); */ //std::cout << "(" << topright.vertex_normal.at(0) << ", " << topright.vertex_normal.at(1) << ", " << topright.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //topleft Vertex topleft = triangle1.B; //std::cout << "(" << topleft.x << ", " << topleft.z << ")"; //xyz buffer.push_back(topleft.x); buffer.push_back(topleft.y); buffer.push_back(topleft.z); /* test.push_back(topleft.x); 
test.push_back(topleft.y); test.push_back(topleft.z); */ //uv buffer.push_back((0 + u_flip)%2); buffer.push_back((1 + v_flip)%2); //blendmap blendMap = gradient(topleft.y/scalingFactor); color = blendMap.x*tex1 + blendMap.y*tex2 + blendMap.z*tex3; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(topleft.vertex_normal.x, 2) + std::pow(topleft.vertex_normal.y, 2) + std::pow(topleft.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<topleft.adjacent_triangles.size(); i++) { topleft.vertex_normal.x += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.x; topleft.vertex_normal.y += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.y; topleft.vertex_normal.z += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(topleft.vertex_normal.x); buffer.push_back(topleft.vertex_normal.y); buffer.push_back(topleft.vertex_normal.z); /* test.push_back(topleft.vertex_normal.at(0) + topleft.x); test.push_back(topleft.vertex_normal.at(1) + topleft.y); test.push_back(topleft.vertex_normal.at(2) + topleft.z); */ //std::cout << "(" << topleft.vertex_normal.at(0) << ", " << topleft.vertex_normal.at(1) << ", " << topleft.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //botleft Vertex botleft = triangle2.A; //std::cout << "(" << botleft.x << ", " << botleft.z << ")"; //xyz buffer.push_back(botleft.x); buffer.push_back(botleft.y); buffer.push_back(botleft.z); /* test.push_back(botleft.x); test.push_back(botleft.y); test.push_back(botleft.z); */ //uv buffer.push_back((0 + u_flip)%2); buffer.push_back((0 + v_flip)%2); //blendmap blendMap = gradient(botleft.y/scalingFactor); color = blendMap.x*tex1 + blendMap.y*tex2 + blendMap.z*tex3; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(botleft.vertex_normal.x, 2) + std::pow(botleft.vertex_normal.y, 2) + std::pow(botleft.vertex_normal.z, 2) == 0.0f) { 
for(unsigned int i=0; i<botleft.adjacent_triangles.size(); i++) { botleft.vertex_normal.x += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.x; botleft.vertex_normal.y += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.y; botleft.vertex_normal.z += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(botleft.vertex_normal.x); buffer.push_back(botleft.vertex_normal.y); buffer.push_back(botleft.vertex_normal.z); /* test.push_back(botleft.vertex_normal.at(0) + botleft.x); test.push_back(botleft.vertex_normal.at(1) + botleft.y); test.push_back(botleft.vertex_normal.at(2) + botleft.z); */ //std::cout << "(" << botleft.vertex_normal.at(0) << ", " << botleft.vertex_normal.at(1) << ", " << botleft.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //botright Vertex botright = triangle2.B; //std::cout << "(" << botright.x << ", " << botright.z << ") "; //xyz buffer.push_back(botright.x); buffer.push_back(botright.y); buffer.push_back(botright.z); /* test.push_back(botright.x); test.push_back(botright.y); test.push_back(botright.z); */ //uv buffer.push_back((1 + u_flip)%2); buffer.push_back((0 + v_flip)%2); //blendmap blendMap = gradient(botright.y/scalingFactor); color = blendMap.x*tex1 + blendMap.y*tex2 + blendMap.z*tex3; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(botright.vertex_normal.x, 2) + std::pow(botright.vertex_normal.y, 2) + std::pow(botright.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<botright.adjacent_triangles.size(); i++) { botright.vertex_normal.x += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.x; botright.vertex_normal.y += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.y; botright.vertex_normal.z += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(botright.vertex_normal.x); buffer.push_back(botright.vertex_normal.y); 
buffer.push_back(botright.vertex_normal.z); /* test.push_back(botright.vertex_normal.at(0) + botright.x); test.push_back(botright.vertex_normal.at(1) + botright.y); test.push_back(botright.vertex_normal.at(2) + botright.z); */ //std::cout << "(" << botright.vertex_normal.at(0) << ", " << botright.vertex_normal.at(1) << ", " << botright.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); indices.push_back(4*quadNumber); indices.push_back(4*quadNumber + 1); indices.push_back(4*quadNumber + 2); indices.push_back(4*quadNumber + 2); indices.push_back(4*quadNumber + 3); indices.push_back(4*quadNumber); quadNumber++; } } HeightMap heightMap = { buffer, indices, quadNumber, }; /* for(unsigned int c = 0; c<vertices.size(); c++) { std::cout << vertices.at(c).x << ", " << vertices.at(c).y << ", " << vertices.at(c).z << " "; } */ return heightMap; } HeightMap horizontalTerrainSmoothener(unsigned int width, unsigned int height, unsigned int xoffset, unsigned int zoffset, GLuint quadCount, GLfloat unitsize, unsigned int seed, GLfloat leftScaling, GLfloat rightScaling) { std::vector<Vertex> vertices; std::vector<GLfloat> buffer; GLfloat stepsize = ((rightScaling-leftScaling)/width)*unitsize; GLfloat scalingFactor = leftScaling*unitsize; GLfloat left_original_scaling; GLfloat right_original_scaling; //textures; glm::vec3 lefttex1; glm::vec3 lefttex2; glm::vec3 lefttex3; glm::vec3 righttex1; glm::vec3 righttex2; glm::vec3 righttex3; if(leftScaling == 40.0f) { left_original_scaling = 40.0f*unitsize; lefttex1 = glm::vec3(0.0f, 0.4f, 0.0f); lefttex2 = glm::vec3(0.45f, 0.45f, 0.45f); lefttex3 = glm::vec3(1.0f, 1.0f, 1.0f); } else { left_original_scaling = 10.0f*unitsize; lefttex1 = glm::vec3(0.4f, 0.35f, 0.0f); lefttex2 = glm::vec3(0.0f, 0.4f, 0.0f); lefttex3 = glm::vec3(0.0f, 0.75f, 0.0f); } if(rightScaling == 40.0f) { right_original_scaling = 40.0f*unitsize; righttex1 = glm::vec3(0.0f, 0.4f, 0.0f); righttex2 = glm::vec3(0.45f, 0.45f, 0.45f); righttex3 = 
glm::vec3(1.0f, 1.0f, 1.0f); } else { right_original_scaling = 10.0f*unitsize; righttex1 = glm::vec3(0.4f, 0.35f, 0.0f); righttex2 = glm::vec3(0.0f, 0.4f, 0.0f); righttex3 = glm::vec3(0.0f, 0.75f, 0.0f); } GLuint quadNumber = quadCount; FastNoise noise; noise.SetNoiseType(FastNoise::SimplexFractal); noise.SetFractalOctaves(5); noise.SetFractalGain(0.4f); noise.SetSeed(seed); std::vector<GLuint> indices; std::vector<Triangle> triangles; glm::vec3 vertex_normal = glm::vec3(0.0f, 0.0f, 0.0f); //vertices for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)zoffset*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(0); adjacent_triangles.push_back(1); } else if(x==width) { adjacent_triangles.push_back(2*width - 1); } else { adjacent_triangles.push_back(2*x - 1); adjacent_triangles.push_back(2*x); adjacent_triangles.push_back(2*x + 1); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); scalingFactor += stepsize; } for(unsigned int z=1; z<height; z++) { scalingFactor = leftScaling*unitsize; for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)(z+zoffset)*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(2*width*(z-1)); adjacent_triangles.push_back(2*width*z); adjacent_triangles.push_back(2*width*z + 1); } else if(x==width) { adjacent_triangles.push_back(2*(width*(z-1) + (width-1))); adjacent_triangles.push_back(2*(width*(z-1) + (width-1)) + 1); adjacent_triangles.push_back(2*(width*z + width) - 1); } else { adjacent_triangles.push_back(2*(width*(z-1) + (x-1))); adjacent_triangles.push_back(2*(width*(z-1) + (x-1)) + 1); 
adjacent_triangles.push_back(2*(width*(z-1) + (x-1)) + 2); adjacent_triangles.push_back(2*(width*z + (x-1)) + 1); adjacent_triangles.push_back(2*(width*z + x)); adjacent_triangles.push_back(2*(width*z + x) + 1); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); scalingFactor += stepsize; } } scalingFactor = leftScaling*unitsize; for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)(height+zoffset)*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(2*(width*(height-1))); } else if(x==width) { adjacent_triangles.push_back(2*(width*(height-1) + (width-1))); adjacent_triangles.push_back(2*(width*(height-1) + (width-1)) + 1); } else { adjacent_triangles.push_back(2*(width*(height-1) + (x-1))); adjacent_triangles.push_back(2*(width*(height-1) + (x-1)) + 1); adjacent_triangles.push_back(2*(width*(height-1) + (x-1)) + 2); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); scalingFactor += stepsize; } //triangles for(unsigned int z=0; z<height; z++) { for(unsigned int x=0; x<width; x++) { Vertex botleft = vertices.at((width+1)*z + x); Vertex botright = vertices.at((width+1)*z + (x+1)); Vertex topright = vertices.at((width+1)*(z+1) + (x+1)); Vertex topleft = vertices.at((width+1)*(z+1) + x); Triangle triangle1 = { topright, topleft, botleft, calculate_normal(topright, topleft, botleft), }; Triangle triangle2 = { botleft, botright, topright, calculate_normal(botleft, botright, topright), }; triangles.push_back(triangle1); triangles.push_back(triangle2); } } for(unsigned int z=0; z<height; z++) { for(unsigned int x=0; x<width; x++) { GLuint noiseNum = std::rand() % 3; //odd is flip, even is not flip unsigned int u_flip = std::rand(); unsigned int v_flip = std::rand(); Triangle 
triangle1 = triangles.at(2*(width*z + x)); Triangle triangle2 = triangles.at(2*(width*z + x) + 1); //topright Vertex topright = triangle2.C; //std::cout << "(" << topright.x << ", " << topright.z << ")"; //xyz buffer.push_back(topright.x); buffer.push_back(topright.y); buffer.push_back(topright.z); /* test.push_back(topright.x); test.push_back(topright.y); test.push_back(topright.z); */ //uv buffer.push_back((1 + u_flip)%2); buffer.push_back((1 + v_flip)%2); //blendmap glm::vec3 left_blendMap = gradient(topright.y/left_original_scaling); glm::vec3 left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; glm::vec3 right_blendMap = gradient(topright.y/right_original_scaling); glm::vec3 right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; glm::vec2 smoothening_blendmap = smoothening_gradient(x+1); glm::vec3 color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(topright.vertex_normal.x, 2) + std::pow(topright.vertex_normal.y, 2) + std::pow(topright.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<topright.adjacent_triangles.size(); i++) { topright.vertex_normal.x += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.x; topright.vertex_normal.y += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.y; topright.vertex_normal.z += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(topright.vertex_normal.x); buffer.push_back(topright.vertex_normal.y); buffer.push_back(topright.vertex_normal.z); /* test.push_back(topright.vertex_normal.at(0) + topright.x); test.push_back(topright.vertex_normal.at(1) + topright.y); test.push_back(topright.vertex_normal.at(2) + topright.z); */ //std::cout << "(" << topright.vertex_normal.at(0) << ", " << topright.vertex_normal.at(1) << ", " << topright.vertex_normal.at(2) 
<< ") "; //which noise buffer.push_back(noiseNum); //topleft Vertex topleft = triangle1.B; //std::cout << "(" << topleft.x << ", " << topleft.z << ")"; //xyz buffer.push_back(topleft.x); buffer.push_back(topleft.y); buffer.push_back(topleft.z); /* test.push_back(topleft.x); test.push_back(topleft.y); test.push_back(topleft.z); */ //uv buffer.push_back((0 + u_flip)%2); buffer.push_back((1 + v_flip)%2); //blendmap left_blendMap = gradient(topleft.y/left_original_scaling); left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; right_blendMap = gradient(topleft.y/right_original_scaling); right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; smoothening_blendmap = smoothening_gradient(x); color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(topleft.vertex_normal.x, 2) + std::pow(topleft.vertex_normal.y, 2) + std::pow(topleft.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<topleft.adjacent_triangles.size(); i++) { topleft.vertex_normal.x += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.x; topleft.vertex_normal.y += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.y; topleft.vertex_normal.z += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(topleft.vertex_normal.x); buffer.push_back(topleft.vertex_normal.y); buffer.push_back(topleft.vertex_normal.z); /* test.push_back(topleft.vertex_normal.at(0) + topleft.x); test.push_back(topleft.vertex_normal.at(1) + topleft.y); test.push_back(topleft.vertex_normal.at(2) + topleft.z); */ //std::cout << "(" << topleft.vertex_normal.at(0) << ", " << topleft.vertex_normal.at(1) << ", " << topleft.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //botleft Vertex botleft = triangle2.A; //std::cout << "(" << botleft.x << ", " << botleft.z 
<< ")"; //xyz buffer.push_back(botleft.x); buffer.push_back(botleft.y); buffer.push_back(botleft.z); /* test.push_back(botleft.x); test.push_back(botleft.y); test.push_back(botleft.z); */ //uv buffer.push_back((0 + u_flip)%2); buffer.push_back((0 + v_flip)%2); //blendmap left_blendMap = gradient(botleft.y/left_original_scaling); left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; right_blendMap = gradient(botleft.y/right_original_scaling); right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; smoothening_blendmap = smoothening_gradient(x); color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(botleft.vertex_normal.x, 2) + std::pow(botleft.vertex_normal.y, 2) + std::pow(botleft.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<botleft.adjacent_triangles.size(); i++) { botleft.vertex_normal.x += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.x; botleft.vertex_normal.y += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.y; botleft.vertex_normal.z += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(botleft.vertex_normal.x); buffer.push_back(botleft.vertex_normal.y); buffer.push_back(botleft.vertex_normal.z); /* test.push_back(botleft.vertex_normal.at(0) + botleft.x); test.push_back(botleft.vertex_normal.at(1) + botleft.y); test.push_back(botleft.vertex_normal.at(2) + botleft.z); */ //std::cout << "(" << botleft.vertex_normal.at(0) << ", " << botleft.vertex_normal.at(1) << ", " << botleft.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //botright Vertex botright = triangle2.B; //std::cout << "(" << botright.x << ", " << botright.z << ") "; //xyz buffer.push_back(botright.x); buffer.push_back(botright.y); buffer.push_back(botright.z); /* test.push_back(botright.x); 
test.push_back(botright.y); test.push_back(botright.z); */ //uv buffer.push_back((1 + u_flip)%2); buffer.push_back((0 + v_flip)%2); //blendmap left_blendMap = gradient(botright.y/left_original_scaling); left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; right_blendMap = gradient(botright.y/right_original_scaling); right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; smoothening_blendmap = smoothening_gradient(x+1); color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(botright.vertex_normal.x, 2) + std::pow(botright.vertex_normal.y, 2) + std::pow(botright.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<botright.adjacent_triangles.size(); i++) { botright.vertex_normal.x += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.x; botright.vertex_normal.y += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.y; botright.vertex_normal.z += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(botright.vertex_normal.x); buffer.push_back(botright.vertex_normal.y); buffer.push_back(botright.vertex_normal.z); /* test.push_back(botright.vertex_normal.at(0) + botright.x); test.push_back(botright.vertex_normal.at(1) + botright.y); test.push_back(botright.vertex_normal.at(2) + botright.z); */ //std::cout << "(" << botright.vertex_normal.at(0) << ", " << botright.vertex_normal.at(1) << ", " << botright.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); indices.push_back(4*quadNumber); indices.push_back(4*quadNumber + 1); indices.push_back(4*quadNumber + 2); indices.push_back(4*quadNumber + 2); indices.push_back(4*quadNumber + 3); indices.push_back(4*quadNumber); quadNumber++; } } HeightMap heightMap = { buffer, indices, quadNumber, }; /* for(unsigned int c = 0; c<vertices.size(); c++) 
{ std::cout << vertices.at(c).x << ", " << vertices.at(c).y << ", " << vertices.at(c).z << " "; } */ return heightMap; } //leftscaling in this case is the bottom grid and rightscaling is the upper grid HeightMap verticalTerrainSmoothener(unsigned int width, unsigned int height, unsigned int xoffset, unsigned int zoffset, GLuint quadCount, GLfloat unitsize, unsigned int seed, GLfloat leftScaling, GLfloat rightScaling) { std::vector<Vertex> vertices; std::vector<GLfloat> buffer; GLfloat stepsize = ((rightScaling-leftScaling)/height)*unitsize; GLfloat scalingFactor = leftScaling*unitsize; GLfloat left_original_scaling; GLfloat right_original_scaling; //textures; glm::vec3 lefttex1; glm::vec3 lefttex2; glm::vec3 lefttex3; glm::vec3 righttex1; glm::vec3 righttex2; glm::vec3 righttex3; if(leftScaling == 40.0f) { left_original_scaling = 40.0f*unitsize; lefttex1 = glm::vec3(0.0f, 0.4f, 0.0f); lefttex2 = glm::vec3(0.45f, 0.45f, 0.45f); lefttex3 = glm::vec3(1.0f, 1.0f, 1.0f); } else { left_original_scaling = 10.0f*unitsize; lefttex1 = glm::vec3(0.4f, 0.35f, 0.0f); lefttex2 = glm::vec3(0.0f, 0.4f, 0.0f); lefttex3 = glm::vec3(0.0f, 0.75f, 0.0f); } if(rightScaling == 40.0f) { right_original_scaling = 40.0f*unitsize; righttex1 = glm::vec3(0.0f, 0.4f, 0.0f); righttex2 = glm::vec3(0.45f, 0.45f, 0.45f); righttex3 = glm::vec3(1.0f, 1.0f, 1.0f); } else { right_original_scaling = 10.0f*unitsize; righttex1 = glm::vec3(0.4f, 0.35f, 0.0f); righttex2 = glm::vec3(0.0f, 0.4f, 0.0f); righttex3 = glm::vec3(0.0f, 0.75f, 0.0f); } GLuint quadNumber = quadCount; FastNoise noise; noise.SetNoiseType(FastNoise::SimplexFractal); noise.SetFractalOctaves(5); noise.SetFractalGain(0.4f); noise.SetSeed(seed); std::vector<GLuint> indices; std::vector<Triangle> triangles; glm::vec3 vertex_normal = glm::vec3(0.0f, 0.0f, 0.0f); //vertices for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)zoffset*unitsize; GLfloat y_coord = 
scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(0); adjacent_triangles.push_back(1); } else if(x==width) { adjacent_triangles.push_back(2*width - 1); } else { adjacent_triangles.push_back(2*x - 1); adjacent_triangles.push_back(2*x); adjacent_triangles.push_back(2*x + 1); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); } for(unsigned int z=1; z<height; z++) { scalingFactor += stepsize; for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)(z+zoffset)*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(2*width*(z-1)); adjacent_triangles.push_back(2*width*z); adjacent_triangles.push_back(2*width*z + 1); } else if(x==width) { adjacent_triangles.push_back(2*(width*(z-1) + (width-1))); adjacent_triangles.push_back(2*(width*(z-1) + (width-1)) + 1); adjacent_triangles.push_back(2*(width*z + width) - 1); } else { adjacent_triangles.push_back(2*(width*(z-1) + (x-1))); adjacent_triangles.push_back(2*(width*(z-1) + (x-1)) + 1); adjacent_triangles.push_back(2*(width*(z-1) + (x-1)) + 2); adjacent_triangles.push_back(2*(width*z + (x-1)) + 1); adjacent_triangles.push_back(2*(width*z + x)); adjacent_triangles.push_back(2*(width*z + x) + 1); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); } } scalingFactor += stepsize; for(unsigned int x=0; x<=width; x++) { GLfloat x_coord = (GLfloat)(x+xoffset)*unitsize; GLfloat z_coord = -(GLfloat)(height+zoffset)*unitsize; GLfloat y_coord = scalingFactor*(noise.GetNoise(x_coord, z_coord)+1)/2; std::vector<unsigned int> adjacent_triangles; if(x==0) { adjacent_triangles.push_back(2*(width*(height-1))); } else if(x==width) { 
adjacent_triangles.push_back(2*(width*(height-1) + (width-1))); adjacent_triangles.push_back(2*(width*(height-1) + (width-1)) + 1); } else { adjacent_triangles.push_back(2*(width*(height-1) + (x-1))); adjacent_triangles.push_back(2*(width*(height-1) + (x-1)) + 1); adjacent_triangles.push_back(2*(width*(height-1) + (x-1)) + 2); } Vertex vertex = { x_coord, y_coord, z_coord, adjacent_triangles, vertex_normal, }; vertices.push_back(vertex); } //triangles for(unsigned int z=0; z<height; z++) { for(unsigned int x=0; x<width; x++) { Vertex botleft = vertices.at((width+1)*z + x); Vertex botright = vertices.at((width+1)*z + (x+1)); Vertex topright = vertices.at((width+1)*(z+1) + (x+1)); Vertex topleft = vertices.at((width+1)*(z+1) + x); Triangle triangle1 = { topright, topleft, botleft, calculate_normal(topright, topleft, botleft), }; Triangle triangle2 = { botleft, botright, topright, calculate_normal(botleft, botright, topright), }; triangles.push_back(triangle1); triangles.push_back(triangle2); } } for(unsigned int z=0; z<height; z++) { for(unsigned int x=0; x<width; x++) { GLuint noiseNum = std::rand() % 3; //odd is flip, even is not flip unsigned int u_flip = std::rand(); unsigned int v_flip = std::rand(); Triangle triangle1 = triangles.at(2*(width*z + x)); Triangle triangle2 = triangles.at(2*(width*z + x) + 1); //topright Vertex topright = triangle2.C; //std::cout << "(" << topright.x << ", " << topright.z << ")"; //xyz buffer.push_back(topright.x); buffer.push_back(topright.y); buffer.push_back(topright.z); /* test.push_back(topright.x); test.push_back(topright.y); test.push_back(topright.z); */ //uv buffer.push_back((1 + u_flip)%2); buffer.push_back((1 + v_flip)%2); //blendmap glm::vec3 left_blendMap = gradient(topright.y/left_original_scaling); glm::vec3 left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; glm::vec3 right_blendMap = gradient(topright.y/right_original_scaling); glm::vec3 right_color = 
right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; glm::vec2 smoothening_blendmap = smoothening_gradient(z+1); glm::vec3 color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(topright.vertex_normal.x, 2) + std::pow(topright.vertex_normal.y, 2) + std::pow(topright.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<topright.adjacent_triangles.size(); i++) { topright.vertex_normal.x += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.x; topright.vertex_normal.y += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.y; topright.vertex_normal.z += triangles.at(topright.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(topright.vertex_normal.x); buffer.push_back(topright.vertex_normal.y); buffer.push_back(topright.vertex_normal.z); /* test.push_back(topright.vertex_normal.at(0) + topright.x); test.push_back(topright.vertex_normal.at(1) + topright.y); test.push_back(topright.vertex_normal.at(2) + topright.z); */ //std::cout << "(" << topright.vertex_normal.at(0) << ", " << topright.vertex_normal.at(1) << ", " << topright.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //topleft Vertex topleft = triangle1.B; //std::cout << "(" << topleft.x << ", " << topleft.z << ")"; //xyz buffer.push_back(topleft.x); buffer.push_back(topleft.y); buffer.push_back(topleft.z); /* test.push_back(topleft.x); test.push_back(topleft.y); test.push_back(topleft.z); */ //uv buffer.push_back((0 + u_flip)%2); buffer.push_back((1 + v_flip)%2); //blendmap left_blendMap = gradient(topleft.y/left_original_scaling); left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; right_blendMap = gradient(topleft.y/right_original_scaling); right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; 
smoothening_blendmap = smoothening_gradient(z+1); color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(topleft.vertex_normal.x, 2) + std::pow(topleft.vertex_normal.y, 2) + std::pow(topleft.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<topleft.adjacent_triangles.size(); i++) { topleft.vertex_normal.x += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.x; topleft.vertex_normal.y += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.y; topleft.vertex_normal.z += triangles.at(topleft.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(topleft.vertex_normal.x); buffer.push_back(topleft.vertex_normal.y); buffer.push_back(topleft.vertex_normal.z); /* test.push_back(topleft.vertex_normal.at(0) + topleft.x); test.push_back(topleft.vertex_normal.at(1) + topleft.y); test.push_back(topleft.vertex_normal.at(2) + topleft.z); */ //std::cout << "(" << topleft.vertex_normal.at(0) << ", " << topleft.vertex_normal.at(1) << ", " << topleft.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //botleft Vertex botleft = triangle2.A; //std::cout << "(" << botleft.x << ", " << botleft.z << ")"; //xyz buffer.push_back(botleft.x); buffer.push_back(botleft.y); buffer.push_back(botleft.z); /* test.push_back(botleft.x); test.push_back(botleft.y); test.push_back(botleft.z); */ //uv buffer.push_back((0 + u_flip)%2); buffer.push_back((0 + v_flip)%2); //blendmap left_blendMap = gradient(botleft.y/left_original_scaling); left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; right_blendMap = gradient(botleft.y/right_original_scaling); right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; smoothening_blendmap = smoothening_gradient(z); color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; 
buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(botleft.vertex_normal.x, 2) + std::pow(botleft.vertex_normal.y, 2) + std::pow(botleft.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<botleft.adjacent_triangles.size(); i++) { botleft.vertex_normal.x += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.x; botleft.vertex_normal.y += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.y; botleft.vertex_normal.z += triangles.at(botleft.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(botleft.vertex_normal.x); buffer.push_back(botleft.vertex_normal.y); buffer.push_back(botleft.vertex_normal.z); /* test.push_back(botleft.vertex_normal.at(0) + botleft.x); test.push_back(botleft.vertex_normal.at(1) + botleft.y); test.push_back(botleft.vertex_normal.at(2) + botleft.z); */ //std::cout << "(" << botleft.vertex_normal.at(0) << ", " << botleft.vertex_normal.at(1) << ", " << botleft.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); //botright Vertex botright = triangle2.B; //std::cout << "(" << botright.x << ", " << botright.z << ") "; //xyz buffer.push_back(botright.x); buffer.push_back(botright.y); buffer.push_back(botright.z); /* test.push_back(botright.x); test.push_back(botright.y); test.push_back(botright.z); */ //uv buffer.push_back((1 + u_flip)%2); buffer.push_back((0 + v_flip)%2); //blendmap left_blendMap = gradient(botright.y/left_original_scaling); left_color = left_blendMap.x*lefttex1 + left_blendMap.y*lefttex2 + left_blendMap.z*lefttex3; right_blendMap = gradient(botright.y/right_original_scaling); right_color = right_blendMap.x*righttex1 + right_blendMap.y*righttex2 + right_blendMap.z*righttex3; smoothening_blendmap = smoothening_gradient(z); color = smoothening_blendmap.x*left_color + smoothening_blendmap.y*right_color; buffer.push_back(color.x); buffer.push_back(color.y); buffer.push_back(color.z); //normal if(std::pow(botright.vertex_normal.x, 2) 
+ std::pow(botright.vertex_normal.y, 2) + std::pow(botright.vertex_normal.z, 2) == 0.0f) { for(unsigned int i=0; i<botright.adjacent_triangles.size(); i++) { botright.vertex_normal.x += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.x; botright.vertex_normal.y += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.y; botright.vertex_normal.z += triangles.at(botright.adjacent_triangles.at(i)).surface_normal.z; } } buffer.push_back(botright.vertex_normal.x); buffer.push_back(botright.vertex_normal.y); buffer.push_back(botright.vertex_normal.z); /* test.push_back(botright.vertex_normal.at(0) + botright.x); test.push_back(botright.vertex_normal.at(1) + botright.y); test.push_back(botright.vertex_normal.at(2) + botright.z); */ //std::cout << "(" << botright.vertex_normal.at(0) << ", " << botright.vertex_normal.at(1) << ", " << botright.vertex_normal.at(2) << ") "; //which noise buffer.push_back(noiseNum); indices.push_back(4*quadNumber); indices.push_back(4*quadNumber + 1); indices.push_back(4*quadNumber + 2); indices.push_back(4*quadNumber + 2); indices.push_back(4*quadNumber + 3); indices.push_back(4*quadNumber); quadNumber++; } } HeightMap heightMap = { buffer, indices, quadNumber, }; /* for(unsigned int c = 0; c<vertices.size(); c++) { std::cout << vertices.at(c).x << ", " << vertices.at(c).y << ", " << vertices.at(c).z << " "; } */ return heightMap; }
35.537301
222
0.675771
[ "vector" ]
11b2d991bee0a91ebd5c19a1a0baae485c0621e8
6,514
hpp
C++
include/lbann/execution_algorithms/batch_functional_inference_algorithm.hpp
LLNL/LBANN
8bcc5d461e52de70e329d73081ca7eee3e5c580a
[ "Apache-2.0" ]
null
null
null
include/lbann/execution_algorithms/batch_functional_inference_algorithm.hpp
LLNL/LBANN
8bcc5d461e52de70e329d73081ca7eee3e5c580a
[ "Apache-2.0" ]
null
null
null
include/lbann/execution_algorithms/batch_functional_inference_algorithm.hpp
LLNL/LBANN
8bcc5d461e52de70e329d73081ca7eee3e5c580a
[ "Apache-2.0" ]
null
null
null
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #ifndef LBANN_BATCH_INFERENCE_ALGORITHM_HPP #define LBANN_BATCH_INFERENCE_ALGORITHM_HPP #include "lbann/callbacks/callback.hpp" #include "lbann/data_coordinator/data_coordinator.hpp" #include "lbann/execution_algorithms/sgd_execution_context.hpp" #include "lbann/layers/data_type_layer.hpp" #include "lbann/layers/io/input_layer.hpp" #include "lbann/models/model.hpp" namespace lbann { /** @brief Class for LBANN batch inference algorithms. * * This execution algorithm is meant for running inference using a trained * model and samples passed by the user from an external application. The * algorithm currently assumes that there is only 1 input layer in the model, * and the output layer is a softmax layer. */ class batch_functional_inference_algorithm { public: /** Constructor. 
*/ batch_functional_inference_algorithm() {}; /** Copy constructor. */ batch_functional_inference_algorithm(const batch_functional_inference_algorithm& other) = default; /** Copy assignment operator. */ batch_functional_inference_algorithm& operator=(const batch_functional_inference_algorithm& other) = default; /** Move constructor. */ batch_functional_inference_algorithm(batch_functional_inference_algorithm&& other) = default; /** Move assignment operator. */ batch_functional_inference_algorithm& operator=(batch_functional_inference_algorithm&& other) = default; /** Destructor. */ virtual ~batch_functional_inference_algorithm() = default; std::string get_name() const { return "batch_functional_inference"; } std::string get_type() const { return "batch_functional_inference"; } // =========================================== // Execution // =========================================== /** @brief Run model inference on samples and return predicted categories. * @param[in] model A trained model * @param[in] samples A distributed matrix containing samples for model input * @param[in] mbs The max mini-batch size * @return Matrix of predicted labels (by index) */ template <typename DataT, El::Dist CDist, El::Dist RDist, El::DistWrap DistView, El::Device Device> El::Matrix<int, El::Device::CPU> infer(observer_ptr<model> model, El::DistMatrix<DataT, CDist, RDist, DistView, Device> const& samples, size_t mbs) { if (mbs <= 0) { LBANN_ERROR("mini-batch size must be larger than 0"); } // Make matrix for returning predicted labels size_t samples_size = samples.Height(); El::Matrix<int, El::Device::CPU> labels(samples_size, 1); // Create an SGD_execution_context so that layer.forward_prop can get the // mini_batch_size - This should be fixed in the future, when SGD is not so // hard-coded into the model & layers auto c = SGDExecutionContext(execution_mode::inference, mbs); model->reset_mode(c, execution_mode::inference); // Infer on mini batches for (size_t i = 0; i < 
samples_size; i+=mbs) { size_t mb_idx = std::min(i+mbs, samples_size); auto mb_range = El::IR(i, mb_idx); auto mb_samples = El::LockedView(samples, mb_range, El::ALL); auto mb_labels = El::View(labels, mb_range, El::ALL); infer_mini_batch(*model, mb_samples); get_labels(*model, mb_labels); } return labels; } protected: /** @brief Run model inference on a single mini-batch of samples * This method takes a mini-batch of samples, inserts them into the input * layer of the model, and runs forward prop on the model. * @param[in] model A trained model * @param[in] samples A distributed matrix containing samples for model input */ template <typename DataT, El::Dist CDist, El::Dist RDist, El::DistWrap DistView, El::Device Device> void infer_mini_batch(model& model, El::DistMatrix<DataT, CDist, RDist, DistView, Device> const& samples) { for (int i=0; i < model.get_num_layers(); i++) { auto& l = model.get_layer(i); // Insert samples into the input layer if (l.get_type() == "input") { auto& il = dynamic_cast<input_layer<DataType>&>(l); il.set_samples(samples); } } model.forward_prop(execution_mode::inference); } /** @brief Finds the predicted category in a models softmax layer * @param[in] model A model that has been used for inference * @param[in] labels A matrix to place predicted category labels */ void get_labels(model& model,\ El::Matrix<int, El::Device::CPU> &labels) { int pred_label = 0; float max, col_value; for (const auto* l : model.get_layers()) { // Find the output layer if (l->get_type() == "softmax") { auto const& dtl = dynamic_cast<lbann::data_type_layer<float> const&>(*l); const auto& outputs = dtl.get_activations(); // Find the prediction for each sample int col_count = outputs.Width(); int row_count = outputs.Height(); for (int i=0; i<col_count; i++) { max = 0; for (int j=0; j<row_count; j++) { col_value = outputs.Get(i, j); if (col_value > max) { max = col_value; pred_label = j; } } labels(i) = pred_label; } } } } }; } // namespace lbann #endif // 
LBANN_BATCH_INFERENCE_ALGORITHM_HPP
38.093567
111
0.662573
[ "model" ]
11b38ea6c60a77531fb29727cad63b9820016fec
9,569
hpp
C++
include/bvh.hpp
liuwei792966953/stitch
108e3dbd3410331c741c7cb166f93bbffa11b369
[ "Zlib" ]
1
2021-01-23T05:20:09.000Z
2021-01-23T05:20:09.000Z
include/bvh.hpp
liuwei792966953/stitch
108e3dbd3410331c741c7cb166f93bbffa11b369
[ "Zlib" ]
null
null
null
include/bvh.hpp
liuwei792966953/stitch
108e3dbd3410331c741c7cb166f93bbffa11b369
[ "Zlib" ]
null
null
null
// Copyright (C) 2019 David Harmon and Artificial Necessity // This code distributed under zlib, see LICENSE.txt for terms. #pragma once #include <array> #include <iostream> #include "kdop.hpp" #include <numeric> #include <stack> #include <unordered_set> #include <vector> template <typename Type> class BoundingVolume : public Type { public: BoundingVolume() : Type(), data_index(-1) { }; // -1 = inactive node, 0 = internal, > 0 = leaf int data_index; }; // N = branching factor template<size_t N> class BVH { public: using BV = BoundingVolume<kDOP26d>; void init(const Eigen::MatrixXi& F, const Eigen::VectorXd& x, double h=0.0) { // Each triangle is a leaf node, so assuming a full well-balanced // tree, we need at most this many nodes to hold the tree // (some nodes will be inactive) height_ = std::ceil(std::log(F.rows()) / std::log(N)) + 1; bvs_.resize(std::pow(N, height_) - 1); std::cout << F.rows() << "; " << bvs_.size() << "; " << height_ << std::endl; // To begin, construct a BV around all faces, then recurse down std::vector<BV> leaves(F.rows()); for (int i=0; i<F.rows(); i++) { leaves[i].data_index = i + 1; for (int j=0; j<3; j++) { leaves[i].extend(x.segment<3>(3*F(i,j))); } leaves[i].extendK(leaves[i].min() - BV::VectorK::Constant(h)); leaves[i].extendK(leaves[i].max() + BV::VectorK::Constant(h)); } build(0, leaves); // Assign each vertex to a "representative triangle". It can only belong // to one std::unordered_set<int> assigned; rep_tri_vertices_.resize(F.rows(), { -1, -1, -1 }); for (int i=0; i<F.rows(); i++) { // Find first available slot for this face size_t idx = 0; for (int j=0; j<3; j++) { int vidx = F(i,j); if (!assigned.count(vidx)) { rep_tri_vertices_[i][idx++] = vidx; assigned.insert(vidx); } } } } void build(int idx, const std::vector<BV>& leaves, int height=0) { if (idx >= bvs_.size()) std::cout << "uh-oh!!! " << idx << std::endl; // Leaf node? 
Don't recurse anymore if (leaves.size() == 1) { bvs_[idx] = leaves.front(); return; } bvs_[idx].data_index = 0; // 0 => internal node // Step 1: Construct a BV around all leaves at the requested index bvs_[idx].setEmpty(); for (const BV& leaf : leaves) { bvs_[idx].extend(leaf); } // Step 2: Split leaves up into lists // TODO: Make the "short" side on the right, then we can resize bvs_ // and reclaim some space int split_idx; (bvs_[idx].max() - bvs_[idx].min()).maxCoeff(&split_idx); std::vector<std::pair<double, int>> vals; for (int i=0; i<leaves.size(); i++) { // Sort by midpoint const double mid = (leaves[i].min()[split_idx] + leaves[i].max()[split_idx]) * 0.5; vals.push_back({ mid, i }); } std::sort(vals.begin(), vals.end(), [](const auto& p1, const auto& p2) { return p1.first < p2.first; }); std::vector<BV> split_idxs[N]; const size_t group_size = int(std::ceil(double(leaves.size()) / double(N))); for (int i=0; i<N; i++) { for (size_t j=i*group_size; j<std::min(leaves.size(), (i+1)*group_size); j++) { split_idxs[i].push_back(leaves[vals[j].second]); } } if ((height % 2) != 0) { std::swap(split_idxs[0], split_idxs[1]); } // Step 3: Recursively construct hierarchy for (size_t i=0; i<N; i++) { if (!split_idxs[i].empty()) { build(get_child_index(idx, i), split_idxs[i], height+1); } } } void refit(const Eigen::MatrixXi& F, const Eigen::VectorXd& x, double h) { /* size_t idx = bvs_.size() - 1; for (auto it=bvs_.rbegin(); it!=bvs_.rend(); ++it) { if (it->data_index != -1) { it->setEmpty(); if (it->data_index > 0) { // Refit around face (data_index-1) for (int j=0; j<3; j++) { it->extend(x.segment<3>(3*F(it->data_index-1,j))); } it->extendK(it->min() - BV::VectorK::Constant(h)); it->extendK(it->max() + BV::VectorK::Constant(h)); } else { // Internal node. 
Refit around children for (int i=0; i<N; ++i) { if (bvs_[get_child_index(idx, i)].data_index != -1) { it->extend(bvs_[get_child_index(idx, i)]); } } } } idx--; } */ for (int l=0; l<height_; l++) { // The nodes at depth l are N^l-1 through N^(l+1)-1, and // can all be refitted in parallel #pragma omp parallel for for (int i=std::pow(N, l) - 1; i<std::pow(N, l+1) - 1; i++) { refit_node(i, F, x, h); } } } template <typename Derived, typename F> void visit(const Eigen::MatrixBase<Derived>& pt, F func) const { std::stack<int> q; q.push(0); // Start with root node while (!q.empty()) { int idx = q.top(); q.pop(); if (bvs_[idx].contains(pt)) { // Leaf node? if (bvs_[idx].data_index > 0) { func(bvs_[idx].data_index - 1); } // Valid node? else if (bvs_[idx].data_index != -1) { for (int i=0; i<N; i++) { q.push(get_child_index(idx, i)); } } } } } template <typename F> void self_intersect(const Eigen::MatrixXi& faces, F f) const { auto go = [&](int i1, int i2) { std::stack<std::pair<int, int>> s; s.push({ i1, i2 }); //s.push({ 0, 0 }); while (!s.empty()) { auto idxs = s.top(); s.pop(); if (bvs_[idxs.first].intersects(bvs_[idxs.second])) { int d1 = bvs_[idxs.first].data_index; int d2 = bvs_[idxs.second].data_index; if (d1 > 0 && d2 > 0) { // Both leaf nodes for (int i=0; i<3; i++) { if (rep_tri_vertices_[d1-1][i] != -1) { f(rep_tri_vertices_[d1-1][i], d2-1); } if (rep_tri_vertices_[d2-1][i] != -1) { f(rep_tri_vertices_[d2-1][i], d1-1); } } } else if (d1 > 0 && d2 != -1) { // First is leaf node, other is internal, // so just descend down the second for (int i=0; i<N; i++) { s.push({ idxs.first, get_child_index(idxs.second, i) }); } } else if (d1 != -1 && d2 > 0) { // First is internal node, other is leaf, // so just descend down the first for (int i=0; i<N; i++) { s.push({ get_child_index(idxs.first, i), idxs.second }); } } else if (d1 != -1 && d2 != -1) { // Both internal nodes, intersect children pairs for (int i=0; i<N; i++) { for (int j=(idxs.first == idxs.second ? 
i : 0); j<N; j++) { s.push({ get_child_index(idxs.first, i), get_child_index(idxs.second, j) }); } } } } } }; std::array<std::pair<int, int>, N*N> pairs; for (size_t i=0; i<N; i++) { for (size_t j=0; j<N; j++) { pairs[i*N+j] = std::make_pair(1+i, 1+j); } } #pragma omp parallel for for (size_t i=0; i<pairs.size(); i++) { go(pairs[i].first, pairs[i].second); } } protected: void refit_node(int idx, const Eigen::MatrixXi& F, const Eigen::VectorXd& x, double h) { if (bvs_[idx].data_index == -1) { return; } bvs_[idx].setEmpty(); if (bvs_[idx].data_index > 0) { // Refit around face (data_index-1) for (int j=0; j<3; j++) { bvs_[idx].extend(x.segment<3>(3*F(bvs_[idx].data_index-1, j))); } bvs_[idx].extendK(bvs_[idx].min() - BV::VectorK::Constant(h)); bvs_[idx].extendK(bvs_[idx].max() + BV::VectorK::Constant(h)); } else { // Internal node. Refit around children for (int i=0; i<N; ++i) { if (bvs_[get_child_index(idx, i)].data_index != -1) { bvs_[idx].extend(bvs_[get_child_index(idx, i)]); } } } } int get_child_index(int idx, int child) const { return N*idx+child+1; } protected: std::vector<BV> bvs_; double height_ = 0; std::vector<std::array<int, 3>> rep_tri_vertices_; };
34.175
104
0.45637
[ "vector" ]
11b440d2c73776e4e11b8dd7eee8c1985bc87b9b
29,559
cpp
C++
src/Open3D/IO/Sensor/AzureKinect/K4aPlugin.cpp
OSSDC/Open3D
b7516161874a90b330545049344541e1c731468c
[ "MIT" ]
1
2019-09-06T12:20:56.000Z
2019-09-06T12:20:56.000Z
src/Open3D/IO/Sensor/AzureKinect/K4aPlugin.cpp
OSSDC/Open3D
b7516161874a90b330545049344541e1c731468c
[ "MIT" ]
null
null
null
src/Open3D/IO/Sensor/AzureKinect/K4aPlugin.cpp
OSSDC/Open3D
b7516161874a90b330545049344541e1c731468c
[ "MIT" ]
null
null
null
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #include <k4a/k4a.h> #include <k4arecord/playback.h> #include <k4arecord/record.h> #include <cstdlib> #include <cstring> #include <unordered_map> #include <vector> #ifdef _WIN32 #include <windows.h> #else #include <dlfcn.h> #include <link.h> #endif #include "Open3D/IO/Sensor/AzureKinect/K4aPlugin.h" #include "Open3D/IO/Sensor/AzureKinect/PluginMacros.h" #include "Open3D/Utility/Console.h" #include "Open3D/Utility/Helper.h" namespace open3d { namespace io { namespace k4a_plugin { #ifdef _WIN32 // clang-format off static const std::vector<std::string> k4a_lib_path_hints = { "", "C:\\Program Files\\Azure Kinect SDK v1.2.0\\sdk\\windows-desktop\\amd64\\release\\bin\\" }; // clang-format on static const std::string k4a_lib_name = "k4a.dll"; static const std::string k4arecord_lib_name = "k4arecord.dll"; static HINSTANCE GetDynamicLibHandle(const std::string& lib_name) { static std::unordered_map<std::string, HINSTANCE> map_lib_name_to_handle; if (map_lib_name_to_handle.count(lib_name) == 0) { HINSTANCE handle = NULL; for (const std::string& k4a_lib_path_hint : k4a_lib_path_hints) { std::string full_path = k4a_lib_path_hint + lib_name; handle = LoadLibrary(TEXT(full_path.c_str())); if (handle != NULL) { utility::LogDebug("Loaded {}\n", full_path); break; } } if (handle == NULL) { utility::LogFatal("Cannot load {}\n", lib_name); } map_lib_name_to_handle[lib_name] = handle; } return map_lib_name_to_handle.at(lib_name); } #define DEFINE_BRIDGED_FUNC_WITH_COUNT(lib_name, return_type, f_name, \ num_args, ...) 
\ return_type f_name(EXTRACT_TYPES_PARAMS(num_args, __VA_ARGS__)) { \ typedef return_type (*f_type)( \ EXTRACT_TYPES_PARAMS(num_args, __VA_ARGS__)); \ static f_type f = nullptr; \ \ if (!f) { \ f = (f_type)GetProcAddress(GetDynamicLibHandle(lib_name), \ #f_name); \ if (f == nullptr) { \ utility::LogFatal("Cannot load func {}\n", #f_name); \ } else { \ utility::LogInfo("Loaded func {}\n", #f_name); \ } \ } \ return f(EXTRACT_PARAMS(num_args, __VA_ARGS__)); \ } #else static const std::string k4a_lib_name = "libk4a.so"; static const std::string k4arecord_lib_name = "libk4arecord.so"; static void* GetDynamicLibHandle(const std::string& lib_name) { static std::unordered_map<std::string, void*> map_lib_name_to_handle; if (map_lib_name_to_handle.count(lib_name) == 0) { // Hack to support Ubuntu 16.04 K4A library loading // // Dependency: // In __init__.py Python file, one of os.environ['LD_LIBRARY_PATH'] is // set to contain libk4a.so, libk4arecord.so and libdepthengine.so. // // Normally LD_LIBRARY_PATH cannot be set at runtime (only possible at // program load time). Here we explicitly try to load from // LD_LIBRARY_PATH. 
std::vector<std::string> k4a_lib_path_hints; if (const char* ld_paths_c = std::getenv("LD_LIBRARY_PATH")) { utility::SplitString(k4a_lib_path_hints, ld_paths_c, ":", true); } k4a_lib_path_hints.insert(k4a_lib_path_hints.begin(), ""); void* handle = nullptr; std::string full_path; for (const std::string& k4a_lib_path_hint : k4a_lib_path_hints) { if (k4a_lib_path_hint == "") { full_path = lib_name; } else { full_path = k4a_lib_path_hint + "/" + lib_name; } handle = dlopen(full_path.c_str(), RTLD_NOW); if (handle != NULL) { break; } } if (!handle) { utility::LogFatal("Cannot load {}\n", dlerror()); } else { utility::LogInfo("Loaded {}\n", full_path); struct link_map* map = nullptr; if (!dlinfo(handle, RTLD_DI_LINKMAP, &map)) { if (map != nullptr) { utility::LogInfo("Library path {}\n", map->l_name); } else { utility::LogWarning("Cannot get link_map\n"); } } else { utility::LogWarning("Cannot get dlinfo\n"); } } map_lib_name_to_handle[lib_name] = handle; } return map_lib_name_to_handle.at(lib_name); } #define DEFINE_BRIDGED_FUNC_WITH_COUNT(lib_name, return_type, f_name, \ num_args, ...) \ return_type f_name(EXTRACT_TYPES_PARAMS(num_args, __VA_ARGS__)) { \ typedef return_type (*f_type)( \ EXTRACT_TYPES_PARAMS(num_args, __VA_ARGS__)); \ static f_type f = nullptr; \ \ if (!f) { \ f = (f_type)dlsym(GetDynamicLibHandle(lib_name), #f_name); \ if (!f) { \ utility::LogFatal("Cannot load {}: {}\n", #f_name, dlerror()); \ } \ } \ return f(EXTRACT_PARAMS(num_args, __VA_ARGS__)); \ } #endif #define DEFINE_BRIDGED_FUNC(lib_name, return_type, f_name, ...) 
\ DEFINE_BRIDGED_FUNC_WITH_COUNT(lib_name, return_type, f_name, \ COUNT_ARGS(__VA_ARGS__), __VA_ARGS__) //////////////////////////////////////////////////////////////////////////////// DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_create, const char*, path, k4a_device_t, device, const k4a_device_configuration_t, device_config, k4a_record_t*, recording_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_add_tag, k4a_record_t, recording_handle, const char*, name, const char*, value) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_add_imu_track, k4a_record_t, recording_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_write_header, k4a_record_t, recording_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_write_capture, k4a_record_t, recording_handle, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_write_imu_sample, k4a_record_t, recording_handle, k4a_imu_sample_t, imu_sample) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_record_flush, k4a_record_t, recording_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, void, k4a_record_close, k4a_record_t, recording_handle) //////////////////////////////////////////////////////////////////////////////// DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_playback_open, const char*, path, k4a_playback_t*, playback_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_buffer_result_t, k4a_playback_get_raw_calibration, k4a_playback_t, playback_handle, uint8_t*, data, size_t*, data_size) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_playback_get_calibration, k4a_playback_t, playback_handle, k4a_calibration_t*, calibration) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_playback_get_record_configuration, k4a_playback_t, playback_handle, k4a_record_configuration_t*, config) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_buffer_result_t, 
k4a_playback_get_tag, k4a_playback_t, playback_handle, const char*, name, char*, value, size_t*, value_size) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_playback_set_color_conversion, k4a_playback_t, playback_handle, k4a_image_format_t, target_format) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_stream_result_t, k4a_playback_get_next_capture, k4a_playback_t, playback_handle, k4a_capture_t*, capture_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_stream_result_t, k4a_playback_get_previous_capture, k4a_playback_t, playback_handle, k4a_capture_t*, capture_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_stream_result_t, k4a_playback_get_next_imu_sample, k4a_playback_t, playback_handle, k4a_imu_sample_t*, imu_sample) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_stream_result_t, k4a_playback_get_previous_imu_sample, k4a_playback_t, playback_handle, k4a_imu_sample_t*, imu_sample) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, k4a_result_t, k4a_playback_seek_timestamp, k4a_playback_t, playback_handle, int64_t, offset_usec, k4a_playback_seek_origin_t, origin) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, uint64_t, k4a_playback_get_last_timestamp_usec, k4a_playback_t, playback_handle) DEFINE_BRIDGED_FUNC(k4arecord_lib_name, void, k4a_playback_close, k4a_playback_t, playback_handle) //////////////////////////////////////////////////////////////////////////////// DEFINE_BRIDGED_FUNC(k4a_lib_name, uint32_t, k4a_device_get_installed_count) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_set_debug_message_handler, k4a_logging_message_cb_t*, message_cb, void*, message_cb_context, k4a_log_level_t, min_level) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_open, uint32_t, index, k4a_device_t*, device_handle) DEFINE_BRIDGED_FUNC( k4a_lib_name, void, k4a_device_close, k4a_device_t, device_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_wait_result_t, k4a_device_get_capture, k4a_device_t, device_handle, k4a_capture_t*, capture_handle, int32_t, timeout_in_ms) 
DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_wait_result_t, k4a_device_get_imu_sample, k4a_device_t, device_handle, k4a_imu_sample_t*, imu_sample, int32_t, timeout_in_ms) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_capture_create, k4a_capture_t*, capture_handle) DEFINE_BRIDGED_FUNC( k4a_lib_name, void, k4a_capture_release, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_capture_reference, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_image_t, k4a_capture_get_color_image, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_image_t, k4a_capture_get_depth_image, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_image_t, k4a_capture_get_ir_image, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_capture_set_color_image, k4a_capture_t, capture_handle, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_capture_set_depth_image, k4a_capture_t, capture_handle, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_capture_set_ir_image, k4a_capture_t, capture_handle, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_capture_set_temperature_c, k4a_capture_t, capture_handle, float, temperature_c) DEFINE_BRIDGED_FUNC(k4a_lib_name, float, k4a_capture_get_temperature_c, k4a_capture_t, capture_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_image_create, k4a_image_format_t, format, int, width_pixels, int, height_pixels, int, stride_bytes, k4a_image_t*, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_image_create_from_buffer, k4a_image_format_t, format, int, width_pixels, int, height_pixels, int, stride_bytes, uint8_t*, buffer, size_t, buffer_size, k4a_memory_destroy_cb_t*, buffer_release_cb, void*, buffer_release_cb_context, k4a_image_t*, image_handle) DEFINE_BRIDGED_FUNC( k4a_lib_name, uint8_t*, k4a_image_get_buffer, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC( k4a_lib_name, 
size_t, k4a_image_get_size, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_image_format_t, k4a_image_get_format, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, int, k4a_image_get_width_pixels, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, int, k4a_image_get_height_pixels, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, int, k4a_image_get_stride_bytes, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, uint64_t, k4a_image_get_timestamp_usec, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, uint64_t, k4a_image_get_exposure_usec, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, uint32_t, k4a_image_get_white_balance, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, uint32_t, k4a_image_get_iso_speed, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_image_set_timestamp_usec, k4a_image_t, image_handle, uint64_t, timestamp_usec) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_image_set_exposure_time_usec, k4a_image_t, image_handle, uint64_t, exposure_usec) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_image_set_white_balance, k4a_image_t, image_handle, uint32_t, white_balance) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_image_set_iso_speed, k4a_image_t, image_handle, uint32_t, iso_speed) DEFINE_BRIDGED_FUNC( k4a_lib_name, void, k4a_image_reference, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC( k4a_lib_name, void, k4a_image_release, k4a_image_t, image_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_start_cameras, k4a_device_t, device_handle, k4a_device_configuration_t*, config) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_device_stop_cameras, k4a_device_t, device_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_start_imu, k4a_device_t, device_handle) DEFINE_BRIDGED_FUNC( k4a_lib_name, void, k4a_device_stop_imu, k4a_device_t, device_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_buffer_result_t, 
k4a_device_get_serialnum, k4a_device_t, device_handle, char*, serial_number, size_t*, serial_number_size) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_get_version, k4a_device_t, device_handle, k4a_hardware_version_t*, version) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_get_color_control_capabilities, k4a_device_t, device_handle, k4a_color_control_command_t, command, bool*, supports_auto, int32_t*, min_value, int32_t*, max_value, int32_t*, step_value, int32_t*, default_value, k4a_color_control_mode_t*, default_mode) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_get_color_control, k4a_device_t, device_handle, k4a_color_control_command_t, command, k4a_color_control_mode_t*, mode, int32_t*, value) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_set_color_control, k4a_device_t, device_handle, k4a_color_control_command_t, command, k4a_color_control_mode_t, mode, int32_t, value) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_buffer_result_t, k4a_device_get_raw_calibration, k4a_device_t, device_handle, uint8_t*, data, size_t*, data_size) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_get_calibration, k4a_device_t, device_handle, const k4a_depth_mode_t, depth_mode, const k4a_color_resolution_t, color_resolution, k4a_calibration_t*, calibration) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_device_get_sync_jack, k4a_device_t, device_handle, bool*, sync_in_jack_connected, bool*, sync_out_jack_connected) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_calibration_get_from_raw, char*, raw_calibration, size_t, raw_calibration_size, const k4a_depth_mode_t, depth_mode, const k4a_color_resolution_t, color_resolution, k4a_calibration_t*, calibration) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_calibration_3d_to_3d, const k4a_calibration_t*, calibration, const k4a_float3_t*, source_point3d_mm, const k4a_calibration_type_t, source_camera, const k4a_calibration_type_t, target_camera, k4a_float3_t*, 
target_point3d_mm) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_calibration_2d_to_3d, const k4a_calibration_t*, calibration, const k4a_float2_t*, source_point2d, const float, source_depth_mm, const k4a_calibration_type_t, source_camera, const k4a_calibration_type_t, target_camera, k4a_float3_t*, target_point3d_mm, int*, valid) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_calibration_3d_to_2d, const k4a_calibration_t*, calibration, const k4a_float3_t*, source_point3d_mm, const k4a_calibration_type_t, source_camera, const k4a_calibration_type_t, target_camera, k4a_float2_t*, target_point2d, int*, valid) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_calibration_2d_to_2d, const k4a_calibration_t*, calibration, const k4a_float2_t*, source_point2d, const float, source_depth_mm, const k4a_calibration_type_t, source_camera, const k4a_calibration_type_t, target_camera, k4a_float2_t*, target_point2d, int*, valid) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_transformation_t, k4a_transformation_create, const k4a_calibration_t*, calibration) DEFINE_BRIDGED_FUNC(k4a_lib_name, void, k4a_transformation_destroy, k4a_transformation_t, transformation_handle) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_transformation_depth_image_to_color_camera, k4a_transformation_t, transformation_handle, const k4a_image_t, depth_image, k4a_image_t, transformed_depth_image) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_transformation_color_image_to_depth_camera, k4a_transformation_t, transformation_handle, const k4a_image_t, depth_image, const k4a_image_t, color_image, k4a_image_t, transformed_color_image) DEFINE_BRIDGED_FUNC(k4a_lib_name, k4a_result_t, k4a_transformation_depth_image_to_point_cloud, k4a_transformation_t, transformation_handle, const k4a_image_t, depth_image, const k4a_calibration_type_t, camera, k4a_image_t, xyz_image) } // namespace k4a_plugin } // namespace io } // namespace open3d
35.484994
93
0.479279
[ "vector" ]
11b8c00e8dd8a43b733c226c63af9c7d968b9234
30,470
cpp
C++
src/trunk/libs/seiscomp3/datamodel/focalmechanism.cpp
Fran89/seiscomp3
a25d29966949769d2bce9c0d28db0a2128e00649
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
src/trunk/libs/seiscomp3/datamodel/focalmechanism.cpp
Fran89/seiscomp3
a25d29966949769d2bce9c0d28db0a2128e00649
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
src/trunk/libs/seiscomp3/datamodel/focalmechanism.cpp
Fran89/seiscomp3
a25d29966949769d2bce9c0d28db0a2128e00649
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
/*************************************************************************** * Copyright (C) by GFZ Potsdam * * * * You can redistribute and/or modify this program under the * * terms of the SeisComP Public License. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * SeisComP Public License for more details. * ***************************************************************************/ // This file was created by a source code generator. // Do not modify the contents. Change the definition and run the generator // again! #define SEISCOMP_COMPONENT DataModel #include <seiscomp3/datamodel/focalmechanism.h> #include <seiscomp3/datamodel/eventparameters.h> #include <seiscomp3/datamodel/momenttensor.h> #include <algorithm> #include <seiscomp3/datamodel/metadata.h> #include <seiscomp3/logging/log.h> namespace Seiscomp { namespace DataModel { IMPLEMENT_SC_CLASS_DERIVED(FocalMechanism, PublicObject, "FocalMechanism"); namespace { static Seiscomp::Core::MetaEnumImpl<EvaluationMode> metaEvaluationMode; static Seiscomp::Core::MetaEnumImpl<EvaluationStatus> metaEvaluationStatus; } FocalMechanism::MetaObject::MetaObject(const Core::RTTI* rtti) : Seiscomp::Core::MetaObject(rtti) { addProperty(Core::simpleProperty("triggeringOriginID", "string", false, false, false, true, false, false, NULL, &FocalMechanism::setTriggeringOriginID, &FocalMechanism::triggeringOriginID)); addProperty(objectProperty<NodalPlanes>("nodalPlanes", "NodalPlanes", false, false, true, &FocalMechanism::setNodalPlanes, &FocalMechanism::nodalPlanes)); addProperty(objectProperty<PrincipalAxes>("principalAxes", "PrincipalAxes", false, false, true, &FocalMechanism::setPrincipalAxes, &FocalMechanism::principalAxes)); addProperty(Core::simpleProperty("azimuthalGap", "float", false, false, false, false, true, false, NULL, &FocalMechanism::setAzimuthalGap, 
&FocalMechanism::azimuthalGap)); addProperty(Core::simpleProperty("stationPolarityCount", "int", false, false, false, false, true, false, NULL, &FocalMechanism::setStationPolarityCount, &FocalMechanism::stationPolarityCount)); addProperty(Core::simpleProperty("misfit", "float", false, false, false, false, true, false, NULL, &FocalMechanism::setMisfit, &FocalMechanism::misfit)); addProperty(Core::simpleProperty("stationDistributionRatio", "float", false, false, false, false, true, false, NULL, &FocalMechanism::setStationDistributionRatio, &FocalMechanism::stationDistributionRatio)); addProperty(Core::simpleProperty("methodID", "string", false, false, false, false, false, false, NULL, &FocalMechanism::setMethodID, &FocalMechanism::methodID)); addProperty(enumProperty("evaluationMode", "EvaluationMode", false, true, &metaEvaluationMode, &FocalMechanism::setEvaluationMode, &FocalMechanism::evaluationMode)); addProperty(enumProperty("evaluationStatus", "EvaluationStatus", false, true, &metaEvaluationStatus, &FocalMechanism::setEvaluationStatus, &FocalMechanism::evaluationStatus)); addProperty(objectProperty<CreationInfo>("creationInfo", "CreationInfo", false, false, true, &FocalMechanism::setCreationInfo, &FocalMechanism::creationInfo)); addProperty(arrayClassProperty<Comment>("comment", "Comment", &FocalMechanism::commentCount, &FocalMechanism::comment, static_cast<bool (FocalMechanism::*)(Comment*)>(&FocalMechanism::add), &FocalMechanism::removeComment, static_cast<bool (FocalMechanism::*)(Comment*)>(&FocalMechanism::remove))); addProperty(arrayObjectProperty("momentTensor", "MomentTensor", &FocalMechanism::momentTensorCount, &FocalMechanism::momentTensor, static_cast<bool (FocalMechanism::*)(MomentTensor*)>(&FocalMechanism::add), &FocalMechanism::removeMomentTensor, static_cast<bool (FocalMechanism::*)(MomentTensor*)>(&FocalMechanism::remove))); } IMPLEMENT_METAOBJECT(FocalMechanism) FocalMechanism::FocalMechanism() { } // 
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism::FocalMechanism(const FocalMechanism& other) : PublicObject() { *this = other; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism::FocalMechanism(const std::string& publicID) : PublicObject(publicID) { } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism::~FocalMechanism() { std::for_each(_comments.begin(), _comments.end(), std::compose1(std::bind2nd(std::mem_fun(&Comment::setParent), (PublicObject*)NULL), std::mem_fun_ref(&CommentPtr::get))); std::for_each(_momentTensors.begin(), _momentTensors.end(), std::compose1(std::bind2nd(std::mem_fun(&MomentTensor::setParent), (PublicObject*)NULL), std::mem_fun_ref(&MomentTensorPtr::get))); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism* FocalMechanism::Create() { FocalMechanism* object = new FocalMechanism(); return static_cast<FocalMechanism*>(GenerateId(object)); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism* FocalMechanism::Create(const std::string& publicID) { if ( PublicObject::IsRegistrationEnabled() && Find(publicID) != NULL ) { SEISCOMP_ERROR( "There exists already a PublicObject with Id '%s'", publicID.c_str() ); return NULL; } return new FocalMechanism(publicID); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism* FocalMechanism::Find(const 
std::string& publicID) { return FocalMechanism::Cast(PublicObject::Find(publicID)); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::operator==(const FocalMechanism& rhs) const { if ( _triggeringOriginID != rhs._triggeringOriginID ) return false; if ( _nodalPlanes != rhs._nodalPlanes ) return false; if ( _principalAxes != rhs._principalAxes ) return false; if ( _azimuthalGap != rhs._azimuthalGap ) return false; if ( _stationPolarityCount != rhs._stationPolarityCount ) return false; if ( _misfit != rhs._misfit ) return false; if ( _stationDistributionRatio != rhs._stationDistributionRatio ) return false; if ( _methodID != rhs._methodID ) return false; if ( _evaluationMode != rhs._evaluationMode ) return false; if ( _evaluationStatus != rhs._evaluationStatus ) return false; if ( _creationInfo != rhs._creationInfo ) return false; return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::operator!=(const FocalMechanism& rhs) const { return !operator==(rhs); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::equal(const FocalMechanism& other) const { return *this == other; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setTriggeringOriginID(const std::string& triggeringOriginID) { _triggeringOriginID = triggeringOriginID; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> const std::string& FocalMechanism::triggeringOriginID() const { return 
_triggeringOriginID; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setNodalPlanes(const OPT(NodalPlanes)& nodalPlanes) { _nodalPlanes = nodalPlanes; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> NodalPlanes& FocalMechanism::nodalPlanes() { if ( _nodalPlanes ) return *_nodalPlanes; throw Seiscomp::Core::ValueException("FocalMechanism.nodalPlanes is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> const NodalPlanes& FocalMechanism::nodalPlanes() const { if ( _nodalPlanes ) return *_nodalPlanes; throw Seiscomp::Core::ValueException("FocalMechanism.nodalPlanes is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setPrincipalAxes(const OPT(PrincipalAxes)& principalAxes) { _principalAxes = principalAxes; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PrincipalAxes& FocalMechanism::principalAxes() { if ( _principalAxes ) return *_principalAxes; throw Seiscomp::Core::ValueException("FocalMechanism.principalAxes is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> const PrincipalAxes& FocalMechanism::principalAxes() const { if ( _principalAxes ) return *_principalAxes; throw Seiscomp::Core::ValueException("FocalMechanism.principalAxes is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // 
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setAzimuthalGap(const OPT(double)& azimuthalGap) { _azimuthalGap = azimuthalGap; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> double FocalMechanism::azimuthalGap() const { if ( _azimuthalGap ) return *_azimuthalGap; throw Seiscomp::Core::ValueException("FocalMechanism.azimuthalGap is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setStationPolarityCount(const OPT(int)& stationPolarityCount) { _stationPolarityCount = stationPolarityCount; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> int FocalMechanism::stationPolarityCount() const { if ( _stationPolarityCount ) return *_stationPolarityCount; throw Seiscomp::Core::ValueException("FocalMechanism.stationPolarityCount is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setMisfit(const OPT(double)& misfit) { _misfit = misfit; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> double FocalMechanism::misfit() const { if ( _misfit ) return *_misfit; throw Seiscomp::Core::ValueException("FocalMechanism.misfit is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setStationDistributionRatio(const OPT(double)& stationDistributionRatio) { _stationDistributionRatio = stationDistributionRatio; 
} // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> double FocalMechanism::stationDistributionRatio() const { if ( _stationDistributionRatio ) return *_stationDistributionRatio; throw Seiscomp::Core::ValueException("FocalMechanism.stationDistributionRatio is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setMethodID(const std::string& methodID) { _methodID = methodID; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> const std::string& FocalMechanism::methodID() const { return _methodID; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setEvaluationMode(const OPT(EvaluationMode)& evaluationMode) { _evaluationMode = evaluationMode; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> EvaluationMode FocalMechanism::evaluationMode() const { if ( _evaluationMode ) return *_evaluationMode; throw Seiscomp::Core::ValueException("FocalMechanism.evaluationMode is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setEvaluationStatus(const OPT(EvaluationStatus)& evaluationStatus) { _evaluationStatus = evaluationStatus; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> EvaluationStatus FocalMechanism::evaluationStatus() const { if ( _evaluationStatus ) 
return *_evaluationStatus; throw Seiscomp::Core::ValueException("FocalMechanism.evaluationStatus is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::setCreationInfo(const OPT(CreationInfo)& creationInfo) { _creationInfo = creationInfo; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> CreationInfo& FocalMechanism::creationInfo() { if ( _creationInfo ) return *_creationInfo; throw Seiscomp::Core::ValueException("FocalMechanism.creationInfo is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> const CreationInfo& FocalMechanism::creationInfo() const { if ( _creationInfo ) return *_creationInfo; throw Seiscomp::Core::ValueException("FocalMechanism.creationInfo is not set"); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> EventParameters* FocalMechanism::eventParameters() const { return static_cast<EventParameters*>(parent()); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> FocalMechanism& FocalMechanism::operator=(const FocalMechanism& other) { PublicObject::operator=(other); _triggeringOriginID = other._triggeringOriginID; _nodalPlanes = other._nodalPlanes; _principalAxes = other._principalAxes; _azimuthalGap = other._azimuthalGap; _stationPolarityCount = other._stationPolarityCount; _misfit = other._misfit; _stationDistributionRatio = other._stationDistributionRatio; _methodID = other._methodID; _evaluationMode = other._evaluationMode; _evaluationStatus = other._evaluationStatus; 
_creationInfo = other._creationInfo; return *this; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::assign(Object* other) { FocalMechanism* otherFocalMechanism = FocalMechanism::Cast(other); if ( other == NULL ) return false; *this = *otherFocalMechanism; return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::attachTo(PublicObject* parent) { if ( parent == NULL ) return false; // check all possible parents EventParameters* eventParameters = EventParameters::Cast(parent); if ( eventParameters != NULL ) return eventParameters->add(this); SEISCOMP_ERROR("FocalMechanism::attachTo(%s) -> wrong class type", parent->className()); return false; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::detachFrom(PublicObject* object) { if ( object == NULL ) return false; // check all possible parents EventParameters* eventParameters = EventParameters::Cast(object); if ( eventParameters != NULL ) { // If the object has been added already to the parent locally // just remove it by pointer if ( object == parent() ) return eventParameters->remove(this); // The object has not been added locally so it must be looked up else { FocalMechanism* child = eventParameters->findFocalMechanism(publicID()); if ( child != NULL ) return eventParameters->remove(child); else { SEISCOMP_DEBUG("FocalMechanism::detachFrom(EventParameters): focalMechanism has not been found"); return false; } } } SEISCOMP_ERROR("FocalMechanism::detachFrom(%s) -> wrong class type", object->className()); return false; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // 
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::detach() { if ( parent() == NULL ) return false; return detachFrom(parent()); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Object* FocalMechanism::clone() const { FocalMechanism* clonee = new FocalMechanism(); *clonee = *this; return clonee; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::updateChild(Object* child) { Comment* commentChild = Comment::Cast(child); if ( commentChild != NULL ) { Comment* commentElement = comment(commentChild->index()); if ( commentElement != NULL ) { *commentElement = *commentChild; return true; } return false; } MomentTensor* momentTensorChild = MomentTensor::Cast(child); if ( momentTensorChild != NULL ) { MomentTensor* momentTensorElement = MomentTensor::Cast(PublicObject::Find(momentTensorChild->publicID())); if ( momentTensorElement && momentTensorElement->parent() == this ) { *momentTensorElement = *momentTensorChild; return true; } return false; } return false; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::accept(Visitor* visitor) { if ( visitor->traversal() == Visitor::TM_TOPDOWN ) if ( !visitor->visit(this) ) return; for ( std::vector<CommentPtr>::iterator it = _comments.begin(); it != _comments.end(); ++it ) (*it)->accept(visitor); for ( std::vector<MomentTensorPtr>::iterator it = _momentTensors.begin(); it != _momentTensors.end(); ++it ) (*it)->accept(visitor); if ( visitor->traversal() == Visitor::TM_BOTTOMUP ) visitor->visit(this); else visitor->finished(); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // 
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> size_t FocalMechanism::commentCount() const { return _comments.size(); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Comment* FocalMechanism::comment(size_t i) const { return _comments[i].get(); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Comment* FocalMechanism::comment(const CommentIndex& i) const { for ( std::vector<CommentPtr>::const_iterator it = _comments.begin(); it != _comments.end(); ++it ) if ( i == (*it)->index() ) return (*it).get(); return NULL; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::add(Comment* comment) { if ( comment == NULL ) return false; // Element has already a parent if ( comment->parent() != NULL ) { SEISCOMP_ERROR("FocalMechanism::add(Comment*) -> element has already a parent"); return false; } // Duplicate index check for ( std::vector<CommentPtr>::iterator it = _comments.begin(); it != _comments.end(); ++it ) { if ( (*it)->index() == comment->index() ) { SEISCOMP_ERROR("FocalMechanism::add(Comment*) -> an element with the same index has been added already"); return false; } } // Add the element _comments.push_back(comment); comment->setParent(this); // Create the notifiers if ( Notifier::IsEnabled() ) { NotifierCreator nc(OP_ADD); comment->accept(&nc); } // Notify registered observers childAdded(comment); return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::remove(Comment* comment) { if ( comment == NULL ) return false; if ( comment->parent() != this ) { 
SEISCOMP_ERROR("FocalMechanism::remove(Comment*) -> element has another parent"); return false; } std::vector<CommentPtr>::iterator it; it = std::find(_comments.begin(), _comments.end(), comment); // Element has not been found if ( it == _comments.end() ) { SEISCOMP_ERROR("FocalMechanism::remove(Comment*) -> child object has not been found although the parent pointer matches???"); return false; } // Create the notifiers if ( Notifier::IsEnabled() ) { NotifierCreator nc(OP_REMOVE); (*it)->accept(&nc); } (*it)->setParent(NULL); childRemoved((*it).get()); _comments.erase(it); return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::removeComment(size_t i) { // index out of bounds if ( i >= _comments.size() ) return false; // Create the notifiers if ( Notifier::IsEnabled() ) { NotifierCreator nc(OP_REMOVE); _comments[i]->accept(&nc); } _comments[i]->setParent(NULL); childRemoved(_comments[i].get()); _comments.erase(_comments.begin() + i); return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::removeComment(const CommentIndex& i) { Comment* object = comment(i); if ( object == NULL ) return false; return remove(object); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> size_t FocalMechanism::momentTensorCount() const { return _momentTensors.size(); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MomentTensor* FocalMechanism::momentTensor(size_t i) const { return _momentTensors[i].get(); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // 
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MomentTensor* FocalMechanism::findMomentTensor(const std::string& publicID) const { for ( std::vector<MomentTensorPtr>::const_iterator it = _momentTensors.begin(); it != _momentTensors.end(); ++it ) if ( (*it)->publicID() == publicID ) return (*it).get(); return NULL; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::add(MomentTensor* momentTensor) { if ( momentTensor == NULL ) return false; // Element has already a parent if ( momentTensor->parent() != NULL ) { SEISCOMP_ERROR("FocalMechanism::add(MomentTensor*) -> element has already a parent"); return false; } if ( PublicObject::IsRegistrationEnabled() ) { MomentTensor* momentTensorCached = MomentTensor::Find(momentTensor->publicID()); if ( momentTensorCached ) { if ( momentTensorCached->parent() ) { if ( momentTensorCached->parent() == this ) SEISCOMP_ERROR("FocalMechanism::add(MomentTensor*) -> element with same publicID has been added already"); else SEISCOMP_ERROR("FocalMechanism::add(MomentTensor*) -> element with same publicID has been added already to another object"); return false; } else momentTensor = momentTensorCached; } } // Add the element _momentTensors.push_back(momentTensor); momentTensor->setParent(this); // Create the notifiers if ( Notifier::IsEnabled() ) { NotifierCreator nc(OP_ADD); momentTensor->accept(&nc); } // Notify registered observers childAdded(momentTensor); return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::remove(MomentTensor* momentTensor) { if ( momentTensor == NULL ) return false; if ( momentTensor->parent() != this ) { SEISCOMP_ERROR("FocalMechanism::remove(MomentTensor*) -> element has another parent"); return false; } 
std::vector<MomentTensorPtr>::iterator it; it = std::find(_momentTensors.begin(), _momentTensors.end(), momentTensor); // Element has not been found if ( it == _momentTensors.end() ) { SEISCOMP_ERROR("FocalMechanism::remove(MomentTensor*) -> child object has not been found although the parent pointer matches???"); return false; } // Create the notifiers if ( Notifier::IsEnabled() ) { NotifierCreator nc(OP_REMOVE); (*it)->accept(&nc); } (*it)->setParent(NULL); childRemoved((*it).get()); _momentTensors.erase(it); return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool FocalMechanism::removeMomentTensor(size_t i) { // index out of bounds if ( i >= _momentTensors.size() ) return false; // Create the notifiers if ( Notifier::IsEnabled() ) { NotifierCreator nc(OP_REMOVE); _momentTensors[i]->accept(&nc); } _momentTensors[i]->setParent(NULL); childRemoved(_momentTensors[i].get()); _momentTensors.erase(_momentTensors.begin() + i); return true; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> void FocalMechanism::serialize(Archive& ar) { // Do not read/write if the archive's version is higher than // currently supported if ( ar.isHigherVersion<0,9>() ) { SEISCOMP_ERROR("Archive version %d.%d too high: FocalMechanism skipped", ar.versionMajor(), ar.versionMinor()); ar.setValidity(false); return; } PublicObject::serialize(ar); if ( !ar.success() ) return; ar & NAMED_OBJECT_HINT("triggeringOriginID", _triggeringOriginID, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("nodalPlanes", _nodalPlanes, Archive::STATIC_TYPE | Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("principalAxes", _principalAxes, Archive::STATIC_TYPE | Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("azimuthalGap", _azimuthalGap, Archive::XML_ELEMENT); ar & 
NAMED_OBJECT_HINT("stationPolarityCount", _stationPolarityCount, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("misfit", _misfit, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("stationDistributionRatio", _stationDistributionRatio, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("methodID", _methodID, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("evaluationMode", _evaluationMode, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("evaluationStatus", _evaluationStatus, Archive::XML_ELEMENT); ar & NAMED_OBJECT_HINT("creationInfo", _creationInfo, Archive::STATIC_TYPE | Archive::XML_ELEMENT); if ( ar.hint() & Archive::IGNORE_CHILDS ) return; ar & NAMED_OBJECT_HINT("comment", Seiscomp::Core::Generic::containerMember(_comments, Seiscomp::Core::Generic::bindMemberFunction<Comment>(static_cast<bool (FocalMechanism::*)(Comment*)>(&FocalMechanism::add), this)), Archive::STATIC_TYPE); ar & NAMED_OBJECT_HINT("momentTensor", Seiscomp::Core::Generic::containerMember(_momentTensors, Seiscomp::Core::Generic::bindMemberFunction<MomentTensor>(static_cast<bool (FocalMechanism::*)(MomentTensor*)>(&FocalMechanism::add), this)), Archive::STATIC_TYPE); } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> } }
33.373494
325
0.501871
[ "object", "vector" ]
11bdc58d371b56ea688dc087c08e6474a3475832
17,976
cc
C++
third_party/blink/renderer/core/layout/layout_embedded_content.cc
Ron423c/chromium
2edf7b980065b648f8b2a6e52193d83832fe36b7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
third_party/blink/renderer/core/layout/layout_embedded_content.cc
Ron423c/chromium
2edf7b980065b648f8b2a6e52193d83832fe36b7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
third_party/blink/renderer/core/layout/layout_embedded_content.cc
Ron423c/chromium
2edf7b980065b648f8b2a6e52193d83832fe36b7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2021-03-07T14:20:02.000Z
2021-03-07T14:20:02.000Z
/* * Copyright (C) 1999 Lars Knoll (knoll@kde.org) * (C) 2000 Simon Hausmann <hausmann@kde.org> * (C) 2000 Stefan Schimanski (1Stein@gmx.de) * Copyright (C) 2004, 2005, 2006, 2009 Apple Inc. All rights reserved. * Copyright (C) Research In Motion Limited 2011. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. 
* */ #include "third_party/blink/renderer/core/layout/layout_embedded_content.h" #include "third_party/blink/public/common/features.h" #include "third_party/blink/renderer/core/accessibility/ax_object_cache.h" #include "third_party/blink/renderer/core/exported/web_plugin_container_impl.h" #include "third_party/blink/renderer/core/frame/embedded_content_view.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/core/frame/local_frame_view.h" #include "third_party/blink/renderer/core/frame/remote_frame.h" #include "third_party/blink/renderer/core/frame/remote_frame_view.h" #include "third_party/blink/renderer/core/html/html_frame_element_base.h" #include "third_party/blink/renderer/core/html/html_plugin_element.h" #include "third_party/blink/renderer/core/layout/hit_test_result.h" #include "third_party/blink/renderer/core/layout/layout_analyzer.h" #include "third_party/blink/renderer/core/layout/layout_view.h" #include "third_party/blink/renderer/core/paint/compositing/paint_layer_compositor.h" #include "third_party/blink/renderer/core/paint/embedded_content_painter.h" #include "third_party/blink/renderer/core/paint/paint_layer.h" namespace blink { LayoutEmbeddedContent::LayoutEmbeddedContent(HTMLFrameOwnerElement* element) : LayoutReplaced(element), // Reference counting is used to prevent the part from being destroyed // while inside the EmbeddedContentView code, which might not be able to // handle that. 
ref_count_(1) { DCHECK(element); SetInline(false); } void LayoutEmbeddedContent::Release() { NOT_DESTROYED(); if (--ref_count_ <= 0) delete this; } void LayoutEmbeddedContent::WillBeDestroyed() { NOT_DESTROYED(); if (AXObjectCache* cache = GetDocument().ExistingAXObjectCache()) { cache->ChildrenChanged(Parent()); cache->Remove(this); } if (auto* frame_owner = GetFrameOwnerElement()) frame_owner->SetEmbeddedContentView(nullptr); LayoutReplaced::WillBeDestroyed(); } void LayoutEmbeddedContent::DeleteThis() { NOT_DESTROYED(); // We call clearNode here because LayoutEmbeddedContent is ref counted. This // call to destroy may not actually destroy the layout object. We can keep it // around because of references from the LocalFrameView class. (The actual // destruction of the class happens in PostDestroy() which is called from // Release()). // // But, we've told the system we've destroyed the layoutObject, which happens // when the DOM node is destroyed. So there is a good chance the DOM node this // object points too is invalid, so we have to clear the node so we make sure // we don't access it in the future. 
ClearNode(); Release(); } LayoutEmbeddedContent::~LayoutEmbeddedContent() { DCHECK_LE(ref_count_, 0); } FrameView* LayoutEmbeddedContent::ChildFrameView() const { NOT_DESTROYED(); return DynamicTo<FrameView>(GetEmbeddedContentView()); } LayoutView* LayoutEmbeddedContent::ChildLayoutView() const { NOT_DESTROYED(); if (HTMLFrameOwnerElement* owner_element = GetFrameOwnerElement()) { if (Document* content_document = owner_element->contentDocument()) return content_document->GetLayoutView(); } return nullptr; } WebPluginContainerImpl* LayoutEmbeddedContent::Plugin() const { NOT_DESTROYED(); EmbeddedContentView* embedded_content_view = GetEmbeddedContentView(); if (embedded_content_view && embedded_content_view->IsPluginView()) return To<WebPluginContainerImpl>(embedded_content_view); return nullptr; } EmbeddedContentView* LayoutEmbeddedContent::GetEmbeddedContentView() const { NOT_DESTROYED(); if (auto* frame_owner = GetFrameOwnerElement()) return frame_owner->OwnedEmbeddedContentView(); return nullptr; } PaintLayerType LayoutEmbeddedContent::LayerTypeRequired() const { NOT_DESTROYED(); if (AdditionalCompositingReasons()) return kNormalPaintLayer; PaintLayerType type = LayoutReplaced::LayerTypeRequired(); if (type != kNoPaintLayer) return type; // We can't check layout_view->Layer()->GetCompositingReasons() here because // we're only in style update, so haven't run compositing update yet. if (!RuntimeEnabledFeatures::CompositeAfterPaintEnabled()) { if (LayoutView* child_layout_view = ChildLayoutView()) { if (child_layout_view->AdditionalCompositingReasons()) return kNormalPaintLayer; } } return kForcedPaintLayer; } bool LayoutEmbeddedContent::ContentDocumentContainsGraphicsLayer() const { NOT_DESTROYED(); // This method must use the same logic as GraphicsLayerTreeBuilder: if // an iframe is throttled, we look for the existence of a root graphics layer, // even if the compositing state information is stale. 
if (PaintLayerCompositor* inner_compositor = PaintLayerCompositor::FrameContentsCompositor(*this)) { DisableCompositingQueryAsserts compositing_disabler; return inner_compositor->RootGraphicsLayer(); } return false; } bool LayoutEmbeddedContent::NodeAtPointOverEmbeddedContentView( HitTestResult& result, const HitTestLocation& hit_test_location, const PhysicalOffset& accumulated_offset, HitTestAction action) { NOT_DESTROYED(); bool had_result = result.InnerNode(); bool inside = LayoutReplaced::NodeAtPoint(result, hit_test_location, accumulated_offset, action); // Check to see if we are really over the EmbeddedContentView itself (and not // just in the border/padding area). if ((inside || hit_test_location.IsRectBasedTest()) && !had_result && result.InnerNode() == GetNode()) { result.SetIsOverEmbeddedContentView( PhysicalContentBoxRect().Contains(result.LocalPoint())); } return inside; } bool LayoutEmbeddedContent::NodeAtPoint( HitTestResult& result, const HitTestLocation& hit_test_location, const PhysicalOffset& accumulated_offset, HitTestAction action) { NOT_DESTROYED(); auto* local_frame_view = DynamicTo<LocalFrameView>(ChildFrameView()); bool skip_contents = (result.GetHitTestRequest().GetStopNode() == this || !result.GetHitTestRequest().AllowsChildFrameContent()); if (!local_frame_view || skip_contents) { return NodeAtPointOverEmbeddedContentView(result, hit_test_location, accumulated_offset, action); } // A hit test can never hit an off-screen element; only off-screen iframes are // throttled; therefore, hit tests can skip descending into throttled iframes. // We also check the document lifecycle state because the frame may have been // throttled at the time lifecycle updates happened, in which case it will not // be up-to-date and we can't hit test it. 
if (local_frame_view->ShouldThrottleRendering() || !local_frame_view->GetFrame().GetDocument() || local_frame_view->GetFrame().GetDocument()->Lifecycle().GetState() < DocumentLifecycle::kPrePaintClean) { return NodeAtPointOverEmbeddedContentView(result, hit_test_location, accumulated_offset, action); } DCHECK_GE(GetDocument().Lifecycle().GetState(), DocumentLifecycle::kPrePaintClean); if (action == kHitTestForeground) { auto* child_layout_view = local_frame_view->GetLayoutView(); if (VisibleToHitTestRequest(result.GetHitTestRequest()) && child_layout_view) { PhysicalOffset content_offset(BorderLeft() + PaddingLeft(), BorderTop() + PaddingTop()); HitTestLocation new_hit_test_location( hit_test_location, -accumulated_offset - content_offset); HitTestRequest new_hit_test_request( result.GetHitTestRequest().GetType() | HitTestRequest::kChildFrameHitTest, result.GetHitTestRequest().GetStopNode()); HitTestResult child_frame_result(new_hit_test_request, new_hit_test_location); child_frame_result.SetInertNode(result.InertNode()); // The frame's layout and style must be up to date if we reach here. bool is_inside_child_frame = child_layout_view->HitTestNoLifecycleUpdate( new_hit_test_location, child_frame_result); if (result.GetHitTestRequest().ListBased()) { result.Append(child_frame_result); } else if (is_inside_child_frame) { // Force the result not to be cacheable because the parent frame should // not cache this result; as it won't be notified of changes in the // child. child_frame_result.SetCacheable(false); result = child_frame_result; } // Don't trust |isInsideChildFrame|. For rect-based hit-test, returns // true only when the hit test rect is totally within the iframe, // i.e. nodeAtPointOverEmbeddedContentView() also returns true. // Use a temporary HitTestResult because we don't want to collect the // iframe element itself if the hit-test rect is totally within the // iframe. 
if (is_inside_child_frame) { if (!hit_test_location.IsRectBasedTest()) return true; HitTestResult point_over_embedded_content_view_result = result; bool point_over_embedded_content_view = NodeAtPointOverEmbeddedContentView( point_over_embedded_content_view_result, hit_test_location, accumulated_offset, action); if (point_over_embedded_content_view) return true; result = point_over_embedded_content_view_result; return false; } } } return NodeAtPointOverEmbeddedContentView(result, hit_test_location, accumulated_offset, action); } CompositingReasons LayoutEmbeddedContent::AdditionalCompositingReasons() const { NOT_DESTROYED(); WebPluginContainerImpl* plugin_view = Plugin(); if (plugin_view && plugin_view->CcLayer()) return CompositingReason::kPlugin; if (auto* element = GetFrameOwnerElement()) { if (Frame* content_frame = element->ContentFrame()) { if (content_frame->IsRemoteFrame()) return CompositingReason::kIFrame; } } return CompositingReason::kNone; } void LayoutEmbeddedContent::StyleDidChange(StyleDifference diff, const ComputedStyle* old_style) { NOT_DESTROYED(); LayoutReplaced::StyleDidChange(diff, old_style); if (EmbeddedContentView* embedded_content_view = GetEmbeddedContentView()) { if (StyleRef().Visibility() != EVisibility::kVisible) { embedded_content_view->Hide(); } else { embedded_content_view->Show(); } } if (old_style && StyleRef().VisibleToHitTesting() == old_style->VisibleToHitTesting()) { return; } auto* frame_owner = GetFrameOwnerElement(); if (!frame_owner) return; auto* frame = frame_owner->ContentFrame(); if (!frame) return; frame->UpdateVisibleToHitTesting(); } void LayoutEmbeddedContent::UpdateLayout() { NOT_DESTROYED(); DCHECK(NeedsLayout()); LayoutAnalyzer::Scope analyzer(*this); UpdateAfterLayout(); ClearNeedsLayout(); } void LayoutEmbeddedContent::PaintReplaced( const PaintInfo& paint_info, const PhysicalOffset& paint_offset) const { NOT_DESTROYED(); if (ChildPaintBlockedByDisplayLock()) return; 
EmbeddedContentPainter(*this).PaintReplaced(paint_info, paint_offset); } void LayoutEmbeddedContent::InvalidatePaint( const PaintInvalidatorContext& context) const { NOT_DESTROYED(); LayoutReplaced::InvalidatePaint(context); if (auto* plugin = Plugin()) plugin->InvalidatePaint(); } CursorDirective LayoutEmbeddedContent::GetCursor(const PhysicalOffset& point, ui::Cursor& cursor) const { NOT_DESTROYED(); if (Plugin()) { // A plugin is responsible for setting the cursor when the pointer is over // it. return kDoNotSetCursor; } return LayoutReplaced::GetCursor(point, cursor); } PhysicalRect LayoutEmbeddedContent::ReplacedContentRect() const { NOT_DESTROYED(); PhysicalRect content_rect = PhysicalContentBoxRect(); // IFrames set as the root scroller should get their size from their parent. if (ChildFrameView() && View() && IsEffectiveRootScroller()) { content_rect.offset = PhysicalOffset(); content_rect.size = View()->ViewRect().size; } // We don't propagate sub-pixel into sub-frame layout, in other words, the // rect is snapped at the document boundary, and sub-pixel movement could // cause the sub-frame to layout due to the 1px snap difference. In order to // avoid that, the size of sub-frame is rounded in advance. return PreSnappedRectForPersistentSizing(content_rect); } void LayoutEmbeddedContent::UpdateOnEmbeddedContentViewChange() { NOT_DESTROYED(); if (!Style()) return; if (EmbeddedContentView* embedded_content_view = GetEmbeddedContentView()) { if (!NeedsLayout()) UpdateGeometry(*embedded_content_view); if (StyleRef().Visibility() != EVisibility::kVisible) embedded_content_view->Hide(); else embedded_content_view->Show(); } // One of the reasons of the following is that the layout tree in the new // embedded content view may have already had some paint property and paint // invalidation flags set, and we need to propagate the flags into the host // view. 
Adding, changing and removing are also significant changes to the // tree so setting the flags ensures the required updates. SetNeedsPaintPropertyUpdate(); SetShouldDoFullPaintInvalidation(); // Showing/hiding the embedded content view and changing the view between null // and non-null affect compositing (see: PaintLayerCompositor::CanBeComposited // and RootShouldAlwaysComposite). if (HasLayer()) Layer()->SetNeedsCompositingInputsUpdate(); } void LayoutEmbeddedContent::UpdateGeometry( EmbeddedContentView& embedded_content_view) { NOT_DESTROYED(); // TODO(wangxianzhu): We reset subpixel accumulation at some boundaries, so // the following code is incorrect when some ancestors are such boundaries. // What about multicol? Need a LayoutBox function to query sub-pixel // accumulation. PhysicalRect replaced_rect = ReplacedContentRect(); TransformState transform_state(TransformState::kApplyTransformDirection, FloatPoint(), FloatQuad(FloatRect(replaced_rect))); MapLocalToAncestor(nullptr, transform_state, 0); transform_state.Flatten(); PhysicalOffset absolute_location = PhysicalOffset::FromFloatPointRound(transform_state.LastPlanarPoint()); PhysicalRect absolute_replaced_rect = replaced_rect; absolute_replaced_rect.Move(absolute_location); FloatRect absolute_bounding_box = transform_state.LastPlanarQuad().BoundingBox(); IntRect frame_rect(IntPoint(), PixelSnappedIntRect(absolute_replaced_rect).Size()); // Normally the location of the frame rect is ignored by the painter, but // currently it is still used by a family of coordinate conversion function in // LocalFrameView. This is incorrect because coordinate conversion // needs to take transform and into account. A few callers still use the // family of conversion function, including but not exhaustive: // LocalFrameView::updateViewportIntersectionIfNeeded() // RemoteFrameView::frameRectsChanged(). // WebPluginContainerImpl::reportGeometry() // TODO(trchen): Remove this hack once we fixed all callers. 
frame_rect.SetLocation(RoundedIntPoint(absolute_bounding_box.Location())); // As an optimization, we don't include the root layer's scroll offset in the // frame rect. As a result, we don't need to recalculate the frame rect every // time the root layer scrolls; however, each implementation of // EmbeddedContentView::FrameRect() must add the root layer's scroll offset // into its position. // TODO(szager): Refactor this functionality into EmbeddedContentView, rather // than reimplementing in each concrete subclass. LayoutView* layout_view = View(); if (layout_view && layout_view->IsScrollContainer()) { // Floored because the PixelSnappedScrollOffset returns a ScrollOffset // which is a float-type but frame_rect in a content view is an IntRect. We // may want to reevaluate the use of pixel snapping that since scroll // offsets/layout can be fractional. frame_rect.Move( FlooredIntSize(layout_view->PixelSnappedScrolledContentOffset())); } embedded_content_view.SetFrameRect(frame_rect); } bool LayoutEmbeddedContent::IsThrottledFrameView() const { NOT_DESTROYED(); if (auto* local_frame_view = DynamicTo<LocalFrameView>(ChildFrameView())) return local_frame_view->ShouldThrottleRendering(); return false; } } // namespace blink
40.035635
85
0.728583
[ "object", "transform" ]
11be4ca82dd99b0e244b600e027371f4ee75e4c1
867
inl
C++
Radiolocation/Include/WhiteGaussianNoise.inl
bgin/MissileSimulation
90adcbf1c049daafb939f3fe9f9dfe792f26d5df
[ "MIT" ]
23
2016-08-28T23:20:12.000Z
2021-12-15T14:43:58.000Z
Radiolocation/Include/WhiteGaussianNoise.inl
bgin/MissileSimulation
90adcbf1c049daafb939f3fe9f9dfe792f26d5df
[ "MIT" ]
1
2018-06-02T21:29:51.000Z
2018-06-05T05:59:31.000Z
Radiolocation/Include/WhiteGaussianNoise.inl
bgin/MissileSimulation
90adcbf1c049daafb939f3fe9f9dfe792f26d5df
[ "MIT" ]
1
2019-07-04T22:38:22.000Z
2019-07-04T22:38:22.000Z
/* Copyright (c) 2015, Bernard Gingold. License: MIT License (http://www.opensource.org/licenses/mit-license.php) White Gaussian Noise class- inline functions implementation. @aulthor: Bernard Gingold @version: 1.0 26/10/2015 */ __forceinline std::size_t radiolocation::WGaussianNoise::samples() const { return this->m_samples; } __forceinline std::vector<std::pair<double, double>> radiolocation::WGaussianNoise::WGNoise() const { return this->m_oWGNoise; } __forceinline double radiolocation::WGaussianNoise::mean() const { return this->m_mean; } __forceinline double radiolocation::WGaussianNoise::variance() const { return this->m_variance; } __forceinline std::function<double(double)> radiolocation::WGaussianNoise::WaveformGenerator() const { return this->m_oWaveformGenerator; }
27.09375
113
0.716263
[ "vector" ]
11c3179825c27e542f31c5087b4ba42797bb435b
21,117
hpp
C++
embedded/yotta_modules/mbed-drivers/mbed-drivers/v2/I2C.hpp
bobwilmes/MicroBitSecurity
7a725d24fe8b7c809ca60069289e66cb1243a5d3
[ "Apache-2.0" ]
49
2015-09-04T13:33:25.000Z
2020-09-19T01:53:37.000Z
mbed-drivers/v2/I2C.hpp
u-blox/mbed-drivers
36156434ce3cbbe87b91bcf55f3614caed119ffa
[ "Apache-2.0" ]
121
2015-09-04T12:42:41.000Z
2019-07-09T09:18:38.000Z
mbed-drivers/v2/I2C.hpp
u-blox/mbed-drivers
36156434ce3cbbe87b91bcf55f3614caed119ffa
[ "Apache-2.0" ]
59
2015-09-04T12:20:43.000Z
2021-06-02T10:54:51.000Z
/* mbed Microcontroller Library * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MBED_DRIVERS_V1_I2C_HPP #define MBED_DRIVERS_V1_I2C_HPP #include "mbed-drivers/platform.h" #if DEVICE_I2C && DEVICE_I2C_ASYNCH #include "mbed-hal/i2c_api.h" #include "mbed-hal/dma_api.h" #include "mbed-drivers/CThunk.h" #include "core-util/FunctionPointer.h" #include "core-util/PoolAllocator.h" // Forward declarations namespace mbed { namespace drivers { namespace v2 { enum class I2CError; } // namespace mbed } // namespace drivers } // namespace v2 #include "I2CDetail.hpp" #include "EphemeralBuffer.hpp" /// There are 4 possible I2C Events, so limit the I2C transaction handlers to 4 const size_t I2C_TRANSACTION_NHANDLERS = 4; /** * \file * \brief A generic interface for I2C peripherals * * The I2C class interfaces with an I2C Resource manager in order to initiate Transactions and receive events. The * I2CTransaction class encapsulates all I2C transaction parameters. The I2CResourceManager class is a generic interface * for implementing I2C resource managers. This will allow for additional classes of I2C device, for example, a * bitbanged I2C master. * * # I2C * I2C encapsulates an I2C master. The physical I2C master to use is selected via the pins provided to the constructor. * The ```frequency()``` API sets the default frequency for transactions issued from the I2C object. 
This is used for * each transaction issued by I2C unless overridden when creating the transaction. Transactions are initiated by calling * ```transfer_to()``` or ```transfer_to_irqsafe()```. Both of these APIs create an instance of the ```TransferAdder``` * helper class. * * # TransferAdder * The TransferAdder class allows convenient construction of complex transfers. * * The ```frequency()``` member overrides the default frequency set by the issuing I2C object. * * The ```on()``` member allows setting up to 4 event handlers, each with a corresponding event mask. * * The ```tx()``` members add a buffer to send to the transfer. * * The ```rx()``` members add a buffer to receive into to the transfer. There is a special case of ```rx()```, which * doesn't use a normal buffer. When ```rx(size_t)``` is called with a size of less than 8 bytes, the underlying * EphermeralBuffer is placed in ephemeral mode. This means that no preallocated receive buffer is needed, instead the * data is packed directly into the EphemeralBuffer. This has a side-effect that the data will be freed once the last * event handler has exited, so if the data must be retained, it should be copied out. * * The ```apply()``` method validates the transfer and adds it to the transaction queue of the I2CResourceManager. It * returns the result of validation. * * # I2C Resource Managers * I2C Resource managers are instantiated statically and initialized during global init. There is one Resource Manager * per logical I2C master. Logical I2C masters could consist of: * * * Onchip I2C masters * * I2C Bridges (SPI->I2C bridge, I2C->I2C bridge, etc.) * * Bit banged I2C * * Bit banged I2C over SPI GPIO expander * * More... * * Currently only onchip I2C masters are supported. * * # I2C transactions * An I2CTransaction contains a list of event handlers and their event masks, an I2C address, an operating frequency, * and zero or more I2CSegments. 
Zero-segment Transactions are explicitly supported since they are useful in connected * device discovery (pings). * * # I2C Segments * An I2CSegment is a wrapper around an EphemeralBuffer. It provides an I2C transfer direction (read or write) and an * optional callback to execute in IRQ context. I2CSegments also provide a chaining pointer so that they can perform * sequential or scatter/gather operations. * * # Constructing I2C transactions * * ```C++ * void doneCB(bool dir, I2CTransaction *t, uint32_t event) { * // Do something * } * I2C i2c0(sda, scl); * void app_start (int, char **) { * uint8_t cmd[2] = {0xaa, 0x55}; * i2c0.transfer_to(addr).tx(cmd,2).rx(4).on(I2C_EVENT_ALL, doneCB); * } * ``` */ namespace mbed { namespace drivers { namespace v2 { // Forward declaration of I2C class I2C; /** * @brief List of error codes that can be produced by the I2C API */ enum class I2CError { None, InvalidMaster, PinMismatch, Busy, NullTransaction, NullSegment, MissingPoolAllocator, InvalidAddress, BufferSize, ScatterGatherNotSupported, DeinitInProgress }; /** * A Transaction container for I2C */ class I2CTransaction { public: /** I2C transfer callback * @param The transaction that was running when the callback was triggered * @param the event that triggered the calback */ using event_callback_t = detail::I2C_event_callback_t; /** * Construct an I2C transaction and set the destination address at the same time * * @param[in] address set the I2C destination address */ I2CTransaction(uint16_t address, uint32_t hz, bool irqsafe, I2C *issuer); ~I2CTransaction(); /** * Get a new segment to be used by the transaction. * This API calls the associated I2C object's associated allocator. * @return a new I2CSegment */ detail::I2CSegment * new_segment(); /** * Install a new event handler with the corresponding event mask * Once all available event slots are consumed, further calls to add_event are ignored. 
* * @param[in] event The event mask on which to trigger cb * @param[in] cb The event to trigger when one or more bits in the event mask is matched * @retval false There was no space for a new event handler * @retval true The new event handler was installed */ bool add_event(uint32_t event, const event_callback_t & cb); /** * The resource manager calls this API. * Calls the event handlers and */ void process_event(uint32_t event); /** * Set the next transaction in the queue * This is an atomic operation that will append to the queue. */ void append(I2CTransaction *t); /** * Forwards the irq-context callback to the segment * Also adds a pointer to this transaction to the callback * @param[in] the event that triggered this callback */ void call_irq_cb(uint32_t event); /** * If the current segment is valid advance the segment pointer * * @retval true if the current segment is valid after this operation * @retval false if the current segment is not valid after this operation */ bool advance_segment(); /** * Reset the current segment to the root segment */ void reset_current() { _current = _root; } /** * Accessor for the next pointer * @return the next transaction */ I2CTransaction * get_next() { return _next; } /** * Accessor for the Transactions's issuer * @return the I2C object that issued this transaction */ I2C * get_issuer() { return _issuer; } /** * Accessor for the current segment pointer * @return the current segment poitner */ detail::I2CSegment * get_current() { return _current; } /** * Accessor for the irqsafe flag * @retval true if the transaction was instantiated with irqsafe set and access to irq-safe allocators * @retval false if the transaction was not instantiated with irqsafe set and access to irq-safe allocators */ bool is_irqsafe() const { return _irqsafe; } /** * Accessor for the transaction frequency * @return the frequency of the transaction in Hz */ uint32_t frequency() const { return _hz; } /** * Accessor for the transaction frequency * 
@param[in] hz the transaction frequency in Hz */ void frequency(uint32_t hz) { _hz = hz; } /** * Accessor for the transaction target address * @return the transaction address */ uint16_t address() const { return _address; } protected: /** * The next transaction in the queue * This field is used for chaining transactions together into a queue. * * This field is not volatile because it is only accessed from within a * critical section. */ I2CTransaction * _next; /// The target I2C address to communicate with uint16_t _address; /** * The first I2CSegment in the transaction * * This field is not volatile because it is only accessed from within a * critical section. This will still be valid if the critical section is * replaced with an atomic operation. */ detail::I2CSegment * _root; /** * The first I2CSegment in the transaction * * This is a helper field for building or processing I2C transactions. * It allows the Transaction to easily locate the end of the queue when * composing the transaction and, equally, the currently transferring segment * while processing the transaction. * * This field is not volatile because it is only accessed from within a * critical section. */ detail::I2CSegment * _current; /// The I2C frequency to use for the transaction unsigned _hz; /// Flag to indicate that the Transaction and its Segments were allocated with an irqsafe allocator bool _irqsafe; /// The I2C Object that launched this transaction I2C * _issuer; /// An array of I2C Event Handlers. detail::I2CEventHandler _handlers[I2C_TRANSACTION_NHANDLERS]; }; /** An I2C Master, used for communicating with I2C slave devices * * Example: * @code * // Read 6 bytes from I2C EEPROM slave at address 0x62 * * #include "mbed.h" * * mbed::drivers::v2::I2C i2c(p28, p27); * * // This callback executes in minar context * void xfer_done(mbed::drivers::v2::I2CTransaction * t, int event) { * t->reset_current(); // Set the current pointer to root. 
* uint8_t *txPtr = t->get_current->get_buf(); * printf("EEPROM 0x%02x@0x%02x%02x: ", t->address(), txPtr[0], txPtr[1]); * // Get the rx buffer pointer * uint8_t * rxPtr = t->get_current() // get the first segment (the tx segment) * ->get_next() // get the second segment (the rx segment) * ->get_buf(); // get the buffer pointer * for (uint i = 0; i < 6; i++) { * printf("%02x", rxPtr[i]); * } * printf("\n"); * // Both the tx and rx buffers are ephemeral, so they will be freed automatically when this function exits * } * * void app_start(int, char **) { * i2c.transfer_to(0x62) // I2C Slave Address * .tx_ephemeral("\x12\x34", 2) // Send EEPROM location * .rx(6) // Read 6 bytes into an ephemeral buffer * .on(I2C_EVENT_TRANSFER_COMPLETE, xfer_done) * .apply(); * } * @endcode */ class I2C { public: using event_callback_t = detail::I2C_event_callback_t; /** Create an I2C Master interface, connected to the specified pins * * @param sda I2C data line pin * @param scl I2C clock line pin */ I2C(PinName sda, PinName scl); /** Create an I2C Master interface, connected to the specified pins and providing IRQ-safe allocators * * @param sda I2C data line pin * @param scl I2C clock line pin * @param TransactionPool An IRQ-safe allocator for Transaction objects * @param SegmentPool An IRQ-safe allocator for Segment objects */ I2C(PinName sda, PinName scl, mbed::util::PoolAllocator *TransactionPool, mbed::util::PoolAllocator *SegmentPool); /** Destroy the I2C Master interface. 
* Releases a reference to the I2C Resource Manager */ ~I2C(); /** Set the frequency of the I2C interface * * @param hz The bus frequency in hertz */ void frequency(uint32_t hz); /** * @brief A helper class for constructing transactions */ class TransferAdder { friend I2C; protected: /** * @brief Construct a new TransferAdder * * @param[in] i2c the issuing I2C object * @param[in] address the address that is the target of the transfer * @param[in] hz the frequency to use for the transfer * @param[in] irqsafe indicates whether the TransferAdder should use the I2C Object's IRQ-safe allocators */ TransferAdder(I2C *i2c, int address, uint32_t hz, bool irqsafe); /** * @brief Allocates and constructs a new I2C Segment * @return A new I2C Segment */ detail::I2CSegment * new_segment(detail::I2CDirection d); public: /** * @brief Set the frequency for this transaction * * By default, the transaction will use the default frequency for the I2C object. This overrides that frequency. * The frequency used will be applied to the whole transaction, not just a single segment. * * @param[in] hz the frequency to set */ TransferAdder & frequency(uint32_t hz); /** * @brief set an event handler * * An event is triggered when any of the bits in the event mask match the event. * Four event slots are provided, additional event * * @param[in] event the event mask * @param[in] cb the callback to trigger on an event mask match */ TransferAdder & on(uint32_t event, const event_callback_t & cb); /** * @brief set an event handler * * An event is triggered when any of the bits in the event mask match the event. * Four event slots are provided, additional event * * @param[in] event the event mask * @param[in] cb the callback to trigger on an event mask match */ TransferAdder & on(uint32_t event, event_callback_t && cb); /** * @brief Queue the transfer * * Hands the transfer over to the resource manager and returns the resource manager's status. 
No further * configuration of the transfer is possible after apply() has been called. * * @return the error status of submitting the transfer to the resource manager */ I2CError apply(); /** * @brief Add a transmit buffer to the transaction * * @param[in] buf a pointer to the buffer to send * @param[in] len the number of bytes to send */ TransferAdder & tx(void *buf, size_t len); /** * @brief Add a transmit buffer to the transaction * * @param[in] buf a pointer to and length of the buffer to send */ TransferAdder & tx(const Buffer & buf); /** * @brief Add an ephermeral transmit buffer to the transaction * * If the buffer is 7 or fewer bytes, it will be managed internally, so the original can be freed. * * @param[in] buf a pointer to the buffer to send * @param[in] len the number of bytes to send */ TransferAdder & tx_ephemeral(void *buf, size_t len); /** * @brief Add a receive buffer to the transaction * * @param[in] buf a pointer to the buffer to receive into * @param[in] len the number of bytes to receive */ TransferAdder & rx(void *buf, size_t len); /** * @brief Add a receive buffer to the transaction * * @param[in] buf a pointer to and length of the buffer to receive into */ TransferAdder & rx(const Buffer & buf); /** * @brief Add an ephermeral receive buffer to the transaction * * If the buffer is 7 or fewer bytes, it will be managed internally * * @param[in] len the number of bytes to receive */ TransferAdder & rx(size_t len); /** * @brief Applies an unapplied transaction or destroys a failed transaction * * If the transaction has not been applied when the TransferAdder goes out of scope, it is automatically * applied. If it has already been applied, the destructor exits. If an error condition has been detected, the * destructor destroys the transaction and any associated segments. 
*/ ~TransferAdder(); protected: /// The transaction object that is to be added to the I2C transaction queue I2CTransaction * _xact; /// The I2C object to use for posting the transaction I2C* _i2c; /// flag variable to prevent double-posting of transactions bool _posted; /// flag variable to indicate whether the transaction is intended to use irq-safe allocators bool _irqsafe; /// The error status of the TransferAdder. Transaction will only be posted if the error status is I2CError::none I2CError _rc; }; /** * @brief Begin constructing a transfer to the specified I2C address * * Creates a TransferAdder to manage the construction of the transfer. This API should not be called from IRQ * context * * @param[in] address the I2C address that is the target of this transaction */ TransferAdder transfer_to(int address); /** * @brief Begin constructing a transfer to the specified I2C address, in irq context * * Creates a TransferAdder to manage the construction of the transfer. This API can be called from IRQ context, but * It requires that a pool allocators for both Transactions and Segments have been specified. * * @param[in] address the I2C address that is the target of this transaction */ TransferAdder transfer_to_irqsafe(int address); /** * @brief Create a new segment * * If irqsafe = true, allocate from a pool allocator. Otherwise, allocate from new. * * @param[in] irqsafe flag that indicates whether or not to use a pool allocator * @return the new segment on success, or NULL on failure */ detail::I2CSegment * new_segment(bool irqsafe); /** * @brief Free a transaction * * Determines destroys and frees a transaction. If the transaction was marked irqsafe, calls the destructor then the * pool allocator's free member function. If the transaction was not marked irqsafe, calls delete. * * @param[in] t the transaction to destroy and free */ void free(I2CTransaction *t); /** * @brief Free a segment * * Determines destroys and frees a segment. 
If irqsafe = true, calls the destructor then the * pool allocator's free member function. If irqsafe = false, calls delete. * * @param[in] s the segment to destroy and free * @param[in] irqsafe a flag that indicates whether to use the pool allocator to free or not */ void free(detail::I2CSegment *s, bool irqsafe); protected: friend TransferAdder; /** * @brief Initiate a transaction * * Submits the transaction to the resource manager's queue and returns the result * * @param[in] t the transaction to queue * @return the status of the submission */ I2CError post_transaction(I2CTransaction *t); /** * @brief Creates a new transaction and pre-fills some parts of it. * * new_transaction prefills the address, frequency, and issuer fields. If marked irqsafe, it will be allocated from * the pool allocator and any associated segments will be allocated from the pool allocator as well. * * @param[in] address The I2C address that is the target of this transaction * @param[in] hz The I2C frequency to use * @param[in] irqsafe The flag that indicates whether to use pool allocators * @param[in] issuer A pointer to this I2C instance * @return the new I2C Transaction object, or NULL on failure */ I2CTransaction * new_transaction(uint16_t address, uint32_t hz, bool irqsafe, I2C *issuer); uint32_t _hz; detail::I2CResourceManager * _owner; mbed::util::PoolAllocator * TransactionPool; mbed::util::PoolAllocator * SegmentPool; }; } // namespace v2 } // namespace drivers } // namespace mbed #endif #endif // MBED_DRIVERS_V1_I2C_HPP
34.789127
120
0.657196
[ "object" ]
11d7b9e716a93dc458a54a5d6853e4b81843acfa
33,671
cpp
C++
Engine/Graphics/Graphics.cpp
GCourtney27/Retina-Engine
5358b9c499f4163a209024dc303c3efe6c520c01
[ "MIT" ]
null
null
null
Engine/Graphics/Graphics.cpp
GCourtney27/Retina-Engine
5358b9c499f4163a209024dc303c3efe6c520c01
[ "MIT" ]
null
null
null
Engine/Graphics/Graphics.cpp
GCourtney27/Retina-Engine
5358b9c499f4163a209024dc303c3efe6c520c01
[ "MIT" ]
null
null
null
#include "Graphics.h" #include "..\Systems\FileSystem.h" #include "..\Input\InputManager.h" #include "..\Systems\BenchmarkingTimer.h" #include "MaterialTexturedFoliage.h" #include "MaterialTextured.h" #include "..\Components\RigidBodyComponent.h" #include <cstdlib> #include <math.h> #define PI 3.14159265 bool Graphics::Initialize(HWND hwnd, int width, int height, Engine* engine) { windowWidth = width; windowHeight = height; m_pEngine = engine; if (!InitializeDirectX(hwnd)) return false; if (!InitializeShaders()) return false; pointLight = new PointLight(&(m_pEngine->GetScene()), *(new ID("Point Light"))); pointLight->GetTransform().SetPosition(DirectX::XMFLOAT3(0.0f, 32.0f, -50.0f)); pointLight->GetTransform().SetRotation(0.0f, 0.0f, 0.0f); pointLight->GetTransform().SetScale(1.0f, 1.0f, 1.0f); MeshRenderer* mr = pointLight->AddComponent<MeshRenderer>(); mr->Initialize(pointLight, "..\\Assets\\Objects\\Primatives\\Sphere.fbx", Graphics::Instance()->GetDevice(), Graphics::Instance()->GetDeviceContext(), Graphics::Instance()->GetDefaultVertexShader(), m_pMaterial); EditorSelection* es = pointLight->AddComponent<EditorSelection>(); es->Initialize(pointLight, 1.0f, pointLight->GetTransform().GetPosition()); directionalLight = new DirectionalLight(&(m_pEngine->GetScene()), *(new ID("Directional Light"))); directionalLight->GetTransform().SetPosition(DirectX::XMFLOAT3(0.0f, 1000.0f, -100.0f)); directionalLight->GetTransform().SetRotation(0.0f, 0.0f, 0.0f); directionalLight->GetTransform().SetScale(1.0f, 1.0f, 1.0f); MeshRenderer* mrd = directionalLight->AddComponent<MeshRenderer>(); mrd->Initialize(directionalLight, "..\\Assets\\Objects\\Primatives\\Sphere.fbx", Graphics::Instance()->GetDevice(), Graphics::Instance()->GetDeviceContext(), Graphics::Instance()->GetDefaultVertexShader(), m_pMaterial); EditorSelection* esd = directionalLight->AddComponent<EditorSelection>(); esd->Initialize(directionalLight, 1.0f, directionalLight->GetTransform().GetPosition()); if 
(!InitializeScene()) return false; InitSkybox(); // Setup ImGui InitialzeImGui(hwnd); //backBufferTex.Initialize(pDevice.Get(), "..\\Assets\\Objects\\Norway\\Opaque\\Rock02\\Rock02_Albedo.jpg"); HRESULT hr = DirectX::CreateWICTextureFromFile(pDevice.Get(), L"..\\Assets\\Objects\\Norway\\Opaque\\Rock02\\Rock02_Albedo.jpg", nullptr, &backBufferSRV); if (FAILED(hr)) ErrorLogger::Log("Failed ot create SRV for back buffer."); return true; } void Graphics::InitialzeImGui(HWND hwnd) { pImGuiIO = new ImGuiIO(); IMGUI_CHECKVERSION(); ImGui::CreateContext(); ImGuiIO& io = ImGui::GetIO(); io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; io.ConfigFlags |= ImGuiConfigFlags_DockingEnable; //io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable; ImGui::StyleColorsDark(); //ImGui::StyleColorsClassic(); //ImGui::StyleColorsLight(); ImGuiStyle& style = ImGui::GetStyle(); if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) { style.WindowRounding = 0.0f; style.Colors[ImGuiCol_WindowBg].w = 1.0f; } ImGui_ImplWin32_Init(hwnd); ImGui_ImplDX11_Init(pDevice.Get(), pDeviceContext.Get()); *pImGuiIO = io; } void Graphics::InitSkybox() { /*m_pSkyMaterial = dynamic_cast<MaterialSky*>(skybox->GetComponent<MeshRenderer>()->GetModel()->GetMaterial()); return;*/ skybox = new Entity((&m_pEngine->GetScene()), *(new ID("Sky Box"))); skybox->GetTransform().SetPosition(0.0f, 0.0f, 0.0f); skybox->GetTransform().SetScale(50000.0f, 50000.0f, 50000.0f); skybox->GetTransform().SetRotation(0.0f, 0.0f, 0.0f); MeshRenderer* me = skybox->AddComponent<MeshRenderer>(); me->Initialize(skybox, "..\\Assets\\Objects\\Primatives\\Sphere.fbx", pDevice.Get(), pDeviceContext.Get(), cb_vs_vertexshader, nullptr); // MountainTop // NewportLoft // LowOrbit // skybox1 // skybox2 // skybox3 std::wstring skyboxType = L"MountainTop"; std::wstring diffuse = L"..\\Assets\\Textures\\Skyboxes\\" + skyboxType + L"_Diff.dds"; std::wstring envMap = L"..\\Assets\\Textures\\Skyboxes\\" + skyboxType + L"_EnvMap.dds"; std::wstring IR = 
L"..\\Assets\\Textures\\Skyboxes\\" + skyboxType + L"_IR.dds"; HRESULT hr = DirectX::CreateDDSTextureFromFile(pDevice.Get(), diffuse.c_str(), nullptr, &skyboxTextureSRV); if(FAILED(hr)) ErrorLogger::Log("Failed to load dds diffuse texture for skybox"); hr = DirectX::CreateDDSTextureFromFile(pDevice.Get(), envMap.c_str(), nullptr, &environmentMapSRV); if (FAILED(hr)) ErrorLogger::Log("Failed to load dds texture for environment map"); hr = DirectX::CreateDDSTextureFromFile(pDevice.Get(), IR.c_str(), nullptr, &irradianceMapSRV); if (FAILED(hr)) ErrorLogger::Log("Failed to load dds texture for irradiance map"); hr = DirectX::CreateWICTextureFromFile(pDevice.Get(), L"..\\Assets\\Textures\\Skyboxes\\ibl_brdf_lut.png", nullptr, &brdfLUTSRV); if (FAILED(hr)) ErrorLogger::Log("Failed to load dds texture for brdfLUT map"); } bool Graphics::InitializeDirectX(HWND hwnd) { try { std::vector<AdapterData> adapters = AdapterReader::GetAdapters(); if (adapters.size() < 1) { ErrorLogger::Log("No DirectX compatable adapters where found when initializing Direct3D 11."); return false; } // -- Initialize Swap Chain -- // DXGI_SWAP_CHAIN_DESC scd = { 0 }; scd.BufferDesc.Width = windowWidth; scd.BufferDesc.Height = windowHeight; scd.BufferDesc.RefreshRate.Numerator = 60; scd.BufferDesc.RefreshRate.Denominator = 1; scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; scd.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED; scd.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED; //scd.SampleDesc.Count = 1; scd.SampleDesc.Count = 4;// New scd.SampleDesc.Quality = 0; scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; scd.BufferCount = 1; scd.OutputWindow = hwnd; scd.Windowed = TRUE; scd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD; scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; HRESULT hr; hr = D3D11CreateDeviceAndSwapChain(adapters[1].pAdapter, //IDXGI Adapter: 0 for Intel gpu; 1 for NVIDIA D3D_DRIVER_TYPE_UNKNOWN, NULL, // NULL for software driver type NULL, // Flags for runtime 
layers NULL, // Feature levels array 0, //Number of Feature levels in array D3D11_SDK_VERSION, &scd, // Swapchain desciption pSwapchain.GetAddressOf(), // Swapchain address pDevice.GetAddressOf(), // Device address NULL, //Supported feature level pDeviceContext.GetAddressOf()); // Device context address COM_ERROR_IF_FAILED(hr, "Failed to create device swap chain."); hr = pSwapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(pBackBuffer.GetAddressOf())); COM_ERROR_IF_FAILED(hr, "Failed to set buffer for swap chain."); hr = pDevice->CreateRenderTargetView(pBackBuffer.Get(), NULL, pRenderTargetView.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create render target view for back buffer."); // -- Create Depth/Stencil Buffer -- // CD3D11_TEXTURE2D_DESC depthStencilDesc(DXGI_FORMAT_D24_UNORM_S8_UINT, windowWidth, windowHeight); depthStencilDesc.MipLevels = 1; depthStencilDesc.SampleDesc.Count = 4;// New depthStencilDesc.SampleDesc.Quality = 0;// New depthStencilDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL; hr = pDevice->CreateTexture2D(&depthStencilDesc, NULL, pDepthStencilBuffer.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create depth stencil buffer Texture2D."); hr = pDevice->CreateDepthStencilView(pDepthStencilBuffer.Get(), NULL, pDepthStencilView.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create depth stencil view."); pDeviceContext->OMSetRenderTargets(1, pRenderTargetView.GetAddressOf(), pDepthStencilView.Get()); // Create depth stencil state CD3D11_DEPTH_STENCIL_DESC depthstancildesc(D3D11_DEFAULT); // Z Buffer depthstancildesc.DepthFunc = D3D11_COMPARISON_FUNC::D3D11_COMPARISON_LESS_EQUAL; hr = pDevice->CreateDepthStencilState(&depthstancildesc, pDepthStencilState.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Filaed to create depth stencil state."); // Create and set the ViewPort CD3D11_VIEWPORT viewport(0.0f, 0.0f, static_cast<float>(windowWidth), static_cast<float>(windowHeight)); pDeviceContext->RSSetViewports(1, 
&viewport); // Create Rasterizer State/s // Default CD3D11_RASTERIZER_DESC rasterizerDesc(D3D11_DEFAULT); rasterizerDesc.AntialiasedLineEnable = true;// New rasterizerDesc.MultisampleEnable = true; hr = pDevice->CreateRasterizerState(&rasterizerDesc, pRasterizerState.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create rasterizer state."); // Skybox and foliage CD3D11_RASTERIZER_DESC rasterizerDescCULLNONE(D3D11_DEFAULT); rasterizerDesc.AntialiasedLineEnable = true;// New rasterizerDesc.MultisampleEnable = true; rasterizerDescCULLNONE.CullMode = D3D11_CULL_MODE::D3D11_CULL_NONE; // Uncomment to draw both sides of the mesh hr = pDevice->CreateRasterizerState(&rasterizerDescCULLNONE, pRasterizerStateCULLNONE.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create rasterizer state."); D3D11_DEPTH_STENCIL_DESC dssDesc; ZeroMemory(&dssDesc, sizeof(D3D11_DEPTH_STENCIL_DESC)); dssDesc.DepthEnable = true; dssDesc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL; dssDesc.DepthFunc = D3D11_COMPARISON_LESS_EQUAL; // Create Blend State D3D11_BLEND_DESC blendDesc = { 0 }; D3D11_RENDER_TARGET_BLEND_DESC rtbd = { 0 }; rtbd.BlendEnable = true; rtbd.SrcBlend = D3D11_BLEND::D3D11_BLEND_SRC_ALPHA; rtbd.DestBlend = D3D11_BLEND::D3D11_BLEND_INV_SRC_ALPHA; rtbd.BlendOp = D3D11_BLEND_OP::D3D11_BLEND_OP_ADD; rtbd.SrcBlendAlpha = D3D11_BLEND::D3D11_BLEND_ONE; rtbd.DestBlendAlpha = D3D11_BLEND::D3D11_BLEND_ZERO; rtbd.BlendOpAlpha = D3D11_BLEND_OP::D3D11_BLEND_OP_ADD; rtbd.RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE::D3D11_COLOR_WRITE_ENABLE_ALL; blendDesc.RenderTarget[0] = rtbd; hr = pDevice->CreateBlendState(&blendDesc, pBlendState.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create blend state."); pSpriteBatch = std::make_unique<DirectX::SpriteBatch>(pDeviceContext.Get()); pSpriteFont = std::make_unique<DirectX::SpriteFont>(pDevice.Get(), L"..\\Assets\\Fonts\\calibri.spritefont"); // Create sampler description for sampler state CD3D11_SAMPLER_DESC 
samplerDesc(D3D11_DEFAULT); samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR; samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP; samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP; samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP; samplerDesc.MipLODBias = 0.0f; samplerDesc.MaxAnisotropy = 1; samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS; samplerDesc.BorderColor[0] = 0; samplerDesc.BorderColor[1] = 0; samplerDesc.BorderColor[2] = 0; samplerDesc.BorderColor[3] = 0; samplerDesc.MinLOD = 0; samplerDesc.MaxLOD = D3D11_FLOAT32_MAX; hr = pDevice->CreateSamplerState(&samplerDesc, samplerState.GetAddressOf()); COM_ERROR_IF_FAILED(hr, "Failed to create sampler state."); } catch (COMException & exception) { ErrorLogger::Log(exception); return false; } return true; } void Graphics::RenderFrame() { Debug::ScopedTimer timer; m_frameTimer.tick(); if (Debug::Editor::Instance()->PlayingGame()) m_pSelectedCamera = m_pEngine->GetPlayer()->GetPlayerCamera(); else m_pSelectedCamera = &editorCamera; const DirectX::XMMATRIX & ProjMat = m_pSelectedCamera->GetProjectionMatrix(); const DirectX::XMMATRIX & ViewMat = m_pSelectedCamera->GetViewMatrix(); // -- These Constant Buffers dont get included in materials becasue they change on a per-scene basis -- // #pragma region // -- Update Light Shader Information -- // cb_ps_light.data.dynamicLightColor = pointLight->lightColor; cb_ps_light.data.dynamicLightStrength = pointLight->lightStrength; cb_ps_light.data.dynamicLightPosition = pointLight->GetTransform().GetPosition(); cb_ps_light.data.dynamicLightAttenuation_a = pointLight->attenuation_a; cb_ps_light.data.dynamicLightAttenuation_b = pointLight->attenuation_b; cb_ps_light.data.dynamicLightAttenuation_c = pointLight->attenuation_c; cb_ps_light.ApplyChanges(); cb_ps_directionalLight.data.Color = directionalLight->lightColor; cb_ps_directionalLight.data.Strength = directionalLight->lightStrength; cb_ps_directionalLight.data.Direction = directionalLight->GetTransform().GetPosition(); 
cb_ps_directionalLight.ApplyChanges(); // -- Update Pixel Shader Per Frame Informaiton -- // cb_ps_PerFrame.data.deltaTime = m_deltaTime; if (Debug::Editor::Instance()->PlayingGame()) cb_ps_PerFrame.data.camPosition = m_pEngine->GetPlayer()->GetPlayerCamera()->GetTransform().GetPosition(); else cb_ps_PerFrame.data.camPosition = editorCamera.GetTransform().GetPosition(); cb_ps_PerFrame.ApplyChanges(); // -- Update Vertex Shader Per Frame Informaiton -- // cb_vs_PerFrame.data.deltaTime = m_deltaTime; time = (float)m_pEngine->GetFrameTimer().seconds(); cb_vs_PerFrame.data.time = time; cb_vs_PerFrame.ApplyChanges(); // -- Set Pixel Shader Constant Buffers -- // pDeviceContext->PSSetConstantBuffers(0, 1, cb_ps_light.GetAddressOf()); pDeviceContext->PSSetConstantBuffers(1, 1, cb_ps_PerFrame.GetAddressOf()); pDeviceContext->PSSetConstantBuffers(3, 1, cb_ps_directionalLight.GetAddressOf()); // -- Set Vertex Shader Constant Buffers -- // pDeviceContext->VSSetConstantBuffers(1, 1, cb_vs_PerFrame.GetAddressOf()); #pragma endregion Per Scene // -- Start ImGui frame -- // ImGui_ImplDX11_NewFrame(); ImGui_ImplWin32_NewFrame(); ImGui::NewFrame(); ImGuizmo::BeginFrame(); ImGui::DockSpaceOverViewport(0, ImGuiDockNodeFlags_PassthruCentralNode); // -- Clear Background Color for Scene -- // float bgcolor[] = { 0.01f, 0.01f, 0.01f, 1.0f }; pDeviceContext->ClearRenderTargetView(pRenderTargetView.Get(), bgcolor); pDeviceContext->ClearDepthStencilView(pDepthStencilView.Get(), D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0); if(m_drawWireframe) pDeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D10_PRIMITIVE_TOPOLOGY_LINELIST); else pDeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST); // Set Rasterizer state to CULLNONE to draw skybox pDeviceContext->RSSetState(pRasterizerStateCULLNONE.Get()); // -- Set Shader Samplers -- // pDeviceContext->PSSetSamplers(0, 1, samplerState.GetAddressOf()); // -- Draw Skybox -- // 
pDeviceContext->IASetInputLayout(skyVertexShader.GetInputLayout()); pDeviceContext->PSSetShaderResources(0, 1, &skyboxTextureSRV); pDeviceContext->PSSetShader(skyPixelShader.GetShader(), NULL, 0); pDeviceContext->VSSetShader(skyVertexShader.GetShader(), NULL, 0); skybox->Draw(ProjMat, ViewMat); // Reset Rasterizer state for rest of geometry pDeviceContext->RSSetState(pRasterizerState.Get()); pDeviceContext->OMSetDepthStencilState(pDepthStencilState.Get(), 0); pDeviceContext->OMSetBlendState(pBlendState.Get(), NULL, 0xFFFFFFFF); // -- Set IBL resources for shader slots -- // pDeviceContext->PSSetShaderResources(5, 1, &irradianceMapSRV); pDeviceContext->PSSetShaderResources(6, 1, &environmentMapSRV); pDeviceContext->PSSetShaderResources(7, 1, &brdfLUTSRV); // -- Draw Scene Objects -- // m_pEngine->GetScene().GetRenderManager().DrawOpaque(ProjMat, ViewMat); pDeviceContext->RSSetState(pRasterizerStateCULLNONE.Get()); m_pEngine->GetScene().GetRenderManager().DrawFoliage(ProjMat, ViewMat); pDeviceContext->RSSetState(pRasterizerState.Get()); // -- Update 2D shaders -- // pDeviceContext->IASetInputLayout(vertexshader_2d.GetInputLayout()); pDeviceContext->PSSetShader(pixelshader_2d.GetShader(), NULL, 0); pDeviceContext->VSSetShader(vertexshader_2d.GetShader(), NULL, 0); //sprite.Draw(camera2D.GetWorldmatrix() * camera2D.GetOrthoMatrix()); // Draws hello world sprite image // -- Draw Text -- // static int fpsCounter = 0; static std::string fpsString = "FPS: 0"; if (m_drawfpsCPU) { fpsString += "CPU " + std::to_string(static_cast<int>(m_pEngine->GetFrameTimer().fps())); if (m_drawFrameTimeCPU) fpsString += " / " + std::to_string(static_cast<int>(m_pEngine->GetFrameTimer().milliseconds())) + " ms\n"; else fpsString += " fps\n"; } if (m_drawfpsGPU) { fpsString += "GPU " + std::to_string(static_cast<int>(m_frameTimer.fps())); if (m_drawFrameTimeGPU) fpsString += " / " + std::to_string(static_cast<int>(m_frameTimer.milliseconds())) + " ms \n"; else fpsString += " fps\n"; } if 
(m_logDrawCalls) { fpsString += "Draw Calls: " + std::to_string(m_drawCalls) + "\n"; m_drawCalls = 0; } pSpriteBatch->Begin(); pSpriteFont->DrawString(pSpriteBatch.get(), StringHelper::StringToWide(fpsString).c_str(), DirectX::XMFLOAT2(0, 50), DirectX::Colors::White, 0.0f, DirectX::XMFLOAT2(0.0f, 0.0f), DirectX::XMFLOAT2(1.0f, 1.0f)); pSpriteBatch->End(); fpsString = ""; // -- Update ImGui -- // static bool showEditor = true; if (InputManager::Instance()->keyboard.KeyIsPressed('Y')) showEditor = false; if (InputManager::Instance()->keyboard.KeyIsPressed('U')) showEditor = true; if(showEditor) UpdateImGuiWidgets(); else ImGui::Render(); ImGui_ImplDX11_RenderDrawData(ImGui::GetDrawData()); if (pImGuiIO->ConfigFlags & ImGuiConfigFlags_ViewportsEnable) { ImGui::UpdatePlatformWindows(); ImGui::RenderPlatformWindowsDefault(); } // -- Flip Buffer and Present-- // pSwapchain->Present(0, NULL); // Enable Vertical sync with 1 or 0 } void Graphics::Update(const float& deltaTime) { m_deltaTime = deltaTime; editorCamera.Update(deltaTime); } void Graphics::Shutdown() { // Raw pointers delete pointLight; delete directionalLight; delete m_pMaterial; delete skybox; delete m_pSkyMaterial; delete pImGuiIO; delete m_pEngine; delete skyTexture; delete skyboxTextureSRV; delete irradianceMap; delete irradianceMapSRV; delete environmentMap; delete environmentMapSRV; delete brdfLUTtex; delete brdfLUTSRV; // WRL pointers pDevice->Release(); pDeviceContext->Release(); pSwapchain->Release(); pRenderTargetView->Release(); pDepthStencilView->Release(); pDepthStencilBuffer->Release(); pDepthStencilState->Release(); pRasterizerState->Release(); pRasterizerState->Release(); pRasterizerState->Release(); samplerState->Release(); // Smart pointers pSpriteBatch.release(); pSpriteFont.release(); ImGui_ImplDX11_Shutdown(); ImGui_ImplWin32_Shutdown(); ImGui::DestroyContext(); } bool Graphics::InitializeShaders() { #pragma region DetermineShaderPath { #ifdef _DEBUG // Debug Mode #ifdef _WIN64 //x64 
m_shaderFolder = L"..\\bin\\x64\\Debug\\"; #else // x86 (Win32) m_shaderFolder = L"..\\bin\\Win32\\Debug\\"; #endif #else // Release Mode #ifdef _WIN64 //x64 m_shaderFolder = L"..\\bin\\x64\\Release\\"; #else //x86 (Win32) m_shaderFolder = L"..\\bin\\Win32\\Release\\"; #endif #endif } #pragma endregion // 2D shaders D3D11_INPUT_ELEMENT_DESC layout2D[] = { {"POSITION", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0}, {"TEXCOORD", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0 } }; UINT numElements2D = ARRAYSIZE(layout2D); if (!vertexshader_2d.Initialize(pDevice, m_shaderFolder + L"vertexshader_2d.cso", layout2D, numElements2D)) return false; if (!pixelshader_2d.Initialize(pDevice, m_shaderFolder + L"pixelshader_2d.cso")) return false; // 3D shaders // -- Initialize PBR Shaders -- // D3D11_INPUT_ELEMENT_DESC defaultLayout3D[] = { {"POSITION", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0}, {"TEXCOORD", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0 }, {"NORMAL", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0 }, {"TANGENT", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0 }, {"BITANGENT", 0, DXGI_FORMAT::DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_CLASSIFICATION::D3D11_INPUT_PER_VERTEX_DATA, 0 } }; UINT defaultNumElements3D = ARRAYSIZE(defaultLayout3D); // Sky if (!skyVertexShader.Initialize(pDevice, m_shaderFolder + L"Sky_vs.cso", defaultLayout3D, defaultNumElements3D)) { ErrorLogger::Log("Failed to initialize Sky vertex shader"); return false; } if 
(!skyPixelShader.Initialize(pDevice, m_shaderFolder + L"Sky_ps.cso")) { ErrorLogger::Log("Failed to initialize Sky pixel shader"); return false; } return true; } bool Graphics::InitializeScene() { try { // Initialize Constant Buffer(s) HRESULT hr = cb_vs_vertexshader_2d.Initialize(pDevice.Get(), pDeviceContext.Get()); COM_ERROR_IF_FAILED(hr, "Failed to initialize constant buffer 2d for vertex shader."); hr = cb_vs_vertexshader.Initialize(pDevice.Get(), pDeviceContext.Get()); COM_ERROR_IF_FAILED(hr, "Failed to initialize constant buffer for vertex shader."); hr = cb_ps_light.Initialize(pDevice.Get(), pDeviceContext.Get()); COM_ERROR_IF_FAILED(hr, "Failed to initialize constant buffer for pixel shader."); hr = cb_ps_PerFrame.Initialize(pDevice.Get(), pDeviceContext.Get()); COM_ERROR_IF_FAILED(hr, "Failed to initialize constant buffer for pixel shader utilites."); hr = cb_vs_PerFrame.Initialize(pDevice.Get(), pDeviceContext.Get()); COM_ERROR_IF_FAILED(hr, "Failed to initialize constant buffer for vertex shader utilites."); hr = cb_ps_directionalLight.Initialize(pDevice.Get(), pDeviceContext.Get()); COM_ERROR_IF_FAILED(hr, "Failed to initialize constant buffer for directional light for use in pixel shader."); //editorCamera.Initialize(&m_pEngine->GetScene(), (*new ID("EditorCamera"))); // Initialize light shader values cb_ps_light.data.ambientLightColor = DirectX::XMFLOAT3(1.0f, 1.0f, 1.0f); //cb_ps_light.data.ambientLightStrength = 2.498f; //cb_ps_light.data.ambientLightStrength = 4.4f; cb_ps_light.data.ambientLightStrength = 1.0f; cb_ps_directionalLight.data.Color = DirectX::XMFLOAT3(1.0f, 1.0f, 1.0f); cb_ps_directionalLight.data.Strength = 20.0f; cb_ps_directionalLight.data.Direction = XMFLOAT3(0.25f, 0.5f, -1.0f); cb_ps_directionalLight.ApplyChanges(); cb_ps_PerFrame.data.camPosition = editorCamera.GetTransform().GetPosition(); cb_ps_PerFrame.data.deltaTime = 0.5f; cb_vs_PerFrame.data.deltaTime = 0.5f; cb_vs_PerFrame.data.time = 0.5f; pointLight->lightStrength = 
1.0f; pointLight->attenuation_a = 0.5f; pointLight->attenuation_b = 0.0f; pointLight->attenuation_c = 0.0f; // Hello World sprite if (!sprite.Initialize(pDevice.Get(), pDeviceContext.Get(), 256, 256, "..\\Assets\\Textures\\cat.jpg", cb_vs_vertexshader_2d)) { ErrorLogger::Log("Failed to initilize sprite"); return false; } camera2D.SetProjectionValues((float)windowWidth, (float)windowHeight, 0.0f, 1.0f); editorCamera.GetTransform().SetPosition(DirectX::XMFLOAT3(0.0f, 5.0f, -40.0f)); editorCamera.SetProjectionValues(75.0f, static_cast<float>(windowWidth) / static_cast<float>(windowHeight), 0.1f, 4000.0f); } catch (COMException & exception) { ErrorLogger::Log(exception); return false; } return true; } void Graphics::UpdateImGuiWidgets() { using namespace Debug; Entity* pSelectedEntity = Editor::Instance()->GetSelectedEntity(); std::list<Entity*>* entities = m_pEngine->GetScene().GetAllEntities(); // ImGuizmo Experimental tool //ImGuizmo::BeginFrame(); //ImGuizmo::Enable(true); ////ImGuiIO& io = ImGui::GetIO(); //XMFLOAT4X4 camViewTemp; //XMStoreFloat4x4(&camViewTemp, editorCamera.GetViewMatrix()); //float camView[16] = //{ // camViewTemp._11,camViewTemp._21, camViewTemp._31, camViewTemp._41, // camViewTemp._12,camViewTemp._22, camViewTemp._32, camViewTemp._42, // camViewTemp._13,camViewTemp._23, camViewTemp._33, camViewTemp._43, // camViewTemp._14,camViewTemp._24, camViewTemp._34, camViewTemp._44 //}; //XMFLOAT4X4 camPojTemp; //XMStoreFloat4x4(&camPojTemp, editorCamera.GetProjectionMatrix()); //float camProj[16] = //{ // camPojTemp._11, camPojTemp._21, camPojTemp._31, camPojTemp._41, // camPojTemp._12, camPojTemp._22, camPojTemp._32, camPojTemp._42, // camPojTemp._13, camPojTemp._23, camPojTemp._33, camPojTemp._43, // camPojTemp._14, camPojTemp._24, camPojTemp._34, camPojTemp._44 //}; //XMFLOAT4X4 objMatTemp; //XMStoreFloat4x4(&objMatTemp, pSelectedEntity->GetTransform().GetWorldMatrix()); //float objMat[16] = //{ // objMatTemp._11, objMatTemp._21, objMatTemp._31, 
objMatTemp._41, // objMatTemp._12, objMatTemp._22, objMatTemp._32, objMatTemp._42, // objMatTemp._13, objMatTemp._23, objMatTemp._33, objMatTemp._43, // objMatTemp._14, objMatTemp._24, objMatTemp._34, objMatTemp._44 //}; //static float identityMatrix[16] = //{ // 1.f, 0.f, 0.f, 0.f, // 0.f, 1.f, 0.f, 0.f, // 0.f, 0.f, 1.f, 0.f, // 0.f, 0.f, 0.f, 1.f //}; //static float defaultMatrix[16] = //{ // 1.f, 0.f, 0.f, 0.f, // 0.f, 1.f, 0.f, 0.f, // 0.f, 0.f, 1.f, 0.f, // 0.f, 0.f, 0.f, 1.f //}; //ImGuizmo::DrawCube(camView, camProj, defaultMatrix); //ImGuizmo::SetDrawlist(); //ImGuizmo::SetRect(0, 0, pImGuiIO->DisplaySize.x, pImGuiIO->DisplaySize.y); //ImGuizmo::Manipulate(camView, camProj, ImGuizmo::TRANSLATE, ImGuizmo::LOCAL, objMat); //if (ImGuizmo::IsOver()) // Debug::Editor::Instance()->DebugLog("Mouse is over"); //ImGui::Begin("Game"); //{ // /*ID3D11ShaderResourceView* my_texture_view; // D3D11_SHADER_RESOURCE_VIEW_DESC desc = {}; // pDevice->CreateShaderResourceView(backBufferTex.GetTexture(), 0, backBufferTex.GetTextureResourceViewAddress()); // ImGui::Image((void*)backBufferTex.GetTextureResourceViewAddress(), ImVec2(1024, 1024));*/ // /*ID3D11ShaderResourceView* my_texture_view; // D3D11_SHADER_RESOURCE_VIEW_DESC my_shader_resource_view_desc = {}; // DirectX::CreateWICTextureFromFile(pDevice.Get(), L"..\\Assets\\Textures\\Skyboxes\\ibl_brdf_lut.png", nullptr, &brdfLUTSRV); // pDevice->CreateShaderResourceView(pBackBuffer.Get(), &my_shader_resource_view_desc, &my_texture_view);*/ // //HRESULT hr = DirectX::CreateWICTextureFromFile(pDevice.Get(), L"..\\Assets\\Textures\\Skyboxes\\ibl_brdf_lut.png", nullptr, &backBufferSRV); // // D3D11_TEXTURE2D_DESC textureDesc; // ZeroMemory(&textureDesc, sizeof(textureDesc)); // // Setup the render target texture description. 
// textureDesc.Width = 1920; // textureDesc.Height = 1080; // textureDesc.MipLevels = 1; // textureDesc.ArraySize = 1; // textureDesc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT; // textureDesc.SampleDesc.Count = 1; // textureDesc.Usage = D3D11_USAGE_DEFAULT; // textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE; // textureDesc.CPUAccessFlags = 0; // textureDesc.MiscFlags = 0; // D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc = {}; // shaderResourceViewDesc.Format = textureDesc.Format; // shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D; // shaderResourceViewDesc.Texture2D.MostDetailedMip = 0; // shaderResourceViewDesc.Texture2D.MipLevels = 1; // pDevice->CreateShaderResourceView(pBackBuffer.Get(), &shaderResourceViewDesc, &backBufferSRV); // ImGui::Image((void*)backBufferSRV, ImVec2(512, 512)); // DirectX::CreateWicTextureFromMemory() //} //ImGui::End(); // Menu Bar if (ImGui::Begin("Menu Bar", NULL, ImGuiWindowFlags_MenuBar | ImGuiWindowFlags_AlwaysAutoResize)) { if (ImGui::BeginMenuBar()) { if (ImGui::BeginMenu("File")) { if (ImGui::MenuItem("Save")) FileSystem::Instance()->WriteSceneToJSON(&m_pEngine->GetScene()); ImGui::MenuItem("Open"); ImGui::MenuItem("New Scene"); ImGui::EndMenu(); } if (ImGui::BeginMenu("Engine DEBUG")) { if (ImGui::MenuItem("Draw CPU fps")) { m_drawfpsCPU = !m_drawfpsCPU; } if (ImGui::MenuItem("Draw CPU Frame Time")) { m_drawFrameTimeCPU = !m_drawFrameTimeCPU; } if (ImGui::MenuItem("Draw GPU fps")) { m_drawfpsGPU = !m_drawfpsGPU; } if (ImGui::MenuItem("Draw GPU Frame Time")) { m_drawFrameTimeGPU = !m_drawFrameTimeGPU; } if (ImGui::MenuItem("Enable Wireframe")) m_drawWireframe = !m_drawWireframe; if (ImGui::MenuItem("Log Draw Calls")) m_logDrawCalls = !m_logDrawCalls; ImGui::EndMenu(); } ImGui::EndMenuBar(); } } ImGui::End(); ImGui::Begin("World Outliner"); { std::list<Entity*>::iterator iter; for (iter = entities->begin(); iter != entities->end(); iter++) { if 
(ImGui::Button((*iter)->GetID().GetName().c_str(), { 150.0f, 20.0f })) { Editor::Instance()->SetSelectedEntity((*iter)); } } } ImGui::End(); ImGui::Begin("Console"); { if (ImGui::Button("Clear", { 100, 20 })) { Editor::Instance()->ClearConsole(); } ImGui::SameLine(); static bool clearOnPlay = Editor::Instance()->GetClearConsoleOnPlay(); if (ImGui::Checkbox("Clear on play", &clearOnPlay)) { Editor::Instance()->SetCLearConsoleOnPlay(clearOnPlay); } ImGui::Text(Editor::Instance()->GetLogStatement().c_str()); } ImGui::End(); ImGui::Begin("Editor"); { ImGui::Text("Status: "); ImGui::SameLine(); std::string playStatus = ""; if (Editor::Instance()->PlayingGame()) playStatus += "Playing"; else playStatus += "Not Playing"; ImGui::Text(playStatus.c_str()); if (ImGui::Button("Play", { 50.0f, 20.0f })) { if(!Editor::Instance()->PlayingGame()) Editor::Instance()->PlayGame();// Editor calles scene OnStart } if (ImGui::Button("Stop", { 50.0f, 20.0f }) || InputManager::Instance()->keyboard.KeyIsPressed(27)) { Editor::Instance()->StopGame(); } //ImGui::Checkbox("Editor Camera Enabled", &m_editorCamEnabled); } ImGui::End(); ImGui::Begin("Lighting"); { ImGui::Text("Ambient/IBL Light"); ImGui::DragFloat3("Color Override", &cb_ps_light.data.ambientLightColor.x, 0.01f, 0.0f, 1.0f); ImGui::DragFloat("Strength Override", &cb_ps_light.data.ambientLightStrength, 0.01f, 0.0f, 10.0f); ImGui::Text("Directional Light"); ImGui::DragFloat3("Directional Color", &directionalLight->lightColor.x, 0.01f, 0.0f, 10.0f); ImGui::DragFloat("Directional Strength", &directionalLight->lightStrength, 0.01f, 0.0f, 50.0f); ImGui::Text("Point Light"); ImGui::DragFloat3("Point Color", &pointLight->lightColor.x, 0.01f, 0.0f, 10.0f); ImGui::DragFloat("Point Strength", &pointLight->lightStrength, 0.01f, 0.0f, 10.0f); ImGui::DragFloat("Attenuation A", &pointLight->attenuation_a, 0.01f, 0.1f, 10.0f); ImGui::DragFloat("Attenuation B", &pointLight->attenuation_b, 0.01f, 0.0f, 10.0f); ImGui::DragFloat("Attenuation C", 
&pointLight->attenuation_c, 0.01f, 0.0f, 10.0f); } ImGui::End(); std::string entityName = pSelectedEntity->GetID().GetName(); ImGui::Begin("Inspector"); { ImGui::Text(entityName.c_str()); ImGui::TextColored({100, 100, 100, 100}, "Transform"); ImGui::DragFloat3("Position", &pSelectedEntity->GetTransform().GetPosition().x, 0.1f, -2000.0f, 2000.0f); ImGui::DragFloat3("Rotation", &pSelectedEntity->GetTransform().GetRotation().x, 0.1f, -100.0f, 100.0f); ImGui::DragFloat3("Scale", &pSelectedEntity->GetTransform().GetScale().x, 0.1f, -500.0f, 500.0f); ImGui::NewLine(); std::vector<Component*> objectComponents = pSelectedEntity->GetAllComponents(); std::vector<Component*>::iterator iter; for (iter = objectComponents.begin(); iter != objectComponents.end(); iter++) { ImGui::NewLine(); (*iter)->OnImGuiRender(); } } ImGui::End(); //static int creationCounter = 33; static int creationCounter = 0; ImGui::Begin("Entity Creator"); { if (ImGui::Button("Create Rock Asset")) { creationCounter++; std::string creationCount = "Rock-" + std::to_string(creationCounter); Entity* entity = new Entity(&m_pEngine->GetScene(), (*new ID())); entity->GetID().SetName(creationCount); entity->GetID().SetTag("Untagged"); entity->GetID().SetType("Entity"); entity->GetTransform().SetPosition(editorCamera.GetTransform().GetPosition()); entity->GetTransform().SetRotation(0.0f, 0.0f, 0.0f); entity->GetTransform().SetScale(1.0f, 1.0f, 1.0f); // mr //Material* mat = new MaterialTextured(Material::eMaterialType::PBR_DEFAULT); Material* mat = nullptr; mat = mat->SetMaterialByType(Material::eMaterialType::PBR_DEFAULT, Material::eFlags::NOFLAGS); //mat = mat->SetMaterialByType(Material::eMaterialType::PBR_DEFAULT, Material::eFlags::FOLIAGE); //mat->Initiailze(pDevice.Get(), pDeviceContext.Get(), Material::eFlags::FOLIAGE); mat->Initiailze(pDevice.Get(), pDeviceContext.Get(), Material::eFlags::NOFLAGS); std::string file = "..\\Assets\\Objects\\\MossyRock\\\MossyRock_LOD2.fbx"; MeshRenderer* mr = 
entity->AddComponent<MeshRenderer>(); mr->Initialize(entity, file, pDevice.Get(), pDeviceContext.Get(), this->GetDefaultVertexShader(), mat); entity->SetHasMeshRenderer(true); //LuaScript entity->AddComponent<LuaScript>()->Initialize(entity, "NONE"); //Es entity->AddComponent<EditorSelection>()->Initialize(entity, 10.0f, entity->GetTransform().GetPosition()); //m_pEngine->GetScene().GetRenderManager().AddFoliageObject(mr); m_pEngine->GetScene().GetRenderManager().AddOpaqueObject(mr); m_pEngine->GetScene().AddEntity(entity); } } ImGui::End(); if (ImGui::IsAnyItemHovered()) Editor::Instance()->rayCastEnabled = false; // Assemble Draw Data ImGui::Render(); }
37.246681
220
0.734668
[ "mesh", "geometry", "render", "vector", "transform", "3d" ]
11d8eff232fd0801ff56bba443ea6e589b9cdb17
6,238
cpp
C++
Ceng213/pa3/PA3_Student_Pack/METUMaps.cpp
Arda1333/METU-CENG
1fafd9536b93be34fe031c84ae6a82ef4b9b932d
[ "MIT" ]
null
null
null
Ceng213/pa3/PA3_Student_Pack/METUMaps.cpp
Arda1333/METU-CENG
1fafd9536b93be34fe031c84ae6a82ef4b9b932d
[ "MIT" ]
null
null
null
Ceng213/pa3/PA3_Student_Pack/METUMaps.cpp
Arda1333/METU-CENG
1fafd9536b93be34fe031c84ae6a82ef4b9b932d
[ "MIT" ]
null
null
null
#include "METUMaps.h" #include "GraphExceptions.h" #include <iostream> void METUMaps::PrintNotInJourney() const { std::cout << "Device is not in a journey!" << std::endl; } void METUMaps::PrintUnableToChangeDestination() const { std::cout << "Cannot change Destination during journey!" << std::endl; } void METUMaps::PrintUnableToChangeStartingLoc() const { std::cout << "Cannot change Starting Location during journey!" << std::endl; } void METUMaps::PrintAlreadyInJourney() const { std::cout << "Device is already in a journey!" << std::endl; } void METUMaps::PrintJourneyIsAlreadFinished() const { std::cout << "Journey is already finished!" << std::endl; } void METUMaps::PrintLocationNotFound() const { std::cout << "One (or both) of the locations are not found in the maps!" << std::endl; } void METUMaps::PrintJourneyCompleted() const { std::cout << "Journey Completed!" << std::endl; } void METUMaps::PrintCachedLocationFound(const std::string& location0, const std::string& location1) const { std::cout << "Route between \"" << location0 << "\" and \"" << location1 << "\" is in cache, using that..." << std::endl; } void METUMaps::PrintCalculatingRoutes(const std::string& location0, const std::string& location1) const { std::cout << "Calculating Route(s) between \"" << location0 << "\" and \"" << location1 << "\"..." << std::endl; } std::string METUMaps::GenerateKey(const std::string& location0, const std::string& location1) { // ============================= // // This function is implemented // // Do not edit this function ! 
// // ============================= // return location0 + "/" + location1; } METUMaps::METUMaps(int potentialPathCount, const std::string& mapFilePath) { Graph graph(mapFilePath); this -> map = graph; KeyedHashTable paths(this -> map.TotalVertexCount() * potentialPathCount); this -> cachedPaths = paths; this -> potentialPathCount = potentialPathCount; this -> inJourney = false; } void METUMaps::SetDestination(const std::string& name) { if(inJourney) PrintUnableToChangeDestination(); else destination = name; } void METUMaps::SetStartingLocation(const std::string& name) { if(inJourney) PrintUnableToChangeStartingLoc(); else startingLoc = name; } void METUMaps::StartJourney() { std::vector<std::vector<int> > allPaths; std::vector<int> temp; int i, j, k, size; bool foundStart = false, foundDest = false; PrintCalculatingRoutes(startingLoc, destination); if(inJourney) { PrintAlreadyInJourney(); return; } for(i = 0; i < map.TotalVertexCount(); i++) { if(map.VertexName(i) == startingLoc) foundStart = true; if(map.VertexName(i) == destination) foundDest = true; } if(!foundStart || !foundDest) { PrintLocationNotFound(); return; } map.MultipleShortPaths(allPaths, startingLoc, destination, potentialPathCount); for(i = 0; i < allPaths.size(); i++) { size = allPaths[i].size(); for(j = 0; j < size - 1; j++) { std::vector<int> tempPath; std::string key = GenerateKey(map.VertexName(allPaths[i][j]), destination); for(k = j; k < size; k++) tempPath.push_back(allPaths[i][k]); cachedPaths.Insert(key, tempPath); } } currentLoc = startingLoc; currentRoute = allPaths[0]; inJourney = true; } void METUMaps::EndJourney() { if(!inJourney) { PrintJourneyIsAlreadFinished(); return; } cachedPaths.ClearTable(); startingLoc = ""; destination = ""; currentLoc = ""; inJourney = false; } void METUMaps::UpdateLocation(const std::string& name) { if(!inJourney) { PrintNotInJourney(); return; } int vertexCount = map.TotalVertexCount(), i; bool locationFound = false, pathFound = false; std::vector<int> 
path; std::string key; if(destination == name) { PrintJourneyCompleted(); return; } for(i = 0; i < vertexCount; i++) { if(map.VertexName(i) == name) { locationFound = true; break; } } if(!locationFound) { PrintLocationNotFound(); return; } key = GenerateKey(name, destination); pathFound = cachedPaths.Find(path, key); currentLoc = name; if(pathFound) { PrintCachedLocationFound(name, destination); currentRoute = path; return; } else { std::vector<std::vector<int> > allPaths; int j, k, size; PrintCalculatingRoutes(name, destination); map.MultipleShortPaths(allPaths, name, destination, potentialPathCount); currentRoute = allPaths[0]; for(i = 0; i < allPaths.size(); i++) { size = allPaths[i].size(); for(j = 0; j < size - 1; j++) { std::vector<int> tempPath; key = GenerateKey(map.VertexName(allPaths[i][j]), destination); for(k = j; k < size; k++) tempPath.push_back(allPaths[i][k]); cachedPaths.Insert(key, tempPath); } } } } void METUMaps::Display() { // ============================= // // This function is implemented // // Do not edit this function ! // // ============================= // if(!inJourney) { // We are not in journey, this function // shouldn't be called PrintNotInJourney(); return; } int timeLeft = map.TotalWeightInBetween(currentRoute); // Print last known location std::cout << "Journey : " << startingLoc << "->" << destination << "\n"; std::cout << "Current Location: " << currentLoc << "\n"; std::cout << "Time Left : " << timeLeft << " minutes\n"; std::cout << "Current Route : "; map.PrintPath(currentRoute, true); std::cout.flush(); }
24.084942
91
0.563161
[ "vector" ]
11dc3820cc39ef3085ec786d42930291e1c5b525
834
hpp
C++
src/org/apache/poi/sl/draw/binding/CTAdjustHandleList.hpp
pebble2015/cpoi
6dcc0c5e13e3e722b4ef9fd0baffbf62bf71ead6
[ "Apache-2.0" ]
null
null
null
src/org/apache/poi/sl/draw/binding/CTAdjustHandleList.hpp
pebble2015/cpoi
6dcc0c5e13e3e722b4ef9fd0baffbf62bf71ead6
[ "Apache-2.0" ]
null
null
null
src/org/apache/poi/sl/draw/binding/CTAdjustHandleList.hpp
pebble2015/cpoi
6dcc0c5e13e3e722b4ef9fd0baffbf62bf71ead6
[ "Apache-2.0" ]
null
null
null
// Generated from /POI/java/org/apache/poi/sl/draw/binding/CTAdjustHandleList.java #pragma once #include <fwd-POI.hpp> #include <java/util/fwd-POI.hpp> #include <org/apache/poi/sl/draw/binding/fwd-POI.hpp> #include <java/lang/Object.hpp> struct default_init_tag; class poi::sl::draw::binding::CTAdjustHandleList : public virtual ::java::lang::Object { public: typedef ::java::lang::Object super; public: /* protected */ ::java::util::List* ahXYOrAhPolar { }; public: virtual ::java::util::List* getAhXYOrAhPolar(); virtual bool isSetAhXYOrAhPolar(); virtual void unsetAhXYOrAhPolar(); // Generated CTAdjustHandleList(); protected: CTAdjustHandleList(const ::default_init_tag&); public: static ::java::lang::Class *class_(); private: virtual ::java::lang::Class* getClass0(); };
21.384615
82
0.699041
[ "object" ]
11dd05ab465241a9d6a64394b67ec07bfd12ad27
37,660
cpp
C++
src/tests/class_tests/openms/source/ConstRefVector_test.cpp
aiche/OpenMS
5d212db863ff1ef48b3a70fe4d556ef179ae4f49
[ "Zlib", "Apache-2.0" ]
null
null
null
src/tests/class_tests/openms/source/ConstRefVector_test.cpp
aiche/OpenMS
5d212db863ff1ef48b3a70fe4d556ef179ae4f49
[ "Zlib", "Apache-2.0" ]
null
null
null
src/tests/class_tests/openms/source/ConstRefVector_test.cpp
aiche/OpenMS
5d212db863ff1ef48b3a70fe4d556ef179ae4f49
[ "Zlib", "Apache-2.0" ]
null
null
null
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2015. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Erhan Kenar $ // $Authors: $ // -------------------------------------------------------------------------- #include <OpenMS/CONCEPT/ClassTest.h> #include <OpenMS/test_config.h> #include <OpenMS/KERNEL/StandardTypes.h> #include <OpenMS/KERNEL/Peak2D.h> /////////////////////////// #include <OpenMS/DATASTRUCTURES/ConstRefVector.h> /////////////////////////// using namespace OpenMS; using namespace std; typedef std::vector< Peak1D > PeakArrayType; typedef std::vector< Peak2D > PeakArray2DType; START_TEST(ConstRefVector, "$Id$") ///////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////// ConstRefVector<PeakArrayType>* ptr = 0; ConstRefVector<PeakArrayType>* nullPointer = 0; START_SECTION((ConstRefVector())) ptr = new ConstRefVector<PeakArrayType>(); TEST_NOT_EQUAL(ptr, nullPointer) END_SECTION START_SECTION((~ConstRefVector())) delete ptr; END_SECTION START_SECTION((ConstRefVector(const ConstRefVector& p))) ConstRefVector<PeakArrayType> pl; Peak1D peak1; Peak1D peak2; peak1.setIntensity(1.0f); pl.push_back(peak1); peak2.setIntensity(2.0f); pl.push_back(peak2); ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl2.size(), 2) TEST_REAL_SIMILAR(pl2[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 2.0) END_SECTION START_SECTION((ConstRefVector& operator=(const ConstRefVector &rhs))) ConstRefVector<PeakArrayType> pl; Peak1D peak1; Peak1D peak2; peak1.setIntensity(1.0f); pl.push_back(peak1); peak2.setIntensity(2.0f); pl.push_back(peak2); ConstRefVector<PeakArrayType> pl2; pl2 = pl; TEST_EQUAL(pl2.size(), 2) TEST_REAL_SIMILAR(pl2[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 2.0) END_SECTION ConstRefVector<PeakArrayType> pl; Peak1D peak1; peak1.setPosition(2.0); peak1.setIntensity(1.0f); Peak1D peak2; peak2.setPosition(0.0); peak2.setIntensity(0.5f); Peak1D peak3; 
peak3.setPosition(10.5); peak3.setIntensity(0.01f); // ConstRefVectorConstIterator tests added (ek) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D>* c_ptr = 0; ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D>* c_nullPointer = 0; START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator())) c_ptr = new ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D>(); TEST_NOT_EQUAL(c_ptr, c_nullPointer) END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ~ConstRefVectorConstIterator())) delete c_ptr; END_SECTION std::vector<Peak1D*> p_vec; p_vec.push_back(&peak1); p_vec.push_back(&peak2); p_vec.push_back(&peak3); START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator(const typename std::vector< ValueType * > *vec, unsigned int position))) const std::vector<Peak1D*> p_vec_const(p_vec); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec_const, 1); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator(typename std::vector< ValueType * > *vec, unsigned int position))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator(const ConstRefVectorConstIterator &it))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> copy_it(tmp_c_it); TEST_REAL_SIMILAR(copy_it->getMZ(), 2.0); TEST_REAL_SIMILAR(copy_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator& operator=(const 
ConstRefVectorConstIterator &rhs))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> assign_it; assign_it = tmp_c_it; TEST_REAL_SIMILAR(assign_it->getMZ(), 10.5); TEST_REAL_SIMILAR(assign_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] bool operator<(const ConstRefVectorConstIterator &it) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it1(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it2(&p_vec, 2); TEST_EQUAL(tmp_c_it1 < tmp_c_it2, 1); TEST_EQUAL(tmp_c_it2 < tmp_c_it1, 0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] bool operator>(const ConstRefVectorConstIterator &it) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it1(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it2(&p_vec, 2); TEST_EQUAL(tmp_c_it1 > tmp_c_it2, 0); TEST_EQUAL(tmp_c_it2 > tmp_c_it1, 1); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] bool operator<=(const ConstRefVectorConstIterator &it) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it1(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it2(&p_vec, 2); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it3(&p_vec, 2); TEST_EQUAL(tmp_c_it1 <= tmp_c_it2, 1); TEST_EQUAL(tmp_c_it2 <= tmp_c_it3, 1); TEST_EQUAL(tmp_c_it2 <= tmp_c_it1, 0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] bool operator>=(const ConstRefVectorConstIterator &it) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it1(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it2(&p_vec, 2); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> 
tmp_c_it3(&p_vec, 0); TEST_EQUAL(tmp_c_it1 >= tmp_c_it2, 0); TEST_EQUAL(tmp_c_it2 >= tmp_c_it1, 1); TEST_EQUAL(tmp_c_it3 >= tmp_c_it1, 1); END_SECTION std::vector<Peak1D*> p_vec2; p_vec2.push_back(&peak1); START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] bool operator==(const ConstRefVectorConstIterator &it) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it1(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it2(&p_vec, 2); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it3(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it4(&p_vec2, 0); TEST_EQUAL(tmp_c_it1 == tmp_c_it2, 0); TEST_EQUAL(tmp_c_it2 == tmp_c_it3, 0); TEST_EQUAL(tmp_c_it3 == tmp_c_it1, 1); TEST_EQUAL(tmp_c_it4 == tmp_c_it1, 0); TEST_EQUAL(tmp_c_it4 == tmp_c_it3, 0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] bool operator!=(const ConstRefVectorConstIterator &it) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it1(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it2(&p_vec, 2); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it3(&p_vec, 0); ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it4(&p_vec2, 0); TEST_EQUAL(tmp_c_it1 != tmp_c_it2, 1); TEST_EQUAL(tmp_c_it2 != tmp_c_it3, 1); TEST_EQUAL(tmp_c_it3 != tmp_c_it1, 0); TEST_EQUAL(tmp_c_it4 != tmp_c_it1, 1); TEST_EQUAL(tmp_c_it4 != tmp_c_it3, 1); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator& operator++())) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); ++tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); ++tmp_c_it; 
TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator operator++(int))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); tmp_c_it++; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); tmp_c_it++; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator& operator--())) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); --tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); --tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator operator--(int))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); tmp_c_it--; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); tmp_c_it--; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator operator-(difference_type n) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); unsigned int diff = 2; 
ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> result_it = tmp_c_it - diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); TEST_REAL_SIMILAR(result_it->getMZ(), 2.0); TEST_REAL_SIMILAR(result_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator operator+(difference_type n) const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); unsigned int diff = 2; ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> result_it = tmp_c_it + diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); TEST_REAL_SIMILAR(result_it->getMZ(), 10.5); TEST_REAL_SIMILAR(result_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator& operator-=(difference_type n))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); unsigned int diff = 2; tmp_c_it -= diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] ConstRefVectorConstIterator& operator+=(difference_type n))) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); unsigned int diff = 2; tmp_c_it += diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] reference operator*())) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 
1); Peak1D orig_peak((*tmp_c_it)); TEST_REAL_SIMILAR(orig_peak.getMZ(), tmp_c_it->getMZ()); TEST_REAL_SIMILAR(orig_peak.getIntensity(), tmp_c_it->getIntensity()); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] pointer operator->())) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); double mz = tmp_c_it->getMZ(); double Int = tmp_c_it->getIntensity(); TEST_REAL_SIMILAR(mz, 10.5); TEST_REAL_SIMILAR(Int, 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorConstIterator] pointer operator->() const)) ConstRefVector<PeakArrayType>::ConstRefVectorConstIterator<Peak1D> tmp_c_it(&p_vec, 2); double mz = tmp_c_it->getMZ(); double Int = tmp_c_it->getIntensity(); TEST_REAL_SIMILAR(mz, 10.5); TEST_REAL_SIMILAR(Int, 0.01); END_SECTION /////////////////////////////////////////// // ConstRefVectorIterator tests added (ek) /////////////////////////////////////////// ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D>* m_ptr = 0; ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D>* m_nullPointer = 0; START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator())) m_ptr = new ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D>(); TEST_NOT_EQUAL(m_ptr, m_nullPointer) END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ~ConstRefVectorIterator())) delete m_ptr; END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator(typename std::vector< ValueType * > *vec, unsigned int position))) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator(const ConstRefVectorIterator< ValueType > &it))) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 0); 
ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> copy_it(tmp_c_it); TEST_REAL_SIMILAR(copy_it->getMZ(), 2.0); TEST_REAL_SIMILAR(copy_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator& operator++())) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); ++tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); ++tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator operator++(int))) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); tmp_c_it++; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); tmp_c_it++; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator& operator--())) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); --tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); --tmp_c_it; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator operator--(int))) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); tmp_c_it--; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 
0.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.5); tmp_c_it--; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator operator-(typename ConstRefVectorIterator::difference_type n) const )) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); unsigned int diff = 2; ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> result_it = tmp_c_it - diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); TEST_REAL_SIMILAR(result_it->getMZ(), 2.0); TEST_REAL_SIMILAR(result_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator operator+(typename ConstRefVectorIterator::difference_type n) const )) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); unsigned int diff = 2; ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> result_it = tmp_c_it + diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); TEST_REAL_SIMILAR(result_it->getMZ(), 10.5); TEST_REAL_SIMILAR(result_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] ConstRefVectorIterator& operator-=(typename ConstRefVectorIterator::difference_type n))) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); unsigned int diff = 2; tmp_c_it -= diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] 
ConstRefVectorIterator& operator+=(typename ConstRefVectorIterator::difference_type n))) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 0); TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 2.0); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 1.0); unsigned int diff = 2; tmp_c_it += diff; TEST_REAL_SIMILAR(tmp_c_it->getMZ(), 10.5); TEST_REAL_SIMILAR(tmp_c_it->getIntensity(), 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] reference operator*())) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 1); Peak1D orig_peak((*tmp_c_it)); TEST_REAL_SIMILAR(orig_peak.getMZ(), tmp_c_it->getMZ()); TEST_REAL_SIMILAR(orig_peak.getIntensity(), tmp_c_it->getIntensity()); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] pointer operator->())) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); double mz = tmp_c_it->getMZ(); double Int = tmp_c_it->getIntensity(); TEST_REAL_SIMILAR(mz, 10.5); TEST_REAL_SIMILAR(Int, 0.01); END_SECTION START_SECTION(([ConstRefVector::ConstRefVectorIterator] pointer operator->() const)) ConstRefVector<PeakArrayType>::ConstRefVectorIterator<Peak1D> tmp_c_it(&p_vec, 2); double mz = tmp_c_it->getMZ(); double Int = tmp_c_it->getIntensity(); TEST_REAL_SIMILAR(mz, 10.5); TEST_REAL_SIMILAR(Int, 0.01); END_SECTION //////////////////////////////// START_SECTION((size_type size() const)) TEST_EQUAL(pl.size(), 0) pl.push_back(peak1); TEST_EQUAL(pl.size(), 1) END_SECTION START_SECTION((void push_back(const ValueType &x))) pl.push_back(peak2); TEST_EQUAL(pl.size(), 2) END_SECTION START_SECTION((size_type max_size() const)) ConstRefVector<PeakArrayType>::size_type max = pl.max_size(); pl.push_back(peak3); TEST_EQUAL(pl.max_size() == max, true) END_SECTION START_SECTION((bool empty() const)) TEST_EQUAL(pl.empty(), false) END_SECTION START_SECTION([EXTRA] ConstIterator begin() const) const ConstRefVector<PeakArrayType>& c_pl(pl); 
TEST_EQUAL(c_pl.size(), 3) ABORT_IF(c_pl.size() != 3) TEST_REAL_SIMILAR(c_pl.begin()->getIntensity(), peak1.getIntensity()) TEST_REAL_SIMILAR(c_pl.begin()->getPosition()[0], peak1.getPosition()[0]) END_SECTION START_SECTION([EXTRA] ConstIterator end() const) const ConstRefVector<PeakArrayType>& c_pl(pl); TEST_EQUAL(c_pl.size(), 3) ABORT_IF(c_pl.size() != 3) bool result = (c_pl.begin() == c_pl.end()); TEST_EQUAL(result, false) const ConstRefVector<PeakArrayType> empty; result = (empty.begin() == empty.end()); TEST_EQUAL(result, true) std::vector<Peak1D> v(c_pl.size()); std::copy(c_pl.begin(), c_pl.end(), v.begin()); TEST_EQUAL(v.size(), 3) ABORT_IF(v.size() != 3) TEST_REAL_SIMILAR(v[0].getIntensity(), peak1.getIntensity()) TEST_REAL_SIMILAR(v[0].getPosition()[0], peak1.getPosition()[0]) TEST_REAL_SIMILAR(v[1].getIntensity(), peak2.getIntensity()) TEST_REAL_SIMILAR(v[1].getPosition()[0], peak2.getPosition()[0]) TEST_REAL_SIMILAR(v[2].getIntensity(), peak3.getIntensity()) TEST_REAL_SIMILAR(v[2].getPosition()[0], peak3.getPosition()[0]) END_SECTION START_SECTION((void sortByIntensity(bool reverse=false))) ConstRefVector<PeakArrayType> pl2(pl); pl2.sortByIntensity(); TEST_EQUAL(pl2.size(), 3) std::vector<Peak1D> v(pl2.size()); std::copy(pl2.begin(), pl2.end(), v.begin()); TEST_EQUAL(v.size(), 3) ABORT_IF(v.size() != 3) TEST_REAL_SIMILAR(v[2].getIntensity(), peak1.getIntensity()) TEST_REAL_SIMILAR(v[2].getPosition()[0], peak1.getPosition()[0]) TEST_REAL_SIMILAR(v[1].getIntensity(), peak2.getIntensity()) TEST_REAL_SIMILAR(v[1].getPosition()[0], peak2.getPosition()[0]) TEST_REAL_SIMILAR(v[0].getIntensity(), peak3.getIntensity()) TEST_REAL_SIMILAR(v[0].getPosition()[0], peak3.getPosition()[0]) END_SECTION ConstRefVector<PeakArray2DType> pl2; Peak2D peak4; peak4.getPosition()[0] = 2.0; peak4.getPosition()[1] = 3.0; peak4.setIntensity(1.0f); pl2.push_back(peak4); Peak2D peak5; peak5.getPosition()[0] = 0.0; peak5.getPosition()[1] = 2.5; peak5.setIntensity(0.5f); 
pl2.push_back(peak5); Peak2D peak6; peak6.getPosition()[0] = 10.5; peak6.getPosition()[1] = 0.0; peak6.setIntensity(0.01f); pl2.push_back(peak6); START_SECTION((Iterator begin())) ConstRefVector<PeakArrayType>::Iterator it = pl.begin(); TEST_REAL_SIMILAR(it->getIntensity(), 1.0) TEST_REAL_SIMILAR(it->getPosition()[0], 2.0) END_SECTION START_SECTION((Iterator end())) ConstRefVector<PeakArrayType>::Iterator it = pl.end()-1; TEST_REAL_SIMILAR(it->getIntensity(), 0.01) TEST_REAL_SIMILAR(it->getPosition()[0], 10.5) END_SECTION START_SECTION((ConstIterator begin() const)) ConstRefVector<PeakArrayType>::ConstIterator it = pl.begin(); TEST_REAL_SIMILAR(it->getIntensity(), 1.0) TEST_REAL_SIMILAR(it->getPosition()[0], 2.0) END_SECTION START_SECTION((ConstIterator end() const)) ConstRefVector<PeakArrayType>::ConstIterator it = pl.end(); --it; TEST_REAL_SIMILAR(it->getIntensity(), 0.01) TEST_REAL_SIMILAR(it->getPosition()[0], 10.5) END_SECTION START_SECTION((ReverseIterator rbegin())) ConstRefVector<PeakArrayType>::ReverseIterator it = pl.rbegin(); TEST_REAL_SIMILAR(it->getIntensity(), 0.01) TEST_REAL_SIMILAR(it->getPosition()[0], 10.5) END_SECTION START_SECTION((ReverseIterator rend())) ConstRefVector<PeakArrayType>::ReverseIterator it = pl.rend()-1; TEST_REAL_SIMILAR(it->getIntensity(), 1.0) TEST_REAL_SIMILAR(it->getPosition()[0], 2.0) END_SECTION START_SECTION((ConstReverseIterator rbegin() const)) ConstRefVector<PeakArrayType>::ConstReverseIterator it = pl.rbegin(); TEST_REAL_SIMILAR(it->getIntensity(), 0.01) TEST_REAL_SIMILAR(it->getPosition()[0], 10.5) END_SECTION START_SECTION((ConstReverseIterator rend() const)) ConstRefVector<PeakArrayType>::ConstReverseIterator it = pl.rend()-1; TEST_REAL_SIMILAR(it->getIntensity(), 1.0) TEST_REAL_SIMILAR(it->getPosition()[0], 2.0) END_SECTION START_SECTION((size_type capacity() const)) TEST_EQUAL(pl.capacity(), 3) TEST_EQUAL(pl.size(), 3) END_SECTION Peak1D peak7; peak7.getPosition()[0] = 1.1; peak7.setIntensity(1.1f); 
START_SECTION((void reserve(size_type n))) pl.reserve(4); TEST_EQUAL(pl.size(), 3) TEST_EQUAL(pl.capacity(), 4) pl.push_back(peak7); TEST_EQUAL(pl.size(), 4) TEST_EQUAL(pl.capacity(), 4) END_SECTION START_SECTION((const_reference operator [](size_type n) const)) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[2].getPosition()[0], 10.5) TEST_REAL_SIMILAR(pl[3].getIntensity(), 1.1) TEST_REAL_SIMILAR(pl[3].getPosition()[0], 1.1) END_SECTION START_SECTION((ConstRefVector(size_type n))) ConstRefVector<PeakArrayType> pl2(2); TEST_EQUAL(pl2.size(), 2) END_SECTION START_SECTION((ConstRefVector(size_type n, const ValueType &element))) Peak2D peak; peak.getPosition()[0] = 1.1; peak.setIntensity(5.1f); ConstRefVector<PeakArray2DType> pl2(3, peak); TEST_EQUAL(pl2.size(), 3) TEST_REAL_SIMILAR(pl2[0].getIntensity(), 5.1) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 5.1) TEST_REAL_SIMILAR(pl2[2].getIntensity(), 5.1) END_SECTION START_SECTION((const_reference front() const)) Peak1D peak; peak = pl.front(); TEST_REAL_SIMILAR(peak.getIntensity(), 1.0) TEST_REAL_SIMILAR(peak.getPosition()[0], 2) END_SECTION START_SECTION((const_reference back() const)) Peak1D peak; peak = pl.back(); TEST_REAL_SIMILAR(peak.getIntensity(), 1.1) TEST_REAL_SIMILAR(peak.getPosition()[0], 1.1) END_SECTION START_SECTION((void pop_back())) TEST_EQUAL(pl.size(), 4) pl.pop_back(); TEST_EQUAL(pl.size(), 3) TEST_REAL_SIMILAR(pl[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) END_SECTION Peak1D peak8; peak8.getPosition()[0] = 2.0; peak8.setIntensity(1.0f); Peak1D peak9; peak9.getPosition()[0] = 0.0; peak9.setIntensity(2.5f); START_SECTION((void swap(ConstRefVector &array))) ConstRefVector<PeakArrayType> pl2; pl2.push_back(peak8); pl2.push_back(peak9); TEST_REAL_SIMILAR(pl2[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 2.5) TEST_EQUAL(pl2.size(), 2) TEST_EQUAL(pl.size(), 3) pl.swap(pl2); TEST_EQUAL(pl2.size(), 3) 
TEST_EQUAL(pl.size(), 2) TEST_REAL_SIMILAR(pl2[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl2[2].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 2.5) swap(pl,pl2); TEST_EQUAL(pl.size(), 3) TEST_EQUAL(pl2.size(), 2) TEST_REAL_SIMILAR(pl[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl2[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 2.5) END_SECTION Peak1D peak10; peak10.setIntensity(4712.0); START_SECTION((Iterator insert(Iterator pos, const ValueType &element))) TEST_EQUAL(pl.size(), 3) pl.insert(pl.end(),peak10); TEST_EQUAL(pl.size(), 4) TEST_REAL_SIMILAR(pl[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[3].getIntensity(), 4712.0) END_SECTION START_SECTION((Iterator erase(Iterator pos))) TEST_EQUAL(pl.size(), 4) pl.erase(pl.end()-1); TEST_EQUAL(pl.size(), 3) TEST_REAL_SIMILAR(pl[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) END_SECTION START_SECTION((void insert(Iterator pos, size_type n, const ValueType &element))) peak10.setIntensity(4714.0); TEST_EQUAL(pl.size(), 3) pl.insert(pl.begin(),3,peak10); TEST_EQUAL(pl.size(), 6) TEST_REAL_SIMILAR(pl[0].getIntensity(), 4714.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 4714.0) TEST_REAL_SIMILAR(pl[2].getIntensity(), 4714.0) TEST_REAL_SIMILAR(pl[3].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[4].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[5].getIntensity(), 0.01) END_SECTION START_SECTION((template <class InputIterator> void insert(Iterator pos, InputIterator f, InputIterator l))) pl.erase(pl.begin(),pl.begin()+3); TEST_EQUAL(pl.size(), 3) pl.insert(pl.begin(),pl.begin()+1,pl.end()); TEST_EQUAL(pl.size(), 5) TEST_REAL_SIMILAR(pl[0].getIntensity(), 0.5) 
TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[2].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[3].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[4].getIntensity(), 0.01) END_SECTION START_SECTION((template <class InputIterator> ConstRefVector(InputIterator f, InputIterator l))) ConstRefVector<PeakArrayType> pl2(pl.begin()+1,pl.end()-1); TEST_EQUAL(pl2.size(), 3) TEST_REAL_SIMILAR(pl2[0].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl2[1].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl2[2].getIntensity(), 0.5) END_SECTION START_SECTION((bool operator==(const ConstRefVector &array) const)) ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl.size(), pl2.size()) TEST_EQUAL(pl == pl2 , true) END_SECTION START_SECTION((bool operator!=(const ConstRefVector &array) const)) ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl.size(), pl2.size()) TEST_EQUAL(pl != pl2 , false) END_SECTION START_SECTION((bool operator<(const ConstRefVector &array) const)) ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl < pl2, false) pl2.push_back(Peak1D()); TEST_EQUAL(pl < pl2 , true) END_SECTION START_SECTION((bool operator>(const ConstRefVector &array) const)) ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl > pl2, false) pl2.erase(pl2.end()-1); TEST_EQUAL(pl > pl2 , true) END_SECTION START_SECTION((bool operator<=(const ConstRefVector &array) const)) ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl <= pl2, true) pl2.push_back(Peak1D()); TEST_EQUAL(pl <= pl2 , true) pl2.erase(pl2.begin()+1,pl2.end()-2); TEST_EQUAL(pl <= pl2 , false) END_SECTION START_SECTION((bool operator>=(const ConstRefVector &array) const)) ConstRefVector<PeakArrayType> pl2(pl); TEST_EQUAL(pl >= pl2, true) pl2.erase(pl2.end()-1); TEST_EQUAL(pl >= pl2 , true) pl2.insert(pl2.end(),2,pl2.front()); TEST_EQUAL(pl >= pl2 , false) END_SECTION START_SECTION((void clear())) pl.clear(); TEST_EQUAL(pl.size(), 0) END_SECTION Peak1D peak11; peak11.setIntensity(4713.0); START_SECTION((void resize(size_type new_size))) 
pl.resize(4,peak11); TEST_EQUAL(pl.size(), 4) TEST_REAL_SIMILAR(pl[2].getIntensity(), 4713.0) TEST_REAL_SIMILAR(pl[3].getIntensity(), 4713.0) END_SECTION START_SECTION((void resize(size_type new_size, const ValueType &t))) ConstRefVector<PeakArrayType> pl; Peak1D peak; peak.getPosition()[0] = 0.0; peak.setIntensity(2.5f); pl.resize(2,peak); TEST_EQUAL(pl.size(), 2) TEST_EQUAL(pl[0].getIntensity() == peak.getIntensity(),true) TEST_EQUAL(pl[0].getPosition() == peak.getPosition(),true) TEST_EQUAL(pl[1].getIntensity() == peak.getIntensity(),true) TEST_EQUAL(pl[1].getPosition() == peak.getPosition(),true) END_SECTION START_SECTION((ConstRefVector(ContainerType &p))) PeakArrayType pa(5); ConstRefVector<PeakArrayType> pl(pa); for (Size i=0; i<pa.size(); ++i) { TEST_EQUAL(pa[i]== pl[i],true) } END_SECTION START_SECTION((template <class InputIterator> void assign(InputIterator f , InputIterator l))) ConstRefVector<PeakArrayType> dpa2; dpa2.push_back(peak1); dpa2.push_back(peak2); dpa2.push_back(peak3); TEST_EQUAL(pl.size(), 4) pl.assign(dpa2.begin(),dpa2.end()); TEST_EQUAL(pl.size(), 3) TEST_REAL_SIMILAR(pl[0].getIntensity(), 1.0) TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.5) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) END_SECTION START_SECTION((void assign(size_type n, const ValueType &x))) pl.assign(5,peak3); TEST_EQUAL(pl.size(), 5) TEST_REAL_SIMILAR(pl[0].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[1].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[2].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[3].getIntensity(), 0.01) TEST_REAL_SIMILAR(pl[4].getIntensity(), 0.01) END_SECTION START_SECTION((Iterator erase(Iterator first,Iterator last))) TEST_EQUAL(pl.size(), 5) pl.erase(pl.begin(),pl.end()); TEST_EQUAL(pl.size(), 0) END_SECTION START_SECTION((void sortByPosition())) ConstRefVector<PeakArray2DType> dpa2; Peak2D p1(peak4); p1.setIntensity(1.0f); Peak2D p2(peak5); p2.setIntensity(2.0f); Peak2D p3(peak6); p3.setIntensity(3.0f); Peak2D p4; p4.getPosition()[0]=4.3; 
p4.getPosition()[1]=4711; p4.setIntensity(4.0f); Peak2D p5; p5.getPosition()[1]=4711; p5.setIntensity(5.0f); Peak2D p6; p6.getPosition()[1]=4711; p6.setIntensity(6.0f); dpa2.push_back(p1); dpa2.push_back(p2); dpa2.push_back(p3); dpa2.push_back(p4); dpa2.push_back(p5); dpa2.push_back(p6); dpa2.sortByPosition(); TEST_REAL_SIMILAR(dpa2[0].getIntensity(), 2.0) TEST_REAL_SIMILAR(dpa2[1].getIntensity(), 5.0) TEST_REAL_SIMILAR(dpa2[2].getIntensity(), 6.0) TEST_REAL_SIMILAR(dpa2[3].getIntensity(), 1.0) TEST_REAL_SIMILAR(dpa2[4].getIntensity(), 4.0) TEST_REAL_SIMILAR(dpa2[5].getIntensity(), 3.0) END_SECTION START_SECTION((template <typename ComparatorType> void sortByComparator(ComparatorType const &comparator=ComparatorType()))) pl2.sortByComparator<Peak2D::PositionLess>(); TEST_EQUAL(pl2.size(), 3) TEST_REAL_SIMILAR(pl2[1].getIntensity(), peak4.getIntensity()) TEST_REAL_SIMILAR(pl2[1].getPosition()[0], peak4.getPosition()[0]) TEST_REAL_SIMILAR(pl2[1].getPosition()[1], peak4.getPosition()[1]) TEST_REAL_SIMILAR(pl2[0].getIntensity(), peak5.getIntensity()) TEST_REAL_SIMILAR(pl2[0].getPosition()[0], peak5.getPosition()[0]) TEST_REAL_SIMILAR(pl2[0].getPosition()[1], peak5.getPosition()[1]) TEST_REAL_SIMILAR(pl2[2].getIntensity(), peak6.getIntensity()) TEST_REAL_SIMILAR(pl2[2].getPosition()[0], peak6.getPosition()[0]) TEST_REAL_SIMILAR(pl2[2].getPosition()[1], peak6.getPosition()[1]) // ---------------- ConstRefVector<PeakArray2DType> dpa2; Peak2D p1(peak4); p1.setIntensity(1.0f); Peak2D p2(peak5); p2.setIntensity(2.0f); Peak2D p3(peak6); p3.setIntensity(3.0f); Peak2D p4; p4.getPosition()[0]=4.3; p4.getPosition()[1]=4711; p4.setIntensity(4.0f); Peak2D p5; p5.getPosition()[1]=4711; p5.setIntensity(5.0f); Peak2D p6; p6.getPosition()[1]=4711; p6.setIntensity(6.0f); dpa2.push_back(p1); dpa2.push_back(p2); dpa2.push_back(p3); dpa2.push_back(p4); dpa2.push_back(p5); dpa2.push_back(p6); dpa2.sortByComparator<Peak2D::MZLess >(Peak2D::MZLess()); TEST_REAL_SIMILAR(dpa2[0].getIntensity(), 
3.0) TEST_REAL_SIMILAR(dpa2[1].getIntensity(), 2.0) TEST_REAL_SIMILAR(dpa2[2].getIntensity(), 1.0) TEST_REAL_SIMILAR(dpa2[3].getIntensity(), 4.0) TEST_REAL_SIMILAR(dpa2[4].getIntensity(), 5.0) TEST_REAL_SIMILAR(dpa2[5].getIntensity(), 6.0) END_SECTION START_SECTION([EXTRA] Container without special members for sorting) vector<Int> vec(5); ConstRefVector<vector<Int> > ref_vec(vec); TEST_EQUAL(ref_vec.size(),5) END_SECTION ///////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////// END_TEST
35.229186
163
0.737175
[ "vector" ]
11df61f80fa09e3437fd671d3baa72a1f744437c
97,189
cpp
C++
riscv/llvm/3.5/cfe-3.5.0.src/lib/Analysis/ThreadSafety.cpp
tangyibin/goblin-core
1940db6e95908c81687b2b22ddd9afbc8db9cdfe
[ "BSD-3-Clause" ]
null
null
null
riscv/llvm/3.5/cfe-3.5.0.src/lib/Analysis/ThreadSafety.cpp
tangyibin/goblin-core
1940db6e95908c81687b2b22ddd9afbc8db9cdfe
[ "BSD-3-Clause" ]
null
null
null
riscv/llvm/3.5/cfe-3.5.0.src/lib/Analysis/ThreadSafety.cpp
tangyibin/goblin-core
1940db6e95908c81687b2b22ddd9afbc8db9cdfe
[ "BSD-3-Clause" ]
1
2021-03-24T06:40:32.000Z
2021-03-24T06:40:32.000Z
//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // A intra-procedural analysis for thread safety (e.g. deadlocks and race // conditions), based off of an annotation system. // // See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html // for more information. // //===----------------------------------------------------------------------===// #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtVisitor.h" #include "clang/Analysis/Analyses/PostOrderCFGView.h" #include "clang/Analysis/Analyses/ThreadSafety.h" #include "clang/Analysis/Analyses/ThreadSafetyLogical.h" #include "clang/Analysis/Analyses/ThreadSafetyTIL.h" #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h" #include "clang/Analysis/Analyses/ThreadSafetyCommon.h" #include "clang/Analysis/AnalysisContext.h" #include "clang/Analysis/CFG.h" #include "clang/Analysis/CFGStmtMap.h" #include "clang/Basic/OperatorKinds.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/ImmutableMap.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <utility> #include <vector> using namespace clang; using namespace thread_safety; // Key method definition ThreadSafetyHandler::~ThreadSafetyHandler() {} namespace { /// SExpr implements a simple expression language that is used to store, /// compare, and pretty-print C++ expressions. 
Unlike a clang Expr, a SExpr /// does not capture surface syntax, and it does not distinguish between /// C++ concepts, like pointers and references, that have no real semantic /// differences. This simplicity allows SExprs to be meaningfully compared, /// e.g. /// (x) = x /// (*this).foo = this->foo /// *&a = a /// /// Thread-safety analysis works by comparing lock expressions. Within the /// body of a function, an expression such as "x->foo->bar.mu" will resolve to /// a particular mutex object at run-time. Subsequent occurrences of the same /// expression (where "same" means syntactic equality) will refer to the same /// run-time object if three conditions hold: /// (1) Local variables in the expression, such as "x" have not changed. /// (2) Values on the heap that affect the expression have not changed. /// (3) The expression involves only pure function calls. /// /// The current implementation assumes, but does not verify, that multiple uses /// of the same lock expression satisfies these criteria. class SExpr { private: enum ExprOp { EOP_Nop, ///< No-op EOP_Wildcard, ///< Matches anything. EOP_Universal, ///< Universal lock. EOP_This, ///< This keyword. EOP_NVar, ///< Named variable. EOP_LVar, ///< Local variable. 
EOP_Dot, ///< Field access EOP_Call, ///< Function call EOP_MCall, ///< Method call EOP_Index, ///< Array index EOP_Unary, ///< Unary operation EOP_Binary, ///< Binary operation EOP_Unknown ///< Catchall for everything else }; class SExprNode { private: unsigned char Op; ///< Opcode of the root node unsigned char Flags; ///< Additional opcode-specific data unsigned short Sz; ///< Number of child nodes const void* Data; ///< Additional opcode-specific data public: SExprNode(ExprOp O, unsigned F, const void* D) : Op(static_cast<unsigned char>(O)), Flags(static_cast<unsigned char>(F)), Sz(1), Data(D) { } unsigned size() const { return Sz; } void setSize(unsigned S) { Sz = S; } ExprOp kind() const { return static_cast<ExprOp>(Op); } const NamedDecl* getNamedDecl() const { assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot); return reinterpret_cast<const NamedDecl*>(Data); } const NamedDecl* getFunctionDecl() const { assert(Op == EOP_Call || Op == EOP_MCall); return reinterpret_cast<const NamedDecl*>(Data); } bool isArrow() const { return Op == EOP_Dot && Flags == 1; } void setArrow(bool A) { Flags = A ? 1 : 0; } unsigned arity() const { switch (Op) { case EOP_Nop: return 0; case EOP_Wildcard: return 0; case EOP_Universal: return 0; case EOP_NVar: return 0; case EOP_LVar: return 0; case EOP_This: return 0; case EOP_Dot: return 1; case EOP_Call: return Flags+1; // First arg is function. case EOP_MCall: return Flags+1; // First arg is implicit obj. case EOP_Index: return 2; case EOP_Unary: return 1; case EOP_Binary: return 2; case EOP_Unknown: return Flags; } return 0; } bool operator==(const SExprNode& Other) const { // Ignore flags and size -- they don't matter. 
return (Op == Other.Op && Data == Other.Data); } bool operator!=(const SExprNode& Other) const { return !(*this == Other); } bool matches(const SExprNode& Other) const { return (*this == Other) || (Op == EOP_Wildcard) || (Other.Op == EOP_Wildcard); } }; /// \brief Encapsulates the lexical context of a function call. The lexical /// context includes the arguments to the call, including the implicit object /// argument. When an attribute containing a mutex expression is attached to /// a method, the expression may refer to formal parameters of the method. /// Actual arguments must be substituted for formal parameters to derive /// the appropriate mutex expression in the lexical context where the function /// is called. PrevCtx holds the context in which the arguments themselves /// should be evaluated; multiple calling contexts can be chained together /// by the lock_returned attribute. struct CallingContext { const NamedDecl* AttrDecl; // The decl to which the attribute is attached. const Expr* SelfArg; // Implicit object argument -- e.g. 'this' bool SelfArrow; // is Self referred to with -> or .? unsigned NumArgs; // Number of funArgs const Expr* const* FunArgs; // Function arguments CallingContext* PrevCtx; // The previous context; or 0 if none. CallingContext(const NamedDecl *D) : AttrDecl(D), SelfArg(nullptr), SelfArrow(false), NumArgs(0), FunArgs(nullptr), PrevCtx(nullptr) {} }; typedef SmallVector<SExprNode, 4> NodeVector; private: // A SExpr is a list of SExprNodes in prefix order. The Size field allows // the list to be traversed as a tree. 
NodeVector NodeVec; private: unsigned make(ExprOp O, unsigned F = 0, const void *D = nullptr) { NodeVec.push_back(SExprNode(O, F, D)); return NodeVec.size() - 1; } unsigned makeNop() { return make(EOP_Nop); } unsigned makeWildcard() { return make(EOP_Wildcard); } unsigned makeUniversal() { return make(EOP_Universal); } unsigned makeNamedVar(const NamedDecl *D) { return make(EOP_NVar, 0, D); } unsigned makeLocalVar(const NamedDecl *D) { return make(EOP_LVar, 0, D); } unsigned makeThis() { return make(EOP_This); } unsigned makeDot(const NamedDecl *D, bool Arrow) { return make(EOP_Dot, Arrow ? 1 : 0, D); } unsigned makeCall(unsigned NumArgs, const NamedDecl *D) { return make(EOP_Call, NumArgs, D); } // Grab the very first declaration of virtual method D const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) { while (true) { D = D->getCanonicalDecl(); CXXMethodDecl::method_iterator I = D->begin_overridden_methods(), E = D->end_overridden_methods(); if (I == E) return D; // Method does not override anything D = *I; // FIXME: this does not work with multiple inheritance. } return nullptr; } unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) { return make(EOP_MCall, NumArgs, getFirstVirtualDecl(D)); } unsigned makeIndex() { return make(EOP_Index); } unsigned makeUnary() { return make(EOP_Unary); } unsigned makeBinary() { return make(EOP_Binary); } unsigned makeUnknown(unsigned Arity) { return make(EOP_Unknown, Arity); } inline bool isCalleeArrow(const Expr *E) { const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts()); return ME ? ME->isArrow() : false; } /// Build an SExpr from the given C++ expression. /// Recursive function that terminates on DeclRefExpr. /// Note: this function merely creates a SExpr; it does not check to /// ensure that the original expression is a valid mutex expression. 
/// /// NDeref returns the number of Derefence and AddressOf operations /// preceding the Expr; this is used to decide whether to pretty-print /// SExprs with . or ->. unsigned buildSExpr(const Expr *Exp, CallingContext *CallCtx, int *NDeref = nullptr) { if (!Exp) return 0; if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) { const NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl()); const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND); if (PV) { const FunctionDecl *FD = cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl(); unsigned i = PV->getFunctionScopeIndex(); if (CallCtx && CallCtx->FunArgs && FD == CallCtx->AttrDecl->getCanonicalDecl()) { // Substitute call arguments for references to function parameters assert(i < CallCtx->NumArgs); return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref); } // Map the param back to the param of the original function declaration. makeNamedVar(FD->getParamDecl(i)); return 1; } // Not a function parameter -- just store the reference. makeNamedVar(ND); return 1; } else if (isa<CXXThisExpr>(Exp)) { // Substitute parent for 'this' if (CallCtx && CallCtx->SelfArg) { if (!CallCtx->SelfArrow && NDeref) // 'this' is a pointer, but self is not, so need to take address. --(*NDeref); return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref); } else { makeThis(); return 1; } } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) { const NamedDecl *ND = ME->getMemberDecl(); int ImplicitDeref = ME->isArrow() ? 1 : 0; unsigned Root = makeDot(ND, false); unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref); NodeVec[Root].setArrow(ImplicitDeref > 0); NodeVec[Root].setSize(Sz + 1); return Sz + 1; } else if (const CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) { // When calling a function with a lock_returned attribute, replace // the function call with the expression in lock_returned. 
const CXXMethodDecl *MD = CMCE->getMethodDecl()->getMostRecentDecl(); if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) { CallingContext LRCallCtx(CMCE->getMethodDecl()); LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument(); LRCallCtx.SelfArrow = isCalleeArrow(CMCE->getCallee()); LRCallCtx.NumArgs = CMCE->getNumArgs(); LRCallCtx.FunArgs = CMCE->getArgs(); LRCallCtx.PrevCtx = CallCtx; return buildSExpr(At->getArg(), &LRCallCtx); } // Hack to treat smart pointers and iterators as pointers; // ignore any method named get(). if (CMCE->getMethodDecl()->getNameAsString() == "get" && CMCE->getNumArgs() == 0) { if (NDeref && isCalleeArrow(CMCE->getCallee())) ++(*NDeref); return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref); } unsigned NumCallArgs = CMCE->getNumArgs(); unsigned Root = makeMCall(NumCallArgs, CMCE->getMethodDecl()); unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx); const Expr* const* CallArgs = CMCE->getArgs(); for (unsigned i = 0; i < NumCallArgs; ++i) { Sz += buildSExpr(CallArgs[i], CallCtx); } NodeVec[Root].setSize(Sz + 1); return Sz + 1; } else if (const CallExpr *CE = dyn_cast<CallExpr>(Exp)) { const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl(); if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) { CallingContext LRCallCtx(CE->getDirectCallee()); LRCallCtx.NumArgs = CE->getNumArgs(); LRCallCtx.FunArgs = CE->getArgs(); LRCallCtx.PrevCtx = CallCtx; return buildSExpr(At->getArg(), &LRCallCtx); } // Treat smart pointers and iterators as pointers; // ignore the * and -> operators. 
if (const CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) { OverloadedOperatorKind k = OE->getOperator(); if (k == OO_Star) { if (NDeref) ++(*NDeref); return buildSExpr(OE->getArg(0), CallCtx, NDeref); } else if (k == OO_Arrow) { return buildSExpr(OE->getArg(0), CallCtx, NDeref); } } unsigned NumCallArgs = CE->getNumArgs(); unsigned Root = makeCall(NumCallArgs, nullptr); unsigned Sz = buildSExpr(CE->getCallee(), CallCtx); const Expr* const* CallArgs = CE->getArgs(); for (unsigned i = 0; i < NumCallArgs; ++i) { Sz += buildSExpr(CallArgs[i], CallCtx); } NodeVec[Root].setSize(Sz+1); return Sz+1; } else if (const BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) { unsigned Root = makeBinary(); unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx); Sz += buildSExpr(BOE->getRHS(), CallCtx); NodeVec[Root].setSize(Sz); return Sz; } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) { // Ignore & and * operators -- they're no-ops. // However, we try to figure out whether the expression is a pointer, // so we can use . and -> appropriately in error messages. if (UOE->getOpcode() == UO_Deref) { if (NDeref) ++(*NDeref); return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref); } if (UOE->getOpcode() == UO_AddrOf) { if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) { if (DRE->getDecl()->isCXXInstanceMember()) { // This is a pointer-to-member expression, e.g. &MyClass::mu_. // We interpret this syntax specially, as a wildcard. 
unsigned Root = makeDot(DRE->getDecl(), false); makeWildcard(); NodeVec[Root].setSize(2); return 2; } } if (NDeref) --(*NDeref); return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref); } unsigned Root = makeUnary(); unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx); NodeVec[Root].setSize(Sz); return Sz; } else if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) { unsigned Root = makeIndex(); unsigned Sz = buildSExpr(ASE->getBase(), CallCtx); Sz += buildSExpr(ASE->getIdx(), CallCtx); NodeVec[Root].setSize(Sz); return Sz; } else if (const AbstractConditionalOperator *CE = dyn_cast<AbstractConditionalOperator>(Exp)) { unsigned Root = makeUnknown(3); unsigned Sz = buildSExpr(CE->getCond(), CallCtx); Sz += buildSExpr(CE->getTrueExpr(), CallCtx); Sz += buildSExpr(CE->getFalseExpr(), CallCtx); NodeVec[Root].setSize(Sz); return Sz; } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) { unsigned Root = makeUnknown(3); unsigned Sz = buildSExpr(CE->getCond(), CallCtx); Sz += buildSExpr(CE->getLHS(), CallCtx); Sz += buildSExpr(CE->getRHS(), CallCtx); NodeVec[Root].setSize(Sz); return Sz; } else if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) { return buildSExpr(CE->getSubExpr(), CallCtx, NDeref); } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) { return buildSExpr(PE->getSubExpr(), CallCtx, NDeref); } else if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) { return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref); } else if (const CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) { return buildSExpr(E->getSubExpr(), CallCtx, NDeref); } else if (isa<CharacterLiteral>(Exp) || isa<CXXNullPtrLiteralExpr>(Exp) || isa<GNUNullExpr>(Exp) || isa<CXXBoolLiteralExpr>(Exp) || isa<FloatingLiteral>(Exp) || isa<ImaginaryLiteral>(Exp) || isa<IntegerLiteral>(Exp) || isa<StringLiteral>(Exp) || isa<ObjCStringLiteral>(Exp)) { makeNop(); return 1; // FIXME: Ignore literals for now } else { makeNop(); return 1; // Ignore. 
FIXME: mark as invalid expression? } } /// \brief Construct a SExpr from an expression. /// \param MutexExp The original mutex expression within an attribute /// \param DeclExp An expression involving the Decl on which the attribute /// occurs. /// \param D The declaration to which the lock/unlock attribute is attached. void buildSExprFromExpr(const Expr *MutexExp, const Expr *DeclExp, const NamedDecl *D, VarDecl *SelfDecl = nullptr) { CallingContext CallCtx(D); if (MutexExp) { if (const StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) { if (SLit->getString() == StringRef("*")) // The "*" expr is a universal lock, which essentially turns off // checks until it is removed from the lockset. makeUniversal(); else // Ignore other string literals for now. makeNop(); return; } } // If we are processing a raw attribute expression, with no substitutions. if (!DeclExp) { buildSExpr(MutexExp, nullptr); return; } // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute // for formal parameters when we call buildMutexID later. if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) { CallCtx.SelfArg = ME->getBase(); CallCtx.SelfArrow = ME->isArrow(); } else if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) { CallCtx.SelfArg = CE->getImplicitObjectArgument(); CallCtx.SelfArrow = isCalleeArrow(CE->getCallee()); CallCtx.NumArgs = CE->getNumArgs(); CallCtx.FunArgs = CE->getArgs(); } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) { CallCtx.NumArgs = CE->getNumArgs(); CallCtx.FunArgs = CE->getArgs(); } else if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) { CallCtx.SelfArg = nullptr; // Will be set below CallCtx.NumArgs = CE->getNumArgs(); CallCtx.FunArgs = CE->getArgs(); } else if (D && isa<CXXDestructorDecl>(D)) { // There's no such thing as a "destructor call" in the AST. CallCtx.SelfArg = DeclExp; } // Hack to handle constructors, where self cannot be recovered from // the expression. 
if (SelfDecl && !CallCtx.SelfArg) { DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue, SelfDecl->getLocation()); CallCtx.SelfArg = &SelfDRE; // If the attribute has no arguments, then assume the argument is "this". if (!MutexExp) buildSExpr(CallCtx.SelfArg, nullptr); else // For most attributes. buildSExpr(MutexExp, &CallCtx); return; } // If the attribute has no arguments, then assume the argument is "this". if (!MutexExp) buildSExpr(CallCtx.SelfArg, nullptr); else // For most attributes. buildSExpr(MutexExp, &CallCtx); } /// \brief Get index of next sibling of node i. unsigned getNextSibling(unsigned i) const { return i + NodeVec[i].size(); } public: explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); } /// \param MutexExp The original mutex expression within an attribute /// \param DeclExp An expression involving the Decl on which the attribute /// occurs. /// \param D The declaration to which the lock/unlock attribute is attached. /// Caller must check isValid() after construction. SExpr(const Expr *MutexExp, const Expr *DeclExp, const NamedDecl *D, VarDecl *SelfDecl = nullptr) { buildSExprFromExpr(MutexExp, DeclExp, D, SelfDecl); } /// Return true if this is a valid decl sequence. /// Caller must call this by hand after construction to handle errors. bool isValid() const { return !NodeVec.empty(); } bool shouldIgnore() const { // Nop is a mutex that we have decided to deliberately ignore. 
assert(NodeVec.size() > 0 && "Invalid Mutex"); return NodeVec[0].kind() == EOP_Nop; } bool isUniversal() const { assert(NodeVec.size() > 0 && "Invalid Mutex"); return NodeVec[0].kind() == EOP_Universal; } /// Issue a warning about an invalid lock expression static void warnInvalidLock(ThreadSafetyHandler &Handler, const Expr *MutexExp, const Expr *DeclExp, const NamedDecl *D, StringRef Kind) { SourceLocation Loc; if (DeclExp) Loc = DeclExp->getExprLoc(); // FIXME: add a note about the attribute location in MutexExp or D if (Loc.isValid()) Handler.handleInvalidLockExp(Kind, Loc); } bool operator==(const SExpr &other) const { return NodeVec == other.NodeVec; } bool operator!=(const SExpr &other) const { return !(*this == other); } bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const { if (NodeVec[i].matches(Other.NodeVec[j])) { unsigned ni = NodeVec[i].arity(); unsigned nj = Other.NodeVec[j].arity(); unsigned n = (ni < nj) ? ni : nj; bool Result = true; unsigned ci = i+1; // first child of i unsigned cj = j+1; // first child of j for (unsigned k = 0; k < n; ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) { Result = Result && matches(Other, ci, cj); } return Result; } return false; } // A partial match between a.mu and b.mu returns true a and b have the same // type (and thus mu refers to the same mutex declaration), regardless of // whether a and b are different objects or not. bool partiallyMatches(const SExpr &Other) const { if (NodeVec[0].kind() == EOP_Dot) return NodeVec[0].matches(Other.NodeVec[0]); return false; } /// \brief Pretty print a lock expression for use in error messages. 
std::string toString(unsigned i = 0) const { assert(isValid()); if (i >= NodeVec.size()) return ""; const SExprNode* N = &NodeVec[i]; switch (N->kind()) { case EOP_Nop: return "_"; case EOP_Wildcard: return "(?)"; case EOP_Universal: return "*"; case EOP_This: return "this"; case EOP_NVar: case EOP_LVar: { return N->getNamedDecl()->getNameAsString(); } case EOP_Dot: { if (NodeVec[i+1].kind() == EOP_Wildcard) { std::string S = "&"; S += N->getNamedDecl()->getQualifiedNameAsString(); return S; } std::string FieldName = N->getNamedDecl()->getNameAsString(); if (NodeVec[i+1].kind() == EOP_This) return FieldName; std::string S = toString(i+1); if (N->isArrow()) return S + "->" + FieldName; else return S + "." + FieldName; } case EOP_Call: { std::string S = toString(i+1) + "("; unsigned NumArgs = N->arity()-1; unsigned ci = getNextSibling(i+1); for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) { S += toString(ci); if (k+1 < NumArgs) S += ","; } S += ")"; return S; } case EOP_MCall: { std::string S = ""; if (NodeVec[i+1].kind() != EOP_This) S = toString(i+1) + "."; if (const NamedDecl *D = N->getFunctionDecl()) S += D->getNameAsString() + "("; else S += "#("; unsigned NumArgs = N->arity()-1; unsigned ci = getNextSibling(i+1); for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) { S += toString(ci); if (k+1 < NumArgs) S += ","; } S += ")"; return S; } case EOP_Index: { std::string S1 = toString(i+1); std::string S2 = toString(i+1 + NodeVec[i+1].size()); return S1 + "[" + S2 + "]"; } case EOP_Unary: { std::string S = toString(i+1); return "#" + S; } case EOP_Binary: { std::string S1 = toString(i+1); std::string S2 = toString(i+1 + NodeVec[i+1].size()); return "(" + S1 + "#" + S2 + ")"; } case EOP_Unknown: { unsigned NumChildren = N->arity(); if (NumChildren == 0) return "(...)"; std::string S = "("; unsigned ci = i+1; for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) { S += toString(ci); if (j+1 < NumChildren) S += "#"; } S += ")"; return 
S; } } return ""; } }; /// \brief A short list of SExprs class MutexIDList : public SmallVector<SExpr, 3> { public: /// \brief Push M onto list, but discard duplicates. void push_back_nodup(const SExpr& M) { if (end() == std::find(begin(), end(), M)) push_back(M); } }; /// \brief This is a helper class that stores info about the most recent /// accquire of a Lock. /// /// The main body of the analysis maps MutexIDs to LockDatas. struct LockData { SourceLocation AcquireLoc; /// \brief LKind stores whether a lock is held shared or exclusively. /// Note that this analysis does not currently support either re-entrant /// locking or lock "upgrading" and "downgrading" between exclusive and /// shared. /// /// FIXME: add support for re-entrant locking and lock up/downgrading LockKind LKind; bool Asserted; // for asserted locks bool Managed; // for ScopedLockable objects SExpr UnderlyingMutex; // for ScopedLockable objects LockData(SourceLocation AcquireLoc, LockKind LKind, bool M=false, bool Asrt=false) : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(Asrt), Managed(M), UnderlyingMutex(Decl::EmptyShell()) {} LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu) : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(false), Managed(false), UnderlyingMutex(Mu) {} bool operator==(const LockData &other) const { return AcquireLoc == other.AcquireLoc && LKind == other.LKind; } bool operator!=(const LockData &other) const { return !(*this == other); } void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(AcquireLoc.getRawEncoding()); ID.AddInteger(LKind); } bool isAtLeast(LockKind LK) { return (LK == LK_Shared) || (LKind == LK_Exclusive); } }; /// \brief A FactEntry stores a single fact that is known at a particular point /// in the program execution. Currently, this is information regarding a lock /// that is held at that point. 
struct FactEntry { SExpr MutID; LockData LDat; FactEntry(const SExpr& M, const LockData& L) : MutID(M), LDat(L) { } }; typedef unsigned short FactID; /// \brief FactManager manages the memory for all facts that are created during /// the analysis of a single routine. class FactManager { private: std::vector<FactEntry> Facts; public: FactID newLock(const SExpr& M, const LockData& L) { Facts.push_back(FactEntry(M,L)); return static_cast<unsigned short>(Facts.size() - 1); } const FactEntry& operator[](FactID F) const { return Facts[F]; } FactEntry& operator[](FactID F) { return Facts[F]; } }; /// \brief A FactSet is the set of facts that are known to be true at a /// particular program point. FactSets must be small, because they are /// frequently copied, and are thus implemented as a set of indices into a /// table maintained by a FactManager. A typical FactSet only holds 1 or 2 /// locks, so we can get away with doing a linear search for lookup. Note /// that a hashtable or map is inappropriate in this case, because lookups /// may involve partial pattern matches, rather than exact matches. 
class FactSet { private: typedef SmallVector<FactID, 4> FactVec; FactVec FactIDs; public: typedef FactVec::iterator iterator; typedef FactVec::const_iterator const_iterator; iterator begin() { return FactIDs.begin(); } const_iterator begin() const { return FactIDs.begin(); } iterator end() { return FactIDs.end(); } const_iterator end() const { return FactIDs.end(); } bool isEmpty() const { return FactIDs.size() == 0; } FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) { FactID F = FM.newLock(M, L); FactIDs.push_back(F); return F; } bool removeLock(FactManager& FM, const SExpr& M) { unsigned n = FactIDs.size(); if (n == 0) return false; for (unsigned i = 0; i < n-1; ++i) { if (FM[FactIDs[i]].MutID.matches(M)) { FactIDs[i] = FactIDs[n-1]; FactIDs.pop_back(); return true; } } if (FM[FactIDs[n-1]].MutID.matches(M)) { FactIDs.pop_back(); return true; } return false; } iterator findLockIter(FactManager &FM, const SExpr &M) { return std::find_if(begin(), end(), [&](FactID ID) { return FM[ID].MutID.matches(M); }); } LockData *findLock(FactManager &FM, const SExpr &M) const { auto I = std::find_if(begin(), end(), [&](FactID ID) { return FM[ID].MutID.matches(M); }); return I != end() ? &FM[*I].LDat : nullptr; } LockData *findLockUniv(FactManager &FM, const SExpr &M) const { auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { const SExpr &Expr = FM[ID].MutID; return Expr.isUniversal() || Expr.matches(M); }); return I != end() ? &FM[*I].LDat : nullptr; } FactEntry *findPartialMatch(FactManager &FM, const SExpr &M) const { auto I = std::find_if(begin(), end(), [&](FactID ID) { return FM[ID].MutID.partiallyMatches(M); }); return I != end() ? &FM[*I] : nullptr; } }; /// A Lockset maps each SExpr (defined above) to information about how it has /// been locked. typedef llvm::ImmutableMap<SExpr, LockData> Lockset; typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext; class LocalVariableMap; /// A side (entry or exit) of a CFG node. 
enum CFGBlockSide { CBS_Entry, CBS_Exit }; /// CFGBlockInfo is a struct which contains all the information that is /// maintained for each block in the CFG. See LocalVariableMap for more /// information about the contexts. struct CFGBlockInfo { FactSet EntrySet; // Lockset held at entry to block FactSet ExitSet; // Lockset held at exit from block LocalVarContext EntryContext; // Context held at entry to block LocalVarContext ExitContext; // Context held at exit from block SourceLocation EntryLoc; // Location of first statement in block SourceLocation ExitLoc; // Location of last statement in block. unsigned EntryIndex; // Used to replay contexts later bool Reachable; // Is this block reachable? const FactSet &getSet(CFGBlockSide Side) const { return Side == CBS_Entry ? EntrySet : ExitSet; } SourceLocation getLocation(CFGBlockSide Side) const { return Side == CBS_Entry ? EntryLoc : ExitLoc; } private: CFGBlockInfo(LocalVarContext EmptyCtx) : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false) { } public: static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M); }; // A LocalVariableMap maintains a map from local variables to their currently // valid definitions. It provides SSA-like functionality when traversing the // CFG. Like SSA, each definition or assignment to a variable is assigned a // unique name (an integer), which acts as the SSA name for that definition. // The total set of names is shared among all CFG basic blocks. // Unlike SSA, we do not rewrite expressions to replace local variables declrefs // with their SSA-names. Instead, we compute a Context for each point in the // code, which maps local variables to the appropriate SSA-name. This map // changes with each assignment. // // The map is computed in a single pass over the CFG. Subsequent analyses can // then query the map to find the appropriate Context for a statement, and use // that Context to look up the definitions of variables. 
class LocalVariableMap {
public:
  typedef LocalVarContext Context;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    const NamedDecl *Dec;  // The original declaration for this variable.
    const Expr *Exp;       // The expression for this variable, OR
    unsigned Ref;          // Reference to another VarDefinition
    Context Ctx;           // The map with which Exp should be interpreted.

    // A reference definition has no expression of its own; it points at
    // another definition via Ref.
    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
      : Dec(D), Exp(E), Ref(0), Ctx(C)
    { }

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
      : Dec(D), Exp(nullptr), Ref(R), Ctx(C)
    { }
  };

private:
  Context::Factory ContextFactory;
  // All definitions ever created; indices into this vector are the SSA names.
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;
  // Contexts saved during the building pass, keyed by statement, so later
  // analyses can replay them in order (see getNextContext).
  std::vector<std::pair<Stmt*, Context> > SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    // Follow chains of reference definitions until we find one with an
    // actual expression; index 0 means "undefined", so stop there.
    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  // Print a human-readable name for definition i ("Undefined" for index 0).
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S,C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);

  friend class VarMapBuilder;
};


// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}


/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
    : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(DeclStmt *S);
  void VisitBinaryOperator(BinaryOperator *BO);
};


// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
  bool modifiedCtx = false;
  DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts.
// The intersection is the set of variables which have the same definition in
// both contexts; variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)             // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C.  Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions.  C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0;    // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
// int x = 0;            { x -> x1                 | x1 = 0 }
// int y = 0;            { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
// if (b) x = 1;         { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
// else   x = 2;         { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
// ...                   { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block.  We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
// int x = 0, y = 0;     { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
// while (b)             { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//   x = x+1;            { x -> x3, y -> y1  | x3 = x2 + 1, ... }
// ...                   { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block by intersecting the
    // exit contexts of all already-visited (i.e. non-back-edge) predecessors.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge, so skip it
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        // First visited predecessor seeds the entry context.
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminator()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
    } else {
      // No terminator: scan backwards for the last statement element.
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    }

    if (!CurrBlockInfo->ExitLoc.isInvalid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (CFGBlock::const_iterator BI = CurrBlock->begin(),
           BE = CurrBlock->end(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;

  ThreadSafetyHandler       &Handler;      // Diagnostic sink.
  LocalVariableMap          LocalVarMap;   // SSA-like map of local variables.
  FactManager               FactMan;       // Owns all lock facts.
  std::vector<CFGBlockInfo> BlockInfo;     // Per-CFG-block analysis state.

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}

  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat,
               StringRef DiagKind);
  void removeLock(FactSet &FSet, const SExpr &Mutex, SourceLocation UnlockLoc,
                  bool FullyRemove, LockKind Kind, StringRef DiagKind);

  template <typename AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl = nullptr);

  template <class AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc,
                        LockErrorKind LEK1, LockErrorKind LEK2,
                        bool Modify=true);

  // Convenience overload: same error kind for both locksets.
  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        bool Modify=true) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  // Strip implicit casts, then handle the two decl-bearing expression kinds.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

// Compile-time trait: true iff Ty has an args() iterator range.  Used to
// select the right ClassifyDiagnostic overload for attribute types below.
template <typename Ty>
class has_arg_iterator_range {
  typedef char yes[1];
  typedef char no[2];

  template <typename Inner>
  static yes& test(Inner *I, decltype(I->args()) * = nullptr);

  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};

// The capability's user-visible name, used as the diagnostic kind string.
static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
  return A->getName();
}

static StringRef ClassifyDiagnostic(QualType VDT) {
  // We need to look at the declaration of the type of the value to determine
  // which it is. The type should either be a record or a typedef, or a pointer
  // or reference thereof.
  if (const auto *RT = VDT->getAs<RecordType>()) {
    if (const auto *RD = RT->getDecl())
      if (const auto *CA = RD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (const auto *TT = VDT->getAs<TypedefType>()) {
    if (const auto *TD = TT->getDecl())
      if (const auto *CA = TD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (VDT->isPointerType() || VDT->isReferenceType())
    return ClassifyDiagnostic(VDT->getPointeeType());

  // Fall back to the generic capability kind.
  return "mutex";
}

static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
  assert(VD && "No ValueDecl passed");

  // The ValueDecl is the declaration of a mutex or role (hopefully).
  return ClassifyDiagnostic(VD->getType());
}

// Overload for attribute types with a single getArg() argument.
template <typename AttrTy>
static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
                               StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
    return ClassifyDiagnostic(VD);
  return "mutex";
}

// Overload for attribute types with an args() range; classifies based on the
// first argument that resolves to a ValueDecl.
template <typename AttrTy>
static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
                               StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
  for (const auto *Arg : A->args()) {
    if (const ValueDecl *VD = getValueDecl(Arg))
      return ClassifyDiagnostic(VD);
  }
  return "mutex";
}

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param Mutex -- the Mutex expression for the lock
/// \param LDat -- the LockData for the lock
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
                                   const LockData &LDat, StringRef DiagKind) {
  // FIXME: deal with acquired before/after annotations.
  // FIXME: Don't always warn when we have support for reentrant locks.
  if (Mutex.shouldIgnore())
    return;

  if (FSet.findLock(FactMan, Mutex)) {
    // Already held: double-lock, unless this comes from an assert attribute.
    if (!LDat.Asserted)
      Handler.handleDoubleLock(DiagKind, Mutex.toString(), LDat.AcquireLoc);
  } else {
    FSet.addLock(FactMan, Mutex, LDat);
  }
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param Mutex The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const SExpr &Mutex,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind,
                                      StringRef DiagKind) {
  if (Mutex.shouldIgnore())
    return;

  const LockData *LDat = FSet.findLock(FactMan, Mutex);
  if (!LDat) {
    Handler.handleUnmatchedUnlock(DiagKind, Mutex.toString(), UnlockLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->LKind != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(DiagKind, Mutex.toString(), LDat->LKind,
                                      ReceivedKind, UnlockLoc);
    return;
  }

  if (LDat->UnderlyingMutex.isValid()) {
    // This is scoped lockable object, which manages the real mutex.
    if (FullyRemove) {
      // We're destroying the managing object.
      // Remove the underlying mutex if it exists; but don't warn.
      if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
        FSet.removeLock(FactMan, LDat->UnderlyingMutex);
    } else {
      // We're releasing the underlying mutex, but not destroying the
      // managing object.  Warn on dual release.
      if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
        Handler.handleUnmatchedUnlock(
            DiagKind, LDat->UnderlyingMutex.toString(), UnlockLoc);
      }
      FSet.removeLock(FactMan, LDat->UnderlyingMutex);
      return;
    }
  }
  FSet.removeLock(FactMan, Mutex);
}

/// \brief Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    SExpr Mu(nullptr, Exp, D, SelfDecl);
    if (!Mu.isValid())
      SExpr::warnInvalidLock(Handler, nullptr, Exp, D,
                             ClassifyDiagnostic(Attr));
    else
      Mtxs.push_back_nodup(Mu);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    SExpr Mu(Arg, Exp, D, SelfDecl);
    if (!Mu.isValid())
      SExpr::warnInvalidLock(Handler, Arg, Exp, D, ClassifyDiagnostic(Attr));
    else
      Mtxs.push_back_nodup(Mu);
  }
}

/// \brief Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  // Successor 0 is the "true" branch; a negated condition flips it.
  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

// Evaluate E as a compile-time boolean constant, if possible; returns true
// on success and stores the value in TCond.
bool getStaticBooleanValue(Expr* E, bool& TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  }
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
    return CallExp;
  }
  else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  }
  else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  }
  else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
    return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
  }
  else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    // The condition is a local variable; chase its saved definition through
    // the local variable map (e.g. bool ok = mu.tryLock(); if (ok) ...).
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      // Comparison against a constant: != flips the sense, and so does
      // comparing against a constant false.
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr) {
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    return nullptr;
  }
  return nullptr;
}

/// \brief Find the lockset that holds on the edge between PredBlock
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  // Start from the predecessor's exit lockset.
  Result = ExitSet;

  // Without a terminator condition there can be no trylock on this edge.
  const Stmt *TermCond = PredBlock->getTerminatorCondition();
  if (!TermCond)
    return;

  bool Negated = false;
  const CFGBlockInfo *PredInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &PredCtx = PredInfo->ExitContext;
  StringRef CapDiagKind = "mutex";

  // Trace the branch condition back to a call expression, if possible.
  CallExpr *TrylockCall =
      const_cast<CallExpr *>(getTrylockCallExpr(TermCond, PredCtx, Negated));
  if (!TrylockCall)
    return;

  NamedDecl *FunDecl =
      dyn_cast_or_null<NamedDecl>(TrylockCall->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttrs())
    return;

  MutexIDList ExclusiveLocksToAdd;
  MutexIDList SharedLocksToAdd;

  // If the callee is annotated as a trylock function, collect the
  // capabilities it conditionally acquires on the branch to CurrBlock.
  for (auto *At : FunDecl->getAttrs()) {
    if (auto *A = dyn_cast<ExclusiveTrylockFunctionAttr>(At)) {
      getMutexIDs(ExclusiveLocksToAdd, A, TrylockCall, FunDecl,
                  PredBlock, CurrBlock, A->getSuccessValue(), Negated);
      CapDiagKind = ClassifyDiagnostic(A);
    } else if (auto *A = dyn_cast<SharedTrylockFunctionAttr>(At)) {
      getMutexIDs(SharedLocksToAdd, A, TrylockCall, FunDecl,
                  PredBlock, CurrBlock, A->getSuccessValue(), Negated);
      CapDiagKind = ClassifyDiagnostic(A);
    }
    // All other attributes are irrelevant here.
  }

  // Record the capabilities acquired on this edge.
  SourceLocation Loc = TrylockCall->getExprLoc();
  for (const auto &M : ExclusiveLocksToAdd)
    addLock(Result, M, LockData(Loc, LK_Exclusive), CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    addLock(Result, M, LockData(Loc, LK_Shared), CapDiagKind);
}

/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;     // Owning analyzer (diagnostics, facts).
  FactSet FSet;                       // Current lockset, updated per stmt.
  LocalVariableMap::Context LVarCtx;  // Current local-variable context.
  unsigned CtxIndex;                  // Replay index into saved contexts.

  // Helper functions
  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK,
                          StringRef DiagKind);
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
                       StringRef DiagKind);

  void checkAccess(const Expr *Exp, AccessKind AK);
  void checkPtAccess(const Expr *Exp, AccessKind AK);

  void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
    : StmtVisitor<BuildLockset>(),
      Analyzer(Anlzr),
      FSet(Info.EntrySet),
      LVarCtx(Info.EntryContext),
      CtxIndex(Info.EntryIndex)
  {}

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCallExpr(CallExpr *Exp);
  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
  void VisitDeclStmt(DeclStmt *S);
};

/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK,
                                      StringRef DiagKind) {
  LockKind LK = getLockKindFromAccessKind(AK);

  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
    return;
  } else if (Mutex.shouldIgnore()) {
    return;
  }

  LockData* LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
  bool NoError = true;
  if (!LDat) {
    // No exact match found.  Look for a partial match.
    FactEntry* FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
    if (FEntry) {
      // Warn that there's no precise match.
      LDat = &FEntry->LDat;
      std::string PartMatchStr = FEntry->MutID.toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
                                           LK, Exp->getExprLoc(),
                                           &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
                                           LK, Exp->getExprLoc());
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK))
    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
                                         LK, Exp->getExprLoc());
}

/// \brief Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
                                   Expr *MutexExp, StringRef DiagKind) {
  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
    return;
  }

  LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
  if (LDat)
    Analyzer->Handler.handleFunExcludesLock(
        DiagKind, D->getNameAsString(), Mutex.toString(), Exp->getExprLoc());
}

/// \brief Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK) {
  Exp = Exp->IgnoreParenCasts();

  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == clang::UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK);
    return;
  }

  if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    // a[i] accesses through the pointer/array a.
    checkPtAccess(AE->getLHS(), AK);
    return;
  }

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK);
    else
      checkAccess(ME->getBase(), AK);
    // Note: deliberately falls through so the member itself is also checked
    // against guarded_by below.
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarAccess, AK,
                                        Exp->getExprLoc());

  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarAccess,
                       ClassifyDiagnostic(I));
}

/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
  // Strip parens and non-decaying casts to find the underlying pointer expr.
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then it's elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY;
        checkAccess(CE->getSubExpr(), AK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarDereference, AK,
                                        Exp->getExprLoc());

  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarDereference,
                       ClassifyDiagnostic(I));
}

/// \brief Process a function call, method call, constructor call,
/// or destructor call.
/// This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  const AttrVec &ArgAttrs = D->getAttrs();
  MutexIDList ExclusiveLocksToAdd, SharedLocksToAdd;
  MutexIDList ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  StringRef CapDiagKind = "mutex";

  // First pass: sort the callee's capability attributes into add/remove
  // lists; assert and requires/excludes attributes act immediately.
  for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *At = const_cast<Attr*>(ArgAttrs[i]);
    switch (At->getKind()) {
      // When we encounter a lock function, we need to add the lock to our
      // lockset.
      case attr::AcquireCapability: {
        auto *A = cast<AcquireCapabilityAttr>(At);
        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                            : ExclusiveLocksToAdd,
                              A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
      case attr::AssertExclusiveLock: {
        AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);

        MutexIDList AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet, AssertLock,
                            LockData(Loc, LK_Exclusive, false, true),
                            ClassifyDiagnostic(A));
        break;
      }
      case attr::AssertSharedLock: {
        AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);

        MutexIDList AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet, AssertLock,
                            LockData(Loc, LK_Shared, false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::ReleaseCapability: {
        auto *A = cast<ReleaseCapabilityAttr>(At);
        if (A->isGeneric())
          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
        else if (A->isShared())
          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
        else
          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      case attr::RequiresCapability: {
        RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
        for (auto *Arg : A->args())
          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
                             POK_FunctionCall, ClassifyDiagnostic(A));
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
        for (auto *Arg : A->args())
          warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
        break;
      }

      // Ignore attributes unrelated to thread-safety
      default:
        break;
    }
  }

  // Figure out if we're calling the constructor of scoped lockable class
  bool isScopedVar = false;
  if (VD) {
    if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->hasAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  // Add locks.
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, M, LockData(Loc, LK_Exclusive, isScopedVar),
                      CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, M, LockData(Loc, LK_Shared, isScopedVar),
                      CapDiagKind);

  // Add the managing object as a dummy mutex, mapped to the underlying mutex.
  // FIXME -- this doesn't work if we acquire multiple locks.
  if (isScopedVar) {
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    SExpr SMutex(&DRE, nullptr, nullptr);

    for (const auto &M : ExclusiveLocksToAdd)
      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive, M),
                        CapDiagKind);
    for (const auto &M : SharedLocksToAdd)
      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared, M),
                        CapDiagKind);
  }

  // Remove locks.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
}

/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      checkAccess(UO->getSubExpr(), AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) { if (!BO->isAssignmentOp()) return; // adjust the context LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx); checkAccess(BO->getLHS(), AK_Written); } /// Whenever we do an LValue to Rvalue cast, we are reading a variable and /// need to ensure we hold any required mutexes. /// FIXME: Deal with non-primitive types. void BuildLockset::VisitCastExpr(CastExpr *CE) { if (CE->getCastKind() != CK_LValueToRValue) return; checkAccess(CE->getSubExpr(), AK_Read); } void BuildLockset::VisitCallExpr(CallExpr *Exp) { if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) { MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee()); // ME can be null when calling a method pointer CXXMethodDecl *MD = CE->getMethodDecl(); if (ME && MD) { if (ME->isArrow()) { if (MD->isConst()) { checkPtAccess(CE->getImplicitObjectArgument(), AK_Read); } else { // FIXME -- should be AK_Written checkPtAccess(CE->getImplicitObjectArgument(), AK_Read); } } else { if (MD->isConst()) checkAccess(CE->getImplicitObjectArgument(), AK_Read); else // FIXME -- should be AK_Written checkAccess(CE->getImplicitObjectArgument(), AK_Read); } } } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) { switch (OE->getOperator()) { case OO_Equal: { const Expr *Target = OE->getArg(0); const Expr *Source = OE->getArg(1); checkAccess(Target, AK_Written); checkAccess(Source, AK_Read); break; } case OO_Star: case OO_Arrow: case OO_Subscript: { const Expr *Obj = OE->getArg(0); checkAccess(Obj, AK_Read); checkPtAccess(Obj, AK_Read); break; } default: { const Expr *Obj = OE->getArg(0); checkAccess(Obj, AK_Read); break; } } } NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); if(!D || !D->hasAttrs()) return; handleCall(Exp, D); } void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) { const CXXConstructorDecl *D = Exp->getConstructor(); if (D && D->isCopyConstructor()) { const Expr* Source = 
Exp->getArg(0); checkAccess(Source, AK_Read); } // FIXME -- only handles constructors in DeclStmt below. } void BuildLockset::VisitDeclStmt(DeclStmt *S) { // adjust the context LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx); for (auto *D : S->getDeclGroup()) { if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) { Expr *E = VD->getInit(); // handle constructors that involve temporaries if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E)) E = EWC->getSubExpr(); if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) { NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor()); if (!CtorD || !CtorD->hasAttrs()) return; handleCall(CE, CtorD, VD); } } } } /// \brief Compute the intersection of two locksets and issue warnings for any /// locks in the symmetric difference. /// /// This function is used at a merge point in the CFG when comparing the lockset /// of each branch being merged. For example, given the following sequence: /// A; if () then B; else C; D; we need to check that the lockset after B and C /// are the same. In the event of a difference, we use the intersection of these /// two locksets at the start of D. /// /// \param FSet1 The first lockset. /// \param FSet2 The second lockset. /// \param JoinLoc The location of the join point for error reporting /// \param LEK1 The error message to report if a mutex is missing from LSet1 /// \param LEK2 The error message to report if a mutex is missing from Lset2 void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1, const FactSet &FSet2, SourceLocation JoinLoc, LockErrorKind LEK1, LockErrorKind LEK2, bool Modify) { FactSet FSet1Orig = FSet1; // Find locks in FSet2 that conflict or are not in FSet1, and warn. 
for (const auto &Fact : FSet2) { const SExpr &FSet2Mutex = FactMan[Fact].MutID; const LockData &LDat2 = FactMan[Fact].LDat; FactSet::iterator I1 = FSet1.findLockIter(FactMan, FSet2Mutex); if (I1 != FSet1.end()) { const LockData* LDat1 = &FactMan[*I1].LDat; if (LDat1->LKind != LDat2.LKind) { Handler.handleExclusiveAndShared("mutex", FSet2Mutex.toString(), LDat2.AcquireLoc, LDat1->AcquireLoc); if (Modify && LDat1->LKind != LK_Exclusive) { // Take the exclusive lock, which is the one in FSet2. *I1 = Fact; } } else if (LDat1->Asserted && !LDat2.Asserted) { // The non-asserted lock in FSet2 is the one we want to track. *I1 = Fact; } } else { if (LDat2.UnderlyingMutex.isValid()) { if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) { // If this is a scoped lock that manages another mutex, and if the // underlying mutex is still held, then warn about the underlying // mutex. Handler.handleMutexHeldEndOfScope("mutex", LDat2.UnderlyingMutex.toString(), LDat2.AcquireLoc, JoinLoc, LEK1); } } else if (!LDat2.Managed && !FSet2Mutex.isUniversal() && !LDat2.Asserted) Handler.handleMutexHeldEndOfScope("mutex", FSet2Mutex.toString(), LDat2.AcquireLoc, JoinLoc, LEK1); } } // Find locks in FSet1 that are not in FSet2, and remove them. for (const auto &Fact : FSet1Orig) { const SExpr &FSet1Mutex = FactMan[Fact].MutID; const LockData &LDat1 = FactMan[Fact].LDat; if (!FSet2.findLock(FactMan, FSet1Mutex)) { if (LDat1.UnderlyingMutex.isValid()) { if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) { // If this is a scoped lock that manages another mutex, and if the // underlying mutex is still held, then warn about the underlying // mutex. 
Handler.handleMutexHeldEndOfScope("mutex", LDat1.UnderlyingMutex.toString(), LDat1.AcquireLoc, JoinLoc, LEK1); } } else if (!LDat1.Managed && !FSet1Mutex.isUniversal() && !LDat1.Asserted) Handler.handleMutexHeldEndOfScope("mutex", FSet1Mutex.toString(), LDat1.AcquireLoc, JoinLoc, LEK2); if (Modify) FSet1.removeLock(FactMan, FSet1Mutex); } } } // Return true if block B never continues to its successors. inline bool neverReturns(const CFGBlock* B) { if (B->hasNoReturnElement()) return true; if (B->empty()) return false; CFGElement Last = B->back(); if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) { if (isa<CXXThrowExpr>(S->getStmt())) return true; } return false; } /// \brief Check a function's CFG for thread-safety violations. /// /// We traverse the blocks in the CFG, compute the set of mutexes that are held /// at the end of each block, and issue warnings for thread safety violations. /// Each block in the CFG is traversed exactly once. void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { // TODO: this whole function needs be rewritten as a visitor for CFGWalker. // For now, we just use the walker to set things up. threadSafety::CFGWalker walker; if (!walker.init(AC)) return; // AC.dumpCFG(true); // threadSafety::printSCFG(walker); CFG *CFGraph = walker.getGraph(); const NamedDecl *D = walker.getDecl(); if (D->hasAttr<NoThreadSafetyAnalysisAttr>()) return; // FIXME: Do something a bit more intelligent inside constructor and // destructor code. Constructors and destructors must assume unique access // to 'this', so checks on member variable access is disabled, but we should // still enable checks on other objects. if (isa<CXXConstructorDecl>(D)) return; // Don't check inside constructors. if (isa<CXXDestructorDecl>(D)) return; // Don't check inside destructors. BlockInfo.resize(CFGraph->getNumBlockIDs(), CFGBlockInfo::getEmptyBlockInfo(LocalVarMap)); // We need to explore the CFG via a "topological" ordering. 
// That way, we will be guaranteed to have information about required // predecessor locksets when exploring a new block. const PostOrderCFGView *SortedGraph = walker.getSortedGraph(); PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); // Mark entry block as reachable BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true; // Compute SSA names for local variables LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo); // Fill in source locations for all CFGBlocks. findBlockLocations(CFGraph, SortedGraph, BlockInfo); MutexIDList ExclusiveLocksAcquired; MutexIDList SharedLocksAcquired; MutexIDList LocksReleased; // Add locks from exclusive_locks_required and shared_locks_required // to initial lockset. Also turn off checking for lock and unlock functions. // FIXME: is there a more intelligent way to check lock/unlock functions? if (!SortedGraph->empty() && D->hasAttrs()) { const CFGBlock *FirstBlock = *SortedGraph->begin(); FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet; const AttrVec &ArgAttrs = D->getAttrs(); MutexIDList ExclusiveLocksToAdd; MutexIDList SharedLocksToAdd; StringRef CapDiagKind = "mutex"; SourceLocation Loc = D->getLocation(); for (const auto *Attr : ArgAttrs) { Loc = Attr->getLocation(); if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, nullptr, D); CapDiagKind = ClassifyDiagnostic(A); } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) { // UNLOCK_FUNCTION() is used to hide the underlying lock implementation. // We must ignore such methods. if (A->args_size() == 0) return; // FIXME -- deal with exclusive vs. shared unlock functions? getMutexIDs(ExclusiveLocksToAdd, A, nullptr, D); getMutexIDs(LocksReleased, A, nullptr, D); CapDiagKind = ClassifyDiagnostic(A); } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) { if (A->args_size() == 0) return; getMutexIDs(A->isShared() ? 
SharedLocksAcquired : ExclusiveLocksAcquired, A, nullptr, D); CapDiagKind = ClassifyDiagnostic(A); } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) { // Don't try to check trylock functions for now return; } else if (isa<SharedTrylockFunctionAttr>(Attr)) { // Don't try to check trylock functions for now return; } } // FIXME -- Loc can be wrong here. for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd) addLock(InitialLockset, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive), CapDiagKind); for (const auto &SharedLockToAdd : SharedLocksToAdd) addLock(InitialLockset, SharedLockToAdd, LockData(Loc, LK_Shared), CapDiagKind); } for (const auto *CurrBlock : *SortedGraph) { int CurrBlockID = CurrBlock->getBlockID(); CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; // Use the default initial lockset in case there are no predecessors. VisitedBlocks.insert(CurrBlock); // Iterate through the predecessor blocks and warn if the lockset for all // predecessors is not the same. We take the entry lockset of the current // block to be the intersection of all previous locksets. // FIXME: By keeping the intersection, we may output more errors in future // for a lock which is not in the intersection, but was in the union. We // may want to also keep the union in future. As an example, let's say // the intersection contains Mutex L, and the union contains L and M. // Later we unlock M. At this point, we would output an error because we // never locked M; although the real error is probably that we forgot to // lock M on all code paths. Conversely, let's say that later we lock M. // In this case, we should compare against the intersection instead of the // union because the real error is probably that we forgot to unlock M on // all code paths. 
bool LocksetInitialized = false; SmallVector<CFGBlock *, 8> SpecialBlocks; for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), PE = CurrBlock->pred_end(); PI != PE; ++PI) { // if *PI -> CurrBlock is a back edge if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) continue; int PrevBlockID = (*PI)->getBlockID(); CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; // Ignore edges from blocks that can't return. if (neverReturns(*PI) || !PrevBlockInfo->Reachable) continue; // Okay, we can reach this block from the entry. CurrBlockInfo->Reachable = true; // If the previous block ended in a 'continue' or 'break' statement, then // a difference in locksets is probably due to a bug in that block, rather // than in some other predecessor. In that case, keep the other // predecessor's lockset. if (const Stmt *Terminator = (*PI)->getTerminator()) { if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) { SpecialBlocks.push_back(*PI); continue; } } FactSet PrevLockset; getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock); if (!LocksetInitialized) { CurrBlockInfo->EntrySet = PrevLockset; LocksetInitialized = true; } else { intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc, LEK_LockedSomePredecessors); } } // Skip rest of block if it's not reachable. if (!CurrBlockInfo->Reachable) continue; // Process continue and break blocks. Assume that the lockset for the // resulting block is unaffected by any discrepancies in them. for (const auto *PrevBlock : SpecialBlocks) { int PrevBlockID = PrevBlock->getBlockID(); CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; if (!LocksetInitialized) { CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet; LocksetInitialized = true; } else { // Determine whether this edge is a loop terminator for diagnostic // purposes. FIXME: A 'break' statement might be a loop terminator, but // it might also be part of a switch. 
Also, a subsequent destructor // might add to the lockset, in which case the real issue might be a // double lock on the other path. const Stmt *Terminator = PrevBlock->getTerminator(); bool IsLoop = Terminator && isa<ContinueStmt>(Terminator); FactSet PrevLockset; getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, PrevBlock, CurrBlock); // Do not update EntrySet. intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset, PrevBlockInfo->ExitLoc, IsLoop ? LEK_LockedSomeLoopIterations : LEK_LockedSomePredecessors, false); } } BuildLockset LocksetBuilder(this, *CurrBlockInfo); // Visit all the statements in the basic block. for (CFGBlock::const_iterator BI = CurrBlock->begin(), BE = CurrBlock->end(); BI != BE; ++BI) { switch (BI->getKind()) { case CFGElement::Statement: { CFGStmt CS = BI->castAs<CFGStmt>(); LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt())); break; } // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now. case CFGElement::AutomaticObjectDtor: { CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>(); CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>( AD.getDestructorDecl(AC.getASTContext())); if (!DD->hasAttrs()) break; // Create a dummy expression, VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl()); DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, AD.getTriggerStmt()->getLocEnd()); LocksetBuilder.handleCall(&DRE, DD); break; } default: break; } } CurrBlockInfo->ExitSet = LocksetBuilder.FSet; // For every back edge from CurrBlock (the end of the loop) to another block // (FirstLoopBlock) we need to check that the Lockset of Block is equal to // the one held at the beginning of FirstLoopBlock. We can look up the // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map. 
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), SE = CurrBlock->succ_end(); SI != SE; ++SI) { // if CurrBlock -> *SI is *not* a back edge if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) continue; CFGBlock *FirstLoopBlock = *SI; CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()]; CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID]; intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet, PreLoop->EntryLoc, LEK_LockedSomeLoopIterations, false); } } CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()]; CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()]; // Skip the final check if the exit block is unreachable. if (!Final->Reachable) return; // By default, we expect all locks held on entry to be held on exit. FactSet ExpectedExitSet = Initial->EntrySet; // Adjust the expected exit set by adding or removing locks, as declared // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then // issue the appropriate warning. // FIXME: the location here is not quite right. for (const auto &Lock : ExclusiveLocksAcquired) ExpectedExitSet.addLock(FactMan, Lock, LockData(D->getLocation(), LK_Exclusive)); for (const auto &Lock : SharedLocksAcquired) ExpectedExitSet.addLock(FactMan, Lock, LockData(D->getLocation(), LK_Shared)); for (const auto &Lock : LocksReleased) ExpectedExitSet.removeLock(FactMan, Lock); // FIXME: Should we call this function for all blocks which exit the function? intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc, LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction, false); } } // end anonymous namespace namespace clang { namespace thread_safety { /// \brief Check a function's CFG for thread-safety violations. /// /// We traverse the blocks in the CFG, compute the set of mutexes that are held /// at the end of each block, and issue warnings for thread safety violations. /// Each block in the CFG is traversed exactly once. 
void runThreadSafetyAnalysis(AnalysisDeclContext &AC, ThreadSafetyHandler &Handler) { ThreadSafetyAnalyzer Analyzer(Handler); Analyzer.runAnalysis(AC); } /// \brief Helper function that returns a LockKind required for the given level /// of access. LockKind getLockKindFromAccessKind(AccessKind AK) { switch (AK) { case AK_Read : return LK_Shared; case AK_Written : return LK_Exclusive; } llvm_unreachable("Unknown AccessKind"); } }} // end namespace clang::thread_safety
36.66126
85
0.634897
[ "object", "vector" ]
11e02f181df178ca68d64039a539ac5b67b15401
478,100
cpp
C++
lib/Runtime/ByteCode/ByteCodeEmitter.cpp
lizhengxing/ChakraCore
831e6b26b9ecd093d433480b217ececa2dccff45
[ "MIT" ]
1
2018-09-29T23:39:40.000Z
2018-09-29T23:39:40.000Z
lib/Runtime/ByteCode/ByteCodeEmitter.cpp
AzureMentor/ChakraCore
831e6b26b9ecd093d433480b217ececa2dccff45
[ "MIT" ]
null
null
null
lib/Runtime/ByteCode/ByteCodeEmitter.cpp
AzureMentor/ChakraCore
831e6b26b9ecd093d433480b217ececa2dccff45
[ "MIT" ]
null
null
null
//------------------------------------------------------------------------------------------------------- // Copyright (C) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. //------------------------------------------------------------------------------------------------------- #include "RuntimeByteCodePch.h" #include "FormalsUtil.h" #include "Language/AsmJs.h" #include "ConfigFlagsList.h" void EmitReference(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo); void EmitAssignment(ParseNode *asgnNode, ParseNode *lhs, Js::RegSlot rhsLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo); void EmitLoad(ParseNode *rhs, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo); void EmitCall(ParseNodeCall* pnodeCall, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo, BOOL fReturnValue, BOOL fEvaluateComponents, Js::RegSlot overrideThisLocation = Js::Constants::NoRegister, Js::RegSlot newTargetLocation = Js::Constants::NoRegister); void EmitYield(Js::RegSlot inputLocation, Js::RegSlot resultLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo, Js::RegSlot yieldStarIterator = Js::Constants::NoRegister); void EmitUseBeforeDeclaration(Symbol *sym, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo); void EmitUseBeforeDeclarationRuntimeError(ByteCodeGenerator *byteCodeGenerator, Js::RegSlot location); void VisitClearTmpRegs(ParseNode * pnode, ByteCodeGenerator * byteCodeGenerator, FuncInfo * funcInfo); bool CallTargetIsArray(ParseNode *pnode) { return pnode->nop == knopName && pnode->AsParseNodeName()->PropertyIdFromNameNode() == Js::PropertyIds::Array; } #define STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode) \ if ((isTopLevel)) \ { \ byteCodeGenerator->StartStatement(pnode); \ } #define ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode) \ if ((isTopLevel)) \ { \ byteCodeGenerator->EndStatement(pnode); \ } BOOL 
MayHaveSideEffectOnNode(ParseNode *pnode, ParseNode *pnodeSE) { // Try to determine whether pnodeSE may kill the named var represented by pnode. if (pnode->nop == knopComputedName) { pnode = pnode->AsParseNodeUni()->pnode1; } if (pnode->nop != knopName) { // Only investigating named vars here. return false; } uint fnop = ParseNode::Grfnop(pnodeSE->nop); if (fnop & fnopLeaf) { // pnodeSE is a leaf and can't kill anything. return false; } if (fnop & fnopAsg) { // pnodeSE is an assignment (=, ++, +=, etc.) // Trying to examine the LHS of pnodeSE caused small perf regressions, // maybe because of code layout or some other subtle effect. return true; } if (fnop & fnopUni) { // pnodeSE is a unary op, so recurse to the source (if present - e.g., [] may have no opnd). if (pnodeSE->nop == knopTempRef) { return false; } else { return pnodeSE->AsParseNodeUni()->pnode1 && MayHaveSideEffectOnNode(pnode, pnodeSE->AsParseNodeUni()->pnode1); } } else if (fnop & fnopBin) { // pnodeSE is a binary (or ternary) op, so recurse to the sources (if present). 
return MayHaveSideEffectOnNode(pnode, pnodeSE->AsParseNodeBin()->pnode1) || (pnodeSE->AsParseNodeBin()->pnode2 && MayHaveSideEffectOnNode(pnode, pnodeSE->AsParseNodeBin()->pnode2)); } else if (pnodeSE->nop == knopQmark) { ParseNodeTri * pnodeTriSE = pnodeSE->AsParseNodeTri(); return MayHaveSideEffectOnNode(pnode, pnodeTriSE->pnode1) || MayHaveSideEffectOnNode(pnode, pnodeTriSE->pnode2) || MayHaveSideEffectOnNode(pnode, pnodeTriSE->pnode3); } else if (pnodeSE->nop == knopCall || pnodeSE->nop == knopNew) { return MayHaveSideEffectOnNode(pnode, pnodeSE->AsParseNodeCall()->pnodeTarget) || (pnodeSE->AsParseNodeCall()->pnodeArgs && MayHaveSideEffectOnNode(pnode, pnodeSE->AsParseNodeCall()->pnodeArgs)); } else if (pnodeSE->nop == knopList) { return true; } return false; } bool IsCallOfConstants(ParseNode *pnode); bool BlockHasOwnScope(ParseNodeBlock * pnodeBlock, ByteCodeGenerator *byteCodeGenerator); bool CreateNativeArrays(ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo); bool IsArguments(ParseNode *pnode) { for (;;) { switch (pnode->nop) { case knopName: return pnode->AsParseNodeName()->sym && pnode->AsParseNodeName()->sym->IsArguments(); case knopCall: case knopNew: if (IsArguments(pnode->AsParseNodeCall()->pnodeTarget)) { return true; } if (pnode->AsParseNodeCall()->pnodeArgs) { ParseNode *pnodeArg = pnode->AsParseNodeCall()->pnodeArgs; while (pnodeArg->nop == knopList) { if (IsArguments(pnodeArg->AsParseNodeBin()->pnode1)) return true; pnodeArg = pnodeArg->AsParseNodeBin()->pnode2; } pnode = pnodeArg; break; } return false; case knopArray: if (pnode->AsParseNodeArrLit()->arrayOfNumbers || pnode->AsParseNodeArrLit()->count == 0) { return false; } pnode = pnode->AsParseNodeUni()->pnode1; break; case knopQmark: if (IsArguments(pnode->AsParseNodeTri()->pnode1) || IsArguments(pnode->AsParseNodeTri()->pnode2)) { return true; } pnode = pnode->AsParseNodeTri()->pnode3; break; // // Cases where we don't check for "arguments" yet. // Assume that they might have it. 
Disable the optimization is such scenarios // case knopList: case knopObject: case knopVarDecl: case knopConstDecl: case knopLetDecl: case knopFncDecl: case knopClassDecl: case knopFor: case knopIf: case knopDoWhile: case knopWhile: case knopForIn: case knopForOf: case knopReturn: case knopBlock: case knopBreak: case knopContinue: case knopTypeof: case knopThrow: case knopWith: case knopFinally: case knopTry: case knopTryCatch: case knopTryFinally: case knopArrayPattern: case knopObjectPattern: case knopParamPattern: return true; default: { uint flags = ParseNode::Grfnop(pnode->nop); if (flags&fnopUni) { ParseNodeUni * pnodeUni = pnode->AsParseNodeUni(); Assert(pnodeUni->pnode1); pnode = pnodeUni->pnode1; break; } else if (flags&fnopBin) { ParseNodeBin * pnodeBin = pnode->AsParseNodeBin(); Assert(pnodeBin->pnode1 && pnodeBin->pnode2); if (IsArguments(pnodeBin->pnode1)) { return true; } pnode = pnodeBin->pnode2; break; } return false; } } } } bool ApplyEnclosesArgs(ParseNode* fncDecl, ByteCodeGenerator* byteCodeGenerator); void Emit(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, BOOL fReturnValue, bool isConstructorCall = false, ParseNode *bindPnode = nullptr, bool isTopLevel = false); void EmitBinaryOpnds(ParseNode *pnode1, ParseNode *pnode2, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo); bool IsExpressionStatement(ParseNode* stmt, const Js::ScriptContext *const scriptContext); void EmitInvoke(Js::RegSlot location, Js::RegSlot callObjLocation, Js::PropertyId propertyId, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo); void EmitInvoke(Js::RegSlot location, Js::RegSlot callObjLocation, Js::PropertyId propertyId, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo, Js::RegSlot arg1Location); static const Js::OpCode nopToOp[knopLim] = { #define OP(x) Br##x##_A #define PTNODE(nop,sn,pc,nk,grfnop,json) Js::OpCode::pc, #include "ptlist.h" }; static const Js::OpCode nopToCMOp[knopLim] = { #define OP(x) Cm##x##_A 
#define PTNODE(nop,sn,pc,nk,grfnop,json) Js::OpCode::pc, #include "ptlist.h" }; Js::OpCode ByteCodeGenerator::ToChkUndeclOp(Js::OpCode op) const { switch (op) { case Js::OpCode::StLocalSlot: return Js::OpCode::StLocalSlotChkUndecl; case Js::OpCode::StParamSlot: return Js::OpCode::StParamSlotChkUndecl; case Js::OpCode::StInnerSlot: return Js::OpCode::StInnerSlotChkUndecl; case Js::OpCode::StEnvSlot: return Js::OpCode::StEnvSlotChkUndecl; case Js::OpCode::StObjSlot: return Js::OpCode::StObjSlotChkUndecl; case Js::OpCode::StLocalObjSlot: return Js::OpCode::StLocalObjSlotChkUndecl; case Js::OpCode::StParamObjSlot: return Js::OpCode::StParamObjSlotChkUndecl; case Js::OpCode::StInnerObjSlot: return Js::OpCode::StInnerObjSlotChkUndecl; case Js::OpCode::StEnvObjSlot: return Js::OpCode::StEnvObjSlotChkUndecl; default: AssertMsg(false, "Unknown opcode for chk undecl mapping"); return Js::OpCode::InvalidOpCode; } } // Tracks a register slot let/const property for the passed in debugger block/catch scope. // debuggerScope - The scope to add the variable to. // symbol - The symbol that represents the register property. // funcInfo - The function info used to store the property into the tracked debugger register slot list. // flags - The flags to assign to the property. // isFunctionDeclaration - Whether or not the register is a function declaration, which requires that its byte code offset be updated immediately. 
void ByteCodeGenerator::TrackRegisterPropertyForDebugger( Js::DebuggerScope *debuggerScope, Symbol *symbol, FuncInfo *funcInfo, Js::DebuggerScopePropertyFlags flags /*= Js::DebuggerScopePropertyFlags_None*/, bool isFunctionDeclaration /*= false*/) { Assert(debuggerScope); Assert(symbol); Assert(funcInfo); Js::RegSlot location = symbol->GetLocation(); Js::DebuggerScope *correctDebuggerScope = debuggerScope; if (debuggerScope->scopeType != Js::DiagExtraScopesType::DiagBlockScopeDirect && debuggerScope->scopeType != Js::DiagExtraScopesType::DiagCatchScopeDirect) { // We have to get the appropriate scope and add property over there. // Make sure the scope is created whether we're in debug mode or not, because we // need the empty scopes present during reparsing for debug mode. correctDebuggerScope = debuggerScope->GetSiblingScope(location, Writer()->GetFunctionWrite()); } if (this->ShouldTrackDebuggerMetadata() && !symbol->GetIsTrackedForDebugger()) { // Only track the property if we're in debug mode since it's only needed by the debugger. Js::PropertyId propertyId = symbol->EnsurePosition(this); this->Writer()->AddPropertyToDebuggerScope( correctDebuggerScope, location, propertyId, /*shouldConsumeRegister*/ true, flags, isFunctionDeclaration); Js::FunctionBody *byteCodeFunction = funcInfo->GetParsedFunctionBody(); byteCodeFunction->InsertSymbolToRegSlotList(location, propertyId, funcInfo->varRegsCount); symbol->SetIsTrackedForDebugger(true); } } void ByteCodeGenerator::TrackActivationObjectPropertyForDebugger( Js::DebuggerScope *debuggerScope, Symbol *symbol, Js::DebuggerScopePropertyFlags flags /*= Js::DebuggerScopePropertyFlags_None*/, bool isFunctionDeclaration /*= false*/) { Assert(debuggerScope); Assert(symbol); // Only need to track activation object properties in debug mode. 
if (ShouldTrackDebuggerMetadata() && !symbol->GetIsTrackedForDebugger()) { Js::RegSlot location = symbol->GetLocation(); Js::PropertyId propertyId = symbol->EnsurePosition(this); this->Writer()->AddPropertyToDebuggerScope( debuggerScope, location, propertyId, /*shouldConsumeRegister*/ false, flags, isFunctionDeclaration); symbol->SetIsTrackedForDebugger(true); } } void ByteCodeGenerator::TrackSlotArrayPropertyForDebugger( Js::DebuggerScope *debuggerScope, Symbol* symbol, Js::PropertyId propertyId, Js::DebuggerScopePropertyFlags flags /*= Js::DebuggerScopePropertyFlags_None*/, bool isFunctionDeclaration /*= false*/) { // Note: Slot array properties are tracked even in non-debug mode in order to support slot array serialization // of let/const variables between non-debug and debug mode (for example, when a slot array var escapes and is retrieved // after a debugger attach or for WWA apps). They are also needed for heap enumeration. Assert(debuggerScope); Assert(symbol); if (!symbol->GetIsTrackedForDebugger()) { Js::RegSlot location = symbol->GetScopeSlot(); Assert(location != Js::Constants::NoRegister); Assert(propertyId != Js::Constants::NoProperty); this->Writer()->AddPropertyToDebuggerScope( debuggerScope, location, propertyId, /*shouldConsumeRegister*/ false, flags, isFunctionDeclaration); symbol->SetIsTrackedForDebugger(true); } } // Tracks a function declaration inside a block scope for the debugger metadata's current scope (let binding). void ByteCodeGenerator::TrackFunctionDeclarationPropertyForDebugger(Symbol *functionDeclarationSymbol, FuncInfo *funcInfoParent) { Assert(functionDeclarationSymbol); Assert(funcInfoParent); AssertMsg(functionDeclarationSymbol->GetIsBlockVar(), "We should only track inner function let bindings for the debugger."); // Note: we don't have to check symbol->GetIsTrackedForDebugger, as we are not doing actual work here, // which is done in other Track* functions that we call. 
if (functionDeclarationSymbol->IsInSlot(this, funcInfoParent)) { if (functionDeclarationSymbol->GetScope()->GetIsObject()) { this->TrackActivationObjectPropertyForDebugger( this->Writer()->GetCurrentDebuggerScope(), functionDeclarationSymbol, Js::DebuggerScopePropertyFlags_None, true /*isFunctionDeclaration*/); } else { // Make sure the property has a slot. This will bump up the size of the slot array if necessary. // Note that slot array inner function bindings are tracked even in non-debug mode in order // to keep the lifetime of the closure binding that could escape around for heap enumeration. functionDeclarationSymbol->EnsureScopeSlot(this, funcInfoParent); functionDeclarationSymbol->EnsurePosition(this); this->TrackSlotArrayPropertyForDebugger( this->Writer()->GetCurrentDebuggerScope(), functionDeclarationSymbol, functionDeclarationSymbol->GetPosition(), Js::DebuggerScopePropertyFlags_None, true /*isFunctionDeclaration*/); } } else { this->TrackRegisterPropertyForDebugger( this->Writer()->GetCurrentDebuggerScope(), functionDeclarationSymbol, funcInfoParent, Js::DebuggerScopePropertyFlags_None, true /*isFunctionDeclaration*/); } } // Updates the byte code offset of the property with the passed in location and ID. // Used to track let/const variables that are in the dead zone debugger side. // location - The activation object, scope slot index, or register location for the property. // propertyId - The ID of the property to update. // shouldConsumeRegister - Whether or not the a register should be consumed (used for reg slot locations). 
void ByteCodeGenerator::UpdateDebuggerPropertyInitializationOffset(Js::RegSlot location, Js::PropertyId propertyId, bool shouldConsumeRegister)
{
    Assert(this->Writer());
    Js::DebuggerScope* currentDebuggerScope = this->Writer()->GetCurrentDebuggerScope();
    Assert(currentDebuggerScope);
    // Assert catches this in debug builds; the null check keeps release builds safe.
    if (currentDebuggerScope != nullptr)
    {
        this->Writer()->UpdateDebuggerPropertyInitializationOffset(
            currentDebuggerScope,
            location,
            propertyId,
            shouldConsumeRegister);
    }
}

// Emits the Arguments-object creation for a function, choosing the cached- or
// uncached-scope strategy based on whether the function has a cached scope.
void ByteCodeGenerator::LoadHeapArguments(FuncInfo *funcInfo)
{
    if (funcInfo->GetHasCachedScope())
    {
        this->LoadCachedHeapArguments(funcInfo);
    }
    else
    {
        this->LoadUncachedHeapArguments(funcInfo);
    }
}

// Fills propIds with the property IDs of the function's formal parameters, in order.
// Non-identifier formals get Constants::NoProperty. For duplicate parameter names,
// only the LAST occurrence keeps the real property ID; earlier duplicates are
// overwritten with NoProperty, and propIds->hadDuplicates is set accordingly.
void GetFormalArgsArray(ByteCodeGenerator *byteCodeGenerator, FuncInfo * funcInfo, Js::PropertyIdArray *propIds)
{
    Assert(funcInfo);
    Assert(propIds);
    Assert(byteCodeGenerator);
    bool hadDuplicates = false;
    Js::ArgSlot i = 0;

    auto processArg = [&](ParseNode *pnode)
    {
        if (pnode->IsVarLetOrConst())
        {
            Assert(i < propIds->count);
            Symbol *sym = pnode->AsParseNodeVar()->sym;
            Assert(sym);
            Js::PropertyId symPos = sym->EnsurePosition(byteCodeGenerator);

            //
            // Check if the function has any same name parameters
            // For the same name param, only the last one will be passed the correct propertyid
            // For remaining dup param names, pass Constants::NoProperty
            //
            for (Js::ArgSlot j = 0; j < i; j++)
            {
                if (propIds->elements[j] == symPos)
                {
                    // Found a dup parameter name
                    propIds->elements[j] = Js::Constants::NoProperty;
                    hadDuplicates = true;
                    break;
                }
            }
            propIds->elements[i] = symPos;
        }
        else
        {
            propIds->elements[i] = Js::Constants::NoProperty;
        }
        ++i;
    };

    MapFormals(funcInfo->root, processArg);

    propIds->hadDuplicates = hadDuplicates;
}

// Emits LdHeapArguments/LdLetHeapArguments for a function without a cached scope,
// first preparing the formal-property-id array when a scope object is needed.
void ByteCodeGenerator::LoadUncachedHeapArguments(FuncInfo *funcInfo)
{
    Assert(funcInfo->GetHasHeapArguments());

    Scope *scope = funcInfo->GetBodyScope();
    Assert(scope);
    Symbol *argSym = funcInfo->GetArgumentsSymbol();
    Assert(argSym && argSym->IsArguments());
    Js::RegSlot argumentsLoc = argSym->GetLocation();

    // Non-simple parameter lists use the "let" flavor of the opcode.
    Js::OpCode opcode = !funcInfo->root->HasNonSimpleParameterList() ? Js::OpCode::LdHeapArguments : Js::OpCode::LdLetHeapArguments;
    bool hasRest = funcInfo->root->pnodeRest != nullptr;
    // Formal count: in-args minus the implicit "this", plus one for a rest param if present.
    uint count = funcInfo->inArgsCount + (hasRest ? 1 : 0) - 1;
    if (count == 0)
    {
        // If no formals to function (only "this"), then no need to create the scope object.
        // Leave both the arguments location and the propertyIds location as null.
        Assert(funcInfo->root->pnodeParams == nullptr && !hasRest);
    }
    else if (!NeedScopeObjectForArguments(funcInfo, funcInfo->root))
    {
        // We may not need a scope object for arguments, e.g. strict mode with no eval.
    }
    else if (funcInfo->frameObjRegister != Js::Constants::NoRegister)
    {
        // Pass the frame object and ID array to the runtime, and put the resulting Arguments object
        // at the expected location.
        Js::PropertyIdArray *propIds = funcInfo->GetParsedFunctionBody()->AllocatePropertyIdArrayForFormals(UInt32Math::Mul(count, sizeof(Js::PropertyId)), count, 0);
        GetFormalArgsArray(this, funcInfo, propIds);
    }

    this->m_writer.Reg1(opcode, argumentsLoc);
    EmitLocalPropInit(argSym->GetLocation(), argSym, funcInfo);
}

// Emits LdHeapArgsCached/LdLetHeapArgsCached for a function whose scope is cached.
void ByteCodeGenerator::LoadCachedHeapArguments(FuncInfo *funcInfo)
{
    Assert(funcInfo->GetHasHeapArguments());

    Scope *scope = funcInfo->GetBodyScope();
    Assert(scope);
    Symbol *argSym = funcInfo->GetArgumentsSymbol();
    Assert(argSym && argSym->IsArguments());
    Js::RegSlot argumentsLoc = argSym->GetLocation();

    Js::OpCode op = !funcInfo->root->HasNonSimpleParameterList() ? Js::OpCode::LdHeapArgsCached : Js::OpCode::LdLetHeapArgsCached;

    this->m_writer.Reg1(op, argumentsLoc);
    EmitLocalPropInit(argumentsLoc, argSym, funcInfo);
}

// Builds a JavascriptArray of arrayLength strings from a knopList chain of knopStr
// nodes (the last element is the bare knopStr node terminating the list).
Js::JavascriptArray* ByteCodeGenerator::BuildArrayFromStringList(ParseNode* stringNodeList, uint arrayLength, Js::ScriptContext* scriptContext)
{
    Assert(stringNodeList);

    uint index = 0;
    Js::Var str;
    IdentPtr pid;
    Js::JavascriptArray* pArr = scriptContext->GetLibrary()->CreateArray(arrayLength);

    // Walk the list links; each pnode1 is a string literal.
    while (stringNodeList->nop == knopList)
    {
        Assert(stringNodeList->AsParseNodeBin()->pnode1->nop == knopStr);

        pid = stringNodeList->AsParseNodeBin()->pnode1->AsParseNodeStr()->pid;
        str = Js::JavascriptString::NewCopyBuffer(pid->Psz(), pid->Cch(), scriptContext);
        pArr->SetItemWithAttributes(index, str, PropertyEnumerable);

        stringNodeList = stringNodeList->AsParseNodeBin()->pnode2;
        index++;
    }

    // The chain terminates with a final bare string node.
    Assert(stringNodeList->nop == knopStr);

    pid = stringNodeList->AsParseNodeStr()->pid;
    str = Js::JavascriptString::NewCopyBuffer(pid->Psz(), pid->Cch(), scriptContext);
    pArr->SetItemWithAttributes(index, str, PropertyEnumerable);

    return pArr;
}

// For now, this just assigns field ids for the current script.
// Later, we will combine this information with the global field id map.
// This temporary code will not work if a global member is accessed both with and without a LHS.
void ByteCodeGenerator::AssignPropertyIds(Js::ParseableFunctionInfo* functionInfo)
{
    globalScope->ForEachSymbol([this, functionInfo](Symbol * sym)
    {
        this->AssignPropertyId(sym, functionInfo);
    });
}

// Emits the InitUndecl* opcodes for every let/const declared directly in pnodeBlock
// (marking them not-yet-initialized, i.e. in their dead zone) and records each one
// with the debugger scope. The opcode/tracking variant chosen depends on whether the
// symbol lives in the global scope, an activation object, a slot array, or a register.
void ByteCodeGenerator::InitBlockScopedContent(ParseNodeBlock *pnodeBlock, Js::DebuggerScope* debuggerScope, FuncInfo *funcInfo)
{
    Assert(pnodeBlock->nop == knopBlock);

    auto genBlockInit = [this, debuggerScope, funcInfo](ParseNode *pnode)
    {
        // Only check if the scope is valid when let/const vars are in the scope. If there are no let/const vars,
        // the debugger scope will not be created.
        AssertMsg(debuggerScope, "Missing a case of scope tracking in BeginEmitBlock.");

        // Deliberately shadows the captured funcInfo with the function currently on
        // top of the emit stack.
        FuncInfo *funcInfo = this->TopFuncInfo();
        Symbol *sym = pnode->AsParseNodeVar()->sym;
        Scope *scope = sym->GetScope();

        if (sym->GetIsGlobal())
        {
            Js::PropertyId propertyId = sym->EnsurePosition(this);
            if (this->flags & fscrEval)
            {
                AssertMsg(this->IsConsoleScopeEval(), "Let/Consts cannot be in global scope outside of console eval");
                Js::OpCode op = (sym->GetDecl()->nop == knopConstDecl) ? Js::OpCode::InitUndeclConsoleConstFld : Js::OpCode::InitUndeclConsoleLetFld;
                this->m_writer.ElementScopedU(op, funcInfo->FindOrAddReferencedPropertyId(propertyId));
            }
            else
            {
                Js::OpCode op = (sym->GetDecl()->nop == knopConstDecl) ? Js::OpCode::InitUndeclRootConstFld : Js::OpCode::InitUndeclRootLetFld;
                this->m_writer.ElementRootU(op, funcInfo->FindOrAddReferencedPropertyId(propertyId));
            }
        }
        else if (sym->IsInSlot(this, funcInfo) || (scope->GetIsObject() && sym->NeedsSlotAlloc(this, funcInfo)))
        {
            if (scope->GetIsObject())
            {
                Js::RegSlot scopeLocation = scope->GetLocation();
                Js::PropertyId propertyId = sym->EnsurePosition(this);

                // Pick the opcode form based on whether the scope object is the
                // function's frame object or an inner scope.
                if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister)
                {
                    uint cacheId = funcInfo->FindOrAddInlineCacheId(scopeLocation, propertyId, false, true);

                    Js::OpCode op = (sym->GetDecl()->nop == knopConstDecl) ?
                        Js::OpCode::InitUndeclLocalConstFld : Js::OpCode::InitUndeclLocalLetFld;

                    this->m_writer.ElementP(op, ByteCodeGenerator::ReturnRegister, cacheId);
                }
                else
                {
                    uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->InnerScopeToRegSlot(scope), propertyId, false, true);

                    Js::OpCode op = (sym->GetDecl()->nop == knopConstDecl) ?
                        Js::OpCode::InitUndeclConstFld : Js::OpCode::InitUndeclLetFld;

                    this->m_writer.ElementPIndexed(op, ByteCodeGenerator::ReturnRegister, scope->GetInnerScopeIndex(), cacheId);
                }

                TrackActivationObjectPropertyForDebugger(debuggerScope, sym, pnode->nop == knopConstDecl ? Js::DebuggerScopePropertyFlags_Const : Js::DebuggerScopePropertyFlags_None);
            }
            else
            {
                // Slot-array case: write InitUndecl into a temp and store it to the slot.
                Js::RegSlot tmpReg = funcInfo->AcquireTmpRegister();
                this->m_writer.Reg1(Js::OpCode::InitUndecl, tmpReg);
                this->EmitLocalPropInit(tmpReg, sym, funcInfo);
                funcInfo->ReleaseTmpRegister(tmpReg);

                // Slot array properties are tracked in non-debug mode as well because they need to stay
                // around for heap enumeration and escaping during attach/detach.
                TrackSlotArrayPropertyForDebugger(debuggerScope, sym, sym->EnsurePosition(this), pnode->nop == knopConstDecl ? Js::DebuggerScopePropertyFlags_Const : Js::DebuggerScopePropertyFlags_None);
            }
        }
        else if (!sym->GetIsModuleExportStorage())
        {
            if (sym->GetDecl()->AsParseNodeVar()->isSwitchStmtDecl)
            {
                // let/const declared in a switch is the only case of a variable that must be checked for
                // use-before-declaration dynamically within its own function.
                this->m_writer.Reg1(Js::OpCode::InitUndecl, sym->GetLocation());
            }

            // Syms that begin in register may be delay-captured. In debugger mode, such syms
            // will live only in slots, so tell the debugger to find them there.
            if (sym->NeedsSlotAlloc(this, funcInfo))
            {
                TrackSlotArrayPropertyForDebugger(debuggerScope, sym, sym->EnsurePosition(this), pnode->nop == knopConstDecl ? Js::DebuggerScopePropertyFlags_Const : Js::DebuggerScopePropertyFlags_None);
            }
            else
            {
                TrackRegisterPropertyForDebugger(debuggerScope, sym, funcInfo, pnode->nop == knopConstDecl ? Js::DebuggerScopePropertyFlags_Const : Js::DebuggerScopePropertyFlags_None);
            }
        }
    };

    IterateBlockScopedVariables(pnodeBlock, genBlockInit);
}

// Records the start of a debugger scope if the passed in node has any let/const variables (or is not a block node).
// If it has no let/const variables, nullptr will be returned as no scope will be created.
Js::DebuggerScope* ByteCodeGenerator::RecordStartScopeObject(ParseNode * pnode, Js::DiagExtraScopesType scopeType, Js::RegSlot scopeLocation /*= Js::Constants::NoRegister*/, int* index /*= nullptr*/)
{
    Assert(pnode);
    if (pnode->nop == knopBlock && !pnode->AsParseNodeBlock()->HasBlockScopedContent())
    {
        // In order to reduce allocations now that we track debugger scopes in non-debug mode,
        // don't add a block to the chain if it has no let/const variables at all.
        return nullptr;
    }

    return this->Writer()->RecordStartScopeObject(scopeType, scopeLocation, index);
}

// Records the end of the current scope, but only if the current block has block scoped content.
// Otherwise, a scope would not have been added (see ByteCodeGenerator::RecordStartScopeObject()).
void ByteCodeGenerator::RecordEndScopeObject(ParseNode *pnodeBlock)
{
    Assert(pnodeBlock);
    if (pnodeBlock->nop == knopBlock && !pnodeBlock->AsParseNodeBlock()->HasBlockScopedContent())
    {
        return;
    }

    this->Writer()->RecordEndScopeObject();
}

// Opens a block for emission: pushes its scope (if it owns one), instantiates the
// runtime block scope (object or slot array) when required, records the matching
// debugger scope, defines the block's function declarations up front, and finally
// emits the InitUndecl markers for the block's let/const bindings.
void BeginEmitBlock(ParseNodeBlock *pnodeBlock, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    Js::DebuggerScope* debuggerScope = nullptr;

    if (BlockHasOwnScope(pnodeBlock, byteCodeGenerator))
    {
        Scope *scope = pnodeBlock->scope;
        byteCodeGenerator->PushScope(scope);

        Js::RegSlot scopeLocation = scope->GetLocation();
        if (scope->GetMustInstantiate())
        {
            Assert(scopeLocation == Js::Constants::NoRegister);
            // Inner scopes get their register computed from the function's inner-scope base.
            scopeLocation = funcInfo->FirstInnerScopeReg() + scope->GetInnerScopeIndex();

            if (scope->GetIsObject())
            {
                debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeBlock, Js::DiagExtraScopesType::DiagBlockScopeInObject, scopeLocation);
                byteCodeGenerator->Writer()->Unsigned1(Js::OpCode::NewBlockScope, scope->GetInnerScopeIndex());
            }
            else
            {
                int scopeIndex = Js::DebuggerScope::InvalidScopeIndex;
                debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeBlock, Js::DiagExtraScopesType::DiagBlockScopeInSlot, scopeLocation, &scopeIndex);

                // TODO: Handle heap enumeration
                int scopeSlotCount = scope->GetScopeSlotCount();
                byteCodeGenerator->Writer()->Num3(Js::OpCode::NewInnerScopeSlots, scope->GetInnerScopeIndex(), scopeSlotCount + Js::ScopeSlots::FirstSlotIndex, scopeIndex);
            }
        }
        else
        {
            // In the direct register access case, there is no block scope emitted but we can still track
            // the start and end offset of the block. The location registers for let/const variables will still be
            // captured along with this range in InitBlockScopedContent().
            debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeBlock, Js::DiagExtraScopesType::DiagBlockScopeDirect);
        }

        bool const isGlobalEvalBlockScope = scope->IsGlobalEvalBlockScope();
        Js::RegSlot frameDisplayLoc = Js::Constants::NoRegister;
        Js::RegSlot tmpInnerEnvReg = Js::Constants::NoRegister;
        ParseNodePtr pnodeScope;
        // Walk the block's scope list, defining function declarations before any other
        // statements in the block execute.
        for (pnodeScope = pnodeBlock->pnodeScopes; pnodeScope;)
        {
            switch (pnodeScope->nop)
            {
            case knopFncDecl:
                if (pnodeScope->AsParseNodeFnc()->IsDeclaration())
                {
                    // The frameDisplayLoc register's lifetime has to be controlled by this function. We can't let
                    // it be released by DefineOneFunction, because further iterations of this loop can allocate
                    // temps, and we can't let frameDisplayLoc be re-purposed until this loop completes.
                    // So we'll supply a temp that we allocate and release here.
                    if (frameDisplayLoc == Js::Constants::NoRegister)
                    {
                        if (funcInfo->frameDisplayRegister != Js::Constants::NoRegister)
                        {
                            frameDisplayLoc = funcInfo->frameDisplayRegister;
                        }
                        else
                        {
                            frameDisplayLoc = funcInfo->GetEnvRegister();
                        }
                        tmpInnerEnvReg = funcInfo->AcquireTmpRegister();
                        frameDisplayLoc = byteCodeGenerator->PrependLocalScopes(frameDisplayLoc, tmpInnerEnvReg, funcInfo);
                    }
                    byteCodeGenerator->DefineOneFunction(pnodeScope->AsParseNodeFnc(), funcInfo, true, frameDisplayLoc);
                }

                // If this is the global eval block scope, the function is actually assigned to the global
                // so we don't need to keep the registers.
                if (isGlobalEvalBlockScope)
                {
                    funcInfo->ReleaseLoc(pnodeScope);
                    pnodeScope->location = Js::Constants::NoRegister;
                }
                pnodeScope = pnodeScope->AsParseNodeFnc()->pnodeNext;
                break;

            case knopBlock:
                pnodeScope = pnodeScope->AsParseNodeBlock()->pnodeNext;
                break;

            case knopCatch:
                pnodeScope = pnodeScope->AsParseNodeCatch()->pnodeNext;
                break;

            case knopWith:
                pnodeScope = pnodeScope->AsParseNodeWith()->pnodeNext;
                break;
            }
        }

        if (tmpInnerEnvReg != Js::Constants::NoRegister)
        {
            funcInfo->ReleaseTmpRegister(tmpInnerEnvReg);
        }
    }
    else
    {
        // The block does not own a scope here; still record a debugger scope range
        // matching how the (possibly absent) scope would be represented.
        Scope *scope = pnodeBlock->scope;
        if (scope)
        {
            if (scope->GetMustInstantiate())
            {
                debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeBlock, Js::DiagExtraScopesType::DiagBlockScopeInObject);
            }
            else
            {
                debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeBlock, Js::DiagExtraScopesType::DiagBlockScopeDirect);
            }
        }
        else
        {
            debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeBlock, Js::DiagExtraScopesType::DiagBlockScopeInSlot);
        }
    }

    byteCodeGenerator->InitBlockScopedContent(pnodeBlock, debuggerScope, funcInfo);
}

// Closes a block opened by BeginEmitBlock: pops the scope (if the block owns one)
// and ends the debugger scope range.
void EndEmitBlock(ParseNodeBlock *pnodeBlock, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    if (BlockHasOwnScope(pnodeBlock, byteCodeGenerator))
    {
        Scope *scope = pnodeBlock->scope;
        Assert(scope);
        Assert(scope == byteCodeGenerator->GetCurrentScope());
        byteCodeGenerator->PopScope();
    }

    byteCodeGenerator->RecordEndScopeObject(pnodeBlock);
}

// Emits a per-iteration clone of a loop block's scope (for let bindings only).
void CloneEmitBlock(ParseNodeBlock *pnodeBlock, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    if (BlockHasOwnScope(pnodeBlock, byteCodeGenerator))
    {
        // Only let variables have observable behavior when there are per iteration
        // bindings. const variables do not since they are immutable. Therefore,
        // (and the spec agrees), only create new scope clones if the loop variable
        // is a let declaration.
        bool isConst = false;
        pnodeBlock->scope->ForEachSymbolUntil([&isConst](Symbol * const sym)
        {
            // Exploit the fact that a for loop sxBlock can only have let and const
            // declarations, and can only have one or the other, regardless of how
            // many syms there might be. Thus only check the first sym.
            isConst = sym->GetDecl()->nop == knopConstDecl;
            return true;
        });

        if (!isConst)
        {
            Scope *scope = pnodeBlock->scope;
            Assert(scope == byteCodeGenerator->GetCurrentScope());

            if (scope->GetMustInstantiate())
            {
                Js::OpCode op = scope->GetIsObject() ? Js::OpCode::CloneBlockScope : Js::OpCode::CloneInnerScopeSlots;
                byteCodeGenerator->Writer()->Unsigned1(op, scope->GetInnerScopeIndex());
            }
        }
    }
}

// Emits all top-level statements of a block, bracketed by Begin/EndEmitBlock.
// fReturnValue propagation: once the last guaranteed value-producing statement has
// been passed, subsequent statements must copy potential values to the return register.
void EmitBlock(ParseNodeBlock *pnodeBlock, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, BOOL fReturnValue)
{
    Assert(pnodeBlock->nop == knopBlock);
    ParseNode *pnode = pnodeBlock->pnodeStmt;
    if (pnode == nullptr)
    {
        return;
    }

    BeginEmitBlock(pnodeBlock, byteCodeGenerator, funcInfo);

    ParseNode *pnodeLastValStmt = pnodeBlock->pnodeLastValStmt;

    while (pnode->nop == knopList)
    {
        ParseNode* stmt = pnode->AsParseNodeBin()->pnode1;
        if (stmt == pnodeLastValStmt)
        {
            // This is the last guaranteed return value, so any potential return values have to be
            // copied to the return register from this point forward.
            pnodeLastValStmt = nullptr;
        }
        byteCodeGenerator->EmitTopLevelStatement(stmt, funcInfo, fReturnValue && (pnodeLastValStmt == nullptr));
        pnode = pnode->AsParseNodeBin()->pnode2;
    }

    if (pnode == pnodeLastValStmt)
    {
        pnodeLastValStmt = nullptr;
    }
    byteCodeGenerator->EmitTopLevelStatement(pnode, funcInfo, fReturnValue && (pnodeLastValStmt == nullptr));

    EndEmitBlock(pnodeBlock, byteCodeGenerator, funcInfo);
}

// Clears the node's location if it refers to a temp register of the emitting function.
// (byteCodeGenerator is unused; kept for callback-signature compatibility.)
void ClearTmpRegs(ParseNode* pnode, ByteCodeGenerator* byteCodeGenerator, FuncInfo* emitFunc)
{
    if (emitFunc->IsTmpReg(pnode->location))
    {
        pnode->location = Js::Constants::NoRegister;
    }
}

// Emits one top-level statement and releases its temp register when the value is
// neither used nor needed as the return value.
void ByteCodeGenerator::EmitTopLevelStatement(ParseNode *stmt, FuncInfo *funcInfo, BOOL fReturnValue)
{
    if (stmt->nop == knopFncDecl && stmt->AsParseNodeFnc()->IsDeclaration())
    {
        // Function declarations (not function-declaration RHS's) are already fully processed.
        // Skip them here so the temp registers don't get messed up.
        return;
    }

    if (stmt->nop == knopName || stmt->nop == knopDot)
    {
        // Generating span for top level names are mostly useful in debugging mode, because user can debug it even though no side-effect expected.
        // But the name can have runtime error, e.g., foo.bar;
        // where foo is not defined.
        // At this time we need to throw proper line number and offset. so recording on all modes will be useful.
        StartStatement(stmt);
        Writer()->Empty(Js::OpCode::Nop);
        EndStatement(stmt);
    }

    Emit(stmt, this, funcInfo, fReturnValue, false/*isConstructorCall*/, nullptr/*bindPnode*/, true/*isTopLevel*/);
    if (funcInfo->IsTmpReg(stmt->location))
    {
        if (!stmt->isUsed && !fReturnValue)
        {
            m_writer.Reg1(Js::OpCode::Unused, stmt->location);
        }
        funcInfo->ReleaseLoc(stmt);
    }
}

// ByteCodeGenerator::DefineFunctions
//
// Emit byte code for scope-wide function definitions before any calls in the scope, regardless of lexical
// order. Note that stores to the closure array are not emitted until we see the knopFncDecl in the tree
// to make sure that sources of the stores have been defined.
void ByteCodeGenerator::DefineFunctions(FuncInfo *funcInfoParent) { // DefineCachedFunctions doesn't depend on whether the user vars are declared or not, so // we'll just overload this variable to mean that the functions getting called again and we don't need to do anything if (funcInfoParent->GetHasCachedScope()) { this->DefineCachedFunctions(funcInfoParent); } else { this->DefineUncachedFunctions(funcInfoParent); } } // Iterate over all child functions in a function's parameter and body scopes. template<typename Fn> void MapContainerScopeFunctions(ParseNode* pnodeScope, Fn fn) { auto mapFncDeclsInScopeList = [&](ParseNode *pnodeHead) { for (ParseNode *pnode = pnodeHead; pnode != nullptr;) { switch (pnode->nop) { case knopFncDecl: fn(pnode); pnode = pnode->AsParseNodeFnc()->pnodeNext; break; case knopBlock: pnode = pnode->AsParseNodeBlock()->pnodeNext; break; case knopCatch: pnode = pnode->AsParseNodeCatch()->pnodeNext; break; case knopWith: pnode = pnode->AsParseNodeWith()->pnodeNext; break; default: AssertMsg(false, "Unexpected opcode in tree of scopes"); return; } } }; pnodeScope->AsParseNodeFnc()->MapContainerScopes(mapFncDeclsInScopeList); } void ByteCodeGenerator::DefineCachedFunctions(FuncInfo *funcInfoParent) { ParseNode *pnodeParent = funcInfoParent->root; uint slotCount = 0; auto countFncSlots = [&](ParseNode *pnodeFnc) { if (pnodeFnc->AsParseNodeFnc()->GetFuncSymbol() != nullptr && pnodeFnc->AsParseNodeFnc()->IsDeclaration()) { slotCount++; } }; MapContainerScopeFunctions(pnodeParent, countFncSlots); if (slotCount == 0) { return; } size_t extraBytesActual = AllocSizeMath::Mul(slotCount, sizeof(Js::FuncInfoEntry)); // Reg2Aux takes int for byteCount so we need to convert to int. OOM if we can't because it would truncate data. 
if (extraBytesActual > INT_MAX) { Js::Throw::OutOfMemory(); } int extraBytes = (int)extraBytesActual; Js::FuncInfoArray *info = AnewPlus(alloc, extraBytes, Js::FuncInfoArray, slotCount); // slotCount is guaranteed to be non-zero here. Js::AuxArray<uint32> * slotIdInCachedScopeToNestedIndexArray = funcInfoParent->GetParsedFunctionBody()->AllocateSlotIdInCachedScopeToNestedIndexArray(slotCount); slotCount = 0; auto fillEntries = [&](ParseNode *pnodeFnc) { Symbol *sym = pnodeFnc->AsParseNodeFnc()->GetFuncSymbol(); if (sym != nullptr && (pnodeFnc->AsParseNodeFnc()->IsDeclaration())) { AssertMsg(!pnodeFnc->AsParseNodeFnc()->IsGenerator(), "Generator functions are not supported by InitCachedFuncs but since they always escape they should disable function caching"); Js::FuncInfoEntry *entry = &info->elements[slotCount]; entry->nestedIndex = pnodeFnc->AsParseNodeFnc()->nestedIndex; entry->scopeSlot = sym->GetScopeSlot(); slotIdInCachedScopeToNestedIndexArray->elements[slotCount] = pnodeFnc->AsParseNodeFnc()->nestedIndex; slotCount++; } }; MapContainerScopeFunctions(pnodeParent, fillEntries); m_writer.AuxNoReg(Js::OpCode::InitCachedFuncs, info, sizeof(Js::FuncInfoArray) + extraBytes, sizeof(Js::FuncInfoArray) + extraBytes); slotCount = 0; auto defineOrGetCachedFunc = [&](ParseNode *pnodeFnc) { Symbol *sym = pnodeFnc->AsParseNodeFnc()->GetFuncSymbol(); if (pnodeFnc->AsParseNodeFnc()->IsDeclaration()) { // Do we need to define the function here (i.e., is it not one of our cached locals)? // Only happens if the sym is null (e.g., function x.y(){}). if (sym == nullptr) { this->DefineOneFunction(pnodeFnc->AsParseNodeFnc(), funcInfoParent); } else if (!sym->IsInSlot(this, funcInfoParent) && sym->GetLocation() != Js::Constants::NoRegister) { // If it was defined by InitCachedFuncs, do we need to put it in a register rather than a slot? 
m_writer.Reg1Unsigned1(Js::OpCode::GetCachedFunc, sym->GetLocation(), slotCount); } // The "x = function() {...}" case is being generated on the fly, during emission, // so the caller expects to be able to release this register. funcInfoParent->ReleaseLoc(pnodeFnc); pnodeFnc->location = Js::Constants::NoRegister; slotCount++; } }; MapContainerScopeFunctions(pnodeParent, defineOrGetCachedFunc); AdeletePlus(alloc, extraBytes, info); } void ByteCodeGenerator::DefineUncachedFunctions(FuncInfo *funcInfoParent) { ParseNode *pnodeParent = funcInfoParent->root; auto defineCheck = [&](ParseNode *pnodeFnc) { Assert(pnodeFnc->nop == knopFncDecl); // // Don't define the function upfront in following cases // 1. x = function() {...}; // Don't define the function for all modes. // Such a function can only be accessed via the LHS, so we define it at the assignment point // rather than the scope entry to save a register (and possibly save the whole definition). // // 2. x = function f() {...}; // f is not visible in the enclosing scope. // Such function expressions should be emitted only at the assignment point, as can be used only // after the assignment. Might save register. // if (pnodeFnc->AsParseNodeFnc()->IsDeclaration()) { this->DefineOneFunction(pnodeFnc->AsParseNodeFnc(), funcInfoParent); // The "x = function() {...}" case is being generated on the fly, during emission, // so the caller expects to be able to release this register. funcInfoParent->ReleaseLoc(pnodeFnc); pnodeFnc->location = Js::Constants::NoRegister; } }; MapContainerScopeFunctions(pnodeParent, defineCheck); } void EmitAssignmentToFuncName(ParseNodeFnc *pnodeFnc, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfoParent) { // Assign the location holding the func object reference to the given name. 
    // (continuation of EmitAssignmentToFuncName, whose signature is above)
    Assert(pnodeFnc->pnodeName->nop == knopVarDecl);
    Symbol *sym = pnodeFnc->pnodeName->sym;

    // Func-expression names are bound inside the function's own scope and are
    // handled elsewhere; only declarations store to an enclosing binding here.
    if (sym != nullptr && !sym->GetIsFuncExpr())
    {
        if (sym->GetIsModuleExportStorage())
        {
            byteCodeGenerator->EmitPropStore(pnodeFnc->location, sym, nullptr, funcInfoParent);
        }
        else if (sym->GetIsGlobal())
        {
            Js::PropertyId propertyId = sym->GetPosition();
            byteCodeGenerator->EmitGlobalFncDeclInit(pnodeFnc->location, propertyId, funcInfoParent);
            // Precedence note: this parses as (GetFlags() & fscrEval) && !strict,
            // i.e. non-strict eval also stores through the scope chain.
            if (byteCodeGenerator->GetFlags() & fscrEval && !funcInfoParent->GetIsStrictMode())
            {
                byteCodeGenerator->EmitPropStore(pnodeFnc->location, sym, nullptr, funcInfoParent);
            }
        }
        else
        {
            if (sym->NeedsSlotAlloc(byteCodeGenerator, funcInfoParent))
            {
                if (!sym->GetHasNonCommittedReference() ||
                    (funcInfoParent->GetParsedFunctionBody()->DoStackNestedFunc()))
                {
                    // No point in trying to optimize if there are no references before we have to commit to slot.
                    // And not safe to delay putting a stack function in the slot, since we may miss boxing.
                    sym->SetIsCommittedToSlot();
                }
            }

            if (sym->GetScope()->GetFunc() != byteCodeGenerator->TopFuncInfo())
            {
                byteCodeGenerator->EmitPropStore(pnodeFnc->location, sym, nullptr, funcInfoParent);
            }
            else
            {
                byteCodeGenerator->EmitLocalPropInit(pnodeFnc->location, sym, funcInfoParent);
            }

            // A block-scoped function declaration may also have a function-scope
            // var binding (Annex B style hoisting); store to that one too.
            Symbol * fncScopeSym = sym->GetFuncScopeVarSym();
            if (fncScopeSym)
            {
                if (fncScopeSym->GetIsGlobal() && byteCodeGenerator->GetFlags() & fscrEval)
                {
                    Js::PropertyId propertyId = fncScopeSym->GetPosition();
                    byteCodeGenerator->EmitGlobalFncDeclInit(pnodeFnc->location, propertyId, funcInfoParent);
                }
                else
                {
                    byteCodeGenerator->EmitPropStore(pnodeFnc->location, fncScopeSym, nullptr, funcInfoParent, false, false, /* isFncDeclVar */true);
                }
            }
        }
    }
}

// Emit the creation of one nested function object into pnodeFnc->location.
// regEnv selects the closure environment to pass to the child (computed here
// when the caller passes NoRegister); generateAssignment controls whether the
// function is also stored to its declared name. Returns the environment
// register actually used.
Js::RegSlot ByteCodeGenerator::DefineOneFunction(ParseNodeFnc *pnodeFnc, FuncInfo *funcInfoParent, bool generateAssignment, Js::RegSlot regEnv, Js::RegSlot frameDisplayTemp)
{
    Assert(pnodeFnc->nop == knopFncDecl);

    funcInfoParent->AcquireLoc(pnodeFnc);

    if (regEnv == Js::Constants::NoRegister)
    {
        // If the child needs a closure, find a heap-allocated frame to pass to it.
        if (frameDisplayTemp != Js::Constants::NoRegister)
        {
            // We allocated a temp to hold a local frame display value. Use that.
            // It's likely that the FD is on the stack, and we used the temp to load it back.
            regEnv = frameDisplayTemp;
        }
        else if (funcInfoParent->frameDisplayRegister != Js::Constants::NoRegister)
        {
            // This function has built a frame display, so pass it down.
            regEnv = funcInfoParent->frameDisplayRegister;
        }
        else
        {
            // This function has no captured locals but inherits a closure environment, so pass it down.
            regEnv = funcInfoParent->GetEnvRegister();
        }

        regEnv = this->PrependLocalScopes(regEnv, Js::Constants::NoRegister, funcInfoParent);
    }

    // AssertMsg(funcInfo->nonLocalSymbols == 0 || regEnv != funcInfoParent->nullConstantRegister,
    // "We need a closure for the nested function");

    Assert(pnodeFnc->nestedIndex != (uint)-1);
    // If we are in a parameter scope and it is not merged with body scope then we have to create the child function as an inner function
    if (regEnv == funcInfoParent->frameDisplayRegister || regEnv == funcInfoParent->GetEnvRegister())
    {
        m_writer.NewFunction(pnodeFnc->location, pnodeFnc->nestedIndex, pnodeFnc->IsCoroutine(), pnodeFnc->GetHomeObjLocation());
    }
    else
    {
        m_writer.NewInnerFunction(pnodeFnc->location, pnodeFnc->nestedIndex, regEnv, pnodeFnc->IsCoroutine(), pnodeFnc->GetHomeObjLocation());
    }

    if (funcInfoParent->IsGlobalFunction() && (this->flags & fscrEval))
    {
        // A function declared at global scope in eval is untrackable,
        // so make sure the caller's cached scope is invalidated.
        this->funcEscapes = true;
    }
    else
    {
        if (pnodeFnc->IsDeclaration())
        {
            Symbol * funcSymbol = pnodeFnc->GetFuncSymbol();
            if (funcSymbol)
            {
                // In the case where a let/const declaration is the same symbol name
                // as the function declaration (shadowing case), the let/const var and
                // the function declaration symbol are the same and share the same flags
                // (particularly, sym->GetIsBlockVar() for this code path).
                //
                // For example:
                // let a = 0;       // <-- sym->GetIsBlockVar() = true
                // function b(){}   // <-- sym2->GetIsBlockVar() = false
                //
                // let x = 0;       // <-- sym3->GetIsBlockVar() = true
                // function x(){}   // <-- sym3->GetIsBlockVar() = true
                //
                // In order to tell if the function is actually part
                // of a block scope, we compare against the function scope here.
                // Note that having a function with the same name as a let/const declaration
                // is a redeclaration error, but we're pushing the fix for this out since it's
                // a bit involved.
                Assert(funcInfoParent->GetBodyScope() != nullptr && funcSymbol->GetScope() != nullptr);
                bool isFunctionDeclarationInBlock = funcSymbol->GetIsBlockVar();

                // Track all vars/lets/consts register slot function declarations.
                if (ShouldTrackDebuggerMetadata()
                    // If this is a let binding function declaration at global level, we want to
                    // be sure to track the register location as well.
                    && !(funcInfoParent->IsGlobalFunction() && !isFunctionDeclarationInBlock))
                {
                    if (!funcSymbol->IsInSlot(this, funcInfoParent))
                    {
                        funcInfoParent->byteCodeFunction->GetFunctionBody()->InsertSymbolToRegSlotList(funcSymbol->GetName(), pnodeFnc->location, funcInfoParent->varRegsCount);
                    }
                }

                if (isFunctionDeclarationInBlock)
                {
                    // We only track inner let bindings for the debugger side.
                    this->TrackFunctionDeclarationPropertyForDebugger(funcSymbol, funcInfoParent);
                }
            }
        }
    }

    if (pnodeFnc->IsDefaultModuleExport())
    {
        this->EmitAssignmentToDefaultModuleExport(pnodeFnc, funcInfoParent);
    }

    if (pnodeFnc->pnodeName == nullptr || !generateAssignment)
    {
        return regEnv;
    }

    EmitAssignmentToFuncName(pnodeFnc, this, funcInfoParent);

    return regEnv;
}

// Emit initialization for all function-scoped (var) declarations on entry to
// the scope. Body continues below this chunk.
void ByteCodeGenerator::DefineUserVars(FuncInfo *funcInfo)
{
    // Initialize scope-wide variables on entry to the scope. TODO: optimize by detecting uses that are always reached
    // by an existing initialization.
    BOOL fGlobal = funcInfo->IsGlobalFunction();
    ParseNode *pnode;
    Js::FunctionBody *byteCodeFunction = funcInfo->GetParsedFunctionBody();
    // Global declarations need a temp register to hold the init value, but the node shouldn't get a register.
    // Just assign one on the fly and re-use it for all initializations.
    // NOTE(review): tmpReg is not referenced again in this visible body before
    // its release; it may exist to reserve a register for the duration — verify
    // before removing.
    Js::RegSlot tmpReg = fGlobal ? funcInfo->AcquireTmpRegister() : Js::Constants::NoRegister;

    for (pnode = funcInfo->root->pnodeVars; pnode; pnode = pnode->AsParseNodeVar()->pnodeNext)
    {
        Symbol* sym = pnode->AsParseNodeVar()->sym;

        if (sym != nullptr && !(pnode->AsParseNodeVar()->isBlockScopeFncDeclVar && sym->GetIsBlockVar()))
        {
            if (sym->IsSpecialSymbol())
            {
                // Special symbols have already had their initial values stored into their registers.
                // In default-argument case we've stored those values into their slot locations, as well.
                // We must do that because a default parameter may access a special symbol through a scope slot.
                // In the non-default-argument case, though, we didn't yet store the values into the
                // slots so let's do that now.
                // (continuation of DefineUserVars: special-symbol slot stores)
                if (!funcInfo->root->HasNonSimpleParameterList())
                {
                    EmitPropStoreForSpecialSymbol(sym->GetLocation(), sym, sym->GetPid(), funcInfo, true);

                    if (ShouldTrackDebuggerMetadata() && !sym->IsInSlot(this, funcInfo))
                    {
                        byteCodeFunction->InsertSymbolToRegSlotList(sym->GetName(), sym->GetLocation(), funcInfo->varRegsCount);
                    }
                }

                continue;
            }

            if (sym->GetIsCatch() || (pnode->nop == knopVarDecl && sym->GetIsBlockVar()))
            {
                // The init node was bound to the catch object, because it's inside a catch and has the
                // same name as the catch object. But we want to define a user var at function scope,
                // so find the right symbol. (We'll still assign the RHS value to the catch object symbol.)
                // This also applies to a var declaration in the same scope as a let declaration.
#if DBG
                if (sym->IsArguments())
                {
                    // There is a block scoped var named arguments
                    Assert(!funcInfo->GetHasArguments());
                    continue;
                }
                else if (!sym->GetIsCatch())
                {
                    // Assert that catch cannot be at function scope and let and var at function scope is redeclaration error.
                    Assert(funcInfo->bodyScope != sym->GetScope());
                }
#endif
                sym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
                Assert(sym && !sym->GetIsCatch() && !sym->GetIsBlockVar());
            }

            if (sym->GetSymbolType() == STVariable && !sym->GetIsModuleExportStorage())
            {
                if (fGlobal)
                {
                    Js::PropertyId propertyId = sym->EnsurePosition(this);
                    // We do need to initialize some globals to avoid JS errors on loading undefined variables.
                    // But we first need to make sure we're not trashing built-ins.

                    if (this->flags & fscrEval)
                    {
                        if (funcInfo->byteCodeFunction->GetIsStrictMode())
                        {
                            // Check/Init the property of the frame object
                            this->m_writer.ElementRootU(Js::OpCode::LdLocalElemUndef,
                                funcInfo->FindOrAddReferencedPropertyId(propertyId));
                        }
                        else
                        {
                            // The check and the init involve the first element in the scope chain.
                            this->m_writer.ElementScopedU(
                                Js::OpCode::LdElemUndefScoped, funcInfo->FindOrAddReferencedPropertyId(propertyId));
                        }
                    }
                    else
                    {
                        this->m_writer.ElementU(Js::OpCode::LdElemUndef, ByteCodeGenerator::RootObjectRegister,
                            funcInfo->FindOrAddReferencedPropertyId(propertyId));
                    }
                }
                else if (!sym->IsArguments())
                {
                    if (sym->NeedsSlotAlloc(this, funcInfo))
                    {
                        if (!sym->GetHasNonCommittedReference() ||
                            (sym->GetHasFuncAssignment() && funcInfo->GetParsedFunctionBody()->DoStackNestedFunc()))
                        {
                            // No point in trying to optimize if there are no references before we have to commit to slot.
                            // And not safe to delay putting a stack function in the slot, since we may miss boxing.
                            sym->SetIsCommittedToSlot();
                        }
                    }

                    // Undef-initialize the home location if it is a register (not closure-captured, or else capture
                    // is delayed) or a property of an object.
                    if ((!sym->GetHasInit() && !sym->IsInSlot(this, funcInfo)) ||
                        (funcInfo->bodyScope->GetIsObject() && !funcInfo->GetHasCachedScope()))
                    {
                        Js::RegSlot reg = sym->GetLocation();
                        if (reg == Js::Constants::NoRegister)
                        {
                            Assert(sym->IsInSlot(this, funcInfo));
                            reg = funcInfo->AcquireTmpRegister();
                        }
                        this->m_writer.Reg1(Js::OpCode::LdUndef, reg);
                        this->EmitLocalPropInit(reg, sym, funcInfo);

                        if (ShouldTrackDebuggerMetadata() && !sym->GetHasInit() && !sym->IsInSlot(this, funcInfo))
                        {
                            byteCodeFunction->InsertSymbolToRegSlotList(sym->GetName(), reg, funcInfo->varRegsCount);
                        }

                        funcInfo->ReleaseTmpRegister(reg);
                    }
                }
                else if (ShouldTrackDebuggerMetadata())
                {
                    if (!sym->GetHasInit() && !sym->IsInSlot(this, funcInfo))
                    {
                        Js::RegSlot reg = sym->GetLocation();
                        if (reg != Js::Constants::NoRegister)
                        {
                            byteCodeFunction->InsertSymbolToRegSlotList(sym->GetName(), reg, funcInfo->varRegsCount);
                        }
                    }
                }
                sym->SetHasInit(TRUE);
            }
        }
    }
    if (tmpReg != Js::Constants::NoRegister)
    {
        funcInfo->ReleaseTmpRegister(tmpReg);
    }

    // Also undef-initialize any non-user, non-temp registers recorded earlier.
    for (int i = 0; i < funcInfo->nonUserNonTempRegistersToInitialize.Count(); ++i)
    {
        m_writer.Reg1(Js::OpCode::LdUndef, funcInfo->nonUserNonTempRegistersToInitialize.Item(i));
    }
}

// Undef/undecl-initialize all non-temp registers belonging to block-scoped
// entities (block-scoped functions, block scopes, with objects) on function
// entry. Body continues below this chunk.
void ByteCodeGenerator::InitBlockScopedNonTemps(ParseNode *pnode, FuncInfo *funcInfo)
{
    // Initialize all non-temp register variables on entry to the enclosing func - in particular,
    // those with lifetimes that begin after the start of user code and may not be initialized normally.
    // This protects us from, for instance, trying to restore garbage on bailout.
    // It was originally done in debugger mode only, but we do it always to avoid issues with boxing
    // garbage on exit from jitted loop bodies.
    while (pnode)
    {
        switch (pnode->nop)
        {
        case knopFncDecl:
        {
            // If this is a block-scoped function, initialize it.
            ParseNodeFnc * pnodeFnc = pnode->AsParseNodeFnc();
            ParseNodeVar *pnodeName = pnodeFnc->pnodeName;
            if (!pnodeFnc->IsMethod() && pnodeName != nullptr)
            {
                Symbol *sym = pnodeName->sym;
                Assert(sym);
                if (sym->GetLocation() != Js::Constants::NoRegister &&
                    sym->GetScope()->IsBlockScope(funcInfo) &&
                    sym->GetScope()->GetFunc() == funcInfo)
                {
                    this->m_writer.Reg1(Js::OpCode::LdUndef, sym->GetLocation());
                }
            }

            // No need to recurse to the nested scopes, as they belong to a nested function.
            // (continuation of InitBlockScopedNonTemps' knopFncDecl case)
            pnode = pnodeFnc->pnodeNext;
            break;
        }
        case knopBlock:
        {
            ParseNodeBlock * pnodeBlock = pnode->AsParseNodeBlock();
            Scope *scope = pnodeBlock->scope;
            if (scope)
            {
                if (scope->IsBlockScope(funcInfo))
                {
                    Js::RegSlot scopeLoc = scope->GetLocation();
                    if (scopeLoc != Js::Constants::NoRegister && !funcInfo->IsTmpReg(scopeLoc))
                    {
                        this->m_writer.Reg1(Js::OpCode::LdUndef, scopeLoc);
                    }
                }
                // Block-scoped (let/const) register homes start as "undeclared"
                // so TDZ checks fire until the declaration executes.
                auto fnInit = [this, funcInfo](ParseNode *pnode)
                {
                    Symbol *sym = pnode->AsParseNodeVar()->sym;
                    if (!sym->IsInSlot(this, funcInfo) && !sym->GetIsGlobal() && !sym->GetIsModuleImport())
                    {
                        this->m_writer.Reg1(Js::OpCode::InitUndecl, pnode->AsParseNodeVar()->sym->GetLocation());
                    }
                };
                IterateBlockScopedVariables(pnodeBlock, fnInit);
            }
            InitBlockScopedNonTemps(pnodeBlock->pnodeScopes, funcInfo);
            pnode = pnodeBlock->pnodeNext;
            break;
        }

        case knopCatch:
            InitBlockScopedNonTemps(pnode->AsParseNodeCatch()->pnodeScopes, funcInfo);
            pnode = pnode->AsParseNodeCatch()->pnodeNext;
            break;

        case knopWith:
        {
            Js::RegSlot withLoc = pnode->location;
            AssertMsg(withLoc != Js::Constants::NoRegister && !funcInfo->IsTmpReg(withLoc),
                "We should put with objects at known stack locations in debug mode");
            this->m_writer.Reg1(Js::OpCode::LdUndef, withLoc);
            InitBlockScopedNonTemps(pnode->AsParseNodeWith()->pnodeScopes, funcInfo);
            pnode = pnode->AsParseNodeWith()->pnodeNext;
            break;
        }

        default:
            Assert(false);
            return;
        }
    }
}

// Build the PropertyIdArray describing a cached activation object's layout:
// one property id per scope slot (formals, cached function declarations,
// vars/lets), plus extra trailing slots for bookkeeping. Body continues below
// this chunk.
void ByteCodeGenerator::EmitScopeObjectInit(FuncInfo *funcInfo)
{
    Assert(!funcInfo->byteCodeFunction->GetFunctionBody()->DoStackNestedFunc());

    if (!funcInfo->GetHasCachedScope() /* || forcing scope/inner func caching */)
    {
        return;
    }

    Scope* currentScope = funcInfo->GetCurrentChildScope();
    uint slotCount = currentScope->GetScopeSlotCount();
    uint cachedFuncCount = 0;
    Js::PropertyId firstFuncSlot = Js::Constants::NoProperty;
    Js::PropertyId firstVarSlot = Js::Constants::NoProperty;

    // Overflow-checked: (slotCount + ExtraSlotCount) * sizeof(PropertyId).
    uint extraAlloc = UInt32Math::Add(slotCount, Js::ActivationObjectEx::ExtraSlotCount());
    extraAlloc = UInt32Math::Mul(extraAlloc,
    // (continuation of FinalizeRegisters, whose opening is above this chunk)
    byteCodeFunction->CheckAndSetConstantCount(funcInfo->constRegsCount);

    this->SetClosureRegisters(funcInfo, byteCodeFunction);

    if (this->IsInDebugMode() || byteCodeFunction->IsCoroutine())
    {
        // Give permanent registers to the inner scopes in debug mode.
        // TODO: We create seperate debuggerscopes for each block which has own scope. These are stored in the var registers
        // allocated below. Ideally we should change this logic to not allocate separate registers for these and save the debug
        // info in corresponding symbols and use it from there. This will also affect the temp register allocation logic in
        // EmitOneFunction.
        uint innerScopeCount = funcInfo->InnerScopeCount();
        byteCodeFunction->SetInnerScopeCount(innerScopeCount);
        if (innerScopeCount)
        {
            funcInfo->SetFirstInnerScopeReg(funcInfo->NextVarRegister());
            for (uint i = 1; i < innerScopeCount; i++)
            {
                funcInfo->NextVarRegister();
            }
        }
    }

    // NOTE: The FB expects the yield reg to be the final non-temp.
    if (byteCodeFunction->IsCoroutine())
    {
        funcInfo->AssignYieldRegister();
    }

    Js::RegSlot firstTmpReg = funcInfo->varRegsCount;
    funcInfo->SetFirstTmpReg(firstTmpReg);
    byteCodeFunction->SetFirstTmpReg(funcInfo->RegCount());
}

// Record the scope-slot layout (body and param scopes) on the FunctionBody and
// fill the property-id-per-slot array used by scope slot access.
void ByteCodeGenerator::InitScopeSlotArray(FuncInfo * funcInfo)
{
    // Record slots info for ScopeSlots/ScopeObject.
    uint scopeSlotCount = funcInfo->bodyScope->GetScopeSlotCount();
    bool isSplitScope = !funcInfo->IsBodyAndParamScopeMerged();
    Assert(funcInfo->paramScope == nullptr || funcInfo->paramScope->GetScopeSlotCount() == 0 || isSplitScope);
    uint scopeSlotCountForParamScope = funcInfo->paramScope != nullptr ?
        funcInfo->paramScope->GetScopeSlotCount() : 0;

    if (scopeSlotCount == 0 && scopeSlotCountForParamScope == 0)
    {
        return;
    }

    Js::FunctionBody *byteCodeFunction = funcInfo->GetParsedFunctionBody();
    if (scopeSlotCount > 0 || scopeSlotCountForParamScope > 0)
    {
        byteCodeFunction->SetScopeSlotArraySizes(scopeSlotCount, scopeSlotCountForParamScope);
    }

    // TODO: Need to add property ids for the case when scopeSlotCountForParamSCope is non-zero
    if (scopeSlotCount)
    {
        Js::PropertyId *propertyIdsForScopeSlotArray = RecyclerNewArrayLeafZ(scriptContext->GetRecycler(), Js::PropertyId, scopeSlotCount);
        byteCodeFunction->SetPropertyIdsForScopeSlotArray(propertyIdsForScopeSlotArray, scopeSlotCount, scopeSlotCountForParamScope);
        AssertMsg(!byteCodeFunction->IsReparsed() || byteCodeFunction->WasEverAsmJsMode() || byteCodeFunction->scopeSlotArraySize == scopeSlotCount,
            "The slot array size is different between debug and non-debug mode");
#if DEBUG
        for (UINT i = 0; i < scopeSlotCount; i++)
        {
            propertyIdsForScopeSlotArray[i] = Js::Constants::NoProperty;
        }
#endif
        // Bounds-checked writer into the slot->propertyId table.
        auto setPropertyIdForScopeSlotArray =
            [scopeSlotCount, propertyIdsForScopeSlotArray]
            (Js::PropertyId slot, Js::PropertyId propId)
        {
            if (slot < 0 || (uint)slot >= scopeSlotCount)
            {
                Js::Throw::FatalInternalError();
            }
            propertyIdsForScopeSlotArray[slot] = propId;
        };

        auto setPropIdsForScopeSlotArray = [this, funcInfo, setPropertyIdForScopeSlotArray](Symbol *const sym)
        {
            if (sym->NeedsSlotAlloc(this, funcInfo))
            {
                // All properties should get correct propertyId here.
                Assert(sym->HasScopeSlot()); // We can't allocate scope slot now. Any symbol needing scope slot must have allocated it before this point.
                setPropertyIdForScopeSlotArray(sym->GetScopeSlot(), sym->EnsurePosition(funcInfo));
            }
        };

        funcInfo->GetBodyScope()->ForEachSymbol(setPropIdsForScopeSlotArray);
#if DEBUG
        for (UINT i = 0; i < scopeSlotCount; i++)
        {
            Assert(propertyIdsForScopeSlotArray[i] != Js::Constants::NoProperty
                || funcInfo->frameObjRegister != Js::Constants::NoRegister); // ScopeObject may have unassigned entries, e.g. for same-named parameters
        }
#endif
    }
}

// temporarily load all constants and special registers in a single block
// Records every constant register (null/undefined/true/false, numbers,
// strings), sets up the frame object/slots, environment, arguments and the
// special symbols (this, new.target, super). Body continues below this chunk.
void ByteCodeGenerator::LoadAllConstants(FuncInfo *funcInfo)
{
    Symbol *sym;

    Js::FunctionBody *byteCodeFunction = funcInfo->GetParsedFunctionBody();

    byteCodeFunction->CreateConstantTable();

    if (funcInfo->nullConstantRegister != Js::Constants::NoRegister)
    {
        byteCodeFunction->RecordNullObject(byteCodeFunction->MapRegSlot(funcInfo->nullConstantRegister));
    }

    if (funcInfo->undefinedConstantRegister != Js::Constants::NoRegister)
    {
        byteCodeFunction->RecordUndefinedObject(byteCodeFunction->MapRegSlot(funcInfo->undefinedConstantRegister));
    }

    if (funcInfo->trueConstantRegister != Js::Constants::NoRegister)
    {
        byteCodeFunction->RecordTrueObject(byteCodeFunction->MapRegSlot(funcInfo->trueConstantRegister));
    }

    if (funcInfo->falseConstantRegister != Js::Constants::NoRegister)
    {
        byteCodeFunction->RecordFalseObject(byteCodeFunction->MapRegSlot(funcInfo->falseConstantRegister));
    }

    if (funcInfo->frameObjRegister != Js::Constants::NoRegister)
    {
        m_writer.RecordObjectRegister(funcInfo->frameObjRegister);
        if (!funcInfo->GetApplyEnclosesArgs())
        {
            this->EmitScopeObjectInit(funcInfo);
        }
#if DBG
        uint count = 0;
        funcInfo->GetBodyScope()->ForEachSymbol([&](Symbol *const sym)
        {
            if (sym->NeedsSlotAlloc(this, funcInfo))
            {
                // All properties should get correct propertyId here.
                count++;
            }
        });

        if (funcInfo->GetParamScope() != nullptr)
        {
            funcInfo->GetParamScope()->ForEachSymbol([&](Symbol *const sym)
            {
                if (sym->NeedsSlotAlloc(this, funcInfo))
                {
                    // All properties should get correct propertyId here.
                    // (continuation of LoadAllConstants' DBG slot-count check)
                    count++;
                }
            });
        }

        // A reparse should result in the same size of the activation object.
        // Exclude functions which were created from the ByteCodeCache.
        AssertMsg(!byteCodeFunction->IsReparsed() || byteCodeFunction->HasGeneratedFromByteCodeCache() ||
            byteCodeFunction->scopeObjectSize == count || byteCodeFunction->WasEverAsmJsMode(),
            "The activation object size is different between debug and non-debug mode");
        byteCodeFunction->scopeObjectSize = count;
#endif
    }
    else if (funcInfo->frameSlotsRegister != Js::Constants::NoRegister)
    {
        int scopeSlotCount = funcInfo->bodyScope->GetScopeSlotCount();
        int paramSlotCount = funcInfo->paramScope->GetScopeSlotCount();
        if (scopeSlotCount == 0 && paramSlotCount == 0)
        {
            AssertMsg(funcInfo->frameDisplayRegister != Js::Constants::NoRegister,
                "Why do we need scope slots?");
            m_writer.Reg1(Js::OpCode::LdC_A_Null, funcInfo->frameSlotsRegister);
        }
    }

    if (funcInfo->funcExprScope && funcInfo->funcExprScope->GetIsObject())
    {
        byteCodeFunction->MapAndSetFuncExprScopeRegister(funcInfo->funcExprScope->GetLocation());
        byteCodeFunction->SetEnvDepth((uint16)-1);
    }

    bool thisLoadedFromParams = false;

    if (funcInfo->NeedEnvRegister())
    {
        byteCodeFunction->MapAndSetEnvRegister(funcInfo->GetEnvRegister());
        if (funcInfo->GetIsTopLevelEventHandler())
        {
            if (funcInfo->GetThisSymbol())
            {
                byteCodeFunction->MapAndSetThisRegisterForEventHandler(funcInfo->GetThisSymbol()->GetLocation());
            }
            // The environment is the namespace hierarchy starting with "this".
            Assert(!funcInfo->RegIsConst(funcInfo->GetEnvRegister()));
            thisLoadedFromParams = true;
            this->InvalidateCachedOuterScopes(funcInfo);
        }
        else if (funcInfo->IsGlobalFunction() && !(this->flags & fscrEval))
        {
            Assert(funcInfo->RegIsConst(funcInfo->GetEnvRegister()));

            if (funcInfo->GetIsStrictMode())
            {
                byteCodeFunction->RecordStrictNullDisplayConstant(byteCodeFunction->MapRegSlot(funcInfo->GetEnvRegister()));
            }
            else
            {
                byteCodeFunction->RecordNullDisplayConstant(byteCodeFunction->MapRegSlot(funcInfo->GetEnvRegister()));
            }
        }
        else
        {
            // environment may be required to load "this"
            Assert(!funcInfo->RegIsConst(funcInfo->GetEnvRegister()));
            this->InvalidateCachedOuterScopes(funcInfo);
        }
    }

    if (funcInfo->frameDisplayRegister != Js::Constants::NoRegister)
    {
        m_writer.RecordFrameDisplayRegister(funcInfo->frameDisplayRegister);
    }

    this->RecordAllIntConstants(funcInfo);
    this->RecordAllStrConstants(funcInfo);
    this->RecordAllStringTemplateCallsiteConstants(funcInfo);

    funcInfo->doubleConstantToRegister.Map([byteCodeFunction](double d, Js::RegSlot location)
    {
        byteCodeFunction->RecordFloatConstant(byteCodeFunction->MapRegSlot(location), d);
    });

    // WARNING !!!
    // DO NOT emit any bytecode before loading the heap arguments. This is because those opcodes may bail
    // out (unlikely, since opcodes emitted in this function should not correspond to user code, but possible)
    // and the Jit assumes that there cannot be any bailouts before LdHeapArguments (or its equivalent)
    if (funcInfo->GetHasArguments())
    {
        sym = funcInfo->GetArgumentsSymbol();
        Assert(sym);
        Assert(funcInfo->GetHasHeapArguments());

        if (funcInfo->GetCallsEval() || (!funcInfo->GetApplyEnclosesArgs()))
        {
            this->LoadHeapArguments(funcInfo);
        }
    }
    else if (!funcInfo->IsGlobalFunction() && !IsInNonDebugMode())
    {
        // Count formals (plus rest, minus the implicit this arg) for debug metadata.
        uint count = funcInfo->inArgsCount + (funcInfo->root->pnodeRest != nullptr ? 1 : 0) - 1;
        if (count != 0)
        {
            Js::PropertyIdArray *propIds = RecyclerNewPlus(scriptContext->GetRecycler(), UInt32Math::Mul(count, sizeof(Js::PropertyId)), Js::PropertyIdArray, count, 0);

            GetFormalArgsArray(this, funcInfo, propIds);
            byteCodeFunction->SetPropertyIdsOfFormals(propIds);
        }
    }

    // Class constructors do not have a [[call]] slot but we don't implement a generic way to express this.
    // What we do is emit a check for the new flag here. If we don't have CallFlags_New set, the opcode will throw.
    // We need to do this before emitting 'this' since the base class constructor will try to construct a new object.
    if (funcInfo->IsClassConstructor())
    {
        m_writer.Empty(Js::OpCode::ChkNewCallFlag);
    }

    // new.target may be used to construct the 'this' register so make sure to load it first
    if (funcInfo->GetNewTargetSymbol())
    {
        this->LoadNewTargetObject(funcInfo);
    }

    if (funcInfo->GetThisSymbol())
    {
        this->LoadThisObject(funcInfo, thisLoadedFromParams);
    }
    else if (ShouldLoadConstThis(funcInfo))
    {
        this->EmitThis(funcInfo, funcInfo->thisConstantRegister, funcInfo->nullConstantRegister);
    }

    if (funcInfo->GetSuperSymbol())
    {
        this->LoadSuperObject(funcInfo);
    }

    if (funcInfo->GetSuperConstructorSymbol())
    {
        this->LoadSuperConstructorObject(funcInfo);
    }

    //
    // If the function is a function expression with a name,
    // load the function object at runtime to its activation object.
    //
    sym = funcInfo->root->GetFuncSymbol();
    bool funcExprWithName = !funcInfo->IsGlobalFunction() && sym && sym->GetIsFuncExpr();

    if (funcExprWithName)
    {
        if (funcInfo->GetFuncExprNameReference() ||
            (funcInfo->funcExprScope && funcInfo->funcExprScope->GetIsObject()))
        {
            //
            // x = function f(...) { ... }
            // A named function expression's name (Symbol:f) belongs to the enclosing scope.
            // Thus there are no uses of 'f' within the scope of the function (as references to 'f'
            // are looked up in the closure). So, we can't use f's register as it is from the enclosing
            // scope's register namespace. So use a tmp register.
            // In ES5 mode though 'f' is *not* a part of the enclosing scope. So we always assign 'f' a register
            // from it's register namespace, which LdFuncExpr can use.
            //
            Js::RegSlot ldFuncExprDst = sym->GetLocation();
            this->m_writer.Reg1(Js::OpCode::LdFuncExpr, ldFuncExprDst);

            if (sym->IsInSlot(this, funcInfo))
            {
                Js::RegSlot scopeLocation;
                AnalysisAssert(funcInfo->funcExprScope);

                if (funcInfo->funcExprScope->GetIsObject())
                {
                    scopeLocation = funcInfo->funcExprScope->GetLocation();
                    this->m_writer.Property(Js::OpCode::StFuncExpr, sym->GetLocation(), scopeLocation,
                        funcInfo->FindOrAddReferencedPropertyId(sym->GetPosition()));
                }
                else if (funcInfo->paramScope->GetIsObject() || (funcInfo->paramScope->GetCanMerge() && funcInfo->bodyScope->GetIsObject()))
                {
                    this->m_writer.ElementU(Js::OpCode::StLocalFuncExpr, sym->GetLocation(),
                        funcInfo->FindOrAddReferencedPropertyId(sym->GetPosition()));
                }
                else
                {
                    Assert(sym->HasScopeSlot());
                    this->m_writer.SlotI1(Js::OpCode::StLocalSlot, sym->GetLocation(),
                        sym->GetScopeSlot() + Js::ScopeSlots::FirstSlotIndex);
                }
            }
            else if (ShouldTrackDebuggerMetadata())
            {
                funcInfo->byteCodeFunction->GetFunctionBody()->InsertSymbolToRegSlotList(sym->GetName(), sym->GetLocation(), funcInfo->varRegsCount);
            }
        }
    }
}

// Walk the enclosing scope chain and emit InvalCachedScope for each enclosing
// function's body scope that is cached and escapes; envIndex counts only
// scopes that are actually instantiated (i.e. present in the frame display).
void ByteCodeGenerator::InvalidateCachedOuterScopes(FuncInfo *funcInfo)
{
    Assert(funcInfo->GetEnvRegister() != Js::Constants::NoRegister);

    // Walk the scope stack, from funcInfo outward, looking for scopes that have been cached.
    Scope *scope = funcInfo->GetBodyScope()->GetEnclosingScope();
    uint32 envIndex = 0;

    while (scope && scope->GetFunc() == funcInfo)
    {
        // Skip over FuncExpr Scope and parameter scope for current funcInfo to get to the first enclosing scope of the outer function.
        scope = scope->GetEnclosingScope();
    }

    for (; scope; scope = scope->GetEnclosingScope())
    {
        FuncInfo *func = scope->GetFunc();
        if (scope == func->GetBodyScope())
        {
            if (func->Escapes() && func->GetHasCachedScope())
            {
                AssertOrFailFast(scope->GetIsObject());
                this->m_writer.Unsigned1(Js::OpCode::InvalCachedScope, envIndex);
            }
        }
        if (scope->GetMustInstantiate())
        {
            envIndex++;
        }
    }
}

// Initialize the 'this' register for a non-lambda function: class constructors
// get Undecl (derived) or a new object from new.target (base); ordinary
// functions load 'this' from the call's arg0; global code uses null (the
// EmitThis callee maps it to the global object — confirm in EmitThis).
void ByteCodeGenerator::LoadThisObject(FuncInfo *funcInfo, bool thisLoadedFromParams)
{
    Symbol* thisSym = funcInfo->GetThisSymbol();
    Assert(thisSym);
    Assert(!funcInfo->IsLambda());

    if (this->scriptContext->GetConfig()->IsES6ClassAndExtendsEnabled() && funcInfo->IsClassConstructor())
    {
        // Derived class constructors initialize 'this' to be Undecl
        //   - we'll check this value during a super call and during 'this' access
        //
        // Base class constructors initialize 'this' to a new object using new.target
        if (funcInfo->IsBaseClassConstructor())
        {
            Symbol* newTargetSym = funcInfo->GetNewTargetSymbol();
            Assert(newTargetSym);

            this->Writer()->Reg2(Js::OpCode::NewScObjectNoCtorFull, thisSym->GetLocation(), newTargetSym->GetLocation());
        }
        else
        {
            this->m_writer.Reg1(Js::OpCode::InitUndecl, thisSym->GetLocation());
        }
    }
    else if (!funcInfo->IsGlobalFunction())
    {
        //
        // thisLoadedFromParams would be true for the event Handler case,
        // "this" would have been loaded from parameters to put in the environment
        //
        if (!thisLoadedFromParams)
        {
            Js::RegSlot tmpReg = funcInfo->AcquireTmpRegister();
            m_writer.ArgIn0(tmpReg);
            EmitThis(funcInfo, thisSym->GetLocation(), tmpReg);
            funcInfo->ReleaseTmpRegister(tmpReg);
        }
        else
        {
            EmitThis(funcInfo, thisSym->GetLocation(), thisSym->GetLocation());
        }
    }
    else
    {
        Assert(funcInfo->IsGlobalFunction());
        Js::RegSlot root = funcInfo->nullConstantRegister;
        EmitThis(funcInfo, thisSym->GetLocation(), root);
    }
}

// NOTE(review): LoadNewTargetObject is cut off at the end of this chunk; the
// remainder of its body is below this view.
void ByteCodeGenerator::LoadNewTargetObject(FuncInfo *funcInfo)
{
    Symbol* newTargetSym = funcInfo->GetNewTargetSymbol();
    Assert(newTargetSym);

    if (funcInfo->IsClassConstructor())
    {
Assert(!funcInfo->IsLambda());
        // Class constructors receive new.target as the implicit arg in slot 0.
        m_writer.ArgIn0(newTargetSym->GetLocation());
    }
    else if (funcInfo->IsGlobalFunction())
    {
        m_writer.Reg1(Js::OpCode::LdUndef, newTargetSym->GetLocation());
    }
    else
    {
        m_writer.Reg1(Js::OpCode::LdNewTarget, newTargetSym->GetLocation());
    }
}

// Emit initialization of the super-constructor binding: the current function
// object for derived class constructors, undefined otherwise.
void ByteCodeGenerator::LoadSuperConstructorObject(FuncInfo *funcInfo)
{
    Symbol* superConstructorSym = funcInfo->GetSuperConstructorSymbol();
    Assert(superConstructorSym);
    Assert(!funcInfo->IsLambda());

    if (funcInfo->IsDerivedClassConstructor())
    {
        m_writer.Reg1(Js::OpCode::LdFuncObj, superConstructorSym->GetLocation());
    }
    else
    {
        m_writer.Reg1(Js::OpCode::LdUndef, superConstructorSym->GetLocation());
    }
}

// Emit initialization of the 'super' (home object) binding.
void ByteCodeGenerator::LoadSuperObject(FuncInfo *funcInfo)
{
    Symbol* superSym = funcInfo->GetSuperSymbol();
    Assert(superSym);
    Assert(!funcInfo->IsLambda());

    m_writer.Reg1(Js::OpCode::LdHomeObj, superSym->GetLocation());
}

// Emit the full super(...) call sequence: select the 'this' to pass (new object
// or new.target depending on whether super is a class constructor), make the
// call, pick the resulting 'this', and verify 'this' was not already assigned.
void ByteCodeGenerator::EmitSuperCall(FuncInfo* funcInfo, ParseNodeSuperCall * pnodeSuperCall, BOOL fReturnValue)
{
    FuncInfo* nonLambdaFunc = funcInfo;
    bool isResultUsed = pnodeSuperCall->isUsed;

    if (funcInfo->IsLambda())
    {
        // super() inside a lambda binds to the enclosing non-lambda function.
        nonLambdaFunc = this->FindEnclosingNonLambda();
    }

    if (nonLambdaFunc->IsBaseClassConstructor())
    {
        // super() is not allowed in base class constructors. If we detect this, emit a ReferenceError and skip making the call.
        this->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_ClassSuperInBaseClass));
        return;
    }

    pnodeSuperCall->isUsed = true;

    // pnode->location refers to two things: the result of the inner function call (`temp` in the pseudocode below),
    // and the result of the super() expression itself
    funcInfo->AcquireLoc(pnodeSuperCall);

    // We need to emit 'this' directly so we can skip throwing a reference error if 'this' is currently undecl (we want to get undecl if 'this' is undecl)
    funcInfo->AcquireLoc(pnodeSuperCall->pnodeThis);
    EmitPropLoadThis(pnodeSuperCall->pnodeThis->location, pnodeSuperCall->pnodeThis, funcInfo, false);

    EmitLoad(pnodeSuperCall->pnodeNewTarget, this, funcInfo);

    Assert(pnodeSuperCall->isSuperCall);
    EmitLoad(pnodeSuperCall->pnodeTarget, this, funcInfo);

    //
    // if (super is class constructor) {
    //   _this = new.target;
    // } else {
    //   _this = NewScObjFull(new.target);
    // }
    //
    // temp = super.call(_this, new.target); // CallFlag_New | CallFlag_NewTarget | CallFlag_ExtraArg
    // if (temp is object) {
    //   _this = temp;
    // }
    //
    // if (UndeclBlockVar === this) {
    //   this = _this;
    // } else {
    //   throw ReferenceError;
    // }
    //
    Js::RegSlot thisForSuperCall = funcInfo->AcquireTmpRegister();
    Js::RegSlot valueForThis = funcInfo->AcquireTmpRegister();
    Js::RegSlot tmpUndeclReg = funcInfo->AcquireTmpRegister();

    Js::ByteCodeLabel useNewTargetForThisLabel = this->Writer()->DefineLabel();
    Js::ByteCodeLabel makeCallLabel = this->Writer()->DefineLabel();
    Js::ByteCodeLabel useSuperCallResultLabel = this->Writer()->DefineLabel();
    Js::ByteCodeLabel doneLabel = this->Writer()->DefineLabel();

    // Inspect the super target's prototype to decide how to create 'this'.
    Js::RegSlot tmpReg = this->EmitLdObjProto(Js::OpCode::LdFuncObjProto, pnodeSuperCall->pnodeTarget->location, funcInfo);
    this->Writer()->BrReg1(Js::OpCode::BrOnClassConstructor, useNewTargetForThisLabel, tmpReg);

    this->Writer()->Reg2(Js::OpCode::NewScObjectNoCtorFull, thisForSuperCall, pnodeSuperCall->pnodeNewTarget->location);
    this->Writer()->Br(Js::OpCode::Br, makeCallLabel);

    this->Writer()->MarkLabel(useNewTargetForThisLabel);
    this->Writer()->Reg2(Js::OpCode::Ld_A, thisForSuperCall, pnodeSuperCall->pnodeNewTarget->location);

    this->Writer()->MarkLabel(makeCallLabel);
    EmitCall(pnodeSuperCall, this, funcInfo, fReturnValue, /*fEvaluateComponents*/ true, thisForSuperCall, pnodeSuperCall->pnodeNewTarget->location);

    // We have to use another temp for the this value before assigning to this register.
    // This is because IRBuilder does not expect us to use the value of a temp after potentially assigning to that same temp.
    // Ex:
    // _this = new.target;
    // temp = super.call(_this);
    // if (temp is object) {
    //   _this = temp; // creates a new sym for _this as it was previously used
    // }
    // this = _this; // tries to loads a value from the old sym (which is dead)
    this->Writer()->BrReg1(Js::OpCode::BrOnObject_A, useSuperCallResultLabel, pnodeSuperCall->location);
    this->Writer()->Reg2(Js::OpCode::Ld_A, valueForThis, thisForSuperCall);
    this->Writer()->Br(Js::OpCode::Br, doneLabel);
    this->Writer()->MarkLabel(useSuperCallResultLabel);
    this->Writer()->Reg2(Js::OpCode::Ld_A, valueForThis, pnodeSuperCall->location);
    this->Writer()->MarkLabel(doneLabel);

    // The call is done and we know what we will bind to 'this' so let's check to see if 'this' is already decl.
    Js::ByteCodeLabel skipLabel = this->Writer()->DefineLabel();
    this->Writer()->Reg1(Js::OpCode::InitUndecl, tmpUndeclReg);
    this->Writer()->BrReg2(Js::OpCode::BrSrEq_A, skipLabel, pnodeSuperCall->pnodeThis->location, tmpUndeclReg);
    this->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_ClassThisAlreadyAssigned));
    this->Writer()->MarkLabel(skipLabel);

    // If calling code cares about the return value, then move the selected `this` value into the result register.
    if (isResultUsed)
    {
        this->Writer()->Reg2(Js::OpCode::Ld_A, pnodeSuperCall->location, valueForThis);
    }

    Symbol* thisSym = pnodeSuperCall->pnodeThis->sym;
    this->Writer()->Reg2(Js::OpCode::StrictLdThis, pnodeSuperCall->pnodeThis->location, valueForThis);

    EmitPropStoreForSpecialSymbol(pnodeSuperCall->pnodeThis->location, thisSym, pnodeSuperCall->pnodeThis->pid, funcInfo, false);

    // Release temps in reverse order of acquisition, then the operand locations.
    funcInfo->ReleaseTmpRegister(tmpUndeclReg);
    funcInfo->ReleaseTmpRegister(valueForThis);
    funcInfo->ReleaseTmpRegister(thisForSuperCall);
    funcInfo->ReleaseLoc(pnodeSuperCall->pnodeTarget);
    funcInfo->ReleaseLoc(pnodeSuperCall->pnodeNewTarget);
    funcInfo->ReleaseLoc(pnodeSuperCall->pnodeThis);
}

// Emit the implicit end-of-constructor code for a class constructor: load
// 'this' into the return register and verify it is no longer undecl.
void ByteCodeGenerator::EmitClassConstructorEndCode(FuncInfo *funcInfo)
{
    Symbol* thisSym = funcInfo->GetThisSymbol();

    if (thisSym && thisSym->GetLocation() != Js::Constants::NoRegister)
    {
        EmitPropLoad(ByteCodeGenerator::ReturnRegister, thisSym, thisSym->GetPid(), funcInfo, true);
        this->m_writer.Reg1(Js::OpCode::ChkUndecl, ByteCodeGenerator::ReturnRegister);
    }
}

// Emit the 'this' coercion: StrictLdThis in strict-mode functions, otherwise
// LdThis with the module id (which maps null/undefined to the global object).
void ByteCodeGenerator::EmitThis(FuncInfo *funcInfo, Js::RegSlot lhsLocation, Js::RegSlot fromRegister)
{
    if (funcInfo->byteCodeFunction->GetIsStrictMode() && !funcInfo->IsGlobalFunction() && !funcInfo->IsLambda())
    {
        m_writer.Reg2(Js::OpCode::StrictLdThis, lhsLocation, fromRegister);
    }
    else
    {
        m_writer.Reg2Int1(Js::OpCode::LdThis, lhsLocation, fromRegister, this->GetModuleID());
    }
}

// Move one formal parameter from its in-arg position into its home location.
void ByteCodeGenerator::EmitLoadFormalIntoRegister(ParseNode *pnodeFormal, Js::RegSlot pos, FuncInfo *funcInfo)
{
    if (pnodeFormal->IsVarLetOrConst())
    {
        // Get the param from its argument position into its assigned register.
        // The position should match the location, otherwise, it has been shadowed by parameter with the same name
        Symbol *formal = pnodeFormal->AsParseNodeVar()->sym;
        if (formal->GetLocation() + 1 == pos)
        {
            // Transfer to the frame object, etc., if necessary.
this->EmitLocalPropInit(formal->GetLocation(), formal, funcInfo);
        }
    }
}

// Copy formal parameters from the in-arg slots to their home locations
// (registers or scope slots), and record formal reg slots for the debugger.
void ByteCodeGenerator::HomeArguments(FuncInfo *funcInfo)
{
    if (ShouldTrackDebuggerMetadata())
    {
        // Add formals to the debugger propertyidcontainer for reg slots
        auto addFormalsToPropertyIdContainer = [this, funcInfo](ParseNode *pnodeFormal)
        {
            if (pnodeFormal->IsVarLetOrConst())
            {
                Symbol* formal = pnodeFormal->AsParseNodeVar()->sym;
                if (!formal->IsInSlot(this, funcInfo))
                {
                    Assert(!formal->GetHasInit());
                    funcInfo->GetParsedFunctionBody()->InsertSymbolToRegSlotList(formal->GetName(), formal->GetLocation(), funcInfo->varRegsCount);
                }
            }
        };

        MapFormals(funcInfo->root, addFormalsToPropertyIdContainer);
    }

    // Transfer formal parameters to their home locations on the local frame.
    if (funcInfo->GetHasArguments())
    {
        if (funcInfo->root->pnodeRest != nullptr)
        {
            // Since we don't have to iterate over arguments here, we'll trust the location to be correct.
            EmitLoadFormalIntoRegister(funcInfo->root->pnodeRest, funcInfo->root->pnodeRest->sym->GetLocation() + 1, funcInfo);
        }

        // The arguments object creation helper does this work for us.
        return;
    }

    // pos starts at 1 because in-arg slot 0 holds 'this'.
    Js::ArgSlot pos = 1;
    auto loadFormal = [&](ParseNode *pnodeFormal)
    {
        EmitLoadFormalIntoRegister(pnodeFormal, pos, funcInfo);
        pos++;
    };
    MapFormals(funcInfo->root, loadFormal);
}

// Create the single-exit label plus break/continue labels for every statement
// that is the target of a break or continue.
void ByteCodeGenerator::DefineLabels(FuncInfo *funcInfo)
{
    funcInfo->singleExit = m_writer.DefineLabel();
    SList<ParseNodeStmt *>::Iterator iter(&funcInfo->targetStatements);
    while (iter.Next())
    {
        ParseNodeStmt * node = iter.Data();
        node->breakLabel = m_writer.DefineLabel();
        node->continueLabel = m_writer.DefineLabel();
        node->emitLabels = true;
    }
}

void ByteCodeGenerator::EmitGlobalBody(FuncInfo *funcInfo)
{
    // Emit global code (global scope or eval), fixing up the return register with the implicit
    // return value.
    ParseNode *pnode = funcInfo->root->pnodeBody;
    ParseNode *pnodeLastVal = funcInfo->root->AsParseNodeProg()->pnodeLastValStmt;
    if (pnodeLastVal == nullptr || pnodeLastVal->IsPatternDeclaration())
    {
        // We're not guaranteed to compute any values, so fix up the return register at the top
        // in case.
        this->m_writer.Reg1(Js::OpCode::LdUndef, ReturnRegister);
    }

    while (pnode->nop == knopList)
    {
        ParseNode *stmt = pnode->AsParseNodeBin()->pnode1;
        if (stmt == pnodeLastVal)
        {
            pnodeLastVal = nullptr;
        }
        if (pnodeLastVal == nullptr && (this->flags & fscrReturnExpression))
        {
            EmitTopLevelStatement(stmt, funcInfo, true);
        }
        else
        {
            // Haven't hit the post-dominating return value yet,
            // so don't bother with the return register.
            EmitTopLevelStatement(stmt, funcInfo, false);
        }
        pnode = pnode->AsParseNodeBin()->pnode2;
    }

    EmitTopLevelStatement(pnode, funcInfo, false);
}

void ByteCodeGenerator::EmitFunctionBody(FuncInfo *funcInfo)
{
    // Emit a function body. Only explicit returns and the implicit "undef" at the bottom
    // get copied to the return register.
    ParseNode *pnodeBody = funcInfo->root->pnodeBody;
    ParseNode *pnode = pnodeBody;
    while (pnode->nop == knopList)
    {
        ParseNode *stmt = pnode->AsParseNodeBin()->pnode1;
        if (stmt->CapturesSyms())
        {
            // This statement captures symbols whose commitment to scope slots was
            // delayed; commit them now, before emitting the statement.
            CapturedSymMap *map = funcInfo->EnsureCapturedSymMap();
            SList<Symbol*> *list = map->Item(stmt);
            FOREACH_SLIST_ENTRY(Symbol*, sym, list)
            {
                if (!sym->GetIsCommittedToSlot())
                {
                    Assert(sym->GetLocation() != Js::Constants::NoProperty);
                    sym->SetIsCommittedToSlot();
                    ParseNode *decl = sym->GetDecl();
                    Assert(decl);
                    if (PHASE_TRACE(Js::DelayCapturePhase, funcInfo->byteCodeFunction))
                    {
                        Output::Print(_u("--- DelayCapture: Committed symbol '%s' to slot.\n"),
                            sym->GetName().GetBuffer());
                        Output::Flush();
                    }
                    // REVIEW[ianhall]: HACK to work around this causing an error due to sym not yet being initialized
                    // what is this doing? Why are we assigning sym to itself?
                    bool old = sym->GetNeedDeclaration();
                    sym->SetNeedDeclaration(false);
                    this->EmitPropStore(sym->GetLocation(), sym, sym->GetPid(), funcInfo, decl->nop == knopLetDecl, decl->nop == knopConstDecl);
                    sym->SetNeedDeclaration(old);
                }
            }
            NEXT_SLIST_ENTRY;
        }
        EmitTopLevelStatement(stmt, funcInfo, false);
        pnode = pnode->AsParseNodeBin()->pnode2;
    }
    Assert(!pnode->CapturesSyms());
    EmitTopLevelStatement(pnode, funcInfo, false);
}

// Entry point for bytecode emission over a parsed program: set up the writer
// and emit either the whole program or, for deferred compiles, only the
// nested scope list.
void ByteCodeGenerator::EmitProgram(ParseNodeProg *pnodeProg)
{
    // Indicate that the binding phase is over.
    this->isBinding = false;
    this->trackEnvDepth = true;
    AssignPropertyIds(pnodeProg->funcInfo->byteCodeFunction);
    // Estimate the bytecode buffer size from the AST size.
    int32 initSize = this->maxAstSize / AstBytecodeRatioEstimate;

    // Use the temp allocator in bytecode write temp buffer.
    m_writer.InitData(this->alloc, initSize);

#ifdef LOG_BYTECODE_AST_RATIO
    // log the max Ast size
    Output::Print(_u("Max Ast size: %d"), initSize);
#endif

    Assert(pnodeProg && pnodeProg->nop == knopProg);

    if (this->parentScopeInfo)
    {
        // Scope stack is already set up the way we want it, so don't visit the global scope.
        // Start emitting with the nested scope (i.e., the deferred function).
        this->EmitScopeList(pnodeProg->pnodeScopes);
    }
    else
    {
        this->EmitScopeList(pnodeProg);
    }
}

// Forward declarations for destructuring emit helpers defined later in this file.
void EmitDestructuredObject(ParseNode *lhs, Js::RegSlot rhsLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo);
void EmitDestructuredValueOrInitializer(ParseNodePtr lhsElementNode, Js::RegSlot rhsLocation, ParseNodePtr initializer, bool isNonPatternAssignmentTarget, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo);

// Record the formal parameters into a debugger scope covering the bytecode
// emitted from beginOffset onward (used for default-argument evaluation).
void ByteCodeGenerator::PopulateFormalsScope(uint beginOffset, FuncInfo *funcInfo, ParseNodeFnc *pnodeFnc)
{
    Js::DebuggerScope *debuggerScope = nullptr;
    auto processArg = [&](ParseNode *pnodeArg) {
        if (pnodeArg->IsVarLetOrConst())
        {
            if (debuggerScope == nullptr)
            {
                // Lazily create the debugger scope on the first formal seen.
                debuggerScope = RecordStartScopeObject(pnodeFnc, funcInfo->paramScope && funcInfo->paramScope->GetIsObject() ?
Js::DiagParamScopeInObject : Js::DiagParamScope);
                debuggerScope->SetBegin(beginOffset);
            }

            InsertPropertyToDebuggerScope(funcInfo, debuggerScope, pnodeArg->AsParseNodeVar()->sym);
        }
    };

    MapFormals(pnodeFnc, processArg);
    MapFormalsFromPattern(pnodeFnc, processArg);

    if (debuggerScope != nullptr)
    {
        if (!funcInfo->GetParsedFunctionBody()->IsParamAndBodyScopeMerged())
        {
            // In split-scope functions, 'arguments' lives in the param scope too.
            InsertPropertyToDebuggerScope(funcInfo, debuggerScope, funcInfo->GetArgumentsSymbol());
        }

        RecordEndScopeObject(pnodeFnc);
    }
}

// Add one symbol to a debugger scope, flagging (and remapping) symbols that
// have a same-named duplicate in the body scope of a split-scope function.
void ByteCodeGenerator::InsertPropertyToDebuggerScope(FuncInfo* funcInfo, Js::DebuggerScope* debuggerScope, Symbol* sym)
{
    if (sym)
    {
        Js::FunctionBody* funcBody = funcInfo->GetParsedFunctionBody();
        Js::DebuggerScopePropertyFlags flag = Js::DebuggerScopePropertyFlags_None;
        Js::RegSlot location = sym->GetLocation();
        if (ShouldTrackDebuggerMetadata() && !funcInfo->IsBodyAndParamScopeMerged() && funcInfo->bodyScope->FindLocalSymbol(sym->GetName()) != nullptr)
        {
            flag |= Js::DebuggerScopePropertyFlags_HasDuplicateInBody;
            location = funcBody->MapRegSlot(location);
        }

        debuggerScope->AddProperty(location, sym->EnsurePosition(funcInfo), flag);
    }
}

// Emit initialization code for formal parameters: destructuring patterns and
// default-value expressions. For async functions the whole sequence is wrapped
// in a try/catch that rejects a Promise on failure.
void ByteCodeGenerator::EmitDefaultArgs(FuncInfo *funcInfo, ParseNodeFnc *pnodeFnc)
{
    uint beginOffset = m_writer.GetCurrentOffset();
    auto emitDefaultArg = [&](ParseNode *pnodeArg) {
        if (pnodeArg->nop == knopParamPattern)
        {
            // Destructuring pattern formal (with or without a default initializer).
            this->StartStatement(pnodeArg);

            Assert(pnodeArg->AsParseNodeParamPattern()->location != Js::Constants::NoRegister);
            ParseNodePtr pnode1 = pnodeArg->AsParseNodeParamPattern()->pnode1;

            if (pnode1->IsPattern())
            {
                EmitAssignment(nullptr, pnode1, pnodeArg->AsParseNodeParamPattern()->location, this, funcInfo);
            }
            else
            {
                Assert(pnode1->nop == knopAsg);
                Assert(pnode1->AsParseNodeBin()->pnode1->IsPattern());
                EmitDestructuredValueOrInitializer(pnode1->AsParseNodeBin()->pnode1,
                    pnodeArg->AsParseNodeParamPattern()->location,
                    pnode1->AsParseNodeBin()->pnode2,
                    false /*isNonPatternAssignmentTarget*/,
                    this,
                    funcInfo);
            }
            this->EndStatement(pnodeArg);
            return;
        }
        else if (pnodeArg->IsVarLetOrConst())
        {
            Js::RegSlot location = pnodeArg->AsParseNodeVar()->sym->GetLocation();

            if (pnodeArg->AsParseNodeVar()->pnodeInit == nullptr)
            {
                // Since the formal hasn't been initialized in LdLetHeapArguments, we'll initialize it here.
                pnodeArg->AsParseNodeVar()->sym->SetNeedDeclaration(false);
                EmitPropStore(location, pnodeArg->AsParseNodeVar()->sym, pnodeArg->AsParseNodeVar()->pid, funcInfo, true);
                return;
            }

            // Load the default argument if we got undefined, skip RHS evaluation otherwise.
            Js::ByteCodeLabel noDefaultLabel = this->m_writer.DefineLabel();
            Js::ByteCodeLabel endLabel = this->m_writer.DefineLabel();
            this->StartStatement(pnodeArg);
            // Let us use strict not equal to differentiate between null and undefined
            m_writer.BrReg2(Js::OpCode::BrSrNeq_A, noDefaultLabel, location, funcInfo->undefinedConstantRegister);

            Emit(pnodeArg->AsParseNodeVar()->pnodeInit, this, funcInfo, false);
            pnodeArg->AsParseNodeVar()->sym->SetNeedDeclaration(false); // After emit to prevent foo(a = a)

            if (funcInfo->GetHasArguments() && pnodeArg->AsParseNodeVar()->sym->IsInSlot(this, funcInfo))
            {
                EmitPropStore(pnodeArg->AsParseNodeVar()->pnodeInit->location, pnodeArg->AsParseNodeVar()->sym, pnodeArg->AsParseNodeVar()->pid, funcInfo, true);

                m_writer.Br(endLabel);
            }
            else
            {
                EmitAssignment(nullptr, pnodeArg, pnodeArg->AsParseNodeVar()->pnodeInit->location, this, funcInfo);
            }

            funcInfo->ReleaseLoc(pnodeArg->AsParseNodeVar()->pnodeInit);

            m_writer.MarkLabel(noDefaultLabel);

            if (funcInfo->GetHasArguments() && pnodeArg->AsParseNodeVar()->sym->IsInSlot(this, funcInfo))
            {
                // No default taken: store the incoming value into the scope slot.
                EmitPropStore(location, pnodeArg->AsParseNodeVar()->sym, pnodeArg->AsParseNodeVar()->pid, funcInfo, true);

                m_writer.MarkLabel(endLabel);
            }

            this->EndStatement(pnodeArg);
        }
    };

    // If the function is async, we wrap the default arguments in a try catch and reject a Promise in case of error.
    if (pnodeFnc->IsAsync())
    {
        uint cacheId;
        Js::ByteCodeLabel catchLabel = m_writer.DefineLabel();
        Js::ByteCodeLabel doneLabel = m_writer.DefineLabel();
        Js::RegSlot catchArgLocation = funcInfo->AcquireTmpRegister();
        Js::RegSlot promiseLocation = funcInfo->AcquireTmpRegister();
        Js::RegSlot rejectLocation = funcInfo->AcquireTmpRegister();

        // try
        m_writer.RecordCrossFrameEntryExitRecord(/* isEnterBlock = */ true);
        m_writer.Br(Js::OpCode::TryCatch, catchLabel);

        // Rest cannot have a default argument, so we ignore it.
        MapFormalsWithoutRest(pnodeFnc, emitDefaultArg);

        m_writer.RecordCrossFrameEntryExitRecord(/* isEnterBlock = */ false);
        m_writer.Empty(Js::OpCode::Leave);
        m_writer.Br(doneLabel);

        // catch
        m_writer.MarkLabel(catchLabel);
        m_writer.Reg1(Js::OpCode::Catch, catchArgLocation);

        m_writer.RecordCrossFrameEntryExitRecord(/* isEnterBlock = */ true);
        m_writer.Empty(Js::OpCode::Nop);

        // return Promise.reject(error);
        cacheId = funcInfo->FindOrAddRootObjectInlineCacheId(Js::PropertyIds::Promise, false, false);
        m_writer.PatchableRootProperty(Js::OpCode::LdRootFld, promiseLocation, cacheId, false, false);

        EmitInvoke(rejectLocation, promiseLocation, Js::PropertyIds::reject, this, funcInfo, catchArgLocation);

        m_writer.Reg2(Js::OpCode::Ld_A, ByteCodeGenerator::ReturnRegister, rejectLocation);
        m_writer.RecordCrossFrameEntryExitRecord(/* isEnterBlock = */ false);

        m_writer.Empty(Js::OpCode::Leave);
        m_writer.Br(funcInfo->singleExit);
        m_writer.Empty(Js::OpCode::Leave);

        m_writer.MarkLabel(doneLabel);
        this->SetHasTry(true);

        funcInfo->ReleaseTmpRegister(rejectLocation);
        funcInfo->ReleaseTmpRegister(promiseLocation);
        funcInfo->ReleaseTmpRegister(catchArgLocation);
    }
    else
    {
        // Rest cannot have a default argument, so we ignore it.
        MapFormalsWithoutRest(pnodeFnc, emitDefaultArg);
    }

    if (m_writer.GetCurrentOffset() > beginOffset)
    {
        // Some default-arg bytecode was emitted: expose the formals to the debugger.
        PopulateFormalsScope(beginOffset, funcInfo, pnodeFnc);
    }
}

// Generate bytecode for a single function (or the program body). Handles
// deferred-parse bookkeeping first, then emits the full body.
void ByteCodeGenerator::EmitOneFunction(ParseNodeFnc *pnodeFnc)
{
    Assert(pnodeFnc && (pnodeFnc->nop == knopProg || pnodeFnc->nop == knopFncDecl));
    FuncInfo *funcInfo = pnodeFnc->funcInfo;
    Assert(funcInfo != nullptr);

    if (funcInfo->IsFakeGlobalFunction(this->flags))
    {
        return;
    }

    Js::ParseableFunctionInfo* deferParseFunction = funcInfo->byteCodeFunction;
    deferParseFunction->SetGrfscr(deferParseFunction->GetGrfscr() | (this->flags & ~fscrDeferredFncExpression));
    deferParseFunction->SetSourceInfo(this->GetCurrentSourceIndex(),
        funcInfo->root,
        !!(this->flags & fscrEvalCode),
        ((this->flags & fscrDynamicCode) && !(this->flags & fscrEvalCode)));

    deferParseFunction->SetInParamsCount(funcInfo->inArgsCount);
    if (pnodeFnc->HasDefaultArguments())
    {
        // Only the params before the first defaulted one count toward length.
        deferParseFunction->SetReportedInParamsCount(pnodeFnc->firstDefaultArg + 1);
    }
    else
    {
        deferParseFunction->SetReportedInParamsCount(funcInfo->inArgsCount);
    }

    // Note: Don't check the actual attributes on the functionInfo here, since CanDefer has been cleared while
    // we're generating byte code.
    if (deferParseFunction->IsDeferred() || funcInfo->canDefer)
    {
        Js::ScopeInfo::SaveEnclosingScopeInfo(this, funcInfo);
    }

    if (funcInfo->root->pnodeBody == nullptr)
    {
        // Deferred function with no parsed body: optionally build deferred stubs, then bail.
        if (!PHASE_OFF1(Js::SkipNestedDeferredPhase) && (this->GetFlags() & fscrCreateParserState) == fscrCreateParserState && deferParseFunction->GetCompileCount() == 0)
        {
            deferParseFunction->BuildDeferredStubs(funcInfo->root);
        }
        Assert(!deferParseFunction->IsFunctionBody() || deferParseFunction->GetFunctionBody()->GetByteCode() != nullptr);
        return;
    }

    Js::FunctionBody* byteCodeFunction = funcInfo->GetParsedFunctionBody();
    try
    {
        if (!funcInfo->IsGlobalFunction())
        {
            // Note: Do not set the stack nested func flag if the function has been redeferred and recompiled.
            // In that case the flag already has the value we want.
if (CanStackNestedFunc(funcInfo, true) && byteCodeFunction->GetCompileCount() == 0)
            {
#if DBG
                byteCodeFunction->SetCanDoStackNestedFunc();
#endif
                if (funcInfo->root->astSize <= ParseNodeFnc::MaxStackClosureAST)
                {
                    byteCodeFunction->SetStackNestedFunc(true);
                }
            }
        }

        if (byteCodeFunction->DoStackNestedFunc())
        {
            // Propagate the stack-allocated-closure parent to all nested function bodies.
            uint nestedCount = byteCodeFunction->GetNestedCount();
            for (uint i = 0; i < nestedCount; i++)
            {
                Js::FunctionProxy * nested = byteCodeFunction->GetNestedFunctionProxy(i);
                if (nested->IsFunctionBody())
                {
                    nested->GetFunctionBody()->SetStackNestedFuncParent(byteCodeFunction->GetFunctionInfo());
                }
            }
        }

        if (byteCodeFunction->GetByteCode() != nullptr)
        {
            // Previously compiled function nested within a re-deferred and re-compiled function.
            return;
        }

        // Bug : 301517
        // In the debug mode the hasOnlyThis optimization needs to be disabled, since user can break in this function
        // and do operation on 'this' and its property, which may not be defined yet.
        if (funcInfo->root->HasOnlyThisStmts() && !IsInDebugMode())
        {
            byteCodeFunction->SetHasOnlyThisStmts(true);
        }

        if (byteCodeFunction->IsInlineApplyDisabled() || this->scriptContext->GetConfig()->IsNoNative())
        {
            // Determine whether the only use of 'arguments' is f.apply(this, arguments),
            // so the heap arguments object can be elided.
            if ((pnodeFnc->nop == knopFncDecl) && (funcInfo->GetHasHeapArguments()) && (!funcInfo->GetCallsEval()) && ApplyEnclosesArgs(pnodeFnc, this))
            {
                bool applyEnclosesArgs = true;
                for (ParseNode* pnodeVar = funcInfo->root->pnodeVars; pnodeVar; pnodeVar = pnodeVar->AsParseNodeVar()->pnodeNext)
                {
                    Symbol* sym = pnodeVar->AsParseNodeVar()->sym;
                    if (sym->GetSymbolType() == STVariable && !sym->IsArguments())
                    {
                        // Any other local variable disqualifies the optimization.
                        applyEnclosesArgs = false;
                        break;
                    }
                }
                auto constAndLetCheck = [](ParseNodeBlock *pnodeBlock, bool *applyEnclosesArgs)
                {
                    if (*applyEnclosesArgs)
                    {
                        for (auto lexvar = pnodeBlock->pnodeLexVars; lexvar; lexvar = lexvar->AsParseNodeVar()->pnodeNext)
                        {
                            Symbol* sym = lexvar->AsParseNodeVar()->sym;
                            if (sym->GetSymbolType() == STVariable && !sym->IsArguments())
                            {
                                *applyEnclosesArgs = false;
                                break;
                            }
                        }
                    }
                };
                constAndLetCheck(funcInfo->root->pnodeScopes, &applyEnclosesArgs);
                constAndLetCheck(funcInfo->root->pnodeBodyScope, &applyEnclosesArgs);
                funcInfo->SetApplyEnclosesArgs(applyEnclosesArgs);
            }
        }

        InitScopeSlotArray(funcInfo);
        FinalizeRegisters(funcInfo, byteCodeFunction);
        DebugOnly(Js::RegSlot firstTmpReg = funcInfo->varRegsCount);

        // Reserve temp registers for the inner scopes. We prefer temps because the JIT will then renumber them
        // and see different lifetimes. (Note that debug mode requires permanent registers. See FinalizeRegisters.)
        // Need to revisit the condition when enabling JitES6Generators.
        uint innerScopeCount = funcInfo->InnerScopeCount();
        if (!this->IsInDebugMode() && !byteCodeFunction->IsCoroutine())
        {
            byteCodeFunction->SetInnerScopeCount(innerScopeCount);
            if (innerScopeCount)
            {
                funcInfo->SetFirstInnerScopeReg(funcInfo->AcquireTmpRegister());
                for (uint i = 1; i < innerScopeCount; i++)
                {
                    funcInfo->AcquireTmpRegister();
                }
            }
        }

        funcInfo->inlineCacheMap = Anew(alloc, FuncInfo::InlineCacheMap,
            alloc,
            funcInfo->RegCount() // Pass the actual register count.
            // TODO: Check if we can reduce this count
            );
        funcInfo->rootObjectLoadInlineCacheMap = Anew(alloc, FuncInfo::RootObjectInlineCacheIdMap,
            alloc,
            10);
        funcInfo->rootObjectLoadMethodInlineCacheMap = Anew(alloc, FuncInfo::RootObjectInlineCacheIdMap,
            alloc,
            10);
        funcInfo->rootObjectStoreInlineCacheMap = Anew(alloc, FuncInfo::RootObjectInlineCacheIdMap,
            alloc,
            10);
        funcInfo->referencedPropertyIdToMapIndex = Anew(alloc, FuncInfo::RootObjectInlineCacheIdMap,
            alloc,
            10);

        byteCodeFunction->AllocateLiteralRegexArray();
        m_callSiteId = 0;
        m_writer.Begin(byteCodeFunction, alloc, this->DoJitLoopBodies(funcInfo), funcInfo->hasLoop, this->IsInDebugMode());
        this->PushFuncInfo(_u("EmitOneFunction"), funcInfo);

        this->inPrologue = true;

        Scope* paramScope = funcInfo->GetParamScope();
        Scope* bodyScope = funcInfo->GetBodyScope();

        // For now, emit all constant loads at top of function (should instead put in closest dominator of uses).
        LoadAllConstants(funcInfo);
        HomeArguments(funcInfo);

        if (!funcInfo->IsBodyAndParamScopeMerged())
        {
            byteCodeFunction->SetParamAndBodyScopeNotMerged();

            // Pop the body scope before emitting the default args
            PopScope();
            Assert(this->GetCurrentScope() == paramScope);
        }

        if (funcInfo->root->pnodeRest != nullptr)
        {
            byteCodeFunction->SetHasRestParameter();
        }

        if (funcInfo->IsGlobalFunction())
        {
            EnsureNoRedeclarations(pnodeFnc->pnodeScopes, funcInfo);
        }

        ::BeginEmitBlock(pnodeFnc->pnodeScopes, this, funcInfo);

        DefineLabels(funcInfo);

        // We need to emit the storage for special symbols before we emit the default arguments in case the default
        // argument expressions reference those special names.
        if (pnodeFnc->HasNonSimpleParameterList())
        {
            // If the param and body scope are merged, the special symbol vars are located in the body scope so we
            // need to walk over the var list.
            if (funcInfo->IsBodyAndParamScopeMerged())
            {
                for (ParseNodePtr pnodeVar = pnodeFnc->pnodeVars; pnodeVar; pnodeVar = pnodeVar->AsParseNodeVar()->pnodeNext)
                {
#if DBG
                    bool reachedEndOfSpecialSymbols = false;
#endif
                    Symbol* sym = pnodeVar->AsParseNodeVar()->sym;

                    if (sym != nullptr && sym->IsSpecialSymbol())
                    {
                        EmitPropStoreForSpecialSymbol(sym->GetLocation(), sym, sym->GetPid(), funcInfo, true);
                        if (ShouldTrackDebuggerMetadata() && !sym->IsInSlot(this, funcInfo))
                        {
                            byteCodeFunction->InsertSymbolToRegSlotList(sym->GetName(), sym->GetLocation(), funcInfo->varRegsCount);
                        }
                    }
                    else
                    {
#if DBG
                        reachedEndOfSpecialSymbols = true;
#else
                        // All of the special symbols exist at the beginning of the var list (parser guarantees this and debug build asserts this)
                        // so we can quit walking at the first non-special one we see.
                        break;
#endif
                    }

#if DBG
                    if (reachedEndOfSpecialSymbols)
                    {
                        Assert(sym == nullptr || !sym->IsSpecialSymbol());
                    }
#endif
                }
            }
            else
            {
                paramScope->ForEachSymbol([&](Symbol* sym) {
                    if (sym && sym->IsSpecialSymbol())
                    {
                        EmitPropStoreForSpecialSymbol(sym->GetLocation(), sym, sym->GetPid(), funcInfo, true);
                    }
                });
            }
        }

        if (pnodeFnc->HasNonSimpleParameterList() || !funcInfo->IsBodyAndParamScopeMerged())
        {
            Assert(pnodeFnc->HasNonSimpleParameterList() || CONFIG_FLAG(ForceSplitScope));

            this->InitBlockScopedNonTemps(funcInfo->root->pnodeScopes, funcInfo);

            EmitDefaultArgs(funcInfo, pnodeFnc);

            if (!funcInfo->IsBodyAndParamScopeMerged())
            {
                Assert(this->GetCurrentScope() == paramScope);
                // Push the body scope
                PushScope(bodyScope);
                funcInfo->SetCurrentChildScope(bodyScope);

                // Mark the beginning of the body scope so that new scope slots can be created.
                this->Writer()->Empty(Js::OpCode::BeginBodyScope);
            }
        }

        // If the function has non simple parameter list, the params needs to be evaluated when the generator object is created
        // (that is when the function is called). This yield opcode is to mark the beginning of the function body.
        // TODO: Inserting a yield should have almost no impact on perf as it is a direct return from the function. But this needs
        // to be verified. Ideally if the function has simple parameter list then we can avoid inserting the opcode and the additional call.
        if (pnodeFnc->IsGenerator())
        {
            Js::RegSlot tempReg = funcInfo->AcquireTmpRegister();
            EmitYield(funcInfo->AssignUndefinedConstRegister(), tempReg, this, funcInfo);
            m_writer.Reg1(Js::OpCode::Unused, tempReg);
            funcInfo->ReleaseTmpRegister(tempReg);
        }

        DefineUserVars(funcInfo);

        // Emit all scope-wide function definitions before emitting function bodies
        // so that calls may reference functions they precede lexically.
        // Note, global eval scope is a fake local scope and is handled as if it were
        // a lexical block instead of a true global scope, so do not define the functions
        // here. They will be defined during BeginEmitBlock.
        if (!(funcInfo->IsGlobalFunction() && this->IsEvalWithNoParentScopeInfo()))
        {
            // This only handles function declarations, which param scope cannot have any.
            DefineFunctions(funcInfo);
        }

        if (pnodeFnc->HasNonSimpleParameterList() || !funcInfo->IsBodyAndParamScopeMerged())
        {
            Assert(pnodeFnc->HasNonSimpleParameterList() || CONFIG_FLAG(ForceSplitScope));

            this->InitBlockScopedNonTemps(funcInfo->root->pnodeBodyScope, funcInfo);
        }
        else
        {
            this->InitBlockScopedNonTemps(funcInfo->root->pnodeScopes, funcInfo);
        }

        if (!pnodeFnc->HasNonSimpleParameterList() && funcInfo->GetHasArguments() && !NeedScopeObjectForArguments(funcInfo, pnodeFnc))
        {
            // If we didn't create a scope object and didn't have default args, we still need to transfer the formals to their slots.
            MapFormalsWithoutRest(pnodeFnc, [&](ParseNode *pnodeArg) { EmitPropStore(pnodeArg->AsParseNodeVar()->sym->GetLocation(), pnodeArg->AsParseNodeVar()->sym, pnodeArg->AsParseNodeVar()->pid, funcInfo); });
        }

        // Rest needs to trigger use before declaration until all default args have been processed.
        if (pnodeFnc->pnodeRest != nullptr)
        {
            pnodeFnc->pnodeRest->sym->SetNeedDeclaration(false);
        }

        Js::RegSlot formalsUpperBound = Js::Constants::NoRegister; // Needed for tracking the last RegSlot in the param scope
        if (!funcInfo->IsBodyAndParamScopeMerged())
        {
            // Emit bytecode to copy the initial values from param names to their corresponding body bindings.
            // We have to do this after the rest param is marked as false for need declaration.
            Symbol* funcSym = funcInfo->root->GetFuncSymbol();
            paramScope->ForEachSymbol([&](Symbol* param) {
                Symbol* varSym = funcInfo->GetBodyScope()->FindLocalSymbol(param->GetName());
                if ((funcSym == nullptr || funcSym != param)    // Do not copy the symbol over to body as the function expression symbol
                                                                // is expected to stay inside the function expression scope
                    && (varSym && varSym->GetSymbolType() == STVariable && (varSym->IsInSlot(this, funcInfo) || varSym->GetLocation() != Js::Constants::NoRegister)))
                {
                    if (!varSym->GetNeedDeclaration())
                    {
                        if (param->IsInSlot(this, funcInfo))
                        {
                            // Simulating EmitPropLoad here. We can't directly call the method as we have to use the param scope specifically.
                            // Walking the scope chain is not possible at this time.
                            Js::RegSlot tempReg = funcInfo->AcquireTmpRegister();
                            Js::PropertyId slot = param->EnsureScopeSlot(this, funcInfo);
                            Js::ProfileId profileId = funcInfo->FindOrAddSlotProfileId(paramScope, slot);
                            Js::OpCode op = paramScope->GetIsObject() ? Js::OpCode::LdParamObjSlot : Js::OpCode::LdParamSlot;
                            slot = slot + (paramScope->GetIsObject() ? 0 : Js::ScopeSlots::FirstSlotIndex);

                            this->m_writer.SlotI1(op, tempReg, slot, profileId);

                            this->EmitPropStore(tempReg, varSym, varSym->GetPid(), funcInfo);
                            funcInfo->ReleaseTmpRegister(tempReg);
                        }
                        else if (param->GetLocation() != Js::Constants::NoRegister)
                        {
                            this->EmitPropStore(param->GetLocation(), varSym, varSym->GetPid(), funcInfo);
                        }
                        else
                        {
                            Assert(param->IsArguments() && !funcInfo->GetHasArguments());
                        }
                    }
                    else
                    {
                        // There is a let redeclaration of arguments symbol.
Any other var will cause a // re-declaration error. Assert(param->IsArguments()); } } if (ShouldTrackDebuggerMetadata() && param->GetLocation() != Js::Constants::NoRegister) { if (formalsUpperBound == Js::Constants::NoRegister || formalsUpperBound < param->GetLocation()) { formalsUpperBound = param->GetLocation(); } } }); } if (ShouldTrackDebuggerMetadata() && byteCodeFunction->GetPropertyIdOnRegSlotsContainer()) { byteCodeFunction->GetPropertyIdOnRegSlotsContainer()->formalsUpperBound = formalsUpperBound; } if (pnodeFnc->pnodeBodyScope != nullptr) { ::BeginEmitBlock(pnodeFnc->pnodeBodyScope, this, funcInfo); } this->inPrologue = false; if (funcInfo->IsGlobalFunction()) { EmitGlobalBody(funcInfo); } else { EmitFunctionBody(funcInfo); } if (pnodeFnc->pnodeBodyScope != nullptr) { ::EndEmitBlock(pnodeFnc->pnodeBodyScope, this, funcInfo); } ::EndEmitBlock(pnodeFnc->pnodeScopes, this, funcInfo); if (!this->IsInDebugMode()) { // Release the temp registers that we reserved for inner scopes above. if (innerScopeCount) { Js::RegSlot tmpReg = funcInfo->FirstInnerScopeReg() + innerScopeCount - 1; for (uint i = 0; i < innerScopeCount; i++) { funcInfo->ReleaseTmpRegister(tmpReg); tmpReg--; } } } Assert(funcInfo->firstTmpReg == firstTmpReg); Assert(funcInfo->curTmpReg == firstTmpReg); Assert(byteCodeFunction->GetFirstTmpReg() == firstTmpReg + byteCodeFunction->GetConstantCount()); byteCodeFunction->CheckAndSetVarCount(funcInfo->varRegsCount); byteCodeFunction->CheckAndSetOutParamMaxDepth(funcInfo->outArgsMaxDepth); byteCodeFunction->SetForInLoopDepth(funcInfo->GetMaxForInLoopLevel()); // Do a uint32 add just to verify that we haven't overflowed the reg slot type. 
UInt32Math::Add(funcInfo->varRegsCount, funcInfo->constRegsCount); #if DBG_DUMP if (PHASE_STATS1(Js::ByteCodePhase)) { Output::Print(_u(" BCode: %-10d, Aux: %-10d, AuxC: %-10d Total: %-10d, %s\n"), m_writer.ByteCodeDataSize(), m_writer.AuxiliaryDataSize(), m_writer.AuxiliaryContextDataSize(), m_writer.ByteCodeDataSize() + m_writer.AuxiliaryDataSize() + m_writer.AuxiliaryContextDataSize(), funcInfo->name); this->scriptContext->byteCodeDataSize += m_writer.ByteCodeDataSize(); this->scriptContext->byteCodeAuxiliaryDataSize += m_writer.AuxiliaryDataSize(); this->scriptContext->byteCodeAuxiliaryContextDataSize += m_writer.AuxiliaryContextDataSize(); } #endif this->MapCacheIdsToPropertyIds(funcInfo); this->MapReferencedPropertyIds(funcInfo); Assert(this->TopFuncInfo() == funcInfo); PopFuncInfo(_u("EmitOneFunction")); m_writer.SetCallSiteCount(m_callSiteId); #ifdef LOG_BYTECODE_AST_RATIO m_writer.End(funcInfo->root->astSize, this->maxAstSize); #else m_writer.End(); #endif } catch (...) { // Failed to generate byte-code for this function body (likely OOM or stack overflow). Notify the function body so that // it can revert intermediate state changes that may have taken place during byte code generation before the failure. byteCodeFunction->ResetByteCodeGenState(); m_writer.Reset(); throw; } #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase) && !byteCodeFunction->GetIsGlobalFunc()) { if (byteCodeFunction->GetHasTry()) { WritePerfHint(PerfHints::HasTryBlock_Verbose, byteCodeFunction); } if (funcInfo->GetCallsEval()) { WritePerfHint(PerfHints::CallsEval_Verbose, byteCodeFunction); } else if (funcInfo->GetChildCallsEval()) { WritePerfHint(PerfHints::ChildCallsEval, byteCodeFunction); } } #endif if (!byteCodeFunction->GetSourceContextInfo()->IsDynamic() && byteCodeFunction->GetIsTopLevel() && !(this->flags & fscrEvalCode)) { // Add the top level of nested functions to the tracking dictionary. 
        // Wait until this point so that all nested functions have gone
        // through the Emit API so source info, etc., is initialized, and these are not orphaned functions left behind by an unfinished pass.
        byteCodeFunction->ForEachNestedFunc([&](Js::FunctionProxy * nestedFunc, uint32 i)
        {
            // Only deferred-parse function declarations are tracked; expressions/lambdas are not.
            if (nestedFunc && nestedFunc->IsDeferredParseFunction() && nestedFunc->GetParseableFunctionInfo()->GetIsDeclaration())
            {
                byteCodeFunction->GetUtf8SourceInfo()->TrackDeferredFunction(nestedFunc->GetLocalFunctionId(), nestedFunc->GetParseableFunctionInfo());
            }
            return true;
        });
    }

    byteCodeFunction->SetInitialDefaultEntryPoint();
    byteCodeFunction->SetCompileCount(UInt32Math::Add(byteCodeFunction->GetCompileCount(), 1));

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (byteCodeFunction->IsInDebugMode() != scriptContext->IsScriptContextInDebugMode()) // debug mode mismatch
    {
        if (m_utf8SourceInfo->GetIsLibraryCode())
        {
            Assert(!byteCodeFunction->IsInDebugMode()); // Library script byteCode is never in debug mode
        }
        else
        {
            Js::Throw::FatalInternalError();
        }
    }
#endif

#if DBG_DUMP
    // Diagnostic dumps of the AST, emitted bytecode, and debugger scopes, gated by phase flags.
    if (PHASE_DUMP(Js::ByteCodePhase, funcInfo->byteCodeFunction) && Js::Configuration::Global.flags.Verbose)
    {
        pnodeFnc->Dump();
    }
    if (this->Trace() || PHASE_DUMP(Js::ByteCodePhase, funcInfo->byteCodeFunction))
    {
        Js::ByteCodeDumper::Dump(byteCodeFunction);
    }
    if (PHASE_DUMP(Js::DebuggerScopePhase, funcInfo->byteCodeFunction))
    {
        byteCodeFunction->DumpScopes();
    }
#endif

#if ENABLE_NATIVE_CODEGEN
    // Hand the freshly emitted function to the JIT unless native codegen is disabled
    // by phase flag, by this generator, or by the script context configuration.
    if ((!PHASE_OFF(Js::BackEndPhase, funcInfo->byteCodeFunction))
        && !this->forceNoNative
        && !this->scriptContext->GetConfig()->IsNoNative())
    {
        GenerateFunction(this->scriptContext->GetNativeCodeGenerator(), byteCodeFunction);
    }
#endif
}

// Builds the function body's cacheId -> propertyId map from the per-FuncInfo inline cache maps.
// Cache ids are laid out as: [regular field caches][root-object load][root-object load-method][root-object store],
// so each root-object map's local ids are offset by the size of the sections before it.
void ByteCodeGenerator::MapCacheIdsToPropertyIds(FuncInfo *funcInfo)
{
    Js::FunctionBody *functionBody = funcInfo->GetParsedFunctionBody();
    uint rootObjectLoadInlineCacheStart = funcInfo->GetInlineCacheCount();
    uint rootObjectLoadMethodInlineCacheStart = rootObjectLoadInlineCacheStart + funcInfo->GetRootObjectLoadInlineCacheCount();
    uint rootObjectStoreInlineCacheStart = rootObjectLoadMethodInlineCacheStart + funcInfo->GetRootObjectLoadMethodInlineCacheCount();
    uint totalFieldAccessInlineCacheCount = rootObjectStoreInlineCacheStart + funcInfo->GetRootObjectStoreInlineCacheCount();

    functionBody->CreateCacheIdToPropertyIdMap(rootObjectLoadInlineCacheStart, rootObjectLoadMethodInlineCacheStart,
        rootObjectStoreInlineCacheStart, totalFieldAccessInlineCacheCount, funcInfo->GetIsInstInlineCacheCount());

    if (totalFieldAccessInlineCacheCount == 0)
    {
        return;
    }

    // Regular (non-root-object) caches: each unit may carry separate load / load-method / store ids.
    funcInfo->inlineCacheMap->Map([functionBody](Js::RegSlot regSlot, FuncInfo::InlineCacheIdMap *inlineCacheIdMap)
    {
        inlineCacheIdMap->Map([functionBody](Js::PropertyId propertyId, FuncInfo::InlineCacheList* inlineCacheList)
        {
            if (inlineCacheList)
            {
                inlineCacheList->Iterate([functionBody, propertyId](InlineCacheUnit cacheUnit)
                {
                    CompileAssert(offsetof(InlineCacheUnit, cacheId) == offsetof(InlineCacheUnit, loadCacheId));
                    // -1 marks an unused slot in the unit.
                    if (cacheUnit.loadCacheId != -1)
                    {
                        functionBody->SetPropertyIdForCacheId(cacheUnit.loadCacheId, propertyId);
                    }
                    if (cacheUnit.loadMethodCacheId != -1)
                    {
                        functionBody->SetPropertyIdForCacheId(cacheUnit.loadMethodCacheId, propertyId);
                    }
                    if (cacheUnit.storeCacheId != -1)
                    {
                        functionBody->SetPropertyIdForCacheId(cacheUnit.storeCacheId, propertyId);
                    }
                });
            }
        });
    });

    // Root-object caches store section-local ids; rebase them by the section start computed above.
    funcInfo->rootObjectLoadInlineCacheMap->Map([functionBody, rootObjectLoadInlineCacheStart](Js::PropertyId propertyId, uint cacheId)
    {
        functionBody->SetPropertyIdForCacheId(cacheId + rootObjectLoadInlineCacheStart, propertyId);
    });
    funcInfo->rootObjectLoadMethodInlineCacheMap->Map([functionBody, rootObjectLoadMethodInlineCacheStart](Js::PropertyId propertyId, uint cacheId)
    {
        functionBody->SetPropertyIdForCacheId(cacheId + rootObjectLoadMethodInlineCacheStart, propertyId);
    });
    funcInfo->rootObjectStoreInlineCacheMap->Map([functionBody, rootObjectStoreInlineCacheStart](Js::PropertyId propertyId, uint cacheId)
    {
        functionBody->SetPropertyIdForCacheId(cacheId + rootObjectStoreInlineCacheStart, propertyId);
    });

    // Caches recorded for implicit valueOf/toString stores map to the well-known property ids.
    SListBase<uint>::Iterator valueOfIter(&funcInfo->valueOfStoreCacheIds);
    while (valueOfIter.Next())
    {
        functionBody->SetPropertyIdForCacheId(valueOfIter.Data(), Js::PropertyIds::valueOf);
    }

    SListBase<uint>::Iterator toStringIter(&funcInfo->toStringStoreCacheIds);
    while (toStringIter.Next())
    {
        functionBody->SetPropertyIdForCacheId(toStringIter.Data(), Js::PropertyIds::toString);
    }

#if DBG
    functionBody->VerifyCacheIdToPropertyIdMap();
#endif
}

// Copies the FuncInfo's referenced-property-id table into the function body's map, index for index.
void ByteCodeGenerator::MapReferencedPropertyIds(FuncInfo * funcInfo)
{
    Js::FunctionBody *functionBody = funcInfo->GetParsedFunctionBody();
    uint referencedPropertyIdCount = funcInfo->GetReferencedPropertyIdCount();
    functionBody->CreateReferencedPropertyIdMap(referencedPropertyIdCount);

    funcInfo->referencedPropertyIdToMapIndex->Map([functionBody](Js::PropertyId propertyId, uint mapIndex)
    {
        functionBody->SetReferencedPropertyIdWithMapIndex(mapIndex, propertyId);
    });

#if DBG
    functionBody->VerifyReferencedPropertyIdMap();
#endif
}

// Walks a sibling list of scope-carrying parse nodes (functions, blocks, catches, withs) and emits
// bytecode for each, recursing into nested scopes. Stops early at breakOnBodyScopeNode so that, for
// split param/body scopes, emission can pause at the body-scope boundary (see StartEmitFunction).
void ByteCodeGenerator::EmitScopeList(ParseNode *pnode, ParseNode *breakOnBodyScopeNode)
{
    while (pnode)
    {
        if (breakOnBodyScopeNode != nullptr && breakOnBodyScopeNode == pnode)
        {
            break;
        }

        switch (pnode->nop)
        {
        case knopFncDecl:
#ifdef ASMJS_PLAT
            if (pnode->AsParseNodeFnc()->GetAsmjsMode())
            {
                Js::ExclusiveContext context(this, GetScriptContext());
                if (Js::AsmJSCompiler::Compile(&context, pnode->AsParseNodeFnc(), pnode->AsParseNodeFnc()->pnodeParams))
                {
                    // Compiled as asm.js; skip normal bytecode emission for this function.
                    pnode = pnode->AsParseNodeFnc()->pnodeNext;
                    break;
                }
                else if (CONFIG_FLAG(AsmJsStopOnError))
                {
                    exit(JSERR_AsmJsCompileError);
                }
                else
                {
                    // If deferral is not allowed, throw and reparse everything with asm.js disabled.
                    throw Js::AsmJsParseException();
                }
            }
#endif
            // FALLTHROUGH
        case knopProg:
            if (pnode->AsParseNodeFnc()->funcInfo)
            {
                FuncInfo* funcInfo = pnode->AsParseNodeFnc()->funcInfo;
                Scope* paramScope = funcInfo->GetParamScope();

                // For split scopes, child emission starts in the param scope; otherwise in the (merged) body scope.
                if (!funcInfo->IsBodyAndParamScopeMerged())
                {
                    funcInfo->SetCurrentChildScope(paramScope);
                }
                else
                {
                    funcInfo->SetCurrentChildScope(funcInfo->GetBodyScope());
                }
                this->StartEmitFunction(pnode->AsParseNodeFnc());
                PushFuncInfo(_u("StartEmitFunction"), funcInfo);

                // Emit nested functions before this function's own body so that calls can
                // reference functions that precede them lexically. With split scopes, the
                // body-scope list is the starting point (param-scope functions were already
                // handled by StartEmitFunction).
                if (!funcInfo->IsBodyAndParamScopeMerged())
                {
                    this->EmitScopeList(pnode->AsParseNodeFnc()->pnodeBodyScope->pnodeScopes);
                }
                else
                {
                    this->EmitScopeList(pnode->AsParseNodeFnc()->pnodeScopes);
                }

                this->EmitOneFunction(pnode->AsParseNodeFnc());
                this->EndEmitFunction(pnode->AsParseNodeFnc());

                Assert(pnode->AsParseNodeFnc()->pnodeBody == nullptr || funcInfo->isReused || funcInfo->GetCurrentChildScope() == funcInfo->GetBodyScope());
                funcInfo->SetCurrentChildScope(nullptr);
            }
            pnode = pnode->AsParseNodeFnc()->pnodeNext;
            break;

        case knopBlock:
        {
            ParseNodeBlock * pnodeBlock = pnode->AsParseNodeBlock();
            this->StartEmitBlock(pnodeBlock);
            this->EmitScopeList(pnodeBlock->pnodeScopes);
            this->EndEmitBlock(pnodeBlock);
            pnode = pnodeBlock->pnodeNext;
            break;
        }

        case knopCatch:
        {
            ParseNodeCatch * pnodeCatch = pnode->AsParseNodeCatch();
            this->StartEmitCatch(pnodeCatch);
            this->EmitScopeList(pnodeCatch->pnodeScopes);
            this->EndEmitCatch(pnodeCatch);
            pnode = pnodeCatch->pnodeNext;
            break;
        }

        case knopWith:
            this->StartEmitWith(pnode);
            this->EmitScopeList(pnode->AsParseNodeWith()->pnodeScopes);
            this->EndEmitWith(pnode);
            pnode = pnode->AsParseNodeWith()->pnodeNext;
            break;

        default:
            AssertMsg(false, "Unexpected opcode in tree of scopes");
            break;
        }
    }
}

// Ensures the named function declaration's symbol has a scope slot, unconditionally
// (contrast CheckFncDeclScopeSlot, which first checks whether a slot is needed).
void ByteCodeGenerator::EnsureFncDeclScopeSlot(ParseNodeFnc *pnodeFnc, FuncInfo *funcInfo)
{
    if (pnodeFnc->pnodeName)
    {
        Assert(pnodeFnc->pnodeName->nop == knopVarDecl);
        Symbol *sym = pnodeFnc->pnodeName->sym;
        // If this function is shadowing the arguments symbol in body then skip it.
        // We will allocate scope slot for the arguments symbol during EmitLocalPropInit.
        if (sym && !sym->IsArguments())
        {
            sym->EnsureScopeSlot(this, funcInfo);
        }
    }
}

// Similar to EnsureFncScopeSlot visitor function, but verifies that a slot is needed before assigning it.
void ByteCodeGenerator::CheckFncDeclScopeSlot(ParseNodeFnc *pnodeFnc, FuncInfo *funcInfo)
{
    if (pnodeFnc->pnodeName)
    {
        Assert(pnodeFnc->pnodeName->nop == knopVarDecl);
        Symbol *sym = pnodeFnc->pnodeName->sym;
        if (sym && sym->NeedsSlotAlloc(this, funcInfo))
        {
            sym->EnsureScopeSlot(this, funcInfo);
        }
    }
}

// Prepares a function for emission: assigns scope locations, decides cached-scope eligibility,
// allocates scope slots for formals/vars/function declarations, and pushes the function's scopes.
// Paired with EndEmitFunction, which pops what is pushed here.
void ByteCodeGenerator::StartEmitFunction(ParseNodeFnc *pnodeFnc)
{
    Assert(pnodeFnc->nop == knopFncDecl || pnodeFnc->nop == knopProg);

    FuncInfo *funcInfo = pnodeFnc->funcInfo;
    Scope * const bodyScope = funcInfo->GetBodyScope();
    Scope * const paramScope = funcInfo->GetParamScope();

    if (funcInfo->byteCodeFunction->IsFunctionParsed() && funcInfo->root->pnodeBody != nullptr)
    {
        if (funcInfo->GetParsedFunctionBody()->GetByteCode() == nullptr && !(flags & (fscrEval | fscrImplicitThis)))
        {
            // Only set the environment depth if it's truly known (i.e., not in eval or event handler).
            funcInfo->GetParsedFunctionBody()->SetEnvDepth(this->envDepth);
        }

        if (funcInfo->GetCallsEval())
        {
            funcInfo->byteCodeFunction->SetDontInline(true);
        }

        // A named function expression gets its own scope holding just the function's name.
        Scope * const funcExprScope = funcInfo->funcExprScope;
        if (funcExprScope)
        {
            if (funcInfo->GetCallsEval())
            {
                Assert(funcExprScope->GetIsObject());
            }

            if (funcExprScope->GetIsObject())
            {
                funcExprScope->SetCapturesAll(true);
                funcExprScope->SetMustInstantiate(true);
                PushScope(funcExprScope);
            }
            else
            {
                // Non-object func-expr scope: fold the name symbol into the body (or param) scope
                // rather than instantiating a separate scope for it.
                Symbol *sym = funcInfo->root->GetFuncSymbol();
                if (funcInfo->IsBodyAndParamScopeMerged())
                {
                    funcInfo->bodyScope->AddSymbol(sym);
                }
                else
                {
                    funcInfo->paramScope->AddSymbol(sym);
                }
                sym->EnsureScopeSlot(this, funcInfo);
                if (sym->GetHasNonLocalReference())
                {
                    sym->GetScope()->SetHasOwnLocalInClosure(true);
                }
            }
        }

        if (pnodeFnc->nop != knopProg)
        {
            if (!bodyScope->GetIsObject() && NeedObjectAsFunctionScope(funcInfo, pnodeFnc))
            {
                Assert(bodyScope->GetIsObject());
            }

            // Scope location is the activation-object register when the scope is an object,
            // otherwise the slot-array register.
            if (bodyScope->GetIsObject())
            {
                bodyScope->SetLocation(funcInfo->frameObjRegister);
            }
            else
            {
                bodyScope->SetLocation(funcInfo->frameSlotsRegister);
            }

            if (!funcInfo->IsBodyAndParamScopeMerged())
            {
                if (paramScope->GetIsObject())
                {
                    paramScope->SetLocation(funcInfo->frameObjRegister);
                }
                else
                {
                    paramScope->SetLocation(funcInfo->frameSlotsRegister);
                }
            }

            if (bodyScope->GetIsObject())
            {
                // Win8 908700: Disable under F12 debugger because there are too many cached scopes holding onto locals.
                funcInfo->SetHasCachedScope(
                    !PHASE_OFF(Js::CachedScopePhase, funcInfo->byteCodeFunction) &&
                    !funcInfo->Escapes() &&
                    funcInfo->frameObjRegister != Js::Constants::NoRegister &&
                    !ApplyEnclosesArgs(pnodeFnc, this) &&
                    funcInfo->IsBodyAndParamScopeMerged() && // There is eval in the param scope
                    !pnodeFnc->HasDefaultArguments() &&
                    !pnodeFnc->HasDestructuredParams() &&
                    (PHASE_FORCE(Js::CachedScopePhase, funcInfo->byteCodeFunction) || !IsInDebugMode())
#if ENABLE_TTD
                    && !funcInfo->GetParsedFunctionBody()->GetScriptContext()->GetThreadContext()->IsRuntimeInTTDMode()
#endif
                );

                if (funcInfo->GetHasCachedScope())
                {
                    // A cached scope needs the function object in a register; reuse the func-expr
                    // symbol's register when it has one, otherwise allocate a fresh var register.
                    Assert(funcInfo->funcObjRegister == Js::Constants::NoRegister);
                    Symbol *funcSym = funcInfo->root->GetFuncSymbol();
                    if (funcSym && funcSym->GetIsFuncExpr())
                    {
                        if (funcSym->GetLocation() == Js::Constants::NoRegister)
                        {
                            funcInfo->funcObjRegister = funcInfo->NextVarRegister();
                        }
                        else
                        {
                            funcInfo->funcObjRegister = funcSym->GetLocation();
                        }
                    }
                    else
                    {
                        funcInfo->funcObjRegister = funcInfo->NextVarRegister();
                    }
                    Assert(funcInfo->funcObjRegister != Js::Constants::NoRegister);
                }

                ParseNode *pnode;
                Symbol *sym;

                if (funcInfo->GetHasArguments())
                {
                    // Process function's formal parameters
                    MapFormals(pnodeFnc, [&](ParseNode *pnode)
                    {
                        if (pnode->IsVarLetOrConst())
                        {
                            pnode->AsParseNodeVar()->sym->EnsureScopeSlot(this, funcInfo);
                        }
                    });

                    MapFormalsFromPattern(pnodeFnc, [&](ParseNode *pnode)
                    {
                        pnode->AsParseNodeVar()->sym->EnsureScopeSlot(this, funcInfo);
                    });

                    // Only allocate scope slot for "arguments" when really necessary. "hasDeferredChild"
                    // doesn't require scope slot for "arguments" because inner functions can't access
                    // outer function's arguments directly.
                    sym = funcInfo->GetArgumentsSymbol();
                    Assert(sym);
                    if (sym->NeedsSlotAlloc(this, funcInfo))
                    {
                        sym->EnsureScopeSlot(this, funcInfo);
                    }
                }

                sym = funcInfo->root->GetFuncSymbol();
                if (sym && sym->NeedsSlotAlloc(this, funcInfo))
                {
                    if (funcInfo->funcExprScope && funcInfo->funcExprScope->GetIsObject())
                    {
                        // The func-expr scope holds only the function name, so it always lives in slot 0.
                        sym->SetScopeSlot(0);
                    }
                    else if (funcInfo->GetFuncExprNameReference())
                    {
                        sym->EnsureScopeSlot(this, funcInfo);
                    }
                }

                if (!funcInfo->GetHasArguments())
                {
                    Symbol *formal;
                    Js::ArgSlot pos = 1;
                    auto moveArgToReg = [&](ParseNode *pnode)
                    {
                        if (pnode->IsVarLetOrConst())
                        {
                            formal = pnode->AsParseNodeVar()->sym;
                            // Get the param from its argument position into its assigned register.
                            // The position should match the location; otherwise, it has been shadowed by parameter with the same name.
                            if (formal->GetLocation() + 1 == pos)
                            {
                                pnode->AsParseNodeVar()->sym->EnsureScopeSlot(this, funcInfo);
                            }
                        }
                        pos++;
                    };
                    MapFormals(pnodeFnc, moveArgToReg);
                    MapFormalsFromPattern(pnodeFnc, [&](ParseNode *pnode)
                    {
                        pnode->AsParseNodeVar()->sym->EnsureScopeSlot(this, funcInfo);
                    });
                }

                // Give every body-scope var a slot (catch/block vars resolve to their body binding first).
                for (pnode = pnodeFnc->pnodeVars; pnode; pnode = pnode->AsParseNodeVar()->pnodeNext)
                {
                    sym = pnode->AsParseNodeVar()->sym;
                    if (!(pnode->AsParseNodeVar()->isBlockScopeFncDeclVar && sym->GetIsBlockVar()))
                    {
                        if (sym->GetIsCatch() || (pnode->nop == knopVarDecl && sym->GetIsBlockVar()))
                        {
                            sym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
                        }
                        if (sym->GetSymbolType() == STVariable && !sym->IsArguments())
                        {
                            sym->EnsureScopeSlot(this, funcInfo);
                        }
                    }
                }

                // Walk container scopes to give scope slots to function declarations; only the
                // immediate sibling lists are walked (nested scopes are skipped via pnodeNext).
                auto ensureFncDeclScopeSlots = [&](ParseNode *pnodeScope)
                {
                    for (pnode = pnodeScope; pnode;)
                    {
                        switch (pnode->nop)
                        {
                        case knopFncDecl:
                            if (pnode->AsParseNodeFnc()->IsDeclaration())
                            {
                                EnsureFncDeclScopeSlot(pnode->AsParseNodeFnc(), funcInfo);
                            }
                            pnode = pnode->AsParseNodeFnc()->pnodeNext;
                            break;
                        case knopBlock:
                            pnode = pnode->AsParseNodeBlock()->pnodeNext;
                            break;
                        case knopCatch:
                            pnode = pnode->AsParseNodeCatch()->pnodeNext;
                            break;
                        case knopWith:
                            pnode = pnode->AsParseNodeWith()->pnodeNext;
                            break;
                        }
                    }
                };
                pnodeFnc->MapContainerScopes(ensureFncDeclScopeSlots);

                if (pnodeFnc->pnodeBody)
                {
                    Assert(pnodeFnc->pnodeScopes->nop == knopBlock);
                    this->EnsureLetConstScopeSlots(pnodeFnc->pnodeBodyScope, funcInfo);
                }
            }
            else
            {
                // Non-object (slot-array) body scope: same slot allocation as above, but every
                // allocation is guarded by NeedsSlotAlloc since there is no dynamic lookup path.
                ParseNode *pnode;
                Symbol *sym;

                pnodeFnc->MapContainerScopes([&](ParseNode *pnodeScope) { this->EnsureFncScopeSlots(pnodeScope, funcInfo); });

                for (pnode = pnodeFnc->pnodeVars; pnode; pnode = pnode->AsParseNodeVar()->pnodeNext)
                {
                    sym = pnode->AsParseNodeVar()->sym;
                    if (!(pnode->AsParseNodeVar()->isBlockScopeFncDeclVar && sym->GetIsBlockVar()))
                    {
                        if (sym->GetIsCatch() || (pnode->nop == knopVarDecl && sym->GetIsBlockVar()))
                        {
                            sym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
                        }
                        if (sym->GetSymbolType() == STVariable && sym->NeedsSlotAlloc(this, funcInfo) && !sym->IsArguments())
                        {
                            sym->EnsureScopeSlot(this, funcInfo);
                        }
                    }
                }

                auto ensureScopeSlot = [&](ParseNode *pnode)
                {
                    if (pnode->IsVarLetOrConst())
                    {
                        sym = pnode->AsParseNodeVar()->sym;
                        if (sym->GetSymbolType() == STFormal && sym->NeedsSlotAlloc(this, funcInfo))
                        {
                            sym->EnsureScopeSlot(this, funcInfo);
                        }
                    }
                };
                // Process function's formal parameters
                MapFormals(pnodeFnc, ensureScopeSlot);
                MapFormalsFromPattern(pnodeFnc, ensureScopeSlot);

                if (funcInfo->GetHasArguments())
                {
                    sym = funcInfo->GetArgumentsSymbol();
                    Assert(sym);
                    // There is no eval so the arguments may be captured in a lambda.
                    // But we cannot relay on slots getting allocated while the lambda is emitted as the function body may be reparsed.
                    sym->EnsureScopeSlot(this, funcInfo);
                }

                if (pnodeFnc->pnodeBody)
                {
                    this->EnsureLetConstScopeSlots(pnodeFnc->pnodeScopes, funcInfo);
                    this->EnsureLetConstScopeSlots(pnodeFnc->pnodeBodyScope, funcInfo);
                }
            }

            // When we have split scope and body scope does not have any scope slots allocated, we don't have to mark the body scope as mustinstantiate.
            if (funcInfo->frameObjRegister != Js::Constants::NoRegister)
            {
                bodyScope->SetMustInstantiate(true);
            }
            else if (pnodeFnc->IsBodyAndParamScopeMerged() || bodyScope->GetScopeSlotCount() != 0)
            {
                bodyScope->SetMustInstantiate(funcInfo->frameSlotsRegister != Js::Constants::NoRegister);
            }

            if (!pnodeFnc->IsBodyAndParamScopeMerged())
            {
                if (funcInfo->frameObjRegister != Js::Constants::NoRegister)
                {
                    paramScope->SetMustInstantiate(true);
                }
                else
                {
                    // In the case of function expression being captured in the param scope the hasownlocalinclosure will be false for param scope,
                    // as function expression symbol stays in the function expression scope. We don't have to set mustinstantiate for param scope in that case.
                    paramScope->SetMustInstantiate(paramScope->GetHasOwnLocalInClosure());
                }
            }
        }
        else
        {
            // Global code (knopProg): strict-mode eval gets its own scope object.
            bool newScopeForEval = (funcInfo->byteCodeFunction->GetIsStrictMode() && (this->GetFlags() & fscrEval));

            if (newScopeForEval)
            {
                Assert(bodyScope->GetIsObject());
            }
        }
    }

    if (!funcInfo->IsBodyAndParamScopeMerged())
    {
        ParseNodeBlock * paramBlock = pnodeFnc->pnodeScopes;
        Assert(paramBlock->blockType == Parameter);

        PushScope(paramScope);

        // While emitting the functions we have to stop when we see the body scope block.
        // Otherwise functions defined in the body scope will not be able to get the right references.
        this->EmitScopeList(paramBlock->pnodeScopes, pnodeFnc->pnodeBodyScope);
        Assert(this->GetCurrentScope() == paramScope);
    }

    PushScope(bodyScope);
}

// Emits a module-slot load/store for a module export symbol. If the slot cannot be resolved
// (unresolvable import), emits a runtime reference error instead; a failed load still has to
// produce a value, so the destination register is set to undefined.
void ByteCodeGenerator::EmitModuleExportAccess(Symbol* sym, Js::OpCode opcode, Js::RegSlot location, FuncInfo* funcInfo)
{
    if (EnsureSymbolModuleSlots(sym, funcInfo))
    {
        this->Writer()->SlotI2(opcode, location, sym->GetModuleIndex(), sym->GetScopeSlot());
    }
    else
    {
        this->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(ERRInvalidExportName));

        if (opcode == Js::OpCode::LdModuleSlot)
        {
            this->Writer()->Reg1(Js::OpCode::LdUndef, location);
        }
    }
}

// Resolves and caches the (moduleIndex, scopeSlot) pair for a module export-storage symbol.
// Returns false only when an import cannot be resolved to a binding in the source module.
bool ByteCodeGenerator::EnsureSymbolModuleSlots(Symbol* sym, FuncInfo* funcInfo)
{
    Assert(sym->GetIsModuleExportStorage());

    // Already resolved on a previous access.
    if (sym->GetModuleIndex() != Js::Constants::NoProperty && sym->GetScopeSlot() != Js::Constants::NoProperty)
    {
        return true;
    }

    Js::JavascriptLibrary* library = this->GetScriptContext()->GetLibrary();
    library->EnsureModuleRecordList();
    uint moduleIndex = this->GetModuleID();
    uint moduleSlotIndex;
    Js::SourceTextModuleRecord* moduleRecord = library->GetModuleRecord(moduleIndex);

    if (sym->GetIsModuleImport())
    {
        // Imports resolve through the exporting module's record and use that module's index/slot.
        Js::PropertyId localImportNameId = sym->EnsurePosition(funcInfo);
        Js::ModuleNameRecord* moduleNameRecord = nullptr;
        if (!moduleRecord->ResolveImport(localImportNameId, &moduleNameRecord))
        {
            return false;
        }
        AnalysisAssert(moduleNameRecord != nullptr);
        Assert(moduleNameRecord->module->IsSourceTextModuleRecord());
        Js::SourceTextModuleRecord* resolvedModuleRecord = (Js::SourceTextModuleRecord*)PointerValue(moduleNameRecord->module);
        moduleIndex = resolvedModuleRecord->GetModuleId();
        moduleSlotIndex = resolvedModuleRecord->GetLocalExportSlotIndexByLocalName(moduleNameRecord->bindingName);
    }
    else
    {
        // Local export: the slot lives in the current module record.
        Js::PropertyId exportNameId = sym->EnsurePosition(funcInfo);
        moduleSlotIndex = moduleRecord->GetLocalExportSlotIndexByLocalName(exportNameId);
    }

    sym->SetModuleIndex(moduleIndex);
    sym->SetScopeSlot(moduleSlotIndex);
    return true;
}

void
ByteCodeGenerator::EmitAssignmentToDefaultModuleExport(ParseNode* pnode, FuncInfo* funcInfo)
{
    // We are assigning pnode to the default export of the current module.
    uint moduleIndex = this->GetModuleID();

    Js::JavascriptLibrary* library = this->GetScriptContext()->GetLibrary();
    library->EnsureModuleRecordList();
    Js::SourceTextModuleRecord* moduleRecord = library->GetModuleRecord(moduleIndex);

    uint moduleSlotIndex = moduleRecord->GetLocalExportSlotIndexByExportName(Js::PropertyIds::default_);

    this->Writer()->SlotI2(Js::OpCode::StModuleSlot, pnode->location, moduleIndex, moduleSlotIndex);
}

// Gives scope slots to a block's let/const declarations. When the block is exposed to eval,
// every declaration gets a slot (eval can reference anything); otherwise only symbols that
// actually need slot allocation.
void ByteCodeGenerator::EnsureLetConstScopeSlots(ParseNodeBlock *pnodeBlock, FuncInfo *funcInfo)
{
    bool callsEval = pnodeBlock->GetCallsEval() || pnodeBlock->GetChildCallsEval();
    auto ensureLetConstSlots = ([this, funcInfo, callsEval](ParseNode *pnode)
    {
        Symbol *sym = pnode->AsParseNodeVar()->sym;
        if (callsEval || sym->NeedsSlotAlloc(this, funcInfo))
        {
            sym->EnsureScopeSlot(this, funcInfo);
            this->ProcessCapturedSym(sym);
        }
    });
    IterateBlockScopedVariables(pnodeBlock, ensureLetConstSlots);
}

// Walks a sibling list of scope nodes and gives scope slots to function declarations that need
// them (via CheckFncDeclScopeSlot). Non-function scope nodes are skipped over, not descended into.
void ByteCodeGenerator::EnsureFncScopeSlots(ParseNode *pnode, FuncInfo *funcInfo)
{
    while (pnode)
    {
        switch (pnode->nop)
        {
        case knopFncDecl:
            if (pnode->AsParseNodeFnc()->IsDeclaration())
            {
                CheckFncDeclScopeSlot(pnode->AsParseNodeFnc(), funcInfo);
            }
            pnode = pnode->AsParseNodeFnc()->pnodeNext;
            break;
        case knopBlock:
            pnode = pnode->AsParseNodeBlock()->pnodeNext;
            break;
        case knopCatch:
            pnode = pnode->AsParseNodeCatch()->pnodeNext;
            break;
        case knopWith:
            pnode = pnode->AsParseNodeWith()->pnodeNext;
            break;
        }
    }
}

// Unwinds what StartEmitFunction pushed: body scope, param scope (when split), and the
// func-expr scope (when instantiated), then pops the FuncInfo stack.
void ByteCodeGenerator::EndEmitFunction(ParseNodeFnc *pnodeFnc)
{
    Assert(pnodeFnc->nop == knopFncDecl || pnodeFnc->nop == knopProg);
    Assert(pnodeFnc->nop == knopFncDecl && currentScope->GetEnclosingScope() != nullptr || pnodeFnc->nop == knopProg);

    PopScope(); // function body

    FuncInfo *funcInfo = pnodeFnc->funcInfo;

    Scope* paramScope = funcInfo->paramScope;
    if (!funcInfo->IsBodyAndParamScopeMerged())
    {
        Assert(this->GetCurrentScope() == paramScope);
        PopScope(); // Pop the param scope
    }

    if (funcInfo->byteCodeFunction->IsFunctionParsed() && funcInfo->root->pnodeBody != nullptr)
    {
        // StartEmitFunction omits the matching PushScope for already-parsed functions.
        // TODO: Refactor Start and EndEmitFunction for clarity.
        Scope *scope = funcInfo->funcExprScope;
        if (scope && scope->GetMustInstantiate())
        {
            Assert(currentScope == scope);
            PopScope();
        }
    }

    Assert(funcInfo == this->TopFuncInfo());
    PopFuncInfo(_u("EndEmitFunction"));
}

// Prepares a catch clause's scope for emission: marks it as an object scope when dynamic lookup
// is possible, allocates slots for the catch binding(s), and pushes the scope.
// Paired with EndEmitCatch.
void ByteCodeGenerator::StartEmitCatch(ParseNodeCatch *pnodeCatch)
{
    Assert(pnodeCatch->nop == knopCatch);

    Scope *scope = pnodeCatch->scope;
    FuncInfo *funcInfo = scope->GetFunc();

    // Catch scope is a dynamic object if it can be passed to a scoped lookup helper (i.e., eval is present or we're in an event handler).
    if (funcInfo->GetCallsEval() || funcInfo->GetChildCallsEval() || (this->flags & (fscrEval | fscrImplicitThis)))
    {
        scope->SetIsObject();
    }

    ParseNode * pnodeParam = pnodeCatch->GetParam();
    if (pnodeParam->nop == knopParamPattern)
    {
        // Destructured catch parameter: potentially multiple bindings in the catch scope.
        scope->SetCapturesAll(funcInfo->GetCallsEval() || funcInfo->GetChildCallsEval());
        scope->SetMustInstantiate(scope->Count() > 0 && (scope->GetMustInstantiate() || scope->GetCapturesAll() || funcInfo->IsGlobalFunction()));

        Parser::MapBindIdentifier(pnodeParam->AsParseNodeParamPattern()->pnode1, [&](ParseNodePtr item)
        {
            Symbol *sym = item->AsParseNodeVar()->sym;
            if (funcInfo->IsGlobalFunction())
            {
                sym->SetIsGlobalCatch(true);
            }
            Assert(sym->GetScopeSlot() == Js::Constants::NoProperty);
            if (sym->NeedsSlotAlloc(this, funcInfo))
            {
                sym->EnsureScopeSlot(this, funcInfo);
            }
        });

        // In the case of pattern we will always going to push the scope.
        PushScope(scope);
    }
    else
    {
        // Simple identifier catch parameter: a single symbol in the catch scope.
        Symbol *sym = pnodeParam->AsParseNodeName()->sym;

        // Catch object is stored in the catch scope if there may be an ambiguous lookup or a var declaration that hides it.
        scope->SetCapturesAll(funcInfo->GetCallsEval() || funcInfo->GetChildCallsEval() || sym->GetHasNonLocalReference());
        scope->SetMustInstantiate(scope->GetCapturesAll() || funcInfo->IsGlobalFunction());

        if (funcInfo->IsGlobalFunction())
        {
            sym->SetIsGlobalCatch(true);
        }

        if (scope->GetMustInstantiate())
        {
            if (sym->IsInSlot(this, funcInfo))
            {
                // Since there is only one symbol we are pushing to slot.
                // Also in order to make IsInSlot to return true - forcing the sym-has-non-local-reference.
                this->ProcessCapturedSym(sym);
                sym->EnsureScopeSlot(this, funcInfo);
            }
        }

        PushScope(scope);
    }
}

// Pops the catch scope pushed by StartEmitCatch.
void ByteCodeGenerator::EndEmitCatch(ParseNodeCatch *pnodeCatch)
{
    Assert(pnodeCatch->nop == knopCatch);
    Assert(currentScope == pnodeCatch->scope);
    PopScope();
}

// Prepares a block's scope for emission: pushes the block, ensures scope slots for function
// declarations and let/const bindings, and pushes the scope when it must be instantiated.
// Paired with EndEmitBlock. Blocks without their own scope are a no-op.
void ByteCodeGenerator::StartEmitBlock(ParseNodeBlock *pnodeBlock)
{
    if (!BlockHasOwnScope(pnodeBlock, this))
    {
        return;
    }

    Assert(pnodeBlock->nop == knopBlock);

    PushBlock(pnodeBlock);

    Scope *scope = pnodeBlock->scope;
    if (pnodeBlock->GetCallsEval() || pnodeBlock->GetChildCallsEval() || (this->flags & (fscrEval | fscrImplicitThis)))
    {
        // Eval may be able to look into this scope dynamically, so it must be an object scope.
        Assert(scope->GetIsObject());
    }

    // TODO: Consider nested deferred parsing.
    if (scope->GetMustInstantiate())
    {
        FuncInfo *funcInfo = scope->GetFunc();
        this->EnsureFncScopeSlots(pnodeBlock->pnodeScopes, funcInfo);
        this->EnsureLetConstScopeSlots(pnodeBlock, funcInfo);
        PushScope(scope);
    }
}

// Pops what StartEmitBlock pushed (scope only if it was instantiated, then the block).
void ByteCodeGenerator::EndEmitBlock(ParseNodeBlock *pnodeBlock)
{
    if (!BlockHasOwnScope(pnodeBlock, this))
    {
        return;
    }

    Assert(pnodeBlock->nop == knopBlock);

    Scope *scope = pnodeBlock->scope;
    if (scope && scope->GetMustInstantiate())
    {
        Assert(currentScope == pnodeBlock->scope);
        PopScope();
    }

    PopBlock();
}

// Pushes a with statement's scope; with scopes are always object scopes.
void ByteCodeGenerator::StartEmitWith(ParseNode *pnodeWith)
{
    Assert(pnodeWith->nop == knopWith);

    Scope *scope = pnodeWith->AsParseNodeWith()->scope;

    AssertOrFailFast(scope->GetIsObject());

    PushScope(scope);
}

// Pops the with scope pushed by StartEmitWith.
void ByteCodeGenerator::EndEmitWith(ParseNode *pnodeWith)
{
    Assert(pnodeWith->nop == knopWith);
    Assert(currentScope == pnodeWith->AsParseNodeWith()->scope);

    PopScope();
}

// Builds a frame display for eval (or a function declared inside with/catch) by prepending any
// instantiated with/catch scopes between the current scope and the function scope onto evalEnv.
// Returns the register holding the resulting environment (evalEnv itself if nothing was prepended).
// tempLoc may be NoRegister, in which case a temp is acquired and released internally.
Js::RegSlot ByteCodeGenerator::PrependLocalScopes(Js::RegSlot evalEnv, Js::RegSlot tempLoc, FuncInfo *funcInfo)
{
    Scope *currScope = this->currentScope;
    Scope *funcScope = funcInfo->GetCurrentChildScope() ? funcInfo->GetCurrentChildScope() : funcInfo->GetBodyScope();

    if (currScope == funcScope)
    {
        return evalEnv;
    }

    bool acquireTempLoc = tempLoc == Js::Constants::NoRegister;
    if (acquireTempLoc)
    {
        tempLoc = funcInfo->AcquireTmpRegister();
    }

    // The with/catch objects must be prepended to the environment we pass to eval() or to a func declared inside with,
    // but the list must first be reversed so that innermost scopes appear first in the list.
    while (currScope != funcScope)
    {
        // Find the outermost scope (closest to funcScope) not yet processed; this reverses the
        // chain so that inner scopes end up first in the resulting frame display.
        Scope *innerScope;
        for (innerScope = currScope; innerScope->GetEnclosingScope() != funcScope; innerScope = innerScope->GetEnclosingScope())
            ;
        if (innerScope->GetMustInstantiate())
        {
            if (!innerScope->HasInnerScopeIndex())
            {
                // The NoParent forms start a fresh display when evalEnv is still the function's own env.
                if (evalEnv == funcInfo->GetEnvRegister() || evalEnv == funcInfo->frameDisplayRegister)
                {
                    this->m_writer.Reg2(Js::OpCode::LdInnerFrameDisplayNoParent, tempLoc, innerScope->GetLocation());
                }
                else
                {
                    this->m_writer.Reg3(Js::OpCode::LdInnerFrameDisplay, tempLoc, innerScope->GetLocation(), evalEnv);
                }
            }
            else
            {
                // Scope addressed by inner-scope index rather than a register location.
                if (evalEnv == funcInfo->GetEnvRegister() || evalEnv == funcInfo->frameDisplayRegister)
                {
                    this->m_writer.Reg1Unsigned1(Js::OpCode::LdIndexedFrameDisplayNoParent, tempLoc, innerScope->GetInnerScopeIndex());
                }
                else
                {
                    this->m_writer.Reg2Int1(Js::OpCode::LdIndexedFrameDisplay, tempLoc, evalEnv, innerScope->GetInnerScopeIndex());
                }
            }
            evalEnv = tempLoc;
        }
        funcScope = innerScope;
    }

    if (acquireTempLoc)
    {
        funcInfo->ReleaseTmpRegister(tempLoc);
    }
    return evalEnv;
}

// Emits code to locate the instance (and "this") for a named callee that may live in a dynamic
// scope (with/catch/eval). Outputs through pThisLocation/pInstLocation.
void ByteCodeGenerator::EmitLoadInstance(Symbol *sym, IdentPtr pid, Js::RegSlot *pThisLocation, Js::RegSlot *pInstLocation, FuncInfo *funcInfo)
{
    Js::ByteCodeLabel doneLabel = 0;
    bool fLabelDefined = false;
    Js::RegSlot scopeLocation = Js::Constants::NoRegister;
    Js::RegSlot thisLocation = *pThisLocation;
    Js::RegSlot instLocation = *pInstLocation;
    Js::PropertyId envIndex = -1;
    Scope *scope = nullptr;
    Scope *symScope = sym ? sym->GetScope() : this->globalScope;
    Assert(symScope);

    if (sym != nullptr && sym->GetIsModuleExportStorage())
    {
        // Module exports have no runtime instance object.
        *pInstLocation = Js::Constants::NoRegister;
        return;
    }

    for (;;)
    {
        scope = this->FindScopeForSym(symScope, scope, &envIndex, funcInfo);
        if (scope == this->globalScope)
        {
            break;
        }

        if (scope != symScope)
        {
            // We're not sure where the function is (eval/with/etc).
            // So we're going to need registers to hold the instance where we (dynamically) find
            // the function, and possibly to hold the "this" pointer we will pass to it.
// Assign them here so that they can't overlap with the scopeLocation assigned below. // Otherwise we wind up with temp lifetime confusion in the IRBuilder. (Win8 281689) if (instLocation == Js::Constants::NoRegister) { instLocation = funcInfo->AcquireTmpRegister(); // The "this" pointer will not be the same as the instance, so give it its own register. thisLocation = funcInfo->AcquireTmpRegister(); } } if (envIndex == -1) { Assert(funcInfo == scope->GetFunc()); scopeLocation = scope->GetLocation(); } if (scope == symScope) { break; } // Found a scope to which the property may have been added. Assert(scope && scope->GetIsDynamic()); if (!fLabelDefined) { fLabelDefined = true; doneLabel = this->m_writer.DefineLabel(); } Js::ByteCodeLabel nextLabel = this->m_writer.DefineLabel(); Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId(); bool unwrapWithObj = scope->GetScopeType() == ScopeType_With && scriptContext->GetConfig()->IsES6UnscopablesEnabled(); if (envIndex != -1) { this->m_writer.BrEnvProperty( Js::OpCode::BrOnNoEnvProperty, nextLabel, funcInfo->FindOrAddReferencedPropertyId(propertyId), envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var)); Js::RegSlot tmpReg = funcInfo->AcquireTmpRegister(); AssertOrFailFast(scope->GetIsObject()); this->m_writer.SlotI1(Js::OpCode::LdEnvObj, tmpReg, envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var)); Js::OpCode op = unwrapWithObj ? 
Js::OpCode::UnwrapWithObj : Js::OpCode::Ld_A;

this->m_writer.Reg2(op, instLocation, tmpReg);
if (thisLocation != Js::Constants::NoRegister)
{
    // "this" gets the same (possibly unwrapped) object.
    this->m_writer.Reg2(op, thisLocation, tmpReg);
}

funcInfo->ReleaseTmpRegister(tmpReg);
}
else if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister)
{
    // Scope is the local frame object: dedicated local-property opcodes apply.
    this->m_writer.BrLocalProperty(Js::OpCode::BrOnNoLocalProperty, nextLabel,
                                   funcInfo->FindOrAddReferencedPropertyId(propertyId));

    Assert(!unwrapWithObj);
    AssertOrFailFast(scope->GetIsObject());
    this->m_writer.Reg1(Js::OpCode::LdLocalObj, instLocation);
    if (thisLocation != Js::Constants::NoRegister)
    {
        this->m_writer.Reg1(Js::OpCode::LdLocalObj, thisLocation);
    }
}
else
{
    // Scope object is in a plain register; probe it for the property.
    this->m_writer.BrProperty(Js::OpCode::BrOnNoProperty, nextLabel, scopeLocation,
                              funcInfo->FindOrAddReferencedPropertyId(propertyId));

    Js::OpCode op = unwrapWithObj ? Js::OpCode::UnwrapWithObj : Js::OpCode::Ld_A;
    this->m_writer.Reg2(op, instLocation, scopeLocation);
    if (thisLocation != Js::Constants::NoRegister)
    {
        this->m_writer.Reg2(op, thisLocation, scopeLocation);
    }
}

// Property found in this dynamic scope: skip remaining candidates.
this->m_writer.Br(doneLabel);
this->m_writer.MarkLabel(nextLabel);
}

if (sym == nullptr || sym->GetIsGlobal())
{
    if (this->flags & (fscrEval | fscrImplicitThis))
    {
        // Load of a symbol with unknown scope from within eval.
        // Get it from the closure environment.
        if (instLocation == Js::Constants::NoRegister)
        {
            instLocation = funcInfo->AcquireTmpRegister();
        }

        // TODO: It should be possible to avoid this double call to ScopedLdInst by having it return both
        // results at once. The reason for the uncertainty here is that we don't know whether the callee
        // belongs to a "with" object. If it does, we have to pass the "with" object as "this"; in all other
        // cases, we pass "undefined". For now, there are apparently no significant performance issues.
        Js::PropertyId propertyId = sym ?
sym->EnsurePosition(this) : pid->GetPropertyId();

if (thisLocation == Js::Constants::NoRegister)
{
    thisLocation = funcInfo->AcquireTmpRegister();
}

// ScopedLdInst yields both the instance and the "this" value to pass.
this->m_writer.ScopedProperty2(Js::OpCode::ScopedLdInst, instLocation,
                               funcInfo->FindOrAddReferencedPropertyId(propertyId), thisLocation);
}
else
{
    // Global, non-eval case: the instance is the root object and "this" is undefined.
    if (instLocation == Js::Constants::NoRegister)
    {
        instLocation = ByteCodeGenerator::RootObjectRegister;
    }
    else
    {
        this->m_writer.Reg2(Js::OpCode::Ld_A, instLocation, ByteCodeGenerator::RootObjectRegister);
    }

    if (thisLocation == Js::Constants::NoRegister)
    {
        thisLocation = funcInfo->undefinedConstantRegister;
    }
    else
    {
        this->m_writer.Reg2(Js::OpCode::Ld_A, thisLocation, funcInfo->undefinedConstantRegister);
    }
}
}
else if (instLocation != Js::Constants::NoRegister)
{
    // Statically resolved symbol: load its scope object as the instance.
    if (envIndex != -1)
    {
        AssertOrFailFast(scope->GetIsObject());
        this->m_writer.SlotI1(Js::OpCode::LdEnvObj, instLocation,
                              envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));
    }
    else if (scope->HasInnerScopeIndex())
    {
        this->m_writer.Reg1Unsigned1(Js::OpCode::LdInnerScope, instLocation, scope->GetInnerScopeIndex());
    }
    else if (symScope == funcInfo->GetParamScope())
    {
        // Unmerged param scope of a split-scope function has its own object.
        Assert(funcInfo->frameObjRegister != Js::Constants::NoRegister && !funcInfo->IsBodyAndParamScopeMerged());
        this->m_writer.Reg1(Js::OpCode::LdParamObj, instLocation);
    }
    else if (symScope != funcInfo->GetBodyScope())
    {
        this->m_writer.Reg2(Js::OpCode::Ld_A, instLocation, scopeLocation);
    }
    else
    {
        Assert(funcInfo->frameObjRegister != Js::Constants::NoRegister);
        this->m_writer.Reg1(Js::OpCode::LdLocalObj, instLocation);
    }

    // For a statically known scope, "this" is always undefined.
    if (thisLocation != Js::Constants::NoRegister)
    {
        this->m_writer.Reg2(Js::OpCode::Ld_A, thisLocation, funcInfo->undefinedConstantRegister);
    }
    else
    {
        thisLocation = funcInfo->undefinedConstantRegister;
    }
}

*pThisLocation = thisLocation;
*pInstLocation = instLocation;

if (fLabelDefined)
{
    this->m_writer.MarkLabel(doneLabel);
}
}

void ByteCodeGenerator::EmitGlobalFncDeclInit(Js::RegSlot rhsLocation, Js::PropertyId propertyId, FuncInfo
* funcInfo)
{
    // Note: declared variables and assignments in the global function go to the root object directly.
    if (this->flags & fscrEval)
    {
        // Func decl's always get their init values before any use, so we don't pre-initialize the property to undef.
        // That means that we have to use ScopedInitFld so that we initialize the property on the right instance
        // even if the instance doesn't have the property yet (i.e., collapse the init-to-undef and the store
        // into one operation). See WOOB 1121763 and 1120973.
        this->m_writer.ScopedProperty(Js::OpCode::ScopedInitFunc, rhsLocation,
                                      funcInfo->FindOrAddReferencedPropertyId(propertyId));
    }
    else
    {
        this->EmitPatchableRootProperty(Js::OpCode::InitRootFld, rhsLocation, propertyId, false, true, funcInfo);
    }
}

// Emit a root-object property access with an inline cache slot allocated
// (or reused) for the given property/access-kind combination.
void ByteCodeGenerator::EmitPatchableRootProperty(Js::OpCode opcode,
                                                 Js::RegSlot regSlot, Js::PropertyId propertyId, bool isLoadMethod, bool isStore, FuncInfo * funcInfo)
{
    uint cacheId = funcInfo->FindOrAddRootObjectInlineCacheId(propertyId, isLoadMethod, isStore);
    this->m_writer.PatchableRootProperty(opcode, regSlot, cacheId, isLoadMethod, isStore);
}

// Emit the initializing store of a local symbol in its own defining scope,
// writing to the scope object / slot array as needed and also to the
// symbol's home register when it has one.
void ByteCodeGenerator::EmitLocalPropInit(Js::RegSlot rhsLocation, Symbol *sym, FuncInfo *funcInfo)
{
    Scope *scope = sym->GetScope();

    // Check consistency of sym->IsInSlot.
    Assert(sym->NeedsSlotAlloc(this, funcInfo) || sym->GetScopeSlot() == Js::Constants::NoProperty);

    // Arrived at the scope in which the property was defined.
    if (sym->NeedsSlotAlloc(this, funcInfo))
    {
        // The property is in memory rather than register. We'll have to load it from the slots.
if (scope->GetIsObject())
{
    // Object scope: initialize the property on the scope object through an
    // inline cache, choosing the opcode form by how the object is addressed.
    Assert(!this->TopFuncInfo()->GetParsedFunctionBody()->DoStackNestedFunc());
    Js::PropertyId propertyId = sym->EnsurePosition(this);

    Js::RegSlot objReg;
    if (scope->HasInnerScopeIndex())
    {
        objReg = funcInfo->InnerScopeToRegSlot(scope);
    }
    else
    {
        objReg = scope->GetLocation();
    }

    uint cacheId = funcInfo->FindOrAddInlineCacheId(objReg, propertyId, false, true);
    Js::OpCode op = this->GetInitFldOp(scope, objReg, funcInfo, sym->GetIsNonSimpleParameter());
    if (objReg != Js::Constants::NoRegister && objReg == funcInfo->frameObjRegister)
    {
        this->m_writer.ElementP(op, rhsLocation, cacheId);
    }
    else if (scope->HasInnerScopeIndex())
    {
        this->m_writer.ElementPIndexed(op, rhsLocation, scope->GetInnerScopeIndex(), cacheId);
    }
    else
    {
        this->m_writer.PatchableProperty(op, rhsLocation, scope->GetLocation(), cacheId);
    }
}
else
{
    // Make sure the property has a slot. This will bump up the size of the slot array if necessary.
    Js::PropertyId slot = sym->EnsureScopeSlot(this, funcInfo);
    Js::RegSlot slotReg = scope->GetCanMerge() ? funcInfo->frameSlotsRegister : scope->GetLocation();
    // Now store the property to its slot.
Js::OpCode op = this->GetStSlotOp(scope, -1, slotReg, false, funcInfo);

if (slotReg != Js::Constants::NoRegister && slotReg == funcInfo->frameSlotsRegister)
{
    // FirstSlotIndex skips the slot array's internal header entries.
    this->m_writer.SlotI1(op, rhsLocation, slot + Js::ScopeSlots::FirstSlotIndex);
}
else
{
    this->m_writer.SlotI2(op, rhsLocation, scope->GetInnerScopeIndex(), slot + Js::ScopeSlots::FirstSlotIndex);
}
}
}

if (sym->GetLocation() != Js::Constants::NoRegister && rhsLocation != sym->GetLocation())
{
    // Symbol also has a home register: keep it in sync with the slot store.
    this->m_writer.Reg2(Js::OpCode::Ld_A, sym->GetLocation(), rhsLocation);
}
}

// Select the store-to-slot opcode for a symbol's scope, based on where the
// scope lives (closure environment, local frame slots/object, or an indexed
// inner scope) and whether a block-scoped undecl check is required.
// @param envIndex    index into the frame display, or -1 if the scope is local
// @param chkBlockVar true to use the ChkUndecl variant (TDZ check on store)
Js::OpCode ByteCodeGenerator::GetStSlotOp(Scope *scope, int envIndex, Js::RegSlot scopeLocation, bool chkBlockVar, FuncInfo *funcInfo)
{
    Js::OpCode op;

    if (envIndex != -1)
    {
        if (scope->GetIsObject())
        {
            op = Js::OpCode::StEnvObjSlot;
        }
        else
        {
            op = Js::OpCode::StEnvSlot;
        }
    }
    else if (scopeLocation != Js::Constants::NoRegister &&
             scopeLocation == funcInfo->frameSlotsRegister)
    {
        if (scope->GetScopeType() == ScopeType_Parameter
            && scope != scope->GetFunc()->GetCurrentChildScope())
        {
            // Symbol is from the param scope of a split scope function and we are emitting the body.
            // We should use the param scope's bytecode now.
            Assert(!funcInfo->IsBodyAndParamScopeMerged());
            op = Js::OpCode::StParamSlot;
        }
        else
        {
            op = Js::OpCode::StLocalSlot;
        }
    }
    else if (scopeLocation != Js::Constants::NoRegister &&
             scopeLocation == funcInfo->frameObjRegister)
    {
        if (scope->GetScopeType() == ScopeType_Parameter
            && scope != scope->GetFunc()->GetCurrentChildScope())
        {
            // Symbol is from the param scope of a split scope function and we are emitting the body.
            // We should use the param scope's bytecode now.
Assert(!funcInfo->IsBodyAndParamScopeMerged());
op = Js::OpCode::StParamObjSlot;
}
else
{
    op = Js::OpCode::StLocalObjSlot;
}
}
else
{
    // Scope is neither env-resident nor the local frame: it must be an
    // indexed inner scope.
    Assert(scope->HasInnerScopeIndex());
    if (scope->GetIsObject())
    {
        op = Js::OpCode::StInnerObjSlot;
    }
    else
    {
        op = Js::OpCode::StInnerSlot;
    }
}

if (chkBlockVar)
{
    // Switch to the variant that throws on a store to an undeclared
    // (not-yet-initialized) block-scoped variable.
    op = this->ToChkUndeclOp(op);
}

return op;
}

// Select the init-field opcode for initializing a property on a scope
// object, distinguishing local frame object, indexed inner scope, and
// generic object scope, and let vs. var declarations.
Js::OpCode ByteCodeGenerator::GetInitFldOp(Scope *scope, Js::RegSlot scopeLocation, FuncInfo *funcInfo, bool letDecl)
{
    Js::OpCode op;

    if (scopeLocation != Js::Constants::NoRegister &&
        scopeLocation == funcInfo->frameObjRegister)
    {
        op = letDecl ? Js::OpCode::InitLocalLetFld : Js::OpCode::InitLocalFld;
    }
    else if (scope->HasInnerScopeIndex())
    {
        op = letDecl ? Js::OpCode::InitInnerLetFld : Js::OpCode::InitInnerFld;
    }
    else
    {
        op = letDecl ? Js::OpCode::InitLetFld : Js::OpCode::InitFld;
    }

    return op;
}

// Emit a store of rhsLocation into the symbol (or, for an unresolved name,
// the property named by pid), walking enclosing scopes and emitting dynamic
// probes for with/eval scopes along the way. Handles const reassignment
// errors, TDZ checks, module exports, and global/root-object stores.
void ByteCodeGenerator::EmitPropStore(Js::RegSlot rhsLocation, Symbol *sym, IdentPtr pid, FuncInfo *funcInfo, bool isLetDecl, bool isConstDecl, bool isFncDeclVar, bool skipUseBeforeDeclarationCheck)
{
    Js::ByteCodeLabel doneLabel = 0;
    bool fLabelDefined = false;
    Js::PropertyId envIndex = -1;
    Scope *symScope = sym == nullptr || sym->GetIsGlobal() ? this->globalScope : sym->GetScope();
    Assert(symScope);
    // isFncDeclVar denotes that the symbol being stored to here is the var
    // binding of a function declaration and we know we want to store directly
    // to it, skipping over any dynamic scopes that may lie in between.
Scope *scope = nullptr;
Js::RegSlot scopeLocation = Js::Constants::NoRegister;
bool scopeAcquired = false;
Js::OpCode op;

if (sym && sym->GetIsModuleExportStorage())
{
    // Module export: store through the module slot. A store to a const
    // binding (detected via the declaring node) is a runtime type error.
    if (!isConstDecl && sym->GetDecl() && sym->GetDecl()->nop == knopConstDecl)
    {
        this->m_writer.W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(ERRAssignmentToConst));
    }

    EmitModuleExportAccess(sym, Js::OpCode::StModuleSlot, rhsLocation, funcInfo);
    return;
}

if (isFncDeclVar)
{
    // async functions allow for the fncDeclVar to be in the body or parameter scope
    // of the parent function, so we need to calculate envIndex in lieu of the while
    // loop below.
    do
    {
        scope = this->FindScopeForSym(symScope, scope, &envIndex, funcInfo);
    } while (scope != symScope);
    Assert(scope == symScope);
    scopeLocation = scope->GetLocation();
}

while (!isFncDeclVar)
{
    scope = this->FindScopeForSym(symScope, scope, &envIndex, funcInfo);
    if (scope == this->globalScope)
    {
        break;
    }
    if (envIndex == -1)
    {
        Assert(funcInfo == scope->GetFunc());
        scopeLocation = scope->GetLocation();
    }
    if (scope == symScope)
    {
        // Reached the defining scope; fall through to the direct store below.
        break;
    }

    // Found a scope to which the property may have been added.
    Assert(scope && scope->GetIsDynamic());

    if (!fLabelDefined)
    {
        // Lazily create the join label shared by all dynamic-scope hits.
        fLabelDefined = true;
        doneLabel = this->m_writer.DefineLabel();
    }

    Js::ByteCodeLabel nextLabel = this->m_writer.DefineLabel();
    Js::PropertyId propertyId = sym ?
sym->EnsurePosition(this) : pid->GetPropertyId();

Js::RegSlot unwrappedScopeLocation = scopeLocation;
// `with` objects must be unwrapped before use when ES6 unscopables are enabled.
bool unwrapWithObj = scope->GetScopeType() == ScopeType_With && scriptContext->GetConfig()->IsES6UnscopablesEnabled();
if (envIndex != -1)
{
    // Scope lives in the closure environment: branch past this candidate
    // if the property is absent, otherwise store through an inline cache.
    this->m_writer.BrEnvProperty(
        Js::OpCode::BrOnNoEnvProperty,
        nextLabel,
        funcInfo->FindOrAddReferencedPropertyId(propertyId),
        envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));

    Js::RegSlot instLocation = funcInfo->AcquireTmpRegister();

    AssertOrFailFast(scope->GetIsObject());
    this->m_writer.SlotI1(
        Js::OpCode::LdEnvObj,
        instLocation,
        envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));

    if (unwrapWithObj)
    {
        this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, instLocation, instLocation);
    }

    this->m_writer.PatchableProperty(
        Js::OpCode::StFld,
        rhsLocation,
        instLocation,
        funcInfo->FindOrAddInlineCacheId(instLocation, propertyId, false, true));

    funcInfo->ReleaseTmpRegister(instLocation);
}
else if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister)
{
    // Scope is the local frame object: dedicated local-property opcodes apply.
    this->m_writer.BrLocalProperty(Js::OpCode::BrOnNoLocalProperty, nextLabel,
                                   funcInfo->FindOrAddReferencedPropertyId(propertyId));

    Assert(!unwrapWithObj);
    this->m_writer.ElementP(Js::OpCode::StLocalFld, rhsLocation,
                            funcInfo->FindOrAddInlineCacheId(scopeLocation, propertyId, false, true));
}
else
{
    // Scope object is in a plain register; probe it and store through an
    // inline cache, unwrapping the with-object into a temp if required.
    this->m_writer.BrProperty(Js::OpCode::BrOnNoProperty, nextLabel, scopeLocation,
                              funcInfo->FindOrAddReferencedPropertyId(propertyId));

    if (unwrapWithObj)
    {
        unwrappedScopeLocation = funcInfo->AcquireTmpRegister();
        this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, unwrappedScopeLocation, scopeLocation);
        scopeLocation = unwrappedScopeLocation;
    }

    uint cacheId = funcInfo->FindOrAddInlineCacheId(scopeLocation, propertyId, false, true);
    this->m_writer.PatchableProperty(Js::OpCode::StFld, rhsLocation, scopeLocation, cacheId);

    if (unwrapWithObj)
    {
        funcInfo->ReleaseTmpRegister(unwrappedScopeLocation);
    }
}

// Property found in this dynamic scope: skip remaining candidates.
this->m_writer.Br(doneLabel);
this->m_writer.MarkLabel(nextLabel);
}

// Arrived at the scope in which the property was defined.
if (!skipUseBeforeDeclarationCheck && sym && sym->GetNeedDeclaration() && scope->GetFunc() == funcInfo)
{
    EmitUseBeforeDeclarationRuntimeError(this, Js::Constants::NoRegister);
    // Intentionally continue on to do normal EmitPropStore behavior so
    // that the bytecode ends up well-formed for the backend. This is
    // in contrast to EmitPropLoad and EmitPropTypeof where they both
    // tell EmitUseBeforeDeclarationRuntimeError to emit a LdUndef in place
    // of their load and then they skip emitting their own bytecode.
    // Potayto potahto.
}

if (sym == nullptr || sym->GetIsGlobal())
{
    Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId();
    // Console-scope eval treats let/const declarations specially.
    bool isConsoleScopeLetConst = this->IsConsoleScopeEval() && (isLetDecl || isConstDecl);
    if (this->flags & fscrEval)
    {
        if (funcInfo->byteCodeFunction->GetIsStrictMode() && funcInfo->IsGlobalFunction())
        {
            uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->frameDisplayRegister, propertyId, false, true);
            this->m_writer.ElementP(GetScopedStFldOpCode(funcInfo, isConsoleScopeLetConst), rhsLocation, cacheId);
        }
        else
        {
            uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->GetEnvRegister(), propertyId, false, true);

            // In "eval", store to a symbol with unknown scope goes through the closure environment.
            this->m_writer.ElementP(GetScopedStFldOpCode(funcInfo, isConsoleScopeLetConst), rhsLocation, cacheId);
        }
    }
    else if (this->flags & fscrImplicitThis)
    {
        uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->GetEnvRegister(), propertyId, false, true);

        // In HTML event handler, store to a symbol with unknown scope goes through the closure environment.
this->m_writer.ElementP(GetScopedStFldOpCode(funcInfo, isConsoleScopeLetConst), rhsLocation, cacheId);
}
else
{
    // Plain global store: goes straight to the root object.
    this->EmitPatchableRootProperty(GetStFldOpCode(funcInfo, true, isLetDecl, isConstDecl, false, forceStrictModeForClassComputedPropertyName), rhsLocation, propertyId, false, true, funcInfo);
}
}
else if (sym->GetIsFuncExpr())
{
    // Store to function expr variable.

    // strict mode: we need to throw type error
    if (funcInfo->byteCodeFunction->GetIsStrictMode())
    {
        // Note that in this case the sym's location belongs to the parent function, so we can't use it.
        // It doesn't matter which register we use, as long as it's valid for this function.
        this->m_writer.W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_CantAssignToReadOnly));
    }
}
else if (sym->IsInSlot(this, funcInfo) || envIndex != -1)
{
    if (!isConstDecl && sym->GetIsConst())
    {
        // This is a case where const reassignment can't be proven statically (e.g., eval, with) so
        // we have to catch it at runtime.
        this->m_writer.W1(
            Js::OpCode::RuntimeTypeError, SCODE_CODE(ERRAssignmentToConst));
    }
    // Make sure the property has a slot. This will bump up the size of the slot array if necessary.
    Js::PropertyId slot = sym->EnsureScopeSlot(this, funcInfo);
    // TDZ check needed only for non-declaration stores to block-scoped vars.
    bool chkBlockVar = !isLetDecl && !isConstDecl && NeedCheckBlockVar(sym, scope, funcInfo);

    // The property is in memory rather than register. We'll have to load it from the slots.
    op = this->GetStSlotOp(scope, envIndex, scopeLocation, chkBlockVar, funcInfo);

    if (envIndex != -1)
    {
        this->m_writer.SlotI2(op, rhsLocation,
                              envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var),
                              slot + (sym->GetScope()->GetIsObject() ? 0 : Js::ScopeSlots::FirstSlotIndex));
    }
    else if (scopeLocation != Js::Constants::NoRegister &&
             (scopeLocation == funcInfo->frameSlotsRegister || scopeLocation == funcInfo->frameObjRegister))
    {
        this->m_writer.SlotI1(op, rhsLocation,
                              slot + (sym->GetScope()->GetIsObject() ?
0 : Js::ScopeSlots::FirstSlotIndex));
}
else
{
    Assert(scope->HasInnerScopeIndex());
    this->m_writer.SlotI2(op, rhsLocation, scope->GetInnerScopeIndex(),
                          slot + (sym->GetScope()->GetIsObject() ? 0 : Js::ScopeSlots::FirstSlotIndex));
}

if (this->ShouldTrackDebuggerMetadata() && (isLetDecl || isConstDecl))
{
    // Record where the let/const became initialized so the debugger can
    // show TDZ state correctly.
    Js::PropertyId location = scope->GetIsObject() ? sym->GetLocation() : slot;
    this->UpdateDebuggerPropertyInitializationOffset(location, sym->GetPosition(), false);
}
}
else if (isConstDecl)
{
    this->m_writer.Reg2(Js::OpCode::InitConst, sym->GetLocation(), rhsLocation);

    if (this->ShouldTrackDebuggerMetadata())
    {
        this->UpdateDebuggerPropertyInitializationOffset(sym->GetLocation(), sym->GetPosition());
    }
}
else
{
    if (!isConstDecl && sym->GetDecl() && sym->GetDecl()->nop == knopConstDecl)
    {
        // This is a case where const reassignment can't be proven statically (e.g., eval, with) so
        // we have to catch it at runtime.
        this->m_writer.W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(ERRAssignmentToConst));
    }
    if (rhsLocation != sym->GetLocation())
    {
        this->m_writer.Reg2(Js::OpCode::Ld_A, sym->GetLocation(), rhsLocation);

        if (this->ShouldTrackDebuggerMetadata() && isLetDecl)
        {
            this->UpdateDebuggerPropertyInitializationOffset(sym->GetLocation(), sym->GetPosition());
        }
    }
}
if (fLabelDefined)
{
    this->m_writer.MarkLabel(doneLabel);
}

if (scopeAcquired)
{
    funcInfo->ReleaseTmpRegister(scopeLocation);
}
}

// Select the load-from-slot opcode for a symbol's scope, mirroring
// GetStSlotOp: closure environment, local frame slots/object, indexed
// inner scope, or a generic object slot.
// @param envIndex index into the frame display, or -1 if the scope is local
Js::OpCode ByteCodeGenerator::GetLdSlotOp(Scope *scope, int envIndex, Js::RegSlot scopeLocation, FuncInfo *funcInfo)
{
    Js::OpCode op;

    if (envIndex != -1)
    {
        if (scope->GetIsObject())
        {
            op = Js::OpCode::LdEnvObjSlot;
        }
        else
        {
            op = Js::OpCode::LdEnvSlot;
        }
    }
    else if (scopeLocation != Js::Constants::NoRegister &&
             scopeLocation == funcInfo->frameSlotsRegister)
    {
        if (scope->GetScopeType() == ScopeType_Parameter
            && scope != scope->GetFunc()->GetCurrentChildScope())
        {
            // Symbol is from the param scope of a split scope function and we are emitting the body.
// We should use the param scope's bytecode now. Assert(!funcInfo->IsBodyAndParamScopeMerged()); op = Js::OpCode::LdParamSlot; } else { op = Js::OpCode::LdLocalSlot; } } else if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister) { if (scope->GetScopeType() == ScopeType_Parameter && scope != scope->GetFunc()->GetCurrentChildScope()) { // Symbol is from the param scope of a split scope function and we are emitting the body. // We should use the param scope's bytecode now. Assert(!funcInfo->IsBodyAndParamScopeMerged()); op = Js::OpCode::LdParamObjSlot; } else { op = Js::OpCode::LdLocalObjSlot; } } else if (scope->HasInnerScopeIndex()) { if (scope->GetIsObject()) { op = Js::OpCode::LdInnerObjSlot; } else { op = Js::OpCode::LdInnerSlot; } } else { AssertOrFailFast(scope->GetIsObject()); op = Js::OpCode::LdObjSlot; } return op; } bool ByteCodeGenerator::ShouldLoadConstThis(FuncInfo* funcInfo) { #if DBG // We should load a const 'this' binding if the following holds // - The function has a 'this' name node // - We are in a global or global lambda function // - The function has no 'this' symbol (an indirect eval would have this symbol) if (funcInfo->thisConstantRegister != Js::Constants::NoRegister) { Assert((funcInfo->IsLambda() || funcInfo->IsGlobalFunction()) && !funcInfo->GetThisSymbol() && !(this->flags & fscrEval)); } #endif return funcInfo->thisConstantRegister != Js::Constants::NoRegister; } void ByteCodeGenerator::EmitPropLoadThis(Js::RegSlot lhsLocation, ParseNodeSpecialName *pnodeSpecialName, FuncInfo *funcInfo, bool chkUndecl) { Symbol* sym = pnodeSpecialName->sym; if (!sym && this->ShouldLoadConstThis(funcInfo)) { this->Writer()->Reg2(Js::OpCode::Ld_A, lhsLocation, funcInfo->thisConstantRegister); } else { this->EmitPropLoad(lhsLocation, pnodeSpecialName->sym, pnodeSpecialName->pid, funcInfo, true); if ((!sym || sym->GetNeedDeclaration()) && chkUndecl) { this->Writer()->Reg1(Js::OpCode::ChkUndecl, lhsLocation); } } } 
void ByteCodeGenerator::EmitPropStoreForSpecialSymbol(Js::RegSlot rhsLocation, Symbol *sym, IdentPtr pid, FuncInfo *funcInfo, bool init) { if (!funcInfo->IsGlobalFunction() || (this->flags & fscrEval)) { if (init) { EmitLocalPropInit(rhsLocation, sym, funcInfo); } else { EmitPropStore(rhsLocation, sym, pid, funcInfo, false, false, false, true); } } } void ByteCodeGenerator::EmitPropLoad(Js::RegSlot lhsLocation, Symbol *sym, IdentPtr pid, FuncInfo *funcInfo, bool skipUseBeforeDeclarationCheck) { // If sym belongs to a parent frame, get it from the closure environment. // If it belongs to this func, but there's a non-local reference, get it from the heap-allocated frame. // (TODO: optimize this by getting the sym from its normal location if there are no non-local defs.) // Otherwise, just copy the value to the lhsLocation. Js::ByteCodeLabel doneLabel = 0; bool fLabelDefined = false; Js::RegSlot scopeLocation = Js::Constants::NoRegister; Js::PropertyId envIndex = -1; Scope *scope = nullptr; Scope *symScope = sym ? sym->GetScope() : this->globalScope; Assert(symScope); if (sym && sym->GetIsModuleExportStorage()) { EmitModuleExportAccess(sym, Js::OpCode::LdModuleSlot, lhsLocation, funcInfo); return; } for (;;) { scope = this->FindScopeForSym(symScope, scope, &envIndex, funcInfo); if (scope == this->globalScope) { break; } scopeLocation = scope->GetLocation(); if (scope == symScope) { break; } // Found a scope to which the property may have been added. Assert(scope && scope->GetIsDynamic()); if (!fLabelDefined) { fLabelDefined = true; doneLabel = this->m_writer.DefineLabel(); } Js::ByteCodeLabel nextLabel = this->m_writer.DefineLabel(); Js::PropertyId propertyId = sym ? 
sym->EnsurePosition(this) : pid->GetPropertyId(); Js::RegSlot unwrappedScopeLocation = Js::Constants::NoRegister; bool unwrapWithObj = scope->GetScopeType() == ScopeType_With && scriptContext->GetConfig()->IsES6UnscopablesEnabled(); if (envIndex != -1) { this->m_writer.BrEnvProperty( Js::OpCode::BrOnNoEnvProperty, nextLabel, funcInfo->FindOrAddReferencedPropertyId(propertyId), envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var)); Js::RegSlot instLocation = funcInfo->AcquireTmpRegister(); AssertOrFailFast(scope->GetIsObject()); this->m_writer.SlotI1( Js::OpCode::LdEnvObj, instLocation, envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var)); if (unwrapWithObj) { this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, instLocation, instLocation); } this->m_writer.PatchableProperty( Js::OpCode::LdFld, lhsLocation, instLocation, funcInfo->FindOrAddInlineCacheId(instLocation, propertyId, false, false)); funcInfo->ReleaseTmpRegister(instLocation); } else if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister) { this->m_writer.BrLocalProperty(Js::OpCode::BrOnNoLocalProperty, nextLabel, funcInfo->FindOrAddReferencedPropertyId(propertyId)); Assert(!unwrapWithObj); this->m_writer.ElementP(Js::OpCode::LdLocalFld, lhsLocation, funcInfo->FindOrAddInlineCacheId(scopeLocation, propertyId, false, false)); } else { this->m_writer.BrProperty(Js::OpCode::BrOnNoProperty, nextLabel, scopeLocation, funcInfo->FindOrAddReferencedPropertyId(propertyId)); if (unwrapWithObj) { unwrappedScopeLocation = funcInfo->AcquireTmpRegister(); this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, unwrappedScopeLocation, scopeLocation); scopeLocation = unwrappedScopeLocation; } uint cacheId = funcInfo->FindOrAddInlineCacheId(scopeLocation, propertyId, false, false); this->m_writer.PatchableProperty(Js::OpCode::LdFld, lhsLocation, scopeLocation, cacheId); if (unwrapWithObj) { funcInfo->ReleaseTmpRegister(unwrappedScopeLocation); } } 
this->m_writer.Br(doneLabel); this->m_writer.MarkLabel(nextLabel); } // Arrived at the scope in which the property was defined. if (sym && sym->GetNeedDeclaration() && scope->GetFunc() == funcInfo && !skipUseBeforeDeclarationCheck) { // Ensure this symbol has a slot if it needs one. if (sym->IsInSlot(this, funcInfo)) { Js::PropertyId slot = sym->EnsureScopeSlot(this, funcInfo); funcInfo->FindOrAddSlotProfileId(scope, slot); } if (skipUseBeforeDeclarationCheck) { if (lhsLocation != Js::Constants::NoRegister) { this->m_writer.Reg1(Js::OpCode::InitUndecl, lhsLocation); } } else { EmitUseBeforeDeclarationRuntimeError(this, lhsLocation); } } else if (sym == nullptr || sym->GetIsGlobal()) { Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId(); if (this->flags & fscrEval) { if (funcInfo->byteCodeFunction->GetIsStrictMode() && funcInfo->IsGlobalFunction()) { uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->frameDisplayRegister, propertyId, false, false); this->m_writer.ElementP(Js::OpCode::ScopedLdFld, lhsLocation, cacheId); } else { uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->GetEnvRegister(), propertyId, false, false); // Load of a symbol with unknown scope from within eval // Get it from the closure environment. this->m_writer.ElementP(Js::OpCode::ScopedLdFld, lhsLocation, cacheId); } } else if (this->flags & fscrImplicitThis) { uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->GetEnvRegister(), propertyId, false, false); // Load of a symbol with unknown scope from within event handler. // Get it from the closure environment. this->m_writer.ElementP(Js::OpCode::ScopedLdFld, lhsLocation, cacheId); } else { // Special case non-writable built-ins // TODO: support non-writable global property in general by detecting what attribute the property have current? // But can't be done if we are byte code serialized, because the attribute might be different for use fields // next time we run. 
May want to catch that in the JIT. Js::OpCode opcode = Js::OpCode::LdRootFld; // These properties are non-writable switch (propertyId) { case Js::PropertyIds::NaN: opcode = Js::OpCode::LdNaN; break; case Js::PropertyIds::Infinity: opcode = Js::OpCode::LdInfinity; break; case Js::PropertyIds::undefined: opcode = Js::OpCode::LdUndef; break; case Js::PropertyIds::__chakraLibrary: if (CONFIG_FLAG(LdChakraLib) || CONFIG_FLAG(TestChakraLib)) { opcode = Js::OpCode::LdChakraLib; } break; } if (opcode == Js::OpCode::LdRootFld) { this->EmitPatchableRootProperty(Js::OpCode::LdRootFld, lhsLocation, propertyId, false, false, funcInfo); } else { this->Writer()->Reg1(opcode, lhsLocation); } } } else if (sym->IsInSlot(this, funcInfo) || envIndex != -1) { // Make sure the property has a slot. This will bump up the size of the slot array if necessary. Js::PropertyId slot = sym->EnsureScopeSlot(this, funcInfo); Js::ProfileId profileId = funcInfo->FindOrAddSlotProfileId(scope, slot); bool chkBlockVar = NeedCheckBlockVar(sym, scope, funcInfo); Js::OpCode op; // Now get the property from its slot. op = this->GetLdSlotOp(scope, envIndex, scopeLocation, funcInfo); slot = slot + (sym->GetScope()->GetIsObject() ? 
0 : Js::ScopeSlots::FirstSlotIndex); if (envIndex != -1) { this->m_writer.SlotI2(op, lhsLocation, envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var), slot, profileId); } else if (scopeLocation != Js::Constants::NoRegister && (scopeLocation == funcInfo->frameSlotsRegister || scopeLocation == funcInfo->frameObjRegister)) { this->m_writer.SlotI1(op, lhsLocation, slot, profileId); } else if (scope->HasInnerScopeIndex()) { this->m_writer.SlotI2(op, lhsLocation, scope->GetInnerScopeIndex(), slot, profileId); } else { AssertOrFailFast(scope->GetIsObject()); this->m_writer.Slot(op, lhsLocation, scopeLocation, slot, profileId); } if (chkBlockVar) { this->m_writer.Reg1(Js::OpCode::ChkUndecl, lhsLocation); } } else { if (lhsLocation != sym->GetLocation()) { this->m_writer.Reg2(Js::OpCode::Ld_A, lhsLocation, sym->GetLocation()); } if (sym->GetIsBlockVar() && ((sym->GetDecl()->nop == knopLetDecl || sym->GetDecl()->nop == knopConstDecl) && sym->GetDecl()->AsParseNodeVar()->isSwitchStmtDecl)) { this->m_writer.Reg1(Js::OpCode::ChkUndecl, lhsLocation); } } if (fLabelDefined) { this->m_writer.MarkLabel(doneLabel); } } bool ByteCodeGenerator::NeedCheckBlockVar(Symbol* sym, Scope* scope, FuncInfo* funcInfo) const { bool tdz = sym->GetIsBlockVar() && (scope->GetFunc() != funcInfo || ((sym->GetDecl()->nop == knopLetDecl || sym->GetDecl()->nop == knopConstDecl) && sym->GetDecl()->AsParseNodeVar()->isSwitchStmtDecl)); return tdz || sym->GetIsNonSimpleParameter(); } void ByteCodeGenerator::EmitPropDelete(Js::RegSlot lhsLocation, Symbol *sym, IdentPtr pid, FuncInfo *funcInfo) { // If sym belongs to a parent frame, delete it from the closure environment. // If it belongs to this func, but there's a non-local reference, get it from the heap-allocated frame. // (TODO: optimize this by getting the sym from its normal location if there are no non-local defs.) // Otherwise, just return false. 
// NOTE(review): continuation of ByteCodeGenerator::EmitPropDelete — signature and
// header comments are in the preceding chunk. Code is unchanged; comments only.
Js::ByteCodeLabel doneLabel = 0;
bool fLabelDefined = false;
Js::RegSlot scopeLocation = Js::Constants::NoRegister;
Js::PropertyId envIndex = -1;
Scope *scope = nullptr;
Scope *symScope = sym ? sym->GetScope() : this->globalScope;
Assert(symScope);

// Walk outward toward the symbol's scope; every intervening dynamic scope
// (with/eval) may shadow the name, so check for it at runtime and emit a
// DeleteFld against that scope when present.
for (;;)
{
    scope = this->FindScopeForSym(symScope, scope, &envIndex, funcInfo);
    if (scope == this->globalScope)
    {
        scopeLocation = ByteCodeGenerator::RootObjectRegister;
    }
    else if (envIndex == -1)
    {
        Assert(funcInfo == scope->GetFunc());
        scopeLocation = scope->GetLocation();
    }

    if (scope == symScope)
    {
        break;
    }

    // Found a scope to which the property may have been added.
    Assert(scope && scope->GetIsDynamic());

    if (!fLabelDefined)
    {
        fLabelDefined = true;
        doneLabel = this->m_writer.DefineLabel();
    }

    Js::ByteCodeLabel nextLabel = this->m_writer.DefineLabel();
    Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId();

    // 'with' objects must be unwrapped before use when ES6 unscopables are enabled.
    bool unwrapWithObj = scope->GetScopeType() == ScopeType_With && scriptContext->GetConfig()->IsES6UnscopablesEnabled();
    if (envIndex != -1)
    {
        // Dynamic scope object found in the closure environment.
        this->m_writer.BrEnvProperty(
            Js::OpCode::BrOnNoEnvProperty,
            nextLabel,
            funcInfo->FindOrAddReferencedPropertyId(propertyId),
            envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));

    Js::RegSlot instLocation = funcInfo->AcquireTmpRegister();

        AssertOrFailFast(scope->GetIsObject());
        this->m_writer.SlotI1(
            Js::OpCode::LdEnvObj,
            instLocation,
            envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));

        if (unwrapWithObj)
        {
            this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, instLocation, instLocation);
        }

        this->m_writer.Property(Js::OpCode::DeleteFld, lhsLocation, instLocation,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));

        funcInfo->ReleaseTmpRegister(instLocation);
    }
    else if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister)
    {
        // Dynamic scope is this frame's own activation object.
        this->m_writer.BrLocalProperty(Js::OpCode::BrOnNoLocalProperty, nextLabel,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));

        Assert(!unwrapWithObj);
        this->m_writer.ElementU(Js::OpCode::DeleteLocalFld, lhsLocation,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));
    }
    else
    {
        // Generic scope object addressed by register.
        this->m_writer.BrProperty(Js::OpCode::BrOnNoProperty, nextLabel, scopeLocation,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));

        Js::RegSlot unwrappedScopeLocation = Js::Constants::NoRegister;
        if (unwrapWithObj)
        {
            unwrappedScopeLocation = funcInfo->AcquireTmpRegister();
            this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, unwrappedScopeLocation, scopeLocation);
            scopeLocation = unwrappedScopeLocation;
        }

        this->m_writer.Property(Js::OpCode::DeleteFld, lhsLocation, scopeLocation,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));

        if (unwrapWithObj)
        {
            funcInfo->ReleaseTmpRegister(unwrappedScopeLocation);
        }
    }

    this->m_writer.Br(doneLabel);
    this->m_writer.MarkLabel(nextLabel);
}

// Arrived at the scope in which the property was defined.
if (sym == nullptr || sym->GetIsGlobal())
{
    Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId();
    if (this->flags & (fscrEval | fscrImplicitThis))
    {
        this->m_writer.ScopedProperty(Js::OpCode::ScopedDeleteFld, lhsLocation,
            funcInfo->FindOrAddReferencedPropertyId(propertyId), forceStrictModeForClassComputedPropertyName);
    }
    else
    {
        this->m_writer.Property(Js::OpCode::DeleteRootFld, lhsLocation, ByteCodeGenerator::RootObjectRegister,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));
    }
}
else
{
    // The delete will look like a non-local reference, so make sure a slot is reserved.
    sym->EnsureScopeSlot(this, funcInfo);
    // Deleting a declared binding yields false.
    this->m_writer.Reg1(Js::OpCode::LdFalse, lhsLocation);
}

if (fLabelDefined)
{
    this->m_writer.MarkLabel(doneLabel);
}
}

// Emits "typeof" of a property load: loads <instance>.<propertyId> into a temp
// register using the supplied load opcode (choosing the matching inline-cache
// form), applies the Typeof opcode into 'value', and releases the temp.
void ByteCodeGenerator::EmitTypeOfFld(FuncInfo * funcInfo, Js::PropertyId propertyId, Js::RegSlot value, Js::RegSlot instance, Js::OpCode ldFldOp)
{
    uint cacheId;
    Js::RegSlot tmpReg = funcInfo->AcquireTmpRegister();

    switch (ldFldOp)
    {
    case Js::OpCode::LdRootFldForTypeOf:
        // Root (global object) load uses the root-object inline cache.
        cacheId = funcInfo->FindOrAddRootObjectInlineCacheId(propertyId, false, false);
        this->Writer()->PatchableRootProperty(ldFldOp, tmpReg, cacheId, false, false);
        break;

    case Js::OpCode::LdLocalFld:
    case Js::OpCode::ScopedLdFldForTypeOf:
        cacheId = funcInfo->FindOrAddInlineCacheId(instance, propertyId, false, false);
        this->Writer()->ElementP(ldFldOp, tmpReg, cacheId);
        break;

    default:
        cacheId = funcInfo->FindOrAddInlineCacheId(instance, propertyId, false, false);
        this->Writer()->PatchableProperty(ldFldOp, tmpReg, instance, cacheId);
        break;
    }

    this->Writer()->Reg2(Js::OpCode::Typeof, value, tmpReg);
    funcInfo->ReleaseTmpRegister(tmpReg);
}

// Emits "typeof <sym/pid>" into lhsLocation, resolving the name through the
// scope chain the same way EmitPropLoad does (the header comment below was
// carried over from the delete path; the structure, not the delete semantics,
// is what is shared).
void ByteCodeGenerator::EmitPropTypeof(Js::RegSlot lhsLocation, Symbol *sym, IdentPtr pid, FuncInfo *funcInfo)
{
    // If sym belongs to a parent frame, delete it from the closure environment.
    // If it belongs to this func, but there's a non-local reference, get it from the heap-allocated frame.
    // (TODO: optimize this by getting the sym from its normal location if there are no non-local defs.)
    // Otherwise, just return false

    Js::ByteCodeLabel doneLabel = 0;
    bool fLabelDefined = false;
    Js::RegSlot scopeLocation = Js::Constants::NoRegister;
    Js::PropertyId envIndex = -1;
    Scope *scope = nullptr;
    Scope *symScope = sym ?
// NOTE(review): continuation of ByteCodeGenerator::EmitPropTypeof — the first half
// of the 'symScope' initializer is at the end of the preceding chunk. Comments only.
sym->GetScope() : this->globalScope;
Assert(symScope);

if (sym && sym->GetIsModuleExportStorage())
{
    // typeof of a module export: load the module slot into a temp, Typeof it.
    Js::RegSlot tmpLocation = funcInfo->AcquireTmpRegister();
    EmitModuleExportAccess(sym, Js::OpCode::LdModuleSlot, tmpLocation, funcInfo);
    this->m_writer.Reg2(Js::OpCode::Typeof, lhsLocation, tmpLocation);
    funcInfo->ReleaseTmpRegister(tmpLocation);
    return;
}

// Walk outward toward the symbol's scope, checking intervening dynamic scopes
// at runtime (same structure as EmitPropLoad/EmitPropDelete).
for (;;)
{
    scope = this->FindScopeForSym(symScope, scope, &envIndex, funcInfo);
    if (scope == this->globalScope)
    {
        scopeLocation = ByteCodeGenerator::RootObjectRegister;
    }
    else if (envIndex == -1)
    {
        Assert(funcInfo == scope->GetFunc());
        scopeLocation = scope->GetLocation();
    }

    if (scope == symScope)
    {
        break;
    }

    // Found a scope to which the property may have been added.
    Assert(scope && scope->GetIsDynamic());

    if (!fLabelDefined)
    {
        fLabelDefined = true;
        doneLabel = this->m_writer.DefineLabel();
    }

    Js::ByteCodeLabel nextLabel = this->m_writer.DefineLabel();
    Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId();

    // 'with' objects must be unwrapped before use when ES6 unscopables are enabled.
    bool unwrapWithObj = scope->GetScopeType() == ScopeType_With && scriptContext->GetConfig()->IsES6UnscopablesEnabled();
    if (envIndex != -1)
    {
        // Dynamic scope object found in the closure environment.
        this->m_writer.BrEnvProperty(Js::OpCode::BrOnNoEnvProperty, nextLabel,
            funcInfo->FindOrAddReferencedPropertyId(propertyId),
            envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));

        Js::RegSlot instLocation = funcInfo->AcquireTmpRegister();

        AssertOrFailFast(scope->GetIsObject());
        this->m_writer.SlotI1(Js::OpCode::LdEnvObj,
            instLocation,
            envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var));

        if (unwrapWithObj)
        {
            this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, instLocation, instLocation);
        }

        this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, instLocation, Js::OpCode::LdFldForTypeOf);

        funcInfo->ReleaseTmpRegister(instLocation);
    }
    else if (scopeLocation != Js::Constants::NoRegister && scopeLocation == funcInfo->frameObjRegister)
    {
        // Dynamic scope is this frame's own activation object.
        this->m_writer.BrLocalProperty(Js::OpCode::BrOnNoLocalProperty, nextLabel,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));

        Assert(!unwrapWithObj);
        this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, scopeLocation, Js::OpCode::LdLocalFld);
    }
    else
    {
        // Generic scope object addressed by register.
        this->m_writer.BrProperty(Js::OpCode::BrOnNoProperty, nextLabel, scopeLocation,
            funcInfo->FindOrAddReferencedPropertyId(propertyId));

        Js::RegSlot unwrappedScopeLocation = Js::Constants::NoRegister;
        if (unwrapWithObj)
        {
            unwrappedScopeLocation = funcInfo->AcquireTmpRegister();
            this->m_writer.Reg2(Js::OpCode::UnwrapWithObj, unwrappedScopeLocation, scopeLocation);
            scopeLocation = unwrappedScopeLocation;
        }

        this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, scopeLocation, Js::OpCode::LdFldForTypeOf);

        if (unwrapWithObj)
        {
            funcInfo->ReleaseTmpRegister(unwrappedScopeLocation);
        }
    }

    this->m_writer.Br(doneLabel);
    this->m_writer.MarkLabel(nextLabel);
}

// Arrived at the scope in which the property was defined.
if (sym && sym->GetNeedDeclaration() && scope->GetFunc() == funcInfo)
{
    // Symbol is still in its temporal dead zone: typeof of it throws.
    // Ensure this symbol has a slot if it needs one.
    if (sym->IsInSlot(this, funcInfo))
    {
        Js::PropertyId slot = sym->EnsureScopeSlot(this, funcInfo);
        funcInfo->FindOrAddSlotProfileId(scope, slot);
    }

    EmitUseBeforeDeclarationRuntimeError(this, lhsLocation);
}
else if (sym == nullptr || sym->GetIsGlobal())
{
    Js::PropertyId propertyId = sym ? sym->EnsurePosition(this) : pid->GetPropertyId();
    if (this->flags & fscrEval)
    {
        if (funcInfo->byteCodeFunction->GetIsStrictMode() && funcInfo->IsGlobalFunction())
        {
            this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, funcInfo->frameDisplayRegister, Js::OpCode::ScopedLdFldForTypeOf);
        }
        else
        {
            this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, funcInfo->GetEnvRegister(), Js::OpCode::ScopedLdFldForTypeOf);
        }
    }
    else if (this->flags & fscrImplicitThis)
    {
        this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, funcInfo->GetEnvRegister(), Js::OpCode::ScopedLdFldForTypeOf);
    }
    else
    {
        this->EmitTypeOfFld(funcInfo, propertyId, lhsLocation, ByteCodeGenerator::RootObjectRegister, Js::OpCode::LdRootFldForTypeOf);
    }
}
else if (sym->IsInSlot(this, funcInfo) || envIndex != -1)
{
    // Make sure the property has a slot. This will bump up the size of the slot array if necessary.
    Js::PropertyId slot = sym->EnsureScopeSlot(this, funcInfo);
    Js::ProfileId profileId = funcInfo->FindOrAddSlotProfileId(scope, slot);
    Js::RegSlot tmpLocation = funcInfo->AcquireTmpRegister();
    bool chkBlockVar = NeedCheckBlockVar(sym, scope, funcInfo);
    Js::OpCode op;

    op = this->GetLdSlotOp(scope, envIndex, scopeLocation, funcInfo);
    // Non-object scopes reserve the first slots for bookkeeping.
    slot = slot + (sym->GetScope()->GetIsObject() ? 0 : Js::ScopeSlots::FirstSlotIndex);

    if (envIndex != -1)
    {
        this->m_writer.SlotI2(op, tmpLocation, envIndex + Js::FrameDisplay::GetOffsetOfScopes() / sizeof(Js::Var), slot, profileId);
    }
    else if (scopeLocation != Js::Constants::NoRegister &&
        (scopeLocation == funcInfo->frameSlotsRegister || scopeLocation == funcInfo->frameObjRegister))
    {
        this->m_writer.SlotI1(op, tmpLocation, slot, profileId);
    }
    else if (scope->HasInnerScopeIndex())
    {
        this->m_writer.SlotI2(op, tmpLocation, scope->GetInnerScopeIndex(), slot, profileId);
    }
    else
    {
        AssertOrFailFast(scope->GetIsObject());
        this->m_writer.Slot(op, tmpLocation, scopeLocation, slot, profileId);
    }

    if (chkBlockVar)
    {
        this->m_writer.Reg1(Js::OpCode::ChkUndecl, tmpLocation);
    }

    this->m_writer.Reg2(Js::OpCode::Typeof, lhsLocation, tmpLocation);
    funcInfo->ReleaseTmpRegister(tmpLocation);
}
else
{
    // Symbol lives in a plain register; Typeof it directly.
    this->m_writer.Reg2(Js::OpCode::Typeof, lhsLocation, sym->GetLocation());
}

if (fLabelDefined)
{
    this->m_writer.MarkLabel(doneLabel);
}
}

void ByteCodeGenerator::EnsureNoRedeclarations(ParseNodeBlock *pnodeBlock, FuncInfo *funcInfo)
{
    // Emit dynamic runtime checks for variable re-declarations. Only necessary for global functions (script or eval).
    // In eval only var declarations can cause redeclaration, and only in non-strict mode, because let/const variables
    // remain local to the eval code.
// NOTE(review): continuation of ByteCodeGenerator::EnsureNoRedeclarations — signature
// and header comments are in the preceding chunk. Code is unchanged; comments only.
Assert(pnodeBlock->nop == knopBlock);
Assert(pnodeBlock->blockType == PnodeBlockType::Global || pnodeBlock->scope->GetScopeType() == ScopeType_GlobalEvalBlock);

if (!(this->flags & fscrEvalCode))
{
    // Global script code: every top-level let/const must not collide with an
    // existing non-configurable root property at runtime.
    IterateBlockScopedVariables(pnodeBlock, [this](ParseNode *pnode)
    {
        FuncInfo *funcInfo = this->TopFuncInfo();
        Symbol *sym = pnode->AsParseNodeVar()->sym;

        Assert(sym->GetIsGlobal());

        Js::PropertyId propertyId = sym->EnsurePosition(this);

        this->m_writer.ElementRootU(Js::OpCode::EnsureNoRootFld, funcInfo->FindOrAddReferencedPropertyId(propertyId));
    });
}

// Shared emitter for the per-symbol redeclaration check; eval code uses the
// scoped form (non-strict only), global script code checks the root object.
auto emitRedeclCheck = [this](Symbol * sym, FuncInfo * funcInfo)
{
    Js::PropertyId propertyId = sym->EnsurePosition(this);

    if (this->flags & fscrEval)
    {
        if (!funcInfo->byteCodeFunction->GetIsStrictMode())
        {
            this->m_writer.ScopedProperty(Js::OpCode::ScopedEnsureNoRedeclFld, ByteCodeGenerator::RootObjectRegister,
                funcInfo->FindOrAddReferencedPropertyId(propertyId));
        }
    }
    else
    {
        this->m_writer.ElementRootU(Js::OpCode::EnsureNoRootRedeclFld, funcInfo->FindOrAddReferencedPropertyId(propertyId));
    }
};

// scan for function declarations
// these behave like "var" declarations
for (ParseNodePtr pnode = pnodeBlock->pnodeScopes; pnode;)
{
    switch (pnode->nop)
    {
    case knopFncDecl:
        if (pnode->AsParseNodeFnc()->IsDeclaration())
        {
            emitRedeclCheck(pnode->AsParseNodeFnc()->pnodeName->sym, funcInfo);
        }
        pnode = pnode->AsParseNodeFnc()->pnodeNext;
        break;

    case knopBlock:
        pnode = pnode->AsParseNodeBlock()->pnodeNext;
        break;

    case knopCatch:
        pnode = pnode->AsParseNodeCatch()->pnodeNext;
        break;

    case knopWith:
        pnode = pnode->AsParseNodeWith()->pnodeNext;
        break;

    default:
        Assert(UNREACHED);
    }
}

// scan for var declarations
for (ParseNode *pnode = funcInfo->root->pnodeVars; pnode; pnode = pnode->AsParseNodeVar()->pnodeNext)
{
    Symbol* sym = pnode->AsParseNodeVar()->sym;

    if (sym == nullptr || pnode->AsParseNodeVar()->isBlockScopeFncDeclVar || sym->IsSpecialSymbol())
        continue;

    if (sym->GetIsCatch() || (pnode->nop == knopVarDecl && sym->GetIsBlockVar()))
    {
        // The init node was bound to the catch object, because it's inside a catch and has the
        // same name as the catch object. But we want to define a user var at function scope,
        // so find the right symbol. (We'll still assign the RHS value to the catch object symbol.)
        // This also applies to a var declaration in the same scope as a let declaration.

        // Assert that catch cannot be at function scope and let and var at function scope is redeclaration error.
        Assert(sym->GetIsCatch() || funcInfo->bodyScope != sym->GetScope());
        sym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
        Assert(sym && !sym->GetIsCatch() && !sym->GetIsBlockVar());
    }

    Assert(sym->GetIsGlobal());
    if (sym->GetSymbolType() == STVariable)
    {
        emitRedeclCheck(sym, funcInfo);
    }
}
}

// Copies every enregistered integer constant of this function into the
// FunctionBody's constant table (register -> value).
void ByteCodeGenerator::RecordAllIntConstants(FuncInfo * funcInfo)
{
    Js::FunctionBody *byteCodeFunction = this->TopFuncInfo()->GetParsedFunctionBody();
    funcInfo->constantToRegister.Map([byteCodeFunction](unsigned int val, Js::RegSlot location)
    {
        byteCodeFunction->RecordIntConstant(byteCodeFunction->MapRegSlot(location), val);
    });
}

// Copies every enregistered string constant of this function into the
// FunctionBody's constant table.
void ByteCodeGenerator::RecordAllStrConstants(FuncInfo * funcInfo)
{
    Js::FunctionBody *byteCodeFunction = this->TopFuncInfo()->GetParsedFunctionBody();
    funcInfo->stringToRegister.Map([byteCodeFunction](IdentPtr pid, Js::RegSlot location)
    {
        byteCodeFunction->RecordStrConstant(byteCodeFunction->MapRegSlot(location), pid->Psz(), pid->Cch(), pid->IsUsedInLdElem());
    });
}

// Builds and records the frozen callsite object (cooked strings plus a frozen
// 'raw' array) for each tagged string template in this function.
void ByteCodeGenerator::RecordAllStringTemplateCallsiteConstants(FuncInfo* funcInfo)
{
    Js::FunctionBody *byteCodeFunction = this->TopFuncInfo()->GetParsedFunctionBody();
    funcInfo->stringTemplateCallsiteRegisterMap.Map([byteCodeFunction](ParseNodePtr pnode, Js::RegSlot location)
    {
        Js::ScriptContext* scriptContext = byteCodeFunction->GetScriptContext();
        Js::RecyclableObject* rawArray = ByteCodeGenerator::BuildArrayFromStringList(pnode->AsParseNodeStrTemplate()->pnodeStringRawLiterals, pnode->AsParseNodeStrTemplate()->countStringLiterals,
// NOTE(review): the lines below complete ByteCodeGenerator::RecordAllStringTemplateCallsiteConstants,
// whose head is in the preceding chunk. Code is unchanged; comments only.
scriptContext);
rawArray->Freeze();

Js::RecyclableObject* callsiteObject = ByteCodeGenerator::BuildArrayFromStringList(pnode->AsParseNodeStrTemplate()->pnodeStringLiterals, pnode->AsParseNodeStrTemplate()->countStringLiterals, scriptContext);

// Attach the frozen raw-strings array as the non-enumerable-independent 'raw'
// property, then freeze the callsite object itself before recording it.
callsiteObject->SetPropertyWithAttributes(Js::PropertyIds::raw, rawArray, PropertyNone, nullptr);
callsiteObject->Freeze();

byteCodeFunction->RecordConstant(byteCodeFunction->MapRegSlot(location), callsiteObject);
});
}

// Returns true when 'callNode' is exactly the pattern "<target>.apply(this, arguments)"
// (target not itself 'arguments'), i.e. a candidate for the apply-args optimization.
bool IsApplyArgs(ParseNodeCall* callNode)
{
    ParseNode* target = callNode->pnodeTarget;
    ParseNode* args = callNode->pnodeArgs;
    if ((target != nullptr) && (target->nop == knopDot))
    {
        ParseNode* lhsNode = target->AsParseNodeBin()->pnode1;
        if ((lhsNode != nullptr) && ((lhsNode->nop == knopDot) || (lhsNode->nop == knopName)) && !IsArguments(lhsNode))
        {
            ParseNode* nameNode = target->AsParseNodeBin()->pnode2;
            if (nameNode != nullptr)
            {
                bool nameIsApply = nameNode->AsParseNodeName()->PropertyIdFromNameNode() == Js::PropertyIds::apply;
                if (nameIsApply && args != nullptr && args->nop == knopList)
                {
                    ParseNode* arg1 = args->AsParseNodeBin()->pnode1;
                    ParseNode* arg2 = args->AsParseNodeBin()->pnode2;
                    if ((arg1 != nullptr) && ByteCodeGenerator::IsThis(arg1) &&
                        (arg2 != nullptr) && (arg2->nop == knopName) && (arg2->AsParseNodeName()->sym != nullptr))
                    {
                        return arg2->AsParseNodeName()->sym->IsArguments();
                    }
                }
            }
        }
    }
    return false;
}

// Post-order counterpart of CheckApplyEnclosesArgs: when leaving an
// f.apply(this, arguments) call, an 'arguments' use outside such a call
// invalidates the match; the insideApplyCall flag is popped either way.
void PostCheckApplyEnclosesArgs(ParseNode* pnode, ByteCodeGenerator* byteCodeGenerator, ApplyCheck* applyCheck)
{
    if ((pnode == nullptr) || (!applyCheck->matches))
    {
        return;
    }

    if (pnode->nop == knopCall)
    {
        if ((!pnode->isUsed) && IsApplyArgs(pnode->AsParseNodeCall()))
        {
            if (!applyCheck->insideApplyCall)
            {
                applyCheck->matches = false;
            }
            applyCheck->insideApplyCall = false;
        }
    }
}

// Pre-order walk callback: tracks whether every use of 'arguments' occurs only
// as the second operand of an f.apply(this, arguments) call, and marks
// qualifying calls with isApplyCall.
void CheckApplyEnclosesArgs(ParseNode* pnode, ByteCodeGenerator* byteCodeGenerator, ApplyCheck* applyCheck)
{
    if ((pnode == nullptr) || (!applyCheck->matches))
    {
        return;
    }

    switch (pnode->nop)
    {
    case knopName:
    {
        Symbol* sym = pnode->AsParseNodeName()->sym;
        if (sym != nullptr)
        {
            if (sym->IsArguments())
            {
                // 'arguments' referenced outside an apply-args call: no match.
                if (!applyCheck->insideApplyCall)
                {
                    applyCheck->matches = false;
                }
            }
        }
        break;
    }

    case knopCall:
        if ((!pnode->isUsed) && IsApplyArgs(pnode->AsParseNodeCall()))
        {
            // no nested apply calls
            if (applyCheck->insideApplyCall)
            {
                applyCheck->matches = false;
            }
            else
            {
                applyCheck->insideApplyCall = true;
                applyCheck->sawApply = true;
                pnode->AsParseNodeCall()->isApplyCall = true;
            }
        }
        break;
    }
}

// Counts the call arguments represented by 'pnode' (a knopList chain), plus one
// for the implicit 'this' argument. Optionally reports whether any argument
// expression might have side effects.
unsigned int CountArguments(ParseNode *pnode, BOOL *pSideEffect = nullptr)
{
    // If the caller passed us a pSideEffect, it wants to know whether there are potential
    // side-effects in the argument list. We need to know this so that the call target
    // operands can be preserved if necessary.
    // For now, treat any non-leaf op as a potential side-effect. This causes no detectable slowdowns,
    // but we can be more precise if we need to be.
    if (pSideEffect)
    {
        *pSideEffect = FALSE;
    }

    unsigned int argCount = 1;
    if (pnode != nullptr)
    {
        while (pnode->nop == knopList)
        {
            argCount++;
            if (pSideEffect && !(ParseNode::Grfnop(pnode->AsParseNodeBin()->pnode1->nop) & fnopLeaf))
            {
                *pSideEffect = TRUE;
            }
            pnode = pnode->AsParseNodeBin()->pnode2;
        }
        argCount++;
        if (pSideEffect && !(ParseNode::Grfnop(pnode->nop) & fnopLeaf))
        {
            *pSideEffect = TRUE;
        }
    }

    AssertOrFailFastMsg(argCount < Js::Constants::UShortMaxValue, "Number of allowed arguments are already capped at parser level");

    return argCount;
}

void SaveOpndValue(ParseNode *pnode, FuncInfo *funcInfo)
{
    // Save a local name to a register other than its home location.
    // This guards against side-effects in cases like x.foo(x = bar()).
Symbol *sym = nullptr; if (pnode->nop == knopName) { sym = pnode->AsParseNodeName()->sym; } else if (pnode->nop == knopComputedName) { ParseNode *pnode1 = pnode->AsParseNodeUni()->pnode1; if (pnode1->nop == knopName) { sym = pnode1->AsParseNodeName()->sym; } } if (sym == nullptr) { return; } // If the target is a local being kept in its home location, // protect the target's value in the event the home location is overwritten. if (pnode->location != Js::Constants::NoRegister && sym->GetScope()->GetFunc() == funcInfo && pnode->location == sym->GetLocation()) { pnode->location = funcInfo->AcquireTmpRegister(); } } void ByteCodeGenerator::StartStatement(ParseNode* node) { Assert(TopFuncInfo() != nullptr); m_writer.StartStatement(node, TopFuncInfo()->curTmpReg - TopFuncInfo()->firstTmpReg); } void ByteCodeGenerator::EndStatement(ParseNode* node) { m_writer.EndStatement(node); } void ByteCodeGenerator::StartSubexpression(ParseNode* node) { Assert(TopFuncInfo() != nullptr); m_writer.StartSubexpression(node); } void ByteCodeGenerator::EndSubexpression(ParseNode* node) { m_writer.EndSubexpression(node); } void EmitReference(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo) { // Generate code for the LHS of an assignment. switch (pnode->nop) { case knopDot: Emit(pnode->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false); break; case knopIndex: Emit(pnode->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false); Emit(pnode->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo, false); break; case knopName: break; case knopArrayPattern: case knopObjectPattern: break; case knopCall: case knopNew: // Emit the operands of a call that will be used as a LHS. // These have to be emitted before the RHS, but they have to persist until // the end of the expression. // Emit the call target operands first. 
    // Evaluate the call target. Property/index targets are emitted as references
    // (object and index evaluated, but the property load deferred); names may need
    // a full load depending on where the symbol lives.
    switch (pnode->AsParseNodeCall()->pnodeTarget->nop)
    {
    case knopDot:
    case knopIndex:
        funcInfo->AcquireLoc(pnode->AsParseNodeCall()->pnodeTarget);
        EmitReference(pnode->AsParseNodeCall()->pnodeTarget, byteCodeGenerator, funcInfo);
        break;

    case knopName:
    {
        Symbol *sym = pnode->AsParseNodeCall()->pnodeTarget->AsParseNodeName()->sym;
        if (!sym || sym->GetLocation() == Js::Constants::NoRegister)
        {
            funcInfo->AcquireLoc(pnode->AsParseNodeCall()->pnodeTarget);
        }
        // A symbol stored in a slot, or belonging to an enclosing function, is not
        // addressable through its assigned register from here.
        if (sym && (sym->IsInSlot(byteCodeGenerator, funcInfo) || sym->GetScope()->GetFunc() != funcInfo))
        {
            // Can't get the value from the assigned register, so load it here.
            EmitLoad(pnode->AsParseNodeCall()->pnodeTarget, byteCodeGenerator, funcInfo);
        }
        else
        {
            // EmitLoad will check for needsDeclaration and emit the Use Before Declaration error
            // bytecode op as necessary, but EmitReference does not check this (by design). So we
            // must manually check here.
            EmitUseBeforeDeclaration(pnode->AsParseNodeCall()->pnodeTarget->AsParseNodeName()->sym, byteCodeGenerator, funcInfo);
            EmitReference(pnode->AsParseNodeCall()->pnodeTarget, byteCodeGenerator, funcInfo);
        }
        break;
    }

    default:
        EmitLoad(pnode->AsParseNodeCall()->pnodeTarget, byteCodeGenerator, funcInfo);
        break;
    }

    // Now the arg list. We evaluate everything now and emit the ArgOut's later.
    if (pnode->AsParseNodeCall()->pnodeArgs)
    {
        // Walk the knopList spine, emitting each argument expression in order.
        ParseNode *pnodeArg = pnode->AsParseNodeCall()->pnodeArgs;
        while (pnodeArg->nop == knopList)
        {
            Emit(pnodeArg->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false);
            pnodeArg = pnodeArg->AsParseNodeBin()->pnode2;
        }
        Emit(pnodeArg, byteCodeGenerator, funcInfo, false);
    }
    if (pnode->AsParseNodeCall()->isSuperCall)
    {
        // super(...) also needs 'this' and new.target evaluated.
        Emit(pnode->AsParseNodeSuperCall()->pnodeThis, byteCodeGenerator, funcInfo, false);
        Emit(pnode->AsParseNodeSuperCall()->pnodeNewTarget, byteCodeGenerator, funcInfo, false);
    }
    break;

    default:
        Emit(pnode, byteCodeGenerator, funcInfo, false);
        break;
    }
}

// Forward declarations of the iterator-protocol emission helpers used by the
// destructuring code below (definitions elsewhere in this file).
void EmitGetIterator(Js::RegSlot iteratorLocation, Js::RegSlot iterableLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo);
void EmitIteratorNext(Js::RegSlot itemLocation, Js::RegSlot iteratorLocation, Js::RegSlot nextInputLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo);
void EmitIteratorClose(Js::RegSlot iteratorLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo);
void EmitIteratorComplete(Js::RegSlot doneLocation, Js::RegSlot iteratorResultLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo);
void EmitIteratorValue(Js::RegSlot valueLocation, Js::RegSlot iteratorResultLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo);

// Binds one destructuring target (declaration or reference expression) to the
// value held in sourceLocation.
void EmitDestructuredElement(ParseNode *elem, Js::RegSlot sourceLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo *funcInfo)
{
    switch (elem->nop)
    {
    case knopVarDecl:
    case knopLetDecl:
    case knopConstDecl:
        // We manually need to set NeedDeclaration since the node won't be visited.
        elem->AsParseNodeVar()->sym->SetNeedDeclaration(false);
        break;
    default:
        // Non-declaration target: evaluate its reference components first.
        EmitReference(elem, byteCodeGenerator, funcInfo);
    }

    EmitAssignment(nullptr, elem, sourceLocation, byteCodeGenerator, funcInfo);
    funcInfo->ReleaseReference(elem);
}

// Emits the "rest" part of an array destructuring pattern ([...r] = iterable):
// drains the iterator into a fresh array, tracking in the shouldCallReturn*
// registers whether an abrupt exit must invoke the iterator's return() method.
void EmitDestructuredRestArray(ParseNode *elem,
    Js::RegSlot iteratorLocation,
    Js::RegSlot shouldCallReturnFunctionLocation,
    Js::RegSlot shouldCallReturnFunctionLocationFinally,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    Js::RegSlot restArrayLocation = funcInfo->AcquireTmpRegister();
    bool isAssignmentTarget = !(elem->AsParseNodeUni()->pnode1->IsPattern() || elem->AsParseNodeUni()->pnode1->IsVarLetOrConst());

    if (isAssignmentTarget)
    {
        // Evaluating the target's reference can throw; flag that the iterator
        // must be closed if it does, then clear the flags again afterwards.
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocation);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocationFinally);
        EmitReference(elem->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);
    }

    byteCodeGenerator->Writer()->Reg1Unsigned1(
        Js::OpCode::NewScArray,
        restArrayLocation,
        ByteCodeGenerator::DefaultArraySize);

    // BytecodeGen can't convey to IRBuilder that some of the temporaries used here are live. When we
    // have a rest parameter, a counter is used in a loop for the array index, but there is no way to
    // convey this is live on the back edge.
    // As a workaround, we have a persistent var reg that is used for the loop counter
    Js::RegSlot counterLocation = elem->location;
    // TODO[ianhall]: Is calling EnregisterConstant() during Emit phase allowed?
    Js::RegSlot zeroConstantReg = byteCodeGenerator->EnregisterConstant(0);
    byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, counterLocation, zeroConstantReg);

    // loopTop:
    Js::ByteCodeLabel loopTop = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->MarkLabel(loopTop);

    Js::RegSlot itemLocation = funcInfo->AcquireTmpRegister();

    // result = iterator.next()
    EmitIteratorNext(itemLocation, iteratorLocation, Js::Constants::NoRegister, byteCodeGenerator, funcInfo);

    // done = result.done; exit the loop when the iterator reports completion.
    Js::RegSlot doneLocation = funcInfo->AcquireTmpRegister();
    EmitIteratorComplete(doneLocation, itemLocation, byteCodeGenerator, funcInfo);

    Js::ByteCodeLabel iteratorDone = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, iteratorDone, doneLocation);

    Js::RegSlot valueLocation = funcInfo->AcquireTmpRegister();
    EmitIteratorValue(valueLocation, itemLocation, byteCodeGenerator, funcInfo);

    // Storing into the rest array can throw; the iterator is live again, so the
    // abrupt-exit paths must call its return() method.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocation);
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocationFinally);

    // restArray[counter] = value
    byteCodeGenerator->Writer()->Element(
        ByteCodeGenerator::GetStElemIOpCode(funcInfo),
        valueLocation, restArrayLocation, counterLocation);
    funcInfo->ReleaseTmpRegister(valueLocation);
    funcInfo->ReleaseTmpRegister(doneLocation);
    funcInfo->ReleaseTmpRegister(itemLocation);

    byteCodeGenerator->Writer()->Reg2(Js::OpCode::Incr_A, counterLocation, counterLocation);

    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);

    byteCodeGenerator->Writer()->Br(loopTop);

    // iteratorDone:
    byteCodeGenerator->Writer()->MarkLabel(iteratorDone);

    // The iterator is exhausted; bind the collected array to the rest target.
    ParseNode *restElem = elem->AsParseNodeUni()->pnode1;
    if (isAssignmentTarget)
    {
        EmitAssignment(nullptr, restElem, restArrayLocation, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseReference(restElem);
    }
    else
    {
        EmitDestructuredElement(restElem, restArrayLocation, byteCodeGenerator, funcInfo);
    }

    funcInfo->ReleaseTmpRegister(restArrayLocation);
}

void EmitDestructuredArray(
    ParseNode *lhs,
    Js::RegSlot rhsLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo);

// Emits a conditional IteratorClose: the close sequence runs only when the
// iterator has not already reported done (doneLocation is falsy).
void EmitIteratorCloseIfNotDone(Js::RegSlot iteratorLocation, Js::RegSlot doneLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    Js::ByteCodeLabel skipCloseLabel = byteCodeGenerator->Writer()->DefineLabel();

    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, skipCloseLabel, doneLocation);

    EmitIteratorClose(iteratorLocation, byteCodeGenerator, funcInfo);

    byteCodeGenerator->Writer()->MarkLabel(skipCloseLabel);
}

/*
  EmitDestructuredArray(lhsArray, rhs):
    iterator = rhs[@@iterator]
    if lhsArray empty
      return
    for each element in lhsArray except rest
      value = iterator.next()
      if element is a nested destructured array
        EmitDestructuredArray(element, value)
      else
        if value is undefined and there is an initializer
          evaluate initializer
          evaluate element reference
          element = initializer
        else
          element = value
    if lhsArray has a rest element
      rest = []
      while iterator is not done
        value = iterator.next()
        rest.append(value)
*/
// Core of array-pattern destructuring: walks the element list of the pattern,
// pulling one iterator result per element and dispatching to the appropriate
// nested-pattern / element / rest emission path.
void EmitDestructuredArrayCore(
    ParseNode *list,
    Js::RegSlot iteratorLocation,
    Js::RegSlot shouldCallReturnFunctionLocation,
    Js::RegSlot shouldCallReturnFunctionLocationFinally,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo
    )
{
    Assert(list != nullptr);

    ParseNode *elem = nullptr;
    while (list != nullptr)
    {
        ParseNode *init = nullptr;

        if (list->nop == knopList)
        {
            elem = list->AsParseNodeBin()->pnode1;
        }
        else
        {
            elem = list;
        }

        // A rest element terminates the per-element loop; it is handled after it.
        if (elem->nop == knopEllipsis)
        {
            break;
        }

        switch (elem->nop)
        {
        case knopAsg:
            // An assignment node will always have an initializer
            init = elem->AsParseNodeBin()->pnode2;
            elem = elem->AsParseNodeBin()->pnode1;
            break;

        case knopVarDecl:
        case knopLetDecl:
        case knopConstDecl:
            init = elem->AsParseNodeVar()->pnodeInit;
            break;

        default:
            break;
        }

        byteCodeGenerator->StartStatement(elem);

        bool
        isAssignmentTarget = !(elem->IsPattern() || elem->IsVarLetOrConst());

        if (isAssignmentTarget)
        {
            // Evaluating the target reference may throw; mark that the iterator
            // must be closed on an abrupt exit while that happens.
            byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocation);
            byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocationFinally);
            EmitReference(elem, byteCodeGenerator, funcInfo);
        }

        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);

        // result = iterator.next(); done = result.done
        Js::RegSlot itemLocation = funcInfo->AcquireTmpRegister();
        EmitIteratorNext(itemLocation, iteratorLocation, Js::Constants::NoRegister, byteCodeGenerator, funcInfo);

        Js::RegSlot doneLocation = funcInfo->AcquireTmpRegister();
        EmitIteratorComplete(doneLocation, itemLocation, byteCodeGenerator, funcInfo);

        if (elem->nop == knopEmpty)
        {
            // Elision ([a, , b]): the iterator was advanced, but nothing is bound.
            if (list->nop == knopList)
            {
                list = list->AsParseNodeBin()->pnode2;
                funcInfo->ReleaseTmpRegister(doneLocation);
                funcInfo->ReleaseTmpRegister(itemLocation);
                continue;
            }
            else
            {
                Assert(list->nop == knopEmpty);
                // Trailing elision: close the iterator if it is still live.
                EmitIteratorCloseIfNotDone(iteratorLocation, doneLocation, byteCodeGenerator, funcInfo);
                funcInfo->ReleaseTmpRegister(doneLocation);
                funcInfo->ReleaseTmpRegister(itemLocation);
                break;
            }
        }

        // If the iterator hasn't completed, skip assigning undefined.
        Js::ByteCodeLabel iteratorAlreadyDone = byteCodeGenerator->Writer()->DefineLabel();
        byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, iteratorAlreadyDone, doneLocation);

        // We're not done with the iterator, so assign the .next() value.
        Js::RegSlot valueLocation = funcInfo->AcquireTmpRegister();
        EmitIteratorValue(valueLocation, itemLocation, byteCodeGenerator, funcInfo);
        Js::ByteCodeLabel beforeDefaultAssign = byteCodeGenerator->Writer()->DefineLabel();

        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocation);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocationFinally);
        byteCodeGenerator->Writer()->Br(beforeDefaultAssign);

        // iteratorAlreadyDone:
        byteCodeGenerator->Writer()->MarkLabel(iteratorAlreadyDone);
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, valueLocation, funcInfo->undefinedConstantRegister);

        // beforeDefaultAssign:
        byteCodeGenerator->Writer()->MarkLabel(beforeDefaultAssign);

        if (elem->IsPattern())
        {
            // If we get an undefined value and have an initializer, use it in place of undefined.
            if (init != nullptr)
            {
                /*
                  the IR builder uses two symbols for a temp register in the if else path
                  R9 <- R3
                  if (...)
                  R9 <- R2
                  R10 = R9.<property> // error -> IR creates a new lifetime for the if path, and the direct path dest is not referenced
                  hence we have to create a new temp

                  TEMP REG USED TO FIX THIS PRODUCES THIS
                  R9 <- R3
                  if (BrEq_A R9, R3)
                  R10 <- R2 : else
                  R10 <- R9 : skipdefault
                  ... = R10[@@iterator] : loadIter
                */

                // Temp Register
                Js::RegSlot valueLocationTmp = funcInfo->AcquireTmpRegister();
                byteCodeGenerator->StartStatement(init);

                Js::ByteCodeLabel skipDefault = byteCodeGenerator->Writer()->DefineLabel();
                Js::ByteCodeLabel loadIter = byteCodeGenerator->Writer()->DefineLabel();

                // check value is undefined
                byteCodeGenerator->Writer()->BrReg2(Js::OpCode::BrSrNeq_A, skipDefault, valueLocation, funcInfo->undefinedConstantRegister);

                // Evaluate the default expression and assign it.
                Emit(init, byteCodeGenerator, funcInfo, false);
                byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, valueLocationTmp, init->location);
                funcInfo->ReleaseLoc(init);

                // jmp to loadIter
                byteCodeGenerator->Writer()->Br(loadIter);

                // skipDefault:
                byteCodeGenerator->Writer()->MarkLabel(skipDefault);
                byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, valueLocationTmp, valueLocation);

                // loadIter:
                // @@iterator
                byteCodeGenerator->Writer()->MarkLabel(loadIter);
                byteCodeGenerator->EndStatement(init);

                if (elem->nop == knopObjectPattern)
                {
                    EmitDestructuredObject(elem, valueLocationTmp, byteCodeGenerator, funcInfo);
                }
                else
                {
                    // Recursively emit a destructured array using the current .next() as the RHS.
                    EmitDestructuredArray(elem, valueLocationTmp, byteCodeGenerator, funcInfo);
                }

                funcInfo->ReleaseTmpRegister(valueLocationTmp);
            }
            else
            {
                if (elem->nop == knopObjectPattern)
                {
                    EmitDestructuredObject(elem, valueLocation, byteCodeGenerator, funcInfo);
                }
                else
                {
                    // Recursively emit a destructured array using the current .next() as the RHS.
                    EmitDestructuredArray(elem, valueLocation, byteCodeGenerator, funcInfo);
                }
            }
        }
        else
        {
            EmitDestructuredValueOrInitializer(elem, valueLocation, init, isAssignmentTarget, byteCodeGenerator, funcInfo);
        }

        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);

        if (list->nop != knopList)
        {
            // Last element of the pattern: close the iterator if it is still live.
            EmitIteratorCloseIfNotDone(iteratorLocation, doneLocation, byteCodeGenerator, funcInfo);
        }

        funcInfo->ReleaseTmpRegister(valueLocation);
        funcInfo->ReleaseTmpRegister(doneLocation);
        funcInfo->ReleaseTmpRegister(itemLocation);

        if (isAssignmentTarget)
        {
            funcInfo->ReleaseReference(elem);
        }

        byteCodeGenerator->EndStatement(elem);

        if (list->nop == knopList)
        {
            list = list->AsParseNodeBin()->pnode2;
        }
        else
        {
            break;
        }
    }

    // If we saw a rest element, emit the rest array.
    if (elem != nullptr && elem->nop == knopEllipsis)
    {
        EmitDestructuredRestArray(elem,
            iteratorLocation,
            shouldCallReturnFunctionLocation,
            shouldCallReturnFunctionLocationFinally,
            byteCodeGenerator,
            funcInfo);
    }
}

// Generating
// try {
//    CallIteratorClose
// } catch (e) {
//    do nothing
// }
// Swallows any exception thrown by the iterator's return() method, per the
// spec's IteratorClose behavior when already propagating an abrupt completion.
void EmitTryCatchAroundClose(
    Js::RegSlot iteratorLocation,
    Js::ByteCodeLabel endLabel,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    Js::ByteCodeLabel catchLabel = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->Br(Js::OpCode::TryCatch, catchLabel);

    //
    // There is no need to add TryScopeRecord here as we are going to call 'return' function and there is not yield expression here.
    EmitIteratorClose(iteratorLocation, byteCodeGenerator, funcInfo);

    byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
    byteCodeGenerator->Writer()->Br(endLabel);

    byteCodeGenerator->Writer()->MarkLabel(catchLabel);
    // Catch and discard: the exception object is bound to a temp and ignored.
    Js::RegSlot catchParamLocation = funcInfo->AcquireTmpRegister();
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::Catch, catchParamLocation);
    funcInfo->ReleaseTmpRegister(catchParamLocation);

    byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
}

// Bookkeeping node for the generator's list of active try scopes; records the
// resume opcode, its label, and up to two registers (exception/offset).
struct ByteCodeGenerator::TryScopeRecord : public JsUtil::DoublyLinkedListElement<TryScopeRecord>
{
    Js::OpCode op;
    Js::ByteCodeLabel label;
    Js::RegSlot reg1;
    Js::RegSlot reg2;

    TryScopeRecord(Js::OpCode op, Js::ByteCodeLabel label) : op(op), label(label), reg1(Js::Constants::NoRegister), reg2(Js::Constants::NoRegister) { }
    TryScopeRecord(Js::OpCode op, Js::ByteCodeLabel label, Js::RegSlot r1, Js::RegSlot r2) : op(op), label(label), reg1(r1), reg2(r2) { }
};

// Generating
// catch(e) {
//      if (shouldCallReturn)
//          CallReturnWhichWrappedByTryCatch
//      throw e;
// }
void EmitTopLevelCatch(Js::ByteCodeLabel catchLabel,
    Js::RegSlot iteratorLocation,
    Js::RegSlot shouldCallReturnLocation,
    Js::RegSlot shouldCallReturnLocationFinally,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    Js::ByteCodeLabel
    afterCatchBlockLabel = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
    byteCodeGenerator->Writer()->Br(afterCatchBlockLabel);
    byteCodeGenerator->Writer()->MarkLabel(catchLabel);

    Js::RegSlot catchParamLocation = funcInfo->AcquireTmpRegister();
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::Catch, catchParamLocation);

    // Coroutines track active try scopes so the catch can be resumed after a yield.
    ByteCodeGenerator::TryScopeRecord tryRecForCatch(Js::OpCode::ResumeCatch, catchLabel);
    if (funcInfo->byteCodeFunction->IsCoroutine())
    {
        byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForCatch);
    }

    Js::ByteCodeLabel skipCallCloseLabel = byteCodeGenerator->Writer()->DefineLabel();

    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFalse_A, skipCallCloseLabel, shouldCallReturnLocation);
    // The catch path closes the iterator itself, so the finally must not do it again.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnLocationFinally);
    EmitTryCatchAroundClose(iteratorLocation, skipCallCloseLabel, byteCodeGenerator, funcInfo);

    byteCodeGenerator->Writer()->MarkLabel(skipCallCloseLabel);

    // Rethrow the exception.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::Throw, catchParamLocation);

    funcInfo->ReleaseTmpRegister(catchParamLocation);
    if (funcInfo->byteCodeFunction->IsCoroutine())
    {
        byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd();
    }

    byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
    byteCodeGenerator->Writer()->MarkLabel(afterCatchBlockLabel);
}

// Generating
// finally {
//      if (shouldCallReturn)
//          CallReturn
// }
void EmitTopLevelFinally(Js::ByteCodeLabel finallyLabel,
    Js::RegSlot iteratorLocation,
    Js::RegSlot shouldCallReturnLocation,
    Js::RegSlot yieldExceptionLocation,
    Js::RegSlot yieldOffsetLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    bool isCoroutine = funcInfo->byteCodeFunction->IsCoroutine();

    Js::ByteCodeLabel afterFinallyBlockLabel = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
    byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(false);

    byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true);

    byteCodeGenerator->Writer()->Br(afterFinallyBlockLabel);
    byteCodeGenerator->Writer()->MarkLabel(finallyLabel);
    byteCodeGenerator->Writer()->Empty(Js::OpCode::Finally);

    // Coroutines need a resume record (with the yield exception/offset registers)
    // so the finally can be re-entered after a yield inside the protected region.
    ByteCodeGenerator::TryScopeRecord tryRecForFinally(Js::OpCode::ResumeFinally, finallyLabel, yieldExceptionLocation, yieldOffsetLocation);
    if (isCoroutine)
    {
        byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForFinally);
    }

    Js::ByteCodeLabel skipCallCloseLabel = byteCodeGenerator->Writer()->DefineLabel();

    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFalse_A, skipCallCloseLabel, shouldCallReturnLocation);
    EmitIteratorClose(iteratorLocation, byteCodeGenerator, funcInfo);

    byteCodeGenerator->Writer()->MarkLabel(skipCallCloseLabel);
    if (isCoroutine)
    {
        byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd();
        funcInfo->ReleaseTmpRegister(yieldOffsetLocation);
        funcInfo->ReleaseTmpRegister(yieldExceptionLocation);
    }

    byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(false);
    byteCodeGenerator->Writer()->Empty(Js::OpCode::LeaveNull);
    byteCodeGenerator->Writer()->MarkLabel(afterFinallyBlockLabel);
}

// Emits the catch and finally epilogues for the try/catch/finally wrapper that
// EmitDestructuredArray opens, then releases the shouldCallReturn flag registers.
void EmitCatchAndFinallyBlocks(Js::ByteCodeLabel catchLabel,
    Js::ByteCodeLabel finallyLabel,
    Js::RegSlot iteratorLocation,
    Js::RegSlot shouldCallReturnFunctionLocation,
    Js::RegSlot shouldCallReturnFunctionLocationFinally,
    Js::RegSlot yieldExceptionLocation,
    Js::RegSlot yieldOffsetLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo
    )
{
    bool isCoroutine = funcInfo->byteCodeFunction->IsCoroutine();
    if (isCoroutine)
    {
        // Pop the inner TryCatch record before emitting the catch block.
        byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd();
    }

    EmitTopLevelCatch(catchLabel,
        iteratorLocation,
        shouldCallReturnFunctionLocation,
        shouldCallReturnFunctionLocationFinally,
        byteCodeGenerator,
        funcInfo);

    if (isCoroutine)
    {
        // Pop the TryFinallyWithYield record before emitting the finally block.
        byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd();
    }

    EmitTopLevelFinally(finallyLabel,
        iteratorLocation,
        shouldCallReturnFunctionLocationFinally,
        yieldExceptionLocation,
        yieldOffsetLocation,
        byteCodeGenerator,
        funcInfo);

    funcInfo->ReleaseTmpRegister(shouldCallReturnFunctionLocationFinally);
    funcInfo->ReleaseTmpRegister(shouldCallReturnFunctionLocation);
}

// Emit a wrapper try..finally block around the destructuring elements
void EmitDestructuredArray(
    ParseNode *lhs,
    Js::RegSlot rhsLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    byteCodeGenerator->StartStatement(lhs);
    Js::RegSlot iteratorLocation = funcInfo->AcquireTmpRegister();

    // iterator = rhs[@@iterator]()
    EmitGetIterator(iteratorLocation, rhsLocation, byteCodeGenerator, funcInfo);

    Assert(lhs->nop == knopArrayPattern);
    ParseNode *list = lhs->AsParseNodeArrLit()->pnode1;

    if (list == nullptr)
    {
        // Handling this case ([] = obj);
        EmitIteratorClose(iteratorLocation, byteCodeGenerator, funcInfo);

        // No elements to bind or assign.
        funcInfo->ReleaseTmpRegister(iteratorLocation);
        byteCodeGenerator->EndStatement(lhs);
        return;
    }

    // This variable facilitates on when to call the return function (which is Iterator close).
    // When we are emitting bytecode for a destructuring element,
    // this variable will be set to true.
    Js::RegSlot shouldCallReturnFunctionLocation = funcInfo->AcquireTmpRegister();
    Js::RegSlot shouldCallReturnFunctionLocationFinally = funcInfo->AcquireTmpRegister();

    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);

    byteCodeGenerator->SetHasFinally(true);
    byteCodeGenerator->SetHasTry(true);
    byteCodeGenerator->TopFuncInfo()->byteCodeFunction->SetDontInline(true);

    Js::RegSlot regException = Js::Constants::NoRegister;
    Js::RegSlot regOffset = Js::Constants::NoRegister;
    bool isCoroutine = funcInfo->byteCodeFunction->IsCoroutine();

    if (isCoroutine)
    {
        // Coroutines need registers to save the pending exception and resume
        // offset across yields inside the protected region.
        regException = funcInfo->AcquireTmpRegister();
        regOffset = funcInfo->AcquireTmpRegister();
    }

    // Insert try node here
    Js::ByteCodeLabel finallyLabel = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel catchLabel = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true);

    ByteCodeGenerator::TryScopeRecord tryRecForTryFinally(Js::OpCode::TryFinallyWithYield, finallyLabel);

    if (isCoroutine)
    {
        byteCodeGenerator->Writer()->BrReg2(Js::OpCode::TryFinallyWithYield, finallyLabel, regException, regOffset);
        tryRecForTryFinally.reg1 = regException;
        tryRecForTryFinally.reg2 = regOffset;
        byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForTryFinally);
    }
    else
    {
        byteCodeGenerator->Writer()->Br(Js::OpCode::TryFinally, finallyLabel);
    }

    // Inner try..catch nested inside the try..finally.
    byteCodeGenerator->Writer()->Br(Js::OpCode::TryCatch, catchLabel);

    ByteCodeGenerator::TryScopeRecord tryRecForTry(Js::OpCode::TryCatch, catchLabel);
    if (isCoroutine)
    {
        byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForTry);
    }

    // Protected region: the per-element destructuring work.
    EmitDestructuredArrayCore(list,
        iteratorLocation,
        shouldCallReturnFunctionLocation,
        shouldCallReturnFunctionLocationFinally,
        byteCodeGenerator,
        funcInfo);

    EmitCatchAndFinallyBlocks(catchLabel,
        finallyLabel,
        iteratorLocation,
        shouldCallReturnFunctionLocation,
        shouldCallReturnFunctionLocationFinally,
        regException,
        regOffset,
        byteCodeGenerator,
        funcInfo);

    funcInfo->ReleaseTmpRegister(iteratorLocation);
    byteCodeGenerator->EndStatement(lhs);
}

// Loads the value of one object-pattern property name (computed or literal)
// from objectLocation into lhsLocation. For computed names with a rest element
// present, also records the property id so Restify can exclude it later.
void EmitNameInvoke(Js::RegSlot lhsLocation,
    Js::RegSlot objectLocation,
    Js::RegSlot computedPropIdArrLocation,
    uint32 *computedIndex,
    bool hasRest,
    ParseNodePtr nameNode,
    ByteCodeGenerator* byteCodeGenerator,
    FuncInfo* funcInfo)
{
    Assert(nameNode != nullptr);
    if (nameNode->nop == knopComputedName)
    {
        ParseNodePtr pnode1 = nameNode->AsParseNodeUni()->pnode1;
        Emit(pnode1, byteCodeGenerator, funcInfo, false/*isConstructorCall*/);

        byteCodeGenerator->Writer()->Element(Js::OpCode::LdElemI_A, lhsLocation, objectLocation, pnode1->location);
        if (hasRest)
        {
            byteCodeGenerator->Writer()->Slot(Js::OpCode::StPropIdArrFromVar, pnode1->location, computedPropIdArrLocation, *computedIndex);
            (*computedIndex)++;
        }
        funcInfo->ReleaseLoc(pnode1);
    }
    else
    {
        Assert(nameNode->nop == knopStr);
        Js::PropertyId propertyId = nameNode->AsParseNodeStr()->pid->GetPropertyId();

        uint cacheId = funcInfo->FindOrAddInlineCacheId(objectLocation, propertyId, false/*isLoadMethod*/, false/*isStore*/);
        byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, lhsLocation, objectLocation, cacheId);
    }
}

// Binds one destructuring target, first substituting the initializer's value
// when the incoming RHS value is undefined (default-value semantics).
void EmitDestructuredValueOrInitializer(ParseNodePtr lhsElementNode,
    Js::RegSlot rhsLocation,
    ParseNodePtr initializer,
    bool isNonPatternAssignmentTarget,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    // If we have initializer we need to see if the destructured value is undefined or not - if it is undefined we need to assign initializer
    Js::ByteCodeLabel useDefault = -1;
    Js::ByteCodeLabel end = -1;
    Js::RegSlot rhsLocationTmp = rhsLocation;

    if (initializer != nullptr)
    {
        rhsLocationTmp = funcInfo->AcquireTmpRegister();

        useDefault = byteCodeGenerator->Writer()->DefineLabel();
        end =
        byteCodeGenerator->Writer()->DefineLabel();

        // If the RHS value is undefined, branch to the default-value path.
        byteCodeGenerator->Writer()->BrReg2(Js::OpCode::BrSrEq_A, useDefault, rhsLocation, funcInfo->undefinedConstantRegister);
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, rhsLocationTmp, rhsLocation);

        byteCodeGenerator->Writer()->Br(end);
        byteCodeGenerator->Writer()->MarkLabel(useDefault);

        Emit(initializer, byteCodeGenerator, funcInfo, false/*isConstructorCall*/);
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, rhsLocationTmp, initializer->location);
        funcInfo->ReleaseLoc(initializer);

        byteCodeGenerator->Writer()->MarkLabel(end);
    }

    // Dispatch on the shape of the target: nested pattern, plain assignment
    // target, or declaration/reference element.
    if (lhsElementNode->nop == knopArrayPattern)
    {
        EmitDestructuredArray(lhsElementNode, rhsLocationTmp, byteCodeGenerator, funcInfo);
    }
    else if (lhsElementNode->nop == knopObjectPattern)
    {
        EmitDestructuredObject(lhsElementNode, rhsLocationTmp, byteCodeGenerator, funcInfo);
    }
    else if (isNonPatternAssignmentTarget)
    {
        EmitAssignment(nullptr, lhsElementNode, rhsLocationTmp, byteCodeGenerator, funcInfo);
    }
    else
    {
        EmitDestructuredElement(lhsElementNode, rhsLocationTmp, byteCodeGenerator, funcInfo);
    }

    if (initializer != nullptr)
    {
        funcInfo->ReleaseTmpRegister(rhsLocationTmp);
    }
}

// Emits one member of an object pattern: either a named/computed property
// extraction ({x: target}) or the rest member ({...target}).
void EmitDestructuredObjectMember(ParseNodePtr memberNode,
    Js::RegSlot rhsLocation,
    Js::RegSlot propIdArrLocation,
    Js::RegSlot computedPropIdArrLocation,
    uint32 *computedIndex,
    bool hasRest,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    Assert(memberNode->nop == knopObjectPatternMember || memberNode->nop == knopEllipsis);

    Js::RegSlot nameLocation = funcInfo->AcquireTmpRegister();
    ParseNodePtr lhsElementNode = nullptr;

    if (memberNode->nop == knopObjectPatternMember)
    {
        EmitNameInvoke(nameLocation, rhsLocation, computedPropIdArrLocation, computedIndex, hasRest, memberNode->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo);

        // Imagine we are transforming
        // {x:x1} = {} to x1 = {}.x (here x1 is the second node of the member but that is our lhsnode)
        lhsElementNode = memberNode->AsParseNodeBin()->pnode2;
    }
    else
    {
        // memberNode->nop == knopEllipsis, aka we are performing Rest operation
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::NewScObjectSimple, nameLocation);
        byteCodeGenerator->Writer()->Reg4(Js::OpCode::Restify, rhsLocation, nameLocation, propIdArrLocation, computedPropIdArrLocation);
        lhsElementNode = memberNode->AsParseNodeUni()->pnode1;
    }

    // Peel off a default initializer, if the target carries one.
    ParseNodePtr init = nullptr;
    if (lhsElementNode->IsVarLetOrConst())
    {
        init = lhsElementNode->AsParseNodeVar()->pnodeInit;
    }
    else if (lhsElementNode->nop == knopAsg)
    {
        init = lhsElementNode->AsParseNodeBin()->pnode2;
        lhsElementNode = lhsElementNode->AsParseNodeBin()->pnode1;
    }

    EmitDestructuredValueOrInitializer(lhsElementNode, nameLocation, init, false /*isNonPatternAssignmentTarget*/, byteCodeGenerator, funcInfo);

    funcInfo->ReleaseTmpRegister(nameLocation);
}

// Collects the property ids of all literal (non-computed, non-rest) member
// names into ids[], and reports via hasComputedProps whether any computed
// names were seen. Used to build the exclusion list for object rest.
void EmitObjectPropertyIdsToArray(ByteCodeGenerator *byteCodeGenerator,
    Js::PropertyId *ids,
    ParseNodePtr memberNodes,
    uint32 staticCount,
    bool *hasComputedProps)
{
    uint32 index = 0;
    Parser::ForEachItemInList(memberNodes, [&](ParseNodePtr current)
    {
        if (current->nop != knopEllipsis)
        {
            ParseNodePtr nameNode = current->AsParseNodeBin()->pnode1;
            Assert(nameNode != nullptr);
            Assert(nameNode->nop == knopComputedName || nameNode->nop == knopStr);

            if (nameNode->nop == knopStr)
            {
                // Guard against writing past the pre-sized array.
                if (index >= staticCount)
                {
                    Js::Throw::InternalError();
                    return;
                }
                ids[index] = nameNode->AsParseNodeStr()->pid->GetPropertyId();
                index++;
            }
            else
            {
                *hasComputedProps = true;
            }
        }
    });
}

// Emits an object destructuring pattern ({a, b: c, ...rest} = rhs): checks the
// RHS is object-coercible, then emits each member in source order.
void EmitDestructuredObject(ParseNode *lhs,
    Js::RegSlot rhsLocationOrig,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    Assert(lhs->nop == knopObjectPattern);
    ParseNodeObjLit *pnodeObjLit = lhs->AsParseNodeObjLit();
    ParseNodePtr pnode1 = pnodeObjLit->pnode1;
    uint32 staticCount = pnodeObjLit->staticCount;
    uint32 computedCount = pnodeObjLit->computedCount;
    bool hasRest = pnodeObjLit->hasRest;
    bool hasComputedProps = false;

    byteCodeGenerator->StartStatement(lhs);

    Js::ByteCodeLabel skipThrow = byteCodeGenerator->Writer()->DefineLabel();
    Js::RegSlot rhsLocation = funcInfo->AcquireTmpRegister();
    byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, rhsLocation, rhsLocationOrig);

    // Throw a TypeError when the RHS is undefined (not object-coercible).
    byteCodeGenerator->Writer()->BrReg2(Js::OpCode::BrNeq_A, skipThrow, rhsLocation, funcInfo->undefinedConstantRegister);
    byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_ObjectCoercible));
    byteCodeGenerator->Writer()->MarkLabel(skipThrow);

    if (pnode1 != nullptr)
    {
        Js::RegSlot propIdArrLocation = Js::Constants::NoRegister;
        Js::RegSlot computedPropIdArrLocation = Js::Constants::NoRegister;
        if (hasRest)
        {
            // Build the auxiliary PropertyIdArray of statically-known names so
            // Restify can exclude them from the rest object.
            uint extraAlloc = UInt32Math::Mul(staticCount, sizeof(Js::PropertyId));
            uint auxSize = UInt32Math::Add(sizeof(Js::PropertyIdArray), extraAlloc);
            Js::PropertyIdArray *propIds = AnewPlus(byteCodeGenerator->GetAllocator(), extraAlloc, Js::PropertyIdArray, staticCount, 0);

            Assert(pnode1->nop == knopList || pnode1->nop == knopObjectPatternMember || pnode1->nop == knopEllipsis);

            EmitObjectPropertyIdsToArray(byteCodeGenerator, propIds->elements, pnode1, staticCount, &hasComputedProps);

            // Load static PropertyIdArray here
            propIdArrLocation = funcInfo->AcquireTmpRegister();
            byteCodeGenerator->Writer()->Auxiliary(Js::OpCode::LdPropIds, propIdArrLocation, propIds, auxSize, staticCount);

            if (hasComputedProps)
            {
                // Computed names are recorded at runtime into a separate array.
                computedPropIdArrLocation = funcInfo->AcquireTmpRegister();
                byteCodeGenerator->Writer()->Reg1Unsigned1(Js::OpCode::NewPropIdArrForCompProps, computedPropIdArrLocation, computedCount);
            }
            else
            {
                computedPropIdArrLocation = propIdArrLocation;
            }
        }

        uint32 index = 0;
        Parser::ForEachItemInList(pnode1, [&](ParseNodePtr memberNode)
        {
            EmitDestructuredObjectMember(memberNode, rhsLocation, propIdArrLocation, computedPropIdArrLocation, &index, hasRest, byteCodeGenerator, funcInfo);
        });

        if (hasRest)
        {
            if (hasComputedProps)
            {
                funcInfo->ReleaseTmpRegister(computedPropIdArrLocation);
            }
            funcInfo->ReleaseTmpRegister(propIdArrLocation);
        }
    }

    funcInfo->ReleaseTmpRegister(rhsLocation);
    byteCodeGenerator->EndStatement(lhs);
}

// Stores the value in rhsLocation into the LHS target, dispatching on the
// target's node kind (declaration, name, property, index, nested pattern, ...).
// asgnNode, when non-null, receives the RHS value as the expression result.
void EmitAssignment(
    ParseNode *asgnNode,
    ParseNode *lhs,
    Js::RegSlot rhsLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    switch (lhs->nop)
    {
    // assignment to a local or global variable
    case knopVarDecl:
    case knopLetDecl:
    case knopConstDecl:
    {
        Symbol *sym = lhs->AsParseNodeVar()->sym;
        Assert(sym != nullptr);
        byteCodeGenerator->EmitPropStore(rhsLocation, sym, nullptr, funcInfo, lhs->nop == knopLetDecl, lhs->nop == knopConstDecl);
        break;
    }

    case knopName:
    {
        // Special names like 'this' or 'new.target' cannot be assigned to
        ParseNodeName * pnodeNameLhs = lhs->AsParseNodeName();
        if (pnodeNameLhs->IsSpecialName())
        {
            byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_CantAssignTo));
        }
        else
        {
            byteCodeGenerator->EmitPropStore(rhsLocation, pnodeNameLhs->sym, pnodeNameLhs->pid, funcInfo);
        }
        break;
    }

    // x.y =
    case knopDot:
    {
        // PutValue(x, "y", rhs)
        Js::PropertyId propertyId = lhs->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode();

        if (ByteCodeGenerator::IsSuper(lhs->AsParseNodeBin()->pnode1))
        {
            // super.y = rhs: store through the home object's prototype, with
            // the bound 'this' passed along for the setter.
            Emit(lhs->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false);
            Js::RegSlot tmpReg = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, lhs->AsParseNodeBin()->pnode1->location, funcInfo);
            funcInfo->ReleaseLoc(lhs->AsParseNodeSuperReference()->pnodeThis);
            uint cacheId = funcInfo->FindOrAddInlineCacheId(tmpReg, propertyId, false, true);
            byteCodeGenerator->Writer()->PatchablePropertyWithThisPtr(Js::OpCode::StSuperFld, rhsLocation, tmpReg, lhs->AsParseNodeSuperReference()->pnodeThis->location, cacheId);
        }
        else
        {
            uint cacheId = funcInfo->FindOrAddInlineCacheId(lhs->AsParseNodeBin()->pnode1->location, propertyId, false, true);
            byteCodeGenerator->Writer()->PatchableProperty(
                ByteCodeGenerator::GetStFldOpCode(funcInfo, false, false, false, false, byteCodeGenerator->forceStrictModeForClassComputedPropertyName),
                rhsLocation, lhs->AsParseNodeBin()->pnode1->location, cacheId);
        }

        break;
    }

    case knopIndex:
    {
        Js::RegSlot targetLocation = lhs->AsParseNodeBin()->pnode1->location;

        if (ByteCodeGenerator::IsSuper(lhs->AsParseNodeBin()->pnode1))
        {
            // We need to emit the 'this' node for the super reference even if we aren't planning to use the 'this' value.
            // This is because we might be in a derived class constructor where we haven't yet called super() to bind the 'this' value.
            // See ecma262 abstract operation 'MakeSuperPropertyReference'
            Emit(lhs->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false);
            funcInfo->ReleaseLoc(lhs->AsParseNodeSuperReference()->pnodeThis);

            targetLocation = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, targetLocation, funcInfo);
        }

        byteCodeGenerator->Writer()->Element(
            ByteCodeGenerator::GetStElemIOpCode(funcInfo),
            rhsLocation, targetLocation, lhs->AsParseNodeBin()->pnode2->location);

        break;
    }

    case knopObjectPattern:
    {
        Assert(byteCodeGenerator->IsES6DestructuringEnabled());
        // Copy the rhs value to be the result of the assignment if needed.
        if (asgnNode != nullptr)
        {
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, asgnNode->location, rhsLocation);
        }

        return EmitDestructuredObject(lhs, rhsLocation, byteCodeGenerator, funcInfo);
    }

    case knopArrayPattern:
    {
        Assert(byteCodeGenerator->IsES6DestructuringEnabled());
        // Copy the rhs value to be the result of the assignment if needed.
        if (asgnNode != nullptr)
        {
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, asgnNode->location, rhsLocation);
        }

        return EmitDestructuredArray(lhs, rhsLocation, byteCodeGenerator, funcInfo);
    }

    case knopArray:
    case knopObject:
        // Assignment to array/object can get through to byte code gen when the parser fails to convert destructuring
        // assignment to pattern (because of structural mismatch between LHS & RHS?). Revisit when we nail
        // down early vs. runtime errors for destructuring.
        byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_CantAssignTo));
        break;

    default:
        byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_CantAssignTo));
        break;
    }

    if (asgnNode != nullptr)
    {
        // We leave it up to the caller to pass this node only if the assignment expression is used.
        if (asgnNode->location != rhsLocation)
        {
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, asgnNode->location, rhsLocation);
        }
    }
}

void EmitLoad(
    ParseNode *lhs,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    // Emit the instructions to load the value into the LHS location. Do not assign/free any temps
    // in the process.
    // We usually get here as part of an op-equiv expression: x.y += z;
    // In such a case, x has to be emitted first, then the value of x.y loaded (by this function), then z emitted.
    switch (lhs->nop)
    {
    // load of a local or global variable
    case knopName:
    {
        funcInfo->AcquireLoc(lhs);
        byteCodeGenerator->EmitPropLoad(lhs->location, lhs->AsParseNodeName()->sym, lhs->AsParseNodeName()->pid, funcInfo);
        break;
    }

    // = x.y
    case knopDot:
    {
        // get field id for "y"
        Js::PropertyId propertyId = lhs->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode();

        funcInfo->AcquireLoc(lhs);
        EmitReference(lhs, byteCodeGenerator, funcInfo);
        uint cacheId = funcInfo->FindOrAddInlineCacheId(lhs->AsParseNodeBin()->pnode1->location, propertyId, false, false);
        byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, lhs->location, lhs->AsParseNodeBin()->pnode1->location, cacheId);
        break;
    }

    case knopIndex:
        funcInfo->AcquireLoc(lhs);
        EmitReference(lhs, byteCodeGenerator, funcInfo);
        byteCodeGenerator->Writer()->Element(
            Js::OpCode::LdElemI_A,
            lhs->location, lhs->AsParseNodeBin()->pnode1->location, lhs->AsParseNodeBin()->pnode2->location);
        break;

    // f(x) +=
    case knopCall:
    {
        ParseNodeCall * pnodeCallLhs = lhs->AsParseNodeCall();

        if (pnodeCallLhs->pnodeTarget->nop == knopImport)
        {
            ParseNodePtr args =
pnodeCallLhs->pnodeArgs; Assert(CountArguments(args) == 2); // import() takes one argument Emit(args, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(args); funcInfo->AcquireLoc(pnodeCallLhs); byteCodeGenerator->Writer()->Reg2(Js::OpCode::ImportCall, pnodeCallLhs->location, args->location); } else { funcInfo->AcquireLoc(pnodeCallLhs); EmitReference(pnodeCallLhs, byteCodeGenerator, funcInfo); EmitCall(pnodeCallLhs, byteCodeGenerator, funcInfo, /*fReturnValue=*/ false, /*fEvaluateComponents=*/ false); } break; } default: funcInfo->AcquireLoc(lhs); Emit(lhs, byteCodeGenerator, funcInfo, false); break; } } void EmitList(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo) { if (pnode != nullptr) { while (pnode->nop == knopList) { byteCodeGenerator->EmitTopLevelStatement(pnode->AsParseNodeBin()->pnode1, funcInfo, false); pnode = pnode->AsParseNodeBin()->pnode2; } byteCodeGenerator->EmitTopLevelStatement(pnode, funcInfo, false); } } void EmitOneArg( ParseNode *pnode, BOOL fAssignRegs, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, Js::ProfileId callSiteId, Js::ArgSlot &argIndex, Js::ArgSlot &spreadIndex, Js::RegSlot argTempLocation, bool emitProfiledArgout, Js::AuxArray<uint32> *spreadIndices = nullptr ) { bool noArgOuts = argTempLocation != Js::Constants::NoRegister; // If this is a put, the arguments have already been evaluated (see EmitReference). // We just need to emit the ArgOut instructions. 
if (fAssignRegs)
    {
        Emit(pnode, byteCodeGenerator, funcInfo, false);
    }
    if (pnode->nop == knopEllipsis)
    {
        // Spread argument: record its (1-based) position and materialize the spread iterator list.
        Assert(spreadIndices != nullptr);
        spreadIndices->elements[spreadIndex++] = argIndex + 1; // account for 'this'
        Js::RegSlot regVal = funcInfo->AcquireTmpRegister();
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::LdCustomSpreadIteratorList, regVal, pnode->location);
        if (noArgOuts)
        {
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, argTempLocation, regVal);
        }
        else
        {
            byteCodeGenerator->Writer()->ArgOut<true>(argIndex + 1, regVal, callSiteId, emitProfiledArgout);
        }
        funcInfo->ReleaseTmpRegister(regVal);
    }
    else
    {
        if (noArgOuts)
        {
            // Park the value in the caller-provided temp; ArgOut is emitted later by the caller.
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, argTempLocation, pnode->location);
        }
        else
        {
            byteCodeGenerator->Writer()->ArgOut<true>(argIndex + 1, pnode->location, callSiteId, emitProfiledArgout);
        }
    }
    argIndex++;
    if (fAssignRegs)
    {
        funcInfo->ReleaseLoc(pnode);
    }
}

// Evaluates all arguments into consecutive temp registers first, then emits StartCall followed by
// all ArgOut instructions in one batch. Used when argument evaluation must complete before the
// call protocol starts (e.g. destructuring in the argument list). Returns the argument count
// (excluding 'this').
size_t EmitArgsWithArgOutsAtEnd(
    ParseNode *pnode,
    BOOL fAssignRegs,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId,
    Js::RegSlot thisLocation,
    Js::ArgSlot argsCountForStartCall,
    bool emitProfiledArgouts,
    Js::AuxArray<uint32> *spreadIndices = nullptr
    )
{
    AssertOrFailFast(pnode != nullptr);

    Js::ArgSlot argIndex = 0;
    Js::ArgSlot spreadIndex = 0;
    // Temps are acquired consecutively, so firstArgTempLocation + i addresses the i-th parked value.
    Js::RegSlot argTempLocation = funcInfo->AcquireTmpRegister();
    Js::RegSlot firstArgTempLocation = argTempLocation;

    while (pnode->nop == knopList)
    {
        EmitOneArg(pnode->AsParseNodeBin()->pnode1, fAssignRegs, byteCodeGenerator, funcInfo, callSiteId, argIndex, spreadIndex, argTempLocation, false /*emitProfiledArgout*/, spreadIndices);
        pnode = pnode->AsParseNodeBin()->pnode2;
        argTempLocation = funcInfo->AcquireTmpRegister();
    }

    EmitOneArg(pnode, fAssignRegs, byteCodeGenerator, funcInfo, callSiteId, argIndex, spreadIndex, argTempLocation, false /*emitProfiledArgout*/, spreadIndices);

    byteCodeGenerator->Writer()->StartCall(Js::OpCode::StartCall, argsCountForStartCall);

    // Emit all argOuts now
    if (thisLocation != Js::Constants::NoRegister)
    {
        // Emit the "this" object.
        byteCodeGenerator->Writer()->ArgOut<true>(0, thisLocation, callSiteId, false /*emitProfiledArgouts*/);
    }

    for (Js::ArgSlot index = 0; index < argIndex; index++)
    {
        byteCodeGenerator->Writer()->ArgOut<true>(index + 1, firstArgTempLocation + index, callSiteId, emitProfiledArgouts);
    }

    // Now release all those temp registers (LIFO: last-acquired temp is released first).
    for (Js::ArgSlot index = argIndex; index > 0; index--)
    {
        funcInfo->ReleaseTmpRegister(argTempLocation--);
    }

    return argIndex;
}

// Evaluates the argument list, emitting each ArgOut immediately after its argument is evaluated.
// Returns the argument count (excluding 'this').
size_t EmitArgs(
    ParseNode *pnode,
    BOOL fAssignRegs,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId,
    bool emitProfiledArgouts,
    Js::AuxArray<uint32> *spreadIndices = nullptr
    )
{
    Js::ArgSlot argIndex = 0;
    Js::ArgSlot spreadIndex = 0;
    if (pnode != nullptr)
    {
        while (pnode->nop == knopList)
        {
            EmitOneArg(pnode->AsParseNodeBin()->pnode1, fAssignRegs, byteCodeGenerator, funcInfo, callSiteId, argIndex, spreadIndex, Js::Constants::NoRegister, emitProfiledArgouts, spreadIndices);
            pnode = pnode->AsParseNodeBin()->pnode2;
        }
        EmitOneArg(pnode, fAssignRegs, byteCodeGenerator, funcInfo, callSiteId, argIndex, spreadIndex, Js::Constants::NoRegister, emitProfiledArgouts, spreadIndices);
    }
    return argIndex;
}

// Emits the ArgOut for the implicit 'this' argument (slot 0), if one is supplied.
void EmitArgListStart(
    Js::RegSlot thisLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId)
{
    if (thisLocation != Js::Constants::NoRegister)
    {
        // Emit the "this" object.
        byteCodeGenerator->Writer()->ArgOut<true>(0, thisLocation, callSiteId, false /*emitProfiledArgout*/);
    }
}

// Emits the trailing extra arguments of a call (the frame display for direct eval, or new.target)
// and returns the total argument count the call instruction should carry, including 'this' and
// any extra args. argIndex is the explicit argument count (excluding 'this').
Js::ArgSlot EmitArgListEnd(
    ParseNode *pnode,
    Js::RegSlot thisLocation,
    Js::RegSlot evalLocation,
    Js::RegSlot newTargetLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    size_t argIndex,
    Js::ProfileId callSiteId)
{
    BOOL fEvalInModule = false;
    BOOL fIsEval = (evalLocation != Js::Constants::NoRegister);
    BOOL fHasNewTarget = (newTargetLocation != Js::Constants::NoRegister);

    static const size_t maxExtraArgSlot = 4; // max(extraEvalArg, extraArg), where extraEvalArg==2 (moduleRoot,env), extraArg==4 (this, eval, evalInModule, newTarget)
    AssertOrFailFastMsg(argIndex < Js::Constants::UShortMaxValue - maxExtraArgSlot, "Number of allowed arguments are already capped at parser level");

    Js::ArgSlot argSlotIndex = (Js::ArgSlot) argIndex;
    Js::ArgSlot evalIndex;

    if (fIsEval && argSlotIndex > 0)
    {
        Assert(!fHasNewTarget);

        // Pass the frame display as an extra argument to "eval".
        // Do this only if eval is called with some args
        Js::RegSlot evalEnv;
        if (funcInfo->IsGlobalFunction() && !(funcInfo->GetIsStrictMode() && byteCodeGenerator->GetFlags() & fscrEval))
        {
            // Use current environment as the environment for the function being called when:
            // - this is the root global function (not an eval's global function)
            // - this is an eval's global function that is not in strict mode (see else block)
            evalEnv = funcInfo->GetEnvRegister();
        }
        else
        {
            // Use the frame display as the environment for the function being called when:
            // - this is not a global function and thus it will have its own scope
            // - this is an eval's global function that is in strict mode, since in strict mode the eval's global function
            //   has its own scope
            evalEnv = funcInfo->frameDisplayRegister;
        }

        evalEnv = byteCodeGenerator->PrependLocalScopes(evalEnv, evalLocation, funcInfo);

        // Passing the FrameDisplay as an extra argument
        evalIndex = argSlotIndex + 1;
        if (evalEnv == funcInfo->GetEnvRegister() ||
evalEnv == funcInfo->frameDisplayRegister)
        {
            // Environment register / frame display: use the dedicated ArgOutEnv form.
            byteCodeGenerator->Writer()->ArgOutEnv(evalIndex);
        }
        else
        {
            byteCodeGenerator->Writer()->ArgOut<false>(evalIndex, evalEnv, callSiteId, false /*emitProfiledArgout*/);
        }
    }

    if (fHasNewTarget)
    {
        Assert(!fIsEval);

        // new.target is appended after the explicit arguments.
        byteCodeGenerator->Writer()->ArgOut<true>(argSlotIndex + 1, newTargetLocation, callSiteId, false /*emitProfiledArgout*/);
    }

    // Total count = explicit args + 'this' + any extra (eval env / module / new.target) slots.
    Js::ArgSlot argIntCount = argSlotIndex + 1 + (Js::ArgSlot)fIsEval + (Js::ArgSlot)fEvalInModule + (Js::ArgSlot)fHasNewTarget;

    // eval and no args passed, return 1 as argument count
    if (fIsEval && pnode == nullptr)
    {
        return 1;
    }

    return argIntCount;
}

// Emits the full argument list of a call: StartCall, 'this', the explicit arguments, and any
// extra trailing arguments (eval frame display, new.target). Returns the total argument count
// for the call instruction.
//   emitArgOutsAtEnd - evaluate all args into temps first and batch the ArgOuts (see
//                      EmitArgsWithArgOutsAtEnd); otherwise StartCall is emitted up front and each
//                      ArgOut immediately follows its argument's evaluation.
//   spreadArgCount   - when > 0, an AuxArray of spread positions is allocated into *spreadIndices;
//                      ownership passes to the caller (freed after the call instruction is written).
Js::ArgSlot EmitArgList(
    ParseNode *pnode,
    Js::RegSlot thisLocation,
    Js::RegSlot newTargetLocation,
    BOOL fIsEval,
    BOOL fAssignRegs,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId,
    Js::ArgSlot argsCountForStartCall,
    bool emitArgOutsAtEnd,
    bool emitProfiledArgouts,
    uint16 spreadArgCount = 0,
    Js::AuxArray<uint32> **spreadIndices = nullptr)
{
    // This function emits the arguments for a call.
    // ArgOut's with uses immediately following defs.
    if (!emitArgOutsAtEnd)
    {
        byteCodeGenerator->Writer()->StartCall(Js::OpCode::StartCall, argsCountForStartCall);
        EmitArgListStart(thisLocation, byteCodeGenerator, funcInfo, callSiteId);
    }

    Js::RegSlot evalLocation = Js::Constants::NoRegister;

    //
    // If Emitting arguments for eval and assigning registers, get a tmpLocation for eval.
    // This would be used while generating frameDisplay in EmitArgListEnd.
    //
    if (fIsEval)
    {
        evalLocation = funcInfo->AcquireTmpRegister();
    }

    if (spreadArgCount > 0)
    {
        const size_t extraAlloc = UInt32Math::Mul(spreadArgCount, sizeof(uint32));
        Assert(spreadIndices != nullptr);
        *spreadIndices = AnewPlus(byteCodeGenerator->GetAllocator(), extraAlloc, Js::AuxArray<uint32>, spreadArgCount);
    }

    size_t argIndex = 0;
    if (emitArgOutsAtEnd)
    {
        argIndex = EmitArgsWithArgOutsAtEnd(pnode, fAssignRegs, byteCodeGenerator, funcInfo, callSiteId, thisLocation, argsCountForStartCall, emitProfiledArgouts, spreadIndices == nullptr ? nullptr : *spreadIndices);
    }
    else
    {
        argIndex = EmitArgs(pnode, fAssignRegs, byteCodeGenerator, funcInfo, callSiteId, emitProfiledArgouts, spreadIndices == nullptr ? nullptr : *spreadIndices);
    }

    Js::ArgSlot argumentsCount = EmitArgListEnd(pnode, thisLocation, evalLocation, newTargetLocation, byteCodeGenerator, funcInfo, argIndex, callSiteId);

    if (fIsEval)
    {
        funcInfo->ReleaseTmpRegister(evalLocation);
    }

    return argumentsCount;
}

// Copies the constant (int/float literal) arguments of a knopList argument chain into a Js::Var
// array. argCount is the exact number of constants expected; a structural mismatch is an
// internal error.
void EmitConstantArgsToVarArray(ByteCodeGenerator *byteCodeGenerator, __out_ecount(argCount) Js::Var *vars, ParseNode *args, uint argCount)
{
    uint index = 0;

    while (args->nop == knopList && index < argCount)
    {
        if (args->AsParseNodeBin()->pnode1->nop == knopInt)
        {
            int value = args->AsParseNodeBin()->pnode1->AsParseNodeInt()->lw;
            vars[index++] = Js::TaggedInt::ToVarUnchecked(value);
        }
        else if (args->AsParseNodeBin()->pnode1->nop == knopFlt)
        {
            Js::Var number = Js::JavascriptNumber::New(args->AsParseNodeBin()->pnode1->AsParseNodeFloat()->dbl, byteCodeGenerator->GetScriptContext());
#if ! FLOATVAR
            // Boxed number: pin it so it survives until the function body is collected.
            byteCodeGenerator->GetScriptContext()->BindReference(number);
#endif
            vars[index++] = number;
        }
        else
        {
            AnalysisAssert(false);
        }
        args = args->AsParseNodeBin()->pnode2;
    }

    // The loop stops one short: the final argument is the non-list tail node handled below.
    // index == argCount here means the list had more entries than expected.
    if (index == argCount)
    {
        Assert(false);
        Js::Throw::InternalError();
        return;
    }

    if (args->nop == knopInt)
    {
        int value = args->AsParseNodeInt()->lw;
        vars[index++] = Js::TaggedInt::ToVarUnchecked(value);
    }
    else if (args->nop == knopFlt)
    {
        Js::Var number = Js::JavascriptNumber::New(args->AsParseNodeFloat()->dbl, byteCodeGenerator->GetScriptContext());
#if ! FLOATVAR
        byteCodeGenerator->GetScriptContext()->BindReference(number);
#endif
        vars[index++] = number;
    }
    else
    {
        AnalysisAssert(false);
    }
}

// Copies exactly argCount integer-literal arguments from a knopList chain into an int32 array.
void EmitConstantArgsToIntArray(ByteCodeGenerator *byteCodeGenerator, __out_ecount(argCount) int32 *vars, ParseNode *args, uint argCount)
{
    uint index = 0;

    while (args->nop == knopList && index < argCount)
    {
        Assert(args->AsParseNodeBin()->pnode1->nop == knopInt);
        vars[index++] = args->AsParseNodeBin()->pnode1->AsParseNodeInt()->lw;
        args = args->AsParseNodeBin()->pnode2;
    }

    if (index >= argCount)
    {
        Js::Throw::InternalError();
        return;
    }

    Assert(args->nop == knopInt);
    vars[index++] = args->AsParseNodeInt()->lw;

    Assert(index == argCount);
}

// Copies exactly argCount numeric-literal (int or float) arguments from a knopList chain into a
// double array, widening ints to double.
void EmitConstantArgsToFltArray(ByteCodeGenerator *byteCodeGenerator, __out_ecount(argCount) double *vars, ParseNode *args, uint argCount)
{
    uint index = 0;

    while (args->nop == knopList && index < argCount)
    {
        OpCode nop = args->AsParseNodeBin()->pnode1->nop;
        if (nop == knopInt)
        {
            vars[index++] = (double)args->AsParseNodeBin()->pnode1->AsParseNodeInt()->lw;
        }
        else
        {
            Assert(nop == knopFlt);
            vars[index++] = args->AsParseNodeBin()->pnode1->AsParseNodeFloat()->dbl;
        }
        args = args->AsParseNodeBin()->pnode2;
    }

    if (index >= argCount)
    {
        Js::Throw::InternalError();
        return;
    }

    if (args->nop == knopInt)
    {
        vars[index++] = (double)args->AsParseNodeInt()->lw;
    }
    else
    {
        Assert(args->nop == knopFlt);
        vars[index++] = args->AsParseNodeFloat()->dbl;
    }

    Assert(index == argCount);
}

//
// Called when we
have new Ctr(constant, constant...)
//
// Emits 'new Ctor(...)' where every argument is a numeric literal: the constants are packed into
// an auxiliary Var array and the whole construction becomes a single NewScObject_A instruction.
// argCount includes the implicit 'this' slot. Returns the argument count from EmitArgListEnd.
Js::ArgSlot EmitNewObjectOfConstants(
    ParseNode *pnode,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    unsigned int argCount)
{
    EmitArgListStart(Js::Constants::NoRegister, byteCodeGenerator, funcInfo, Js::Constants::NoProfileId);

    // Create the vars array
    Js::VarArrayVarCount *vars = AnewPlus(byteCodeGenerator->GetAllocator(), UInt32Math::Mul((argCount - 1), sizeof(Js::Var)), Js::VarArrayVarCount, Js::TaggedInt::ToVarUnchecked(argCount - 1));

    // Emit all constants to the vars array
    EmitConstantArgsToVarArray(byteCodeGenerator, vars->elements, pnode->AsParseNodeCall()->pnodeArgs, argCount - 1);

    // Finish the arg list
    Js::ArgSlot actualArgCount = EmitArgListEnd(
        pnode->AsParseNodeCall()->pnodeArgs,
        Js::Constants::NoRegister,
        Js::Constants::NoRegister,
        Js::Constants::NoRegister,
        byteCodeGenerator,
        funcInfo,
        argCount - 1,
        Js::Constants::NoProfileId);

    // Make sure the cacheId to regSlot map in the ByteCodeWriter is left in a consistent state after writing NewScObject_A
    byteCodeGenerator->Writer()->RemoveEntryForRegSlotFromCacheIdMap(pnode->AsParseNodeCall()->pnodeTarget->location);

    // Generate the opcode with vars
    byteCodeGenerator->Writer()->AuxiliaryContext(
        Js::OpCode::NewScObject_A,
        funcInfo->AcquireLoc(pnode),
        vars,
        UInt32Math::MulAdd<sizeof(Js::Var), sizeof(Js::VarArray)>((argCount-1)),
        pnode->AsParseNodeCall()->pnodeTarget->location);

    AdeletePlus(byteCodeGenerator->GetAllocator(), UInt32Math::Mul((argCount-1), sizeof(Js::VarArrayVarCount)), vars);

    return actualArgCount;
}

// Emits a method-load (Ld*MethodFld) of 'propertyId' from 'callObjLocation' into 'location',
// choosing the opcode by where the property lives:
//   - local frame object / regular instance  -> LdLocalMethodFld / LdMethodFld
//   - root object, scoped (inside eval)      -> ScopedLdMethodFld
//   - root object, unscoped                  -> LdRootMethodFld
// registerCacheIdForCall controls whether the cache id is associated with the upcoming call
// (pass false when no matching call instruction will be emitted, e.g. the apply fast path).
void EmitMethodFld(bool isRoot, bool isScoped, Js::RegSlot location, Js::RegSlot callObjLocation, Js::PropertyId propertyId, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, bool registerCacheIdForCall = true)
{
    Js::OpCode opcode;
    if (!isRoot)
    {
        if (callObjLocation == funcInfo->frameObjRegister)
        {
            opcode = Js::OpCode::LdLocalMethodFld;
        }
        else
        {
            opcode = Js::OpCode::LdMethodFld;
        }
    }
    else if (isScoped)
    {
        opcode = Js::OpCode::ScopedLdMethodFld;
    }
    else
    {
        opcode = Js::OpCode::LdRootMethodFld;
    }

    if (isScoped || !isRoot)
    {
        Assert(isScoped || !isRoot || callObjLocation == ByteCodeGenerator::RootObjectRegister);
        uint cacheId = funcInfo->FindOrAddInlineCacheId(callObjLocation, propertyId, true, false);
        if (callObjLocation == funcInfo->frameObjRegister)
        {
            byteCodeGenerator->Writer()->ElementP(opcode, location, cacheId, false /*isCtor*/, registerCacheIdForCall);
        }
        else
        {
            byteCodeGenerator->Writer()->PatchableProperty(opcode, location, callObjLocation, cacheId, false /*isCtor*/, registerCacheIdForCall);
        }
    }
    else
    {
        uint cacheId = funcInfo->FindOrAddRootObjectInlineCacheId(propertyId, true, false);
        byteCodeGenerator->Writer()->PatchableRootProperty(opcode, location, cacheId, true, false, registerCacheIdForCall);
    }
}

// Convenience overload: derives isRoot/isScoped from the target node and the compile flags,
// then forwards to the primary EmitMethodFld above.
void EmitMethodFld(ParseNode *pnode, Js::RegSlot callObjLocation, Js::PropertyId propertyId, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, bool registerCacheIdForCall = true)
{
    // Load a call target of the form x.y(). (Call target may be a plain knopName if we're getting it from
    // the global object, etc.)
    bool isRoot = pnode->nop == knopName && (pnode->AsParseNodeName()->sym == nullptr || pnode->AsParseNodeName()->sym->GetIsGlobal());
    bool isScoped = (byteCodeGenerator->GetFlags() & fscrEval) != 0 ||
        (isRoot && callObjLocation != ByteCodeGenerator::RootObjectRegister);

    EmitMethodFld(isRoot, isScoped, pnode->location, callObjLocation, propertyId, byteCodeGenerator, funcInfo, registerCacheIdForCall);
}

// lhs.apply(this, arguments);
// Emits a fast path for the f.apply(this, arguments) pattern: when the heap 'arguments' object has
// not been created and 'apply' is the built-in, the ApplyArgs opcode forwards the stack arguments
// directly; otherwise control falls to a slow path that materializes 'arguments' and re-emits the
// call through the normal EmitCall route.
void EmitApplyCall(ParseNodeCall* pnodeCall, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo, BOOL fReturnValue)
{
    ParseNode* applyNode = pnodeCall->pnodeTarget;
    ParseNode* thisNode = pnodeCall->pnodeArgs->AsParseNodeBin()->pnode1;
    Assert(applyNode->nop == knopDot);

    ParseNode* funcNode = applyNode->AsParseNodeBin()->pnode1;
    Js::ByteCodeLabel slowPath = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel afterSlowPath = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel argsAlreadyCreated = byteCodeGenerator->Writer()->DefineLabel();

    Assert(applyNode->nop == knopDot);

    Emit(funcNode, byteCodeGenerator, funcInfo, false);

    funcInfo->AcquireLoc(applyNode);
    Js::PropertyId propertyId = applyNode->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode();

    // As we won't be emitting a call instruction for apply, no need to register the cacheId for apply
    // load to be associated with the call. This is also required, as in the absence of a corresponding
    // call for apply, we won't remove the entry for "apply" cacheId from
    // ByteCodeWriter::callRegToLdFldCacheIndexMap, which is contrary to our assumption that we would
    // have removed an entry from a map upon seeing its corresponding call.
EmitMethodFld(applyNode, funcNode->location, propertyId, byteCodeGenerator, funcInfo, false /*registerCacheIdForCall*/);

    Symbol *argSym = funcInfo->GetArgumentsSymbol();
    Assert(argSym && argSym->IsArguments());
    Js::RegSlot argumentsLoc = argSym->GetLocation();

    // If the heap 'arguments' object already exists, skip the fast path entirely.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdArgumentsFromFrame, argumentsLoc);
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrNotNull_A, argsAlreadyCreated, argumentsLoc);

    // If apply is overridden, bail to slow path.
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFncNeqApply, slowPath, applyNode->location);

    // Note: acquire and release a temp register for this stack arg pointer instead of trying to stash it
    // in funcInfo->stackArgReg. Otherwise, we'll needlessly load and store it in jitted loop bodies and
    // may crash if we try to unbox it on the store.
    Js::RegSlot stackArgReg = funcInfo->AcquireTmpRegister();
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdStackArgPtr, stackArgReg);

    Js::RegSlot argCountLocation = funcInfo->AcquireTmpRegister();

    // Fast path: forward the caller's stack arguments straight through ApplyArgs.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdArgCnt, argCountLocation);
    byteCodeGenerator->Writer()->Reg5(Js::OpCode::ApplyArgs, funcNode->location, funcNode->location, thisNode->location, stackArgReg, argCountLocation);

    funcInfo->ReleaseTmpRegister(argCountLocation);
    funcInfo->ReleaseTmpRegister(stackArgReg);

    funcInfo->ReleaseLoc(applyNode);
    funcInfo->ReleaseLoc(funcNode);

    // Clear these nodes as they are going to be used to re-generate the slow path.
    VisitClearTmpRegs(applyNode, byteCodeGenerator, funcInfo);
    VisitClearTmpRegs(funcNode, byteCodeGenerator, funcInfo);

    byteCodeGenerator->Writer()->Br(afterSlowPath);

    // slow path
    byteCodeGenerator->Writer()->MarkLabel(slowPath);

    if (funcInfo->frameObjRegister != Js::Constants::NoRegister)
    {
        byteCodeGenerator->EmitScopeObjectInit(funcInfo);
    }
    byteCodeGenerator->LoadHeapArguments(funcInfo);

    byteCodeGenerator->Writer()->MarkLabel(argsAlreadyCreated);
    EmitCall(pnodeCall, byteCodeGenerator, funcInfo, fReturnValue, /*fEvaluateComponents*/true);
    byteCodeGenerator->Writer()->MarkLabel(afterSlowPath);
}

// Emits the method-load for an indexed call target of the form x[y]().
void EmitMethodElem(ParseNode *pnode, Js::RegSlot callObjLocation, Js::RegSlot indexLocation, ByteCodeGenerator *byteCodeGenerator)
{
    // Load a call target of the form x[y]().
    byteCodeGenerator->Writer()->Element(Js::OpCode::LdMethodElem, pnode->location, callObjLocation, indexLocation);
}

// Computes the 'this' and call-object registers for a call target WITHOUT evaluating the target
// itself (the target is evaluated later, as part of the call — see the !fEvaluateComponents path
// of EmitCall). Outputs are written through thisLocation / callObjLocation / releaseThisLocation.
void EmitCallTargetNoEvalComponents(
    ParseNode *pnodeTarget,
    BOOL fSideEffectArgs,
    Js::RegSlot *thisLocation,
    bool *releaseThisLocation,
    Js::RegSlot *callObjLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    // We first get a reference to the call target, then evaluate the arguments, then
    // evaluate the call target.
    // - emit reference to target
    //    - copy instance to scratch reg if necessary.
    //    - assign this
    //    - assign instance for dynamic/global name
    // - emit args
    // - do call (CallFld/Elem/I)

    *releaseThisLocation = true;

    switch (pnodeTarget->nop)
    {
    case knopDot:
        // x.y(): the base object doubles as both 'this' and the call object.
        *thisLocation = pnodeTarget->AsParseNodeBin()->pnode1->location;
        *callObjLocation = pnodeTarget->AsParseNodeBin()->pnode1->location;
        break;

    case knopIndex:
        *thisLocation = pnodeTarget->AsParseNodeBin()->pnode1->location;
        *callObjLocation = pnodeTarget->AsParseNodeBin()->pnode1->location;
        break;

    case knopName:
        // If the call target is a name, do some extra work to get its instance and the "this" pointer.
        byteCodeGenerator->EmitLoadInstance(pnodeTarget->AsParseNodeName()->sym, pnodeTarget->AsParseNodeName()->pid, thisLocation, callObjLocation, funcInfo);
        if (*thisLocation == Js::Constants::NoRegister)
        {
            *thisLocation = funcInfo->undefinedConstantRegister;
        }
        break;

    default:
        *thisLocation = funcInfo->undefinedConstantRegister;
        break;
    }
}

// Fully evaluates a call target into its register and determines the 'this' register for the call.
// Used on the fEvaluateComponents path of EmitCall: target first, then the argument list.
// releaseThisLocation is set false when 'this' belongs to a super reference and must not be
// released by the caller.
void EmitCallTarget(
    ParseNode *pnodeTarget,
    BOOL fSideEffectArgs,
    Js::RegSlot *thisLocation,
    bool *releaseThisLocation,
    Js::RegSlot *callObjLocation,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo)
{
    // - emit target
    //    - assign this
    // - emit args
    // - do call

    // The call target is fully evaluated before the argument list. Note that we're not handling
    // put-call cases here currently, as such cases only apply to host objects
    // and are very unlikely to behave differently depending on the order of evaluation.

    *releaseThisLocation = true;

    switch (pnodeTarget->nop)
    {
    case knopDot:
    {
        ParseNodeBin * pnodeBinTarget = pnodeTarget->AsParseNodeBin();
        funcInfo->AcquireLoc(pnodeBinTarget);

        // Assign the call target operand(s), putting them into expression temps if necessary to protect
        // them from side-effects.
        if (fSideEffectArgs)
        {
            // Though we're done with target evaluation after this point, still protect opnd1 from
            // arg side-effects as it's the "this" pointer.
SaveOpndValue(pnodeBinTarget->pnode1, funcInfo);
        }

        Assert(pnodeBinTarget->pnode2->nop == knopName);
        // Mark f in f.apply(...) / f.call(...) so later phases know the load feeds call/apply.
        if ((pnodeBinTarget->pnode2->AsParseNodeName()->PropertyIdFromNameNode() == Js::PropertyIds::apply) ||
            (pnodeTarget->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode() == Js::PropertyIds::call))
        {
            pnodeBinTarget->pnode1->SetIsCallApplyTargetLoad();
        }

        Emit(pnodeBinTarget->pnode1, byteCodeGenerator, funcInfo, false);

        Js::PropertyId propertyId = pnodeBinTarget->pnode2->AsParseNodeName()->PropertyIdFromNameNode();
        Js::RegSlot protoLocation = pnodeBinTarget->pnode1->location;

        if (ByteCodeGenerator::IsSuper(pnodeBinTarget->pnode1))
        {
            // super.y(): look the method up on the home object's prototype but keep the current 'this'.
            Emit(pnodeBinTarget->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false);
            protoLocation = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, protoLocation, funcInfo);
            funcInfo->ReleaseLoc(pnodeBinTarget->AsParseNodeSuperReference()->pnodeThis);
            funcInfo->ReleaseLoc(pnodeBinTarget->pnode1);

            // Function calls on the 'super' object should maintain current 'this' pointer
            *thisLocation = pnodeBinTarget->AsParseNodeSuperReference()->pnodeThis->location;
            *releaseThisLocation = false;
        }
        else
        {
            *thisLocation = pnodeBinTarget->pnode1->location;
        }

        EmitMethodFld(pnodeBinTarget, protoLocation, propertyId, byteCodeGenerator, funcInfo);
        break;
    }

    case knopIndex:
    {
        funcInfo->AcquireLoc(pnodeTarget);

        // Assign the call target operand(s), putting them into expression temps if necessary to protect
        // them from side-effects.
        if (fSideEffectArgs || !(ParseNode::Grfnop(pnodeTarget->AsParseNodeBin()->pnode2->nop) & fnopLeaf))
        {
            // Though we're done with target evaluation after this point, still protect opnd1 from
            // arg or opnd2 side-effects as it's the "this" pointer.
            SaveOpndValue(pnodeTarget->AsParseNodeBin()->pnode1, funcInfo);
        }

        Emit(pnodeTarget->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false);
        Emit(pnodeTarget->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo, false);

        Js::RegSlot indexLocation = pnodeTarget->AsParseNodeBin()->pnode2->location;
        Js::RegSlot protoLocation = pnodeTarget->AsParseNodeBin()->pnode1->location;

        if (ByteCodeGenerator::IsSuper(pnodeTarget->AsParseNodeBin()->pnode1))
        {
            // super[y](): same home-object-proto redirection as the knopDot case above.
            Emit(pnodeTarget->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false);
            protoLocation = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, protoLocation, funcInfo);
            funcInfo->ReleaseLoc(pnodeTarget->AsParseNodeSuperReference()->pnodeThis);

            // Function calls on the 'super' object should maintain current 'this' pointer
            *thisLocation = pnodeTarget->AsParseNodeSuperReference()->pnodeThis->location;
            *releaseThisLocation = false;
        }
        else
        {
            *thisLocation = pnodeTarget->AsParseNodeBin()->pnode1->location;
        }

        EmitMethodElem(pnodeTarget, protoLocation, indexLocation, byteCodeGenerator);

        funcInfo->ReleaseLoc(pnodeTarget->AsParseNodeBin()->pnode2); // don't release indexLocation until after we use it.

        if (ByteCodeGenerator::IsSuper(pnodeTarget->AsParseNodeBin()->pnode1))
        {
            funcInfo->ReleaseLoc(pnodeTarget->AsParseNodeBin()->pnode1);
        }
        break;
    }

    case knopName:
    {
        ParseNodeName * pnodeNameTarget = pnodeTarget->AsParseNodeName();
        if (!pnodeNameTarget->IsSpecialName())
        {
            funcInfo->AcquireLoc(pnodeNameTarget);

            // Assign the call target operand(s), putting them into expression temps if necessary to protect
            // them from side-effects.
            if (fSideEffectArgs)
            {
                SaveOpndValue(pnodeNameTarget, funcInfo);
            }

            byteCodeGenerator->EmitLoadInstance(pnodeNameTarget->sym, pnodeNameTarget->pid, thisLocation, callObjLocation, funcInfo);

            if (*callObjLocation != Js::Constants::NoRegister)
            {
                // Load the call target as a property of the instance.
                Js::PropertyId propertyId = pnodeNameTarget->PropertyIdFromNameNode();
                EmitMethodFld(pnodeNameTarget, *callObjLocation, propertyId, byteCodeGenerator, funcInfo);
                break;
            }
        }

        // FALL THROUGH to evaluate call target.
    }

    default:
        // Assign the call target operand(s), putting them into expression temps if necessary to protect
        // them from side-effects.
        Emit(pnodeTarget, byteCodeGenerator, funcInfo, false);
        *thisLocation = funcInfo->undefinedConstantRegister;
        break;
    }

    // "This" pointer should have been assigned by the above.
    Assert(*thisLocation != Js::Constants::NoRegister);
}

// Emits the call instruction itself for a target that already sits in a register, selecting among
// CallI / CallIFlags / CallIExtended / CallIExtendedFlags based on eval, new.target, super-call,
// and spread. For super calls the callee is redirected through LdFuncObjProto. Frees the spread
// indices AuxArray allocated by EmitArgList, if any.
void EmitCallI(
    ParseNodeCall *pnodeCall,
    BOOL fEvaluateComponents,
    BOOL fIsEval,
    BOOL fHasNewTarget,
    uint32 actualArgCount,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId,
    Js::AuxArray<uint32> *spreadIndices = nullptr)
{
    // Emit a call where the target is in a register, because it's either a local name or an expression we've
    // already evaluated.

    ParseNode *pnodeTarget = pnodeCall->pnodeTarget;
    Js::OpCode op;
    Js::CallFlags callFlags = Js::CallFlags::CallFlags_None;
    uint spreadExtraAlloc = 0;
    bool isSuperCall = pnodeCall->isSuperCall;

    Js::ArgSlot actualArgSlotCount = (Js::ArgSlot) actualArgCount;

    // check for integer overflow
    if ((size_t)actualArgSlotCount != actualArgCount)
    {
        Js::Throw::OutOfMemory();
    }

    if (fEvaluateComponents && !isSuperCall)
    {
        // Release the call target operands we assigned above. If we didn't assign them here,
        // we'll need them later, so we can't re-use them for the result of the call.
        funcInfo->ReleaseLoc(pnodeTarget);
    }

    // Grab a register for the call result.
if (pnodeCall->isUsed)
    {
        funcInfo->AcquireLoc(pnodeCall);
    }

    if (fIsEval)
    {
        // Direct eval always carries the extra frame-display argument, so it needs the extended form.
        op = Js::OpCode::CallIExtendedFlags;
        callFlags = Js::CallFlags::CallFlags_ExtraArg;
    }
    else
    {
        if (isSuperCall)
        {
            callFlags = Js::CallFlags_New;
        }
        if (fHasNewTarget)
        {
            callFlags = (Js::CallFlags) (callFlags | Js::CallFlags::CallFlags_ExtraArg | Js::CallFlags::CallFlags_NewTarget);
        }

        if (pnodeCall->spreadArgCount > 0)
        {
            op = (isSuperCall || fHasNewTarget) ? Js::OpCode::CallIExtendedFlags : Js::OpCode::CallIExtended;
        }
        else
        {
            op = (isSuperCall || fHasNewTarget) ? Js::OpCode::CallIFlags : Js::OpCode::CallI;
        }
    }

    if (op == Js::OpCode::CallI || op == Js::OpCode::CallIFlags)
    {
        if (isSuperCall)
        {
            // Super call: the actual callee is the prototype of the function object.
            Js::RegSlot tmpReg = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdFuncObjProto, pnodeTarget->location, funcInfo);
            byteCodeGenerator->Writer()->CallI(op, pnodeCall->location, tmpReg, actualArgSlotCount, callSiteId, callFlags);
        }
        else
        {
            byteCodeGenerator->Writer()->CallI(op, pnodeCall->location, pnodeTarget->location, actualArgSlotCount, callSiteId, callFlags);
        }
    }
    else
    {
        uint spreadIndicesSize = 0;
        Js::CallIExtendedOptions options = Js::CallIExtended_None;

        if (pnodeCall->spreadArgCount > 0)
        {
            // Spread call: attach the spread-position AuxArray (header + count uint32 entries).
            Assert(spreadIndices != nullptr);
            spreadExtraAlloc = UInt32Math::Mul(spreadIndices->count, sizeof(uint32));
            spreadIndicesSize = UInt32Math::Add(sizeof(*spreadIndices), spreadExtraAlloc);
            options = Js::CallIExtended_SpreadArgs;
        }

        if (isSuperCall)
        {
            Js::RegSlot tmpReg = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdFuncObjProto, pnodeTarget->location, funcInfo);
            byteCodeGenerator->Writer()->CallIExtended(op, pnodeCall->location, tmpReg, actualArgSlotCount, options, spreadIndices, spreadIndicesSize, callSiteId, callFlags);
        }
        else
        {
            byteCodeGenerator->Writer()->CallIExtended(op, pnodeCall->location, pnodeTarget->location, actualArgSlotCount, options, spreadIndices, spreadIndicesSize, callSiteId, callFlags);
        }
    }

    if (pnodeCall->spreadArgCount > 0)
    {
        // Free the spread AuxArray allocated by EmitArgList now that it is baked into the bytecode.
        Assert(spreadExtraAlloc != 0);
        AdeletePlus(byteCodeGenerator->GetAllocator(), spreadExtraAlloc, spreadIndices);
    }
}

// Emits the call instruction when the target is still a reference (the !fEvaluateComponents path):
// the method load (CallFld/Elem) is emitted here as part of performing the call.
void EmitCallInstrNoEvalComponents(
    ParseNodeCall *pnodeCall,
    BOOL fIsEval,
    Js::RegSlot thisLocation,
    Js::RegSlot callObjLocation,
    uint32 actualArgCount,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId,
    Js::AuxArray<uint32> *spreadIndices = nullptr)
{
    // Emit the call instruction. The call target is a reference at this point, and we evaluate
    // it as part of doing the actual call.
    // Note that we don't handle the (fEvaluateComponents == TRUE) case in this function.
    // (This function is only called on the !fEvaluateComponents branch in EmitCall.)

    ParseNode *pnodeTarget = pnodeCall->pnodeTarget;

    switch (pnodeTarget->nop)
    {
    case knopDot:
    {
        Assert(pnodeTarget->AsParseNodeBin()->pnode2->nop == knopName);
        Js::PropertyId propertyId = pnodeTarget->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode();

        EmitMethodFld(pnodeTarget, callObjLocation, propertyId, byteCodeGenerator, funcInfo);
        EmitCallI(pnodeCall, /*fEvaluateComponents*/ FALSE, fIsEval, /*fHasNewTarget*/ FALSE, actualArgCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices);
    }
    break;

    case knopIndex:
    {
        EmitMethodElem(pnodeTarget, pnodeTarget->AsParseNodeBin()->pnode1->location, pnodeTarget->AsParseNodeBin()->pnode2->location, byteCodeGenerator);
        EmitCallI(pnodeCall, /*fEvaluateComponents*/ FALSE, fIsEval, /*fHasNewTarget*/ FALSE, actualArgCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices);
    }
    break;

    case knopName:
    {
        if (callObjLocation != Js::Constants::NoRegister)
        {
            // We still have to get the property from its instance, so emit CallFld.
            if (thisLocation != callObjLocation)
            {
                funcInfo->ReleaseTmpRegister(thisLocation);
            }
            funcInfo->ReleaseTmpRegister(callObjLocation);

            Js::PropertyId propertyId = pnodeTarget->AsParseNodeName()->PropertyIdFromNameNode();
            EmitMethodFld(pnodeTarget, callObjLocation, propertyId, byteCodeGenerator, funcInfo);
            EmitCallI(pnodeCall, /*fEvaluateComponents*/ FALSE, fIsEval, /*fHasNewTarget*/ FALSE, actualArgCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices);
            break;
        }
    }
    // FALL THROUGH

    default:
        EmitCallI(pnodeCall, /*fEvaluateComponents*/ FALSE, fIsEval, /*fHasNewTarget*/ FALSE, actualArgCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices);
        break;
    }
}

// Emits the call instruction for a fully-evaluated target (the fEvaluateComponents path),
// releasing the 'this' and call-object temps before delegating to EmitCallI.
void EmitCallInstr(
    ParseNodeCall *pnodeCall,
    BOOL fIsEval,
    BOOL fHasNewTarget,
    Js::RegSlot thisLocation,
    Js::RegSlot callObjLocation,
    uint32 actualArgCount,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    Js::ProfileId callSiteId,
    Js::AuxArray<uint32> *spreadIndices = nullptr)
{
    // Emit a call instruction. The call target has been fully evaluated already, so we always
    // emit a CallI through the register that holds the target value.
    // Note that we don't handle !fEvaluateComponents cases at this point.
    // (This function is only called on the fEvaluateComponents branch in EmitCall.)
if (thisLocation != Js::Constants::NoRegister)
    {
        funcInfo->ReleaseTmpRegister(thisLocation);
    }

    if (callObjLocation != Js::Constants::NoRegister && callObjLocation != thisLocation)
    {
        funcInfo->ReleaseTmpRegister(callObjLocation);
    }

    EmitCallI(pnodeCall, /*fEvaluateComponents*/ TRUE, fIsEval, fHasNewTarget, actualArgCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices);
}

// Emits 'new target(args)'. Selects NewScObject / NewScObjArray (and their Spread variants) by
// whether the target is the Array constructor and whether spread is present; all-constant
// argument lists are routed through EmitNewObjectOfConstants.
void EmitNew(ParseNode* pnode, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    Js::ArgSlot argCount = pnode->AsParseNodeCall()->argCount;
    argCount++; // include "this"

    BOOL fSideEffectArgs = FALSE;
    unsigned int tmpCount = CountArguments(pnode->AsParseNodeCall()->pnodeArgs, &fSideEffectArgs);
    AssertOrFailFastMsg(argCount == tmpCount, "argCount cannot overflow as max args capped at parser level");

    byteCodeGenerator->StartStatement(pnode);

    // Start call, allocate out param space
    funcInfo->StartRecordingOutArgs(argCount);

    // Assign the call target operand(s), putting them into expression temps if necessary to protect
    // them from side-effects.
    if (fSideEffectArgs)
    {
        SaveOpndValue(pnode->AsParseNodeCall()->pnodeTarget, funcInfo);
    }

    Emit(pnode->AsParseNodeCall()->pnodeTarget, byteCodeGenerator, funcInfo, false, true);

    if (pnode->AsParseNodeCall()->pnodeArgs == nullptr)
    {
        // No explicit arguments: only the implicit 'this' slot is passed.
        funcInfo->ReleaseLoc(pnode->AsParseNodeCall()->pnodeTarget);
        Js::OpCode op = (CreateNativeArrays(byteCodeGenerator, funcInfo)
            && CallTargetIsArray(pnode->AsParseNodeCall()->pnodeTarget)) ?
            Js::OpCode::NewScObjArray : Js::OpCode::NewScObject;
        Assert(argCount == 1);

        Js::ProfileId callSiteId = byteCodeGenerator->GetNextCallSiteId(op);
        byteCodeGenerator->Writer()->StartCall(Js::OpCode::StartCall, argCount);
        byteCodeGenerator->Writer()->CallI(op, funcInfo->AcquireLoc(pnode), pnode->AsParseNodeCall()->pnodeTarget->location, argCount, callSiteId);
    }
    else
    {
        uint32 actualArgCount = 0;

        if (IsCallOfConstants(pnode))
        {
            // All arguments are numeric literals: fold them into a NewScObject_A aux array.
            byteCodeGenerator->Writer()->StartCall(Js::OpCode::StartCall, argCount);
            funcInfo->ReleaseLoc(pnode->AsParseNodeCall()->pnodeTarget);
            actualArgCount = EmitNewObjectOfConstants(pnode, byteCodeGenerator, funcInfo, argCount);
        }
        else
        {
            Js::OpCode op;
            if ((CreateNativeArrays(byteCodeGenerator, funcInfo) && CallTargetIsArray(pnode->AsParseNodeCall()->pnodeTarget)))
            {
                op = pnode->AsParseNodeCall()->spreadArgCount > 0 ? Js::OpCode::NewScObjArraySpread : Js::OpCode::NewScObjArray;
            }
            else
            {
                op = pnode->AsParseNodeCall()->spreadArgCount > 0 ? Js::OpCode::NewScObjectSpread : Js::OpCode::NewScObject;
            }

            Js::ProfileId callSiteId = byteCodeGenerator->GetNextCallSiteId(op);

            // Only emit profiled argouts if we're going to profile this call.
            bool emitProfiledArgouts = callSiteId != byteCodeGenerator->GetCurrentCallSiteId();

            Js::AuxArray<uint32> *spreadIndices = nullptr;
            actualArgCount = EmitArgList(pnode->AsParseNodeCall()->pnodeArgs, Js::Constants::NoRegister, Js::Constants::NoRegister,
                false, true, byteCodeGenerator, funcInfo, callSiteId, argCount, pnode->AsParseNodeCall()->hasDestructuring, emitProfiledArgouts,
                pnode->AsParseNodeCall()->spreadArgCount, &spreadIndices);
            funcInfo->ReleaseLoc(pnode->AsParseNodeCall()->pnodeTarget);

            if (pnode->AsParseNodeCall()->spreadArgCount > 0)
            {
                Assert(spreadIndices != nullptr);
                uint spreadExtraAlloc = UInt32Math::Mul(spreadIndices->count, sizeof(uint32));
                uint spreadIndicesSize = UInt32Math::Add(sizeof(*spreadIndices), spreadExtraAlloc);
                byteCodeGenerator->Writer()->CallIExtended(op, funcInfo->AcquireLoc(pnode), pnode->AsParseNodeCall()->pnodeTarget->location,
                    (uint16)actualArgCount, Js::CallIExtended_SpreadArgs,
                    spreadIndices, spreadIndicesSize, callSiteId);
            }
            else
            {
                byteCodeGenerator->Writer()->CallI(op, funcInfo->AcquireLoc(pnode), pnode->AsParseNodeCall()->pnodeTarget->location,
                    (uint16)actualArgCount, callSiteId);
            }
        }

        Assert(argCount == actualArgCount);
    }

    // End call, pop param space
    funcInfo->EndRecordingOutArgs(argCount);
    return;
}

// Emits a call expression. NOTE(review): this function continues beyond the end of this view;
// only its opening portion is documented here.
void EmitCall(
    ParseNodeCall * pnodeCall,
    ByteCodeGenerator* byteCodeGenerator,
    FuncInfo* funcInfo,
    BOOL fReturnValue,
    BOOL fEvaluateComponents,
    Js::RegSlot overrideThisLocation,
    Js::RegSlot newTargetLocation)
{
    // If the call returns a float, we'll note this in the byte code.
    Js::RegSlot thisLocation = Js::Constants::NoRegister;
    Js::RegSlot callObjLocation = Js::Constants::NoRegister;
    BOOL fHasNewTarget = newTargetLocation != Js::Constants::NoRegister;
    BOOL fSideEffectArgs = FALSE;
    BOOL fIsSuperCall = pnodeCall->isSuperCall;
    ParseNode *pnodeTarget = pnodeCall->pnodeTarget;
    ParseNode *pnodeArgs = pnodeCall->pnodeArgs;
    uint16 spreadArgCount = pnodeCall->spreadArgCount;

    if (CreateNativeArrays(byteCodeGenerator, funcInfo) && CallTargetIsArray(pnodeTarget))
    {
        // some minifiers (potentially incorrectly) assume that "v = new Array()" and "v = Array()" are equivalent,
        // and replace the former with the latter to save 4 characters. What that means for us is that it, at least
        // initially, uses the "Call" path. We want to guess that it _is_ just "new Array()" and change over to the
        // "new" path, since then our native array handling can kick in.
        /*EmitNew(pnode, byteCodeGenerator, funcInfo);
        return;*/
    }

    unsigned int argCount = CountArguments(pnodeArgs, &fSideEffectArgs);

    BOOL fIsEval = pnodeCall->isEvalCall;
    Js::ArgSlot argSlotCount = (Js::ArgSlot)argCount;

    if (fIsEval)
    {
        Assert(!fHasNewTarget);

        //
        // "eval" takes the closure environment as an extra argument
        // Pass the closure env only if some argument is passed
        // For just eval(), don't pass the closure environment
        //
        if (argCount > 1)
        {
            argCount++;
        }
    }
    else if (fHasNewTarget)
    {
        // When we need to pass new.target explicitly, it is passed as an extra argument.
        // This is similar to how eval passes an extra argument for the frame display and is
        // used to support cases where we need to pass both 'this' and new.target as part of
        // a function call.
        // OpCode::LdNewTarget knows how to look at the call flags and fetch this argument.
        argCount++;
    }

    // argCount indicates the total arguments count including the extra arguments.
    // argSlotCount indicates the actual arguments count. So argCount should always never be less than argSlotCount.
if (argCount < (unsigned int)argSlotCount) { Js::Throw::OutOfMemory(); } if (fReturnValue) { pnodeCall->isUsed = true; } // // Set up the call. // bool releaseThisLocation = true; // We already emit the call target for super calls in EmitSuperCall if (!fIsSuperCall) { if (!fEvaluateComponents) { EmitCallTargetNoEvalComponents(pnodeTarget, fSideEffectArgs, &thisLocation, &releaseThisLocation, &callObjLocation, byteCodeGenerator, funcInfo); } else { EmitCallTarget(pnodeTarget, fSideEffectArgs, &thisLocation, &releaseThisLocation, &callObjLocation, byteCodeGenerator, funcInfo); } } // If we are strictly overriding the this location, ignore what the call target set this location to. if (overrideThisLocation != Js::Constants::NoRegister) { thisLocation = overrideThisLocation; releaseThisLocation = false; } // Evaluate the arguments (nothing mode-specific here). // Start call, allocate out param space // We have to use the arguments count including the extra args to Start Call as we use it to allocated space for all the args funcInfo->StartRecordingOutArgs(argCount); Js::ProfileId callSiteId = byteCodeGenerator->GetNextCallSiteId(Js::OpCode::CallI); // Only emit profiled argouts if we're going to allocate callSiteInfo (on the DynamicProfileInfo) for this call. bool emitProfiledArgouts = callSiteId != byteCodeGenerator->GetCurrentCallSiteId(); Js::AuxArray<uint32> *spreadIndices; EmitArgList(pnodeArgs, thisLocation, newTargetLocation, fIsEval, fEvaluateComponents, byteCodeGenerator, funcInfo, callSiteId, (Js::ArgSlot)argCount, pnodeCall->hasDestructuring, emitProfiledArgouts, spreadArgCount, &spreadIndices); if (!fEvaluateComponents) { EmitCallInstrNoEvalComponents(pnodeCall, fIsEval, thisLocation, callObjLocation, argSlotCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices); } else { EmitCallInstr(pnodeCall, fIsEval, fHasNewTarget, releaseThisLocation ? 
thisLocation : Js::Constants::NoRegister, callObjLocation, argSlotCount, byteCodeGenerator, funcInfo, callSiteId, spreadIndices); } // End call, pop param space funcInfo->EndRecordingOutArgs((Js::ArgSlot)argCount); } void EmitInvoke( Js::RegSlot location, Js::RegSlot callObjLocation, Js::PropertyId propertyId, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo) { EmitMethodFld(false, false, location, callObjLocation, propertyId, byteCodeGenerator, funcInfo); funcInfo->StartRecordingOutArgs(1); Js::ProfileId callSiteId = byteCodeGenerator->GetNextCallSiteId(Js::OpCode::CallI); byteCodeGenerator->Writer()->StartCall(Js::OpCode::StartCall, 1); EmitArgListStart(callObjLocation, byteCodeGenerator, funcInfo, callSiteId); byteCodeGenerator->Writer()->CallI(Js::OpCode::CallI, location, location, 1, callSiteId); } void EmitInvoke( Js::RegSlot location, Js::RegSlot callObjLocation, Js::PropertyId propertyId, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo, Js::RegSlot arg1Location) { EmitMethodFld(false, false, location, callObjLocation, propertyId, byteCodeGenerator, funcInfo); funcInfo->StartRecordingOutArgs(2); Js::ProfileId callSiteId = byteCodeGenerator->GetNextCallSiteId(Js::OpCode::CallI); byteCodeGenerator->Writer()->StartCall(Js::OpCode::StartCall, 2); EmitArgListStart(callObjLocation, byteCodeGenerator, funcInfo, callSiteId); byteCodeGenerator->Writer()->ArgOut<true>(1, arg1Location, callSiteId, false /*emitProfiledArgout*/); byteCodeGenerator->Writer()->CallI(Js::OpCode::CallI, location, location, 2, callSiteId); } void EmitComputedFunctionNameVar(ParseNode *nameNode, ParseNodeFnc *exprNode, ByteCodeGenerator *byteCodeGenerator) { AssertMsg(exprNode != nullptr, "callers of this function should pass in a valid expression Node"); Assert(exprNode->HasComputedName()); if (nameNode == nullptr) { return; } if (exprNode->pnodeName == nullptr) { byteCodeGenerator->Writer()->Reg2(Js::OpCode::SetComputedNameVar, exprNode->location, nameNode->location); } } 
// Emits one member of an object literal or class body: plain property, getter/setter,
// computed-name member, or shorthand. 'parentNode' is the class node for class members
// (used for the prototype load and static-name bookkeeping); 'useStore' selects a store
// opcode instead of an init opcode (needed once a computed name may have redefined a property);
// 'isObjectEmpty' is an in/out flag used only for class members to lazily load the prototype.
void EmitMemberNode(ParseNode *memberNode, Js::RegSlot objectLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, ParseNode* parentNode, bool useStore, bool* isObjectEmpty = nullptr)
{
    ParseNode *nameNode = memberNode->AsParseNodeBin()->pnode1;
    ParseNode *exprNode = memberNode->AsParseNodeBin()->pnode2;

    bool isFncDecl = exprNode->nop == knopFncDecl;
    bool isClassMember = isFncDecl && exprNode->AsParseNodeFnc()->IsClassMember();

    if (isFncDecl)
    {
        // Methods capture the literal/class object as their [[HomeObject]] (for super references).
        Assert(exprNode->AsParseNodeFnc()->HasHomeObj());
        exprNode->AsParseNodeFnc()->SetHomeObjLocation(objectLocation);
    }

    // Moved SetComputedNameVar before LdFld of prototype because loading the prototype undefers the function TypeHandler
    // which makes this bytecode too late to influence the function.name.
    if (nameNode->nop == knopComputedName)
    {
        // Computed property name
        // Transparently pass the name expr
        // The Emit will replace this with a temp register if necessary to preserve the value.
        nameNode->location = nameNode->AsParseNodeUni()->pnode1->location;

        // Save the previous value of the flag to be restored later.
        bool prevFlag = byteCodeGenerator->forceStrictModeForClassComputedPropertyName;
        // Strict mode must be enforced on the evaluation of computed property names inside
        // classes, thus enable the flag if the computed property name is a class member.
        byteCodeGenerator->forceStrictModeForClassComputedPropertyName = isClassMember || prevFlag;

        EmitBinaryOpnds(nameNode, exprNode, byteCodeGenerator, funcInfo);

        // Restore the flag's previous value.
        byteCodeGenerator->forceStrictModeForClassComputedPropertyName = prevFlag;

        if (isFncDecl && !exprNode->AsParseNodeFnc()->IsClassConstructor())
        {
            EmitComputedFunctionNameVar(nameNode, exprNode->AsParseNodeFnc(), byteCodeGenerator);
        }
    }

    // Classes allocates a RegSlot as part of Instance Methods EmitClassInitializers,
    // but if we don't have any members then we don't need to load the prototype.
    Assert(isClassMember == (isObjectEmpty != nullptr));
    if (isClassMember && *isObjectEmpty)
    {
        // First class member seen: load <class>.prototype into objectLocation once.
        *isObjectEmpty = false;
        int cacheId = funcInfo->FindOrAddInlineCacheId(parentNode->location, Js::PropertyIds::prototype, false, false);
        byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, objectLocation, parentNode->location, cacheId);
    }

    if (nameNode->nop == knopComputedName)
    {
        AssertOrFailFast(memberNode->nop == knopGetMember || memberNode->nop == knopSetMember || memberNode->nop == knopMember);

        // Pick the element-init opcode by member kind (get/set/plain) and class vs. literal.
        Js::OpCode setOp = memberNode->nop == knopGetMember ?
            (isClassMember ? Js::OpCode::InitClassMemberGetComputedName : Js::OpCode::InitGetElemI) :
            memberNode->nop == knopSetMember ?
            (isClassMember ? Js::OpCode::InitClassMemberSetComputedName : Js::OpCode::InitSetElemI) :
            (isClassMember ? Js::OpCode::InitClassMemberComputedName : Js::OpCode::InitComputedProperty);

        // Save the previous value of the flag to be restored later.
        bool prevFlag = byteCodeGenerator->forceStrictModeForClassComputedPropertyName;
        byteCodeGenerator->forceStrictModeForClassComputedPropertyName = isClassMember || prevFlag;

        // Strict mode must be enforced on the evaluation of computed property names inside
        // classes, thus enable the flag if the computed property name is a class member.
        byteCodeGenerator->Writer()->Element(setOp, exprNode->location, objectLocation, nameNode->location, true,
            byteCodeGenerator->forceStrictModeForClassComputedPropertyName);

        // Restore the flag's previous value.
        byteCodeGenerator->forceStrictModeForClassComputedPropertyName = prevFlag;

        funcInfo->ReleaseLoc(exprNode);
        funcInfo->ReleaseLoc(nameNode);

        return;
    }

    Js::OpCode stFldOpCode = (Js::OpCode)0;
    if (useStore)
    {
        stFldOpCode = ByteCodeGenerator::GetStFldOpCode(funcInfo, false, false, false, isClassMember);
    }

    Emit(exprNode, byteCodeGenerator, funcInfo, false);
    Js::PropertyId propertyId = nameNode->AsParseNodeStr()->pid->GetPropertyId();

    if (Js::PropertyIds::name == propertyId
        && exprNode->nop == knopFncDecl
        && exprNode->AsParseNodeFnc()->IsStaticMember()
        && parentNode != nullptr && parentNode->nop == knopClassDecl
        && parentNode->AsParseNodeClass()->pnodeConstructor != nullptr)
    {
        // A static member literally named "name" shadows the class's own .name property;
        // flag the constructor so runtime name resolution knows about it.
        Js::ParseableFunctionInfo* nameFunc = parentNode->AsParseNodeClass()->pnodeConstructor->funcInfo->byteCodeFunction->GetParseableFunctionInfo();
        nameFunc->SetIsStaticNameFunction(true);
    }

    if (memberNode->nop == knopMember || memberNode->nop == knopMemberShort)
    {
        // The internal prototype should be set only if the production is of the form PropertyDefinition : PropertyName : AssignmentExpression
        if (propertyId == Js::PropertyIds::__proto__ && memberNode->nop != knopMemberShort && (exprNode->nop != knopFncDecl || !exprNode->AsParseNodeFnc()->IsMethod()))
        {
            byteCodeGenerator->Writer()->Property(Js::OpCode::InitProto, exprNode->location, objectLocation,
                funcInfo->FindOrAddReferencedPropertyId(propertyId));
        }
        else
        {
            uint cacheId = funcInfo->FindOrAddInlineCacheId(objectLocation, propertyId, false, true);
            Js::OpCode patchablePropertyOpCode;
            if (useStore)
            {
                patchablePropertyOpCode = stFldOpCode;
            }
            else if (isClassMember)
            {
                patchablePropertyOpCode = Js::OpCode::InitClassMember;
            }
            else
            {
                patchablePropertyOpCode = Js::OpCode::InitFld;
            }
            byteCodeGenerator->Writer()->PatchableProperty(patchablePropertyOpCode, exprNode->location, objectLocation, cacheId);
        }
    }
    else
    {
        AssertOrFailFast(memberNode->nop == knopGetMember || memberNode->nop == knopSetMember);

        Js::OpCode setOp = memberNode->nop == knopGetMember ?
            (isClassMember ? Js::OpCode::InitClassMemberGet : Js::OpCode::InitGetFld) :
            (isClassMember ? Js::OpCode::InitClassMemberSet : Js::OpCode::InitSetFld);

        byteCodeGenerator->Writer()->Property(setOp, exprNode->location, objectLocation, funcInfo->FindOrAddReferencedPropertyId(propertyId));
    }

    funcInfo->ReleaseLoc(exprNode);

    // Defining valueOf/toString anywhere disables engine-wide optimizations that assume
    // the built-in versions; record that side effect on the script context.
    if (propertyId == Js::PropertyIds::valueOf)
    {
        byteCodeGenerator->GetScriptContext()->optimizationOverrides.SetSideEffects(Js::SideEffects_ValueOf);
    }
    else if (propertyId == Js::PropertyIds::toString)
    {
        byteCodeGenerator->GetScriptContext()->optimizationOverrides.SetSideEffects(Js::SideEffects_ToString);
    }
}

// Emits an object-spread member ({...expr}): evaluates the source expression and copies
// its properties into the literal via SpreadObjectLiteral.
void EmitObjectSpreadNode(ParseNode *spreadNode, Js::RegSlot objectLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    Js::RegSlot fromObjectLocation;
    ParseNode *exprNode = spreadNode->AsParseNodeUni()->pnode1;

    Emit(exprNode, byteCodeGenerator, funcInfo, false);
    fromObjectLocation = exprNode->location;
    byteCodeGenerator->Writer()->Reg2(Js::OpCode::SpreadObjectLiteral, fromObjectLocation, objectLocation);
    funcInfo->ReleaseLoc(exprNode);
}

// Walks a class body's member list and emits each member via EmitMemberNode.
// 'isObjectEmpty' starts true so the first member triggers the lazy prototype load.
void EmitClassInitializers(ParseNode *memberList, Js::RegSlot objectLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, ParseNode* parentNode, bool isObjectEmpty)
{
    if (memberList != nullptr)
    {
        while (memberList->nop == knopList)
        {
            ParseNode *memberNode = memberList->AsParseNodeBin()->pnode1;
            EmitMemberNode(memberNode, objectLocation, byteCodeGenerator, funcInfo, parentNode, /*useStore*/ false, &isObjectEmpty);
            memberList = memberList->AsParseNodeBin()->pnode2;
        }
        // Last member (the list tail is the final member node, not a knopList).
        EmitMemberNode(memberList, objectLocation, byteCodeGenerator, funcInfo, parentNode, /*useStore*/ false, &isObjectEmpty);
    }
}

// Emits an object literal. Two passes over the member list:
// pass 1 collects the non-numeric literal property names (stopping at the first computed
// name or spread, after which the shape can't be precomputed) into a PropertyIdArray so the
// object can be created with its final type up front (NewScObjectLiteral); pass 2 emits the
// actual member initializers/spreads.
void EmitObjectInitializers(ParseNode *memberList, Js::RegSlot objectLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    ParseNode *pmemberList = memberList;
    unsigned int argCount = 0;
    uint32 value;
    Js::PropertyId propertyId;

    //
    // 1. Add all non-int property ids to a dictionary propertyIds with value true
    // 2. Get the count of propertyIds
    // 3. Create a propertyId array of size count
    // 4. Put the propIds in the auxiliary area
    // 5. Get the objectLiteralCacheId
    // 6. Generate propId inits with values
    //

    // Handle propertyId collision
    typedef JsUtil::BaseHashSet<Js::PropertyId, ArenaAllocator, PowerOf2SizePolicy> PropertyIdSet;
    PropertyIdSet* propertyIds = Anew(byteCodeGenerator->GetAllocator(), PropertyIdSet, byteCodeGenerator->GetAllocator(), 17);

    bool hasComputedNameOrSpread = false;
    if (memberList != nullptr)
    {
        while (memberList->nop == knopList)
        {
            if (memberList->AsParseNodeBin()->pnode1->nop == knopEllipsis || memberList->AsParseNodeBin()->pnode1->AsParseNodeBin()->pnode1->nop == knopComputedName)
            {
                hasComputedNameOrSpread = true;
                break;
            }
            propertyId = memberList->AsParseNodeBin()->pnode1->AsParseNodeBin()->pnode1->AsParseNodeStr()->pid->GetPropertyId();
            if (!byteCodeGenerator->GetScriptContext()->IsNumericPropertyId(propertyId, &value))
            {
                propertyIds->Item(propertyId);
            }

            memberList = memberList->AsParseNodeBin()->pnode2;
        }

        // Tail of the list is the final member node.
        if (memberList->nop != knopEllipsis && memberList->AsParseNodeBin()->pnode1->nop != knopComputedName && !hasComputedNameOrSpread)
        {
            propertyId = memberList->AsParseNodeBin()->pnode1->AsParseNodeStr()->pid->GetPropertyId();
            if (!byteCodeGenerator->GetScriptContext()->IsNumericPropertyId(propertyId, &value))
            {
                propertyIds->Item(propertyId);
            }
        }
    }

    argCount = propertyIds->Count();

    memberList = pmemberList;
    if ((memberList == nullptr) || (argCount == 0))
    {
        // Empty literal or numeric property only object literal
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::NewScObjectSimple, objectLocation);
    }
    else
    {
        uint32 allocSize = UInt32Math::Mul(argCount, sizeof(Js::PropertyId));
        Js::PropertyIdArray *propIds = AnewPlus(byteCodeGenerator->GetAllocator(), allocSize, Js::PropertyIdArray, argCount, 0);

        if (propertyIds->ContainsKey(Js::PropertyIds::__proto__))
        {
            // Always record whether the initializer contains __proto__ no matter if current environment has it enabled
            // or not, in case the bytecode is later run with __proto__ enabled.
            propIds->has__proto__ = true;
        }

        // Second walk in source order; Remove() from the set deduplicates repeated names so
        // each property id appears once in the array.
        unsigned int argIndex = 0;
        while (memberList->nop == knopList)
        {
            if (memberList->AsParseNodeBin()->pnode1->nop == knopEllipsis || memberList->AsParseNodeBin()->pnode1->AsParseNodeBin()->pnode1->nop == knopComputedName)
            {
                break;
            }
            propertyId = memberList->AsParseNodeBin()->pnode1->AsParseNodeBin()->pnode1->AsParseNodeStr()->pid->GetPropertyId();
            if (!byteCodeGenerator->GetScriptContext()->IsNumericPropertyId(propertyId, &value) && propertyIds->Remove(propertyId))
            {
                propIds->elements[argIndex] = propertyId;
                argIndex++;
            }
            memberList = memberList->AsParseNodeBin()->pnode2;
        }

        if (memberList->nop != knopEllipsis && memberList->AsParseNodeBin()->pnode1->nop != knopComputedName && !hasComputedNameOrSpread)
        {
            propertyId = memberList->AsParseNodeBin()->pnode1->AsParseNodeStr()->pid->GetPropertyId();
            if (!byteCodeGenerator->GetScriptContext()->IsNumericPropertyId(propertyId, &value) && propertyIds->Remove(propertyId))
            {
                propIds->elements[argIndex] = propertyId;
                argIndex++;
            }
        }

        uint32 literalObjectId = funcInfo->GetParsedFunctionBody()->NewObjectLiteral();

        // Generate the opcode with propIds and cacheId
        byteCodeGenerator->Writer()->Auxiliary(Js::OpCode::NewScObjectLiteral, objectLocation, propIds, UInt32Math::Add(sizeof(Js::PropertyIdArray), allocSize), literalObjectId);

        Adelete(byteCodeGenerator->GetAllocator(), propertyIds);

        AdeletePlus(byteCodeGenerator->GetAllocator(), allocSize, propIds);
    }

    memberList = pmemberList;

    bool useStore = false;
    // Generate the actual assignment to those properties
    if (memberList != nullptr)
    {
        while (memberList->nop == knopList)
        {
            ParseNode *memberNode = memberList->AsParseNodeBin()->pnode1;

            if (memberNode->nop == knopEllipsis)
            {
                byteCodeGenerator->StartSubexpression(memberNode);
                EmitObjectSpreadNode(memberNode, objectLocation, byteCodeGenerator, funcInfo);
                byteCodeGenerator->EndSubexpression(memberNode);
            }
            else
            {
                // After the first computed name, later members must use store (not init)
                // semantics, since the computed name may collide with a later literal name.
                if (memberNode->AsParseNodeBin()->pnode1->nop == knopComputedName)
                {
                    useStore = true;
                }

                byteCodeGenerator->StartSubexpression(memberNode);
                EmitMemberNode(memberNode, objectLocation, byteCodeGenerator, funcInfo, nullptr, useStore);
                byteCodeGenerator->EndSubexpression(memberNode);
            }
            memberList = memberList->AsParseNodeBin()->pnode2;
        }

        byteCodeGenerator->StartSubexpression(memberList);
        if (memberList->nop == knopEllipsis)
        {
            EmitObjectSpreadNode(memberList, objectLocation, byteCodeGenerator, funcInfo);
        }
        else
        {
            EmitMemberNode(memberList, objectLocation, byteCodeGenerator, funcInfo, nullptr, useStore);
        }
        byteCodeGenerator->EndSubexpression(memberList);
    }
}

// Emits a (non-tagged) template literal as a left-to-right string concatenation:
// first cooked string, then alternating Conv_Str(expression) + Add_A and string + Add_A.
// Tagged templates are handled elsewhere (their callsite object lives in the constants table).
void EmitStringTemplate(ParseNodeStrTemplate *pnodeStrTemplate, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    Assert(pnodeStrTemplate->pnodeStringLiterals);

    // For a tagged string template, we will create the callsite constant object as part of the FunctionBody constants table.
    // We only need to emit code for non-tagged string templates here.
    if (!pnodeStrTemplate->isTaggedTemplate)
    {
        // If we have no substitutions and this is not a tagged template, we can emit just the single cooked string.
        if (pnodeStrTemplate->pnodeSubstitutionExpressions == nullptr)
        {
            Assert(pnodeStrTemplate->pnodeStringLiterals->nop != knopList);

            funcInfo->AcquireLoc(pnodeStrTemplate);
            Emit(pnodeStrTemplate->pnodeStringLiterals, byteCodeGenerator, funcInfo, false);

            Assert(pnodeStrTemplate->location != pnodeStrTemplate->pnodeStringLiterals->location);

            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnodeStrTemplate->location, pnodeStrTemplate->pnodeStringLiterals->location);
            funcInfo->ReleaseLoc(pnodeStrTemplate->pnodeStringLiterals);
        }
        else
        {
            // If we have substitutions but no tag function, we can skip the callSite object construction (and also ignore raw strings).
            funcInfo->AcquireLoc(pnodeStrTemplate);

            // First string must be a list node since we have substitutions.
            AssertMsg(pnodeStrTemplate->pnodeStringLiterals->nop == knopList, "First string in the list must be a knopList node.");

            ParseNode* stringNodeList = pnodeStrTemplate->pnodeStringLiterals;

            // Emit the first string and load that into the pnode location.
            Emit(stringNodeList->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false);

            Assert(pnodeStrTemplate->location != stringNodeList->AsParseNodeBin()->pnode1->location);

            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnodeStrTemplate->location, stringNodeList->AsParseNodeBin()->pnode1->location);
            funcInfo->ReleaseLoc(stringNodeList->AsParseNodeBin()->pnode1);

            ParseNode* expressionNodeList = pnodeStrTemplate->pnodeSubstitutionExpressions;
            ParseNode* stringNode;
            ParseNode* expressionNode;

            // Now append the substitution expressions and remaining string constants via normal add operator
            // We will always have one more string constant than substitution expression
            // `strcon1 ${expr1} strcon2 ${expr2} strcon3` = strcon1 + expr1 + strcon2 + expr2 + strcon3
            //
            // strcon1 --- step 1 (above)
            // expr1 \__ step 2
            // strcon2 /
            // expr2 \__ step 3
            // strcon3 /

            while (stringNodeList->nop == knopList)
            {
                // If the current head of the expression list is a list, fetch the node and walk the list.
                if (expressionNodeList->nop == knopList)
                {
                    expressionNode = expressionNodeList->AsParseNodeBin()->pnode1;
                    expressionNodeList = expressionNodeList->AsParseNodeBin()->pnode2;
                }
                else
                {
                    // This is the last element of the expression list.
                    expressionNode = expressionNodeList;
                }

                // Emit the expression and append it to the string we're building.
                Emit(expressionNode, byteCodeGenerator, funcInfo, false);

                Js::RegSlot toStringLocation = funcInfo->AcquireTmpRegister();
                byteCodeGenerator->Writer()->Reg2(Js::OpCode::Conv_Str, toStringLocation, expressionNode->location);
                byteCodeGenerator->Writer()->Reg3(Js::OpCode::Add_A, pnodeStrTemplate->location, pnodeStrTemplate->location, toStringLocation);
                funcInfo->ReleaseTmpRegister(toStringLocation);
                funcInfo->ReleaseLoc(expressionNode);

                // Move to the next string in the list - we already got ahead of the expressions in the first string literal above.
                stringNodeList = stringNodeList->AsParseNodeBin()->pnode2;

                // If the current head of the string literal list is also a list node, need to fetch the actual string literal node.
                if (stringNodeList->nop == knopList)
                {
                    stringNode = stringNodeList->AsParseNodeBin()->pnode1;
                }
                else
                {
                    // This is the last element of the string literal list.
                    stringNode = stringNodeList;
                }

                // Emit the string node following the previous expression and append it to the string.
                // This is either just some string in the list or it is the last string.
                Emit(stringNode, byteCodeGenerator, funcInfo, false);
                byteCodeGenerator->Writer()->Reg3(Js::OpCode::Add_A, pnodeStrTemplate->location, pnodeStrTemplate->location, stringNode->location);
                funcInfo->ReleaseLoc(stringNode);
            }
        }
    }
}

// Emits element stores for an array literal. Fast paths:
// - all-int / all-number literals (with native arrays enabled): single NewScIntArray /
//   NewScFltArray opcode with the values in an auxiliary blob;
// - all-tagged-int literals: StArrSegItem_A with a packed Var array;
// otherwise per-element stores, with spread elements collected into a spread-index aux
// array and finalized by SpreadArrayLiteral.
void SetNewArrayElements(ParseNode *pnode, Js::RegSlot arrayLocation, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    ParseNode *args = pnode->AsParseNodeUni()->pnode1;
    uint argCount = pnode->AsParseNodeArrLit()->count;
    uint spreadCount = pnode->AsParseNodeArrLit()->spreadCount;
    bool nativeArrays = CreateNativeArrays(byteCodeGenerator, funcInfo);

    bool arrayIntOpt = nativeArrays && pnode->AsParseNodeArrLit()->arrayOfInts;
    if (arrayIntOpt)
    {
        // Overflow-checked sizing for the auxiliary int array.
        int extraAlloc = 0, auxSize = 0;
        if (Int32Math::Mul(argCount, sizeof(int32), &extraAlloc) || Int32Math::Add(sizeof(Js::AuxArray<int>), extraAlloc, &auxSize))
        {
            ::Math::DefaultOverflowPolicy();
        }
        Js::AuxArray<int> *ints = AnewPlus(byteCodeGenerator->GetAllocator(), extraAlloc, Js::AuxArray<int32>, argCount);
        EmitConstantArgsToIntArray(byteCodeGenerator, ints->elements, args, argCount);
        Assert(!pnode->AsParseNodeArrLit()->hasMissingValues);
        byteCodeGenerator->Writer()->Auxiliary(
            Js::OpCode::NewScIntArray,
            pnode->location,
            ints,
            auxSize,
            argCount);
        AdeletePlus(byteCodeGenerator->GetAllocator(), extraAlloc, ints);
        return;
    }

    bool arrayNumOpt = nativeArrays && pnode->AsParseNodeArrLit()->arrayOfNumbers;
    if (arrayNumOpt)
    {
        int extraAlloc = 0, auxSize = 0;
        if (Int32Math::Mul(argCount, sizeof(double), &extraAlloc) || Int32Math::Add(sizeof(Js::AuxArray<double>), extraAlloc, &auxSize))
        {
            ::Math::DefaultOverflowPolicy();
        }
        Js::AuxArray<double> *doubles = AnewPlus(byteCodeGenerator->GetAllocator(), extraAlloc, Js::AuxArray<double>, argCount);
        EmitConstantArgsToFltArray(byteCodeGenerator, doubles->elements, args, argCount);
        Assert(!pnode->AsParseNodeArrLit()->hasMissingValues);
        byteCodeGenerator->Writer()->Auxiliary(
            Js::OpCode::NewScFltArray,
            pnode->location,
            doubles,
            auxSize,
            argCount);
        AdeletePlus(byteCodeGenerator->GetAllocator(), extraAlloc, doubles);
        return;
    }

    bool arrayLitOpt = pnode->AsParseNodeArrLit()->arrayOfTaggedInts && pnode->AsParseNodeArrLit()->count > 1;
    Assert(!arrayLitOpt || !nativeArrays);

    // With spreads, build the pre-spread array in a temp, then expand into the real target.
    Js::RegSlot spreadArrLoc = arrayLocation;
    Js::AuxArray<uint32> *spreadIndices = nullptr;
    const uint extraAlloc = UInt32Math::Mul(spreadCount, sizeof(uint32));
    if (pnode->AsParseNodeArrLit()->spreadCount > 0)
    {
        arrayLocation = funcInfo->AcquireTmpRegister();
        spreadIndices = AnewPlus(byteCodeGenerator->GetAllocator(), extraAlloc, Js::AuxArray<uint32>, spreadCount);
    }

    byteCodeGenerator->Writer()->Reg1Unsigned1(
        pnode->AsParseNodeArrLit()->hasMissingValues ? Js::OpCode::NewScArrayWithMissingValues : Js::OpCode::NewScArray,
        arrayLocation,
        argCount);

    if (args != nullptr)
    {
        // Pick the element-store opcode based on literal size/shape; some forms store
        // relative to the head segment (loaded via LdArrHead into a temp) rather than
        // the array object itself.
        Js::OpCode opcode;
        Js::RegSlot arrLoc;
        if (argCount == 1 && !byteCodeGenerator->Writer()->DoProfileNewScArrayOp(Js::OpCode::NewScArray))
        {
            opcode = Js::OpCode::StArrItemC_CI4;
            arrLoc = arrayLocation;
        }
        else if (arrayLitOpt)
        {
            opcode = Js::OpCode::StArrSegItem_A;
            arrLoc = funcInfo->AcquireTmpRegister();
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::LdArrHead, arrLoc, arrayLocation);
        }
        else if (Js::JavascriptArray::HasInlineHeadSegment(argCount))
        {
            // The head segment will be allocated inline as an interior pointer. To keep the array alive, the set operation
            // should be done relative to the array header to keep it alive (instead of the array segment).
            opcode = Js::OpCode::StArrInlineItem_CI4;
            arrLoc = arrayLocation;
        }
        else if (argCount <= Js::JavascriptArray::MaxInitialDenseLength)
        {
            opcode = Js::OpCode::StArrSegItem_CI4;
            arrLoc = funcInfo->AcquireTmpRegister();
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::LdArrHead, arrLoc, arrayLocation);
        }
        else
        {
            opcode = Js::OpCode::StArrItemI_CI4;
            arrLoc = arrayLocation;
        }

        if (arrayLitOpt)
        {
            uint32 allocSize = UInt32Math::Mul(argCount, sizeof(Js::Var));
            Js::VarArray *vars = AnewPlus(byteCodeGenerator->GetAllocator(), allocSize, Js::VarArray, argCount);

            EmitConstantArgsToVarArray(byteCodeGenerator, vars->elements, args, argCount);

            // Generate the opcode with vars
            byteCodeGenerator->Writer()->Auxiliary(Js::OpCode::StArrSegItem_A, arrLoc, vars, UInt32Math::Add(sizeof(Js::VarArray), allocSize), argCount);

            AdeletePlus(byteCodeGenerator->GetAllocator(), allocSize, vars);
        }
        else
        {
            uint i = 0;
            unsigned spreadIndex = 0;
            Js::RegSlot rhsLocation;
            while (args->nop == knopList)
            {
                // knopEmpty = elision ([1,,3]); nothing stored, index still advances.
                if (args->AsParseNodeBin()->pnode1->nop != knopEmpty)
                {
                    Emit(args->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false);
                    rhsLocation = args->AsParseNodeBin()->pnode1->location;
                    Js::RegSlot regVal = rhsLocation;
                    if (args->AsParseNodeBin()->pnode1->nop == knopEllipsis)
                    {
                        // Spread element: store the materialized iterator list and record its index.
                        AnalysisAssert(spreadIndices);
                        regVal = funcInfo->AcquireTmpRegister();
                        byteCodeGenerator->Writer()->Reg2(Js::OpCode::LdCustomSpreadIteratorList, regVal, rhsLocation);
                        spreadIndices->elements[spreadIndex++] = i;
                    }

                    byteCodeGenerator->Writer()->ElementUnsigned1(opcode, regVal, arrLoc, i);

                    if (args->AsParseNodeBin()->pnode1->nop == knopEllipsis)
                    {
                        funcInfo->ReleaseTmpRegister(regVal);
                    }

                    funcInfo->ReleaseLoc(args->AsParseNodeBin()->pnode1);
                }

                args = args->AsParseNodeBin()->pnode2;
                i++;
            }

            if (args->nop != knopEmpty)
            {
                // Final (non-list-tail) element.
                Emit(args, byteCodeGenerator, funcInfo, false);
                rhsLocation = args->location;
                Js::RegSlot regVal = rhsLocation;
                if (args->nop == knopEllipsis)
                {
                    regVal = funcInfo->AcquireTmpRegister();
                    byteCodeGenerator->Writer()->Reg2(Js::OpCode::LdCustomSpreadIteratorList, regVal, rhsLocation);
                    AnalysisAssert(spreadIndices);
                    spreadIndices->elements[spreadIndex] = i;
                }

                byteCodeGenerator->Writer()->ElementUnsigned1(opcode, regVal, arrLoc, i);

                if (args->nop == knopEllipsis)
                {
                    funcInfo->ReleaseTmpRegister(regVal);
                }

                funcInfo->ReleaseLoc(args);
                i++;
            }
            Assert(i <= argCount);
        }

        if (arrLoc != arrayLocation)
        {
            funcInfo->ReleaseTmpRegister(arrLoc);
        }
    }

    if (pnode->AsParseNodeArrLit()->spreadCount > 0)
    {
        // Expand the recorded spread elements from the temp array into the destination.
        byteCodeGenerator->Writer()->Reg2Aux(Js::OpCode::SpreadArrayLiteral, spreadArrLoc, arrayLocation, spreadIndices, UInt32Math::Add(sizeof(Js::AuxArray<uint32>), extraAlloc), extraAlloc);
        AdeletePlus(byteCodeGenerator->GetAllocator(), extraAlloc, spreadIndices);
        funcInfo->ReleaseTmpRegister(arrayLocation);
    }
}

// FIX: TODO: mixed-mode expressions (arithmetic expressions mixed with boolean expressions); current solution
// will not short-circuit in some cases and is not complete (for example: var i=(x==y))
// This uses Aho and Ullman style double-branch generation (p. 494 ASU); we will need to peephole optimize or replace
// with special case for single-branch style.
// Emits an expression used as a branch condition. Instead of materializing a
// boolean value, evaluation transfers control to trueLabel when the expression
// is truthy and to falseLabel when it is falsy. trueFallthrough/falseFallthrough
// indicate that the corresponding label immediately follows the emitted code,
// so the explicit branch to it can be elided; at most one may be set (see the
// Assert below). Logical and/or/not are short-circuited by recursing with
// appropriately swapped labels (Aho/Ullman double-branch style — see the
// FIX/TODO note above this function).
void EmitBooleanExpression(
    _In_ ParseNode* expr,
    Js::ByteCodeLabel trueLabel,
    Js::ByteCodeLabel falseLabel,
    _In_ ByteCodeGenerator* byteCodeGenerator,
    _In_ FuncInfo* funcInfo,
    bool trueFallthrough,
    bool falseFallthrough)
{
    Assert(!trueFallthrough || !falseFallthrough);
    byteCodeGenerator->StartStatement(expr);
    switch (expr->nop)
    {
    case knopLogOr:
    {
        // a || b: if a is false, fall through to evaluate b with the caller's labels.
        Js::ByteCodeLabel leftFalse = byteCodeGenerator->Writer()->DefineLabel();
        EmitBooleanExpression(expr->AsParseNodeBin()->pnode1, trueLabel, leftFalse, byteCodeGenerator, funcInfo, false, true);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode1);
        byteCodeGenerator->Writer()->MarkLabel(leftFalse);
        EmitBooleanExpression(expr->AsParseNodeBin()->pnode2, trueLabel, falseLabel, byteCodeGenerator, funcInfo, trueFallthrough, falseFallthrough);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode2);
        break;
    }
    case knopLogAnd:
    {
        // a && b: if a is true, fall through to evaluate b with the caller's labels.
        Js::ByteCodeLabel leftTrue = byteCodeGenerator->Writer()->DefineLabel();
        EmitBooleanExpression(expr->AsParseNodeBin()->pnode1, leftTrue, falseLabel, byteCodeGenerator, funcInfo, true, false);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode1);
        byteCodeGenerator->Writer()->MarkLabel(leftTrue);
        EmitBooleanExpression(expr->AsParseNodeBin()->pnode2, trueLabel, falseLabel, byteCodeGenerator, funcInfo, trueFallthrough, falseFallthrough);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode2);
        break;
    }
    case knopLogNot:
        // !a: negate by swapping the target labels and the fallthrough flags.
        EmitBooleanExpression(expr->AsParseNodeUni()->pnode1, falseLabel, trueLabel, byteCodeGenerator, funcInfo, falseFallthrough, trueFallthrough);
        funcInfo->ReleaseLoc(expr->AsParseNodeUni()->pnode1);
        break;
    case knopEq:
    case knopEqv:
    case knopNEqv:
    case knopNe:
    case knopLt:
    case knopLe:
    case knopGe:
    case knopGt:
        // Comparisons map directly to a conditional-branch opcode (nopToOp table).
        EmitBinaryOpnds(expr->AsParseNodeBin()->pnode1, expr->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode2);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode1);
        byteCodeGenerator->Writer()->BrReg2(nopToOp[expr->nop], trueLabel,
            expr->AsParseNodeBin()->pnode1->location, expr->AsParseNodeBin()->pnode2->location);
        if (!falseFallthrough)
        {
            byteCodeGenerator->Writer()->Br(falseLabel);
        }
        break;
    case knopTrue:
        // Constant true: only an unconditional branch is needed (or nothing at all).
        if (!trueFallthrough)
        {
            byteCodeGenerator->Writer()->Br(trueLabel);
        }
        break;
    case knopFalse:
        if (!falseFallthrough)
        {
            byteCodeGenerator->Writer()->Br(falseLabel);
        }
        break;
    default:
        // Note: we usually release the temp assigned to a node after we Emit it.
        // But in this case, EmitBooleanExpression is just a wrapper around a normal Emit call,
        // and the caller of EmitBooleanExpression expects to be able to release this register.
        Emit(expr, byteCodeGenerator, funcInfo, false);
        if (trueFallthrough)
        {
            byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFalse_A, falseLabel, expr->location);
        }
        else
        {
            byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
            if (!falseFallthrough)
            {
                byteCodeGenerator->Writer()->Br(falseLabel);
            }
        }
        break;
    }
    byteCodeGenerator->EndStatement(expr);
}

// Like EmitBooleanExpression, but in addition to branching to trueLabel/falseLabel
// it stores the expression's boolean result into the 'writeto' register (LdTrue/LdFalse
// or Ld_A of the sub-expression's value), for contexts where the value is consumed
// as well as branched on.
void EmitGeneratingBooleanExpression(ParseNode *expr, Js::ByteCodeLabel trueLabel, bool truefallthrough, Js::ByteCodeLabel falseLabel, bool falsefallthrough,
    Js::RegSlot writeto, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    switch (expr->nop)
    {
    case knopLogOr:
    {
        byteCodeGenerator->StartStatement(expr);
        Js::ByteCodeLabel leftFalse = byteCodeGenerator->Writer()->DefineLabel();
        EmitGeneratingBooleanExpression(expr->AsParseNodeBin()->pnode1, trueLabel, false, leftFalse, true, writeto, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode1);
        byteCodeGenerator->Writer()->MarkLabel(leftFalse);
        EmitGeneratingBooleanExpression(expr->AsParseNodeBin()->pnode2, trueLabel, truefallthrough, falseLabel, falsefallthrough, writeto, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode2);
        byteCodeGenerator->EndStatement(expr);
        break;
    }
    case knopLogAnd:
    {
        byteCodeGenerator->StartStatement(expr);
        Js::ByteCodeLabel leftTrue =
byteCodeGenerator->Writer()->DefineLabel();
        EmitGeneratingBooleanExpression(expr->AsParseNodeBin()->pnode1, leftTrue, true, falseLabel, false, writeto, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode1);
        byteCodeGenerator->Writer()->MarkLabel(leftTrue);
        EmitGeneratingBooleanExpression(expr->AsParseNodeBin()->pnode2, trueLabel, truefallthrough, falseLabel, falsefallthrough, writeto, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode2);
        byteCodeGenerator->EndStatement(expr);
        break;
    }
    case knopLogNot:
    {
        byteCodeGenerator->StartStatement(expr);
        // this time we want a boolean expression, since Logical Not is nice and only returns true or false
        Js::ByteCodeLabel emitTrue = byteCodeGenerator->Writer()->DefineLabel();
        Js::ByteCodeLabel emitFalse = byteCodeGenerator->Writer()->DefineLabel();
        EmitBooleanExpression(expr->AsParseNodeUni()->pnode1, emitFalse, emitTrue, byteCodeGenerator, funcInfo, false, true);
        // Materialize the negated result into writeto before branching.
        byteCodeGenerator->Writer()->MarkLabel(emitTrue);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, writeto);
        byteCodeGenerator->Writer()->Br(trueLabel);
        byteCodeGenerator->Writer()->MarkLabel(emitFalse);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, writeto);
        if (!falsefallthrough)
        {
            byteCodeGenerator->Writer()->Br(falseLabel);
        }
        funcInfo->ReleaseLoc(expr->AsParseNodeUni()->pnode1);
        byteCodeGenerator->EndStatement(expr);
        break;
    }
    case knopEq:
    case knopEqv:
    case knopNEqv:
    case knopNe:
    case knopLt:
    case knopLe:
    case knopGe:
    case knopGt:
        // Comparisons: compute the boolean into expr->location (nopToCMOp table),
        // copy it to writeto, then branch on it.
        byteCodeGenerator->StartStatement(expr);
        EmitBinaryOpnds(expr->AsParseNodeBin()->pnode1, expr->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode2);
        funcInfo->ReleaseLoc(expr->AsParseNodeBin()->pnode1);
        funcInfo->AcquireLoc(expr);
        byteCodeGenerator->Writer()->Reg3(nopToCMOp[expr->nop], expr->location,
            expr->AsParseNodeBin()->pnode1->location,
            expr->AsParseNodeBin()->pnode2->location);
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, writeto, expr->location);
        // The inliner likes small bytecode
        if (!(truefallthrough || falsefallthrough))
        {
            byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
            byteCodeGenerator->Writer()->Br(falseLabel);
        }
        else if (truefallthrough && !falsefallthrough)
        {
            byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFalse_A, falseLabel, expr->location);
        }
        else if (falsefallthrough && !truefallthrough)
        {
            byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
        }
        byteCodeGenerator->EndStatement(expr);
        break;
    case knopTrue:
        byteCodeGenerator->StartStatement(expr);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, writeto);
        if (!truefallthrough)
        {
            byteCodeGenerator->Writer()->Br(trueLabel);
        }
        byteCodeGenerator->EndStatement(expr);
        break;
    case knopFalse:
        byteCodeGenerator->StartStatement(expr);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, writeto);
        if (!falsefallthrough)
        {
            byteCodeGenerator->Writer()->Br(falseLabel);
        }
        byteCodeGenerator->EndStatement(expr);
        break;
    default:
        // Note: we usually release the temp assigned to a node after we Emit it.
        // But in this case, EmitBooleanExpression is just a wrapper around a normal Emit call,
        // and the caller of EmitBooleanExpression expects to be able to release this register.
        // For diagnostics purposes, register the name and dot to the statement list.
        if (expr->nop == knopName || expr->nop == knopDot)
        {
            byteCodeGenerator->StartStatement(expr);
            Emit(expr, byteCodeGenerator, funcInfo, false);
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, writeto, expr->location);
            // The inliner likes small bytecode
            if (!(truefallthrough || falsefallthrough))
            {
                byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
                byteCodeGenerator->Writer()->Br(falseLabel);
            }
            else if (truefallthrough && !falsefallthrough)
            {
                byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFalse_A, falseLabel, expr->location);
            }
            else if (falsefallthrough && !truefallthrough)
            {
                byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
            }
            byteCodeGenerator->EndStatement(expr);
        }
        else
        {
            // Same as above, but without StartStatement/EndStatement bracketing.
            Emit(expr, byteCodeGenerator, funcInfo, false);
            byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, writeto, expr->location);
            // The inliner likes small bytecode
            if (!(truefallthrough || falsefallthrough))
            {
                byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
                byteCodeGenerator->Writer()->Br(falseLabel);
            }
            else if (truefallthrough && !falsefallthrough)
            {
                byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrFalse_A, falseLabel, expr->location);
            }
            else if (falsefallthrough && !truefallthrough)
            {
                byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, trueLabel, expr->location);
            }
        }
        break;
    }
}

// used by while and for loops
// Emits: [loop entrance] (cond?) body (incr?) br-back [continuePastLoop].
// doWhile reorders to body-then-cond. forLoopBlock, when non-null and ES6
// for-loop semantics are enabled, is re-emitted (CloneEmitBlock) per iteration
// so each iteration gets fresh block-scoped bindings.
void EmitLoop(
    ParseNodeLoop *loopNode,
    ParseNode *cond,
    ParseNode *body,
    ParseNode *incr,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    BOOL fReturnValue,
    BOOL doWhile = FALSE,
    ParseNodeBlock *forLoopBlock = nullptr)
{
    // Need to increment loop count whether we are going to profile or not for HasLoop()
    Js::ByteCodeLabel loopEntrance = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel continuePastLoop = byteCodeGenerator->Writer()->DefineLabel();
    uint loopId = byteCodeGenerator->Writer()->EnterLoop(loopEntrance);
    loopNode->loopId = loopId;
    if
(doWhile)
    {
        // do..while: body first, then the back-edge condition.
        Emit(body, byteCodeGenerator, funcInfo, fReturnValue);
        funcInfo->ReleaseLoc(body);
        if (loopNode->emitLabels)
        {
            byteCodeGenerator->Writer()->MarkLabel(loopNode->continueLabel);
        }
        // Skip emitting a provably-false condition, except in debug mode where the
        // statement must remain visible.
        if (!ByteCodeGenerator::IsFalse(cond) ||
            byteCodeGenerator->IsInDebugMode())
        {
            EmitBooleanExpression(cond, loopEntrance, continuePastLoop, byteCodeGenerator, funcInfo, false, false);
        }
        funcInfo->ReleaseLoc(cond);
    }
    else
    {
        if (cond)
        {
            // A non-zero integer literal condition is always true; no test needed.
            if (!(cond->nop == knopInt &&
                cond->AsParseNodeInt()->lw != 0))
            {
                Js::ByteCodeLabel trueLabel = byteCodeGenerator->Writer()->DefineLabel();
                EmitBooleanExpression(cond, trueLabel, continuePastLoop, byteCodeGenerator, funcInfo, true, false);
                byteCodeGenerator->Writer()->MarkLabel(trueLabel);
            }
            funcInfo->ReleaseLoc(cond);
        }
        Emit(body, byteCodeGenerator, funcInfo, fReturnValue);
        funcInfo->ReleaseLoc(body);
        // ES6 semantics: re-emit the for-loop's block per iteration for fresh let/const bindings.
        if (byteCodeGenerator->IsES6ForLoopSemanticsEnabled() &&
            forLoopBlock != nullptr)
        {
            CloneEmitBlock(forLoopBlock, byteCodeGenerator, funcInfo);
        }
        if (loopNode->emitLabels)
        {
            byteCodeGenerator->Writer()->MarkLabel(loopNode->continueLabel);
        }
        if (incr != nullptr)
        {
            Emit(incr, byteCodeGenerator, funcInfo, false);
            funcInfo->ReleaseLoc(incr);
        }
        byteCodeGenerator->Writer()->Br(loopEntrance);
    }
    byteCodeGenerator->Writer()->MarkLabel(continuePastLoop);
    if (loopNode->emitLabels)
    {
        byteCodeGenerator->Writer()->MarkLabel(loopNode->breakLabel);
    }
    byteCodeGenerator->Writer()->ExitLoop(loopId);
}

// Emits the outer loop twice: the original form, and an "inverted" form
// (invertedLoop) that is selected at runtime by BrNotHasSideEffects. A zero-trip
// test on a copy of the original condition guards entry to the inverted version.
void ByteCodeGenerator::EmitInvertedLoop(ParseNodeLoop* outerLoop, ParseNodeFor* invertedLoop, FuncInfo* funcInfo)
{
    Js::ByteCodeLabel invertedLoopLabel = this->m_writer.DefineLabel();
    Js::ByteCodeLabel afterInvertedLoop = this->m_writer.DefineLabel();
    // emit branch around original
    Emit(outerLoop->AsParseNodeFor()->pnodeInit, this, funcInfo, false);
    funcInfo->ReleaseLoc(outerLoop->AsParseNodeFor()->pnodeInit);
    this->m_writer.BrS(Js::OpCode::BrNotHasSideEffects, invertedLoopLabel, Js::SideEffects_Any);
    // emit original
    EmitLoop(outerLoop,
        outerLoop->AsParseNodeFor()->pnodeCond,
        outerLoop->AsParseNodeFor()->pnodeBody,
        outerLoop->AsParseNodeFor()->pnodeIncr,
        this,
        funcInfo,
        false);
    // clear temporary registers since inverted loop may share nodes with
    // emitted original loop
    VisitClearTmpRegs(outerLoop, this, funcInfo);
    // emit branch around inverted
    this->m_writer.Br(afterInvertedLoop);
    this->m_writer.MarkLabel(invertedLoopLabel);
    // Emit a zero trip test for the original outer-loop if the outer-loop
    // has a condition
    if (outerLoop->AsParseNodeFor()->pnodeCond)
    {
        Js::ByteCodeLabel zeroTrip = this->m_writer.DefineLabel();
        ParseNode* testNode = this->GetParser()->CopyPnode(outerLoop->AsParseNodeFor()->pnodeCond);
        EmitBooleanExpression(testNode, zeroTrip, afterInvertedLoop, this, funcInfo, true, false);
        this->m_writer.MarkLabel(zeroTrip);
        funcInfo->ReleaseLoc(testNode);
    }
    // emit inverted
    Emit(invertedLoop->pnodeInit, this, funcInfo, false);
    funcInfo->ReleaseLoc(invertedLoop->pnodeInit);
    EmitLoop(invertedLoop,
        invertedLoop->pnodeCond,
        invertedLoop->pnodeBody,
        invertedLoop->pnodeIncr,
        this,
        funcInfo,
        false);
    this->m_writer.MarkLabel(afterInvertedLoop);
}

// Calls iterable[Symbol.iterator]() and stores the resulting iterator object in
// iteratorLocation, throwing TypeError if the result is not an object.
void EmitGetIterator(Js::RegSlot iteratorLocation, Js::RegSlot iterableLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    // get iterator object from the iterable
    EmitInvoke(iteratorLocation, iterableLocation, Js::PropertyIds::_symbolIterator, byteCodeGenerator, funcInfo);

    // throw TypeError if the result is not an object
    Js::ByteCodeLabel skipThrow = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrOnObject_A, skipThrow, iteratorLocation);
    byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_NeedObject));
    byteCodeGenerator->Writer()->MarkLabel(skipThrow);
}

// Calls iterator.next() — optionally passing nextInputLocation as the argument —
// and stores the result object in itemLocation, throwing TypeError if the result
// is not an object.
void EmitIteratorNext(Js::RegSlot itemLocation, Js::RegSlot iteratorLocation, Js::RegSlot nextInputLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    // invoke next() on the iterator
    if
(nextInputLocation == Js::Constants::NoRegister)
    {
        EmitInvoke(itemLocation, iteratorLocation, Js::PropertyIds::next, byteCodeGenerator, funcInfo);
    }
    else
    {
        EmitInvoke(itemLocation, iteratorLocation, Js::PropertyIds::next, byteCodeGenerator, funcInfo, nextInputLocation);
    }

    // throw TypeError if the result is not an object
    Js::ByteCodeLabel skipThrow = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrOnObject_A, skipThrow, itemLocation);
    byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_NeedObject));
    byteCodeGenerator->Writer()->MarkLabel(skipThrow);
}

// Generating:
//     if (hasReturnFunction) {
//         value = Call Return;
//         if (value != Object)
//             throw TypeError;
//     }
// The "has return" check compares iterator.return_ against undefined only;
// NOTE(review): a null 'return' property would not be skipped here — confirm
// against callers whether that is intended.
void EmitIteratorClose(Js::RegSlot iteratorLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    Js::RegSlot returnLocation = funcInfo->AcquireTmpRegister();

    Js::ByteCodeLabel skipThrow = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel noReturn = byteCodeGenerator->Writer()->DefineLabel();

    uint cacheId = funcInfo->FindOrAddInlineCacheId(iteratorLocation, Js::PropertyIds::return_, false, false);
    byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, returnLocation, iteratorLocation, cacheId);

    byteCodeGenerator->Writer()->BrReg2(Js::OpCode::BrEq_A, noReturn, returnLocation, funcInfo->undefinedConstantRegister);

    EmitInvoke(returnLocation, iteratorLocation, Js::PropertyIds::return_, byteCodeGenerator, funcInfo);

    // throw TypeError if the result is not an Object
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrOnObject_A, skipThrow, returnLocation);
    byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_NeedObject));
    byteCodeGenerator->Writer()->MarkLabel(skipThrow);
    byteCodeGenerator->Writer()->MarkLabel(noReturn);

    funcInfo->ReleaseTmpRegister(returnLocation);
}

// Loads iteratorResult.done into doneLocation (patchable LdFld).
void EmitIteratorComplete(Js::RegSlot doneLocation, Js::RegSlot iteratorResultLocation, ByteCodeGenerator*
byteCodeGenerator, FuncInfo* funcInfo)
{
    // get the iterator result's "done" property
    uint cacheId = funcInfo->FindOrAddInlineCacheId(iteratorResultLocation, Js::PropertyIds::done, false, false);
    byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, doneLocation, iteratorResultLocation, cacheId);

    // Do not need to do ToBoolean explicitly with current uses of EmitIteratorComplete since BrTrue_A does this.
    // Add a ToBoolean controlled by template flag if needed for new uses later on.
}

// Loads iteratorResult.value into valueLocation (patchable LdFld).
void EmitIteratorValue(Js::RegSlot valueLocation, Js::RegSlot iteratorResultLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    // get the iterator result's "value" property
    uint cacheId = funcInfo->FindOrAddInlineCacheId(iteratorResultLocation, Js::PropertyIds::value, false, false);
    byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, valueLocation, iteratorResultLocation, cacheId);
}

// Emits the shared per-iteration body for for-in / for-of: assigns the current
// item (loopNode->itemLocation) to the loop variable, emits the loop body, and
// wires up continue/back-edge/break labels. Expects the caller to have already
// issued StartStatement for the loop variable.
void EmitForInOfLoopBody(ParseNodeForInOrForOf *loopNode,
    Js::ByteCodeLabel loopEntrance,
    Js::ByteCodeLabel continuePastLoop,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    BOOL fReturnValue)
{
    if (loopNode->pnodeLval->nop != knopVarDecl &&
        loopNode->pnodeLval->nop != knopLetDecl &&
        loopNode->pnodeLval->nop != knopConstDecl)
    {
        EmitReference(loopNode->pnodeLval, byteCodeGenerator, funcInfo);
    }
    else
    {
        Symbol * sym = loopNode->pnodeLval->AsParseNodeVar()->sym;
        sym->SetNeedDeclaration(false);
    }

    if (byteCodeGenerator->IsES6ForLoopSemanticsEnabled())
    {
        BeginEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);
    }

    EmitAssignment(nullptr, loopNode->pnodeLval, loopNode->itemLocation, byteCodeGenerator, funcInfo);

    // The StartStatement is already done in the caller of this function.
byteCodeGenerator->EndStatement(loopNode->pnodeLval);

    funcInfo->ReleaseReference(loopNode->pnodeLval);

    Emit(loopNode->pnodeBody, byteCodeGenerator, funcInfo, fReturnValue);
    funcInfo->ReleaseLoc(loopNode->pnodeBody);

    if (byteCodeGenerator->IsES6ForLoopSemanticsEnabled())
    {
        EndEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);
    }

    funcInfo->ReleaseTmpRegister(loopNode->itemLocation);
    if (loopNode->emitLabels)
    {
        byteCodeGenerator->Writer()->MarkLabel(loopNode->continueLabel);
    }
    byteCodeGenerator->Writer()->Br(loopEntrance);
    byteCodeGenerator->Writer()->MarkLabel(continuePastLoop);
    if (loopNode->emitLabels)
    {
        byteCodeGenerator->Writer()->MarkLabel(loopNode->breakLabel);
    }
}

// Emits a for-in loop using the per-function for-in enumerator stack
// (InitForInEnumerator / BrOnEmpty at the given forInLoopLevel). Called from
// EmitForInOrForOf, which has already emitted the collection expression and
// issued StartStatement for the loop.
void EmitForIn(ParseNodeForInOrForOf *loopNode,
    Js::ByteCodeLabel loopEntrance,
    Js::ByteCodeLabel continuePastLoop,
    ByteCodeGenerator *byteCodeGenerator,
    FuncInfo *funcInfo,
    BOOL fReturnValue)
{
    Assert(loopNode->nop == knopForIn);
    Assert(loopNode->location == Js::Constants::NoRegister);

    // Grab registers for the enumerator and for the current enumerated item.
    // The enumerator register will be released after this call returns.
    loopNode->itemLocation = funcInfo->AcquireTmpRegister();

    uint forInLoopLevel = funcInfo->AcquireForInLoopLevel();

    // get enumerator from the collection
    byteCodeGenerator->Writer()->Reg1Unsigned1(Js::OpCode::InitForInEnumerator, loopNode->pnodeObj->location, forInLoopLevel);

    // The StartStatement is already done in the caller of the current function, which is EmitForInOrForOf
    byteCodeGenerator->EndStatement(loopNode);

    // Need to increment loop count whether we are going to profile or not for HasLoop()
    uint loopId = byteCodeGenerator->Writer()->EnterLoop(loopEntrance);
    loopNode->loopId = loopId;

    // The EndStatement will happen in the EmitForInOfLoopBody function
    byteCodeGenerator->StartStatement(loopNode->pnodeLval);

    // branch past loop when MoveAndGetNext returns nullptr
    byteCodeGenerator->Writer()->BrReg1Unsigned1(Js::OpCode::BrOnEmpty, continuePastLoop, loopNode->itemLocation, forInLoopLevel);

    EmitForInOfLoopBody(loopNode, loopEntrance, continuePastLoop, byteCodeGenerator, funcInfo, fReturnValue);

    byteCodeGenerator->Writer()->ExitLoop(loopId);

    funcInfo->ReleaseForInLoopLevel(forInLoopLevel);

    if (!byteCodeGenerator->IsES6ForLoopSemanticsEnabled())
    {
        EndEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);
    }
}

// Entry point for both for-in and for-of: emits the collection expression and
// block scoping, then dispatches to EmitForIn or continues inline with the
// for-of iterator protocol (get iterator, try/catch/finally for IteratorClose).
void EmitForInOrForOf(ParseNodeForInOrForOf *loopNode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, BOOL fReturnValue)
{
    bool isForIn = (loopNode->nop == knopForIn);
    Assert(isForIn || loopNode->nop == knopForOf);

    BeginEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);

    byteCodeGenerator->StartStatement(loopNode);
    if (!isForIn)
    {
        funcInfo->AcquireLoc(loopNode);
    }

    // Record the branch bytecode offset.
    // This is used for "ignore exception" and "set next stmt" scenarios. See ProbeContainer::GetNextUserStatementOffsetForAdvance:
    // If there is a branch recorded between current offset and next stmt offset, we'll use offset of the branch recorded,
    // otherwise use offset of next stmt.
// The idea here is that when we bail out after ignore exception, we need to bail out to the beginning of the ForIn,
    // but currently ForIn stmt starts at the condition part, which is needed for correct handling of break point on ForIn
    // (break every time on the loop back edge) and correct display of current statement under debugger.
    // See WinBlue 231880 for details.
    byteCodeGenerator->Writer()->RecordStatementAdjustment(Js::FunctionBody::SAT_All);
    if (byteCodeGenerator->IsES6ForLoopSemanticsEnabled() &&
        loopNode->pnodeBlock->HasBlockScopedContent())
    {
        byteCodeGenerator->Writer()->RecordForInOrOfCollectionScope();
    }
    Js::ByteCodeLabel loopEntrance = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel continuePastLoop = byteCodeGenerator->Writer()->DefineLabel();

    if (loopNode->pnodeLval->nop == knopVarDecl)
    {
        EmitReference(loopNode->pnodeLval, byteCodeGenerator, funcInfo);
    }

    Emit(loopNode->pnodeObj, byteCodeGenerator, funcInfo, false); // evaluate collection expression
    funcInfo->ReleaseLoc(loopNode->pnodeObj);

    if (byteCodeGenerator->IsES6ForLoopSemanticsEnabled())
    {
        // The "collection" scope ends once the collection expression has been
        // evaluated; its symbols are no longer tracked for the debugger.
        EndEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);
        if (loopNode->pnodeBlock->scope != nullptr)
        {
            loopNode->pnodeBlock->scope->ForEachSymbol([](Symbol *sym) {
                sym->SetIsTrackedForDebugger(false);
            });
        }
    }

    if (isForIn)
    {
        // for-in takes the enumerator-based path; everything below is for-of only.
        EmitForIn(loopNode, loopEntrance, continuePastLoop, byteCodeGenerator, funcInfo, fReturnValue);

        if (!byteCodeGenerator->IsES6ForLoopSemanticsEnabled())
        {
            EndEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);
        }

        return;
    }

    // for-of: throw a TypeError if the collection is undefined (not object-coercible).
    Js::ByteCodeLabel skipThrow = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->BrReg2(Js::OpCode::BrNeq_A, skipThrow, loopNode->pnodeObj->location, funcInfo->undefinedConstantRegister);
    byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_ObjectCoercible));
    byteCodeGenerator->Writer()->MarkLabel(skipThrow);

    Js::RegSlot regException = Js::Constants::NoRegister;
    Js::RegSlot regOffset = Js::Constants::NoRegister;

    // These two temp variables store whether the iterator's return function should be called.
    // One variable is used for the catch block and one is used for the finally block. These variables
    // will be set to true when we think that the return function should be called on abrupt loop break.
    // Why two variables? Since these are temps, the JIT does not like flow where a single variable
    // is used in multiple blocks.
    Js::RegSlot shouldCallReturnFunctionLocation = funcInfo->AcquireTmpRegister();
    Js::RegSlot shouldCallReturnFunctionLocationFinally = funcInfo->AcquireTmpRegister();

    bool isCoroutine = funcInfo->byteCodeFunction->IsCoroutine();

    if (isCoroutine)
    {
        // Generators need extra registers so the try/finally state can survive a yield.
        regException = funcInfo->AcquireTmpRegister();
        regOffset = funcInfo->AcquireTmpRegister();
    }

    // Grab registers for the enumerator and for the current enumerated item.
    // The enumerator register will be released after this call returns.
    loopNode->itemLocation = funcInfo->AcquireTmpRegister();

    // We want call profile information on the @@iterator call, so instead of adding a GetForOfIterator bytecode op
    // to do all the following work in a helper do it explicitly in bytecode so that the @@iterator call is exposed
    // to the profiler and JIT.

    byteCodeGenerator->SetHasFinally(true);
    byteCodeGenerator->SetHasTry(true);
    byteCodeGenerator->TopFuncInfo()->byteCodeFunction->SetDontInline(true);

    // do a ToObject on the collection
    Js::RegSlot tmpObj = funcInfo->AcquireTmpRegister();
    byteCodeGenerator->Writer()->Reg2(Js::OpCode::Conv_Obj, tmpObj, loopNode->pnodeObj->location);

    EmitGetIterator(loopNode->location, tmpObj, byteCodeGenerator, funcInfo);
    funcInfo->ReleaseTmpRegister(tmpObj);

    // The whole loop is surrounded with try..catch..finally - in order to capture the abrupt completion.
    Js::ByteCodeLabel finallyLabel = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel catchLabel = byteCodeGenerator->Writer()->DefineLabel();
    byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true);

    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);

    ByteCodeGenerator::TryScopeRecord tryRecForTryFinally(Js::OpCode::TryFinallyWithYield, finallyLabel);

    if (isCoroutine)
    {
        // Generator: record the try-finally in the try-scope list so it can be
        // re-entered correctly after a yield resumes.
        byteCodeGenerator->Writer()->BrReg2(Js::OpCode::TryFinallyWithYield, finallyLabel, regException, regOffset);
        tryRecForTryFinally.reg1 = regException;
        tryRecForTryFinally.reg2 = regOffset;
        byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForTryFinally);
    }
    else
    {
        byteCodeGenerator->Writer()->Br(Js::OpCode::TryFinally, finallyLabel);
    }

    byteCodeGenerator->Writer()->Br(Js::OpCode::TryCatch, catchLabel);

    ByteCodeGenerator::TryScopeRecord tryRecForTry(Js::OpCode::TryCatch, catchLabel);
    if (isCoroutine)
    {
        byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForTry);
    }

    byteCodeGenerator->EndStatement(loopNode);

    // Need to increment loop count whether we are going into profile or not for HasLoop()
    uint loopId = byteCodeGenerator->Writer()->EnterLoop(loopEntrance);
    loopNode->loopId = loopId;

    byteCodeGenerator->StartStatement(loopNode->pnodeLval);

    // Reset per-iteration: no iterator step has happened yet, so return() must not be called.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocation);
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, shouldCallReturnFunctionLocationFinally);

    EmitIteratorNext(loopNode->itemLocation, loopNode->location, Js::Constants::NoRegister, byteCodeGenerator, funcInfo);

    Js::RegSlot doneLocation = funcInfo->AcquireTmpRegister();
    EmitIteratorComplete(doneLocation, loopNode->itemLocation, byteCodeGenerator, funcInfo);

    // branch past loop if the result's done property is truthy
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, continuePastLoop, doneLocation);
    funcInfo->ReleaseTmpRegister(doneLocation);

    // otherwise put result's value property in itemLocation
    EmitIteratorValue(loopNode->itemLocation, loopNode->itemLocation, byteCodeGenerator, funcInfo);

    // From here an abrupt exit must invoke the iterator's return function.
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocation);
    byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, shouldCallReturnFunctionLocationFinally);

    EmitForInOfLoopBody(loopNode, loopEntrance, continuePastLoop, byteCodeGenerator, funcInfo, fReturnValue);

    byteCodeGenerator->Writer()->ExitLoop(loopId);

    EmitCatchAndFinallyBlocks(catchLabel,
        finallyLabel,
        loopNode->location,
        shouldCallReturnFunctionLocation,
        shouldCallReturnFunctionLocationFinally,
        regException,
        regOffset,
        byteCodeGenerator,
        funcInfo);

    if (!byteCodeGenerator->IsES6ForLoopSemanticsEnabled())
    {
        EndEmitBlock(loopNode->pnodeBlock, byteCodeGenerator, funcInfo);
    }
}

// Emits an array literal: an empty NewScArray(/WithMissingValues) for "[]",
// otherwise delegates element population to SetNewArrayElements.
void EmitArrayLiteral(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    funcInfo->AcquireLoc(pnode);
    ParseNode *args = pnode->AsParseNodeUni()->pnode1;
    if (args == nullptr)
    {
        byteCodeGenerator->Writer()->Reg1Unsigned1(
            pnode->AsParseNodeArrLit()->hasMissingValues ? Js::OpCode::NewScArrayWithMissingValues : Js::OpCode::NewScArray,
            pnode->location,
            ByteCodeGenerator::DefaultArraySize);
    }
    else
    {
        SetNewArrayElements(pnode, pnode->location, byteCodeGenerator, funcInfo);
    }
}

// Walks outward from pnode to pnodeTarget emitting the cleanup opcodes a jump
// (break/continue/return) needs: Leave for try/catch/finally regions being
// exited, and ProfiledLoopEnd for profiled loops.
void EmitJumpCleanup(ParseNodeStmt *pnode, ParseNode *pnodeTarget, ByteCodeGenerator *byteCodeGenerator, FuncInfo * funcInfo)
{
    for (; pnode != pnodeTarget; pnode = pnode->pnodeOuter)
    {
        switch (pnode->nop)
        {
        case knopTry:
        case knopCatch:
        case knopFinally:
            // We insert OpCode::Leave when there is a 'return' inside try/catch/finally.
            // This is for flow control and does not participate in identifying boundaries of try/catch blocks,
            // thus we shouldn't call RecordCrossFrameEntryExitRecord() here.
byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
            break;

        case knopForOf:
#if ENABLE_PROFILE_INFO
            if (Js::DynamicProfileInfo::EnableImplicitCallFlags(funcInfo->GetParsedFunctionBody()))
            {
                byteCodeGenerator->Writer()->Unsigned1(Js::OpCode::ProfiledLoopEnd, pnode->AsParseNodeLoop()->loopId);
            }
#endif
            // The ForOf loop code is wrapped in try..catch..finally - forcing a couple of Leave bytecodes over here
            byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
            byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave);
            break;

#if ENABLE_PROFILE_INFO
        case knopWhile:
        case knopDoWhile:
        case knopFor:
        case knopForIn:
            if (Js::DynamicProfileInfo::EnableImplicitCallFlags(funcInfo->GetParsedFunctionBody()))
            {
                byteCodeGenerator->Writer()->Unsigned1(Js::OpCode::ProfiledLoopEnd, pnode->AsParseNodeLoop()->loopId);
            }
            break;
#endif
        }
    }
}

// Emits the two operands of a binary expression in order, stashing opnd1's
// value first when opnd2's evaluation could clobber it. The computed-name /
// class-declaration special case forwards the name node so the class can be
// named after it.
void EmitBinaryOpnds(ParseNode *pnode1, ParseNode *pnode2, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    // If opnd2 can overwrite opnd1, make sure the value of opnd1 is stashed away.
    if (MayHaveSideEffectOnNode(pnode1, pnode2))
    {
        SaveOpndValue(pnode1, funcInfo);
    }

    Emit(pnode1, byteCodeGenerator, funcInfo, false);

    if (pnode1->nop == knopComputedName && pnode2->nop == knopClassDecl &&
        (pnode2->AsParseNodeClass()->pnodeConstructor == nullptr || pnode2->AsParseNodeClass()->pnodeConstructor->nop != knopVarDecl))
    {
        // Pass the computed-name node through as bindPnode for the class declaration.
        Emit(pnode2, byteCodeGenerator, funcInfo, false, false, pnode1);
    }
    else
    {
        Emit(pnode2, byteCodeGenerator, funcInfo, false);
    }
}

// Emits an LHS reference (name / dot / index) and an RHS for an assignment-like
// operation, stashing any LHS sub-operand that the RHS (or a later sub-operand)
// could clobber. When fLoadLhs is set, the LHS's current value is also loaded
// (for compound assignments); otherwise only its reference parts are evaluated.
void EmitBinaryReference(ParseNode *pnode1, ParseNode *pnode2, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, BOOL fLoadLhs)
{
    // Make sure that the RHS of an assignment doesn't kill the opnd's of the expression on the LHS.
    switch (pnode1->nop)
    {
    case knopName:
        if (fLoadLhs && MayHaveSideEffectOnNode(pnode1, pnode2))
        {
            // Given x op y, y may kill x, so stash x.
            // Note that this only matters if we're loading x prior to the op.
            SaveOpndValue(pnode1, funcInfo);
        }
        break;
    case knopDot:
        if (fLoadLhs)
        {
            // We're loading the value of the LHS before the RHS, so make sure the LHS gets a register first.
            funcInfo->AcquireLoc(pnode1);
        }
        if (MayHaveSideEffectOnNode(pnode1->AsParseNodeBin()->pnode1, pnode2))
        {
            // Given x.y op z, z may kill x, so stash x away.
            SaveOpndValue(pnode1->AsParseNodeBin()->pnode1, funcInfo);
        }
        break;
    case knopIndex:
        if (fLoadLhs)
        {
            // We're loading the value of the LHS before the RHS, so make sure the LHS gets a register first.
            funcInfo->AcquireLoc(pnode1);
        }
        if (MayHaveSideEffectOnNode(pnode1->AsParseNodeBin()->pnode1, pnode2) ||
            MayHaveSideEffectOnNode(pnode1->AsParseNodeBin()->pnode1, pnode1->AsParseNodeBin()->pnode2))
        {
            // Given x[y] op z, y or z may kill x, so stash x away.
            SaveOpndValue(pnode1->AsParseNodeBin()->pnode1, funcInfo);
        }
        if (MayHaveSideEffectOnNode(pnode1->AsParseNodeBin()->pnode2, pnode2))
        {
            // Given x[y] op z, z may kill y, so stash y away.
            // But make sure that x gets a register before y.
            funcInfo->AcquireLoc(pnode1->AsParseNodeBin()->pnode1);
            SaveOpndValue(pnode1->AsParseNodeBin()->pnode2, funcInfo);
        }
        break;
    }

    if (fLoadLhs)
    {
        // Emit code to load the value of the LHS.
        EmitLoad(pnode1, byteCodeGenerator, funcInfo);
    }
    else
    {
        // Emit code to evaluate the LHS opnds, but don't load the LHS's value.
        EmitReference(pnode1, byteCodeGenerator, funcInfo);
    }

    // Evaluate the RHS.
    Emit(pnode2, byteCodeGenerator, funcInfo, false);
}

// Emits the runtime ReferenceError raised for a use-before-declaration (TDZ)
// access; optionally loads undefined into 'location' to keep the IRBuilder's
// register state consistent on the (unreachable) fall-through path.
void EmitUseBeforeDeclarationRuntimeError(ByteCodeGenerator * byteCodeGenerator, Js::RegSlot location)
{
    byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_UseBeforeDeclaration));

    if (location != Js::Constants::NoRegister)
    {
        // Optionally load something into the register so as not to confuse the IRBuilder. This value will never be used.
byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdUndef, location);
    }
}

// Statically detectable use-before-declaration: emit the runtime error only
// when the symbol is on a static path in the same function; closure/dynamic
// scope cases are handled by dynamic checks instead.
void EmitUseBeforeDeclaration(Symbol *sym, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    // Don't emit static use-before-declaration error in a closure or dynamic scope case. We detect such cases with dynamic checks,
    // if necessary.
    if (sym != nullptr &&
        !sym->GetIsModuleExportStorage() &&
        sym->GetNeedDeclaration() &&
        byteCodeGenerator->GetCurrentScope()->HasStaticPathToAncestor(sym->GetScope()) &&
        sym->GetScope()->GetFunc() == funcInfo)
    {
        EmitUseBeforeDeclarationRuntimeError(byteCodeGenerator, Js::Constants::NoRegister);
    }
}

// Generic binary expression emitter: evaluate both operands, release their
// registers (so the destination can reuse one), then emit a single Reg3 op.
void EmitBinary(Js::OpCode opcode, ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    byteCodeGenerator->StartStatement(pnode);
    EmitBinaryOpnds(pnode->AsParseNodeBin()->pnode1, pnode->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo);
    funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode2);
    funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode1);
    funcInfo->AcquireLoc(pnode);
    byteCodeGenerator->Writer()->Reg3(opcode,
        pnode->location,
        pnode->AsParseNodeBin()->pnode1->location,
        pnode->AsParseNodeBin()->pnode2->location);
    byteCodeGenerator->EndStatement(pnode);
}

// Flattens a left-leaning tree of '+' nodes into the ordered operand list
// 'concatOpnds' (left-to-right, using an explicit stack instead of recursion).
// Returns true when at least one string operand is long enough that the
// dedicated concat opcodes are worthwhile.
bool CollectConcat(ParseNode *pnodeAdd, DListCounted<ParseNode *, ArenaAllocator>& concatOpnds, ArenaAllocator *arenaAllocator)
{
    Assert(pnodeAdd->nop == knopAdd);
    Assert(pnodeAdd->CanFlattenConcatExpr());

    bool doConcatString = false;
    DList<ParseNode*, ArenaAllocator> pnodeStack(arenaAllocator);
    pnodeStack.Prepend(pnodeAdd->AsParseNodeBin()->pnode2);
    ParseNode * pnode = pnodeAdd->AsParseNodeBin()->pnode1;
    while (true)
    {
        if (!pnode->CanFlattenConcatExpr())
        {
            concatOpnds.Append(pnode);
        }
        else if (pnode->nop == knopStr)
        {
            concatOpnds.Append(pnode);

            // Detect if there are any strings larger than the append size limit.
            // If there are, we can do concat; otherwise, still use add so we will not lose the AddLeftDead opportunities.
            doConcatString = doConcatString || !Js::CompoundString::ShouldAppendChars(pnode->AsParseNodeStr()->pid->Cch());
        }
        else
        {
            // Nested flattenable add: push its right child and descend into the left.
            Assert(pnode->nop == knopAdd);
            pnodeStack.Prepend(pnode->AsParseNodeBin()->pnode2);
            pnode = pnode->AsParseNodeBin()->pnode1;
            continue;
        }

        if (pnodeStack.Empty())
        {
            break;
        }

        pnode = pnodeStack.Head();
        pnodeStack.RemoveHead();
    }

    return doConcatString;
}

// Emits a three-operand string concatenation as a single Concat3 opcode,
// stashing earlier operands that later operands could clobber.
void EmitConcat3(ParseNode *pnode, ParseNode *pnode1, ParseNode *pnode2, ParseNode *pnode3, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    byteCodeGenerator->StartStatement(pnode);
    if (MayHaveSideEffectOnNode(pnode1, pnode2) || MayHaveSideEffectOnNode(pnode1, pnode3))
    {
        SaveOpndValue(pnode1, funcInfo);
    }

    if (MayHaveSideEffectOnNode(pnode2, pnode3))
    {
        SaveOpndValue(pnode2, funcInfo);
    }

    Emit(pnode1, byteCodeGenerator, funcInfo, false);
    Emit(pnode2, byteCodeGenerator, funcInfo, false);
    Emit(pnode3, byteCodeGenerator, funcInfo, false);
    funcInfo->ReleaseLoc(pnode3);
    funcInfo->ReleaseLoc(pnode2);
    funcInfo->ReleaseLoc(pnode1);
    funcInfo->AcquireLoc(pnode);
    byteCodeGenerator->Writer()->Reg4(Js::OpCode::Concat3,
        pnode->location,
        pnode1->location,
        pnode2->location,
        pnode3->location);
    byteCodeGenerator->EndStatement(pnode);
}

// Emits NewConcatStrMulti seeded with the first two operands of an N-way
// concat; the remaining operands are appended by SetConcatStrMultiItem(2)
// in EmitAdd.
void EmitNewConcatStrMulti(ParseNode *pnode, uint8 count, ParseNode *pnode1, ParseNode *pnode2, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    EmitBinaryOpnds(pnode1, pnode2, byteCodeGenerator, funcInfo);
    funcInfo->ReleaseLoc(pnode2);
    funcInfo->ReleaseLoc(pnode1);
    funcInfo->AcquireLoc(pnode);
    byteCodeGenerator->Writer()->Reg3B1(Js::OpCode::NewConcatStrMulti,
        pnode->location,
        pnode1->location,
        pnode2->location,
        count);
}

// Emits a '+' expression. Flattenable string-concat chains are lowered to the
// dedicated Concat3/ConcatStrMulti opcodes (or an iterative Add_A sequence);
// everything else falls back to the generic EmitBinary path.
void EmitAdd(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo)
{
    Assert(pnode->nop == knopAdd);

    if (pnode->CanFlattenConcatExpr())
    {
        // We should only have a string concat if the feature is on.
Assert(!PHASE_OFF1(Js::ByteCodeConcatExprOptPhase));
        DListCounted<ParseNode*, ArenaAllocator> concatOpnds(byteCodeGenerator->GetAllocator());
        bool doConcatString = CollectConcat(pnode, concatOpnds, byteCodeGenerator->GetAllocator());
        if (doConcatString)
        {
            uint concatCount = concatOpnds.Count();
            Assert(concatCount >= 2);

            // Don't do concatN if the number is too high
            // CONSIDER: although we could have done multiple ConcatNs
            if (concatCount > 2 && concatCount <= UINT8_MAX)
            {
#if DBG
                char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
#endif
                ParseNode * pnode1 = concatOpnds.Head();
                concatOpnds.RemoveHead();
                ParseNode * pnode2 = concatOpnds.Head();
                concatOpnds.RemoveHead();
                if (concatCount == 3)
                {
                    // Exactly three operands: a single Concat3 opcode handles it.
                    OUTPUT_TRACE_DEBUGONLY(Js::ByteCodeConcatExprOptPhase, _u("%s(%s) offset:#%d : Concat3\n"),
                        funcInfo->GetParsedFunctionBody()->GetDisplayName(),
                        funcInfo->GetParsedFunctionBody()->GetDebugNumberSet(debugStringBuffer),
                        byteCodeGenerator->Writer()->ByteCodeDataSize());
                    EmitConcat3(pnode, pnode1, pnode2, concatOpnds.Head(), byteCodeGenerator, funcInfo);
                    return;
                }

                OUTPUT_TRACE_DEBUGONLY(Js::ByteCodeConcatExprOptPhase, _u("%s(%s) offset:#%d: ConcatMulti %d\n"),
                    funcInfo->GetParsedFunctionBody()->GetDisplayName(),
                    funcInfo->GetParsedFunctionBody()->GetDebugNumberSet(debugStringBuffer),
                    byteCodeGenerator->Writer()->ByteCodeDataSize(),
                    concatCount);
                byteCodeGenerator->StartStatement(pnode);
                funcInfo->AcquireLoc(pnode);

                // CONSIDER: this may cause the backend not able CSE repeating pattern within the concat.
                EmitNewConcatStrMulti(pnode, (uint8)concatCount, pnode1, pnode2, byteCodeGenerator, funcInfo);

                // Append remaining operands two at a time; 'i' is the next item index.
                uint i = 2;
                do
                {
                    ParseNode * currNode = concatOpnds.Head();
                    concatOpnds.RemoveHead();
                    ParseNode * currNode2 = concatOpnds.Head();
                    concatOpnds.RemoveHead();

                    EmitBinaryOpnds(currNode, currNode2, byteCodeGenerator, funcInfo);
                    funcInfo->ReleaseLoc(currNode2);
                    funcInfo->ReleaseLoc(currNode);
                    byteCodeGenerator->Writer()->Reg3B1(
                        Js::OpCode::SetConcatStrMultiItem2, pnode->location, currNode->location, currNode2->location, (uint8)i);
                    i += 2;
                } while (concatOpnds.Count() > 1);

                if (!concatOpnds.Empty())
                {
                    // Odd operand count: append the final single item.
                    ParseNode * currNode = concatOpnds.Head();
                    Emit(currNode, byteCodeGenerator, funcInfo, false);
                    funcInfo->ReleaseLoc(currNode);
                    byteCodeGenerator->Writer()->Reg2B1(
                        Js::OpCode::SetConcatStrMultiItem, pnode->location, currNode->location, (uint8)i);
                    i++;
                }

                Assert(concatCount == i);
                byteCodeGenerator->EndStatement(pnode);
                return;
            }
        }

        // Since we collected all the nodes already, let's just emit them instead of doing it recursively.
        byteCodeGenerator->StartStatement(pnode);
        ParseNode * currNode = concatOpnds.Head();
        concatOpnds.RemoveHead();
        ParseNode * currNode2 = concatOpnds.Head();
        concatOpnds.RemoveHead();

        EmitBinaryOpnds(currNode, currNode2, byteCodeGenerator, funcInfo);
        funcInfo->ReleaseLoc(currNode2);
        funcInfo->ReleaseLoc(currNode);
        Js::RegSlot dstReg = funcInfo->AcquireLoc(pnode);
        byteCodeGenerator->Writer()->Reg3(
            Js::OpCode::Add_A, dstReg, currNode->location, currNode2->location);
        while (!concatOpnds.Empty())
        {
            // Left-fold the remaining operands into the destination register.
            currNode = concatOpnds.Head();
            concatOpnds.RemoveHead();
            Emit(currNode, byteCodeGenerator, funcInfo, false);
            funcInfo->ReleaseLoc(currNode);
            byteCodeGenerator->Writer()->Reg3(
                Js::OpCode::Add_A, dstReg, dstReg, currNode->location);
        }
        byteCodeGenerator->EndStatement(pnode);
    }
    else
    {
        EmitBinary(Js::OpCode::Add_A, pnode, byteCodeGenerator, funcInfo);
    }
}

// Before a yield suspends the generator, emit Leave/LeaveNull for every active
// try region (innermost first, walking the record list tail to head) so the
// EH state is unwound across the suspension.
void ByteCodeGenerator::EmitLeaveOpCodesBeforeYield()
{
    for (TryScopeRecord* node = this->tryScopeRecordsList.Tail(); node != nullptr; node = node->Previous())
    {
        switch (node->op)
        {
        case Js::OpCode::TryFinallyWithYield:
            this->Writer()->Empty(Js::OpCode::LeaveNull);
            break;
        case Js::OpCode::TryCatch:
        case Js::OpCode::ResumeFinally:
        case Js::OpCode::ResumeCatch:
            this->Writer()->Empty(Js::OpCode::Leave);
            break;
        default:
            AssertMsg(false, "Unexpected OpCode before Yield in the Try-Catch-Finally cache for generator!");
            break;
        }
    }
}

// After a yield resumes, re-emit the try block headers (outermost first,
// walking the record list head to tail) to re-establish the EH state that
// EmitLeaveOpCodesBeforeYield tore down.
void ByteCodeGenerator::EmitTryBlockHeadersAfterYield()
{
    for (TryScopeRecord* node = this->tryScopeRecordsList.Head(); node != nullptr; node = node->Next())
    {
        switch (node->op)
        {
        case Js::OpCode::TryCatch:
            this->Writer()->Br(node->op, node->label);
            break;
        case Js::OpCode::TryFinallyWithYield:
        case Js::OpCode::ResumeFinally:
            this->Writer()->BrReg2(node->op, node->label, node->reg1, node->reg2);
            break;
        case Js::OpCode::ResumeCatch:
            this->Writer()->Empty(node->op);
            break;
        default:
            AssertMsg(false, "Unexpected OpCode after yield in the Try-Catch-Finally cache for generator!");
            break;
        }
    }
}

void
// Emits a yield suspension point. For a plain 'yield', wraps the input value in
// a fresh { value, done: false } iterator-result object before yielding; for a
// 'yield*' step (yieldStarIterator given), the input is already an iterator
// result and is passed through. Active try regions are left before the Yield
// and re-entered after it so generator EH state stays consistent.
EmitYield(Js::RegSlot inputLocation, Js::RegSlot resultLocation, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo, Js::RegSlot yieldStarIterator)
{
    // If the bytecode emitted by this function is part of 'yield*', inputLocation is the object
    // returned by the iterable's next/return/throw method. Otherwise, it is the yielded value.
    if (yieldStarIterator == Js::Constants::NoRegister)
    {
        // Build the iterator result object: { value: input, done: false }.
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::NewScObjectSimple, funcInfo->yieldRegister);

        uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->yieldRegister, Js::PropertyIds::value, false, true);
        byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::StFld, inputLocation, funcInfo->yieldRegister, cacheId);

        cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->yieldRegister, Js::PropertyIds::done, false, true);
        byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::StFld, funcInfo->falseConstantRegister, funcInfo->yieldRegister, cacheId);
    }
    else
    {
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, funcInfo->yieldRegister, inputLocation);
    }

    byteCodeGenerator->EmitLeaveOpCodesBeforeYield();
    byteCodeGenerator->Writer()->Reg2(Js::OpCode::Yield, funcInfo->yieldRegister, funcInfo->yieldRegister);
    byteCodeGenerator->EmitTryBlockHeadersAfterYield();

    if (yieldStarIterator == Js::Constants::NoRegister)
    {
        byteCodeGenerator->Writer()->Reg2(Js::OpCode::ResumeYield, resultLocation, funcInfo->yieldRegister);
    }
    else
    {
        byteCodeGenerator->Writer()->Reg3(Js::OpCode::ResumeYieldStar, resultLocation, funcInfo->yieldRegister, yieldStarIterator);
    }
}

// Emits 'yield*': gets the operand's iterator, then loops calling next() and
// yielding each result until the result's done property is truthy. The final
// iterator result's value becomes the value of the yield* expression.
void EmitYieldStar(ParseNodeUni* yieldStarNode, ByteCodeGenerator* byteCodeGenerator, FuncInfo* funcInfo)
{
    funcInfo->AcquireLoc(yieldStarNode);

    Js::ByteCodeLabel loopEntrance = byteCodeGenerator->Writer()->DefineLabel();
    Js::ByteCodeLabel continuePastLoop = byteCodeGenerator->Writer()->DefineLabel();

    Js::RegSlot iteratorLocation = funcInfo->AcquireTmpRegister();

    // Evaluate operand
    Emit(yieldStarNode->pnode1, byteCodeGenerator, funcInfo, false);
    funcInfo->ReleaseLoc(yieldStarNode->pnode1);

    EmitGetIterator(iteratorLocation, yieldStarNode->pnode1->location, byteCodeGenerator, funcInfo);

    // Call the iterator's next()
    EmitIteratorNext(yieldStarNode->location, iteratorLocation, funcInfo->undefinedConstantRegister, byteCodeGenerator, funcInfo);

    uint loopId = byteCodeGenerator->Writer()->EnterLoop(loopEntrance);
    // since a yield* doesn't have a user defined body, we cannot return from this loop
    // which means we don't need to support EmitJumpCleanup() and there do not need to
    // remember the loopId like the loop statements do.

    Js::RegSlot doneLocation = funcInfo->AcquireTmpRegister();
    EmitIteratorComplete(doneLocation, yieldStarNode->location, byteCodeGenerator, funcInfo);

    // branch past the loop if the done property is truthy
    byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, continuePastLoop, doneLocation);
    funcInfo->ReleaseTmpRegister(doneLocation);

    // Yield the current iterator result; the resumed value is the next result.
    EmitYield(yieldStarNode->location, yieldStarNode->location, byteCodeGenerator, funcInfo, iteratorLocation);

    funcInfo->ReleaseTmpRegister(iteratorLocation);

    byteCodeGenerator->Writer()->Br(loopEntrance);
    byteCodeGenerator->Writer()->MarkLabel(continuePastLoop);
    byteCodeGenerator->Writer()->ExitLoop(loopId);

    // Put the iterator result's value in yieldStarNode->location.
    // It will be used as the result value of the yield* operator expression.
    EmitIteratorValue(yieldStarNode->location, yieldStarNode->location, byteCodeGenerator, funcInfo);
}

// Records an int-constant property assigned (once) on the global *user* object,
// when the guard condition computed by the caller holds.
void TrackIntConstantsOnGlobalUserObject(ByteCodeGenerator *byteCodeGenerator, bool isSymGlobalAndSingleAssignment, Js::PropertyId propertyId)
{
    if (isSymGlobalAndSingleAssignment)
    {
        byteCodeGenerator->GetScriptContext()->TrackIntConstPropertyOnGlobalUserObject(propertyId);
    }
}

// Records an int-constant property assigned (once) on the global object, when
// the guard condition computed by the caller holds.
void TrackIntConstantsOnGlobalObject(ByteCodeGenerator *byteCodeGenerator, bool isSymGlobalAndSingleAssignment, Js::PropertyId propertyId)
{
    if (isSymGlobalAndSingleAssignment)
    {
        byteCodeGenerator->GetScriptContext()->TrackIntConstPropertyOnGlobalObject(propertyId);
    }
}

// Overload taking the symbol directly: tracks it on the global object when it
// is a global assigned exactly once.
void TrackIntConstantsOnGlobalObject(ByteCodeGenerator *byteCodeGenerator, Symbol *sym)
{
    if (sym && sym->GetIsGlobal() && sym->IsAssignedOnce())
    {
        Js::PropertyId propertyId = sym->EnsurePosition(byteCodeGenerator);
        byteCodeGenerator->GetScriptContext()->TrackIntConstPropertyOnGlobalObject(propertyId);
    }
}

// Walks an object literal's member list and tracks every non-computed member
// whose value is an integer literal as an int-constant on the global user object.
void TrackMemberNodesInObjectForIntConstants(ByteCodeGenerator *byteCodeGenerator, ParseNodePtr objNode)
{
    Assert(objNode->nop == knopObject);

    ParseNodePtr memberList = objNode->AsParseNodeUni()->pnode1;

    while (memberList != nullptr)
    {
        // The member list is either a knopList chain or a single member node.
        ParseNodePtr memberNode = memberList->nop == knopList ? memberList->AsParseNodeBin()->pnode1 : memberList;
        if (memberNode->nop != knopEllipsis)
        {
            ParseNodePtr memberNameNode = memberNode->AsParseNodeBin()->pnode1;
            ParseNodePtr memberValNode = memberNode->AsParseNodeBin()->pnode2;

            if (memberNameNode->nop != knopComputedName && memberValNode->nop == knopInt)
            {
                Js::PropertyId propertyId = memberNameNode->AsParseNodeStr()->pid->GetPropertyId();
                TrackIntConstantsOnGlobalUserObject(byteCodeGenerator, true, propertyId);
            }
        }

        memberList = memberList->nop == knopList ?
memberList->AsParseNodeBin()->pnode2 : nullptr;
    }
}

// Tracks int-constant assignments of the form "this.a = ..." / "obj.a = ..."
// where the target is (or is assumed to be) the global object and both the
// object symbol and the property are assigned exactly once.
void TrackGlobalIntAssignmentsForknopDotProps(ParseNodePtr knopDotNode, ByteCodeGenerator * byteCodeGenerator)
{
    Assert(knopDotNode->nop == knopDot);

    ParseNodePtr objectNode = knopDotNode->AsParseNodeBin()->pnode1;
    ParseNodeName * propertyNode = knopDotNode->AsParseNodeBin()->pnode2->AsParseNodeName();
    bool isSymGlobalAndSingleAssignment = false;

    if (objectNode->nop == knopName)
    {
        if (ByteCodeGenerator::IsThis(objectNode))
        {
            // Assume 'this' always refers to the GlobalObject
            // Cases like "this.a = "
            isSymGlobalAndSingleAssignment = propertyNode->pid->IsSingleAssignment();
            Js::PropertyId propertyId = propertyNode->PropertyIdFromNameNode();
            TrackIntConstantsOnGlobalObject(byteCodeGenerator, isSymGlobalAndSingleAssignment, propertyId);
        }
        else
        {
            Symbol * sym = objectNode->AsParseNodeName()->sym;
            isSymGlobalAndSingleAssignment = sym && sym->GetIsGlobal() && sym->IsAssignedOnce() && propertyNode->pid->IsSingleAssignment();
            Js::PropertyId propertyId = propertyNode->PropertyIdFromNameNode();
            TrackIntConstantsOnGlobalUserObject(byteCodeGenerator, isSymGlobalAndSingleAssignment, propertyId);
        }
    }
}

// Entry point for int-constant tracking, called from Emit for every node:
// inspects assignment nodes (binary assignments and unary inc/dec) and records
// global properties that are only ever assigned integer constants.
void TrackGlobalIntAssignments(ParseNodePtr pnode, ByteCodeGenerator * byteCodeGenerator)
{
    // Track the Global Int Constant properties' assignments here.
    uint nodeType = ParseNode::Grfnop(pnode->nop);
    if (nodeType & fnopAsg)
    {
        if (nodeType & fnopBin)
        {
            ParseNodePtr lhs = pnode->AsParseNodeBin()->pnode1;
            ParseNodePtr rhs = pnode->AsParseNodeBin()->pnode2;

            Assert(lhs && rhs);

            // Don't track other than integers and objects with member nodes.
            if (rhs->nop == knopObject)
            {
                TrackMemberNodesInObjectForIntConstants(byteCodeGenerator, rhs);
            }
            else if (rhs->nop != knopInt &&
                ((rhs->nop != knopLsh && rhs->nop != knopRsh) ||
                (rhs->AsParseNodeBin()->pnode1->nop != knopInt || rhs->AsParseNodeBin()->pnode2->nop != knopInt)))
            {
                // RHS is neither an int literal nor a shift of two int literals: not trackable.
                return;
            }

            if (lhs->nop == knopName)
            {
                // Handle "a = <Integer>" cases here
                Symbol * sym = lhs->AsParseNodeName()->sym;
                TrackIntConstantsOnGlobalObject(byteCodeGenerator, sym);
            }
            else if (lhs->nop == knopDot && lhs->AsParseNodeBin()->pnode2->nop == knopName)
            {
                // Cases like "obj.a = <Integer>"
                TrackGlobalIntAssignmentsForknopDotProps(lhs, byteCodeGenerator);
            }
        }
        else if (nodeType & fnopUni)
        {
            ParseNodePtr lhs = pnode->AsParseNodeUni()->pnode1;

            if (lhs->nop == knopName)
            {
                // Cases like "a++"
                Symbol * sym = lhs->AsParseNodeName()->sym;
                TrackIntConstantsOnGlobalObject(byteCodeGenerator, sym);
            }
            else if (lhs->nop == knopDot && lhs->AsParseNodeBin()->pnode2->nop == knopName)
            {
                // Cases like "obj.a++"
                TrackGlobalIntAssignmentsForknopDotProps(lhs, byteCodeGenerator);
            }
        }
    }
}

// Main recursive bytecode emitter: dispatches on the parse node kind.
// NOTE(review): this function continues well past the end of this view; only
// the head of its dispatch switch is visible here.
void Emit(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *funcInfo, BOOL fReturnValue, bool isConstructorCall, ParseNode * bindPnode, bool isTopLevel)
{
    if (pnode == nullptr)
    {
        return;
    }

    ThreadContext::ProbeCurrentStackNoDispose(Js::Constants::MinStackByteCodeVisitor, byteCodeGenerator->GetScriptContext());

    TrackGlobalIntAssignments(pnode, byteCodeGenerator);

    // printNop(pnode->nop);
    switch (pnode->nop)
    {
    case knopList:
        EmitList(pnode, byteCodeGenerator, funcInfo);
        break;
    case knopInt:
        // currently, these are loaded at the top
        break;
        // PTNODE(knopFlt        , "flt const"    ,None    ,Flt  ,fnopLeaf|fnopConst)
    case knopFlt:
        // currently, these are loaded at the top
        break;
        // PTNODE(knopStr        , "str const"    ,None    ,Pid  ,fnopLeaf|fnopConst)
    case knopStr:
        // TODO: protocol for combining string constants
        break;
        // PTNODE(knopRegExp     , "reg expr"    ,None    ,Pid  ,fnopLeaf|fnopConst)
    case knopRegExp:
funcInfo->GetParsedFunctionBody()->SetLiteralRegex(pnode->AsParseNodeRegExp()->regexPatternIndex, pnode->AsParseNodeRegExp()->regexPattern); byteCodeGenerator->Writer()->Reg1Unsigned1(Js::OpCode::NewRegEx, funcInfo->AcquireLoc(pnode), pnode->AsParseNodeRegExp()->regexPatternIndex); break; // PTNODE(knopNull , "null" ,Null ,None ,fnopLeaf) case knopNull: // enregistered break; // PTNODE(knopFalse , "false" ,False ,None ,fnopLeaf) case knopFalse: // enregistered break; // PTNODE(knopTrue , "true" ,True ,None ,fnopLeaf) case knopTrue: // enregistered break; // PTNODE(knopEmpty , "empty" ,Empty ,None ,fnopLeaf) case knopEmpty: break; // Unary operators. // PTNODE(knopNot , "~" ,BitNot ,Uni ,fnopUni) case knopNot: STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1); byteCodeGenerator->Writer()->Reg2( Js::OpCode::Not_A, funcInfo->AcquireLoc(pnode), pnode->AsParseNodeUni()->pnode1->location); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; // PTNODE(knopNeg , "unary -" ,Neg ,Uni ,fnopUni) case knopNeg: STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Reg2( Js::OpCode::Neg_A, pnode->location, pnode->AsParseNodeUni()->pnode1->location); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; // PTNODE(knopPos , "unary +" ,Pos ,Uni ,fnopUni) case knopPos: STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1); byteCodeGenerator->Writer()->Reg2( Js::OpCode::Conv_Num, funcInfo->AcquireLoc(pnode), pnode->AsParseNodeUni()->pnode1->location); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; // PTNODE(knopLogNot , "!" 
,LogNot ,Uni ,fnopUni) case knopLogNot: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); Js::ByteCodeLabel doneLabel = byteCodeGenerator->Writer()->DefineLabel(); // For boolean expressions that compute a result, we have to burn a register for the result // so that the back end can identify it cheaply as a single temp lifetime. Revisit this if we do // full-on renaming in the back end. funcInfo->AcquireLoc(pnode); if (pnode->AsParseNodeUni()->pnode1->nop == knopInt) { int32 value = pnode->AsParseNodeUni()->pnode1->AsParseNodeInt()->lw; Js::OpCode op = value ? Js::OpCode::LdFalse : Js::OpCode::LdTrue; byteCodeGenerator->Writer()->Reg1(op, pnode->location); } else { Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdFalse, pnode->location); byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrTrue_A, doneLabel, pnode->AsParseNodeUni()->pnode1->location); byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, pnode->location); byteCodeGenerator->Writer()->MarkLabel(doneLabel); } funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; } // PTNODE(knopEllipsis , "..." ,Spread ,Uni , fnopUni) case knopEllipsis: { Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); // Transparently pass the location of the object or array. pnode->location = pnode->AsParseNodeUni()->pnode1->location; break; } // PTNODE(knopIncPost , "post++" ,Inc ,Uni ,fnopUni|fnopAsg) case knopIncPost: case knopDecPost: // FALL THROUGH to the faster pre-inc/dec case if the result of the expression is not needed. if (pnode->isUsed || fReturnValue) { byteCodeGenerator->StartStatement(pnode); const Js::OpCode op = (pnode->nop == knopDecPost) ? Js::OpCode::Sub_A : Js::OpCode::Add_A; ParseNode* pnode1 = pnode->AsParseNodeUni()->pnode1; // Grab a register for the expression result. 
funcInfo->AcquireLoc(pnode); // Load the initial value, convert it (this is the expression result), and increment it. EmitLoad(pnode1, byteCodeGenerator, funcInfo); byteCodeGenerator->Writer()->Reg2(Js::OpCode::Conv_Num, pnode->location, pnode1->location); // Use temporary register if lhs cannot be assigned Js::RegSlot incDecResult = pnode1->location; if (funcInfo->RegIsConst(incDecResult) || (pnode1->nop == knopName && pnode1->AsParseNodeName()->sym && pnode1->AsParseNodeName()->sym->GetIsFuncExpr())) { incDecResult = funcInfo->AcquireTmpRegister(); } Js::RegSlot oneReg = funcInfo->constantToRegister.LookupWithKey(1, Js::Constants::NoRegister); Assert(oneReg != Js::Constants::NoRegister); byteCodeGenerator->Writer()->Reg3(op, incDecResult, pnode->location, oneReg); // Store the incremented value. EmitAssignment(nullptr, pnode1, incDecResult, byteCodeGenerator, funcInfo); // Release the incremented value and the l-value. if (incDecResult != pnode1->location) { funcInfo->ReleaseTmpRegister(incDecResult); } funcInfo->ReleaseLoad(pnode1); byteCodeGenerator->EndStatement(pnode); break; } else { pnode->nop = (pnode->nop == knopIncPost) ? knopIncPre : knopDecPre; } // FALL THROUGH to the fast pre-inc/dec case if the result of the expression is not needed. // PTNODE(knopIncPre , "++ pre" ,Inc ,Uni ,fnopUni|fnopAsg) case knopIncPre: case knopDecPre: { byteCodeGenerator->StartStatement(pnode); const Js::OpCode op = (pnode->nop == knopDecPre) ? Js::OpCode::Decr_A : Js::OpCode::Incr_A; ParseNode* pnode1 = pnode->AsParseNodeUni()->pnode1; // Assign a register for the result only if the result is used or the LHS can't be assigned to // (i.e., is a constant). 
const bool need_result_location = pnode->isUsed || fReturnValue || funcInfo->RegIsConst(pnode1->location) || (pnode1->nop == knopName && pnode1->AsParseNodeName()->sym && pnode1->AsParseNodeName()->sym->GetIsFuncExpr()); if (need_result_location) { const Js::RegSlot result_location = funcInfo->AcquireLoc(pnode); EmitLoad(pnode1, byteCodeGenerator, funcInfo); byteCodeGenerator->Writer()->Reg2(op, result_location, pnode1->location); // Store the incremented value and release the l-value. EmitAssignment(nullptr, pnode1, result_location, byteCodeGenerator, funcInfo); } else { EmitLoad(pnode1, byteCodeGenerator, funcInfo); byteCodeGenerator->Writer()->Reg2(op, pnode1->location, pnode1->location); // Store the incremented value and release the l-value. EmitAssignment(nullptr, pnode1, pnode1->location, byteCodeGenerator, funcInfo); } funcInfo->ReleaseLoad(pnode1); byteCodeGenerator->EndStatement(pnode); break; } // PTNODE(knopTypeof , "typeof" ,None ,Uni ,fnopUni) case knopTypeof: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); ParseNode* pnodeOpnd = pnode->AsParseNodeUni()->pnode1; switch (pnodeOpnd->nop) { case knopDot: { Emit(pnodeOpnd->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false); Js::PropertyId propertyId = pnodeOpnd->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode(); Assert(pnodeOpnd->AsParseNodeBin()->pnode2->nop == knopName); funcInfo->ReleaseLoc(pnodeOpnd->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); byteCodeGenerator->EmitTypeOfFld(funcInfo, propertyId, pnode->location, pnodeOpnd->AsParseNodeBin()->pnode1->location, Js::OpCode::LdFldForTypeOf); break; } case knopIndex: { EmitBinaryOpnds(pnodeOpnd->AsParseNodeBin()->pnode1, pnodeOpnd->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo); funcInfo->ReleaseLoc(pnodeOpnd->AsParseNodeBin()->pnode2); funcInfo->ReleaseLoc(pnodeOpnd->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Element(Js::OpCode::TypeofElem, pnode->location, 
                // (typeof obj[index] continued: object and index operands of TypeofElem)
                pnodeOpnd->AsParseNodeBin()->pnode1->location, pnodeOpnd->AsParseNodeBin()->pnode2->location);
            break;
        }
        case knopName:
        {
            ParseNodeName * pnodeNameOpnd = pnodeOpnd->AsParseNodeName();
            if (pnodeNameOpnd->IsUserIdentifier())
            {
                // typeof <identifier> goes through a dedicated property-typeof path rather
                // than a plain load — presumably so an undeclared name yields "undefined"
                // instead of throwing; confirm against EmitPropTypeof.
                funcInfo->AcquireLoc(pnode);
                byteCodeGenerator->EmitPropTypeof(pnode->location, pnodeNameOpnd->sym, pnodeNameOpnd->pid, funcInfo);
                break;
            }
            // Special names should fallthrough to default case
        }
        default:
            // Generic operand: evaluate it, then apply the Typeof opcode to its value.
            Emit(pnodeOpnd, byteCodeGenerator, funcInfo, false);
            funcInfo->ReleaseLoc(pnodeOpnd);
            byteCodeGenerator->Writer()->Reg2(
                Js::OpCode::Typeof, funcInfo->AcquireLoc(pnode), pnodeOpnd->location);
            break;
        }
        ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode);
        break;
    }
        // PTNODE(knopVoid , "void" ,Void ,Uni ,fnopUni)
    case knopVoid:
        // void <expr>: evaluate the operand for its side effects, discard its value,
        // and produce undefined as the expression result.
        Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false);
        funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1);
        byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdUndef, funcInfo->AcquireLoc(pnode));
        break;
        // PTNODE(knopArray , "arr cnst" ,None ,Uni ,fnopUni)
    case knopArray:
        // Array literal.
        STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode);
        EmitArrayLiteral(pnode, byteCodeGenerator, funcInfo);
        ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode);
        break;
        // PTNODE(knopObject , "obj cnst" ,None ,Uni ,fnopUni)
    case knopObject:
        // Object literal: acquire the result register first, then emit each
        // initializer into it.
        STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode);
        funcInfo->AcquireLoc(pnode);
        EmitObjectInitializers(pnode->AsParseNodeUni()->pnode1, pnode->location, byteCodeGenerator, funcInfo);
        ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode);
        break;
        // PTNODE(knopComputedName, "[name]" ,None ,Uni ,fnopUni)
    case knopComputedName:
        Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false);
        if (pnode->location == Js::Constants::NoRegister)
        {
            // The name is some expression with no home location. We can just re-use the register.
            pnode->location = pnode->AsParseNodeUni()->pnode1->location;
        }
        else if (pnode->location != pnode->AsParseNodeUni()->pnode1->location)
        {
            // The name had to be protected from side-effects of the RHS.
byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnode->location, pnode->AsParseNodeUni()->pnode1->location); } break; // Binary and Ternary Operators case knopAdd: EmitAdd(pnode, byteCodeGenerator, funcInfo); break; case knopSub: case knopMul: case knopExpo: case knopDiv: case knopMod: case knopOr: case knopXor: case knopAnd: case knopLsh: case knopRsh: case knopRs2: case knopIn: EmitBinary(nopToOp[pnode->nop], pnode, byteCodeGenerator, funcInfo); break; case knopInstOf: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); EmitBinaryOpnds(pnode->AsParseNodeBin()->pnode1, pnode->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode2); funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); uint cacheId = funcInfo->NewIsInstInlineCache(); byteCodeGenerator->Writer()->Reg3C(nopToOp[pnode->nop], pnode->location, pnode->AsParseNodeBin()->pnode1->location, pnode->AsParseNodeBin()->pnode2->location, cacheId); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); } break; case knopEq: case knopEqv: case knopNEqv: case knopNe: case knopLt: case knopLe: case knopGe: case knopGt: STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); EmitBinaryOpnds(pnode->AsParseNodeBin()->pnode1, pnode->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode2); funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Reg3(nopToCMOp[pnode->nop], pnode->location, pnode->AsParseNodeBin()->pnode1->location, pnode->AsParseNodeBin()->pnode2->location); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; case knopNew: { EmitNew(pnode, byteCodeGenerator, funcInfo); byteCodeGenerator->EndStatement(pnode); break; } case knopDelete: { ParseNode *pexpr = pnode->AsParseNodeUni()->pnode1; byteCodeGenerator->StartStatement(pnode); switch (pexpr->nop) { case knopName: { ParseNodeName * pnodeName = pexpr->AsParseNodeName(); if 
(pnodeName->IsSpecialName()) { funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdTrue, pnode->location); } else { funcInfo->AcquireLoc(pnode); byteCodeGenerator->EmitPropDelete(pnode->location, pnodeName->sym, pnodeName->pid, funcInfo); } break; } case knopDot: { if (ByteCodeGenerator::IsSuper(pexpr->AsParseNodeBin()->pnode1)) { byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeReferenceError, SCODE_CODE(JSERR_DeletePropertyWithSuper)); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdUndef, pnode->location); } else { Emit(pexpr->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pexpr->AsParseNodeBin()->pnode1); Js::PropertyId propertyId = pexpr->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode(); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Property(Js::OpCode::DeleteFld, pnode->location, pexpr->AsParseNodeBin()->pnode1->location, funcInfo->FindOrAddReferencedPropertyId(propertyId), byteCodeGenerator->forceStrictModeForClassComputedPropertyName); } break; } case knopIndex: { EmitBinaryOpnds(pexpr->AsParseNodeBin()->pnode1, pexpr->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo); funcInfo->ReleaseLoc(pexpr->AsParseNodeBin()->pnode2); funcInfo->ReleaseLoc(pexpr->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Element(Js::OpCode::DeleteElemI_A, pnode->location, pexpr->AsParseNodeBin()->pnode1->location, pexpr->AsParseNodeBin()->pnode2->location); break; } default: { Emit(pexpr, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pexpr); byteCodeGenerator->Writer()->Reg2( Js::OpCode::Delete_A, funcInfo->AcquireLoc(pnode), pexpr->location); break; } } byteCodeGenerator->EndStatement(pnode); break; } case knopCall: { ParseNodeCall * pnodeCall = pnode->AsParseNodeCall(); byteCodeGenerator->StartStatement(pnodeCall); if (pnodeCall->isSuperCall) { byteCodeGenerator->EmitSuperCall(funcInfo, 
pnodeCall->AsParseNodeSuperCall(), fReturnValue); } else if (pnodeCall->pnodeTarget->nop == knopImport) { ParseNodePtr args = pnodeCall->pnodeArgs; Assert(CountArguments(args) == 2); // import() takes one argument Emit(args, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(args); funcInfo->AcquireLoc(pnodeCall); byteCodeGenerator->Writer()->Reg2(Js::OpCode::ImportCall, pnodeCall->location, args->location); } else { if (pnodeCall->isApplyCall && funcInfo->GetApplyEnclosesArgs()) { // TODO[ianhall]: Can we remove the ApplyCall bytecode gen time optimization? EmitApplyCall(pnodeCall, byteCodeGenerator, funcInfo, fReturnValue); } else { EmitCall(pnodeCall, byteCodeGenerator, funcInfo, fReturnValue, /*fEvaluateComponents*/ true); } } byteCodeGenerator->EndStatement(pnode); break; } case knopIndex: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); EmitBinaryOpnds(pnode->AsParseNodeBin()->pnode1, pnode->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo); Js::RegSlot callObjLocation = pnode->AsParseNodeBin()->pnode1->location; Js::RegSlot protoLocation = callObjLocation; if (ByteCodeGenerator::IsSuper(pnode->AsParseNodeBin()->pnode1)) { Emit(pnode->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false); protoLocation = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, callObjLocation, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeSuperReference()->pnodeThis); } funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode2); funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Element( Js::OpCode::LdElemI_A, pnode->location, protoLocation, pnode->AsParseNodeBin()->pnode2->location); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; } // this is MemberExpression as rvalue case knopDot: { Emit(pnode->AsParseNodeBin()->pnode1, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode1); funcInfo->AcquireLoc(pnode); Js::PropertyId propertyId = 
pnode->AsParseNodeBin()->pnode2->AsParseNodeName()->PropertyIdFromNameNode(); Js::RegSlot callObjLocation = pnode->AsParseNodeBin()->pnode1->location; Js::RegSlot protoLocation = callObjLocation; if (propertyId == Js::PropertyIds::length) { uint cacheId = funcInfo->FindOrAddInlineCacheId(protoLocation, propertyId, false, false); byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdLen_A, pnode->location, protoLocation, cacheId); } else if (pnode->IsCallApplyTargetLoad()) { if (ByteCodeGenerator::IsSuper(pnode->AsParseNodeBin()->pnode1)) { Emit(pnode->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false); protoLocation = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, callObjLocation, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeSuperReference()->pnodeThis); } uint cacheId = funcInfo->FindOrAddInlineCacheId(protoLocation, propertyId, false, false); byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFldForCallApplyTarget, pnode->location, protoLocation, cacheId); } else { if (ByteCodeGenerator::IsSuper(pnode->AsParseNodeBin()->pnode1)) { Emit(pnode->AsParseNodeSuperReference()->pnodeThis, byteCodeGenerator, funcInfo, false); protoLocation = byteCodeGenerator->EmitLdObjProto(Js::OpCode::LdHomeObjProto, callObjLocation, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeSuperReference()->pnodeThis); uint cacheId = funcInfo->FindOrAddInlineCacheId(protoLocation, propertyId, false, false); byteCodeGenerator->Writer()->PatchablePropertyWithThisPtr(Js::OpCode::LdSuperFld, pnode->location, protoLocation, pnode->AsParseNodeSuperReference()->pnodeThis->location, cacheId, isConstructorCall); } else { uint cacheId = funcInfo->FindOrAddInlineCacheId(callObjLocation, propertyId, false, false); byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, pnode->location, callObjLocation, cacheId, isConstructorCall); } } break; } // PTNODE(knopAsg , "=" ,None ,Bin ,fnopBin|fnopAsg) case knopAsg: { ParseNode *lhs = 
pnode->AsParseNodeBin()->pnode1; ParseNode *rhs = pnode->AsParseNodeBin()->pnode2; byteCodeGenerator->StartStatement(pnode); if (pnode->isUsed || fReturnValue) { // If the assignment result is used, grab a register to hold it and pass it to EmitAssignment, // which will copy the assigned value there. funcInfo->AcquireLoc(pnode); EmitBinaryReference(lhs, rhs, byteCodeGenerator, funcInfo, false); EmitAssignment(pnode, lhs, rhs->location, byteCodeGenerator, funcInfo); } else { EmitBinaryReference(lhs, rhs, byteCodeGenerator, funcInfo, false); EmitAssignment(nullptr, lhs, rhs->location, byteCodeGenerator, funcInfo); } funcInfo->ReleaseLoc(rhs); if (!(byteCodeGenerator->IsES6DestructuringEnabled() && (lhs->IsPattern()))) { funcInfo->ReleaseReference(lhs); } byteCodeGenerator->EndStatement(pnode); break; } case knopName: funcInfo->AcquireLoc(pnode); if (ByteCodeGenerator::IsThis(pnode)) { byteCodeGenerator->EmitPropLoadThis(pnode->location, pnode->AsParseNodeSpecialName(), funcInfo, true); } else { byteCodeGenerator->EmitPropLoad(pnode->location, pnode->AsParseNodeName()->sym, pnode->AsParseNodeName()->pid, funcInfo); } break; case knopComma: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); // The parser marks binary opnd pnodes as used, but value of the first opnd of a comma is not used. // Easier to correct this here than to check every binary op in the parser. ParseNode *pnode1 = pnode->AsParseNodeBin()->pnode1; pnode1->isUsed = false; if (pnode1->nop == knopComma) { // Spot fix for giant comma expressions that send us into OOS if we use a simple recursive // algorithm. Instead of recursing on comma LHS's, iterate over them, pushing the RHS's onto // a stack. (This suggests a model for removing recursion from Emit altogether...) 
                // Comma-chain spot fix (continued): walk down the left spine iteratively,
                // stacking each RHS so we don't recurse once per nested comma.
                ArenaAllocator *alloc = byteCodeGenerator->GetAllocator();
                SList<ParseNode *> rhsStack(alloc);
                do
                {
                    rhsStack.Push(pnode1->AsParseNodeBin()->pnode2);
                    pnode1 = pnode1->AsParseNodeBin()->pnode1;
                    // Every non-final operand's value is discarded.
                    pnode1->isUsed = false;
                } while (pnode1->nop == knopComma);

                // Emit the leftmost operand, then each stacked RHS (popped in source order).
                Emit(pnode1, byteCodeGenerator, funcInfo, false);
                if (funcInfo->IsTmpReg(pnode1->location))
                {
                    // Mark the temp's value as unused for the back end.
                    byteCodeGenerator->Writer()->Reg1(Js::OpCode::Unused, pnode1->location);
                }
                while (!rhsStack.Empty())
                {
                    ParseNode *pnodeRhs = rhsStack.Pop();
                    pnodeRhs->isUsed = false;
                    Emit(pnodeRhs, byteCodeGenerator, funcInfo, false);
                    if (funcInfo->IsTmpReg(pnodeRhs->location))
                    {
                        byteCodeGenerator->Writer()->Reg1(Js::OpCode::Unused, pnodeRhs->location);
                    }
                    funcInfo->ReleaseLoc(pnodeRhs);
                }
            }
            else
            {
                // Non-nested LHS: emit it and discard its value.
                Emit(pnode1, byteCodeGenerator, funcInfo, false);
                if (funcInfo->IsTmpReg(pnode1->location))
                {
                    byteCodeGenerator->Writer()->Reg1(Js::OpCode::Unused, pnode1->location);
                }
            }
            funcInfo->ReleaseLoc(pnode1);

            // Only the final operand's value is the value of the comma expression.
            pnode->AsParseNodeBin()->pnode2->isUsed = pnode->isUsed || fReturnValue;
            Emit(pnode->AsParseNodeBin()->pnode2, byteCodeGenerator, funcInfo, false);
            funcInfo->ReleaseLoc(pnode->AsParseNodeBin()->pnode2);
            funcInfo->AcquireLoc(pnode);
            if (pnode->AsParseNodeBin()->pnode2->isUsed && pnode->location != pnode->AsParseNodeBin()->pnode2->location)
            {
                // Copy the final operand's value into this node's result register.
                byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnode->location, pnode->AsParseNodeBin()->pnode2->location);
            }
            ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode);
        }
        break;
        // The binary logical ops && and || resolve to the value of the left-hand expression if its
        // boolean value short-circuits the operation, and to the value of the right-hand expression
        // otherwise. (In other words, the "truth" of the right-hand expression is never tested.)
// PTNODE(knopLogOr , "||" ,None ,Bin ,fnopBin) case knopLogOr: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); Js::ByteCodeLabel doneLabel = byteCodeGenerator->Writer()->DefineLabel(); // We use a single dest here for the whole generating boolean expr, because we were poorly // optimizing the previous version where we had a dest for each level funcInfo->AcquireLoc(pnode); EmitGeneratingBooleanExpression(pnode, doneLabel, true, doneLabel, true, pnode->location, byteCodeGenerator, funcInfo); byteCodeGenerator->Writer()->MarkLabel(doneLabel); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; } // PTNODE(knopLogAnd , "&&" ,None ,Bin ,fnopBin) case knopLogAnd: { STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); Js::ByteCodeLabel doneLabel = byteCodeGenerator->Writer()->DefineLabel(); // We use a single dest here for the whole generating boolean expr, because we were poorly // optimizing the previous version where we had a dest for each level funcInfo->AcquireLoc(pnode); EmitGeneratingBooleanExpression(pnode, doneLabel, true, doneLabel, true, pnode->location, byteCodeGenerator, funcInfo); byteCodeGenerator->Writer()->MarkLabel(doneLabel); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; } // PTNODE(knopQmark , "?" ,None ,Tri ,fnopBin) case knopQmark: { Js::ByteCodeLabel trueLabel = byteCodeGenerator->Writer()->DefineLabel(); Js::ByteCodeLabel falseLabel = byteCodeGenerator->Writer()->DefineLabel(); Js::ByteCodeLabel skipLabel = byteCodeGenerator->Writer()->DefineLabel(); EmitBooleanExpression(pnode->AsParseNodeTri()->pnode1, trueLabel, falseLabel, byteCodeGenerator, funcInfo, true, false); byteCodeGenerator->Writer()->MarkLabel(trueLabel); funcInfo->ReleaseLoc(pnode->AsParseNodeTri()->pnode1); // For boolean expressions that compute a result, we have to burn a register for the result // so that the back end can identify it cheaply as a single temp lifetime. Revisit this if we do // full-on renaming in the back end. 
funcInfo->AcquireLoc(pnode); Emit(pnode->AsParseNodeTri()->pnode2, byteCodeGenerator, funcInfo, false); byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnode->location, pnode->AsParseNodeTri()->pnode2->location); funcInfo->ReleaseLoc(pnode->AsParseNodeTri()->pnode2); // Record the branch bytecode offset byteCodeGenerator->Writer()->RecordStatementAdjustment(Js::FunctionBody::SAT_FromCurrentToNext); byteCodeGenerator->Writer()->Br(skipLabel); byteCodeGenerator->Writer()->MarkLabel(falseLabel); Emit(pnode->AsParseNodeTri()->pnode3, byteCodeGenerator, funcInfo, false); byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnode->location, pnode->AsParseNodeTri()->pnode3->location); funcInfo->ReleaseLoc(pnode->AsParseNodeTri()->pnode3); byteCodeGenerator->Writer()->MarkLabel(skipLabel); break; } case knopAsgAdd: case knopAsgSub: case knopAsgMul: case knopAsgDiv: case knopAsgExpo: case knopAsgMod: case knopAsgAnd: case knopAsgXor: case knopAsgOr: case knopAsgLsh: case knopAsgRsh: case knopAsgRs2: { byteCodeGenerator->StartStatement(pnode); ParseNode *lhs = pnode->AsParseNodeBin()->pnode1; ParseNode *rhs = pnode->AsParseNodeBin()->pnode2; // Assign a register for the result only if the result is used or the LHS can't be assigned to // (i.e., is a constant). const bool need_result_location = pnode->isUsed || fReturnValue || funcInfo->RegIsConst(lhs->location) || (lhs->nop == knopName && lhs->AsParseNodeName()->sym && lhs->AsParseNodeName()->sym->GetIsFuncExpr()); if (need_result_location) { const Js::RegSlot result_location = funcInfo->AcquireLoc(pnode); // Grab a register for the initial value and load it. EmitBinaryReference(lhs, rhs, byteCodeGenerator, funcInfo, true); funcInfo->ReleaseLoc(rhs); // Do the arithmetic, store the result, and release the l-value. 
byteCodeGenerator->Writer()->Reg3(nopToOp[pnode->nop], result_location, lhs->location, rhs->location); EmitAssignment(pnode, lhs, result_location, byteCodeGenerator, funcInfo); } else { // Grab a register for the initial value and load it. Might modify lhs->location. EmitBinaryReference(lhs, rhs, byteCodeGenerator, funcInfo, true); funcInfo->ReleaseLoc(rhs); // Do the arithmetic, store the result, and release the l-value. byteCodeGenerator->Writer()->Reg3(nopToOp[pnode->nop], lhs->location, lhs->location, rhs->location); EmitAssignment(nullptr, lhs, lhs->location, byteCodeGenerator, funcInfo); } funcInfo->ReleaseLoad(lhs); byteCodeGenerator->EndStatement(pnode); break; } // General nodes. // PTNODE(knopTempRef , "temp ref" ,None ,Uni ,fnopUni) case knopTempRef: // TODO: check whether mov is necessary funcInfo->AcquireLoc(pnode); byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnode->location, pnode->AsParseNodeUni()->pnode1->location); break; // PTNODE(knopTemp , "temp" ,None ,None ,fnopLeaf) case knopTemp: // Emit initialization code if (pnode->AsParseNodeVar()->pnodeInit != nullptr) { byteCodeGenerator->StartStatement(pnode); Emit(pnode->AsParseNodeVar()->pnodeInit, byteCodeGenerator, funcInfo, false); byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, pnode->location, pnode->AsParseNodeVar()->pnodeInit->location); funcInfo->ReleaseLoc(pnode->AsParseNodeVar()->pnodeInit); byteCodeGenerator->EndStatement(pnode); } break; // PTNODE(knopVarDecl , "varDcl" ,None ,Var ,fnopNone) case knopVarDecl: case knopConstDecl: case knopLetDecl: { // Emit initialization code ParseNodePtr initNode = pnode->AsParseNodeVar()->pnodeInit; AssertMsg(pnode->nop != knopConstDecl || initNode != nullptr, "knopConstDecl expected to have an initializer"); if (initNode != nullptr || pnode->nop == knopLetDecl) { Symbol *sym = pnode->AsParseNodeVar()->sym; Js::RegSlot rhsLocation; byteCodeGenerator->StartStatement(pnode); if (initNode != nullptr) { Emit(initNode, byteCodeGenerator, 
funcInfo, false); rhsLocation = initNode->location; if (initNode->nop == knopObject) { TrackMemberNodesInObjectForIntConstants(byteCodeGenerator, initNode); } else if (initNode->nop == knopInt) { TrackIntConstantsOnGlobalObject(byteCodeGenerator, sym); } } else { Assert(pnode->nop == knopLetDecl); rhsLocation = funcInfo->AcquireTmpRegister(); byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdUndef, rhsLocation); } if (pnode->nop != knopVarDecl) { Assert(sym->GetDecl() == pnode || (sym->IsArguments() && !funcInfo->GetHasArguments())); sym->SetNeedDeclaration(false); } EmitAssignment(nullptr, pnode, rhsLocation, byteCodeGenerator, funcInfo); funcInfo->ReleaseTmpRegister(rhsLocation); byteCodeGenerator->EndStatement(pnode); } break; } // PTNODE(knopFncDecl , "fncDcl" ,None ,Fnc ,fnopLeaf) case knopFncDecl: // The "function declarations" were emitted in DefineFunctions() if (!pnode->AsParseNodeFnc()->IsDeclaration()) { byteCodeGenerator->DefineOneFunction(pnode->AsParseNodeFnc(), funcInfo, false); } break; // PTNODE(knopClassDecl, "class" ,None ,None ,fnopLeaf) case knopClassDecl: { ParseNodeClass * pnodeClass = pnode->AsParseNodeClass(); funcInfo->AcquireLoc(pnodeClass); Assert(pnodeClass->pnodeConstructor); pnodeClass->pnodeConstructor->location = pnodeClass->location; BeginEmitBlock(pnodeClass->pnodeBlock, byteCodeGenerator, funcInfo); // Extends if (pnodeClass->pnodeExtends) { // We can't do StartStatement/EndStatement for pnodeExtends here because the load locations may differ between // defer and nondefer parse modes. 
Emit(pnodeClass->pnodeExtends, byteCodeGenerator, funcInfo, false); } // Constructor Emit(pnodeClass->pnodeConstructor, byteCodeGenerator, funcInfo, false); EmitComputedFunctionNameVar(bindPnode, pnodeClass->pnodeConstructor, byteCodeGenerator); if (pnodeClass->pnodeExtends) { byteCodeGenerator->StartStatement(pnodeClass->pnodeExtends); byteCodeGenerator->Writer()->InitClass(pnodeClass->location, pnodeClass->pnodeExtends->location); byteCodeGenerator->EndStatement(pnodeClass->pnodeExtends); } else { byteCodeGenerator->Writer()->InitClass(pnodeClass->location); } Js::RegSlot protoLoc = funcInfo->AcquireTmpRegister(); //register set if we have Instance Methods int cacheId = funcInfo->FindOrAddInlineCacheId(pnodeClass->location, Js::PropertyIds::prototype, false, false); byteCodeGenerator->Writer()->PatchableProperty(Js::OpCode::LdFld, protoLoc, pnodeClass->location, cacheId); // Static Methods EmitClassInitializers(pnodeClass->pnodeStaticMembers, pnodeClass->location, byteCodeGenerator, funcInfo, pnode, /*isObjectEmpty*/ false); // Instance Methods EmitClassInitializers(pnodeClass->pnodeMembers, protoLoc, byteCodeGenerator, funcInfo, pnode, /*isObjectEmpty*/ true); funcInfo->ReleaseTmpRegister(protoLoc); // Emit name binding. 
if (pnodeClass->pnodeName) { Symbol * sym = pnodeClass->pnodeName->sym; sym->SetNeedDeclaration(false); byteCodeGenerator->EmitPropStore(pnodeClass->location, sym, nullptr, funcInfo, false, true); } EndEmitBlock(pnodeClass->pnodeBlock, byteCodeGenerator, funcInfo); if (pnodeClass->pnodeExtends) { funcInfo->ReleaseLoc(pnodeClass->pnodeExtends); } if (pnodeClass->pnodeDeclName) { Symbol * sym = pnodeClass->pnodeDeclName->sym; sym->SetNeedDeclaration(false); byteCodeGenerator->EmitPropStore(pnodeClass->location, sym, nullptr, funcInfo, true, false); } if (pnodeClass->IsDefaultModuleExport()) { byteCodeGenerator->EmitAssignmentToDefaultModuleExport(pnodeClass, funcInfo); } break; } case knopStrTemplate: STARTSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); EmitStringTemplate(pnode->AsParseNodeStrTemplate(), byteCodeGenerator, funcInfo); ENDSTATEMENET_IFTOPLEVEL(isTopLevel, pnode); break; case knopEndCode: byteCodeGenerator->Writer()->RecordStatementAdjustment(Js::FunctionBody::SAT_All); // load undefined for the fallthrough case: if (!funcInfo->IsGlobalFunction()) { if (funcInfo->IsClassConstructor()) { // For class constructors, we need to explicitly load 'this' into the return register. byteCodeGenerator->EmitClassConstructorEndCode(funcInfo); } else { // In the global function, implicit return values are copied to the return register, and if // necessary the return register is initialized at the top. Don't clobber the value here. 
byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdUndef, ByteCodeGenerator::ReturnRegister); } } // Label for non-fall-through return byteCodeGenerator->Writer()->MarkLabel(funcInfo->singleExit); if (funcInfo->GetHasCachedScope()) { byteCodeGenerator->Writer()->Empty(Js::OpCode::CommitScope); } byteCodeGenerator->StartStatement(pnode); byteCodeGenerator->Writer()->Empty(Js::OpCode::Ret); byteCodeGenerator->EndStatement(pnode); break; // PTNODE(knopDebugger , "debugger" ,None ,None ,fnopNone) case knopDebugger: byteCodeGenerator->StartStatement(pnode); byteCodeGenerator->Writer()->Empty(Js::OpCode::Break); byteCodeGenerator->EndStatement(pnode); break; // PTNODE(knopFor , "for" ,None ,For ,fnopBreak|fnopContinue) case knopFor: { ParseNodeFor * pnodeFor = pnode->AsParseNodeFor(); if (pnodeFor->pnodeInverted != nullptr) { byteCodeGenerator->EmitInvertedLoop(pnodeFor, pnodeFor->pnodeInverted, funcInfo); } else { BeginEmitBlock(pnodeFor->pnodeBlock, byteCodeGenerator, funcInfo); Emit(pnodeFor->pnodeInit, byteCodeGenerator, funcInfo, false); funcInfo->ReleaseLoc(pnodeFor->pnodeInit); if (byteCodeGenerator->IsES6ForLoopSemanticsEnabled()) { CloneEmitBlock(pnodeFor->pnodeBlock, byteCodeGenerator, funcInfo); } EmitLoop(pnodeFor, pnodeFor->pnodeCond, pnodeFor->pnodeBody, pnodeFor->pnodeIncr, byteCodeGenerator, funcInfo, fReturnValue, FALSE, pnodeFor->pnodeBlock); EndEmitBlock(pnodeFor->pnodeBlock, byteCodeGenerator, funcInfo); } break; } // PTNODE(knopIf , "if" ,None ,If ,fnopNone) case knopIf: { ParseNodeIf * pnodeIf = pnode->AsParseNodeIf(); byteCodeGenerator->StartStatement(pnodeIf); Js::ByteCodeLabel trueLabel = byteCodeGenerator->Writer()->DefineLabel(); Js::ByteCodeLabel falseLabel = byteCodeGenerator->Writer()->DefineLabel(); EmitBooleanExpression(pnodeIf->pnodeCond, trueLabel, falseLabel, byteCodeGenerator, funcInfo, true, false); funcInfo->ReleaseLoc(pnodeIf->pnodeCond); byteCodeGenerator->EndStatement(pnodeIf); byteCodeGenerator->Writer()->MarkLabel(trueLabel); 
Emit(pnodeIf->pnodeTrue, byteCodeGenerator, funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeIf->pnodeTrue); if (pnodeIf->pnodeFalse != nullptr) { // has else clause Js::ByteCodeLabel skipLabel = byteCodeGenerator->Writer()->DefineLabel(); // Record the branch bytecode offset byteCodeGenerator->Writer()->RecordStatementAdjustment(Js::FunctionBody::SAT_FromCurrentToNext); // then clause skips else clause byteCodeGenerator->Writer()->Br(skipLabel); // generate code for else clause byteCodeGenerator->Writer()->MarkLabel(falseLabel); Emit(pnodeIf->pnodeFalse, byteCodeGenerator, funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeIf->pnodeFalse); byteCodeGenerator->Writer()->MarkLabel(skipLabel); } else { byteCodeGenerator->Writer()->MarkLabel(falseLabel); } if (pnodeIf->emitLabels) { byteCodeGenerator->Writer()->MarkLabel(pnodeIf->breakLabel); } break; } case knopWhile: { ParseNodeWhile * pnodeWhile = pnode->AsParseNodeWhile(); EmitLoop(pnodeWhile, pnodeWhile->pnodeCond, pnodeWhile->pnodeBody, nullptr, byteCodeGenerator, funcInfo, fReturnValue); break; } // PTNODE(knopDoWhile , "do-while" ,None ,While,fnopBreak|fnopContinue) case knopDoWhile: { ParseNodeWhile * pnodeWhile = pnode->AsParseNodeWhile(); EmitLoop(pnodeWhile, pnodeWhile->pnodeCond, pnodeWhile->pnodeBody, nullptr, byteCodeGenerator, funcInfo, fReturnValue, true); break; } // PTNODE(knopForIn , "for in" ,None ,ForIn,fnopBreak|fnopContinue|fnopCleanup) case knopForIn: EmitForInOrForOf(pnode->AsParseNodeForInOrForOf(), byteCodeGenerator, funcInfo, fReturnValue); break; case knopForOf: EmitForInOrForOf(pnode->AsParseNodeForInOrForOf(), byteCodeGenerator, funcInfo, fReturnValue); break; // PTNODE(knopReturn , "return" ,None ,Uni ,fnopNone) case knopReturn: { ParseNodeReturn * pnodeReturn = pnode->AsParseNodeReturn(); byteCodeGenerator->StartStatement(pnodeReturn); if (pnodeReturn->pnodeExpr != nullptr) { if (pnodeReturn->pnodeExpr->location == Js::Constants::NoRegister) { // No need to burn a register for the 
return value. If we need a temp, use R0 directly. pnodeReturn->pnodeExpr->location = ByteCodeGenerator::ReturnRegister; } Emit(pnodeReturn->pnodeExpr, byteCodeGenerator, funcInfo, fReturnValue); if (pnodeReturn->pnodeExpr->location != ByteCodeGenerator::ReturnRegister) { byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, ByteCodeGenerator::ReturnRegister, pnodeReturn->pnodeExpr->location); } funcInfo->GetParsedFunctionBody()->SetHasNoExplicitReturnValue(false); } else { byteCodeGenerator->Writer()->Reg1(Js::OpCode::LdUndef, ByteCodeGenerator::ReturnRegister); } if (funcInfo->IsClassConstructor()) { // return expr; // becomes like below: // // if (IsObject(expr)) { // return expr; // } else if (IsBaseClassConstructor) { // return this; // } else if (!IsUndefined(expr)) { // throw TypeError; // } Js::ByteCodeLabel returnExprLabel = byteCodeGenerator->Writer()->DefineLabel(); byteCodeGenerator->Writer()->BrReg1(Js::OpCode::BrOnObject_A, returnExprLabel, ByteCodeGenerator::ReturnRegister); if (funcInfo->IsBaseClassConstructor()) { byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, ByteCodeGenerator::ReturnRegister, funcInfo->GetThisSymbol()->GetLocation()); } else { Js::ByteCodeLabel returnThisLabel = byteCodeGenerator->Writer()->DefineLabel(); byteCodeGenerator->Writer()->BrReg2(Js::OpCode::BrSrEq_A, returnThisLabel, ByteCodeGenerator::ReturnRegister, funcInfo->undefinedConstantRegister); byteCodeGenerator->Writer()->W1(Js::OpCode::RuntimeTypeError, SCODE_CODE(JSERR_ClassDerivedConstructorInvalidReturnType)); byteCodeGenerator->Writer()->MarkLabel(returnThisLabel); byteCodeGenerator->EmitClassConstructorEndCode(funcInfo); } byteCodeGenerator->Writer()->MarkLabel(returnExprLabel); } if (pnodeReturn->grfnop & fnopCleanup) { EmitJumpCleanup(pnodeReturn, nullptr, byteCodeGenerator, funcInfo); } byteCodeGenerator->Writer()->Br(funcInfo->singleExit); byteCodeGenerator->EndStatement(pnodeReturn); break; } // PTNODE(knopBlock , "{}" ,None ,Block,fnopNone) case knopBlock: { 
ParseNodeBlock * pnodeBlock = pnode->AsParseNodeBlock(); if (pnodeBlock->pnodeStmt != nullptr) { EmitBlock(pnodeBlock, byteCodeGenerator, funcInfo, fReturnValue); if (pnodeBlock->emitLabels) { byteCodeGenerator->Writer()->MarkLabel(pnodeBlock->breakLabel); } } break; } // PTNODE(knopWith , "with" ,None ,With ,fnopCleanup) case knopWith: { ParseNodeWith * pnodeWith = pnode->AsParseNodeWith(); Assert(pnodeWith->pnodeObj != nullptr); byteCodeGenerator->StartStatement(pnodeWith); // Copy the with object to a temp register (the location assigned to pnode) so that if the with object // is overwritten in the body, the lookups are not affected. funcInfo->AcquireLoc(pnodeWith); Emit(pnodeWith->pnodeObj, byteCodeGenerator, funcInfo, false); Js::RegSlot regVal = (byteCodeGenerator->GetScriptContext()->GetConfig()->IsES6UnscopablesEnabled()) ? funcInfo->AcquireTmpRegister() : pnodeWith->location; byteCodeGenerator->Writer()->Reg2(Js::OpCode::Conv_Obj, regVal, pnodeWith->pnodeObj->location); if (byteCodeGenerator->GetScriptContext()->GetConfig()->IsES6UnscopablesEnabled()) { byteCodeGenerator->Writer()->Reg2(Js::OpCode::NewWithObject, pnodeWith->location, regVal); } byteCodeGenerator->EndStatement(pnodeWith); #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HasWithBlock, funcInfo->byteCodeFunction->GetFunctionBody(), byteCodeGenerator->Writer()->GetCurrentOffset() - 1); } #endif if (pnodeWith->pnodeBody != nullptr) { Scope *scope = pnodeWith->scope; scope->SetLocation(pnodeWith->location); byteCodeGenerator->PushScope(scope); Js::DebuggerScope *debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeWith, Js::DiagExtraScopesType::DiagWithScope, regVal); if (byteCodeGenerator->ShouldTrackDebuggerMetadata()) { byteCodeGenerator->Writer()->AddPropertyToDebuggerScope(debuggerScope, regVal, Js::Constants::NoProperty, /*shouldConsumeRegister*/ true, Js::DebuggerScopePropertyFlags_WithObject); } Emit(pnodeWith->pnodeBody, byteCodeGenerator, 
funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeWith->pnodeBody); byteCodeGenerator->PopScope(); byteCodeGenerator->RecordEndScopeObject(pnodeWith); } if (pnodeWith->emitLabels) { byteCodeGenerator->Writer()->MarkLabel(pnodeWith->breakLabel); } if (byteCodeGenerator->GetScriptContext()->GetConfig()->IsES6UnscopablesEnabled()) { funcInfo->ReleaseTmpRegister(regVal); } funcInfo->ReleaseLoc(pnodeWith->pnodeObj); break; } // PTNODE(knopBreak , "break" ,None ,Jump ,fnopNone) case knopBreak: { ParseNodeJump * pnodeJump = pnode->AsParseNodeJump(); Assert(pnodeJump->pnodeTarget->emitLabels); byteCodeGenerator->StartStatement(pnodeJump); if (pnodeJump->grfnop & fnopCleanup) { EmitJumpCleanup(pnodeJump, pnodeJump->pnodeTarget, byteCodeGenerator, funcInfo); } byteCodeGenerator->Writer()->Br(pnodeJump->pnodeTarget->breakLabel); if (pnodeJump->emitLabels) { byteCodeGenerator->Writer()->MarkLabel(pnodeJump->breakLabel); } byteCodeGenerator->EndStatement(pnodeJump); break; } case knopContinue: { ParseNodeJump * pnodeJump = pnode->AsParseNodeJump(); Assert(pnodeJump->pnodeTarget->emitLabels); byteCodeGenerator->StartStatement(pnodeJump); if (pnodeJump->grfnop & fnopCleanup) { EmitJumpCleanup(pnodeJump, pnodeJump->pnodeTarget, byteCodeGenerator, funcInfo); } byteCodeGenerator->Writer()->Br(pnodeJump->pnodeTarget->continueLabel); byteCodeGenerator->EndStatement(pnodeJump); break; } // PTNODE(knopContinue , "continue" ,None ,Jump ,fnopNone) case knopSwitch: { ParseNodeSwitch * pnodeSwitch = pnode->AsParseNodeSwitch(); BOOL fHasDefault = false; Assert(pnodeSwitch->pnodeVal != nullptr); byteCodeGenerator->StartStatement(pnodeSwitch); Emit(pnodeSwitch->pnodeVal, byteCodeGenerator, funcInfo, false); Js::RegSlot regVal = funcInfo->AcquireTmpRegister(); byteCodeGenerator->Writer()->Reg2(Js::OpCode::BeginSwitch, regVal, pnodeSwitch->pnodeVal->location); BeginEmitBlock(pnodeSwitch->pnodeBlock, byteCodeGenerator, funcInfo); byteCodeGenerator->EndStatement(pnodeSwitch); // TODO: if all 
cases are compile-time constants, emit a switch statement in the byte // code so the BE can optimize it. ParseNodeCase *pnodeCase; for (pnodeCase = pnodeSwitch->pnodeCases; pnodeCase; pnodeCase = pnodeCase->pnodeNext) { // Jump to the first case body if this one doesn't match. Make sure any side-effects of the case // expression take place regardless. pnodeCase->labelCase = byteCodeGenerator->Writer()->DefineLabel(); if (pnodeCase == pnodeSwitch->pnodeDefault) { fHasDefault = true; continue; } Emit(pnodeCase->pnodeExpr, byteCodeGenerator, funcInfo, false); byteCodeGenerator->Writer()->BrReg2( Js::OpCode::Case, pnodeCase->labelCase, regVal, pnodeCase->pnodeExpr->location); funcInfo->ReleaseLoc(pnodeCase->pnodeExpr); } // No explicit case value matches. Jump to the default arm (if any) or break out altogether. if (fHasDefault) { byteCodeGenerator->Writer()->Br(Js::OpCode::EndSwitch, pnodeSwitch->pnodeDefault->labelCase); } else { if (!pnodeSwitch->emitLabels) { pnodeSwitch->breakLabel = byteCodeGenerator->Writer()->DefineLabel(); } byteCodeGenerator->Writer()->Br(Js::OpCode::EndSwitch, pnodeSwitch->breakLabel); } // Now emit the case arms to which we jump on matching a case value. 
for (pnodeCase = pnodeSwitch->pnodeCases; pnodeCase; pnodeCase = pnodeCase->pnodeNext) { byteCodeGenerator->Writer()->MarkLabel(pnodeCase->labelCase); Emit(pnodeCase->pnodeBody, byteCodeGenerator, funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeCase->pnodeBody); } EndEmitBlock(pnodeSwitch->pnodeBlock, byteCodeGenerator, funcInfo); funcInfo->ReleaseTmpRegister(regVal); funcInfo->ReleaseLoc(pnodeSwitch->pnodeVal); if (!fHasDefault || pnodeSwitch->emitLabels) { byteCodeGenerator->Writer()->MarkLabel(pnodeSwitch->breakLabel); } break; } case knopTryCatch: { Js::ByteCodeLabel catchLabel = (Js::ByteCodeLabel) - 1; ParseNodeTryCatch * pnodeTryCatch = pnode->AsParseNodeTryCatch(); ParseNodeTry *pnodeTry = pnodeTryCatch->pnodeTry; Assert(pnodeTry); ParseNodeCatch *pnodeCatch = pnodeTryCatch->pnodeCatch; Assert(pnodeCatch); catchLabel = byteCodeGenerator->Writer()->DefineLabel(); // Note: try uses OpCode::Leave which causes a return to parent interpreter thunk, // same for catch block. Thus record cross interpreter frame entry/exit records for them. 
byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(/* isEnterBlock = */ true); byteCodeGenerator->Writer()->Br(Js::OpCode::TryCatch, catchLabel); ByteCodeGenerator::TryScopeRecord tryRecForTry(Js::OpCode::TryCatch, catchLabel); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForTry); } Emit(pnodeTry->pnodeBody, byteCodeGenerator, funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeTry->pnodeBody); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd(); } byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(/* isEnterBlock = */ false); byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave); byteCodeGenerator->Writer()->Br(pnodeTryCatch->breakLabel); byteCodeGenerator->Writer()->MarkLabel(catchLabel); ParseNode *pnodeObj = pnodeCatch->GetParam(); Assert(pnodeObj); Js::RegSlot location; bool acquiredTempLocation = false; Js::DebuggerScope *debuggerScope = nullptr; Js::DebuggerScopePropertyFlags debuggerPropertyFlags = Js::DebuggerScopePropertyFlags_CatchObject; bool isPattern = pnodeObj->nop == knopParamPattern; if (isPattern) { location = pnodeObj->AsParseNodeParamPattern()->location; } else { location = pnodeObj->AsParseNodeName()->sym->GetLocation(); } if (location == Js::Constants::NoRegister) { location = funcInfo->AcquireLoc(pnodeObj); acquiredTempLocation = true; } byteCodeGenerator->Writer()->Reg1(Js::OpCode::Catch, location); Scope *scope = pnodeCatch->scope; byteCodeGenerator->PushScope(scope); if (scope->GetMustInstantiate()) { Assert(scope->GetLocation() == Js::Constants::NoRegister); if (scope->GetIsObject()) { debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeTryCatch, Js::DiagCatchScopeInObject, funcInfo->InnerScopeToRegSlot(scope)); byteCodeGenerator->Writer()->Unsigned1(Js::OpCode::NewPseudoScope, scope->GetInnerScopeIndex()); } else { int index = Js::DebuggerScope::InvalidScopeIndex; debuggerScope = 
byteCodeGenerator->RecordStartScopeObject(pnodeTryCatch, Js::DiagCatchScopeInSlot, funcInfo->InnerScopeToRegSlot(scope), &index); byteCodeGenerator->Writer()->Num3(Js::OpCode::NewInnerScopeSlots, scope->GetInnerScopeIndex(), scope->GetScopeSlotCount() + Js::ScopeSlots::FirstSlotIndex, index); } } else { debuggerScope = byteCodeGenerator->RecordStartScopeObject(pnodeTryCatch, Js::DiagCatchScopeDirect, location); } auto ParamTrackAndInitialization = [&](Symbol *sym, bool initializeParam, Js::RegSlot location) { if (sym->IsInSlot(byteCodeGenerator, funcInfo)) { Assert(scope->GetMustInstantiate()); if (scope->GetIsObject()) { Js::OpCode op = (sym->GetDecl()->nop == knopLetDecl) ? Js::OpCode::InitUndeclLetFld : byteCodeGenerator->GetInitFldOp(scope, scope->GetLocation(), funcInfo, false); Js::PropertyId propertyId = sym->EnsurePosition(byteCodeGenerator); uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->InnerScopeToRegSlot(scope), propertyId, false, true); byteCodeGenerator->Writer()->ElementPIndexed(op, location, scope->GetInnerScopeIndex(), cacheId); byteCodeGenerator->TrackActivationObjectPropertyForDebugger(debuggerScope, sym, debuggerPropertyFlags); } else { byteCodeGenerator->TrackSlotArrayPropertyForDebugger(debuggerScope, sym, sym->EnsurePosition(byteCodeGenerator), debuggerPropertyFlags); if (initializeParam) { byteCodeGenerator->EmitLocalPropInit(location, sym, funcInfo); } else { Js::RegSlot tmpReg = funcInfo->AcquireTmpRegister(); byteCodeGenerator->Writer()->Reg1(Js::OpCode::InitUndecl, tmpReg); byteCodeGenerator->EmitLocalPropInit(tmpReg, sym, funcInfo); funcInfo->ReleaseTmpRegister(tmpReg); } } } else { byteCodeGenerator->TrackRegisterPropertyForDebugger(debuggerScope, sym, funcInfo, debuggerPropertyFlags); if (initializeParam) { byteCodeGenerator->EmitLocalPropInit(location, sym, funcInfo); } else { byteCodeGenerator->Writer()->Reg1(Js::OpCode::InitUndecl, location); } } }; ByteCodeGenerator::TryScopeRecord 
tryRecForCatch(Js::OpCode::ResumeCatch, catchLabel); if (isPattern) { Parser::MapBindIdentifier(pnodeObj->AsParseNodeParamPattern()->pnode1, [&](ParseNodePtr item) { Js::RegSlot itemLocation = item->AsParseNodeVar()->sym->GetLocation(); if (itemLocation == Js::Constants::NoRegister) { // The var has no assigned register, meaning it's captured, so we have no reg to write to. // Emit the designated return reg in the byte code to avoid asserting on bad register. itemLocation = ByteCodeGenerator::ReturnRegister; } ParamTrackAndInitialization(item->AsParseNodeVar()->sym, false /*initializeParam*/, itemLocation); }); byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true); // Now emitting bytecode for destructuring pattern byteCodeGenerator->StartStatement(pnodeCatch); ParseNodePtr pnode1 = pnodeObj->AsParseNodeParamPattern()->pnode1; Assert(pnode1->IsPattern()); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForCatch); } EmitAssignment(nullptr, pnode1, location, byteCodeGenerator, funcInfo); byteCodeGenerator->EndStatement(pnodeCatch); } else { ParamTrackAndInitialization(pnodeObj->AsParseNodeName()->sym, true /*initializeParam*/, location); if (scope->GetMustInstantiate()) { pnodeObj->AsParseNodeName()->sym->SetIsGlobalCatch(true); } byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true); // Allow a debugger to stop on the 'catch (e)' byteCodeGenerator->StartStatement(pnodeCatch); byteCodeGenerator->Writer()->Empty(Js::OpCode::Nop); byteCodeGenerator->EndStatement(pnodeCatch); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForCatch); } } Emit(pnodeCatch->pnodeBody, byteCodeGenerator, funcInfo, fReturnValue); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd(); } byteCodeGenerator->PopScope(); byteCodeGenerator->RecordEndScopeObject(pnodeTryCatch); 
funcInfo->ReleaseLoc(pnodeCatch->pnodeBody); if (acquiredTempLocation) { funcInfo->ReleaseLoc(pnodeObj); } byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(false); byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave); byteCodeGenerator->Writer()->MarkLabel(pnodeTryCatch->breakLabel); break; } case knopTryFinally: { Js::ByteCodeLabel finallyLabel = (Js::ByteCodeLabel) - 1; ParseNodeTryFinally * pnodeTryFinally = pnode->AsParseNodeTryFinally(); ParseNodeTry *pnodeTry = pnodeTryFinally->pnodeTry; Assert(pnodeTry); ParseNodeFinally *pnodeFinally = pnodeTryFinally->pnodeFinally; Assert(pnodeFinally); // If we yield from the finally block after an exception, we have to store the exception object for the future next call. // When we yield from the Try-Finally the offset to the end of the Try block is needed for the branch instruction. Js::RegSlot regException = Js::Constants::NoRegister; Js::RegSlot regOffset = Js::Constants::NoRegister; finallyLabel = byteCodeGenerator->Writer()->DefineLabel(); byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true); // [CONSIDER][aneeshd] Ideally the TryFinallyWithYield opcode needs to be used only if there is a yield expression. // For now, if the function is generator we are using the TryFinallyWithYield. ByteCodeGenerator::TryScopeRecord tryRecForTry(Js::OpCode::TryFinallyWithYield, finallyLabel); if (funcInfo->byteCodeFunction->IsCoroutine()) { regException = funcInfo->AcquireTmpRegister(); regOffset = funcInfo->AcquireTmpRegister(); byteCodeGenerator->Writer()->BrReg2(Js::OpCode::TryFinallyWithYield, finallyLabel, regException, regOffset); tryRecForTry.reg1 = regException; tryRecForTry.reg2 = regOffset; byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForTry); } else { byteCodeGenerator->Writer()->Br(Js::OpCode::TryFinally, finallyLabel); } // Increasing the stack as we will be storing the additional values when we enter try..finally. 
funcInfo->StartRecordingOutArgs(1); Emit(pnodeTry->pnodeBody, byteCodeGenerator, funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeTry->pnodeBody); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd(); } byteCodeGenerator->Writer()->Empty(Js::OpCode::Leave); byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(false); // Note: although we don't use OpCode::Leave for finally block, // OpCode::LeaveNull causes a return to parent interpreter thunk. // This has to be on offset prior to offset of 1st statement of finally. byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(true); byteCodeGenerator->Writer()->Br(pnodeTryFinally->breakLabel); byteCodeGenerator->Writer()->MarkLabel(finallyLabel); byteCodeGenerator->Writer()->Empty(Js::OpCode::Finally); ByteCodeGenerator::TryScopeRecord tryRecForFinally(Js::OpCode::ResumeFinally, finallyLabel, regException, regOffset); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForFinally); } Emit(pnodeFinally->pnodeBody, byteCodeGenerator, funcInfo, fReturnValue); funcInfo->ReleaseLoc(pnodeFinally->pnodeBody); if (funcInfo->byteCodeFunction->IsCoroutine()) { byteCodeGenerator->tryScopeRecordsList.UnlinkFromEnd(); funcInfo->ReleaseTmpRegister(regOffset); funcInfo->ReleaseTmpRegister(regException); } funcInfo->EndRecordingOutArgs(1); byteCodeGenerator->Writer()->RecordCrossFrameEntryExitRecord(false); byteCodeGenerator->Writer()->Empty(Js::OpCode::LeaveNull); byteCodeGenerator->Writer()->MarkLabel(pnodeTryFinally->breakLabel); break; } case knopThrow: byteCodeGenerator->StartStatement(pnode); Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); byteCodeGenerator->Writer()->Reg1(Js::OpCode::Throw, pnode->AsParseNodeUni()->pnode1->location); funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1); byteCodeGenerator->EndStatement(pnode); break; case knopYieldLeaf: 
byteCodeGenerator->StartStatement(pnode); funcInfo->AcquireLoc(pnode); EmitYield(funcInfo->undefinedConstantRegister, pnode->location, byteCodeGenerator, funcInfo); byteCodeGenerator->EndStatement(pnode); break; case knopAwait: case knopYield: byteCodeGenerator->StartStatement(pnode); funcInfo->AcquireLoc(pnode); Emit(pnode->AsParseNodeUni()->pnode1, byteCodeGenerator, funcInfo, false); EmitYield(pnode->AsParseNodeUni()->pnode1->location, pnode->location, byteCodeGenerator, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeUni()->pnode1); byteCodeGenerator->EndStatement(pnode); break; case knopYieldStar: byteCodeGenerator->StartStatement(pnode); EmitYieldStar(pnode->AsParseNodeUni(), byteCodeGenerator, funcInfo); byteCodeGenerator->EndStatement(pnode); break; case knopExportDefault: Emit(pnode->AsParseNodeExportDefault()->pnodeExpr, byteCodeGenerator, funcInfo, false); byteCodeGenerator->EmitAssignmentToDefaultModuleExport(pnode->AsParseNodeExportDefault()->pnodeExpr, funcInfo); funcInfo->ReleaseLoc(pnode->AsParseNodeExportDefault()->pnodeExpr); pnode = pnode->AsParseNodeExportDefault()->pnodeExpr; break; default: AssertMsg(0, "emit unhandled pnode op"); break; } if (fReturnValue && IsExpressionStatement(pnode, byteCodeGenerator->GetScriptContext()) && !pnode->IsPatternDeclaration()) { // If this statement may produce the global function's return value, copy its result to the return register. // fReturnValue implies global function, which implies that "return" is a parse error. Assert(funcInfo->IsGlobalFunction()); Assert(pnode->nop != knopReturn); byteCodeGenerator->Writer()->Reg2(Js::OpCode::Ld_A, ByteCodeGenerator::ReturnRegister, pnode->location); } }
40.271226
264
0.630358
[ "object", "model" ]
11e0a20c64fa2b06803e4e521a1ff16f6ec146b8
18,661
cpp
C++
applications/StabilizedCFDApplication/custom_elements/dss_ps.cpp
lcirrott/Kratos
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
[ "BSD-4-Clause" ]
2
2020-04-30T19:13:08.000Z
2021-04-14T19:40:47.000Z
applications/StabilizedCFDApplication/custom_elements/dss_ps.cpp
lcirrott/Kratos
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
[ "BSD-4-Clause" ]
1
2020-04-30T19:19:09.000Z
2020-05-02T14:22:36.000Z
applications/StabilizedCFDApplication/custom_elements/dss_ps.cpp
lcirrott/Kratos
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
[ "BSD-4-Clause" ]
1
2020-06-12T08:51:24.000Z
2020-06-12T08:51:24.000Z
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #include "dss_ps.h" #include "utilities/math_utils.h" namespace Kratos { /////////////////////////////////////////////////////////////////////////////////////////////////// // Life cycle template< unsigned int TDim > DSS_PS<TDim>::DSS_PS(IndexType NewId): DSS<TDim>(NewId) {} template< unsigned int TDim > DSS_PS<TDim>::DSS_PS(IndexType NewId, const NodesArrayType& ThisNodes): DSS<TDim>(NewId,ThisNodes) {} template< unsigned int TDim > DSS_PS<TDim>::DSS_PS(IndexType NewId, GeometryType::Pointer pGeometry): DSS<TDim>(NewId,pGeometry) {} template< unsigned int TDim > DSS_PS<TDim>::DSS_PS(IndexType NewId, GeometryType::Pointer pGeometry, PropertiesType::Pointer pProperties): DSS<TDim>(NewId,pGeometry,pProperties) {} template< unsigned int TDim > DSS_PS<TDim>::~DSS_PS() {} /////////////////////////////////////////////////////////////////////////////////////////////////// // Public Operations template< unsigned int TDim > Element::Pointer DSS_PS<TDim>::Create(IndexType NewId,NodesArrayType const& ThisNodes,PropertiesType::Pointer pProperties) const { return Element::Pointer(new DSS_PS(NewId, this->GetGeometry().Create(ThisNodes), pProperties)); } template< unsigned int TDim > void DSS_PS<TDim>::CalculateLocalVelocityContribution(MatrixType &rDampMatrix, VectorType &rRightHandSideVector, ProcessInfo &rCurrentProcessInfo) { const GeometryType& rGeom = this->GetGeometry(); const unsigned int NumNodes = rGeom.PointsNumber(); const unsigned int LocalSize = NumNodes*(TDim+1); // Resize and intialize output if( rDampMatrix.size1() != LocalSize ) rDampMatrix.resize(LocalSize,LocalSize,false); if( rRightHandSideVector.size() != LocalSize ) rRightHandSideVector.resize(LocalSize,false); rDampMatrix = ZeroMatrix(LocalSize,LocalSize); rRightHandSideVector = 
ZeroVector(LocalSize); // Get Shape function data Vector GaussWeights; Matrix ShapeFunctions; ShapeFunctionDerivativesArrayType ShapeDerivatives; this->CalculateGeometryData(GaussWeights,ShapeFunctions,ShapeDerivatives); const unsigned int NumGauss = GaussWeights.size(); // Iterate over integration points to evaluate local contribution for (unsigned int g = 0; g < NumGauss; g++) { const double GaussWeight = GaussWeights[g]; const ShapeFunctionsType& rN = row(ShapeFunctions,g); const ShapeFunctionDerivativesType& rDN_DX = ShapeDerivatives[g]; this->AddSystemTerms(g,GaussWeight,rN,rDN_DX,rCurrentProcessInfo,rDampMatrix,rRightHandSideVector); } // Rewrite local contribution into residual form (A*dx = b - A*x) VectorType U = ZeroVector(LocalSize); int LocalIndex = 0; for (unsigned int i = 0; i < NumNodes; ++i) { const array_1d<double,3> &rVel = this->GetGeometry()[i].FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < TDim; ++d) // Velocity Dofs U[LocalIndex++] = rVel[d]; U[LocalIndex++] = this->GetGeometry()[i].FastGetSolutionStepValue(PRESSURE); // Pressure Dof } noalias(rRightHandSideVector) -= prod(rDampMatrix, U); this->AddPressureSubscale(rDampMatrix,rRightHandSideVector,rCurrentProcessInfo); } //template< unsigned int TDim > //void DSS_PS<TDim>::CalculateMassMatrix(MatrixType &rMassMatrix, ProcessInfo &rCurrentProcessInfo) //{ // const GeometryType& rGeom = this->GetGeometry(); // const unsigned int NumNodes = rGeom.PointsNumber(); // const unsigned int LocalSize = NumNodes*(TDim+1); // // Resize and intialize output // if( rMassMatrix.size1() != LocalSize ) // rMassMatrix.resize(LocalSize,LocalSize,false); // rMassMatrix = ZeroMatrix(LocalSize,LocalSize); // // Get Shape function data // Vector GaussWeights; // Matrix ShapeFunctions; // ShapeFunctionDerivativesArrayType ShapeDerivatives; // this->CalculateGeometryData(GaussWeights,ShapeFunctions,ShapeDerivatives); // const unsigned int NumGauss = GaussWeights.size(); // // Iterate over integration 
points to evaluate local contribution // for (unsigned int g = 0; g < NumGauss; g++) // { // const double GaussWeight = GaussWeights[g]; // const ShapeFunctionsType& rN = row(ShapeFunctions,g); // const ShapeFunctionDerivativesType& rDN_DX = ShapeDerivatives[g]; // this->AddMassTerms(GaussWeight,rN,rMassMatrix); // /* Note on OSS and full projection: Riccardo says that adding the terms provided by // * AddMassStabilization (and incluiding their corresponding terms in the projeciton) // * could help reduce the non-linearity of the coupling between projection and u,p // * However, leaving them on gives a lot of trouble whith the Bossak scheme: // * think that we solve F - (1-alpha)*M*u^(n+1) - alpha*M*u^(n) - K(u^(n+1)) = 0 // * so the projection of the dynamic terms should be Pi( (1-alpha)*u^(n+1) - alpha*u^(n) ) // */ // if ( rCurrentProcessInfo[OSS_SWITCH] != 1.0 ) // this->AddMassStabilization(g,GaussWeight,rN,rDN_DX,rCurrentProcessInfo,rMassMatrix); // } // this->AddPressureSubscaleMass(rMassMatrix,rCurrentProcessInfo); //} /////////////////////////////////////////////////////////////////////////////////////////////////// // Input and output template< unsigned int TDim > std::string DSS_PS<TDim>::Info() const { std::stringstream buffer; buffer << "DSS_PS #" << this->Id(); return buffer.str(); } template< unsigned int TDim > void DSS_PS<TDim>::PrintInfo(std::ostream& rOStream) const { rOStream << "DSS_PS" << TDim << "D"; } /////////////////////////////////////////////////////////////////////////////////////////////////// // Protected functions /////////////////////////////////////////////////////////////////////////////////////////////////// template< unsigned int TDim > void DSS_PS<TDim>::CalculateStaticTau(double Density, double KinematicVisc, const array_1d<double,3> &Velocity, double ElemSize, const ProcessInfo& rProcessInfo, double &TauOne, double &TauTwo) { const double c1 = 8.0; const double c2 = 2.0; double VelNorm = Velocity[0]*Velocity[0]; for (unsigned 
int d = 1; d < TDim; d++) VelNorm += Velocity[d]*Velocity[d]; VelNorm = std::sqrt(VelNorm); double InvTau = Density * ( c1 * KinematicVisc / (ElemSize*ElemSize) + c2 * VelNorm / ElemSize ); TauOne = 1.0/InvTau; TauTwo = 0.0; } /////////////////////////////////////////////////////////////////////////////////////////////////// template< unsigned int TDim > void DSS_PS<TDim>::AddPressureSubscale(MatrixType& rLHS, VectorType& rRHS, ProcessInfo& rProcessInfo) { const unsigned int NumNodes = this->GetGeometry().PointsNumber(); const unsigned int BlockSize = TDim+1; const unsigned int LocalSize = BlockSize*NumNodes; const unsigned int ExtraDofs = NumNodes; /* * Full system matrix is a block matrix including extra Dofs which represent the pressure subscale. * These extera dofs are local to each element and will be statically condesed in this funcion. * Notation of the block matrix * * | rLHS Right | | u,p | | rRHS | * | Down A | *| ps | = | b | */ // MatrixType Right = ZeroMatrix(LocalSize,ExtraDofs); // MatrixType Down = ZeroMatrix(ExtraDofs,LocalSize); // MatrixType A = ZeroMatrix(ExtraDofs,ExtraDofs); // VectorType b = ZeroVector(ExtraDofs); // MatrixType DynAux = ZeroMatrix(LocalSize,ExtraDofs); MatrixType Right = ZeroMatrix(LocalSize,ExtraDofs+1); MatrixType Down = ZeroMatrix(ExtraDofs+1,LocalSize); MatrixType A = ZeroMatrix(ExtraDofs+1,ExtraDofs+1); VectorType b = ZeroVector(ExtraDofs+1); // MatrixType DynAux = ZeroMatrix(LocalSize,ExtraDofs+1); // Get Shape function data Vector GaussWeights; Matrix ShapeFunctions; ShapeFunctionDerivativesArrayType ShapeDerivatives; this->CalculateGeometryData(GaussWeights,ShapeFunctions,ShapeDerivatives); const unsigned int NumGauss = GaussWeights.size(); // Iterate over integration points to evaluate local contribution for (unsigned int g = 0; g < NumGauss; g++) { const double GaussWeight = GaussWeights[g]; const ShapeFunctionsType& rN = row(ShapeFunctions,g); const ShapeFunctionDerivativesType& rDN_DX = ShapeDerivatives[g]; // 
Interpolate nodal data on the integration point double Density; this->EvaluateInPoint(Density,DENSITY,rN); array_1d<double,3> BodyForce(3,0.0); this->EvaluateInPoint(BodyForce,BODY_FORCE,rN); //this->BodyForceTest(rProcessInfo,rN,BodyForce); array_1d<double,3> ConvVel(3,0.0); this->ResolvedConvectiveVelocity(ConvVel,rN); double ElemSize = this->ElementSize(); //double ElemSize = this->ElementSize(ConvVel,rDN_DX); double Viscosity = this->EffectiveViscosity(rN,rDN_DX,ElemSize,rProcessInfo); double TauOne; double TauTwo; this->CalculateStaticTau(Density,Viscosity,ConvVel,ElemSize,rProcessInfo,TauOne,TauTwo); Vector AGradN; this->ConvectionOperator(AGradN,ConvVel,rDN_DX); // Multiplying some quantities by density to have correct units Viscosity *= Density; // Dynamic viscosity BodyForce *= Density; // Force per unit of volume AGradN *= Density; // Convective term is always multiplied by density for (unsigned int i = 0; i < NumNodes; i++) { for (unsigned int j = 0; j < NumNodes; j++) { //A(i,j) -= GaussWeight * rN[i] * rN[j]; for (unsigned int d = 0; d < TDim; d++) { A(i,j) += GaussWeight * TauOne * rDN_DX(i,d) * rDN_DX(j,d); //A(i,j) += GaussWeight * rDN_DX(i,d) * TauOne * rDN_DX(j,d); Down(i,BlockSize*j+d) += GaussWeight * rN[i] * rDN_DX(j,d); Right(BlockSize*i+d,j) -= GaussWeight * rDN_DX(i,d) * rN[j]; // Right(BlockSize*i+d,j) += GaussWeight * rN[i] * rDN_DX(j,d); // Down(i,BlockSize*j+d) -= GaussWeight * rDN_DX(i,d) * rN[j]; // Down(i,BlockSize*j+d) -= GaussWeight * rDN_DX(i,d) * TauOne * AGradN[j]; // Down(i,BlockSize*j+TDim) -= GaussWeight * rDN_DX(i,d) * TauOne * rDN_DX(j,d); // DynAux(i,BlockSize*j+d) -= GaussWeight * rDN_DX(i,d) * TauOne * rN[j]; // Right(BlockSize*i+d,j) -= GaussWeight * rDN_DX(j,d) * TauOne * AGradN[i]; // Right(BlockSize*i+TDim,j) -= GaussWeight * rDN_DX(i,d) * TauOne * rDN_DX(j,d); // Right(BlockSize*i+d,j) += GaussWeight * AGradN[i] * rDN_DX(j,d); //Right(BlockSize*i+TDim,j) += GaussWeight * rDN_DX(i,d) * rDN_DX(j,d); 
//Right(BlockSize*i+d,j) -= GaussWeight * rDN_DX(i,d) * rN[j]; // b[i] -= GaussWeight * rDN_DX(i,d) * TauOne * BodyForce[d]; } } } } // Impose a restriction so that the local laplacian can be inverted: average of the solution of Ax = b is zero for (unsigned int i = 0; i < ExtraDofs; i++) { A(ExtraDofs,i) = 1.0; A(i,ExtraDofs) = 1.0; } /* FOR BDF2 // Dynamic contribution const Vector& rBDFcoeffs = rProcessInfo[BDF_COEFFICIENTS]; Down += rBDFcoeffs[0] * DynAux; for (unsigned int step = 1; step < 3; step++ ) { VectorType U = ZeroVector(LocalSize); int LocalIndex = 0; for (unsigned int i = 0; i < NumNodes; ++i) { const array_1d<double,3> &rVel = this->GetGeometry()[i].FastGetSolutionStepValue(VELOCITY,step); for (unsigned int d = 0; d < TDim; ++d) // Velocity Dofs U[LocalIndex++] = rVel[d]; //U[LocalIndex++] = this->GetGeometry()[i].FastGetSolutionStepValue(PRESSURE,step); // Pressure Dof LocalIndex++; // nothing dynamic about pressure } U *= rBDFcoeffs[step]; noalias(b) -= prod(DynAux, U); } */ // Rewrite local contribution into residual form (A*dx = b - A*x) VectorType U = ZeroVector(LocalSize); int LocalIndex = 0; for (unsigned int i = 0; i < NumNodes; ++i) { const array_1d<double,3> &rVel = this->GetGeometry()[i].FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < TDim; ++d) // Velocity Dofs U[LocalIndex++] = rVel[d]; U[LocalIndex++] = this->GetGeometry()[i].FastGetSolutionStepValue(PRESSURE); // Pressure Dof } noalias(b) -= prod(Down, U); // Condense the extra block back into the system MatrixType InvA; double det; MathUtils<double>::InvertMatrix(A,InvA,det); rLHS -= prod( Right, MatrixType( prod(InvA,Down)) ); rRHS -= prod( Right, VectorType( prod(InvA,b)) ); } template< unsigned int TDim > void DSS_PS<TDim>::AddPressureSubscaleMass(MatrixType& rLHS, ProcessInfo& rProcessInfo) { const unsigned int NumNodes = this->GetGeometry().PointsNumber(); const unsigned int BlockSize = TDim+1; const unsigned int LocalSize = BlockSize*NumNodes; const unsigned int 
ExtraDofs = NumNodes; /* * Full system matrix is a block matrix including extra Dofs which represent the pressure subscale. * These extra dofs are local to each element and will be statically condesed in this funcion. * Notation of the block matrix * * | rLHS Right | | u,p | | rRHS | * | Down A | *| ps | = | b | */ // MatrixType Right = ZeroMatrix(LocalSize,ExtraDofs); // MatrixType Down = ZeroMatrix(ExtraDofs,LocalSize); // MatrixType A = ZeroMatrix(ExtraDofs,ExtraDofs); // VectorType b = ZeroVector(ExtraDofs); // MatrixType DynAux = ZeroMatrix(LocalSize,ExtraDofs); MatrixType Right = ZeroMatrix(LocalSize,ExtraDofs+1); MatrixType Down = ZeroMatrix(ExtraDofs+1,LocalSize); MatrixType A = ZeroMatrix(ExtraDofs+1,ExtraDofs+1); VectorType b = ZeroVector(ExtraDofs+1); // Get Shape function data Vector GaussWeights; Matrix ShapeFunctions; ShapeFunctionDerivativesArrayType ShapeDerivatives; this->CalculateGeometryData(GaussWeights,ShapeFunctions,ShapeDerivatives); const unsigned int NumGauss = GaussWeights.size(); // Iterate over integration points to evaluate local contribution for (unsigned int g = 0; g < NumGauss; g++) { const double GaussWeight = GaussWeights[g]; const ShapeFunctionsType& rN = row(ShapeFunctions,g); const ShapeFunctionDerivativesType& rDN_DX = ShapeDerivatives[g]; // Interpolate nodal data on the integration point double Density; this->EvaluateInPoint(Density,DENSITY,rN); array_1d<double,3> BodyForce(3,0.0); this->EvaluateInPoint(BodyForce,BODY_FORCE,rN); //this->BodyForceTest(rProcessInfo,rN,BodyForce); array_1d<double,3> ConvVel(3,0.0); this->ResolvedConvectiveVelocity(ConvVel,rN); double ElemSize = this->ElementSize(); //double ElemSize = this->ElementSize(ConvVel,rDN_DX); double Viscosity = this->EffectiveViscosity(rN,rDN_DX,ElemSize,rProcessInfo); double TauOne; double TauTwo; this->CalculateStaticTau(Density,Viscosity,ConvVel,ElemSize,rProcessInfo,TauOne,TauTwo); Vector AGradN; this->ConvectionOperator(AGradN,ConvVel,rDN_DX); // Multiplying some 
quantities by density to have correct units Viscosity *= Density; // Dynamic viscosity BodyForce *= Density; // Force per unit of volume AGradN *= Density; // Convective term is always multiplied by density for (unsigned int i = 0; i < NumNodes; i++) { for (unsigned int j = 0; j < NumNodes; j++) { //A(i,j) -= GaussWeight * rN[i] * rN[j]; for (unsigned int d = 0; d < TDim; d++) { A(i,j) += GaussWeight * rDN_DX(i,d) * rDN_DX(j,d); //A(i,j) += GaussWeight * rDN_DX(i,d) * TauOne * rDN_DX(j,d); Down(i,BlockSize*j+d) -= GaussWeight * rDN_DX(i,d) * TauOne * rN[j]; //Right(BlockSize*j+d,i) -= GaussWeight * rDN_DX(i,d) * TauOne * AGradN[j]; //Right(BlockSize*j+d,i) -= GaussWeight * rDN_DX(i,d) * TauOne * rDN_DX(j,d); Right(BlockSize*j+d,i) -= GaussWeight * rDN_DX(i,d) * AGradN[j]; Right(BlockSize*j+d,i) -= GaussWeight * rDN_DX(i,d) * rDN_DX(j,d); b[i] -= GaussWeight * rDN_DX(i,d) * TauOne * BodyForce[d]; } } } } // Impose a restriction so that the local laplacian can be inverted: average of the solution of Ax = b is zero for (unsigned int i = 0; i < ExtraDofs; i++) { A(ExtraDofs,i) = 1.0; A(i,ExtraDofs) = 1.0; } // Condense the extra block back into the system MatrixType InvA; double det; MathUtils<double>::InvertMatrix(A,InvA,det); rLHS -= prod( Right, MatrixType( prod(InvA,Down)) ); } /////////////////////////////////////////////////////////////////////////////////////////////////// // Private functions /////////////////////////////////////////////////////////////////////////////////////////////////// // serializer template< unsigned int TDim > void DSS_PS<TDim>::save(Serializer& rSerializer) const { typedef DSS<TDim> _basetype; KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, _basetype ); } template< unsigned int TDim > void DSS_PS<TDim>::load(Serializer& rSerializer) { typedef DSS<TDim> _basetype; KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, _basetype); } /////////////////////////////////////////////////////////////////////////////////////////////////// // Class template 
instantiation /////////////////////////////////////////////////////////////////////////////////////////////////// template class DSS_PS<2>; template class DSS_PS<3>; }
36.952475
146
0.597717
[ "shape", "vector" ]
11e0bdb2996c53347a6fd3ff1ef7ac0651cee5c4
12,179
cc
C++
src/malloc_extension.cc
anony88888/google-perftools
d8a402bc65011765befa2103f3d000efc9a90063
[ "BSD-3-Clause" ]
1
2016-05-08T22:31:48.000Z
2016-05-08T22:31:48.000Z
src/malloc_extension.cc
anony88888/google-perftools
d8a402bc65011765befa2103f3d000efc9a90063
[ "BSD-3-Clause" ]
null
null
null
src/malloc_extension.cc
anony88888/google-perftools
d8a402bc65011765befa2103f3d000efc9a90063
[ "BSD-3-Clause" ]
null
null
null
// Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Sanjay Ghemawat <opensource@google.com> #include "config.h" #include <assert.h> #include <stdio.h> #include <string.h> #include <stdio.h> #if defined HAVE_STDINT_H #include <stdint.h> #elif defined HAVE_INTTYPES_H #include <inttypes.h> #else #include <sys/types.h> #endif #include <string> #include HASH_SET_H // defined in config.h #include "base/dynamic_annotations.h" #include "base/sysinfo.h" // for FillProcSelfMaps #include "google/malloc_extension.h" #include "maybe_threads.h" using STL_NAMESPACE::string; static void DumpAddressMap(string* result) { *result += "\nMAPPED_LIBRARIES:\n"; // We keep doubling until we get a fit const size_t old_resultlen = result->size(); for (int amap_size = 10240; amap_size < 10000000; amap_size *= 2) { result->resize(old_resultlen + amap_size); const int bytes_written = tcmalloc::FillProcSelfMaps(&((*result)[old_resultlen]), amap_size); if (bytes_written < amap_size - 1) { // we fit! (*result)[old_resultlen + bytes_written] = '\0'; result->resize(old_resultlen + bytes_written); return; } } result->reserve(old_resultlen); // just don't print anything } // Note: this routine is meant to be called before threads are spawned. void MallocExtension::Initialize() { static bool initialize_called = false; if (initialize_called) return; initialize_called = true; #ifdef __GLIBC__ // GNU libc++ versions 3.3 and 3.4 obey the environment variables // GLIBCPP_FORCE_NEW and GLIBCXX_FORCE_NEW respectively. Setting // one of these variables forces the STL default allocator to call // new() or delete() for each allocation or deletion. Otherwise // the STL allocator tries to avoid the high cost of doing // allocations by pooling memory internally. However, tcmalloc // does allocations really fast, especially for the types of small // items one sees in STL, so it's better off just using us. // TODO: control whether we do this via an environment variable? 
setenv("GLIBCPP_FORCE_NEW", "1", false /* no overwrite*/); setenv("GLIBCXX_FORCE_NEW", "1", false /* no overwrite*/); // Now we need to make the setenv 'stick', which it may not do since // the env is flakey before main() is called. But luckily stl only // looks at this env var the first time it tries to do an alloc, and // caches what it finds. So we just cause an stl alloc here. string dummy("I need to be allocated"); dummy += "!"; // so the definition of dummy isn't optimized out #endif /* __GLIBC__ */ } // Default implementation -- does nothing MallocExtension::~MallocExtension() { } bool MallocExtension::VerifyAllMemory() { return true; } bool MallocExtension::VerifyNewMemory(void* p) { return true; } bool MallocExtension::VerifyArrayNewMemory(void* p) { return true; } bool MallocExtension::VerifyMallocMemory(void* p) { return true; } bool MallocExtension::GetNumericProperty(const char* property, size_t* value) { return false; } bool MallocExtension::SetNumericProperty(const char* property, size_t value) { return false; } void MallocExtension::GetStats(char* buffer, int length) { assert(length > 0); buffer[0] = '\0'; } bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total, int histogram[kMallocHistogramSize]) { *blocks = 0; *total = 0; memset(histogram, 0, sizeof(histogram)); return true; } void** MallocExtension::ReadStackTraces() { return NULL; } void** MallocExtension::ReadHeapGrowthStackTraces() { return NULL; } void MallocExtension::MarkThreadIdle() { // Default implementation does nothing } void MallocExtension::ReleaseFreeMemory() { // Default implementation does nothing } void MallocExtension::SetMemoryReleaseRate(double rate) { // Default implementation does nothing } double MallocExtension::GetMemoryReleaseRate() { return -1.0; } // The current malloc extension object. We also keep a pointer to // the default implementation so that the heap-leak checker does not // complain about a memory leak. 
static pthread_once_t module_init = PTHREAD_ONCE_INIT; static MallocExtension* default_instance = NULL; static MallocExtension* current_instance = NULL; static void InitModule() { default_instance = new MallocExtension; current_instance = default_instance; } MallocExtension* MallocExtension::instance() { perftools_pthread_once(&module_init, InitModule); return current_instance; } void MallocExtension::Register(MallocExtension* implementation) { perftools_pthread_once(&module_init, InitModule); // When running under valgrind, our custom malloc is replaced with // valgrind's one and malloc extensions will not work. if (!RunningOnValgrind()) { current_instance = implementation; } } // ----------------------------------------------------------------------- // Heap sampling support // ----------------------------------------------------------------------- namespace { // Accessors uintptr_t Count(void** entry) { return reinterpret_cast<uintptr_t>(entry[0]); } uintptr_t Size(void** entry) { return reinterpret_cast<uintptr_t>(entry[1]); } uintptr_t Depth(void** entry) { return reinterpret_cast<uintptr_t>(entry[2]); } void* PC(void** entry, int i) { return entry[3+i]; } // Hash table routines for grouping all entries with same stack trace struct StackTraceHash { size_t operator()(void** entry) const { uintptr_t h = 0; for (int i = 0; i < Depth(entry); i++) { h += reinterpret_cast<uintptr_t>(PC(entry, i)); h += h << 10; h ^= h >> 6; } h += h << 3; h ^= h >> 11; return h; } // Less operator for MSVC's hash containers. bool operator()(void** entry1, void** entry2) const { if (Depth(entry1) != Depth(entry2)) return Depth(entry1) < Depth(entry2); for (int i = 0; i < Depth(entry1); i++) { if (PC(entry1, i) != PC(entry2, i)) { return PC(entry1, i) < PC(entry2, i); } } return false; // entries are equal } // These two public members are required by msvc. 4 and 8 are the // default values. 
static const size_t bucket_size = 4; static const size_t min_buckets = 8; }; struct StackTraceEqual { bool operator()(void** entry1, void** entry2) const { if (Depth(entry1) != Depth(entry2)) return false; for (int i = 0; i < Depth(entry1); i++) { if (PC(entry1, i) != PC(entry2, i)) { return false; } } return true; } }; #ifdef _WIN32 typedef HASH_NAMESPACE::hash_set<void**, StackTraceHash> StackTraceTable; #else typedef HASH_NAMESPACE::hash_set<void**, StackTraceHash, StackTraceEqual> StackTraceTable; #endif void PrintCountAndSize(string* result, uintptr_t count, uintptr_t size) { char buf[100]; snprintf(buf, sizeof(buf), "%6lld: %8lld [%6lld: %8lld] @", static_cast<long long>(count), static_cast<long long>(size), static_cast<long long>(count), static_cast<long long>(size)); *result += buf; } void PrintHeader(string* result, const char* label, void** entries) { // Compute the total count and total size uintptr_t total_count = 0; uintptr_t total_size = 0; for (void** entry = entries; Count(entry) != 0; entry += 3 + Depth(entry)) { total_count += Count(entry); total_size += Size(entry); } *result += string("heap profile: "); PrintCountAndSize(result, total_count, total_size); *result += string(" ") + label + "\n"; } void PrintStackEntry(string* result, void** entry) { PrintCountAndSize(result, Count(entry), Size(entry)); for (int i = 0; i < Depth(entry); i++) { char buf[32]; snprintf(buf, sizeof(buf), " %p", PC(entry, i)); *result += buf; } *result += "\n"; } } void MallocExtension::GetHeapSample(string* result) { void** entries = ReadStackTraces(); if (entries == NULL) { *result += "This malloc implementation does not support sampling.\n" "As of 2005/01/26, only tcmalloc supports sampling, and you\n" "are probably running a binary that does not use tcmalloc.\n"; return; } // Group together all entries with same stack trace StackTraceTable table; for (void** entry = entries; Count(entry) != 0; entry += 3 + Depth(entry)) { StackTraceTable::iterator iter = 
table.find(entry); if (iter == table.end()) { // New occurrence table.insert(entry); } else { void** canonical = *iter; canonical[0] = reinterpret_cast<void*>(Count(canonical) + Count(entry)); canonical[1] = reinterpret_cast<void*>(Size(canonical) + Size(entry)); } } PrintHeader(result, "heap", entries); for (StackTraceTable::iterator iter = table.begin(); iter != table.end(); ++iter) { PrintStackEntry(result, *iter); } DumpAddressMap(result); delete[] entries; } void MallocExtension::GetHeapGrowthStacks(string* result) { void** entries = ReadHeapGrowthStackTraces(); if (entries == NULL) { *result += "This malloc implementation does not support " "ReadHeapGrowthStackTraces().\n" "As of 2005/09/27, only tcmalloc supports this, and you\n" "are probably running a binary that does not use tcmalloc.\n"; return; } // Do not canonicalize the stack entries, so that we get a // time-ordered list of stack traces, which may be useful if the // client wants to focus on the latest stack traces. PrintHeader(result, "growth", entries); for (void** entry = entries; Count(entry) != 0; entry += 3 + Depth(entry)) { PrintStackEntry(result, entry); } delete[] entries; DumpAddressMap(result); } // These are C shims that work on the current instance. 
#define C_SHIM(fn, retval, paramlist, arglist) \ extern "C" retval MallocExtension_##fn paramlist { \ return MallocExtension::instance()->fn arglist; \ } C_SHIM(VerifyAllMemory, bool, (), ()); C_SHIM(VerifyNewMemory, bool, (void* p), (p)); C_SHIM(VerifyArrayNewMemory, bool, (void* p), (p)); C_SHIM(VerifyMallocMemory, bool, (void* p), (p)); C_SHIM(MallocMemoryStats, bool, (int* blocks, size_t* total, int histogram[kMallocHistogramSize]), (blocks, total, histogram)); C_SHIM(GetStats, void, (char* buffer, int buffer_length), (buffer, buffer_length)); C_SHIM(GetNumericProperty, bool, (const char* property, size_t* value), (property, value)); C_SHIM(SetNumericProperty, bool, (const char* property, size_t value), (property, value)); C_SHIM(MarkThreadIdle, void, (), ()); C_SHIM(ReleaseFreeMemory, void, (), ());
33.275956
90
0.680926
[ "object" ]