id
int64
0
877k
file_name
stringlengths
3
109
file_path
stringlengths
13
185
content
stringlengths
31
9.38M
size
int64
31
9.38M
language
stringclasses
1 value
extension
stringclasses
11 values
total_lines
int64
1
340k
avg_line_length
float64
2.18
149k
max_line_length
int64
7
2.22M
alphanum_fraction
float64
0
1
repo_name
stringlengths
6
66
repo_stars
int64
94
47.3k
repo_forks
int64
0
12k
repo_open_issues
int64
0
3.4k
repo_license
stringclasses
11 values
repo_extraction_date
stringclasses
197 values
exact_duplicates_redpajama
bool
2 classes
near_duplicates_redpajama
bool
2 classes
exact_duplicates_githubcode
bool
2 classes
exact_duplicates_stackv2
bool
1 class
exact_duplicates_stackv1
bool
2 classes
near_duplicates_githubcode
bool
2 classes
near_duplicates_stackv1
bool
2 classes
near_duplicates_stackv2
bool
1 class
1,534,221
generator.h
KimangKhenng_Traffic-SImulation-and-Visualization/Utilities/generator.h
#ifndef GENERATOR_H #define GENERATOR_H #include "vehiclesgenerator.h" #include "UI/simulationscene.h" class Generator : public QObject { Q_OBJECT public: Generator(SimulationScene *scene); Generator(); ~Generator(); void setMethod(const GENMETHOD& x); void startGenerator(); void stopGenerator(); void startAutoGeneraion(); void setTimer(const int& N_S,const int& S_N,const int& E_W,const int& W_E); void setScene(SimulationScene *scene); void turnOn(); void turnOff(); void setMode(const VEHICLEMETHOD &mode); void setVisionOn(const bool& vision); void setInteraction(const bool& interact); public slots: void makeNorthSouth(); void makeSouthNorth(); void makeWestEast(); void makeEastWest(); private: SimulationScene *m_scene; QList<QTimer*> m_timer; int m_number_N_S; int m_number_S_N; int m_number_W_E; int m_number_E_W; int m_time_N_S; int m_time_S_N; int m_time_W_E; int m_time_E_W; GENMETHOD m_method; VEHICLEMETHOD m_mode; bool m_running_state; bool m_VisionOn; bool m_IsInteraction; }; #endif // GENERATOR_H
1,158
C++
.h
45
21.622222
79
0.690712
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,222
trafficdetector.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/trafficdetector.h
#ifndef TRAFFICDETECTOR_H #define TRAFFICDETECTOR_H #define LENGTH 80 #include <QElapsedTimer> #include <QGraphicsItem> #include <QPainter> #include "Vehicle/vehicle.h" class TrafficDetector : public QObject,public QGraphicsItem { Q_OBJECT Q_INTERFACES(QGraphicsItem) public: //@Override QRectF boundingRect() const Q_DECL_OVERRIDE; void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget) Q_DECL_OVERRIDE; //Override TrafficDetector(float length, QGraphicsItem *parent = nullptr); ~TrafficDetector() override; float getFlow() const; bool isContainedVehicles() const; int getNumbersOfVehicles() const; float getDensity() const; float getSaturationFlowRate() const; float getAverageSpeed() const; float getHeadWay() const; void turnOffDisplay(); void turnOnDisplay(); void turnOn(); void turnOff(); // void startEngine(); // void stopEngine(); QElapsedTimer *getTimer() const; // bool getIs_active() const; public slots: void advance(int phase) Q_DECL_OVERRIDE; void forward(); private: float m_detector_length; float m_flow; int m_number_of_vehicle; float m_density; float m_saturation_flow_rate; QElapsedTimer *m_timer; //QTimer *m_counter; bool m_is_active; bool m_isOn; // QVector<float> m_flow_data; // QVector<int> m_number_data; // QVector<float> m_density_data; // QVector<float> m_headway_data; }; #endif // TRAFFICDETECTOR_H
1,514
C++
.h
52
25.634615
107
0.719973
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,223
trafficcontroller.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/trafficcontroller.h
#ifndef TRAFFICCONTROLLER_H #define TRAFFICCONTROLLER_H #include "TrafficLight/trafficlight.h" #include "TrafficLight/lightwidget.h" #include "trafficdetector.h" #include "commonenum.h" class TrafficController : public QGraphicsItemGroup { public: TrafficController(QGraphicsItemGroup *parent = nullptr); TrafficLight* getTrafficLight(region r); //void turnTrafficOn(); //void turnTrafficOff(); void turnOffDetector(); void turnOnDetector(); void showDetector(); void hideDetector(); void startTrafficLightAll(); void stopTrafficLightAll(); void manualControl(); void setLightDuration(const int& green,const int& left, const int& yellow,const int& red); QList<TrafficDetector *> getDetector(); QList<TrafficDetector *> getDetectorByRegion(region x) const; QList<TrafficLight *> getTraffic_light(); QList<QElapsedTimer *> *getTimer(); void updateDetectors(); void turnOffLightInteraction(); void turnOnLightInteraction(); private: //QStateMachine *m_state; QList<TrafficLight *> m_traffic_light; QList<QState *> *m_state_list; QList<TrafficDetector *> m_detector; }; #endif // TRAFFICCONTROLLER_H
1,195
C++
.h
35
30.314286
94
0.750217
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,224
vehiclesight.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/Vehicle/vehiclesight.h
#ifndef VEHICLESIGHT_H #define VEHICLESIGHT_H #include <QGraphicsRectItem> class Vehicle; class VehicleSight :public QGraphicsRectItem { public: VehicleSight(const QRectF& rec,QGraphicsItem *parent = nullptr); Vehicle *vehicle() const; private: Vehicle *m_vehicle; }; #endif // VEHICLESIGHT_H
308
C++
.h
13
21.538462
68
0.791096
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,225
vehicle.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/Vehicle/vehicle.h
#ifndef VEHICLE_H #define VEHICLE_H #define GAPACCAPANCE 10 #include "Entities/TrafficLight/trafficlight.h" #include "vehiclesight.h" #include <QGraphicsPixmapItem> #include <QObject> #include <qmath.h> static const float ACCER = 0.01; class SimulationScene; class Vehicle: public QObject,public QGraphicsPixmapItem { Q_OBJECT Q_INTERFACES(QGraphicsItem) public: Vehicle(QGraphicsItem *parent = nullptr); //////// /// \brief Vehicle /// Prevent Copying Vehicle(const Vehicle&) = delete; /////// /// \brief operator = /// \return Address of Vehicle /// Prevent using operator = Vehicle& operator = (const Vehicle&) = delete; ~Vehicle(); //// Overloading Function QRectF boundingRect() const Q_DECL_OVERRIDE; QPainterPath shape() const Q_DECL_OVERRIDE; //void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget) Q_DECL_OVERRIDE; // Overloading Funcion ////////////////////// /// \brief rotate_to_point /// \param point = point for vehicle to rotate to /// use to rotate to a specfic point /// ////////////////// void rotate_to_point(QPointF point); ///////////////////// /// \brief extract_coordinate /// \param path = QPainterPath as path to follow /// Give vehile a path to follow /// path = QPainterPath /// Draw By Yourself void extract_coordinate(QPainterPath path); ///////////////////// /// \brief setDirection /// \param dir = region for vehicle to appear /// set turning direction for vehicle void setDirection(Direction dir); ///////////////////// /// \brief setTrafficLight /// \param light /// set Traffic to be obeyed for vehicle /// //////////// void initialize(); ///////////////// /// \brief setRegion /// \param r /// set Which region vehicle belong to void setRegion(region r); /// //////////// bool is_in_stop_point(); QList<QPointF> get_path() const; bool Isinthejunction(); QPointF get_position() const; int get_current_index() const; QPointF get_initial_path() const; region getRegion() const; Direction getDir() const; void setDir(const Direction 
&dir); void turnOnSightSeeing(); void turnOffSightSeeing(); void turnOnEngine(); void turnOffEngine(); bool isContainedSignal() const; void setMode(const VEHICLEMETHOD &mode); bool isDeletable() const; qreal getSpeed() const; void turnOffInteraction(); void turnOnInteraction(); //Reimplement Event // void mousePressEvent(QGraphicsSceneMouseEvent *mouseEvent) override; void hoverEnterEvent(QGraphicsSceneHoverEvent *event) override; void hoverLeaveEvent(QGraphicsSceneHoverEvent *event) override; void mousePressEvent(QGraphicsSceneMouseEvent *event) override; public slots: void advance(int phase) Q_DECL_OVERRIDE; void update(const VEHICLEMETHOD& mode = VEHICLEMETHOD::SIGHTSEEING); //void forward(); private: QPixmap generateImage() const; Vehicle *getCollding(); Vehicle *nextVehicle(); SimulationScene *myScene() const; double distanceToOtherVehicle(QGraphicsItem *v) const; void adjustSpeedIntersection(); void adjustSpeedIntersection(Vehicle *leader); bool ifAllowed() const; bool hasInfront(); bool is_enter_the_junction() const; bool isAboutToCrash() const; void reset_speed(); void decelerate(QPointF rhs); void accelerate(); void accelerate(Vehicle *leader); void stop_advance(); bool isInsideIntersection(); qreal m_angle; qreal m_speed; qreal m_acceleration; // QColor m_color; QList<QPointF> m_path_to_follow; QPointF m_destination; int m_point_index; int m_step_count; bool m_driving_state; VehicleSight *m_sightseeing; VehicleSight *m_sightseeing_small; Direction m_dir; region m_region; //QTimer *m_internal_timer; VEHICLEMETHOD m_mode; bool m_Is_deletable; Vehicle* m_leader; }; #endif // VEHICLE_H
4,066
C++
.h
126
27.753968
109
0.67388
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,226
trafficlight.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/TrafficLight/trafficlight.h
#ifndef TRAFFICLIGHT_H #define TRAFFICLIGHT_H #include <QState> #include <QStateMachine> #include <QFinalState> #include "lightwidget.h" #include "lightwidgetleft.h" #include "commonenum.h" class TrafficLight :public QObject, public QGraphicsItem { Q_OBJECT Q_INTERFACES(QGraphicsItem) public: //Override TrafficLight(region re,QGraphicsItem *parent = nullptr); TrafficLight(QGraphicsItem *parent = nullptr); ~TrafficLight() override; //@Override QRectF boundingRect() const Q_DECL_OVERRIDE; void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget) Q_DECL_OVERRIDE; region getRegion(); bool checkDir(Direction dir); void setManualControl(); void setUpFacilities(); void setDuration(const int &left,const int &yellow,const int &green,const int &red); void setInitialState(const STATE_MACHINE& state); void startTrafficLight(); void stopTrafficLight(); LightWidget *getMainGreen() const; LightWidget *getMainRed() const; LightWidget *getMainYellow() const; LightWidget *getLeftGreen() const; QList<LightWidget *> *getLight() const; void setRegion(const region &region); TRAFFICMODE getMode() const; void setMode(const TRAFFICMODE &mode); void turnOffInteraction(); void turnOnInteraction(); protected: void hoverEnterEvent(QGraphicsSceneHoverEvent *event) override; void hoverLeaveEvent(QGraphicsSceneHoverEvent *event) override; void mousePressEvent(QGraphicsSceneMouseEvent *event) override; private: QState *makeState(LightWidget *light, int duration,QState *parent = nullptr); int m_red_duration; int m_left_duration; int m_main_green_duration; int m_yellow_duration; LightWidget *m_main_light_green; LightWidget *m_main_light_red; LightWidget *m_main_light_yellow; LightWidget *m_left_light; QList<LightWidget *> *m_light; region m_region; TRAFFICMODE m_mode; QState *m_MainGreen_Going_Left; QState *m_Left_Going_Yellow; QState *m_Yellow_Going_Red; QState *m_Red_Going_Yellow; QState *m_Yellow_Going_Green; QStateMachine *m_state_machine; }; #endif // TRAFFICLIGHT_H
2,199
C++
.h
63
30.746032
107
0.74589
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,227
lightwidget.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/TrafficLight/lightwidget.h
#ifndef LIGHTWIDGET_H #define LIGHTWIDGET_H #include <QObject> #include <QColor> #include <QPainter> #include <QGraphicsItem> #include <QStyleOptionGraphicsItem> const static float LightSize = 25; const static float WidgetDimension = 25; const static float LightScale = 0.6; class LightWidget :public QObject,/*public QGraphicsLayoutItem,*/ public QGraphicsItem { Q_OBJECT Q_INTERFACES(QGraphicsItem) // Q_INTERFACES(QGraphicsLayoutItem) Q_PROPERTY(bool on READ isOn WRITE setOn) public: LightWidget(const QColor &color,QGraphicsItem *parent = nullptr); // Virtual Function Area (Must be implemented) QRectF boundingRect() const Q_DECL_OVERRIDE; void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget) Q_DECL_OVERRIDE; // void setGeometry(const QRectF &rect); // QSizeF sizeHint(Qt::SizeHint which, const QSizeF &constraint) const; // End Virtual Function Area QColor getColor() const; bool isOn() const; void setOn(bool on); void setColor(const QColor &color); void TurnOnInteraction(); void TurnOffInteraction(); protected: virtual void mousePressEvent(QGraphicsSceneMouseEvent *event) Q_DECL_OVERRIDE; virtual void hoverEnterEvent(QGraphicsSceneHoverEvent *event) Q_DECL_OVERRIDE; virtual void hoverLeaveEvent(QGraphicsSceneHoverEvent *event) Q_DECL_OVERRIDE; public slots: void turnOff(); void turnOn(); private: QColor m_color; bool m_on; bool m_IsClickable; }; #endif // LIGHTWIDGET_H
1,527
C++
.h
43
32.27907
107
0.764905
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,228
lightwidgetleft.h
KimangKhenng_Traffic-SImulation-and-Visualization/Entities/TrafficLight/lightwidgetleft.h
#ifndef LIGHTWIDGETLEFT_H #define LIGHTWIDGETLEFT_H #include "lightwidget.h" class LightWidgetLeft : public LightWidget { public: LightWidgetLeft(const QColor &color,QGraphicsItem *parent = nullptr); void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget) Q_DECL_OVERRIDE; }; #endif // LIGHTWIDGETLEFT_H
346
C++
.h
10
32.5
107
0.807808
KimangKhenng/Traffic-SImulation-and-Visualization
37
11
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,238
main.cc
ppLorins_aurora/src/main.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "gflags/gflags.h" #include "glog/logging.h" #include "global/global_env.h" int main(int argc,char** argv) { google::ParseCommandLineFlags(&argc, &argv, true); google::InitGoogleLogging(argv[0]); FLAGS_log_dir = "."; FLAGS_logbuflevel = -1; try { //Start the whole thing. ::RaftCore::Global::GlobalEnv::InitialEnv(); ::RaftCore::Global::GlobalEnv::RunServer(); } catch (std::exception &e) { LOG(ERROR) << "got an exception:" << e.what(); throw e; } return 0; }
1,326
C++
.cc
33
36.545455
73
0.691888
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,239
follower_entity.cc
ppLorins_aurora/src/leader/follower_entity.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/comm_view.h" #include "leader/leader_view.h" #include "global/global_env.h" #include "storage/storage.h" #include "leader/follower_entity.h" namespace RaftCore::Leader { using ::RaftCore::Common::CommonView; using ::RaftCore::Leader::LeaderView; using ::RaftCore::Global::GlobalEnv; using ::RaftCore::Storage::StorageMgr; using ::RaftCore::Service::OwnershipDelegator; const char* FollowerEntity::m_status_macro_names[] = { "NORMAL","RESYNC_LOG","RESYNC_DATA"}; FollowerEntity::FollowerEntity(const std::string &follower_addr, FollowerStatus status, uint32_t joint_consensus_flag, std::shared_ptr<CompletionQueue> input_cq) noexcept { this->m_shp_channel_pool.reset(new ChannelPool(follower_addr,::RaftCore::Config::FLAGS_channel_pool_size)); auto _channel = this->m_shp_channel_pool->GetOneChannel(); uint32_t _notify_threads_num = ::RaftCore::Config::FLAGS_notify_cq_threads * ::RaftCore::Config::FLAGS_notify_cq_num; for (std::size_t n = 0; n < _notify_threads_num; ++n) { this->m_append_client_pool[n].reset(new ClientPool<AppendEntriesAsyncClient>(this)); this->m_commit_client_pool[n].reset(new ClientPool<CommitEntriesAsyncClient>(this)); } uint32_t _pool_size = ::RaftCore::Config::FLAGS_client_pool_size; for (std::size_t i = 0; i 
< _pool_size; ++i) { uint32_t _idx = i % _notify_threads_num; auto shp_cq = input_cq; if (!shp_cq) shp_cq = GlobalEnv::GetClientCQInstance(_idx); auto* _p_client = new AppendEntriesAsyncClient(_channel, shp_cq); auto _shp_client = _p_client->OwnershipDelegator<AppendEntriesAsyncClient>::GetOwnership(); this->m_append_client_pool[_idx]->Back(_shp_client); } uint32_t _group_commit = ::RaftCore::Config::FLAGS_group_commit_count; uint32_t _commit_client_size = _pool_size / _group_commit; CHECK(_commit_client_size > 0) << "pool_size:" << _pool_size << ",group_commit:" << _commit_client_size; VLOG(89) << "debug commit client size:" << _commit_client_size << ",addr:" << this->my_addr; for (std::size_t i = 0; i < _commit_client_size; ++i) { uint32_t _idx = i % _notify_threads_num; auto shp_cq = input_cq; if (!shp_cq) shp_cq = GlobalEnv::GetClientCQInstance(_idx); auto* _p_client = new CommitEntriesAsyncClient(_channel, shp_cq); auto _shp_client = _p_client->OwnershipDelegator<CommitEntriesAsyncClient>::GetOwnership(); this->m_commit_client_pool[_idx]->Back(_shp_client); } this->m_joint_consensus_flag = joint_consensus_flag; this->my_addr = follower_addr; this->m_status = status; this->m_last_sent_committed.store(CommonView::m_zero_log_id); } FollowerEntity::~FollowerEntity() noexcept{} bool FollowerEntity::UpdateLastSentCommitted(const LogIdentifier &to) noexcept { while (true) { auto _cur_last_commit = this->m_last_sent_committed.load(); if (to <= _cur_last_commit) return false; if (!this->m_last_sent_committed.compare_exchange_weak(_cur_last_commit, to)) continue; VLOG(89) << "m_last_sent_committed update to:" << to << ", addr:" << this->my_addr; break; } return true; } std::shared_ptr<AppendEntriesAsyncClient> FollowerEntity::FetchAppendClient(void* &pool) noexcept { auto _tid = std::this_thread::get_id(); auto &_uptr_pool = this->m_append_client_pool[LeaderView::m_notify_thread_mapping[_tid]]; pool = _uptr_pool.get(); return _uptr_pool->Fetch(); } 
std::shared_ptr<CommitEntriesAsyncClient> FollowerEntity::FetchCommitClient(void* &pool) noexcept { auto _tid = std::this_thread::get_id(); auto &_uptr_pool = this->m_commit_client_pool[LeaderView::m_notify_thread_mapping[_tid]]; pool = _uptr_pool.get(); return _uptr_pool->Fetch(); } }
4,647
C++
.cc
89
47.280899
121
0.688383
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,240
leader_view.cc
ppLorins_aurora/src/leader/leader_view.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/comm_defs.h" #include "common/error_code.h" #include "tools/timer.h" #include "tools/lock_free_priority_queue.h" #include "binlog/binlog_singleton.h" #include "storage/storage_singleton.h" #include "state/state_mgr.h" #include "election/election.h" #include "member/member_manager.h" #include "leader/follower_entity.h" #include "client/client_framework.h" #include "service/service.h" #include "leader/leader_view.h" namespace RaftCore::Leader { using ::raft::AppendEntriesRequest; using ::raft::AppendEntriesResponse; using ::raft::CommitEntryRequest; using ::raft::CommitEntryResponse; using ::RaftCore::DataStructure::LockFreePriotityQueue; using ::RaftCore::Member::MemberMgr; using ::RaftCore::Member::EJointStatus; using ::RaftCore::Client::UnarySyncClient; using ::RaftCore::Service::Write; std::unordered_map<std::string,TypePtrFollowerEntity> LeaderView::m_hash_followers; TrivialLockDoubleList<MemoryLogItemLeader> LeaderView::m_entity_pending_list( std::shared_ptr<MemoryLogItemLeader>(new MemoryLogItemLeader(0x0, 0x0)), std::shared_ptr<MemoryLogItemLeader>(new MemoryLogItemLeader(_MAX_UINT32_, _MAX_UINT64_))); std::condition_variable LeaderView::m_cv; std::mutex LeaderView::m_cv_mutex; const char* 
LeaderView::m_invoker_macro_names[] = { "CLIENT_RPC","BACKGROUP_THREAD" }; std::shared_timed_mutex LeaderView::m_hash_followers_mutex; LockFreeUnorderedSingleList<DoubleListNode<MemoryLogItemLeader>> LeaderView::m_garbage; std::atomic<LogIdentifier> LeaderView::m_last_cut_log; LeaderView::ServerStatus LeaderView::m_status = LeaderView::ServerStatus::NORMAL; TrivialLockSingleList<CutEmptyContext> LeaderView::m_cut_empty_list(std::shared_ptr<CutEmptyContext>(new CutEmptyContext(-1)), std::shared_ptr<CutEmptyContext>(new CutEmptyContext(1))); LockFreeUnorderedSingleList<SingleListNode<CutEmptyContext>> LeaderView::m_cut_empty_garbage; std::atomic<uint32_t> LeaderView::m_last_log_waiting_num; std::unordered_map<std::thread::id,uint32_t> LeaderView::m_notify_thread_mapping; //To avoid issues caused by including header files mutually. using ::raft::ErrorCode; using ::RaftCore::BinLog::BinLogGlobal; using ::RaftCore::BinLog::FileMetaData; using ::RaftCore::DataStructure::LockFreeQueue; using ::RaftCore::DataStructure::LockFreeQueueBase; using ::RaftCore::Storage::StorageMgr; using ::RaftCore::Storage::StorageGlobal; using ::RaftCore::Timer::GlobalTimer; using ::RaftCore::State::StateMgr; using ::RaftCore::State::RaftRole; using ::RaftCore::Election::ElectionMgr; using ::RaftCore::Leader::FollowerStatus; using ::RaftCore::Common::ReadLock; using ::RaftCore::Tools::TypeSysTimePoint; void LeaderView::Initialize(const ::RaftCore::Topology& _topo) noexcept { CommonView::Initialize(); //Follower initialization must being after threads-mapping initialization. 
for (const auto & _follower_addr : _topo.m_followers) m_hash_followers[_follower_addr] = std::shared_ptr<FollowerEntity>(new FollowerEntity(_follower_addr)); int consumer_threads_num = ::RaftCore::Config::FLAGS_lockfree_queue_consumer_threads_num; if (consumer_threads_num == 0) consumer_threads_num = ::RaftCore::Common::CommonView::m_cpu_cores * 2; auto _heartbeat = [&]()->bool { if (!::RaftCore::Config::FLAGS_do_heartbeat) return true; BroadcastHeatBeat(); //Unit test need a switch. return !::RaftCore::Config::FLAGS_heartbeat_oneshot; }; GlobalTimer::AddTask(::RaftCore::Config::FLAGS_leader_heartbeat_interval_ms,_heartbeat); //Register connection pool GC. auto _conn_pool_deque_gc = []() ->bool { LockFreeDeque<AppendEntriesAsyncClient>::GC(); LockFreeDeque<CommitEntriesAsyncClient>::GC(); return true; }; GlobalTimer::AddTask(::RaftCore::Config::FLAGS_gc_interval_ms ,_conn_pool_deque_gc); //Register GC task to the global timer. CommonView::InstallGC<TrivialLockDoubleList,DoubleListNode,MemoryLogItemLeader>(&m_garbage); CommonView::InstallGC<TrivialLockSingleList,SingleListNode,CutEmptyContext>(&m_cut_empty_garbage); //Add task & callbacks by tasks' priority,highest priority add first. 
auto *_p_client_reacting_queue = new LockFreeQueue<BackGroundTask::ClientReactContext>(); _p_client_reacting_queue->Initilize(ClientReactCB, ::RaftCore::Config::FLAGS_lockfree_queue_client_react_elements); m_priority_queue.AddTask(LockFreePriotityQueue::TaskType::CLIENT_REACTING,(LockFreeQueueBase*)_p_client_reacting_queue); auto *_p_fresher_sync_data_queue = new LockFreeQueue<BackGroundTask::SyncDataContenxt>(); _p_fresher_sync_data_queue->Initilize(SyncDataCB,::RaftCore::Config::FLAGS_lockfree_queue_resync_data_elements); m_priority_queue.AddTask(LockFreePriotityQueue::TaskType::RESYNC_DATA,(LockFreeQueueBase*)_p_fresher_sync_data_queue); auto *_p_resync_log_queue = new LockFreeQueue<BackGroundTask::ReSyncLogContext>(); _p_resync_log_queue->Initilize(ReSyncLogCB,::RaftCore::Config::FLAGS_lockfree_queue_resync_log_elements); m_priority_queue.AddTask(LockFreePriotityQueue::TaskType::RESYNC_LOG,(LockFreeQueueBase*)_p_resync_log_queue); //Launching the background threads for processing that queue. m_priority_queue.Launch(); m_last_log_waiting_num.store(0); //Must be initialized before 'CutEmptyRoutine' started. CommonView::m_running_flag = true; //Start Leader routine thread. for (std::size_t i = 0; i < ::RaftCore::Config::FLAGS_iterating_threads; ++i) CommonView::m_vec_routine.emplace_back(new std::thread(Write::CutEmptyRoutine)); } void LeaderView::UnInitialize() noexcept { //Waiting for routine thread exit. 
CommonView::m_running_flag = false; for (auto* p_thread : CommonView::m_vec_routine) { p_thread->join(); delete p_thread; } m_hash_followers.clear(); m_entity_pending_list.Clear(); m_cut_empty_list.Clear(); CommonView::UnInitialize(); } void LeaderView::UpdateThreadMapping() noexcept { std::vector<std::thread::id> _notify_thread_ids; for (auto &_one_group : GlobalEnv::m_vec_notify_cq_workgroup) _one_group.GetThreadId(_notify_thread_ids); for (std::size_t n = 0; n < _notify_thread_ids.size(); ++n) m_notify_thread_mapping[_notify_thread_ids[n]] = n; } void LeaderView::BroadcastHeatBeat() noexcept { auto *_p_ref = &m_hash_followers; auto *_p_ref_mutex = &m_hash_followers_mutex; auto *_p_ref_joint = &MemberMgr::m_joint_summary; auto *_p_ref_joing_mutex = &MemberMgr::m_mutex; //Sending heartbeat to the nodes in the current cluster. { ReadLock _r_lock(*_p_ref_mutex); for (auto &_pair_kv : *_p_ref) { VLOG(89) << "heartbeat sending, follower:" << _pair_kv.second->my_addr; _pair_kv.second->m_shp_channel_pool->HeartBeat(ElectionMgr::m_cur_term.load(),StateMgr::GetMyAddr()); } } //Sending heartbeat to the nodes in the new cluster if there are any. do{ ReadLock _r_lock(*_p_ref_joing_mutex); if (_p_ref_joint->m_joint_status != EJointStatus::JOINT_CONSENSUS) break; for (auto &_pair_kv : _p_ref_joint->m_joint_topology.m_added_nodes) _pair_kv.second->m_shp_channel_pool->HeartBeat(ElectionMgr::m_cur_term.load(),StateMgr::GetMyAddr()); } while (false); } auto LeaderView::PrepareAppendEntriesRequest(std::shared_ptr<BackGroundTask::ReSyncLogContext> &shp_context) { auto _null_shp = std::shared_ptr<AppendEntriesRequest>(); //Need to get a new handler of binlog file. 
std::string _binlog_file_name = BinLogGlobal::m_instance.GetBinlogFileName(); std::FILE* _f_handler = std::fopen(_binlog_file_name.c_str(),_AURORA_BINLOG_READ_MODE_); if (_f_handler == nullptr) { LOG(ERROR) << "ReSyncLogCB open binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << shp_context->m_follower->my_addr; return _null_shp; } std::list<std::shared_ptr<FileMetaData::IdxPair>> _file_meta; BinLogGlobal::m_instance.GetOrderedMeta(_file_meta); //Find the earlier X entries auto _reverse_iter = _file_meta.crbegin(); if (_reverse_iter == _file_meta.crend()) { LOG(WARNING) << "binlog is empty"; return _null_shp; } //_reverse_bound is the first element of _file_meta. auto _reverse_bound = _file_meta.crend(); _reverse_bound--; auto _log_id_lcl = StorageGlobal::m_instance.GetLastCommitted(); uint32_t _precede_lcl_counter = 0; //Count for #log entries that preceding the LCL. /*Since _reverse_iter is a reserve iterator , and we are getting the non-reserve iterator based on it , so there is one more place(the '<=' in the for loop statement below) to advance.*/ for (std::size_t n = 0; n <= ::RaftCore::Config::FLAGS_resync_log_reverse_step_len; ) { if ((*_reverse_iter)->operator<(_log_id_lcl)) _precede_lcl_counter++; if ((*_reverse_iter)->operator<(shp_context->m_last_sync_point)) n++; _reverse_iter++; //Should stopping at the first element of _file_meta. 
if (_reverse_iter == _reverse_bound) break; } /*_cur_iter will points to _reverse_iter-1, aka, the second element of _file_meta after the following line, because every log entry need its previous log info when doing resyncing, so we cannot start at the first one.*/ auto _cur_iter = _reverse_iter.base(); /*The start point log's ID must be greater than (ID-LCL - FLAGS_binlog_reserve_log_num), otherwise the further ahead log entries may be absent.*/ if (_precede_lcl_counter > ::RaftCore::Config::FLAGS_binlog_reserve_log_num) { shp_context->m_hold_pre_lcl = true; BinLogGlobal::m_instance.AddPreLRLUseCount(); } /*Note: _reverse_iter is now points to the previous entry of 'STEP_LEN' or the boundary which at least >= (ID-LCL - FLAGS_binlog_reserve_log_num). In both cases we just need to begin iterating at _reverse_iter-1 .*/ //Appending new entries std::shared_ptr<AppendEntriesRequest> _shp_req(new AppendEntriesRequest()); _shp_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); _shp_req->mutable_base()->set_term(ElectionMgr::m_cur_term.load()); //Update last sync point to the first entry that will be sent in the next steps. shp_context->m_last_sync_point = *(*_cur_iter); /*There will not much log entries between [ (ID-LCL - FLAGS_binlog_reserve_log_num) , ID-LRL ], so resync all the logs in one RPC is acceptable. */ //_cur_iter unchanged ,_pre_iter points to the previous position of _cur_iter. 
auto _pre_iter = (--_cur_iter)++; unsigned char* _p_buf = nullptr; for (; _cur_iter!=_file_meta.cend(); ++_pre_iter,++_cur_iter) { auto _p_entry = _shp_req->add_replicate_entity(); auto _p_entity_id = _p_entry->mutable_entity_id(); _p_entity_id->set_term((*_cur_iter)->m_term); _p_entity_id->set_idx((*_cur_iter)->m_index); auto _p_pre_entity_id = _p_entry->mutable_pre_log_id(); _p_pre_entity_id->set_term((*_pre_iter)->m_term); _p_pre_entity_id->set_idx((*_pre_iter)->m_index); //Seek to position if (std::fseek(_f_handler, (*_cur_iter)->m_offset, SEEK_SET) != 0) { LOG(ERROR) << "ReSyncLogCB seek binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << shp_context->m_follower->my_addr; std::fclose(_f_handler); return _null_shp; } //Read protobuf buf length uint32_t _buf_len = 0; if (std::fread(&_buf_len,1,_FOUR_BYTES_,_f_handler) != _FOUR_BYTES_) { LOG(ERROR) << "ReSyncLogCB read binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << shp_context->m_follower->my_addr; std::fclose(_f_handler); return _null_shp; } ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(_buf_len, &_buf_len); //Read protobuf buf _p_buf = (_p_buf) ? 
(unsigned char*)std::realloc(_p_buf,_buf_len): (unsigned char*)malloc(_buf_len); if ( std::fread(_p_buf,1,_buf_len, _f_handler) != _buf_len) { LOG(ERROR) << "ReSyncLogCB read binlog file " << _binlog_file_name << " fail..,errno:" << errno << ",follower:" << shp_context->m_follower->my_addr; std::free(_p_buf); std::fclose(_f_handler); return _null_shp; } ::raft::BinlogItem _binlog_item; if (!_binlog_item.ParseFromArray(_p_buf,_buf_len)) { LOG(ERROR) << "ReSyncLogCB parse protobuf buffer fail " << _binlog_file_name << ",follower:" << shp_context->m_follower->my_addr; std::free(_p_buf); std::fclose(_f_handler); return _null_shp; } auto _p_wop = _p_entry->mutable_write_op(); _p_wop->set_key(_binlog_item.entity().write_op().key()); _p_wop->set_value(_binlog_item.entity().write_op().value()); } if (_p_buf) std::free(_p_buf); //VLOG(89) << "debug pos2" << ",leader sent resync log:" << _shp_req->DebugString(); std::fclose(_f_handler); return _shp_req; } void LeaderView::AddRescynDataTask(std::shared_ptr<BackGroundTask::ReSyncLogContext> &shp_context) noexcept { //Prevent from duplicated task being executed. if (shp_context->m_follower->m_status == FollowerStatus::RESYNC_DATA) { LOG(INFO) << "a RESYNC_DATA task already in progress for follower:" << shp_context->m_follower->my_addr << ", no need to generate a new one, just return"; return; } shp_context->m_follower->m_status = FollowerStatus::RESYNC_DATA; /*Here is synonymous to that , the leader is talking to the follower , and says : "Currently I don't have enough log entries to heal your log falling behind issue , you have to resync all the whole data , namely , starting the resync data procedure all over again. " */ std::shared_ptr<BackGroundTask::SyncDataContenxt> _shp_sync_data_ctx(new BackGroundTask::SyncDataContenxt(shp_context->m_follower)); //Pass the callback function down through since SyncData will eventually need to all that ,too. 
_shp_sync_data_ctx->m_on_success_cb = shp_context->m_on_success_cb; int _ret_code = m_priority_queue.Push(LockFreePriotityQueue::TaskType::RESYNC_DATA, &_shp_sync_data_ctx); LOG(INFO) << "Add SYNC-DATA task bool ret:" << _ret_code << ",logID:" << shp_context->m_last_sync_point << ",follower:" << shp_context->m_follower->my_addr; } bool LeaderView::ReSyncLogCB(std::shared_ptr<BackGroundTask::ReSyncLogContext> &shp_context) noexcept{ LOG(INFO) << "resync log background task received, peer:" << shp_context->m_follower->my_addr << ",last synced point:" << shp_context->m_last_sync_point; //Follower must in RESYNC_LOG state if (shp_context->m_follower->m_status != FollowerStatus::RESYNC_LOG) { LOG(WARNING) << "ReSyncLogCB follower " << shp_context->m_follower->my_addr << " is under " << FollowerEntity::MacroToString(shp_context->m_follower->m_status) << " status,won't resync log to it"; return false; } auto _shp_req = PrepareAppendEntriesRequest(shp_context); if (!_shp_req) { LOG(ERROR) << "PrepareAppendEntriesRequest got an empty result,probably due to a resync-data event happened,check it."; return false; } auto _shp_channel = shp_context->m_follower->m_shp_channel_pool->GetOneChannel(); UnarySyncClient<AppendEntriesRequest, AppendEntriesResponse> _sync_log_client(_shp_channel); auto _rpc = std::bind(&::raft::RaftService::Stub::AppendEntries, _sync_log_client.GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); ::grpc::Status _status; auto &_rsp = _sync_log_client.DoRPC([&](std::shared_ptr<AppendEntriesRequest>& req) { req = _shp_req; }, _rpc ,::RaftCore::Config::FLAGS_leader_resync_log_rpc_timeo_ms, _status); if (!_status.ok()) { LOG(ERROR) << "ReSyncLogCB AppendEntries fail,error code:" << _status.error_code() << ",err msg: " << _status.error_message() ; return false; } //const auto &_last_entity = _shp_req->replicate_entity(_shp_req->replicate_entity_size() - 1); ErrorCode _error_code = _rsp.comm_rsp().result(); if 
(_error_code!=ErrorCode::SUCCESS && _error_code!=ErrorCode::SUCCESS_MERGED) { if (_error_code != ErrorCode::APPEND_ENTRY_CONFLICT && _error_code != ErrorCode::WAITING_TIMEOUT && _error_code != ErrorCode::OVERSTEP_LCL) { LOG(ERROR) << "ReSyncLogCB AppendEntries fail,detail:" << _rsp.DebugString(); return false; } if (_error_code == ErrorCode::OVERSTEP_LCL) { AddRescynDataTask(shp_context); return true; } //If still conflict, the task should be re-queued , no task could hold a thread for a long time. int _ret_code = m_priority_queue.Push(LockFreePriotityQueue::TaskType::RESYNC_LOG, &shp_context); LOG(INFO) << "Add RESYNC-LOG task ret:" << _ret_code << ",last synced point:" << shp_context->m_last_sync_point << ", remote peer:" << shp_context->m_follower->my_addr; return true; } //Reduce the use count for pre-lrl if currently holding one. if (shp_context->m_hold_pre_lcl) { shp_context->m_hold_pre_lcl = false; BinLogGlobal::m_instance.SubPreLRLUseCount(); } //Reset follower status to NORMAL,allowing user threads to do normal AppendEntries RPC again. shp_context->m_follower->m_status = FollowerStatus::NORMAL; if (shp_context->m_on_success_cb) shp_context->m_on_success_cb(shp_context->m_follower); return true; } bool LeaderView::SyncLogAfterLCL(std::shared_ptr<BackGroundTask::SyncDataContenxt> &shp_context) { /*Note : If start syncing logs , never turn back , do it until finished. Two reasons : 1. #logs which larger then ID-LCL is quite small. 2. If turn back , could incur follower committing a log which already been synced in the data zone, this potential break the version sequence of the committed data. */ //Prepare sync log. std::list<std::shared_ptr<FileMetaData::IdxPair>> _file_meta; BinLogGlobal::m_instance.GetOrderedMeta(_file_meta); //Find the start syncing point. int _precede_lcl_counter = 0; //count for num of exceeding the LCL. 
auto _iter_begin = _file_meta.cend(); for (auto _iter = _file_meta.crbegin(); _iter != _file_meta.crend();++_iter) { if ((*_iter)->operator<(shp_context->m_last_sync)) _precede_lcl_counter++; if ((*_iter)->operator>(shp_context->m_last_sync)) continue; _iter_begin = _iter.base(); break; } if (_iter_begin == _file_meta.cend()) { LOG(ERROR) << "SyncDataCB cannot find the sync log starting point"; return false; } int _reserve_before_lcl = ::RaftCore::Config::FLAGS_binlog_reserve_log_num; CHECK(_precede_lcl_counter < _reserve_before_lcl) << "SyncDBAfter LCL fail,_precede_lcl_counter :" << _precede_lcl_counter << ",exceeds limit:" << _reserve_before_lcl; TypePtrFollowerEntity _shp_follower = shp_context->m_follower; auto _follower_addr = _shp_follower->my_addr; //Start syncing log. std::string _binlog_file_name = BinLogGlobal::m_instance.GetBinlogFileName(); std::FILE* _f_handler = std::fopen(_binlog_file_name.c_str(),_AURORA_BINLOG_READ_MODE_); if (_f_handler == nullptr) { LOG(ERROR) << "ReSyncLogCB open binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << _follower_addr; return false; } if (std::fseek(_f_handler, (*_iter_begin)->m_offset, SEEK_SET) != 0) { LOG(ERROR) << "ReSyncLogCB seek binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << _follower_addr; std::fclose(_f_handler); return false; } int _rpc_counter = 0; unsigned char* _p_buf = nullptr; auto &_shp_client = shp_context->m_shp_client; auto _shp_stream = _shp_client->GetReaderWriter(); auto* _rsp = _shp_client->GetResponse(); auto _shp_req = _shp_client->GetInstantiatedReq(); _shp_req->mutable_base()->set_term(ElectionMgr::m_cur_term.load()); _shp_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); _shp_req->clear_entity(); _shp_req->set_msg_type(::raft::SyncDataMsgType::SYNC_LOG); bool _sync_log_result = true; while (true) { _shp_req->clear_entity(); bool _read_end = false; for (std::size_t i = 0; i < 
::RaftCore::Config::FLAGS_resync_data_log_num_each_rpc; ++i) { uint32_t _buf_len = 0; if (std::fread(&_buf_len, _FOUR_BYTES_, 1, _f_handler) != 1) { LOG(ERROR) << "ReSyncLogCB read binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << shp_context->m_follower->my_addr; _sync_log_result = false; break; } ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(_buf_len, &_buf_len); //Read protobuf buf _p_buf = (_p_buf) ? (unsigned char*)std::realloc(_p_buf,_buf_len): (unsigned char*)malloc(_buf_len); if (std::fread(_p_buf, _buf_len, 1, _f_handler) != 1) { LOG(ERROR) << "ReSyncLogCB read binlog file " << _binlog_file_name << "fail..,errno:" << errno << ",follower:" << shp_context->m_follower->my_addr; _sync_log_result = false; break; } ::raft::BinlogItem _binlog_item; if (!_binlog_item.ParseFromArray(_p_buf,_buf_len)) { LOG(ERROR) << "ReSyncLogCB parse protobuf buffer fail " << _binlog_file_name << ",follower:" << shp_context->m_follower->my_addr; _sync_log_result = false; break; } auto *_p_entity = _shp_req->add_entity(); //TODO:figure out why this could resulting in a coredump in ~BinlogItem(). 
//_p_entity->Swap(_binlog_item.mutable_entity()); //_binlog_item.clear_entity(); _p_entity->CopyFrom(_binlog_item.entity()); if (!EntityIDSmaller(_p_entity->entity_id(), BinLogGlobal::m_instance.GetLastReplicated())) { _read_end = true; break; } } if (!_sync_log_result) break; if (!_shp_stream->Write(*_shp_req)) { LOG(ERROR) << "SyncDataCB send log fail,follower:" << shp_context->m_follower->my_addr << ",logID:" << shp_context->m_last_sync; _sync_log_result = false; break; } if (!_shp_stream->Read(_rsp)) { LOG(ERROR) << "SyncDataCB get prepare result fail,follower:" << shp_context->m_follower->my_addr << ",logID:" << shp_context->m_last_sync; break; } if (_rsp->comm_rsp().result() != ErrorCode::SYNC_LOG_CONFRIMED) { LOG(ERROR) << "SyncDataCB prepare fail,follower:" << shp_context->m_follower->my_addr << ",logID:" << shp_context->m_last_sync; break; } if (_read_end) break; } std::fclose(_f_handler); if (_p_buf) std::free(_p_buf); if (!_shp_stream->WritesDone()) { LOG(ERROR) << "SyncDataCB send log WritesDone fail,follower:" << shp_context->m_follower->my_addr << ",logID:" << shp_context->m_last_sync; return false; } shp_context->m_final_status = _shp_stream->Finish(); if (!shp_context->m_final_status.ok()) { LOG(ERROR) << "SyncDataCB send log final status fail,follower:" << shp_context->m_follower->my_addr << ",logID:" << shp_context->m_last_sync << ",error_code:" << shp_context->m_final_status.error_code() << ",error_status:" << shp_context->m_final_status.error_message(); return false; } if (!_sync_log_result) return false; return true; } bool LeaderView::ClientReactCB(std::shared_ptr<BackGroundTask::ClientReactContext> &shp_context) noexcept { void* _tag = shp_context->m_react_info.m_tag; ::RaftCore::Common::ReactBase* _p_ins = static_cast<::RaftCore::Common::ReactBase*>(_tag); _p_ins->React(shp_context->m_react_info.m_cq_result); return true; } bool LeaderView::SyncDataCB(std::shared_ptr<BackGroundTask::SyncDataContenxt> &shp_context) noexcept{ 
TypePtrFollowerEntity _shp_follower = shp_context->m_follower; auto _follower_addr = _shp_follower->my_addr; LOG(INFO) << "sync data background task received,peer:" << _follower_addr; //Iterating over the storage , sync data to the follower in a batch manner. auto GetCurrentMS = []()->uint64_t{ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); }; uint64_t _start_ts = GetCurrentMS(); auto ScheduleNext = [&]()->bool{ uint64_t _now = GetCurrentMS(); if (_now - _start_ts <= ::RaftCore::Config::FLAGS_resync_data_task_max_time_ms) return false; int _ret_code = m_priority_queue.Push(LockFreePriotityQueue::TaskType::RESYNC_DATA, &shp_context); LOG(INFO) << "Add RESYNC_DATA task result:" << _ret_code << ",follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync; if (_ret_code != QUEUE_SUCC) return false; return true; }; //After stream established , follower gets into its RPC interface and start waiting to read. 
auto &_shp_client = shp_context->m_shp_client; auto _shp_req = _shp_client->GetInstantiatedReq(); _shp_req->mutable_base()->set_term(ElectionMgr::m_cur_term.load()); _shp_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); auto _shp_stream = _shp_client->GetReaderWriter(); auto* _rsp = _shp_client->GetResponse(); if (shp_context->IsBeginning()) { _shp_req->set_msg_type(::raft::SyncDataMsgType::PREPARE); if (!_shp_stream->Write(*_shp_req)) { LOG(ERROR) << "SyncDataCB send prepare msg fail,follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync; return false; } VLOG(89) << " sync_data_debug PREPARE sent."; if (!_shp_stream->Read(_rsp)) { LOG(ERROR) << "SyncDataCB get prepare result fail,follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync; return false; } VLOG(89) << " sync_data_debug prepare received."; if (_rsp->comm_rsp().result() != ErrorCode::PREPARE_CONFRIMED) { LOG(ERROR) << "SyncDataCB prepare fail,follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync << ",result:" << _rsp->DebugString(); return false; } } while (true) { _shp_req->clear_entity(); _shp_req->set_msg_type(::raft::SyncDataMsgType::SYNC_DATA); std::list<StorageMgr::StorageItem> _list; StorageGlobal::m_instance.GetSlice(shp_context->m_last_sync,::RaftCore::Config::FLAGS_resync_data_item_num_each_rpc,_list); if (_list.empty()) { VLOG(89) << "list empty after GetSlice"; break; } for (const auto &_item : _list) { auto *_p_entity = _shp_req->add_entity(); auto *_p_wop = _p_entity->mutable_write_op(); //Ownership of the following two can be taken over. _p_wop->set_allocated_key(_item.m_key.get()); _p_wop->set_allocated_value(_item.m_value.get()); auto _p_entity_id = _p_entity->mutable_entity_id(); _p_entity_id->set_term(_item.m_log_id.m_term); _p_entity_id->set_idx(_item.m_log_id.m_index); } bool _rst = _shp_stream->Write(*_shp_req); //Release the allocated write_op first. 
for (int i = 0; i < _shp_req->entity_size(); ++i) { auto *_p_wop = _shp_req->mutable_entity(i)->mutable_write_op(); _p_wop->release_key(); _p_wop->release_value(); } if (!_rst) { LOG(ERROR) << "SyncDataCB send data fail,follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync; return false; } if (!_shp_stream->Read(_rsp)) { LOG(ERROR) << "SyncDataCB get prepare result fail,follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync; return false; } if (_rsp->comm_rsp().result() != ErrorCode::SYNC_DATA_CONFRIMED) { LOG(ERROR) << "SyncDataCB prepare fail,follower:" << _follower_addr << ",logID:" << shp_context->m_last_sync; return false; } //Update last synced storage data item. /*TODO: Prevent from losing data(a rare case) when using the 'm_last_sync' as the task restart point since the order is not strictly guaranteed among sstables.*/ shp_context->m_last_sync.Set(_list.back().m_log_id); //Return if successfully push the task again to the queue. if (ScheduleNext()) return true; //Means there are no more data items to be synced due to 'GetSlice'. if (int(_list.size()) < ::RaftCore::Config::FLAGS_resync_data_item_num_each_rpc) { VLOG(89) << "list num less than required after GetSlice."; break; } } //Re-check if current thread timed out. if (ScheduleNext()) return true; LOG(INFO) << "SYNC_DATA end, start sync log after lrl."; bool _resync_log_rst = SyncLogAfterLCL(shp_context); if (_resync_log_rst && shp_context->m_on_success_cb) shp_context->m_on_success_cb(_shp_follower); //Reset follower status to NORMAL,allow user threads to do normal AppendEntries RPC. 
shp_context->m_follower->m_status = FollowerStatus::NORMAL; return true; } void LeaderView::ClientThreadReacting(const ReactInfo &info) noexcept { std::shared_ptr<ReactInfo> _shp_task(new ReactInfo(info)); int _ret_code = LeaderView::m_priority_queue.Push(LockFreePriotityQueue::TaskType::CLIENT_REACTING, &_shp_task); if (_ret_code != QUEUE_SUCC) LOG(ERROR) << "Add CLIENT_REACTING task fail,ret:" << _ret_code; } }
31,899
C++
.cc
586
46.308874
168
0.635896
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,241
memory_log_leader.cc
ppLorins_aurora/src/leader/memory_log_leader.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "leader/memory_log_leader.h" namespace RaftCore::Leader { MemoryLogItemLeader::~MemoryLogItemLeader() noexcept{ /*The write_op of m_entity of LeaderLogItem if from set_allocated_write_op in the leader service, so we need to release the ownership of write_op before releasing m_entity. 
*/ //this->m_entity->release_write_op(); } MemoryLogItemLeader::MemoryLogItemLeader(uint32_t _term, uint64_t _index) noexcept:MemoryLogItemBase(_term, _index) {} MemoryLogItemLeader::MemoryLogItemLeader(const ::raft::Entity &_entity) noexcept:MemoryLogItemBase(_entity) {} bool MemoryLogItemLeader::operator<(const MemoryLogItemLeader& _other)const noexcept { return this->MemoryLogItemBase::operator<(_other); } bool MemoryLogItemLeader::operator>(const MemoryLogItemLeader& _other)const noexcept { return this->MemoryLogItemBase::operator>(_other); } bool MemoryLogItemLeader::operator==(const MemoryLogItemLeader& _other)const noexcept { return this->MemoryLogItemBase::operator==(_other); } bool MemoryLogItemLeader::operator!=(const MemoryLogItemLeader& _other)const noexcept { return !this->MemoryLogItemBase::operator==(_other); } bool CmpMemoryLogLeader(const MemoryLogItemLeader& left, const MemoryLogItemLeader& right) noexcept { return CmpMemoryLog(&left,&right); } }
2,093
C++
.cc
39
51.435897
118
0.772059
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,242
leader_request.cc
ppLorins_aurora/src/leader/leader_request.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "leader/leader_request.h" namespace RaftCore::Leader { template<typename T,typename R,typename Q> LeaderRequest<T, R, Q>::LeaderRequest() noexcept {} template<typename T,typename R,typename Q> LeaderRequest<T,R,Q>::~LeaderRequest() noexcept {} }
1,035
C++
.cc
21
47.809524
73
0.75498
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,243
client_pool.cc
ppLorins_aurora/src/leader/client_pool.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <typeinfo> #include "grpc++/create_channel.h" #include "config/config.h" #include "common/comm_defs.h" #include "global/global_env.h" #include "client/client_impl.h" #include "leader/client_pool.h" namespace RaftCore::Leader { using ::RaftCore::Global::GlobalEnv; using ::RaftCore::Client::HeartbeatSyncClient; template<typename T> ClientPool<T>::ClientPool(FollowerEntity* p_follower) noexcept { this->m_p_parent_follower = p_follower; } template<typename T> ClientPool<T>::~ClientPool() noexcept{} template<typename T> FollowerEntity* ClientPool<T>::GetParentFollower() noexcept { return this->m_p_parent_follower; } template<typename T> std::shared_ptr<T> ClientPool<T>::Fetch() noexcept { return m_pool.Pop(); } template<typename T> void ClientPool<T>::Back(std::shared_ptr<T> &client) noexcept { return m_pool.Push(client); } }
1,643
C++
.cc
43
36.488372
73
0.753943
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,244
channel_pool.cc
ppLorins_aurora/src/leader/channel_pool.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <typeinfo> #include "grpc++/create_channel.h" #include "config/config.h" #include "client/client_impl.h" #include "leader/channel_pool.h" namespace RaftCore::Leader { using ::RaftCore::Client::HeartbeatSyncClient; ChannelPool::ChannelPool(const std::string &peer_addr, uint32_t pool_size) noexcept { this->m_channel_pool.reset(new TypeVecChannel()); for (std::size_t i = 0; i < ::RaftCore::Config::FLAGS_conn_per_link; ++i) { for (std::size_t j = 0; j < pool_size; ++j) { auto _channel_args = ::grpc::ChannelArguments(); std::string _key = "custom_key_" + std::to_string(i); std::string _val = "custom_val_" + std::to_string(i); _channel_args.SetString(_key, _val); auto _shp_channel = ::grpc::CreateCustomChannel(peer_addr, ::grpc::InsecureChannelCredentials(), _channel_args); this->m_channel_pool->emplace_back(_shp_channel); } } this->m_peer_addr = peer_addr; } ChannelPool::~ChannelPool() noexcept{} std::shared_ptr<::grpc::Channel> ChannelPool::GetOneChannel() noexcept { uint32_t _random_idx = this->m_idx.fetch_add(1,std::memory_order_relaxed) % this->m_channel_pool->size(); return this->m_channel_pool->operator[](_random_idx); } void ChannelPool::HeartBeat(uint32_t term,const std::string &my_addr) noexcept{ auto _shp_channel = 
this->GetOneChannel(); HeartbeatSyncClient _heartbeat_client(_shp_channel); auto _setter = [&](std::shared_ptr<::raft::HeartBeatRequest>& req) { req->mutable_base()->set_addr(my_addr); req->mutable_base()->set_term(term); }; auto _rpc = std::bind(&::raft::RaftService::Stub::HeartBeat, _heartbeat_client.GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); ::grpc::Status _status; auto &_rsp = _heartbeat_client.DoRPC(_setter, _rpc, ::RaftCore::Config::FLAGS_leader_heartbeat_rpc_timeo_ms, _status); if (!_status.ok()) { LOG(ERROR) << "heart to follower:" << this->m_peer_addr << " rpc fail" << ",err code:" << _status.error_code() << ",err msg:" << _status.error_message(); return; } if (_rsp.result()!=::raft::ErrorCode::SUCCESS) { LOG(ERROR) << "heart to follower:" << this->m_peer_addr << " svr return fail," << ",msg:" << _rsp.err_msg(); return; } VLOG(99) << "follower " << this->m_peer_addr << " checking heartbeat success!"; } }
3,254
C++
.cc
64
45.75
124
0.658028
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,245
leader_bg_task.cc
ppLorins_aurora/src/leader/leader_bg_task.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "service/service.h" #include "member/member_manager.h" #include "leader/leader_bg_task.h" namespace RaftCore::Leader::BackGroundTask { using ::RaftCore::Member::MemberMgr; TwoPhaseCommitContext::PhaseState::RpcStatistic::RpcStatistic(){ this->Reset(); } TwoPhaseCommitContext::PhaseState::RpcStatistic::~RpcStatistic(){} std::string TwoPhaseCommitContext::PhaseState::RpcStatistic::Dump() const noexcept { char sz_buf[256] = { 0 }; std::snprintf(sz_buf,sizeof(sz_buf),"cq_entrust_num:%d,events_got:%d,succeed_num:%d,implicitly_fail_num:%d,explicitly_fail_num:%d", this->m_cq_entrust_num.load(),this->EventsGot(),this->m_succeed_num.load(), this->m_implicitly_fail_num.load(),this->m_explicitly_fail_num.load()); return sz_buf; } int TwoPhaseCommitContext::PhaseState::RpcStatistic::EventsGot()const noexcept { return this->m_succeed_num.load() + this->m_explicitly_fail_num.load() + this->m_implicitly_fail_num.load(); } void TwoPhaseCommitContext::PhaseState::RpcStatistic::Reset() noexcept { this->m_cq_entrust_num.store(0); this->m_succeed_num.store(0); this->m_implicitly_fail_num.store(0); this->m_explicitly_fail_num.store(0); } TwoPhaseCommitContext::PhaseState::PhaseState() { this->Reset(); } 
TwoPhaseCommitContext::PhaseState::~PhaseState() {} std::string TwoPhaseCommitContext::PhaseState::Dump() const noexcept{ auto _cur_cluster_dump = this->m_cur_cluster.Dump(); auto _new_cluster_dump = this->m_new_cluster.Dump(); char sz_buf[512] = { 0 }; std::snprintf(sz_buf,sizeof(sz_buf),"cur_cluster:[%s],new_cluster:[%s]",_cur_cluster_dump.c_str(),_new_cluster_dump.c_str()); return sz_buf; } void TwoPhaseCommitContext::PhaseState::Reset() noexcept { this->m_cur_cluster.Reset(); this->m_new_cluster.Reset(); this->m_conn_todo_set.clear(); } void TwoPhaseCommitContext::PhaseState::Increase(uint32_t flag, std::atomic<int> &cur_cluster_data, std::atomic<int> &new_cluster_data) noexcept { if (flag & int(JointConsensusMask::IN_OLD_CLUSTER)) cur_cluster_data.fetch_add(1); if (flag & int(JointConsensusMask::IN_NEW_CLUSTER)) new_cluster_data.fetch_add(1); } void TwoPhaseCommitContext::PhaseState::IncreaseEntrust(uint32_t flag) noexcept { this->Increase(flag,this->m_cur_cluster.m_cq_entrust_num, this->m_new_cluster.m_cq_entrust_num); } void TwoPhaseCommitContext::PhaseState::IncreaseSuccess(uint32_t flag) noexcept { this->Increase(flag,this->m_cur_cluster.m_succeed_num, this->m_new_cluster.m_succeed_num); } void TwoPhaseCommitContext::PhaseState::IncreaseImplicitFail(uint32_t flag) noexcept { this->Increase(flag,this->m_cur_cluster.m_implicitly_fail_num, this->m_new_cluster.m_implicitly_fail_num); } void TwoPhaseCommitContext::PhaseState::IncreaseExplicitFail(uint32_t flag) noexcept { this->Increase(flag,this->m_cur_cluster.m_explicitly_fail_num, this->m_new_cluster.m_explicitly_fail_num); } bool TwoPhaseCommitContext::PhaseState::JudgeClusterPotentiallySucceed(RpcStatistic &cluster_stat, std::size_t majority) noexcept { return (cluster_stat.m_succeed_num.load() + cluster_stat.m_implicitly_fail_num.load()) >= (int)majority; } FinishStatus TwoPhaseCommitContext::PhaseState::JudgeClusterDetermined(RpcStatistic &cluster_stat,std::size_t majority) noexcept { if 
(cluster_stat.m_succeed_num.load() >= (int)majority) return FinishStatus::POSITIVE_FINISHED; int _unknown = cluster_stat.m_cq_entrust_num - cluster_stat.EventsGot(); if (cluster_stat.m_succeed_num.load() + _unknown < (int)majority) return FinishStatus::NEGATIVE_FINISHED; return FinishStatus::UNFINISHED; } bool TwoPhaseCommitContext::PhaseState::JudgeFinished() noexcept { return (this->m_cur_cluster.m_cq_entrust_num == this->m_cur_cluster.EventsGot() && this->m_new_cluster.m_cq_entrust_num == this->m_new_cluster.EventsGot()); } bool TwoPhaseCommitContext::JudgePhaseIPotentiallySucceed() noexcept { auto &_phaseI = this->m_phaseI_state; if (!_phaseI.JudgeClusterPotentiallySucceed(_phaseI.m_cur_cluster, this->m_cluster_majority)) return false; return _phaseI.JudgeClusterPotentiallySucceed(_phaseI.m_new_cluster, this->m_new_cluster_majority); } FinishStatus TwoPhaseCommitContext::JudgePhaseIDetermined() noexcept { auto &_phaseI = this->m_phaseI_state; FinishStatus _cur = _phaseI.JudgeClusterDetermined(_phaseI.m_cur_cluster, this->m_cluster_majority); FinishStatus _new = _phaseI.JudgeClusterDetermined(_phaseI.m_new_cluster, this->m_new_cluster_majority); if ((_cur == FinishStatus::NEGATIVE_FINISHED) || (_new == FinishStatus::NEGATIVE_FINISHED)) return FinishStatus::NEGATIVE_FINISHED; if ((_cur == FinishStatus::POSITIVE_FINISHED) && (_new == FinishStatus::POSITIVE_FINISHED)) return FinishStatus::POSITIVE_FINISHED; return FinishStatus::UNFINISHED; } TwoPhaseCommitContext::TwoPhaseCommitContext() { this->Reset(); } TwoPhaseCommitContext::~TwoPhaseCommitContext() {} bool TwoPhaseCommitContext::JudgeAllFinished() noexcept { if (!this->m_phaseI_state.JudgeFinished()) return false; int _phaseII_obligation_x = this->m_phaseI_state.m_cur_cluster.m_cq_entrust_num.load(); if (this->m_phaseII_state.m_cur_cluster.m_succeed_num.load() < _phaseII_obligation_x) return false; int _phaseII_obligation_y = this->m_phaseI_state.m_new_cluster.m_cq_entrust_num.load(); if 
(this->m_phaseII_state.m_new_cluster.m_succeed_num.load() < _phaseII_obligation_y) return false; return true; } std::string TwoPhaseCommitContext::Dump() const noexcept{ auto _phaseI_dump = this->m_phaseI_state.Dump(); auto _phaseII_dump = this->m_phaseII_state.Dump(); char sz_buf[1024] = { 0 }; std::snprintf(sz_buf,sizeof(sz_buf),"phaseI_state:[%s],phaseII_state:[%s],m_cluster_size:%u," "m_cluster_majority:%u,m_new_cluster_size:%u,m_new_cluster_majority:%u", _phaseI_dump.c_str(),_phaseII_dump.c_str(),(uint32_t)this->m_cluster_size,(uint32_t)this->m_cluster_majority, (uint32_t)this->m_new_cluster_size,(uint32_t)this->m_new_cluster_majority); return sz_buf; } void TwoPhaseCommitContext::Reset() noexcept { this->m_phaseI_state.Reset(); this->m_phaseII_state.Reset(); } SyncDataContenxt::SyncDataContenxt(TypePtrFollowerEntity &shp_follower) noexcept{ //Sync Data will hold one connection until this job finished,since it is stateful. this->m_follower = shp_follower; auto _shp_channel = this->m_follower->m_shp_channel_pool->GetOneChannel(); this->m_shp_client.reset(new BackGroundTask::SyncDataSyncClient(_shp_channel)); this->m_last_sync.Set(0, 0); } SyncDataContenxt::~SyncDataContenxt() noexcept {} bool SyncDataContenxt::IsBeginning() const noexcept { return (this->m_last_sync.m_term == 0 && this->m_last_sync.m_index == 0); } LogReplicationContext::LogReplicationContext()noexcept { this->m_p_joint_snapshot = new MemberMgr::JointSummary(); } LogReplicationContext::~LogReplicationContext()noexcept{ delete (MemberMgr::JointSummary*)this->m_p_joint_snapshot; } CutEmptyContext::CutEmptyContext(int value_flag)noexcept{ this->m_value_flag = value_flag; this->m_generation_tp = std::chrono::system_clock::now(); this->m_processed_flag.store(false); } CutEmptyContext::~CutEmptyContext()noexcept{} bool CutEmptyContext::operator<(const CutEmptyContext& other)const noexcept { if (this->m_value_flag < 0 || other.m_value_flag > 0) return true; if (this->m_value_flag > 0 || 
other.m_value_flag < 0) return false; const auto &_shp_req = this->m_write_request->GetReqCtx(); const auto &_shp_req_other = other.m_write_request->GetReqCtx(); return _shp_req->m_cur_log_id < _shp_req_other->m_cur_log_id; } bool CutEmptyContext::operator>(const CutEmptyContext& other)const noexcept { if (this->m_value_flag < 0 || other.m_value_flag > 0) return false; if (this->m_value_flag > 0 || other.m_value_flag < 0) return true; const auto &_shp_req = this->m_write_request->GetReqCtx(); const auto &_shp_req_other = other.m_write_request->GetReqCtx(); return _shp_req->m_cur_log_id > _shp_req_other->m_cur_log_id; } bool CutEmptyContext::operator==(const CutEmptyContext& other)const noexcept { if (other.m_value_flag != 0 || this->m_value_flag != 0) return false; const auto &_shp_req = this->m_write_request->GetReqCtx(); const auto &_shp_req_other = other.m_write_request->GetReqCtx(); return _shp_req->m_cur_log_id == _shp_req_other->m_cur_log_id; } }
9,502
C++
.cc
177
49.361582
135
0.720523
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,246
service.cc
ppLorins_aurora/src/service/service.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <functional> #include "common/log_identifier.h" #include "common/error_code.h" #include "state/state_mgr.h" #include "global/global_env.h" #include "follower/follower_view.h" #include "follower/follower_request.h" #include "follower/memory_log_follower.h" #include "binlog/binlog_singleton.h" #include "storage/storage_singleton.h" #include "leader/follower_entity.h" #include "leader/memory_log_leader.h" #include "tools/lock_free_priority_queue.h" #include "tools/utilities.h" #include "election/election.h" #include "member/member_manager.h" #include "service/service.h" #include "client/client_impl.h" namespace RaftCore::Service { using grpc::CompletionQueue; using ::raft::Entity; using ::raft::ErrorCode; using ::RaftCore::State::RaftRole; using ::RaftCore::State::StateMgr; using ::RaftCore::Common::CommonView; using ::RaftCore::Common::ReadLock; using ::RaftCore::Common::WriteLock; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::Common::TypeEntityList; using ::RaftCore::Follower::MemoryLogItemFollower; using ::RaftCore::Follower::CmpMemoryLogFollower; using ::RaftCore::Follower::FollowerView; using ::RaftCore::Follower::TypeMemlogFollowerList; using ::RaftCore::BinLog::BinLogGlobal; using 
::RaftCore::BinLog::BinLogOperator; using ::RaftCore::Leader::CmpMemoryLogLeader; using ::RaftCore::Leader::LeaderView; using ::RaftCore::Leader::FollowerEntity; using ::RaftCore::Leader::TypePtrFollowerEntity; using ::RaftCore::Leader::FollowerStatus; using ::RaftCore::Leader::BackGroundTask::ReSyncLogContext; using ::RaftCore::DataStructure::UnorderedSingleListNode; using ::RaftCore::DataStructure::DoubleListNode; using ::RaftCore::DataStructure::LockFreePriotityQueue; using ::RaftCore::DataStructure::DoubleListNode; using ::RaftCore::Election::ElectionMgr; using ::RaftCore::Member::MemberMgr; using ::RaftCore::Member::EJointStatus; using ::RaftCore::Member::JointConsensusMask; using ::RaftCore::Tools::TypeSysTimePoint; using ::RaftCore::Global::GlobalEnv; using ::RaftCore::Client::AppendEntriesAsyncClient; using ::RaftCore::Storage::StorageGlobal; const char* RPCBase::m_status_macro_names[] = {"NORMAL","HALTED","SHUTTING_DOWN"}; RPCBase::RPCBase() {} RPCBase::~RPCBase() {} bool RPCBase::LeaderCheckVailidity( ::raft::ClientCommonResponse* response) noexcept { response->set_result(ErrorCode::SUCCESS); auto _current_role = StateMgr::GetRole(); if ( _current_role != RaftRole::LEADER) { response->set_result(ErrorCode::FAIL); if (_current_role == RaftRole::CANDIDATE) { response->set_err_msg("I'm not a leader ,tell you the right leader."); return false; } ::RaftCore::Topology _topo; ::RaftCore::CTopologyMgr::Read(&_topo); //I'm a follower response->set_err_msg("I'm not a leader ,tell you the right leader."); response->set_redirect_to(_topo.m_leader); return false; } auto _status = LeaderView::m_status; if (_status != LeaderView::ServerStatus::NORMAL) { response->set_result(ErrorCode::FAIL); response->set_err_msg(std::string("I'm in a status of:") + this->MacroToString(_status)); return false; } return true; } std::string RPCBase::FollowerCheckValidity(const ::raft::RequestBase &req_base, TypeTimePoint* p_tp, LogIdentifier *p_cur_id) noexcept { //Check current node 
status auto _current_role = StateMgr::GetRole(); if ( _current_role != RaftRole::FOLLOWER) return "I'm not a follower, I'm a:" + std::string(StateMgr::GetRoleStr()); //if (p_tp != nullptr) // ::RaftCore::Tools::EndTiming(*p_tp, "start processing debugpos1.2", p_cur_id); //Check leader address validity ::RaftCore::Topology _topo; ::RaftCore::CTopologyMgr::Read(&_topo); //if (p_tp != nullptr) // ::RaftCore::Tools::EndTiming(*p_tp, "start processing debugpos1.3", p_cur_id); if (_topo.m_leader != req_base.addr()) return "Sorry,my leader is[" + _topo.m_leader + "],not you" + "[" + req_base.addr() +"]"; //Check leader term validity if (req_base.term() < ElectionMgr::m_cur_term.load()) return "your term " + std::to_string(req_base.term()) + " is smaller than mine:" + std::to_string(ElectionMgr::m_cur_term.load()); if (req_base.term() > ElectionMgr::m_cur_term.load()) return "your term " + std::to_string(req_base.term()) + " is greater than mine:" + std::to_string(ElectionMgr::m_cur_term.load()) + ",waiting for you heartbeat msg only by which I could upgrade my term."; //if (p_tp != nullptr) // ::RaftCore::Tools::EndTiming(*p_tp, "start processing debugpos1.4", p_cur_id); return ""; } bool RPCBase::ValidClusterNode(const std::string &peer_addr) noexcept { ::RaftCore::Topology _topo; ::RaftCore::CTopologyMgr::Read(&_topo); if (_topo.InCurrentCluster(peer_addr)) return true; ReadLock _r_lock(MemberMgr::m_mutex); if (MemberMgr::m_joint_summary.m_joint_status != EJointStatus::JOINT_CONSENSUS) return false; const auto &_new_cluster = MemberMgr::m_joint_summary.m_joint_topology.m_new_cluster; return _new_cluster.find(peer_addr)!=_new_cluster.cend(); } Write::Write(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept { /*Set parent delegator's managed ownership, need to ahead of the following 'Initialize' since otherwise this object will serving request promptly yet 
not ready for that. */ this->ResetOwnership(this); this->m_phaseI_determined_point.store(false); this->m_phaseII_ready_list.store(nullptr); #ifdef _SVC_WRITE_TEST_ this->m_epoch = std::chrono::system_clock::from_time_t(std::mktime(&m_start_tm)); #endif this->Initialize(shp_svc, shp_notify_cq, shp_call_cq); this->m_async_service->RequestWrite(&this->m_server_context, &this->m_request, &this->m_responder, this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this); } Write::~Write() {} void Write::FinishRequest(WriteProcessStage state) noexcept { this->m_write_stage = state; this->m_responder.Finish(this->m_response, ::grpc::Status::OK, this); } void Write::React(bool cq_result) noexcept { if (!cq_result) { /*Only when m_shp_req_ctx containing something, it's worthy to log, otherwise it's from the default pool's request.*/ if (this->m_shp_req_ctx) LOG(ERROR) << "Server WriteRequest got false result from CQ, log:" << this->m_shp_req_ctx->m_cur_log_id; this->ReleaseOwnership(); return; } bool _result = true; switch (this->m_write_stage) { case WriteProcessStage::CREATE: new Write(this->m_async_service, this->m_server_notify_cq,this->m_server_call_cq); _result = this->BeforeReplicate(); if (!_result) this->FinishRequest(WriteProcessStage::ABOURTED); break; case WriteProcessStage::FRONT_FINISH: this->ReleaseOwnership(); break; case WriteProcessStage::ABOURTED: this->ReleaseOwnership(); break; default: CHECK(false) << "Unexpected tag " << int(this->m_write_stage); break; } } ::grpc::Status Write::Process() noexcept { return ::grpc::Status::OK; } bool Write::PrepareReplicationStatistic(std::list<std::shared_ptr<AppendEntriesAsyncClient>> &entrust_list) noexcept { int _entrusted_client_num = 0; auto &_phaseI_state = this->m_shp_req_ctx->m_phaseI_state; uint32_t _total_us = 0; auto _prepare_statistic = [&](TypePtrFollowerEntity& shp_follower) { if (shp_follower->m_status != FollowerStatus::NORMAL) { LOG(WARNING) << "follower " << shp_follower->my_addr << " is under " 
<< FollowerEntity::MacroToString(shp_follower->m_status) << ",won't appending entries to it"; return; } void* _p_pool = nullptr; auto _shp_client = shp_follower->FetchAppendClient(_p_pool); VLOG(90) << "AppendEntriesAsyncClient fetched:" << shp_follower->my_addr; CHECK(_shp_client) << "no available AppendEntries clients, may need a bigger pool."; /*The self-delegated ownership will be existing at the mean time, we can just copy it from the delegator. */ _shp_client->OwnershipDelegator<Write>::CopyOwnership(this->GetOwnership()); _shp_client->PushCallBackArgs(_p_pool); entrust_list.emplace_back(_shp_client); _entrusted_client_num++; _phaseI_state.IncreaseEntrust(shp_follower->m_joint_consensus_flag); }; //Prepare the commit request ahead of time. this->m_shp_commit_req.reset(new ::raft::CommitEntryRequest()); this->m_shp_commit_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); this->m_shp_commit_req->mutable_base()->set_term(this->m_shp_req_ctx->m_cur_log_id.m_term); auto _p_entity_id = this->m_shp_commit_req->mutable_entity_id(); _p_entity_id->set_term(this->m_shp_req_ctx->m_cur_log_id.m_term); _p_entity_id->set_idx(this->m_shp_req_ctx->m_cur_log_id.m_index); std::size_t follower_num = 0; { ReadLock _r_lock(LeaderView::m_hash_followers_mutex); for (auto &_pair_kv : LeaderView::m_hash_followers) _prepare_statistic(_pair_kv.second); follower_num = LeaderView::m_hash_followers.size(); } this->m_shp_req_ctx->m_cluster_size = follower_num + 1; this->m_shp_req_ctx->m_cluster_majority = (follower_num + 1) / 2 + 1; // +1 means including the leader. 
if ((std::size_t)_entrusted_client_num < this->m_shp_req_ctx->m_cluster_majority) { LOG(ERROR) << "can't get majority client entrusted for the stable cluster,log:" << this->m_shp_req_ctx->m_cur_log_id; return false; } _entrusted_client_num = 0; uint32_t _leader_joint_consensus_flag = (uint32_t)JointConsensusMask::IN_OLD_CLUSTER; std::size_t _new_cluster_node_num = 0; do{ ReadLock _r_lock(MemberMgr::m_mutex); if (MemberMgr::m_joint_summary.m_joint_status != EJointStatus::JOINT_CONSENSUS) break; for (auto &_pair_kv : MemberMgr::m_joint_summary.m_joint_topology.m_added_nodes) _prepare_statistic(_pair_kv.second); if (!MemberMgr::m_joint_summary.m_joint_topology.m_leader_gone_away) _leader_joint_consensus_flag |= (uint32_t)JointConsensusMask::IN_NEW_CLUSTER; _new_cluster_node_num = MemberMgr::m_joint_summary.m_joint_topology.m_new_cluster.size(); *((MemberMgr::JointSummary*)this->m_shp_req_ctx->m_p_joint_snapshot) = MemberMgr::m_joint_summary; } while (false); this->m_shp_req_ctx->m_new_cluster_size = _new_cluster_node_num; this->m_shp_req_ctx->m_new_cluster_majority = (_new_cluster_node_num > 0) ? (_new_cluster_node_num / 2 + 1) : 0; if ((std::size_t)_entrusted_client_num < this->m_shp_req_ctx->m_new_cluster_majority) { LOG(ERROR) << "can't get majority client entrusted for the joint cluster,log:" << this->m_shp_req_ctx->m_cur_log_id; return false; } //Count the leader to the majority. 
_phaseI_state.IncreaseSuccess(_leader_joint_consensus_flag); return true; } bool Write::PrepareReplicationContext(uint32_t cur_term, uint32_t pre_term) noexcept { std::shared_ptr<::raft::AppendEntriesRequest> _shp_req(new ::raft::AppendEntriesRequest()); _shp_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); _shp_req->mutable_base()->set_term(cur_term); auto _p_entry = _shp_req->add_replicate_entity(); auto _p_entity_id = _p_entry->mutable_entity_id(); _p_entity_id->set_term(cur_term); _p_entity_id->set_idx(this->m_guid_pair.m_cur_guid); auto _p_pre_entity_id = _p_entry->mutable_pre_log_id(); _p_pre_entity_id->set_term(pre_term); _p_pre_entity_id->set_idx(this->m_guid_pair.m_pre_guid); auto _p_wop = _p_entry->mutable_write_op(); #ifdef _SVC_WRITE_TEST_ auto us = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now() - this->m_epoch); _shp_req->set_debug_info(std::to_string(us.count())); #endif //Memory copy overhead happened here, no way to avoid,background threads and phaseII need this too. //TODO: memory copy overhead can be optimized out since there is no 'background threads' in the async mode. _p_wop->set_key(this->m_client_request->req().key()); _p_wop->set_value(this->m_client_request->req().value()); this->m_shp_req_ctx.reset(new LogReplicationContext()); this->m_shp_req_ctx->m_cur_log_id.m_term = cur_term; this->m_shp_req_ctx->m_cur_log_id.m_index = this->m_guid_pair.m_cur_guid; auto &_phaseI_state = this->m_shp_req_ctx->m_phaseI_state; auto _req_setter = [&_shp_req](std::shared_ptr<::raft::AppendEntriesRequest>& _target)->void { _target = _shp_req; }; //Require get current replication context prepared before entrusting any of the request. 
std::list<std::shared_ptr<AppendEntriesAsyncClient>> _entrust_list; if (!this->PrepareReplicationStatistic(_entrust_list)) { LOG(ERROR) << "can't get majority client entrusted,log:" << this->m_shp_req_ctx->m_cur_log_id; return false; } for (auto &_shp_client : _entrust_list) { auto _f_prepare = std::bind(&::raft::RaftService::Stub::PrepareAsyncAppendEntries, _shp_client->GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); _shp_client->EntrustRequest(_req_setter, _f_prepare, ::RaftCore::Config::FLAGS_leader_append_entries_rpc_timeo_ms); } return true; } bool Write::BeforeReplicate() noexcept { this->m_tp_start = ::RaftCore::Tools::StartTimeing(); this->m_rsp = this->m_response.mutable_client_comm_rsp(); if (!this->LeaderCheckVailidity(this->m_rsp)) return false; /* Area X */ //----------------Step 0: get the unique log entry term and index by guid----------------// this->m_guid_pair = GuidGenerator::GenerateGuid(); auto _cur_term = ElectionMgr::m_cur_term.load(); VLOG(89) << "Generating GUID done,idx:" << this->m_guid_pair.m_cur_guid; #ifdef _SVC_WRITE_TEST_ auto _p_test_wop = this->m_request.mutable_req(); std::string _idx = std::to_string(this->m_guid_pair.m_cur_guid); const uint32_t &_val = this->m_request.timestamp(); this->m_rsp->set_err_msg(std::to_string(_val)); _p_test_wop->set_key("test_client_key_" + _idx); #endif this->m_client_request = &this->m_request; //----------------Step 1: Add the current request to the pending list----------------// this->m_shp_entity.reset(new MemoryLogItemLeader(_cur_term,this->m_guid_pair.m_cur_guid)); this->m_shp_entity->GetEntity()->set_allocated_write_op(const_cast<::raft::WriteRequest*>(&this->m_client_request->req())); auto _p_entity_id = this->m_shp_entity->GetEntity()->mutable_entity_id(); _p_entity_id->set_term(_cur_term); _p_entity_id->set_idx(this->m_guid_pair.m_cur_guid); /*If this leader has just been elected out, it's term will be different from the latest log entry in the binlog 
file, needing to make sure pre_term is correct.*/ uint32_t _pre_term = _cur_term; //VLOG(89) << "my pre guid:" << this->m_guid_pair.m_pre_guid << ",debut:" << ElectionMgr::m_leader_debut << ",debut LRL:" << ElectionMgr::m_pre_term_lrl; this->m_first_of_cur_term = ElectionMgr::m_leader_debut && (ElectionMgr::m_pre_term_lrl.m_index == this->m_guid_pair.m_pre_guid); //Current guid is the first released guid under the leader's new term. if (this->m_first_of_cur_term) _pre_term = ElectionMgr::m_pre_term_lrl.m_term; this->m_p_pre_entity_id = this->m_shp_entity->GetEntity()->mutable_pre_log_id(); this->m_p_pre_entity_id->set_term(_pre_term); this->m_p_pre_entity_id->set_idx(this->m_guid_pair.m_pre_guid); LeaderView::m_entity_pending_list.Insert(this->m_shp_entity); //Test.. //this->m_shp_entity->GetEntity()->release_write_op(); //this->FinishRequest(WriteProcessStage::FRONT_FINISH); //return true; //Note: all get good result(~5w/s tp, ~2ms lt.) before here. //----------------Step 2: replicated to the majority of cluster----------------// if (!this->PrepareReplicationContext(_cur_term, _pre_term)) { LeaderView::m_entity_pending_list.Delete(this->m_shp_entity); this->m_rsp->set_result(ErrorCode::FAIL); this->m_rsp->set_err_msg("PrepareReplicationContext fail."); return false; } //Note: get a bad result(~2w/s tp, ~15ms lt.) if reach here. 
::RaftCore::Tools::EndTiming(this->m_tp_start, "finished entrust phaseI clients:", &this->m_shp_req_ctx->m_cur_log_id); //Test //this->FinishRequest(WriteProcessStage::FRONT_FINISH); #ifdef _SVC_WRITE_TEST_ auto _now_us = (std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now() - this->m_epoch)).count(); uint64_t _lantency_us = (uint64_t)(_now_us - _val); VLOG(2) << "server side single req latency(us):" << _lantency_us << ",idx:" << this->m_shp_req_ctx->m_cur_log_id; #endif return true; } void Write::CommitDoneCallBack(const ::grpc::Status &status, const ::raft::CommitEntryResponse& rsp, FollowerEntity* ptr_follower) noexcept { VLOG(89) << "CommitDoneCallBack called,log:" << this->m_shp_req_ctx->m_cur_log_id << ",addr:" << ptr_follower->my_addr; auto _joint_consensus_state = ptr_follower->m_joint_consensus_flag; auto &_phaseII_state = this->m_shp_req_ctx->m_phaseII_state; if (!status.ok()) { LOG(ERROR) << "CommitEntries:RPC fail,error code:" << status.error_code() << ",error msg:" << status.error_message() << ",logID:" << this->m_shp_req_ctx->m_cur_log_id << ",remote peer:" << ptr_follower->my_addr; if (status.error_code() == ::grpc::StatusCode::DEADLINE_EXCEEDED) _phaseII_state.IncreaseImplicitFail(_joint_consensus_state); else _phaseII_state.IncreaseExplicitFail(_joint_consensus_state); return; } const ::raft::CommonResponse& comm_rsp = rsp.comm_rsp(); auto _error_code = comm_rsp.result(); if (_error_code!=ErrorCode::SUCCESS && _error_code!=ErrorCode::ALREADY_COMMITTED) { LOG(ERROR) << "CommitEntries:RPC return fail,error code:" << comm_rsp.result() << ",error msg:" << comm_rsp.err_msg() << ",logID" << this->m_shp_req_ctx->m_cur_log_id; _phaseII_state.IncreaseExplicitFail(_joint_consensus_state); return; } _phaseII_state.IncreaseSuccess(_joint_consensus_state); } const std::shared_ptr<LogReplicationContext>& Write::GetReqCtx() noexcept { return this->m_shp_req_ctx; } void Write::ProcessReplicateFailure(const ::raft::CommonResponse& 
comm_rsp, TwoPhaseCommitContext::PhaseState &phaseI_state, FollowerEntity* ptr_follower, uint32_t joint_consensus_state) noexcept { LOG(ERROR) << "AppendEntries:RPC return fail,detail:" << comm_rsp.DebugString() << ",logID" << this->m_shp_req_ctx->m_cur_log_id << ",remote peer:" << ptr_follower->my_addr; auto _error_code = comm_rsp.result(); if (_error_code == ErrorCode::FAIL) { phaseI_state.IncreaseExplicitFail(joint_consensus_state); return; } if (_error_code == ErrorCode::IMPLICIT_FAIL) { phaseI_state.IncreaseImplicitFail(joint_consensus_state); return; } if (_error_code != ErrorCode::APPEND_ENTRY_CONFLICT && _error_code != ErrorCode::WAITING_TIMEOUT && _error_code != ErrorCode::OVERSTEP_LCL ) { LOG(ERROR) << "unexpected returned value: " << _error_code << ",logID:" << this->m_shp_req_ctx->m_cur_log_id; phaseI_state.IncreaseExplicitFail(joint_consensus_state); return; } if (_error_code == ErrorCode::APPEND_ENTRY_CONFLICT || _error_code == ErrorCode::OVERSTEP_LCL) phaseI_state.IncreaseExplicitFail(joint_consensus_state); else phaseI_state.IncreaseImplicitFail(joint_consensus_state); LogIdentifier _sync_point = (_error_code == ErrorCode::APPEND_ENTRY_CONFLICT) ? 
\ this->m_shp_req_ctx->m_cur_log_id : BinLogGlobal::m_instance.GetLastReplicated(); this->AddResyncLogTask(ptr_follower, _sync_point); return; } void Write::AddResyncLogTask(FollowerEntity* ptr_follower, const LogIdentifier &sync_point) noexcept { /*Follower status has already been set 2o resync, some other threads must have started resyncing-log no need to do more.*/ if (ptr_follower->m_status == FollowerStatus::RESYNC_LOG) { LOG(INFO) << "a RESYNC_LOG task already in progress for follower:" << ptr_follower->my_addr << ", no need to generate a new one, just return"; return; } // Set follower status ptr_follower->m_status = FollowerStatus::RESYNC_LOG; // Generate a task std::shared_ptr<ReSyncLogContext> _shp_task(new ReSyncLogContext()); _shp_task->m_last_sync_point = sync_point; //Find the follower's shared_ptr and copy the ownership. { ReadLock _r_lock(LeaderView::m_hash_followers_mutex); auto _cmp = [&](const std::pair<std::string, TypePtrFollowerEntity> &_pair) { return _pair.first == ptr_follower->my_addr; }; auto _iter = std::find_if(LeaderView::m_hash_followers.cbegin(), LeaderView::m_hash_followers.cend(),_cmp); if (_iter != LeaderView::m_hash_followers.cend()) _shp_task->m_follower = _iter->second; } if (!_shp_task->m_follower) { LOG(ERROR) << "Can't find the corresponding follower in leader's view " << ptr_follower->my_addr << ",remote peer:" << ptr_follower->my_addr; return; } auto _ret_code = LeaderView::m_priority_queue.Push(LockFreePriotityQueue::TaskType::RESYNC_LOG, &_shp_task); if (_ret_code != QUEUE_SUCC) { LOG(ERROR) << "Add RESYNC-LOG task fail,ret:" << _ret_code << ",logID:" << _shp_task->m_last_sync_point << ",remote peer:" << ptr_follower->my_addr; return; } LOG(ERROR) << "Add RESYNC-LOG succeed,sync point" << _shp_task->m_last_sync_point << ",remote peer:" << ptr_follower->my_addr; } void Write::EntrustCommitRequest(FollowerEntity* ptr_follower, AppendEntriesAsyncClient* ptr_client) noexcept { //Must update statistic data before really do 
entrust. auto &_phaseII_state = this->m_shp_req_ctx->m_phaseII_state; _phaseII_state.IncreaseEntrust(ptr_follower->m_joint_consensus_flag); void* _p_pool = nullptr; auto _shp_client = ptr_follower->FetchCommitClient(_p_pool); VLOG(90) << "CommitEntriesAsyncClient fetched:" << ptr_follower->my_addr << ",log:" << this->m_shp_req_ctx->m_cur_log_id; CHECK(_shp_client) << "no available Commit clients, may need a bigger pool."; /*The commit-client-delegated ownership will be existing at the mean time, we can just copy it from the delegator. */ auto _shp_write = ptr_client->OwnershipDelegator<Write>::GetOwnership(); _shp_client->OwnershipDelegator<Write>::CopyOwnership(_shp_write); _shp_client->PushCallBackArgs(_p_pool); auto _req_setter = [&](std::shared_ptr<::raft::CommitEntryRequest>& _target)->void { _target = this->m_shp_commit_req; }; auto _f_prepare = std::bind(&::raft::RaftService::Stub::PrepareAsyncCommitEntries, _shp_client->GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); _shp_client->EntrustRequest(_req_setter, _f_prepare, ::RaftCore::Config::FLAGS_leader_commit_entries_rpc_timeo_ms); }; bool Write::UpdatePhaseIStatistic(const ::grpc::Status &status, const ::raft::AppendEntriesResponse& rsp, FollowerEntity* ptr_follower) noexcept { auto _joint_consensus_state = ptr_follower->m_joint_consensus_flag; auto &_phaseI_state = this->m_shp_req_ctx->m_phaseI_state; if (!status.ok()) { LOG(ERROR) << "AppendEntries:RPC fail,error code:" << status.error_code() << ",error msg:" << status.error_message() << ",logID:" << this->m_shp_req_ctx->m_cur_log_id << ",remote peer:" << ptr_follower->my_addr; if (status.error_code() == ::grpc::StatusCode::DEADLINE_EXCEEDED) { _phaseI_state.IncreaseImplicitFail(_joint_consensus_state); LogIdentifier _sync_point = BinLogGlobal::m_instance.GetLastReplicated(); this->AddResyncLogTask(ptr_follower, _sync_point); return false; } _phaseI_state.IncreaseExplicitFail(_joint_consensus_state); return false; } 
const ::raft::CommonResponse& comm_rsp = rsp.comm_rsp(); auto _error_code = comm_rsp.result(); if (_error_code!=ErrorCode::SUCCESS && _error_code!=ErrorCode::SUCCESS_MERGED) { this->ProcessReplicateFailure(comm_rsp, _phaseI_state, ptr_follower, _joint_consensus_state); return false; } //Here succeed. _phaseI_state.IncreaseSuccess(_joint_consensus_state); if (_error_code == ErrorCode::SUCCESS_MERGED) { LOG(INFO) << "This log has been unsuccessfully merged, no need to entrust a commit client, " << "logID:" << this->m_shp_req_ctx->m_cur_log_id << ",remote peer:" << ptr_follower->my_addr; return false; } return true; } void Write::ReplicateDoneCallBack(const ::grpc::Status &status, const ::raft::AppendEntriesResponse& rsp, FollowerEntity* ptr_follower, AppendEntriesAsyncClient* ptr_client) noexcept { //Test. //return; ::RaftCore::Tools::EndTiming(this->m_tp_start, "replication callback comes:", &this->m_shp_req_ctx->m_cur_log_id); bool _phaseI_result = this->UpdatePhaseIStatistic(status,rsp,ptr_follower); if (!_phaseI_result) LOG(INFO) << "replication callback won't entrust a commit client due to above described" << " reasons, logID:" << this->m_shp_req_ctx->m_cur_log_id << ",remote peer:" << ptr_follower->my_addr; const auto &_cur_log_id = this->m_shp_req_ctx->m_cur_log_id; uint32_t _diff = _cur_log_id.GreaterThan(ptr_follower->m_last_sent_committed.load()); bool _group_commit_reached = _diff >= ::RaftCore::Config::FLAGS_group_commit_count; bool _need_entrust = _phaseI_result && _group_commit_reached; auto _push_list_if_necessary = [&]() ->void { if (!_need_entrust) return; if (!ptr_follower->UpdateLastSentCommitted(_cur_log_id)) return; auto *_p_cur_client_head = this->m_phaseII_ready_list.load(); auto * _p_new_node = new UnorderedSingleListNode<FollowerEntity>(ptr_follower); _p_new_node->m_next = _p_cur_client_head; while (!this->m_phaseII_ready_list.compare_exchange_strong(_p_cur_client_head, _p_new_node)) _p_new_node->m_next = _p_cur_client_head; }; //Judge if 
replication result has been determined. FinishStatus _determined_value = this->m_shp_req_ctx->JudgePhaseIDetermined(); if (_determined_value == FinishStatus::UNFINISHED) { //If not determined, just push the current client(if any) to the entrust list. _push_list_if_necessary(); return; } _need_entrust &= (_determined_value == FinishStatus::POSITIVE_FINISHED); bool _determined = false; if (!this->m_phaseI_determined_point.compare_exchange_strong(_determined, true)) { /*Only determined with a success result, can we do further processing(aka,pushing the client to the entrust list) */ if (_need_entrust) if (ptr_follower->UpdateLastSentCommitted(_cur_log_id)) this->EntrustCommitRequest(ptr_follower, ptr_client); return; } //Only one thread could reach here for a certain log entry. _push_list_if_necessary(); //Push current request to list. this->AfterDetermined(ptr_client); } FinishStatus Write::JudgeReplicationResult() noexcept{ const auto &_entity_id = this->m_shp_entity->GetEntity()->entity_id(); //If majority succeed. FinishStatus _ret_val = this->m_shp_req_ctx->JudgePhaseIDetermined(); if (_ret_val == FinishStatus::POSITIVE_FINISHED) { /*No matter what's the reason, just return FAIL to the client ,and don't distinguish the IMPLICIT_FAIL case from all the other failure cases. */ if (LeaderView::m_status == LeaderView::ServerStatus::HALTED) { //Waiting in a conservative manner. LeaderView::m_last_log_waiting_num.fetch_add(1); auto _last_released_guid = this->WaitForLastGuidReleasing(); if (_entity_id.idx() == _last_released_guid) this->LastlogResolve(true, _last_released_guid); } return _ret_val; } this->m_rsp->set_result(ErrorCode::FAIL); this->m_rsp->set_err_msg("cannot replicate to the majority"); //Push the [implicit] failed request to the bg queue. 
auto _shp_ctx = std::shared_ptr<CutEmptyContext>(new CutEmptyContext()); _shp_ctx->m_write_request = this->GetOwnership(); LeaderView::m_cut_empty_list.Insert(_shp_ctx); VLOG(89) << "Write Request failed, pushed it to bg list:" << this->m_shp_req_ctx->m_cur_log_id.m_index; /*Note:The failure cases, regardless explicit or implicit, are indicating the advent of errors, making it reasonable for the server to stop and take a look at what happened and choose the best way to deal with the causes, only after that the server can continue serving the clients. Besides ,the 'UpdateServerStatus' function returning false is just okay, because other threads with a larger guid_pair may have already set server status to LeaderView::ServerStatus::HALTED. */ this->UpdateServerStatus(this->m_guid_pair.m_cur_guid, LeaderView::ServerStatus::HALTED); /*There is a time windows during which one thread can still generating guids even the server status already been set to HALT(the corresponding code is marked as 'Area X' in the above code). The following code aim at waiting it to elapse.*/ auto _last_released_guid = this->WaitForLastGuidReleasing(); LOG(ERROR) << "AppendEntries:cannot replicate to the majority of cluster,write fail ,idx:" << this->m_shp_req_ctx->m_cur_log_id.m_index << ",context details:" << this->m_shp_req_ctx->Dump(); CHECK(this->m_guid_pair.m_cur_guid <= _last_released_guid) << "guid issue :" << this->m_guid_pair.m_cur_guid << "|" << _last_released_guid; //Increasing the waiting num. LeaderView::m_last_log_waiting_num.fetch_add(1); //Current log id is the LRG server halting on. if (this->m_guid_pair.m_cur_guid == _last_released_guid) { //latest log update overall info. And there are no potential failures for the latest issued log id. 
this->LastlogResolve(false, _last_released_guid); } return _ret_val; } void Write::ReleasePhaseIIReadyList()noexcept { auto *_p_cur_client = this->m_phaseII_ready_list.load(); while (_p_cur_client != nullptr) { auto *_p_tmp = _p_cur_client; _p_cur_client = _p_cur_client->m_next; //Shouldn't delete the _p_tmp->m_data, it's the ptr_follower, just detach it. _p_tmp->m_data = nullptr; /*Note: just delete the outer side wrapper(aka the 'UnorderedSingleListNode'), rather than the inner data, which will be released by itself in the future.*/ delete _p_tmp; } this->m_phaseII_ready_list.store(nullptr); } bool Write::AppendBinlog(AppendEntriesAsyncClient* ptr_client) noexcept{ //Only one thread could reach here for a certain log entry. const auto &_log_id = this->m_shp_req_ctx->m_cur_log_id; auto _cmp = [&](const MemoryLogItemLeader &one) ->bool{ return !::RaftCore::Common::EntityIDLarger(one.GetEntity()->entity_id(), _log_id); }; DoubleListNode<MemoryLogItemLeader> *_p_head = LeaderView::m_entity_pending_list.CutHead(_cmp); if (_p_head == nullptr) { VLOG(89) << "CutHead empty occur, transfer to bg list:" << _log_id; //Push unfinished requests to background singlist_ordered_queue. 
auto _shp_ctx = std::shared_ptr<CutEmptyContext>(new CutEmptyContext()); _shp_ctx->m_write_request = ptr_client->OwnershipDelegator<Write>::GetOwnership(); LeaderView::m_cut_empty_list.Insert(_shp_ctx); return false; } std::list<std::shared_ptr<Entity>> _input_list; auto _push = [&](decltype(_p_head) p_cur)->void { _input_list.emplace_back(p_cur->m_val->GetEntity()); }; DoubleListNode<MemoryLogItemLeader>::Apply(_p_head, _push); //Note: Multiple thread appending could happen CHECK(BinLogGlobal::m_instance.AppendEntry(_input_list)) << "AppendEntry to binlog fail,never should this happen,something terribly wrong."; ::RaftCore::Tools::EndTiming(this->m_tp_start, "Append binlog done.", &this->m_shp_req_ctx->m_cur_log_id); //No shared resource between the waiting threads and the notifying thread(s), no mutex is needed here. LeaderView::m_cv.notify_all(); LeaderView::m_garbage.PushFront(_p_head); return true; } void Write::AfterDetermined(AppendEntriesAsyncClient* ptr_client) noexcept { ::RaftCore::Tools::EndTiming(this->m_tp_start, "log entry is determined now.", &this->m_shp_req_ctx->m_cur_log_id); //Only one thread could reach here for a certain log entry. FinishStatus _ret_val = this->JudgeReplicationResult(); CHECK(_ret_val != FinishStatus::UNFINISHED) << "got an undetermined result in AfterDetermined."; if (_ret_val == FinishStatus::NEGATIVE_FINISHED) { this->ReleasePhaseIIReadyList(); return; } //Now, phaseI succeed, entrust all the pending phaseII request. 
auto *_p_cur_client = this->m_phaseII_ready_list.load(); while (_p_cur_client != nullptr) { auto *_p_tmp = _p_cur_client; _p_cur_client = _p_cur_client->m_next; this->EntrustCommitRequest(_p_tmp->m_data, ptr_client); /*Note: just delete the outer side wrapper(aka the 'UnorderedSingleListNode'), rather than the inner data, which will be released by itself in the future.*/ _p_tmp->m_data = nullptr; delete _p_tmp; } ::RaftCore::Tools::EndTiming(this->m_tp_start, "entrust all phaseII [necessary] clients done.", &this->m_shp_req_ctx->m_cur_log_id); //----------------Step 3: append to local binlog----------------// if (!this->AppendBinlog(ptr_client)) return; this->AfterAppendBinlog(); } void Write::AfterAppendBinlog() noexcept { this->m_shp_entity->GetEntity()->release_write_op(); if (this->m_rsp->result() != ErrorCode::SUCCESS) { this->FinishRequest(WriteProcessStage::ABOURTED); return; } if (this->m_first_of_cur_term) ElectionMgr::m_leader_debut = false; //----------------Step 4: update local storage----------------// //Note: Out of order setting could happen , but is acceptable for blind writing operations. 
const auto &_log_id = this->m_shp_req_ctx->m_cur_log_id; WriteProcessStage _final_status = WriteProcessStage::FRONT_FINISH; if (!StorageGlobal::m_instance.Set(_log_id, this->m_client_request->req().key(), this->m_client_request->req().value())) { LOG(ERROR) << "Write to storage fail,logID:" << _log_id; this->m_rsp->set_result(ErrorCode::FAIL); this->m_rsp->set_err_msg("Log replicated succeed , but cannot write to storage."); WriteProcessStage _final_status = WriteProcessStage::ABOURTED; } else ::RaftCore::Tools::EndTiming(this->m_tp_start, "Update storage done.", &this->m_shp_req_ctx->m_cur_log_id); this->FinishRequest(_final_status); } void Write::CutEmptyRoutine() noexcept { LOG(INFO) << "leader CutEmpty msg processor thread started."; while (true) { if (!CommonView::m_running_flag) return; auto _wait_cond = [&]()->bool { return !LeaderView::m_cut_empty_list.Empty(); }; auto _wait_timeo_us = std::chrono::microseconds(::RaftCore::Config::FLAGS_iterating_wait_timeo_us); std::unique_lock<std::mutex> _unique_wrapper(LeaderView::m_cv_mutex); bool _waiting_result = LeaderView::m_cv.wait_for(_unique_wrapper, _wait_timeo_us, _wait_cond); //There is no shared state among different threads, so it's better to release this lock ASAP. _unique_wrapper.unlock(); if (!_waiting_result) continue; auto _now = std::chrono::system_clock::now(); std::shared_ptr<CutEmptyContext> _shp_last_return; bool _recheck = false; auto _lambda = [&](std::shared_ptr<CutEmptyContext> &one) { auto &_p_req = one->m_write_request; auto _upper = BinLogGlobal::m_instance.GetLastReplicated(); if (_p_req->ProcessCutEmptyRequest(_now, _upper, one, _recheck)) { _shp_last_return = one; return true; } return false;//No need to go further, stop iterating over the list. }; LeaderView::m_cut_empty_list.Iterate(_lambda); if (!_shp_last_return) continue; auto* _p_head = LeaderView::m_cut_empty_list.CutHeadByValue(*_shp_last_return); if (_p_head == nullptr) continue; /* Double check here for 2 reasons: 1. 
For the way of TrivialLockSingleList' work, to get rid of missing elements haven't finish
           inserting during the first iterating of _lambda.
           2. failed write requests need a recheck, and reset its result to SUCCESS if necessary. */
        LeaderView::m_cut_empty_list.IterateCutHead(_lambda, _p_head);
        LeaderView::m_cut_empty_garbage.PushFront(_p_head);
    }
}

//Tries to finish one parked cut-empty write request. Returns true once the request has been
//handled (either here or already by another thread); false means its log has not yet reached
//the binlog and it must stay on the list.
bool Write::ProcessCutEmptyRequest(const TypeSysTimePoint &tp, const LogIdentifier &current_lrl,
    std::shared_ptr<CutEmptyContext> &one, bool recheck) noexcept {
    if (one->m_processed_flag.load())
        return true;

    const auto &_cur_log_id = this->m_shp_req_ctx->m_cur_log_id;
    if (recheck)
        CHECK(current_lrl >= _cur_log_id);

    //::RaftCore::Tools::EndTiming(this->m_tp_start, "entry process disorder.", &_cur_log_id);

    //Log (once) if this request has been waiting longer than the configured timeout.
    auto _diff = std::chrono::duration_cast<std::chrono::milliseconds>(tp - one->m_generation_tp);
    if (!one->m_log_flag && _diff.count() >= ::RaftCore::Config::FLAGS_cut_empty_timeos_ms) {
        LOG(ERROR) << "waiting for CutHead append to binlog timeout,cur_log_id:" << _cur_log_id
            << ",lrl:" << current_lrl << ", wait ms:" << ::RaftCore::Config::FLAGS_cut_empty_timeos_ms;
        one->m_log_flag = true;
    }

    //Current request's log hasn't been appended to the binlog file.
    if (current_lrl < _cur_log_id)
        return false;

    //Once the current disorder message has already been processed by other iterating threads.
    bool _processed = false;
    if (!one->m_processed_flag.compare_exchange_strong(_processed, true)) {
        VLOG(89) << "cutEmpty req processing permission has been taken:" << _cur_log_id;
        return true;
    }

    //All requests go through here are successfully processed at end.
    this->m_rsp->set_result(ErrorCode::SUCCESS);
    this->AfterAppendBinlog();
    //::RaftCore::Tools::EndTiming(this->m_tp_start, "process CutEmpty done ,has responded to client.", &_cur_log_id);
    return true;
}

//Heuristic RPC timeout: scales with the distance between 'idx' and the last replicated index.
uint32_t Write::GetConservativeTimeoutValue(uint64_t idx,bool last_guid) const noexcept {
    auto _snapshot = BinLogGlobal::m_instance.GetLastReplicated();
    int _minimum_factor = 2;
    if (!last_guid)
        _minimum_factor += 1;
    int _rpc_timeout = ::RaftCore::Config::FLAGS_leader_append_entries_rpc_timeo_ms;
    return uint32_t(((idx - _snapshot.m_index) / 2 + _minimum_factor) * _rpc_timeout);
}

//Updates the leader's server status, allowing only guids >= the last trigger guid to win.
bool Write::UpdateServerStatus(uint64_t guid,LeaderView::ServerStatus status) noexcept {
    //Only bigger guids are allowed to modify the server status.Since what we need is the most updated status.
    {
        ReadLock _r_lock(this->m_mutex);
        if (guid < this->m_last_trigger_guid)
            return false;
    }

    /*NOTE(review): the guid check above is under a read lock that is released before the write
      lock below is taken, so a larger guid could slip in between and then be overwritten by this
      smaller one — looks like a TOCTOU window; confirm whether callers make this impossible.*/
    WriteLock _w_lock(this->m_mutex);
    this->m_last_trigger_guid = guid;

    auto _old_status = LeaderView::m_status;
    if (_old_status == status)
        return true;

    LeaderView::m_status = status;
    if (status == LeaderView::ServerStatus::HALTED)
        this->m_wait_time_point = std::chrono::steady_clock::now() + std::chrono::microseconds(::RaftCore::Config::FLAGS_cgg_wait_for_last_released_guid_finish_us);

    LOG(INFO) << "update server status from " << this->MacroToString(_old_status)
        << " to " << this->MacroToString(status) << " with the guid of " << guid;

    return true;
}

//Waits (spinning) until every log before the last released guid has been resolved, then flushes
//the remaining true-negative cut-empty requests back to their clients and restores NORMAL status.
void Write::LastlogResolve(bool result, uint64_t last_released_guid) noexcept {
    /*This is a relative accurate method for judging if all the logs before the LRG has been
      determined, false negative might occur but it's acceptable.*/
    LogIdentifier _cur_lrl = BinLogGlobal::m_instance.GetLastReplicated();
    uint64_t _gap = last_released_guid - _cur_lrl.m_index;
    //Busy-wait: expected to resolve quickly since only in-flight entries remain.
    do {
        VLOG(89) << "Waiting all logs before LRG resolved, last_released_guid:" << last_released_guid
            << ", cur_lrl:" << _cur_lrl.m_index << ",gap:" << _gap
            << ", waiting_num:" << LeaderView::m_last_log_waiting_num.load() << ", should be quickly resolved.";
        /*Note: _gap could < m_last_log_waiting_num at last whereas they should be equal, since
          _cur_lrl is a relative accurate calculated value.*/
    } while (LeaderView::m_last_log_waiting_num.load() < _gap);

    //To get around the issues(CHECK failed) caused by above deviation, wait an additional time.
    uint32_t _wait_ms = ::RaftCore::Config::FLAGS_leader_last_log_resolve_additional_wait_ms;
    std::this_thread::sleep_for(std::chrono::milliseconds(_wait_ms));

    if (!result) {
        //Get the latest LRL as the new base guid.
        _cur_lrl = BinLogGlobal::m_instance.GetLastReplicated();
        GuidGenerator::SetNextBasePoint(_cur_lrl.m_index);
        LOG(INFO) << "base point of guid has been set to:" << _cur_lrl.m_index;
    }

    /*Once reach here, the false negative requests should have been reset to SUCCESS, and all
      elements in the 'm_cut_empty_list' now are the ones that are true negative fail, what we
      need to do is just send them home, aka return to the client.*/
    auto *_p_remains = LeaderView::m_cut_empty_list.SetEmpty();
    auto _lambda = [&](std::shared_ptr<CutEmptyContext> &one) {
        auto &_p_req = one->m_write_request;
#ifdef _SVC_WRITE_TEST_
        const auto &_entity_id = _p_req->GetInnerLog()->GetEntity()->entity_id();
        VLOG(89) << "start processing the bg list after last log resolved:" << _entity_id.idx();
#endif
        _p_req->AfterAppendBinlog();
        return true;
    };
    if (_p_remains != nullptr) {
        LeaderView::m_cut_empty_list.IterateCutHead(_lambda, _p_remains);
        LeaderView::m_cut_empty_garbage.PushFront(_p_remains);
    }

    //It's the last log thread's duty to clear all the remaining item in the pending list.
    LeaderView::m_entity_pending_list.Clear();

    LOG(INFO) << "last log resolved with the last release guid:" << last_released_guid;

    LeaderView::m_last_log_waiting_num.store(0);

    CHECK(this->UpdateServerStatus(last_released_guid, LeaderView::ServerStatus::NORMAL))
        << "latest log:" << last_released_guid <<" update server status to NORMAL fail.";
}

//Sleeps until the precomputed wait point, then returns the last released guid.
uint64_t Write::WaitForLastGuidReleasing() const noexcept {
    //Business logic guaranteeing that there are no race conditions for 'm_wait_time_point'.
    std::this_thread::sleep_until(this->m_wait_time_point);
    return GuidGenerator::GetLastReleasedGuid();
}

//Registers this instance to receive the next incoming Read RPC on the completion queues.
Read::Read(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestRead(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Serves a client read: validates leadership then looks the key up in local storage.
::grpc::Status Read::Process() noexcept {
    auto _p_rsp = this->m_response.mutable_client_comm_rsp();
    if (!this->LeaderCheckVailidity(_p_rsp))
        return ::grpc::Status::OK;

    auto *_p_val = this->m_response.mutable_value();
    if (!StorageGlobal::m_instance.Get(this->m_request.key(), *_p_val))
        LOG(INFO) << "val doesn't exist for key :" << this->m_request.key();

    return ::grpc::Status::OK;
}

//Registers this instance to receive the next incoming MembershipChange RPC.
MembershipChange::MembershipChange(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestMembershipChange(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Handles a client-initiated membership change by pulling the member-manager trigger.
//(Continues past this block's end.)
::grpc::Status MembershipChange::Process() noexcept {
    //TODO: There should have some authentications here.
    auto *_p_rsp = this->m_response.mutable_client_comm_rsp();
    if (!this->LeaderCheckVailidity(_p_rsp))
        return ::grpc::Status::OK;

    //Collect the requested new cluster membership.
    std::set<std::string> _new_cluster;
    for (int i = 0; i < this->m_request.node_list_size(); ++i)
        _new_cluster.emplace(this->m_request.node_list(i));

    const char* _p_err_msg = MemberMgr::PullTrigger(_new_cluster);
    if (_p_err_msg) {
        LOG(ERROR) << "[Membership Change] pull the trigger fail:" << _p_err_msg;
        _p_rsp->set_result(ErrorCode::FAIL);
        _p_rsp->set_err_msg("pull trigger fail,check the log for details.");
        return ::grpc::Status::OK;
    }

    _p_rsp->set_result(ErrorCode::SUCCESS);
    return ::grpc::Status::OK;
}

//Registers this instance to receive the next incoming AppendEntries RPC; also seeds the
//ownership delegator with itself.
AppendEntries::AppendEntries(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->ResetOwnership(this);
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestAppendEntries(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

AppendEntries::~AppendEntries()noexcept {}

//Copies the request's replicated entities into m_log_list, verifying each entry chains onto
//the previous one. Returns an empty string on success, otherwise the error text.
std::string AppendEntries::ComposeInputLogs() noexcept {
    //Check validity of the input logs
    MemoryLogItemFollower *_p_previous = nullptr;
    const auto &_replicate_entity = this->m_request.replicate_entity();
    for (auto iter = _replicate_entity.cbegin(); iter != _replicate_entity.cend(); ++iter) {
        //Note: This is where memory copy overhead occurs!
        MemoryLogItemFollower *_p_log_item = new MemoryLogItemFollower(*iter);
        this->m_log_list.emplace_back(_p_log_item);

        //Ensure the log entries are continuous
        if (!_p_previous) {
            _p_previous = _p_log_item;
            continue;
        }
        if (!_p_log_item->AfterOf(*_p_previous)) {
            char sz_err[1024] = { 0 };
            std::snprintf(sz_err,sizeof(sz_err),"inputing logs are not continuous,pre:%d|%llu,cur:%d|%llu",
                _p_previous->GetEntity()->pre_log_id().term(), _p_previous->GetEntity()->pre_log_id().idx(),
                _p_previous->GetEntity()->entity_id().term(), _p_previous->GetEntity()->entity_id().idx());
            LOG(ERROR) << sz_err;
            return sz_err;
        }
        _p_previous = _p_log_item;
    }

    return "";
}

//Handles input logs that overlap already-written binlog entries by reverting the binlog to the
//last committed point and mapping the revert code to a client error code.
//(Continues past this block's end.)
void AppendEntries::ProcessOverlappedLog() noexcept {
    static const char* _p_err_msg = "revert log fail.";
    static const char* _p_step_over = "overstep lcl";
    const char* _p_ret_msg = "";

    /*Note : This can be invoked simultaneously , for correctness and simplicity , only one thread
      could successfully reverted the binlog, others will fail, in which case we will return an
      explicit fail to the client .
*/ ErrorCode _error_code = ErrorCode::SUCCESS; const auto &_lcl = StorageGlobal::m_instance.GetLastCommitted(); auto _revert_code = BinLogGlobal::m_instance.RevertLog(this->m_log_list, _lcl); if (_revert_code > BinLogOperator::BinlogErrorCode::SUCCEED_MAX) { auto _lrl = BinLogGlobal::m_instance.GetLastReplicated(); LOG(ERROR) << "log conflict detected,but reverting log fail,current ID-LRL:" << _lrl << ",_pre_entity_id:" << this->m_pre_entity_id->DebugString() << ",retCode:" << int(_revert_code); _error_code = ErrorCode::FAIL; _p_ret_msg = _p_err_msg; if (_revert_code == BinLogOperator::BinlogErrorCode::NO_CONSISTENT_ERROR) _error_code = ErrorCode::APPEND_ENTRY_CONFLICT; else if (_revert_code == BinLogOperator::BinlogErrorCode::OVER_BOUNDARY) { _error_code = ErrorCode::OVERSTEP_LCL; _p_ret_msg = _p_step_over; } } else if (_revert_code == BinLogOperator::BinlogErrorCode::SUCCEED_TRUNCATED) { /*Note: this elif section may executing simultaneously, but the follower would quickly get resolved by a new RESYNC_LOG command issued by the lead. */ static std::mutex _m; std::unique_lock<std::mutex> _mutex_lock(_m); //In case of successfully reverting log, all the pending lists are also become invalid,need to be cleared. FollowerView::m_phaseI_pending_list.DeleteAll(); //Cannot use 'Clear' avoiding conflict with 'Insert' operations. 
FollowerView::m_phaseII_pending_list.Clear(); /*There maybe remaining items in this->m_log_list those already been appended to the binlog after reverting,for phaseII correctly committing ,they need to be inserted to phaseII_pending_list.*/ std::for_each(this->m_log_list.cbegin(), this->m_log_list.cend(), [&](const auto &_one) { FollowerView::m_phaseII_pending_list.Insert(_one); }); } else if (_revert_code == BinLogOperator::BinlogErrorCode::SUCCEED_MERGED) _error_code = ErrorCode::SUCCESS_MERGED; this->m_rsp->set_result(_error_code); this->m_rsp->set_err_msg(_p_ret_msg); } bool AppendEntries::BeforeJudgeOrder() noexcept { this->m_tp_start = ::RaftCore::Tools::StartTimeing(); this->m_rsp = this->m_response.mutable_comm_rsp(); this->m_rsp->set_result(ErrorCode::SUCCESS); /* //Testing... const auto &_entity = this->m_request.replicate_entity(this->m_request.replicate_entity_size() - 1); uint32_t _idx = _entity.entity_id().idx(); VLOG(89) << " msg received & idx:" << _idx; return true; */ auto _err_msg = this->FollowerCheckValidity(this->m_request.base(), &this->m_tp_start, &this->m_last_log); if (!_err_msg.empty()) { LOG(ERROR) << "check request validity fail :" << _err_msg; this->m_rsp->set_result(ErrorCode::FAIL); this->m_rsp->set_err_msg(_err_msg); return true; } _err_msg = this->ComposeInputLogs(); if (!_err_msg.empty()) { LOG(ERROR) << "input log invalid,detail"; this->m_rsp->set_result(ErrorCode::FAIL); this->m_rsp->set_err_msg(_err_msg); return true; } //this->m_log_list need to be sorted. 
    auto _cmp = [](const std::shared_ptr<MemoryLogItemFollower>& left, const std::shared_ptr<MemoryLogItemFollower>& right) ->bool {
        return ::RaftCore::Common::EntityIDSmaller(left->GetEntity()->entity_id(),right->GetEntity()->entity_id());
    };
    this->m_log_list.sort(_cmp);

    //Cache the boundary IDs of the (sorted) batch.
    this->m_pre_entity_id = &(this->m_log_list.front()->GetEntity()->pre_log_id());
    this->m_last_entity_id = &(this->m_log_list.back()->GetEntity()->entity_id());
    this->m_last_log = ::RaftCore::Common::ConvertID(*this->m_last_entity_id);

    ::RaftCore::Tools::EndTiming(this->m_tp_start, "start processing to :", &this->m_last_log);

    //Check if the first log conflict with the written logs
    auto _lrl = BinLogGlobal::m_instance.GetLastReplicated();
    if (::RaftCore::Common::EntityIDSmaller(*this->m_pre_entity_id,_lrl)) {
        ::RaftCore::Tools::EndTiming(this->m_tp_start, "start process overlap log.", &this->m_last_log);
        this->ProcessOverlappedLog();
        //NOTE(review): this inner '_lrl' intentionally shadows the outer one to log the refreshed
        //value after reverting; it does not affect the logic below the 'if'.
        auto _lrl = BinLogGlobal::m_instance.GetLastReplicated();
        ::RaftCore::Tools::EndTiming(this->m_tp_start, "overlap log process done, lrl:", &_lrl);
        return true;
    }

    /*Inserting the log entries to the follower's pending list in a reverse order to get rid of
      the 'partially inserted' problem. */
    std::for_each(this->m_log_list.crbegin(), this->m_log_list.crend(), [&](const auto &_one) {
        FollowerView::m_phaseI_pending_list.Insert(_one);
    });

    /* If the minimum ID of the log entries is greater than the ID-LRL, means the current thread
       need to wait.. */
    _lrl = BinLogGlobal::m_instance.GetLastReplicated();
    VLOG(89) << "debug pos1,pre_id:" << ::RaftCore::Common::ConvertID(*this->m_pre_entity_id) << ",snapshot:" << _lrl;
    if (!::RaftCore::Common::EntityIDEqual(*this->m_pre_entity_id, _lrl)) {
        this->m_append_entries_stage = AppendEntriesProcessStage::WAITING;
        //Here need to wait on a CV, push it to background threads.
        auto _shp_ctx = std::shared_ptr<DisorderMessageContext>(new DisorderMessageContext());
        _shp_ctx->m_append_request = this->GetOwnership();
        FollowerView::m_disorder_list.Insert(_shp_ctx);
        ::RaftCore::Tools::EndTiming(this->m_tp_start, "insert a disorder msg.", &this->m_last_log);
        return false;
    }

    this->ProcessAdjacentLog();

    ::RaftCore::Tools::EndTiming(this->m_tp_start, "adjacent log process done: ", &this->m_last_log);

    return true;
}

//Accessor for the last log ID of the current request batch.
const LogIdentifier& AppendEntries::GetLastLogID() const noexcept {
    return this->m_last_log;
}

//Background thread: resolves AppendEntries requests that arrived out of order on the follower.
void AppendEntries::DisorderLogRoutine() noexcept {
    LOG(INFO) << "follower disorder msg processor thread started.";
    while (true) {
        if (!CommonView::m_running_flag)
            return;

        auto _wait_cond = [&]()->bool { return !FollowerView::m_disorder_list.Empty(); };
        auto _wait_timeo_us = std::chrono::microseconds(::RaftCore::Config::FLAGS_iterating_wait_timeo_us);

        std::unique_lock<std::mutex> _unique_wrapper(FollowerView::m_cv_mutex);
        bool _waiting_result = FollowerView::m_cv.wait_for(_unique_wrapper, _wait_timeo_us, _wait_cond);
        //There is no shared state among different threads, so it's better to release this lock ASAP.
        _unique_wrapper.unlock();

        if (!_waiting_result)
            continue;

        auto _now = std::chrono::system_clock::now();
        std::shared_ptr<DisorderMessageContext> _shp_last_return;
        auto _lambda = [&](std::shared_ptr<DisorderMessageContext> &one) {
            auto &_shp_req = one->m_append_request;
            auto _upper = BinLogGlobal::m_instance.GetLastReplicated();
            if (_shp_req->ProcessDisorderLog(_now, _upper, one)) {
                _shp_last_return = one;
                return true;
            }
            return false;//No need to go further, stop iterating over the list.
        };
        FollowerView::m_disorder_list.Iterate(_lambda);

        if (!_shp_last_return)
            continue;

        auto* _p_head = FollowerView::m_disorder_list.CutHeadByValue(*_shp_last_return);
        if (_p_head == nullptr)
            continue;

        //_shp_last_return may become invalid here.
        /*For the way of TrivialLockSingleList' work, to get rid of missing elements haven't finish
          inserting during the first iterating of _lambda */
        FollowerView::m_disorder_list.IterateCutHead(_lambda, _p_head);
        FollowerView::m_disorder_garbage.PushFront(_p_head);
    }
}

//Tries to resolve one out-of-order AppendEntries request. Returns true once the request is
//handled (adjacent-processed, timed out, or already taken by another thread); false keeps it
//on the disorder list.
bool AppendEntries::ProcessDisorderLog(const TypeSysTimePoint &tp, const LogIdentifier &upper_log,
    std::shared_ptr<DisorderMessageContext> &one) noexcept {
    /*Judge whether the current disorder message has already been processed by the current or
      other iterating threads.*/
    if (one->m_processed_flag.load())
        return true;

    /*There is no need to consider the overlapped logs, because :
      1. elements in the pending list can be over written.
      2. overlapped ones can all get a positive result from the follower, you can do nothing to
         prevent this. */

    //Current request's log hasn't been appended to the binlog file.
    bool _not_reach_me = upper_log < this->m_last_log;
    bool _adjacent = ::RaftCore::Common::EntityIDEqual(*this->m_pre_entity_id, upper_log);
    if (_not_reach_me && !_adjacent)
        return false;

    //Race for the exclusive right to process this message.
    bool _processed = false;
    if (!one->m_processed_flag.compare_exchange_strong(_processed, true)) {
        VLOG(89) << "disorder req processing permission has been taken:" << this->m_last_log;
        return true;
    }

    if (_adjacent) {
        VLOG(89) << "process adjacent in routine:" << this->m_last_log;
        this->ProcessAdjacentLog();
    }

    auto _diff = std::chrono::duration_cast<std::chrono::milliseconds>(tp - one->m_generation_tp);
    if (_diff.count() >= ::RaftCore::Config::FLAGS_disorder_msg_timeo_ms) {
        /*Here we don't need to delete elements from FollowerView::m_phaseI_pending_list where
          encounter failures with it, just leave it here, and they'll get replaced with the ones.*/
        LOG(ERROR) << "Waiting for cv timeout,upper log:" << upper_log << ", last log:" << this->m_last_log
            << ",pre_log_id:" << ::RaftCore::Common::ConvertID(*this->m_pre_entity_id)
            << ", diff:" << _diff.count() << ", wait ms:" << ::RaftCore::Config::FLAGS_disorder_msg_timeo_ms;

        //Return a WAITING_TIMEOUT error indicating the leader that this follower need to be set to `RESYNC_LOG` status.
        this->m_rsp->set_result(ErrorCode::WAITING_TIMEOUT);
        this->m_rsp->set_err_msg("Waiting for cv timeout.");
    }

    this->m_append_entries_stage = AppendEntriesProcessStage::FINISH;
    this->m_responder.Finish(this->m_response, ::grpc::Status::OK, this);

    ::RaftCore::Tools::EndTiming(this->m_tp_start, "process disorder done ,has responded to client.", &this->m_last_log);
    return true;
}

//AppendEntries uses React's state machine instead of the generic Process path.
::grpc::Status AppendEntries::Process() noexcept {
    return ::grpc::Status::OK;
}

//Completion-queue callback driving the AppendEntries state machine.
//(Continues past this block's end.)
void AppendEntries::React(bool cq_result) noexcept {
    if (!cq_result) {
        LOG(ERROR) << "AppendEntries got false result from CQ,last log:" << this->m_last_log;
        this->ReleaseOwnership();
        return;
    }

    switch (this->m_append_entries_stage) {
    case AppendEntriesProcessStage::CREATE:
        /* Spawn a new subclass instance to serve new clients while we process the one for this .
           The instance will deallocate itself as part of its FINISH state.*/
        new AppendEntries(this->m_async_service,this->m_server_notify_cq,this->m_server_call_cq);
        if (this->BeforeJudgeOrder()) {
            this->m_append_entries_stage = AppendEntriesProcessStage::FINISH;
            this->m_responder.Finish(this->m_response, ::grpc::Status::OK, this);
        }
        break;

    case AppendEntriesProcessStage::WAITING:
        //do nothing.
        break;

    case AppendEntriesProcessStage::FINISH:
        this->ReleaseOwnership();
        break;

    default:
        CHECK(false) << "Unexpected tag " << int(this->m_append_entries_stage);
        break;
    }
}

//Moves the adjacent entries from the phase-I pending list to the binlog and to the phase-II
//pending list, then wakes the background disorder thread.
void AppendEntries::ProcessAdjacentLog() noexcept {
    DoubleListNode<MemoryLogItemFollower> *_p_head = FollowerView::m_phaseI_pending_list.CutHead(CmpMemoryLogFollower);
    CHECK(_p_head) << "cut head empty";

    //Sanity check: a single cut element must chain directly onto the last replicated log.
    if (_p_head->m_atomic_next.load() == nullptr) {
        const auto &_pre_log_id = _p_head->m_val->GetEntity()->pre_log_id();
        auto _lrl = BinLogGlobal::m_instance.GetLastReplicated();
        CHECK(::RaftCore::Common::EntityIDEqual(_pre_log_id,_lrl))
            << "cut head got one element but its pre_log_id != ID-LCL :"
            << _pre_log_id.ShortDebugString() << "!=" << _lrl;
    }

    std::list<std::shared_ptr<Entity>> _input_list;
    int _cuthead_size = 0;
    auto _push = [&](decltype(_p_head) p_cur)->void{
        _cuthead_size++;
        _input_list.emplace_back(p_cur->m_val->GetEntity());
        /*Insert will take the ownership of p_cur, so no need to release them later.*/
        FollowerView::m_phaseII_pending_list.Insert(p_cur);
    };
    DoubleListNode<MemoryLogItemFollower>::Apply(_p_head, _push);

    CHECK(BinLogGlobal::m_instance.AppendEntry(_input_list)) << "AppendEntry to binlog fail,never should this happen,something terribly wrong.";

    //notify the background thread that the LRL has updated now.
    FollowerView::m_cv.notify_all();
}

//Registers this instance to receive the next incoming CommitEntries RPC.
CommitEntries::CommitEntries(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestCommitEntries(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Phase-II on the follower: cuts committed entries off the phase-II pending list and applies them
//to local storage. (Continues past this block's end.)
::grpc::Status CommitEntries::Process() noexcept {
    //...Test....
    //this->m_response.mutable_comm_rsp()->set_result(ErrorCode::SUCCESS);
    //return ::grpc::Status::OK;

    auto _req_term = this->m_request.entity_id().term();
    auto _req_idx = this->m_request.entity_id().idx();
    VLOG(89) << "Enter CommitEntries,term:" << _req_term << ",idx:" << _req_idx;

    auto _p_rsp = this->m_response.mutable_comm_rsp();
    _p_rsp->set_result(ErrorCode::SUCCESS);

    auto _err_msg = this->FollowerCheckValidity(this->m_request.base());
    if (!_err_msg.empty()) {
        _p_rsp->set_result(ErrorCode::FAIL);
        _p_rsp->set_err_msg(_err_msg);
        VLOG(89) << "done CommitEntries,pos0:" << _req_term << ",idx:" << _req_idx;
        return ::grpc::Status::OK;
    }

    LogIdentifier req_log;
    req_log.Set(_req_term, _req_idx);
    //Entries at or below the last committed ID have already been applied.
    if (req_log < StorageGlobal::m_instance.GetLastCommitted()) {
        VLOG(89) << "done CommitEntries,pos1:" << _req_term << ",idx:" << _req_idx;
        _p_rsp->set_result(ErrorCode::ALREADY_COMMITTED);
        return ::grpc::Status::OK;
    }

    auto follower_log_item = MemoryLogItemFollower(_req_term, _req_idx);
    DoubleListNode<MemoryLogItemFollower> *_p_head = FollowerView::m_phaseII_pending_list.CutHeadByValue(follower_log_item);
    if (_p_head == nullptr) {
        //In case of (req_log >= ID-LCL && cannot get value<follower_log_item) ,means the requested entry has already been committed.
        _p_rsp->set_result(ErrorCode::ALREADY_COMMITTED);
        _p_rsp->set_err_msg("CutHeadByValue got a nullptr");
        VLOG(89) << "done CommitEntries,pos2:" << _req_term << ",idx:" << _req_idx;
        return ::grpc::Status::OK;
    }

    int _cuthead_size = 0;

    //Updating storage.
auto _store = [&](decltype(_p_head) p_cur)->void{ auto _entity = p_cur->m_val->GetEntity(); _cuthead_size++; LogIdentifier _log; _log.Set(_entity->entity_id().term(),_entity->entity_id().idx()); /*Since write_op is inside the pb structure rather than in a 'shared_ptr', we have to do a memory copy here.*/ StorageGlobal::m_instance.Set(_log,_entity->write_op().key(), _entity->write_op().value()); }; DoubleListNode<MemoryLogItemFollower>::Apply(_p_head, _store); FollowerView::m_garbage.PushFront(_p_head); VLOG(89) << "done CommitEntries,term:" << _req_term << ",idx:" << _req_idx << ",cuthead size:" << _cuthead_size; return ::grpc::Status::OK; } SyncData::SyncData(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept { this->Initialize(shp_svc, shp_notify_cq, shp_call_cq); this->m_async_service->RequestSyncData(&this->m_server_context, &this->m_reader_writer, this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this); } ::grpc::Status SyncData::Process() noexcept { auto _p_comm_rsp = this->m_response.mutable_comm_rsp(); auto _err_msg = this->FollowerCheckValidity(this->m_request.base()); if (!_err_msg.empty()) { LOG(ERROR) << "[Sync Data Stream] read an invalid input ,error msg:" << _err_msg; _p_comm_rsp->set_result(ErrorCode::FAIL); _p_comm_rsp->set_err_msg(_err_msg); return ::grpc::Status::OK; } const ::raft::SyncDataMsgType &_msg_type = this->m_request.msg_type(); switch (_msg_type) { case ::raft::SyncDataMsgType::PREPARE: { LOG(INFO) << "[Sync Data Stream]receive PREPARE."; FollowerView::Clear(); StorageGlobal::m_instance.Reset(); if (!BinLogGlobal::m_instance.Clear()) LOG(ERROR) << "SyncData clear storage data fail:"; _p_comm_rsp->set_result(ErrorCode::PREPARE_CONFRIMED); break; } case ::raft::SyncDataMsgType::SYNC_DATA: LOG(INFO) << "[Sync Data Stream]receive SYNC_DATA, size:" << this->m_request.entity_size(); for (int i = 0; i < 
this->m_request.entity_size(); ++i) { const ::raft::Entity &_entity = this->m_request.entity(i); LogIdentifier _log_id = ::RaftCore::Common::ConvertID(_entity.entity_id()); if (!StorageGlobal::m_instance.Set(_log_id, _entity.write_op().key(), _entity.write_op().key())) { LOG(ERROR) << "SyncData set storage fail,log id:" << _log_id; break; } /*Along with storing ,the latest log entry should also be appended to the binlog file for further uses.*/ if (i != this->m_request.entity_size() - 1) continue; std::shared_ptr<::raft::Entity> _shp_entity(new ::raft::Entity()); auto _p_entity_id = _shp_entity->mutable_entity_id(); _p_entity_id->set_term(_entity.entity_id().term()); _p_entity_id->set_idx(_entity.entity_id().idx()); _shp_entity->set_allocated_write_op(const_cast<::raft::WriteRequest*>(&_entity.write_op())); auto _set_head_error_code = BinLogGlobal::m_instance.SetHead(_shp_entity); _shp_entity->release_write_op(); if (_set_head_error_code != BinLogOperator::BinlogErrorCode::SUCCEED_TRUNCATED) { LOG(ERROR) << "SyncData SetHead fail,log id:" << _log_id; break; // break for loop. } } _p_comm_rsp->set_result(ErrorCode::SYNC_DATA_CONFRIMED); break; case ::raft::SyncDataMsgType::SYNC_LOG: { LOG(INFO) << "[Sync Data Stream]receive SYNC_LOG, size:" << this->m_request.entity_size(); TypeEntityList _input_list; for (int i = 0; i < this->m_request.entity_size(); ++i) { const ::raft::Entity &_entity = this->m_request.entity(i); /*Note: 1.must specify the deleter for std::shared_ptr<Entity>,otherwise double-free could happen. 2. convert Entity* to const Entity* is safe demonstrated by other tests. */ _input_list.emplace_back(const_cast<Entity*>(&_entity), [](auto p) {}); //Also need to add log entries to pending list II. 
                std::shared_ptr<MemoryLogItemFollower> _shp_follower_log(new MemoryLogItemFollower(_entity));
                DoubleListNode<MemoryLogItemFollower> *_p_node = new DoubleListNode<MemoryLogItemFollower>(_shp_follower_log);
                FollowerView::m_phaseII_pending_list.Insert(_p_node);
            }
            CHECK(BinLogGlobal::m_instance.AppendEntry(_input_list)) << "AppendEntry to binlog fail,never should this happen,something terribly wrong.";
            _p_comm_rsp->set_result(ErrorCode::SYNC_LOG_CONFRIMED);
            break;
        }
    default:
        LOG(ERROR) << "SyncData unknown msgType:" << _msg_type;
        break;
    }

    return ::grpc::Status::OK;
}

//Registers this instance to receive the next incoming MemberChangePrepare RPC.
MemberChangePrepare::MemberChangePrepare(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestMemberChangePrepare(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Phase one of membership change on this node: switch to the joint-consensus topology built from
//the requested node list.
::grpc::Status MemberChangePrepare::Process() noexcept {
    LOG(INFO) << "[Membership Change] MemberChangePrepare starts.";

    auto *_p_rsp = this->m_response.mutable_comm_rsp();
    auto _err_msg = this->FollowerCheckValidity(this->m_request.base());
    if (!_err_msg.empty()) {
        LOG(ERROR) << "[Membership Change] cannot do membership change prepare,error message:" << _err_msg;
        _p_rsp->set_result(ErrorCode::FAIL);
        _p_rsp->set_err_msg(_err_msg);
        return ::grpc::Status::OK;
    }

    std::set<std::string> _new_cluster;
    for (int i = 0; i < this->m_request.node_list_size(); ++i)
        _new_cluster.emplace(this->m_request.node_list(i));

    MemberMgr::JointTopology _joint_topo;
    _joint_topo.Update(&_new_cluster);
    MemberMgr::SwitchToJointConsensus(_joint_topo,this->m_request.version());

    //Build a human-readable summary of the topology diff for the log line below.
    std::string _removed_nodes="",_added_nodes="";
    {
        ReadLock _r_lock(MemberMgr::m_mutex);
        for (const auto& _node : MemberMgr::m_joint_summary.m_joint_topology.m_added_nodes)
            _added_nodes += (_node.first + "|");
        for (const auto& _node : MemberMgr::m_joint_summary.m_joint_topology.m_removed_nodes)
            _removed_nodes += (_node + "|");
    }

    LOG(INFO) << "[Membership Change] switched to JointConsensus status with new nodes:" << _added_nodes
        << " and removed nodes:" << _removed_nodes;

    _p_rsp->set_result(ErrorCode::SUCCESS);

    LOG(INFO) << "[Membership Change] MemberChangePrepare ends.";

    return ::grpc::Status::OK;
}

//Registers this instance to receive the next incoming MemberChangeCommit RPC.
MemberChangeCommit::MemberChangeCommit(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestMemberChangeCommit(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Phase two of membership change: switch back to the stable topology; if this node is no longer
//a member, schedule a delayed self-shutdown. (Continues past this block's end.)
::grpc::Status MemberChangeCommit::Process() noexcept {
    LOG(INFO) << "[Membership Change] MemberChangeCommit starts.";

    auto *_p_rsp = this->m_response.mutable_comm_rsp();
    auto _err_msg = this->FollowerCheckValidity(this->m_request.base());
    if (!_err_msg.empty()) {
        LOG(ERROR) << "[Membership Change] cannot do membership change commit,error message:" << _err_msg;
        _p_rsp->set_result(ErrorCode::FAIL);
        _p_rsp->set_err_msg(_err_msg);
        return ::grpc::Status::OK;
    }

    bool _still_in_new_cluster = MemberMgr::SwitchToStable();
    LOG(INFO) << "[Membership Change] switched to Stable status ";

    if(!_still_in_new_cluster){
        LOG(INFO) << "[Membership Change]I'm no longer in the new cluster , shutdown myself in 3 seconds,goodbye and have a good time.";
        //Must start a new thread to shutdown myself.
        auto _shutdown = [&]()->void {
            std::this_thread::sleep_for(std::chrono::seconds(3));
            this->SetServerShuttingDown();
            GlobalEnv::ShutDown();
        };
        std::thread _t(_shutdown);
        _t.detach();
    }

    /*If the old leader is not in the new cluster, the nodes in the new cluster will soon after
      start new rounds of elections, to achieve this, we need to reset the heartbeat clock.
    */
    if (this->m_request.has_flag()) {
        if (this->m_request.flag() == ::raft::MembershipFlag::NEWBIE) {
            WriteLock _w_lock(FollowerView::m_last_heartbeat_lock);
            FollowerView::m_last_heartbeat = std::chrono::steady_clock::now();
        }
    }

    _p_rsp->set_result(ErrorCode::SUCCESS);

    LOG(INFO) << "[Membership Change] MemberChangeCommit ends.";

    return ::grpc::Status::OK;
}

//Marks the leader view as shutting down (taken before the delayed self-shutdown above).
void MemberChangeCommit::SetServerShuttingDown() noexcept {
    WriteLock _w_lock(this->m_mutex);
    LeaderView::m_status = LeaderView::ServerStatus::SHUTTING_DOWN;
}

//Registers this instance to receive the next incoming PreVote RPC.
PreVote::PreVote(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestPreVote(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Answers a pre-vote probe: default yes, refused when the caller is outside the cluster config
//or this node is not currently a candidate.
::grpc::Status PreVote::Process() noexcept {
    auto _p_rsp = this->m_response.mutable_comm_rsp();
    _p_rsp->set_result(ErrorCode::PREVOTE_YES);

    auto _req_term = this->m_request.base().term();
    auto _req_addr = this->m_request.base().addr();

    if (!this->ValidClusterNode(_req_addr)) {
        _p_rsp->set_result(ErrorCode::PREVOTE_NO);
        _p_rsp->set_err_msg("You are not in my cluster config list:" + _req_addr);
        return ::grpc::Status::OK;
    }

    ElectionMgr::AddVotingTerm(_req_term,_req_addr);

    //Only candidate can vote.
    auto _current_role = StateMgr::GetRole();
    if (_current_role != RaftRole::CANDIDATE) {
        _p_rsp->set_result(ErrorCode::PREVOTE_NO);
        _p_rsp->set_err_msg("I'm a " + std::string(StateMgr::GetRoleStr(_current_role)) + " rather than a candidate.");
    }

    return ::grpc::Status::OK;
}

//Registers this instance to receive the next incoming Vote RPC.
Vote::Vote(std::shared_ptr<RaftService::AsyncService> shp_svc,
    std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
    std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept {
    this->Initialize(shp_svc, shp_notify_cq, shp_call_cq);
    this->m_async_service->RequestVote(&this->m_server_context, &this->m_request, &this->m_responder,
        this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this);
}

//Answers a real vote request: checks cluster membership, role, term and log freshness before
//attempting to cast the vote. (Continues past this block's end.)
::grpc::Status Vote::Process() noexcept {
    auto _p_rsp = this->m_response.mutable_comm_rsp();
    _p_rsp->set_result(ErrorCode::VOTE_YES);

    auto _req_term = this->m_request.base().term();
    auto _req_addr = this->m_request.base().addr();

    if (!this->ValidClusterNode(_req_addr)) {
        /*NOTE(review): this rejection uses PREVOTE_NO inside Vote::Process while every other
          rejection below uses VOTE_NO — looks like a copy-paste from PreVote::Process; confirm
          against the candidate-side result handling before changing.*/
        _p_rsp->set_result(ErrorCode::PREVOTE_NO);
        _p_rsp->set_err_msg("You are not in my cluster config list:" + _req_addr);
        return ::grpc::Status::OK;
    }

    ElectionMgr::AddVotingTerm(_req_term,_req_addr);

    auto _current_role = StateMgr::GetRole();
    if (_current_role != RaftRole::CANDIDATE) {
        _p_rsp->set_result(ErrorCode::VOTE_NO);
        _p_rsp->set_err_msg("I'm not a candidate.");
        return ::grpc::Status::OK;
    }

    auto _my_term = ElectionMgr::m_cur_term.load();
    if ( _req_term < _my_term) {
        _p_rsp->set_result(ErrorCode::VOTE_NO);
        _p_rsp->set_err_msg("I have a greater term:" + std::to_string(_my_term) + " than yours:" + std::to_string(_req_term));
        return ::grpc::Status::OK;
    }

    if (_req_term == _my_term) {
        _p_rsp->set_result(ErrorCode::VOTE_NO);
        _p_rsp->set_err_msg("I already issued an election in your term:" + std::to_string(_req_term));
        return ::grpc::Status::OK;
    }

    //Judge LOG ID.
const auto & _id_lrl = BinLogGlobal::m_instance.GetLastReplicated(); if (::RaftCore::Common::EntityIDSmaller(this->m_request.last_log_entity(), _id_lrl)) { _p_rsp->set_result(ErrorCode::VOTE_NO); _p_rsp->set_err_msg("Your log ID " + this->m_request.last_log_entity().DebugString() + " is smaller than mine:" + _id_lrl.ToString()); return ::grpc::Status::OK; } if (::RaftCore::Common::EntityIDEqual(this->m_request.last_log_entity(), _id_lrl)) { auto _req_version = this->m_request.member_version(); auto _my_version = MemberMgr::GetVersion(); if (_req_version < _my_version) { _p_rsp->set_result(ErrorCode::VOTE_NO); _p_rsp->set_err_msg("Your membership version: " + std::to_string(_req_version) + " is smaller than mine:" + std::to_string(_my_version)); return ::grpc::Status::OK; } } auto _voted_addr = ElectionMgr::TryVote(_req_term, _req_addr); if (!_voted_addr.empty()) { _p_rsp->set_result(ErrorCode::VOTE_NO); _p_rsp->set_err_msg("Try vote fail,I've already voted addr:" + _voted_addr); return ::grpc::Status::OK; } return ::grpc::Status::OK; } HeartBeat::HeartBeat(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept { this->Initialize(shp_svc, shp_notify_cq, shp_call_cq); this->m_async_service->RequestHeartBeat(&this->m_server_context, &this->m_request, &this->m_responder, this->m_server_call_cq.get(), this->m_server_notify_cq.get(), this); } ::grpc::Status HeartBeat::Process() noexcept { VLOG(89) << "receive heartbeat."; const auto &_leader_term = this->m_request.base().term(); const auto &_leader_addr = this->m_request.base().addr(); if (!this->ValidClusterNode(_leader_addr)) { this->m_response.set_result(ErrorCode::PREVOTE_NO); this->m_response.set_err_msg("You are not in my cluster config list:" + _leader_addr); return ::grpc::Status::OK; } /*(Read & judge & take action) wrap the three to be an atomic operation. 
Tiny overhead for processing the periodically sent heartbeat messages. */ WriteLock _w_lock(ElectionMgr::m_election_mutex); uint32_t _cur_term = ElectionMgr::m_cur_term.load(); if (_leader_term < _cur_term) { LOG(ERROR) << "a lower term heartbeat received,detail:" << this->m_request.DebugString(); this->m_response.set_result(ErrorCode::FAIL); this->m_response.set_err_msg("your term " + std::to_string(_leader_term) + " is smaller than mine:" + std::to_string(_cur_term)); return ::grpc::Status::OK; } { WriteLock _w_lock(FollowerView::m_last_heartbeat_lock); FollowerView::m_last_heartbeat = std::chrono::steady_clock::now(); } this->m_response.set_result(::raft::SUCCESS); auto _cur_role = StateMgr::GetRole(); if (_leader_term == _cur_term) { CHECK(_cur_role != RaftRole::LEADER) << "I'm a leader,receive heartbeat from the same term,detail:" << this->m_request.DebugString(); if (_cur_role == RaftRole::FOLLOWER) { ::RaftCore::Topology _topo; ::RaftCore::CTopologyMgr::Read(&_topo); CHECK(_topo.m_leader == _leader_addr) << "A different leader under term:" << _cur_term << " found" << ",my leader addr:" << _topo.m_leader << ",peer leader addr : " << _leader_addr << ",ignore it."; } else if (_cur_role == RaftRole::CANDIDATE) ElectionMgr::NotifyNewLeaderEvent(_leader_term,_leader_addr); return ::grpc::Status::OK; } //Now :_leader_term > _cur_term ,switch role is needed. LOG(INFO) << "higher term found: " << _leader_term << ",current term:" << _cur_term << ",prepare to switch to follower with respect to the new leader :" << _leader_addr; if (_cur_role == RaftRole::CANDIDATE) { //I'm in a electing state. ElectionMgr::NotifyNewLeaderEvent(_leader_term,_leader_addr); } else if (_cur_role == RaftRole::FOLLOWER) { ::RaftCore::Topology _topo; ::RaftCore::CTopologyMgr::Read(&_topo); //New leader with a higher term found ,move the old leader to follower list. 
_topo.m_followers.emplace(_topo.m_leader); _topo.m_followers.erase(_leader_addr); _topo.m_candidates.erase(_leader_addr); _topo.m_leader = _leader_addr; ::RaftCore::CTopologyMgr::Update(_topo); ElectionMgr::m_cur_term.store(_leader_term); } else if (_cur_role == RaftRole::LEADER) { //Leader step down. ElectionMgr::m_cur_term.store(_leader_term); ElectionMgr::SwitchRole(RaftRole::FOLLOWER, _leader_addr); } this->m_response.set_result(::raft::SUCCESS); return ::grpc::Status::OK; } }
82,228
C++
.cc
1,505
47.346844
185
0.651568
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,247
ownership_delegator.cc
ppLorins_aurora/src/service/ownership_delegator.cc
/*
*   <Aurora. A raft based distributed KV storage system.>
*   Copyright (C) <2019>  <arthur> <pplorins@gmail.com>

*   This program is free software: you can redistribute it and/or modify
*   it under the terms of the GNU General Public License as published by
*   the Free Software Foundation, either version 3 of the License, or
*   (at your option) any later version.

*   This program is distributed in the hope that it will be useful,
*   but WITHOUT ANY WARRANTY; without even the implied warranty of
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*   GNU General Public License for more details.

*   You should have received a copy of the GNU General Public License
*   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "service/ownership_delegator.h"

namespace RaftCore::Service {

//Allocate the heap slot that holds the delegated shared_ptr. The slot lives
//for the whole lifetime of the delegator object.
template<typename T>
OwnershipDelegator<T>::OwnershipDelegator() {
    auto *_p_slot = new std::shared_ptr<T>();
    this->m_p_shp_delegator = _p_slot;
}

//Release the slot itself; whatever it still owns is released with it.
template<typename T>
OwnershipDelegator<T>::~OwnershipDelegator() {
    delete this->m_p_shp_delegator;
}

//Take ownership of 'src', dropping whatever was previously owned.
template<typename T>
void OwnershipDelegator<T>::ResetOwnership(T *src) noexcept{
    (*this->m_p_shp_delegator).reset(src);
}

//Give up the current ownership, leaving the slot empty.
template<typename T>
void OwnershipDelegator<T>::ReleaseOwnership() noexcept{
    auto &_slot = *this->m_p_shp_delegator;
    _slot.reset();
}

//Hand out a copy of the owning shared_ptr (shares ownership with the slot).
template<typename T>
std::shared_ptr<T> OwnershipDelegator<T>::GetOwnership()noexcept {
    std::shared_ptr<T> _copy = *this->m_p_shp_delegator;
    return _copy;
}

//Share ownership with an externally supplied shared_ptr.
template<typename T>
void OwnershipDelegator<T>::CopyOwnership(std::shared_ptr<T> from)noexcept {
    auto &_slot = *this->m_p_shp_delegator;
    _slot = from;
}

}
1,593
C++
.cc
41
36.97561
76
0.752597
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,248
storage_singleton.cc
ppLorins_aurora/src/storage/storage_singleton.cc
/*
*   <Aurora. A raft based distributed KV storage system.>
*   Copyright (C) <2019>  <arthur> <pplorins@gmail.com>

*   This program is free software: you can redistribute it and/or modify
*   it under the terms of the GNU General Public License as published by
*   the Free Software Foundation, either version 3 of the License, or
*   (at your option) any later version.

*   This program is distributed in the hope that it will be useful,
*   but WITHOUT ANY WARRANTY; without even the implied warranty of
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*   GNU General Public License for more details.

*   You should have received a copy of the GNU General Public License
*   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "storage/storage_singleton.h"

namespace RaftCore::Storage {

//Out-of-class definition of the process-wide StorageMgr singleton declared in
//storage_singleton.h; constructed at static-initialization time.
StorageMgr StorageGlobal::m_instance;

}
887
C++
.cc
18
47.944444
73
0.754345
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,249
storage.cc
ppLorins_aurora/src/storage/storage.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <regex> #include "boost/filesystem.hpp" #include "binlog/binlog_meta_data.h" #include "binlog/binlog_singleton.h" #include "storage/storage.h" #include "state/state_mgr.h" #include "common/comm_view.h" namespace RaftCore::Storage { namespace fs = ::boost::filesystem; using ::RaftCore::Common::ReadLock; using ::RaftCore::Common::WriteLock; using ::RaftCore::Common::CommonView; using ::RaftCore::BinLog::FileMetaData; using ::RaftCore::BinLog::BinLogOperator; using ::RaftCore::Storage::HashableString; using ::RaftCore::State::StateMgr; StorageMgr::StorageMgr() noexcept : m_path(_AURORA_DATA_DIR_) {} StorageMgr::~StorageMgr() noexcept { if (this->m_initialized) this->UnInitialize(); } bool StorageMgr::Initialize(const char* role, bool reset) noexcept { CHECK(role != nullptr); this->m_role_str = role; this->m_garbage_sstable.store(nullptr); this->m_garbage_memory_table.store(nullptr); this->m_last_committed.store(CommonView::m_zero_log_id); this->m_last_persist.store(CommonView::m_zero_log_id); if (!fs::exists(this->m_path)) fs::create_directory(this->m_path); CHECK(fs::is_directory(this->m_path)) << "scan data directory fail,cannot save current file"; this->m_memory_table_head.store(new UnorderedSingleListNode<MemoryTable>()); 
std::list<std::string> _all_sstable_files; for (auto&& x : fs::directory_iterator(this->m_path)) { std::string _file_name = x.path().filename().string(); std::string::size_type pos = _file_name.find(_AURORA_SSTABLE_PREFIX_); if (pos == std::string::npos) continue; _all_sstable_files.emplace_back(_file_name); } /*Sort by descending order.File number won't be large,it's acceptable to sorting a std::list compared to using a 'std::vector' and 'std::sort' .*/ _all_sstable_files.sort([](const std::string &left, const std::string &right)->bool {return right < left; }); this->m_sstable_table_head.store(nullptr); UnorderedSingleListNode<SSTAble>* _p_cur_node = this->m_sstable_table_head.load(); bool _merged_flag = false; for (const auto &_item : _all_sstable_files) { const std::string &_file_name = _item; auto _cur_path = this->m_path / _file_name; if (_merged_flag) { LOG(INFO) << "Deleting merged files during initializing: " << _file_name; fs::remove(fs::path(_cur_path)); continue; } LOG(INFO) << "Parsing and loading sstable:" << _cur_path.string(); auto* _p_new_node = new UnorderedSingleListNode<SSTAble>(_cur_path.string().c_str()); if (_p_cur_node) _p_cur_node->m_next.store(_p_new_node); else this->m_sstable_table_head.store(_p_new_node); _p_cur_node = _p_new_node; //File with a merged suffix must be the last one need to be loaded. if (_file_name.find(_AURORA_SSTABLE_MERGE_SUFFIX_) != std::string::npos) _merged_flag = true; //Delete all following sstable files, since they are already merged. } //Find latest entry ID that has been stored in SSTAbles. LogIdentifier _max_log_id; _max_log_id.Set(0, 0); _p_cur_node = this->m_sstable_table_head.load(); if (_p_cur_node != nullptr) _max_log_id = _p_cur_node->m_data->GetMaxLogID(); else LOG(WARNING) << "no sstable found,data will remain empty after initialization."; this->m_last_committed.store(_max_log_id); this->m_last_persist.store(_max_log_id); //Construct memory table from binlog by _max_log_id. 
if (!reset) this->ConstructMemoryTable(_max_log_id); this->m_initialized = true; LOG(INFO) << "[Storage] m_last_committed initialized as:" << this->m_last_committed.load(); return true; } bool StorageMgr::ConstructFromBinlog(const LogIdentifier &from, const std::string &binlog_file_name) noexcept { LOG(INFO) << "[Storage] parsing binlog file:" << binlog_file_name; BinLogOperator _cur_binlog; _cur_binlog.Initialize(binlog_file_name.c_str(), true); std::list<std::shared_ptr<FileMetaData::IdxPair>> _file_meta; _cur_binlog.GetOrderedMeta(_file_meta); if (_file_meta.empty()) return true; std::FILE* _f_handler = std::fopen(binlog_file_name.c_str(),_AURORA_BINLOG_READ_MODE_); auto _riter = _file_meta.crbegin(); for (; _riter != _file_meta.crend(); ++_riter) { if ((*_riter)->operator<=(from)) break; } bool _finished = (_riter != _file_meta.crend()); unsigned char* _p_buf = nullptr; for (auto _iter = _riter.base(); _iter != _file_meta.cend(); _iter++) { //Seek to position CHECK(std::fseek(_f_handler, (*_iter)->m_offset, SEEK_SET) == 0) << "ConstructMemoryTable seek binlog file " << binlog_file_name << "fail..,errno:" << errno; //Read protobuf buf length uint32_t _buf_len = 0; CHECK(std::fread(&_buf_len, 1, _FOUR_BYTES_, _f_handler) == _FOUR_BYTES_) << "ConstructMemoryTable read binlog file " << binlog_file_name << "fail..,errno:" << errno; ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(_buf_len, &_buf_len); //Read protobuf buf _p_buf = (_p_buf) ? (unsigned char*)std::realloc(_p_buf,_buf_len): (unsigned char*)malloc(_buf_len); CHECK(std::fread(_p_buf, 1, _buf_len, _f_handler) == _buf_len) << "ConstructMemoryTable read binlog file " << binlog_file_name << " fail..,errno:" << errno; ::raft::BinlogItem _binlog_item; CHECK(_binlog_item.ParseFromArray(_p_buf, _buf_len)) << "ConstructMemoryTable parse protobuf buffer fail " << binlog_file_name; //If the first log entry's pre_id matches 'from', also means the parsing process is finished. 
if (_iter == _riter.base()) { auto _pre_of_first = ::RaftCore::Common::ConvertID(_binlog_item.entity().pre_log_id()); _finished |= (_pre_of_first == from); } auto *_p_wop = _binlog_item.mutable_entity()->mutable_write_op(); const auto &_entity_id = _binlog_item.entity().entity_id(); this->m_memory_table_head.load()->m_data->Insert(*_p_wop->mutable_key(), *_p_wop->mutable_value(), _entity_id.term(), _entity_id.idx()); LogIdentifier _cur_id; _cur_id.Set(_entity_id.term(), _entity_id.idx()); /*Note: m_last_committed may greater than the real LCL of the current server, it's okay b/c: 1> if current server is the leader, any log entries in the binlog must have been committed, as the way aurora works. Update@2019-11-05: it's still okay for the leader when an optimization of parallel wiring binlog and replicating is carried out since b/c it is(or be elected as) the leader, all its logs will be treated as committed eventually, regardless of whether they have been majority confirmed or not. 2> if current server is a follower, and the 'm_last_committed' > the last consistent log entry, a SYNC_DATA would eventually triggered. 3> if current server is a candidate, no influence on that. 
*/ if (_cur_id > this->m_last_committed.load()) this->m_last_committed.store(_cur_id); } CHECK(fclose(_f_handler) == 0) << "ConstructMemoryTable: close binlog file fail."; if (_finished) LOG(INFO) << "binlog:" << binlog_file_name << " reach end,from:" << from << ", _riter idx:" << (*_riter.base())->m_index; return _finished; } void StorageMgr::ConstructMemoryTable(const LogIdentifier &from) noexcept { FindRoleBinlogFiles(this->m_role_str, this->m_loaded_binlog_files); if (this->m_loaded_binlog_files.empty()) { LOG(INFO) << "found no binlog available."; return; } /*Sort by descending order.File number won't be large,it's acceptable to sorting a std::list compared to using a 'std::vector' and 'std::sort' .*/ this->m_loaded_binlog_files.sort([](const std::string &left, const std::string &right)->bool { auto _get_suffix = [](const std::string &file_name) { int _suffix = 0; std::string::size_type pos = file_name.find("-"); if (pos != std::string::npos) _suffix = std::atoi(file_name.substr(pos + 1).c_str()); return _suffix; }; return _get_suffix(left) > _get_suffix(right); }); //list: 5/4/3/2/1/0, but 0 is the latest one, move it to the first place of the list. 
std::string _lastest_file = this->m_loaded_binlog_files.back(); this->m_loaded_binlog_files.pop_back(); this->m_loaded_binlog_files.push_front(_lastest_file); //VLOG(89) << "debug size:" << this->m_loaded_binlog_files.size() << ",last:" << _lastest_file; bool _find_latest = false; for (const auto& _file_name : this->m_loaded_binlog_files) { if (!this->ConstructFromBinlog(from, _file_name)) continue; LOG(INFO) << "[Storage] binlog file before(not include): " << _file_name << " can be manually deleted"; _find_latest = true; break; } CHECK(_find_latest) << "binlog content incomplete, last persistent id:" << this->m_last_persist.load(); } void StorageMgr::UnInitialize() noexcept { this->ClearInMemoryData(); this->m_initialized = false; } bool StorageMgr::Get(const std::string &key,std::string &val) const noexcept{ //Find in memory tables. auto _cur_mem_node = this->m_memory_table_head.load(); while (_cur_mem_node != nullptr) { if (_cur_mem_node->m_data->GetData(key, val)) return true; _cur_mem_node = _cur_mem_node->m_next.load(); } //Find in SSTables. auto _p_cur_sstable_node = this->m_sstable_table_head.load(); while (_p_cur_sstable_node != nullptr) { if (_p_cur_sstable_node->m_data->Read(key, val)) return true; _p_cur_sstable_node = _p_cur_sstable_node->m_next.load(); } return false; } void StorageMgr::DumpMemoryTable(const MemoryTable *src) noexcept { auto *_cur_head = this->m_sstable_table_head.load(); auto *_new_sstable_head = new UnorderedSingleListNode<SSTAble>(*src); _new_sstable_head->m_next.store(_cur_head); while (!this->m_sstable_table_head.compare_exchange_strong(_cur_head, _new_sstable_head)) { _new_sstable_head->m_next.store(_cur_head); LOG(WARNING) << "concurrently dumping memory table CAS conflict ,continue..."; } VLOG(89) << "storage successfully dumped a memory table to sstable:" << _new_sstable_head->m_data->GetFilename(); } void StorageMgr::PurgeGarbage() noexcept { //Purging process should be mutual exclusion from releasing process. 
WriteLock _w_lock(this->m_mutex); VLOG(89) << "storage purging started."; this->PurgeMemoryTable(); #ifdef _STORAGE_TEST_ while (this->PurgeSSTable()); #else this->PurgeSSTable(); #endif } void StorageMgr::PurgeMemoryTable() noexcept { auto _p_cur = this->m_garbage_memory_table.load(); if (_p_cur == nullptr) return ; while (!this->m_garbage_memory_table.compare_exchange_weak(_p_cur, nullptr)) continue; //Here '_p_cur' is the cut off list and all elements in it should be reclaimed. while (_p_cur != nullptr) { auto *_p_pre = _p_cur; _p_cur = _p_cur->m_next.load(); delete _p_pre; LOG(INFO) << "purged one memory table."; } } void StorageMgr::RecycleLast2SStables() noexcept { auto *_p_cur_garbage = this->m_garbage_sstable.load(); if (_p_cur_garbage == nullptr) return; auto *_p_next_garbage = _p_cur_garbage->m_next.load(); if (_p_next_garbage == nullptr) return; int _garbage_size = 2; auto *_p_pre_garbage = _p_cur_garbage; while (_p_next_garbage->m_next.load() != nullptr) { if (_p_pre_garbage != _p_cur_garbage) _p_cur_garbage = _p_cur_garbage; _p_cur_garbage = _p_next_garbage; _p_next_garbage = _p_next_garbage->m_next.load(); _garbage_size++; } //Detach the last two garbage nodes. if (_garbage_size == 2) { if (!this->m_garbage_sstable.compare_exchange_strong(_p_cur_garbage, nullptr)) { LOG(INFO) << "recursive RecycleLast2SStables occurred."; return this->RecycleLast2SStables(); } } else { CHECK(_p_pre_garbage->m_next.compare_exchange_strong(_p_cur_garbage, nullptr)) << "recycle sstable garbage last2 CAS fail."; } auto* _release_cur = _p_cur_garbage; while (_release_cur != nullptr) { std::string _filename = _release_cur->m_data->GetFilename(); auto *_p_tmp = _release_cur->m_next.load(); //Clear the in-memory data. delete _release_cur; //Remove the sstable file. 
LOG(INFO) << "Deleting garbage sstable files : " << _filename; fs::remove(fs::path(_filename)); _release_cur = _p_tmp; } } bool StorageMgr::PurgeSSTable() noexcept { this->RecycleLast2SStables(); auto _p_cur = this->m_sstable_table_head.load(); if (_p_cur == nullptr) return false; auto _p_next = _p_cur->m_next.load(); if (_p_next == nullptr) return false; int _garbage_part_size = 2; auto _p_pre = _p_cur; while (_p_next->m_next.load() != nullptr) { if (_p_pre != _p_cur) _p_pre = _p_cur; _p_cur = _p_next; _p_next = _p_next->m_next.load(); ++_garbage_part_size; } UnorderedSingleListNode<SSTAble> *_new_sstable_node = new UnorderedSingleListNode<SSTAble>(*_p_next->m_data,*_p_cur->m_data); if (_garbage_part_size == 2) this->m_sstable_table_head.store(_new_sstable_node); else { /*_p_pre always pointing to the element immediately preceding the one to be merged into, and its origin value is related to the #sstables.And the following CAS operation should always succeed since there is no multiple thread updating scenarios for the 'next' pointer of _p_pre.*/ CHECK(_p_pre->m_next.compare_exchange_strong(_p_cur, _new_sstable_node)) << "update merged sstable previous next pointer fail."; } /*Note :We cannot release the purged sstable objects immediately after the moment we've finished purging since there may have other threads accessing them. Here we push them into a 'garbage' list and releasing them later,aka next round of purging. */ LOG(INFO) << "merged sstable of " << _p_cur->m_data->GetFilename() << " and " << _p_next->m_data->GetFilename() << " into " << _new_sstable_node->m_data->GetFilename(); //Insert the new nodes at garbage list's head. 
auto *_p_cur_head = m_garbage_sstable.load(); _p_next->m_next.store(_p_cur_head); while (!m_garbage_sstable.compare_exchange_weak(_p_cur_head, _p_cur)) _p_next->m_next.store(_p_cur_head); return true; } bool StorageMgr::Set(const LogIdentifier &log_id ,const std::string &key, const std::string &value) noexcept{ auto *_cur_head = this->m_memory_table_head.load(); _cur_head->m_data->Insert(key, value, log_id.m_term, log_id.m_index); LogIdentifier _cur_id; _cur_id.Set(this->m_last_committed.load()); while (log_id > _cur_id) { if (this->m_last_committed.compare_exchange_strong(_cur_id, log_id)) break; } if (_cur_head->m_data->Size() <= ::RaftCore::Config::FLAGS_memory_table_max_item) return true; UnorderedSingleListNode<MemoryTable>* _new_head = new UnorderedSingleListNode<MemoryTable>(); bool _insert_succeed = false; while (_cur_head->m_data->Size() > ::RaftCore::Config::FLAGS_memory_table_max_item) { _new_head->m_next.store(_cur_head); _insert_succeed = this->m_memory_table_head.compare_exchange_strong(_cur_head, _new_head); if (!_insert_succeed) continue; //Here '_cur_head' already been updated to the latest head of 'm_memory_table_head'. this->DumpMemoryTable(_cur_head->m_data); /*Note: 1.Since DumpMemoryTable() can be executed simultaneously, we need to ensure that all subsequent nodes after '_cur_head' in the single linked list are dumped(aka,their data can be found in the sstable link list now) before cutting off _cur_head's ancestor's link to it. Otherwise client cannot query data in the node which are cut off too early. 2. Records among sstables can be not in order because of the reasons: 1> records can invode 'StorageMgr::Set' simultanously. 2> memory tables are dummped into stables simultanously. 3> sstable file names are not in lexicographical order if more one sstables falls into the same microsecond time windows. But the change of get unordered records among sstables is slim: 1> & 2> depends on a concurrent dumping which itself is also rare. 
3> can be basically ignored. It's still acceptable even if that happened: 1) No impacts on reading, you can't decide a strict order for records that very close to each other, in the first place. 2) get unordered result in 'GetSlice', but it's okay for the scenario where it applies to : sync data to the lag behind followers. */ while (_cur_head->m_next.load() != nullptr); _new_head->m_next.store(nullptr); auto *_p_garbage_head = this->m_garbage_memory_table.load(); _cur_head->m_next = _p_garbage_head; while (!this->m_garbage_memory_table.compare_exchange_weak(_p_garbage_head, _cur_head)) _cur_head->m_next = _p_garbage_head; return true; } if (!_insert_succeed) delete _new_head; return true; } const LogIdentifier StorageMgr::GetLastCommitted() const noexcept { return this->m_last_committed.load(); } void StorageMgr::ClearInMemoryData() noexcept { //Clear in-memory data themselves. this->ReleaseData<MemoryTable>(this->m_garbage_memory_table); this->ReleaseData<MemoryTable>(this->m_memory_table_head); this->ReleaseData<SSTAble>(this->m_sstable_table_head); this->ReleaseData<SSTAble>(this->m_garbage_sstable); } void StorageMgr::Reset() noexcept { this->ClearInMemoryData(); //Clear on-disk data. CHECK(!this->m_loaded_binlog_files.empty()); if(this->m_loaded_binlog_files.size() > 1) { auto _iter = this->m_loaded_binlog_files.cbegin(); //Skip the first one since it's the default binlog and shall be delete in the BinlogMgr. _iter++; for (; _iter != this->m_loaded_binlog_files.cend(); ++_iter) { const std::string &_delete_binlog_file = *_iter; LOG(INFO) << "Deleting the loaded binlog files for storage Reset:" << _delete_binlog_file; CHECK(std::remove(_delete_binlog_file.c_str()) == 0); } } LOG(INFO) << "Deleting the whole data directory for storage Reset."; fs::remove(this->m_path); //TODO: figure why sometimes this failed under win10. CHECK(!fs::exists(this->m_path)); fs::create_directory(this->m_path); //Reinitialization. 
this->Initialize(this->m_role_str.c_str(), true); } void StorageMgr::GetSliceInSSTable(const LogIdentifier& start_at, int step_len, std::list<StorageItem> &output_list) const noexcept { //Once goes here, the binlog must have been iterated, so we only need to auto *_p_cur_sstable = this->m_sstable_table_head.load(); if (_p_cur_sstable == nullptr) return; std::vector<decltype(_p_cur_sstable)> _access_list; do { auto _cur_max_id = _p_cur_sstable->m_data->GetMaxLogID(); if (start_at >= _cur_max_id) break; _access_list.emplace_back(_p_cur_sstable); _p_cur_sstable = _p_cur_sstable->m_next.load(); } while (_p_cur_sstable != nullptr); if (_access_list.empty()) return; //Records' log ID in the newer sstable are all larger than those in the older sstable,so just need to start at _p_pre. int _counter = 0; auto _reading = [&](const SSTAble::Meta &meta,const HashableString &key) ->bool{ if (_counter >= step_len) return false; LogIdentifier _cur_id; _cur_id.Set(meta.m_term,meta.m_index); if (_cur_id <= start_at) return true; std::string _val = ""; _p_cur_sstable->m_data->Read(*key.GetStrPtr(), _val); output_list.emplace_back(_cur_id, key.GetStrPtr(), std::make_shared<std::string>(std::move(_val))); _counter++; return true; }; for (auto _iter = _access_list.crbegin(); _iter != _access_list.crend(); ++_iter) { _p_cur_sstable = *_iter; if (!(*_iter)->m_data->IterateByVal(_reading)) break; } return; } void StorageMgr::GetSliceInMemory(const LogIdentifier& start_at, int step_len, std::list<StorageItem> &output_list) const noexcept { //Once goes here, the binlog must have been iterated, so we only need to auto *_p_cur_memory_table = this->m_memory_table_head.load(); if (_p_cur_memory_table == nullptr) return; /*Caveat : There is an implicit constrain that the dumping memory tables won't be reclaimed during the following iterations. 
Since there are several seconds before next GC, we can just rely on it.*/ std::vector<decltype(_p_cur_memory_table)> _access_list; /*TODO: Prevent from a rare case of losing dumping tables while switch from iterating sstable to iterating memory table.*/ while (_p_cur_memory_table != nullptr) { _access_list.push_back(_p_cur_memory_table); _p_cur_memory_table = _p_cur_memory_table->m_next.load(); } CHECK(!_access_list.empty()); //std::function<bool(const TypePtrHashableString&,const TypePtrHashValue&)> op int _counter = 0; auto _reading = [&](const HashValue &hash_val,const HashableString &key) ->bool{ if (_counter >= step_len) return false; LogIdentifier _cur_id; _cur_id.Set(hash_val.m_term, hash_val.m_index); if (_cur_id <= start_at) return true; output_list.emplace_back(_cur_id, key.GetStrPtr(), hash_val.m_val); _counter++; return true; }; for (auto _iter = _access_list.crbegin(); _iter != _access_list.crend(); ++_iter) { _p_cur_memory_table = *_iter; (*_iter)->m_data->IterateByVal(_reading); } return; } void StorageMgr::GetSlice(const LogIdentifier& start_at,uint32_t step_len,std::list<StorageItem> &output_list) const noexcept { output_list.clear(); this->GetSliceInSSTable(start_at, step_len, output_list); std::size_t _got_size = output_list.size(); if (_got_size >= step_len) return; uint32_t _remain = (uint32_t)(step_len - _got_size); this->GetSliceInMemory(start_at, _remain, output_list); } void StorageMgr::FindRoleBinlogFiles(const std::string &role, std::list<std::string> &output) { output.clear(); std::string _filename_reg_pattern = _AURORA_BINLOG_NAME_REG_ + std::string("\\.") + role + std::string("(-[0-9]*){0,1}"); LOG(INFO) << "searching binlog file with pattern:" << _filename_reg_pattern; std::regex _pattern(_filename_reg_pattern); std::smatch _sm; for (auto&& x : fs::directory_iterator(fs::path("."))) { std::string _file_name = x.path().filename().string(); if (!std::regex_match(_file_name, _sm, _pattern)) continue; output.emplace_back(_file_name); } } }
24,598
C++
.cc
500
42.088
144
0.639527
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,250
sstable.cc
ppLorins_aurora/src/storage/sstable.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <unordered_set> #include <chrono> #include "boost/filesystem.hpp" #include "common/comm_view.h" #include "config/config.h" #include "tools/utilities.h" #include "storage/sstable.h" #define _AURORA_SSTABLE_OP_MODE_ "ab+" #define _AURORA_SSTABLE_READ_MODE_ "rb" #define _AURORA_SSTABLE_FOOTER_ "!@#$sstable$#@!" 
namespace RaftCore::Storage { namespace fs = ::boost::filesystem; using ::RaftCore::Common::CommonView; using ::RaftCore::Tools::ConvertToBigEndian; using ::RaftCore::Tools::ConvertBigEndianToLocal; using ::RaftCore::Storage::TypePtrHashableString; using ::RaftCore::Storage::PtrHSHasher; using ::RaftCore::Storage::PtrHSEqualer; SSTAble::Meta::Meta(uint32_t a, uint16_t b, uint32_t c, uint16_t d,uint32_t e,uint64_t f) { this->m_key_offset = a; this->m_key_len = b; this->m_val_offset = c; this->m_val_len = d; this->m_term = e; this->m_index = f; } bool SSTAble::Meta::operator<(const Meta &other) { if (this->m_term < other.m_term) return true; if (this->m_term > other.m_term) return false; return this->m_index < other.m_index; } SSTAble::SSTAble(const char* file) noexcept { this->m_shp_meta = std::make_shared<TypeOffset>(::RaftCore::Config::FLAGS_sstable_table_hash_slot_num); this->m_min_log_id.Set(CommonView::m_max_log_id); this->m_max_log_id.Set(CommonView::m_zero_log_id); this->m_associated_file = file; this->ParseFile(); } SSTAble::SSTAble(const MemoryTable &src) noexcept { this->m_shp_meta = std::make_shared<TypeOffset>(::RaftCore::Config::FLAGS_sstable_table_hash_slot_num); this->CreateFile(); this->DumpFrom(src); } SSTAble::SSTAble(const SSTAble &from, const SSTAble &to) noexcept { this->m_shp_meta = std::make_shared<TypeOffset>(::RaftCore::Config::FLAGS_sstable_table_hash_slot_num); std::list<TypePtrHashableString> _from_keys; from.m_shp_meta->GetOrderedByKey(_from_keys); CHECK(_from_keys.size() > 0) << "sstable meta size invalid ,file:" << from.GetFilename(); std::list<TypePtrHashableString> _to_keys; to.m_shp_meta->GetOrderedByKey(_to_keys); CHECK(_to_keys.size() > 0) << "sstable meta size invalid ,file:" << to.GetFilename(); //Merge sort. 
MemoryTable _mem_table; std::unordered_set<TypePtrHashableString,PtrHSHasher,PtrHSEqualer> _intersection; auto _cmp = [](const TypePtrHashableString &left, const TypePtrHashableString &right) ->bool { return left->operator<(*right); }; std::set_intersection(_from_keys.cbegin(), _from_keys.cend(), _to_keys.cbegin(), _to_keys.cend(), std::inserter(_intersection,_intersection.end()),_cmp); auto _update_mem_table = [&](const std::list<TypePtrHashableString> &_key_list,const SSTAble &sstable,bool filter=false) { for (const auto &_key : _key_list) { if (filter && _intersection.find(_key) != _intersection.cend()) continue; std::string _val = ""; const std::string &_key_str = _key->GetStr(); CHECK(sstable.Read(_key_str, _val)) << "key:" << _key_str << " doesn't exist in file:" << sstable.GetFilename(); TypePtrMeta _shp_meta; CHECK(sstable.m_shp_meta->Read(*_key, _shp_meta)) << "key:" << _key_str << " doesn't exist in meta:" << sstable.GetFilename(); _mem_table.Insert(_key_str,_val,_shp_meta->m_term,_shp_meta->m_index); } }; _update_mem_table(_to_keys, to); _update_mem_table(_from_keys, from, true); std::string _new_file_name = to.m_associated_file + _AURORA_SSTABLE_MERGE_SUFFIX_; this->CreateFile(_new_file_name.c_str()); this->DumpFrom(_mem_table); } SSTAble::~SSTAble() noexcept { if (this->m_file_handler != nullptr) CHECK(fclose(this->m_file_handler) == 0) << "close sstable file fail."; } const std::string& SSTAble::GetFilename() const noexcept { return this->m_associated_file; } void SSTAble::CreateFile(const char* file_name) noexcept { char sz_file[128] = { 0 }; if (file_name) { std::snprintf(sz_file,sizeof(sz_file),"%s",file_name); this->m_associated_file = sz_file; } else { auto _rand = ::RaftCore::Tools::GenerateRandom(0, 1000); auto _now = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()); std::snprintf(sz_file,sizeof(sz_file),_AURORA_SSTABLE_PREFIX_".%llu.%03u",_now.count(),_rand); fs::path 
_path(_AURORA_DATA_DIR_); if (!fs::exists(_path)) fs::create_directory(_path); _path /= sz_file; this->m_associated_file = _path.string(); } this->m_file_handler = std::fopen(this->m_associated_file.c_str(), _AURORA_SSTABLE_OP_MODE_); CHECK(this->m_file_handler != nullptr) << "create sstable file fail."; } void SSTAble::ParseFile() noexcept { this->m_file_handler = std::fopen(this->m_associated_file.c_str(),_AURORA_SSTABLE_READ_MODE_); CHECK(this->m_file_handler != nullptr) << "open sstable file fail."; CHECK(std::fseek(this->m_file_handler, 0, SEEK_END) == 0) << "seek sstable file fail" << this->m_associated_file; std::size_t _footer_len = std::strlen(_AURORA_SSTABLE_FOOTER_); std::size_t _mininal_file_size = _FOUR_BYTES_ * 3 + _footer_len; uint32_t _file_size = std::ftell(this->m_file_handler); CHECK(_file_size >= _mininal_file_size) << "ftell sstable file " << this->m_associated_file << " fail..,errno:" << errno; std::size_t _tail_len = _footer_len + _FOUR_BYTES_ * 2; long _tail_offset = (long)(_file_size - _tail_len); CHECK(std::fseek(this->m_file_handler, _tail_offset, SEEK_SET) == 0); unsigned int _init_size = 1024; unsigned char *_p_buf = (unsigned char *)malloc(_init_size); std::size_t _read = std::fread(_p_buf, 1, _tail_len, this->m_file_handler); CHECK(_read==_tail_len) << "fread fail,need:" << _tail_len << ",actual:" << _read; //Check footer. CHECK(std::strncmp((char*)&_p_buf[8], _AURORA_SSTABLE_FOOTER_, _footer_len) == 0) << "sstable footer wrong,check it:" << this->m_associated_file; //Read meta offset. uint32_t _meta_offset = 0; std::memcpy(&_meta_offset, &_p_buf[4], _FOUR_BYTES_); ConvertBigEndianToLocal<uint32_t>(_meta_offset,&_meta_offset); //Read meta checksum. 
uint32_t _meta_crc = 0; std::memcpy(&_meta_crc, &_p_buf, _FOUR_BYTES_); ConvertBigEndianToLocal<uint32_t>(_meta_crc,&_meta_crc); CHECK(std::fseek(this->m_file_handler, _meta_offset, SEEK_SET) == 0); std::size_t _meta_area_len = _tail_offset - _meta_offset; if (_meta_area_len > _init_size) _p_buf = (unsigned char*)std::realloc(_p_buf, _meta_area_len); this->ParseMeta(_p_buf,_meta_area_len); free(_p_buf); } void SSTAble::ParseMeta(unsigned char* &allocated_buf,std::size_t meta_len) noexcept { std::size_t _read = std::fread(allocated_buf, 1, meta_len, this->m_file_handler); CHECK(_read==meta_len) << "fread fail,need:" << meta_len << ",actual:" << _read; auto *_p_cur = allocated_buf; while (_p_cur < allocated_buf + meta_len) { //Parse key offset. uint32_t _key_offset = 0; std::memcpy(&_key_offset, _p_cur, _FOUR_BYTES_); ConvertBigEndianToLocal<uint32_t>(_key_offset,&_key_offset); _p_cur += _FOUR_BYTES_; //Parse key len. uint16_t _key_len = 0; std::memcpy(&_key_len, _p_cur, _TWO_BYTES_); ConvertBigEndianToLocal<uint16_t>(_key_len,&_key_len); _p_cur += _TWO_BYTES_; //Parse val offset. uint32_t _val_offset = 0; std::memcpy(&_val_offset, _p_cur, _FOUR_BYTES_); ConvertBigEndianToLocal<uint32_t>(_val_offset,&_val_offset); _p_cur += _FOUR_BYTES_; //Parse val len. uint16_t _val_len = 0; std::memcpy(&_val_len, _p_cur, _TWO_BYTES_); ConvertBigEndianToLocal<uint16_t>(_val_len,&_val_len); _p_cur += _TWO_BYTES_; //Parse term. uint32_t _term = 0; std::memcpy(&_term, _p_cur, _FOUR_BYTES_); ConvertBigEndianToLocal<uint32_t>(_term,&_term); _p_cur += _FOUR_BYTES_; //Parse index. uint64_t _index = 0; std::memcpy(&_index, _p_cur, _EIGHT_BYTES_); ConvertBigEndianToLocal<uint64_t>(_index,&_index); _p_cur += _EIGHT_BYTES_; //Read key. 
std::string _key(_key_len,0); CHECK(std::fseek(this->m_file_handler, _key_offset, SEEK_SET) == 0); std::fread((char*)_key.data(), 1, _key_len, this->m_file_handler); TypePtrHashableString _shp_key(new HashableString(_key)); TypePtrMeta _shp_meta(new Meta(_key_offset,_key_len,_val_offset,_val_len,_term,_index)); this->m_shp_meta->Insert(_shp_key,_shp_meta); //Update max log ID. LogIdentifier _cur_id; _cur_id.Set(_term, _index); if (_cur_id > this->m_max_log_id) this->m_max_log_id.Set(_cur_id); if (_cur_id < this->m_min_log_id) this->m_min_log_id.Set(_cur_id); } } bool SSTAble::Read(const std::string &key, std::string &val) const noexcept { std::shared_ptr<Meta> _shp_meta; if (!this->m_shp_meta->Read(HashableString(key,true), _shp_meta)) return false; //Need to open a new fd to support concurrently reading. std::FILE* _handler = std::fopen(this->m_associated_file.c_str(),_AURORA_SSTABLE_READ_MODE_); CHECK(_handler != nullptr) << "fopen sstable file fail."; CHECK(std::fseek(_handler, _shp_meta->m_val_offset, SEEK_SET) == 0); val.resize(_shp_meta->m_val_len); std::size_t _read = std::fread((char*)val.data(), 1, _shp_meta->m_val_len, _handler); CHECK(_read==_shp_meta->m_val_len) << "fread fail,need:" << _shp_meta->m_val_len << ",actual:" << _read; CHECK(std::fclose(_handler)==0) << "Read sstable file fclose failed"; return true; } void SSTAble::AppendKvPair(const TypePtrHashableString &key, const TypePtrHashValue &val, void* buf, uint32_t buff_len, uint32_t &buf_offset, uint32_t &file_offset) noexcept { uint16_t _key_len = (uint16_t)key->GetStr().length(); uint16_t _val_len = (uint16_t)val->m_val->length(); int _cur_len = _key_len + _val_len; if (buf_offset + _cur_len > buff_len) { CHECK(std::fwrite(buf, 1, buf_offset, this->m_file_handler) == buf_offset) << "fwrite KV records fail,error no:" << errno; CHECK (std::fflush(this->m_file_handler) == 0 ) << "fflush KV data to end of binlog file fail..."; //Reset the offset after a successful flush. 
buf_offset = 0; } unsigned char* _p_start_point = (unsigned char*)buf + buf_offset; unsigned char* _p_cur = _p_start_point; //Advance the global position identifiers. buf_offset += _cur_len; //Field-1 : key content. std::memcpy(_p_cur, key->GetStr().data(), _key_len); _p_cur += _key_len; //Field-2 : val content. std::memcpy(_p_cur, val->m_val->data(), _val_len); _p_cur += _val_len; uint32_t _record_crc = ::RaftCore::Tools::CalculateCRC32(_p_start_point, _cur_len); this->m_record_crc += _record_crc; uint32_t _val_offset = file_offset + _key_len; this->m_shp_meta->Insert(key, std::make_shared<Meta>(file_offset, _key_len, _val_offset, _val_len, val->m_term, val->m_index)); file_offset += _cur_len; //Update max log ID. LogIdentifier _cur_id; _cur_id.Set(val->m_term, val->m_index); if (_cur_id > this->m_max_log_id) this->m_max_log_id.Set(_cur_id); if (_cur_id < this->m_min_log_id) this->m_min_log_id.Set(_cur_id); } void SSTAble::AppendChecksum(uint32_t checksum) noexcept { uint32_t _copy = checksum; ConvertToBigEndian<uint32_t>(_copy, &_copy); CHECK(std::fwrite(&_copy, 1, _FOUR_BYTES_, this->m_file_handler) == _FOUR_BYTES_) << "fwrite CRC fail,error no:" << errno; CHECK (std::fflush(this->m_file_handler) == 0 ) << "fflush checksum to end of binlog file fail..."; } void SSTAble::CalculateMetaOffset() noexcept { this->m_meta_offset = std::ftell(this->m_file_handler); CHECK(this->m_meta_offset >= 0) << "ftell sstable file " << this->m_associated_file << "fail..,errno:" << errno; } void SSTAble::AppendMetaOffset() noexcept { uint32_t _copy = this->m_meta_offset; ConvertToBigEndian<uint32_t>(_copy, &_copy); CHECK(std::fwrite(&_copy, 1, _FOUR_BYTES_, this->m_file_handler) == _FOUR_BYTES_) << "fwrite CRC fail,error no:" << errno; CHECK(std::fflush(this->m_file_handler) == 0) << "fflush meta offset to end of binlog file fail..."; } void SSTAble::AppendMeta(const TypePtrHashableString &key, const TypePtrMeta &shp_meta, void* buf, uint32_t buff_len, uint32_t &buf_offset, uint32_t 
&file_offset) noexcept { if (buf_offset + this->m_single_meta_len > buff_len) { CHECK(std::fwrite(buf, 1, buf_offset, this->m_file_handler) == buf_offset) << "fwrite meta fail,error no:" << errno; CHECK (std::fflush(this->m_file_handler) == 0 ) << "fflush meta to end of binlog file fail..."; buf_offset = 0; } uint32_t _key_offset = shp_meta->m_key_offset; uint16_t _key_len = (uint16_t)key->GetStr().length(); uint32_t _val_offset = shp_meta->m_val_offset; uint16_t _val_len = shp_meta->m_val_len; uint32_t _term = shp_meta->m_term; uint64_t _index = shp_meta->m_index; int _cur_buf_len = this->m_single_meta_len; unsigned char* _p_start_point = (unsigned char*)buf + buf_offset; auto *_p_cur = _p_start_point; buf_offset += this->m_single_meta_len; //Field-1 : key offset. ConvertToBigEndian<uint32_t>(_key_offset, &_key_offset); std::memcpy(_p_cur, (unsigned char*)&_key_offset, _FOUR_BYTES_); _p_cur += _FOUR_BYTES_; //Field-2 : key len. ConvertToBigEndian<uint16_t>(_key_len, &_key_len); std::memcpy(_p_cur, (unsigned char*)&_key_len, _TWO_BYTES_); _p_cur += _TWO_BYTES_; //Field-3 : val offset. ConvertToBigEndian<uint32_t>(_val_offset, &_val_offset); std::memcpy(_p_cur, (unsigned char*)&_val_offset, _FOUR_BYTES_); _p_cur += _FOUR_BYTES_; //Field-4 : val len. ConvertToBigEndian<uint16_t>(_val_len, &_val_len); std::memcpy(_p_cur, (unsigned char*)&_val_len, _TWO_BYTES_); _p_cur += _TWO_BYTES_; //Field-5 : term. ConvertToBigEndian<uint32_t>(_term, &_term); std::memcpy(_p_cur, (unsigned char*)&_term, _FOUR_BYTES_); _p_cur += _FOUR_BYTES_; //Field-6 : index. 
ConvertToBigEndian<uint64_t>(_index, &_index); std::memcpy(_p_cur, (unsigned char*)&_index, _EIGHT_BYTES_); _p_cur += _EIGHT_BYTES_; uint32_t _meta_crc = ::RaftCore::Tools::CalculateCRC32(_p_start_point, _cur_buf_len); this->m_meta_crc += _meta_crc; } void SSTAble::AppendFooter() noexcept { static std::size_t _len = std::strlen(_AURORA_SSTABLE_FOOTER_); CHECK(std::fwrite(_AURORA_SSTABLE_FOOTER_, 1, _len, this->m_file_handler) == _len) << "fwrite footer fail,error no:" << errno; CHECK (std::fflush(this->m_file_handler) == 0 ) << "fflush footer to end of binlog file fail..."; } void SSTAble::DumpFrom(const MemoryTable &src) noexcept { const uint32_t _estimated_avg_record_bytes = 20; uint32_t _buf_size = _estimated_avg_record_bytes * ::RaftCore::Config::FLAGS_memory_table_max_item; uint32_t _buf_offset = 0, _file_offset = 0; void* _p_buf = malloc(_buf_size); //Append KV records. auto _append_kv = [&](const TypePtrHashableString &shp_key, const TypePtrHashValue &shp_val)->bool { this->AppendKvPair(shp_key, shp_val, _p_buf, _buf_size, _buf_offset, _file_offset); return true; }; src.IterateByKey(_append_kv); //Check if there are remaining bytes if (_buf_offset > 0) { CHECK(std::fwrite(_p_buf, 1, _buf_offset, this->m_file_handler) == _buf_offset) << "fwrite KV records fail,error no:" << errno; CHECK (std::fflush(this->m_file_handler) == 0 ) << "fflush KV data to end of binlog file fail..."; } //Append checksum of KV records. this->AppendChecksum(this->m_record_crc); //Get the offset of meta. Must be called immediately after appending KV checksum. this->CalculateMetaOffset(); //To reuse the buff. _buf_offset = 0; //Append Meta data. auto _append_meta = [&](const TypePtrHashableString &shp_key, const TypePtrMeta &shp_meta)->bool { this->AppendMeta(shp_key, shp_meta, _p_buf, _buf_size, _buf_offset, _file_offset); return true; }; this->m_shp_meta->Iterate(_append_meta); free(_p_buf); _p_buf = nullptr; //Append checksum of meta. 
this->AppendChecksum(this->m_meta_crc); this->AppendMetaOffset(); //Append footprint. this->AppendFooter(); } LogIdentifier SSTAble::GetMaxLogID() const noexcept { return this->m_max_log_id; } LogIdentifier SSTAble::GetMinLogID() const noexcept { return this->m_min_log_id; } bool SSTAble::IterateByVal(std::function<bool(const Meta &meta, const HashableString &key)> op) const noexcept { LockFreeHash<HashableString, Meta>::ValueComparator _cmp = [](const TypePtrMeta &left, const TypePtrMeta &right)->bool { return *left < *right; }; std::map<std::shared_ptr<Meta>, std::shared_ptr<HashableString>,decltype(_cmp)> _ordered_by_value_map(_cmp); this->m_shp_meta->GetOrderedByValue(_ordered_by_value_map); for (const auto &_item : _ordered_by_value_map) if (!op(*_item.first, *_item.second)) return false; return true; } }
18,410
C++
.cc
366
44.491803
157
0.638419
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,251
hashable_string.cc
ppLorins_aurora/src/storage/hashable_string.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "tools/utilities.h" #include "storage/hashable_string.h" namespace RaftCore::Storage { HashableString::HashableString(const std::string &other,bool on_fly) noexcept { //Ignore delete operation. if (on_fly) this->m_shp_str.reset(const_cast<std::string*>(&other), [](auto *p) {}); else this->m_shp_str = std::make_shared<std::string>(other); } HashableString::~HashableString() noexcept {} bool HashableString::operator<(const HashableString &other)const noexcept { return std::strcmp(this->m_shp_str->c_str(), other.m_shp_str->c_str()) < 0; } bool HashableString::operator==(const HashableString &other)const noexcept { return std::strcmp(this->m_shp_str->c_str(), other.m_shp_str->c_str()) == 0; } bool HashableString::operator==(const std::string &other)const noexcept { return std::strcmp(this->m_shp_str->c_str(), other.c_str()) == 0; } const HashableString& HashableString::operator=(const HashableString &other)noexcept { //Deleter will also be transferred. 
this->m_shp_str = other.m_shp_str; return *this; } std::size_t HashableString::Hash() const noexcept { return std::hash<std::string>{}(*this->m_shp_str); } const std::string& HashableString::GetStr() const noexcept { return *this->m_shp_str; } const std::shared_ptr<std::string> HashableString::GetStrPtr() const noexcept { return this->m_shp_str; } std::size_t PtrHSHasher::operator()(const TypePtrHashableString &shp_hashable_string)const { return std::hash<std::string>{}(shp_hashable_string->GetStr()); } bool PtrHSEqualer::operator()(const TypePtrHashableString &left,const TypePtrHashableString &right)const { return left->GetStr() == right->GetStr(); } }
2,494
C++
.cc
55
42.727273
106
0.725434
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,252
memory_table.cc
ppLorins_aurora/src/storage/memory_table.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "config/config.h" #include "storage/memory_table.h" namespace RaftCore::Storage { using ::RaftCore::Storage::TypePtrHashableString; HashValue::HashValue(uint32_t a, uint64_t b, const std::string &c) { this->m_term = a; this->m_index = b; this->m_val = std::make_shared<std::string>(c); } bool HashValue::operator<(const HashValue &other)const noexcept { if (this->m_term < other.m_term) return true; if (this->m_term > other.m_term) return false; return this->m_index < other.m_index; } MemoryTable::MemoryTable() noexcept { this->m_shp_records = std::make_shared<TypeRecords>(::RaftCore::Config::FLAGS_memory_table_hash_slot_num); } MemoryTable::~MemoryTable() noexcept {} void MemoryTable::Insert(const std::string &key,const std::string &val,uint32_t term,uint64_t index) noexcept{ //Memory copy overhead,can't not get around. 
TypePtrHashableString _shp_key = std::make_shared<HashableString>(key); TypePtrHashValue _shp_val = std::make_shared<HashValue>(term, index, val); this->m_shp_records->Insert(_shp_key,_shp_val); } void MemoryTable::IterateByKey(std::function<bool(const TypePtrHashableString&, const TypePtrHashValue&)> op) const noexcept { std::list<TypePtrHashableString> _ordered_meta; this->m_shp_records->GetOrderedByKey(_ordered_meta); for (const auto &_meta : _ordered_meta) { TypePtrHashValue _shp_val; this->m_shp_records->Read(*_meta, _shp_val); if (!op(_meta, _shp_val)) break; } } bool MemoryTable::IterateByVal(std::function<bool(const HashValue&, const HashableString&)> op) const noexcept { LockFreeHash<HashableString, HashValue>::ValueComparator _cmp = [](const TypePtrHashValue &left, const TypePtrHashValue &right)->bool { return *left < *right; }; std::map<std::shared_ptr<HashValue>, std::shared_ptr<HashableString>,decltype(_cmp)> _ordered_by_value_map(_cmp); this->m_shp_records->GetOrderedByValue(_ordered_by_value_map); for (const auto &_item : _ordered_by_value_map) if (!op(*_item.first, *_item.second)) return false; return true; } bool MemoryTable::GetData(const std::string &key,std::string &val) const noexcept { TypePtrHashValue _shp_val; if (!this->m_shp_records->Read(HashableString(key,true), _shp_val)) return false; val = *_shp_val->m_val; return true; } std::size_t MemoryTable::Size() const noexcept { return this->m_shp_records->Size(); } }
3,290
C++
.cc
70
43.014286
165
0.707615
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,253
lock_free_unordered_single_list.cc
ppLorins_aurora/src/tools/lock_free_unordered_single_list.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "glog/logging.h" #include "tools/lock_free_unordered_single_list.h" namespace RaftCore::DataStructure { template <typename T> template <typename ...Args> UnorderedSingleListNode<T>::UnorderedSingleListNode(Args&&... args)noexcept { this->m_data = new T(std::forward<Args>(args)...); this->m_next.store(nullptr); } template <typename T> UnorderedSingleListNode<T>::~UnorderedSingleListNode()noexcept { if (this->m_data != nullptr) { delete this->m_data; this->m_data = nullptr; } } template <typename T> UnorderedSingleListNode<T>::UnorderedSingleListNode(T* p_src)noexcept { this->m_data = p_src; this->m_next.store(nullptr); } template <typename T> LockFreeUnorderedSingleList<T>::LockFreeUnorderedSingleList() noexcept { this->m_head.store(nullptr); } template <typename T> LockFreeUnorderedSingleList<T>::~LockFreeUnorderedSingleList() noexcept{} template <typename T> void LockFreeUnorderedSingleList<T>::SetDeleter(std::function<void(T*)> deleter)noexcept { this->m_deleter = deleter; } template <typename T> void LockFreeUnorderedSingleList<T>::PushFront(T* src) noexcept { auto *_p_cur_head = this->m_head.load(); auto * _p_new_node = new UnorderedSingleListNode<T>(src); _p_new_node->m_next = _p_cur_head; while 
(!this->m_head.compare_exchange_weak(_p_cur_head, _p_new_node)) _p_new_node->m_next = _p_cur_head; } template <typename T> void LockFreeUnorderedSingleList<T>::PurgeSingleList(uint32_t retain_num) noexcept { std::size_t _cur_num = 1; auto *_p_start_point = this->m_head.load(); while (_p_start_point != nullptr) { _p_start_point = _p_start_point->m_next.load(); _cur_num++; if (_cur_num >= retain_num) break; } if (_p_start_point == nullptr) return; auto *_p_cur = _p_start_point->m_next.load(); _p_start_point->m_next.store(nullptr); std::size_t _released_num = 0; while (_p_cur != nullptr) { auto *_p_next = _p_cur->m_next.load(); //Use the customizable deleter. this->m_deleter(_p_cur->m_data); _p_cur->m_data = nullptr; delete _p_cur; _p_cur = _p_next; _released_num++; } if (_released_num > 0) VLOG(89) << "released " << _released_num << " elements in singleList's garbage list."; } #ifdef _UNORDERED_SINGLE_LIST_TEST_ template <typename T> uint32_t LockFreeUnorderedSingleList<T>::Size() noexcept { uint32_t _size = 0; auto *_p_cur = this->m_head.load(); while (_p_cur != nullptr) { _p_cur = _p_cur->m_next; _size++; } return _size; } template <typename T> void LockFreeUnorderedSingleList<T>::Iterate(std::function<void(T*)> func) noexcept { auto *_p_cur = this->m_head.load(); while (_p_cur != nullptr) { auto *_p_next = _p_cur->m_next.load(); func(_p_cur->m_data); _p_cur = _p_next; } } #endif }
3,728
C++
.cc
102
32.372549
94
0.669539
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,254
trivial_lock_single_list.cc
ppLorins_aurora/src/tools/trivial_lock_single_list.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "tools/trivial_lock_single_list.h" namespace RaftCore::DataStructure { template <typename T> SingleListNode<T>::SingleListNode(const std::shared_ptr<T> &shp_val) noexcept{ static_assert(std::is_base_of<OrderedTypeBase<T>,T>::value,"template parameter of TrivialLockSingleList invalid"); this->m_val = shp_val; this->m_atomic_next.store(nullptr); } template <typename T> SingleListNode<T>::~SingleListNode() noexcept {} template <typename T> bool SingleListNode<T>::operator<(const SingleListNode& other) const noexcept { return *this->m_val < *other.m_val; } template <typename T> bool SingleListNode<T>::operator>(const SingleListNode& other) const noexcept { return *this->m_val > *other.m_val; } template <typename T> bool SingleListNode<T>::operator==(const SingleListNode& other) const noexcept { return *this->m_val == *other.m_val; } template <typename T> void SingleListNode<T>::Apply(SingleListNode<T>* phead, std::function<void(SingleListNode<T>*)> unary) noexcept { auto _p_cur = phead; while (_p_cur != nullptr) { //Note: unary may modify _p_cur's next pointer after its execution. 
auto _p_next = _p_cur->m_atomic_next.load(); unary(_p_cur); _p_cur = _p_next; } } template <typename T> TrivialLockSingleList<T>::TrivialLockSingleList(const std::shared_ptr<T> &p_min, const std::shared_ptr<T> &p_max) noexcept { static_assert(std::is_base_of<OrderedTypeBase<T>,T>::value,"template parameter of TrivialLockSingleList invalid"); this->m_head = new SingleListNode<T>(p_min); this->m_tail = new SingleListNode<T>(p_max); this->m_head->m_atomic_next.store(this->m_tail); } template <typename T> TrivialLockSingleList<T>::~TrivialLockSingleList() noexcept{ this->Clear(); } template <typename T> void TrivialLockSingleList<T>::Clear() noexcept { auto *_p_cur = this->m_head->m_atomic_next.load(); while (_p_cur != this->m_tail) { auto tmp = _p_cur; _p_cur = _p_cur->m_atomic_next.load(); delete tmp; } this->m_head->m_atomic_next.store(this->m_tail); } template <typename T> SingleListNode<T>* TrivialLockSingleList<T>::SetEmpty() noexcept { auto *_p_old = this->m_head->m_atomic_next.load(); while (!this->m_head->m_atomic_next.compare_exchange_weak(_p_old, this->m_tail)) continue; if (_p_old == this->m_tail) return nullptr; auto *_p_cur = _p_old; auto *_p_next = _p_cur->m_atomic_next.load(); while (_p_next != this->m_tail) { _p_cur = _p_next; _p_next = _p_next->m_atomic_next.load(); } _p_cur->m_atomic_next.store(nullptr); return _p_old; } template <typename T> void TrivialLockSingleList<T>::Insert(const std::shared_ptr<T> &p_one) noexcept { SingleListNode<T>* new_node = new SingleListNode<T>(p_one); this->Insert(new_node); } template <typename T> void TrivialLockSingleList<T>::Insert(SingleListNode<T>* new_node) noexcept { this->InsertTracker(new_node); } template <typename T> void TrivialLockSingleList<T>::InsertTracker(SingleListNode<T>* new_node) noexcept { ThreadIDWrapper<void> *_p_tracker = new ThreadIDWrapper<void>(std::this_thread::get_id()); bool _ownership_taken = false; if (this->m_p_insert_footprint->Upsert(_p_tracker, new_node)) _ownership_taken = true; 
while (!this->InsertRaw(new_node)) VLOG(89) << "-------redo InsertRaw!-----"; //Since _p_tracker already exist, the return value must be true. CHECK(!this->m_p_insert_footprint->Upsert(_p_tracker, nullptr)); if (!_ownership_taken) delete _p_tracker; } template <typename T> bool TrivialLockSingleList<T>::InsertRaw(SingleListNode<T>* new_node) noexcept { const auto &_p_one = new_node->m_val; auto *_p_pre = this->m_head; auto *_p_cur = _p_pre->m_atomic_next.load(); while (true) { //Reaching the end of the cut head list, need to start over again. if (_p_cur == nullptr) return false; if (_p_cur == this->m_tail) { new_node->m_atomic_next.store(this->m_tail); if (!_p_pre->m_atomic_next.compare_exchange_strong(_p_cur, new_node)) continue; break; } if (*_p_one == *_p_cur->m_val && !_p_cur->IsDeleted()) return true; if (*_p_one > *_p_cur->m_val) { _p_pre = _p_cur; _p_cur = _p_cur->m_atomic_next.load(); continue; } //Once get here, the new node should be inserted between _p_pre and _p_cur. new_node->m_atomic_next.store(_p_cur); if (!_p_pre->m_atomic_next.compare_exchange_strong(_p_cur, new_node)) continue; break; } return true; } template <typename T> bool TrivialLockSingleList<T>::Delete(const std::shared_ptr<T> &p_one) noexcept { auto *_p_node = new SingleListNode<T>(p_one); ThreadIDWrapper<void> *_p_tracker = new ThreadIDWrapper<void>(std::this_thread::get_id()); bool _ownership_taken = false; if (this->m_p_insert_footprint->Upsert(_p_tracker, _p_node)) _ownership_taken = true; //Default to that the element to be deleted not found. 
bool _ret_val = false; auto *_p_cur = this->m_head; while (_p_cur != this->m_tail) { if (*p_one != *_p_cur->m_val) { _p_cur = _p_cur->m_atomic_next.load(); continue; } _p_cur->SetDeleted(); _ret_val = true; break; } CHECK(!this->m_p_insert_footprint->Upsert(_p_tracker, nullptr)); if (!_ownership_taken) { delete _p_tracker; delete _p_node; } return _ret_val; } template <typename T> void TrivialLockSingleList<T>::SiftOutDeleted(SingleListNode<T>* &output_head) noexcept { auto *_to_remove = output_head; decltype(_to_remove) _p_pre = nullptr; while (_to_remove != nullptr) { bool _deleted = false; if (_to_remove->IsDeleted()) { auto _p_next = _to_remove->m_atomic_next.load(); if (_p_pre) _p_pre->m_atomic_next.store(_p_next); else output_head = _p_next; _deleted = true; } auto _tmp = _to_remove; _to_remove = _to_remove->m_atomic_next.load(); if (_deleted) delete _tmp; else _p_pre = _tmp; } } template <typename T> SingleListNode<T>* TrivialLockSingleList<T>::CutHead(std::function<bool(const T &one)> criteria) noexcept { /*Note: In the current design , there can be only one thread invoking this method.But it is allowed to have several other threads doing Insert at the mean time. */ std::unique_lock<std::recursive_mutex> _mutex_lock(this->m_recursive_mutex); auto *_p_pre = this->m_head; auto *_p_cur = _p_pre->m_atomic_next.load(); auto *_p_start = _p_cur; if (_p_cur == this->m_tail) return nullptr; //VLOG(89) << "debug double list cutting starts from:" << _p_cur->m_val->PrintMe(); while (true) { if (criteria(*_p_cur->m_val)) { _p_pre = _p_cur; _p_cur = _p_cur->m_atomic_next.load(); if (_p_cur != this->m_tail) continue; } //No nodes are available if (_p_cur == _p_start) return nullptr; //-----------Start cutting head-----------// if (!_p_pre->m_atomic_next.compare_exchange_strong(_p_cur, nullptr)) { _p_cur = _p_pre; continue; } //Cutting done,just break out. 
break; } /*Note: Once goes here, _p_pre is the latest item of the cut out list and _p_cur is the first item of the remaining list. */ //Detach first node auto _p_tmp = _p_start; while (!this->m_head->m_atomic_next.compare_exchange_strong(_p_tmp, _p_cur)) { decltype(_p_cur) _p_tmp_x = nullptr; CHECK(_p_pre->m_atomic_next.compare_exchange_strong(_p_tmp_x, _p_cur)); //VLOG(89) << "-------recursive cuthead occur!-----"; return this->CutHead(criteria); } auto *_output_head = _p_start; //VLOG(89) << "cut head waitdone"; this->WaitForListClean(_p_cur); //Now the list is cut off with the deleted elements. Need to erase the deleted elements this->SiftOutDeleted(_output_head); //VLOG(89) << "debug double list leave with something"; return _output_head; } template <typename T> SingleListNode<T>* TrivialLockSingleList<T>::CutHeadByValue(const T &val) noexcept { auto judge_smaller_equal = [&](const T &one) -> bool{ return one <= val; }; //VLOG(89) << "start cuthead less than:" << val.PrintMe(); return this->CutHead(judge_smaller_equal); } template <typename T> void TrivialLockSingleList<T>::ReleaseCutHead(SingleListNode<T>* output_head) noexcept { auto _p_cur = output_head; while (_p_cur != nullptr) { auto _p_next = _p_cur->m_atomic_next.load(); delete _p_cur; _p_cur = _p_next; } } template <typename T> void TrivialLockSingleList<T>::IterateCutHead(std::function<bool(std::shared_ptr<T> &)> accessor, SingleListNode<T>* output_head) const noexcept { auto _p_cur = output_head; while (_p_cur != nullptr) { auto _p_next = _p_cur->m_atomic_next.load(); accessor(_p_cur->m_val); _p_cur = _p_next; } } template <typename T> void TrivialLockSingleList<T>::Iterate(std::function<bool(std::shared_ptr<T> &)> accessor) const noexcept { auto *_cur = this->m_head->m_atomic_next.load(); while (_cur != nullptr) { if (_cur == this->m_tail) break; auto _p_tmp = _cur; _cur = _cur->m_atomic_next.load(); if (_p_tmp->IsDeleted()) continue; if (!accessor(_p_tmp->m_val)) break; } } template <typename T> 
bool TrivialLockSingleList<T>::Empty() const noexcept { return this->m_head->m_atomic_next.load() == this->m_tail; } #ifdef _SINGLE_LIST_TEST_ template <typename T> int TrivialLockSingleList<T>::GetSize() const noexcept { int _size = 0; auto *_cur = this->m_head->m_atomic_next.load(); while (_cur != this->m_tail) { if (!_cur->IsDeleted()) _size++; _cur = _cur->m_atomic_next.load(); } return _size; } template <typename T> SingleListNode<T>* TrivialLockSingleList<T>::GetHead() const noexcept { return this->m_head; } #endif }
11,299
C++
.cc
290
32.948276
146
0.625744
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,255
trivial_lock_double_list.cc
ppLorins_aurora/src/tools/trivial_lock_double_list.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "tools/trivial_lock_double_list.h" namespace RaftCore::DataStructure { template <typename T> DoubleListNode<T>::DoubleListNode(const std::shared_ptr<T> &p_val) noexcept{ static_assert(std::is_base_of<OrderedTypeBase<T>,T>::value,"template parameter of TrivialLockDoubleList invalid"); this->m_atomic_pre.store(nullptr); this->m_atomic_next.store(nullptr); this->m_val = p_val; } template <typename T> DoubleListNode<T>::~DoubleListNode() noexcept {} template <typename T> bool DoubleListNode<T>::operator<(const DoubleListNode& other) const noexcept { return *this->m_val < *other.m_val; } template <typename T> bool DoubleListNode<T>::operator>(const DoubleListNode& other) const noexcept { return *this->m_val > *other.m_val; } template <typename T> bool DoubleListNode<T>::operator==(const DoubleListNode& other) const noexcept { return *this->m_val == *other.m_val; } template <typename T> void DoubleListNode<T>::Apply(DoubleListNode<T>* phead, std::function<void(DoubleListNode<T>*)> unary) noexcept { auto _p_cur = phead; while (_p_cur != nullptr) { //Caution: unary may modify _p_cur's next pointer after its execution. 
auto _p_next = _p_cur->m_atomic_next.load(); unary(_p_cur); _p_cur = _p_next; } } template <typename T> TrivialLockDoubleList<T>::TrivialLockDoubleList(const std::shared_ptr<T> &p_min ,const std::shared_ptr<T> &p_max) noexcept{ static_assert(std::is_base_of<OrderedTypeBase<T>,T>::value,"template parameter of TrivialLockDoubleList invalid"); CHECK(p_min && p_max) << "TrivialLockDoubleList init fail"; /* There are at least two nodes in the double linked list: 1> the HEAD node with the minimum of T 2> the TAIL node with the maximum of T This is for conveniently inserting. */ //Init dump head node<T> this->m_head = new DoubleListNode<T>(p_min); //Init dump tail node<T> this->m_tail = new DoubleListNode<T>(p_max); //Join the two together,no need to lock this->m_head->m_atomic_next.store(this->m_tail); this->m_tail->m_atomic_pre.store(this->m_head); } template <typename T> TrivialLockDoubleList<T>::~TrivialLockDoubleList() noexcept{ this->Clear(); delete this->m_head; delete this->m_tail; } template <typename T> void TrivialLockDoubleList<T>::Clear() noexcept { //Reserve head & tail node. 
auto _p_cur = this->m_head->m_atomic_next.load(); while (_p_cur != this->m_tail) { auto tmp = _p_cur; _p_cur = _p_cur->m_atomic_next.load(); delete tmp; } this->m_head->m_atomic_next.store(this->m_tail); this->m_tail->m_atomic_pre.store(this->m_head); } template <typename T> void TrivialLockDoubleList<T>::Insert(const std::shared_ptr<T> &p_one) noexcept { DoubleListNode<T>* new_node = new DoubleListNode<T>(p_one); this->Insert(new_node); } template <typename T> void TrivialLockDoubleList<T>::Insert(DoubleListNode<T>* new_node) noexcept { this->InsertTracker(new_node); } template <typename T> void TrivialLockDoubleList<T>::InsertTracker(DoubleListNode<T>* new_node) noexcept { ThreadIDWrapper<void> *_p_tracker = new ThreadIDWrapper<void>(std::this_thread::get_id()); bool _ownership_taken = false; if (this->m_p_insert_footprint->Upsert(_p_tracker, new_node)) _ownership_taken = true; while (!this->InsertRaw(new_node)) VLOG(89) << "-------redo InsertRaw!-----"; //Since _p_tracker already exist, the return value must be true. CHECK(!this->m_p_insert_footprint->Upsert(_p_tracker, nullptr)); if (!_ownership_taken) delete _p_tracker; } template <typename T> bool TrivialLockDoubleList<T>::InsertRaw(DoubleListNode<T>* new_node) noexcept{ const auto &_p_one = new_node->m_val; //The pointer points to the node just after the current node being iterated auto _p_next = this->m_tail; //The pointer to the current node being iterated, initially they are both pointing to the tail auto _p_cur = _p_next->m_atomic_pre.load(); /* Note : The above two pointers are not necessarily being adjacent all the time. They may pointing to the same node in certain scenarios . */ while (true) { /*Note: Case where '_p_cur == nullptr' could happen: iterating reach the end of a cut head . Since _p_cur already points to the cut list, it need to start all over again. */ if (_p_cur == nullptr) return false; //Replace the old value with the new one in case of a partial comparison. 
if (*_p_one == *_p_cur->m_val && !_p_cur->IsDeleted()) { //TODO: delete then insert. //_p_cur->m_val = _p_one; return true; } /*Note: 1.Deleted elements will be treated like the normal(non-deleted) ones. Since maintaining order of the list without considering the deleted elements is just equivalent to considering them,thinking about inserting a new node after a deleted node with a value greater than it violating nothing on the correctness , but the latter form will introduce huge complexity. 2.For the case of inserting CAS fail and due to conflict with cutting head, the following judge will get satisfied AS BEFORE. And will trigger _p_cur==nullptr eventually, so it's safe to do a recursive insertion above. */ if ( *_p_one < *_p_cur->m_val ) { //Both moving toward to the head direction _p_next = _p_cur; _p_cur = _p_cur->m_atomic_pre.load(); continue; } //For the deleted elements,insert the equivalent one just after it as above says. //Alway assume the new node should be inserted between _p_cur and _p_next at the moment new_node->m_atomic_pre.store(_p_cur); new_node->m_atomic_next.store(_p_next); //Start inserting.... auto tmp_next = _p_next; // Copy it out first. This is very important !! if (!_p_cur->m_atomic_next.compare_exchange_strong(tmp_next, new_node)) { /*Collision happened. Other thread(s) have already modified the 'next' pointer of '_p_cur',we need to redo the inserting process.There are two scenarios where this could happen: 1> other thread(s) are inserting new node. 2> other thread(s) are cutting head. 
*/ //Reset the conditions and start the inserting process from scratch all over again VLOG(89) << "insert_CAS_fail," << _p_cur << " insert_next_changefrom " << _p_next << " to " << tmp_next; _p_cur = _p_next; continue; } auto _p_tmp = _p_cur; bool _rst = _p_next->m_atomic_pre.compare_exchange_strong(_p_tmp, new_node); if(!_rst) CHECK(false) << "TrivialLockDoubleList<T>::Insert unexpected inserting status found," << "cannot CAS node's previous pointer,something terribly wrong happened." << " insert_CAS_fail ," << _p_next << " insert_pre_changefrom " << _p_cur << " to " << _p_tmp << ", head:" << this->m_head; //this->GetSize(); //Here the new node should be inserted properly,stop iterating break; } return true; } template <typename T> bool TrivialLockDoubleList<T>::Delete(const std::shared_ptr<T> &p_one) noexcept { //Use a stack memory to get around multiple thread allocation/deallocation issue. auto *_p_node = new DoubleListNode<T>(p_one); ThreadIDWrapper<void> *_p_tracker = new ThreadIDWrapper<void>(std::this_thread::get_id()); bool _ownership_taken = false; if (this->m_p_insert_footprint->Upsert(_p_tracker, _p_node)) _ownership_taken = true; //Default to that the element to be deleted not found. bool _ret_val = false; auto *_p_cur = this->m_tail->m_atomic_pre.load(); //Reach head of the [cutoff] list. while (_p_cur != nullptr && _p_cur != this->m_head) { if (*p_one != *_p_cur->m_val) { _p_cur = _p_cur->m_atomic_pre.load(); continue; } _p_cur->SetDeleted(); _ret_val = true; break; } CHECK(!this->m_p_insert_footprint->Upsert(_p_tracker, nullptr)); if (!_ownership_taken) { delete _p_tracker; delete _p_node; } return _ret_val; } template <typename T> void TrivialLockDoubleList<T>::DeleteAll() noexcept { //Reserve head & tail node. 
auto _p_cur = this->m_head->m_atomic_next.load(); while (_p_cur != this->m_tail) { _p_cur->SetDeleted(); _p_cur = _p_cur->m_atomic_next.load(); } } template <typename T> bool TrivialLockDoubleList<T>::MoveForward(DoubleListNode<T>* &p_pre,DoubleListNode<T>* &p_next) noexcept { p_pre = p_next; p_next = this->FindNextNonDelete(p_next); if (p_next == this->m_tail) return false; return true; } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::ExpandForward(DoubleListNode<T>* p_cur) noexcept { if (p_cur == this->m_tail) return p_cur; auto *_p_pre = p_cur; auto *_p_x = _p_pre->m_atomic_next.load(); while (_p_x != this->m_tail) { if (!_p_x->IsDeleted()) break; if (_p_x->operator!=(*p_cur)) break; _p_pre = _p_x; _p_x = _p_pre->m_atomic_next.load(); } return _p_pre; } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::ExpandBackward(DoubleListNode<T>* p_cur) noexcept { if (p_cur == this->m_head) return p_cur; auto *_p_pre = p_cur; auto *_p_x = _p_pre->m_atomic_pre.load(); while (_p_x != this->m_head) { if (!_p_x->IsDeleted()) break; if (_p_x->operator!=(*p_cur)) break; _p_pre = _p_x; _p_x = _p_pre->m_atomic_pre.load(); } return _p_pre; } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::FindNextNonDelete(DoubleListNode<T>* p_cur) noexcept { if (p_cur == this->m_tail) return this->m_tail; auto * _p_next_non_deleted = p_cur->m_atomic_next.load(); while (_p_next_non_deleted != this->m_tail) { if (!_p_next_non_deleted->IsDeleted()) break; _p_next_non_deleted = _p_next_non_deleted->m_atomic_next.load(); } return _p_next_non_deleted; } template <typename T> void TrivialLockDoubleList<T>::SiftOutDeleted(DoubleListNode<T>* &output_head) noexcept { auto _to_remove = output_head; while (_to_remove != nullptr) { bool _deleted = false; if (_to_remove->IsDeleted()) { auto _p_pre = _to_remove->m_atomic_pre.load(); auto _p_next = _to_remove->m_atomic_next.load(); if (_p_pre) _p_pre->m_atomic_next.store(_p_next); else output_head = _p_next; 
if(_p_next) _p_next->m_atomic_pre.store(_p_pre); _deleted = true; } auto tmp = _to_remove; _to_remove = _to_remove->m_atomic_next.load(); if (_deleted) delete tmp; } } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::CutHead(std::function<bool(const T &left, const T &right)> criteria) noexcept { /*Note: In the current design , there can be only one thread invoking this method.But it is allowed to have several other threads doing Insert at the mean time. */ std::unique_lock<std::recursive_mutex> _mutex_lock(this->m_recursive_mutex); //VLOG(89) << "list_debug pos1"; auto *_p_cur = this->FindNextNonDelete(this->m_head); if (_p_cur == this->m_tail) { //VLOG(89) << "list_debug pos1.1"; return nullptr; } //VLOG(89) << "list_debug pos2"; //output_head always point the first element regardless of deleted or non-deleted. auto* _p_immediate_first = this->m_head->m_atomic_next.load(); auto *output_head = _p_immediate_first; //The first element will always be cut off. auto _p_next = this->FindNextNonDelete(_p_cur); while (true) { //VLOG(89) << "list_debug pos2.1"; if (criteria(*_p_cur->m_val, *_p_next->m_val)) { //VLOG(89) << "list_debug pos2.2"; if (this->MoveForward(_p_cur, _p_next)) { //VLOG(89) << "list_debug pos2.3"; continue; } } //VLOG(89) << "list_debug pos3"; //To get around the deleted elements, we need to expand '_p_cur' and '_p_next'. _p_cur = this->ExpandForward(_p_cur); _p_next = this->ExpandBackward(_p_next); //-----------Start cutting head-----------// /*This is the tricky part,need to consider simultaneously Inserting and CutHeading : If we set _p_cur->next to nullptr successfully, no other threads could insert new node between _p_cur and _p_next.This is the critical safety guarantee for other operations. 
*/ auto _p_tmp = _p_next; if (!_p_cur->m_atomic_next.compare_exchange_strong(_p_tmp, nullptr)) { /*Strong CAS fail ,means that other thread(s) already made _p_next to point to the newly inserted node.What we need to do is just redo the iterating from current node. Also _p_next need to be updated to the newly inserted node,otherwise the criteria will be evaluated to false forever. */ VLOG(89) << "cuthead_CAS_fail," << _p_cur << " next_changefrom " << _p_next << " to " << _p_tmp; _p_next = _p_tmp; continue; } //Cutting done,just break out. break; } /*Note: Once goes here, _p_cur is the latest item of the cut out list and _p_next is the first item of the remaining list. */ //Connect the head with the first non adjacent node. auto _p_tmp = this->m_head->m_atomic_next.load(); while (!this->m_head->m_atomic_next.compare_exchange_strong(_p_tmp,_p_next)) { /*If reach here , mean new nodes already been inserted between m_head and the cutting head. To avoid cut off non adjacent nodes,the cutting head process need starting allover again. */ //Recovery _p_cur's next pointer,should never fail. decltype(_p_next) _p_tmp = nullptr; CHECK(_p_cur->m_atomic_next.compare_exchange_strong(_p_tmp, _p_next)); VLOG(89) << "-------recursive cuthead occur!-----"; return this->CutHead(criteria); } //VLOG(89) << "list_debug pos4"; _p_tmp = _p_cur; while (!_p_next->m_atomic_pre.compare_exchange_strong(_p_tmp, this->m_head)) { /* '_p_next->m_atomic_pre' may still points to the old place due to an uncomplete inserting process. What we need to do is just waiting for it pointing to the updated place where is exactly _p_cur. 
*/ LOG(WARNING) << "cutting head tail->pre CAS_fail " << _p_next << " previous change from " << _p_cur << " to " << _p_tmp << ",head:" << this->m_head << ",tail:" << this->m_tail << ",do CAS again."; _p_tmp = _p_cur; continue; } //Detach first node output_head->m_atomic_pre.store(nullptr); //VLOG(89) << "list_debug pos5 " << output_head; //Waiting for all threads that are iterating in the cut off list to be finished. this->WaitForListClean(_p_cur); //VLOG(89) << "list_debug pos6 " << output_head; //auto *_tmp = output_head; //while (_tmp != nullptr) { // VLOG(89) << "list_debug pos6.1 " << _tmp->IsDeleted(); // _tmp = _tmp->m_atomic_next.load(); //} //Now the list is cut off with the deleted elements. Need to erase the deleted elements this->SiftOutDeleted(output_head); //VLOG(89) << "list_debug pos7 " << output_head; return output_head; } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::CutHead(std::function<bool(const T &one)> criteria) noexcept { /*Note: In the current design , there can be only one thread invoking this method.But it is allowed to have several other threads doing Insert at the mean time. */ std::unique_lock<std::recursive_mutex> _mutex_lock(this->m_recursive_mutex); //VLOG(89) << "debug double list enter"; auto _p_cur = this->m_head->m_atomic_next.load(); if (_p_cur == this->m_tail) return nullptr; auto _p_pre = this->m_head; auto _p_start = _p_cur; auto output_head = _p_cur; while (true) { if (criteria(*_p_cur->m_val)) { _p_pre = _p_cur; _p_cur = _p_cur->m_atomic_next.load(); if (_p_cur != this->m_tail) continue; } //No nodes are available if (_p_cur == _p_start) return nullptr; //-----------Start cutting head-----------// /*This is the tricky part,need to consider simultaneously Inserting and CutHeading : If we set _p_cur->next to nullptr successfully, no other threads could insert new node between _p_pre and _p_cur.This is the critical safety guarantee for other operations. 
*/ auto _p_tmp = _p_cur; if (!_p_pre->m_atomic_next.compare_exchange_strong(_p_tmp, nullptr)) { /*Strong CAS fail ,means that other thread(s) already made _p_cur to point to the newly inserted node.What we need to do is just redo the iterating from current node. Also _p_cur need to be updated to the newly inserted node,otherwise the criteria will be evaluated to false forever. */ _p_cur = _p_pre; continue; } //Cutting done,just break out. break; } /*Note: Once goes here, _p_pre is the latest item of the cut out list and _p_cur is the first item of the remaining list. */ //Detach first node auto _p_tmp = _p_start; while (!this->m_head->m_atomic_next.compare_exchange_strong(_p_tmp, _p_cur)) { decltype(_p_cur) _p_tmp = nullptr; CHECK(_p_pre->m_atomic_next.compare_exchange_strong(_p_tmp, _p_cur)); VLOG(89) << "-------recursive cuthead occur!-----"; return this->CutHead(criteria); } _p_tmp = _p_pre; while (!_p_cur->m_atomic_pre.compare_exchange_strong(_p_tmp, this->m_head)) { LOG(WARNING) << "cutting head tail->pre CAS_fail " << _p_cur << " previous change from " << _p_pre << " to " << _p_tmp << ",head:" << this->m_head << ",tail:" << this->m_tail << ",do CAS again."; _p_tmp = _p_pre; continue; } output_head->m_atomic_pre.store(nullptr); //Waiting for all threads that are iterating in the cut off list to be finished. this->WaitForListClean(_p_pre); //Now the list is cut off with the deleted elements. 
Need to erase the deleted elements this->SiftOutDeleted(output_head); //VLOG(89) << "debug double list leave"; return output_head; } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::CutHeadByValue(const T &val) noexcept { auto judge_smaller_equal = [&](const T &one) -> bool{ return one <= val; }; return this->CutHead(judge_smaller_equal); } template <typename T> void TrivialLockDoubleList<T>::ReleaseCutHead(DoubleListNode<T>* output_head) noexcept { auto _p_cur = output_head; while (_p_cur != nullptr) { auto _p_next = _p_cur->m_atomic_next.load(); delete _p_cur; _p_cur = _p_next; } } template <typename T> void TrivialLockDoubleList<T>::IterateCutHead(std::function<bool(T &)> accessor, DoubleListNode<T>* output_head) const noexcept { auto _p_cur = output_head; while (_p_cur != nullptr) { auto _p_next = _p_cur->m_atomic_next.load(); accessor(*_p_cur->m_val); _p_cur = _p_next; } } template <typename T> void TrivialLockDoubleList<T>::Iterate(std::function<bool(T &)> accessor) const noexcept { auto _cur = this->m_head->m_atomic_next.load(); while (_cur) { if (_cur == this->m_tail) break; auto _p_tmp = _cur; _cur = _cur->m_atomic_next.load(); if (_p_tmp->IsDeleted()) continue; if (!accessor(*_p_tmp->m_val)) break; } } template <typename T> bool TrivialLockDoubleList<T>::Empty() const noexcept { return this->m_head->m_atomic_next.load() == this->m_tail; } #ifdef _TRIIAL_DOUBLE_LIST_TEST_ template <typename T> int TrivialLockDoubleList<T>::GetSize() const noexcept { int _size = 0; auto _cur = this->m_head; while (_cur) { if (!_cur->IsDeleted()) _size++; _cur = _cur->m_atomic_next.load(); } //Exclude head & tail. return _size - 2; } template <typename T> DoubleListNode<T>* TrivialLockDoubleList<T>::GetHead() const noexcept { return this->m_head; } #endif }
21,877
C++
.cc
492
37.361789
129
0.619561
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,256
trivial_lock_list_base.cc
ppLorins_aurora/src/tools/trivial_lock_list_base.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include "config/config.h" #include "tools/trivial_lock_list_base.h" namespace RaftCore::DataStructure { template<typename T> ThreadIDWrapper<T>::ThreadIDWrapper(std::thread::id tid)noexcept { this->m_tid = tid; } template<typename T> ThreadIDWrapper<T>::~ThreadIDWrapper()noexcept {} template<typename T> bool ThreadIDWrapper<T>::operator<(const ThreadIDWrapper &other)const noexcept { return this->m_tid < other.m_tid; } template<typename T> bool ThreadIDWrapper<T>::operator==(const ThreadIDWrapper &other)const noexcept { return this->m_tid == other.m_tid; } //TODO:figure out why this compiles fail under VS2015 //template <typename T> //const TrivialLockSingleList<T>::ThreadIDWrapper& TrivialLockSingleList<T>::ThreadIDWrapper::operator=( // const ThreadIDWrapper &other)noexcept { // this->m_tid = other.m_tid; // return *this; //} template<typename T> std::size_t ThreadIDWrapper<T>::Hash()const noexcept { return std::hash<std::thread::id>{}(this->m_tid); } template<typename T> std::thread::id ThreadIDWrapper<T>::GetTid() const noexcept { return this->m_tid; } template<typename T> OperationTracker<T>::OperationTracker()noexcept { uint32_t _slot_num = ::RaftCore::Config::FLAGS_list_op_tracker_hash_slot_num; 
this->m_p_insert_footprint = new LockFreeHashAtomic<ThreadIDWrapper<void>, T>(_slot_num); static_assert(std::is_base_of<OrderedTypeBase<T>,T>::value,"template parameter of OperationTracker invalid"); } template<typename T> OperationTracker<T>::~OperationTracker()noexcept {} template <typename T> void OperationTracker<T>::WaitForListClean(T* output_tail) noexcept { /*Note: This is for waiting for unfinished writing insertions inside the cut off list to be finished. Thus can prevent from losing elements, but still cannot prevent from instantly freeing elements after cutting head, it still needs a delay free process. */ bool _finished = true; auto _checker = [&](const std::shared_ptr<ThreadIDWrapper<void>> &k, const std::shared_ptr<T> &v) ->bool { if (!v) return true; if (*v.get() > *output_tail) return true; _finished = false; return false; }; do { std::this_thread::yield(); _finished = true; this->m_p_insert_footprint->Iterate(_checker); } while (!_finished); } } //end namespace
3,185
C++
.cc
76
38.578947
113
0.723087
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,257
timer.cc
ppLorins_aurora/src/tools/timer.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <chrono> #include <thread> #include "common/comm_defs.h" #include "config/config.h" #include "tools/timer.h" namespace RaftCore::Timer { std::priority_queue<GlobalTimer::Task,std::deque<GlobalTimer::Task>,GlobalTimer::TaskCmp> GlobalTimer::m_heap; std::shared_timed_mutex GlobalTimer::m_share_timed_mutex; volatile GlobalTimer::ETimerThreadState GlobalTimer::m_thread_state = GlobalTimer::ETimerThreadState::INITIALIZED; using ::RaftCore::Common::WriteLock; using ::RaftCore::Common::ReadLock; bool GlobalTimer::TaskCmp::operator()(const Task &x, const Task &y) { return x.m_next_run > y.m_next_run; } void GlobalTimer::Task::operator=(const GlobalTimer::Task &one) { this->m_next_run = one.m_next_run; this->m_interval_ms = one.m_interval_ms; this->m_processor = one.m_processor; } void GlobalTimer::Initialize() noexcept { m_thread_state = ETimerThreadState::RUNNING; std::thread _t([&]() { ThreadEntrance(); }); _t.detach(); } void GlobalTimer::UnInitialize() noexcept { Stop(); WriteLock _w_lock(m_share_timed_mutex); while (!m_heap.empty()) m_heap.pop(); } void GlobalTimer::AddTask(uint32_t interval_ms, std::function<bool()> processor) noexcept{ //interval_ms==0 mean one shot, should be supported. 
if (interval_ms > 0) CHECK(interval_ms >= ::RaftCore::Config::FLAGS_timer_precision_ms) << "timer precision not enough :" << ::RaftCore::Config::FLAGS_timer_precision_ms << "|" << interval_ms; uint64_t _now = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); WriteLock _w_lock(m_share_timed_mutex); m_heap.emplace(_now + interval_ms,interval_ms, processor); } void GlobalTimer::ThreadEntrance() noexcept { LOG(INFO) << "Global timer thread started."; while (true) { std::this_thread::sleep_for(std::chrono::milliseconds(::RaftCore::Config::FLAGS_timer_precision_ms)); if (m_thread_state == ETimerThreadState::STOPPING) break; uint64_t _now = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); WriteLock _w_lock(m_share_timed_mutex); while (!m_heap.empty()) { /*Avoiding one single task takes too much time thus timing out all the subsequent tasks. Falling into a deal loop. */ if (m_thread_state == ETimerThreadState::STOPPING) break; const auto &_task = m_heap.top(); if (_task.m_next_run > _now) break; //Run the task.Returned value indicating whether that task wants to be executed next time. if (_task.m_processor()) { //Push the next round of execution of task. auto _new = _task; _new.m_next_run = _now + _task.m_interval_ms; //emplace() will causes issues under MSVC 2015.details see : https://gist.github.com/ppLorins/09de033a4b0748d883c8bf8fe12b7703 //m_heap.emplace(_now + _task.m_interval_ms, _task.m_interval_ms, _task.m_processor); m_heap.push(_new); } m_heap.pop(); } } m_thread_state = ETimerThreadState::STOPPED; LOG(INFO) << "Global timer thread exited."; } void GlobalTimer::Stop() noexcept { m_thread_state = ETimerThreadState::STOPPING; while (m_thread_state != ETimerThreadState::STOPPED) std::this_thread::sleep_for(std::chrono::microseconds(::RaftCore::Config::FLAGS_thread_stop_waiting_us)); } }
4,413
C++
.cc
90
42.888889
142
0.670399
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,258
lock_free_hash.cc
ppLorins_aurora/src/tools/lock_free_hash.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <functional> #include <unordered_set> #include "common/comm_defs.h" #include "tools/utilities.h" #include "config/config.h" #include "tools/lock_free_hash.h" namespace RaftCore::DataStructure { template <typename T> bool HashTypeBase<T>::IsDeleted() const noexcept { return this->m_deleted; } template <typename T> void HashTypeBase<T>::SetDeleted() const noexcept { this->m_deleted = true; } template <typename T> void HashTypeBase<T>::SetValid() const noexcept{ this->m_deleted = false; } template <typename T,typename R> HashNode<T,R>::HashNode(const std::shared_ptr<T> &key,const std::shared_ptr<R> &val) noexcept{ this->m_shp_key = key; this->m_shp_val = val; this->m_next = nullptr; } /* template <typename T,typename R> void HashNode<T, R>::Update(const std::shared_ptr<R> &val) noexcept { this->m_shp_val = val; }*/ template <typename T,typename R> HashNode<T, R>::~HashNode() noexcept {} /* template <typename T,typename R> void HashNode<T, R>::Update(const std::shared_ptr<R> &val) noexcept { //this->m_mutex.lock(); this->m_shp_val = val; //this->m_mutex.unlock(); }*/ template <typename T,typename R> HashNode<T, R>* HashNode<T, R>::GetNext() const noexcept { return this->m_next; } template <typename T,typename R> void 
HashNode<T,R>::SetNext(const HashNode<T,R> * const p_next) noexcept{ this->m_next = const_cast<HashNode<T,R>*>(p_next); } template <typename T,typename R> void HashNode<T,R>::ModifyKey(std::function<void(std::shared_ptr<T>&)> op) noexcept { op(this->m_shp_key); } template <typename T,typename R> bool HashNode<T,R>::IsDeleted() const noexcept { return this->m_shp_key->IsDeleted(); } template <typename T,typename R> void HashNode<T,R>::SetDeleted() const noexcept { this->m_shp_key->SetDeleted(); } template <typename T,typename R> void HashNode<T, R>::SetValid() const noexcept { this->m_shp_key->SetValid(); } template <typename T,typename R> void HashNode<T, R>::SetTag(uint32_t tag) noexcept { this->m_iterating_tag = tag; } template <typename T,typename R> uint32_t HashNode<T, R>::GetTag() const noexcept { return this->m_iterating_tag; } template <typename T,typename R> void HashNode<T, R>::LockValue() noexcept {} template <typename T,typename R> void HashNode<T, R>::UnLockValue() noexcept {} template <typename T,typename R> std::shared_ptr<T> HashNode<T,R>::GetKey() const noexcept { return this->m_shp_key; } template <typename T,typename R> std::size_t HashNode<T, R>::GetKeyHash() const noexcept { return this->m_shp_key->Hash(); } template <typename T,typename R> std::shared_ptr<R> HashNode<T,R>::GetVal() const noexcept { return this->m_shp_val; } template <typename T,typename R> bool HashNode<T,R>::operator==(const HashNode<T,R>& one) const noexcept { return (*this->m_shp_key) == *one.m_shp_key; } template <typename T,typename R> bool HashNode<T,R>::operator==(const T& one) const noexcept { return (*this->m_shp_key) == one; } template <typename T,typename R,template<typename,typename> typename NodeType> LockFreeHash<T, R, NodeType>::LockFreeHash(uint32_t slot_num) noexcept { static_assert(std::is_base_of<HashTypeBase<T>,T>::value,"template parameter of LockFreeHash invalid"); int _one_slot_size = sizeof(void*) + sizeof(NodeType<T, R>) + 
sizeof(std::atomic<NodeType<T, R>*>) + sizeof(T) + ::RaftCore::Tools::SizeOfX<R>(); if (slot_num == 0) slot_num = ::RaftCore::Tools::RoundUp(::RaftCore::Config::FLAGS_binlog_meta_hash_buf_size * 1024 * 1024 / _one_slot_size); this->m_slots_num = slot_num; this->m_slots_mask = ::RaftCore::Tools::GetMask(this->m_slots_num); int buf_size = this->m_slots_num * sizeof(void*); this->m_solts = (std::atomic<NodeType<T,R>*> **)std::malloc(buf_size); for (std::size_t i = 0; i < this->m_slots_num; ++i) this->m_solts[i] = new std::atomic<NodeType<T,R>*>(nullptr); this->m_size.store(0); } template <typename T,typename R,template<typename,typename> typename NodeType> LockFreeHash<T, R, NodeType>::~LockFreeHash() noexcept { this->Clear(true); std::free(this->m_solts); } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::Clear(bool destruct) noexcept { assert(this->m_solts != nullptr); //int _tmp_1 = 0; for (std::size_t i = 0; i < this->m_slots_num; ++i) { std::atomic<NodeType<T,R>*> *_p_atomic = this->m_solts[i] ; NodeType<T,R>* _p_cur = _p_atomic->load(); while (_p_cur != nullptr) { auto *_tmp = _p_cur; _p_cur = _p_cur->GetNext(); delete _tmp; //_tmp_1++; } if (destruct) delete _p_atomic; else _p_atomic->store(nullptr); } //VLOG(89) << "hash released " << _tmp_1 << " inserted elements"; this->m_size.store(0); } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::Insert(const std::shared_ptr<T> &key, const std::shared_ptr<R> &val, uint32_t tag) noexcept { std::size_t hash_val = key->Hash(); std::size_t idx = hash_val & this->m_slots_mask; std::atomic<NodeType<T,R>*> * p_atomic = this->m_solts[idx]; NodeType<T,R>* p_cur = p_atomic->load(); bool _key_exist = false; while (p_cur != nullptr) { if (!p_cur->operator==(*key)) { p_cur = p_cur->GetNext(); //move next continue; } _key_exist = true; break; } NodeType<T,R>* p_old_head = p_atomic->load(); NodeType<T,R>* 
p_new_node = new NodeType<T,R>(key,val); p_new_node->SetNext(p_old_head); p_new_node->SetTag(tag); /*"When a compare-and-exchange is in a loop, the weak version will yield better performance on some platforms" from https://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange. Moreover , whether specify the following memory order or not influencing little on overall performance under my test. */ while (!p_atomic->compare_exchange_weak(p_old_head, p_new_node, std::memory_order_acq_rel, std::memory_order_acquire)) p_new_node->SetNext(p_old_head); /*Set the first existing key to be deleted , ensuring the key can be inserted at head. There is a time slice during which iterating could read redundant elements.This is avoided by recording what has been read when traversing. */ if (!_key_exist) this->m_size.fetch_add(1); else p_cur->SetDeleted(); } template <typename T,typename R,template<typename,typename> typename NodeType> bool LockFreeHash<T, R, NodeType>::Find(const T &key) const noexcept { std::size_t hash_val = key.Hash(); std::size_t idx = hash_val & this->m_slots_mask; std::atomic<NodeType<T,R>*> * p_atomic = this->m_solts[idx]; NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { if (p_cur->operator==(key) && !p_cur->IsDeleted()) return true; //move next p_cur = p_cur->GetNext(); } return false; } /* template <typename T,typename R,template<typename,typename> typename NodeType> bool LockFreeHash<T, R, NodeType>::Upsert(const T *key, const std::shared_ptr<R> val) noexcept { std::size_t hash_val = key->Hash(); std::size_t idx = hash_val & this->m_slots_mask; std::atomic<NodeType<T,R>*> * p_atomic = this->m_solts[idx]; NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { if (p_cur->operator==(*key) && !p_cur->IsDeleted()) { p_cur->Update(val); return false; } //move next p_cur = p_cur->GetNext(); } std::shared_ptr<T> _shp_key(const_cast<T*>(key)); this->Insert(_shp_key, val); return true; }*/ template <typename T,typename 
R,template<typename,typename> typename NodeType> uint32_t LockFreeHash<T, R, NodeType>::Size() const noexcept { return this->m_size.load(); } template <typename T,typename R,template<typename,typename> typename NodeType> bool LockFreeHash<T, R, NodeType>::Read(const T &key, std::shared_ptr<R> &val) const noexcept { std::size_t hash_val = key.Hash(); std::size_t idx = hash_val & this->m_slots_mask; std::atomic<NodeType<T,R>*> * p_atomic = this->m_solts[idx]; NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { bool _found = p_cur->operator==(key) && !p_cur->IsDeleted(); if (!_found) { p_cur = p_cur->GetNext(); continue; } val = p_cur->GetVal(); return true; } return false; } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::Delete(const T &key) noexcept { std::size_t hash_val = key.Hash(); std::size_t idx = hash_val & this->m_slots_mask; std::atomic<NodeType<T,R>*> * p_atomic = this->m_solts[idx]; NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { if (p_cur->operator==(key) && !p_cur->IsDeleted()) { p_cur->SetDeleted(); this->m_size.fetch_sub(1); return ; } //move next p_cur = p_cur->GetNext(); } } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::GetOrderedByKey(std::list<std::shared_ptr<T>> &_output) const noexcept { _output.clear(); std::set<std::shared_ptr<T>,MyComparator> _rb_tree; for (std::size_t i = 0; i < this->m_slots_num; ++i) { std::atomic<NodeType<T,R>*> *p_atomic = this->m_solts[i] ; const NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { auto _p_tmp = p_cur; p_cur = p_cur->GetNext(); if(_p_tmp->IsDeleted()) continue; //Avoid reading redundant keys. 
if (_rb_tree.find(_p_tmp->GetKey()) == _rb_tree.cend()) _rb_tree.emplace(_p_tmp->GetKey()); } } for (auto iter = _rb_tree.begin(); iter != _rb_tree.end(); iter++) _output.emplace_back(*iter); } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::GetOrderedByValue(std::map<std::shared_ptr<R>, std::shared_ptr<T>, ValueComparator> &_output) const noexcept { std::unordered_set<std::shared_ptr<T>,MyHasher,MyEqualer> _traversed; for (std::size_t i = 0; i < this->m_slots_num; ++i) { std::atomic<NodeType<T,R>*> *p_atomic = this->m_solts[i] ; const NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { auto _p_tmp = p_cur; p_cur = p_cur->GetNext(); if(_p_tmp->IsDeleted()) continue; auto _shp_key = _p_tmp->GetKey(); if (_traversed.find(_shp_key) != _traversed.cend()) continue; _output.emplace(_p_tmp->GetVal(), _shp_key); _traversed.emplace(_shp_key); } } } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::Map(std::function<void(std::shared_ptr<T>&)> op) noexcept { uint32_t _cur_tag = ::RaftCore::Tools::GenerateRandom(1, _MAX_UINT32_); for (std::size_t i = 0; i < this->m_slots_num; ++i) { std::atomic<NodeType<T, R>*> *p_atomic = this->m_solts[i]; NodeType<T, R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { auto _p_tmp = p_cur; p_cur = p_cur->GetNext(); //If already been iterated or deleted,jump over. if (_p_tmp->GetTag() == _cur_tag || _p_tmp->IsDeleted()) continue; std::size_t _old_hash_val = _p_tmp->GetKeyHash(); //Do modifying. _p_tmp->ModifyKey(op); std::size_t _new_hash_val = _p_tmp->GetKeyHash(); if (_old_hash_val == _new_hash_val) continue; //Copy the object out first. std::shared_ptr<T> _shp_new_key = std::make_shared<T>(*_p_tmp->GetKey()); //Set the original object to be deleted. _p_tmp->SetDeleted(); //Insert the new element with a new tag. 
this->Insert(_shp_new_key,_p_tmp->GetVal(),_cur_tag); } } } template <typename T,typename R,template<typename,typename> typename NodeType> bool LockFreeHash<T, R, NodeType>::CheckCond(std::function<bool(const T &key)> criteria) const noexcept { std::unordered_set<std::shared_ptr<T>,MyHasher,MyEqualer> _traversed; for (std::size_t i = 0; i < this->m_slots_num; ++i) { std::atomic<NodeType<T,R>*> *p_atomic = this->m_solts[i] ; const NodeType<T,R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { auto _p_tmp = p_cur; p_cur = p_cur->GetNext(); if(_p_tmp->IsDeleted()) continue; auto _shp_key = _p_tmp->GetKey(); if (_traversed.find(_shp_key) != _traversed.cend()) continue; if (!criteria(*_shp_key)) return false; _traversed.emplace(_shp_key); } } return true; } template <typename T,typename R,template<typename,typename> typename NodeType> void LockFreeHash<T, R, NodeType>::Iterate(std::function<bool(const std::shared_ptr<T> &k, const std::shared_ptr<R> &v)> op) noexcept { std::unordered_set<std::shared_ptr<T>,MyHasher,MyEqualer> _traversed; for (std::size_t i = 0; i < this->m_slots_num; ++i) { std::atomic<NodeType<T, R>*> *p_atomic = this->m_solts[i]; NodeType<T, R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { auto *_p_tmp = p_cur; p_cur = p_cur->GetNext(); //If already been iterated or deleted,jump over. if (_p_tmp->IsDeleted()) continue; auto _shp_key = _p_tmp->GetKey(); if (_traversed.find(_shp_key) != _traversed.cend()) continue; _p_tmp->LockValue(); bool _rst = op(_shp_key, _p_tmp->GetVal()); _p_tmp->UnLockValue(); if (!_rst) return; _traversed.emplace(_shp_key); } } } }
15,201
C++
.cc
364
35.478022
145
0.620575
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,259
lock_free_queue.cc
ppLorins_aurora/src/tools/lock_free_queue.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/error_code.h" #include "tools/utilities.h" #include "tools/lock_free_queue.h" namespace RaftCore::DataStructure { template <typename T> const char* QueueNode<T>::m_status_macro_names[] = {"SLOT_EMPTY","SLOT_PRODUCING","SLOT_PRODUCED","SLOT_CONSUMING"}; template <typename T> QueueNode<T>::QueueNode() noexcept { this->m_state.store(SlotState::SLOT_EMPTY); } template <typename T> QueueNode<T>::~QueueNode() noexcept {} template <typename T> LockFreeQueue<T>::LockFreeQueue() noexcept{} template <typename T> void LockFreeQueue<T>::Initilize(TypeCallBackFunc fn_cb,int queue_size) noexcept{ this->m_element_size = ::RaftCore::Tools::RoundUp(queue_size); this->m_element_mask = ::RaftCore::Tools::GetMask(this->m_element_size); this->m_data = new QueueNode<T>[this->m_element_size]; this->m_fn_cb = fn_cb; this->m_head.store(0); this->m_tail.store(0); } template <typename T> LockFreeQueue<T>::~LockFreeQueue() noexcept{ delete []this->m_data; } template <typename T> uint32_t LockFreeQueue<T>::GetSize() const noexcept { uint32_t _cur_head = this->m_head.load(); uint32_t _cur_tail = this->m_tail.load(); uint32_t _size = 0; while (_cur_tail != _cur_head) { _cur_tail = (_cur_tail + 1) & this->m_element_mask; _size++; } return _size; } 
template <typename T> uint32_t LockFreeQueue<T>::GetCapacity() const noexcept { return this->m_element_size; } template <typename T> bool LockFreeQueue<T>::Empty() const noexcept { return this->m_head.load() == this->m_tail.load(); } template <typename T> int LockFreeQueue<T>::PopConsume() noexcept { std::shared_ptr<T> _p_item; int n_rst = this->Pop(_p_item); if (n_rst!=QUEUE_SUCC) { if (n_rst!=QUEUE_EMPTY) LOG(ERROR) << "Consumer : Pop failed,returned Val:" << n_rst; return n_rst; } if (!this->m_fn_cb(_p_item)) LOG(ERROR) << "Consumer : Process entry failed"; return QUEUE_SUCC; } template <typename T> int LockFreeQueue<T>::Pop(std::shared_ptr<T> &ptr_element) noexcept { uint32_t _cur_tail = this->m_tail.load(); //Queue empty if (_cur_tail == this->m_head.load()) return QUEUE_EMPTY; uint32_t next = (_cur_tail + 1) & this->m_element_mask; //Get the position where to consume. /* Note:compare_exchange_weak are allowed to fail spuriously, which is, act as if *this != expected even if they are equal. Meaning when compare_exchange_weak return false: 1> *this != expected and they are actually not equal, no problems. 2> *this != expected but they are actually equal: 1) _cur_tail will be replaced with _cur_tail itself, it doesn't change. 2) next will be re-calculated , it also doesn't change. In a word,nothing wrong would happened under spuriously fail. */ while (!this->m_tail.compare_exchange_weak(_cur_tail, next)) { //Threads can go over the produced range, but it is not an error. 
if (_cur_tail == this->m_head.load()) { VLOG(89) << "consuming,found slot is not produced,at position:" << _cur_tail << ",probably due to empty" ; return QUEUE_EMPTY; } next = (_cur_tail + 1) & this->m_element_mask; } //Only one thread are allowed to operate on the element at index 'next' SlotState slot_state = SlotState::SLOT_PRODUCED; while (!this->m_data[next].m_state.compare_exchange_weak(slot_state, SlotState::SLOT_CONSUMING)) { /* slot_state == SlotState::SLOT_PRODUCED || //CAS spurious fail. slot_state == SlotState::SLOT_EMPTY || //Producer is producing ,try again. slot_state == SlotState::SLOT_PRODUCING; //Producer is producing ,try again. slot_state == SlotState::SLOT_CONSUMING; //Overlapping consuming occurred. */ slot_state = SlotState::SLOT_PRODUCED; } //Consuming ptr_element = this->m_data[next].m_val; this->m_data[next].m_val.reset(); //Release the ownership //Update slot state slot_state = SlotState::SLOT_CONSUMING; CHECK(this->m_data[next].m_state.compare_exchange_strong(slot_state,SlotState::SLOT_EMPTY)) << "cannot update state from consuming to empty at position:" << next << ",detected state:" << QueueNode<T>::MacroToString(slot_state) << ",something is terribly wrong" ; return QUEUE_SUCC; } template <typename T> int LockFreeQueue<T>::Push(void* ptr_shp_element) noexcept { uint32_t _cur_head = this->m_head.load(); uint32_t next = (_cur_head + 1) & this->m_element_mask; //Check the validity of position 'next' if (next == this->m_tail.load()) return QUEUE_FULL; //Get the position where to produce. 
/*Note : spuriously fail of compare_exchange_weak is acceptable, since when it happened,_cur_head would remain the same as what is is before calling this function, and will go to next round of execution.As explained above.*/ while (!this->m_head.compare_exchange_weak(_cur_head, next)) { next = (_cur_head + 1) & this->m_element_mask; if (next == this->m_tail.load()) { LOG(WARNING) << "producing,found slot is not empty,at position:" << next << ",probably due to full" ; return QUEUE_FULL; } } //Which is guaranteed here is that only one thread will be allowed to operate on the element at index 'next'. SlotState slot_state = SlotState::SLOT_EMPTY; while(!this->m_data[next].m_state.compare_exchange_weak(slot_state, SlotState::SLOT_PRODUCING)) { /* slot_state == SlotState::SLOT_EMPTY || //CAS spurious fail. slot_state == SlotState::SLOT_PRODUCED || //Consumer is consuming this node,try again. slot_state == SlotState::SLOT_CONSUMING || //Consumer is consuming this node,try again. slot_state == SlotState::SLOT_PRODUCING ; //Overlapping producing occurred. */ slot_state = SlotState::SLOT_EMPTY; continue; } //Producing this->m_data[next].m_val = *((std::shared_ptr<T>*)ptr_shp_element); //Update slot state slot_state = SlotState::SLOT_PRODUCING; CHECK(this->m_data[next].m_state.compare_exchange_strong(slot_state, SlotState::SLOT_PRODUCED)) << "cannot update state from producing to produced at position:" << next << ",detected state:" << QueueNode<T>::MacroToString(slot_state) << ",something is terribly wrong" ; return QUEUE_SUCC; } }
7,281
C++
.cc
154
42.019481
165
0.670244
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,260
utilities.cc
ppLorins_aurora/src/tools/utilities.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "glog/logging.h" #include "boost/asio.hpp" #include "tools/utilities.h" #define _FOUR_BYTES_BITS_ (32) namespace RaftCore::Tools { void GetLocalIPs(std::list<std::string>& ips){ boost::asio::io_service io_service; boost::asio::ip::tcp::resolver resolver(io_service); std::string h = boost::asio::ip::host_name(); std::for_each(resolver.resolve({ h, "" }), {}, [&](const auto& re) { ips.emplace_back(re.endpoint().address().to_string()); }); //Forcibly add loop back address due to it may not appear under certain platforms. const static std::string _loop_back_addr = "127.0.0.1"; if (std::find(ips.cbegin(), ips.cend(), _loop_back_addr) == ips.cend()) ips.emplace_back(_loop_back_addr); } uint32_t RoundUp(uint32_t num) { const static uint32_t mask = 0x80000000; uint32_t tmp = num; uint32_t ret = 0x80000000; for (int i=0; i < _FOUR_BYTES_BITS_; ++i) { if (tmp & mask) break; tmp <<= 1; ret >>= 1; } int _shift = (tmp & ~mask)? 
1 : 0 ; return ret << _shift; } uint32_t GetMask(uint32_t num) { uint32_t tmp = num; uint32_t mask = 0x80000000; int _counter = 1; for (int i=0; i < _FOUR_BYTES_BITS_; ++i) { if (tmp & mask) break; tmp <<= 1; _counter++; } uint32_t ret = 0x1; for (int i = 0; i < _FOUR_BYTES_BITS_ - _counter - 1; ++i) { ret <<= 1; ret++; } return ret; } TypeTimePoint StartTimeing() { return std::chrono::steady_clock::now(); } uint64_t EndTiming(const TypeTimePoint &tp_start, const char* operation_name, const LogIdentifier *p_cur_id) { auto _now = std::chrono::steady_clock::now(); std::chrono::microseconds _us = std::chrono::duration_cast<std::chrono::microseconds>(_now - tp_start); uint64_t _cost_us = _us.count(); if (p_cur_id == nullptr) VLOG(88) << operation_name << " cost us:" << _cost_us; else VLOG(88) << operation_name << " cost us:" << _cost_us << " ,idx:" << *p_cur_id; return _cost_us; } void StringSplit(const std::string &input, char delimiter, std::set<std::string> &output) { std::list<std::string> _output; StringSplit(input, delimiter, _output); for (const auto &_item : _output) output.emplace(_item); } void StringSplit(const std::string &input, char delimiter, std::list<std::string> &output) { std::size_t _pos = 0, _cur_pos=0; while ((_cur_pos = input.find(delimiter, _pos)) != std::string::npos) { std::size_t _len = _cur_pos - _pos ; if (_len > 0) output.emplace_back(input.substr(_pos, _len)); _pos = _cur_pos; if (++_pos >= input.length()) break; } if (_pos < input.length()) output.emplace_back(input.substr(_pos)); } std::string TimePointToString(const TypeSysTimePoint &tp){ //Get seconds. char _buf[128]; std::time_t _t = std::chrono::system_clock::to_time_t(tp); std::tm * _ptm = std::localtime(&_t); std::strftime(_buf, 32, "%Y.%m.%d %a, %H:%M:%S", _ptm); //Get milliseconds. 
char _result[128]; std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch()); std::snprintf(_result,sizeof(_result),"%s.%llu",_buf,ms.count() % 1000); return std::string(_result); } uint32_t GenerateRandom(uint32_t from, uint32_t to) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<unsigned long> dis(from,to); return dis(gen); } }
4,367
C++
.cc
112
34.044643
112
0.628531
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,261
lock_free_deque.cc
ppLorins_aurora/src/tools/lock_free_deque.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "tools/lock_free_deque.h" namespace RaftCore::DataStructure { template <typename T> LockFreeUnorderedSingleList<DequeNode<T>> LockFreeDeque<T>::m_garbage; template <typename T> DequeNode<T>::DequeNode() noexcept{} template <typename T> DequeNode<T>::DequeNode(const std::shared_ptr<T> &p_val) noexcept{ this->m_atomic_next.store(nullptr); this->m_val = p_val; //This is an estimated value for security. //TODO:why this can't compile. //::RaftCore::Config::FLAGS_garbage_deque_retain_num = CommonView::m_cpu_cores; } template <typename T> DequeNode<T>::~DequeNode() noexcept {} template <typename T> LockFreeDeque<T>::LockFreeDeque() noexcept{ //The dummy node points to itself. this->m_dummy = new DequeNode<T>(); this->m_dummy->m_atomic_next.store(this->m_dummy); //Head and tail are initially points the dummy node which indicating the entire deque is empty. 
this->m_head.store(this->m_dummy); this->m_tail.store(this->m_dummy); #ifdef _DEQUE_TEST_ this->m_logical_size.store(0); this->m_physical_size.store(0); #endif } template <typename T> LockFreeDeque<T>::~LockFreeDeque() noexcept{ auto *_p_cur = this->m_head.load()->m_atomic_next.load(); while (_p_cur != this->m_dummy) { auto _p_tmp = _p_cur->m_atomic_next.load(); delete _p_cur; _p_cur = _p_tmp; } delete this->m_dummy; } template <typename T> void LockFreeDeque<T>::Push(const std::shared_ptr<T> &p_one, EDequeNodeFlag flag) noexcept { //Node node points to dummy. auto* _p_new_node = new DequeNode<T>(p_one); _p_new_node->m_atomic_next.store(this->m_dummy); _p_new_node->m_flag = flag; auto* _p_insert_after = this->m_tail.load(); std::atomic<DequeNode<T>*> *_p_insert_pos = &_p_insert_after->m_atomic_next; auto* _p_tmp = this->m_dummy; /*Note: 1. 'compare_exchange_weak' is a better approach for performance, but for now, it is acceptable to use 'compare_exchange_strong' making code simpler and being more readable. 2. _p_insert_after may has been freed if the free operation didn't get deferred ,so the reference to _p_insert_pos->compare_exchange_strong will core in that scene. */ while (!_p_insert_pos->compare_exchange_strong(_p_tmp, _p_new_node)) { _p_insert_after = _p_tmp; _p_insert_pos = &_p_insert_after->m_atomic_next; /*Insert operation must append the '_p_new_node' at the end of the deque. So '_p_tmp' must be set to 'this->m_dummy' each time compare_exchange_strong fails. 
*/ _p_tmp = this->m_dummy; } auto * _p_from = _p_insert_after; while (!this->m_tail.compare_exchange_weak(_p_from, _p_new_node)) _p_from = _p_insert_after; #ifdef _DEQUE_TEST_ if (flag == EDequeNodeFlag::NO_COUNTING) return; if (flag == EDequeNodeFlag::NORMAL) this->m_logical_size.fetch_add(1); this->m_physical_size.fetch_add(1); #endif } template <typename T> std::shared_ptr<T> LockFreeDeque<T>::Pop() noexcept { while (true) { auto *_deque_node = this->PopNode(); if (_deque_node == nullptr) return std::shared_ptr<T>(); //Encountering a 'fake node'. if (_deque_node->m_flag == EDequeNodeFlag::FAKE_NODE) continue; auto _transfer = _deque_node->m_val; //Once the ownership has been copied out, the node itself will release it. _deque_node->m_val.reset(); return _transfer; } } template <typename T> DequeNode<T>* LockFreeDeque<T>::PopNode() noexcept { std::atomic<DequeNode<T>*> *_p_head_next = &this->m_head.load()->m_atomic_next; //Judge if list is empty auto *_p_cur = _p_head_next->load(); if (_p_cur == this->m_dummy) return _p_cur; auto *_p_cur_next = _p_cur->m_atomic_next.load(); while (true) { //If '_p_cur' is the last node at the moment. if (_p_cur_next == this->m_dummy) { //If '_p_cur' is a 'fake-node'. if (_p_cur->m_flag == EDequeNodeFlag::FAKE_NODE) return nullptr; //'_p_cur' isn't a 'fake-node'. Push a 'fake-node' first. this->Push(std::shared_ptr<T>(), EDequeNodeFlag::FAKE_NODE); //'_p_cur' next pointer changed, update it. _p_cur_next = _p_cur->m_atomic_next.load(); } /* _p_cur may has been freed if freeing process didn't get deferred , so the reference to _p_cur->m_atomic_next.load() will core in that scene. */ if (!_p_head_next->compare_exchange_strong(_p_cur, _p_cur_next)) { //Now, '_p_cur' is the next node about to be popped. 
_p_cur_next = _p_cur->m_atomic_next.load(); continue; } break; } m_garbage.PushFront(_p_cur); #ifdef _DEQUE_TEST_ if (_p_cur->m_flag == EDequeNodeFlag::NO_COUNTING) return _p_cur; if (_p_cur->m_flag == EDequeNodeFlag::NORMAL) this->m_logical_size.fetch_sub(1); this->m_physical_size.fetch_sub(1); #endif //Always return the ptr of the node successfully popped, regardless what the role it is. return _p_cur; } template <typename T> void LockFreeDeque<T>::GC() noexcept { m_garbage.PurgeSingleList(::RaftCore::Config::FLAGS_garbage_deque_retain_num); } #ifdef _DEQUE_TEST_ template <typename T> std::size_t LockFreeDeque<T>::GetLogicalSize() const noexcept { return this->m_logical_size.load(); } template <typename T> std::size_t LockFreeDeque<T>::GetPhysicalSize() const noexcept { return this->m_physical_size.load(); } template <typename T> std::size_t LockFreeDeque<T>::Size() const noexcept { return this->GetLogicalSize(); } template <typename T> std::size_t LockFreeDeque<T>::GetSizeByIterating() const noexcept { int _counter = 0; auto _cur = this->m_head.load()->m_atomic_next.load(); while (_cur != this->m_dummy) { if (_cur->m_flag != EDequeNodeFlag::FAKE_NODE) _counter++; _cur = _cur->m_atomic_next.load(); } return _counter; } #endif }
6,897
C++
.cc
170
35.217647
145
0.649288
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,262
lock_free_hash_specific.cc
ppLorins_aurora/src/tools/lock_free_hash_specific.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include "tools/lock_free_hash_specific.h" namespace RaftCore::DataStructure { template <typename T,typename R> HashNodeAtomic<T, R>::HashNodeAtomic(const std::shared_ptr<T> &key, const std::shared_ptr<R> &val) noexcept : HashNode<T, R>(key, val) {} template <typename T,typename R> void HashNodeAtomic<T, R>::Update(R* val)noexcept { this->SpinLock(); //Do not delete the managed ptr. 
this->m_shp_val.reset(val, [](auto *p) {}); this->SpinUnLock(); } template <typename T,typename R> void HashNodeAtomic<T, R>::LockValue() noexcept { this->SpinLock(); } template <typename T,typename R> void HashNodeAtomic<T, R>::UnLockValue() noexcept { this->SpinUnLock(); } template <typename T,typename R> HashNodeAtomic<T, R>* HashNodeAtomic<T, R>::GetNext() const noexcept { return dynamic_cast<HashNodeAtomic<T, R>*>(this->m_next); } template <typename T,typename R> LockFreeHashAtomic<T, R>::LockFreeHashAtomic(uint32_t slot_num)noexcept : LockFreeHash<T, R, HashNodeAtomic>(slot_num) { } template <typename T,typename R> bool LockFreeHashAtomic<T, R>::Upsert(const T *key, R* p_avl) noexcept { std::size_t hash_val = key->Hash(); std::size_t idx = hash_val & this->m_slots_mask; std::atomic<HashNodeAtomic<T, R>*> * p_atomic = this->m_solts[idx]; HashNodeAtomic<T, R>* p_cur = p_atomic->load(); while (p_cur != nullptr) { if (p_cur->operator==(*key) && !p_cur->IsDeleted()) { std::atomic<R*> _atomic(p_avl); p_cur->Update(_atomic); return false; } //move next p_cur = p_cur->GetNext(); } std::shared_ptr<T> _shp_key(const_cast<T*>(key)); //Here we need to use an empty deleter. std::shared_ptr<R> _shp_val(p_avl, [](auto* p) {}); this->Insert(_shp_key, _shp_val); return true; } } //end namespace
2,646
C++
.cc
64
37.703125
120
0.684005
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,263
data_structure_base.cc
ppLorins_aurora/src/tools/data_structure_base.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include "tools/data_structure_base.h" namespace RaftCore::DataStructure { template <typename T> OrderedTypeBase<T>::OrderedTypeBase() noexcept{} template <typename T> OrderedTypeBase<T>::~OrderedTypeBase() noexcept{} template <typename T> bool OrderedTypeBase<T>::operator!=(const T&_other)const noexcept { return !this->operator==(_other); } template <typename T> bool OrderedTypeBase<T>::operator<=(const T& _other)const noexcept { if (this->operator==(_other)) return true; return this->operator<(_other); } template <typename T> bool OrderedTypeBase<T>::operator>=(const T& _other)const noexcept { if (this->operator==(_other)) return true; return this->operator>(_other); } template <typename T> LogicalDelete<T>::LogicalDelete() noexcept {} template <typename T> LogicalDelete<T>::~LogicalDelete() noexcept {} template <typename T> bool LogicalDelete<T>::IsDeleted() const noexcept { return this->m_deleted; } template <typename T> void LogicalDelete<T>::SetDeleted() noexcept { this->m_deleted = true; } template <typename T> LockableNode<T>::LockableNode()noexcept { this->m_spin_lock.clear(); } template <typename T> LockableNode<T>::~LockableNode()noexcept {} template <typename T> void LockableNode<T>::SpinLock()noexcept { while 
(this->m_spin_lock.test_and_set()); } template <typename T> void LockableNode<T>::SpinUnLock()noexcept { this->m_spin_lock.clear(); } } //end namespace
2,257
C++
.cc
64
33.015625
73
0.740433
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,264
lock_free_priority_queue.cc
ppLorins_aurora/src/tools/lock_free_priority_queue.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/comm_defs.h" #include "common/error_code.h" #include "config/config.h" #include "tools/lock_free_priority_queue.h" namespace RaftCore::DataStructure { LockFreePriotityQueue::Task::Task(TaskType x,LockFreeQueueBase *y)noexcept{ this->m_task_type = x; this->m_pc_queue.reset(y); } LockFreePriotityQueue::Task::Task(const Task& one) noexcept{ this->m_task_type = one.m_task_type; /*To avoid compile errors under darwin clang,parameter of the copy-constructor must be const-qualified. 
*/ Task &_real_one = const_cast<Task&>(one); this->m_pc_queue.swap(_real_one.m_pc_queue); } void LockFreePriotityQueue::Task::operator=(Task& one) noexcept{ this->m_task_type = one.m_task_type; this->m_pc_queue.swap(one.m_pc_queue); } LockFreePriotityQueue::Task::~Task() { m_pc_queue.reset(); } bool LockFreePriotityQueue::Task::operator<(const Task& _other) { return this->m_task_type < _other.m_task_type; } LockFreePriotityQueue::LockFreePriotityQueue() noexcept{} LockFreePriotityQueue::~LockFreePriotityQueue() noexcept{} void LockFreePriotityQueue::Initialize(int consumer_threads_num) noexcept { this->m_consumer_thread_num = consumer_threads_num; this->m_stop = false; this->m_running_thread_num.store(0); } void LockFreePriotityQueue::UnInitialize() noexcept { this->Stop(); this->m_task_queue.clear(); } void LockFreePriotityQueue::AddTask(TaskType _task_type, LockFreeQueueBase* _queue) noexcept { this->m_task_queue.emplace(std::piecewise_construct, std::forward_as_tuple((uint32_t)_task_type), std::forward_as_tuple(_task_type, _queue)); //auto _iter = this->m_task_queue.begin(); } int LockFreePriotityQueue::Push(TaskType _task_type,void* _shp_element) noexcept { uint32_t _task_type_uint = uint32_t(_task_type); auto _iter = this->m_task_queue.find(_task_type_uint); CHECK(_iter != this->m_task_queue.cend()) << ",task type:" << _task_type_uint; int _ret = _iter->second.m_pc_queue->Push(_shp_element); if (_ret == QUEUE_SUCC) this->m_cv.notify_one(); //It's not mandatory to hold the corresponding lock. 
return _ret; } void LockFreePriotityQueue::Launch() noexcept { for (int i = 0; i < this->m_consumer_thread_num; ++i) { std::thread* _p_thread = new std::thread(&LockFreePriotityQueue::ThreadEntrance,this); LOG(INFO) << "MCMP queue background thread :" << _p_thread->get_id() << " started"; _p_thread->detach(); } } void LockFreePriotityQueue::Stop() noexcept { this->m_stop = true; while (this->m_running_thread_num.load() != 0) std::this_thread::sleep_for(std::chrono::microseconds(::RaftCore::Config::FLAGS_thread_stop_waiting_us)); } uint32_t LockFreePriotityQueue::GetSize() const noexcept { uint32_t _sum = 0; for (const auto &_item : this->m_task_queue) _sum += _item.second.m_pc_queue->GetSize(); return _sum; } void LockFreePriotityQueue::ThreadEntrance() noexcept { CHECK(this->m_task_queue.size() > 0); this->m_running_thread_num.fetch_add(1); auto _wait_us = std::chrono::milliseconds(::RaftCore::Config::FLAGS_lockfree_queue_consumer_wait_ms); auto _cond_data_arrived = [&]()->bool{ for (auto _it = this->m_task_queue.cbegin(); _it != this->m_task_queue.cend(); _it++) if (!_it->second.m_pc_queue->Empty()) return true; return false; }; std::unique_lock<std::mutex> _unique_wrapper(this->m_cv_mutex, std::defer_lock); while (true) { //To detect somewhere else want the consuming threads to end. if (this->m_stop) break; /*Trade-off: there is a small windows during which we would lose messages, in that case, we'll wait until timeout reach. */ _unique_wrapper.lock(); bool _job_comes = this->m_cv.wait_for(_unique_wrapper, _wait_us, _cond_data_arrived); _unique_wrapper.unlock(); if (!_job_comes) continue; //Start from begin. auto _iter = this->m_task_queue.begin(); //Drain all queues. while(true){ if (_iter->second.m_pc_queue->PopConsume() == QUEUE_SUCC) continue; // Queue is empty or PopConsume fail. _iter++; //If there are no more tasks, quit current loop. if (_iter == this->m_task_queue.end()) break; } } this->m_running_thread_num.fetch_sub(1); } }
5,296
C++
.cc
122
38.057377
113
0.666926
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,265
election.cc
ppLorins_aurora/src/election/election.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <thread> #include <random> #include <vector> #include <fstream> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "boost/filesystem.hpp" #include "protocol/raft.grpc.pb.h" #include "config/config.h" #include "binlog/binlog_singleton.h" #include "member/member_manager.h" #include "global/global_env.h" #include "client/client_impl.h" #include "storage/storage.h" #include "tools/timer.h" #include "leader/leader_view.h" #include "election/election.h" #define _AURORA_ELECTION_TERM_PREFIX_ "current term:" #define _AURORA_ELECTION_VOTEFOR_PREFIX_ "I [tried] voted for:" #define _AURORA_ELECTION_KNOWN_VOTING_PREFIX_ "known others [tried] voted terms:" namespace RaftCore::Election { namespace fs = ::boost::filesystem; using ::RaftCore::BinLog::BinLogGlobal; using ::raft::VoteResponse; using ::raft::ErrorCode; using ::RaftCore::Common::ReadLock; using ::RaftCore::Common::WriteLock; using ::RaftCore::State::StateMgr; using ::RaftCore::Global::GlobalEnv; using ::RaftCore::Timer::GlobalTimer; using ::RaftCore::Leader::LeaderView; using ::RaftCore::Member::MemberMgr; using ::RaftCore::Member::JointConsensusMask; using ::RaftCore::Member::EJointStatus; using ::RaftCore::Client::PrevoteAsyncClient; using 
::RaftCore::Client::VoteAsyncClient; using ::RaftCore::Client::WriteSyncClient; using ::RaftCore::Storage::StorageMgr; std::atomic<uint32_t> ElectionMgr::m_cur_term; std::map<uint32_t, std::string> ElectionMgr::m_voted; std::shared_timed_mutex ElectionMgr::m_voted_mutex; ElectionMgr::NewLeaderEvent ElectionMgr::m_new_leader_event; uint32_t ElectionMgr::m_cur_cluster_size; std::shared_timed_mutex ElectionMgr::m_election_mutex; std::map<uint32_t, std::set<std::string>> ElectionMgr::m_known_voting; std::shared_timed_mutex ElectionMgr::m_known_voting_mutex; std::thread* ElectionMgr::m_p_thread = nullptr; #ifdef _ELECTION_TEST_ volatile bool ElectionMgr::m_candidate_routine_running = false; #endif MemberMgr::JointSummary ElectionMgr::m_joint_snapshot; uint32_t ElectionMgr::m_cur_cluster_vote_counter; uint32_t ElectionMgr::m_new_cluster_vote_counter; volatile bool ElectionMgr::m_leader_debut = false; LogIdentifier ElectionMgr::m_pre_term_lrl; TwoPhaseCommitBatchTask<std::string> ElectionMgr::m_phaseI_task; TwoPhaseCommitBatchTask<std::string> ElectionMgr::m_phaseII_task; void ElectionMgr::Initialize() noexcept { LoadFile(); } void ElectionMgr::UnInitialize() noexcept { SaveFile(); } void ElectionMgr::SaveFile() noexcept{ //Destroy contents if file exists, and since the content is human readable , no need to open in binary mode. 
std::FILE* f_handler = std::fopen(_AURORA_ELECTION_CONFIG_FILE_, "w+"); CHECK(f_handler != nullptr) << "open BaseState file " << _AURORA_ELECTION_CONFIG_FILE_ << "fail..,errno:" << errno; std::string _voted_info = ""; { ReadLock _r_lock(m_voted_mutex); for (auto iter = m_voted.crbegin(); iter != m_voted.crend(); ++iter) _voted_info += (std::to_string(iter->first) + "|" + iter->second + ","); } std::string _known_voting = ""; { ReadLock _r_lock(m_known_voting_mutex); for (auto &_pair_kv : m_known_voting) { std::string _votings = ""; for (auto& _item : _pair_kv.second) _votings += _item + "%"; _known_voting += (std::to_string(_pair_kv.first) + "|" + _votings + ","); } } std::string buf = _AURORA_ELECTION_TERM_PREFIX_ + std::to_string(m_cur_term.load()) + "\n" + _AURORA_ELECTION_VOTEFOR_PREFIX_ + _voted_info + "\n" + _AURORA_ELECTION_KNOWN_VOTING_PREFIX_ + _known_voting + "\n"; std::size_t written = fwrite(buf.data(), 1, buf.size(), f_handler); CHECK(written == buf.size()) << "fwrite BaseState file fail...,errno:" << errno << ",written:" << written << ",expected:" << buf.size(); CHECK(!std::fclose(f_handler)) << "close BaseState file fail...,errno:" << errno; } void ElectionMgr::Reset() noexcept { m_new_leader_event.m_notify_flag = false; } void ElectionMgr::LoadFile() noexcept{ m_cur_term.store(_MAX_UINT32_); { WriteLock _w_lock(m_voted_mutex); m_voted.clear(); } { WriteLock _w_lock(m_known_voting_mutex); m_known_voting.clear(); } //The local scope is to release the handle by std::ifstream. 
std::ifstream f_input(_AURORA_ELECTION_CONFIG_FILE_); for (std::string _ori_line; std::getline(f_input, _ori_line); ) { std::string _line = ""; _line.reserve(_ori_line.length()); std::copy_if(_ori_line.begin(), _ori_line.end(), std::back_inserter(_line), [](char c) { return c != '\r' && c != '\n'; }); if (_line.find(_AURORA_ELECTION_TERM_PREFIX_) != std::string::npos) { std::size_t pos = _line.find(":"); CHECK (pos != std::string::npos) << "cannot find delimiter[:] in state file, _line:" << _line; m_cur_term.store(std::atol(_line.substr(pos + 1).c_str())); continue; } if (_line.find(_AURORA_ELECTION_VOTEFOR_PREFIX_) != std::string::npos) { std::size_t pos = _line.find(":"); CHECK (pos != std::string::npos) << "cannot find delimiter[:] in state file, _line:" << _line; std::list<std::string> _output; ::RaftCore::Tools::StringSplit(_line.substr(pos + 1),',',_output); for (const auto &_item : _output) { std::list<std::string> _inner_output; ::RaftCore::Tools::StringSplit(_item,'|',_inner_output); CHECK(_inner_output.size() == 2); auto _iter = _inner_output.cbegin(); uint32_t _term = std::atol((*_iter++).c_str()); WriteLock _w_lock(m_voted_mutex); m_voted[_term] = *_iter; } continue; } if (_line.find(_AURORA_ELECTION_KNOWN_VOTING_PREFIX_) != std::string::npos) { std::size_t pos = _line.find(":"); CHECK (pos != std::string::npos) << "cannot find delimiter[:] in state file, _line:" << _line; std::list<std::string> _term_list; ::RaftCore::Tools::StringSplit(_line.substr(pos + 1),',',_term_list); for (const auto &_item : _term_list) { std::list<std::string> _voting_list; ::RaftCore::Tools::StringSplit(_item,'|', _voting_list); CHECK(_voting_list.size() == 2); auto _iter = _voting_list.cbegin(); uint32_t _term = std::atol((*_iter++).c_str()); std::list<std::string> _votings; ::RaftCore::Tools::StringSplit(*_iter,'%',_votings); WriteLock _w_lock(m_known_voting_mutex); if (m_known_voting.find(_term) == m_known_voting.cend()) m_known_voting[_term] = std::set<std::string>(); for 
(const auto& _item : _votings) m_known_voting[_term].emplace(_item); } continue; } } f_input.close(); //Give default values if not found in the state file bool give_default = false; if (m_cur_term.load() == _MAX_UINT32_) { m_cur_term.store(0); //term started from 0 give_default = true; } if (give_default) ElectionMgr::SaveFile(); } void ElectionMgr::ElectionThread() noexcept{ //Reset election environment before doing it. Reset(); auto _entrance = [&]() ->void{ #ifdef _ELECTION_TEST_ m_candidate_routine_running = true; #endif LOG(INFO) << "start electing,switch role:[Follower --> Candidate]"; SwitchRole(RaftRole::CANDIDATE); CandidateRoutine(); #ifdef _ELECTION_TEST_ m_candidate_routine_running = false; #endif }; if (m_p_thread) delete m_p_thread; m_p_thread = new std::thread(_entrance); m_p_thread->detach(); } #ifdef _ELECTION_TEST_ void ElectionMgr::WaitElectionThread()noexcept { while (m_candidate_routine_running) std::this_thread::sleep_for(std::chrono::microseconds(::RaftCore::Config::FLAGS_election_thread_wait_us)); } #endif std::string ElectionMgr::TryVote(uint32_t term, const std::string &addr)noexcept { { WriteLock _w_lock(m_voted_mutex); if (m_voted.find(term) != m_voted.end()) { LOG(WARNING) << "voting in a term that is already been voted before:" << term << ",original voted addr:" << m_voted[term] << ",current trying to vote addr:" << addr; return m_voted[term]; } m_voted[term] = addr; } SaveFile(); return ""; } void ElectionMgr::RenameBinlogNames(RaftRole old_role, RaftRole target_role) noexcept { const char* _old_role_str = StateMgr::GetRoleStr(old_role); const char* _target_role_str = StateMgr::GetRoleStr(target_role); std::list<std::string> _binlog_files; StorageMgr::FindRoleBinlogFiles(_old_role_str, _binlog_files); for (const auto&file_name : _binlog_files) { std::string _target_file_name = file_name; auto _pos = _target_file_name.find(_old_role_str); CHECK(_pos != std::string::npos) << "rename binlog fail when switching role, old_file_name:" << 
file_name << ",old_role:" << _old_role_str; _target_file_name.replace(_pos, std::strlen(_old_role_str), _target_role_str); fs::path _target_file(_target_file_name); if (fs::exists(_target_file)){ LOG(WARNING) << "target binlog exist, delete it:" << _target_file_name; fs::remove(_target_file); } CHECK(std::rename(file_name.c_str(), _target_file_name.c_str()) == 0) << "rename binlog file fail...,errno:" << errno; LOG(INFO) << "Switching role, rename binlog name from :" << file_name << " to " << _target_file_name; } } void ElectionMgr::SwitchRole(RaftRole target_role, const std::string &new_leader) noexcept { auto _old_role = StateMgr::GetRole(); if (_old_role == target_role) { LOG(WARNING) << "same role switching detected ,from " << StateMgr::GetRoleStr(_old_role) << " to " << StateMgr::GetRoleStr(target_role); return; } StateMgr::SwitchTo(target_role,new_leader); //Re-initialize global env. GlobalEnv::UnInitialEnv(_old_role); //Switch role also needs to rename binlog file names. RenameBinlogNames(_old_role, target_role); /*Only after `GlobalEnv::UnInitialEnv` could StateMgr::SwitchTo be called since otherwise `GlobalEnv::UnInitialEnv` would read the modified current state. */ GlobalEnv::InitialEnv(true); } void ElectionMgr::CandidateRoutine() noexcept{ std::random_device rd; std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd() int _sleep_min = ::RaftCore::Config::FLAGS_election_term_interval_min_ms; int _sleep_max = ::RaftCore::Config::FLAGS_election_term_interval_max_ms; std::uniform_int_distribution<> dis(_sleep_min, _sleep_max); //For a consistent read , we need a snap shot. 
m_joint_snapshot.Reset(); { ReadLock _r_lock(MemberMgr::m_mutex); m_joint_snapshot = MemberMgr::m_joint_summary; } Topology _topo; CTopologyMgr::Read(&_topo); m_cur_cluster_size = _topo.GetClusterSize(); PreparePrevoteTask(_topo); std::shared_ptr<VoteRequest> _shp_req(new VoteRequest()); _shp_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); _shp_req->mutable_base()->set_term(m_cur_term.load()); auto _lrl = BinLogGlobal::m_instance.GetLastReplicated(); _shp_req->mutable_last_log_entity()->set_term(_lrl.m_term); _shp_req->mutable_last_log_entity()->set_idx(_lrl.m_index); _shp_req->set_member_version(MemberMgr::GetVersion()); /*IMPORTANT : This is the term before the first round of election.We need a backing term mechanism otherwise candidate doesn't have a change to be instructed by the leader which is either newly elected or the old spurious failed one. */ auto _start_term = m_cur_term.load(); while (true) { int _sleep_random = dis(gen); std::this_thread::sleep_for(std::chrono::milliseconds(_sleep_random)); LOG(INFO) << "[Candidate] Just slept " << _sleep_random << " milliseconds under term :" << m_cur_term.load(); if (IncreaseToMaxterm()) break; LOG(INFO) << "[Candidate] I'm successfully increased to term:" << m_cur_term.load() << ",start issue pre-voting requests to the other nodes."; //Update term in the request, too. _shp_req->mutable_base()->set_term(m_cur_term.load()); if (!BroadcastVoting(_shp_req, _topo, VoteType::PreVote)) { LOG(INFO) << "[Candidate] pre-vote rejected term: " << m_cur_term.load() << ",now back term to " << _start_term << " ,may because the current node temporarily losing heartbeat messages from leader, " << "yet the leader is still alive to the other nodes,switch role:[Candidate --> Follwoer]"; //A role switching from Candidate-->Follower should also revert the term to avoid infinite starting new round of election. 
m_cur_term.store(_start_term); SwitchRole(RaftRole::FOLLOWER); break; } LOG(INFO) << "[Candidate] pre-voting succeed under term: " << m_cur_term.load() << ",start issue voting requests to the other nodes."; if (!BroadcastVoting(_shp_req, _topo, VoteType::Vote)) { LOG(INFO) << "[Candidate] vote rejected term: " << m_cur_term.load() << ",starting a new round, " <<"and back my term to the starting term:" <<_start_term; /*Note: There is a term going back policy, to prevent candidate with a term always greater than the newly elected leader ,resulting in infinite starting new election round. */ m_cur_term.store(_start_term); continue; } LOG(INFO) << "[Candidate] voting success! Become the new leader of term: " << m_cur_term.load() << ",switch role:[Candidate --> Leader]" ; //Record the snapshot of LRL for being used in leader's new term. m_pre_term_lrl.Set(_lrl); m_leader_debut = true; SwitchRole(RaftRole::LEADER); /*After successfully elected as leader, we submit a non-op log to ensure logs consistent amid the new topology in advance. To simplify, we just sent a write request as a usual client and this operation could be time consuming due to the possible log-resync process. Adding that the non-op isn't necessary in aurora's design(normal subsequent requests will also trigger the resync process if it's required), this is just to finish the resync job ASAP after a new leader elected.. */ auto _heartbeat = []()->bool { LeaderView::BroadcastHeatBeat(); return false; //One shot. }; GlobalTimer::AddTask(0, _heartbeat); //Intend to execute immediately. //Wait sometime to ensure the heartbeat has been sent & acknowledged by the followers. std::this_thread::sleep_for(std::chrono::milliseconds(::RaftCore::Config::FLAGS_election_wait_non_op_finish_ms)); LOG(INFO) << "[Leader] start issue non-op request"; //First one is to make the lag behind followers to catch up. 
std::string _tag = std::to_string(_sleep_random) + "_prepare"; SentNonOP(_tag); //Wait sometime to ensure the followers has caught up. std::this_thread::sleep_for(std::chrono::milliseconds(::RaftCore::Config::FLAGS_election_wait_non_op_finish_ms)); //Second one is to truncate the overstepped followers to truncate the additional logs. _tag = std::to_string(_sleep_random) + "_commit"; SentNonOP(_tag); break; } } void ElectionMgr::SentNonOP(const std::string &tag) noexcept { std::string _local_add = _AURORA_LOCAL_IP_ + std::string(":") + std::to_string(::RaftCore::Config::FLAGS_port); std::shared_ptr<::grpc::Channel> _channel = grpc::CreateChannel(_local_add, grpc::InsecureChannelCredentials()); WriteSyncClient _write_client(_channel); auto _setter = [&](std::shared_ptr<::raft::ClientWriteRequest>& req) { req->mutable_req()->set_key("aurora-reserved-non-op-key_" + tag); req->mutable_req()->set_value("aurora-reserved-non-op-value_" + tag); }; auto _rpc = std::bind(&::raft::RaftService::Stub::Write, _write_client.GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); ::grpc::Status _status; auto &_rsp = _write_client.DoRPC(_setter, _rpc, ::RaftCore::Config::FLAGS_election_non_op_timeo_ms, _status); if (!_status.ok()) { LOG(ERROR) << "submit non-op fail,err msg:" << _status.error_message(); return; } if (_rsp.client_comm_rsp().result() != ::raft::ErrorCode::SUCCESS) { LOG(ERROR) << "submit non-op fail,err msg:" << _rsp.client_comm_rsp().err_msg(); return; } LOG(INFO) << "submit non-op succeed."; } bool ElectionMgr::IncreaseToMaxterm() noexcept{ auto _cur_term = m_cur_term.load(); //Quickly find the next votable term. while (true) { if (m_new_leader_event.m_notify_flag) { LOG(INFO) << "[Candidate] a higher term found , switch role:[Candidate --> Follwoer]. 
"; m_cur_term.store(m_new_leader_event.m_new_leader_term); SwitchRole(RaftRole::FOLLOWER,m_new_leader_event.m_new_leader_addr); return true; } auto _old_term = _cur_term; _cur_term = _cur_term + 1; LOG(INFO) << "[Candidate] term increased normally : " << _old_term << " --> " << _cur_term; auto _find_to_max_term = [&]()->void { ReadLock _r_lock(m_known_voting_mutex); auto _last_iter = m_known_voting.crbegin(); if (_last_iter == m_known_voting.crend()) return; /*Make sure the term to be used is greater than the largest known one at present, to get rid of term conflict as far as possible.*/ if (_cur_term <= _last_iter->first) { _old_term = _cur_term; _cur_term = _last_iter->first + 1; LOG(INFO) << "[Candidate] term increased jumping: " << _old_term << " --> " << _cur_term; } }; _find_to_max_term(); auto _voted_addr = TryVote(_cur_term, StateMgr::GetMyAddr()); if (!_voted_addr.empty()) { LOG(INFO) << "[Candidate] I'm candidate,voting myself at term " << _cur_term << " fail,found I've voted some other nodes under this term ,that is:" << _voted_addr; continue; } break; } m_cur_term.store(_cur_term); return false; } void ElectionMgr::NotifyNewLeaderEvent(uint32_t term,const std::string addr)noexcept { //The field updating order is important. 
m_new_leader_event.m_new_leader_term = term; m_new_leader_event.m_new_leader_addr = addr; m_new_leader_event.m_notify_flag = true; } void ElectionMgr::PreparePrevoteTask(const Topology &topo)noexcept { auto _add = [&](const std::string & node_addr) ->void{ if (node_addr == StateMgr::GetMyAddr()) return; m_phaseI_task.m_todo.emplace_back(node_addr); uint32_t _flag = int(JointConsensusMask::IN_OLD_CLUSTER); if (m_joint_snapshot.m_joint_status == EJointStatus::JOINT_CONSENSUS) { const auto& _new_cluster = m_joint_snapshot.m_joint_topology.m_new_cluster; if (_new_cluster.find(node_addr) != _new_cluster.cend()) _flag |= int(JointConsensusMask::IN_NEW_CLUSTER); } m_phaseI_task.m_flags.emplace_back(_flag); }; std::for_each(topo.m_followers.cbegin(), topo.m_followers.cend(), _add); std::for_each(topo.m_candidates.cbegin(), topo.m_candidates.cend(), _add); if (m_joint_snapshot.m_joint_status == EJointStatus::JOINT_CONSENSUS) { const auto& _new_nodes = m_joint_snapshot.m_joint_topology.m_added_nodes; for (auto _iter = _new_nodes.cbegin(); _iter != _new_nodes.cend(); ++_iter) { m_phaseI_task.m_todo.emplace_back(_iter->first); m_phaseI_task.m_flags.emplace_back(uint32_t(JointConsensusMask::IN_NEW_CLUSTER)); } } } bool ElectionMgr::BroadcastVoting(std::shared_ptr<VoteRequest> shp_req, const Topology &topo, VoteType vote_type) noexcept{ TwoPhaseCommitBatchTask<std::string>* _p_task_list = &m_phaseI_task; if (vote_type == VoteType::Vote) _p_task_list = &m_phaseII_task; std::shared_ptr<::grpc::CompletionQueue> _shp_cq(new ::grpc::CompletionQueue()); auto _req_setter = [&shp_req](std::shared_ptr<::raft::VoteRequest>& _target)->void { _target = shp_req; }; auto _entrust_prevote_client = [&](auto &_shp_channel,std::size_t idx){ auto _shp_client = new PrevoteAsyncClient(_shp_channel, _shp_cq); _shp_client->PushCallBackArgs(reinterpret_cast<void*>(idx)); auto _f_prepare = std::bind(&::raft::RaftService::Stub::PrepareAsyncPreVote, _shp_client->GetStub().get(), std::placeholders::_1, 
std::placeholders::_2, std::placeholders::_3); _shp_client->EntrustRequest(_req_setter, _f_prepare, ::RaftCore::Config::FLAGS_election_vote_rpc_timeo_ms); }; auto _entrust_vote_client = [&](auto &_shp_channel,std::size_t idx){ auto _shp_client = new VoteAsyncClient(_shp_channel, _shp_cq); _shp_client->PushCallBackArgs(reinterpret_cast<void*>(idx)); auto _f_prepare = std::bind(&::raft::RaftService::Stub::PrepareAsyncVote, _shp_client->GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); _shp_client->EntrustRequest(_req_setter, _f_prepare, ::RaftCore::Config::FLAGS_election_vote_rpc_timeo_ms); }; int _entrust_total_num = 0; for (std::size_t i = 0; i < _p_task_list->m_todo.size(); ++i) { auto _shp_channel = ::grpc::CreateChannel(_p_task_list->m_todo[i], ::grpc::InsecureChannelCredentials()); if (vote_type == VoteType::PreVote) _entrust_prevote_client(_shp_channel,i); else _entrust_vote_client(_shp_channel,i); _entrust_total_num++; } //initialized to 1 means including myself. 
m_cur_cluster_vote_counter = 1; m_new_cluster_vote_counter = 1; PollingCQ(_shp_cq,_entrust_total_num); uint32_t _cur_cluster_majority = m_cur_cluster_size / 2 + 1; bool _succeed = m_cur_cluster_vote_counter >= _cur_cluster_majority; if (m_joint_snapshot.m_joint_status == EJointStatus::JOINT_CONSENSUS) { std::size_t _new_cluster_total_nodes = m_joint_snapshot.m_joint_topology.m_new_cluster.size(); std::size_t _new_cluster_majority = _new_cluster_total_nodes / 2 + 1; _succeed &= (m_new_cluster_vote_counter >= _new_cluster_majority); } return _succeed; } void ElectionMgr::PollingCQ(std::shared_ptr<::grpc::CompletionQueue> shp_cq,int entrust_num)noexcept { void* tag; bool ok; int _counter = 0; while (_counter < entrust_num) { if (!shp_cq->Next(&tag, &ok)) break; ::RaftCore::Common::ReactBase* _p_ins = (::RaftCore::Common::ReactBase*)tag; _p_ins->React(ok); _counter++; } } void ElectionMgr::AddVotingTerm(uint32_t term,const std::string &addr) noexcept{ WriteLock _w_lock(m_known_voting_mutex); LOG(INFO) << "add known voting term " << term << " from " << addr; if (m_known_voting.find(term) == m_known_voting.cend()) m_known_voting[term] = std::set<std::string>(); m_known_voting[term].emplace(addr); } void ElectionMgr::CallBack(const ::grpc::Status &status, const ::raft::VoteResponse& rsp, VoteType vote_type,uint32_t idx) noexcept { TwoPhaseCommitBatchTask<std::string>* _p_task_list = &m_phaseI_task; if (vote_type == VoteType::Vote) _p_task_list = &m_phaseII_task; if (!status.ok()){ LOG(ERROR) << "rpc status fail,idx:" << idx << ",addr:" << _p_task_list->m_todo[idx] << ",error code:" << status.error_code() << ",error msg:" << status.error_message() ; return; } std::string _vote_type_str = (vote_type == VoteType::Vote) ?"vote" : "prevote"; ErrorCode _err_code = (vote_type == VoteType::Vote) ? 
ErrorCode::VOTE_YES : ErrorCode::PREVOTE_YES; if (rsp.comm_rsp().result() != _err_code) { LOG(INFO) << "peer " << _p_task_list->m_todo[idx] << " rejected " << _vote_type_str << ",error message:" << rsp.comm_rsp().err_msg(); return; } LOG(INFO) << "peer " << _p_task_list->m_todo[idx] << " approved,vote type: " << _vote_type_str; if (_p_task_list->m_flags[idx] & int(JointConsensusMask::IN_OLD_CLUSTER)) m_cur_cluster_vote_counter++; if (_p_task_list->m_flags[idx] & int(JointConsensusMask::IN_NEW_CLUSTER)) m_new_cluster_vote_counter++; if (vote_type == VoteType::PreVote) { m_phaseII_task.m_todo.emplace_back(_p_task_list->m_todo[idx]); m_phaseII_task.m_flags.emplace_back(_p_task_list->m_flags[idx]); } } }
26,358
C++
.cc
519
43.138728
150
0.626685
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,266
log_identifier.cc
ppLorins_aurora/src/common/log_identifier.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/log_identifier.h" namespace RaftCore::Common { void LogIdentifier::Set(const LogIdentifier &_other) noexcept{ this->m_term = _other.m_term; this->m_index = _other.m_index; } void LogIdentifier::Set(uint32_t term,uint64_t index) noexcept{ this->m_term = term; this->m_index = index; } uint32_t LogIdentifier::GreaterThan(const LogIdentifier& _other) const noexcept { if (this->operator<(_other)) return -1; if (this->m_term > _other.m_term) return 0x7FFFFFFF; return (uint32_t)(this->m_index - _other.m_index); } bool LogIdentifier::operator==(const LogIdentifier& _other) const noexcept{ return (_other.m_term == this->m_term && _other.m_index == this->m_index); } bool LogIdentifier::operator!=(const LogIdentifier& _other) const noexcept{ return !(_other == *this); } bool LogIdentifier::operator< (const LogIdentifier &_other) const noexcept{ if (this->m_term < _other.m_term) return true; if (this->m_term > _other.m_term) return false; return this->m_index < _other.m_index; } bool LogIdentifier::operator<= (const LogIdentifier &_other) const noexcept{ return (this->operator <(_other) || this->operator ==(_other)); } bool LogIdentifier::operator> (const LogIdentifier &_other) const noexcept{ if (this->m_term > _other.m_term) 
return true; if (this->m_term < _other.m_term) return false; return this->m_index > _other.m_index; } bool LogIdentifier::operator>= (const LogIdentifier &_other) const noexcept{ return (this->operator >(_other) || this->operator ==(_other)); } std::string LogIdentifier::ToString() const noexcept{ return "LogIdentifier term:" + std::to_string(this->m_term) + ",idx:" + std::to_string(this->m_index); } LogIdentifier ConvertID(const ::raft::EntityID &entity_id) { LogIdentifier _id; _id.Set(entity_id.term(),entity_id.idx()); return _id; } std::ostream& operator<<(std::ostream& os, const LogIdentifier& obj) { os << "LogIdentifier term:" << obj.m_term << ",idx:" << obj.m_index; return os; } bool EntityIDEqual(const ::raft::EntityID &left, const LogIdentifier &right) { return (left.term() == right.m_term && left.idx() == right.m_index); } bool EntityIDLarger(const ::raft::EntityID &left, const LogIdentifier &right) { if (left.term() > right.m_term) { return true; } if (left.term() < right.m_term) { return false; } return left.idx() > right.m_index; } bool EntityIDLargerEqual(const ::raft::EntityID &left, const LogIdentifier &right) { if (left.term() > right.m_term) { return true; } if (left.term() < right.m_term) { return false; } return left.idx() >= right.m_index; } bool EntityIDSmaller(const ::raft::EntityID &left, const LogIdentifier &right) { if (left.term() < right.m_term) { return true; } if (left.term() > right.m_term) { return false; } return left.idx() < right.m_index; } }
3,812
C++
.cc
100
34.26
106
0.67727
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,267
react_base.cc
ppLorins_aurora/src/common/react_base.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "glog/logging.h" #include "common/react_base.h" namespace RaftCore::Common { ReactInfo::ReactInfo() noexcept {} void ReactInfo::Set(bool cq_result, void* tag) noexcept { this->m_cq_result = cq_result; this->m_tag = tag; } ReactInfo::ReactInfo(const ReactInfo &other) noexcept { this->m_cq_result = other.m_cq_result; this->m_tag = other.m_tag; } ReactBase::ReactBase() noexcept{} ReactBase::~ReactBase() noexcept{} void ReactBase::GeneralReacting(const ReactInfo &info)noexcept { ::RaftCore::Common::ReactBase* _p_ins = static_cast<::RaftCore::Common::ReactBase*>(info.m_tag); _p_ins->React(info.m_cq_result); } }
1,431
C++
.cc
33
41.212121
100
0.733382
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,268
request_base.cc
ppLorins_aurora/src/common/request_base.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "glog/logging.h" #include "common/request_base.h" namespace RaftCore::Common { template<typename T,typename R,typename Q> const char* BidirectionalRequest<T, R, Q>::m_status_macro_names[] = { "READ","WRITE","CONNECT","DONE","FINISH" }; template<typename T> RequestBase<T>::RequestBase() noexcept {} template<typename T> void RequestBase<T>::Initialize(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq) noexcept { this->m_async_service = shp_svc; this->m_server_notify_cq = shp_notify_cq; this->m_server_call_cq = shp_call_cq; } template<typename T> RequestBase<T>::~RequestBase() noexcept{} template<typename T,typename R> RequestTpl<T, R>::RequestTpl() noexcept {} template<typename T,typename R> RequestTpl<T,R>::~RequestTpl() noexcept {} template<typename T,typename R,typename Q> UnaryRequest<T, R, Q>::UnaryRequest() noexcept : m_responder(&this->m_server_context) { static_assert(std::is_base_of<UnaryRequest, Q>::value, "Q is not a derived from UnaryRequest."); this->m_stage = ProcessStage::CREATE; } template<typename T,typename R,typename Q> void UnaryRequest<T,R,Q>::React(bool cq_result) noexcept { Q* _p_downcast = 
dynamic_cast<Q*>(this); if (!cq_result) { LOG(ERROR) << "UnaryRequest got false result from CQ."; delete _p_downcast; return; } auto _status = ::grpc::Status::OK; switch (this->m_stage) { case ProcessStage::CREATE: /* Spawn a new subclass instance to serve new clients while we process the one for this . The instance will deallocate itself as part of its FINISH state.*/ new Q(this->m_async_service,this->m_server_notify_cq,this->m_server_call_cq); // The actual processing. _status = this->Process(); /* And we are done! Let the gRPC runtime know we've finished, using the memory address of this instance as the uniquely identifying tag for the event.*/ this->m_stage = ProcessStage::FINISH; this->m_responder.Finish(this->m_response, _status, _p_downcast); break; case ProcessStage::FINISH: delete _p_downcast; break; default: CHECK(false) << "Unexpected tag " << int(this->m_stage); break; } } template<typename T,typename R,typename Q> UnaryRequest<T,R,Q>::~UnaryRequest() noexcept {} template<typename T,typename R,typename Q> BidirectionalRequest<T, R, Q>::BidirectionalRequest() noexcept : m_reader_writer(&this->m_server_context) { this->m_stage = ProcessStage::CONNECT; this->m_server_context.AsyncNotifyWhenDone(this); } template<typename T,typename R,typename Q> BidirectionalRequest<T,R,Q>::~BidirectionalRequest() noexcept {} template<typename T,typename R,typename Q> void BidirectionalRequest<T,R,Q>::React(bool cq_result) noexcept { Q* _p_downcast = dynamic_cast<Q*>(this); if (!cq_result && (this->m_stage != ProcessStage::READ)) { LOG(ERROR) << "BidirectionalRequest got false result from CQ, state:" << this->GetStageName(); delete _p_downcast; return; } /*The `ServerAsyncReaderWriter::Finish()` call will resulting into two notifications for a single request. Processing those two notifications simultaneously will causing problems. So we need a synchronization here. 
*/ std::unique_lock<std::mutex> _wlock(this->m_mutex); auto _status = ::grpc::Status::OK; switch (this->m_stage) { case ProcessStage::READ: //Meaning client said it wants to end the stream either by a 'WritesDone' or 'finish' call. if (!cq_result) { this->m_reader_writer.Finish(::grpc::Status::OK, _p_downcast); this->m_stage = ProcessStage::DONE; break; } _status = this->Process(); if (!_status.ok()) { LOG(ERROR) << "bidirectional request going to return a non-success result:" << _status.error_code() << ",msg:" << _status.error_message(); this->m_reader_writer.Finish(::grpc::Status::OK, _p_downcast); this->m_stage = ProcessStage::DONE; break; } this->m_reader_writer.Write(this->m_response, _p_downcast); this->m_stage = ProcessStage::WRITE; break; case ProcessStage::WRITE: this->m_reader_writer.Read(&this->m_request, _p_downcast); this->m_stage = ProcessStage::READ; break; case ProcessStage::CONNECT: //Spawn a new instance to serve further incoming request. new Q(this->m_async_service,this->m_server_notify_cq,this->m_server_call_cq); this->m_reader_writer.Read(&this->m_request, _p_downcast); this->m_stage = ProcessStage::READ; break; case ProcessStage::DONE: this->m_stage = ProcessStage::FINISH; break; case ProcessStage::FINISH: _wlock.unlock(); delete _p_downcast; break; default: CHECK(false) << "Unexpected tag " << int(this->m_stage); } } template<typename T,typename R,typename Q> const char* BidirectionalRequest<T, R, Q>::GetStageName()const noexcept { return m_status_macro_names[(int)this->m_stage]; } }
6,100
C++
.cc
138
38.471014
114
0.674379
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,269
comm_view.cc
ppLorins_aurora/src/common/comm_view.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <thread> #include "tools/timer.h" #include "storage/storage_singleton.h" #include "common/comm_view.h" namespace RaftCore::Common { int CommonView::m_cpu_cores; LockFreePriotityQueue CommonView::m_priority_queue; volatile bool CommonView::m_running_flag = false; LogIdentifier CommonView::m_zero_log_id; LogIdentifier CommonView::m_max_log_id; std::vector<std::thread*> CommonView::m_vec_routine; using ::RaftCore::Timer::GlobalTimer; using ::RaftCore::Storage::StorageGlobal; void CommonView::Initialize() noexcept { m_zero_log_id.m_term = 0; m_zero_log_id.m_index = 0; m_max_log_id.m_term = 0xFFFFFFFF; m_max_log_id.m_index = 0xFFFFFFFFFFFFFFFF; m_cpu_cores = std::thread::hardware_concurrency(); CHECK(m_cpu_cores > 0); //Register storage's GC. 
auto *_p_storage = &StorageGlobal::m_instance; auto _storage_gc = [_p_storage]()->bool { _p_storage->PurgeGarbage(); return true; }; bool _enable_sstable_gc = true; #ifdef _COMMON_VIEW_TEST_ _enable_sstable_gc = ::RaftCore::Config::FLAGS_enable_sstable_gc; #endif if (_enable_sstable_gc) GlobalTimer::AddTask(::RaftCore::Config::FLAGS_sstable_purge_interval_second*1000,_storage_gc); int consumer_threads_num = ::RaftCore::Config::FLAGS_lockfree_queue_consumer_threads_num; if (consumer_threads_num == 0) consumer_threads_num = m_cpu_cores; //Start initializing the MCMP queue. m_priority_queue.Initialize(consumer_threads_num); } void CommonView::UnInitialize() noexcept { CommonView::m_vec_routine.clear(); m_priority_queue.UnInitialize(); } }
2,422
C++
.cc
57
39.140351
103
0.722103
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,270
react_group.cc
ppLorins_aurora/src/common/react_group.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "glog/logging.h" #include "common/react_group.h" namespace RaftCore::Common { template<typename T> ReactWorkGroup<T>::ReactWorkGroup(TypePtrCQ<T> shp_cq, TypeReactorFunc reactor, int therad_num) noexcept { this->m_shp_cq = shp_cq; this->m_reactor = reactor; this->m_polling_threads_num = therad_num; } template<typename T> ReactWorkGroup<T>::~ReactWorkGroup() {} template<typename T> void ReactWorkGroup<T>::StartPolling() noexcept { for (int i = 0; i < this->m_polling_threads_num; ++i) { std::thread *_p_thread = new std::thread(&ReactWorkGroup<T>::GrpcPollingThread, this); this->m_vec_threads.emplace_back(_p_thread); LOG(INFO) << "polling thread:" << _p_thread->get_id() << " for cq :" << this->m_shp_cq.get() << " started."; } } template<typename T> void ReactWorkGroup<T>::GrpcPollingThread() noexcept { void* tag; bool ok; ::RaftCore::Common::ReactInfo _info; while (this->m_shp_cq->Next(&tag, &ok)) { _info.Set(ok, tag); this->m_reactor(_info); } } template<typename T> void ReactWorkGroup<T>::WaitPolling() noexcept { for (auto& _thread : this->m_vec_threads) _thread->join(); } template<typename T> TypePtrCQ<T> ReactWorkGroup<T>::GetCQ() noexcept { return this->m_shp_cq; } template<typename T> void ReactWorkGroup<T>::ShutDownCQ() 
noexcept { this->m_shp_cq->Shutdown(); } template<typename T> void ReactWorkGroup<T>::GetThreadId(std::vector<std::thread::id> &ids) noexcept { for (auto& _thread : this->m_vec_threads) ids.emplace_back(_thread->get_id()); } }
2,391
C++
.cc
63
34.714286
106
0.694937
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,271
memory_log_base.cc
ppLorins_aurora/src/common/memory_log_base.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/memory_log_base.h" namespace RaftCore::Common { MemoryLogItemBase::MemoryLogItemBase(uint32_t _term, uint64_t _index) noexcept{ auto p_obj = new ::raft::Entity(); auto _p_entity_id = p_obj->mutable_entity_id(); _p_entity_id->set_term(_term); _p_entity_id->set_idx(_index); this->m_entity.reset(p_obj); } MemoryLogItemBase::MemoryLogItemBase(const ::raft::Entity &_entity)noexcept { /*This is where memory copy overhead occurs.Because the content of AppendEntriesRequest object need to be retained until next CommitEntries RPC call. 
*/ this->m_entity.reset(new ::raft::Entity(_entity)); } MemoryLogItemBase::~MemoryLogItemBase() noexcept{} std::shared_ptr<::raft::Entity> MemoryLogItemBase::GetEntity()const noexcept { return m_entity; } bool MemoryLogItemBase::operator<(const MemoryLogItemBase &_other) const noexcept{ if (this->m_entity->entity_id().term() < _other.m_entity->entity_id().term()) return true; if (this->m_entity->entity_id().term() > _other.m_entity->entity_id().term()) return false; return this->m_entity->entity_id().idx() < _other.m_entity->entity_id().idx(); } bool MemoryLogItemBase::operator==(const MemoryLogItemBase& _other)const noexcept { return (this->m_entity->entity_id().term() == _other.m_entity->entity_id().term() && this->m_entity->entity_id().idx() == _other.m_entity->entity_id().idx()); } bool MemoryLogItemBase::operator!=(const MemoryLogItemBase& _other)const noexcept { return !this->operator==(_other); } bool MemoryLogItemBase::operator>(const MemoryLogItemBase& _other)const noexcept { if (this->m_entity->entity_id().term() > _other.m_entity->entity_id().term()) return true; if (this->m_entity->entity_id().term() < _other.m_entity->entity_id().term()) return false; return this->m_entity->entity_id().idx() > _other.m_entity->entity_id().idx(); } bool MemoryLogItemBase::AfterOf(const MemoryLogItemBase& _other)const noexcept { return (this->m_entity->pre_log_id().term() == _other.m_entity->entity_id().term() && this->m_entity->pre_log_id().idx() == _other.m_entity->entity_id().idx()); } bool CmpMemoryLog(const MemoryLogItemBase *left, const MemoryLogItemBase *right) noexcept { return left->GetEntity()->entity_id().term() == right->GetEntity()->pre_log_id().term() && left->GetEntity()->entity_id().idx() == right->GetEntity()->pre_log_id().idx() ; } }
3,257
C++
.cc
62
48.951613
94
0.699149
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,272
comm_defs.cc
ppLorins_aurora/src/common/comm_defs.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/comm_defs.h" namespace RaftCore::Common { bool EntityIDEqual(const ::raft::EntityID &left, const ::raft::EntityID &right) { return (left.term() == right.term() && left.idx() == right.idx()); } bool EntityIDSmaller(const ::raft::EntityID &left, const ::raft::EntityID &right) { if (left.term() < right.term()) return true; if (left.term() == right.term()) return left.idx() < right.idx(); return false; } bool EntityIDSmallerEqual(const ::raft::EntityID &left, const ::raft::EntityID &right) { if (left.term() < right.term()) return true; if (left.term() == right.term()) return left.idx() <= right.idx(); return false; } }
1,484
C++
.cc
34
40.5
88
0.693111
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,273
config.cc
ppLorins_aurora/src/config/config.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "config/config.h" namespace RaftCore::Config { DEFINE_uint32(notify_cq_num, 2, "#notify CQ the server use."); DEFINE_uint32(notify_cq_threads, 4, "#threads polling on each notify CQ."); DEFINE_uint32(call_cq_num, 2, "#call CQ the server use."); DEFINE_uint32(call_cq_threads, 2, "#threads polling on each call CQ."); DEFINE_uint32(client_thread_num, 2, "#threads polling on each client CQ."); DEFINE_uint32(request_pool_size, 100, "#call data instance each thread hold."); DEFINE_uint32(binlog_append_file_timeo_us, 1500, "append binlog cv wait timeout in microseconds ."); DEFINE_uint32(binlog_meta_hash_buf_size, 4, "binlog's meta data hash buf size in MB, each server instance has only one of such buffer."); DEFINE_uint32(binlog_max_size, 1024 * 1024 * 128, "max size(meta data not included) in bytes for each individual binlog file,used together with binlog_max_log_num."); DEFINE_uint32(binlog_max_log_num, 1024 * 1024 * 4, "max #logs for each individual binlog file,used together with binlog_max_size."); DEFINE_uint32(binlog_parse_buf_size, 64, "buf size(in MB) used for parsing binlog on server startup."); DEFINE_string(ip, "0.0.0.0", "svr listening ipv4 address."); DEFINE_uint32(port, 10010, "svr listening port."); 
DEFINE_uint32(guid_step_len, 200, "guid increase step length."); DEFINE_uint32(guid_disk_random_write_hint_ms, 20, "non SSD disk random write latency hint in milliseconds"); DEFINE_uint32(leader_heartbeat_rpc_timeo_ms, 50, "heartbeat timeout in millisecond."); DEFINE_uint32(leader_append_entries_rpc_timeo_ms, 100, "AppendEntries rpc timeout in millisecond."); DEFINE_uint32(leader_commit_entries_rpc_timeo_ms, 100, "CommitEntries rpc timeout in millisecond."); DEFINE_uint32(leader_resync_log_rpc_timeo_ms, 3000, "resync log rpc timeout in millisecond."); DEFINE_uint32(leader_heartbeat_interval_ms, 500, "leader dedicated thread sending heartbeat intervals in ms."); DEFINE_uint32(leader_last_log_resolve_additional_wait_ms, 100, "additional wait ms when resolving the last log."); DEFINE_uint32(lockfree_queue_resync_log_elements,2 * 1024,"#elements in the resync log queue,round up to nearest 2^n."); DEFINE_uint32(lockfree_queue_resync_data_elements, 1 * 1024, "#elements in the resync data queue,round up to nearest 2^n."); DEFINE_uint32(lockfree_queue_client_react_elements, 1024 * 1024, "#elements in the client react queue,round up to nearest 2^n."); DEFINE_uint32(lockfree_queue_consumer_wait_ms, 800, "the waiting time in us that waiting on the queue's CV."); DEFINE_uint32(lockfree_queue_consumer_threads_num, 16, "#consuemr thread,0 means spawninig them by #CPU cores."); DEFINE_uint32(list_op_tracker_hash_slot_num, 64, "#slots a operation tracker will use."); DEFINE_uint32(timer_precision_ms, 10, "timer check intervals in milliseconds."); DEFINE_uint32(thread_stop_waiting_us, 3, "the interval in us to check if the thread if stopped as intended."); DEFINE_uint32(gc_interval_ms, 30, "the interval in ms for garbage collecting."); DEFINE_uint32(garbage_deque_retain_num, 5000, "the #(garbage deque node) retaining when doing GC in deque."); DEFINE_uint32(conn_per_link, 64, "#tcp connections between each leader<-->follower link."); DEFINE_uint32(channel_pool_size, 10, "#channels 
for each tcp connection."); DEFINE_uint32(client_pool_size, 10000, "#clients the client pool maintained for each follower."); DEFINE_uint32(resync_log_reverse_step_len, 20, "the reversing step len when the leader try to find the lastest consistent log entry with a follower."); DEFINE_uint32(resync_data_item_num_each_rpc, 1024, "the #data items in a single sending of a stream RPC call."); DEFINE_uint32(resync_data_log_num_each_rpc, 1024, "the #replicated logs in a single sending of a stream RPC call."); DEFINE_uint32(resync_data_task_max_time_ms, 200, "the max time in millisecond a resync data task can hold in a single execution."); DEFINE_uint32(binlog_reserve_log_num, 100, "the #log reserved before the ID-LCL when rotating binlog file."); DEFINE_uint32(group_commit_count, 500, "#previous appending requests that a commit request at least represents."); DEFINE_uint32(cut_empty_timeos_ms, 500, "In leader, if a replicated msg cannot be successfully processed within this time, an error will be returned."); DEFINE_uint32(iterating_threads, 4, "#threads for iterating the unfinished requests."); DEFINE_uint32(iterating_wait_timeo_us, 50 * 1000, "follower disorder threads wait on CV timeout in microseconds."); DEFINE_uint32(follower_check_heartbeat_interval_ms, 10, "interval in milliseconds of follower's checking leader's heartbea behavior,must be."); DEFINE_uint32(disorder_msg_timeo_ms, 1000, "In follower, if a disorder msg cannot be successfully processed within this time, an error will be returned."); DEFINE_uint32(cgg_wait_for_last_released_guid_finish_us, 50, "there is a time windows one thread can still generating guids even server status already been \ set to HALT.This is the us waiting to it elapses."); DEFINE_uint32(election_heartbeat_timeo_ms, 3000, "this is the duraion after it elapsed the follower will start to turn into candidate role."); DEFINE_uint32(election_term_interval_min_ms, 150, "the lower bound of sleeping interval before incease term and start a new 
election."); DEFINE_uint32(election_term_interval_max_ms, 300, "the upper bound of sleeping interval before incease term and start a new election."); DEFINE_uint32(election_vote_rpc_timeo_ms, 2000, "vote rpc timeout in millisecond."); DEFINE_uint32(election_non_op_timeo_ms, 500, "timeo value in ms of the submitting non-op log entry operation after new leader elected."); DEFINE_uint32(election_wait_non_op_finish_ms, 200, "time in ms waiting for non-op finished."); DEFINE_uint32(memchg_sync_data_wait_seconds, 1, "leader will wait for the newly joined nodes to finish sync all the data,this is how long it will wait during each round of waiting."); DEFINE_uint32(memchg_rpc_timeo_ms, 50, "membership change RPC timeout in milliseconds."); DEFINE_uint32(memory_table_max_item, 1024 * 1024 * 2, "max #records a memory can hold."); DEFINE_uint32(memory_table_hash_slot_num, 10 * 1000, "#slots a memory table object's inner hash object can hold."); DEFINE_uint32(sstable_table_hash_slot_num, 10 * 1000, "#slots a sstable table object's inner hash object can hold."); DEFINE_uint32(sstable_purge_interval_second, 10, "Interval in seconds of merging and purging sstabls."); DEFINE_uint32(child_glog_v, 90, "the GLOG_v environment variable used for child processes in gtest."); DEFINE_uint32(election_thread_wait_us, 1000, "the waiting time between each check of election thread exiting."); DEFINE_bool(do_heartbeat, true, "whether leader sending heartbeat message to followers or not."); DEFINE_bool(heartbeat_oneshot, false, "sending heartbeat message just once."); DEFINE_bool(member_leader_gone, false, "whether the old leader will exist in the new cluster or not."); DEFINE_uint32(concurrent_client_thread_num, 0, "#thread client using when doing benchmark."); DEFINE_bool(enable_sstable_gc, true, "whether enable sstable purging or not."); DEFINE_bool(checking_heartbeat, true, "whether follower checking heartbeat or not."); DEFINE_uint32(append_entries_start_idx, 8057, "#log start index for the 
AppendEntries interface."); DEFINE_bool(clear_existing_sstable_files, true, "whether delete all existing sstable files or not."); DEFINE_uint32(hash_slot_num, 500, "#slots in lockfree hash."); DEFINE_uint32(resync_log_start_idx, 8057, "#log start index for the LeaderView::ResyncLog interface."); DEFINE_uint32(deque_op_count, 100000, "#operations for lockfree deque testing."); DEFINE_uint32(meta_count, 80000, "#meta items for testing memory useage."); DEFINE_uint32(follower_svc_benchmark_req_round, 10000, "#rounds(phaseI+phaseII) of requests sent during follower service benchmarking."); DEFINE_uint32(leader_svc_benchmark_req_count, 10000, "#requests of requests sent during leader service benchmarking."); DEFINE_uint32(benchmark_client_cq_num, 2, "#CQ client used to trigger the requests."); DEFINE_uint32(benchmark_client_polling_thread_num_per_cq, 4, "#threads client per CQ used to trigger the requests."); DEFINE_uint32(client_write_timo_ms, 50, "timeout value(ms) for client writing."); DEFINE_uint32(benchmark_client_entrusting_thread_num, 1, "."); DEFINE_string(target_ip, "default_none", "the target ip for a new benchmark server."); DEFINE_string(my_ip, "default_none", "the ip addr to indicate myself in client req."); DEFINE_uint32(storage_get_slice_count, 10, "#elements get from get_slice()."); DEFINE_uint32(retain_num_unordered_single_list, 100, "retain num for unordered_single_list unit test."); DEFINE_bool(do_commit, false, "whether issue the commit request or not after appenedEntries."); DEFINE_uint32(value_len, 2, "value length in unite test."); DEFINE_uint32(client_count, 10000, "test client count."); DEFINE_uint32(launch_threads_num, 0, "#threads in data structures benchmark."); DEFINE_uint32(queue_initial_size, 10000000, "initial size for lockfree queue in unit test."); DEFINE_uint32(queue_op_count, 1000000, "#operations for lockfree queue unit test."); DEFINE_uint32(conn_op_count, 100000, "#operations for follower unit test."); }
10,392
C++
.cc
107
92.280374
187
0.741568
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,274
state_mgr.cc
ppLorins_aurora/src/state/state_mgr.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <regex> #include "common/comm_defs.h" #include "config/config.h" #include "tools/utilities.h" #include "state/state_mgr.h" namespace RaftCore::State { RaftRole StateMgr::m_cur_state = RaftRole::UNKNOWN; bool StateMgr::m_initialized = false; std::string StateMgr::m_my_addr = ""; std::list<std::string> StateMgr::m_nic_addrs; using ::RaftCore::CTopologyMgr; using ::RaftCore::Topology; void StateMgr::Initialize(const ::RaftCore::Topology &global_topo) noexcept { ::RaftCore::Tools::GetLocalIPs(m_nic_addrs); std::for_each(m_nic_addrs.begin(), m_nic_addrs.end(), [](std::string &_ip) { _ip += std::string(":" + std::to_string(::RaftCore::Config::FLAGS_port) ); }); std::list<std::string> _find_in; if (global_topo.m_my_addr.empty()) _find_in = m_nic_addrs; else _find_in.emplace_back(global_topo.m_my_addr); auto _init_topology = [&]() -> ::RaftCore::State::RaftRole { if (std::find(_find_in.cbegin(), _find_in.cend(), global_topo.m_leader) != std::end(_find_in)) { m_my_addr = global_topo.m_leader; return ::RaftCore::State::RaftRole::LEADER; } for (const auto & _item : global_topo.m_followers) { if (std::find(_find_in.cbegin(), _find_in.cend(), _item) == std::end(_find_in)) continue; m_my_addr = _item; return 
::RaftCore::State::RaftRole::FOLLOWER; } for (const auto & _item : global_topo.m_candidates) { if (std::find(_find_in.cbegin(), _find_in.cend(), _item) == std::end(_find_in)) continue; m_my_addr = _item; return ::RaftCore::State::RaftRole::CANDIDATE; } //If I'm not in the topology list , assume I'm an empty node ready to be joined. return ::RaftCore::State::RaftRole::UNKNOWN; }; m_cur_state = _init_topology(); CHECK(m_cur_state != ::RaftCore::State::RaftRole::UNKNOWN) << "m_cur_state invalid."; m_initialized = true; } bool StateMgr::Ready() noexcept { return m_initialized; } void StateMgr::UnInitialize() noexcept { m_cur_state = ::RaftCore::State::RaftRole::UNKNOWN; m_my_addr = ""; } State::RaftRole StateMgr::GetRole() noexcept{ return m_cur_state; } const std::list<std::string>& StateMgr::GetNICAddrs() noexcept { return m_nic_addrs; } bool StateMgr::AddressUndetermined() noexcept { return m_my_addr.empty(); } const char* StateMgr::GetRoleStr(RaftRole state) noexcept { RaftRole _role = m_cur_state; if (state != RaftRole::UNKNOWN) _role = state; if (_role == RaftRole::LEADER) return _ROLE_STR_LEADER_; else if (_role == RaftRole::FOLLOWER) return _ROLE_STR_FOLLOWER_; else if (_role == RaftRole::CANDIDATE) return _ROLE_STR_CANDIDATE_; else if (_role == RaftRole::UNKNOWN) return _ROLE_STR_UNKNOWN_; else CHECK(false); return nullptr; } void StateMgr::SwitchTo(RaftRole state,const std::string &new_leader) noexcept { /*There are 4 valid transitions: 1. Leader -> Follower. (step down.) 2. Follower -> Candidate. (start electing.) 3. Candidate -> Follower. (new leader elected but not me.) 4. Candidate -> Follower. (new leader elected.It's me.) */ Topology _topo; CTopologyMgr::Read(&_topo); //Update topology before switching role. if (m_cur_state == RaftRole::LEADER) { CHECK(state == RaftRole::FOLLOWER) << "invalid state transition found : Leader -> " << state; //Check new leader address format validity. 
std::regex _pattern("\\d{1,3}\.\\d{1,3}\.\\d{1,3}\.\\d{1,3}:\\d+"); std::smatch _sm; CHECK(std::regex_match(new_leader, _sm, _pattern)) << "new leader format valid:" << new_leader; _topo.m_leader = new_leader; _topo.m_followers.erase(new_leader); _topo.m_candidates.erase(new_leader); _topo.m_followers.emplace(m_my_addr); } else if (m_cur_state == RaftRole::FOLLOWER) { CHECK(state == RaftRole::CANDIDATE) << "invalid state transition found : Follower -> " << state; _topo.m_followers.erase(m_my_addr); _topo.m_candidates.emplace(m_my_addr); } else if (m_cur_state == RaftRole::CANDIDATE) { CHECK(state == RaftRole::LEADER || state == RaftRole::FOLLOWER) << "invalid state transition found : Candidate -> " << state; _topo.m_candidates.erase(m_my_addr); if (state == RaftRole::LEADER) { _topo.m_followers.emplace(_topo.m_leader); _topo.m_candidates.erase(m_my_addr); _topo.m_leader = m_my_addr; } else if (state == RaftRole::FOLLOWER) { /* This transition can be caused of : 1. pre-vote fail. 2. a new leader has been detected. */ //This is for case 2. if (!new_leader.empty()) { _topo.m_followers.emplace(_topo.m_leader); _topo.m_followers.erase(new_leader); _topo.m_candidates.erase(new_leader); _topo.m_leader = new_leader; } _topo.m_followers.emplace(m_my_addr); } } else CHECK(false) << "unknown role found :" << m_cur_state; m_cur_state = state; //Writing new topology to config file. CTopologyMgr::Update(_topo); } const std::string& StateMgr::GetMyAddr() noexcept { return m_my_addr; } void StateMgr::SetMyAddr(const std::string& addr) noexcept { m_my_addr = addr; } std::ostream& operator<<(std::ostream& os, const RaftRole& obj) { os << StateMgr::GetRoleStr(obj); return os; } }
6,461
C++
.cc
154
35.487013
133
0.626978
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,275
topology_mgr.cc
ppLorins_aurora/src/topology/topology_mgr.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <regex> #include "common/comm_defs.h" #include "topology/topology_mgr.h" #define _AURORA_TOPOLOGY_LEADER_INDICATOR_ "leader" #define _AURORA_TOPOLOGY_FOLLOWER_INDICATOR_ "followers" #define _AURORA_TOPOLOGY_CANDIDATE_INDICATOR_ "candidates" #define _AURORA_TOPOLOGY_MYADDR_INDICATOR_ "my_addr" namespace RaftCore { std::fstream CTopologyMgr::m_file_stream; Topology CTopologyMgr::m_ins; std::shared_timed_mutex CTopologyMgr::m_mutex; using ::RaftCore::Common::ReadLock; using ::RaftCore::Common::WriteLock; std::ostream& operator<<(std::ostream& os, const Topology& obj) { os << "------ Topology ------" << std::endl; os << "leader:" << std::endl << obj.m_leader << std::endl; os << "followers:" << std::endl; for (const auto &_item : obj.m_followers) { os << _item << std::endl; } os << "candidates:" << std::endl; for (const auto &_item : obj.m_candidates) { os << _item << std::endl; } os << "my_addr:" << obj.m_my_addr << std::endl; return os; } Topology::Topology()noexcept { this->Reset(); } void Topology::Reset() noexcept{ this->m_leader = ""; this->m_followers.clear(); this->m_candidates.clear(); this->m_my_addr = ""; } uint32_t Topology::GetClusterSize() const noexcept { return (uint32_t)this->m_followers.size() + 
(uint32_t)this->m_candidates.size() + 1; } bool Topology::InCurrentCluster(const std::string &node) noexcept{ if (node == this->m_leader) return true; if (this->m_candidates.find(node) != this->m_candidates.cend()) return true; if (this->m_followers.find(node) != this->m_followers.cend()) return true; return false; } void CTopologyMgr::Initialize() noexcept { Load(); } void CTopologyMgr::UnInitialize() noexcept { m_file_stream.close(); } bool CTopologyMgr::Load() noexcept{ m_file_stream.open(_AURORA_TOPOLOGY_CONFFIG_FILE_); if (!m_file_stream.is_open()) { LOG(ERROR) << "open topology config file " << _AURORA_TOPOLOGY_CONFFIG_FILE_ << " fail."; return false; } std::regex _pattern("(#*)\\d{1,3}\.\\d{1,3}\.\\d{1,3}\.\\d{1,3}:\\d+"); std::smatch _sm; WriteLock _w_lock(m_mutex); m_ins.Reset(); int _section_flg = 0; // 1: leader , 2: follower,3:candidate, 4:my_addr for (std::string _ori_line; std::getline(m_file_stream, _ori_line);) { std::string _line = ""; //_line.reserve(_ori_line.length()); std::copy_if(_ori_line.begin(), _ori_line.end(), std::back_inserter(_line), [](char c) { return c != '\r' && c != '\n'; }); if (_line == _AURORA_TOPOLOGY_LEADER_INDICATOR_) { _section_flg = 1; continue; } if (_line == _AURORA_TOPOLOGY_FOLLOWER_INDICATOR_) { _section_flg = 2; continue; } if (_line == _AURORA_TOPOLOGY_CANDIDATE_INDICATOR_) { _section_flg = 3; continue; } if (_line == _AURORA_TOPOLOGY_MYADDR_INDICATOR_) { _section_flg = 4; continue; } if (!std::regex_match(_line, _sm, _pattern)) { LOG(ERROR) << "unrecognized line found when parsing topology config file, ignore it:" << _line; continue; } //Support comment. if (_sm[1] == "#") continue; if (_section_flg == 1) m_ins.m_leader = _line; else if (_section_flg == 2) m_ins.m_followers.emplace(_line); else if (_section_flg == 3) m_ins.m_candidates.emplace(_line); else if (_section_flg == 4) m_ins.m_my_addr = _line; else CHECK(false) << "unknown section flag : " << _section_flg; } //'m_my_addr' must be in the cluster. 
CHECK(m_ins.InCurrentCluster(m_ins.m_my_addr)); return true; } void CTopologyMgr::Read(Topology *p_output) noexcept{ if (p_output == nullptr) { LOG(ERROR) << "input data is null,invalid."; return ; } ReadLock _r_lock(m_mutex); p_output->m_leader = m_ins.m_leader; p_output->m_followers = m_ins.m_followers; p_output->m_candidates = m_ins.m_candidates; p_output->m_my_addr = m_ins.m_my_addr; } void CTopologyMgr::Update(const Topology &input) noexcept{ WriteLock _w_lock(m_mutex); m_ins.m_leader = input.m_leader; m_ins.m_followers = input.m_followers; m_ins.m_candidates = input.m_candidates; m_ins.m_my_addr = input.m_my_addr; m_file_stream.close(); //Reopen file for re-writing and truncate the old contents m_file_stream.open(_AURORA_TOPOLOGY_CONFFIG_FILE_,std::ios_base::in | std::ios_base::out | std::ios_base::trunc); std::string content = ""; content.append(std::string(_AURORA_TOPOLOGY_LEADER_INDICATOR_) + "\n"); content.append(m_ins.m_leader + "\n"); content.append(std::string(_AURORA_TOPOLOGY_FOLLOWER_INDICATOR_) + "\n"); for (const auto &item : m_ins.m_followers) content.append((item + "\n")); content.append(std::string(_AURORA_TOPOLOGY_CANDIDATE_INDICATOR_) + "\n"); for (const auto &item : m_ins.m_candidates) content.append((item + "\n")); content.append(std::string(_AURORA_TOPOLOGY_MYADDR_INDICATOR_) + "\n"); content.append(m_ins.m_my_addr + "\n"); m_file_stream.write(content.c_str(),content.length()); m_file_stream.flush(); } }
6,213
C++
.cc
156
34.089744
131
0.622126
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,276
guid_generator.cc
ppLorins_aurora/src/guid/guid_generator.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "boost/filesystem.hpp" #include "config/config.h" #include "common/comm_defs.h" #include "guid/guid_generator.h" #define _AURORA_FILE_BUF_SIZE_ (1024) namespace RaftCore::Guid { std::atomic<uint64_t> GuidGenerator::m_last_released_guid; void GuidGenerator::Initialize(uint64_t last_released) noexcept{ m_last_released_guid.store(last_released); } void GuidGenerator::UnInitialize() noexcept{} GuidGenerator::GUIDPair GuidGenerator::GenerateGuid() noexcept{ uint64_t _old_val = m_last_released_guid.fetch_add(1); uint64_t _deserved_val = _old_val + 1; return { _old_val,_deserved_val }; } void GuidGenerator::SetNextBasePoint(uint64_t base_point) noexcept { m_last_released_guid.store(base_point); } uint64_t GuidGenerator::GetLastReleasedGuid() noexcept { return m_last_released_guid.load(); } }
1,616
C++
.cc
37
41.567568
73
0.754161
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,277
member_manager.cc
ppLorins_aurora/src/member/member_manager.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <string> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "protocol/raft.grpc.pb.h" #include "common/comm_defs.h" #include "config/config.h" #include "topology/topology_mgr.h" #include "storage/storage_singleton.h" #include "leader/follower_entity.h" #include "leader/leader_view.h" #include "leader/client_pool.h" #include "tools/lock_free_priority_queue.h" #include "tools/utilities.h" #include "state/state_mgr.h" #include "election/election.h" #include "global/global_env.h" #include "client/client_impl.h" #include "member/member_manager.h" #define _AURORA_MEMBER_CLUSTER_STATUS_PREFIX_ "cluster status:" #define _AURORA_MEMBER_NEW_CLUSTER_PREFIX_ "new cluster:" #define _AURORA_MEMBER_VERSION_PREFIX_ "version:" namespace RaftCore::Member { std::condition_variable MemberMgr::m_resync_data_cv; std::mutex MemberMgr::m_resync_data_cv_mutex; MemberMgr::JointSummary MemberMgr::m_joint_summary; std::shared_timed_mutex MemberMgr::m_mutex; std::atomic<bool> MemberMgr::m_in_processing; MemberMgr::JointTopology MemberMgr::m_joint_topo_snapshot; const char* MemberMgr::m_macro_names[] = {"STABLE","JOINT_CONSENSUS"}; MemberMgr::MemberChangeContext MemberMgr::m_memchg_ctx; TwoPhaseCommitBatchTask<TypePtrFollowerEntity> 
MemberMgr::m_phaseI_task; TwoPhaseCommitBatchTask<TypePtrFollowerEntity> MemberMgr::m_phaseII_task; #ifdef _MEMBER_MANAGEMENT_TEST_ bool MemberMgr::m_execution_flag = false; #endif using ::raft::ErrorCode; using ::grpc::CompletionQueue; using ::RaftCore::Common::WriteLock; using ::RaftCore::Common::ReadLock; using ::RaftCore::Common::FinishStatus; using ::RaftCore::Leader::FollowerEntity; using ::RaftCore::Leader::FollowerStatus; using ::RaftCore::Storage::StorageGlobal; using ::RaftCore::Topology; using ::RaftCore::CTopologyMgr; using ::RaftCore::Leader::LeaderView; using ::RaftCore::Leader::BackGroundTask::ReSyncLogContext; using ::RaftCore::DataStructure::LockFreePriotityQueue; using ::RaftCore::State::StateMgr; using ::RaftCore::State::RaftRole; using ::RaftCore::Election::ElectionMgr; using ::RaftCore::Global::GlobalEnv; using ::RaftCore::Tools::TypeSysTimePoint; using ::RaftCore::Client::MemberChangePrepareAsyncClient; using ::RaftCore::Client::MemberChangeCommitAsyncClient; const MemberMgr::JointTopology& MemberMgr::JointTopology::operator=(const MemberMgr::JointTopology &one) { this->m_new_cluster = one.m_new_cluster; this->m_added_nodes = one.m_added_nodes; this->m_removed_nodes = one.m_removed_nodes; this->m_leader_gone_away = one.m_leader_gone_away; this->m_old_leader = one.m_old_leader; return *this; } const MemberMgr::JointTopology& MemberMgr::JointTopology::operator=(MemberMgr::JointTopology &&one) { this->m_new_cluster = std::move(one.m_new_cluster); this->m_added_nodes = std::move(one.m_added_nodes); this->m_removed_nodes = std::move(one.m_removed_nodes); this->m_leader_gone_away = one.m_leader_gone_away; this->m_old_leader = one.m_old_leader; return *this; } void MemberMgr::JointTopology::Reset() noexcept{ this->m_new_cluster.clear(); this->m_added_nodes.clear(); this->m_removed_nodes.clear(); this->m_leader_gone_away = false; this->m_old_leader = ""; } void MemberMgr::JointTopology::Update(const std::set<std::string> * p_new_cluster)noexcept { if 
(p_new_cluster) this->m_new_cluster = *p_new_cluster; //Topology should be ready for reading. Topology _cur_topo; CTopologyMgr::Read(&_cur_topo); bool _is_leader = StateMgr::GetRole() == RaftRole::LEADER; //Find added nodes. this->m_added_nodes.clear(); for (const auto& _item : this->m_new_cluster) { if (_cur_topo.InCurrentCluster(_item)) continue; FollowerEntity* _p_follower = nullptr; if (_is_leader) _p_follower = new FollowerEntity(_item, FollowerStatus::RESYNC_LOG, uint32_t(JointConsensusMask::IN_NEW_CLUSTER)); this->m_added_nodes.emplace(_item, _p_follower); } //Find removed nodes. this->m_removed_nodes.clear(); { ReadLock _r_lock(LeaderView::m_hash_followers_mutex); for (const auto& _pair : LeaderView::m_hash_followers) if (this->m_new_cluster.find(_pair.first) == this->m_new_cluster.cend()) this->m_removed_nodes.emplace(_pair.first); } this->m_leader_gone_away = (this->m_new_cluster.find(_cur_topo.m_leader)==this->m_new_cluster.cend()); this->m_old_leader = _cur_topo.m_leader; } void MemberMgr::JointSummary::Reset()noexcept { this->m_joint_status = EJointStatus::STABLE; this->m_joint_topology.Reset(); //m_version is monotonic,shouldn't been reset in any time. } void MemberMgr::Initialize() noexcept { m_in_processing.store(false); m_joint_summary.Reset(); ResetMemchgEnv(); LoadFile(); } void MemberMgr::UnInitialize() noexcept { m_joint_summary.Reset(); } void MemberMgr::ResetMemchgEnv() noexcept { //Reset all the followings before using them. m_joint_topo_snapshot.Reset(); m_memchg_ctx.Reset(); } void MemberMgr::LoadFile() noexcept { //Read the config file. 
std::ifstream f_input(_AURORA_MEMBER_CONFIG_FILE_); for (std::string _ori_line; std::getline(f_input, _ori_line); ) { std::string _line = ""; std::copy_if(_ori_line.begin(), _ori_line.end(), std::back_inserter(_line), [](char c) { return c != '\r' && c != '\n'; }); if (_line.find(_AURORA_MEMBER_CLUSTER_STATUS_PREFIX_) != std::string::npos) { std::size_t pos = _line.find(":"); CHECK (pos != std::string::npos) << "cannot find delimiter[:] in member config file, _line:" << _line; m_joint_summary.m_joint_status = StringToMacro(_line.substr(pos + 1).c_str()); continue; } if (_line.find(_AURORA_MEMBER_NEW_CLUSTER_PREFIX_) != std::string::npos) { std::size_t pos = _line.find(":"); CHECK (pos != std::string::npos) << "cannot find delimiter[:] in member config file, _line:" << _line; ::RaftCore::Tools::StringSplit(_line.substr(pos + 1),',',m_joint_summary.m_joint_topology.m_new_cluster); continue; } if (_line.find(_AURORA_MEMBER_VERSION_PREFIX_) != std::string::npos) { std::size_t pos = _line.find(":"); CHECK (pos != std::string::npos) << "cannot find delimiter[:] in member config file, _line:" << _line; m_joint_summary.m_version = std::atol(_line.substr(pos + 1).c_str()); continue; } } m_joint_summary.m_joint_topology.Update(); } void MemberMgr::SaveFile() noexcept{ ReadLock _r_lock(m_mutex); std::FILE* f_handler = std::fopen(_AURORA_MEMBER_CONFIG_FILE_, "w+"); CHECK(f_handler != nullptr) << "open BaseState file " << _AURORA_MEMBER_CONFIG_FILE_ << "fail..,errno:" << errno; auto &_cluster_topo = m_joint_summary.m_joint_topology; std::string _new_cluster = ""; for (auto iter = _cluster_topo.m_new_cluster.crbegin(); iter != _cluster_topo.m_new_cluster.crend(); ++iter) _new_cluster += ((*iter) + ","); std::string buf = _AURORA_MEMBER_CLUSTER_STATUS_PREFIX_ + std::string(MacroToString(m_joint_summary.m_joint_status)) + "\n" + _AURORA_MEMBER_NEW_CLUSTER_PREFIX_ + _new_cluster + "\n" + _AURORA_MEMBER_VERSION_PREFIX_ + std::to_string(m_joint_summary.m_version) + "\n"; std::size_t 
written = fwrite(buf.data(), 1, buf.size(), f_handler); CHECK(written == buf.size()) << "fwrite BaseState file fail...,errno:" << errno << ",written:" << written << ",expected:" << buf.size(); CHECK(!std::fclose(f_handler)) << "close BaseState file fail...,errno:" << errno; } void MemberMgr::NotifyOnSynced(TypePtrFollowerEntity &shp_follower) noexcept { LOG(INFO) << "[Membership Change] peer " << shp_follower->my_addr << " notify called,switched to NORMAL status"; shp_follower->m_status = FollowerStatus::NORMAL; std::unique_lock<std::mutex> _lock(m_resync_data_cv_mutex); m_resync_data_cv.notify_all(); } void MemberMgr::SwitchToJointConsensus(JointTopology &updated_topo,uint32_t version)noexcept { WriteLock _w_lock(m_mutex); m_joint_summary.m_joint_status = EJointStatus::JOINT_CONSENSUS; m_joint_summary.m_joint_topology = std::move(updated_topo); //Update old cluster followers' status. if (StateMgr::GetRole() == RaftRole::LEADER) { for (const auto& _item : m_joint_summary.m_joint_topology.m_new_cluster) { ReadLock _r_lock(LeaderView::m_hash_followers_mutex); auto _iter = LeaderView::m_hash_followers.find(_item); if (_iter != LeaderView::m_hash_followers.cend()) _iter->second->m_joint_consensus_flag |= uint32_t(JointConsensusMask::IN_NEW_CLUSTER); } } m_joint_summary.m_version++; if (version != _MAX_UINT32_) m_joint_summary.m_version = version; _w_lock.unlock(); //Persist changes. SaveFile(); } uint32_t MemberMgr::GetVersion()noexcept { ReadLock _r_lock(m_mutex); return m_joint_summary.m_version; } bool MemberMgr::SwitchToStable()noexcept { /*Since topology config and membership-change config are in separated files, they cannot be updated atomically, but if we update topology first ,the leader or follower server can still serve normally after recovered from a crash. */ ::RaftCore::Topology _old_topo; ::RaftCore::CTopologyMgr::Read(&_old_topo); //Update to the latest topology. 
::RaftCore::Topology _new_topo; { ReadLock _w_lock(m_mutex); auto &_cluster_topo = m_joint_summary.m_joint_topology; for (const auto &_node : _cluster_topo.m_new_cluster) { if (_node == _old_topo.m_leader) { _new_topo.m_leader = _node; continue; } _new_topo.m_followers.emplace(_node); } _new_topo.m_my_addr = _old_topo.m_my_addr; ::RaftCore::CTopologyMgr::Update(_new_topo); //Update to latest follower list in leader's view. if (StateMgr::GetRole() == RaftRole::LEADER) { WriteLock _r_lock(LeaderView::m_hash_followers_mutex); LeaderView::m_hash_followers.clear(); for (const auto &_node : _cluster_topo.m_new_cluster) if (_node != _new_topo.m_leader) LeaderView::m_hash_followers[_node] = std::shared_ptr<FollowerEntity>(new FollowerEntity(_node)); } } { WriteLock _w_lock(m_mutex); m_joint_summary.Reset(); m_joint_summary.m_version++; } //Persist changes. SaveFile(); return _new_topo.InCurrentCluster(StateMgr::GetMyAddr()); } std::string MemberMgr::FindPossibleAddress(const std::list<std::string> &nic_addrs)noexcept{ ReadLock _r_lock(m_mutex); auto &_new_nodes = m_joint_summary.m_joint_topology.m_added_nodes; for (const auto &_item : nic_addrs) { auto _iter = _new_nodes.find(_item); if (_iter != _new_nodes.cend()) return _iter->first; } return ""; } const char* MemberMgr::MacroToString(EJointStatus enum_val) noexcept { return m_macro_names[int(enum_val)]; } EJointStatus MemberMgr::StringToMacro(const char* src) noexcept { int _size = sizeof(m_macro_names) / sizeof(const char*); for (int i = 0; i < _size; ++i) if (std::strncmp(src, m_macro_names[i], std::strlen(m_macro_names[i])) == 0) return (EJointStatus)i; CHECK(false) << "convert string to enum fail,unknown cluster status :" << src; //Just for erase compile warnings. 
return EJointStatus::STABLE; } const char* MemberMgr::PullTrigger(const std::set<std::string> &new_cluster)noexcept { bool _in_processing = false; if (!m_in_processing.compare_exchange_strong(_in_processing, true)) { static const char * _p_err_msg = "I'm changing the membership now,cannot process another changing request."; LOG(ERROR) << "[MembershipChange] " << _p_err_msg; return _p_err_msg; } /*Issuing a resync log task to the background threads.This will cover all cases: 1. the new node's log doesn't lag too far behind ,a few resync log operations is enough. 2. the new node's log do lag too far behind , will trigger the resync-data operation eventually. 3. the new node's log is empty , trigger resync-data operation eventually. */ /*Using ID-LCL instead of ID-LRL to reduce the amount of log entries each resync log RPC may carry, even though both the two options will eventually triggered the SYNC-DATA process. */ auto _id_lcl = StorageGlobal::m_instance.GetLastCommitted(); m_joint_topo_snapshot.Update(&new_cluster); { ReadLock _r_lock(m_mutex); for (auto _iter = m_joint_topo_snapshot.m_added_nodes.cbegin(); _iter != m_joint_topo_snapshot.m_added_nodes.cend(); ++_iter) { std::shared_ptr<ReSyncLogContext> _shp_task(new ReSyncLogContext()); _shp_task->m_last_sync_point = _id_lcl; _shp_task->m_follower = _iter->second; _shp_task->m_on_success_cb = &MemberMgr::NotifyOnSynced; auto _ret_code = LeaderView::m_priority_queue.Push(LockFreePriotityQueue::TaskType::RESYNC_LOG, &_shp_task); if (_ret_code != QUEUE_SUCC) LOG(INFO) << "[Membership Change] Add RESYNC-LOG task ret code:" << _ret_code << ",logID:" << _shp_task->m_last_sync_point << ",remote peer:" << _iter->first; } } /*Note:Waiting for the log replication task to finish is a time consuming operation, we'd better entrust that to a dedicated thread, returning to client immediately. It's the client's(usually the administrator) duty to check when the membership change job will be done. 
*/ std::thread _th_member_changing(MemberMgr::Routine); _th_member_changing.detach(); return nullptr; } void MemberMgr::WaitForSyncDataDone() noexcept { std::size_t _required_synced_size = m_joint_topo_snapshot.m_added_nodes.size(); std::unique_lock<std::mutex> _lock(m_resync_data_cv_mutex); auto _wait_cond = [&]()->bool{ std::size_t _counter = 0; //Calculate #followers that are fully synced. { ReadLock _r_lock(m_mutex); for (auto iter = m_joint_topo_snapshot.m_added_nodes.cbegin(); iter != m_joint_topo_snapshot.m_added_nodes.cend(); ++iter) if (iter->second->m_status == FollowerStatus::NORMAL) { _counter++; LOG(INFO) << "[Membership Change] new node " << iter->second->my_addr << ",finished sync data"; } } /*Need to wait until all new nodes get synchronized,reason for this is that there will be no change for nodes who lag behind to catch up in the future , in the current implementation. */ return _counter >= _required_synced_size ; }; auto _wait_sec = std::chrono::seconds(::RaftCore::Config::FLAGS_memchg_sync_data_wait_seconds); while (!m_resync_data_cv.wait_for(_lock, _wait_sec, _wait_cond)) LOG(WARNING) << "[Membership Change] syncing data is not finished yet,continue waiting..."; LOG(INFO) << "[Membership Change] finish to sync data & logs to all the new added nodes."; } void MemberMgr::Routine() noexcept { LOG(INFO) << "[Membership Change] Routine started, waiting for the majority of new cluster get synced."; WaitForSyncDataDone(); uint32_t _next_verion = 1; { ReadLock _r_lock(m_mutex); _next_verion += m_joint_summary.m_version; } auto& _ctx_phaseI = m_memchg_ctx.m_phaseI_state; auto &_joint_new_cluster = m_joint_topo_snapshot.m_new_cluster; { ReadLock _r_lock(LeaderView::m_hash_followers_mutex); for (auto &_pair_kv : LeaderView::m_hash_followers) { m_phaseI_task.m_todo.emplace_back(_pair_kv.second); bool _in_new = _joint_new_cluster.find(_pair_kv.first) != _joint_new_cluster.cend(); uint32_t _flag = uint32_t(JointConsensusMask::IN_OLD_CLUSTER); if 
(_joint_new_cluster.find(_pair_kv.first) != _joint_new_cluster.cend()) _flag |= uint32_t(JointConsensusMask::IN_NEW_CLUSTER); m_phaseI_task.m_flags.emplace_back(_flag); } _ctx_phaseI.m_cur_cluster.m_cq_entrust_num = (int)LeaderView::m_hash_followers.size(); m_memchg_ctx.m_cluster_size = LeaderView::m_hash_followers.size() + 1; m_memchg_ctx.m_cluster_majority = m_memchg_ctx.m_cluster_size / 2 + 1; } auto &_joint_added_nodes = m_joint_topo_snapshot.m_added_nodes; for (auto _iter = _joint_added_nodes.begin(); _iter != _joint_added_nodes.end(); ++_iter) { m_phaseI_task.m_todo.emplace_back(_iter->second); m_phaseI_task.m_flags.emplace_back(uint32_t(JointConsensusMask::IN_NEW_CLUSTER)); } _ctx_phaseI.m_new_cluster.m_cq_entrust_num = (int)_joint_new_cluster.size(); if (!m_joint_topo_snapshot.m_leader_gone_away) _ctx_phaseI.m_new_cluster.m_cq_entrust_num--; m_memchg_ctx.m_new_cluster_size = _joint_new_cluster.size(); m_memchg_ctx.m_new_cluster_majority = m_memchg_ctx.m_new_cluster_size / 2 + 1; //Requests in the two phase rpc are the same. auto cur_term = ElectionMgr::m_cur_term.load(); TypePtrMemberChangReq _shp_req(new MemberChangeInnerRequest()); _shp_req->mutable_base()->set_addr(StateMgr::GetMyAddr()); _shp_req->mutable_base()->set_term(cur_term); _shp_req->set_version(_next_verion); for (const auto& _node : _joint_new_cluster) _shp_req->add_node_list(_node); CHECK(PropagateMemberChange(_shp_req, PhaseID::PhaseI)) << "[Membership Change] prepare phase fail " << "of membership changing,cannot revert,check this."; //m_joint_topology is moved after this call. SwitchToJointConsensus(m_joint_topo_snapshot); LOG(INFO) << "switching to joint consensus done. 
going to do phaseII."; #ifdef _MEMBER_MANAGEMENT_TEST_ PendingExecution(); #endif CHECK(PropagateMemberChange(_shp_req, PhaseID::PhaseII)) << "[Membership Change] commit phase fail " << "of membership changing,cannot revert,check this."; bool _still_in_new_cluster = SwitchToStable(); LOG(INFO) << "phaseII and switching to stable done."; /*After successfully changed the membership from the C-old to C-new , there is still one more thing to do : If the current leader , aka this node, is not belonging to the C-new cluster,it need to be stepped down to follower according to the RAFT paper, but in this implementation ,we just shut it down which is also correct, but quite simple and directly. */ if (!_still_in_new_cluster) { LOG(INFO) << "I'm no longer in the new cluster , shutdown myself ,goodbye and have a good time."; GlobalEnv::ShutDown(); } bool _in_processing = true; CHECK(m_in_processing.compare_exchange_strong(_in_processing, false)) << "[MembershipChange] cannot switch in processing status back to true,check this." ; } bool MemberMgr::PropagateMemberChange(TypePtrMemberChangReq &shp_req, PhaseID phase_id) noexcept { TypePtrMemberChangReq* _p_newbie_req = &shp_req; auto *_p_phase_task = &m_phaseI_task; if (phase_id == PhaseID::PhaseII) { _p_phase_task = &m_phaseII_task; MemberChangeInnerRequest *_newbie_req = new MemberChangeInnerRequest(*shp_req); _newbie_req->set_flag(::raft::MembershipFlag::NEWBIE); _p_newbie_req = new TypePtrMemberChangReq(_newbie_req); } std::vector<TypePtrFollowerEntity> &_todo_set = _p_phase_task->m_todo; std::vector<uint32_t> &_flags = _p_phase_task->m_flags; auto _req_setter = [&](std::shared_ptr<::raft::MemberChangeInnerRequest>& _target, bool newbie = false)->void { _target = newbie ? 
*_p_newbie_req : shp_req; }; std::shared_ptr<::grpc::CompletionQueue> _shp_cq(new ::grpc::CompletionQueue()); auto _entrust_prepare_client = [&](auto &_shp_channel,auto &shp_follower, std::size_t idx){ auto _shp_client = new MemberChangePrepareAsyncClient(_shp_channel, _shp_cq); auto _f_prepare = std::bind(&::raft::RaftService::Stub::PrepareAsyncMemberChangePrepare, _shp_client->GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); auto _bind_setter = std::bind(_req_setter, std::placeholders::_1, false); _shp_client->EntrustRequest(_bind_setter, _f_prepare, ::RaftCore::Config::FLAGS_memchg_rpc_timeo_ms); _shp_client->PushCallBackArgs(shp_follower.get()); _shp_client->PushCallBackArgs(reinterpret_cast<void*>(idx)); }; auto _entrust_commit_client = [&](auto &_shp_channel, auto &shp_follower, std::size_t idx) { auto _shp_client = new MemberChangeCommitAsyncClient(_shp_channel, _shp_cq); auto &_added_nodes = m_joint_topo_snapshot.m_added_nodes; bool _im_new_node = m_joint_topo_snapshot.m_leader_gone_away; _im_new_node &= _added_nodes.find(shp_follower->my_addr) != _added_nodes.cend(); auto _f_prepare = std::bind(&::raft::RaftService::Stub::PrepareAsyncMemberChangeCommit, _shp_client->GetStub().get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); auto _bind_setter = std::bind(_req_setter, std::placeholders::_1, _im_new_node); _shp_client->EntrustRequest(_bind_setter, _f_prepare, ::RaftCore::Config::FLAGS_memchg_rpc_timeo_ms); _shp_client->PushCallBackArgs(shp_follower.get()); _shp_client->PushCallBackArgs(reinterpret_cast<void*>(idx)); }; int _entrust_total_num = 0; for (std::size_t i = 0; i < _todo_set.size(); ++i) { auto &_shp_follower = _todo_set[i]; if (_shp_follower->m_status != FollowerStatus::NORMAL) { LOG(WARNING) << "[Membership Change] follower " << _shp_follower->my_addr << " is under " << FollowerEntity::MacroToString(_shp_follower->m_status) << ",won't propagate member change prepare request to it"; 
continue; } auto _shp_channel = _shp_follower->m_shp_channel_pool->GetOneChannel(); if (phase_id == PhaseID::PhaseI) _entrust_prepare_client(_shp_channel, _shp_follower, i); else { _entrust_commit_client(_shp_channel, _shp_follower, i); } _entrust_total_num++; } //Polling for phaseI. PollingCQ(_shp_cq,_entrust_total_num); if (phase_id == PhaseID::PhaseII) { delete _p_newbie_req; return m_memchg_ctx.JudgeAllFinished(); } //phase_id == PhaseID::PhaseI return m_memchg_ctx.JudgePhaseIDetermined() == FinishStatus::POSITIVE_FINISHED; } void MemberMgr::Statistic(const ::grpc::Status &status, const ::raft::MemberChangeInnerResponse& rsp, void* ptr_follower, uint32_t joint_flag, PhaseID phase_id) noexcept { auto* _ptr_follower = (FollowerEntity*)ptr_follower; const auto& _addr = _ptr_follower->my_addr; auto *_phase_state = &m_memchg_ctx.m_phaseI_state; if (phase_id == PhaseID::PhaseII) _phase_state = &m_memchg_ctx.m_phaseII_state; std::string _phase_str = (phase_id == PhaseID::PhaseI) ? "prepare" : "commit"; if (!status.ok()) { LOG(ERROR) << "[Membership Change]" << _phase_str << "fail,error code:" << status.error_code() << ",error msg:" << status.error_message() << ",follower joint consensus flag:" << joint_flag << ",remote peer:" << _addr; _phase_state->IncreaseExplicitFail(joint_flag); return; } const auto &comm_rsp = rsp.comm_rsp(); auto _error_code = comm_rsp.result(); if (_error_code!=ErrorCode::SUCCESS) { LOG(INFO) << "[Membership Change] peer " << _addr << " " << _phase_str << " fail,follower joint consensus flag:" << joint_flag << ",remote peer:" << _addr; _phase_state->IncreaseExplicitFail(joint_flag); return; } _phase_state->IncreaseSuccess(joint_flag); LOG(INFO) << "[Membership Change] peer " << _addr << " " << _phase_str << " successfully,follower joint consensus flag:" << joint_flag << ",remote peer:" << _addr; } void MemberMgr::MemberChangePrepareCallBack(const ::grpc::Status &status, const ::raft::MemberChangeInnerResponse& rsp, void* ptr_follower, uint32_t 
idx) noexcept { Statistic(status, rsp, ptr_follower, m_phaseI_task.m_flags[idx], PhaseID::PhaseI); for (std::size_t i = 0; i < m_phaseI_task.m_todo.size(); ++i) { auto &_shp_follower = m_phaseI_task.m_todo[i]; if (_shp_follower->my_addr == ((FollowerEntity*)ptr_follower)->my_addr) { m_phaseII_task.m_todo.emplace_back(_shp_follower); uint32_t _node_flag = m_phaseI_task.m_flags[i]; m_phaseII_task.m_flags.emplace_back(_node_flag); m_memchg_ctx.m_phaseII_state.IncreaseEntrust(_node_flag); } } } void MemberMgr::MemberChangeCommitCallBack(const ::grpc::Status &status, const ::raft::MemberChangeInnerResponse& rsp, void* ptr_follower, uint32_t idx)noexcept { Statistic(status, rsp, ptr_follower, m_phaseII_task.m_flags[idx], PhaseID::PhaseII); if (m_memchg_ctx.JudgeAllFinished()) LOG(INFO) << "[Membership Change] done"; } void MemberMgr::PollingCQ(std::shared_ptr<::grpc::CompletionQueue> shp_cq,int entrust_num)noexcept { void* tag; bool ok; int _counter = 0; while (_counter < entrust_num) { if (!shp_cq->Next(&tag, &ok)) break; ::RaftCore::Common::ReactBase* _p_ins = (::RaftCore::Common::ReactBase*)tag; _p_ins->React(ok); _counter++; } } #ifdef _MEMBER_MANAGEMENT_TEST_ void MemberMgr::PendingExecution()noexcept { while(!m_execution_flag) std::this_thread::sleep_for(std::chrono::seconds(1)); m_execution_flag = false; } void MemberMgr::ContinueExecution()noexcept { m_execution_flag = true; } #endif }
27,122
C++
.cc
526
44.859316
174
0.656213
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,278
candidate_view.cc
ppLorins_aurora/src/candidate/candidate_view.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "candidate/candidate_view.h" namespace RaftCore::Candidate { void CandidateView::Initialize() noexcept{ CommonView::Initialize(); } void CandidateView::UnInitialize() noexcept { CommonView::UnInitialize(); } }
1,006
C++
.cc
23
42
73
0.753593
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,279
candidate_request.cc
ppLorins_aurora/src/candidate/candidate_request.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "candidate/candidate_request.h" namespace RaftCore::Candidate { template<typename T,typename R,typename Q> CandidateUnaryRequest<T, R, Q>::CandidateUnaryRequest() noexcept {} template<typename T,typename R,typename Q> CandidateUnaryRequest<T,R,Q>::~CandidateUnaryRequest() noexcept {} }
1,076
C++
.cc
21
49.761905
73
0.764593
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,280
global_env.cc
ppLorins_aurora/src/global/global_env.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <string> #include "grpc/grpc.h" #include "grpc++/server_context.h" #include "grpc++/security/server_credentials.h" #include "common/request_base.h" #include "binlog/binlog_singleton.h" #include "storage/storage_singleton.h" #include "service/service.h" #include "topology/topology_mgr.h" #include "leader/leader_view.h" #include "follower/follower_view.h" #include "candidate/candidate_view.h" #include "guid/guid_generator.h" #include "election/election.h" #include "tools/timer.h" #include "tools/lock_free_priority_queue.h" #include "global/global_env.h" namespace RaftCore::Global { std::unique_ptr<::grpc::Server> GlobalEnv::m_pserver; volatile bool GlobalEnv::m_running = false; std::vector<ReactWorkGroup<>> GlobalEnv::m_vec_notify_cq_workgroup; std::vector<ReactWorkGroup<>> GlobalEnv::m_vec_call_cq_workgroup; std::vector<ReactWorkGroup<CompletionQueue>> GlobalEnv::m_vec_client_cq_workgroup; std::shared_ptr<::raft::RaftService::AsyncService> GlobalEnv::m_async_service; volatile bool GlobalEnv::m_cq_fully_shutdown = false; std::string GlobalEnv::m_server_addr; ::grpc::ServerBuilder GlobalEnv::m_builder; bool GlobalEnv::IsRunning() noexcept { return m_running; } TypePtrCQ<CompletionQueue> GlobalEnv::GetClientCQInstance(uint32_t idx) 
noexcept { return m_vec_client_cq_workgroup[idx].GetCQ(); } void GlobalEnv::InitGrpcEnv() noexcept { m_server_addr = std::string(::RaftCore::Config::FLAGS_ip) + ":" + std::to_string(::RaftCore::Config::FLAGS_port); m_builder.AddListeningPort(m_server_addr, ::grpc::InsecureServerCredentials()); m_async_service.reset(new ::raft::RaftService::AsyncService()); m_builder.RegisterService(m_async_service.get()); m_vec_notify_cq_workgroup.clear(); m_vec_call_cq_workgroup.clear(); TypeReactorFunc _reactor = ::RaftCore::Common::ReactBase::GeneralReacting; uint32_t _notify_cq_thread_num = ::RaftCore::Config::FLAGS_notify_cq_threads; uint32_t _notify_cq_num = ::RaftCore::Config::FLAGS_notify_cq_num; for (std::size_t i = 0; i < _notify_cq_num; ++i) m_vec_notify_cq_workgroup.emplace_back(m_builder.AddCompletionQueue(), _reactor, _notify_cq_thread_num); for (std::size_t i = 0; i < ::RaftCore::Config::FLAGS_call_cq_num; ++i) { uint32_t _call_cq_thread_num = ::RaftCore::Config::FLAGS_call_cq_threads; m_vec_call_cq_workgroup.emplace_back(m_builder.AddCompletionQueue(), _reactor, _call_cq_thread_num); } //Each CQ can only get one thread to achieve maximum entrusting speed. uint32_t _client_cq_num = _notify_cq_thread_num * _notify_cq_num; //The additional CQ is for the response processing dedicated CQ. if (::RaftCore::State::StateMgr::GetRole() == State::RaftRole::LEADER) { m_vec_client_cq_workgroup.clear(); for (std::size_t i = 0; i < _client_cq_num; ++i) { uint32_t _client_cq_thread_num = ::RaftCore::Config::FLAGS_client_thread_num; TypeReactorFunc _client_reactor = ::RaftCore::Leader::LeaderView::ClientThreadReacting; TypePtrCQ<CompletionQueue> _shp_cq(new CompletionQueue()); m_vec_client_cq_workgroup.emplace_back(_shp_cq, _client_reactor, _client_cq_thread_num); } } } void GlobalEnv::StartGrpcService() noexcept{ m_pserver = m_builder.BuildAndStart(); //Spawning request pool instances must be after server successfully built. 
for (std::size_t i = 0; i < ::RaftCore::Config::FLAGS_notify_cq_num; ++i) { for (std::size_t j = 0; j < ::RaftCore::Config::FLAGS_request_pool_size; ++j) SpawnFamilyBucket(m_async_service, i); } LOG(INFO) << "Server listening on " << m_server_addr; auto _start_workgroup_threads = [&](auto &work_group) { for (auto &_item : work_group) _item.StartPolling(); }; LOG(INFO) << "spawning notify_cq polling threads."; _start_workgroup_threads(m_vec_notify_cq_workgroup); //After starting notify threads, we need to update the mapping. ::RaftCore::Leader::LeaderView::UpdateThreadMapping(); LOG(INFO) << "spawning call_cq polling threads."; _start_workgroup_threads(m_vec_call_cq_workgroup); LOG(INFO) << "spawning client_cq polling threads."; _start_workgroup_threads(m_vec_client_cq_workgroup); m_running = true; auto _wait_workgroup_threads = [&](auto &work_group) { for (auto &_item : work_group) _item.WaitPolling(); }; LOG(INFO) << "waiting notify_cq polling threads to exist"; _wait_workgroup_threads(m_vec_notify_cq_workgroup); LOG(INFO) << "waiting call_cq polling threads to exist"; _wait_workgroup_threads(m_vec_call_cq_workgroup); LOG(INFO) << "waiting client_cq polling threads to exist"; _wait_workgroup_threads(m_vec_client_cq_workgroup); m_cq_fully_shutdown = true; VLOG(89) << "fully shutdown set to true"; } void GlobalEnv::SpawnFamilyBucket(std::shared_ptr<::raft::RaftService::AsyncService> shp_svc, std::size_t cq_idx) noexcept { auto _notify_cq = m_vec_notify_cq_workgroup[cq_idx].GetCQ(); std::size_t _call_cq_idx = cq_idx % m_vec_call_cq_workgroup.size(); auto _call_cq = m_vec_call_cq_workgroup[_call_cq_idx].GetCQ(); //Entrusting a complete set of request instances to each CQ. 
new ::RaftCore::Service::Write(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::Read(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::MembershipChange(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::AppendEntries(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::CommitEntries(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::SyncData(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::MemberChangePrepare(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::MemberChangeCommit(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::PreVote(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::Vote(shp_svc, _notify_cq, _call_cq); new ::RaftCore::Service::HeartBeat(shp_svc, _notify_cq, _call_cq); } void GlobalEnv::StopGrpcService() noexcept { m_pserver->Shutdown(); m_pserver->Wait(); auto _shutdown_workgroup_cqs = [&](auto &work_group) { for (auto &_item : work_group) _item.ShutDownCQ(); }; LOG(INFO) << "shutting down notify_cq."; _shutdown_workgroup_cqs(m_vec_notify_cq_workgroup); LOG(INFO) << "shutting down call_cq."; _shutdown_workgroup_cqs(m_vec_call_cq_workgroup); LOG(INFO) << "shutting down client_cq."; _shutdown_workgroup_cqs(m_vec_client_cq_workgroup); //Need to wait for all CQs shutdown. while (!m_cq_fully_shutdown); m_running = false; } void GlobalEnv::InitialEnv(bool switching_role) noexcept { LOG(INFO) << "start initialing global env."; //Check config validity first. 
CHECK(::RaftCore::Config::FLAGS_follower_check_heartbeat_interval_ms < ::RaftCore::Config::FLAGS_leader_heartbeat_interval_ms); CHECK(::RaftCore::Config::FLAGS_leader_heartbeat_interval_ms < ::RaftCore::Config::FLAGS_election_heartbeat_timeo_ms); CHECK(::RaftCore::Config::FLAGS_memory_table_max_item < ::RaftCore::Config::FLAGS_binlog_max_log_num); CHECK(::RaftCore::Config::FLAGS_garbage_deque_retain_num >= 1); CHECK(::RaftCore::Config::FLAGS_notify_cq_num * ::RaftCore::Config::FLAGS_notify_cq_threads <= ::RaftCore::Config::FLAGS_client_pool_size); //TODO: check #threads doesn't exceeds m_step_len //#-------------------------------Init topology-------------------------------#// ::RaftCore::CTopologyMgr::Initialize(); ::RaftCore::Topology global_topo; ::RaftCore::CTopologyMgr::Read(&global_topo); //#-------------------------------Init State Manager-------------------------------#// ::RaftCore::State::StateMgr::Initialize(global_topo); /* if (::RaftCore::State::StateMgr::AddressUndetermined()) { const auto &_nic_addrs = ::RaftCore::State::StateMgr::GetNICAddrs(); auto _address = ::RaftCore::Member::MemberMgr::FindPossibleAddress(_nic_addrs); CHECK(!_address.empty()) << "can't find my address in both topology and membership config files."; ::RaftCore::State::StateMgr::SetMyAddr(_address); }*/ //#-------------------------------Init Global Timer-------------------------------#// ::RaftCore::Timer::GlobalTimer::Initialize(); const char* _p_role = ::RaftCore::State::StateMgr::GetRoleStr(); LOG(INFO) << "--------------------started as " << _p_role << "--------------------"; //#-------------------------------Init Storage-------------------------------#// ::RaftCore::Storage::StorageGlobal::m_instance.Initialize(_p_role); /*BinLogGlobal must be initialized after StorageGlobal to avoid opening the binlog file for multiple times. 
*/ //#-------------------------------Init Binlog Operator-------------------------------#// ::RaftCore::BinLog::BinLogGlobal::m_instance.Initialize(_p_role); //#-------------------------------Init Guid File-------------------------------#// auto _lrl = ::RaftCore::BinLog::BinLogGlobal::m_instance.GetLastReplicated(); ::RaftCore::Guid::GuidGenerator::Initialize(_lrl.m_index); //#-------------------------------Init grpc env-------------------------------#// auto _current_role = ::RaftCore::State::StateMgr::GetRole(); if (!switching_role) InitGrpcEnv(); //#-------------------------------Init leader-------------------------------#// if (_current_role == ::RaftCore::State::RaftRole::LEADER) { //LeaderView initialization must be after grpc env initial. ::RaftCore::Leader::LeaderView::Initialize(global_topo); } //#-------------------------------Init Follower-------------------------------#// if (_current_role == ::RaftCore::State::RaftRole::FOLLOWER) ::RaftCore::Follower::FollowerView::Initialize(switching_role); //#-------------------------------Init Candidate-------------------------------#// if (_current_role == ::RaftCore::State::RaftRole::CANDIDATE) ::RaftCore::Candidate::CandidateView::Initialize(); //#-------------------------------Init Election Manager.-------------------------------#// ::RaftCore::Election::ElectionMgr::Initialize(); //#-------------------------------Init Membership Manager.-------------------------------#// ::RaftCore::Member::MemberMgr::Initialize(); LOG(INFO) << "finish initialing global env."; } void GlobalEnv::UnInitialEnv(::RaftCore::State::RaftRole state) noexcept { auto _current_role = ::RaftCore::State::StateMgr::GetRole(); bool _from_old_state = (state != ::RaftCore::State::RaftRole::UNKNOWN); if (_from_old_state) _current_role = state; //#-------------------------------UnInit Global Timer-------------------------------#// /*Note : This should firstly be done before state manager ,which is the dependee.*/ 
::RaftCore::Timer::GlobalTimer::UnInitialize(); //#----------------------------UnInit Server State.-----------------------------#// /*Note : This should firstly be done to prevent server from serving newly coming requests.*/ ::RaftCore::State::StateMgr::UnInitialize(); ::RaftCore::CTopologyMgr::UnInitialize(); //#-------------------------------UnInit guid file-------------------------------#// ::RaftCore::Guid::GuidGenerator::UnInitialize(); //#-------------------------------UnInit binlog operator-------------------------------#// ::RaftCore::BinLog::BinLogGlobal::m_instance.UnInitialize(); //#-------------------------------UnInit Storage-------------------------------#// ::RaftCore::Storage::StorageGlobal::m_instance.UnInitialize(); //#-------------------------------UnInit leader-------------------------------#// if (_current_role == ::RaftCore::State::RaftRole::LEADER) ::RaftCore::Leader::LeaderView::UnInitialize(); //#-------------------------------UnInit Follower-------------------------------#// if (_current_role == ::RaftCore::State::RaftRole::FOLLOWER) ::RaftCore::Follower::FollowerView::UnInitialize(); //#-------------------------------UnInit Candidate-------------------------------#// if (_current_role == ::RaftCore::State::RaftRole::CANDIDATE) ::RaftCore::Candidate::CandidateView::UnInitialize(); //#-------------------------------UnInit Election Manager.-------------------------------#// ::RaftCore::Election::ElectionMgr::UnInitialize(); //#-------------------------------UnInit Membership Manager.-------------------------------#// ::RaftCore::Member::MemberMgr::UnInitialize(); } void GlobalEnv::RunServer() noexcept{ //#-------------------------------Start server-------------------------------#// StartGrpcService(); } void GlobalEnv::StopServer() noexcept { StopGrpcService(); } void GlobalEnv::ShutDown() noexcept { StopServer(); UnInitialEnv(); } }
13,824
C++
.cc
244
51.663934
143
0.61458
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,281
follower_bg_task.cc
ppLorins_aurora/src/follower/follower_bg_task.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "service/service.h" #include "member/member_manager.h" #include "follower/follower_bg_task.h" namespace RaftCore::Follower::BackGroundTask { DisorderMessageContext::DisorderMessageContext(int value_flag)noexcept{ this->m_value_flag = value_flag; this->m_generation_tp = std::chrono::system_clock::now(); this->m_processed_flag.store(false); //VLOG(89) << "DisorderMessageContext constructed"; } DisorderMessageContext::~DisorderMessageContext()noexcept{ //VLOG(89) << "DisorderMessageContext destructed"; } bool DisorderMessageContext::operator<(const DisorderMessageContext& other)const noexcept { if (this->m_value_flag < 0 || other.m_value_flag > 0) return true; if (this->m_value_flag > 0 || other.m_value_flag < 0) return false; auto &_shp_req = this->m_append_request; auto &_shp_req_other = other.m_append_request; return _shp_req->GetLastLogID() < _shp_req_other->GetLastLogID(); } bool DisorderMessageContext::operator>(const DisorderMessageContext& other)const noexcept { if (this->m_value_flag < 0 || other.m_value_flag > 0) return false; if (this->m_value_flag > 0 || other.m_value_flag < 0) return true; auto &_shp_req = this->m_append_request; auto &_shp_req_other = other.m_append_request; return _shp_req->GetLastLogID() > 
_shp_req_other->GetLastLogID(); } bool DisorderMessageContext::operator==(const DisorderMessageContext& other)const noexcept { if (other.m_value_flag != 0 || this->m_value_flag != 0) return false; auto &_shp_req = this->m_append_request; auto &_shp_req_other = other.m_append_request; return _shp_req->GetLastLogID() == _shp_req_other->GetLastLogID(); } }
2,532
C++
.cc
53
44.132075
92
0.712016
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,282
follower_view.cc
ppLorins_aurora/src/follower/follower_view.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "config/config.h" #include "election/election.h" #include "tools/timer.h" #include "tools/lock_free_priority_queue.h" #include "service/service.h" #include "follower/follower_view.h" namespace RaftCore::Follower { using ::RaftCore::Service::AppendEntries; std::condition_variable FollowerView::m_cv; std::mutex FollowerView::m_cv_mutex; std::chrono::time_point<std::chrono::steady_clock> FollowerView::m_last_heartbeat; std::shared_timed_mutex FollowerView::m_last_heartbeat_lock; /* Note: each follower has exactly one pending_list , the head and tail nodes were constant during the lifetime of this pending list, so it's okay to 'new' an object without caring about when to 'delete' . 
*/ TrivialLockDoubleList<MemoryLogItemFollower> FollowerView::m_phaseI_pending_list(std::shared_ptr<MemoryLogItemFollower>(new MemoryLogItemFollower(0x0, 0x0)), std::shared_ptr<MemoryLogItemFollower>(new MemoryLogItemFollower(_MAX_UINT32_, _MAX_UINT64_))); TrivialLockDoubleList<MemoryLogItemFollower> FollowerView::m_phaseII_pending_list(std::shared_ptr<MemoryLogItemFollower>(new MemoryLogItemFollower(0x0, 0x0)), std::shared_ptr<MemoryLogItemFollower>(new MemoryLogItemFollower(_MAX_UINT32_, _MAX_UINT64_))); TrivialLockSingleList<DisorderMessageContext> FollowerView::m_disorder_list(std::shared_ptr<DisorderMessageContext>(new DisorderMessageContext(-1)), std::shared_ptr<DisorderMessageContext>(new DisorderMessageContext(1))); LockFreeUnorderedSingleList<DoubleListNode<MemoryLogItemFollower>> FollowerView::m_garbage; LockFreeUnorderedSingleList<SingleListNode<DisorderMessageContext>> FollowerView::m_disorder_garbage; using ::RaftCore::Common::ReadLock; using ::RaftCore::Timer::GlobalTimer; using ::RaftCore::Election::ElectionMgr; void FollowerView::Initialize(bool switching_role) noexcept{ CommonView::Initialize(); //Register GC task to the global timer. CommonView::InstallGC<TrivialLockDoubleList,DoubleListNode,MemoryLogItemFollower>(&m_garbage); CommonView::InstallGC<TrivialLockSingleList,SingleListNode,DisorderMessageContext>(&m_disorder_garbage); #ifdef _FOLLOWER_VIEW_TEST_ auto _test = []()->bool { //VLOG(89) << "I'm alive to debug the stuck issue..."; return true; }; GlobalTimer::AddTask(1000, _test); #endif //Initial to time epoch. m_last_heartbeat = std::chrono::time_point<std::chrono::steady_clock>(); //Avoiding immediately checking heartbeat timeout after a switching role event happened. if (switching_role) m_last_heartbeat = std::chrono::steady_clock::now(); decltype(m_last_heartbeat) * _p_last_heartbeat = &m_last_heartbeat; auto _check_heartbeat = [_p_last_heartbeat,switching_role]()->bool { //Just for unit test. 
if (!::RaftCore::Config::FLAGS_checking_heartbeat) return true; ReadLock _r_lock(FollowerView::m_last_heartbeat_lock); /*In a general startup case(non switching-role) , Only after receiving the 1st heartbeat msg from server, could the checking mechanism really getting started.*/ if (!switching_role && (std::chrono::duration_cast<std::chrono::seconds>(_p_last_heartbeat->time_since_epoch()).count() == 0)) return true; auto _diff = std::chrono::steady_clock::now() - (*_p_last_heartbeat); _r_lock.unlock(); auto _diff_ms = std::chrono::duration_cast<std::chrono::milliseconds>(_diff).count(); if (_diff_ms <= ::RaftCore::Config::FLAGS_election_heartbeat_timeo_ms) return true; LOG(INFO) << "leader heartbeat timeout line reached,start electing,diff_ms:" << _diff_ms; /*Election's heartbeat timeout happened ,starting to turn into candidate state.It will do this by creating a new thread to IMMEDIATELY re-initialize the global env , which will terminate the current timer thread the other way round. */ ElectionMgr::ElectionThread(); return false; }; GlobalTimer::AddTask(::RaftCore::Config::FLAGS_follower_check_heartbeat_interval_ms,_check_heartbeat); CommonView::m_running_flag = true; //Start follower routine thread. for (std::size_t i = 0; i < ::RaftCore::Config::FLAGS_iterating_threads; ++i) CommonView::m_vec_routine.emplace_back(new std::thread(AppendEntries::DisorderLogRoutine)); } void FollowerView::UnInitialize() noexcept { //Waiting for routine thread exit. CommonView::m_running_flag = false; for (auto* p_thread : CommonView::m_vec_routine) { p_thread->join(); delete p_thread; } Clear(); CommonView::UnInitialize(); } void FollowerView::Clear() noexcept { m_phaseI_pending_list.Clear(); m_phaseII_pending_list.Clear(); m_disorder_list.Clear(); } }
5,589
C++
.cc
101
50.594059
159
0.729908
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,283
memory_log_follower.cc
ppLorins_aurora/src/follower/memory_log_follower.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "common/comm_defs.h" #include "common/log_identifier.h" #include "follower/memory_log_follower.h" namespace RaftCore::Follower { MemoryLogItemFollower::~MemoryLogItemFollower() noexcept{ //VLOG(89) << "MemoryLogItemFollower destructed " << ::RaftCore::Common::ConvertID(this->m_entity->entity_id()); } MemoryLogItemFollower::MemoryLogItemFollower(uint32_t _term, uint64_t _index) noexcept:MemoryLogItemBase(_term, _index) { //VLOG(89) << "MemoryLogItemFollower constructed pos1"; } MemoryLogItemFollower::MemoryLogItemFollower(const ::raft::Entity &_entity) noexcept: MemoryLogItemBase(_entity) { //VLOG(89) << "MemoryLogItemFollower constructed pos2"; } bool MemoryLogItemFollower::operator==(const MemoryLogItemFollower& _other)const noexcept { if (!MemoryLogItemBase::operator==(_other)) return false; /*MemoryLogItemFollower comparing additional fields because of it is used in TrivialLockDoubleList<T> and could possibly been inserted with same log_id and pre_log_id but different <k,v> pairs. */ if (!EntityIDEqual(this->m_entity->pre_log_id(), _other.GetEntity()->pre_log_id())) return false; //Don't compare their contents, it should be able to being rewrite only by term & idx. 
//(not TODO): improve the comparison by comparing their crc32 values rather than the value itself. //return (this->m_entity->write_op().key() == _other.m_entity->write_op().key() && this->m_entity->write_op().value() == _other.m_entity->write_op().value()); return true; } bool MemoryLogItemFollower::operator!=(const MemoryLogItemFollower& _other)const noexcept { return !this->operator==(_other); } bool MemoryLogItemFollower::operator<=(const MemoryLogItemFollower& _other)const noexcept { return EntityIDSmallerEqual(this->m_entity->entity_id(), _other.m_entity->entity_id()); } bool MemoryLogItemFollower::operator<(const MemoryLogItemFollower& _other)const noexcept { return this->MemoryLogItemBase::operator<(_other); } bool MemoryLogItemFollower::operator>(const MemoryLogItemFollower& _other)const noexcept { return this->MemoryLogItemBase::operator>(_other); } bool CmpMemoryLogFollower(const MemoryLogItemFollower& left, const MemoryLogItemFollower& right) noexcept { return CmpMemoryLog(&left,&right); } }
3,089
C++
.cc
55
53.327273
162
0.751907
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,284
follower_request.cc
ppLorins_aurora/src/follower/follower_request.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "follower/follower_request.h" namespace RaftCore::Follower { template<typename T,typename R,typename Q> FollowerUnaryRequest<T, R, Q>::FollowerUnaryRequest() noexcept {} template<typename T,typename R,typename Q> FollowerUnaryRequest<T, R, Q>::~FollowerUnaryRequest() noexcept {} template<typename T,typename R,typename Q> FollowerBidirectionalRequest<T, R, Q>::FollowerBidirectionalRequest() noexcept {} template<typename T,typename R,typename Q> FollowerBidirectionalRequest<T,R,Q>::~FollowerBidirectionalRequest() noexcept {} }
1,321
C++
.cc
25
51.4
81
0.775097
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,285
gtest_main.cc
ppLorins_aurora/src/gtest/gtest_main.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <iostream> #include "gtest/test_all.h" int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, true); google::InitGoogleLogging(argv[0]); FLAGS_log_dir = "."; FLAGS_logbuflevel = -1; return RUN_ALL_TESTS(); }
1,086
C++
.cc
24
42.75
73
0.725714
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,286
idiot_client.cc
ppLorins_aurora/src/gtest/other/idiot_client.cc
/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <iostream> #include <memory> #include <string> #include <vector> #include <thread> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "grpc/support/log.h" #include "grpc++/server_context.h" #include "grpc++/security/server_credentials.h" #include "grpc++/completion_queue.h" #include "raft.pb.h" #include "raft.grpc.pb.h" using ::grpc::Server; using ::grpc::ServerAsyncResponseWriter; using ::grpc::ServerBuilder; using ::grpc::ServerContext; using ::grpc::CompletionQueue; using ::grpc::ServerCompletionQueue; using ::grpc::Status; class ChannelMgr { public: static void Initialize(int conn_size,std::string addr)noexcept { for (int i = 0; i < conn_size; ++i) { auto _channel_args = ::grpc::ChannelArguments(); std::string _key = "key_" + std::to_string(i); std::string _val = "val_" + std::to_string(i); _channel_args.SetString(_key,_val); auto shp_channel = ::grpc::CreateCustomChannel(addr, grpc::InsecureChannelCredentials(), _channel_args); m_channel_pool.emplace_back(shp_channel); } } static std::shared_ptr<::grpc::Channel> GetOneChannel()noexcept { static std::atomic<uint32_t> _idx; uint32_t _old_val = _idx.fetch_add(1); uint32_t _pool_idx = _old_val % m_channel_pool.size(); return m_channel_pool[_pool_idx]; } static std::vector<std::shared_ptr<::grpc::Channel>> m_channel_pool; }; std::vector<std::shared_ptr<::grpc::Channel>> ChannelMgr::m_channel_pool; uint32_t g_count = 50000; 
std::string g_my_addr = "127.0.0.1:10010"; struct AsyncClientCall { ::raft::AppendEntriesResponse reply; ::grpc::ClientContext context; Status status; std::unique_ptr<::grpc::ClientAsyncResponseReader<::raft::AppendEntriesResponse>> response_reader; }; void AsyncCompleteRpc(CompletionQueue* polling_cq) { void* got_tag; bool ok = false; uint32_t _counter = 0; auto _start = std::chrono::steady_clock::now(); //std::cout << "thread " << std::this_thread::get_id() << " start timer" << std::endl;; while (polling_cq->Next(&got_tag, &ok)) { //std::cout << "before counter:" << _counter << std::endl; //std::this_thread::sleep_for(std::chrono::seconds(2)); AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag); GPR_ASSERT(ok); if (!call->status.ok()) { std::cout << call->status.error_code() << ",msg:" << call->status.error_message(); GPR_ASSERT(false); } delete call; if (++_counter >= g_count) break; } auto _end = std::chrono::steady_clock::now(); auto _ms = std::chrono::duration_cast<std::chrono::milliseconds>(_end - _start); std::cout << "thread " << std::this_thread::get_id() << " inner time cost:" << _ms.count() << std::endl; uint32_t _throughput = g_count / float(_ms.count()) * 1000; std::cout << "thread " << std::this_thread::get_id() << " inner throughput : " << _throughput << std::endl; } class GreeterClient { public: explicit GreeterClient(std::shared_ptr<::grpc::Channel> shp_channel,CompletionQueue* in_cq) { stub_ = ::raft::RaftService::NewStub(shp_channel); this->cq_ = in_cq; //this->cq_ = new CompletionQueue(); } void EntrustSayHello(int idx) { //Shouldn't start with 0 when doing appendEntries. 
idx += 1; ::raft::AppendEntriesRequest request; request.mutable_base()->set_addr(g_my_addr); request.mutable_base()->set_term(0); auto _p_entry = request.add_replicate_entity(); auto _p_entity_id = _p_entry->mutable_entity_id(); _p_entity_id->set_term(0); _p_entity_id->set_idx(idx); auto _p_pre_entity_id = _p_entry->mutable_pre_log_id(); _p_pre_entity_id->set_term(0); _p_pre_entity_id->set_idx(idx - 1); auto _p_wop = _p_entry->mutable_write_op(); _p_wop->set_key("follower_benchmark_key_" + std::to_string(idx)); _p_wop->set_value("follower_benchmark_val_" + std::to_string(idx)); AsyncClientCall* call = new AsyncClientCall; std::chrono::time_point<std::chrono::system_clock> _deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(3100); //call->context.set_deadline(_deadline); call->response_reader = stub_->PrepareAsyncAppendEntries(&call->context, request, cq_); call->response_reader->StartCall(); call->response_reader->Finish(&call->reply, &call->status, (void*)call); } private: std::unique_ptr<::raft::RaftService::Stub> stub_; CompletionQueue* cq_; }; int main(int argc, char** argv) { if (argc != 7) { std::cout << "Usage:./program --count_per_thread=xx --thread_per_cq=xx --cq=xx --addr=xx --conn=xx --my_addr=xx"; return 0; } const char * target_str = "--count_per_thread="; auto p_target = std::strstr(argv[1],target_str); if (p_target == nullptr) { printf("para error argv[1] should be --count_per_thread=xx \n"); return 0; } p_target += std::strlen(target_str); g_count = std::atoi(p_target); uint32_t thread_num = 1; target_str = "--thread_per_cq="; p_target = std::strstr(argv[2],target_str); if (p_target == nullptr) { printf("para error argv[2] should be --thread_per_cq=xx \n"); return 0; } p_target += std::strlen(target_str); thread_num = std::atoi(p_target); target_str = "--cq="; p_target = std::strstr(argv[3],target_str); if (p_target == nullptr) { printf("para error argv[3] should be --cq=xx \n"); return 0; } p_target += std::strlen(target_str); 
int _cq_num = std::atoi(p_target); std::string _addr = "localhost:50051"; target_str = "--addr="; p_target = std::strstr(argv[4],target_str); if (p_target == nullptr) { printf("para error argv[4] should be --addr=xx \n"); return 0; } p_target += std::strlen(target_str); _addr = p_target; target_str = "--conn="; p_target = std::strstr(argv[5],target_str); if (p_target == nullptr) { printf("para error argv[5] should be --conn=xx \n"); return 0; } p_target += std::strlen(target_str); int _conn_size = std::atoi(p_target); target_str = "--my_addr="; p_target = std::strstr(argv[6],target_str); if (p_target == nullptr) { printf("para error argv[6] should be --my_addr=xx \n"); return 0; } p_target += std::strlen(target_str); g_my_addr = p_target; ChannelMgr::Initialize(_conn_size, _addr); //std::cout << "req for each thread:" << g_count << std::endl; //start the polling thread on CQ first. std::vector<std::thread*> _vec_t; std::vector<CompletionQueue*> _vec_cq; for (int i = 0; i < _cq_num; ++i) { auto * _p_cq = new CompletionQueue; _vec_cq.push_back(_p_cq); for (uint32_t i = 0; i < thread_num; i++) _vec_t.push_back(new std::thread(AsyncCompleteRpc,_p_cq)); } std::vector<std::thread*> _vec_entrusting_threads; auto _entrust_reqs = [&](int cq_idx, int thread_idx) { GreeterClient _greeter_client(ChannelMgr::GetOneChannel(), _vec_cq[cq_idx]); int _total_thread_num = thread_num * _cq_num; int _total_thread_idx = thread_num * cq_idx + thread_idx; for (int i = 0; i < g_count; i++) { int req_idx = i * _total_thread_num + _total_thread_idx; _greeter_client.EntrustSayHello(req_idx); // The actual RPC call! } }; auto _start = std::chrono::steady_clock::now(); //start entrusting the requests. for (int i = 0; i < _cq_num; ++i) { for (int m = 0; m < thread_num; ++m) { std::thread* _p_t = new std::thread(_entrust_reqs, i, m); _vec_entrusting_threads.emplace_back(_p_t); } } //Waiting entrusting thread to finish. 
for (uint32_t i = 0; i < _vec_entrusting_threads.size(); i++) _vec_entrusting_threads[i]->join(); std::cout << "entrusting done." << std::endl << std::flush; //Waiting polling thread to finish. for (uint32_t i = 0; i < _vec_t.size(); i++) _vec_t[i]->join(); int _total = _cq_num * thread_num * g_count; std::cout << "g_count:" << _total << std::endl; auto _end = std::chrono::steady_clock::now(); auto _ms = std::chrono::duration_cast<std::chrono::milliseconds>(_end - _start); std::cout << "time cost:" << _ms.count() << std::endl; uint32_t _throughput = _total / float(_ms.count()) * 1000; std::cout << "final throughput : " << _throughput << std::endl; return 0; }
9,713
C++
.cc
226
35.411504
122
0.595669
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,287
idiot_server.cc
ppLorins_aurora/src/gtest/other/idiot_server.cc
/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <memory> #include <iostream> #include <string> #include <thread> #include <vector> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "grpc/support/log.h" #include "grpc++/server_context.h" #include "grpc++/security/server_credentials.h" #include "grpc++/completion_queue.h" #include "raft.pb.h" #include "raft.grpc.pb.h" using ::grpc::Server; using ::grpc::ServerAsyncResponseWriter; using ::grpc::ServerBuilder; using ::grpc::ServerContext; using ::grpc::CompletionQueue; using ::grpc::ServerCompletionQueue; using ::grpc::Status; int g_thread_pair_num = 1; int g_cq_pair_num = 1; int g_pool = 1; class ServerImpl final { public: ~ServerImpl() { server_->Shutdown(); // Always shutdown the completion queue after the server. for (const auto& _cq : m_notify_cq) _cq->Shutdown(); //for (const auto& _cq : m_call_cq) // _cq->Shutdown(); } // There is no shutdown handling in this code. void Run() { std::string server_address("0.0.0.0:60051"); ServerBuilder builder; // Listen on the given address without any authentication mechanism. builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); // Register "service_" as the instance through which we'll communicate with // clients. In this case it corresponds to an *asynchronous* service. builder.RegisterService(&service_); // Get hold of the completion queue used for the asynchronous communication // with the gRPC runtime. 
for (int i = 0; i < g_cq_pair_num; ++i) { //cq_ = builder.AddCompletionQueue(); m_notify_cq.emplace_back(builder.AddCompletionQueue()); std::cout << "notify_cq:" << m_notify_cq[m_notify_cq.size() - 1].get() << " added." << std::endl; //m_call_cq.emplace_back(builder.AddCompletionQueue()); //std::cout <<"call_cq:" << m_call_cq[m_call_cq.size() - 1].get() << " added." << std::endl; } // Finally assemble the server. server_ = builder.BuildAndStart(); std::cout << "Server listening on " << server_address << std::endl; // Proceed to the server's main loop. std::vector<std::thread*> _vec_threads; for (int i = 0; i < g_thread_pair_num ; ++i) { int _cq_idx = i % g_cq_pair_num; for (int j = 0; j < g_pool; ++j) new CallData(&service_,m_notify_cq[_cq_idx].get()); _vec_threads.emplace_back(new std::thread(&ServerImpl::HandleRpcs, this, m_notify_cq[_cq_idx].get())); } std::cout << g_thread_pair_num << " working aysnc threads spawned" << std::endl; for (const auto& _t : _vec_threads) _t->join(); } private: // Class encompassing the state and logic needed to serve a request. 
class CallData { public: CallData(::raft::RaftService::AsyncService* service, ::grpc::ServerCompletionQueue* notify_cq) : service_(service), notify_cq_(notify_cq), responder_(&ctx_), status_(CREATE) { Proceed(); } void Proceed() { if (status_ == CREATE) { status_ = PROCESS; service_->RequestAppendEntries(&ctx_, &request_, &responder_, notify_cq_, notify_cq_, this); } else if (status_ == PROCESS) { new CallData(service_, notify_cq_); reply_.mutable_comm_rsp()->set_result(::raft::ErrorCode::SUCCESS); //std::cout << "i'm here" << std::endl; status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, this); } else { delete this; } } private: ::raft::RaftService::AsyncService* service_; ::grpc::ServerCompletionQueue* notify_cq_; ::grpc::ServerContext ctx_; ::raft::AppendEntriesRequest request_; ::raft::AppendEntriesResponse reply_; ::grpc::ServerAsyncResponseWriter<::raft::AppendEntriesResponse> responder_; enum CallStatus { CREATE, PROCESS, FINISH }; CallStatus status_; // The current serving state. 
}; void HandleRpcs(ServerCompletionQueue *poll_cq) { uint32_t _counter = 0; void* tag; bool ok; while (true) { GPR_ASSERT(poll_cq->Next(&tag, &ok)); GPR_ASSERT(ok); static_cast<CallData*>(tag)->Proceed(); } } std::vector<std::unique_ptr<ServerCompletionQueue>> m_notify_cq; //std::vector<std::unique_ptr<ServerCompletionQueue>> m_call_cq; ::raft::RaftService::AsyncService service_; std::unique_ptr<::grpc::Server> server_; }; const char* ParseCmdPara( char* argv,const char* para) { auto p_target = std::strstr(argv,para); if (p_target == nullptr) { printf("para error argv[%s] should be %s \n",argv,para); return nullptr; } p_target += std::strlen(para); return p_target; } int main(int argc, char** argv) { if (argc != 4) { std::cout << "Usage:./program --thread_pair=xx --cq_pair=xx --pool=xx"; return 0; } g_thread_pair_num = std::atoi(ParseCmdPara(argv[1],"--thread_pair=")); g_cq_pair_num = std::atoi(ParseCmdPara(argv[2],"--cq_pair=")); g_pool = std::atoi(ParseCmdPara(argv[3],"--pool=")); ServerImpl server; server.Run(); return 0; }
5,803
C++
.cc
151
33.05298
110
0.642424
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,288
client_impl.cc
ppLorins_aurora/src/client/client_impl.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "service/service.h" #include "leader/client_pool.h" #include "member/member_manager.h" #include "election/election.h" #include "client/client_impl.h" namespace RaftCore::Client { using ::RaftCore::Common::VoteType; using ::RaftCore::Service::Write; using ::RaftCore::Leader::ClientPool; using ::RaftCore::Member::MemberMgr; using ::RaftCore::Election::ElectionMgr; AppendEntriesAsyncClient::AppendEntriesAsyncClient(std::shared_ptr<::grpc::Channel> shp_channel, std::shared_ptr<::grpc::CompletionQueue> shp_cq, bool delegate_me) : UnaryAsyncClient<::raft::AppendEntriesRequest, ::raft::AppendEntriesResponse, AppendEntriesAsyncClient>(shp_channel, shp_cq) { //Give myself a long lived delegator. 
if (delegate_me) this->OwnershipDelegator<AppendEntriesAsyncClient>::ResetOwnership(this); } AppendEntriesAsyncClient::~AppendEntriesAsyncClient() {} void AppendEntriesAsyncClient::Responder(const ::grpc::Status& status, const ::raft::AppendEntriesResponse& rsp) noexcept { auto _shp_write = this->OwnershipDelegator<Write>::GetOwnership(); auto *_p_conn_pool = (ClientPool<AppendEntriesAsyncClient>*)this->m_callback_args[0]; _shp_write->ReplicateDoneCallBack(status, rsp, _p_conn_pool->GetParentFollower(), this); } void AppendEntriesAsyncClient::Release() noexcept { //Reset myself. this->Reset(); //Release associated write request. this->OwnershipDelegator<Write>::ReleaseOwnership(); //Push myself back to the connection pool. auto *_p_conn_pool = (ClientPool<AppendEntriesAsyncClient>*)this->m_callback_args[0]; auto _shp_copied = this->OwnershipDelegator<AppendEntriesAsyncClient>::GetOwnership(); _p_conn_pool->Back(_shp_copied); VLOG(90) << "AppendEntriesAsyncClient returned:" << _p_conn_pool->GetParentFollower()->my_addr; //Clear my args. this->ClearCallBackArgs(); } CommitEntriesAsyncClient::CommitEntriesAsyncClient(std::shared_ptr<::grpc::Channel> shp_channel, std::shared_ptr<::grpc::CompletionQueue> shp_cq) : UnaryAsyncClient<::raft::CommitEntryRequest, ::raft::CommitEntryResponse, CommitEntriesAsyncClient> (shp_channel, shp_cq){ //Give myself a long lived delegator. this->OwnershipDelegator<CommitEntriesAsyncClient>::ResetOwnership(this); } CommitEntriesAsyncClient::~CommitEntriesAsyncClient() {} void CommitEntriesAsyncClient::Responder(const ::grpc::Status& status, const ::raft::CommitEntryResponse& rsp) noexcept { auto _shp_write = this->OwnershipDelegator<Write>::GetOwnership(); auto *_p_conn_pool = (ClientPool<CommitEntriesAsyncClient>*)this->m_callback_args[0]; _shp_write->CommitDoneCallBack(status, rsp, _p_conn_pool->GetParentFollower()); } void CommitEntriesAsyncClient::Release() noexcept { //Reset myself. this->Reset(); //Release associated write request. 
this->OwnershipDelegator<Write>::ReleaseOwnership(); //Push myself back to the connection pool. auto *_p_conn_pool = (ClientPool<CommitEntriesAsyncClient>*)this->m_callback_args[0]; auto _shp_copied = this->OwnershipDelegator<CommitEntriesAsyncClient>::GetOwnership(); _p_conn_pool->Back(_shp_copied); VLOG(90) << "CommitEntriesAsyncClient returned:" << _p_conn_pool->GetParentFollower()->my_addr; //Clear my args. this->ClearCallBackArgs(); } HeartbeatSyncClient::HeartbeatSyncClient(std::shared_ptr<::grpc::Channel> shp_channel): UnarySyncClient<::raft::HeartBeatRequest, ::raft::CommonResponse>(shp_channel) {} HeartbeatSyncClient::~HeartbeatSyncClient() {} WriteSyncClient::WriteSyncClient(std::shared_ptr<::grpc::Channel> shp_channel): UnarySyncClient<::raft::ClientWriteRequest, ::raft::ClientWriteResponse>(shp_channel) {} WriteSyncClient::~WriteSyncClient() {} SyncDataSyncClient::SyncDataSyncClient(std::shared_ptr<::grpc::Channel> shp_channel): BidirectionalSyncClient<::raft::SyncDataRequest, ::raft::SyncDataResponse>(shp_channel) { this->m_sync_rw = this->m_stub->SyncData(this->m_client_context.get()); } SyncDataSyncClient::~SyncDataSyncClient() {} auto SyncDataSyncClient::GetInstantiatedReq()noexcept -> decltype(m_shp_request) { if (!this->m_shp_request) this->m_shp_request.reset(new ::raft::SyncDataRequest()); return this->m_shp_request; } auto SyncDataSyncClient::GetReaderWriter() noexcept -> decltype(m_sync_rw) { return this->m_sync_rw; } ::raft::SyncDataResponse* SyncDataSyncClient::GetResponse() noexcept { return &this->m_response; } MemberChangePrepareAsyncClient::MemberChangePrepareAsyncClient(std::shared_ptr<::grpc::Channel> shp_channel, std::shared_ptr<::grpc::CompletionQueue> shp_cq) : UnaryAsyncClient<::raft::MemberChangeInnerRequest, ::raft::MemberChangeInnerResponse, MemberChangePrepareAsyncClient,::grpc::CompletionQueue>(shp_channel, shp_cq){} MemberChangePrepareAsyncClient::~MemberChangePrepareAsyncClient() {} void 
MemberChangePrepareAsyncClient::Responder(const ::grpc::Status& status, const ::raft::MemberChangeInnerResponse& rsp) noexcept { uint32_t _idx = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(m_callback_args[1])); MemberMgr::MemberChangePrepareCallBack(status, rsp, m_callback_args[0], _idx); } MemberChangeCommitAsyncClient::MemberChangeCommitAsyncClient(std::shared_ptr<::grpc::Channel> shp_channel, std::shared_ptr<::grpc::CompletionQueue> shp_cq) : UnaryAsyncClient<::raft::MemberChangeInnerRequest, ::raft::MemberChangeInnerResponse, MemberChangeCommitAsyncClient,::grpc::CompletionQueue>(shp_channel, shp_cq){} MemberChangeCommitAsyncClient::~MemberChangeCommitAsyncClient() {} void MemberChangeCommitAsyncClient::Responder(const ::grpc::Status& status, const ::raft::MemberChangeInnerResponse& rsp) noexcept { uint32_t _idx = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(m_callback_args[1])); MemberMgr::MemberChangeCommitCallBack(status, rsp, m_callback_args[0], _idx); } PrevoteAsyncClient::PrevoteAsyncClient(std::shared_ptr<::grpc::Channel> shp_channel, std::shared_ptr<::grpc::CompletionQueue> shp_cq) : UnaryAsyncClient<::raft::VoteRequest, ::raft::VoteResponse, PrevoteAsyncClient,::grpc::CompletionQueue>(shp_channel, shp_cq){} PrevoteAsyncClient::~PrevoteAsyncClient() {} void PrevoteAsyncClient::Responder(const ::grpc::Status& status, const ::raft::VoteResponse& rsp) noexcept { uint32_t _idx = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(m_callback_args[0])); ElectionMgr::CallBack(status, rsp, VoteType::PreVote,_idx); } VoteAsyncClient::VoteAsyncClient(std::shared_ptr<::grpc::Channel> shp_channel, std::shared_ptr<::grpc::CompletionQueue> shp_cq) : UnaryAsyncClient<::raft::VoteRequest, ::raft::VoteResponse, VoteAsyncClient,::grpc::CompletionQueue>(shp_channel, shp_cq){} VoteAsyncClient::~VoteAsyncClient() {} void VoteAsyncClient::Responder(const ::grpc::Status& status, const ::raft::VoteResponse& rsp) noexcept { uint32_t _idx = 
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(m_callback_args[0])); ElectionMgr::CallBack(status, rsp, VoteType::Vote,_idx); } }
7,900
C++
.cc
143
51.755245
108
0.756781
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,289
client_framework.cc
ppLorins_aurora/src/client/client_framework.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "glog/logging.h" #include "client/client_framework.h" namespace RaftCore::Client { template<typename T> ClientFramework<T>::ClientFramework(std::shared_ptr<Channel> shp_channel) noexcept{ this->m_stub = ::raft::RaftService::NewStub(shp_channel); this->Reset(); } template<typename T> void ClientFramework<T>::Reset() noexcept { this->m_client_context.reset(new ::grpc::ClientContext()); } template<typename T> std::shared_ptr<::raft::RaftService::Stub> ClientFramework<T>::GetStub() noexcept { return this->m_stub; } template<typename T> ClientFramework<T>::~ClientFramework() noexcept{} template<typename T,typename R> ClientTpl<T, R>::ClientTpl(std::shared_ptr<Channel> shp_channel) noexcept : ClientFramework(shp_channel) { this->m_shp_request.reset(new T()); } template<typename T,typename R> ClientTpl<T,R>::~ClientTpl() noexcept {} template<typename T,typename R> SyncClient<T, R>::SyncClient(std::shared_ptr<Channel> shp_channel) noexcept : ClientTpl<T, R>(shp_channel) {} template<typename T,typename R> SyncClient<T, R>::~SyncClient() noexcept {} template<typename T,typename R,typename CQ> AsyncClient<T,R,CQ>::AsyncClient(std::shared_ptr<Channel> shp_channel, std::shared_ptr<CQ> shp_cq)noexcept : ClientTpl<T, R>(shp_channel), 
m_server_cq(shp_cq) {} template<typename T,typename R,typename CQ> AsyncClient<T,R,CQ>::~AsyncClient()noexcept {} template<typename T,typename R> UnarySyncClient<T, R>::UnarySyncClient(std::shared_ptr<Channel> shp_channel)noexcept : SyncClient<T, R>(shp_channel) {} template<typename T,typename R> const R& UnarySyncClient<T, R>::DoRPC(std::function<void(std::shared_ptr<T>&)> req_setter, std::function<::grpc::Status(::grpc::ClientContext*,const T&,R*)> rpc, uint32_t timeo_ms, ::grpc::Status &ret_status)noexcept { req_setter(this->m_shp_request); std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(timeo_ms); this->m_client_context->set_deadline(deadline); ret_status = rpc(this->m_client_context.get(), *this->m_shp_request, &this->m_response); return this->m_response; } template<typename T,typename R> UnarySyncClient<T, R>::~UnarySyncClient()noexcept {} template<typename T,typename R> BidirectionalSyncClient<T, R>::BidirectionalSyncClient(std::shared_ptr<Channel> shp_channel)noexcept : SyncClient<T, R>(shp_channel) {} template<typename T,typename R> BidirectionalSyncClient<T, R>::~BidirectionalSyncClient()noexcept {} template<typename T,typename R,typename Q,typename CQ> UnaryAsyncClient<T,R,Q,CQ>::UnaryAsyncClient(std::shared_ptr<Channel> shp_channel, std::shared_ptr<CQ> shp_cq) noexcept : AsyncClient<T,R,CQ>(shp_channel, shp_cq) { static_assert(std::is_base_of<UnaryAsyncClient, Q>::value, "Q is not a derived from UnaryAsyncClient."); } template<typename T,typename R,typename Q,typename CQ> void UnaryAsyncClient<T,R,Q,CQ>::React(bool cq_result) noexcept { if (!cq_result) { LOG(ERROR) << "UnaryAsyncClient got false result from CQ."; this->Release(); return; } this->Responder(this->m_final_status, this->m_response); this->Release(); } template<typename T,typename R,typename Q,typename CQ> void UnaryAsyncClient<T,R,Q,CQ>::Release() noexcept { delete dynamic_cast<Q*>(this); } template<typename T,typename R,typename 
Q,typename CQ> void UnaryAsyncClient<T,R,Q,CQ>::EntrustRequest(const std::function<void(std::shared_ptr<T>&)> &req_setter, const FPrepareAsync<T,R> &f_prepare_async, uint32_t timeo_ms) noexcept { req_setter(this->m_shp_request); std::chrono::time_point<std::chrono::system_clock> _deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(timeo_ms); this->m_client_context->set_deadline(_deadline); this->m_reader = f_prepare_async(this->m_client_context.get(), *this->m_shp_request, this->m_server_cq.get()); this->m_reader->StartCall(); this->m_reader->Finish(&this->m_response, &this->m_final_status, dynamic_cast<ReactBase*>(this)); } template<typename T,typename R,typename Q,typename CQ> UnaryAsyncClient<T,R,Q,CQ>::~UnaryAsyncClient() noexcept {} template<typename T,typename R,typename Q> BidirectionalAsyncClient<T, R, Q>::BidirectionalAsyncClient(std::shared_ptr<Channel> shp_channel, std::shared_ptr<CompletionQueue> shp_cq) noexcept : AsyncClient<T,R>(shp_channel, shp_cq), m_async_rw(this->m_client_context.get()), m_status(ProcessStage::CONNECT) { static_assert(std::is_base_of<BidirectionalAsyncClient, Q>::value, "Q is not a derived from BidirectionalAsyncClient."); } template<typename T,typename R,typename Q> BidirectionalAsyncClient<T,R,Q>::~BidirectionalAsyncClient() noexcept {} template<typename T,typename R,typename Q> void BidirectionalAsyncClient<T,R,Q>::React(bool cq_result) noexcept { Q* _p_downcast = dynamic_cast<Q*>(this); if (!cq_result && (this->m_status != ProcessStage::READ)) { LOG(ERROR) << "BidirectionalAsyncClient got false result from CQ."; delete _p_downcast; return; } switch (this->m_status) { case ProcessStage::READ: //Meaning client said it wants to end the stream either by a 'WritesDone' or 'finish' call. 
if (!cq_result) { this->m_async_rw.Finish(this->m_final_status, _p_downcast); this->m_status = ProcessStage::FINISH; break; } this->m_responder(this->m_final_status, this->m_response); break; case ProcessStage::WRITE: this->m_async_rw.Read(&this->m_response, _p_downcast); this->m_status = ProcessStage::READ; break; case ProcessStage::CONNECT: break; case ProcessStage::WRITES_DONE: this->m_async_rw.Finish(this->m_final_status, _p_downcast); this->m_status = ProcessStage::FINISH; break; case ProcessStage::FINISH: if (this->m_final_status.error_code() != ::grpc::StatusCode::OK) { LOG(ERROR) << "rpc fail,err code:" << this->m_final_status.error_code() << ",err msg:" << this->m_final_status.error_message(); } delete _p_downcast; break; default: CHECK(false) << "Unexpected tag " << int(this->m_status); } } template<typename T,typename R,typename Q> void BidirectionalAsyncClient<T, R, Q>::AsyncDo(std::function<void(std::shared_ptr<T>&)> req_setter) noexcept { req_setter(this->m_shp_request); this->m_async_rw.Write(this->m_shp_request,dynamic_cast<Q*>(this)); this->m_status = ProcessStage::WRITE; } template<typename T,typename R,typename Q> void BidirectionalAsyncClient<T, R, Q>::WriteDone() noexcept { this->m_async_rw.WritesDone(dynamic_cast<Q*>(this)); this->m_status = ProcessStage::WRITES_DONE; } }
7,651
C++
.cc
161
43.130435
124
0.706625
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,290
client_base.cc
ppLorins_aurora/src/client/client_base.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "client/client_base.h" namespace RaftCore::Client { ClientBase::ClientBase() {} ClientBase::~ClientBase() {} void ClientBase::PushCallBackArgs(void* cb_data) noexcept{ this->m_callback_args.push_back(cb_data); } void ClientBase::ClearCallBackArgs() noexcept{ this->m_callback_args.clear(); } }
1,091
C++
.cc
25
41.92
73
0.749053
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,291
binlog_meta_data.cc
ppLorins_aurora/src/binlog/binlog_meta_data.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "boost/crc.hpp" #include "config/config.h" #include "tools/utilities.h" #include "binlog/binlog_meta_data.h" #define _TYPE_LOG_IDX_OFFSET_ (0x01) namespace RaftCore::BinLog { FileMetaData::IdxPair::IdxPair(uint32_t _a, uint64_t _b, uint32_t _c, uint32_t _d, uint32_t _e) { this->m_term = _a; this->m_index = _b; this->m_offset = _c; this->key_crc32 = _d; this->value_crc32 = _e; } bool FileMetaData::IdxPair::operator<(const IdxPair& _other)const noexcept { return this->LogIdentifier::operator<(_other); } bool FileMetaData::IdxPair::operator>(const IdxPair& _other)const noexcept { return this->LogIdentifier::operator>(_other); } bool FileMetaData::IdxPair::operator>(const LogIdentifier& _other)const noexcept { return this->LogIdentifier::operator>(_other); } bool FileMetaData::IdxPair::operator<(const LogIdentifier& _other)const noexcept { return this->LogIdentifier::operator<(_other); } bool FileMetaData::IdxPair::operator<=(const LogIdentifier& _other)const noexcept { return this->LogIdentifier::operator<=(_other); } bool FileMetaData::IdxPair::operator>=(const LogIdentifier& _other)const noexcept { return this->LogIdentifier::operator>=(_other); } bool FileMetaData::IdxPair::operator<(const ::raft::EntityID &_other)const 
noexcept { if (this->m_term < _other.term()) return true; if (this->m_term > _other.term()) return false; return this->m_index < _other.idx(); } bool FileMetaData::IdxPair::operator==(const ::raft::EntityID &_other)const noexcept { return (this->m_term == _other.term()) && (this->m_index==_other.idx()); } bool FileMetaData::IdxPair::operator!=(const ::raft::EntityID &_other)const noexcept { return !this->operator==(_other); } bool FileMetaData::IdxPair::operator>(const ::raft::EntityID &_other)const noexcept { if (this->m_term > _other.term()) return true; if (this->m_term < _other.term()) return false; return this->m_index > _other.idx(); } bool FileMetaData::IdxPair::operator==(const IdxPair &_other) const noexcept{ return (this->m_term == _other.m_term && this->m_index == _other.m_index); } const FileMetaData::IdxPair& FileMetaData::IdxPair::operator=(const IdxPair &_other) noexcept { this->m_term = _other.m_term; this->m_index = _other.m_index; this->m_offset = _other.m_offset; this->key_crc32 = _other.key_crc32; this->value_crc32 = _other.value_crc32; return *this; } std::size_t FileMetaData::IdxPair::Hash() const noexcept { auto h1 = std::hash<uint64_t>{}(this->m_index); auto h2 = std::hash<uint64_t>{}(this->m_offset); return h1 ^ (h2 << 8); } FileMetaData::FileMetaData() noexcept {} FileMetaData::~FileMetaData() noexcept {} void FileMetaData::AddLogOffset(uint32_t term,uint64_t log_index, uint32_t file_offset, uint32_t key_crc32,uint32_t value_crc32) noexcept { //The allocated buf will be released in the destructor. 
std::shared_ptr<IdxPair> _shp_new_record(new IdxPair(term,log_index,file_offset,key_crc32,value_crc32)); this->m_meta_hash.Insert(_shp_new_record); } void FileMetaData::AddLogOffset(const TypeOffsetList &_list) noexcept { for (const auto &_item : _list) this->m_meta_hash.Insert(_item); } void FileMetaData::Delete(const IdxPair &_item) noexcept { this->m_meta_hash.Delete(_item); } TypeBufferInfo FileMetaData::GenerateBuffer() const noexcept { std::list<std::shared_ptr<FileMetaData::IdxPair>> _hash_entry_list; this->m_meta_hash.GetOrderedByKey(_hash_entry_list); /* Meta data on-disk format,from low offset to high offset: 1> meta data(tlv list) 1) type :1 bytes 2) length : 3 bytes 3) value : vary 2> CRC32 checksum. (4 bytes) 3> length of whole meta data area. (4 bytes) 4> a magic string to identify the end of meta-data area. (variadic bytes) */ ::raft::LogOffset _log_offset; for (const auto &_ptr_entry : _hash_entry_list) { if (_ptr_entry == nullptr) continue; ::raft::LogOffsetItem* _p_item = _log_offset.add_mappings(); _p_item->set_log_term(_ptr_entry->m_term); _p_item->set_log_idx(_ptr_entry->m_index); _p_item->set_offset(_ptr_entry->m_offset); _p_item->set_key_crc32(_ptr_entry->key_crc32); _p_item->set_value_crc32(_ptr_entry->value_crc32); } std::string _log_offset_buf = ""; _log_offset.SerializeToString(&_log_offset_buf); uint32_t entry_field_length = (uint32_t)_log_offset_buf.size(); //Value length cannot exceeds the maximum a three byte long integer can represent. assert(entry_field_length <= 0xFFFFFF); static uint32_t _footer_len = (uint32_t)std::strlen(_FILE_META_DATA_MAGIC_STR_); uint32_t _tail_length = 4 * 2 + _footer_len; uint32_t _buf_size = 1 + 3 + entry_field_length + _tail_length; auto _ptr = (unsigned char*)malloc(_buf_size); assert(_ptr!=nullptr); //Set meta data 1> 1). unsigned char* _p_cur = (unsigned char*)_ptr; _p_cur[0] = _TYPE_LOG_IDX_OFFSET_; _p_cur++; //Set meta data 1> 2). 
uint32_t _tmp = 0; ::RaftCore::Tools::ConvertToBigEndian<uint32_t>(entry_field_length, &_tmp); unsigned char* px = (unsigned char*)&_tmp; CHECK(*px == 0x0); //for it is big endian , the fist byte must be 0x0. px++; std::memcpy(_p_cur,px,_FOUR_BYTES_ - 1); _p_cur += 3; //Set meta data 1> 3). std::memcpy(_p_cur,_log_offset_buf.data(),entry_field_length); _p_cur += entry_field_length; //Calculate crc32 checksum. uint32_t _crc32_value = ::RaftCore::Tools::CalculateCRC32(_ptr, _buf_size - _tail_length); //Set crc32 checksum. ::RaftCore::Tools::ConvertToBigEndian<uint32_t>(_crc32_value, &_tmp); std::memcpy(_p_cur,&_tmp,_FOUR_BYTES_); _p_cur += _FOUR_BYTES_; //Set meta data area length. ::RaftCore::Tools::ConvertToBigEndian<uint32_t>(_buf_size, &_tmp); std::memcpy(_p_cur,&_tmp,_FOUR_BYTES_); _p_cur += _FOUR_BYTES_; //Set meta data area magic string. std::memcpy(_p_cur,_FILE_META_DATA_MAGIC_STR_,_footer_len); _p_cur += _footer_len; return std::make_tuple(_ptr,_buf_size); } void FileMetaData::ConstructMeta(const unsigned char* _buf, std::size_t _size) noexcept { auto _cur_ptr = _buf; //Examine header. CHECK(*_cur_ptr == _TYPE_LOG_IDX_OFFSET_) << "meta header check fail"; _cur_ptr++; //Examine the checksum of metadata buf. uint32_t _length = 0x0; std::memcpy((unsigned char*)&_length+1,_cur_ptr,_FOUR_BYTES_ - 1); ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(_length, &_length); _cur_ptr += 3; //Examine the checksum of metadata buf. ::raft::LogOffset _log_offset; CHECK(_log_offset.ParseFromArray(_cur_ptr, _length)) << "parse meta data buf fail.."; this->m_meta_hash.Clear(); for (const auto &_item : _log_offset.mappings()) { std::shared_ptr<IdxPair> _shp_pair(new IdxPair(_item.log_term(),_item.log_idx(),_item.offset(),_item.key_crc32(),_item.value_crc32())); this->m_meta_hash.Insert(_shp_pair); } } int FileMetaData::ConstructMeta(std::FILE* _handler) noexcept { //Offset of _handler should be set to 0 before calling this method. 
assert(_handler != nullptr); uint32_t _buf_size = ::RaftCore::Config::FLAGS_binlog_parse_buf_size * 1024 * 1024; unsigned char *sz_buf = (unsigned char *)malloc(_buf_size); uint32_t _this_read = 0,_total_read = 0; long _total_offset = 0; this->m_meta_hash.Clear(); do{ _this_read = (uint32_t)std::fread(sz_buf,1,_buf_size,_handler); CHECK(_this_read > _FOUR_BYTES_) << "parsing:read raw file fail,file corruption found,actual read:" << _this_read; _total_read += _this_read; auto _p_cur = sz_buf; //Parsing this buffer. uint32_t _this_offset = 0; do{ if (_this_read - _this_offset < _FOUR_BYTES_) { /*Each of the <length,content> pairs must be read integrally,if current parsing buffer is smaller than 4 bytes, means a partially read happened,need to adjust the reading position . */ CHECK(std::fseek(_handler,_total_offset,SEEK_SET)==0) << "seeking binlog fail"; break; } uint32_t _cur_offset = _total_offset; uint32_t _item_buf_len = *(uint32_t*)_p_cur; ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(_item_buf_len, &_item_buf_len); if ( _this_read - (_this_offset+_FOUR_BYTES_) < _item_buf_len ) { /*Like the previous if branch which contains the break statement:need position adjusting*/ CHECK(std::fseek(_handler,_total_offset,SEEK_SET)==0) << "seeking binlog fail"; break; } _p_cur += _FOUR_BYTES_; _total_offset += _FOUR_BYTES_; _this_offset += _FOUR_BYTES_; ::raft::BinlogItem _binlog_item; //Parse binglog_item buffer. 
CHECK(_binlog_item.ParseFromArray(_p_cur, _item_buf_len)) << "parse file content fail.."; _p_cur += _item_buf_len; _total_offset += _item_buf_len; _this_offset += _item_buf_len; const auto & _wop = _binlog_item.entity().write_op(); uint32_t _key_crc32 = ::RaftCore::Tools::CalculateCRC32((void*)_wop.key().data(),(unsigned int)_wop.key().length()); uint32_t _value_crc32 = ::RaftCore::Tools::CalculateCRC32((void*)_wop.value().data(),(unsigned int)_wop.value().length()); std::shared_ptr<IdxPair> _shp_pair(new IdxPair(_binlog_item.entity().entity_id().term(), _binlog_item.entity().entity_id().idx(), _cur_offset,_key_crc32,_value_crc32)); this->m_meta_hash.Insert(_shp_pair); //This will always be true, otherwise it would break at previous break statement. } while (_this_offset < _this_read); //If _this_read < _buf_size , means we've read to the end of file. } while (_this_read >= _buf_size); free(sz_buf); return _total_offset == _total_read ? 0 : _total_offset; } void FileMetaData::GetOrderedMeta(TypeOffsetList &_output) const noexcept { this->m_meta_hash.GetOrderedByKey(_output); } void FileMetaData::BackOffset(int offset) noexcept{ this->m_meta_hash.Map([&offset](std::shared_ptr<IdxPair> &_one)->void{ _one->m_offset -= offset;}); } std::string FileMetaData::IdxPair::ToString() const noexcept { const static int _buf_size = 512; char _sz_buf[_buf_size] = { 0 }; std::snprintf(_sz_buf,_buf_size,"IdxPair term:%u,idx:%llu,offset:%u",this->m_term,this->m_index,this->m_offset); return std::string(_sz_buf); } std::ostream& operator<<(std::ostream& os, const FileMetaData::IdxPair& obj) { os << obj.ToString(); return os; } }
11,794
C++
.cc
245
42.204082
143
0.646561
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,292
binlog_singleton.cc
ppLorins_aurora/src/binlog/binlog_singleton.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "binlog/binlog_singleton.h" namespace RaftCore::BinLog { BinLogOperator BinLogGlobal::m_instance; }
887
C++
.cc
18
47.944444
73
0.754345
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,293
binlog_operator.cc
ppLorins_aurora/src/binlog/binlog_operator.cc
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "boost/crc.hpp" #include "boost/filesystem.hpp" #include "config/config.h" #include "tools/utilities.h" #include "leader/leader_view.h" #include "binlog/binlog_operator.h" #include "storage/storage_singleton.h" namespace RaftCore::BinLog { //To avoid issues caused by including header files mutually. 
namespace fs = ::boost::filesystem; using ::raft::EntityID; using ::RaftCore::Leader::LeaderView; using ::RaftCore::Common::ConvertID; using ::RaftCore::Storage::StorageGlobal; BinLogOperator::BinLogOperator() {} BinLogOperator::~BinLogOperator()noexcept { if (this->m_initialized) this->UnInitialize(); } void BinLogOperator::Initialize(const char* role, bool file_name) noexcept { CHECK(role != nullptr); if (file_name) this->m_file_name = role; else this->m_file_name = _AURORA_BINLOG_NAME_ + std::string(".") + role; this->m_binlog_handler = std::fopen(this->m_file_name.c_str(),_AURORA_BINLOG_OP_MODE_); CHECK(this->m_binlog_handler != nullptr) << "open binlog file " << this->m_file_name << "fail..,errno:" << errno; this->ParseFile(); std::list<std::shared_ptr<FileMetaData::IdxPair>> _cur_file_meta; m_p_meta.load()->GetOrderedMeta(_cur_file_meta); LogIdentifier _last; _last.Set(0,0); if (!_cur_file_meta.empty()) _last.Set(_cur_file_meta.back()->m_term,_cur_file_meta.back()->m_index); this->m_last_logged.store(_last, std::memory_order_release); this->m_binlog_status.store(BinlogStatus::NORMAL); this->m_log_num = (uint32_t)_cur_file_meta.size(); this->m_precede_lcl_inuse.store(0); this->m_initialized = true; } void BinLogOperator::AddPreLRLUseCount() noexcept { this->m_precede_lcl_inuse.fetch_add(1); } void BinLogOperator::SubPreLRLUseCount() noexcept { this->m_precede_lcl_inuse.fetch_sub(1); } void BinLogOperator::ParseFile() noexcept{ //Construct the meta data if not exist, otherwise just load it into RAM m_p_meta.store(new FileMetaData()); CHECK(std::fseek(this->m_binlog_handler, 0, SEEK_END) == 0) << "seek binlog file " << this->m_file_name << "fail..,errno:" << errno; long _file_size = std::ftell(this->m_binlog_handler); CHECK(_file_size >= 0) << "tell binlog file " << this->m_file_name << "fail..,errno:" << errno; if (_file_size == 0) { //Empty file,just skip the following parsing meta steps. 
return; } long _file_tail_len = _FOUR_BYTES_ * 3; int _minimal_pb_section_size = _FOUR_BYTES_; //Only a four-bytes length field . int _minimal_meta_section_size = _FOUR_BYTES_ + _file_tail_len; //Only the tailing part and a 0-length tlv field. CHECK(_file_size >= _minimal_pb_section_size + _minimal_meta_section_size) << "binlog file size not correct:" << _file_size; //First read the last 12 bytes area. CHECK(std::fseek(this->m_binlog_handler, (_file_size - _file_tail_len), SEEK_SET) == 0) << "seek binlog file " << this->m_file_name << "fail..,errno:" << errno; char sz_last_zone[12] = {0}; CHECK(std::fread(sz_last_zone, 1, 12, this->m_binlog_handler)==12) << "read binlog file " << this->m_file_name << "last zone fail..,errno:" << errno; //This uncompleted file ending is probably due to an unexpected crash. static std::size_t _footer_len = std::strlen(_FILE_META_DATA_MAGIC_STR_); if (strncmp((const char*)&sz_last_zone[8], _FILE_META_DATA_MAGIC_STR_, _footer_len) != 0) { CHECK(std::fseek(this->m_binlog_handler,0, SEEK_SET) == 0) << "seek binlog file " << this->m_file_name << "fail..,errno:" << errno; //Since the file has no meta data , we have to construct it all over again. int _parsed_bytes = m_p_meta.load()->ConstructMeta(this->m_binlog_handler); if (_parsed_bytes > 0) { LOG(WARNING) << "binlog incomplete data found , file corruption probably happened,now truncating it."; this->Truncate(_parsed_bytes); } return; } //Meta data exists , just parse it. 
uint32_t _meta_data_len = *((uint32_t*)&sz_last_zone[4]); ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(_meta_data_len, &_meta_data_len); CHECK(std::fseek(this->m_binlog_handler,(_file_size - _meta_data_len), SEEK_SET) == 0) << "seek binlog file " << this->m_file_name << "fail..,errno:" << errno; auto _ptr_meta_buf = (unsigned char*)malloc(_meta_data_len); CHECK(std::fread(_ptr_meta_buf, 1,_meta_data_len, this->m_binlog_handler) == _meta_data_len) << "read binlog meta data fail...,errno:" << errno; uint32_t crc_in_file = *(uint32_t*)&sz_last_zone[0]; ::RaftCore::Tools::ConvertBigEndianToLocal<uint32_t>(crc_in_file, &crc_in_file); //CRC32 only calculate the 1>meta data area. uint32_t _crc32_value = ::RaftCore::Tools::CalculateCRC32(_ptr_meta_buf, _meta_data_len - 12); CHECK(crc_in_file == _crc32_value) << "binlog file meta check checksum failed"; m_p_meta.load()->ConstructMeta(_ptr_meta_buf,_meta_data_len); free(_ptr_meta_buf); } void BinLogOperator::UnInitialize() noexcept{ auto _p_tmp = m_p_meta.load(); delete _p_tmp; m_p_meta.store(nullptr); this->m_initialized = false; if (this->m_binlog_handler == nullptr) return ; if (!std::fclose(this->m_binlog_handler)) return; //normally return. //Something bad happened. CHECK(false) << "close binlog file fail...,errno:"<< errno; } void BinLogOperator::Truncate(uint32_t new_size) noexcept { CHECK(std::fclose(this->m_binlog_handler)==0) << "truncating file fclose failed"; fs::resize_file(fs::path(this->m_file_name),new_size); //Re-open file. 
this->m_binlog_handler = std::fopen(this->m_file_name.c_str(),_AURORA_BINLOG_OP_MODE_); CHECK(this->m_binlog_handler != nullptr) << "re-open binlog file after truncate fail.."; } TypeBufferInfo BinLogOperator::PrepareBuffer(const TypeEntityList &input_entities, FileMetaData::TypeOffsetList &offset_list) noexcept { unsigned char* _p_buf = nullptr; std::size_t _buf_size = 0; auto ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); uint64_t ts = ms.count(); std::list<std::string> _buf_list; uint32_t _cur_offset = 0; //Offset relative to this append operation. for (const auto & _shp_entity : input_entities) { auto _binlogitem = ::raft::BinlogItem(); _binlogitem.set_allocated_entity(_shp_entity.get()); _binlogitem.set_timestamp_ms(ts); std::string _output = ""; CHECK(_binlogitem.SerializeToString(&_output)) << "SerializeToString fail...,log entity:" << _shp_entity->entity_id().term() << "|" << _shp_entity->entity_id().idx(); _binlogitem.release_entity(); const auto & _wop = _shp_entity->write_op(); uint32_t _key_crc32 = ::RaftCore::Tools::CalculateCRC32((void*)_wop.key().data(),(unsigned int)_wop.key().length()); uint32_t _value_crc32 = ::RaftCore::Tools::CalculateCRC32((void*)_wop.value().data(),(unsigned int)_wop.value().length()); uint32_t _this_buf_size = (uint32_t)_output.size() + _FOUR_BYTES_; _buf_size += _this_buf_size; _buf_list.emplace_back(std::move(_output)); //Recording offset info. 
offset_list.emplace_back(new FileMetaData::IdxPair(_shp_entity->entity_id().term(),_shp_entity->entity_id().idx(), _cur_offset,_key_crc32,_value_crc32)); _cur_offset += _this_buf_size; } _p_buf = (unsigned char*)malloc(_buf_size); auto _pcur = _p_buf; for (const auto& _buf : _buf_list) { uint32_t _buf_len = (uint32_t)_buf.size(); ::RaftCore::Tools::ConvertToBigEndian<uint32_t>(uint32_t(_buf_len),&_buf_len); std::memcpy(_pcur,(unsigned char*)&_buf_len,_FOUR_BYTES_); _pcur += _FOUR_BYTES_; std::memcpy(_pcur,_buf.data(),_buf.size()); _pcur += _buf.size(); } return std::make_tuple(_p_buf, _buf_size); } void BinLogOperator::AppendBufferToBinlog(TypeBufferInfo &buffer_info, const FileMetaData::TypeOffsetList &offset_list) noexcept { //First,update this file's meta data. long _offset = std::ftell(this->m_binlog_handler); CHECK (_offset >= 0) << "ftell binlog file fail...,errno:"<< errno ; for (const auto &_item : offset_list) { _item->m_offset += _offset; } m_p_meta.load()->AddLogOffset(offset_list); //Second,append buffer to file. unsigned char* _p_buf = nullptr; int _buf_size = 0; std::tie(_p_buf, _buf_size) = buffer_info; CHECK(std::fwrite((void*)_p_buf,1,_buf_size,this->m_binlog_handler) == _buf_size) << "fwrite binlog file fail...,errno:"<< errno ; CHECK(std::fflush(this->m_binlog_handler) == 0) << "fflush binlog file fail...,errno:" << errno; free(_p_buf); } void BinLogOperator::RotateFile() noexcept { /*Since file position indicator is always at the tail , and file is opened in binary mode , ftell is exactly the file size. 
*/ long file_len = std::ftell(this->m_binlog_handler); CHECK(file_len >= 0) << "ftell binlog file fail...,errno:"<< errno ; bool _exceed_limits = ((uint32_t)file_len > ::RaftCore::Config::FLAGS_binlog_max_size) || (this->m_log_num.load() > ::RaftCore::Config::FLAGS_binlog_max_log_num); if (!_exceed_limits) return; int _use_count = this->m_precede_lcl_inuse.load(); if (_use_count > 0) { LOG(INFO) << "somewhere using the pre-lcl part,use count:" << _use_count; return; } //Get the tail meta data need to move to the new binlog file. uint32_t _reserve_counter = 0; //#logs before ID-LCL. std::list<std::shared_ptr<FileMetaData::IdxPair>> _cur_file_meta; std::list<std::shared_ptr<FileMetaData::IdxPair>> _new_file_meta; FileMetaData* _p_new_meta = nullptr; m_p_meta.load()->GetOrderedMeta(_cur_file_meta); for (auto _iter = _cur_file_meta.crbegin(); _iter != _cur_file_meta.crend(); ) { /*We need to preserve more log entries . There are reasons for this: 1. Need to preserve the LCL in the new binlog, because it will be used in reverting log scenario as the 'pre_log_id'. 2. In some extreme cases, the resync log procedure may need log entries from leader's binlog file that are even earlier than the LCL. E.g., a follower coming back to the normal connected status of the cluster after a relatively long status of disconnected. At which point , its logs falling a lot behind the leader's and need to resync a lot of entries. */ bool _less_than_LCL = (*_iter)->operator<(StorageGlobal::m_instance.GetLastCommitted()); _reserve_counter += (_less_than_LCL) ? 1 : 0 ; bool _overflow_reserve_num = _reserve_counter > ::RaftCore::Config::FLAGS_binlog_reserve_log_num; if ( _less_than_LCL && _overflow_reserve_num ) break; _new_file_meta.emplace_front(*_iter); //Delete meta data that are smaller than (ID-LCL - FLAGS_reserve_log_num). m_p_meta.load()->Delete(**_iter); //Insert current iterating meta-data item to the new binlog file's meta-data list. 
if (_p_new_meta == nullptr) _p_new_meta = new FileMetaData(); _p_new_meta->AddLogOffset((*_iter)->m_term,(*_iter)->m_index,(*_iter)->m_offset,(*_iter)->key_crc32,(*_iter)->value_crc32); _iter++; } //If the trailing part actually exists , remove it. unsigned char* _p_tail_logs_buf = nullptr; long _tail_size = 0; //Note: _new_file_meta will contain log start from,i.e.,>= (ID-LCL - FLAGS_reserve_log_num). std::shared_ptr<FileMetaData::IdxPair> _shp_front = _new_file_meta.front(); //Copy tail meta data to the new binlog file. CHECK(std::fseek(this->m_binlog_handler,_shp_front->m_offset,SEEK_SET)==0); long _front_size = std::ftell(this->m_binlog_handler); CHECK(_front_size >= 0) << "tell binlog file " << this->m_file_name << "fail..,errno:" << errno; _tail_size = file_len - _front_size; _p_tail_logs_buf = (unsigned char *)malloc(_tail_size); CHECK(std::fread(_p_tail_logs_buf,1,_tail_size,this->m_binlog_handler)==_tail_size) << "read binlog file " << this->m_file_name << " tail fail..,errno:" << errno; this->Truncate(_shp_front->m_offset); //Writing meta data to the end of binlog file, close it after that. uint32_t _buf_size = 0; unsigned char * _p_meta_buf = nullptr; std::tie(_p_meta_buf, _buf_size) = m_p_meta.load()->GenerateBuffer(); CHECK(std::fwrite(_p_meta_buf, _buf_size, 1, this->m_binlog_handler) == 1) << "fwrite binlog file fail...,errno:"<< errno ; free(_p_meta_buf); CHECK (std::fflush(this->m_binlog_handler) == 0 ) << "fflush meta data to end of binlog file fail..."; //Rename & re-open. this->RenameOpenBinlogFile(); //Write the tail logs to the new binlog file if (_p_tail_logs_buf != nullptr) { CHECK(fwrite((void*)_p_tail_logs_buf,_tail_size,1,this->m_binlog_handler) ==1)<< "fwrite binlog file fail...,errno:"<< errno ; CHECK (std::fflush(this->m_binlog_handler) == 0) << "fflush binlog file fail...,errno:" << errno; } //New file meta offset are relative to the old binlog file,adjust it to the new binlog file. 
_p_new_meta->BackOffset(_front_size); //Release and re-allocate meta data buf. auto *_p_tmp = m_p_meta.load(); delete _p_tmp; m_p_meta.store(_p_new_meta); } bool BinLogOperator::AppendEntry(const TypeEntityList &input_entities,bool force) noexcept{ //TODO: remove test code //this->m_last_logged.store(ConvertID(input_entities.back()->entity_id()), std::memory_order_release); //return true; /* Prerequisite: There is no way for two or more threads calling this method parallel with the same pre_log(guaranteed by the previous generating guid step). Otherwise, terrible things could happen. */ if (!force && this->m_binlog_status.load() != BinlogStatus::NORMAL) { LOG(ERROR) << "binlog status wrong:" << int(this->m_binlog_status.load()); return false; } if (input_entities.empty()) return true; //For some legacy reason , we represent pre_log like this... const auto &pre_log = input_entities.front()->pre_log_id(); //Log offsets to update. FileMetaData::TypeOffsetList _offset_list; auto _buf_info = this->PrepareBuffer(input_entities,_offset_list); { std::unique_lock<std::mutex> _mutex_lock(m_cv_mutex); /* Having to be sure that the last log entry which has been written to the binlog file is exactly the one prior to the log that we're currently going to write . Meaning that we have to wait until the last log caught up with the pre_log. Figured out two approaches here: A. use CAS weak version to wait. B. use condition variable to wait. Since approach A will consume a lot of CPU times ,I choose approach B. 
*/ while (!EntityIDEqual(pre_log, this->m_last_logged.load(std::memory_order_consume))) { auto wait_cond = [&]()->bool {return EntityIDEqual(pre_log,this->m_last_logged.load(std::memory_order_consume)); }; /* During high request load,many threads will be blocked here , and only one could go further after some other threads called cv.notify_all */ bool waiting_result = m_cv.wait_for(_mutex_lock, std::chrono::microseconds(::RaftCore::Config::FLAGS_binlog_append_file_timeo_us), wait_cond); if (!waiting_result) { LOG(WARNING) << "timeout during append ,current ID-LRL logID: " << this->m_last_logged.load(std::memory_order_consume) << ",waiting on previous id :" << ConvertID(pre_log) << ", this shouldn't exist too much, and will resolve quickly."; /*Just continuous waiting, no need to return false or something like that . Threads who got here must finish the appending process.*/ continue; } break; } //Note:Only one thread could reading here. //The following two steps can be merged into one . this->AppendBufferToBinlog(_buf_info, _offset_list); //Rotating binlog file... this->RotateFile(); } this->m_last_logged.store(ConvertID(input_entities.back()->entity_id()), std::memory_order_release); { /* Caution: modifying the conditions must be under the protect of mutex, whether the conditions can be represented by an atomic object or not. Reference:http://en.cppreference.com/w/cpp/thread/condition_variable */ std::unique_lock<std::mutex> _mutex_lock(m_cv_mutex); //Notifying doesn't need to hold the mutex. 
m_cv.notify_all(); } this->m_log_num.fetch_add((uint32_t)input_entities.size()); return true; } LogIdentifier BinLogOperator::GetLastReplicated() noexcept{ return this->m_last_logged.load(std::memory_order_consume); } BinLogOperator::BinlogErrorCode BinLogOperator::RevertLog(TypeMemlogFollowerList &log_list, const LogIdentifier &boundary) noexcept { const auto & pre_entity_id = log_list.front()->GetEntity()->pre_log_id(); //Find the last entry that being consistent with the first element of log_list. std::list<std::shared_ptr<FileMetaData::IdxPair>> _ordered_meta; m_p_meta.load()->GetOrderedMeta(_ordered_meta); auto _criter_meta = _ordered_meta.crbegin(); for (; _criter_meta != _ordered_meta.crend(); _criter_meta++) { if ((*_criter_meta)->operator!=(pre_entity_id)) { continue; } break; } CHECK(_criter_meta != _ordered_meta.crend()) << "Reverting log : cannot find pre_entity_id,something must be wrong"; auto _citer_meta = _criter_meta.base(); bool _found_consistent_log = false; for (auto _citer_log = log_list.cbegin(); _citer_log != log_list.cend();) { const std::string &_key = (*_citer_log)->GetEntity()->write_op().key(); const std::string &_value = (*_citer_log)->GetEntity()->write_op().value(); uint32_t _key_crc32 = ::RaftCore::Tools::CalculateCRC32((void*)_key.data(), (unsigned int)_key.length()); uint32_t _value_crc32 = ::RaftCore::Tools::CalculateCRC32((void*)_value.data(), (unsigned int)_value.length()); bool _item_equal = (*_citer_meta)->key_crc32 == _key_crc32 && (*_citer_meta)->value_crc32 == _value_crc32; if ( !_item_equal ) break; _found_consistent_log = true; //Advance input log iterator. _citer_log = log_list.erase(_citer_log); //Advance binlog meta iterator. 
_citer_meta++; if (_citer_meta == _ordered_meta.cend()) break; } /* There are several scenarios in reverting: 1> log_list & binlog can find a consistent log entry: assuming _log_listX is the sublist of log_list,with its first element is the first inconsistent entry with binlog : 1) _log_listX is empty: entries in log_list are all the same as binlog, nothing to revert. 2) _log_listX has no intersection with binlog: entries in log_list will be appended to binlog without any erasing operations. 3) _log_listX has intersection with binlog : the conflict entries in the binlog will be replaced by _log_listX. 2> log_list & binlog can't find a consistent consistent log entry: return a NO-CONSITENT error. */ if (!_found_consistent_log) return BinlogErrorCode::NO_CONSISTENT_ERROR; //No need to revert anything. if (log_list.empty()) return BinlogErrorCode::SUCCEED_MERGED; const auto &_first_inconsistent_log = log_list.front(); if (!EntityIDLarger(_first_inconsistent_log->GetEntity()->entity_id(), boundary)) return BinlogErrorCode::OVER_BOUNDARY; BinlogStatus _cur_status = BinlogStatus::NORMAL; if (!this->m_binlog_status.compare_exchange_strong(_cur_status,BinlogStatus::REVERTING)) { //Other threads may have already modified the status variable. return BinlogErrorCode::OTHER_ERROR; } bool binlog_ended = (_citer_meta == _ordered_meta.cend()); std::string _revert_point = binlog_ended ? "end of binlog file" : (*_citer_meta)->ToString(); LOG(INFO) << "log reverting start...,detail:" << _revert_point; //Only in the following case should we do reverting. if (!binlog_ended) { //Remove meta data. auto _remove_iter = _citer_meta; while (_remove_iter !=_ordered_meta.cend()) { m_p_meta.load()->Delete(**_remove_iter); _remove_iter++; } //Update ID-LRL and notify the other threads who are waiting on it. 
std::unique_lock<std::mutex> _mutex_lock(m_cv_mutex); this->Truncate((*_citer_meta)->m_offset); _citer_meta--; LogIdentifier _id_lrl; _id_lrl.Set((*_citer_meta)->m_term,(*_citer_meta)->m_index ); this->m_last_logged.store(_id_lrl, std::memory_order_release); m_cv.notify_all(); } //Appending new entries. std::list<std::shared_ptr<Entity>> _entities_list; for (const auto& _item : log_list) _entities_list.emplace_back(_item->GetEntity()); CHECK(AppendEntry(_entities_list,true)) << "AppendEntry to binlog fail,never should this happen,something terribly wrong."; //No exceptions could happened here. _cur_status = BinlogStatus::REVERTING; CHECK(this->m_binlog_status.compare_exchange_strong(_cur_status, BinlogStatus::NORMAL)) << "Binlog reverting : status CAS failed,something terribly wrong"; return BinlogErrorCode::SUCCEED_TRUNCATED; } BinLogOperator::BinlogErrorCode BinLogOperator::SetHead(std::shared_ptr<::raft::Entity> _shp_entity) noexcept { BinlogStatus _cur_status = BinlogStatus::NORMAL; if (!this->m_binlog_status.compare_exchange_strong(_cur_status,BinlogStatus::SETTING_HEAD)) { //Other threads may have already modified the status variable. return BinlogErrorCode::OTHER_ERROR; } CHECK(std::fclose(this->m_binlog_handler)==0) << "truncating file fclose failed"; if (fs::exists(fs::path(this->m_file_name))) CHECK(std::remove(this->m_file_name.c_str())==0); this->m_binlog_handler = std::fopen(this->m_file_name.c_str(), _AURORA_BINLOG_OP_MODE_); CHECK(this->m_binlog_handler != nullptr) << "open binlog file " << this->m_file_name << "fail..,errno:" << errno; //Clear and set up new meta info. auto *_p_tmp = m_p_meta.load(); delete _p_tmp; m_p_meta.store(new FileMetaData()); m_zero_log_id.Set(0, 0); this->m_last_logged.store(m_zero_log_id, std::memory_order_release); /*Since the binlog file has already just been reset,pre_log_id should be set to the initial id,only after that the data could be written into the binlog file. 
*/ auto _p_pre_log_id = _shp_entity->mutable_pre_log_id(); _p_pre_log_id->set_term(0); _p_pre_log_id->set_idx(0); TypeEntityList _input_list; _input_list.emplace_back(_shp_entity); CHECK(AppendEntry(_input_list,true)) << "AppendEntry to binlog fail,never should this happen,something terribly wrong."; //No exceptions could happened here. _cur_status = BinlogStatus::SETTING_HEAD; CHECK(this->m_binlog_status.compare_exchange_strong(_cur_status, BinlogStatus::NORMAL)) << "Binlog setting head : status CAS failed,something terribly wrong"; return BinlogErrorCode::SUCCEED_TRUNCATED; } bool BinLogOperator::Clear() noexcept { if (this->m_binlog_status.load() != BinlogStatus::NORMAL) return false; this->m_last_logged.store(m_zero_log_id, std::memory_order_release); this->DeleteOpenBinlogFile(); auto *_p_tmp = m_p_meta.load(); delete _p_tmp; m_p_meta.store(new FileMetaData()); return true; } void BinLogOperator::DeleteOpenBinlogFile() noexcept { CHECK(std::fclose(this->m_binlog_handler)==0); LOG(INFO) << "deleting current running binlog:" << this->m_file_name; int _ret = std::remove(this->m_file_name.c_str()); CHECK(_ret == 0) << ",delete fail,errno:" << errno; //Open & create new binlog file. this->m_binlog_handler = std::fopen(this->m_file_name.c_str(),_AURORA_BINLOG_OP_MODE_); CHECK(this->m_binlog_handler != nullptr) << "rotating, fopen binlog file fail...,errno:" << errno; } void BinLogOperator::RenameOpenBinlogFile() noexcept{ //Scan binlog files. 
fs::path _path("."); CHECK (fs::is_directory(_path)) << "scan current directory fail,cannot save current file"; int max_suffix = 0; for (auto&& x : fs::directory_iterator(_path)) { std::string file_name = x.path().filename().string(); std::string::size_type pos = file_name.find(this->m_file_name); if (pos == std::string::npos) continue ; int suffix = 0; if (file_name != this->m_file_name) suffix = std::atol(file_name.substr(pos + this->m_file_name.length() + 1).c_str()); max_suffix = std::max<int>(suffix,max_suffix); } CHECK(std::fclose(this->m_binlog_handler)==0); //Rename current file. char sz_new_name[1024] = { 0 }; std::snprintf(sz_new_name, sizeof(sz_new_name),"%s-%d",this->m_file_name.c_str() ,max_suffix + 1); CHECK (std::rename(this->m_file_name.c_str(), sz_new_name) == 0) << "rename binlog file fail...,errno:" << errno; //Open & create new binlog file. this->m_binlog_handler = std::fopen(this->m_file_name.c_str(),_AURORA_BINLOG_OP_MODE_); CHECK (this->m_binlog_handler != nullptr) << "rotating, fopen binlog file fail...,errno:" << errno; } std::string BinLogOperator::GetBinlogFileName() noexcept { return this->m_file_name; } void BinLogOperator::GetOrderedMeta(FileMetaData::TypeOffsetList &_output) noexcept { m_p_meta.load()->GetOrderedMeta(_output); } }
27,023
C++
.cc
487
48.659138
188
0.650747
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,294
leader_bg_task.h
ppLorins_aurora/src/leader/leader_bg_task.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LEADER_BG_TASK_H__ #define __AURORA_LEADER_BG_TASK_H__ #include <memory> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "protocol/raft.pb.h" #include "protocol/raft.grpc.pb.h" #include "common/log_identifier.h" #include "tools/trivial_lock_double_list.h" #include "client/client_impl.h" #include "leader/follower_entity.h" namespace RaftCore { namespace Service { class Write; } } namespace RaftCore::Leader::BackGroundTask { using grpc::CompletionQueue; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::Common::ReactInfo; using ::RaftCore::Common::FinishStatus; using ::RaftCore::Tools::TypeSysTimePoint; using ::RaftCore::DataStructure::OrderedTypeBase; using ::RaftCore::Client::SyncDataSyncClient; using ::RaftCore::Leader::TypePtrFollowerEntity; using ::RaftCore::Service::Write; class TwoPhaseCommitContext { public: struct PhaseState { struct RpcStatistic { RpcStatistic(); virtual ~RpcStatistic(); std::atomic<int> m_cq_entrust_num ; std::atomic<int> m_succeed_num; std::atomic<int> m_implicitly_fail_num; std::atomic<int> m_explicitly_fail_num; int EventsGot()const noexcept; std::string Dump() const noexcept; void Reset() noexcept; }; std::string Dump() const noexcept; PhaseState(); virtual 
~PhaseState(); void Reset() noexcept; void Increase(uint32_t flag, std::atomic<int> &cur_cluster_data, std::atomic<int> &new_cluster_data) noexcept; void IncreaseEntrust(uint32_t flag) noexcept; void IncreaseSuccess(uint32_t flag) noexcept; void IncreaseImplicitFail(uint32_t flag) noexcept; void IncreaseExplicitFail(uint32_t flag) noexcept; FinishStatus JudgeClusterDetermined(RpcStatistic &cluster_stat, std::size_t majority) noexcept; bool JudgeClusterPotentiallySucceed(RpcStatistic &cluster_stat, std::size_t majority) noexcept; bool JudgeFinished() noexcept; RpcStatistic m_cur_cluster; RpcStatistic m_new_cluster; std::set<TypePtrFollowerEntity> m_conn_todo_set; }; public: TwoPhaseCommitContext(); virtual ~TwoPhaseCommitContext(); FinishStatus JudgePhaseIDetermined() noexcept; bool JudgePhaseIPotentiallySucceed() noexcept; bool JudgeAllFinished() noexcept; std::string Dump() const noexcept; void Reset() noexcept; PhaseState m_phaseI_state; PhaseState m_phaseII_state; std::size_t m_cluster_size = 0; std::size_t m_cluster_majority = 0; std::size_t m_new_cluster_size = 0; std::size_t m_new_cluster_majority = 0; }; /*Contains all information needed for a single client RPC context */ class LogReplicationContext final : public TwoPhaseCommitContext { public: LogReplicationContext()noexcept; virtual ~LogReplicationContext()noexcept; LogIdentifier m_cur_log_id; /*Have to use a pointer getting around of header files recursively including . Can't use the struct forward declaration here, shit. */ void* m_p_joint_snapshot; //A snapshot for consistent reading. 
}; class ReSyncLogContext final { public: LogIdentifier m_last_sync_point; TypePtrFollowerEntity m_follower; std::function<void(TypePtrFollowerEntity&)> m_on_success_cb; bool m_hold_pre_lcl = false; }; class SyncDataContenxt final { public: SyncDataContenxt(TypePtrFollowerEntity &shp_follower) noexcept; virtual ~SyncDataContenxt() noexcept; bool IsBeginning() const noexcept; public: LogIdentifier m_last_sync; TypePtrFollowerEntity m_follower; std::shared_ptr<SyncDataSyncClient> m_shp_client; std::function<void(TypePtrFollowerEntity&)> m_on_success_cb; ::grpc::Status m_final_status; }; class ClientReactContext final { public: ReactInfo m_react_info; }; class CutEmptyContext final : public OrderedTypeBase<CutEmptyContext> { public: CutEmptyContext(int value_flag = 0)noexcept; virtual ~CutEmptyContext()noexcept; virtual bool operator<(const CutEmptyContext& other)const noexcept override; virtual bool operator>(const CutEmptyContext& other)const noexcept override; virtual bool operator==(const CutEmptyContext& other)const noexcept override; std::shared_ptr<Write> m_write_request; TypeSysTimePoint m_generation_tp; /*<0: minimal value; >0:max value; ==0:comparable value. */ int m_value_flag = 0; std::atomic<bool> m_processed_flag; bool m_log_flag = false; }; } //end namespace #endif
5,942
C++
.h
138
35.601449
107
0.663985
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,295
leader_request.h
ppLorins_aurora/src/leader/leader_request.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LEADER_REQUEST_H__ #define __AURORA_LEADER_REQUEST_H__ #include <memory> #include "protocol/raft.grpc.pb.h" #include "protocol/raft.pb.h" #include "common/request_base.h" using ::raft::RaftService; using ::grpc::ServerCompletionQueue; using ::RaftCore::Common::UnaryRequest; namespace RaftCore::Leader { //Just a thin wrapper for differentiate rpcs. template<typename T,typename R,typename Q> class LeaderRequest : public UnaryRequest<T,R,Q>{ public: LeaderRequest()noexcept; virtual ~LeaderRequest()noexcept; private: LeaderRequest(const LeaderRequest&) = delete; LeaderRequest& operator=(const LeaderRequest&) = delete; }; } //end namespace #include "leader_request.cc" #endif
1,516
C++
.h
38
37.921053
73
0.761153
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,296
client_pool.h
ppLorins_aurora/src/leader/client_pool.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_CONNECTION_POOL_EX_H__ #define __AURORA_CONNECTION_POOL_EX_H__ #include <memory> #include <unordered_map> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "protocol/raft.pb.h" #include "protocol/raft.grpc.pb.h" #include "tools/lock_free_deque.h" #include "leader/channel_pool.h" namespace RaftCore::Leader { using ::RaftCore::DataStructure::LockFreeDeque; using ::RaftCore::Leader::ChannelPool; class FollowerEntity; template<typename T> class ClientPool final{ public: ClientPool(FollowerEntity* p_follower = nullptr) noexcept; virtual ~ClientPool() noexcept; std::shared_ptr<T> Fetch() noexcept; void Back(std::shared_ptr<T> &client) noexcept; FollowerEntity* GetParentFollower() noexcept; private: LockFreeDeque<T> m_pool; /*Cannot contain a shared_ptr<FollowerEntity> since it will cause two shared_ptr points to the same the FollowerEntity object resulting in a recursively destructing problem. */ FollowerEntity* m_p_parent_follower; private: ClientPool(const ClientPool&) = delete; ClientPool& operator=(const ClientPool&) = delete; }; } //end namespace #include "leader/client_pool.cc" #endif
1,993
C++
.h
49
38.163265
103
0.752088
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,297
memory_log_leader.h
ppLorins_aurora/src/leader/memory_log_leader.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef _AURORA_MEMORY_LOG_LEADER_H_ #define _AURORA_MEMORY_LOG_LEADER_H_ #include <ostream> #include "protocol/raft.pb.h" #include "tools/trivial_lock_double_list.h" #include "common/memory_log_base.h" using ::RaftCore::Common::MemoryLogItemBase; namespace RaftCore::Leader { class MemoryLogItemLeader final : public ::RaftCore::DataStructure::OrderedTypeBase<MemoryLogItemLeader> , public MemoryLogItemBase { public: virtual ~MemoryLogItemLeader() noexcept; MemoryLogItemLeader(uint32_t _term, uint64_t _index) noexcept; MemoryLogItemLeader(const ::raft::Entity &_entity) noexcept; virtual bool operator<(const MemoryLogItemLeader& _other)const noexcept; virtual bool operator>(const MemoryLogItemLeader& _other)const noexcept; virtual bool operator==(const MemoryLogItemLeader& _other)const noexcept; virtual bool operator!=(const MemoryLogItemLeader& _other)const noexcept; protected: virtual void NotImplemented() noexcept{} }; bool CmpMemoryLogLeader(const MemoryLogItemLeader& left, const MemoryLogItemLeader& right) noexcept; } #endif
1,883
C++
.h
38
47.052632
133
0.777473
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,298
channel_pool.h
ppLorins_aurora/src/leader/channel_pool.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_CHANNEL_POOL_EX_H__ #define __AURORA_CHANNEL_POOL_EX_H__ #include <memory> #include <unordered_map> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "protocol/raft.pb.h" #include "protocol/raft.grpc.pb.h" namespace RaftCore::Leader { class ChannelPool final{ public: ChannelPool(const std::string &peer_addr,uint32_t pool_size) noexcept; virtual ~ChannelPool() noexcept; void HeartBeat(uint32_t term,const std::string &my_addr) noexcept; std::shared_ptr<::grpc::Channel> GetOneChannel() noexcept; private: typedef std::vector<std::shared_ptr<::grpc::Channel>> TypeVecChannel; //Read only after initialization. std::shared_ptr<TypeVecChannel> m_channel_pool; //Relatively random accessing. std::atomic<uint32_t> m_idx; std::string m_peer_addr; private: ChannelPool(const ChannelPool&) = delete; ChannelPool& operator=(const ChannelPool&) = delete; }; } //end namespace #endif
1,767
C++
.h
43
38.395349
74
0.736904
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,299
leader_view.h
ppLorins_aurora/src/leader/leader_view.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LEADER_VIEW_H__ #define __AURORA_LEADER_VIEW_H__ #include <memory> #include <string> #include <set> #include "grpc/grpc.h" #include "grpc++/grpc++.h" #include "protocol/raft.pb.h" #include "common/comm_defs.h" #include "common/comm_view.h" #include "topology/topology_mgr.h" #include "tools/lock_free_queue.h" #include "tools/trivial_lock_double_list.h" #include "tools/trivial_lock_single_list.h" #include "leader/follower_entity.h" #include "leader/leader_bg_task.h" #include "leader/memory_log_leader.h" namespace RaftCore::Leader { using grpc::CompletionQueue; using ::RaftCore::Common::CommonView; using ::RaftCore::Leader::MemoryLogItemLeader; using ::RaftCore::Leader::BackGroundTask::CutEmptyContext; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::Common::ReactInfo; using ::RaftCore::DataStructure::DoubleListNode; using ::RaftCore::DataStructure::TrivialLockDoubleList; using ::RaftCore::DataStructure::SingleListNode; using ::RaftCore::DataStructure::TrivialLockSingleList; using ::RaftCore::DataStructure::LockFreeUnorderedSingleList; class LeaderView :public CommonView{ public: enum class ServerStatus { NORMAL=0, HALTED, SHUTTING_DOWN, }; public: static void Initialize(const ::RaftCore::Topology& 
_topo) noexcept; static void UnInitialize() noexcept; static void ClientThreadReacting(const ReactInfo &info) noexcept; static void BroadcastHeatBeat() noexcept; static void UpdateThreadMapping() noexcept; //Set the following member functions to protected is to facilitate gtest. #ifdef _LEADER_VIEW_TEST_ public: #else private: #endif static bool ReSyncLogCB(std::shared_ptr<BackGroundTask::ReSyncLogContext> &shp_context)noexcept; static bool SyncDataCB(std::shared_ptr<BackGroundTask::SyncDataContenxt> &shp_context)noexcept; static bool ClientReactCB(std::shared_ptr<BackGroundTask::ClientReactContext> &shp_context) noexcept; public: static std::string my_addr; static std::unordered_map<std::string,TypePtrFollowerEntity> m_hash_followers; static std::shared_timed_mutex m_hash_followers_mutex; static TrivialLockDoubleList<MemoryLogItemLeader> m_entity_pending_list; static LockFreeUnorderedSingleList<DoubleListNode<MemoryLogItemLeader>> m_garbage; //Used for write requests which cannot get finished after it CutHead. static TrivialLockSingleList<CutEmptyContext> m_cut_empty_list; static LockFreeUnorderedSingleList<SingleListNode<CutEmptyContext>> m_cut_empty_garbage; //CV used for multiple threads cooperating on append binlog operations. static std::condition_variable m_cv; static std::mutex m_cv_mutex; static std::atomic<LogIdentifier> m_last_cut_log; static ServerStatus m_status; static std::atomic<uint32_t> m_last_log_waiting_num; //Mapping each notify thread to a dedicated cq for it to entrust client requests. 
static std::unordered_map<std::thread::id,uint32_t> m_notify_thread_mapping; private: static void AddRescynDataTask(std::shared_ptr<BackGroundTask::ReSyncLogContext> &shp_context) noexcept; static auto PrepareAppendEntriesRequest(std::shared_ptr<BackGroundTask::ReSyncLogContext> &shp_context); static bool SyncLogAfterLCL(std::shared_ptr<BackGroundTask::SyncDataContenxt> &shp_context); private: static const char* m_invoker_macro_names[]; private: LeaderView() = delete; virtual ~LeaderView() = delete; LeaderView(const LeaderView &) = delete; LeaderView& operator=(const LeaderView &) = delete; }; } #endif
4,467
C++
.h
97
42.865979
108
0.758685
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,300
follower_entity.h
ppLorins_aurora/src/leader/follower_entity.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_FOLLOWER_STATE_H__ #define __AURORA_FOLLOWER_STATE_H__ #include <memory> #include <atomic> #include <unordered_map> #include "common/comm_view.h" #include "common/comm_defs.h" #include "client/client_impl.h" #include "leader/client_pool.h" namespace RaftCore::Leader { using grpc::CompletionQueue; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::Member::JointConsensusMask; using ::RaftCore::Client::AppendEntriesAsyncClient; using ::RaftCore::Client::CommitEntriesAsyncClient; using ::RaftCore::Leader::ChannelPool; using ::RaftCore::Leader::ClientPool; enum class FollowerStatus { NORMAL = 0, RESYNC_LOG, RESYNC_DATA, }; /* This is the class for representing follower's state in leader's view */ class FollowerEntity final{ public: template<typename T> using TClientPool = std::unordered_map<uint32_t, std::unique_ptr<T>>; inline static const char* MacroToString(FollowerStatus enum_val) { return m_status_macro_names[int(enum_val)]; } public: FollowerEntity(const std::string &follower_addr, FollowerStatus status = FollowerStatus::NORMAL, uint32_t joint_consensus_flag = uint32_t(JointConsensusMask::IN_OLD_CLUSTER), std::shared_ptr<CompletionQueue> input_cq = nullptr) noexcept; virtual ~FollowerEntity() 
noexcept; //If ever successfully updated the last sent committed for this follower. bool UpdateLastSentCommitted(const LogIdentifier &to) noexcept; std::shared_ptr<AppendEntriesAsyncClient> FetchAppendClient(void* &pool) noexcept; std::shared_ptr<CommitEntriesAsyncClient> FetchCommitClient(void* &pool) noexcept; public: std::string my_addr; //A simple type , can be read & blind write simultaneously by multiple thread. FollowerStatus m_status; uint32_t m_joint_consensus_flag; //Record #(timeout entries) since the latest successfully replicated log. int32_t m_timeout_counter; std::shared_ptr<ChannelPool> m_shp_channel_pool; std::atomic<LogIdentifier> m_last_sent_committed; private: //Note: only high frequently used client need to be pooled. TClientPool<ClientPool<AppendEntriesAsyncClient>> m_append_client_pool; TClientPool<ClientPool<CommitEntriesAsyncClient>> m_commit_client_pool; static const char* m_status_macro_names[]; private: FollowerEntity(const FollowerEntity&) = delete; FollowerEntity& operator=(const FollowerEntity&) = delete; }; typedef std::shared_ptr<::RaftCore::Leader::FollowerEntity> TypePtrFollowerEntity; } //end namespace #endif
3,380
C++
.h
75
41.893333
100
0.755358
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,301
service.h
ppLorins_aurora/src/service/service.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_SERVICE_EX_H__ #define __AURORA_SERVICE_EX_H__ #include <set> #include <mutex> #include "protocol/raft.pb.h" #include "protocol/raft.grpc.pb.h" #include "common/log_identifier.h" #include "leader/leader_request.h" #include "leader/follower_entity.h" #include "leader/leader_view.h" #include "leader/leader_bg_task.h" #include "follower/follower_request.h" #include "follower/memory_log_follower.h" #include "follower/follower_bg_task.h" #include "candidate/candidate_request.h" #include "guid/guid_generator.h" #include "client/client_impl.h" #include "tools/lock_free_unordered_single_list.h" #include "service/ownership_delegator.h" namespace RaftCore::Service { using ::RaftCore::Guid::GuidGenerator; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::DataStructure::AtomicPtrSingleListNode; using ::RaftCore::Common::FinishStatus; using ::RaftCore::Service::OwnershipDelegator; using ::RaftCore::Leader::BackGroundTask::LogReplicationContext; using ::RaftCore::Leader::BackGroundTask::TwoPhaseCommitContext; using ::RaftCore::Leader::MemoryLogItemLeader; using ::RaftCore::Leader::LeaderRequest; using ::RaftCore::Leader::FollowerEntity; using ::RaftCore::Leader::LeaderView; using 
::RaftCore::Follower::FollowerUnaryRequest; using ::RaftCore::Follower::FollowerBidirectionalRequest; using ::RaftCore::Follower::TypeMemlogFollowerList; using ::RaftCore::Follower::BackGroundTask::DisorderMessageContext; using ::RaftCore::Leader::BackGroundTask::CutEmptyContext; using ::RaftCore::Candidate::CandidateUnaryRequest; using ::RaftCore::Client::AppendEntriesAsyncClient; using ::RaftCore::Tools::TypeTimePoint; using ::RaftCore::Tools::TypeSysTimePoint; //For the prospective common properties . class RPCBase { public: RPCBase(); virtual ~RPCBase(); protected: bool LeaderCheckVailidity(::raft::ClientCommonResponse* response) noexcept; std::string FollowerCheckValidity(const ::raft::RequestBase &req_base, TypeTimePoint* p_tp = nullptr, LogIdentifier *p_cur_id = nullptr) noexcept; bool ValidClusterNode(const std::string &peer_addr) noexcept; inline static const char* MacroToString(LeaderView::ServerStatus enum_val) { return m_status_macro_names[int(enum_val)]; } protected: std::shared_timed_mutex m_mutex; static const char* m_status_macro_names[]; private: RPCBase(const RPCBase&) = delete; RPCBase& operator=(const RPCBase&) = delete; }; class Write final : public LeaderRequest<::raft::ClientWriteRequest, ::raft::ClientWriteResponse, Write>, public OwnershipDelegator<Write>, public RPCBase { public: Write(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ~Write(); /*In Write RPC, the whole 'Process' procedure is divided into several smaller parts, just give an empty implementation here.*/ ::grpc::Status Process() noexcept override; void ReplicateDoneCallBack(const ::grpc::Status &status, const ::raft::AppendEntriesResponse& rsp, FollowerEntity* ptr_follower, AppendEntriesAsyncClient* ptr_client) noexcept; //Return: the to-be entrusted client if any, otherwise a nullptr is returned. 
bool UpdatePhaseIStatistic(const ::grpc::Status &status, const ::raft::AppendEntriesResponse& rsp, FollowerEntity* ptr_follower) noexcept; void CommitDoneCallBack(const ::grpc::Status &status, const ::raft::CommitEntryResponse& rsp, FollowerEntity* ptr_follower) noexcept; #ifdef _SVC_WRITE_TEST_ auto GetInnerLog() { return this->m_shp_entity; } #endif const std::shared_ptr<LogReplicationContext>& GetReqCtx() noexcept; void AfterAppendBinlog() noexcept; static void CutEmptyRoutine() noexcept; private: enum class WriteProcessStage { CREATE, FRONT_FINISH, ABOURTED }; virtual void React(bool cq_result = true) noexcept override; bool BeforeReplicate() noexcept; void AfterDetermined(AppendEntriesAsyncClient* ptr_client) noexcept; bool PrepareReplicationStatistic(std::list<std::shared_ptr<AppendEntriesAsyncClient>> &entrust_list) noexcept; //Return : if get majority entrusted. bool PrepareReplicationContext(uint32_t cur_term, uint32_t pre_term) noexcept; FinishStatus JudgeReplicationResult() noexcept; void ProcessReplicateFailure(const ::raft::CommonResponse& comm_rsp, TwoPhaseCommitContext::PhaseState &phaseI_state, FollowerEntity* ptr_follower, uint32_t joint_consensus_state) noexcept; void AddResyncLogTask(FollowerEntity* ptr_follower, const LogIdentifier &sync_point) noexcept; void EntrustCommitRequest(FollowerEntity* ptr_follower, AppendEntriesAsyncClient* ptr_client)noexcept; void ReleasePhaseIIReadyList()noexcept; void FinishRequest(WriteProcessStage state) noexcept; //Return: If successfully CutHead someone off from the pending list. bool AppendBinlog(AppendEntriesAsyncClient* ptr_client) noexcept; /*The microseconds that the thread should waiting for. After detecting a failure, waiting for its previous logs to have a deterministic result(success or implicitly/explicitly fail). Both the latest and non-latest logs have to be waited for. 
*/ uint32_t GetConservativeTimeoutValue(uint64_t idx,bool last_guid=false) const noexcept; //Return : if server status successfully changed . bool UpdateServerStatus(uint64_t guid, LeaderView::ServerStatus status) noexcept; void LastlogResolve(bool result, uint64_t last_released_guid) noexcept; //Return the last released GUID. uint64_t WaitForLastGuidReleasing() const noexcept; bool ProcessCutEmptyRequest(const TypeSysTimePoint &tp, const LogIdentifier &current_lrl, std::shared_ptr<CutEmptyContext> &one, bool recheck) noexcept; private: bool m_first_of_cur_term = false; std::shared_ptr<MemoryLogItemLeader> m_shp_entity; // The latest guid used setting server status. uint64_t m_last_trigger_guid = 0; ::raft::EntityID* m_p_pre_entity_id = nullptr; std::shared_ptr<LogReplicationContext> m_shp_req_ctx; TypeTimePoint m_tp_start; std::chrono::time_point<std::chrono::steady_clock> m_wait_time_point; ::raft::ClientCommonResponse* m_rsp = nullptr; GuidGenerator::GUIDPair m_guid_pair; ::raft::ClientWriteRequest* m_client_request = nullptr; std::shared_ptr<::raft::CommitEntryRequest> m_shp_commit_req; //Indicating if the entry point of majority succeed is already taken by other threads. 
std::atomic<bool> m_phaseI_determined_point; AtomicPtrSingleListNode<FollowerEntity> m_phaseII_ready_list; WriteProcessStage m_write_stage{ WriteProcessStage::CREATE }; #ifdef _SVC_WRITE_TEST_ std::tm m_start_tm = { 0, 0, 0, 26, 9 - 1, 2019 - 1900 }; std::chrono::time_point<std::chrono::system_clock> m_epoch; #endif private: Write(const Write&) = delete; Write& operator=(const Write&) = delete; }; class Read final : public LeaderRequest<::raft::ClientReadRequest,::raft::ClientReadResponse,Read>, public RPCBase { public: Read(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: Read(const Read&) = delete; Read& operator=(const Read&) = delete; }; class MembershipChange final : public LeaderRequest<::raft::MemberChangeRequest,::raft::MemberChangeResponse,MembershipChange>, public RPCBase { public: MembershipChange(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: MembershipChange(const MembershipChange&) = delete; MembershipChange& operator=(const MembershipChange&) = delete; }; class AppendEntries final : public FollowerUnaryRequest<::raft::AppendEntriesRequest, ::raft::AppendEntriesResponse, AppendEntries>, public OwnershipDelegator<AppendEntries>, public RPCBase { public: AppendEntries(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ~AppendEntries()noexcept; //Return: If current request finished processing. 
bool BeforeJudgeOrder() noexcept ; const LogIdentifier& GetLastLogID() const noexcept; static void DisorderLogRoutine() noexcept; protected: ::grpc::Status Process() noexcept override; virtual void React(bool cq_result = true) noexcept override; private: bool ProcessDisorderLog(const TypeSysTimePoint &tp, const LogIdentifier &upper_log, std::shared_ptr<DisorderMessageContext> &one) noexcept; void ProcessAdjacentLog() noexcept; void ProcessOverlappedLog() noexcept; std::string ComposeInputLogs() noexcept; private: TypeTimePoint m_tp_start; ::raft::CommonResponse* m_rsp = nullptr; //std::unique_lock<std::mutex> *m_mutex_lock = nullptr; TypeMemlogFollowerList m_log_list; const ::raft::EntityID *m_pre_entity_id = nullptr; const ::raft::EntityID *m_last_entity_id = nullptr; LogIdentifier m_last_log; enum class AppendEntriesProcessStage { CREATE, WAITING, FINISH }; AppendEntriesProcessStage m_append_entries_stage{ AppendEntriesProcessStage::CREATE }; private: AppendEntries(const AppendEntries&) = delete; AppendEntries& operator=(const AppendEntries&) = delete; }; class CommitEntries final : public FollowerUnaryRequest<::raft::CommitEntryRequest,::raft::CommitEntryResponse,CommitEntries>, public RPCBase { public: CommitEntries(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: CommitEntries(const CommitEntries&) = delete; CommitEntries& operator=(const CommitEntries&) = delete; }; class SyncData final : public FollowerBidirectionalRequest<::raft::SyncDataRequest,::raft::SyncDataResponse,SyncData>, public RPCBase { public: SyncData(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: SyncData(const SyncData&) = delete; 
SyncData& operator=(const SyncData&) = delete; }; class MemberChangePrepare final : public FollowerUnaryRequest<::raft::MemberChangeInnerRequest,::raft::MemberChangeInnerResponse,MemberChangePrepare>, public RPCBase { public: MemberChangePrepare(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: MemberChangePrepare(const MemberChangePrepare&) = delete; MemberChangePrepare& operator=(const MemberChangePrepare&) = delete; }; class MemberChangeCommit final : public FollowerUnaryRequest<::raft::MemberChangeInnerRequest,::raft::MemberChangeInnerResponse,MemberChangeCommit>, public RPCBase { public: MemberChangeCommit(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: void SetServerShuttingDown() noexcept; private: MemberChangeCommit(const MemberChangeCommit&) = delete; MemberChangeCommit& operator=(const MemberChangeCommit&) = delete; }; class PreVote final : public CandidateUnaryRequest<::raft::VoteRequest,::raft::VoteResponse,PreVote>, public RPCBase { public: PreVote(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: PreVote(const PreVote&) = delete; PreVote& operator=(const PreVote&) = delete; }; class Vote final : public CandidateUnaryRequest<::raft::VoteRequest,::raft::VoteResponse,Vote>, public RPCBase { public: Vote(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; 
private: Vote(const Vote&) = delete; Vote& operator=(const Vote&) = delete; }; //This RPC making sense to multiple roles. class HeartBeat final : public UnaryRequest<::raft::HeartBeatRequest,::raft::CommonResponse,HeartBeat>, public RPCBase { public: HeartBeat(std::shared_ptr<RaftService::AsyncService> shp_svc, std::shared_ptr<ServerCompletionQueue> &shp_notify_cq, std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept; virtual ::grpc::Status Process() noexcept override; private: HeartBeat(const HeartBeat&) = delete; HeartBeat& operator=(const HeartBeat&) = delete; }; } #endif
14,428
C++
.h
280
47.157143
167
0.758389
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,302
ownership_delegator.h
ppLorins_aurora/src/service/ownership_delegator.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_OWNERSHIP_DELEGATOR_H__ #define __AURORA_OWNERSHIP_DELEGATOR_H__ #include <memory> namespace RaftCore::Service { template<typename T> class OwnershipDelegator { public: OwnershipDelegator(); virtual ~OwnershipDelegator(); void ResetOwnership(T *src) noexcept; void ReleaseOwnership() noexcept; std::shared_ptr<T> GetOwnership()noexcept; void CopyOwnership(std::shared_ptr<T> from)noexcept; private: std::shared_ptr<T> *m_p_shp_delegator = nullptr; private: OwnershipDelegator(const OwnershipDelegator&) = delete; OwnershipDelegator& operator=(const OwnershipDelegator&) = delete; }; } #include "service/ownership_delegator.cc" #endif
1,498
C++
.h
37
37.891892
73
0.753825
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,303
storage.h
ppLorins_aurora/src/storage/storage.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_STORAGE_H__ #define __AURORA_STORAGE_H__ #include <unordered_map> #include <mutex> #include "boost/filesystem.hpp" #include "common/comm_defs.h" #include "common/log_identifier.h" #include "tools/lock_free_unordered_single_list.h" #include "storage/sstable.h" namespace RaftCore::Storage { using ::RaftCore::Common::LogIdentifier; using ::RaftCore::Common::WriteLock; using ::RaftCore::DataStructure::AtomicPtrSingleListNode; using ::RaftCore::DataStructure::UnorderedSingleListNode; using ::RaftCore::Storage::MemoryTable;; namespace fs = ::boost::filesystem; class StorageMgr final{ public: struct StorageItem { StorageItem(const LogIdentifier &log_id,const std::shared_ptr<std::string> &key, const std::shared_ptr<std::string> &value) : m_log_id(log_id),m_key(key),m_value(value) {} LogIdentifier m_log_id; //Ownership of the following two std::shared_ptr can be taken over. std::shared_ptr<std::string> m_key; std::shared_ptr<std::string> m_value; }; public: StorageMgr() noexcept; virtual ~StorageMgr() noexcept; bool Initialize(const char* role, bool reset = false) noexcept; void UnInitialize() noexcept; //Delete all data both in memory & disk. 
void Reset() noexcept; bool Get(const std::string &key, std::string &val) const noexcept; bool Set(const LogIdentifier &log_id, const std::string &key, const std::string &value) noexcept; const LogIdentifier GetLastCommitted() const noexcept; /*Note: return the `step_len` number of records greater than start_at.If start_at is earlier than the oldest item in the storage, return the earliest step_len records. */ void GetSlice(const LogIdentifier& start_at,uint32_t step_len,std::list<StorageItem> &output_list) const noexcept; void PurgeGarbage() noexcept; static void FindRoleBinlogFiles(const std::string &role, std::list<std::string> &output); private: bool ConstructFromBinlog(const LogIdentifier &from, const std::string &binlog_file_name) noexcept; void GetSliceInSSTable(const LogIdentifier& start_at, int step_len, std::list<StorageItem> &output_list) const noexcept; void GetSliceInMemory(const LogIdentifier& start_at, int step_len, std::list<StorageItem> &output_list) const noexcept; void ClearInMemoryData() noexcept; //return : indicating if purging can proceed or not. bool PurgeSSTable() noexcept; void PurgeMemoryTable() noexcept; void DumpMemoryTable(const MemoryTable *src) noexcept; void ConstructMemoryTable(const LogIdentifier &from) noexcept; void RecycleLast2SStables() noexcept; template<typename T> void ReleaseData(AtomicPtrSingleListNode<T> &head) noexcept { //Releasing process should be mutual exclusion from purging process. WriteLock _w_lock(this->m_mutex); auto *_p_cur = head.load(); while (_p_cur != nullptr) { auto _tmp = _p_cur; _p_cur = _p_cur->m_next.load(); //delete _tmp->m_data; delete _tmp; } head.store(nullptr); } private: bool m_initialized = false; std::string m_role_str = ""; fs::path m_path; std::atomic<LogIdentifier> m_last_committed; std::atomic<LogIdentifier> m_last_persist; /*There are several special operations for the followings, so use the raw version of single list instead of the wrapped version 'LockFreeUnorderedSingleList'. 
*/ AtomicPtrSingleListNode<MemoryTable> m_memory_table_head; AtomicPtrSingleListNode<MemoryTable> m_garbage_memory_table; AtomicPtrSingleListNode<SSTAble> m_sstable_table_head; AtomicPtrSingleListNode<SSTAble> m_garbage_sstable; std::shared_timed_mutex m_mutex; std::list<std::string> m_loaded_binlog_files; private: StorageMgr(const StorageMgr&) = delete; StorageMgr& operator=(const StorageMgr&) = delete; }; } //end namespace #endif
4,881
C++
.h
100
43.86
133
0.709145
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,304
hashable_string.h
ppLorins_aurora/src/storage/hashable_string.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_HASHABLE_STRING_H__ #define __AURORA_HASHABLE_STRING_H__ #include <string> #include "common/comm_defs.h" #include "tools/lock_free_hash.h" namespace RaftCore::Storage { using ::RaftCore::DataStructure::HashTypeBase; class HashableString final: public HashTypeBase<HashableString> { public: //Constructing a object for temporary usage like querying in hash. 
HashableString(const std::string &other,bool on_fly=false) noexcept; virtual ~HashableString() noexcept; virtual bool operator<(const HashableString &other)const noexcept override; virtual bool operator==(const HashableString &other)const noexcept override; virtual bool operator==(const std::string &other)const noexcept ; virtual const HashableString& operator=(const HashableString &other)noexcept override; virtual std::size_t Hash() const noexcept override; virtual const std::string& GetStr() const noexcept ; virtual const std::shared_ptr<std::string> GetStrPtr() const noexcept ; private: std::shared_ptr<std::string> m_shp_str; }; typedef std::shared_ptr<HashableString> TypePtrHashableString; struct PtrHSHasher { std::size_t operator()(const TypePtrHashableString &shp_hashable_string)const; }; struct PtrHSEqualer { bool operator()(const TypePtrHashableString &left, const TypePtrHashableString &right)const; }; } //end namespace #endif
2,190
C++
.h
46
44.869565
96
0.766068
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,305
memory_table.h
ppLorins_aurora/src/storage/memory_table.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_MEMORY_TABLE_H__ #define __AURORA_MEMORY_TABLE_H__ #include "common/comm_defs.h" #include "tools/lock_free_hash.h" #include "storage/hashable_string.h" namespace RaftCore::Storage { using ::RaftCore::DataStructure::LockFreeHash; using ::RaftCore::Storage::HashableString; struct HashValue { HashValue(uint32_t a, uint64_t b, const std::string &c); bool operator<(const HashValue &other)const noexcept; uint32_t m_term; uint64_t m_index; std::shared_ptr<std::string> m_val; }; typedef std::shared_ptr<HashValue> TypePtrHashValue; typedef LockFreeHash<HashableString, HashValue> TypeRecords; typedef std::shared_ptr<TypeRecords> PtrRecords; class MemoryTable final{ public: MemoryTable() noexcept; virtual ~MemoryTable() noexcept; void Insert(const std::string &key, const std::string &val, uint32_t term, uint64_t index) noexcept; void IterateByKey(std::function<bool(const TypePtrHashableString&,const TypePtrHashValue&)> op) const noexcept; bool IterateByVal(std::function<bool(const HashValue&, const HashableString&)> op) const noexcept; bool GetData(const std::string &key, std::string &val) const noexcept; std::size_t Size() const noexcept; private: PtrRecords m_shp_records; private: MemoryTable(const MemoryTable&) = 
delete; MemoryTable& operator=(const MemoryTable&) = delete; }; typedef std::shared_ptr<MemoryTable> TypePtrMemoryTable; } //end namespace #endif
2,273
C++
.h
51
41.784314
115
0.753537
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,306
sstable.h
ppLorins_aurora/src/storage/sstable.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_SSTABLE_H__ #define __AURORA_SSTABLE_H__ #include "common/comm_defs.h" #include "common/log_identifier.h" #include "tools/lock_free_hash.h" #include "storage/hashable_string.h" #include "storage/memory_table.h" #define _AURORA_SSTABLE_PREFIX_ "sstable.data" #define _AURORA_SSTABLE_MERGE_SUFFIX_ ".merged" #define _AURORA_DATA_DIR_ "data" namespace RaftCore::Storage { using ::RaftCore::DataStructure::LockFreeHash; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::Storage::HashableString; class SSTAble final{ public: struct Meta { Meta(uint32_t a, uint16_t b, uint32_t c, uint16_t d,uint32_t e,uint64_t f); uint32_t m_key_offset; uint16_t m_key_len; uint32_t m_val_offset; uint16_t m_val_len; uint32_t m_term; uint64_t m_index; bool operator<(const Meta &other); }; typedef std::shared_ptr<Meta> TypePtrMeta; public: SSTAble(const char* file) noexcept; SSTAble(const SSTAble &from,const SSTAble &to) noexcept; SSTAble(const MemoryTable &src) noexcept; virtual ~SSTAble() noexcept; bool Read(const std::string &key, std::string &val) const noexcept; LogIdentifier GetMaxLogID() const noexcept; LogIdentifier GetMinLogID() const noexcept; const std::string& GetFilename() const noexcept; bool 
IterateByVal(std::function<bool(const Meta &meta,const HashableString &key)> op) const noexcept; private: void CreateFile(const char* file_name = nullptr) noexcept; void ParseFile() noexcept; void ParseMeta(unsigned char* &allocated_buf,std::size_t meta_len) noexcept; void DumpFrom(const MemoryTable &src) noexcept; void AppendKvPair(const TypePtrHashableString &key, const TypePtrHashValue &val, void* buf, uint32_t buff_len, uint32_t &buf_offset, uint32_t &file_offset) noexcept; void AppendMeta(const TypePtrHashableString &key, const TypePtrMeta &shp_meta, void* buf, uint32_t buff_len, uint32_t &buf_offset, uint32_t &file_offset) noexcept; void CalculateMetaOffset() noexcept; void AppendChecksum(uint32_t checksum) noexcept; void AppendMetaOffset() noexcept; void AppendFooter() noexcept; private: uint32_t m_record_crc = 0; uint32_t m_meta_crc = 0; long m_meta_offset = 0; typedef LockFreeHash<HashableString, Meta> TypeOffset; typedef std::shared_ptr<TypeOffset> TypePtrOffset; TypePtrOffset m_shp_meta; LogIdentifier m_max_log_id; LogIdentifier m_min_log_id; std::string m_associated_file = ""; std::FILE *m_file_handler = nullptr; static const int m_single_meta_len = _FOUR_BYTES_ * 3 + _TWO_BYTES_ * 2 + _EIGHT_BYTES_; private: SSTAble(const SSTAble&) = delete; SSTAble& operator=(const SSTAble&) = delete; }; } //end namespace #endif
3,671
C++
.h
83
39.722892
105
0.716671
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,307
storage_singleton.h
ppLorins_aurora/src/storage/storage_singleton.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_STORAGE_SINGLETON_H__ #define __AURORA_STORAGE_SINGLETON_H__ #include "storage/storage.h" namespace RaftCore::Storage { using ::RaftCore::Storage::StorageMgr; class StorageGlobal final{ public: static StorageMgr m_instance; private: StorageGlobal() = delete; virtual ~StorageGlobal() = delete; StorageGlobal(const StorageGlobal&) = delete; StorageGlobal& operator=(const StorageGlobal&) = delete; }; } //end namespace #endif
1,270
C++
.h
31
38.677419
73
0.748975
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,308
lock_free_hash_specific.h
ppLorins_aurora/src/tools/lock_free_hash_specific.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LOCK_FREE_HASH_SPECIFIC_H__ #define __AURORA_LOCK_FREE_HASH_SPECIFIC_H__ #include <thread> #include <atomic> #include "tools/lock_free_hash.h" namespace RaftCore::DataStructure { using ::RaftCore::DataStructure::HashNode; using ::RaftCore::DataStructure::LockFreeHash; using ::RaftCore::DataStructure::LockableNode; //Partial specification. 
template <typename T,typename R> class HashNodeAtomic final : public HashNode<T, R>, public LockableNode<void> { public: //using SpecifiedNode = HashNode<T, std::atomic<R*>>; HashNodeAtomic(const std::shared_ptr<T> &key,const std::shared_ptr<R> &val) noexcept; HashNodeAtomic* GetNext() const noexcept; void Update(R* val) noexcept; virtual void LockValue() noexcept override; virtual void UnLockValue() noexcept override; private: HashNodeAtomic(const HashNodeAtomic&) = delete; HashNodeAtomic& operator=(const HashNodeAtomic&) = delete; }; template <typename T,typename R> class LockFreeHashAtomic final : public LockFreeHash<T, R, HashNodeAtomic> { public: LockFreeHashAtomic(uint32_t slot_num=0)noexcept; bool Upsert(const T *key, R* p_avl) noexcept; private: LockFreeHashAtomic(const LockFreeHashAtomic&) = delete; LockFreeHashAtomic& operator=(const LockFreeHashAtomic&) = delete; }; } //end namespace #include "tools/lock_free_hash_specific.cc" #endif
2,179
C++
.h
50
41
89
0.760724
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,309
lock_free_hash.h
ppLorins_aurora/src/tools/lock_free_hash.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LOCK_FREE_HASH_H__ #define __AURORA_LOCK_FREE_HASH_H__ #include <memory> #include <list> #include <map> #include <type_traits> #include <functional> namespace RaftCore::DataStructure { template <typename T> class HashTypeBase { public: HashTypeBase() noexcept{} virtual bool operator<(const T&)const noexcept = 0; virtual bool operator==(const T&)const noexcept = 0; virtual const T& operator=(const T&)noexcept = 0; virtual std::size_t Hash() const noexcept = 0; virtual bool IsDeleted() const noexcept final; virtual void SetDeleted() const noexcept final; virtual void SetValid() const noexcept final; private: mutable bool m_deleted = false; }; template <typename T,typename R=void> class HashNode { public: HashNode(const std::shared_ptr<T> &key,const std::shared_ptr<R> &val) noexcept; virtual ~HashNode() noexcept; //virtual void Update(const std::shared_ptr<R> &val) noexcept; HashNode* GetNext() const noexcept; void SetNext(const HashNode<T,R> * const p_next) noexcept; std::shared_ptr<T> GetKey() const noexcept; std::size_t GetKeyHash() const noexcept; virtual std::shared_ptr<R> GetVal() const noexcept; void ModifyKey(std::function<void(std::shared_ptr<T>&)> op) noexcept; bool IsDeleted() const noexcept; void 
SetDeleted() const noexcept; void SetValid() const noexcept; void SetTag(uint32_t tag) noexcept; uint32_t GetTag() const noexcept; bool operator==(const HashNode<T,R>& one) const noexcept; bool operator==(const T& one) const noexcept; virtual void LockValue() noexcept; virtual void UnLockValue() noexcept; protected: //mutable std::mutex m_mutex; std::shared_ptr<T> m_shp_key; std::shared_ptr<R> m_shp_val; HashNode<T,R>* m_next = nullptr; uint32_t m_iterating_tag = 0; private: HashNode(const HashNode&) = delete; HashNode& operator=(const HashNode&) = delete; }; template <typename T,typename R=void, template<typename,typename> typename NodeType=HashNode> class LockFreeHash { public: typedef std::function<bool(const std::shared_ptr<R> &left, const std::shared_ptr<R> &right)> ValueComparator; LockFreeHash(uint32_t slot_num=0) noexcept; virtual ~LockFreeHash() noexcept; void Insert(const std::shared_ptr<T> &key, const std::shared_ptr<R> &val = nullptr, uint32_t tag = 0) noexcept; void Delete(const T &key) noexcept; bool Find(const T &key) const noexcept; /*Note: val pointer's ownership will be taken over. And the return value indicate whether the key pointer's ownership has been taken. */ //bool Upsert(const T *key, const std::shared_ptr<R> val = nullptr) noexcept; bool Read(const T &key, std::shared_ptr<R> &val) const noexcept; uint32_t Size() const noexcept; /*The GetOrderedBy* are time consuming operations when slots number is large, be sure not to invoke it in a real-time processing scenario.*/ void GetOrderedByKey(std::list<std::shared_ptr<T>> &_output) const noexcept; void GetOrderedByValue(std::map<std::shared_ptr<R>,std::shared_ptr<T>,ValueComparator> &_output) const noexcept; //Map the operator op to every element in the hash. void Map(std::function<void(std::shared_ptr<T>&)> op) noexcept; //Read only iterator. 
void Iterate(std::function<bool(const std::shared_ptr<T> &k,const std::shared_ptr<R> &v)> op) noexcept; bool CheckCond(std::function<bool(const T &key)> criteria) const noexcept; //Clear inserted elements but not the base structure nodes of the current hash. void Clear(bool destruct = false) noexcept; protected: uint32_t m_slots_mask = 0; std::atomic<NodeType<T,R>*> ** m_solts = nullptr; private: struct MyComparator { bool operator()(const std::shared_ptr<T> &a,const std::shared_ptr<T> &b) const{ return *a < *b; } }; struct MyEqualer { bool operator()(const std::shared_ptr<T> &a,const std::shared_ptr<T> &b) const{ return *a == *b; } }; struct MyHasher { std::size_t operator()(const std::shared_ptr<T> &a) const{ return a->Hash(); } }; uint32_t m_slots_num = 0; std::atomic<uint32_t> m_size; private: LockFreeHash(const LockFreeHash&) = delete; LockFreeHash& operator=(const LockFreeHash&) = delete; }; } //end namespace /*This is for separating template class member function definitions from its .h file into a corresponding .cc file: https://www.codeproject.com/Articles/48575/How-to-define-a-template-class-in-a-h-file-and-imp. */ #include "tools/lock_free_hash.cc" #endif
5,529
C++
.h
125
39.656
116
0.703627
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,310
trivial_lock_single_list.h
ppLorins_aurora/src/tools/trivial_lock_single_list.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_TRIVIAL_LOCK_ORDERED_SINGLE_LIST_H__ #define __AURORA_TRIVIAL_LOCK_ORDERED_SINGLE_LIST_H__ #include <memory> #include <atomic> #include <type_traits> #include <mutex> #include <functional> #include "glog/logging.h" #include "common/macro_manager.h" #include "tools/data_structure_base.h" #include "tools/trivial_lock_list_base.h" namespace RaftCore::DataStructure { template <typename T> class SingleListNode final : public OrderedTypeBase<SingleListNode<T>>, public LogicalDelete<void> { public: SingleListNode(const std::shared_ptr<T> &shp_val) noexcept; virtual ~SingleListNode() noexcept; virtual bool operator<(const SingleListNode& other)const noexcept override; virtual bool operator>(const SingleListNode& other)const noexcept override; virtual bool operator==(const SingleListNode& other)const noexcept override; static void Apply(SingleListNode<T>* phead, std::function<void(SingleListNode<T>*)> unary) noexcept; std::shared_ptr<T> m_val; std::atomic<SingleListNode<T>*> m_atomic_next; }; template <typename T> class TrivialLockSingleList final : public OperationTracker<SingleListNode<T>> { public: TrivialLockSingleList(const std::shared_ptr<T> &p_min, const std::shared_ptr<T> &p_max) noexcept; virtual 
~TrivialLockSingleList() noexcept; void Insert(const std::shared_ptr<T> &p_one) noexcept; void Insert(SingleListNode<T>* new_node) noexcept; /*Note : Delete & CutHead are not intended to be invoked simultaneously. */ bool Delete(const std::shared_ptr<T> &p_one) noexcept; SingleListNode<T>* CutHead(std::function<bool(const T &one)> criteria) noexcept; SingleListNode<T>* CutHeadByValue(const T &val) noexcept; static void ReleaseCutHead(SingleListNode<T>* output_head) noexcept; //This method is not thread safe , but no way to call it simultaneously. void Clear() noexcept; SingleListNode<T>* SetEmpty() noexcept; void IterateCutHead(std::function<bool(std::shared_ptr<T> &)> accessor, SingleListNode<T>* output_head) const noexcept; void Iterate(std::function<bool(std::shared_ptr<T> &)> accessor) const noexcept; bool Empty() const noexcept; #ifdef _SINGLE_LIST_TEST_ int GetSize() const noexcept; SingleListNode<T>* GetHead() const noexcept; #endif private: void InsertTracker(SingleListNode<T>* new_node) noexcept; bool InsertRaw(SingleListNode<T>* new_node) noexcept; void SiftOutDeleted(SingleListNode<T>* &output_head) noexcept; SingleListNode<T>* m_head; //Used for indicating a cut head list. SingleListNode<T>* m_tail; std::recursive_mutex m_recursive_mutex; private: TrivialLockSingleList& operator=(const TrivialLockSingleList&) = delete; }; } //end namespace #include "tools/trivial_lock_single_list.cc" #endif
3,641
C++
.h
75
45.133333
123
0.749787
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,311
lock_free_deque.h
ppLorins_aurora/src/tools/lock_free_deque.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LOCK_FREE_DEQUE_H__ #define __AURORA_LOCK_FREE_DEQUE_H__ #include <memory> #include <atomic> #include <shared_mutex> #include "common/comm_defs.h" #include "tools/lock_free_unordered_single_list.h" namespace RaftCore::DataStructure { using ::RaftCore::DataStructure::AtomicPtrSingleListNode; using ::RaftCore::DataStructure::UnorderedSingleListNode; using ::RaftCore::DataStructure::LockFreeUnorderedSingleList; enum class EDequeNodeFlag { NORMAL = 0, FAKE_NODE, NO_COUNTING }; template <typename T> class DequeNode final { public: DequeNode() noexcept; //For dumb nodes DequeNode(const std::shared_ptr<T> &p_val) noexcept; virtual ~DequeNode() noexcept; std::atomic<DequeNode<T>*> m_atomic_next; std::shared_ptr<T> m_val; EDequeNodeFlag m_flag = EDequeNodeFlag::NORMAL; }; template <typename T> class LockFreeDeque final{ public: LockFreeDeque() noexcept; virtual ~LockFreeDeque() noexcept; void Push(const std::shared_ptr<T> &p_one, EDequeNodeFlag flag = EDequeNodeFlag::NORMAL) noexcept; std::shared_ptr<T> Pop() noexcept; #ifdef _DEQUE_TEST_ std::size_t GetSizeByIterating() const noexcept; std::size_t GetLogicalSize() const noexcept; std::size_t GetPhysicalSize() const noexcept; std::size_t Size() const noexcept; 
#endif static void GC() noexcept; private: DequeNode<T>* PopNode() noexcept; private: std::atomic<DequeNode<T>*> m_head; std::atomic<DequeNode<T>*> m_tail; DequeNode<T>* m_dummy = nullptr; #ifdef _DEQUE_TEST_ std::atomic<uint32_t> m_logical_size; std::atomic<uint32_t> m_physical_size; #endif static LockFreeUnorderedSingleList<DequeNode<T>> m_garbage; private: LockFreeDeque& operator=(const LockFreeDeque&) = delete; }; } //end namespace #include "tools/lock_free_deque.cc" #endif
2,658
C++
.h
68
36.088235
102
0.739592
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,312
lock_free_unordered_single_list.h
ppLorins_aurora/src/tools/lock_free_unordered_single_list.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LOCK_FREE_UNORDERED_SINGLE_LIST_H__ #define __AURORA_LOCK_FREE_UNORDERED_SINGLE_LIST_H__ #include <atomic> #include <functional> #include "common/macro_manager.h" namespace RaftCore::DataStructure { template <typename T> class UnorderedSingleListNode final{ public: template<typename ...Args> UnorderedSingleListNode(Args&&... args) noexcept; virtual ~UnorderedSingleListNode()noexcept; explicit UnorderedSingleListNode(T* p_src)noexcept; T* m_data; std::atomic<UnorderedSingleListNode<T>*> m_next; }; template<typename T> using AtomicPtrSingleListNode = std::atomic<UnorderedSingleListNode<T>*>; template <typename T> class LockFreeUnorderedSingleList final{ public: LockFreeUnorderedSingleList() noexcept; virtual ~LockFreeUnorderedSingleList() noexcept; void SetDeleter(std::function<void(T*)> deleter)noexcept; //Will take the ownership of 'src'. 
void PushFront(T* src) noexcept; void PurgeSingleList(uint32_t retain_num) noexcept; #ifdef _UNORDERED_SINGLE_LIST_TEST_ uint32_t Size() noexcept; void Iterate(std::function<void(T*)> func) noexcept; #endif private: std::atomic<UnorderedSingleListNode<T>*> m_head; std::function<void(T*)> m_deleter = [](T* data) { delete data; }; private: LockFreeUnorderedSingleList(const LockFreeUnorderedSingleList&) = delete; LockFreeUnorderedSingleList& operator=(const LockFreeUnorderedSingleList&) = delete; }; } //end namespace #include "tools/lock_free_unordered_single_list.cc" #endif
2,350
C++
.h
56
39.053571
88
0.757857
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,313
lock_free_queue.h
ppLorins_aurora/src/tools/lock_free_queue.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_TRIVIAL_LOCK_QUEUE_H__ #define __AURORA_TRIVIAL_LOCK_QUEUE_H__ #include <memory> #include <atomic> #include <functional> namespace RaftCore::DataStructure { enum class SlotState { /*--------------Node State--------------*/ SLOT_EMPTY, SLOT_PRODUCING, SLOT_PRODUCED, SLOT_CONSUMING }; template <typename T> class QueueNode final{ public: QueueNode() noexcept; virtual ~QueueNode() noexcept; std::shared_ptr<T> m_val; std::atomic<SlotState> m_state; inline static const char* MacroToString(SlotState enum_val) { return m_status_macro_names[int(enum_val)]; } private: static const char* m_status_macro_names[]; }; /*Note : LockFreeQueueBase is a wrapper aimed at eliminating specifying the template parameters needed by the invokers when they call LockFreeQueue methods. 
*/ class LockFreeQueueBase { public: LockFreeQueueBase(){} virtual ~LockFreeQueueBase(){} virtual int Push(void* ptr_shp_element) noexcept = 0; virtual int PopConsume() noexcept = 0; virtual uint32_t GetSize() const noexcept = 0; virtual uint32_t GetCapacity() const noexcept = 0; virtual bool Empty() const noexcept = 0; }; //The following is a ring-buf supported multi-thread producing and multi-thread consuming template <typename T> class LockFreeQueue final : public LockFreeQueueBase { public: typedef std::function<bool(std::shared_ptr<T> &ptr_element)> TypeCallBackFunc; LockFreeQueue() noexcept; void Initilize(TypeCallBackFunc fn_cb,int queue_size) noexcept; virtual ~LockFreeQueue() noexcept; virtual int Push(void* ptr_shp_element) noexcept override; virtual int PopConsume() noexcept override; //Get a snapshot size. virtual uint32_t GetSize() const noexcept override; //For gtest usage. virtual uint32_t GetCapacity() const noexcept override; virtual bool Empty() const noexcept override; private: int Pop(std::shared_ptr<T> &ptr_element) noexcept; private: //Position where holds the latest produced element. std::atomic<uint32_t> m_head; //Position which just before the earliest produced element.If empty (m_head == m_tail). std::atomic<uint32_t> m_tail; /*Note :In the current design, there will always be at least one slot empty , to simplify the implementation. */ QueueNode<T> *m_data = nullptr; //Data ring buffer. TypeCallBackFunc m_fn_cb; uint32_t m_element_size = 0; uint32_t m_element_mask = 0; private: LockFreeQueue& operator=(const LockFreeQueue&) = delete; LockFreeQueue(const LockFreeQueue&) = delete; }; } //end namespace #include "tools/lock_free_queue.cc" #endif
3,523
C++
.h
88
36.329545
118
0.727138
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,314
utilities.h
ppLorins_aurora/src/tools/utilities.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef __AURORA_UTILITIES_H__ #define __AURORA_UTILITIES_H__ #include <stdint.h> #include <list> #include <set> #include <string> #include <chrono> #include <cassert> #include <ctime> #include <random> #include "common/log_identifier.h" #include "boost/crc.hpp" namespace RaftCore::Tools { using ::RaftCore::Common::LogIdentifier; typedef std::chrono::time_point<std::chrono::steady_clock> TypeTimePoint; typedef std::chrono::time_point<std::chrono::system_clock> TypeSysTimePoint; //Note: The suffix letter of 'x' is to avoid name conflict under darwin. enum class LocalEndian{ UKNOWN_X, BIG_ENDIAN_X, LITTLE_ENDIAN_X }; inline bool LocalBigEndian() { static LocalEndian g_is_local_big_endian = LocalEndian::UKNOWN_X; if (g_is_local_big_endian != LocalEndian::UKNOWN_X) return (g_is_local_big_endian == LocalEndian::BIG_ENDIAN_X); uint32_t uTest = 0x12345678; unsigned char* pTest = (unsigned char*)&uTest; g_is_local_big_endian = LocalEndian::LITTLE_ENDIAN_X; if ((*pTest) == 0x12) { g_is_local_big_endian = LocalEndian::BIG_ENDIAN_X; return true; } return false; } template<typename _type> inline void ConvertToBigEndian(_type input, _type *output) { //Note:"input" is a copied. 
assert(output != nullptr && output != &input); if (LocalBigEndian()) { *output = input; return; } unsigned char* pCur = (unsigned char*)&input; unsigned char* pTarget = (unsigned char*)output; int iter_cnt = sizeof(_type) - 1; for (int i = 0; i <= iter_cnt; ++i) pTarget[iter_cnt-i] = pCur[i]; } template<typename _type> inline void ConvertBigEndianToLocal(_type input, _type *output) { ConvertToBigEndian(input, output); } void GetLocalIPs(std::list<std::string>& ips); uint32_t RoundUp(uint32_t num); uint32_t GetMask(uint32_t num); inline uint32_t CalculateCRC32(const void* data, unsigned int len) { boost::crc_32_type crc_result; crc_result.process_bytes(data,len); return crc_result.checksum(); } TypeTimePoint StartTimeing(); uint64_t EndTiming(const TypeTimePoint &tp_start, const char* operation_name, const LogIdentifier *p_cur_id = nullptr); void StringSplit(const std::string &input, char delimiter, std::set<std::string> &output); void StringSplit(const std::string &input, char delimiter, std::list<std::string> &output); std::string TimePointToString(const TypeSysTimePoint &tp); uint32_t GenerateRandom(uint32_t from, uint32_t to); template<typename T> inline uint32_t SizeOfX() noexcept { return sizeof(T); } template<> inline uint32_t SizeOfX<void>() noexcept { return 0; } } #endif
3,436
C++
.h
87
36.494253
119
0.72395
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,315
trivial_lock_double_list.h
ppLorins_aurora/src/tools/trivial_lock_double_list.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_TRIVIAL_LOCK_LIST_H__ #define __AURORA_TRIVIAL_LOCK_LIST_H__ #include <memory> #include <atomic> #include <type_traits> #include <mutex> #include <functional> #include "glog/logging.h" #include "tools/data_structure_base.h" #include "tools/trivial_lock_list_base.h" namespace RaftCore::DataStructure { template <typename T> class DoubleListNode final : public OrderedTypeBase<DoubleListNode<T>>, public LogicalDelete<void> { public: DoubleListNode(const std::shared_ptr<T> &p_val) noexcept; virtual ~DoubleListNode() noexcept; virtual bool operator<(const DoubleListNode& other)const noexcept override; virtual bool operator>(const DoubleListNode& other)const noexcept override; virtual bool operator==(const DoubleListNode& other)const noexcept override; //There are several lock-free operations base on std::atomic::CAS std::atomic<DoubleListNode<T>*> m_atomic_pre; std::atomic<DoubleListNode<T>*> m_atomic_next; std::shared_ptr<T> m_val; static void Apply(DoubleListNode<T>* phead, std::function<void(DoubleListNode<T>*)> unary) noexcept; }; template <typename T> class TrivialLockDoubleList final : OperationTracker<DoubleListNode<T>> { public: TrivialLockDoubleList(const std::shared_ptr<T> &p_min, const 
std::shared_ptr<T> &p_max) noexcept; virtual ~TrivialLockDoubleList() noexcept; void Insert(const std::shared_ptr<T> &p_one) noexcept; void Insert(DoubleListNode<T>* new_node) noexcept; /*Note : Delete & CutHead are not intended to be invoked simultaneously. */ bool Delete(const std::shared_ptr<T> &p_one) noexcept; void DeleteAll() noexcept; //1. Each pair of the adjacent elements satisfy criteria: cut them all. //2. otherwise, cut the satisfied elements. DoubleListNode<T>* CutHead(std::function<bool(const T &left, const T &right)> criteria) noexcept; DoubleListNode<T>* CutHead(std::function<bool(const T &one)> criteria) noexcept; DoubleListNode<T>* CutHeadByValue(const T &val) noexcept; static void ReleaseCutHead(DoubleListNode<T>* output_head) noexcept; //This method is not thread safe , but no way to call it simultaneously. void Clear() noexcept; void IterateCutHead(std::function<bool(T &)> accessor, DoubleListNode<T>* output_head) const noexcept; void Iterate(std::function<bool(T &)> accessor) const noexcept; bool Empty() const noexcept; #ifdef _TRIIAL_DOUBLE_LIST_TEST_ int GetSize() const noexcept; DoubleListNode<T>* GetHead() const noexcept; #endif private: void InsertTracker(DoubleListNode<T>* new_node) noexcept; /* Self-purging redundant */ bool InsertRaw(DoubleListNode<T>* new_node) noexcept; DoubleListNode<T>* FindNextNonDelete(DoubleListNode<T>* p_cur) noexcept; DoubleListNode<T>* ExpandForward(DoubleListNode<T>* p_cur) noexcept; DoubleListNode<T>* ExpandBackward(DoubleListNode<T>* p_cur) noexcept; bool MoveForward(DoubleListNode<T>* &p_pre,DoubleListNode<T>* &p_next) noexcept; void SiftOutDeleted(DoubleListNode<T>* &output_head) noexcept; DoubleListNode<T>* m_head = nullptr; DoubleListNode<T>* m_tail = nullptr; std::recursive_mutex m_recursive_mutex; private: TrivialLockDoubleList& operator=(const TrivialLockDoubleList&) = delete; }; } //end namespace #include "tools/trivial_lock_double_list.cc" #endif
4,196
C++
.h
83
46.879518
106
0.747228
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,316
lock_free_priority_queue.h
ppLorins_aurora/src/tools/lock_free_priority_queue.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_LOCK_FREE_PRIORITY_QUEUE_H__ #define __AURORA_LOCK_FREE_PRIORITY_QUEUE_H__ #include <memory> #include <map> #include <thread> #include <chrono> #include <set> #include "tools/lock_free_queue.h" namespace RaftCore::DataStructure { class LockFreePriotityQueue final{ public: enum class TaskType { /*The sequence of declarations also defines the priority from highest to lowest.*/ CLIENT_REACTING = 0, //Enum value also indicated the index in the task array RESYNC_DATA, RESYNC_LOG, }; struct Task { Task(TaskType x, LockFreeQueueBase *y) noexcept; Task(const Task& one) noexcept; void operator=(Task& one) noexcept; virtual ~Task(); bool operator<(const Task& _other); TaskType m_task_type; std::unique_ptr<LockFreeQueueBase> m_pc_queue; }; public: LockFreePriotityQueue() noexcept; virtual ~LockFreePriotityQueue() noexcept; void Initialize(int consumer_threads_num) noexcept; void UnInitialize() noexcept; /*Note: 1.AddTask is not thread safe, only invoke this method during server initialization. 2.Order of calling AddTask should be the same with the order of _task_type parameter defined in the 'TaskType'.This constrain is not reasonable and should be optimized off in the future. 
*/ void AddTask(TaskType _task_type, LockFreeQueueBase* _queue) noexcept; /*_shp_element: pointer of std::shared_ptr<> pointing to the element to be inserted. The shared_ptr object's ownership is guaranteed to be increased. */ int Push(TaskType _task_type,void* _shp_element) noexcept; void Launch() noexcept; uint32_t GetSize() const noexcept; private: void Stop() noexcept; void ThreadEntrance() noexcept; private: std::condition_variable m_cv; std::mutex m_cv_mutex; int m_consumer_thread_num=0; std::atomic<int> m_running_thread_num; volatile bool m_stop = false; std::map<uint32_t,Task> m_task_queue; private: LockFreePriotityQueue& operator=(const LockFreePriotityQueue&) = delete; LockFreePriotityQueue(const LockFreePriotityQueue&) = delete; }; } //end namespace #endif
3,132
C++
.h
71
39.450704
111
0.69705
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,317
trivial_lock_list_base.h
ppLorins_aurora/src/tools/trivial_lock_list_base.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_TRIVIAL_LOCK_LIST_BASE_H__ #define __AURORA_TRIVIAL_LOCK_LIST_BASE_H__ #include <thread> #include <atomic> #include "tools/data_structure_base.h" #include "tools/lock_free_hash_specific.h" namespace RaftCore::DataStructure { using ::RaftCore::DataStructure::HashTypeBase; using ::RaftCore::DataStructure::HashNode; using ::RaftCore::DataStructure::LockFreeHashAtomic; //The template is an wrapper for compile compatibility. 
template<typename T=void> class ThreadIDWrapper final : public HashTypeBase<ThreadIDWrapper<T>> { public: ThreadIDWrapper(std::thread::id tid)noexcept; virtual ~ThreadIDWrapper()noexcept; virtual bool operator<(const ThreadIDWrapper&)const noexcept override; virtual bool operator==(const ThreadIDWrapper&)const noexcept override; virtual const ThreadIDWrapper& operator=(const ThreadIDWrapper&other)noexcept override { this->m_tid = other.m_tid; return *this; } virtual std::size_t Hash() const noexcept override; std::thread::id GetTid() const noexcept; private: std::thread::id m_tid; private: ThreadIDWrapper(const ThreadIDWrapper&) = delete; }; template<typename T> class OperationTracker { public: OperationTracker()noexcept; virtual ~OperationTracker()noexcept; protected: void WaitForListClean(T* output_head) noexcept; protected: LockFreeHashAtomic<ThreadIDWrapper<void>, T> *m_p_insert_footprint = nullptr; private: OperationTracker(const OperationTracker&) = delete; OperationTracker& operator=(const OperationTracker&) = delete; }; } //end namespace #include "tools/trivial_lock_list_base.cc" #endif
2,462
C++
.h
61
37.393443
90
0.762368
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,318
data_structure_base.h
ppLorins_aurora/src/tools/data_structure_base.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_DATA_STRUCTURE_COMMON_H__ #define __AURORA_DATA_STRUCTURE_COMMON_H__ namespace RaftCore::DataStructure { template <typename T> class OrderedTypeBase { public: OrderedTypeBase() noexcept; virtual ~OrderedTypeBase() noexcept; //The element of this list should be able to be compared with each other. virtual bool operator<(const T&)const noexcept = 0; virtual bool operator>(const T&)const noexcept = 0; virtual bool operator==(const T&)const noexcept = 0; virtual bool operator!=(const T&_other)const noexcept; //Should be non-final, providing a way for the subclass to override. virtual bool operator<=(const T& _other)const noexcept; virtual bool operator>=(const T& _other)const noexcept; //virtual std::string PrintMe() const noexcept { return ""; } }; //Here template is just a padding. template <typename T=void> class LogicalDelete { public: LogicalDelete() noexcept; virtual ~LogicalDelete() noexcept; virtual bool IsDeleted() const noexcept final; virtual void SetDeleted() noexcept final; private: bool m_deleted = false; }; //Here template is just a padding. 
template <typename T=void> class LockableNode { public: LockableNode() noexcept; virtual ~LockableNode() noexcept; protected: void SpinLock() noexcept; void SpinUnLock() noexcept; private: std::atomic_flag m_spin_lock; }; } //end namespace #include "tools/data_structure_base.cc" #endif
2,281
C++
.h
59
35.59322
77
0.743132
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,319
timer.h
ppLorins_aurora/src/tools/timer.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef __AURORA_TIMER_H__ #define __AURORA_TIMER_H__ #include <stdint.h> #include <queue> #include <functional> #include <shared_mutex> namespace RaftCore::Timer { class GlobalTimer { public: static void Initialize() noexcept; static void UnInitialize() noexcept; static void AddTask(uint32_t interval_ms,std::function<bool()> processor) noexcept; private: static void Stop() noexcept; static void ThreadEntrance() noexcept; private: struct Task { Task(int x, std::function<bool()> y) : m_interval_ms(x),m_processor(y) {}; Task(uint64_t a, int x, std::function<bool()> y) : m_next_run(a), m_interval_ms(x),m_processor(y) {}; Task(uint64_t a, int x) : m_next_run(a), m_interval_ms(x) {}; void operator=(const Task &one); uint64_t m_next_run; int m_interval_ms; std::function<bool()> m_processor; }; struct TaskCmp { bool operator()(const Task &x,const Task &y); }; enum class ETimerThreadState {INITIALIZED,RUNNING,STOPPING,STOPPED}; static std::priority_queue<Task,std::deque<Task>,TaskCmp> m_heap; static std::shared_timed_mutex m_share_timed_mutex; static volatile ETimerThreadState m_thread_state; private: GlobalTimer() = delete; virtual ~GlobalTimer() noexcept = delete; GlobalTimer(const GlobalTimer&) = delete; GlobalTimer& operator=(const 
GlobalTimer&) = delete; }; } #endif
2,383
C++
.h
54
37.907407
113
0.656209
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,320
election.h
ppLorins_aurora/src/election/election.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_ELECTION_H__ #define __AURORA_ELECTION_H__ #include <memory> #include <shared_mutex> #include <map> #include <set> #include <thread> #include "protocol/raft.pb.h" #include "common/comm_defs.h" #include "common/log_identifier.h" #include "tools/utilities.h" #include "state/state_mgr.h" #include "member/member_manager.h" #include "topology/topology_mgr.h" #define _AURORA_ELECTION_CONFIG_FILE_ "election.config" namespace RaftCore::Election { using ::RaftCore::Common::VoteType; using ::RaftCore::Common::TwoPhaseCommitBatchTask; using ::RaftCore::Common::LogIdentifier; using ::RaftCore::State::RaftRole; using ::RaftCore::Topology; using ::RaftCore::Member::MemberMgr; using ::raft::VoteRequest; class ElectionMgr { public: static void Initialize() noexcept; static void UnInitialize() noexcept; static std::string TryVote(uint32_t term,const std::string &addr)noexcept; static void AddVotingTerm(uint32_t term,const std::string &addr) noexcept; static void ElectionThread()noexcept; static void NotifyNewLeaderEvent(uint32_t term, const std::string addr)noexcept; //SwitchRole is not idempotent. 
static void SwitchRole(RaftRole target_role, const std::string &new_leader = "") noexcept; static void CallBack(const ::grpc::Status &status, const ::raft::VoteResponse& rsp, VoteType vote_type,uint32_t idx) noexcept; #ifdef _ELECTION_TEST_ static void WaitElectionThread()noexcept; #endif public: /*To make election process simple & clear & non error prone, avoiding multiple thread operations as much as possible. */ static std::shared_timed_mutex m_election_mutex; //Persistent state on all servers: static std::atomic<uint32_t> m_cur_term; //current term //This is a special variable passing through the server-wide lives. static volatile bool m_leader_debut; static LogIdentifier m_pre_term_lrl; private: static void RenameBinlogNames(RaftRole old_role, RaftRole target_role) noexcept; static void CandidateRoutine()noexcept; static void LoadFile()noexcept; static void SaveFile() noexcept; static void Reset() noexcept; static bool BroadcastVoting(std::shared_ptr<VoteRequest> shp_req, const Topology &topo, VoteType vote_type) noexcept; //Return: if a higher term found during the increasing process. static bool IncreaseToMaxterm()noexcept; static void PollingCQ(std::shared_ptr<::grpc::CompletionQueue> shp_cq,int entrust_num)noexcept; static void PreparePrevoteTask(const Topology &topo)noexcept; static void SentNonOP(const std::string &tag) noexcept; private: static MemberMgr::JointSummary m_joint_snapshot; static std::map<uint32_t, std::string> m_voted; static std::shared_timed_mutex m_voted_mutex; static std::map<uint32_t, std::set<std::string>> m_known_voting; static std::shared_timed_mutex m_known_voting_mutex; static std::thread *m_p_thread; #ifdef _ELECTION_TEST_ static volatile bool m_candidate_routine_running; #endif struct NewLeaderEvent { std::string m_new_leader_addr; uint32_t m_new_leader_term; //POT type,thread safe. 
volatile bool m_notify_flag; }; static NewLeaderEvent m_new_leader_event; static uint32_t m_cur_cluster_size; static uint32_t m_cur_cluster_vote_counter; static uint32_t m_new_cluster_vote_counter; static TwoPhaseCommitBatchTask<std::string> m_phaseI_task; static TwoPhaseCommitBatchTask<std::string> m_phaseII_task; private: ElectionMgr() = delete; virtual ~ElectionMgr() = delete; ElectionMgr(const ElectionMgr&) = delete; ElectionMgr& operator=(const ElectionMgr&) = delete; }; } //end namespace #endif
4,616
C++
.h
104
40.519231
130
0.738534
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,321
memory_log_base.h
ppLorins_aurora/src/common/memory_log_base.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef _AURORA_MEMORY_LOG_BASE_H_ #define _AURORA_MEMORY_LOG_BASE_H_ #include <ostream> #include "protocol/raft.pb.h" namespace RaftCore::Common { class MemoryLogItemBase { public: MemoryLogItemBase(uint32_t _term, uint64_t _index)noexcept; MemoryLogItemBase(const ::raft::Entity &_entity)noexcept; virtual ~MemoryLogItemBase()noexcept; bool operator<(const MemoryLogItemBase &_other)const noexcept; bool operator>(const MemoryLogItemBase &_other)const noexcept; virtual bool operator==(const MemoryLogItemBase& _other)const noexcept; virtual bool operator!=(const MemoryLogItemBase& _other)const noexcept; bool AfterOf(const MemoryLogItemBase& _other)const noexcept; std::shared_ptr<::raft::Entity> GetEntity()const noexcept; protected: //Prevent the base class from being instantiated virtual void NotImplemented() noexcept = 0 ; protected: //Note: doesn't take the ownership of the original object std::shared_ptr<::raft::Entity> m_entity; }; bool CmpMemoryLog(const MemoryLogItemBase *left, const MemoryLogItemBase *right) noexcept; } #endif
1,912
C++
.h
41
43.731707
90
0.760976
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,322
macro_manager.h
ppLorins_aurora/src/common/macro_manager.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef _AURORA_MACRO_MGR_H_ #define _AURORA_MACRO_MGR_H_ #ifdef _RAFT_UNIT_TEST_ #define _DEQUE_TEST_ #define _SVC_WRITE_TEST_ #define _SVC_APPEND_ENTRIES_TEST_ #define _COMMON_VIEW_TEST_ #define _LEADER_VIEW_TEST_ #define _FOLLOWER_VIEW_TEST_ #define _MEMBER_MANAGEMENT_TEST_ #define _UNORDERED_SINGLE_LIST_TEST_ #define _SINGLE_LIST_TEST_ #define _ELECTION_TEST_ #define _TRIIAL_DOUBLE_LIST_TEST_ #define _STORAGE_TEST_ #define _GLOBAL_TEST_ #endif #endif
1,252
C++
.h
33
36.636364
73
0.757651
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,323
comm_defs.h
ppLorins_aurora/src/common/comm_defs.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #pragma warning( disable : 4290 ) #ifndef _AURORA_COMM_DEFS_H_ #define _AURORA_COMM_DEFS_H_ #include <ostream> #include <shared_mutex> #include <list> #include <vector> #include <tuple> #include "gflags/gflags.h" #include "glog/logging.h" #include "protocol/raft.pb.h" #include "common/macro_manager.h" namespace RaftCore::Common { #define _TWO_BYTES_ (2) #define _FOUR_BYTES_ (4) #define _EIGHT_BYTES_ (8) #define _MAX_UINT16_ (0xFFFF) #define _MAX_UINT32_ (0xFFFFFFFF) #define _MAX_INT32_ (0x7FFFFFFF) #define _MAX_UINT64_ (0xFFFFFFFFFFFFFFFF) #define _RING_BUF_EMPTY_POS_ (-1) #define _RING_BUF_INVALID_POS_ (-2) #define _ROLE_STR_LEADER_ "leader" #define _ROLE_STR_FOLLOWER_ "follower" #define _ROLE_STR_CANDIDATE_ "candidate" #define _ROLE_STR_UNKNOWN_ "unknown" #define _ROLE_STR_TEST_ "test" #define _AURORA_LOCAL_IP_ "127.0.0.1" #ifdef _SVC_WRITE_TEST_ #define _WRITE_VAL_TS_ "write_val_ts_" #endif typedef std::shared_lock<std::shared_timed_mutex> SharedLock; typedef std::unique_lock<std::shared_timed_mutex> UniqueLock; typedef SharedLock ReadLock; typedef UniqueLock WriteLock; typedef std::list<std::shared_ptr<::raft::Entity>> TypeEntityList; typedef std::tuple<unsigned char*,int> TypeBufferInfo; enum class FinishStatus { 
NEGATIVE_FINISHED = 0, UNFINISHED, POSITIVE_FINISHED }; enum class PhaseID { PhaseI = 0, PhaseII }; enum class VoteType { PreVote = 0, Vote }; bool EntityIDSmaller(const ::raft::EntityID &left, const ::raft::EntityID &right); bool EntityIDEqual(const ::raft::EntityID &left, const ::raft::EntityID &right); bool EntityIDSmallerEqual(const ::raft::EntityID &left, const ::raft::EntityID &right); template<typename T> struct TwoPhaseCommitBatchTask { std::vector<T> m_todo; std::vector<uint32_t> m_flags; }; } /*Note:Additional definitions in other namespace of this project.These definitions may not be suitable to be located in their original namespace since otherwise will cause header files recursively including issues.*/ namespace RaftCore::Member { enum class EJointStatus { STABLE=0, JOINT_CONSENSUS }; enum class JointConsensusMask { IN_OLD_CLUSTER = 1, IN_NEW_CLUSTER = 2, }; } #endif
3,042
C++
.h
74
39.189189
112
0.736215
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,324
request_base.h
ppLorins_aurora/src/common/request_base.h
/*
*   <Aurora. A raft based distributed KV storage system.>
*   Copyright (C) <2019> <arthur> <pplorins@gmail.com>

*   This program is free software: you can redistribute it and/or modify
*   it under the terms of the GNU General Public License as published by
*   the Free Software Foundation, either version 3 of the License, or
*   (at your option) any later version.

*   This program is distributed in the hope that it will be useful,
*   but WITHOUT ANY WARRANTY; without even the implied warranty of
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*   GNU General Public License for more details.

*   You should have received a copy of the GNU General Public License
*   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Class hierarchy for server-side asynchronous gRPC request handlers:
// RequestBase -> RequestTpl -> {UnaryRequest, BidirectionalRequest}.

#pragma once

#ifndef __AURORA_REQUEST_BASE_H__
#define __AURORA_REQUEST_BASE_H__

#include <memory>

#include "protocol/raft.grpc.pb.h"
#include "protocol/raft.pb.h"
#include "common/react_base.h"

using ::raft::RaftService;
using ::grpc::ServerCompletionQueue;

namespace RaftCore::Common {

using ::RaftCore::Common::ReactBase;

// Root of the async request hierarchy.  Owns the per-rpc server context and
// shares the async service plus the notify/call completion queues.
// NOTE(review): the template parameter T is unused by the class body —
// presumably kept so the whole hierarchy stays template-based; confirm.
template<typename T=void>
class RequestBase : public ReactBase {

public:

    RequestBase()noexcept;

    // Wire this handler to the service and its completion queues.
    void Initialize(std::shared_ptr<RaftService::AsyncService> shp_svc,
        std::shared_ptr<ServerCompletionQueue> &shp_notify_cq,
        std::shared_ptr<ServerCompletionQueue> &shp_call_cq)noexcept;

    virtual ~RequestBase()noexcept;

    // Per-rpc business logic, implemented by each concrete request type.
    virtual ::grpc::Status Process() noexcept = 0;

protected:

    //Server context cannot be reused across rpcs.
    ::grpc::ServerContext    m_server_context;

    std::shared_ptr<RaftService::AsyncService>    m_async_service;

    std::shared_ptr<ServerCompletionQueue>    m_server_notify_cq;

    std::shared_ptr<ServerCompletionQueue>    m_server_call_cq;

private:

    RequestBase(const RequestBase&) = delete;

    RequestBase& operator=(const RequestBase&) = delete;

};

// Adds the concrete request (T) and response (R) protobuf message storage.
template<typename T,typename R>
class RequestTpl : public RequestBase<void> {

public:

    RequestTpl()noexcept;

    virtual ~RequestTpl()noexcept;

protected:

    T    m_request;

    R    m_response;

private:

    RequestTpl(const RequestTpl&) = delete;

    RequestTpl& operator=(const RequestTpl&) = delete;

};

// Async handler for unary rpcs: one request in, one response out.
// Progresses through a simple CREATE -> FINISH state machine driven by
// completion-queue events delivered to React().
template<typename T,typename R,typename Q>
class UnaryRequest : public RequestTpl<T,R> {

public:

    UnaryRequest()noexcept;

    virtual ~UnaryRequest()noexcept;

protected:

    // Advance the state machine on a completion-queue event.
    virtual void React(bool cq_result) noexcept override;

protected:

    ::grpc::ServerAsyncResponseWriter<R>    m_responder;

    enum class ProcessStage { CREATE = 0, FINISH };

    ProcessStage    m_stage;

private:

    UnaryRequest(const UnaryRequest&) = delete;

    UnaryRequest& operator=(const UnaryRequest&) = delete;

};

// Async handler for bidirectional streaming rpcs.  The state machine covers
// the streaming lifecycle (READ/WRITE/CONNECT/DONE/FINISH); m_mutex guards
// against concurrent completion-queue events on the same stream.
template<typename T,typename R,typename Q>
class BidirectionalRequest : public RequestTpl<T,R> {

public:

    BidirectionalRequest()noexcept;

    virtual ~BidirectionalRequest()noexcept;

protected:

    // Advance the state machine on a completion-queue event.
    virtual void React(bool cq_result) noexcept override;

    // Human-readable name of the current stage (for logging).
    const char* GetStageName()const noexcept;

protected:

    ::grpc::ServerAsyncReaderWriter<R,T>    m_reader_writer;

    enum class ProcessStage { READ = 0, WRITE, CONNECT, DONE, FINISH };

    // Stage names indexed by ProcessStage value.
    static const char*  m_status_macro_names[];

    ProcessStage    m_stage;

private:

    std::mutex    m_mutex;

private:

    BidirectionalRequest(const BidirectionalRequest&) = delete;

    BidirectionalRequest& operator=(const BidirectionalRequest&) = delete;

};

} //end namespace

// Template definitions are included here so that instantiations compile.
#include "common/request_base.cc"

#endif
3,560
C++
.h
93
34.827957
74
0.745513
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,325
error_code.h
ppLorins_aurora/src/common/error_code.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_ERROR_CODE_H__ #define __AURORA_ERROR_CODE_H__ /*--------------Produce & consume operation return values--------------*/ #define QUEUE_ERROR (-1) #define QUEUE_SUCC (0) #define QUEUE_FULL (1) #define QUEUE_EMPTY (2) /*--------------storage error codes--------------*/ #define SUCC (0) #define LEFT_BEHIND (-1) #endif
1,196
C++
.h
26
44.615385
73
0.662931
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,326
react_base.h
ppLorins_aurora/src/common/react_base.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_REACT_BASE_H__ #define __AURORA_REACT_BASE_H__ #include <memory> #include <thread> #include <functional> #include "grpc++/completion_queue.h" namespace RaftCore::Common { struct ReactInfo { ReactInfo()noexcept; void Set(bool cq_result, void* tag)noexcept; ReactInfo(const ReactInfo &other)noexcept; bool m_cq_result = false; void* m_tag; }; typedef std::function<void(ReactInfo)> TypeReactorFunc; //An empty wrapper for all the subclasses which need to implement 'React' method. class ReactBase { public: ReactBase()noexcept; virtual ~ReactBase()noexcept; virtual void React(bool cq_result = true) noexcept = 0; static void GeneralReacting(const ReactInfo &info)noexcept; private: ReactBase(const ReactBase&) = delete; ReactBase& operator=(const ReactBase&) = delete; }; } //end namespace #endif
1,671
C++
.h
43
36.255814
81
0.743606
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,327
react_group.h
ppLorins_aurora/src/common/react_group.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef __AURORA_REACT_GROUP_H__ #define __AURORA_REACT_GROUP_H__ #include <memory> #include <thread> #include <functional> #include "grpc++/completion_queue.h" #include "common/react_base.h" namespace RaftCore::Common { using ::grpc::CompletionQueue; using ::grpc::ServerCompletionQueue; template<typename T=ServerCompletionQueue> using TypePtrCQ = std::shared_ptr<T>; //On a one CQ <---> multiple threads basis. template<typename T=ServerCompletionQueue> class ReactWorkGroup { public: enum class CQType { ServerCQ = 2, GENERAL_CQ }; public: ReactWorkGroup(TypePtrCQ<T> shp_cq, TypeReactorFunc reactor, int therad_num)noexcept; virtual ~ReactWorkGroup(); void StartPolling() noexcept; void WaitPolling() noexcept; TypePtrCQ<T> GetCQ() noexcept; void ShutDownCQ() noexcept; void GetThreadId(std::vector<std::thread::id> &ids) noexcept; private: void GrpcPollingThread() noexcept; TypePtrCQ<T> m_shp_cq; std::vector<std::thread*> m_vec_threads; TypeReactorFunc m_reactor; int m_polling_threads_num = 0; }; } //end namespace #include "common/react_group.cc" #endif
1,943
C++
.h
50
36.2
89
0.74275
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,328
log_identifier.h
ppLorins_aurora/src/common/log_identifier.h
/* * <Aurora. A raft based distributed KV storage system.> * Copyright (C) <2019> <arthur> <pplorins@gmail.com> * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #ifndef _AURORA_LOG_INDENTIFIER_H_ #define _AURORA_LOG_INDENTIFIER_H_ #include <ostream> #include "protocol/raft.pb.h" namespace RaftCore::Common { /*This struct is used for being the instance of tempalte class std::atomic where we cannot use ::raft::EntityID directly ,beacuse it is not TRIVIALLY COPYABLE. */ struct LogIdentifier { /* Can't give a user-defined constructor for this struct , otherwise the compiler would complain 'the default constructor of "std::atomic<LogIdentifier>" cannot be referenced -- it is a deleted function', even though the LogIdentifier struct itself is TRIVIALLY COPABLE . This is a compiler (Microsoft (R) C/C++ Optimizing Compiler Version 19.00.24215.1 for x86) issue, ,by contrast, there is no such problem under clang : Apple LLVM version 7.0.2 (clang-700.1.81). */ uint32_t m_term = 0; //Election term uint64_t m_index = 0; //The index under current election term //The followings mean to simulate copy-constructor,a work around of the problem described above. 
void Set(const LogIdentifier &_other)noexcept; void Set(uint32_t term, uint64_t index)noexcept; uint32_t GreaterThan(const LogIdentifier& _other) const noexcept; bool operator==(const LogIdentifier& _other) const noexcept; bool operator!=(const LogIdentifier& _other) const noexcept; bool operator< (const LogIdentifier &_other) const noexcept; bool operator<= (const LogIdentifier &_other) const noexcept; bool operator> (const LogIdentifier &_other) const noexcept; bool operator>= (const LogIdentifier &_other) const noexcept; std::string ToString() const noexcept; }; std::ostream& operator<<(std::ostream& os, const LogIdentifier& obj); LogIdentifier ConvertID(const ::raft::EntityID &entity_id); bool EntityIDEqual(const ::raft::EntityID &left, const LogIdentifier &right); bool EntityIDLarger(const ::raft::EntityID &left, const LogIdentifier &right); bool EntityIDLargerEqual(const ::raft::EntityID &left, const LogIdentifier &right); bool EntityIDSmaller(const ::raft::EntityID &left, const LogIdentifier &right); } #endif
2,937
C++
.h
50
55.48
165
0.750525
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
1,534,329
comm_view.h
ppLorins_aurora/src/common/comm_view.h
/*
*   <Aurora. A raft based distributed KV storage system.>
*   Copyright (C) <2019> <arthur> <pplorins@gmail.com>

*   This program is free software: you can redistribute it and/or modify
*   it under the terms of the GNU General Public License as published by
*   the Free Software Foundation, either version 3 of the License, or
*   (at your option) any later version.

*   This program is distributed in the hope that it will be useful,
*   but WITHOUT ANY WARRANTY; without even the implied warranty of
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*   GNU General Public License for more details.

*   You should have received a copy of the GNU General Public License
*   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Static, process-wide state shared by the leader and follower views.

#pragma once

#ifndef __AURORA_COMM_VIEW_H__
#define __AURORA_COMM_VIEW_H__

#include "common/comm_defs.h"
#include "common/log_identifier.h"
#include "config/config.h"
#include "tools/lock_free_deque.h"
#include "tools/lock_free_unordered_single_list.h"
#include "tools/lock_free_priority_queue.h"
#include "tools/timer.h"

namespace RaftCore::Common {

using ::RaftCore::DataStructure::LockFreeUnorderedSingleList;
using ::RaftCore::DataStructure::LockFreePriotityQueue;
using ::RaftCore::Timer::GlobalTimer;
using ::RaftCore::Common::LogIdentifier;

// Pure static holder — construction and copying are deleted below; only the
// static members and InstallGC() are used.
class CommonView {

// Initialization is exposed publicly only in the unit-test build.
#ifdef _COMMON_VIEW_TEST_
public:
#else
protected:
#endif

    static void Initialize() noexcept;

    static void UnInitialize() noexcept;

public:

    static int  m_cpu_cores;

    static LockFreePriotityQueue    m_priority_queue;

    //TODO: find why m_garbage can't be instantiated.
    //template<typename T>
    //static LockFreeDeque<DoubleListNode<T>> m_garbage;

    //This is the running flag for leader&follower routine threads.
    static volatile bool    m_running_flag;

    // Sentinel log ids: the all-zero id and the maximal id.
    static LogIdentifier   m_zero_log_id;

    static LogIdentifier   m_max_log_id;

protected:

    // Register garbage collection for a lock-free single list: install the
    // node deleter (W<T>::ReleaseCutHead) and schedule a periodic purge task
    // on the global timer, keeping FLAGS_retain_num_unordered_single_list
    // nodes and firing every FLAGS_gc_interval_ms milliseconds.
    template<template<typename> typename W, template<typename> typename N,typename T>
    static void InstallGC(LockFreeUnorderedSingleList<N<T>> *p_ref_garbage) noexcept {

        p_ref_garbage->SetDeleter(W<T>::ReleaseCutHead);

        auto _pending_list_gc = [p_ref_garbage]()->bool {
            p_ref_garbage->PurgeSingleList(::RaftCore::Config::FLAGS_retain_num_unordered_single_list);
            return true;
        };
        GlobalTimer::AddTask(::RaftCore::Config::FLAGS_gc_interval_ms ,_pending_list_gc);
    }

    static std::vector<std::thread*>    m_vec_routine;

private:

    CommonView() = delete;

    virtual ~CommonView() = delete;

    CommonView(const CommonView&) = delete;

    CommonView& operator=(const CommonView&) = delete;
};

//template<typename T>
//LockFreeDeque<DoubleListNode<T>> CommonView::m_garbage;

} //end namespace

#endif
2,830
C++
.h
68
38.205882
103
0.728938
ppLorins/aurora
37
4
0
GPL-3.0
9/20/2024, 10:44:10 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false