repo_name
stringlengths
5
122
path
stringlengths
3
232
text
stringlengths
6
1.05M
ZengYao0206/GraphicsTest
GraphicsTest/CircleViewController.h
<reponame>ZengYao0206/GraphicsTest<gh_stars>0 // // CircleViewController.h // GraphicsTest // // Created by ZengYao on 16/7/27. // Copyright © 2016年 zengyao. All rights reserved. // #import <UIKit/UIKit.h> @interface CircleViewController : UIViewController @end
ZengYao0206/GraphicsTest
GraphicsTest/UIColor+YW.h
<reponame>ZengYao0206/GraphicsTest<filename>GraphicsTest/UIColor+YW.h // // UIColor+Common.h // HappyShare // // Created by <NAME> on 13-3-14. // Copyright (c) 2013年 <NAME>. All rights reserved. // #import <UIKit/UIKit.h> @interface UIColor (YW) /** * @brief 获取颜色对象 * * @param string 颜色描述字符串,带“#”开头 * * @return 颜色对象 */ + (UIColor *)colorWithString:(NSString *)string; /** * @brief 获取颜色对象 * * @param float R G B A * * @return 颜色对象 */ + (UIColor *)colorWithR:(float)r G:(float)g B:(float)b A:(float)a; + (UIColor *)colorWithHex:(NSString *)hex; @end
ZengYao0206/GraphicsTest
GraphicsTest/GraphicViewController.h
// // GraphicViewController.h // GraphicsTest // // Created by yiyaowang on 15/10/20. // Copyright © 2015年 zengyao. All rights reserved. // #import <UIKit/UIKit.h> #import "OBAWeekHistogramView.h" #import "OBHistogramView.h" #import "OBDrawThreeFoldView.h" #import "OBCircleProgressView.h" @interface GraphicViewController : UIViewController<UIScrollViewDelegate> { UIScrollView* mainScrollView; OBAWeekHistogramView* zhuView; //柱形图 OBDrawThreeFoldView* zheView; //折线图 OBCircleProgressView* quView; //曲线图 } @property (nonatomic,copy)NSString* which; @end
ZengYao0206/GraphicsTest
GraphicsTest/OBDataFormator.h
// // OBDataFormator.h // Oband // // Created by Oband on 14-7-30. // Copyright (c) 2014年 Oband Tech. All rights reserved. // #import <Foundation/Foundation.h> @interface OBDataFormator : NSObject +(NSString*) stringDate:(NSDate *) date format:(NSString *) format; +(NSDate *) datepartWithoutTime:(NSDate *)date; +(NSDate*) dateFromString:(NSString *) dateText format:(NSString *) format; +(NSInteger)WhatWeekWithDate:(NSDate*)date; +(NSInteger)WhatYearWithDate:(NSDate*)date; +(NSDate*)whatMonthDateWithDate:(NSDate*)date; +(NSMutableArray*)getAMonthSAllDayWithDate:(NSDate*)date; +(NSMutableArray*)getAmonthDates:(NSDate*)date; +(NSMutableArray*)getAMonthSAllDayDate:(NSDate*)date; //获取当天所在的星期的所有日期,format:@"yyyy-MM-dd" +(NSMutableArray*)getAweekDayDateWithDate:(NSDate*)date howDay:(int)howDay; //获取当天所在的星期的所有日期,format:@"dd" +(NSMutableArray*)getAweekDayWithDate:(NSDate*)date howDay:(int)howDay; +(NSDate*)DateFromString:(NSString*) format StrDate:(NSString*)strDate; +(NSString *)convertToJSONString:(id)object; +(NSTimeInterval)getIntervaltimeChange; @end
ZengYao0206/GraphicsTest
GraphicsTest/PageViewController.h
// // PageViewController.h // GraphicsTest // // Created by yiyaowang on 15/10/20. // Copyright © 2015年 zengyao. All rights reserved. // #import <UIKit/UIKit.h> #import "RootViewController.h" @interface PageViewController : UIViewController<UIScrollViewDelegate> { UIScrollView* pageScrollview; UIPageControl* pageControl; UIButton* button; } @end
ZengYao0206/GraphicsTest
GraphicsTest/OBAWeekHistogramView.h
// // OBAWeekHistogramView.h // Oband // // Created by oband on 14-8-1. // Copyright (c) 2014年 Oband Tech. All rights reserved. // #import <UIKit/UIKit.h> #import "OBHistogramView.h" @interface OBAWeekHistogramView : UIView @property (nonatomic,strong) UILabel *title; @property (nonatomic,strong) UILabel *note; @property (nonatomic,strong) OBHistogramView *mHistogramView; @property (nonatomic,strong) NSArray *arrayStrX; @property (nonatomic,strong) NSArray *arrayStrY; //@property (nonatomic,strong) NSDictionary *dataDiction; //为了解决日期排序 @property (nonatomic,strong) NSMutableArray *dataOfDateArray; @property (nonatomic,strong) NSMutableArray *dateArray; -(void)updataToHistogram; @end
ZengYao0206/GraphicsTest
GraphicsTest/OBCircleProgressView.h
<filename>GraphicsTest/OBCircleProgressView.h // // OBCircleProgressView.h // Oband // // Created by oband on 14-7-31. // Copyright (c) 2014年 Oband Tech. All rights reserved. // #import <UIKit/UIKit.h> @interface OBCircleProgressView : UIView //标题-总摄入卡路里 @property (nonatomic,retain) UILabel *titleLable; //摄入能量总量 @property (nonatomic,retain)UILabel *totalIntakeLable; @property (nonatomic,setter = setUnitString:) NSString *unitString; @property(nonatomic,getter=isHidden,setter = setHidden:) BOOL hidden; @property(nonatomic,readonly,getter = getPercentage) float percentage; //圆圈 @property (nonatomic,retain) UIImageView *imageView; //是否显示百分比还是显示数字 YES就显示百分比 NO显示数字 @property (nonatomic,assign) BOOL isShowPercent; //headView的数据模型 //圆圈线的宽度 @property (nonatomic,assign) float lineWith; //每一圈的最大值默认是500 @property (nonatomic,assign) float maxValuePerCircle; //开始动画,动画时间 -(void)beginAnimationWithDuration:(float)time; //停止动画 -(void)StopAnimation; @property (nonatomic,assign) float progessValue; //背景圆的颜色 @property (nonatomic,setter = setbgArcColor:) UIColor *bgArcColor; //进度条的颜色 @property (nonatomic,strong) UIColor *progressColor; //超过最大值的颜色 @property (nonatomic,strong) UIColor *MaxColor; @end
ZengYao0206/GraphicsTest
GraphicsTest/OBPNBar.h
<gh_stars>0 // // PNBar.h // PNChartDemo // // Created by yiyaowang on 10/19/15. // Copyright (c) 2015年 zengyao. All rights reserved. // #import <UIKit/UIKit.h> #import <QuartzCore/QuartzCore.h> @interface OBPNBar : UIView @property (nonatomic) float grade; @property (nonatomic,strong) CAShapeLayer * chartLine; @property (atomic, retain) UIColor * barColor; -(void)setGradeNOAnimation:(float)grade; @end
ZengYao0206/GraphicsTest
GraphicsTest/InvestCircleView.h
<gh_stars>0 // // InvestCircleView.h // Phyt // // Created by RiQiangWang on 2016/7/26. // Copyright © 2016年 Shipeng. All rights reserved. // #import <UIKit/UIKit.h> @interface InvestCircleView : UIView @property (nonatomic, assign) CGFloat progress; @property (nonatomic, assign) CGFloat returnRate; @property (nonatomic,assign) float maxValuePerCircle; @property (nonatomic,assign) float progessValue; //圆圈线的宽度 @property (nonatomic,assign) float lineWith; //背景圆的颜色 @property (nonatomic,setter = setbgArcColor:) UIColor *bgArcColor; //进度条的颜色 @property (nonatomic,strong) UIColor *progressColor; //超过最大值的颜色 @property (nonatomic,strong) UIColor *MaxColor; //开始动画,动画时间 - (void)beginAnimationWithDuration:(float)time; //停止动画 -(void)StopAnimation; @end
ZengYao0206/GraphicsTest
GraphicsTest/RootViewController.h
// // RootViewController.h // GraphicsTest // // Created by yiyaowang on 15/10/20. // Copyright © 2015年 zengyao. All rights reserved. // #import <UIKit/UIKit.h> @interface RootViewController : UIViewController<UITableViewDataSource,UITableViewDelegate> @end
ZengYao0206/GraphicsTest
GraphicsTest/OBPNColor.h
// // PNColor.h // PNChart // // Created by kevin on 13-6-8. // Copyright (c) 2013年 kevinzhow. All rights reserved. // #import <UIKit/UIKit.h> #import <Foundation/Foundation.h> /* * System Versioning Preprocessor Macros */ #define SCREEN_WIDTH ([UIScreen mainScreen].bounds.size.width) #define PNGrey [UIColor colorWithRed:246.0/255.0 green:246.0/255.0 blue:246.0/255.0 alpha:1.0f] #define PNLightBlue [UIColor colorWithRed:94.0/255.0 green:147.0/255.0 blue:196.0/255.0 alpha:1.0f] #define PNGreen [UIColor colorWithRed:77.0/255.0 green:186.0/255.0 blue:122.0/255.0 alpha:1.0f] #define PNTitleColor [UIColor colorWithRed:0.0/255.0 green:189.0/255.0 blue:113.0/255.0 alpha:1.0f] #define PNButtonGrey [UIColor colorWithRed:141.0/255.0 green:141.0/255.0 blue:141.0/255.0 alpha:1.0f] #define PNFreshGreen [UIColor colorWithRed:77.0/255.0 green:196.0/255.0 blue:122.0/255.0 alpha:1.0f] #define PNRed [UIColor colorWithRed:245.0/255.0 green:94.0/255.0 blue:78.0/255.0 alpha:1.0f] #define PNMauve [UIColor colorWithRed:88.0/255.0 green:75.0/255.0 blue:103.0/255.0 alpha:1.0f] #define PNBrown [UIColor colorWithRed:119.0/255.0 green:107.0/255.0 blue:95.0/255.0 alpha:1.0f] #define PNBlue [UIColor colorWithRed:82.0/255.0 green:116.0/255.0 blue:188.0/255.0 alpha:1.0f] #define PNDarkBlue [UIColor colorWithRed:121.0/255.0 green:134.0/255.0 blue:142.0/255.0 alpha:1.0f] #define PNYellow [UIColor colorWithRed:242.0/255.0 green:197.0/255.0 blue:117.0/255.0 alpha:1.0f] #define PNWhite [UIColor colorWithRed:255.0/255.0 green:255.0/255.0 blue:255.0/255.0 alpha:1.0f] #define PNDeepGrey [UIColor colorWithRed:99.0/255.0 green:99.0/255.0 blue:99.0/255.0 alpha:1.0f] #define PNPinkGrey [UIColor colorWithRed:200.0/255.0 green:193.0/255.0 blue:193.0/255.0 alpha:1.0f] #define PNHealYellow [UIColor colorWithRed:245.0/255.0 green:242.0/255.0 blue:238.0/255.0 alpha:1.0f] #define PNLightGrey [UIColor colorWithRed:225.0/255.0 green:225.0/255.0 blue:225.0/255.0 alpha:1.0f] #define PNCleanGrey [UIColor 
colorWithRed:251.0/255.0 green:251.0/255.0 blue:251.0/255.0 alpha:1.0f] #define PNLightYellow [UIColor colorWithRed:241.0/255.0 green:240.0/255.0 blue:240.0/255.0 alpha:1.0f] #define PNDarkYellow [UIColor colorWithRed:152.0/255.0 green:150.0/255.0 blue:159.0/255.0 alpha:1.0f] #define PNPinkDark [UIColor colorWithRed:170.0/255.0 green:165.0/255.0 blue:165.0/255.0 alpha:1.0f] #define PNCloudWhite [UIColor colorWithRed:244.0/255.0 green:244.0/255.0 blue:244.0/255.0 alpha:1.0f] #define PNBlack [UIColor colorWithRed:45.0/255.0 green:45.0/255.0 blue:45.0/255.0 alpha:1.0f] #define PNStarYellow [UIColor colorWithRed:252.0/255.0 green:223.0/255.0 blue:101.0/255.0 alpha:1.0f] #define PNTwitterColor [UIColor colorWithRed:0.0/255.0 green:171.0/255.0 blue:243.0/255.0 alpha:1.0] #define PNWeiboColor [UIColor colorWithRed:250.0/255.0 green:0.0/255.0 blue:33.0/255.0 alpha:1.0] #define PNiOSGreenColor [UIColor colorWithRed:98.0/255.0 green:247.0/255.0 blue:77.0/255.0 alpha:1.0] @interface OBPNColor : NSObject - (UIImage *)imageFromColor:(UIColor *)color; @end
ZengYao0206/GraphicsTest
GraphicsTest/OBHistogramView.h
// // OBHistogramView.h // Oband // // Created by oband on 14-8-1. // Copyright (c) 2014年 Oband Tech. All rights reserved. // #import <UIKit/UIKit.h> @interface OBHistogramView : UIView @property (nonatomic,assign) int numberDay; @property (nonatomic,strong) UIColor *highColor; @property (nonatomic,strong) UIColor *lowColor; @property (nonatomic,assign) int maxValue; @property (nonatomic,assign) int minValue; //0-1 @property (nonatomic,assign) float spacingRatio; @property (nonatomic,assign) float marginLeft; //array 里面是float型数据 -(void)showHistogramByData:(NSArray*)array; @end
ZengYao0206/GraphicsTest
GraphicsTest/OBDrawThreeFoldView.h
<reponame>ZengYao0206/GraphicsTest // // DrawThreeFoldView.h // 绘图测试 // // Created by apple on 14-5-8. // Copyright (c) 2014年 apple. All rights reserved. //--------画三条折线的View----------------- //NSArray *arr = @[@"8000",@"8000",@"7000",@"2500",@"6000",@"3500",@"8000",@"8000",@"7000",@"2500",@"6000",@"3500",@"8000",@"8000",@"7000",@"2500",@"6000",@"3500",@"8000",@"8000",@"7000",@"2500",@"6000",@"3500",@"8000",@"8000",@"7000",@"2500",@"6000",@"3500",@"8000",@"6000"]; // //NSArray *arr1 = @[@"4",@"5",@"7",@"2",@"4",@"6",@"8",@"2",@"4",@"5",@"3",@"7",@"2",@"1",@"6",@"8",@"5",@"7",@"2",@"4",@"6",@"8",@"2",@"4",@"5",@"3",@"7",@"7",@"2",@"4",@"6",@"4"]; // //NSArray *arr2 = @[@"3000",@"2500",@"6000",@"2500",@"3500",@"2500",@"6000",@"3500",@"2500",@"1000",@"2500",@"6000",@"3500",@"2500",@"6000",@"3500",@"2500",@"1000",@"2500",@"6000",@"3500",@"2500",@"6000",@"3500",@"2500",@"1000",@"6000",@"3500",@"2500",@"6000",@"3500",@"2500"]; //NSArray *arr3 = @[@"1",@"2",@"3",@"4",@"5",@"6",@"7",@"8",@"9",@"10",@"11",@"12",@"13",@"14",@"15",@"16",@"17",@"18",@"19",@"20",@"21",@"22",@"23",@"24",@"25",@"26",@"27",@"28",@"29",@"30",@"31",@"1"]; // // // // ////周的数据 //self.weekData = [NSMutableDictionary dictionary]; // //NSRange range = NSMakeRange(arr.count - 8, 8); // //[self.weekData setObject:[arr subarrayWithRange:range] forKey:@"move"]; //[self.weekData setObject:[arr1 subarrayWithRange:range] forKey:@"sleep"]; //[self.weekData setObject:[arr2 subarrayWithRange:range] forKey:@"diet"]; //[self.weekData setObject:[arr3 subarrayWithRange:range] forKey:@"date"]; // // ////月的的数据 //self.mouthData = [NSMutableDictionary dictionary]; // //[self.mouthData setObject:arr forKey:@"move"]; //[self.mouthData setObject:arr1 forKey:@"sleep"]; //[self.mouthData setObject:arr2 forKey:@"diet"]; //[self.mouthData setObject:arr3 forKey:@"date"]; // // // ////默认现在月的 //DrawThreeFoldView *draw = [[DrawThreeFoldView alloc] initWithFrame:CGRectMake(0, 0, 320, 250)]; //draw.moves = self.mouthData[@"move"]; 
//draw.sleeps = self.mouthData[@"sleep"]; //draw.diets = self.mouthData[@"diet"]; //draw.dates = self.mouthData[@"date"]; // //draw.delegate = self; // //[self.view addSubview:draw]; // //} // //#pragma mark - 点击切换月 和 周的 图 // //-(void)drawThreeFoldViewTouchBeganDrawThreeFoldView:(DrawThreeFoldView *)drawThreeFoldView event:(UIEvent *)event //{ // if (drawThreeFoldView.tag == 0) { //显示周 // drawThreeFoldView.tag = 1; // // drawThreeFoldView.moves = self.weekData[@"move"]; // drawThreeFoldView.sleeps = self.weekData[@"sleep"]; // drawThreeFoldView.diets = self.weekData[@"diet"]; // drawThreeFoldView.dates = self.weekData[@"date"]; // // }else{ //显示月 // drawThreeFoldView.tag = 0; // // drawThreeFoldView.moves = self.mouthData[@"move"]; // drawThreeFoldView.sleeps = self.mouthData[@"sleep"]; // drawThreeFoldView.diets = self.mouthData[@"diet"]; // drawThreeFoldView.dates = self.mouthData[@"date"]; // // } // // NSLog(@"ThreeFoldView 被点了 %d",drawThreeFoldView.tag); //} #import <UIKit/UIKit.h> @class OBDrawThreeFoldView; @protocol DrawThreeFoldViewDelegate <NSObject> //页面被点击了切换每周走势和每月走势 -(void)drawThreeFoldViewTouchBeganDrawThreeFoldView:(OBDrawThreeFoldView *)drawThreeFoldView event:(UIEvent *)event; @end @interface OBDrawThreeFoldView : UIView @property (nonatomic,assign) id<DrawThreeFoldViewDelegate>delegate; /** 设置数据的时候要多传一天的 如要显示每周的走势个个数组要传 ----- 8天的 **** 第一条数据是用来确定起点坐标的 如要显示每月的的走势个个数组要传-----31的 */ //运动数据 @property (nonatomic, retain) NSArray *moves; //睡眠数据 @property (nonatomic, retain) NSArray *sleeps; //饮食数据 @property (nonatomic, retain) NSArray *diets; //日期 @property (nonatomic, retain) NSArray *dates; @end
ZengYao0206/GraphicsTest
GraphicsTest/OBDrawLineChartView.h
<filename>GraphicsTest/OBDrawLineChartView.h // // DrawLineChartView.h // 绘图测试 // // Created by apple on 14-5-8. // Copyright (c) 2014年 apple. All rights reserved. //-------画一条折线的View #import <UIKit/UIKit.h> @interface OBDrawLineChartView : UIView //需要画的数值 @property (nonatomic, retain) NSArray *dataArr; //得多给一天的数据 //是画什么数据 0 - 代表运动 1 - 代表睡眠 2 - 代表饮食 @property (nonatomic, assign) NSInteger index; @end
JamesCreaton/BasicGeneticAlgorithm
project2D/GA.h
#pragma once #include "Population.h" #include <random> #define CROSSOVER_RATE 0.7 #define MUTATION_RATE 0.1 #define POP_SIZE 10 #define MAX_ALLOWABLE_GENERATIONS 65 #define CHROMO_LENGTH 2 #define GENE_LENGTH 6 class GA { public: GA(b2Vec2 Target); ~GA(); void CalculateSuccess(Person* a_person); struct Chromosone { std::string m_bits; }; struct Genome { std::vector<Chromosone> m_genome; }; void TestGeneration(); void NewGeneration(); void UpdatePopulation(float dt); void DrawPopulation(aie::Renderer2D* renderer); void DrawGUI(aie::Renderer2D* renderer, aie::Font* a_font); Genome GenerateRandomGenome(); std::vector<Person*>* GetPeople() { return m_population->GetPeople(); } protected: Population* m_population; private: std::string GenRandomBits(int length); void SortPeople(); void MutateBottom50(std::vector<Person*>* a_people); void BreedPopulation(std::vector<Person*>* a_people); Person * GetWeightedRandomPerson(std::vector<Person*> a_people); void CalculateGenerationFitness(Population* a_population); void MutateGenome1(Person & a_person); void MutateGenome2(Person & a_person); void MutateGenome3(Person & a_person); void MutateGenome4(Person & a_person); int m_currentGeneration; bool m_simulationStarted; b2Vec2 m_target; std::vector<Person*> m_top50; std::vector<Person*> m_bottom50; int m_highestFitness; int m_totalFitness; bool m_solutionFound; std::default_random_engine m_randEngine; };
JamesCreaton/BasicGeneticAlgorithm
project2D/Box.h
<filename>project2D/Box.h #pragma once #include <box2d.h> #include <Renderer2D.h> #include <glm\glm.hpp> class Box { public: Box(); ~Box(); void init(b2World* world, const glm::vec2& position, const glm::vec2& dimensions); void Draw(aie::Renderer2D* renderer); b2Body* GetBody() { return m_body; } b2Fixture* GetFixture() { return m_fixture; } private: b2Body* m_body = nullptr; b2Fixture* m_fixture = nullptr; glm::vec2 m_dimensions; };
JamesCreaton/BasicGeneticAlgorithm
project2D/Application2D.h
#pragma once #include "Application.h" #include <glm\glm.hpp> #include <glm\ext.hpp> #include "Box.h" #include "Ground.h" #include <Renderer2D.h> #include <vector> #include "GA.h" #include "Person.h" #include "Sphere.h" #include "Target.h" #include <Font.h> using glm::vec2; using glm::vec3; using glm::vec4; class Application2D : public aie::Application { public: Application2D(); virtual ~Application2D(); virtual bool startup(); virtual void shutdown(); virtual void update(float deltaTime); virtual void draw(); private: GA* m_ga; aie::Renderer2D* m_2dRenderer; aie::Font* m_font; std::unique_ptr<b2World> m_world; b2Vec2 m_gravity; Box* m_target; float m_cameraX, m_cameraY; };
JamesCreaton/BasicGeneticAlgorithm
project2D/FooDraw.h
#pragma once #include <box2d.h> class FooDraw : public b2Draw { public: FooDraw(); ~FooDraw(); void DrawPolygon(const b2Vec2* vertices, int32 vertexCount, const b2Color& color) {} void DrawSolidPolygon(const b2Vec2* vertices, int32 vertexCount, const b2Color& color) {} void DrawCircle(const b2Vec2& center, float32 radius, const b2Color& color) {} void DrawSolidCircle(const b2Vec2& center, float32 radius, const b2Vec2& axis, const b2Color& color) {} void DrawSegment(const b2Vec2& p1, const b2Vec2& p2, const b2Color& color) {} void DrawTransform(const b2Transform& xf) {} };
JamesCreaton/BasicGeneticAlgorithm
project2D/Rigidbody.h
#pragma once #include "PhysicsObject.h" class Rigidbody : public PhysicsObject { public: Rigidbody(); Rigidbody(ShapeType shapeID, glm::vec2 position, glm::vec2 velocity, float rotation, float mass); ~Rigidbody(); virtual bool fixedUpdate(glm::vec2 gravity, float timeStep); virtual void debug(); void applyForce(glm::vec2 force); void applyForceToActor(Rigidbody* actor2, glm::vec2 force); glm::vec2 getPosition() { return m_position; } float getRotation() { return m_rotation; } glm::vec2 getVelocity() { return m_velocity; } float getMass() { return m_mass; } protected: glm::vec2 m_position; glm::vec2 m_velocity; float m_mass; float m_rotation; //2D so we only need a single float to represent our rotation };
JamesCreaton/BasicGeneticAlgorithm
project2D/PhysicsObject.h
#pragma once #include <glm\glm.hpp> using glm::vec2; using glm::vec3; enum ShapeType { PLANE = 0, SPHERE = 1, BOX = 2, }; class PhysicsObject { protected: PhysicsObject(ShapeType a_shapeID) : m_shapeID(a_shapeID) {} PhysicsObject(); ~PhysicsObject(); public: virtual bool fixedUpdate(glm::vec2 gravity, float timeStep) = 0; virtual void debug() = 0; virtual void makeGizmo() = 0; virtual void resetPosition() {}; protected: ShapeType m_shapeID; };
JamesCreaton/BasicGeneticAlgorithm
project2D/Population.h
<reponame>JamesCreaton/BasicGeneticAlgorithm #pragma once #include <vector> #include <list> #include "Person.h" class Population { public: Population(); Population(int populationSize); ~Population(); void GenerateNewPopulation(); std::vector<Person*>* GetPeople(); //std::list<Person*> GetPeople(); protected: std::vector<Person*> m_people; //std::list<Person*> m_people; };
JamesCreaton/BasicGeneticAlgorithm
project2D/Person.h
<gh_stars>0 #pragma once #include <vector> #include <Box2D.h> #include <Renderer2D.h> #include <glm/glm.hpp> #include <glm\ext.hpp> class Person { public: Person(); ~Person(); void init(b2World* world, const glm::vec2& position, const glm::vec2& dimensions); void Update(float dt); void Draw(aie::Renderer2D* renderer); //Setters void SetInstructionSet(std::string instructions); void SetInstructionSetAtIndex(std::string instructions, int index); void ClearInstructionSet(); void SetFitness(float a_fitness); void SetProbability(float a_probablity); void SetCurrentMove(int a_currentMove) { m_currentMove = a_currentMove; } //Getters b2World* GetWorld() { return m_world; }; float GetFitness(); b2Body* GetBody() { return m_body; } b2Fixture* GetFixture() { return m_fixture; } std::vector<std::string> GetInstructionSet(); float GetProbability(); int GetCurrentMove() { return m_currentMove; } protected: std::vector<std::string> m_instructions; float m_fitness; private: b2World* m_world; b2Body* m_body = nullptr; b2Fixture* m_fixture = nullptr; glm::vec2 m_dimensions; int BinToDec(std::string bits); void MoveAgent(int move); float m_probabilityOfBreeding; //Testing int m_currentMove; };
victorhydecode/WeightedRandom
WeightedRandom/WeightedRandom.h
<filename>WeightedRandom/WeightedRandom.h<gh_stars>1-10 // // WeightedRandom.h // WeightedRandom // // Created by <NAME> on 17/01/2018. // Copyright © 2018 <NAME>. All rights reserved. // #import <Foundation/Foundation.h> //! Project version number for WeightedRandom. FOUNDATION_EXPORT double WeightedRandomVersionNumber; //! Project version string for WeightedRandom. FOUNDATION_EXPORT const unsigned char WeightedRandomVersionString[]; // In this header, you should import all the public headers of your framework using statements like #import <WeightedRandom/PublicHeader.h>
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/FindLines.h
<gh_stars>0 #ifndef tp_image_utils_functions_FindLines_h #define tp_image_utils_functions_FindLines_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/Point.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## struct FindLines { //################################################################################################ static std::vector<std::vector<tp_image_utils::Point>> findLines(const tp_image_utils::ByteMap& source, size_t minPoints=40, size_t maxDeviation=10); //################################################################################################ static std::vector<std::vector<tp_image_utils::Point>> findPolylines(const tp_image_utils::ByteMap& source, size_t minPoints=40, size_t maxDeviation=10, size_t maxJointDistance = 100); //################################################################################################ //! This returns a list of closed shapes /*! This is very similar to findPolylines, the only differences are that this will only return closed shapes, and the returned shape will have one fewer poins. A polyline has 1 point for each line plus an extra point at the end. A polygon does not need this as the last point is the same as the first. \param source - The binary image to detect the shapes in. \param minPoints - The minimum number of points to consider a line. \param maxDeviation - The max deviation from the line for a point to be considered to be part of that line. \param maxJointDistance - The max distance between line ends for them to be joined. \return A list of polygons. */ static std::vector<std::vector<tp_image_utils::Point>> findPolygons(const tp_image_utils::ByteMap& source, size_t minPoints=40, size_t maxDeviation=10, size_t maxJointDistance = 100); //################################################################################################ //! 
This returns a list of 4 sided closed shapes /*! This calls findPolygons and then only returns the 4 sided shapes, it also updates the point type to be PointTypeRectCorner. \param source - The binary image to detect the shapes in. \param minPoints - The minimum number of points to consider a line. \param maxDeviation - The max deviation from the line for a point to be considered to be part of that line. \param maxJointDistance - The max distance between line ends for them to be joined. \return A list of quadrilaterals. */ static std::vector<std::vector<tp_image_utils::Point>> findQuadrilaterals(const tp_image_utils::ByteMap& source, size_t minPoints=40, size_t maxDeviation=10, size_t maxJointDistance = 100); }; } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/DrawMask.h
<gh_stars>0 #ifndef tp_image_utils_functions_DrawMask_h #define tp_image_utils_functions_DrawMask_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ColorMap.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## void drawMask(tp_image_utils::ColorMap& image, TPPixel color, const tp_image_utils::ByteMap& mask, uint8_t maskValue); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/SignedDistanceField.h
#ifndef tp_image_utils_functions_SignedDistanceField_h #define tp_image_utils_functions_SignedDistanceField_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## //! Generate a signed distance field image /*! This will generate a signed distance filed image the generated image will be grey with the red, green, and blue channels each set to the same SDF value. The alpha channel will be set to 255. \param src - The source image only the red channel will be used, values >0 will be cosidered white. \param radius - This controls the radius at which the generated value will saturate. \return The generated signed distance field. */ tp_image_utils::ColorMap signedDistanceField(const tp_image_utils::ColorMap& src, int radius); //################################################################################################## //! Generate a signed distance field image /*! This will generate a signed distance filed image the generated image will be grey with the red, green, and blue channels each set to the same SDF value. The alpha channel will be set to 255. \param src - The source image only the red channel will be used, values >0 will be cosidered white. \param radius - This controls the radius at which the generated value will saturate. \param width - The width of the destination image, may be less than the source width. \param height - The height of the destination image, may be less than the source height. \return The generated signed distance field. 
*/ tp_image_utils::ColorMap signedDistanceField(const tp_image_utils::ColorMap& src, int radius, int width, int height); //################################################################################################## tp_image_utils::ByteMap signedDistanceField(const tp_image_utils::ByteMap& src, int radius); //################################################################################################## tp_image_utils::ByteMap distanceField(const tp_image_utils::ByteMap& src, int radius); //################################################################################################## tp_image_utils::ByteMap signedDistanceField(const tp_image_utils::ByteMap& src, int radius, int width, int height); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ConvolutionMatrix.h
#ifndef tp_image_utils_functions_ConvolutionMatrix_h #define tp_image_utils_functions_ConvolutionMatrix_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## class ConvolutionMatrix { public: //################################################################################################ ConvolutionMatrix(); //################################################################################################ ConvolutionMatrix(const std::string& text); //################################################################################################ ConvolutionMatrix(const std::vector<double>& matrixData, size_t width, size_t height); //################################################################################################ [[nodiscard]]size_t width()const; //################################################################################################ [[nodiscard]]size_t height()const; //################################################################################################ const std::vector<double>& matrixData()const; //################################################################################################ void setMatrixData(const std::vector<double>& matrixData, size_t width, size_t height); //################################################################################################ [[nodiscard]]std::string toString()const; //################################################################################################ void loadString(const std::string& text); //################################################################################################ [[nodiscard]]tp_image_utils::ColorMap convolve(const tp_image_utils::ColorMap& src)const; //################################################################################################ //! 
Create a 3x3 identity matrix void makeIdentity(); //################################################################################################ void makeBlur(); private: std::vector<double> m_matrixData; size_t m_width{0}; size_t m_height{0}; }; //################################################################################################## //! Apply a convolution matrix to the image /*! Apply a convolution matrix to the image and return the result. \note the width and height should be odd numbers larger than one. \param src - The source image. \param matrixData - The matrix organised as rows. \param width - The number of columns in the matrix. \param height - The number of rows in the matrix. \return The image with the convolution matrix applied. */ tp_image_utils::ColorMap convolutionMatrix(const tp_image_utils::ColorMap& src, const std::vector<double>& matrixData, size_t width, size_t height); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/Bitwise.h
#ifndef tp_image_utils_functions_Bitwise_h #define tp_image_utils_functions_Bitwise_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## enum LogicOp { LogicOpFalse = 0, LogicOpNOR = 1, LogicOpConverseNonimplication = 2, LogicOpNegationP = 3, LogicOpMaterialNonimplication = 4, LogicOpNegationQ = 5, LogicOpXOR = 6, LogicOpNAND = 7, LogicOpAND = 8, LogicOpXNOR = 9, LogicOpQ = 10, LogicOpMaterialImplication = 11, LogicOpP = 12, LogicOpConverseImplication = 13, LogicOpOR = 14, LogicOpTrue = 15 }; //################################################################################################## const char* logicOpToString(LogicOp operation); //################################################################################################## LogicOp logicOpFromString(const std::string& operation); //################################################################################################## std::vector<std::string> logicalOps(); //################################################################################################## tp_image_utils::ByteMap bitwise(const tp_image_utils::ByteMap& p, const tp_image_utils::ByteMap& q, LogicOp operation); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/Globals.h
<reponame>omi-lab/tp_image_utils_functions #ifndef tp_image_utils_functions_Globals_h #define tp_image_utils_functions_Globals_h #include "tp_utils/Globals.h" //################################################################################################## //! A module of image manipulation functions. namespace tp_image_utils_functions { } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/NoiseField.h
<reponame>omi-lab/tp_image_utils_functions<gh_stars>0 #ifndef tp_image_utils_functions_EdgeDetect_h #define tp_image_utils_functions_EdgeDetect_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## tp_image_utils::ByteMap noiseField(const tp_image_utils::ByteMap& src, int radius); //################################################################################################## tp_image_utils::ByteMap noiseFieldGrid(const tp_image_utils::ByteMap& src, int cellSize); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/AlignImages.h
#ifndef tp_image_utils_functions_AlignImages_h
#define tp_image_utils_functions_AlignImages_h

#include "tp_image_utils_functions/Globals.h"

#include "tp_image_utils/ByteMap.h"
#include "tp_image_utils/Point.h"

namespace tp_image_utils_functions
{

//##################################################################################################
//! Static functions for aligning pairs of images.
struct AlignImages
{
  //! An axis aligned rectangle in pixel coordinates.
  struct Rect
  {
    int x;
    int y;
    int w;
    int h;

    Rect(int x_=0, int y_=0, int w_=0, int h_=0):
      x(x_),
      y(y_),
      w(w_),
      h(h_){}

    //! Returns the intersection of this rect with other.
    Rect intersected(const Rect& other);
  };

  //! A square region in the reference image paired with the region it maps to in the other image.
  struct SkewedRegion
  {
    //! The coordinates in the reference image (the ref always remains square)
    Rect referenceRect;

    //! The 4 sided region in the other image that maps onto the referenceRect (possibly skewed)
    std::vector<tp_image_utils::Point> otherRegion;
  };

  //################################################################################################
  //! Tries to align two images
  /*!
  This takes two single channel images and tries to align them so the absolute subtraction of the
  two images produces the lowest result.

  \param reference - The image to align against.
  \param other - The image to align with the reference.
  \param maxOffset - The maximum offset to search, in pixels.
  */
  static std::pair<size_t, size_t> calculateMicroAlignment(const tp_image_utils::ByteMap& reference,
                                                           const tp_image_utils::ByteMap& other,
                                                           size_t maxOffset);

  //################################################################################################
  //! Translate and clip two images
  /*!
  This translates one image relative to a reference and then clips both images to only contain the
  intersection of the two images.
  */
  static void translateAndClipPair(const std::pair<int, int>& translation,
                                   tp_image_utils::ColorMap& reference,
                                   tp_image_utils::ColorMap& image);

  //################################################################################################
  //! Find the region in the other image that best fits a rect in the reference
  /*!
  This takes two single channel images and tries to align them so the absolute subtraction of the
  two images produces the lowest result.
  */
  static SkewedRegion calculateSkewedRegion(const tp_image_utils::ByteMap& reference,
                                            const tp_image_utils::ByteMap& other,
                                            size_t maxOffset);

  //################################################################################################
  //! Extract a region from one image and clip both images
  /*!
  This translates one image relative to a reference and then clips both images to only contain the
  intersection of the two images.
  */
  static void extractAndClipPair(const SkewedRegion& skewedRegion,
                                 tp_image_utils::ColorMap& reference,
                                 tp_image_utils::ColorMap& image);
};

}

#endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ExtractRect.h
#ifndef tp_image_utils_functions_ExtractRect_h
#define tp_image_utils_functions_ExtractRect_h

#include "tp_image_utils_functions/Globals.h"

#include "tp_image_utils/Point.h"
#include "tp_image_utils/Grid.h"
#include "tp_image_utils/ByteMap.h"

namespace tp_image_utils_functions
{

//##################################################################################################
//! Static functions for extracting rectangular regions from images.
struct ExtractRect
{
  //################################################################################################
  //! Extract an area of data from a source image
  /*!
  This will extract a region of data from the source image and then fit it to the output image.

  The list of sourcePoints needs to contain exactly 4 points of type PointTypeRectCorner these
  will be placed into the output image in the following order (top left, top right, bottom right,
  bottom left).

  Any points of type PointTypeRectSide will be evenly placed along the side formed by the
  PointTypeRectCorner points either side of them. This allows you to fit a curved region in to a
  rectangle.

  \param sourceImage - The image to extract from.
  \param sourcePoints - The corner (and optional side) points describing the region.
  \param width - The width of the output image.
  \param height - The height of the output image.
  \param errors - Populated with messages if the extraction fails.
  */
  static tp_image_utils::ColorMap extractRect(const tp_image_utils::ColorMap& sourceImage,
                                              const std::vector<tp_image_utils::Point>& sourcePoints,
                                              size_t width,
                                              size_t height,
                                              std::vector<std::string>& errors);

  //################################################################################################
  //! Extract the region described by a clipping grid and fit it to the output size.
  static tp_image_utils::ColorMap extractRect(const tp_image_utils::ColorMap& sourceImage,
                                              const tp_image_utils::Grid& clippingGrid,
                                              size_t width,
                                              size_t height,
                                              std::vector<std::string>& errors);

  //################################################################################################
  //! Extract an axis aligned rect, padding out of bounds pixels with pad.
  static tp_image_utils::ColorMap extractRect(const tp_image_utils::ColorMap& sourceImage,
                                              size_t x,
                                              size_t y,
                                              size_t w,
                                              size_t h,
                                              TPPixel pad);

  //################################################################################################
  //! Extract an axis aligned rect from the source image.
  static tp_image_utils::ColorMap extractRect(const tp_image_utils::ColorMap& sourceImage,
                                              size_t x,
                                              size_t y,
                                              size_t w,
                                              size_t h);
};

}

#endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/DeNoise.h
#ifndef tp_image_utils_functions_DeNoise_h
#define tp_image_utils_functions_DeNoise_h

#include "tp_image_utils_functions/Globals.h"

#include "tp_image_utils/ByteMap.h"

namespace tp_image_utils_functions
{

//##################################################################################################
//! The details of a single connected region of same valued pixels.
struct ByteRegion
{
  uint8_t value{0}; //!< The pixel value shared by this region.
  size_t count{0};  //!< The number of pixels in this region.

  size_t minX{0}; //!< The min x coordinate of this region calculated by calculateBoundingBoxes()
  size_t minY{0}; //!< The min y coordinate of this region calculated by calculateBoundingBoxes()
  size_t maxX{0}; //!< The max x coordinate of this region calculated by calculateBoundingBoxes()
  size_t maxY{0}; //!< The max y coordinate of this region calculated by calculateBoundingBoxes()
};

//##################################################################################################
//! The connected regions of an image and a per pixel region index map.
struct ByteRegions
{
  //! The details for each region found
  std::vector<ByteRegion> regions;

  //! The region indexes for each pixel in the image
  std::vector<int> map;

  size_t w{0}; //!< The width of the source image.
  size_t h{0}; //!< The height of the source image.

  //################################################################################################
  //! Separate the regions of an image
  /*!
  Split a gray image into regions.

  \param src The image to split.
  \param addCorners Set this true if regions should be joined by corners as well as edges.
  */
  ByteRegions(const tp_image_utils::ByteMap& src, bool addCorners);

  //################################################################################################
  //! Calculate the region bounding boxes
  /*!
  Call this if you need to use ByteRegion min and max coordinates.
  */
  void calculateBoundingBoxes();
};

//##################################################################################################
//! Remove regions smaller than minSize from the image.
tp_image_utils::ByteMap deNoise(const tp_image_utils::ByteMap& src,
                                size_t minSize,
                                bool addCorners,
                                uint8_t solid=0,
                                uint8_t space=255);

//##################################################################################################
//! Remove blob regions that fall outside the given aspect ratio, density, and size bounds.
tp_image_utils::ByteMap deNoiseBlobs(const tp_image_utils::ByteMap& src,
                                     float minAspectRatio,
                                     float maxAspectRatio,
                                     float minDensity,
                                     float maxDensity,
                                     size_t minSize,
                                     size_t maxSize,
                                     bool addCorners,
                                     uint8_t solid=0,
                                     uint8_t space=255);

//##################################################################################################
//! Remove stripe shaped noise smaller than minSize.
tp_image_utils::ByteMap deNoiseStripes(const tp_image_utils::ByteMap& src,
                                       size_t minSize,
                                       uint8_t solid=0,
                                       uint8_t space=255);

//##################################################################################################
//! Remove knoblet shaped noise up to knobletWidth wide.
tp_image_utils::ByteMap deNoiseKnoblets(const tp_image_utils::ByteMap& src,
                                        size_t knobletWidth,
                                        uint8_t solid=0,
                                        uint8_t space=255);

}

#endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/FillConcaveHull.h
#ifndef tp_image_utils_functions_FillConcaveHull_h #define tp_image_utils_functions_FillConcaveHull_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## struct FillConcaveHullParameters { uint8_t solid {0}; //!< The value of a solid pixel }; //################################################################################################## tp_image_utils::ByteMap fillConcaveHull(const tp_image_utils::ByteMap& src, const FillConcaveHullParameters& params); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/AddBorder.h
#ifndef tp_image_utils_functions_AddBorder_h #define tp_image_utils_functions_AddBorder_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" #include "tp_utils/TPPixel.h" namespace tp_image_utils_functions { //################################################################################################## tp_image_utils::ByteMap addBorder(const tp_image_utils::ByteMap& src, size_t width, uint8_t value=0); //################################################################################################## tp_image_utils::ColorMap addBorder(const tp_image_utils::ColorMap& src, size_t width, const TPPixel& color=TPPixel(0,0,0,255)); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/NormalizeBrightness.h
<reponame>omi-lab/tp_image_utils_functions #ifndef tp_image_utils_functions_NormalizeBrightness_h #define tp_image_utils_functions_NormalizeBrightness_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ColorMap.h" namespace tp_image_utils_functions { //################################################################################################## enum class NormalizationMode { None, Normalize, Exaggerate }; //################################################################################################## enum class ShiftBrightnessMode { None, ByValue, ByMean, ByMode, ByMedian, BySoftMode }; //################################################################################################## const char* shiftBrightnessModeToString(ShiftBrightnessMode mode); //################################################################################################## ShiftBrightnessMode shiftBrightnessModeFromString(const std::string& mode); //################################################################################################## //! /*! \param image - The source image \param radius - The radius in pixels to use for the normalization sample */ void normalizeBrightness(tp_image_utils::ColorMap& image, int radius, NormalizationMode mode=NormalizationMode::Normalize, float exaggeration=3.0f); //################################################################################################## void shiftBrightness(tp_image_utils::ColorMap& image, ShiftBrightnessMode mode, uint8_t value); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/SlotFill.h
#ifndef tp_image_utils_functions_SlotFill_h #define tp_image_utils_functions_SlotFill_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## struct SlotFillParameters { uint8_t solid {0}; //!< The value of a solid pixel uint8_t slot {255}; //!< The value of a slot pixel size_t endMinEach {1}; //!< Both ends should be at least this long size_t endMinEither{5}; //!< One or both ends should be at least this long size_t endMinSum {6}; //!< The sum of both ends should be at lest this lone size_t borderMinEach {1}; //!< Both ends should have a border at least this long size_t borderMinEither{1}; //!< One or both ends should have a border at least this long size_t borderMinSum {2}; //!< The sum of both ends should have a border at lest this long size_t startAngle {0}; //!< The start angle to rotate the image from size_t maxAngle {90}; //!< The maximum rotation size_t stepAngle {10}; //!< The amount to increment the angle by }; //################################################################################################## tp_image_utils::ByteMap slotFill(const tp_image_utils::ByteMap& src, const SlotFillParameters& params); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/DrawShapes.h
<filename>inc/tp_image_utils_functions/DrawShapes.h #ifndef tp_image_utils_functions_DrawShapes_h #define tp_image_utils_functions_DrawShapes_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ColorMap.h" #include "tp_image_utils/Point.h" #include "tp_image_utils/Grid.h" #include <stdint.h> #include <utility> #include <vector> namespace tp_image_utils_functions { //################################################################################################## //! Draws a series of points to an image tp_image_utils::ColorMap drawPoints(const tp_image_utils::ColorMap& image, const std::vector<tp_image_utils::Point>& points, const tp_image_utils::PointStyle& style = tp_image_utils::PointStyle()); //################################################################################################## //! Draws a series of points to an image tp_image_utils::ColorMap drawPoints(const tp_image_utils::ColorMap& image, const std::vector<std::vector<tp_image_utils::Point>>& points, const tp_image_utils::PointStyle& style = tp_image_utils::PointStyle()); //################################################################################################## //! Draws a series of points to an image tp_image_utils::ColorMap drawGrid(const tp_image_utils::Grid& grid, const tp_image_utils::ColorMap& image, const tp_image_utils::GridStyle& style = tp_image_utils::GridStyle()); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/EdgeDetect.h
<reponame>omi-lab/tp_image_utils_functions<gh_stars>0 #ifndef tp_image_utils_functions_EdgeDetect_h #define tp_image_utils_functions_EdgeDetect_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## tp_image_utils::ByteMap edgeDetect(const tp_image_utils::ColorMap& src, int threshold); //################################################################################################## tp_image_utils::ByteMap edgeDetect(const tp_image_utils::ByteMap& src, uint8_t threshold); //################################################################################################## tp_image_utils::ByteMap edgeDetectCorner(const tp_image_utils::ByteMap& src, uint8_t threshold); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ToPolar.h
<gh_stars>0 #ifndef tp_image_utils_functions_ToPolar_h #define tp_image_utils_functions_ToPolar_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## tp_image_utils::ByteMap toPolar(const tp_image_utils::ByteMap& src, size_t w, size_t h); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ReduceColors.h
#ifndef tp_image_utils_functions_ReduceColors_h #define tp_image_utils_functions_ReduceColors_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## //! Reduce the number of colors in an image /*! This will produce an image with a reduced color palette. \param src - The source image; \return A copy of the source image rendered with. */ tp_image_utils::ColorMap reduceColors(const tp_image_utils::ColorMap& src, int colorCount); //################################################################################################## tp_image_utils::ByteMap reduceColors(const tp_image_utils::ByteMap& src); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ToFloat.h
<reponame>omi-lab/tp_image_utils_functions<gh_stars>0 #ifndef tp_image_utils_functions_ToFloat_h #define tp_image_utils_functions_ToFloat_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ColorMap.h" namespace tp_image_utils_functions { //################################################################################################## enum class ChannelMode { Interleaved, Separate }; //################################################################################################## enum class ChannelOrder { RGB, BGR }; //################################################################################################## std::vector<std::string> channelModes(); //################################################################################################## std::string channelModeToString(ChannelMode channelMode); //################################################################################################## ChannelMode channelModeFromString(const std::string& channelMode); //################################################################################################## std::vector<std::string> channelOrders(); //################################################################################################## std::string channelOrderToString(ChannelOrder channelOrder); //################################################################################################## ChannelOrder channelOrderFromString(const std::string& channelOrder); //################################################################################################## //! Convert an image to floating point array /*! */ void toFloat(const tp_image_utils::ColorMap& src, ChannelMode channelMode, ChannelOrder channelOrder, std::vector<float>& outData); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/FindPixelGrid.h
#ifndef tp_image_utils_functions_FindPixelGrid_h
#define tp_image_utils_functions_FindPixelGrid_h

#include "tp_image_utils_functions/Globals.h"

#include "tp_image_utils/ByteMap.h"
#include "tp_image_utils/Grid.h"

namespace tp_image_utils_functions
{

//##################################################################################################
//! Static functions for finding pixel grids in images.
struct FindPixelGrid
{
  //! Mean of the values.
  static float mean(const std::vector<int>& values);
  static float mean(const std::vector<float>& values);

  //! Squared standard deviation of the values, also writes the mean to the out parameter.
  static float squaredSD(const std::vector<int>& values, float& mean);
  static float squaredSD(const std::vector<float>& values, float& mean);

  //! Standard deviation of the values, also writes the mean to the out parameter.
  static float standardDeviation(const std::vector<int>& values, float& mean);

  //! Returns the values with outliers removed, judged against the given mean and squared SD.
  static std::vector<int> stripOutliers(const std::vector<int>& values, float mean, float squaredSD);
  static std::vector<float> stripOutliers(const std::vector<float>& values, float mean, float squaredSD);

  //! Find the positions of brightness reversals in a row of gray values.
  static std::vector<int> findReversals(const std::vector<uint8_t>& src);

  //! Find brightness reversals scanning horizontally / vertically.
  static std::vector<int> findReversalsH(const tp_image_utils::ByteMap& src);
  static std::vector<int> findReversalsV(const tp_image_utils::ByteMap& src);

  //! Estimate the mean pixel size from reversal positions.
  static float findMeanPixelSize(const std::vector<int>& values);
  static float findMeanPixelWidth(const tp_image_utils::ByteMap& src);
  static float findMeanPixelHeight(const tp_image_utils::ByteMap& src);

  //################################################################################################
  //! Finds reversals in brightness
  /*!
  The result pixels:
   - 0 = Dark to light
   - 128 = No reversal
   - 255 = Light to dark

  \param src - A gray scale vector containing the source data
  \return - An vector the same size as the source image colored as above
  */
  static std::vector<uint8_t> reversals(const std::vector<uint8_t>& src);

  //################################################################################################
  //! Finds reversals in brightness
  /*!
  The result pixels:
   - 0 = Dark to light
   - 128 = No reversal
   - 255 = Light to dark

  \param src - A gray scale image containing the source data
  \return - An image the same size as the source image colored as above
  */
  static tp_image_utils::ByteMap reversalsH(const tp_image_utils::ByteMap& src);

  //################################################################################################
  //! Finds reversals in brightness
  /*!
  The result pixels:
   - 0 = Dark to light
   - 128 = No reversal
   - 255 = Light to dark

  \param src - A gray scale image containing the source data
  \return - An image the same size as the source image colored as above
  */
  static tp_image_utils::ByteMap reversalsV(const tp_image_utils::ByteMap& src);

  //################################################################################################
  //! Find a pixel grid in a gray image.
  static tp_image_utils::ByteMap findPixelGrid(const tp_image_utils::ByteMap& src);

  //################################################################################################
  //! Find a pixel grid in src and render the result over src2.
  static tp_image_utils::ColorMap findPixelGrid(const tp_image_utils::ByteMap& src,
                                                const tp_image_utils::ColorMap& src2);

  //################################################################################################
  //! Optional parameters for findRegularGrid().
  struct FindRegularGridParams
  {
    int xCells; //!< The number of cells to find on the x axis, or 0 to calculate this.
    int yCells; //!< The number of cells to find on the y axis, or 0 to calculate this.

    tp_image_utils::LineCollection* hLines; //!< If set this will be populated with the list of horizontal lines.
    tp_image_utils::LineCollection* vLines; //!< If set this will be populated with the list of vertical lines.

    tp_image_utils::Line* correctedCorners; //!< Stretch the grid to best fit the corners

    float angleDeviation; //!< The maximum difference between the grid orientation and a line.

    FindRegularGridParams():
      xCells(14),
      yCells(14),
      hLines(nullptr),
      vLines(nullptr),
      correctedCorners(nullptr),
      angleDeviation(2.0f)
    {

    }
  };

  //################################################################################################
  //! Find a grid in the lines
  /*!
  This will search for a grid in the lines using the following steps:
  1. Rotate the lines to find the angle where the most lines are parallel or perpendicular.
  2. Once it has an orientation for the grid it will filter out lines that don't fit.
  3. Then it will try to find the best spacing by measuring the distances between lines and
     filtering out the outliers.
  4. Then it will find the best offset to align the grid with the remaining lines. This will be
     between 0 and the cell width.
  5. Finally if it is searching for a finite grid it will adjust the offset to move the grid over
     the lines so that the grid matches the most lines.

  \param lines - The lines to search.
  \param gridType - The type of grid to produce.
  \param params - Extra optional params.
  \return The best fitting grid that it could find.
  */
  static tp_image_utils::Grid findRegularGrid(const tp_image_utils::LineCollection& lines,
                                              tp_image_utils::GridType gridType,
                                              const FindRegularGridParams& params = FindRegularGridParams());
};

}

#endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ExtractPolygons.h
<filename>inc/tp_image_utils_functions/ExtractPolygons.h #ifndef tp_image_utils_functions_ExtractRect_h #define tp_image_utils_functions_ExtractRect_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" #include "tp_math_utils/Polygon.h" namespace tp_image_utils_functions { //################################################################################################## struct ExtractPolygon { //################################################################################################ static void simplePolygonExtraction(const tp_image_utils::ByteMap& sourceImage, std::vector<tp_math_utils::Polygon>& results, bool annotate=false); //################################################################################################ static void simplePolygonExtraction(const tp_image_utils::ColorMap& sourceImage, std::vector<tp_math_utils::Polygon>& results, bool annotate=false); }; } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/DrawLine.h
<reponame>omi-lab/tp_image_utils_functions #ifndef tp_image_utils_functions_DrawLine_h #define tp_image_utils_functions_DrawLine_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ColorMap.h" #include <stdint.h> #include <utility> #include <vector> namespace tp_image_utils_functions { //################################################################################################## void drawLine(tp_image_utils::ColorMap& image, TPPixel color, int x1, int y1, int x2, int y2); //################################################################################################## void drawPolyline(tp_image_utils::ColorMap& image, TPPixel color, const std::vector<std::pair<int,int>>& points); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/ToHue.h
#ifndef tp_image_utils_functions_ToHue_h #define tp_image_utils_functions_ToHue_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" namespace tp_image_utils_functions { //################################################################################################## //! Extract the colors and remove shading /*! This will produce a rgb image containing the normalized color. \param src - The color image to extract the color from. \return A rgb image with the colors normalized. */ tp_image_utils::ColorMap toHue(const tp_image_utils::ColorMap& src); //################################################################################################## tp_image_utils::ColorMap toHue(const tp_image_utils::ByteMap& src); //################################################################################################## tp_image_utils::ByteMap toHueGray(const tp_image_utils::ColorMap& src); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/PixelManipulation.h
#ifndef tp_image_utils_functions_PixelManipulation_h #define tp_image_utils_functions_PixelManipulation_h #include "tp_image_utils_functions/Globals.h" #include "tp_image_utils/ByteMap.h" #include "tp_image_utils/ColorMap.h" namespace tp_image_utils_functions { /*! All input and output values are floating point in the range 0.0f to 1.0f. In values for color input images: - red - green - blue - alpha In values for byte input images: - byte */ struct PixelManipulation { //Calculation for color image output std::string calcRed {"red"}; std::string calcGreen {"green"}; std::string calcBlue {"blue"}; std::string calcAlpha {"alpha"}; //Calculation for byte image output std::string calcByte {"byte"}; }; //################################################################################################## tp_image_utils::ColorMap pixelManipulationColor(const tp_image_utils::ColorMap& src, const PixelManipulation& params, std::vector<std::string>& errors); //################################################################################################## tp_image_utils::ColorMap pixelManipulationColor(const tp_image_utils::ByteMap& src, const PixelManipulation& params, std::vector<std::string>& errors); //################################################################################################## tp_image_utils::ByteMap pixelManipulationByte(const tp_image_utils::ColorMap& src, const PixelManipulation& params, std::vector<std::string>& errors); //################################################################################################## tp_image_utils::ByteMap pixelManipulationByte(const tp_image_utils::ByteMap& src, const PixelManipulation& params, std::vector<std::string>& errors); } #endif
omi-lab/tp_image_utils_functions
inc/tp_image_utils_functions/CellSegment.h
#ifndef tp_image_utils_functions_CellSegment_h
#define tp_image_utils_functions_CellSegment_h

#include "tp_image_utils_functions/Globals.h"

#include "tp_image_utils/ByteMap.h"

namespace tp_image_utils_functions
{

//##################################################################################################
//! Strategy used to grow the initial cells out to cover the image.
enum class CellGrowMode
{
  Box,
  Flood
};

//##################################################################################################
//! Returns the string name of a grow mode, for serialization / UI.
const char* cellGrowModeToString(CellGrowMode mode);

//##################################################################################################
//! Inverse of cellGrowModeToString.
CellGrowMode cellGrowModeFromString(const std::string& mode);

//##################################################################################################
//! Tuning parameters for cell segmentation.
/*!
NOTE(review): the units/semantics below are inferred from the names and defaults — confirm
against the implementation:
 - distanceFieldRadius: clamp radius (pixels) for the distance field.
 - minRadius: minimum radius for a seed cell.
 - maxInitialCells: cap on the number of seed cells.
 - growCellsPasses: number of grow iterations.
 - cellGrowMode: how cells are expanded each pass.
*/
struct CellSegmentParameters
{
  int distanceFieldRadius{512};
  int minRadius{10};
  int maxInitialCells{20};
  int growCellsPasses{100};
  CellGrowMode cellGrowMode{CellGrowMode::Box};
};

//##################################################################################################
//! Compute only the initial seed cells, without growing them.
tp_image_utils::ByteMap cellSegmentInitialCells(const tp_image_utils::ByteMap& src,
                                                const CellSegmentParameters& params);

//##################################################################################################
//! Full segmentation: seed cells then grow to a label map.
tp_image_utils::ByteMap cellSegment(const tp_image_utils::ByteMap& src,
                                    const CellSegmentParameters& params);

//##################################################################################################
//! Segmentation starting from caller supplied seed labels.
tp_image_utils::ByteMap cellSegment(const tp_image_utils::ByteMap& src,
                                    const tp_image_utils::ByteMap& labels,
                                    const CellSegmentParameters& params);

//##################################################################################################
//! Simplified single pass segmentation.
tp_image_utils::ByteMap cellSegmentSimple(const tp_image_utils::ByteMap& src,
                                          const CellSegmentParameters& params);

}

#endif
gonzus/dparser
scan.h
/* Copyright 2002-2004 <NAME>, All Rights Reserved */
#ifndef _scan_H_
#define _scan_H_

/* One candidate terminal match produced by the scanner: the shift to take and
   the input location at which the match ended.  `snode` is not written by
   scan_buffer itself (see scan.c) — presumably the caller in the parser fills
   it in; confirm against parse.c. */
typedef struct ShiftResult {
  struct SNode *snode;
  D_Shift *shift;
  d_loc_t loc;
} ShiftResult;

/* Scan forward from *loc using the scanner tables attached to state `st`.
   Writes the candidate shifts into result[] and returns how many were found
   (0 when nothing matches). */
int scan_buffer(d_loc_t *loc, D_State *st, ShiftResult *result);

#endif
gonzus/dparser
read_binary.c
/* Copyright 2002-2008 <NAME>, All Rights Reserved */
#include "d.h"
#include "util.h"
#include "dparse_tables.h"
#include "read_binary.h"

/* Read size*nmemb bytes from either the FILE (fp non-NULL) or the in-memory
   buffer *str, advancing *str past the consumed bytes.  Aborts via d_fail on
   a short read from the file. */
static void read_chk(void *ptr, size_t size, size_t nmemb, FILE *fp, unsigned char **str) {
  if (fp) {
    if (fread(ptr, size, nmemb, fp) != nmemb) d_fail("error reading binary tables\n");
  } else {
    memcpy(ptr, *str, size * nmemb);
    (*str) += size * nmemb;
  }
}

/* Load serialized parser tables from a FILE or a byte buffer (exactly one of
   fp / str is used).  Layout per BinaryTablesHead: the tables blob, then the
   strings blob, then a relocation list, then a string-pointer list.
   Each relocation entry is a file offset; the intptr_t stored at that offset is
   a sentinel or an offset to rebase:
     -1 -> NULL pointer
     -2 -> spec_code    (caller supplied reduction callback)
     -3 -> final_code   (caller supplied reduction callback)
     otherwise -> add the base address of tables_buf (rebase into memory)
   String entries are always rebased by the strings blob's base address.
   NOTE(review): when fp is non-NULL it is fclose()d here — this also closes a
   caller-owned FILE passed via read_binary_tables_from_file. */
BinaryTables *read_binary_tables_internal(FILE *fp, unsigned char *str, D_ReductionCode spec_code,
                                          D_ReductionCode final_code) {
  BinaryTablesHead tables;
  int i;
  BinaryTables *binary_tables = MALLOC(sizeof(BinaryTables));
  char *tables_buf, *strings_buf;
  /* struct wrapper lets a function pointer be reinterpreted as a data pointer
     for patching into the table blob */
  struct P {
    D_ReductionCode code;
  };
  struct P p_spec_code, p_final_code;
  p_spec_code.code = spec_code;
  p_final_code.code = final_code;
  read_chk(&tables, sizeof(BinaryTablesHead), 1, fp, &str);
  /* tables and strings share one allocation; strings follow the tables */
  tables_buf = MALLOC(tables.tables_size + tables.strings_size);
  read_chk(tables_buf, sizeof(char), tables.tables_size, fp, &str);
  strings_buf = tables_buf + tables.tables_size;
  read_chk(strings_buf, sizeof(char), tables.strings_size, fp, &str);
  for (i = 0; i < tables.n_relocs; i++) {
    intptr_t offset;
    void **ptr;
    intptr_t *intptr;
    read_chk((void *)&offset, sizeof(intptr_t), 1, fp, &str);
    intptr = (intptr_t *)(tables_buf + offset);
    ptr = (void **)intptr;
    if (*intptr == -1) {
      *ptr = (void *)0;
    } else if (*intptr == -2) {
      *ptr = *(void **)&p_spec_code;
    } else if (*intptr == -3) {
      *ptr = *(void **)&p_final_code;
    } else {
      *((char **)ptr) += (intptr_t)tables_buf;
    }
  }
  for (i = 0; i < tables.n_strings; i++) {
    intptr_t offset;
    read_chk((void *)&offset, sizeof(intptr_t), 1, fp, &str);
    *((char **)(tables_buf + offset)) += (intptr_t)strings_buf;
  }
  if (fp) fclose(fp);
  binary_tables->parser_tables_gram = (D_ParserTables *)(tables_buf + tables.d_parser_tables_loc);
  binary_tables->tables = tables_buf;
  return binary_tables;
}

/* Load tables from a file by pathname; aborts via d_fail if it cannot be opened. */
BinaryTables *read_binary_tables(char *file_name, D_ReductionCode spec_code,
                                 D_ReductionCode final_code) {
  FILE *fp = fopen(file_name, "rb");
  if (!fp) d_fail("error opening tables %s\n", file_name);
  return read_binary_tables_internal(fp, 0, spec_code, final_code);
}

/* Load tables from an already opened FILE (the FILE is closed on return). */
BinaryTables *read_binary_tables_from_file(FILE *fp, D_ReductionCode spec_code,
                                           D_ReductionCode final_code) {
  return read_binary_tables_internal(fp, 0, spec_code, final_code);
}

/* Load tables from an in-memory serialized buffer. */
BinaryTables *read_binary_tables_from_string(unsigned char *str, D_ReductionCode spec_code,
                                             D_ReductionCode final_code) {
  return read_binary_tables_internal(0, str, spec_code, final_code);
}

/* Release both the shared tables/strings blob and the handle itself. */
void free_BinaryTables(BinaryTables *binary_tables) {
  d_free(binary_tables->tables);
  d_free(binary_tables);
}
gonzus/dparser
d.h
/* Copyright 2002-2004 <NAME>, All Rights Reserved */
#ifndef _d_H_
#define _d_H_

#define __USE_MINGW_ANSI_STDIO 1

#ifdef MEMWATCH
#define MEMWATCH_STDIO 1
#include "../../src/memwatch-2.67/memwatch.h"
#define MEM_GROW_MACRO
#endif

#include <assert.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#if !defined(__FreeBSD__) || (__FreeBSD_version >= 500000)
#include <inttypes.h>
#endif
#include <limits.h>
#include <sys/types.h>
#if !defined(__MINGW32__) && !defined(WIN32)
#include <sys/mman.h>
#include <sys/uio.h>
#endif
#if !defined(WIN32)
#include <unistd.h>
#include <sys/time.h>
#include <dirent.h>
#endif
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <ctype.h>
#include <string.h>
#include <strings.h>

/* Allocation macros: all of dparser allocates through MALLOC/REALLOC/FREE so
   the allocator can be swapped for the Boehm GC (USE_GC) or a leak detector
   (LEAK_DETECT) at compile time.  Under USE_GC the raw libc names are poisoned
   to force callers through the macros. */
#ifdef LEAK_DETECT
#define GC_DEBUG
#include "gc.h"
#define MALLOC(n) GC_MALLOC(n)
#define CALLOC(m, n) GC_MALLOC((m) * (n))
#define FREE(p) GC_FREE(p)
#define REALLOC(p, n) GC_REALLOC((p), (n))
#define CHECK_LEAKS() GC_gcollect()
#else
#ifdef USE_GC
#include "gc.h"
#define MALLOC GC_MALLOC
#define REALLOC GC_REALLOC
#define FREE(_x)
#define malloc dont_use_malloc_use_MALLOC_instead
#define relloc dont_use_realloc_use_REALLOC_instead
#define free dont_use_free_use_FREE_instead
#else
#define MALLOC malloc
#define REALLOC realloc
#define FREE free
#endif
#endif

/* enough already with the signed/unsiged char issues */
#define isspace_(_c) isspace((unsigned char)(_c))
#define isdigit_(_c) isdigit((unsigned char)(_c))
#define isxdigit_(_c) isxdigit((unsigned char)(_c))
#define isprint_(_c) isprint((unsigned char)(_c))

/* Packed version number: major in the top byte, minor in the next, build below. */
#define D_VERSION ((D_MAJOR_VERSION << 24) + (D_MINOR_VERSION << 16) + D_BUILD_VERSION)

/* Compilation Options */
/* round2: round _x up to the next multiple of _n (_n must be a power of two). */
#define round2(_x, _n) ((_x + ((_n)-1)) & ~((_n)-1))
/* tohex1/tohex2: ASCII hex digit for the low / high nibble of a byte. */
#define tohex1(_x) ((((_x)&15) > 9) ? (((_x)&15) - 10 + 'A') : (((_x)&15) + '0'))
#define tohex2(_x) ((((_x) >> 4) > 9) ? (((_x) >> 4) - 10 + 'A') : (((_x) >> 4) + '0'))
#define numberof(_x) ((sizeof(_x)) / (sizeof((_x)[0])))

/* Fixed width aliases used throughout dparser. */
typedef int8_t int8;
typedef uint8_t uint8;
typedef int32_t int32;
typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
typedef int16_t int16;
typedef uint16_t uint16;
typedef unsigned int uint;

/* DBG(x): execute x only in debug builds with d_debug_level > 1. */
#ifdef D_DEBUG
#define DBG(_x) \
  if (d_debug_level > 1) { \
    _x; \
  }
#else
#define DBG(_x)
#endif

/* Write the version string into the caller supplied buffer. */
void d_version(char *);

#define USE_SCANNER 1

#endif
gonzus/dparser
sample_parser.c
/* Copyright 2002-2004 <NAME>, All Rights Reserved */
#include "d.h"
#include "util.h"
#include "dsymtab.h"
#include "dparse_tables.h"
#include "dparse.h"
#include "gram.h"
#include "scan.h"
#include "parse.h"
#include "arg.h"

#define SIZEOF_MY_PARSE_NODE 100 /* permit test cases up to this size */

extern D_ParserTables parser_tables_gram;

/* Command line configurable parser options; each maps to a D_Parser field and
   can also come from the environment variable named in arg_desc below. */
int save_parse_tree = 1;
int partial_parses = 0;
int fixup = 1;
int fixup_ebnf = 0;
int compare_stacks = 1;
int commit_actions_interval = 100;
int start_state = 0;
int dont_use_greediness_for_disambiguation = 0;
int dont_use_height_for_disambiguation = 0;

static void help(ArgumentState *arg_state, char *arg_unused);

ArgumentDescription arg_desc[] = {
    {"start_state", 'S', "Start State", "I", &start_state, "D_PARSE_START_STATE", NULL},
    {"save_parse_tree", 's', "Save Parse Tree", "T", &save_parse_tree, "D_PARSE_SAVE_PARSE_TREE", NULL},
    {"partial_parses", 'p', "Partial Parses", "T", &partial_parses, "D_PARSE_PARTIAL_PARSES", NULL},
    {"compare_stacks", 'c', "Compare Stacks", "T", &compare_stacks, "D_PARSE_COMPARE_STACKS", NULL},
    {"commit_interval", 'i', "Commit Interval", "I", &commit_actions_interval, "D_PARSE_COMMIT_INTERVAL", NULL},
    {"fixup", 'f', "Fixup Internal Productions", "T", &fixup, "D_PARSE_FIXUP", NULL},
    {"fixup_ebnf", 'e', "Fixup EBNF Productions", "T", &fixup_ebnf, "D_PARSE_FIXUP_EBNF", NULL},
    {"nogreedy", 'G', "No Greediness for Disambiguation", "T", &dont_use_greediness_for_disambiguation,
     "D_PARSE_GREEDY", NULL},
    {"noheight", 'H', "No Height for Disambiguation", "T", &dont_use_height_for_disambiguation,
     "D_PARSE_HEIGHT", NULL},
    {"verbose", 'v', "Verbose", "+", &d_verbose_level, "D_PARSE_VERBOSE", NULL},
    {"test", 't', "Test", "+", &test_level, "D_PARSE_TEST", NULL},
    {"debug", 'd', "Debug", "+", &d_debug_level, "D_PARSE_DEBUG", NULL},
    {"help", 'h', "Help", NULL, NULL, NULL, help},
    {0}};

ArgumentState arg_state = {0, 0, "program", arg_desc};

/* Print version banner plus the generated usage text (usage() does not return). */
static void help(ArgumentState *arg_state, char *arg_unused) {
  char ver[30];
  d_version(ver);
  fprintf(stderr, "Sample DParser Version %s ", ver);
  fprintf(stderr, "Copyright (c) 2002-2013 <NAME>\n");
  usage(arg_state, arg_unused);
}

/* Demonstration of user defined operator scanning: recognize binary '+'. */
char *ops = "+";
void *ops_cache = NULL;

/* Callback for dynamic operator recognition: if the input at loc starts with
   '+', consume it and report a left associative binary operator. Returns 1 on
   a match, 0 otherwise. */
int ops_scan(char *ops, void *ops_cache, d_loc_t *loc, unsigned char *op_assoc, int *op_priority) {
  (void)ops;
  (void)ops_cache;
  if (loc->s[0] == '+') {
    loc->s++;
    *op_assoc = ASSOC_BINARY_LEFT;
    *op_priority = 9500;
    return 1;
  }
  return 0;
}

/* Parse each file named on the command line with the generated grammar tables,
   reporting the location of the first fatal error per file. */
int main(int argc, char *argv[]) {
  int i, len = 0;
  char *buf = NULL, *fn;
  D_Parser *p;
  D_ParseNode *pn = NULL;
  (void)argc;
  process_args(&arg_state, argv);
  if (!arg_state.nfile_arguments) help(&arg_state, NULL);
  p = new_D_Parser(&parser_tables_gram, SIZEOF_MY_PARSE_NODE);
  /* propagate the command line switches into the parser configuration */
  p->save_parse_tree = save_parse_tree;
  p->ambiguity_fn = ambiguity_count_fn;
  p->partial_parses = partial_parses;
  p->dont_fixup_internal_productions = !fixup;
  p->fixup_EBNF_productions = fixup_ebnf;
  p->dont_compare_stacks = !compare_stacks;
  p->commit_actions_interval = commit_actions_interval;
  p->start_state = start_state;
  p->dont_use_greediness_for_disambiguation = dont_use_greediness_for_disambiguation;
  p->dont_use_height_for_disambiguation = dont_use_height_for_disambiguation;
  for (i = 0; i < arg_state.nfile_arguments; i++) {
    p->loc.pathname = arg_state.file_argument[i];
    p->loc.line = 1;
    p->loc.col = 0;
    if (buf_read(arg_state.file_argument[i], &buf, &len) > 0)
      pn = dparse(p, buf, len);
    else
      d_fail("unable to read file '%s'", arg_state.file_argument[i]);
    if (pn) {
      free_D_ParseNode(p, pn);
      pn = 0;
    } else {
      fn = d_dup_pathname_str(p->loc.pathname);
      fprintf(stderr, "fatal error, '%s' line %d\n", fn, p->loc.line);
      FREE(fn);
    }
    if (buf) FREE(buf);
  }
  free_D_Parser(p);
  free_args(&arg_state);
  exit(0);
}
gonzus/dparser
lex.c
/* Copyright 2002-2004 <NAME>, All Rights Reserved */
/* Scanner construction: build an NFA per parser state from its shift actions
   (literal strings and regexes), convert it to a DFA by subset construction,
   then derive transition/liveness tables for the runtime scanner. */
#include "d.h"
#include "util.h"
#include "dparse_tables.h"
#include "gram.h"
#include "lr.h"
#include "lex.h"

/* One NFA state: per-byte transitions, epsilon edges, and the shift actions
   accepted here.  `live` is used during liveness analysis. */
typedef struct NFAState {
  uint index;
  Vec(struct NFAState *) chars[256];
  Vec(struct NFAState *) epsilon;
  Vec(Action *) accepts;
  Vec(Action *) live;
} NFAState;

/* One DFA state: the set of NFA states it represents, per-byte successors,
   and the runtime ScanState eventually emitted for it. */
typedef struct DFAState {
  Vec(struct NFAState *) states;
  struct DFAState *chars[256];
  ScanState *scan;
} DFAState;

typedef Vec(DFAState *) VecDFAState;
typedef Vec(NFAState *) VecNFAState;

/* Per-build bookkeeping: NFA numbering, all allocated NFA states (for bulk
   free), statistics, and the case-folding flag for the regex currently built. */
typedef struct LexState {
  uint nfa_index;
  VecNFAState allnfas;
  uint transitions;
  uint scanners;
  uint ignore_case;
} LexState;

/* Allocate a zeroed NFA state, number it, and register it for bulk free. */
static NFAState *new_NFAState(LexState *ls) {
  NFAState *n = MALLOC(sizeof(NFAState));
  memset(n, 0, sizeof(NFAState));
  n->index = ls->nfa_index++;
  vec_add(&ls->allnfas, n);
  return n;
}

/* Allocate a zeroed DFA state. */
static DFAState *new_DFAState() {
  DFAState *n = MALLOC(sizeof(DFAState));
  memset(n, 0, sizeof(DFAState));
  return n;
}

static void free_DFAState(DFAState *y) {
  vec_free(&y->states);
  FREE(y);
}

static void free_VecDFAState(VecDFAState *dfas) {
  uint i;
  for (i = 0; i < dfas->n; i++) free_DFAState(dfas->v[i]);
  vec_free(dfas);
}

static void free_NFAState(NFAState *y) {
  uint i;
  for (i = 0; i < 256; i++) vec_free(&y->chars[i]);
  vec_free(&y->epsilon);
  vec_free(&y->accepts);
  FREE(y);
}

static void free_VecNFAState(VecNFAState *nfas) {
  uint i;
  for (i = 0; i < nfas->n; i++) free_NFAState(nfas->v[i]);
  vec_free(nfas);
}

/* Allocate a zeroed runtime scanner state. */
static ScanState *new_ScanState() {
  ScanState *n = MALLOC(sizeof(ScanState));
  memset(n, 0, sizeof(ScanState));
  return n;
}

/* qsort comparator ordering NFA states by creation index, so DFA state sets
   are canonical and comparable element-wise. */
static int nfacmp(const void *ai, const void *aj) {
  uint32 i = (*(NFAState **)ai)->index;
  uint32 j = (*(NFAState **)aj)->index;
  return (i > j) ? 1 : ((i < j) ? -1 : 0);
}

/* Expand the DFA state's NFA set with everything reachable via epsilon edges,
   then sort it into canonical order.  Note the set grows while iterating. */
static void nfa_closure(DFAState *x) {
  uint i, j, k;
  for (i = 0; i < x->states.n; i++) {
    NFAState *s = x->states.v[i];
    for (j = 0; j < x->states.v[i]->epsilon.n; j++) {
      for (k = 0; k < x->states.n; k++)
        if (x->states.v[i]->epsilon.v[j] == x->states.v[k]) goto Lbreak;
      vec_add(&x->states, s->epsilon.v[j]);
    Lbreak:;
    }
  }
  qsort(x->states.v, x->states.n, sizeof(x->states.v[0]), nfacmp);
}

/* Two DFA states are equal iff their (sorted) NFA sets are identical. */
static int eq_dfa_state(DFAState *x, DFAState *y) {
  uint i;
  if (x->states.n != y->states.n) return 0;
  for (i = 0; i < x->states.n; i++)
    if (x->states.v[i] != y->states.v[i]) return 0;
  return 1;
}

/* Emit runtime ScanStates for each DFA state; only accepts at the highest
   term_priority within a state are kept. */
static void dfa_to_scanner(VecDFAState *alldfas, VecScanState *scanner) {
  uint i, j, k;
  int highest, p;
  vec_clear(scanner);
  for (i = 0; i < alldfas->n; i++) {
    alldfas->v[i]->scan = new_ScanState();
    alldfas->v[i]->scan->index = i;
    vec_add(scanner, alldfas->v[i]->scan);
  }
  for (i = 0; i < alldfas->n; i++) {
    for (j = 0; j < 256; j++)
      if (alldfas->v[i]->chars[j]) alldfas->v[i]->scan->chars[j] = alldfas->v[i]->chars[j]->scan;
    highest = INT_MIN;
    for (j = 0; j < alldfas->v[i]->states.n; j++)
      for (k = 0; k < alldfas->v[i]->states.v[j]->accepts.n; k++) {
        p = alldfas->v[i]->states.v[j]->accepts.v[k]->term->term_priority;
        if (highest < p) highest = p;
      }
    for (j = 0; j < alldfas->v[i]->states.n; j++)
      for (k = 0; k < alldfas->v[i]->states.v[j]->accepts.n; k++) {
        p = alldfas->v[i]->states.v[j]->accepts.v[k]->term->term_priority;
        if (p == highest)
          vec_add(&alldfas->v[i]->scan->accepts, alldfas->v[i]->states.v[j]->accepts.v[k]);
      }
  }
}

/* Classic subset construction: start from the closure of `n`, expand per input
   byte, dedup against already-built DFA states, then lower to ScanStates. */
static void nfa_to_scanner(NFAState *n, Scanner *s) {
  DFAState *x = new_DFAState(), *y;
  VecDFAState alldfas;
  uint i, i_alldfas, i_states, i_char;
  VecScanState *scanner = &s->states;
  memset(&alldfas, 0, sizeof(alldfas));
  vec_add(&x->states, n);
  nfa_closure(x);
  vec_add(&alldfas, x);
  for (i_alldfas = 0; i_alldfas < alldfas.n; i_alldfas++) {
    x = alldfas.v[i_alldfas];
    for (i_char = 0; i_char < 256; i_char++) {
      y = NULL;
      for (i_states = 0; i_states < x->states.n; i_states++) {
        for (i = 0; i < x->states.v[i_states]->chars[i_char].n; i++) {
          if (!y) y = new_DFAState();
          set_add(&y->states, x->states.v[i_states]->chars[i_char].v[i]);
        }
      }
      if (y) {
        set_to_vec(&y->states);
        nfa_closure(y);
        for (i = 0; i < alldfas.n; i++)
          if (eq_dfa_state(y, alldfas.v[i])) {
            free_DFAState(y);
            y = alldfas.v[i];
            goto Lnext;
          }
        vec_add(&alldfas, y);
      Lnext:
        x->chars[i_char] = y;
      }
    }
  }
  dfa_to_scanner(&alldfas, scanner);
  free_VecDFAState(&alldfas);
}

/* build a NFA for the regular expression */
/* Recursive-descent NFA builder for the (small) regex dialect: grouping,
   alternation, character classes with ranges/negation, ? * +, escapes, and
   '/' for trailing context.  Returns nonzero if trailing context was seen.
   On '(' it recurses; ')' returns to the caller.  Case folding is controlled
   by ls->ignore_case. */
static int build_regex_nfa(LexState *ls, uint8 **areg, NFAState *pp, NFAState *nn, Action *trailing) {
  uint8 c, pc, *reg = *areg;
  NFAState *p = pp, *s, *x, *n = nn;
  int reversed, i, has_trailing = 0;
  uint8 mark[256];
  s = p;
  while ((c = *reg++)) {
    switch (c) {
      case '(':
        has_trailing = build_regex_nfa(ls, &reg, s, (x = new_NFAState(ls)), trailing) || has_trailing;
        p = s;
        s = x;
        break;
      case ')':
        goto Lreturn;
      case '|':
        vec_add(&s->epsilon, nn);
        vec_add(&pp->epsilon, (s = new_NFAState(ls)));
        break;
      case '[':
        if (*reg == '^') {
          reg++;
          reversed = 1;
        } else
          reversed = 0;
        memset(mark, 0, sizeof(mark));
        pc = UCHAR_MAX;
        while ((c = *reg++)) {
          switch (c) {
            case ']':
              goto Lsetdone;
            case '-':
              /* range: previous char up to the next (possibly escaped) char */
              c = *reg++;
              if (!c) goto Lerror;
              if (c == '\\') c = *reg++;
              if (!c) goto Lerror;
              for (; pc <= c; pc++) mark[pc] = 1;
              break;
            case '\\':
              c = *reg++;
              /* fall through */
            default:
              pc = c;
              mark[c] = 1;
              break;
          }
        }
      Lsetdone:
        x = new_NFAState(ls);
        for (i = 1; i < 256; i++)
          if ((!reversed && mark[i]) || (reversed && !mark[i])) vec_add(&s->chars[i], x);
        p = s;
        s = x;
        break;
      case '?':
        vec_add(&p->epsilon, s);
        break;
      case '*':
        vec_add(&p->epsilon, s);
        vec_add(&s->epsilon, p);
        break;
      case '+':
        vec_add(&s->epsilon, p);
        break;
      case '/':
        /* trailing context marker: accept the action built by the caller */
        vec_add(&s->accepts, trailing);
        has_trailing = 1;
        break;
      case '\\':
        c = *reg++;
        if (!c) goto Lerror;
        /* fall through */
      default:
        if (!ls->ignore_case || !isalpha(c))
          vec_add(&s->chars[c], (x = new_NFAState(ls)));
        else {
          vec_add(&s->chars[tolower(c)], (x = new_NFAState(ls)));
          vec_add(&s->chars[toupper(c)], x);
        }
        p = s;
        s = x;
        break;
    }
  }
Lreturn:
  vec_add(&s->epsilon, n);
  *areg = reg;
  return has_trailing;
Lerror:
  d_fail("bad (part of) regex: %s\n", *areg);
  return has_trailing;
}

/* a = b \ c for action vectors sorted by index (merge-style set difference). */
static void action_diff(VecAction *a, VecAction *b, VecAction *c) {
  uint bb = 0, cc = 0;
  while (1) {
    if (bb >= b->n) break;
  Lagainc:
    if (cc >= c->n) {
      while (bb < b->n) vec_add(a, b->v[bb++]);
      break;
    }
  Lagainb:
    if (b->v[bb]->index == c->v[cc]->index) {
      bb++;
      cc++;
      continue;
    }
    if (b->v[bb]->index < c->v[cc]->index) {
      vec_add(a, b->v[bb++]);
      if (bb >= b->n) break;
      goto Lagainb;
    }
    cc++;
    goto Lagainc;
  }
}

/* a = b ∩ c for action vectors sorted by index (merge-style intersection). */
static void action_intersect(VecAction *a, VecAction *b, VecAction *c) {
  uint bb = 0, cc = 0;
  while (1) {
    if (bb >= b->n) break;
  Lagainc:
    if (cc >= c->n) break;
  Lagainb:
    if (b->v[bb]->index == c->v[cc]->index) {
      vec_add(a, b->v[bb++]);
      cc++;
      continue;
    }
    if (b->v[bb]->index < c->v[cc]->index) {
      bb++;
      if (bb >= b->n) break;
      goto Lagainb;
    }
    cc++;
    goto Lagainc;
  }
}

/* Fixed-point liveness: an action is live in a state if it is accepted there
   or live in any successor.  Results end as sorted vectors per state. */
static void compute_liveness(Scanner *scanner) {
  uint i, j, changed = 1;
  ScanState *ss, *sss;
  VecScanState *states = &scanner->states;
  /* basis */
  for (i = 0; i < states->n; i++) {
    ss = states->v[i];
    set_union(&ss->live, &ss->accepts);
  }
  while (changed) {
    changed = 0;
    for (i = 0; i < states->n; i++) {
      ss = states->v[i];
      for (j = 0; j < 256; j++) {
        if ((sss = ss->chars[j])) {
          if (ss != sss)
            if (set_union(&ss->live, &sss->live)) changed = 1;
        }
      }
    }
  }
  for (i = 0; i < states->n; i++) {
    ss = states->v[i];
    set_to_vec(&ss->live);
    sort_VecAction(&ss->live);
  }
}

/* Hash a transition; fns->data[0] selects whether live_diff participates
   (mirrors the LIVE_DIFF_IN_TRANSITIONS switch in build_transitions). */
static uint32 trans_hash_fn(ScanStateTransition *a, hash_fns_t *fns) {
  uint h = 0, i;
  if (!fns->data[0])
    for (i = 0; i < a->live_diff.n; i++) h += 3 * a->live_diff.v[i]->index;
  for (i = 0; i < a->accepts_diff.n; i++) h += 3 * a->accepts_diff.v[i]->index;
  return h;
}

/* Equality companion to trans_hash_fn; returns nonzero when different. */
static int trans_cmp_fn(ScanStateTransition *a, ScanStateTransition *b, hash_fns_t *fns) {
  uint i;
  if (!fns->data[0])
    if (a->live_diff.n != b->live_diff.n) return 1;
  if (a->accepts_diff.n != b->accepts_diff.n) return 1;
  if (!fns->data[0])
    for (i = 0; i < a->live_diff.n; i++)
      if (a->live_diff.v[i] != b->live_diff.v[i]) return 1;
  for (i = 0; i < a->accepts_diff.n; i++)
    if (a->accepts_diff.v[i] != b->accepts_diff.v[i]) return 1;
  return 0;
}

static hash_fns_t trans_hash_fns = {(hash_fn_t)trans_hash_fn, (cmp_fn_t)trans_cmp_fn, {0, 0}};

/* For every (state, byte) edge compute the live/accepts difference sets,
   dedup them through a hash set, and index the unique transitions. */
static void build_transitions(LexState *ls, Scanner *s) {
  uint i, j;
  ScanState *ss;
  ScanStateTransition *trans = NULL, *x;
  VecScanState *states = &s->states;
#ifdef LIVE_DIFF_IN_TRANSITIONS
  trans_hash_fns.data[0] = (void *)0;
#else
  trans_hash_fns.data[0] = (void *)1;
#endif
  for (i = 0; i < states->n; i++) {
    ss = states->v[i];
    for (j = 0; j < 256; j++) {
      if (!trans) {
        trans = MALLOC(sizeof(*trans));
        memset(trans, 0, sizeof(*trans));
      }
      if (ss->chars[j]) {
        action_diff(&trans->live_diff, &ss->live, &ss->chars[j]->live);
        action_intersect(&trans->accepts_diff, &ss->accepts, &trans->live_diff);
      }
      /* reuse `trans` on a duplicate hit, hand it over on a fresh insert */
      if ((x = set_add_fn(&s->transitions, trans, &trans_hash_fns)) == trans)
        trans = NULL;
      else {
        vec_free(&trans->live_diff);
        vec_free(&trans->accepts_diff);
      }
      ss->transition[j] = x;
    }
  }
  if (trans) FREE(trans);
  j = 0;
  set_to_vec(&s->transitions);
  for (i = 0; i < s->transitions.n; i++) s->transitions.v[i]->index = i;
  ls->transitions += s->transitions.n;
}

/* Liveness then transition tables for one scanner. */
static void compute_transitions(LexState *ls, Scanner *s) {
  compute_liveness(s);
  build_transitions(ls, s);
}

/* Build the scanner for one parser state: string terminals share a prefix
   tree, regex terminals get their own sub-NFAs (with trailing-context
   actions registered on the grammar when used). */
static void build_state_scanner(Grammar *g, LexState *ls, State *s) {
  NFAState *n, *nn, *nnn;
  Action *a;
  uint8 *c, *reg;
  uint j, one;
  one = 0;
  n = new_NFAState(ls);
  /* first strings since they can be trivially combined as a tree */
  for (j = 0; j < s->shift_actions.n; j++) {
    a = s->shift_actions.v[j];
    if (a->kind == ACTION_ACCEPT) {
      one = 1;
      /* EOF (byte 0) edge */
      if (!n->chars[0].n)
        vec_add(&n->chars[0], (nnn = new_NFAState(ls)));
      else
        nnn = n->chars[0].v[0];
      vec_add(&nnn->accepts, a);
    } else if (a->kind == ACTION_SHIFT && a->term->kind == TERM_STRING) {
      one = 1;
      nn = n;
      if (!a->term->ignore_case) {
        for (c = (uint8 *)a->term->string; *c; c++) {
          if (!nn->chars[*c].n)
            vec_add(&nn->chars[*c], (nnn = new_NFAState(ls)));
          else
            nnn = nn->chars[*c].v[0];
          nn = nnn;
        }
      } else { /* use new states */
        for (c = (uint8 *)a->term->string; *c; c++) {
          if (isalpha(*c)) {
            vec_add(&nn->chars[toupper(*c)], (nnn = new_NFAState(ls)));
            vec_add(&nn->chars[tolower(*c)], nnn);
          } else
            vec_add(&nn->chars[*c], (nnn = new_NFAState(ls)));
          nn = nnn;
        }
      }
      vec_add(&nn->accepts, a);
    }
  }
  /* now regexes */
  for (j = 0; j < s->shift_actions.n; j++) {
    a = s->shift_actions.v[j];
    if (a->kind == ACTION_SHIFT && a->term->kind == TERM_REGEX) {
      Action *trailing_context = (Action *)MALLOC(sizeof(Action));
      memcpy(trailing_context, a, sizeof(Action));
      trailing_context->kind = ACTION_SHIFT_TRAILING;
      trailing_context->index = g->action_count++;
      one = 1;
      reg = (uint8 *)a->term->string;
      vec_add(&n->epsilon, (nnn = new_NFAState(ls)));
      nn = new_NFAState(ls);
      ls->ignore_case = a->term->ignore_case;
      if (build_regex_nfa(ls, &reg, nnn, nn, trailing_context)) {
        a->term->trailing_context = 1;
        s->trailing_context = 1;
        vec_add(&g->actions, trailing_context);
      } else
        FREE(trailing_context);
      vec_add(&nn->accepts, a);
    }
  }
  if (one) {
    nfa_to_scanner(n, &s->scanner);
    compute_transitions(ls, &s->scanner);
  }
  free_VecNFAState(&ls->allnfas);
  ls->scanners++;
}

static LexState *new_LexState() {
  LexState *ls = MALLOC(sizeof(LexState));
  memset(ls, 0, sizeof(LexState));
  vec_clear(&ls->allnfas);
  return ls;
}

/* Entry point: share scanners between states with identical shift sets, then
   build one scanner per remaining state. */
void build_scanners(Grammar *g) {
  uint i, j, k;
  State *s;
  LexState *ls = new_LexState();
  /* detect identical scanners */
  for (i = 0; i < g->states.n; i++) {
    s = g->states.v[i];
    if (s->same_shifts) continue;
    for (j = 0; j < i; j++) {
      if (g->states.v[j]->same_shifts) continue;
      if (g->states.v[j]->shift_actions.n != s->shift_actions.n) continue;
      if (g->states.v[j]->scan_kind != s->scan_kind) continue;
      for (k = 0; k < g->states.v[j]->shift_actions.n; k++)
        if (s->shift_actions.v[k]->term != g->states.v[j]->shift_actions.v[k]->term) break;
      if (k >= g->states.v[j]->shift_actions.n) {
        s->same_shifts = g->states.v[j];
        break;
      }
    }
  }
  /* build scanners */
  for (i = 0; i < g->states.n; i++) {
    s = g->states.v[i];
    if (s->shift_actions.n) {
      if (s->same_shifts)
        s->scanner = s->same_shifts->scanner;
      else
        build_state_scanner(g, ls, s);
    }
  }
  if (d_verbose_level) printf("%d scanners %d transitions\n", ls->scanners, ls->transitions);
  FREE(ls);
}
gonzus/dparser
util.c
/* Copyright 2002-2006 <NAME>, All Rights Reserved */
/* Grab-bag utilities: string helpers, file reading, growable vectors,
   open-addressed pointer sets, sorted int-list set ops, string escaping. */
#include "d.h"
#include "util.h"

/* Prime table sizes used when growing the open-addressed sets below. */
uint d_prime2[] = {1,      3,      7,      13,      31,      61,      127,      251,      509,     1021,
                   2039,   4093,   8191,   16381,   32749,   65521,   131071,   262139,   524287,  1048573,
                   2097143, 4194301, 8388593, 16777213, 33554393, 67108859, 134217689, 268435399, 536870909};

int d_verbose_level = 0;
int d_debug_level = 0;
int test_level = 0;
int d_rdebug_grammar_level = 0;

/* Duplicate a pathname, stripping surrounding double quotes if present.
   Returns a freshly MALLOCed string ("" for NULL input). */
char *d_dup_pathname_str(const char *s) {
  const char *e = s;
  if (!s) return dup_str("", 0);
  if (*e == '"') {
    e++;
    while (*e && *e != '"') e++;
    return dup_str(s + 1, e);
  } else
    return dup_str(s, s + strlen(s));
}

/* Duplicate [s, e) (or all of s when e is NULL) into a MALLOCed, NUL
   terminated string. */
char *dup_str(const char *s, const char *e) {
  int l = e ? e - s : strlen(s);
  char *ss = (char *)MALLOC(l + 1);
  memcpy(ss, s, l);
  ss[l] = 0;
  return ss;
}

/* Classic ELF-style rolling hash over the first l bytes of s. */
uint strhashl(const char *s, int l) {
  uint h = 0, g;
  int i = 0;
  for (; i < l; i++, s++) {
    h = (h << 4) + *s;
    if ((g = h & 0xf0000000)) h = (h ^ (g >> 24)) ^ g;
  }
  return h;
}

/* Read the whole file at pathname into a MALLOCed, doubly NUL terminated
   buffer.  On success *buf/*len are set and the length is returned; on any
   failure returns -1 with *buf = NULL and *len = 0.
   Fixes vs the original:
     - `fd <= 0` rejected fd 0, which is a valid descriptor when stdin has
       been closed; open() only signals failure with -1.
     - the return value of read() was assigned to a size_t unchecked, so a
       read error (-1) wrapped to a huge index and corrupted memory. */
int buf_read(const char *pathname, char **buf, int *len) {
  struct stat sb;
  int fd;
  ssize_t nread;
  *buf = 0;
  *len = 0;
  fd = open(pathname, O_RDONLY);
  if (fd < 0) return -1;
  memset(&sb, 0, sizeof(sb));
  fstat(fd, &sb);
  *len = sb.st_size;
  /* +2: MINGW likes to convert cr lf => lf which messes with the size, and
     the buffer is terminated with two NULs. */
  *buf = (char *)MALLOC(*len + 2);
  nread = read(fd, *buf, *len);
  close(fd);
  if (nread < 0) {
    FREE(*buf);
    *buf = 0;
    *len = 0;
    return -1;
  }
  (*buf)[nread] = 0;
  (*buf)[nread + 1] = 0;
  *len = (int)nread;
  return *len;
}

/* Convenience wrapper: whole file as a MALLOCed string, or NULL on failure. */
char *sbuf_read(const char *pathname) {
  char *buf;
  int len;
  if (buf_read(pathname, &buf, &len) < 0) return NULL;
  return buf;
}

/* Print "fail: <formatted message>" to stderr and exit(1). */
void d_fail(const char *str, ...) {
  char nstr[256];
  va_list ap;
  va_start(ap, str);
  snprintf(nstr, 255, "fail: %s\n", str);
  vfprintf(stderr, nstr, ap);
  va_end(ap);
  exit(1);
}

/* Print "warning: <formatted message>" to stderr and continue. */
void d_warn(const char *str, ...) {
  char nstr[256];
  va_list ap;
  va_start(ap, str);
  snprintf(nstr, 255, "warning: %s\n", str);
  vfprintf(stderr, nstr, ap);
  va_end(ap);
}

/* Append elem to an AbstractVec.  Storage strategy: the first
   INITIAL_VEC_SIZE elements live in the inline array e; after that the
   vector moves to the heap and doubles whenever the count crosses a
   power-of-two multiple of INITIAL_VEC_SIZE. */
void vec_add_internal(void *v, void *elem) {
  AbstractVec *av = (AbstractVec *)v;
  if (!av->n) {
    av->v = av->e;
  } else if (av->v == av->e) {
    av->v = (void **)MALLOC(INITIAL_VEC_SIZE * sizeof(void *));
    memcpy(av->v, av->e, av->n * sizeof(void *));
  } else {
    if ((av->n & (INITIAL_VEC_SIZE - 1)) == 0) {
      int l = av->n, nl = (1 + INITIAL_VEC_SHIFT);
      l = l >> INITIAL_VEC_SHIFT;
      while (!(l & 1)) {
        l = l >> 1;
        nl++;
      }
      l = l >> 1;
      if (!av->n || !l) {
        nl = 1 << nl;
        av->v = (void **)REALLOC(av->v, nl * sizeof(void *));
      }
    }
  }
  av->v[av->n] = elem;
  av->n++;
}

/* Element-wise pointer equality of two vectors. */
int vec_eq(void *v, void *vv) {
  AbstractVec *av = (AbstractVec *)v;
  AbstractVec *avv = (AbstractVec *)vv;
  uint i;
  if (av->n != avv->n) return 0;
  for (i = 0; i < av->n; i++)
    if (av->v[i] != avv->v[i]) return 0;
  return 1;
}

/* Grow a full AbstractStack to double capacity and push elem. */
void *stack_push_internal(AbstractStack *s, void *elem) {
  int n = s->cur - s->start;
  if (s->start == s->initial) {
    s->cur = (void **)MALLOC(n * 2 * sizeof(void *));
    memcpy(s->cur, s->start, n * sizeof(void *));
  } else
    s->cur = (void **)REALLOC(s->start, n * 2 * sizeof(void *));
  s->end = s->start = s->cur;
  s->cur += n;
  s->end += n * 2;
  *s->cur++ = elem;
  return elem;
}

/* Membership test in an open-addressed pointer set (linear probe capped at
   SET_MAX_SEQUENTIAL). */
int set_find(void *av, void *t) {
  AbstractVec *v = (AbstractVec *)av;
  int j, n = v->n;
  uint i;
  if (n) {
    uint h = ((uintptr_t)t);
    h = h % n;
    for (i = h, j = 0; i < v->n && j < SET_MAX_SEQUENTIAL; i = ((i + 1) % n), j++) {
      if (!v->v[i]) {
        return 0;
      } else if (v->v[i] == t)
        return 1;
    }
  }
  return 0;
}

/* Insert t into an open-addressed pointer set; returns 1 if inserted, 0 if
   already present.  Rehashes into the next d_prime2 size when probing fails. */
int set_add(void *av, void *t) {
  AbstractVec *v = (AbstractVec *)av, vv;
  int j, n = v->n;
  uint i;
  if (n) {
    uint h = ((uintptr_t)t);
    h = h % n;
    for (i = h, j = 0; i < v->n && j < SET_MAX_SEQUENTIAL; i = ((i + 1) % n), j++) {
      if (!v->v[i]) {
        v->v[i] = t;
        return 1;
      } else if (v->v[i] == t)
        return 0;
    }
  }
  if (!n) {
    vv.v = NULL;
    v->i = INITIAL_SET_SIZE_INDEX;
  } else {
    vv.v = v->v;
    vv.n = v->n;
    v->i = v->i + 1;
  }
  v->n = d_prime2[v->i];
  v->v = (void **)MALLOC(v->n * sizeof(void *));
  memset(v->v, 0, v->n * sizeof(void *));
  if (vv.v) {
    set_union(av, &vv);
    FREE(vv.v);
  }
  return set_add(v, t);
}

/* As set_add but with caller supplied hash/compare; returns the element that
   ends up in the set (t on insert, the existing equal element otherwise). */
void *set_add_fn(void *av, void *t, hash_fns_t *fns) {
  AbstractVec *v = (AbstractVec *)av, vv;
  uint32 tt = fns->hash_fn(t, fns);
  int j, n = v->n;
  uint i;
  if (n) {
    uint h = tt % n;
    for (i = h, j = 0; i < v->n && j < SET_MAX_SEQUENTIAL; i = ((i + 1) % n), j++) {
      if (!v->v[i]) {
        v->v[i] = t;
        return t;
      } else {
        if (!fns->cmp_fn(v->v[i], t, fns)) return v->v[i];
      }
    }
  }
  if (!n) {
    vv.v = NULL;
    v->i = INITIAL_SET_SIZE_INDEX;
  } else {
    vv.v = v->v;
    vv.n = v->n;
    v->i = v->i + 1;
  }
  v->n = d_prime2[v->i];
  v->v = (void **)MALLOC(v->n * sizeof(void *));
  memset(v->v, 0, v->n * sizeof(void *));
  if (vv.v) {
    set_union_fn(av, &vv, fns);
    FREE(vv.v);
  }
  return set_add_fn(v, t, fns);
}

/* av |= avv; returns nonzero if av changed. */
int set_union(void *av, void *avv) {
  AbstractVec *vv = (AbstractVec *)avv;
  uint i, changed = 0;
  for (i = 0; i < vv->n; i++)
    if (vv->v[i]) changed = set_add(av, vv->v[i]) || changed;
  return changed;
}

/* av |= avv using caller supplied hash/compare. */
void set_union_fn(void *av, void *avv, hash_fns_t *fns) {
  AbstractVec *vv = (AbstractVec *)avv;
  uint i;
  for (i = 0; i < vv->n; i++)
    if (vv->v[i]) set_add_fn(av, vv->v[i], fns);
}

/* Compact a sparse hash set into a dense vector (in place). */
void set_to_vec(void *av) {
  AbstractVec *v = (AbstractVec *)av, vv;
  uint i;
  vv.n = v->n;
  vv.v = v->v;
  if (v->v == v->e) {
    memcpy(vv.e, v->e, sizeof(v->e));
    vv.v = vv.e;
  }
  v->n = 0;
  v->v = 0;
  for (i = 0; i < vv.n; i++)
    if (vv.v[i]) vec_add_internal(v, vv.v[i]);
  FREE(vv.v);
}

/* a = b \ c for -1 terminated ascending int lists. */
void int_list_diff(int *a, int *b, int *c) {
  while (1) {
    if (*b < 0) break;
  Lagainc:
    if (*c < 0) {
      while (*b >= 0) *a++ = *b++;
      break;
    }
  Lagainb:
    if (*b == *c) {
      b++;
      c++;
      continue;
    }
    if (*b < *c) {
      *a++ = *b++;
      if (*b < 0) break;
      goto Lagainb;
    }
    if (*c < *b) {
      c++;
      goto Lagainc;
    }
  }
  *a++ = -1;
}

/* a = b ∩ c for -1 terminated ascending int lists. */
void int_list_intersect(int *a, int *b, int *c) {
  while (1) {
    if (*b < 0) break;
  Lagainc:
    if (*c < 0) break;
  Lagainb:
    if (*b == *c) {
      *a++ = *b++;
      c++;
      continue;
    }
    if (*b < *c) {
      b++;
      if (*b < 0) break;
      goto Lagainb;
    }
    if (*c < *b) {
      c++;
      goto Lagainc;
    }
  }
  *a++ = -1;
}

/* MALLOCed copy of a -1 terminated int list (terminator included). */
int *int_list_dup(int *aa) {
  int *a = aa, *b, *bb;
  while (*a != -1) {
    a++;
  }
  bb = b = (int *)MALLOC((a - aa + 1) * sizeof(int));
  a = aa;
  while (*a != -1) {
    *b++ = *a++;
  }
  *b++ = -1;
  return bb;
}

#define ESC(_c) \
  *ss++ = '\\'; \
  *ss++ = _c;   \
  break;

/* Escape control characters, backslash and (depending on single_quote) one
   quote style; non-printables become \xHH.  Returns a MALLOCed string sized
   for the worst case (4x). */
static char *escape_string_internal(char *s, int single_quote) {
  char *ss = (char *)MALLOC((strlen(s) + 1) * 4), *sss = ss;
  for (; *s; s++) {
    switch (*s) {
      case '\b': ESC('b');
      case '\f': ESC('f');
      case '\n': ESC('n');
      case '\r': ESC('r');
      case '\t': ESC('t');
      case '\v': ESC('v');
      case '\a': ESC('a');
      case '\\': ESC('\\');
      case '\"':
        if (!single_quote) {
          ESC(*s);
        }
        *ss++ = *s;
        break;
      case '\'':
        if (single_quote) {
          ESC(*s);
        }
        *ss++ = *s;
        break;
      default:
        if (isprint_(*s))
          *ss++ = *s;
        else {
          *ss++ = '\\';
          *ss++ = 'x';
          *ss++ = tohex2((unsigned char)*s);
          *ss++ = tohex1((unsigned char)*s);
        }
        break;
    }
  }
  *ss = 0;
  return sss;
}

/* Escape for a double quoted context. */
char *escape_string(char *s) { return escape_string_internal(s, 0); }

/* Escape for a single quoted context. */
char *escape_string_single_quote(char *s) { return escape_string_internal(s, 1); }

/* FREE wrapper exported for callers outside the macro scheme. */
void d_free(void *x) { FREE(x); }
gonzus/dparser
scan.c
/* Copyright 2002-2004 <NAME>, All Rights Reserved */
#include "d.h"
#include "dparse_tables.h"
#include "scan.h"

/* Run the DFA scanner of parse_state over the input at *aloc and collect all
   candidate shifts into results[], returning how many were found.
   The three cases are the same algorithm instantiated for 8/16/32-bit state
   indices (chosen by scanner_size).  States in the tables are stored +1 so
   that 0 can mean "no transition".  When accepts_diff is present, matches
   that become dead on the transition just taken are emitted immediately at
   the current location; the final longest match is emitted from `last`.
   Afterwards, D_SCAN_LONGEST / D_SCAN_MIXED semantics prune the result set.
   NOTE(review): in case 1 the newline line/col update happens BEFORE loc is
   stored, while in cases 2 and 4 it happens AFTER — one of the variants is
   presumably off by one on line/column for matches ending at a newline;
   confirm against upstream dparser before changing. */
int scan_buffer(d_loc_t *aloc, D_State *parse_state, ShiftResult *results) {
  d_loc_t loc = *aloc, last_loc = *aloc;
  char *s = loc.s;
  int col = loc.col, line = loc.line;
  int nresults = 0, i = 0, j;
  D_Shift **shift = NULL, **shift_diff = 0;
  switch (parse_state->scanner_size) {
    case 1: {
      /* all matches */
      SB_uint8 *st = (SB_uint8 *)parse_state->scanner_table;
      SB_trans_uint8 *tst = (SB_trans_uint8 *)parse_state->transition_table;
      uint8 state = 0, last = state, prev = state;
      uint8 c;
      uint32 sb, so;
      c = (uint8)*s++;
      while ((state = st[state].scanner_block[(sb = (c >> SCANNER_BLOCK_SHIFT))][(so = c & SCANNER_BLOCK_MASK)])) {
        state -= 1;
        if (prev && parse_state->accepts_diff) {
          shift_diff = parse_state->accepts_diff[tst[prev].scanner_block[sb][so]];
          for (; *shift_diff; shift_diff++) {
            results[nresults].loc = loc;
            results[nresults++].shift = *shift_diff;
          }
        }
        prev = state;
        if (c == '\n') {
          line++;
          col = 0;
        } else
          col++;
        loc.s = s;
        loc.line = line;
        loc.col = col;
        if (st[state].shift) {
          last = state;
          last_loc = loc;
        }
        c = (uint8)*s++;
      }
      shift = st[last].shift;
      break;
    }
    case 2: {
      /* all matches */
      SB_uint16 *st = (SB_uint16 *)parse_state->scanner_table;
      SB_trans_uint16 *tst = (SB_trans_uint16 *)parse_state->transition_table;
      uint16 state = 0, last = state, prev = state;
      uint8 c;
      uint32 sb, so;
      c = (uint8)*s++;
      while ((state = st[state].scanner_block[(sb = (c >> SCANNER_BLOCK_SHIFT))][(so = c & SCANNER_BLOCK_MASK)])) {
        state -= 1;
        if (prev && parse_state->accepts_diff) {
          shift_diff = parse_state->accepts_diff[tst[prev].scanner_block[sb][so]];
          for (; *shift_diff; shift_diff++) {
            results[nresults].loc = loc;
            results[nresults++].shift = *shift_diff;
          }
        }
        prev = state;
        loc.s = s;
        loc.line = line;
        loc.col = col;
        if (st[state].shift) {
          last = state;
          last_loc = loc;
        }
        if (c == '\n') {
          line++;
          col = 0;
        } else
          col++;
        c = (uint8)*s++;
      }
      shift = st[last].shift;
      break;
    }
    case 4: {
      /* all matches */
      SB_uint32 *st = (SB_uint32 *)parse_state->scanner_table;
      SB_trans_uint32 *tst = (SB_trans_uint32 *)parse_state->transition_table;
      uint32 state = 0, last = state, prev = state;
      uint8 c;
      uint32 sb, so;
      c = (uint8)*s++;
      while ((state = st[state].scanner_block[(sb = (c >> SCANNER_BLOCK_SHIFT))][(so = c & SCANNER_BLOCK_MASK)])) {
        state -= 1;
        if (prev && parse_state->accepts_diff) {
          shift_diff = parse_state->accepts_diff[tst[prev].scanner_block[sb][so]];
          for (; *shift_diff; shift_diff++) {
            results[nresults].loc = loc;
            results[nresults++].shift = *shift_diff;
          }
        }
        prev = state;
        loc.s = s;
        loc.line = line;
        loc.col = col;
        if (st[state].shift) {
          last = state;
          last_loc = loc;
        }
        if (c == '\n') {
          line++;
          col = 0;
        } else
          col++;
        c = (uint8)*s++;
      }
      shift = st[last].shift;
      break;
    }
  }
  /* emit the shifts accepted at the last shifting state reached */
  if (shift) {
    for (; *shift; shift++) {
      results[nresults].loc = last_loc;
      results[nresults++].shift = *shift;
    }
  }
  if (nresults) {
    int longest = 0;
    char *end = results[nresults - 1].loc.s;
    if (parse_state->scan_kind == D_SCAN_LONGEST) longest = 1;
    if (parse_state->scan_kind == D_SCAN_MIXED) {
      /* longest-match semantics win if any candidate ending at `end` asks for it */
      for (i = nresults - 1; i >= 0; i--) {
        if (results[i].loc.s < end) break;
        if (results[i].shift->shift_kind == D_SCAN_LONGEST) longest = 1;
      }
    }
    if (longest) {
      /* keep only 'longest' */
      i = 0;
      for (j = 0; j < nresults; j++) {
        if (results[j].loc.s == end || results[j].shift->shift_kind == D_SCAN_TRAILING) {
          if (i != j) results[i] = results[j];
          i++;
        }
      }
      nresults = i;
    } else if (parse_state->scan_kind == D_SCAN_MIXED) {
      /* only keep non-longest */
      for (j = i; j >= 0; j--)
        if (results[j].shift->shift_kind != D_SCAN_LONGEST) {
          if (i != j) results[i] = results[j];
          i--;
        }
      nresults = nresults - i - 1;
      if (i != -1) memmove(&results[0], &results[i + 1], nresults * sizeof(results[0]));
    }
  }
  return nresults;
}
yelite/RoomMonitor
sensors/DHT/main.c
#include <stdio.h> #include <pcduino/Arduino.h> #define DHTLIB_OK 0 #define DHTLIB_ERROR_CHECKSUM -1 #define DHTLIB_ERROR_TIMEOUT -2 typedef struct return_value { int flag; uint8_t temp; uint8_t hum; } dht_data; dht_data readDHT(int pin) { dht_data val; // BUFFER TO RECEIVE uint8_t bits[5]; uint8_t cnt = 7; uint8_t idx = 0; // EMPTY BUFFER int i; for (i=0; i< 5; i++) bits[i] = 0; // REQUEST SAMPLE pinMode(pin, OUTPUT); digitalWrite(pin, LOW); delay(36); digitalWrite(pin, HIGH); delayMicroseconds(40); pinMode(pin, INPUT); // ACKNOWLEDGE or TIMEOUT unsigned int loopCnt = 10000; while(digitalRead(pin) == LOW) { if (loopCnt-- == 0) { val.flag = DHTLIB_ERROR_TIMEOUT; return val; } } loopCnt = 50000; while(digitalRead(pin) == HIGH){ if (loopCnt-- == 0) { val.flag = DHTLIB_ERROR_TIMEOUT; return val; } } // READ OUTPUT - 40 BITS => 5 BYTES or TIMEOUT for (i=0; i<40; i++) { loopCnt = 10000; while(digitalRead(pin) == LOW) { if (loopCnt-- == 0) { val.flag = DHTLIB_ERROR_TIMEOUT; return val; } } unsigned long t = micros(); loopCnt = 10000; while(digitalRead(pin) == HIGH) { if (loopCnt-- == 0) { val.flag = DHTLIB_ERROR_TIMEOUT; return val; } } if ((micros() - t) > 40) bits[idx] |= (1 << cnt); if (cnt == 0) // next byte? { cnt = 7; // restart at MSB idx++; // next byte! } else cnt--; } // WRITE TO RIGHT VARS // as bits[1] and bits[3] are allways zero they are omitted in formulas. 
val.hum = bits[0]; val.temp = bits[2]; uint8_t sum = bits[0] + bits[2]; if (bits[4] != sum) { val.flag = DHTLIB_ERROR_CHECKSUM; return val; } val.flag = DHTLIB_OK; return val; } void setup() { if (argc != 2) { printf("usage: %s GPIOpin#\n", argv[0]); printf("example: %s 4 - Read from an DHT connected to GPIO #4\n", argv[0]); } int pin = atoi(argv[1]); dht_data rv = readDHT(pin); switch(rv.flag) { case DHTLIB_OK: printf("Temperature (C): \n%d\n", rv.temp); printf("Humidity (%%): \n%d\n", rv.hum); break; case DHTLIB_ERROR_CHECKSUM: printf("Checksum Error!"); break; case DHTLIB_ERROR_TIMEOUT: printf("Timeout Error!"); break; } } void loop() { exit(0); }
yelite/RoomMonitor
sensors/BMP/Wire.h
/* TwoWire.h - TWI/I2C library for Wiring */ #ifndef TwoWire_h #define TwoWire_h #include <inttypes.h> #include "pcduino/Stream.h" #define BUFFER_LENGTH 32 class TwoWire : public Stream { private: static int i2c_handle; static uint8_t rxBuffer[]; static uint8_t rxBufferIndex; static uint8_t rxBufferLength; static uint8_t txBuffer[]; static uint8_t txBufferIndex; static uint8_t txBufferLength; static uint8_t transmitting; static void (*user_onRequest)(void); static void (*user_onReceive)(int); static void onRequestService(void); static void onReceiveService(uint8_t*, int); public: TwoWire(); void begin(); void begin(uint8_t); void begin(int); void setBusFreq(unsigned int); void beginTransmission(uint8_t); void beginTransmission(int); uint8_t endTransmission(void); uint8_t requestFrom(uint8_t, uint8_t); uint8_t requestFrom(int, int); virtual int write(byte); virtual size_t write(const uint8_t *, size_t); virtual int available(void); virtual int read(void); virtual int peek(void); virtual void flush(void); void onReceive( void (*)(int) ); void onRequest( void (*)(void) ); using Print::write; }; extern TwoWire Wire; #endif
yelite/RoomMonitor
sensors/LightSensor/main.c
<gh_stars>0 #include <pcduino/Arduino.h> void setup() { if (argc != 2) { printf("usage: %s GPIOpin#\n", argv[0]); printf("example: %s 4 - Read from an light sensor connected to GPIO #4\n", argv[0]); exit(1); } int pin = atoi(argv[1]); int rv = analogRead(pin); printf("Voltage(mV): \n%d\n", rv); } void loop() { exit(0); }
hl4080/RDMA-Tutorial
debug.h
<reponame>hl4080/RDMA-Tutorial /* * code from : http://c.learncodethehardway.org/book/ex20.html */ #ifndef DEBUG_H_ #define DEBUG_H_ #include <stdio.h> #include <errno.h> #include <string.h> #define LOG_HEADER "\n================ %s ================\n" #define LOG_SUB_HEADER "\n************ %s ************\n" extern FILE *log_fp; #define clean_errno() (errno == 0 ? "None" : strerror(errno)) #define log_err(M, ...) fprintf(stderr, "[ERROR] (%s:%d:%s: errno: %s) " M "\n",\ __FILE__, __LINE__, __func__, clean_errno(), ##__VA_ARGS__) #define log_warn(M, ...) fprintf(stderr, "[WARN] (%s:%d:%s errno: %s) " M "\n",\ __FILE__, __LINE__, __func__, clean_errno(), ##__VA_ARGS__) #define log_info(M, ...) fprintf(stderr, "" M "\n", ##__VA_ARGS__) #define log_file(M, ...) {fprintf(log_fp, "" M "\n", ##__VA_ARGS__);fflush(log_fp);} #define sentinel(M, ...) {log_err(M, ##__VA_ARGS__); errno=0; goto error;} #define check(A, M, ...) if(!(A)) {log_err(M, ##__VA_ARGS__); errno=0; goto error;} #ifdef DEBUG #define debug_detail(M, ...) fprintf(stderr, "[DEBUG] (%s:%d:%s) " M "\n",\ __FILE__, __LINE__, __func__, ##__VA_ARGS__) #define debug(M, ...) fprintf(stderr, "[DEBUG] " M "\n", ##__VA_ARGS__) #define log(M, ...) {log_info (M, ##__VA_ARGS__); log_file (M, ##__VA_ARGS__);} #else #define debug(M, ...) #define log(M, ...) {log_file (M, ##__VA_ARGS__);} #endif #endif /* DEBUG_H_ */
hl4080/RDMA-Tutorial
setup_ib.c
<gh_stars>100-1000 #include <arpa/inet.h> #include <unistd.h> #include <malloc.h> #include "sock.h" #include "ib.h" #include "debug.h" #include "config.h" #include "setup_ib.h" struct IBRes ib_res; int connect_qp_server () { int ret = 0, n = 0, i = 0; int num_peers = config_info.num_clients; int sockfd = 0; int *peer_sockfd = NULL; struct sockaddr_in peer_addr; socklen_t peer_addr_len = sizeof(struct sockaddr_in); char sock_buf[64] = {'\0'}; struct QPInfo *local_qp_info = NULL; struct QPInfo *remote_qp_info = NULL; sockfd = sock_create_bind(config_info.sock_port); check(sockfd > 0, "Failed to create server socket."); listen(sockfd, 5); peer_sockfd = (int *) calloc (num_peers, sizeof(int)); check (peer_sockfd != NULL, "Failed to allocate peer_sockfd"); for (i = 0; i < num_peers; i++) { peer_sockfd[i] = accept(sockfd, (struct sockaddr *)&peer_addr, &peer_addr_len); check (peer_sockfd[i] > 0, "Failed to create peer_sockfd[%d]", i); } /* init local qp_info */ local_qp_info = (struct QPInfo *) calloc (num_peers, sizeof(struct QPInfo)); check (local_qp_info != NULL, "Failed to allocate local_qp_info"); for (i = 0; i < num_peers; i++) { local_qp_info[i].lid = ib_res.port_attr.lid; local_qp_info[i].qp_num = ib_res.qp[i]->qp_num; local_qp_info[i].rank = config_info.rank; } /* get qp_info from client */ remote_qp_info = (struct QPInfo *) calloc (num_peers, sizeof(struct QPInfo)); check (remote_qp_info != NULL, "Failed to allocate remote_qp_info"); for (i = 0; i < num_peers; i++) { ret = sock_get_qp_info (peer_sockfd[i], &remote_qp_info[i]); check (ret == 0, "Failed to get qp_info from client[%d]", i); } /* send qp_info to client */ int peer_ind = -1; int j = 0; for (i = 0; i < num_peers; i++) { peer_ind = -1; for (j = 0; j < num_peers; j++) { if (remote_qp_info[j].rank == i) { peer_ind = j; break; } } ret = sock_set_qp_info (peer_sockfd[i], &local_qp_info[peer_ind]); check (ret == 0, "Failed to send qp_info to client[%d]", peer_ind); } /* change send QP state to RTS */ log 
(LOG_SUB_HEADER, "Start of IB Config"); for (i = 0; i < num_peers; i++) { peer_ind = -1; for (j = 0; j < num_peers; j++) { if (remote_qp_info[j].rank == i) { peer_ind = j; break; } } ret = modify_qp_to_rts (ib_res.qp[peer_ind], remote_qp_info[i].qp_num, remote_qp_info[i].lid); check (ret == 0, "Failed to modify qp[%d] to rts", peer_ind); log ("\tqp[%"PRIu32"] <-> qp[%"PRIu32"]", ib_res.qp[peer_ind]->qp_num, remote_qp_info[i].qp_num); } log (LOG_SUB_HEADER, "End of IB Config"); /* sync with clients */ for (i = 0; i < num_peers; i++) { n = sock_read (peer_sockfd[i], sock_buf, sizeof(SOCK_SYNC_MSG)); check (n == sizeof(SOCK_SYNC_MSG), "Failed to receive sync from client"); } for (i = 0; i < num_peers; i++) { n = sock_write (peer_sockfd[i], sock_buf, sizeof(SOCK_SYNC_MSG)); check (n == sizeof(SOCK_SYNC_MSG), "Failed to write sync to client"); } for (i = 0; i < num_peers; i++) { close (peer_sockfd[i]); } free (peer_sockfd); close (sockfd); return 0; error: if (peer_sockfd != NULL) { for (i = 0; i < num_peers; i++) { if (peer_sockfd[i] > 0) { close (peer_sockfd[i]); } } free (peer_sockfd); } if (sockfd > 0) { close (sockfd); } return -1; } int connect_qp_client () { int ret = 0, n = 0, i = 0; int num_peers = ib_res.num_qps; int *peer_sockfd = NULL; char sock_buf[64] = {'\0'}; struct QPInfo *local_qp_info = NULL; struct QPInfo *remote_qp_info = NULL; peer_sockfd = (int *) calloc (num_peers, sizeof(int)); check (peer_sockfd != NULL, "Failed to allocate peer_sockfd"); for (i = 0; i < num_peers; i++) { peer_sockfd[i] = sock_create_connect (config_info.servers[i], config_info.sock_port); check (peer_sockfd[i] > 0, "Failed to create peer_sockfd[%d]", i); } /* init local qp_info */ local_qp_info = (struct QPInfo *) calloc (num_peers, sizeof(struct QPInfo)); check (local_qp_info != NULL, "Failed to allocate local_qp_info"); for (i = 0; i < num_peers; i++) { local_qp_info[i].lid = ib_res.port_attr.lid; local_qp_info[i].qp_num = ib_res.qp[i]->qp_num; local_qp_info[i].rank = 
config_info.rank; } /* send qp_info to server */ for (i = 0; i < num_peers; i++) { ret = sock_set_qp_info (peer_sockfd[i], &local_qp_info[i]); check (ret == 0, "Failed to send qp_info[%d] to server", i); } /* get qp_info from server */ remote_qp_info = (struct QPInfo *) calloc (num_peers, sizeof(struct QPInfo)); check (remote_qp_info != NULL, "Failed to allocate remote_qp_info"); for (i = 0; i < num_peers; i++) { ret = sock_get_qp_info (peer_sockfd[i], &remote_qp_info[i]); check (ret == 0, "Failed to get qp_info[%d] from server", i); } /* change QP state to RTS */ /* send qp_info to client */ int peer_ind = -1; int j = 0; log (LOG_SUB_HEADER, "IB Config"); for (i = 0; i < num_peers; i++) { peer_ind = -1; for (j = 0; j < num_peers; j++) { if (remote_qp_info[j].rank == i) { peer_ind = j; break; } } ret = modify_qp_to_rts (ib_res.qp[peer_ind], remote_qp_info[i].qp_num, remote_qp_info[i].lid); check (ret == 0, "Failed to modify qp[%d] to rts", peer_ind); log ("\tqp[%"PRIu32"] <-> qp[%"PRIu32"]", ib_res.qp[peer_ind]->qp_num, remote_qp_info[i].qp_num); } log (LOG_SUB_HEADER, "End of IB Config"); /* sync with server */ for (i = 0; i < num_peers; i++) { n = sock_write (peer_sockfd[i], sock_buf, sizeof(SOCK_SYNC_MSG)); check (n == sizeof(SOCK_SYNC_MSG), "Failed to write sync to client[%d]", i); } for (i = 0; i < num_peers; i++) { n = sock_read (peer_sockfd[i], sock_buf, sizeof(SOCK_SYNC_MSG)); check (n == sizeof(SOCK_SYNC_MSG), "Failed to receive sync from client"); } for (i = 0; i < num_peers; i++) { close (peer_sockfd[i]); } free (peer_sockfd); free (local_qp_info); free (remote_qp_info); return 0; error: if (peer_sockfd != NULL) { for (i = 0; i < num_peers; i++) { if (peer_sockfd[i] > 0) { close (peer_sockfd[i]); } } free (peer_sockfd); } if (local_qp_info != NULL) { free (local_qp_info); } if (remote_qp_info != NULL) { free (remote_qp_info); } return -1; } int setup_ib () { int ret = 0; int i = 0; struct ibv_device **dev_list = NULL; memset (&ib_res, 0, sizeof(struct 
IBRes)); if (config_info.is_server) { ib_res.num_qps = config_info.num_clients; } else { ib_res.num_qps = config_info.num_servers; } /* get IB device list */ dev_list = ibv_get_device_list(NULL); check(dev_list != NULL, "Failed to get ib device list."); /* create IB context */ ib_res.ctx = ibv_open_device(*dev_list); check(ib_res.ctx != NULL, "Failed to open ib device."); /* allocate protection domain */ ib_res.pd = ibv_alloc_pd(ib_res.ctx); check(ib_res.pd != NULL, "Failed to allocate protection domain."); /* query IB port attribute */ ret = ibv_query_port(ib_res.ctx, IB_PORT, &ib_res.port_attr); check(ret == 0, "Failed to query IB port information."); /* register mr */ /* set the buf_size twice as large as msg_size * num_concurr_msgs */ /* the recv buffer occupies the first half while the sending buffer */ /* occupies the second half */ /* assume all msgs are of the same content */ ib_res.ib_buf_size = config_info.msg_size * config_info.num_concurr_msgs * ib_res.num_qps; ib_res.ib_buf = (char *) memalign (4096, ib_res.ib_buf_size); check (ib_res.ib_buf != NULL, "Failed to allocate ib_buf"); ib_res.mr = ibv_reg_mr (ib_res.pd, (void *)ib_res.ib_buf, ib_res.ib_buf_size, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE); check (ib_res.mr != NULL, "Failed to register mr"); /* query IB device attr */ ret = ibv_query_device(ib_res.ctx, &ib_res.dev_attr); check(ret==0, "Failed to query device"); /* create cq */ ib_res.cq = ibv_create_cq (ib_res.ctx, ib_res.dev_attr.max_cqe, NULL, NULL, 0); check (ib_res.cq != NULL, "Failed to create cq"); /* create srq */ struct ibv_srq_init_attr srq_init_attr = { .attr.max_wr = ib_res.dev_attr.max_srq_wr, .attr.max_sge = 1, }; ib_res.srq = ibv_create_srq (ib_res.pd, &srq_init_attr); /* create qp */ struct ibv_qp_init_attr qp_init_attr = { .send_cq = ib_res.cq, .recv_cq = ib_res.cq, .srq = ib_res.srq, .cap = { .max_send_wr = ib_res.dev_attr.max_qp_wr, .max_recv_wr = ib_res.dev_attr.max_qp_wr, .max_send_sge = 1, 
.max_recv_sge = 1, }, .qp_type = IBV_QPT_RC, }; ib_res.qp = (struct ibv_qp **) calloc (ib_res.num_qps, sizeof(struct ibv_qp *)); check (ib_res.qp != NULL, "Failed to allocate qp"); for (i = 0; i < ib_res.num_qps; i++) { ib_res.qp[i] = ibv_create_qp (ib_res.pd, &qp_init_attr); check (ib_res.qp[i] != NULL, "Failed to create qp[%d]", i); } /* connect QP */ if (config_info.is_server) { ret = connect_qp_server (); } else { ret = connect_qp_client (); } check (ret == 0, "Failed to connect qp"); ibv_free_device_list (dev_list); return 0; error: if (dev_list != NULL) { ibv_free_device_list (dev_list); } return -1; } void close_ib_connection () { int i; if (ib_res.qp != NULL) { for (i = 0; i < ib_res.num_qps; i++) { if (ib_res.qp[i] != NULL) { ibv_destroy_qp (ib_res.qp[i]); } } free (ib_res.qp); } if (ib_res.srq != NULL) { ibv_destroy_srq (ib_res.srq); } if (ib_res.cq != NULL) { ibv_destroy_cq (ib_res.cq); } if (ib_res.mr != NULL) { ibv_dereg_mr (ib_res.mr); } if (ib_res.pd != NULL) { ibv_dealloc_pd (ib_res.pd); } if (ib_res.ctx != NULL) { ibv_close_device (ib_res.ctx); } if (ib_res.ib_buf != NULL) { free (ib_res.ib_buf); } }
hl4080/RDMA-Tutorial
config.c
<gh_stars>100-1000 #include <stdlib.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <sys/utsname.h> #include "debug.h" #include "config.h" struct ConfigInfo config_info; /* remove space, tab and line return from the line */ void clean_up_line (char *line) { char *i = line; char *j = line; while (*j != 0) { *i = *j; j += 1; if (*i != ' ' && *i != '\t' && *i != '\r' && *i != '\n') { i += 1; } } *i = 0; } /* * parse_node_list: * get a list of servers or clients * * return value: * the number of nodes in the list */ int parse_node_list (char *line, char ***node_list) { int start = 0, end = 0, num_nodes=0; char *i = line; char node_name_prefix[128] = {'\0'}; char *j = node_name_prefix; while (*i != '.') { if ((*i >= '0') && (*i <= '9')) { start = start * 10 + *i - '0'; } else { *j = *i; j += 1; } i += 1; } i += 2; while (*i != 0) { if ((*i >= '0') && (*i <= '9')) { end = end * 10 + *i - '0'; } i += 1; } num_nodes = end - start + 1; check (num_nodes > 0, "Invaild number of nodes: %d", num_nodes); *node_list = (char **) calloc (num_nodes, sizeof(char *)); if (*node_list == NULL){ printf ("Failed to allocate node_list.\n"); return 0; } int k = 0, node_ind = start; for (k = 0; k < num_nodes; k++) { (*node_list)[k] = (char *) calloc (128, sizeof(char)); check ((*node_list)[k] != NULL, "Failed to allocate node_list[%d]", k); if (strstr(node_name_prefix, "mnemosyne")) { sprintf ((*node_list)[k], "mnemosyne%02d", node_ind); } else { sprintf ((*node_list)[k], "saguaro%d", node_ind); } node_ind += 1; } return num_nodes; error: return -1; } int get_rank () { int ret = 0; uint32_t i = 0; uint32_t num_servers = config_info.num_servers; uint32_t num_clients = config_info.num_clients; struct utsname utsname_buf; char hostname[64]; /* get hostname */ ret = uname (&utsname_buf); check (ret == 0, "Failed to call uname"); strncpy (hostname, utsname_buf.nodename, sizeof(hostname)); config_info.rank = -1; for (i = 0; i < num_servers; i++) { if (strstr(hostname, 
config_info.servers[i])) { config_info.rank = i; config_info.is_server = true; break; } } for (i = 0; i < num_clients; i++) { if (strstr(hostname, config_info.clients[i])) { if (config_info.rank == -1) { config_info.rank = i; config_info.is_server = false; break; } else { check (0, "node (%s) listed as both server and client", hostname); } } } check (config_info.rank >= 0, "Failed to get rank for node: %s", hostname); return 0; error: return -1; } int parse_config_file (char *fname) { int ret = 0; FILE *fp = NULL; char line[128] = {'\0'}; int attr = 0; fp = fopen (fname, "r"); check (fp != NULL, "Failed to open config file %s", fname); while (fgets(line, 128, fp) != NULL) { // skip comments if (strstr(line, "#") != NULL) { continue; } clean_up_line (line); if (strstr (line, "servers:")) { attr = ATTR_SERVERS; continue; } else if (strstr (line, "clients:")) { attr = ATTR_CLIENTS; continue; } else if (strstr (line, "msg_size:")) { attr = ATTR_MSG_SIZE; continue; } else if (strstr (line, "num_concurr_msgs:")) { attr = ATTR_NUM_CONCURR_MSGS; continue; } if (attr == ATTR_SERVERS) { ret = parse_node_list (line, &config_info.servers); check (ret > 0, "Failed to get server list"); config_info.num_servers = ret; } else if (attr == ATTR_CLIENTS) { ret = parse_node_list (line, &config_info.clients); check (ret > 0, "Failed to get client list"); config_info.num_clients = ret; } else if (attr == ATTR_MSG_SIZE) { config_info.msg_size = atoi(line); check (config_info.msg_size > 0, "Invalid Value: msg_size = %d", config_info.msg_size); } else if (attr == ATTR_NUM_CONCURR_MSGS) { config_info.num_concurr_msgs = atoi(line); check (config_info.num_concurr_msgs > 0, "Invalid Value: num_concurr_msgs = %d", config_info.num_concurr_msgs); } attr = 0; } ret = get_rank (); check (ret == 0, "Failed to get rank"); fclose (fp); return 0; error: if (fp != NULL) { fclose (fp); } return -1; } void destroy_config_info () { int num_servers = config_info.num_servers; int num_clients = 
config_info.num_clients; int i; if (config_info.servers != NULL) { for (i = 0; i < num_servers; i++) { if (config_info.servers[i] != NULL) { free (config_info.servers[i]); } } free (config_info.servers); } if (config_info.clients != NULL) { for (i = 0; i < num_clients; i++) { if (config_info.clients[i] != NULL) { free (config_info.clients[i]); } } free (config_info.clients); } } void print_config_info () { log (LOG_SUB_HEADER, "Configuraion"); if (config_info.is_server) { log ("is_server = %s", "true"); } else { log ("is_server = %s", "false"); } log ("rank = %d", config_info.rank); log ("msg_size = %d", config_info.msg_size); log ("num_concurr_msgs = %d", config_info.num_concurr_msgs); log ("sock_port = %s", config_info.sock_port); log (LOG_SUB_HEADER, "End of Configuraion"); }
hl4080/RDMA-Tutorial
server.h
#ifndef SERVER_H_ #define SERVER_H_ int run_server (); #endif /* server.h */
hl4080/RDMA-Tutorial
sock.c
<filename>sock.c #define _GNU_SOURCE #include <sys/socket.h> #include <netdb.h> #include <arpa/inet.h> #include <unistd.h> #include <string.h> #include "debug.h" #include "sock.h" ssize_t sock_read (int sock_fd, void *buffer, size_t len) { ssize_t nr, tot_read; char *buf = buffer; // avoid pointer arithmetic on void pointer tot_read = 0; while (len !=0 && (nr = read(sock_fd, buf, len)) != 0) { if (nr < 0) { if (errno == EINTR) { continue; } else { return -1; } } len -= nr; buf += nr; tot_read += nr; } return tot_read; } ssize_t sock_write (int sock_fd, void *buffer, size_t len) { ssize_t nw, tot_written; const char *buf = buffer; // avoid pointer arithmetic on void pointer for (tot_written = 0; tot_written < len; ) { nw = write(sock_fd, buf, len-tot_written); if (nw <= 0) { if (nw == -1 && errno == EINTR) { continue; } else { return -1; } } tot_written += nw; buf += nw; } return tot_written; } int sock_create_bind (char *port) { struct addrinfo hints; struct addrinfo *result, *rp; int sock_fd = -1, ret = 0; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; hints.ai_flags = AI_PASSIVE; ret = getaddrinfo(NULL, port, &hints, &result); check(ret==0, "getaddrinfo error."); for (rp = result; rp != NULL; rp = rp->ai_next) { sock_fd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); if (sock_fd < 0) { continue; } ret = bind(sock_fd, rp->ai_addr, rp->ai_addrlen); if (ret == 0) { /* bind success */ break; } close(sock_fd); sock_fd = -1; } check(rp != NULL, "creating socket."); freeaddrinfo(result); return sock_fd; error: if (result) { freeaddrinfo(result); } if (sock_fd > 0) { close(sock_fd); } return -1; } int sock_create_connect (char *server_name, char *port) { struct addrinfo hints; struct addrinfo *result, *rp; int sock_fd = -1, ret = 0; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; ret = getaddrinfo(server_name, port, &hints, &result); 
check(ret==0, "[ERROR] %s", gai_strerror(ret)); for (rp = result; rp != NULL; rp = rp->ai_next) { sock_fd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); if (sock_fd == -1) { continue; } ret = connect(sock_fd, rp->ai_addr, rp->ai_addrlen); if (ret == 0) { /* connection success */ break; } close(sock_fd); sock_fd = -1; } check(rp!=NULL, "could not connect."); freeaddrinfo(result); return sock_fd; error: if (result) { freeaddrinfo(result); } if (sock_fd != -1) { close(sock_fd); } return -1; } int sock_set_qp_info(int sock_fd, struct QPInfo *qp_info) { int n; struct QPInfo tmp_qp_info; tmp_qp_info.lid = htons(qp_info->lid); tmp_qp_info.qp_num = htonl(qp_info->qp_num); tmp_qp_info.rank = htonl(qp_info->rank); n = sock_write(sock_fd, (char *)&tmp_qp_info, sizeof(struct QPInfo)); check(n==sizeof(struct QPInfo), "write qp_info to socket."); return 0; error: return -1; } int sock_get_qp_info(int sock_fd, struct QPInfo *qp_info) { int n; struct QPInfo tmp_qp_info; n = sock_read(sock_fd, (char *)&tmp_qp_info, sizeof(struct QPInfo)); check(n==sizeof(struct QPInfo), "read qp_info from socket."); qp_info->lid = ntohs(tmp_qp_info.lid); qp_info->qp_num = ntohl(tmp_qp_info.qp_num); qp_info->rank = ntohl(tmp_qp_info.rank); return 0; error: return -1; }
hl4080/RDMA-Tutorial
config.h
#ifndef CONFIG_H_ #define CONFIG_H_ #include <stdbool.h> #include <inttypes.h> enum ConfigFileAttr { ATTR_SERVERS = 1, ATTR_CLIENTS, ATTR_MSG_SIZE, ATTR_NUM_CONCURR_MSGS, }; struct ConfigInfo { int num_servers; int num_clients; char **servers; /* list of servers */ char **clients; /* list of clients */ bool is_server; /* if the current node is server */ int rank; /* the rank of the node */ int msg_size; /* the size of each echo message */ int num_concurr_msgs; /* the number of messages can be sent concurrently */ char *sock_port; /* socket port number */ }__attribute__((aligned(64))); extern struct ConfigInfo config_info; int parse_config_file (char *fname); void destroy_config_info (); void print_config_info (); #endif /* CONFIG_H_*/
hl4080/RDMA-Tutorial
client.h
#ifndef CLIENT_H_ #define CLIENT_H_ int run_client (); #endif /* client.h */
hl4080/RDMA-Tutorial
setup_ib.h
<filename>setup_ib.h #ifndef SETUP_IB_H_ #define SETUP_IB_H_ #include <infiniband/verbs.h> struct IBRes { struct ibv_context *ctx; struct ibv_pd *pd; struct ibv_mr *mr; struct ibv_cq *cq; struct ibv_qp **qp; struct ibv_srq *srq; struct ibv_port_attr port_attr; struct ibv_device_attr dev_attr; int num_qps; char *ib_buf; size_t ib_buf_size; }; extern struct IBRes ib_res; int setup_ib (); void close_ib_connection (); int connect_qp_server (); int connect_qp_client (); #endif /*setup_ib.h*/
thomascrmbz/cpp-chatapp
src/client/command_handler.h
<gh_stars>0 #pragma once #include "chat_server.h" #include "chat_ui.h" #include <string> #include <vector> #include <sstream> #include <iterator> namespace ChatApp { class CommandHandler { public: static void handle(std::string command, std::vector<std::string> args, ChatApp::ChatUI * chat_ui, ChatApp::ChatServer * chat_server) { if (command == "help") { chat_ui->print("\e[94m/help \e[0m- show all available commands"); chat_ui->print("\e[94m/msg <username> <message> \e[0m- send a private message"); // chat_ui->print("\e[94m/r <message> \e[0m- reply to a private message"); chat_ui->print("\e[94m/exit \e[0m- exit the chat"); } if (command == "msg") { if (args.size() > 1) { std::stringstream ss; std::copy(args.begin() + 1, args.end() - 1, std::ostream_iterator<std::string>(ss, " ")); ss << args.back(); std::string username = args[0]; std::string message = ss.str(); chat_server->write(username + ";" + message, 0x4); chat_ui->print("\e[1;35mTo \e[0m" + username + ": " + message); } else chat_ui->print("\e[1;31m/msg <username> <message>"); } if (command == "exit") exit(0); } }; }
thomascrmbz/cpp-chatapp
src/server/user.h
<filename>src/server/user.h #pragma once #include <string> #include <websocket_server.h> namespace ChatApp { class User { public: User(std::string username, WebSocket::Connection * connection); public: std::string get_username(void); WebSocket::Connection * get_connection(void); public: static ChatApp::User * get_user(WebSocket::Connection * connection); static ChatApp::User * get_user(std::string username); static void add_user(ChatApp::User user); static void delete_user(WebSocket::Connection * connection); private: std::string username; WebSocket::Connection * connection; }; static std::vector<ChatApp::User> users; }
thomascrmbz/cpp-chatapp
src/client/chat_ui.h
#pragma once #include <string> #include <functional> #include <vector> namespace ChatApp { class ChatUI { public: ChatUI(void); ~ChatUI(); public: std::string ask_ip(void); std::string ask_username(void); public: void print_global_message(std::string username, std::string message); void print_private_message(std::string username, std::string message); void print_broadcast(std::string text); void print(std::string text); public: std::function<void(std::string, std::vector<std::string>)> on_command = [](std::string command, std::vector<std::string> args) {}; std::function<void(std::string)> on_message = [](std::string message) {}; public: void run(void); private: std::string ask(std::string message); void handle_command(std::string input); private: std::string current_input = ""; std::string previous_input = ""; std::string next_input = ""; }; }
thomascrmbz/cpp-chatapp
src/client/helper.h
<gh_stars>0 #pragma once #include <string> #include <vector> class Helper { public: static char getch(void); static std::string trim_string(std::string str); static std::vector<std::string> string_to_vector(std::string str); };
thomascrmbz/cpp-chatapp
src/client/chat_server.h
<reponame>thomascrmbz/cpp-chatapp<filename>src/client/chat_server.h<gh_stars>0 #pragma once #include <string> #include <functional> #include <websocket_client.h> namespace ChatApp { class ChatServer { public: ChatServer(std::string ip); ~ChatServer(); public: void set_username(std::string username); public: void connect(void); void write(std::string message, int type); bool is_connected(void); public: std::function<void(std::string, std::string, int)> on_message = [](std::string username, std::string message, int type) {}; private: void listen(void); private: std::string ip; std::string username; WebSocket::Connection * connection; bool connected = false; }; }
jamshark70/pure-data
src/s_midi_pm.c
<filename>src/s_midi_pm.c
/* Copyright (c) 1997-2003 <NAME>, <NAME>, <NAME>,
* <NAME>, <NAME>, and others.
* For information on usage and redistribution, and for a DISCLAIMER OF ALL
* WARRANTIES, see the file, "LICENSE.txt," in this distribution.

   this file calls portmidi to do MIDI I/O for MSW and Mac OSX.
*/

#include "m_pd.h"
#include "s_stuff.h"
#include <stdio.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#include <sys/time.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif /* _WIN32 */
#endif /* HAVE_UNISTD_H */
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "portmidi.h"
#include "porttime.h"

/* full status byte definitions in s_midi.c */
/* channel voice messages */
#define MIDI_NOTEOFF 0x80
#define MIDI_NOTEON 0x90
#define MIDI_POLYAFTERTOUCH 0xa0
#define MIDI_CONTROLCHANGE 0xb0
#define MIDI_PROGRAMCHANGE 0xc0
#define MIDI_AFTERTOUCH 0xd0
#define MIDI_PITCHBEND 0xe0
/* system common messages */
#define MIDI_SYSEX 0xf0
#define MIDI_TIMECODE 0xf1
#define MIDI_SONGPOS 0xf2
#define MIDI_SONGSELECT 0xf3
#define MIDI_SYSEXEND 0xf7

/* open PortMidi streams and their counts, filled by sys_do_open_midi() */
static PmStream *mac_midiindevlist[MAXMIDIINDEV];
static PmStream *mac_midioutdevlist[MAXMIDIOUTDEV];
static int mac_nmidiindev;
static int mac_nmidioutdev;

/* Open the requested MIDI in/out devices. midiinvec/midioutvec hold
 * per-direction device indices (counting only input- resp. output-capable
 * PortMidi devices); successfully opened streams are appended to the
 * mac_midi*devlist arrays. */
void sys_do_open_midi(int nmidiin, int *midiinvec, int nmidiout, int *midioutvec) {
  int i = 0, j, devno;
  int n = 0;
  PmError err;

  Pt_Start(1, 0, 0); /* start a timer with millisecond accuracy */
  mac_nmidiindev = 0;

  for (i = 0; i < nmidiin; i++) {
    /* devno counts only input-capable devices, to match midiinvec numbering */
    for (j = 0, devno = 0; j < Pm_CountDevices(); j++) {
      const PmDeviceInfo *info = Pm_GetDeviceInfo(j);
      if (info->input) {
        if (devno == midiinvec[i]) {
          err = Pm_OpenInput(&mac_midiindevlist[mac_nmidiindev], j, NULL, 1024, NULL, NULL);
          if (err)
            post("could not open MIDI input %d (%s): %s", j, info->name, Pm_GetErrorText(err));
          else {
            /* disable default active sense filtering */
            Pm_SetFilter(mac_midiindevlist[mac_nmidiindev], 0);
            verbose(PD_VERBOSE, "MIDI input (%s) opened.", info->name);
            mac_nmidiindev++;
          }
        }
        devno++;
      }
    }
  }

  mac_nmidioutdev = 0;
  for (i = 0; i < nmidiout; i++) {
    for (j = 0, devno = 0; j < Pm_CountDevices(); j++) {
      const PmDeviceInfo *info = Pm_GetDeviceInfo(j);
      if (info->output) {
        if (devno == midioutvec[i]) {
          err = Pm_OpenOutput(&mac_midioutdevlist[mac_nmidioutdev], j, NULL, 0, NULL, NULL, 0);
          if (err)
            post("could not open MIDI output %d (%s): %s", j, info->name, Pm_GetErrorText(err));
          else {
            verbose(PD_VERBOSE, "MIDI output (%s) opened.", info->name);
            mac_nmidioutdev++;
          }
        }
        devno++;
      }
    }
  }
}

/* Close every open MIDI stream and reset the counts. */
void sys_close_midi(void) {
  int i;
  for (i = 0; i < mac_nmidiindev; i++) Pm_Close(mac_midiindevlist[i]);
  mac_nmidiindev = 0;
  for (i = 0; i < mac_nmidioutdev; i++) Pm_Close(mac_midioutdevlist[i]);
  mac_nmidioutdev = 0;
}

/* Send one complete (status, data1, data2) MIDI message on output port
 * 'portno'; silently ignores out-of-range port numbers. */
void sys_putmidimess(int portno, int a, int b, int c) {
  PmEvent buffer;
  /* fprintf(stderr, "put 1 msg %d %d\n", portno, mac_nmidioutdev); */
  if (portno >= 0 && portno < mac_nmidioutdev) {
    buffer.message = Pm_Message(a, b, c);
    buffer.timestamp = 0;
    /* fprintf(stderr, "put msg\n"); */
    Pm_Write(mac_midioutdevlist[portno], &buffer, 1);
  }
}

/* Pack four raw bytes into one PmEvent (little end first) and send it;
 * used to push sysex data through PortMidi's 4-bytes-per-event framing. */
static void writemidi4(PortMidiStream* stream, int a, int b, int c, int d) {
  PmEvent buffer;
  buffer.timestamp = 0;
  buffer.message = ((a & 0xff) | ((b & 0xff) << 8) | ((c & 0xff) << 16) | ((d & 0xff) << 24));
  Pm_Write(stream, &buffer, 1);
}

/* Send a single raw MIDI byte on output port 'portno', reassembling the
 * byte stream into whole messages / 4-byte sysex chunks so it fits
 * PortMidi's event framing.
 * NOTE(review): the parser state below is static and therefore shared by
 * ALL output ports — interleaving bytes from different ports would corrupt
 * messages; confirm whether callers ever do that. */
void sys_putmidibyte(int portno, int byte) {
  /* try to parse the bytes into MIDI messages so they can
     fit into PortMidi buffers. */
  static int mess[4];            /* partial message being assembled */
  static int nbytes = 0, sysex = 0, i;
  if (byte > MIDI_SYSEXEND) {
    /* realtime */
    writemidi4(mac_midioutdevlist[portno], byte, 0, 0, 0);
  } else if (byte == MIDI_SYSEX) {
    /* sysex start */
    mess[0] = MIDI_SYSEX;
    nbytes = 1;
    sysex = 1;
  } else if (byte == MIDI_SYSEXEND) {
    /* sysex end: flush whatever is pending, zero-padded */
    mess[nbytes] = byte;
    for (i = nbytes+1; i < 4; i++) mess[i] = 0;
    writemidi4(mac_midioutdevlist[portno], mess[0], mess[1], mess[2], mess[3]);
    sysex = 0;
    nbytes = 0;
  } else if (byte >= MIDI_NOTEOFF) {
    /* status byte */
    sysex = 0;
    if (byte > MIDI_SONGSELECT) {
      /* 0 data bytes */
      writemidi4(mac_midioutdevlist[portno], byte, 0, 0, 0);
      nbytes = 0;
    } else {
      /* 1 or 2 data bytes */
      mess[0] = byte;
      nbytes = 1;
    }
  } else if (sysex) {
    /* sysex data byte: buffer until a 4-byte event is full */
    mess[nbytes] = byte;
    nbytes++;
    if (nbytes == 4) {
      writemidi4(mac_midioutdevlist[portno], mess[0], mess[1], mess[2], mess[3]);
      nbytes = 0;
    }
  } else if (nbytes) {
    /* channel or system message data byte; running status is kept by
       resetting nbytes to 1 for channel messages */
    int status = mess[0];
    if (status < MIDI_SYSEX) status &= 0xf0;
    if (status == MIDI_PROGRAMCHANGE || status == MIDI_AFTERTOUCH || status == MIDI_TIMECODE || status == MIDI_SONGSELECT) {
      /* 1-data-byte messages complete here */
      writemidi4(mac_midioutdevlist[portno], mess[0], byte, 0, 0);
      nbytes = (status < MIDI_SYSEX ? 1 : 0);
    } else {
      if (nbytes == 1) {
        mess[1] = byte;
        nbytes = 2;
      } else {
        /* second data byte completes a 2-data-byte message */
        writemidi4(mac_midioutdevlist[portno], mess[0], mess[1], byte, 0);
        nbytes = (status < 0xf0 ? 1 : 0);
      }
    }
  }
}

/* this is non-zero if we are in the middle of transmitting sysex */
int nd_sysex_mode = 0;

/* send in 4 bytes of sysex data. if one of the bytes is sysex end
   stop and unset nd_sysex_mode */
void nd_sysex_inword(int midiindev, int status, int data1, int data2, int data3) {
  if (nd_sysex_mode) {
    sys_midibytein(midiindev, status);
    if (status == MIDI_SYSEXEND) nd_sysex_mode = 0;
  }
  if (nd_sysex_mode) {
    sys_midibytein(midiindev, data1);
    if (data1 == MIDI_SYSEXEND) nd_sysex_mode = 0;
  }
  if (nd_sysex_mode) {
    sys_midibytein(midiindev, data2);
    if (data2 == MIDI_SYSEXEND) nd_sysex_mode = 0;
  }
  if (nd_sysex_mode) {
    sys_midibytein(midiindev, data3);
    if (data3 == MIDI_SYSEXEND) nd_sysex_mode = 0;
  }
}

/* Drain pending input events from every open MIDI input port and feed the
 * bytes to Pd via sys_midibytein(). Processing is capped per call
 * ('throttle') so a flood of events cannot stall the scheduler. */
void sys_poll_midi(void) {
  int i, nmess, throttle = 100;
  PmEvent buffer;
  for (i = 0; i < mac_nmidiindev; i++) {
    while ((nmess = Pm_Read(mac_midiindevlist[i], &buffer, 1))) {
      if (!throttle--) goto overload;
      if (nmess > 0) {
        int status = Pm_MessageStatus(buffer.message);
        int data1 = Pm_MessageData1(buffer.message);
        int data2 = Pm_MessageData2(buffer.message);
        int data3 = ((buffer.message >> 24) & 0xff);
        /* system messages keep the full status byte; channel messages
           dispatch on the high nibble */
        int msgtype = ((status & 0xf0) == 0xf0 ? status : (status & 0xf0));
        if (status > MIDI_SYSEXEND) {
          /* realtime message */
          sys_midibytein(i, status);
        } else if (nd_sysex_mode)
          nd_sysex_inword(i, status, data1, data2, data3);
        else switch (msgtype) {
          /* 2 data bytes */
          case MIDI_NOTEOFF:
          case MIDI_NOTEON:
          case MIDI_POLYAFTERTOUCH:
          case MIDI_CONTROLCHANGE:
          case MIDI_PITCHBEND:
          case MIDI_SONGPOS:
            sys_midibytein(i, status);
            sys_midibytein(i, data1);
            sys_midibytein(i, data2);
            break;
          /* 1 data byte */
          case MIDI_PROGRAMCHANGE:
          case MIDI_AFTERTOUCH:
          case MIDI_TIMECODE:
          case MIDI_SONGSELECT:
            sys_midibytein(i, status);
            sys_midibytein(i, data1);
            break;
          /* no data bytes */
          case MIDI_SYSEX:
            /* enter sysex mode; remaining bytes stream via nd_sysex_inword */
            nd_sysex_mode = 1;
            nd_sysex_inword(i, status, data1, data2, data3);
            break;
          /* all others */
          default:
            sys_midibytein(i, status);
            break;
        }
      } else {
        /* nmess < 0 is a PmError code */
        pd_error(0, "%s", Pm_GetErrorText(nmess));
        if (nmess != pmBufferOverflow) break;
      }
    }
  }
 overload: ;
}

/* Fill the fixed-stride name tables with up to 'maxndev' MIDI input and
 * output device names; writes the counts to *nindevs / *noutdevs. */
void midi_getdevs(char *indevlist, int *nindevs, char *outdevlist, int *noutdevs, int maxndev, int devdescsize) {
  int i, nindev = 0, noutdev = 0;
  for (i = 0; i < Pm_CountDevices(); i++) {
    const PmDeviceInfo *info = Pm_GetDeviceInfo(i);
    /* post("%d: %s, %s (%d,%d)", i, info->interf, info->name, info->input, info->output); */
    if (info->input && nindev < maxndev) {
      strcpy(indevlist + nindev * devdescsize, info->name);
      nindev++;
    }
    if (info->output && noutdev < maxndev) {
      strcpy(outdevlist + noutdev * devdescsize, info->name);
      noutdev++;
    }
  }
  *nindevs = nindev;
  *noutdevs = noutdev;
}
jamshark70/pure-data
src/s_file.c
/* Copyright (c) 1997-2004 <NAME>.
* For information on usage and redistribution, and for a DISCLAIMER OF ALL
* WARRANTIES, see the file, "LICENSE.txt," in this distribution. */

/*
 * this file implements a mechanism for storing and retrieving preferences.
 * Should later be renamed "preferences.c" or something.
 *
 * In unix this is handled by the "~/.pdsettings" file, in windows by
 * the registry, and in MacOS by the Preferences system.
 */

#include "m_pd.h"
#include "s_stuff.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#endif
#ifdef _WIN32
#include <windows.h>
#include <tchar.h>
#include <io.h>
#endif

#ifdef _MSC_VER  /* This is only for Microsoft's compiler, not cygwin, e.g. */
#define snprintf _snprintf
#endif

#ifdef __APPLE__ /* needed for plist handling */
#include <CoreFoundation/CoreFoundation.h>
#endif

void sys_doflags(void);

    /* in-memory copy of a preferences file ('\n'-prepended so that key
       lookups can anchor on "\nkey:"), and the file being written to */
static PERTHREAD char *sys_prefbuf;
static PERTHREAD int sys_prefbufsize;
static PERTHREAD FILE *sys_prefsavefp;

    /* slurp the whole preferences file into sys_prefbuf for later lookups */
static void sys_initloadpreferences_file(const char *filename)
{
    int fd;
    long length;
    if ((fd = open(filename, 0)) < 0)
    {
        if (sys_verbose)
            perror(filename);
        return;
    }
    length = lseek(fd, 0, 2);       /* seek to end to learn the file size */
    if (length < 0)
    {
        if (sys_verbose)
            perror(filename);
        close(fd);
        return;
    }
    lseek(fd, 0, 0);
        /* + 2: leading '\n' sentinel plus terminating 0 */
    if (!(sys_prefbuf = malloc(length + 2)))
    {
        pd_error(0, "couldn't allocate memory for preferences buffer");
        close(fd);
        return;
    }
    sys_prefbuf[0] = '\n';
    if (read(fd, sys_prefbuf+1, length) < length)
    {
        perror(filename);
        sys_prefbuf[0] = 0;
        close(fd);
        return;
    }
    sys_prefbuf[length+1] = 0;
    close(fd);
    verbose(PD_VERBOSE, "success reading preferences from: %s", filename);
}

    /* look "key" up in the loaded buffer; copy its (whitespace-trimmed)
       value into "value" (at most size-1 chars), return 1 if found */
static int sys_getpreference_file(const char *key, char *value, int size)
{
    char searchfor[80], *where, *whereend;
    if (!sys_prefbuf)
        return (0);
    sprintf(searchfor, "\n%s:", key);
    where = strstr(sys_prefbuf, searchfor);
    if (!where)
        return (0);
    where += strlen(searchfor);
    while (*where == ' ' || *where == '\t')
        where++;
    for (whereend = where; *whereend && *whereend != '\n'; whereend++)
        ;
    if (*whereend == '\n')
        whereend--;
    if (whereend > where + size - 1)
        whereend = where + size - 1;
    strncpy(value, where, whereend+1-where);
    value[whereend+1-where] = 0;
    return (1);
}

static void sys_doneloadpreferences_file(void)
{
    if (sys_prefbuf)
        free(sys_prefbuf);
}

static void sys_initsavepreferences_file(const char *filename)
{
    if ((sys_prefsavefp = fopen(filename, "w")) == NULL)
        pd_error(0, "%s: %s", filename, strerror(errno));
}

static void sys_putpreference_file(const char *key, const char *value)
{
    if (sys_prefsavefp)
        fprintf(sys_prefsavefp, "%s: %s\n", key, value);
}

static void sys_donesavepreferences_file(void)
{
    if (sys_prefsavefp)
    {
        fclose(sys_prefsavefp);
        sys_prefsavefp = 0;
    }
}

#if defined(__APPLE__)

/***** macos: read and write to ~/Library/Preferences plist file ******/

static PERTHREAD CFMutableDictionaryRef sys_prefdict = NULL;

// get preferences file load path into dst, returns 1 if embedded
static int preferences_getloadpath(char *dst, size_t size)
{
    char embedded_prefs[MAXPDSTRING];
    char user_prefs[MAXPDSTRING];
    char *homedir = getenv("HOME");
    struct stat statbuf;
    snprintf(embedded_prefs, MAXPDSTRING, "%s/../org.puredata.pd",
        sys_libdir->s_name);
    snprintf(user_prefs, MAXPDSTRING,
        "%s/Library/Preferences/org.puredata.pd.plist", homedir);
    if (stat(user_prefs, &statbuf) == 0)
    {
        strncpy(dst, user_prefs, size);
        return 0;
    }
    else
    {
        strncpy(dst, embedded_prefs, size);
        return 1;
    }
}

// get preferences file save path
static void preferences_getsavepath(char *dst, size_t size)
{
    char user_prefs[MAXPDSTRING];
    snprintf(user_prefs, MAXPDSTRING,
        "%s/Library/Preferences/org.puredata.pd.plist", getenv("HOME"));
    strncpy(dst, user_prefs, size);
}

    // read the preferences plist into sys_prefdict for later lookups
static void sys_initloadpreferences(void)
{
    char user_prefs[MAXPDSTRING];
    CFStringRef path = NULL;
    CFURLRef fileURL = NULL;
    CFReadStreamRef stream = NULL;
    CFErrorRef err = NULL;
    CFPropertyListRef plist = NULL;
    if (sys_prefbuf || sys_prefdict)
    {
        bug("sys_initloadpreferences");
        return;
    }

    // open read stream
    preferences_getloadpath(user_prefs, MAXPDSTRING);
    path = CFStringCreateWithCStringNoCopy(kCFAllocatorDefault,
        user_prefs, kCFStringEncodingUTF8, kCFAllocatorNull);
    fileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
        path, kCFURLPOSIXPathStyle, false); // false -> not a directory
    stream = CFReadStreamCreateWithFile(kCFAllocatorDefault, fileURL);
    if (!stream || !CFReadStreamOpen(stream))
        goto cleanup;

    // read plist
    plist = CFPropertyListCreateWithStream(kCFAllocatorDefault,
        stream, 0, kCFPropertyListImmutable, NULL, &err);
    if (!plist)
    {
        CFStringRef errString = CFErrorCopyDescription(err);
        pd_error(0, "couldn't read preferences plist: %s",
            CFStringGetCStringPtr(errString, kCFStringEncodingUTF8));
        CFRelease(errString);
        goto cleanup;
    }
    CFRetain(plist);
    sys_prefdict = (CFMutableDictionaryRef)plist;

cleanup:
    if (stream)
    {
        if (CFReadStreamGetStatus(stream) == kCFStreamStatusOpen)
        {
            CFReadStreamClose(stream);
        }
        CFRelease(stream);
    }
    if (fileURL) {CFRelease(fileURL);}
    if (path) {CFRelease(path);}
    if (err) {CFRelease(err);}
}

static void sys_doneloadpreferences(void)
{
    if (sys_prefbuf)
        sys_doneloadpreferences_file();
    if (sys_prefdict)
    {
        CFRelease(sys_prefdict);
        sys_prefdict = NULL;
    }
}

    // start collecting preferences into an in-memory plist dictionary
static void sys_initsavepreferences(void)
{
    if (sys_prefsavefp)
    {
        bug("sys_initsavepreferences");
        return;
    }
    sys_prefdict = CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
        &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
}

    // serialize the collected dictionary and write it out as a plist
static void sys_donesavepreferences(void)
{
    char user_prefs[MAXPDSTRING];
    CFStringRef path = NULL;
    CFURLRef fileURL = NULL;
    CFWriteStreamRef stream = NULL;
    CFErrorRef err = NULL;
    CFDataRef data = NULL;
    if (sys_prefsavefp)
        sys_donesavepreferences_file();
    if (!sys_prefdict)
        return;

    // convert dict to plist data
    data = CFPropertyListCreateData(kCFAllocatorDefault,
        (CFPropertyListRef)sys_prefdict,
        kCFPropertyListBinaryFormat_v1_0, 0, &err);
    if (!data)
    {
        CFStringRef errString = CFErrorCopyDescription(err);
        pd_error(0, "couldn't write preferences plist: %s",
            CFStringGetCStringPtr(errString, kCFStringEncodingUTF8));
        CFRelease(errString);
        goto cleanup;
    }

    // open write stream
    preferences_getsavepath(user_prefs, MAXPDSTRING);
    path = CFStringCreateWithCStringNoCopy(kCFAllocatorDefault,
        user_prefs, kCFStringEncodingUTF8, kCFAllocatorNull);
    fileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
        path, kCFURLPOSIXPathStyle, false); // false -> not a directory
    stream = CFWriteStreamCreateWithFile(kCFAllocatorDefault, fileURL);
    if (!stream || !CFWriteStreamOpen(stream))
        goto cleanup;

    // write plist
    if (CFWriteStreamWrite(stream, CFDataGetBytePtr(data),
        CFDataGetLength(data)) < 0)
    {
        pd_error(0, "couldn't write preferences plist");
        goto cleanup;
    }

cleanup:
    if (sys_prefdict)
    {
        CFRelease(sys_prefdict);
        sys_prefdict = NULL;
    }
    if (data) {CFRelease(data);}
    if (stream)
    {
        if(CFWriteStreamGetStatus(stream) == kCFStreamStatusOpen)
        {
            CFWriteStreamClose(stream);
        }
        CFRelease(stream);
    }
    if (fileURL) {CFRelease(fileURL);}
    if (path) {CFRelease(path);}
    if (err) {CFRelease(err);}
}

    /* look a key up, preferring: explicit file buffer, loaded plist dict,
       then shelling out to the "defaults" command as a last resort */
static int sys_getpreference(const char *key, char *value, int size)
{
    if (sys_prefbuf)
        return (sys_getpreference_file(key, value, size));
    if (sys_prefdict)
    {
        /* read from loaded plist dict */
        CFStringRef k = CFStringCreateWithCStringNoCopy(kCFAllocatorDefault,
            key, kCFStringEncodingUTF8, kCFAllocatorNull);
        void *v = NULL;
        int ret = 0;
        if (CFDictionaryGetValueIfPresent(sys_prefdict, k, (const void **)&v))
        {
            ret = CFStringGetCString((CFStringRef)v, value, size,
                kCFStringEncodingUTF8);
#if 0
            if (ret)
                fprintf(stderr, "plist read %s = %s\n", key, value);
#endif
            if (v) CFRelease(v);
        }
        CFRelease(k);
        return (ret);
    }
    else
    {
        /* fallback to defaults command */
        char cmdbuf[256];
        int nread = 0;
        char path[MAXPDSTRING];
        int embedded = preferences_getloadpath(path, MAXPDSTRING);
        if (embedded)
            snprintf(cmdbuf, 256,
                "defaults read %s %s 2> /dev/null\n", path, key);
        else
            snprintf(cmdbuf, 256,
                "defaults read org.puredata.pd %s 2> /dev/null\n", key);
        FILE *fp = popen(cmdbuf, "r");
        if (!fp)    /* popen() can fail (fork/pipe exhaustion) */
            return (0);
        while (nread < size)
        {
            int newread = fread(value+nread, 1, size-nread, fp);
            if (newread <= 0)
                break;
            nread += newread;
        }
        pclose(fp);
        if (nread < 1)
            return (0);
        if (nread >= size)
            nread = size-1;
        value[nread] = 0;
        if (value[nread-1] == '\n')     /* remove newline character at end */
            value[nread-1] = 0;
        return (1);
    }
}

    /* store a key, preferring: explicit save file, in-memory plist dict,
       then the "defaults" command */
static void sys_putpreference(const char *key, const char *value)
{
    if (sys_prefsavefp)
    {
        sys_putpreference_file(key, value);
        return;
    }
    if (sys_prefdict)
    {
        CFStringRef k = CFStringCreateWithCString(kCFAllocatorDefault,
            key, kCFStringEncodingUTF8);
        CFStringRef v = CFStringCreateWithCString(kCFAllocatorDefault,
            value, kCFStringEncodingUTF8);
        CFDictionarySetValue((CFMutableDictionaryRef)sys_prefdict, k, v);
        CFRelease(k);
        CFRelease(v);
#if 0
        fprintf(stderr, "plist write %s = %s\n", key, value);
#endif
    }
    else
    {
        /* fallback to defaults command */
        char cmdbuf[MAXPDSTRING];
        snprintf(cmdbuf, MAXPDSTRING,
            "defaults write org.puredata.pd %s \"%s\" 2> /dev/null\n",
            key, value);
        system(cmdbuf);
    }
}

#elif defined(_WIN32)

/***** windows: read and write to registry ******/

static void sys_initloadpreferences(void)
{
    if (sys_prefbuf)
        bug("sys_initloadpreferences");
}

static void sys_doneloadpreferences(void)
{
    if (sys_prefbuf)
        sys_doneloadpreferences_file();
}

static void sys_initsavepreferences(void)
{
    if (sys_prefsavefp)
        bug("sys_initsavepreferences");
}

static void sys_donesavepreferences(void)
{
    if (sys_prefsavefp)
        sys_donesavepreferences_file();
}

    /* look a key up in an explicit file if one was opened, otherwise in
       HKEY_CURRENT_USER\Software\Pure-Data */
static int sys_getpreference(const char *key, char *value, int size)
{
    if (sys_prefbuf)
        return (sys_getpreference_file(key, value, size));
    else
    {
        HKEY hkey;
        DWORD bigsize = size;
        LONG err = RegOpenKeyEx(HKEY_CURRENT_USER,
            "Software\\Pure-Data", 0, KEY_QUERY_VALUE, &hkey);
        if (err != ERROR_SUCCESS)
            return (0);
        err = RegQueryValueEx(hkey, key, 0, 0, value, &bigsize);
        if (err != ERROR_SUCCESS)
        {
            RegCloseKey(hkey);
            return (0);
        }
        RegCloseKey(hkey);
        return (1);
    }
}

static void sys_putpreference(const char *key, const char *value)
{
    if (sys_prefsavefp)
        sys_putpreference_file(key, value);
    else
    {
        HKEY hkey;
        LONG err = RegCreateKeyEx(HKEY_CURRENT_USER,
            "Software\\Pure-Data", 0, NULL, REG_OPTION_NON_VOLATILE,
            KEY_SET_VALUE, NULL, &hkey, NULL);
        if (err != ERROR_SUCCESS)
        {
            pd_error(0, "unable to create registry entry: %s\n", key);
            return;
        }
        err = RegSetValueEx(hkey, key, 0, REG_EXPAND_SZ, value,
            strlen(value)+1);
        if (err != ERROR_SUCCESS)
            pd_error(0, "unable to set registry entry: %s\n", key);
        RegCloseKey(hkey);
    }
}

#else

/***** linux/android/BSD etc: read and write to ~/.pdsettings file ******/

    /* load from ~/.pdsettings if present, otherwise from the
       default.pdsettings embedded in the package */
static void sys_initloadpreferences(void)
{
    char filenamebuf[MAXPDSTRING], *homedir = getenv("HOME");
    char user_prefs_file[MAXPDSTRING]; /* user prefs file */
        /* default prefs embedded in the package */
    char default_prefs_file[MAXPDSTRING];
    struct stat statbuf;

    snprintf(default_prefs_file, MAXPDSTRING, "%s/default.pdsettings",
        sys_libdir->s_name);
    snprintf(user_prefs_file, MAXPDSTRING, "%s/.pdsettings",
        (homedir ? homedir : "."));
    if (stat(user_prefs_file, &statbuf) == 0)
        strncpy(filenamebuf, user_prefs_file, MAXPDSTRING);
    else if (stat(default_prefs_file, &statbuf) == 0)
        strncpy(filenamebuf, default_prefs_file, MAXPDSTRING);
    else return;
    filenamebuf[MAXPDSTRING-1] = 0;
    sys_initloadpreferences_file(filenamebuf);
}

static int sys_getpreference(const char *key, char *value, int size)
{
    return (sys_getpreference_file(key, value, size));
}

static void sys_doneloadpreferences(void)
{
    sys_doneloadpreferences_file();
}

static void sys_initsavepreferences(void)
{
    char filenamebuf[MAXPDSTRING], *homedir = getenv("HOME");
    if (!homedir)
        return;
    snprintf(filenamebuf, MAXPDSTRING, "%s/.pdsettings", homedir);
    filenamebuf[MAXPDSTRING-1] = 0;
    sys_initsavepreferences_file(filenamebuf);
}

static void sys_putpreference(const char *key, const char *value)
{
    sys_putpreference_file(key, value);
}

static void sys_donesavepreferences(void)
{
    sys_donesavepreferences_file();
}

#endif

    /* load audio, MIDI, path and startup settings from "filename" if
       non-empty, otherwise from the platform's preference store;
       "startingup" additionally applies saved startup flags */
void sys_loadpreferences(const char *filename, int startingup)
{
    t_audiosettings as;
    int nmidiindev, midiindev[MAXMIDIINDEV];
    int nmidioutdev, midioutdev[MAXMIDIOUTDEV];
    int midiapi, maxi, i;
    char prefbuf[MAXPDSTRING], keybuf[80];

    sys_get_audio_settings(&as);

    if (*filename)
        sys_initloadpreferences_file(filename);
    else sys_initloadpreferences();
        /* load audio preferences */
    if (!sys_externalschedlib
        && sys_getpreference("audioapi", prefbuf, MAXPDSTRING)
        && sscanf(prefbuf, "%d", &as.a_api) < 1)
            as.a_api = -1;
        /* JMZ/MB: brackets for initializing */
    if (sys_getpreference("noaudioin", prefbuf, MAXPDSTRING) &&
        (!strcmp(prefbuf, ".") || !strcmp(prefbuf, "True")))
            as.a_nindev = 0;
    else
    {
            /* note: the counter is advanced only at the bottom of the
               loop body; break-ing out early leaves it at the number of
               devices actually read */
        for (as.a_nindev = 0; as.a_nindev < MAXAUDIOINDEV;)
        {
                /* first try to find a name - if that matches an existing
                   device use it.  Otherwise fall back to device number. */
            int devn;
                /* read in device number and channel count */
            sprintf(keybuf, "audioindev%d", as.a_nindev+1);
            if (!sys_getpreference(keybuf, prefbuf, MAXPDSTRING))
                break;
            if (sscanf(prefbuf, "%d %d", &as.a_indevvec[as.a_nindev],
                &as.a_chindevvec[as.a_nindev]) < 2)
                    break;
                /* possibly override device number if the device name was
                   also saved and if it matches one we have now */
            sprintf(keybuf, "audioindevname%d", as.a_nindev+1);
            if (sys_getpreference(keybuf, prefbuf, MAXPDSTRING)
                && (devn = sys_audiodevnametonumber(0, prefbuf)) >= 0)
                    as.a_indevvec[as.a_nindev] = devn;
            as.a_nindev++;
        }
            /* if no preferences at all, set -1 for default behavior */
        if (as.a_nindev == 0)
            as.a_nindev = -1;
    }
        /* JMZ/MB: brackets for initializing */
    if (sys_getpreference("noaudioout", prefbuf, MAXPDSTRING) &&
        (!strcmp(prefbuf, ".") || !strcmp(prefbuf, "True")))
            as.a_noutdev = 0;
    else
    {
        for (as.a_noutdev = 0; as.a_noutdev < MAXAUDIOOUTDEV;)
        {
            int devn;
            sprintf(keybuf, "audiooutdev%d", as.a_noutdev+1);
            if (!sys_getpreference(keybuf, prefbuf, MAXPDSTRING))
                break;
            if (sscanf(prefbuf, "%d %d", &as.a_outdevvec[as.a_noutdev],
                &as.a_choutdevvec[as.a_noutdev]) < 2)
                    break;
            sprintf(keybuf, "audiooutdevname%d", as.a_noutdev+1);
            if (sys_getpreference(keybuf, prefbuf, MAXPDSTRING)
                && (devn = sys_audiodevnametonumber(1, prefbuf)) >= 0)
                    as.a_outdevvec[as.a_noutdev] = devn;
            as.a_noutdev++;
        }
        if (as.a_noutdev == 0)
            as.a_noutdev = -1;
    }
    if (sys_getpreference("rate", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &as.a_srate);
    if (sys_getpreference("audiobuf", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &as.a_advance);
    if (sys_getpreference("callback", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &as.a_callback);
    if (sys_getpreference("audioblocksize", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &as.a_blocksize);
#ifndef _WIN32
        /* fall back to the pre-rename "blocksize" key */
    else if (sys_getpreference("blocksize", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &as.a_blocksize);
#endif
    sys_set_audio_settings(&as);

        /* load MIDI preferences */
    if (sys_getpreference("midiapi", prefbuf, MAXPDSTRING)
        && sscanf(prefbuf, "%d", &midiapi) > 0)
            sys_set_midi_api(midiapi);
        /* JMZ/MB: brackets for initializing */
    if (sys_getpreference("nomidiin", prefbuf, MAXPDSTRING) &&
        (!strcmp(prefbuf, ".") || !strcmp(prefbuf, "True")))
            nmidiindev = 0;
    else for (nmidiindev = 0; nmidiindev < MAXMIDIINDEV;)
    {
            /* first try to find a name - if that matches an existing device
               use it.  Otherwise fall back to device number. */
        int devn;
        sprintf(keybuf, "midiindevname%d", nmidiindev+1);
        if (sys_getpreference(keybuf, prefbuf, MAXPDSTRING)
            && (devn = sys_mididevnametonumber(0, prefbuf)) >= 0)
                midiindev[nmidiindev] = devn;
        else
        {
            sprintf(keybuf, "midiindev%d", nmidiindev+1);
            if (!sys_getpreference(keybuf, prefbuf, MAXPDSTRING))
                break;
            if (sscanf(prefbuf, "%d", &midiindev[nmidiindev]) < 1)
                break;
        }
        nmidiindev++;
    }
        /* JMZ/MB: brackets for initializing */
    if (sys_getpreference("nomidiout", prefbuf, MAXPDSTRING) &&
        (!strcmp(prefbuf, ".") || !strcmp(prefbuf, "True")))
            nmidioutdev = 0;
    else for (nmidioutdev = 0; nmidioutdev < MAXMIDIOUTDEV;)
    {
        int devn;
        sprintf(keybuf, "midioutdevname%d", nmidioutdev+1);
        if (sys_getpreference(keybuf, prefbuf, MAXPDSTRING)
            && (devn = sys_mididevnametonumber(1, prefbuf)) >= 0)
                midioutdev[nmidioutdev] = devn;
        else
        {
            sprintf(keybuf, "midioutdev%d", nmidioutdev+1);
            if (!sys_getpreference(keybuf, prefbuf, MAXPDSTRING))
                break;
            if (sscanf(prefbuf, "%d", &midioutdev[nmidioutdev]) < 1)
                break;
        }
        nmidioutdev++;
    }
    sys_open_midi(nmidiindev, midiindev, nmidioutdev, midioutdev, 0);

        /* search path */
    if (sys_getpreference("npath", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &maxi);
    else maxi = 0x7fffffff;
    for (i = 0; i < maxi; i++)
    {
        sprintf(keybuf, "path%d", i+1);
        if (!sys_getpreference(keybuf, prefbuf, MAXPDSTRING))
            break;
        STUFF->st_searchpath =
            namelist_append_files(STUFF->st_searchpath, prefbuf);
    }
    if (sys_getpreference("standardpath", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &sys_usestdpath);
    if (sys_getpreference("verbose", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &sys_verbose);

        /* startup settings */
    if (sys_getpreference("nloadlib", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &maxi);
    else maxi = 0x7fffffff;
    for (i = 0; i < maxi; i++)
    {
        sprintf(keybuf, "loadlib%d", i+1);
        if (!sys_getpreference(keybuf, prefbuf, MAXPDSTRING))
            break;
        STUFF->st_externlist =
            namelist_append_files(STUFF->st_externlist, prefbuf);
    }
    if (sys_getpreference("defeatrt", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &sys_defeatrt);
        /* a "." value is used to mean "no flags" */
    if (sys_getpreference("flags", prefbuf, MAXPDSTRING)
        && strcmp(prefbuf, "."))
    {
        sys_flags = gensym(prefbuf);
        if (startingup)
            sys_doflags();
    }
    if (sys_defeatrt)
        sys_hipriority = 0;
    else
#if defined(ANDROID)
        sys_hipriority = 0;
#else
        sys_hipriority = 1;
#endif
    if (sys_getpreference("zoom", prefbuf, MAXPDSTRING))
        sscanf(prefbuf, "%d", &sys_zoom_open);
    sys_doneloadpreferences();
}

    /* save audio, MIDI, path and startup settings to "filename" if
       non-empty, otherwise to the platform's preference store */
void sys_savepreferences(const char *filename)
{
    t_audiosettings as;
    int i;
    char buf1[MAXPDSTRING], buf2[MAXPDSTRING];
    int nmidiindev, midiindev[MAXMIDIINDEV];
    int nmidioutdev, midioutdev[MAXMIDIOUTDEV];

    if (filename && *filename)
        sys_initsavepreferences_file(filename);
    else sys_initsavepreferences();
        /* audio settings */
    sys_get_audio_settings(&as);
    sprintf(buf1, "%d", as.a_api);
    sys_putpreference("audioapi", buf1);
    sys_putpreference("noaudioin", (as.a_nindev <= 0 ? "True":"False"));
    for (i = 0; i < as.a_nindev; i++)
    {
        sprintf(buf1, "audioindev%d", i+1);
        sprintf(buf2, "%d %d", as.a_indevvec[i], as.a_chindevvec[i]);
        sys_putpreference(buf1, buf2);
        sprintf(buf1, "audioindevname%d", i+1);
        sys_audiodevnumbertoname(0, as.a_indevvec[i], buf2, MAXPDSTRING);
        if (! *buf2)    /* never save an empty name */
            strcat(buf2, "?");
        sys_putpreference(buf1, buf2);
    }
    sys_putpreference("noaudioout", (as.a_noutdev <= 0 ? "True":"False"));
    for (i = 0; i < as.a_noutdev; i++)
    {
        sprintf(buf1, "audiooutdev%d", i+1);
        sprintf(buf2, "%d %d", as.a_outdevvec[i], as.a_choutdevvec[i]);
        sys_putpreference(buf1, buf2);
        sprintf(buf1, "audiooutdevname%d", i+1);
        sys_audiodevnumbertoname(1, as.a_outdevvec[i], buf2, MAXPDSTRING);
        if (! *buf2)
            strcat(buf2, "?");
        sys_putpreference(buf1, buf2);
    }
    sprintf(buf1, "%d", as.a_advance);
    sys_putpreference("audiobuf", buf1);
    sprintf(buf1, "%d", as.a_srate);
    sys_putpreference("rate", buf1);
    sprintf(buf1, "%d", as.a_callback);
    sys_putpreference("callback", buf1);
    sprintf(buf1, "%d", as.a_blocksize);
    sys_putpreference("audioblocksize", buf1);

        /* MIDI settings */
    sprintf(buf1, "%d", sys_midiapi);
    sys_putpreference("midiapi", buf1);
    sys_get_midi_params(&nmidiindev, midiindev, &nmidioutdev, midioutdev);
    sys_putpreference("nomidiin", (nmidiindev <= 0 ? "True" : "False"));
    for (i = 0; i < nmidiindev; i++)
    {
        sprintf(buf1, "midiindev%d", i+1);
        sprintf(buf2, "%d", midiindev[i]);
        sys_putpreference(buf1, buf2);
        sprintf(buf1, "midiindevname%d", i+1);
        sys_mididevnumbertoname(0, midiindev[i], buf2, MAXPDSTRING);
        if (! *buf2)
            strcat(buf2, "?");
        sys_putpreference(buf1, buf2);
    }
    sys_putpreference("nomidiout", (nmidioutdev <= 0 ? "True" : "False"));
    for (i = 0; i < nmidioutdev; i++)
    {
        sprintf(buf1, "midioutdev%d", i+1);
        sprintf(buf2, "%d", midioutdev[i]);
        sys_putpreference(buf1, buf2);
        sprintf(buf1, "midioutdevname%d", i+1);
        sys_mididevnumbertoname(1, midioutdev[i], buf2, MAXPDSTRING);
        if (! *buf2)
            strcat(buf2, "?");
        sys_putpreference(buf1, buf2);
    }

        /* file search path */
    for (i = 0; 1; i++)
    {
        const char *pathelem = namelist_get(STUFF->st_searchpath, i);
        if (!pathelem)
            break;
        sprintf(buf1, "path%d", i+1);
        sys_putpreference(buf1, pathelem);
    }
    sprintf(buf1, "%d", i);
    sys_putpreference("npath", buf1);
    sprintf(buf1, "%d", sys_usestdpath);
    sys_putpreference("standardpath", buf1);
    sprintf(buf1, "%d", sys_verbose);
    sys_putpreference("verbose", buf1);

        /* startup */
    for (i = 0; 1; i++)
    {
        const char *pathelem = namelist_get(STUFF->st_externlist, i);
        if (!pathelem)
            break;
        sprintf(buf1, "loadlib%d", i+1);
        sys_putpreference(buf1, pathelem);
    }
    sprintf(buf1, "%d", i);
    sys_putpreference("nloadlib", buf1);
    sprintf(buf1, "%d", sys_defeatrt);
    sys_putpreference("defeatrt", buf1);
    sys_putpreference("flags", (sys_flags ? sys_flags->s_name : ""));

        /* misc */
    sprintf(buf1, "%d", sys_zoom_open);
    sys_putpreference("zoom", buf1);
        /* clear the crash-on-startup marker (see sys_oktoloadfiles()) */
    sys_putpreference("loading", "no");

    sys_donesavepreferences();
}

    /* calls from GUI to load/save from/to a file */
void glob_loadpreferences(t_pd *dummy, t_symbol *filesym)
{
    sys_loadpreferences(filesym->s_name, 0);
    sys_close_audio();
    sys_reopen_audio();
    sys_close_midi();
    sys_reopen_midi();
}

void glob_savepreferences(t_pd *dummy, t_symbol *filesym)
{
    sys_savepreferences(filesym->s_name);
}

    /* erase the stored preferences on the current platform */
void glob_forgetpreferences(t_pd *dummy)
{
#if !defined(_WIN32) && !defined(__APPLE__)
    char user_prefs_file[MAXPDSTRING]; /* user prefs file */
    const char *homedir = getenv("HOME");
    struct stat statbuf;

    snprintf(user_prefs_file, MAXPDSTRING, "%s/.pdsettings",
        (homedir ? homedir : "."));
    user_prefs_file[MAXPDSTRING-1] = 0;
    if (stat(user_prefs_file, &statbuf) != 0)
    {
        post("no Pd settings to clear");
    }
    else if (!unlink(user_prefs_file))
    {
        post("removed %s file", user_prefs_file);
    }
    else
    {
        post("couldn't delete %s file: %s", user_prefs_file, strerror(errno));
    }
#endif /* !defined(_WIN32) && !defined(__APPLE__) */
#ifdef __APPLE__
    char cmdbuf[MAXPDSTRING];
    int warn = 1;
    if (!sys_getpreference("audioapi", cmdbuf, MAXPDSTRING))
        post("no Pd settings to clear"), warn = 0;
        /* do it anyhow, why not... */
    snprintf(cmdbuf, MAXPDSTRING,
        "defaults delete org.puredata.pd 2> /dev/null\n");
    if (system(cmdbuf) && warn)
        post("failed to erase Pd settings");
    else if(warn) post("erased Pd settings");
#endif /* __APPLE__ */
#ifdef _WIN32
    HKEY hkey;
    if (RegOpenKeyEx(HKEY_CURRENT_USER,
        "Software", 0, KEY_QUERY_VALUE, &hkey) != ERROR_SUCCESS)
            post("no Pd settings to erase");
    else
    {
        if (RegDeleteKey(hkey, "Pure-Data") != ERROR_SUCCESS)
            post("no Pd settings to erase");
        else post("erased Pd settings");
        RegCloseKey(hkey);
    }
#endif /* _WIN32 */
}

    /* crash-on-startup guard: a "loading" preference left at "yes" means
       Pd crashed while applying saved preferences last time, so skip them */
int sys_oktoloadfiles(int done)
{
#if defined(_WIN32) || defined(__APPLE__)
    if (done)
    {
        sys_putpreference("loading", "no");
        return (1);
    }
    else
    {
        char prefbuf[MAXPDSTRING];
        if (sys_getpreference("loading", prefbuf, MAXPDSTRING) &&
            strcmp(prefbuf, "no"))
        {
            post(
    "skipping loading preferences... Pd seems to have crashed on startup");
            post("(re-save preferences to reinstate them)");
            return (0);
        }
        else
        {
            sys_putpreference("loading", "yes");
            return (1);
        }
    }
#else
    return (1);
#endif
}
jamshark70/pure-data
src/d_soundfile.h
<reponame>jamshark70/pure-data<filename>src/d_soundfile.h
/* Copyright (c) 2019 <NAME>.
 * For information on usage and redistribution, and for a DISCLAIMER OF ALL
 * WARRANTIES, see the file, "LICENSE.txt," in this distribution. */

/* soundfile formats and helper functions */

#pragma once

#include "m_pd.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <string.h>
#include <limits.h>
#include <errno.h>

/* GLIBC large file support */
#ifdef _LARGEFILE64_SOURCE
#define lseek lseek64
#define off_t __off64_t
#endif

/* MSVC doesn't define or uses different naming for these Posix types */
#ifdef _MSC_VER
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#define off_t ssize_t
/* choose appropriate size if SSIZE_MAX is not defined */
#ifndef SSIZE_MAX
#ifdef _WIN64
#define SSIZE_MAX _I64_MAX
#else /* _WIN32 */
#define SSIZE_MAX INT_MAX
#endif
#endif /* SSIZE_MAX */
#endif /* _MSC_VER */

/** should be large enough for all file type min sizes */
#define SFHDRBUFSIZE 128

#define SFMAXFRAMES SIZE_MAX  /**< default max sample frames, unsigned */
#define SFMAXBYTES  SSIZE_MAX /**< default max sample bytes, signed */

/** sound file read/write debug posts */
//#define DEBUG_SOUNDFILE

/* ----- soundfile ----- */

/** soundfile file descriptor, backend type, and format info

    note: headersize and bytelimit are signed as they are used for < 0
    comparisons, hopefully ssize_t is large enough

    "headersize" can also be thought of as the audio data byte offset */
typedef struct _soundfile
{
    int sf_fd;                     /**< file descriptor, >= 0 : open, -1 : closed */
    struct _soundfile_type *sf_type; /**< type implementation */
    /* format info */
    int sf_samplerate;             /**< read: file sr, write: pd sr         */
    int sf_nchannels;              /**< number of channels                  */
    int sf_bytespersample;         /**< bit rate, 2: 16 bit, 3: 24 bit, 4: 32 bit */
    ssize_t sf_headersize;         /**< header size in bytes, -1 for unknown size */
    int sf_bigendian;              /**< sample endianness, 1 : big or 0 : little */
    int sf_bytesperframe;          /**< number of bytes per sample frame    */
    ssize_t sf_bytelimit;          /**< number of sound data bytes to read/write */
} t_soundfile;

/** clear soundfile struct to defaults, does not close or free */
void soundfile_clear(t_soundfile *sf);

/** copy src soundfile info into dst */
void soundfile_copy(t_soundfile *dst, const t_soundfile *src);

/** returns 1 if bytes need to be swapped due to endianness, otherwise 0 */
int soundfile_needsbyteswap(const t_soundfile *sf);

/** generic soundfile errors (negative so they don't collide with errno) */
typedef enum _soundfile_errno
{
    SOUNDFILE_ERRUNKNOWN   = -1000, /* unknown header */
    SOUNDFILE_ERRMALFORMED = -1001, /* bad header */
    SOUNDFILE_ERRVERSION   = -1002, /* header ok, unsupported version */
    SOUNDFILE_ERRSAMPLEFMT = -1003  /* header ok, unsupported sample format */
} t_soundfile_errno;

/** returns a soundfile error string, otherwise calls C strerror */
const char* soundfile_strerror(int errnum);

/* ----- soundfile type ----- */

/** returns 1 if buffer is the beginning of a supported file header,
    size will be at least minheadersize
    this may be called in a background thread */
typedef int (*t_soundfile_isheaderfn)(const char *buf, size_t size);

/** read format info from soundfile header,
    returns 1 on success or 0 on error
    note: set sf_bytelimit = sound data size, optionally set errno
    this may be called in a background thread */
typedef int (*t_soundfile_readheaderfn)(t_soundfile *sf);

/** write header to beginning of an open file from an info struct
    returns header bytes written or < 0 on error
    note: optionally set errno
    this may be called in a background thread */
typedef int (*t_soundfile_writeheaderfn)(t_soundfile *sf, size_t nframes);

/** update file header data size,
    returns 1 on success or 0 on error
    this may be called in a background thread */
typedef int (*t_soundfile_updateheaderfn)(t_soundfile *sf, size_t nframes);

/** returns 1 if the filename has a supported file extension, otherwise 0
    this may be called in a background thread */
typedef int (*t_soundfile_hasextensionfn)(const char *filename, size_t size);

/** appends the default file extension, returns 1 on success
    this may be called in a background thread */
typedef int (*t_soundfile_addextensionfn)(char *filename, size_t size);

/** returns the type's preferred sample endianness based on the
    requested endianness (0 little, 1 big, -1 unspecified)
    returns 1 for big endian, 0 for little endian */
typedef int (*t_soundfile_endiannessfn)(int endianness);

/* type implementation for a single file format */
typedef struct _soundfile_type
{
    char *t_name;                  /**< type name, unique & w/o white spaces */
    size_t t_minheadersize;        /**< minimum valid header size            */
    t_soundfile_isheaderfn t_isheaderfn;         /**< must be non-NULL */
    t_soundfile_readheaderfn t_readheaderfn;     /**< must be non-NULL */
    t_soundfile_writeheaderfn t_writeheaderfn;   /**< must be non-NULL */
    t_soundfile_updateheaderfn t_updateheaderfn; /**< must be non-NULL */
    t_soundfile_hasextensionfn t_hasextensionfn; /**< must be non-NULL */
    t_soundfile_addextensionfn t_addextensionfn; /**< must be non-NULL */
    t_soundfile_endiannessfn t_endiannessfn;     /**< must be non-NULL */
} t_soundfile_type;

/** add a new type implementation
    returns 1 on success or 0 if max types has been reached */
int soundfile_addtype(const t_soundfile_type *t);

/* ----- read/write helpers ----- */

/** seek to offset in file fd and read size bytes into dst,
    returns bytes read on success or -1 on failure */
ssize_t fd_read(int fd, off_t offset, void *dst, size_t size);

/** seek to offset in file fd and write size bytes from dst,
    returns number of bytes written on success or -1 if seek or
    write failed */
ssize_t fd_write(int fd, off_t offset, const void *src, size_t size);

/* ----- byte swappers ----- */

/** returns 1 if system is bigendian */
int sys_isbigendian(void);

/** swap 8 bytes and return if doit = 1, otherwise return n */
uint64_t swap8(uint64_t n, int doit);

/** swap a 64 bit signed int and return if do it = 1, otherwise return n */
int64_t swap8s(int64_t n, int doit);

/** swap 4 bytes and return if doit = 1, otherwise return n */
uint32_t swap4(uint32_t n, int doit);

/** swap a 32 bit signed int and return if do it = 1, otherwise return n */
int32_t swap4s(int32_t n, int doit);

/** swap 2 bytes and return if doit = 1, otherwise return n */
uint16_t swap2(uint16_t n, int doit);

/** swap a 4 byte string in place if do it = 1, otherwise do nothing */
void swapstring4(char *foo, int doit);

/** swap an 8 byte string in place if do it = 1, otherwise do nothing */
void swapstring8(char *foo, int doit);
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/bnxt/bnxt_cpr.h
<filename>kdns/dpdk-17.02/drivers/net/bnxt/bnxt_cpr.h<gh_stars>100-1000 /*- * BSD LICENSE * * Copyright(c) Broadcom Limited. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Broadcom Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _BNXT_CPR_H_ #define _BNXT_CPR_H_ #include <rte_io.h> #define CMP_VALID(cmp, raw_cons, ring) \ (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \ !((raw_cons) & ((ring)->ring_size))) #define CMP_TYPE(cmp) \ (((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK) #define ADV_RAW_CMP(idx, n) ((idx) + (n)) #define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) #define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) #define B_CP_DB_REARM(cpr, raw_cons) \ rte_write32((DB_CP_REARM_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ ((cpr)->cp_doorbell)) #define B_CP_DIS_DB(cpr, raw_cons) \ rte_write32((DB_CP_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ ((cpr)->cp_doorbell)) struct bnxt_ring; struct bnxt_cp_ring_info { uint32_t cp_raw_cons; void *cp_doorbell; struct cmpl_base *cp_desc_ring; phys_addr_t cp_desc_mapping; struct ctx_hw_stats *hw_stats; phys_addr_t hw_stats_map; uint32_t hw_stats_ctx_id; struct bnxt_ring *cp_ring_struct; }; #define RX_CMP_L2_ERRORS \ (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR) struct bnxt; void bnxt_free_def_cp_ring(struct bnxt *bp); int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id); void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp); void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp); #endif
vicharl/containerdns
kdns/src/local_udp_process.h
<gh_stars>100-1000
#ifndef _LOCAL_UDP_PROCESS_H_
#define _LOCAL_UDP_PROCESS_H_

#include <arpa/inet.h>

#include "db_update.h"

/* Initialize the local UDP processing module.
 * Presumably returns 0 on success, non-zero on failure -- TODO confirm
 * against local_udp_process.c. */
int local_udp_process_init(void);

/* Apply a single domain record update (see struct domin_info_update in
 * db_update.h) to the local UDP processing database.
 * NOTE(review): "domian"/"databd" look like typos carried over from the
 * implementation; the name must match the .c definition, so it is kept. */
int local_udp_domian_databd_update(struct domin_info_update *update);

/* Reload zone configuration: del_zones lists zones to remove and
 * add_zones lists zones to add.  Assumed to be comma-separated
 * zone-name strings -- TODO confirm with callers. */
int local_udp_zones_reload(char *del_zones, char *add_zones);

#endif /* _LOCAL_UDP_PROCESS_H_ */
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/virtio/virtio_user/virtio_user_dev.h
/*- * BSD LICENSE * * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef _VIRTIO_USER_DEV_H
#define _VIRTIO_USER_DEV_H

#include <limits.h>
#include "../virtio_pci.h"
#include "../virtio_ring.h"
#include "vhost.h"

/* State for one virtio-user device instance.  A device is backed either by
 * a vhost-user socket or by the kernel vhost/tap path; the backend-specific
 * fields are grouped below, and dev->ops dispatches to the active backend. */
struct virtio_user_dev {
	/* for vhost_user backend */
	int		vhostfd;

	/* for vhost_kernel backend */
	char		*ifname;	/* tap interface name -- TODO confirm */
	int		*vhostfds;
	int		*tapfds;

	/* for both vhost_user and vhost_kernel */
	int		callfds[VIRTIO_MAX_VIRTQUEUES];	/* per-vq eventfds (call direction) */
	int		kickfds[VIRTIO_MAX_VIRTQUEUES];	/* per-vq eventfds (kick direction) */
	int		mac_specified;	/* non-zero if a MAC string was supplied */
	uint32_t	max_queue_pairs;
	uint32_t	queue_pairs;
	uint32_t	queue_size;
	uint64_t	features; /* the negotiated features with driver,
				   * and will be sync with device
				   */
	uint64_t	device_features; /* supported features by device */
	uint8_t		status;
	uint8_t		mac_addr[ETHER_ADDR_LEN];
	char		path[PATH_MAX];	/* backend path (socket or device) -- TODO confirm */
	struct vring	vrings[VIRTIO_MAX_VIRTQUEUES];
	struct virtio_user_backend_ops *ops;	/* active backend op table */
};

/* Start/stop the device via the active backend.
 * Presumably return 0 on success, negative on failure -- TODO confirm. */
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);

/* Initialize @dev from user-supplied parameters (backend path, queue
 * counts, queue size, optional MAC string); virtio_user_dev_uninit
 * releases what init acquired. */
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
			 int cq, int queue_size, const char *mac);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);

/* Process entries on control queue @queue_idx -- assumed to be the CQ
 * index; verify against the implementation. */
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);

#endif
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/thunderx/base/nicvf_plat.h
<filename>kdns/dpdk-17.02/drivers/net/thunderx/base/nicvf_plat.h /* * BSD LICENSE * * Copyright (C) Cavium networks Ltd. 2016. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium networks nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _THUNDERX_NICVF_H #define _THUNDERX_NICVF_H /* Platform/OS/arch specific abstractions */ /* log */ #include <rte_log.h> #include "../nicvf_logs.h" #define nicvf_log_error(s, ...) PMD_DRV_LOG(ERR, s, ##__VA_ARGS__) #define nicvf_log_debug(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__) #define nicvf_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__) #define nicvf_log(s, ...) 
fprintf(stderr, s, ##__VA_ARGS__) /* delay */ #include <rte_cycles.h> #define nicvf_delay_us(x) rte_delay_us(x) /* barrier */ #include <rte_atomic.h> #define nicvf_smp_wmb() rte_smp_wmb() #define nicvf_smp_rmb() rte_smp_rmb() /* utils */ #include <rte_common.h> #define nicvf_min(x, y) RTE_MIN(x, y) /* byte order */ #include <rte_byteorder.h> #define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x) #define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x) #define NICVF_BYTE_ORDER RTE_BYTE_ORDER #define NICVF_BIG_ENDIAN RTE_BIG_ENDIAN #define NICVF_LITTLE_ENDIAN RTE_LITTLE_ENDIAN /* Constants */ #include <rte_ether.h> #define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN #include <rte_io.h> #define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr)) #define nicvf_addr_read(addr) rte_read64_relaxed((void *)(addr)) /* ARM64 specific functions */ #if defined(RTE_ARCH_ARM64) #define nicvf_prefetch_store_keep(_ptr) ({\ asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); }) #define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \ asm volatile( \ "ldp %x[x1], %x[x0], [%x[p1]]" \ : [x1]"=r"(reg1), [x0]"=r"(reg2)\ : [p1]"r"(addr) \ ); }) #else /* non optimized functions for building on non arm64 arch */ #define nicvf_prefetch_store_keep(_ptr) do {} while (0) #define NICVF_LOAD_PAIR(reg1, reg2, addr) \ do { \ reg1 = nicvf_addr_read((uintptr_t)addr); \ reg2 = nicvf_addr_read((uintptr_t)addr + 8); \ } while (0) #endif #include "nicvf_hw.h" #include "nicvf_mbox.h" #endif /* _THUNDERX_NICVF_H */
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/i40e/rte_pmd_i40e.h
<filename>kdns/dpdk-17.02/drivers/net/i40e/rte_pmd_i40e.h /*- * BSD LICENSE * * Copyright (c) 2017 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _PMD_I40E_H_ #define _PMD_I40E_H_ /** * @file rte_pmd_i40e.h * * i40e PMD specific functions. 
* * @b EXPERIMENTAL: this API may change, or be removed, without prior notice * */ #include <rte_ethdev.h> /** * Response sent back to i40e driver from user app after callback */ enum rte_pmd_i40e_mb_event_rsp { RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */ RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */ RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */ RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */ }; /** * Data sent to the user application when the callback is executed. */ struct rte_pmd_i40e_mb_event_param { uint16_t vfid; /**< Virtual Function number */ uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */ uint16_t retval; /**< return value */ void *msg; /**< pointer to message */ uint16_t msglen; /**< length of the message */ }; /** * Notify VF when PF link status changes. * * @param port * The port identifier of the Ethernet device. * @param vf * VF id. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* invalid. */ int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf); /** * Enable/Disable VF MAC anti spoofing. * * @param port * The port identifier of the Ethernet device. * @param vf_id * VF on which to set MAC anti spoofing. * @param on * 1 - Enable VFs MAC anti spoofing. * 0 - Disable VFs MAC anti spoofing. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on); /** * Enable/Disable VF VLAN anti spoofing. * * @param port * The port identifier of the Ethernet device. * @param vf_id * VF on which to set VLAN anti spoofing. * @param on * 1 - Enable VFs VLAN anti spoofing. * 0 - Disable VFs VLAN anti spoofing. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. 
*/ int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on); /** * Enable/Disable TX loopback on all the PF and VFs. * * @param port * The port identifier of the Ethernet device. * @param on * 1 - Enable TX loopback. * 0 - Disable TX loopback. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on); /** * Enable/Disable VF unicast promiscuous mode. * * @param port * The port identifier of the Ethernet device. * @param vf_id * VF on which to set. * @param on * 1 - Enable. * 0 - Disable. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on); /** * Enable/Disable VF multicast promiscuous mode. * * @param port * The port identifier of the Ethernet device. * @param vf_id * VF on which to set. * @param on * 1 - Enable. * 0 - Disable. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on); /** * Set the VF MAC address. * * PF should set MAC address before VF initialized, if PF sets the MAC * address after VF initialized, new MAC address won't be effective until * VF reinitialize. * * This will remove all existing MAC filters. * * @param port * The port identifier of the Ethernet device. * @param vf_id * VF id. * @param mac_addr * VF MAC address. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* or *mac_addr* is invalid. */ int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, struct ether_addr *mac_addr); /** * Enable/Disable vf vlan strip for all queues in a pool * * @param port * The port identifier of the Ethernet device. * @param vf * ID specifying VF. * @param on * 1 - Enable VF's vlan strip on RX queues. 
* 0 - Disable VF's vlan strip on RX queues. * * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); /** * Enable/Disable vf vlan insert * * @param port * The port identifier of the Ethernet device. * @param vf_id * ID specifying VF. * @param vlan_id * 0 - Disable VF's vlan insert. * n - Enable; n is inserted as the vlan id. * * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, uint16_t vlan_id); /** * Enable/Disable vf broadcast mode * * @param port * The port identifier of the Ethernet device. * @param vf_id * ID specifying VF. * @param on * 0 - Disable broadcast. * 1 - Enable broadcast. * * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, uint8_t on); /** * Enable/Disable vf vlan tag * * @param port * The port identifier of the Ethernet device. * @param vf_id * ID specifying VF. * @param on * 0 - Disable VF's vlan tag. * n - Enable VF's vlan tag. * * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on); /** * Enable/Disable VF VLAN filter * * @param port * The port identifier of the Ethernet device. * @param vlan_id * ID specifying VLAN * @param vf_mask * Mask to filter VF's * @param on * 0 - Disable VF's VLAN filter. * 1 - Enable VF's VLAN filter. * * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, uint64_t vf_mask, uint8_t on); /** * Get VF's statistics * * @param port * The port identifier of the Ethernet device. 
* @param vf_id * VF on which to get. * @param stats * A pointer to a structure of type *rte_eth_stats* to be filled with * the values of device counters for the following set of statistics: * - *ipackets* with the total of successfully received packets. * - *opackets* with the total of successfully transmitted packets. * - *ibytes* with the total of successfully received bytes. * - *obytes* with the total of successfully transmitted bytes. * - *ierrors* with the total of erroneous received packets. * - *oerrors* with the total of failed transmitted packets. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_get_vf_stats(uint8_t port, uint16_t vf_id, struct rte_eth_stats *stats); /** * Clear VF's statistics * * @param port * The port identifier of the Ethernet device. * @param vf_id * VF on which to get. * @return * - (0) if successful. * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ int rte_pmd_i40e_reset_vf_stats(uint8_t port, uint16_t vf_id); #endif /* _PMD_I40E_H_ */
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/sfc/sfc_tx.h
/*- * Copyright (c) 2016 Solarflare Communications Inc. * All rights reserved. * * This software was jointly developed between OKTET Labs (under contract * for Solarflare) and Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _SFC_TX_H #define _SFC_TX_H #include <rte_mbuf.h> #include <rte_ethdev.h> #include "efx.h" #ifdef __cplusplus extern "C" { #endif /** * A segment must not cross 4K boundary * (this is a requirement of NIC TX descriptors) */ #define SFC_TX_SEG_BOUNDARY 4096 struct sfc_adapter; struct sfc_evq; struct sfc_tx_sw_desc { struct rte_mbuf *mbuf; uint8_t *tsoh; /* Buffer to store TSO header */ }; enum sfc_txq_state_bit { SFC_TXQ_INITIALIZED_BIT = 0, #define SFC_TXQ_INITIALIZED (1 << SFC_TXQ_INITIALIZED_BIT) SFC_TXQ_STARTED_BIT, #define SFC_TXQ_STARTED (1 << SFC_TXQ_STARTED_BIT) SFC_TXQ_RUNNING_BIT, #define SFC_TXQ_RUNNING (1 << SFC_TXQ_RUNNING_BIT) SFC_TXQ_FLUSHING_BIT, #define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT) SFC_TXQ_FLUSHED_BIT, #define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT) }; struct sfc_txq { struct sfc_evq *evq; struct sfc_tx_sw_desc *sw_ring; unsigned int state; unsigned int ptr_mask; efx_desc_t *pend_desc; efx_txq_t *common; efsys_mem_t mem; unsigned int added; unsigned int pending; unsigned int completed; unsigned int free_thresh; uint16_t hw_vlan_tci; unsigned int hw_index; unsigned int flags; }; static inline unsigned int sfc_txq_sw_index(const struct sfc_txq *txq) { return txq->hw_index; } struct sfc_txq_info { unsigned int entries; struct sfc_txq *txq; boolean_t deferred_start; boolean_t deferred_started; }; int sfc_tx_init(struct sfc_adapter *sa); void sfc_tx_fini(struct sfc_adapter *sa); int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index); void sfc_tx_qflush_done(struct sfc_txq *txq); int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index); void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index); int sfc_tx_start(struct sfc_adapter *sa); void sfc_tx_stop(struct sfc_adapter *sa); uint16_t sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
nb_pkts); /* From 'sfc_tso.c' */ int sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries, unsigned int socket_id); void sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries); int sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend, unsigned int *pkt_descs, size_t *pkt_len); #ifdef __cplusplus } #endif #endif /* _SFC_TX_H */
vicharl/containerdns
kdns/src/rate_limit.c
/*
 * rate_limit.c -- per-source-IP query rate limiting.
 *
 * Each lcore keeps its own rte_hash (source IPv4 -> slot index) and a
 * parallel array of per-client srTCM meters, one meter per limit type
 * (all traffic, forwarded traffic, and a meter that throttles the
 * "limit exceeded" log line itself).
 */

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_hash.h>
#include <rte_mbuf.h>
#include <rte_meter.h>
#include <rte_cycles.h>
#include <arpa/inet.h>

#include "kdns.h"
#include "dns-conf.h"
#include "rate_limit.h"

#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#define EM_HASH_CRC 1
#endif

#ifdef EM_HASH_CRC
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif

/* Rate (tokens/sec) of the meter that throttles the "exceeded" log line,
 * so a flooding client produces at most ~1 log message per second. */
#define EXCEEDED_LOG_PER_SECOND (1)

/* Per-lcore configuration: table capacity and per-type rates (0 = off). */
typedef struct {
    uint32_t client_num;
    uint32_t rl_ps[RATE_LIMIT_TYPE_MAX];
} rate_limit_ctrl;

/* Per-client state: one srTCM meter per limit type plus a counter of
 * drops accumulated since the last log message. */
typedef struct {
    uint32_t exceeded_cnt;
    struct rte_meter_srtcm rl_meter[RATE_LIMIT_TYPE_MAX];
} rate_limit_hnode;

static rate_limit_ctrl rl_ctrl[MAX_CORES];
static struct rte_hash *rl_hmap[MAX_CORES];    /* sip -> index into rl_harray */
static rate_limit_hnode *rl_harray[MAX_CORES]; /* client_num meter nodes */

static const char *rl_type_str_array[RATE_LIMIT_TYPE_MAX] = {
    "all",
    "fwd",
    "exceeded log"
};

/* Map a rate_limit_type to a human-readable name for log messages. */
static inline const char *rate_limit_type_str(rate_limit_type type) {
    if (unlikely(type < 0 || type >= RATE_LIMIT_TYPE_MAX)) {
        return "illegal type";
    }
    return rl_type_str_array[type];
}

/*
 * Check (and account) one query from source IP @sip against the @type
 * limit on lcore @lcore_id.
 *
 * Returns 0 to allow the query, -1 to drop it.  Fails open: an illegal
 * type, a disabled limit, or a full hash table all allow the query.
 */
int rate_limit(uint32_t sip, rate_limit_type type, unsigned lcore_id) {
    int ret;
    uint64_t now;
    rate_limit_hnode *hnode;

    if (unlikely(type < 0 || type >= RATE_LIMIT_TYPE_MAX)) {
        log_msg(LOG_ERR, "rate limit illegal type %d\n", type);
        return 0;
    }
    /* Limit disabled for this type (or the feature is off entirely). */
    if (rl_ctrl[lcore_id].rl_ps[type] == 0 || rl_ctrl[lcore_id].client_num == 0) {
        return 0;
    }

    /* Find (or create) the per-client slot; rte_hash returns the key's
     * index, which doubles as the index into rl_harray. */
    ret = rte_hash_lookup(rl_hmap[lcore_id], (const void *)&sip);
    if (ret < 0) {
        ret = rte_hash_add_key(rl_hmap[lcore_id], (const void *)&sip);
        if (ret < 0) {
            /* Table full (or other failure): fail open.  NOTE: this logs
             * once per packet while the table stays full.
             * (fix: added the missing trailing newline and matched %u to
             * the unsigned lcore_id.) */
            log_msg(LOG_ERR, "Failed to insert sip %s to hash table %u, ret %d!\n",
                    inet_ntoa(*(struct in_addr *)&sip), lcore_id, ret);
            return 0;
        }
    }

    now = rte_rdtsc();
    hnode = &rl_harray[lcore_id][ret];
    /* Color-blind srTCM check consumes one token; RED means over limit. */
    if (rte_meter_srtcm_color_blind_check(&hnode->rl_meter[type], now, 1) == e_RTE_METER_RED) {
        ++hnode->exceeded_cnt;
        /* Rate-limit the log line itself via the dedicated meter. */
        if (rte_meter_srtcm_color_blind_check(&hnode->rl_meter[RATE_LIMIT_TYPE_EXCEEDED_LOG], now, 1) != e_RTE_METER_RED) {
            log_msg(LOG_ERR, "query from %s, %s rate limit exceeded %u, in slave lcore %u, drop\n",
                    inet_ntoa(*(struct in_addr *)&sip), rate_limit_type_str(type),
                    hnode->exceeded_cnt, lcore_id);
            hnode->exceeded_cnt = 0;
        }
        return -1;
    }
    return 0;
}

/*
 * Initialize rate limiting for one lcore.
 *
 * @all_per_second / @fwd_per_second: token rates for the "all" and "fwd"
 *     limits; 0 disables the corresponding limit.
 * @client_num: capacity of the per-lcore client table; 0 disables the
 *     feature entirely.
 *
 * Hash table and meter array are created lazily (kept across re-init so
 * rate_limit_reload() can reuse them when only the rates change).
 * Returns 0; exits the process on allocation/config failure, matching
 * the original startup-fatal behavior.
 */
int rate_limit_init(uint32_t all_per_second, uint32_t fwd_per_second, uint32_t client_num, unsigned lcore_id) {
    int ret;
    uint32_t i;
    char name[RTE_HASH_NAMESIZE];
    struct rte_hash_parameters hash_params;
    struct rte_meter_srtcm_params meter_params[RATE_LIMIT_TYPE_MAX];
    rate_limit_hnode tmp;

    rl_ctrl[lcore_id].client_num = client_num;
    rl_ctrl[lcore_id].rl_ps[RATE_LIMIT_TYPE_ALL] = all_per_second;
    rl_ctrl[lcore_id].rl_ps[RATE_LIMIT_TYPE_FWD] = fwd_per_second;
    rl_ctrl[lcore_id].rl_ps[RATE_LIMIT_TYPE_EXCEEDED_LOG] = EXCEEDED_LOG_PER_SECOND;

    if (rl_ctrl[lcore_id].client_num == 0
        || (rl_ctrl[lcore_id].rl_ps[RATE_LIMIT_TYPE_ALL] == 0
            && rl_ctrl[lcore_id].rl_ps[RATE_LIMIT_TYPE_FWD] == 0)) {
        log_msg(LOG_INFO, "rate limit is disabled!\n");
        return 0;
    }

    /* Lazily create the sip -> slot hash table for this lcore. */
    if (rl_hmap[lcore_id] == NULL) {
        memset(&hash_params, 0, sizeof(struct rte_hash_parameters));
        hash_params.name = name;
        hash_params.entries = rl_ctrl[lcore_id].client_num;
        hash_params.key_len = sizeof(uint32_t);
        hash_params.hash_func = DEFAULT_HASH_FUNC;
        hash_params.hash_func_init_val = 0;
        hash_params.socket_id = rte_socket_id();
        snprintf(name, sizeof(name), "rl_hmap_%u", lcore_id);
        rl_hmap[lcore_id] = rte_hash_create(&hash_params);
        if (rl_hmap[lcore_id] == NULL) {
            log_msg(LOG_ERR, "Failed to create hash table: %s!\n", name);
            exit(-1);
        }
    }

    /* Lazily allocate the meter-node array indexed by hash slot. */
    if (rl_harray[lcore_id] == NULL) {
        snprintf(name, sizeof(name), "rl_harray_%u", lcore_id);
        rl_harray[lcore_id] = rte_calloc(name, rl_ctrl[lcore_id].client_num, sizeof(rate_limit_hnode), 0);
        if (rl_harray[lcore_id] == NULL) {
            log_msg(LOG_ERR, "Failed to malloc hash array: %s!\n", name);
            exit(-1);
        }
    }

    /* Build one template node with configured meters, then stamp it into
     * every slot (cheaper than configuring each slot's meters in place). */
    memset(&tmp, 0, sizeof(tmp));
    for (i = 0; i < RATE_LIMIT_TYPE_MAX; ++i) {
        if (rl_ctrl[lcore_id].rl_ps[i]) {
            memset(&meter_params[i], 0, sizeof(struct rte_meter_srtcm_params));
            /* cir = committed rate; cbs/ebs sized from the rate so a burst
             * of up to ~1.5s worth of tokens is tolerated. */
            meter_params[i].cir = rl_ctrl[lcore_id].rl_ps[i];
            meter_params[i].cbs = rl_ctrl[lcore_id].rl_ps[i];
            meter_params[i].ebs = rl_ctrl[lcore_id].rl_ps[i] / 2;
            ret = rte_meter_srtcm_config(&tmp.rl_meter[i], &meter_params[i]);
            if (ret) {
                log_msg(LOG_ERR, "Failed to init %s meter srtcm config!\n", rate_limit_type_str(i));
                exit(-1);
            }
        }
    }
    for (i = 0; i < rl_ctrl[lcore_id].client_num; ++i) {
        memcpy(&rl_harray[lcore_id][i], &tmp, sizeof(rate_limit_hnode));
    }
    return 0;
}

/* Release the per-lcore hash table and meter array (safe to call twice). */
void rate_limit_uninit(unsigned lcore_id) {
    if (rl_harray[lcore_id]) {
        rte_free(rl_harray[lcore_id]);
        rl_harray[lcore_id] = NULL;
    }
    if (rl_hmap[lcore_id]) {
        rte_hash_free(rl_hmap[lcore_id]);
        rl_hmap[lcore_id] = NULL;
    }
}

/*
 * Re-apply configuration.  When the table capacity changed, tear down the
 * old structures so init recreates them at the new size; otherwise init
 * reuses the existing table and only rewrites the meters.
 */
int rate_limit_reload(uint32_t all_per_second, uint32_t fwd_per_second, uint32_t client_num, unsigned lcore_id) {
    if (rl_ctrl[lcore_id].client_num != client_num) {
        rate_limit_uninit(lcore_id);
    }
    rate_limit_init(all_per_second, fwd_per_second, client_num, lcore_id);
    return 0;
}
vicharl/containerdns
kdns/dpdk-17.02/lib/librte_eal/linuxapp/eal/eal_interrupts.c
<filename>kdns/dpdk-17.02/lib/librte_eal/linuxapp/eal/eal_interrupts.c<gh_stars>100-1000 /*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <pthread.h> #include <sys/queue.h> #include <stdarg.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <inttypes.h> #include <sys/epoll.h> #include <sys/signalfd.h> #include <sys/ioctl.h> #include <sys/eventfd.h> #include <assert.h> #include <rte_common.h> #include <rte_interrupts.h> #include <rte_memory.h> #include <rte_memzone.h> #include <rte_launch.h> #include <rte_eal.h> #include <rte_per_lcore.h> #include <rte_lcore.h> #include <rte_atomic.h> #include <rte_branch_prediction.h> #include <rte_debug.h> #include <rte_log.h> #include <rte_pci.h> #include <rte_malloc.h> #include <rte_errno.h> #include <rte_spinlock.h> #include "eal_private.h" #include "eal_vfio.h" #include "eal_thread.h" #define EAL_INTR_EPOLL_WAIT_FOREVER (-1) #define NB_OTHER_INTR 1 static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */ /** * union for pipe fds. */ union intr_pipefds{ struct { int pipefd[2]; }; struct { int readfd; int writefd; }; }; /** * union buffer for reading on different devices */ union rte_intr_read_buffer { int uio_intr_count; /* for uio device */ #ifdef VFIO_PRESENT uint64_t vfio_intr_count; /* for vfio device */ #endif uint64_t timerfd_num; /* for timerfd */ char charbuf[16]; /* for others */ }; TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback); TAILQ_HEAD(rte_intr_source_list, rte_intr_source); struct rte_intr_callback { TAILQ_ENTRY(rte_intr_callback) next; rte_intr_callback_fn cb_fn; /**< callback address */ void *cb_arg; /**< parameter for callback */ }; struct rte_intr_source { TAILQ_ENTRY(rte_intr_source) next; struct rte_intr_handle intr_handle; /**< interrupt handle */ struct rte_intr_cb_list callbacks; /**< user callbacks */ uint32_t active; }; /* global spinlock for interrupt data operation */ static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER; /* union buffer for pipe read/write */ static union intr_pipefds intr_pipe; /* interrupt sources list 
*/ static struct rte_intr_source_list intr_sources; /* interrupt handling thread */ static pthread_t intr_thread; /* VFIO interrupts */ #ifdef VFIO_PRESENT #define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int)) /* irq set buffer length for queue interrupts and LSC interrupt */ #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \ sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1)) /* enable legacy (INTx) interrupts */ static int vfio_enable_intx(const struct rte_intr_handle *intr_handle) { struct vfio_irq_set *irq_set; char irq_set_buf[IRQ_SET_BUF_LEN]; int len, ret; int *fd_ptr; len = sizeof(irq_set_buf); /* enable INTx */ irq_set = (struct vfio_irq_set *) irq_set_buf; irq_set->argsz = len; irq_set->count = 1; irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; irq_set->start = 0; fd_ptr = (int *) &irq_set->data; *fd_ptr = intr_handle->fd; ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n", intr_handle->fd); return -1; } /* unmask INTx after enabling */ memset(irq_set, 0, len); len = sizeof(struct vfio_irq_set); irq_set->argsz = len; irq_set->count = 1; irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK; irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; irq_set->start = 0; ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n", intr_handle->fd); return -1; } return 0; } /* disable legacy (INTx) interrupts */ static int vfio_disable_intx(const struct rte_intr_handle *intr_handle) { struct vfio_irq_set *irq_set; char irq_set_buf[IRQ_SET_BUF_LEN]; int len, ret; len = sizeof(struct vfio_irq_set); /* mask interrupts before disabling */ irq_set = (struct vfio_irq_set *) irq_set_buf; irq_set->argsz = len; irq_set->count = 1; irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK; 
irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; irq_set->start = 0; ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n", intr_handle->fd); return -1; } /* disable INTx*/ memset(irq_set, 0, len); irq_set->argsz = len; irq_set->count = 0; irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER; irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; irq_set->start = 0; ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { RTE_LOG(ERR, EAL, "Error disabling INTx interrupts for fd %d\n", intr_handle->fd); return -1; } return 0; } /* enable MSI interrupts */ static int vfio_enable_msi(const struct rte_intr_handle *intr_handle) { int len, ret; char irq_set_buf[IRQ_SET_BUF_LEN]; struct vfio_irq_set *irq_set; int *fd_ptr; len = sizeof(irq_set_buf); irq_set = (struct vfio_irq_set *) irq_set_buf; irq_set->argsz = len; irq_set->count = 1; irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; irq_set->index = VFIO_PCI_MSI_IRQ_INDEX; irq_set->start = 0; fd_ptr = (int *) &irq_set->data; *fd_ptr = intr_handle->fd; ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n", intr_handle->fd); return -1; } return 0; } /* disable MSI interrupts */ static int vfio_disable_msi(const struct rte_intr_handle *intr_handle) { struct vfio_irq_set *irq_set; char irq_set_buf[IRQ_SET_BUF_LEN]; int len, ret; len = sizeof(struct vfio_irq_set); irq_set = (struct vfio_irq_set *) irq_set_buf; irq_set->argsz = len; irq_set->count = 0; irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER; irq_set->index = VFIO_PCI_MSI_IRQ_INDEX; irq_set->start = 0; ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) RTE_LOG(ERR, EAL, "Error disabling MSI interrupts for fd %d\n", intr_handle->fd); return ret; } static int get_max_intr(const struct rte_intr_handle 
*intr_handle)
{
    struct rte_intr_source *src;

    /* Look up the registered source with the same fd and clamp its
     * max_intr into [1, RTE_MAX_RXTX_INTR_VEC_ID + 1]; returns -1 when
     * the fd is not registered in intr_sources. */
    TAILQ_FOREACH(src, &intr_sources, next) {
        if (src->intr_handle.fd != intr_handle->fd)
            continue;

        if (src->intr_handle.max_intr < intr_handle->max_intr)
            src->intr_handle.max_intr = intr_handle->max_intr;
        if (!src->intr_handle.max_intr)
            src->intr_handle.max_intr = 1;
        else if (src->intr_handle.max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
            src->intr_handle.max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;

        return src->intr_handle.max_intr;
    }

    return -1;
}

/* enable MSI-X interrupts */
static int
vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
    int len, ret;
    char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
    struct vfio_irq_set *irq_set;
    int *fd_ptr;

    len = sizeof(irq_set_buf);

    irq_set = (struct vfio_irq_set *) irq_set_buf;
    irq_set->argsz = len;
    ret = get_max_intr(intr_handle);
    if (ret < 0) {
        RTE_LOG(ERR, EAL, "Invalid number of MSI-X irqs for fd %d\n",
            intr_handle->fd);
        return -1;
    }

    irq_set->count = ret;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = 0;
    fd_ptr = (int *) &irq_set->data;
    /* INTR vector offset 0 reserve for non-efds mapping */
    fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
    /* remaining vectors map 1:1 to the per-queue eventfds */
    memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
        sizeof(*intr_handle->efds) * intr_handle->nb_efd);

    ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

    if (ret) {
        RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
                        intr_handle->fd);
        return -1;
    }

    return 0;
}

/* disable MSI-X interrupts */
static int
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
    struct vfio_irq_set *irq_set;
    char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
    int len, ret;

    len = sizeof(struct vfio_irq_set);

    /* count == 0 with ACTION_TRIGGER tears down all MSI-X triggers */
    irq_set = (struct vfio_irq_set *) irq_set_buf;
    irq_set->argsz = len;
    irq_set->count = 0;
    irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = 0;

    ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

    if (ret)
        RTE_LOG(ERR, EAL,
            "Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

    return ret;
}
#endif

static int
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
    unsigned char command_high;

    /* use UIO config file descriptor for uio_pci_generic.
     * Offset 5 is the high byte of the PCI command register; bit 0x4
     * there is the Interrupt Disable bit (bit 10 of the register). */
    if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
        RTE_LOG(ERR, EAL,
            "Error reading interrupts status for fd %d\n",
            intr_handle->uio_cfg_fd);
        return -1;
    }
    /* disable interrupts */
    command_high |= 0x4;
    if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
        RTE_LOG(ERR, EAL,
            "Error disabling interrupts for fd %d\n",
            intr_handle->uio_cfg_fd);
        return -1;
    }

    return 0;
}

static int
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
    unsigned char command_high;

    /* use UIO config file descriptor for uio_pci_generic */
    if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
        RTE_LOG(ERR, EAL,
            "Error reading interrupts status for fd %d\n",
            intr_handle->uio_cfg_fd);
        return -1;
    }
    /* enable interrupts: clear the Interrupt Disable bit */
    command_high &= ~0x4;
    if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
        RTE_LOG(ERR, EAL,
            "Error enabling interrupts for fd %d\n",
            intr_handle->uio_cfg_fd);
        return -1;
    }

    return 0;
}

static int
uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
    /* writing 0 to a uio irq fd disables the interrupt */
    const int value = 0;

    if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
        RTE_LOG(ERR, EAL,
            "Error disabling interrupts for fd %d (%s)\n",
            intr_handle->fd, strerror(errno));
        return -1;
    }
    return 0;
}

static int
uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
    /* writing 1 to a uio irq fd enables the interrupt */
    const int value = 1;

    if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
        RTE_LOG(ERR, EAL,
            "Error enabling interrupts for fd %d (%s)\n",
            intr_handle->fd, strerror(errno));
        return -1;
    }
    return 0;
}

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
            rte_intr_callback_fn cb, void *cb_arg)
{
    int ret, wake_thread;
    struct rte_intr_source *src;
    struct rte_intr_callback *callback;

    wake_thread = 0;

    /* first do parameter checking */
    if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
        RTE_LOG(ERR, EAL,
            "Registering with invalid input parameter\n");
        return -EINVAL;
    }

    /* allocate a new interrupt callback entity */
    callback = rte_zmalloc("interrupt callback list",
                sizeof(*callback), 0);
    if (callback == NULL) {
        RTE_LOG(ERR, EAL, "Can not allocate memory\n");
        return -ENOMEM;
    }
    callback->cb_fn = cb;
    callback->cb_arg = cb_arg;

    rte_spinlock_lock(&intr_lock);

    /* check if there is at least one callback registered for the fd */
    TAILQ_FOREACH(src, &intr_sources, next) {
        if (src->intr_handle.fd == intr_handle->fd) {
            /* we had no interrupts for this */
            if TAILQ_EMPTY(&src->callbacks)
                wake_thread = 1;

            TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
            ret = 0;
            break;
        }
    }

    /* no existing callbacks for this - add new source */
    if (src == NULL) {
        if ((src = rte_zmalloc("interrupt source list",
                sizeof(*src), 0)) == NULL) {
            RTE_LOG(ERR, EAL, "Can not allocate memory\n");
            rte_free(callback);
            ret = -ENOMEM;
        } else {
            src->intr_handle = *intr_handle;
            TAILQ_INIT(&src->callbacks);
            TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
            TAILQ_INSERT_TAIL(&intr_sources, src, next);
            wake_thread = 1;
            ret = 0;
        }
    }

    rte_spinlock_unlock(&intr_lock);

    /**
     * check if need to notify the pipe fd waited by epoll_wait to
     * rebuild the wait list.
     */
    if (wake_thread)
        if (write(intr_pipe.writefd, "1", 1) < 0)
            return -EPIPE;

    return ret;
}

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
            rte_intr_callback_fn cb_fn, void *cb_arg)
{
    int ret;
    struct rte_intr_source *src;
    struct rte_intr_callback *cb, *next;

    /* do parameter checking first */
    if (intr_handle == NULL || intr_handle->fd < 0) {
        RTE_LOG(ERR, EAL,
            "Unregistering with invalid input parameter\n");
        return -EINVAL;
    }

    rte_spinlock_lock(&intr_lock);

    /* check if the interrupt source for the fd is existent */
    TAILQ_FOREACH(src, &intr_sources, next)
        if (src->intr_handle.fd == intr_handle->fd)
            break;

    /* No interrupt source registered for the fd */
    if (src == NULL) {
        ret = -ENOENT;

    /* interrupt source has some active callbacks right now. */
    } else if (src->active != 0) {
        ret = -EAGAIN;

    /* ok to remove. */
    } else {
        ret = 0;

        /* walk through the callbacks and remove all that match;
         * cb_arg == (void *)-1 is a wildcard matching any argument. */
        for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {

            next = TAILQ_NEXT(cb, next);

            if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
                    cb->cb_arg == cb_arg)) {
                TAILQ_REMOVE(&src->callbacks, cb, next);
                rte_free(cb);
                /* ret counts the number of callbacks removed */
                ret++;
            }
        }

        /* all callbacks for that source are removed.
         */
        if (TAILQ_EMPTY(&src->callbacks)) {
            TAILQ_REMOVE(&intr_sources, src, next);
            rte_free(src);
        }
    }

    rte_spinlock_unlock(&intr_lock);

    /* notify the pipe fd waited by epoll_wait to rebuild the wait list */
    if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
        ret = -EPIPE;
    }

    return ret;
}

/**
 * Enable the interrupt for the given handle, dispatching on the
 * handle type (UIO / UIO INTx / VFIO MSI-X, MSI, legacy).
 * Returns 0 on success, -1 on failure or unknown type.
 */
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
    if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
        return -1;

    switch (intr_handle->type){
    /* write to the uio fd to enable the interrupt */
    case RTE_INTR_HANDLE_UIO:
        if (uio_intr_enable(intr_handle))
            return -1;
        break;
    case RTE_INTR_HANDLE_UIO_INTX:
        if (uio_intx_intr_enable(intr_handle))
            return -1;
        break;
    /* not used at this moment */
    case RTE_INTR_HANDLE_ALARM:
        return -1;
#ifdef VFIO_PRESENT
    case RTE_INTR_HANDLE_VFIO_MSIX:
        if (vfio_enable_msix(intr_handle))
            return -1;
        break;
    case RTE_INTR_HANDLE_VFIO_MSI:
        if (vfio_enable_msi(intr_handle))
            return -1;
        break;
    case RTE_INTR_HANDLE_VFIO_LEGACY:
        if (vfio_enable_intx(intr_handle))
            return -1;
        break;
#endif
    /* unknown handle type */
    default:
        RTE_LOG(ERR, EAL,
            "Unknown handle type of fd %d\n",
                    intr_handle->fd);
        return -1;
    }

    return 0;
}

/**
 * Disable the interrupt for the given handle; mirror of
 * rte_intr_enable() with the same type dispatch.
 */
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
    if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
        return -1;

    switch (intr_handle->type){
    /* write to the uio fd to disable the interrupt */
    case RTE_INTR_HANDLE_UIO:
        if (uio_intr_disable(intr_handle))
            return -1;
        break;
    case RTE_INTR_HANDLE_UIO_INTX:
        if (uio_intx_intr_disable(intr_handle))
            return -1;
        break;
    /* not used at this moment */
    case RTE_INTR_HANDLE_ALARM:
        return -1;
#ifdef VFIO_PRESENT
    case RTE_INTR_HANDLE_VFIO_MSIX:
        if (vfio_disable_msix(intr_handle))
            return -1;
        break;
    case RTE_INTR_HANDLE_VFIO_MSI:
        if (vfio_disable_msi(intr_handle))
            return -1;
        break;
    case RTE_INTR_HANDLE_VFIO_LEGACY:
        if (vfio_disable_intx(intr_handle))
            return -1;
        break;
#endif
    /* unknown handle type */
    default:
        RTE_LOG(ERR, EAL,
            "Unknown handle type of fd %d\n",
                    intr_handle->fd);
        return -1;
    }

    return 0;
}

/**
 * Service the fds reported ready by epoll_wait: drain each fd and run
 * the registered callbacks. Returns -1 when the control pipe fired,
 * telling the caller to rebuild the epoll wait list.
 */
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
    int n, bytes_read;
    struct rte_intr_source *src;
    struct rte_intr_callback *cb;
    union rte_intr_read_buffer buf;
    struct rte_intr_callback active_cb;

    for (n = 0; n < nfds; n++) {

        /**
         * if the pipe fd is ready to read, return out to
         * rebuild the wait list.
         */
        if (events[n].data.fd == intr_pipe.readfd){
            int r = read(intr_pipe.readfd, buf.charbuf,
                    sizeof(buf.charbuf));
            RTE_SET_USED(r);
            return -1;
        }
        rte_spinlock_lock(&intr_lock);
        TAILQ_FOREACH(src, &intr_sources, next)
            if (src->intr_handle.fd ==
                    events[n].data.fd)
                break;
        if (src == NULL){
            rte_spinlock_unlock(&intr_lock);
            continue;
        }

        /* mark this interrupt source as active and release the lock. */
        src->active = 1;
        rte_spinlock_unlock(&intr_lock);

        /* set the length to be read for different handle type */
        switch (src->intr_handle.type) {
        case RTE_INTR_HANDLE_UIO:
        case RTE_INTR_HANDLE_UIO_INTX:
            bytes_read = sizeof(buf.uio_intr_count);
            break;
        case RTE_INTR_HANDLE_ALARM:
            bytes_read = sizeof(buf.timerfd_num);
            break;
#ifdef VFIO_PRESENT
        case RTE_INTR_HANDLE_VFIO_MSIX:
        case RTE_INTR_HANDLE_VFIO_MSI:
        case RTE_INTR_HANDLE_VFIO_LEGACY:
            bytes_read = sizeof(buf.vfio_intr_count);
            break;
#endif
        case RTE_INTR_HANDLE_EXT:
        default:
            bytes_read = 1;
            break;
        }

        if (src->intr_handle.type != RTE_INTR_HANDLE_EXT) {
            /**
             * read out to clear the ready-to-be-read flag
             * for epoll_wait.
             */
            bytes_read = read(events[n].data.fd, &buf, bytes_read);
            if (bytes_read < 0) {
                if (errno == EINTR || errno == EWOULDBLOCK)
                    continue;

                RTE_LOG(ERR, EAL, "Error reading from file "
                    "descriptor %d: %s\n",
                    events[n].data.fd,
                    strerror(errno));
            } else if (bytes_read == 0)
                RTE_LOG(ERR, EAL, "Read nothing from file "
                    "descriptor %d\n", events[n].data.fd);
        }

        /* grab a lock, again to call callbacks and update status. */
        rte_spinlock_lock(&intr_lock);

        if (bytes_read > 0) {

            /* Finally, call all callbacks.
             */
            TAILQ_FOREACH(cb, &src->callbacks, next) {
                /* make a copy and unlock so that the callback may
                 * itself (un)register callbacks without deadlock. */
                active_cb = *cb;
                rte_spinlock_unlock(&intr_lock);

                /* call the actual callback */
                active_cb.cb_fn(&src->intr_handle,
                    active_cb.cb_arg);

                /* get the lock back. */
                rte_spinlock_lock(&intr_lock);
            }
        }

        /* we are done with that interrupt source, release it. */
        src->active = 0;
        rte_spinlock_unlock(&intr_lock);
    }

    return 0;
}

/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
    struct epoll_event events[totalfds];
    int nfds = 0;

    for(;;) {
        nfds = epoll_wait(pfd, events, totalfds,
            EAL_INTR_EPOLL_WAIT_FOREVER);
        /* epoll_wait fail */
        if (nfds < 0) {
            if (errno == EINTR)
                continue;
            RTE_LOG(ERR, EAL,
                "epoll_wait returns with fail\n");
            return;
        }
        /* epoll_wait timeout, will never happens here */
        else if (nfds == 0)
            continue;
        /* epoll_wait has at least one fd ready to read */
        if (eal_intr_process_interrupts(events, nfds) < 0)
            return;
    }
}

/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 *
 * @return
 *  never return;
 */
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
    struct epoll_event ev;

    /* host thread, never break out */
    for (;;) {
        /* build up the epoll fd with all descriptors we are to
         * wait on then pass it to the handle_interrupts function
         */
        static struct epoll_event pipe_event = {
            .events = EPOLLIN | EPOLLPRI,
        };
        struct rte_intr_source *src;
        unsigned numfds = 0;

        /* create epoll fd */
        int pfd = epoll_create(1);
        if (pfd < 0)
            rte_panic("Cannot create epoll instance\n");

        pipe_event.data.fd = intr_pipe.readfd;
        /**
         * add pipe fd into wait list, this pipe is used to
         * rebuild the wait list.
         */
        if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
                        &pipe_event) < 0) {
            rte_panic("Error adding fd to %d epoll_ctl, %s\n",
                    intr_pipe.readfd, strerror(errno));
        }
        numfds++;

        rte_spinlock_lock(&intr_lock);

        TAILQ_FOREACH(src, &intr_sources, next) {
            if (src->callbacks.tqh_first == NULL)
                continue; /* skip those with no callbacks */
            ev.events = EPOLLIN | EPOLLPRI;
            ev.data.fd = src->intr_handle.fd;

            /**
             * add all the uio device file descriptor
             * into wait list.
             */
            if (epoll_ctl(pfd, EPOLL_CTL_ADD,
                    src->intr_handle.fd, &ev) < 0){
                rte_panic("Error adding fd %d epoll_ctl, %s\n",
                    src->intr_handle.fd, strerror(errno));
            }
            else
                numfds++;
        }
        rte_spinlock_unlock(&intr_lock);
        /* serve the interrupt */
        eal_intr_handle_interrupts(pfd, numfds);

        /**
         * when we return, we need to rebuild the
         * list of fds to monitor.
         */
        close(pfd);
    }
}

/**
 * Initialize the interrupt subsystem: create the control pipe and
 * spawn the host interrupt-handling thread. Returns 0 on success.
 */
int
rte_eal_intr_init(void)
{
    int ret = 0, ret_1 = 0;
    char thread_name[RTE_MAX_THREAD_NAME_LEN];

    /* init the global interrupt source head */
    TAILQ_INIT(&intr_sources);

    /**
     * create a pipe which will be waited by epoll and notified to
     * rebuild the wait list of epoll.
     */
    if (pipe(intr_pipe.pipefd) < 0)
        return -1;

    /* create the host thread to wait/handle the interrupt */
    ret = pthread_create(&intr_thread, NULL,
            eal_intr_thread_main, NULL);
    if (ret != 0) {
        RTE_LOG(ERR, EAL,
            "Failed to create thread for interrupt handling\n");
    } else {
        /* Set thread_name for aid in debugging.
         */
        snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
            "eal-intr-thread");
        ret_1 = rte_thread_setname(intr_thread, thread_name);
        if (ret_1 != 0)
            RTE_LOG(DEBUG, EAL,
            "Failed to set thread name for interrupt handling\n");
    }

    return -ret;
}

/**
 * Drain a ready rx/tx interrupt eventfd so epoll_wait does not report
 * it again; used as the rte_epoll callback for rx interrupt vectors.
 */
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
    union rte_intr_read_buffer buf;
    int bytes_read = 1;
    int nbytes;

    switch (intr_handle->type) {
    case RTE_INTR_HANDLE_UIO:
    case RTE_INTR_HANDLE_UIO_INTX:
        bytes_read = sizeof(buf.uio_intr_count);
        break;
#ifdef VFIO_PRESENT
    case RTE_INTR_HANDLE_VFIO_MSIX:
    case RTE_INTR_HANDLE_VFIO_MSI:
    case RTE_INTR_HANDLE_VFIO_LEGACY:
        bytes_read = sizeof(buf.vfio_intr_count);
        break;
#endif
    default:
        bytes_read = 1;
        RTE_LOG(INFO, EAL, "unexpected intr type\n");
        break;
    }

    /**
     * read out to clear the ready-to-be-read flag
     * for epoll_wait.
     */
    do {
        nbytes = read(fd, &buf, bytes_read);
        if (nbytes < 0) {
            if (errno == EINTR || errno == EWOULDBLOCK ||
                errno == EAGAIN)
                continue;
            RTE_LOG(ERR, EAL,
                "Error reading from fd %d: %s\n",
                fd, strerror(errno));
        } else if (nbytes == 0)
            RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
        return;
    } while (1);
}

/**
 * Copy the ready epoll events into the caller's rte_epoll_event array
 * and invoke any per-event callback. Events whose status cannot be
 * atomically moved VALID -> EXEC (being freed concurrently) are skipped.
 * Returns the number of events delivered.
 */
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
            struct rte_epoll_event *events)
{
    unsigned int i, count = 0;
    struct rte_epoll_event *rev;

    for (i = 0; i < n; i++) {
        rev = evs[i].data.ptr;
        if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
                         RTE_EPOLL_EXEC))
            continue;

        events[count].status        = RTE_EPOLL_VALID;
        events[count].fd            = rev->fd;
        events[count].epfd          = rev->epfd;
        events[count].epdata.event  = rev->epdata.event;
        events[count].epdata.data   = rev->epdata.data;
        if (rev->epdata.cb_fun)
            rev->epdata.cb_fun(rev->fd,
                       rev->epdata.cb_arg);

        rte_compiler_barrier();
        rev->status = RTE_EPOLL_VALID;
        count++;
    }
    return count;
}

static inline int
eal_init_tls_epfd(void)
{
    int pfd = epoll_create(255);

    if (pfd < 0) {
        RTE_LOG(ERR, EAL,
            "Cannot create epoll instance\n");
        return -1;
    }
    return pfd;
}

int
rte_intr_tls_epfd(void)
        int vec, void *data)
{
    struct rte_epoll_event *rev;
    struct rte_epoll_data *epdata;
    int epfd_op;
    unsigned int efd_idx;
    int rc = 0;

    /* translate the vector number into an index into efds[]/elist[];
     * vector 0 is reserved for non-rxtx (misc) interrupts */
    efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
        (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

    if (!intr_handle || intr_handle->nb_efd == 0 ||
        efd_idx >= intr_handle->nb_efd) {
        RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
        return -EPERM;
    }

    switch (op) {
    case RTE_INTR_EVENT_ADD:
        epfd_op = EPOLL_CTL_ADD;
        rev = &intr_handle->elist[efd_idx];
        if (rev->status != RTE_EPOLL_INVALID) {
            RTE_LOG(INFO, EAL, "Event already been added.\n");
            return -EEXIST;
        }

        /* attach to intr vector fd */
        epdata = &rev->epdata;
        epdata->event  = EPOLLIN | EPOLLPRI | EPOLLET;
        epdata->data   = data;
        /* drain the eventfd from the epoll callback */
        epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
        epdata->cb_arg = (void *)intr_handle;
        rc = rte_epoll_ctl(epfd, epfd_op,
                   intr_handle->efds[efd_idx], rev);
        if (!rc)
            RTE_LOG(DEBUG, EAL,
                "efd %d associated with vec %d added on epfd %d"
                "\n", rev->fd, vec, epfd);
        else
            rc = -EPERM;
        break;
    case RTE_INTR_EVENT_DEL:
        epfd_op = EPOLL_CTL_DEL;
        rev = &intr_handle->elist[efd_idx];
        if (rev->status == RTE_EPOLL_INVALID) {
            RTE_LOG(INFO, EAL, "Event does not exist.\n");
            return -EPERM;
        }

        rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
        if (rc)
            rc = -EPERM;
        break;
    default:
        RTE_LOG(ERR, EAL, "event op type mismatch\n");
        rc = -EPERM;
    }

    return rc;
}

/**
 * Create up to RTE_MAX_RXTX_INTR_VEC_ID per-queue interrupt eventfds
 * for MSI-X handles; other handle types fall back to the single
 * interrupt fd. Returns 0 on success or -errno when eventfd() fails.
 */
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
    uint32_t i;
    int fd;
    uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

    assert(nb_efd != 0);

    if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
        for (i = 0; i < n; i++) {
            fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
            if (fd < 0) {
                RTE_LOG(ERR, EAL,
                    "can't setup eventfd, error %i (%s)\n",
                    errno, strerror(errno));
                return -errno;
            }
            intr_handle->efds[i] = fd;
        }
        intr_handle->nb_efd   = n;
        intr_handle->max_intr = NB_OTHER_INTR + n;
    } else {
        /* non-MSI-X: the misc interrupt fd doubles as the only efd */
        intr_handle->efds[0]  = intr_handle->fd;
        intr_handle->nb_efd   = RTE_MIN(nb_efd, 1U);
        intr_handle->max_intr = NB_OTHER_INTR;
    }

    return 0;
}

/**
 * Tear down the per-queue interrupt eventfds: remove each from its
 * epoll set (or force-free the epoll bookkeeping) and close the fds
 * that rte_intr_efd_enable() created.
 */
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
    uint32_t i;
    struct rte_epoll_event *rev;

    for (i = 0; i < intr_handle->nb_efd; i++) {
        rev = &intr_handle->elist[i];
        if (rev->status == RTE_EPOLL_INVALID)
            continue;
        if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
            /* force free if the entry valid */
            eal_epoll_data_safe_free(rev);
            rev->status = RTE_EPOLL_INVALID;
        }
    }

    /* only close fds we allocated ourselves (MSI-X case) */
    if (intr_handle->max_intr > intr_handle->nb_efd) {
        for (i = 0; i < intr_handle->nb_efd; i++)
            close(intr_handle->efds[i]);
    }
    intr_handle->nb_efd = 0;
    intr_handle->max_intr = 0;
}

/* non-zero when rx interrupt eventfds have been set up */
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
    return !(!intr_handle->nb_efd);
}

/* non-zero when an interrupt vector remains for non-datapath events */
int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
    if (!rte_intr_dp_is_en(intr_handle))
        return 1;
    else
        return !!(intr_handle->max_intr - intr_handle->nb_efd);
}

/* only MSI-X handles support multiple interrupt vectors */
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
    if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
        return 1;

    return 0;
}
vicharl/containerdns
kdns/src/metrics.h
<gh_stars>100-1000 #ifndef _METRICS_H_ #define _METRICS_H_ #include <stdio.h> #include <time.h> #include "webserver.h" typedef struct metrics_metrics { uint64_t minTime; uint64_t maxTime; uint64_t timeSum; uint64_t metrics[4]; } metrics_metrics_st; uint64_t time_now_usec(void); int metrics_host_reload(char *host_name); void fwd_metrics_init(void); void metrics_domain_clientIp_update(char *domain, int64_t timeStart, uint32_t src_addr); void metrics_domain_update(char *domain, int64_t timeStart); void *metrics_domains_get(__attribute__((unused)) struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response); void *metrics_domains_clientIp_get(__attribute__((unused)) struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response); void metrics_data_update(metrics_metrics_st *metrics, uint64_t diff_us); #endif /*_METRICS_H_*/
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/i40e/i40e_rxtx_vec_sse.c
<filename>kdns/dpdk-17.02/drivers/net/i40e/i40e_rxtx_vec_sse.c /*- * BSD LICENSE * * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdint.h> #include <rte_ethdev.h> #include <rte_malloc.h> #include "base/i40e_prototype.h" #include "base/i40e_type.h" #include "i40e_ethdev.h" #include "i40e_rxtx.h" #include "i40e_rxtx_vec_common.h" #include <tmmintrin.h> #ifndef __INTEL_COMPILER #pragma GCC diagnostic ignored "-Wcast-qual" #endif static inline void i40e_rxq_rearm(struct i40e_rx_queue *rxq) { int i; uint16_t rx_id; volatile union i40e_rx_desc *rxdp; struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; struct rte_mbuf *mb0, *mb1; __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, RTE_PKTMBUF_HEADROOM); __m128i dma_addr0, dma_addr1; rxdp = rxq->rx_ring + rxq->rxrearm_start; /* Pull 'n' more MBUFs into the software ring */ if (rte_mempool_get_bulk(rxq->mp, (void *)rxep, RTE_I40E_RXQ_REARM_THRESH) < 0) { if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= rxq->nb_rx_desc) { dma_addr0 = _mm_setzero_si128(); for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { rxep[i].mbuf = &rxq->fake_mbuf; _mm_store_si128((__m128i *)&rxdp[i].read, dma_addr0); } } rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += RTE_I40E_RXQ_REARM_THRESH; return; } /* Initialize the mbufs in vector, process 2 mbufs in one loop */ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { __m128i vaddr0, vaddr1; uintptr_t p0, p1; mb0 = rxep[0].mbuf; mb1 = rxep[1].mbuf; /* Flush mbuf with pkt template. * Data to be rearmed is 6 bytes long. * Though, RX will overwrite ol_flags that are coming next * anyway. So overwrite whole 8 bytes with one load: * 6 bytes of rearm_data plus first 2 bytes of ol_flags. 
*/
	/* NOTE(review): this is the tail of i40e_rxq_rearm(); the function
	 * header and the rearm loop opening appear before this chunk, so
	 * only the per-pair body and the tail-pointer update are visible.
	 */
	/* Stamp the queue's 64-bit rearm template into both mbufs. */
	p0 = (uintptr_t)&mb0->rearm_data;
	*(uint64_t *)p0 = rxq->mbuf_initializer;
	p1 = (uintptr_t)&mb1->rearm_data;
	*(uint64_t *)p1 = rxq->mbuf_initializer;

	/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
	vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
	vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

	/* convert pa to dma_addr hdr/data */
	dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
	dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

	/* add headroom to pa values */
	dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
	dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

	/* flush desc with pa dma_addr */
	_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
	_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}

	/* Advance the rearm window, wrapping at the end of the ring. */
	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	/* Tail is the descriptor just before the next rearm position. */
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE

/* Translate the RSS/FDIR/VLAN/checksum status bits of 4 RX descriptors
 * into mbuf ol_flags, using byte-shuffle lookup tables.
 * NOTE(review): ol_flags is assigned from _mm_extract_epi16(), so only
 * the low 16 bits of the computed flag word reach the mbuf — confirm
 * all flags used here fit in 16 bits.
 */
static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i vlan0, vlan1, rss, l3_l4e;

	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication.
	 */
	const __m128i rss_vlan_msk = _mm_set_epi32(
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);

	const __m128i cksum_mask = _mm_set_epi32(
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD);

	/* map rss and vlan type to rss hash and vlan flag */
	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
			0, 0, 0, 0);

	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0);

	const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	/* Gather the qword containing the status bits from each desc. */
	vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
	vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
	vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);

	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);

	rss = _mm_srli_epi32(vlan1, 11);
	rss = _mm_shuffle_epi8(rss_flags, rss);

	l3_l4e = _mm_srli_epi32(vlan1, 22);
	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
	/* then we shift left 1 bit */
	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);

	vlan0 = _mm_or_si128(vlan0, rss);
	vlan0 = _mm_or_si128(vlan0, l3_l4e);

	rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
	rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
	rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
	rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
}
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif

/* Bits the packet-length field must be shifted to become 16-bit aligned
 * inside the descriptor qword.
 */
#define PKTLEN_SHIFT     10

/* Extract the 8-bit hardware ptype from 4 descriptors and map each one
 * to an RTE packet type via i40e_rxd_pkt_type_mapping().
 */
static inline void
desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);

	/* ptype field starts at bit 30 of the high qword */
	ptype0 = _mm_srli_epi64(ptype0, 30);
	ptype1 = _mm_srli_epi64(ptype1, 30);

	rx_pkts[0]->packet_type =
		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
	rx_pkts[1]->packet_type =
		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
	rx_pkts[2]->packet_type =
		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
	rx_pkts[3]->packet_type =
		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
}

/* Vector RX worker: harvests completed descriptors 4 at a time.
 * If split_packet is non-NULL, EOP flags are recorded there (one byte
 * per packet) for scattered-RX reassembly by the caller.
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;

	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,    /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,          /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0            /* ignore pkt_type field */
			);
	__m128i dd_check, eop_check;

	/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		3, 2,        /* octet 2~3, low 16 bits vlan_macip */
		15, 14,      /* octet 15~14, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		15, 14,      /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,  /* pkt_type set as unknown */
		0xFF, 0xFF   /* pkt_type set as unknown */
		);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packet in one loop
	 * [A*. mask out 4 unused dirty field in desc]
	 * B. copy 4 mbuf point from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		__m128i descs[RTE_I40E_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
		__m128i mbp1;
#if defined(RTE_ARCH_X86_64)
		__m128i mbp2;
#endif

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf points */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		/* B.1 load 2 mbuf point */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf point into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
		const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
		const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
		descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		desc_to_olflags_v(descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
		const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
		const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
		descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value  */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				 pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * count of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				 pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				 pkt_mb1);
		desc_to_ptype_v(descs, &rx_pkts[pos]);
		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/* Plain (non-scattered) vector RX entry point.
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/* vPMD receive routine that reassembles scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly*/
	unsigned i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble then*/
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/* Encode one mbuf into one TX data descriptor (single 16-byte store). */
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len <<
					I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

/* Encode nb_pkts mbufs into consecutive TX descriptors. */
static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

/* Vector TX burst: single-segment packets only; burst size is capped at
 * tx_rs_thresh and the ring wrap is handled by splitting the burst.
 */
uint16_t
i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	/* cross rx_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		/* Burst wraps: fill to the end of the ring, setting RS on
		 * the last descriptor, then continue from index 0.
		 */
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reach the end of ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
						I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

/* Free all mbufs still held by the RX queue (delegates to common code). */
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

/* Per-queue vector RX setup (delegates to common code). */
int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

/* Per-queue vector TX setup: nothing to do for the SSE path. */
int __attribute__((cold))
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

/* Check whether the device configuration allows the vector RX path. */
int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	/* need SSE4.1 support */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		return -1;
#endif

	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/enic/enic_rxtx.c
<gh_stars>100-1000 /* Copyright 2008-2016 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * Copyright (c) 2014, Cisco Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <rte_mbuf.h> #include <rte_ethdev.h> #include <rte_prefetch.h> #include "enic_compat.h" #include "rq_enet_desc.h" #include "enic.h" #include <rte_ether.h> #include <rte_ip.h> #include <rte_tcp.h> #define RTE_PMD_USE_PREFETCH #ifdef RTE_PMD_USE_PREFETCH /*Prefetch a cache line into all cache levels. 
*/
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

/* Completed-index field of the RX completion, with the index bits masked
 * off so only the flag bits remain.
 */
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

/* Bytes-written field of the RX completion, flag bits only. */
static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
	       ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

/* Nonzero when the NIC truncated the packet (treated as a packet error). */
static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

/* Nonzero when this completion carries the end-of-packet segment. */
static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

/* Nonzero when the NIC did not compute checksums for this packet. */
static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

/* Nonzero when the hardware-verified IPv4 checksum is good. */
static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

/* Nonzero when the hardware-verified TCP/UDP checksum is good. */
static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

/* RSS hash type reported by the NIC (0 means no RSS hash computed). */
static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

/* 32-bit RSS hash value computed by the NIC. */
static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

/* VLAN TCI stripped from the packet (0 when none). */
static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

/* Number of bytes the NIC wrote for this completion (segment length). */
static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;

	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

/* Find the offset to L5. This is needed by enic TSO implementation.
 * Return 0 if not a TCP packet or can't figure out the length.
 */
static inline uint8_t
tso_header_len(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eh;
	struct vlan_hdr *vh;
	struct ipv4_hdr *ip4;
	struct ipv6_hdr *ip6;
	struct tcp_hdr *th;
	uint8_t hdr_len;
	uint16_t ether_type;

	/* offset past Ethernet header */
	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	ether_type = eh->ether_type;
	hdr_len = sizeof(struct ether_hdr);
	/* Skip over a single VLAN tag, if present. */
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
		ether_type = vh->eth_proto;
		hdr_len += sizeof(struct vlan_hdr);
	}

	/* offset past IP header */
	switch (rte_be_to_cpu_16(ether_type)) {
	case ETHER_TYPE_IPv4:
		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
		if (ip4->next_proto_id != IPPROTO_TCP)
			return 0;
		hdr_len += (ip4->version_ihl & 0xf) * 4;
		break;
	case ETHER_TYPE_IPv6:
		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
		if (ip6->proto != IPPROTO_TCP)
			return 0;
		/* NOTE(review): fixed-size IPv6 header assumed; extension
		 * headers are not walked here.
		 */
		hdr_len += sizeof(struct ipv6_hdr);
		break;
	default:
		return 0;
	}

	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
		return 0;

	/* offset past TCP header */
	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
	hdr_len += (th->data_off >> 4) * 4;

	if (hdr_len > mbuf->pkt_len)
		return 0;

	return hdr_len;
}

/* Nonzero when the completion marks the packet as errored (truncated). */
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	/* Indexed by the masked IPv4/IPv6/frag/TCP/UDP flag bits. */
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}

/* Derive the mbuf ol_flags (VLAN/RSS/checksum) from a completion
 * descriptor. Only meaningful on the EOP segment; otherwise flags stay 0.
 */
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	mbuf->ol_flags = 0;

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN STRIPPED flag. The L2 packet type updated here also */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0)
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		else
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	}
	mbuf->vlan_tci = vlan_tci;

	/* RSS flag */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		if (enic_cq_rx_desc_csum_not_calc(cqrd))
			/* NOTE(review): '&' of the two UNKNOWN constants
			 * looks like it was meant to be '|'; both UNKNOWN
			 * values are 0 in this DPDK version, so the result
			 * is 0 either way — confirm against rte_mbuf.h.
			 */
			pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
				     PKT_RX_L4_CKSUM_UNKNOWN);
		else {
			uint32_t l4_flags;
			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
			else
				pkt_flags |= PKT_RX_IP_CKSUM_BAD;

			if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

 mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}

/* dummy receive function to replace actual function in
 * order to do safe reconfiguration operations.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

/* Burst RX: drains the completion queue of the SOP RQ (and its paired
 * data RQ for scatter), reassembling multi-segment packets and
 * replenishing the ring with freshly allocated mbufs as it goes.
 */
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			/* color bit flips each time the ring wraps */
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
					RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd_ptr, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				nmb->buf_len - RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->next = NULL;
		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

	/* Post replenished descriptors to the NIC once past the threshold. */
	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		/* Ensure descriptor writes are visible before the doorbell. */
		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

/* Release the mbufs of all WQ descriptors up to completed_index back to
 * their mempools, batching same-pool frees with put_bulk.
 */
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		/* prefree returns NULL when the mbuf is still referenced */
		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			/* Pool changed: flush the batch, start a new one. */
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

/* Read the NIC's completion message and free transmitted mbufs if the
 * completed index advanced since the last call.
 */
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

/* Burst TX: encodes each mbuf chain into WQ descriptors, handling VLAN
 * insertion, checksum offload, and TSO, then rings the posted-index
 * doorbell once per burst.
 */
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;

		if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
			continue;
		}

		/* Not enough descriptors for the whole chain: stop here. */
		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_physaddr + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tx_pkt->tso_segsz) {
			header_len = tso_header_len(tx_pkt);
			if (header_len) {
				offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
				mss = tx_pkt->tso_segsz;
			}
		}

		/* Checksum offloads apply only in CSUM mode (header_len==0) */
		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* Nic uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		if (ol_flags & PKT_TX_VLAN_PKT) {
			vlan_tag_insert = 1;
			vlan_id = tx_pkt->vlan_tci;
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss,
				 header_len, offload_mode, eop, eop, 0,
				 vlan_tag_insert, vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			/* Encode the remaining segments of the chain. */
			for (tx_pkt = tx_pkt->next; tx_pkt;
			     tx_pkt = tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, eop,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	/* Make descriptor writes visible before ringing the doorbell. */
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}
vicharl/containerdns
kdns/dpdk-17.02/lib/librte_ether/rte_flow_driver.h
<filename>kdns/dpdk-17.02/lib/librte_ether/rte_flow_driver.h /*- * BSD LICENSE * * Copyright 2016 6WIND S.A. * Copyright 2016 Mellanox. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of 6WIND S.A. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef RTE_FLOW_DRIVER_H_ #define RTE_FLOW_DRIVER_H_ /** * @file * RTE generic flow API (driver side) * * This file provides implementation helpers for internal use by PMDs, they * are not intended to be exposed to applications and are not subject to ABI * versioning. 
*/

#include <stdint.h>

#include <rte_errno.h>
#include "rte_ethdev.h"
#include "rte_flow.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Generic flow operations structure implemented and returned by PMDs.
 *
 * To implement this API, PMDs must handle the RTE_ETH_FILTER_GENERIC filter
 * type in their .filter_ctrl callback function (struct eth_dev_ops) as well
 * as the RTE_ETH_FILTER_GET filter operation.
 *
 * If successful, this operation must result in a pointer to a PMD-specific
 * struct rte_flow_ops written to the argument address as described below:
 *
 * \code
 *
 * // PMD filter_ctrl callback
 *
 * static const struct rte_flow_ops pmd_flow_ops = { ... };
 *
 * switch (filter_type) {
 * case RTE_ETH_FILTER_GENERIC:
 *     if (filter_op != RTE_ETH_FILTER_GET)
 *         return -EINVAL;
 *     *(const void **)arg = &pmd_flow_ops;
 *     return 0;
 * }
 *
 * \endcode
 *
 * See also rte_flow_ops_get().
 *
 * These callback functions are not supposed to be used by applications
 * directly, which must rely on the API defined in rte_flow.h.
 *
 * Public-facing wrapper functions perform a few consistency checks so that
 * unimplemented (i.e. NULL) callbacks simply return -ENOTSUP. These
 * callbacks otherwise only differ by their first argument (with port ID
 * already resolved to a pointer to struct rte_eth_dev).
 */
struct rte_flow_ops {
	/** See rte_flow_validate(). */
	int (*validate)
		(struct rte_eth_dev *,
		 const struct rte_flow_attr *,
		 const struct rte_flow_item [],
		 const struct rte_flow_action [],
		 struct rte_flow_error *);
	/** See rte_flow_create(). */
	struct rte_flow *(*create)
		(struct rte_eth_dev *,
		 const struct rte_flow_attr *,
		 const struct rte_flow_item [],
		 const struct rte_flow_action [],
		 struct rte_flow_error *);
	/** See rte_flow_destroy(). */
	int (*destroy)
		(struct rte_eth_dev *,
		 struct rte_flow *,
		 struct rte_flow_error *);
	/** See rte_flow_flush(). */
	int (*flush)
		(struct rte_eth_dev *,
		 struct rte_flow_error *);
	/** See rte_flow_query(). */
	int (*query)
		(struct rte_eth_dev *,
		 struct rte_flow *,
		 enum rte_flow_action_type,
		 void *,
		 struct rte_flow_error *);
};

/**
 * Initialize generic flow error structure.
 *
 * This function also sets rte_errno to a given value.
 *
 * @param[out] error
 *   Pointer to flow error structure (may be NULL).
 * @param code
 *   Related error code (rte_errno).
 * @param type
 *   Cause field and error types.
 * @param cause
 *   Object responsible for the error.
 * @param message
 *   Human-readable error message.
 *
 * @return
 *   Error code.
 */
static inline int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return code;
}

/**
 * Get generic flow operations structure from a port.
 *
 * @param port_id
 *   Port identifier to query.
 * @param[out] error
 *   Pointer to flow error structure.
 *
 * @return
 *   The flow operations structure associated with port_id, NULL in case of
 *   error, in which case rte_errno is set and the error structure contains
 *   additional details.
 */
const struct rte_flow_ops *
rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);

#ifdef __cplusplus
}
#endif

#endif /* RTE_FLOW_DRIVER_H_ */
vicharl/containerdns
kdns/dpdk-17.02/app/test-crypto-perf/cperf_test_throughput.c
<gh_stars>100-1000 /*- * BSD LICENSE * * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <rte_malloc.h> #include <rte_cycles.h> #include <rte_crypto.h> #include <rte_cryptodev.h> #include "cperf_test_throughput.h" #include "cperf_ops.h" struct cperf_throughput_results { uint64_t ops_enqueued; uint64_t ops_dequeued; uint64_t ops_enqueued_failed; uint64_t ops_dequeued_failed; uint64_t ops_failed; double ops_per_second; double throughput_gbps; double cycles_per_byte; }; struct cperf_throughput_ctx { uint8_t dev_id; uint16_t qp_id; uint8_t lcore_id; struct rte_mempool *pkt_mbuf_pool_in; struct rte_mempool *pkt_mbuf_pool_out; struct rte_mbuf **mbufs_in; struct rte_mbuf **mbufs_out; struct rte_mempool *crypto_op_pool; struct rte_cryptodev_sym_session *sess; cperf_populate_ops_t populate_ops; cperf_verify_crypto_op_t verify_op_output; const struct cperf_options *options; const struct cperf_test_vector *test_vector; struct cperf_throughput_results results; }; struct cperf_op_result { enum rte_crypto_op_status status; }; static void cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb) { uint32_t i; if (ctx) { if (ctx->sess) rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess); if (ctx->mbufs_in) { for (i = 0; i < mbuf_nb; i++) rte_pktmbuf_free(ctx->mbufs_in[i]); rte_free(ctx->mbufs_in); } if (ctx->mbufs_out) { for (i = 0; i < mbuf_nb; i++) { if (ctx->mbufs_out[i] != NULL) rte_pktmbuf_free(ctx->mbufs_out[i]); } rte_free(ctx->mbufs_out); } if (ctx->pkt_mbuf_pool_in) rte_mempool_free(ctx->pkt_mbuf_pool_in); if (ctx->pkt_mbuf_pool_out) rte_mempool_free(ctx->pkt_mbuf_pool_out); if (ctx->crypto_op_pool) rte_mempool_free(ctx->crypto_op_pool); rte_free(ctx); } } static struct rte_mbuf * cperf_mbuf_create(struct rte_mempool *mempool, uint32_t segments_nb, const struct cperf_options *options, const struct cperf_test_vector *test_vector) { struct rte_mbuf *mbuf; uint32_t segment_sz = options->buffer_sz / segments_nb; uint32_t last_sz = options->buffer_sz % segments_nb; uint8_t *mbuf_data; uint8_t *test_data = (options->cipher_op == 
RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? test_vector->plaintext.data : test_vector->ciphertext.data; mbuf = rte_pktmbuf_alloc(mempool); if (mbuf == NULL) goto error; mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz); if (mbuf_data == NULL) goto error; memcpy(mbuf_data, test_data, segment_sz); test_data += segment_sz; segments_nb--; while (segments_nb) { struct rte_mbuf *m; m = rte_pktmbuf_alloc(mempool); if (m == NULL) goto error; rte_pktmbuf_chain(mbuf, m); mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz); if (mbuf_data == NULL) goto error; memcpy(mbuf_data, test_data, segment_sz); test_data += segment_sz; segments_nb--; } if (last_sz) { mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz); if (mbuf_data == NULL) goto error; memcpy(mbuf_data, test_data, last_sz); } mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, options->auth_digest_sz); if (mbuf_data == NULL) goto error; if (options->op_type == CPERF_AEAD) { uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf, RTE_ALIGN_CEIL(options->auth_aad_sz, 16)); if (aead == NULL) goto error; memcpy(aead, test_vector->aad.data, test_vector->aad.length); } return mbuf; error: if (mbuf != NULL) rte_pktmbuf_free(mbuf); return NULL; } void * cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id, const struct cperf_options *options, const struct cperf_test_vector *test_vector, const struct cperf_op_fns *op_fns) { struct cperf_throughput_ctx *ctx = NULL; unsigned int mbuf_idx = 0; char pool_name[32] = ""; ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0); if (ctx == NULL) goto err; ctx->dev_id = dev_id; ctx->qp_id = qp_id; ctx->populate_ops = op_fns->populate_ops; ctx->options = options; ctx->test_vector = test_vector; ctx->sess = op_fns->sess_create(dev_id, options, test_vector); if (ctx->sess == NULL) goto err; snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d", dev_id); ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name, options->pool_sz * options->segments_nb, 0, 0, 
RTE_PKTMBUF_HEADROOM + RTE_CACHE_LINE_ROUNDUP( (options->buffer_sz / options->segments_nb) + (options->buffer_sz % options->segments_nb) + options->auth_digest_sz), rte_socket_id()); if (ctx->pkt_mbuf_pool_in == NULL) goto err; /* Generate mbufs_in with plaintext populated for test */ if (ctx->options->pool_sz % ctx->options->burst_sz) goto err; ctx->mbufs_in = rte_malloc(NULL, (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0); for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) { ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create( ctx->pkt_mbuf_pool_in, options->segments_nb, options, test_vector); if (ctx->mbufs_in[mbuf_idx] == NULL) goto err; } if (options->out_of_place == 1) { snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d", dev_id); ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create( pool_name, options->pool_sz, 0, 0, RTE_PKTMBUF_HEADROOM + RTE_CACHE_LINE_ROUNDUP( options->buffer_sz + options->auth_digest_sz), rte_socket_id()); if (ctx->pkt_mbuf_pool_out == NULL) goto err; } ctx->mbufs_out = rte_malloc(NULL, (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0); for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) { if (options->out_of_place == 1) { ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create( ctx->pkt_mbuf_pool_out, 1, options, test_vector); if (ctx->mbufs_out[mbuf_idx] == NULL) goto err; } else { ctx->mbufs_out[mbuf_idx] = NULL; } } snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d", dev_id); ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name, RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0, rte_socket_id()); if (ctx->crypto_op_pool == NULL) goto err; return ctx; err: cperf_throughput_test_free(ctx, mbuf_idx); return NULL; } static int cperf_throughput_test_verifier(struct rte_mbuf *mbuf, const struct cperf_options *options, const struct cperf_test_vector *vector) { const struct rte_mbuf *m; uint32_t len; uint16_t nb_segs; uint8_t *data; uint32_t cipher_offset, auth_offset; uint8_t cipher, auth; int res = 
0; m = mbuf; nb_segs = m->nb_segs; len = 0; while (m && nb_segs != 0) { len += m->data_len; m = m->next; nb_segs--; } data = rte_malloc(NULL, len, 0); if (data == NULL) return 1; m = mbuf; nb_segs = m->nb_segs; len = 0; while (m && nb_segs != 0) { memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *), m->data_len); len += m->data_len; m = m->next; nb_segs--; } switch (options->op_type) { case CPERF_CIPHER_ONLY: cipher = 1; cipher_offset = 0; auth = 0; auth_offset = 0; break; case CPERF_CIPHER_THEN_AUTH: cipher = 1; cipher_offset = 0; auth = 1; auth_offset = vector->plaintext.length; break; case CPERF_AUTH_ONLY: cipher = 0; cipher_offset = 0; auth = 1; auth_offset = vector->plaintext.length; break; case CPERF_AUTH_THEN_CIPHER: cipher = 1; cipher_offset = 0; auth = 1; auth_offset = vector->plaintext.length; break; case CPERF_AEAD: cipher = 1; cipher_offset = vector->aad.length; auth = 1; auth_offset = vector->aad.length + vector->plaintext.length; break; } if (cipher == 1) { if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) res += memcmp(data + cipher_offset, vector->ciphertext.data, vector->ciphertext.length); else res += memcmp(data + cipher_offset, vector->plaintext.data, vector->plaintext.length); } if (auth == 1) { if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) res += memcmp(data + auth_offset, vector->digest.data, vector->digest.length); } if (res != 0) res = 1; return res; } int cperf_throughput_test_runner(void *test_ctx) { struct cperf_throughput_ctx *ctx = test_ctx; struct cperf_op_result *res, *pres; if (ctx->options->verify) { res = rte_malloc(NULL, sizeof(struct cperf_op_result) * ctx->options->total_ops, 0); if (res == NULL) return 0; } uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0; uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0; uint64_t i, m_idx = 0, tsc_start, tsc_end, tsc_duration; uint16_t ops_unused = 0; uint64_t idx = 0; struct rte_crypto_op *ops[ctx->options->burst_sz]; struct rte_crypto_op 
*ops_processed[ctx->options->burst_sz]; uint32_t lcore = rte_lcore_id(); #ifdef CPERF_LINEARIZATION_ENABLE struct rte_cryptodev_info dev_info; int linearize = 0; /* Check if source mbufs require coalescing */ if (ctx->options->segments_nb > 1) { rte_cryptodev_info_get(ctx->dev_id, &dev_info); if ((dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0) linearize = 1; } #endif /* CPERF_LINEARIZATION_ENABLE */ ctx->lcore_id = lcore; if (!ctx->options->csv) printf("\n# Running throughput test on device: %u, lcore: %u\n", ctx->dev_id, lcore); /* Warm up the host CPU before starting the test */ for (i = 0; i < ctx->options->total_ops; i++) rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0); tsc_start = rte_rdtsc_precise(); while (ops_enqd_total < ctx->options->total_ops) { uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz) <= ctx->options->total_ops) ? ctx->options->burst_sz : ctx->options->total_ops - ops_enqd_total; uint16_t ops_needed = burst_size - ops_unused; /* Allocate crypto ops from pool */ if (ops_needed != rte_crypto_op_bulk_alloc( ctx->crypto_op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)) return -1; /* Setup crypto op, attach mbuf etc */ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx], &ctx->mbufs_out[m_idx], ops_needed, ctx->sess, ctx->options, ctx->test_vector); if (ctx->options->verify) { for (i = 0; i < ops_needed; i++) { ops[i]->opaque_data = (void *)&res[idx]; idx++; } } /** * When ops_needed is smaller than ops_enqd, the * unused ops need to be moved to the front for * next round use. */ if (unlikely(ops_enqd > ops_needed)) { size_t nb_b_to_mov = ops_unused * sizeof( struct rte_crypto_op *); memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov); } #ifdef CPERF_LINEARIZATION_ENABLE if (linearize) { /* PMD doesn't support scatter-gather and source buffer * is segmented. * We need to linearize it before enqueuing. 
*/ for (i = 0; i < burst_size; i++) rte_pktmbuf_linearize(ops[i]->sym->m_src); } #endif /* CPERF_LINEARIZATION_ENABLE */ /* Enqueue burst of ops on crypto device */ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, ops, burst_size); if (ops_enqd < burst_size) ops_enqd_failed++; /** * Calculate number of ops not enqueued (mainly for hw * accelerators whose ingress queue can fill up). */ ops_unused = burst_size - ops_enqd; ops_enqd_total += ops_enqd; /* Dequeue processed burst of ops from crypto device */ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id, ops_processed, ctx->options->burst_sz); if (likely(ops_deqd)) { if (ctx->options->verify) { void *opq; for (i = 0; i < ops_deqd; i++) { opq = (ops_processed[i]->opaque_data); pres = (struct cperf_op_result *)opq; pres->status = ops_processed[i]->status; } } /* free crypto ops so they can be reused. We don't free * the mbufs here as we don't want to reuse them as * the crypto operation will change the data and cause * failures. */ for (i = 0; i < ops_deqd; i++) rte_crypto_op_free(ops_processed[i]); ops_deqd_total += ops_deqd; } else { /** * Count dequeue polls which didn't return any * processed operations. This statistic is mainly * relevant to hw accelerators. */ ops_deqd_failed++; } m_idx += ops_needed; m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ? 
0 : m_idx; } /* Dequeue any operations still in the crypto device */ while (ops_deqd_total < ctx->options->total_ops) { /* Sending 0 length burst to flush sw crypto device */ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0); /* dequeue burst */ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id, ops_processed, ctx->options->burst_sz); if (ops_deqd == 0) ops_deqd_failed++; else { if (ctx->options->verify) { void *opq; for (i = 0; i < ops_deqd; i++) { opq = (ops_processed[i]->opaque_data); pres = (struct cperf_op_result *)opq; pres->status = ops_processed[i]->status; } } for (i = 0; i < ops_deqd; i++) rte_crypto_op_free(ops_processed[i]); ops_deqd_total += ops_deqd; } } tsc_end = rte_rdtsc_precise(); tsc_duration = (tsc_end - tsc_start); if (ctx->options->verify) { struct rte_mbuf **mbufs; if (ctx->options->out_of_place == 1) mbufs = ctx->mbufs_out; else mbufs = ctx->mbufs_in; for (i = 0; i < ctx->options->total_ops; i++) { if (res[i].status != RTE_CRYPTO_OP_STATUS_SUCCESS || cperf_throughput_test_verifier( mbufs[i], ctx->options, ctx->test_vector)) { ctx->results.ops_failed++; } } rte_free(res); } /* Calculate average operations processed per second */ ctx->results.ops_per_second = ((double)ctx->options->total_ops / tsc_duration) * rte_get_tsc_hz(); /* Calculate average throughput (Gbps) in bits per second */ ctx->results.throughput_gbps = ((ctx->results.ops_per_second * ctx->options->buffer_sz * 8) / 1000000000); /* Calculate average cycles per byte */ ctx->results.cycles_per_byte = ((double)tsc_duration / ctx->options->total_ops) / ctx->options->buffer_sz; ctx->results.ops_enqueued = ops_enqd_total; ctx->results.ops_dequeued = ops_deqd_total; ctx->results.ops_enqueued_failed = ops_enqd_failed; ctx->results.ops_dequeued_failed = ops_deqd_failed; return 0; } void cperf_throughput_test_destructor(void *arg) { struct cperf_throughput_ctx *ctx = arg; struct cperf_throughput_results *results = &ctx->results; static int only_once; if (ctx == 
NULL) return; if (!ctx->options->csv) { printf("\n# Device %d on lcore %u\n", ctx->dev_id, ctx->lcore_id); printf("# Buffer Size(B)\t Enqueued\t Dequeued\tFailed Enq" "\tFailed Deq\tOps(Millions)\tThroughput(Gbps)" "\tCycles Per Byte\n"); printf("\n%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t" "%10"PRIu64"\t%16.4f\t%16.4f\t%15.2f\n", ctx->options->buffer_sz, results->ops_enqueued, results->ops_dequeued, results->ops_enqueued_failed, results->ops_dequeued_failed, results->ops_per_second/1000000, results->throughput_gbps, results->cycles_per_byte); } else { if (!only_once) printf("\n# CPU lcore id, Burst Size(B), " "Buffer Size(B),Enqueued,Dequeued,Failed Enq," "Failed Deq,Ops(Millions),Throughput(Gbps)," "Cycles Per Byte\n"); only_once = 1; printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" "%.f3;%.f3;%.f3\n", ctx->lcore_id, ctx->options->burst_sz, ctx->options->buffer_sz, results->ops_enqueued, results->ops_dequeued, results->ops_enqueued_failed, results->ops_dequeued_failed, results->ops_per_second/1000000, results->throughput_gbps, results->cycles_per_byte); } cperf_throughput_test_free(ctx, ctx->options->pool_sz); }
vicharl/containerdns
kdns/dpdk-17.02/app/test-pmd/csumonly.c
<filename>kdns/dpdk-17.02/app/test-pmd/csumonly.c /*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * Copyright 2014 <NAME>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdarg.h> #include <stdio.h> #include <errno.h> #include <stdint.h> #include <unistd.h> #include <inttypes.h> #include <sys/queue.h> #include <sys/stat.h> #include <rte_common.h> #include <rte_byteorder.h> #include <rte_log.h> #include <rte_debug.h> #include <rte_cycles.h> #include <rte_memory.h> #include <rte_memcpy.h> #include <rte_memzone.h> #include <rte_launch.h> #include <rte_eal.h> #include <rte_per_lcore.h> #include <rte_lcore.h> #include <rte_atomic.h> #include <rte_branch_prediction.h> #include <rte_memory.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_memcpy.h> #include <rte_interrupts.h> #include <rte_pci.h> #include <rte_ether.h> #include <rte_ethdev.h> #include <rte_ip.h> #include <rte_tcp.h> #include <rte_udp.h> #include <rte_sctp.h> #include <rte_prefetch.h> #include <rte_string_fns.h> #include <rte_flow.h> #include "testpmd.h" #define IP_DEFTTL 64 /* from RFC 1340. */ #define IP_VERSION 0x40 #define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. 
*/
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

/* GRE header constants (RFC 2784/2890): only the key-present flag is
 * understood; packets with other option flags set are left unparsed. */
#define GRE_KEY_PRESENT 0x2000
#define GRE_KEY_LEN     4
#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT

/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif

/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
    uint16_t ethertype;
    uint16_t l2_len;
    uint16_t l3_len;
    uint16_t l4_len;
    uint8_t l4_proto;
    uint8_t is_tunnel;          /* set once an encapsulation is detected */
    uint16_t outer_ethertype;   /* outer_* mirror the pre-tunnel headers */
    uint16_t outer_l2_len;
    uint16_t outer_l3_len;
    uint8_t outer_l4_proto;
    uint16_t tso_segsz;
    uint16_t tunnel_tso_segsz;
    uint32_t pkt_len;
};

/* simplified GRE header */
struct simple_gre_hdr {
    uint16_t flags;
    uint16_t proto;
} __attribute__((__packed__));

/* Compute the L4 (UDP/TCP) checksum over the pseudo-header + payload.
 * Only IPv4 and IPv6 ethertypes are expected; anything non-IPv4 is
 * treated as IPv6. */
static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
    if (ethertype == _htons(ETHER_TYPE_IPv4))
        return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
    else /* assume ethertype == ETHER_TYPE_IPv6 */
        return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}

/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto.
 * NOTE(review): no bounds checks are performed on the header data —
 * testpmd trusts the contiguous mbuf contents. */
static void
parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
    struct tcp_hdr *tcp_hdr;

    /* IHL field is in 32-bit words, hence * 4 for bytes. */
    info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
    info->l4_proto = ipv4_hdr->next_proto_id;

    /* only fill l4_len for TCP, it's useful for TSO */
    if (info->l4_proto == IPPROTO_TCP) {
        tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
        /* data_off upper nibble is in 32-bit words; >> 4 then * 4. */
        info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
    } else
        info->l4_len = 0;
}

/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto.
 * NOTE(review): extension headers are not walked — l4_proto is taken
 * straight from the fixed header's next-header field. */
static void
parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
    struct tcp_hdr *tcp_hdr;

    info->l3_len = sizeof(struct ipv6_hdr);
    info->l4_proto = ipv6_hdr->proto;

    /* only fill l4_len for TCP, it's useful for TSO */
    if (info->l4_proto == IPPROTO_TCP) {
        tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
        info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
    } else
        info->l4_len = 0;
}

/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional vlan
 * header. The l4_len argument is only set in case of TCP (useful for TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
    struct ipv4_hdr *ipv4_hdr;
    struct ipv6_hdr *ipv6_hdr;

    info->l2_len = sizeof(struct ether_hdr);
    info->ethertype = eth_hdr->ether_type;

    /* Only a single VLAN tag is handled (no QinQ). */
    if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
        struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

        info->l2_len += sizeof(struct vlan_hdr);
        info->ethertype = vlan_hdr->eth_proto;
    }

    switch (info->ethertype) {
    case _htons(ETHER_TYPE_IPv4):
        ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
        parse_ipv4(ipv4_hdr, info);
        break;
    case _htons(ETHER_TYPE_IPv6):
        ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
        parse_ipv6(ipv6_hdr, info);
        break;
    default:
        /* Non-IP payload: zero the L3/L4 fields so stale values from a
         * previous packet are not reused. */
        info->l4_len = 0;
        info->l3_len = 0;
        info->l4_proto = 0;
        break;
    }
}

/* Parse a vxlan header. On a match the current l2/l3/l4 info is shifted
 * into the outer_* fields and parsing restarts at the inner ethernet
 * header. */
static void
parse_vxlan(struct udp_hdr *udp_hdr, struct testpmd_offload_info *info,
    uint32_t pkt_type)
{
    struct ether_hdr *eth_hdr;

    /* check udp destination port, 4789 is the default vxlan port
     * (rfc7348) or that the rx offload flag is set (i40e only
     * currently) */
    if (udp_hdr->dst_port != _htons(4789) &&
            RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
        return;

    info->is_tunnel = 1;
    info->outer_ethertype = info->ethertype;
    info->outer_l2_len = info->l2_len;
    info->outer_l3_len = info->l3_len;
    info->outer_l4_proto = info->l4_proto;

    eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
        sizeof(struct udp_hdr) +
        sizeof(struct vxlan_hdr));

    parse_ethernet(eth_hdr, info);
    info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}

/* Parse a gre header. Supports IPv4, IPv6 and transparent-ethernet-bridging
 * payloads; any GRE option flag other than key-present aborts parsing. */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
    struct ether_hdr *eth_hdr;
    struct ipv4_hdr *ipv4_hdr;
    struct ipv6_hdr *ipv6_hdr;
    uint8_t gre_len = 0;

    /* check which fields are supported */
    if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
        return;

    gre_len += sizeof(struct simple_gre_hdr);

    if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
        gre_len += GRE_KEY_LEN;

    if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
        info->is_tunnel = 1;
        info->outer_ethertype = info->ethertype;
        info->outer_l2_len = info->l2_len;
        info->outer_l3_len = info->l3_len;
        info->outer_l4_proto = info->l4_proto;

        ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);

        parse_ipv4(ipv4_hdr, info);
        info->ethertype = _htons(ETHER_TYPE_IPv4);
        info->l2_len = 0; /* IP directly in GRE: no inner L2 header */

    } else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
        info->is_tunnel = 1;
        info->outer_ethertype = info->ethertype;
        info->outer_l2_len = info->l2_len;
        info->outer_l3_len = info->l3_len;
        info->outer_l4_proto = info->l4_proto;

        ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);

        info->ethertype = _htons(ETHER_TYPE_IPv6);
        parse_ipv6(ipv6_hdr, info);
        info->l2_len = 0; /* IP directly in GRE: no inner L2 header */

    } else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
        info->is_tunnel = 1;
        info->outer_ethertype = info->ethertype;
        info->outer_l2_len = info->l2_len;
        info->outer_l3_len = info->l3_len;
        info->outer_l4_proto = info->l4_proto;

        eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);

        parse_ethernet(eth_hdr, info);
    } else
        return;

    info->l2_len += gre_len;
}

/* Parse an encapsulated ip or ipv6 header (IP-in-IP tunnel): the version
 * nibble of the first byte selects IPv4 vs IPv6 parsing. */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
    struct ipv4_hdr *ipv4_hdr = encap_ip;
    struct ipv6_hdr *ipv6_hdr = encap_ip;
    uint8_t ip_version;

    ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

    if (ip_version != 4 && ip_version != 6)
        return;

    info->is_tunnel = 1;
    info->outer_ethertype = info->ethertype;
    info->outer_l2_len = info->l2_len;
    info->outer_l3_len = info->l3_len;

    if (ip_version == 4) {
        parse_ipv4(ipv4_hdr, info);
        info->ethertype = _htons(ETHER_TYPE_IPv4);
    } else {
        parse_ipv6(ipv6_hdr, info);
        info->ethertype =
_htons(ETHER_TYPE_IPv6); } info->l2_len = 0; } /* if possible, calculate the checksum of a packet in hw or sw, * depending on the testpmd command line configuration */ static uint64_t process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, uint16_t testpmd_ol_flags) { struct ipv4_hdr *ipv4_hdr = l3_hdr; struct udp_hdr *udp_hdr; struct tcp_hdr *tcp_hdr; struct sctp_hdr *sctp_hdr; uint64_t ol_flags = 0; uint32_t max_pkt_len, tso_segsz = 0; /* ensure packet is large enough to require tso */ if (!info->is_tunnel) { max_pkt_len = info->l2_len + info->l3_len + info->l4_len + info->tso_segsz; if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len) tso_segsz = info->tso_segsz; } else { max_pkt_len = info->outer_l2_len + info->outer_l3_len + info->l2_len + info->l3_len + info->l4_len + info->tunnel_tso_segsz; if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len) tso_segsz = info->tunnel_tso_segsz; } if (info->ethertype == _htons(ETHER_TYPE_IPv4)) { ipv4_hdr = l3_hdr; ipv4_hdr->hdr_checksum = 0; ol_flags |= PKT_TX_IPV4; if (info->l4_proto == IPPROTO_TCP && tso_segsz) { ol_flags |= PKT_TX_IP_CKSUM; } else { if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) ol_flags |= PKT_TX_IP_CKSUM; else ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); } } else if (info->ethertype == _htons(ETHER_TYPE_IPv6)) ol_flags |= PKT_TX_IPV6; else return 0; /* packet type not supported, nothing to do */ if (info->l4_proto == IPPROTO_UDP) { udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len); /* do not recalculate udp cksum if it was 0 */ if (udp_hdr->dgram_cksum != 0) { udp_hdr->dgram_cksum = 0; if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) ol_flags |= PKT_TX_UDP_CKSUM; else { udp_hdr->dgram_cksum = get_udptcp_checksum(l3_hdr, udp_hdr, info->ethertype); } } } else if (info->l4_proto == IPPROTO_TCP) { tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len); tcp_hdr->cksum = 0; if (tso_segsz) ol_flags |= PKT_TX_TCP_SEG; else if (testpmd_ol_flags & 
TESTPMD_TX_OFFLOAD_TCP_CKSUM) ol_flags |= PKT_TX_TCP_CKSUM; else { tcp_hdr->cksum = get_udptcp_checksum(l3_hdr, tcp_hdr, info->ethertype); } } else if (info->l4_proto == IPPROTO_SCTP) { sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len); sctp_hdr->cksum = 0; /* sctp payload must be a multiple of 4 to be * offloaded */ if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) && ((ipv4_hdr->total_length & 0x3) == 0)) { ol_flags |= PKT_TX_SCTP_CKSUM; } else { /* XXX implement CRC32c, example available in * RFC3309 */ } } return ol_flags; } /* Calculate the checksum of outer header */ static uint64_t process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, uint16_t testpmd_ol_flags, int tso_enabled) { struct ipv4_hdr *ipv4_hdr = outer_l3_hdr; struct ipv6_hdr *ipv6_hdr = outer_l3_hdr; struct udp_hdr *udp_hdr; uint64_t ol_flags = 0; if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) { ipv4_hdr->hdr_checksum = 0; ol_flags |= PKT_TX_OUTER_IPV4; if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ol_flags |= PKT_TX_OUTER_IP_CKSUM; else ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); } else ol_flags |= PKT_TX_OUTER_IPV6; if (info->outer_l4_proto != IPPROTO_UDP) return ol_flags; udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len); /* outer UDP checksum is done in software as we have no hardware * supporting it today, and no API for it. In the other side, for * UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be * set to zero. * * If a packet will be TSOed into small packets by NIC, we cannot * set/calculate a non-zero checksum, because it will be a wrong * value after the packet be split into several small packets. 
*/ if (tso_enabled) udp_hdr->dgram_cksum = 0; /* do not recalculate udp cksum if it was 0 */ if (udp_hdr->dgram_cksum != 0) { udp_hdr->dgram_cksum = 0; if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr); else udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr); } return ol_flags; } /* * Helper function. * Performs actual copying. * Returns number of segments in the destination mbuf on success, * or negative error code on failure. */ static int mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[], uint16_t seglen[], uint8_t nb_seg) { uint32_t dlen, slen, tlen; uint32_t i, len; const struct rte_mbuf *m; const uint8_t *src; uint8_t *dst; dlen = 0; slen = 0; tlen = 0; dst = NULL; src = NULL; m = ms; i = 0; while (ms != NULL && i != nb_seg) { if (slen == 0) { slen = rte_pktmbuf_data_len(ms); src = rte_pktmbuf_mtod(ms, const uint8_t *); } if (dlen == 0) { dlen = RTE_MIN(seglen[i], slen); md[i]->data_len = dlen; md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1]; dst = rte_pktmbuf_mtod(md[i], uint8_t *); } len = RTE_MIN(slen, dlen); memcpy(dst, src, len); tlen += len; slen -= len; dlen -= len; src += len; dst += len; if (slen == 0) ms = ms->next; if (dlen == 0) i++; } if (ms != NULL) return -ENOBUFS; else if (tlen != m->pkt_len) return -EINVAL; md[0]->nb_segs = nb_seg; md[0]->pkt_len = tlen; md[0]->vlan_tci = m->vlan_tci; md[0]->vlan_tci_outer = m->vlan_tci_outer; md[0]->ol_flags = m->ol_flags; md[0]->tx_offload = m->tx_offload; return nb_seg; } /* * Allocate a new mbuf with up to tx_pkt_nb_segs segments. * Copy packet contents and offload information into then new segmented mbuf. 
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
	int32_t n, rc;
	uint32_t i, len, nb_seg;
	struct rte_mempool *mp;
	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

	mp = current_fwd_lcore()->mbp;

	/* in random-split mode pick 1..tx_pkt_nb_segs segments */
	if (tx_pkt_split == TX_PKT_SPLIT_RND)
		nb_seg = random() % tx_pkt_nb_segs + 1;
	else
		nb_seg = tx_pkt_nb_segs;

	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

	/* calculate number of segments to use and their length. */
	len = 0;
	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
		len += seglen[i];
		md[i] = NULL;
	}

	n = pkt->pkt_len - len;

	/* update size of the last segment to fit rest of the packet */
	if (n >= 0) {
		seglen[i - 1] += n;
		len += n;
	}

	nb_seg = i;
	/* allocate destination segments back-to-front so that on failure
	 * md[i..nb_seg-1] are exactly the successfully allocated ones */
	while (i != 0) {
		p = rte_pktmbuf_alloc(mp);
		if (p == NULL) {
			RTE_LOG(ERR, USER1,
				"failed to allocate %u-th of %u mbuf "
				"from mempool: %s\n",
				nb_seg - i, nb_seg, mp->name);
			break;
		}

		md[--i] = p;
		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
			RTE_LOG(ERR, USER1, "mempool %s, %u-th segment: "
				"expected seglen: %u, "
				"actual mbuf tailroom: %u\n",
				mp->name, i, seglen[i],
				rte_pktmbuf_tailroom(md[i]));
			break;
		}
	}

	/* all mbufs successfully allocated, do copy */
	if (i == 0) {
		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
		if (rc < 0)
			RTE_LOG(ERR, USER1,
				"mbuf_copy_split for %p(len=%u, nb_seg=%hhu) "
				"into %u segments failed with error code: %d\n",
				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

		/* figure out how many mbufs to free. */
		i = RTE_MAX(rc, 0);
	}

	/* free unused mbufs; on total failure this also NULLs md[0],
	 * which is what gets returned to signal the error */
	for (; i != nb_seg; i++) {
		rte_pktmbuf_free_seg(md[i]);
		md[i] = NULL;
	}

	return md[0];
}

/*
 * Receive a burst of packets, and for each packet:
 * - parse packet, and try to recognize a supported packet type (1)
 * - if it's not a supported packet type, don't touch the packet, else:
 * - reprocess the checksum of all supported layers.
This is done in SW
 *   or HW, depending on testpmd command line configuration
 * - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *   segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP is only useful for tunnel packets.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_prep;
	uint16_t i;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint16_t testpmd_ol_flags;
	uint32_t retry;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	struct testpmd_offload_info info;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packet */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	/* per-port TSO/offload configuration is constant for the burst */
	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;

	for (i = 0; i < nb_rx; i++) {
		/* prefetch the next packet's data while processing this one */
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));
		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		tx_ol_flags = 0;
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers; also rewrite src/dst MACs for forwarding */
		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct udp_hdr *udp_hdr;

				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
					info.l3_len);
				parse_vxlan(udp_hdr, &info, m->packet_type);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
			}
		}

		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}

		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
					testpmd_ol_flags,
					!!(tx_ol_flags & PKT_TX_TCP_SEG));
		}

		/* step 3: fill the mbuf meta data (flags and header lengths) */

		if (info.is_tunnel == 1) {
			if (info.tunnel_tso_segsz ||
			    (testpmd_ol_flags &
			     TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
			    (tx_ol_flags & PKT_TX_OUTER_IPV6)) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			} else {
				/* if there is an outer UDP cksum
				   processed in sw and the inner in hw,
				   the outer checksum will be wrong as
				   the payload will be modified by the
				   hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;

		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}

		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
				fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d flags=%s\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & PKT_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
						TESTPMD_TX_OFFLOAD_UDP_CKSUM |
						TESTPMD_TX_OFFLOAD_TCP_CKSUM |
						TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
				info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if ((testpmd_ol_flags &
				    TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
					printf("tx: m->outer_l2_len=%d "
						"m->outer_l3_len=%d\n",
						m->outer_l2_len,
						m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
						(m->ol_flags & PKT_TX_TCP_SEG))
					printf("tx: m->tso_segsz=%d\n",
						m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
					(m->ol_flags & PKT_TX_TCP_SEG))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}

	/* let the driver validate/fix up offload requests before TX */
	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
			pkts_burst, nb_rx);
	if (nb_prep != nb_rx)
		printf("Preparing packet burst to transmit failed: %s\n",
				rte_strerror(rte_errno));

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
			nb_prep);

	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	/* free any packets the driver refused to transmit */
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

/* testpmd forward-engine descriptor for the "csum" mode */
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/qede/qede_rxtx.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_rxtx.h"

static bool gro_disable = 1;	/* mod_param */

/* Allocate one mbuf from the queue's mempool and post it as an RX buffer
 * descriptor on the firmware BD ring; also records it in the parallel
 * software ring at the current producer index.
 * Returns 0 on success, -ENOMEM if the mempool is exhausted. */
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
	struct rte_mbuf *new_mb = NULL;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

	new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
	if (unlikely(!new_mb)) {
		PMD_RX_LOG(ERR, rxq,
			   "Failed to allocate rx buffer "
			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
			   rte_mempool_avail_count(rxq->mb_pool),
			   rte_mempool_in_use_count(rxq->mb_pool));
		return -ENOMEM;
	}
	rxq->sw_rx_ring[idx].mbuf = new_mb;
	rxq->sw_rx_ring[idx].page_offset = 0;
	mapping = rte_mbuf_data_dma_addr_default(new_mb);
	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	/* firmware expects the DMA address split into LE32 hi/lo halves */
	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
	rxq->sw_rx_prod++;
	return 0;
}

/* Free every mbuf still held in the RX software ring. */
static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
{
	uint16_t i;

	if (rxq->sw_rx_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_rx_ring[i].mbuf != NULL) {
				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
				rxq->sw_rx_ring[i].mbuf = NULL;
			}
		}
	}
}

/* ethdev rx_queue_release hook: free the queue's mbufs, its software
 * ring and the queue structure itself. Safe to call with NULL. */
void qede_rx_queue_release(void *rx_queue)
{
	struct qede_rx_queue *rxq = rx_queue;

	if (rxq != NULL) {
		qede_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		rxq = NULL;
	}
}

/* Free every mbuf still held in the TX software ring. */
static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
	unsigned int i;

	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);

	if (txq->sw_tx_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_tx_ring[i].mbuf) {
				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
				txq->sw_tx_ring[i].mbuf = NULL;
			}
		}
	}
}

/* ethdev rx_queue_setup hook: allocate the queue structure, the software
 * mbuf ring, the firmware BD and completion (CQE) rings, and pre-post one
 * RX buffer per descriptor. nb_desc must be a power of 2.
 * Returns 0 on success or a negative errno. */
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		    uint16_t nb_desc,
		    unsigned int socket_id,
		    const struct rte_eth_rxconf *rx_conf,
		    struct rte_mempool *mp)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct qede_rx_queue *rxq;
	uint16_t max_rx_pkt_len;
	uint16_t bufsz;
	size_t size;
	int rc;
	int i;

	PMD_INIT_FUNC_TRACE(edev);

	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
			  nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);

	if (!rxq) {
		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
			  socket_id);
		return -ENOMEM;
	}

	rxq->qdev = qdev;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
	qdev->mtu = max_rx_pkt_len;

	/* Fix up RX buffer size: force scatter-gather when a single mbuf
	 * cannot hold the configured maximum frame */
	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if ((rxmode->enable_scatter)			||
	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
		if (!dev->data->scattered_rx) {
			DP_INFO(edev, "Forcing scatter-gather mode\n");
			dev->data->scattered_rx = 1;
		}
	}
	if (dev->data->scattered_rx)
		rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
	else
		rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
	/* Align to cache-line size if needed */
	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);

	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
					    RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_rx_ring) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for sw_rx_ring on socket %u\n",
			  socket_id);
		rte_free(rxq);
		rxq = NULL;
		return -ENOMEM;
	}

	/* Allocate FW Rx ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_NEXT_PTR,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring,
					    NULL);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for rxbd ring on socket %u\n",
			  socket_id);
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		rxq = NULL;
		return -ENOMEM;
	}

	/* Allocate FW completion ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring,
					    NULL);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for cqe ring on socket %u\n",
			  socket_id);
		/* TBD: Freeing RX BD ring — note this path leaks the BD
		 * ring allocated above (known upstream TBD; the free API
		 * is not available here) */
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rc = qede_alloc_rx_buffer(rxq);
		if (rc) {
			DP_NOTICE(edev, false,
				  "RX buffer allocation failed at idx=%d\n", i);
			goto err4;
		}
	}

	dev->data->rx_queues[queue_idx] = rxq;

	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
		  queue_idx, nb_desc, qdev->mtu, socket_id);

	return 0;
err4:
	qede_rx_queue_release(rxq);
	return -ENOMEM;
}

/* ethdev tx_queue_release hook: free mbufs, software ring and the queue
 * structure. Safe to call with NULL. */
void qede_tx_queue_release(void *tx_queue)
{
	struct qede_tx_queue *txq = tx_queue;

	if (txq != NULL) {
		qede_tx_queue_release_mbufs(txq);
		if (txq->sw_tx_ring) {
			rte_free(txq->sw_tx_ring);
			txq->sw_tx_ring = NULL;
		}
		rte_free(txq);
	}
	txq = NULL;
}

/* ethdev tx_queue_setup hook: allocate the queue structure, the firmware
 * TX BD (PBL) chain and the software ring, and derive tx_free_thresh from
 * tx_conf (or a default relative to ring size). nb_desc must be a power
 * of 2. Returns 0 on success or a negative errno. */
int
qede_tx_queue_setup(struct rte_eth_dev *dev,
		    uint16_t queue_idx,
		    uint16_t nb_desc,
		    unsigned int socket_id,
		    const struct rte_eth_txconf *tx_conf)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_tx_queue *txq;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);

	if (txq == NULL) {
		DP_ERR(edev,
		       "Unable to allocate memory for txq on socket %u",
		       socket_id);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->qdev = qdev;
	txq->port_id = dev->data->port_id;

	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    txq->nb_tx_desc,
					    sizeof(union eth_tx_bd_types),
					    &txq->tx_pbl,
					    NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev,
		       "Unable to allocate memory for txbd ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* Allocate software ring */
	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
					     (sizeof(struct qede_tx_entry) *
					      txq->nb_tx_desc),
					     RTE_CACHE_LINE_SIZE, socket_id);

	if (!txq->sw_tx_ring) {
		/* NOTE(review): this message says "txbd ring" but refers to
		 * the software ring — misleading log text, kept as-is */
		DP_ERR(edev,
		       "Unable to allocate memory for txbd ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->queue_id = queue_idx;

	txq->nb_tx_avail = txq->nb_tx_desc;

	txq->tx_free_thresh =
	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

	dev->data->tx_queues[queue_idx] = txq;

	DP_INFO(edev,
		  "txq %u num_desc %u tx_free_thresh %u socket %u\n",
		  queue_idx, nb_desc, txq->tx_free_thresh, socket_id);

	return 0;
}

/* This function inits fp content and resets the SB, RXQ and TXQ arrays:
 * the first fp_num_rx fastpath entries become RX, the rest TX. */
static void qede_init_fp(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;
	uint8_t i, rss_id, tc;	/* NOTE(review): rss_id and tc are unused */
	int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;	/* rxq/txq unused too */

	memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
					   sizeof(*qdev->fp_array)));
	memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
					   sizeof(*qdev->sb_array)));
	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else{
			fp->type = QEDE_FASTPATH_TX;
		}
		fp->qdev = qdev;
		fp->id = i;
		fp->sb_info = &qdev->sb_array[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
	}

	qdev->gro_disable = gro_disable;
}

/* Free the fastpath and status-block arrays. */
void qede_free_fp_arrays(struct qede_dev *qdev)
{
	/* It assumes qede_free_mem_load() is called before */
	if (qdev->fp_array != NULL) {
		rte_free(qdev->fp_array);
		qdev->fp_array = NULL;
	}

	if (qdev->sb_array != NULL) {
		rte_free(qdev->sb_array);
		qdev->sb_array = NULL;
	}
}

/* Allocate the fastpath and status-block arrays, one entry per queue.
 * Returns 0 on success, -ENOMEM on failure (partial allocation is
 * rolled back). */
int qede_alloc_fp_array(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;	/* NOTE(review): fp and i are unused */
	struct ecore_dev *edev = &qdev->edev;
	int i;

	qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
				    sizeof(*qdev->fp_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->fp_array) {
		DP_ERR(edev, "fp array allocation failed\n");
		return -ENOMEM;
	}

	qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
				    sizeof(*qdev->sb_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->sb_array) {
		DP_ERR(edev, "sb array allocation failed\n");
		rte_free(qdev->fp_array);
		return -ENOMEM;
	}

	return 0;
}

/* This function allocates fast-path status block memory */
static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
		  uint16_t sb_id)
{
	struct ecore_dev *edev = &qdev->edev;
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;
sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt)); if (!sb_virt) { DP_ERR(edev, "Status block allocation failed\n"); return -ENOMEM; } rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_L2_QUEUE); if (rc) { DP_ERR(edev, "Status block initialization failed\n"); /* TBD: No dma_free_coherent possible */ return rc; } return 0; } int qede_alloc_fp_resc(struct qede_dev *qdev) { struct ecore_dev *edev = &qdev->edev; struct qede_fastpath *fp; uint32_t num_sbs; uint16_t i; uint16_t sb_idx; int rc; if (IS_VF(edev)) ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs); else num_sbs = ecore_cxt_get_proto_cid_count (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL); if (num_sbs == 0) { DP_ERR(edev, "No status blocks available\n"); return -EINVAL; } if (qdev->fp_array) qede_free_fp_arrays(qdev); rc = qede_alloc_fp_array(qdev); if (rc != 0) return rc; qede_init_fp(qdev); for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) { fp = &qdev->fp_array[i]; if (IS_VF(edev)) sb_idx = i % num_sbs; else sb_idx = i; if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) { qede_free_fp_arrays(qdev); return -ENOMEM; } } return 0; } void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); qede_free_mem_load(eth_dev); qede_free_fp_arrays(qdev); } static inline void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) { uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); struct eth_rx_prod_data rx_prods = { 0 }; /* Update producers */ rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod); rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod); /* Make sure that the BD and SGE data is updated before updating the * producers since FW might read the BD/SGE right after the producer * is updated. 
*/ rte_wmb(); internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), (uint32_t *)&rx_prods); /* mmiowb is needed to synchronize doorbell writes from more than one * processor. It guarantees that the write arrives to the device before * the napi lock is released and another qede_poll is called (possibly * on another CPU). Without this barrier, the next doorbell can bypass * this doorbell. This is applicable to IA64/Altix systems. */ rte_wmb(); PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod); } static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; struct ecore_queue_start_common_params q_params; struct qed_dev_info *qed_info = &qdev->dev_info.common; struct qed_update_vport_params vport_update_params; struct qede_tx_queue *txq; struct qede_fastpath *fp; dma_addr_t p_phys_table; int txq_index; uint16_t page_cnt; int vlan_removal_en = 1; int rc, tc, i; for_each_queue(i) { fp = &qdev->fp_array[i]; if (fp->type & QEDE_FASTPATH_RX) { p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq-> rx_comp_ring); page_cnt = ecore_chain_get_page_cnt(&fp->rxq-> rx_comp_ring); memset(&q_params, 0, sizeof(q_params)); q_params.queue_id = i; q_params.vport_id = 0; q_params.sb = fp->sb_info->igu_sb_id; q_params.sb_idx = RX_PI; ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); rc = qdev->ops->q_rx_start(edev, i, &q_params, fp->rxq->rx_buf_size, fp->rxq->rx_bd_ring.p_phys_addr, p_phys_table, page_cnt, &fp->rxq->hw_rxq_prod_addr); if (rc) { DP_ERR(edev, "Start rxq #%d failed %d\n", fp->rxq->queue_id, rc); return rc; } fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; qede_update_rx_prod(qdev, fp->rxq); } if (!(fp->type & QEDE_FASTPATH_TX)) continue; for (tc = 0; tc < qdev->num_tc; tc++) { txq = fp->txqs[tc]; txq_index = tc * QEDE_RSS_COUNT(qdev) + i; p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl); page_cnt = 
ecore_chain_get_page_cnt(&txq->tx_pbl); memset(&q_params, 0, sizeof(q_params)); q_params.queue_id = txq->queue_id; q_params.vport_id = 0; q_params.sb = fp->sb_info->igu_sb_id; q_params.sb_idx = TX_PI(tc); rc = qdev->ops->q_tx_start(edev, i, &q_params, p_phys_table, page_cnt, /* **pp_doorbell */ &txq->doorbell_addr); if (rc) { DP_ERR(edev, "Start txq %u failed %d\n", txq_index, rc); return rc; } txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD); txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; } } /* Prepare and send the vport enable */ memset(&vport_update_params, 0, sizeof(vport_update_params)); /* Update MTU via vport update */ vport_update_params.mtu = qdev->mtu; vport_update_params.vport_id = 0; vport_update_params.update_vport_active_flg = 1; vport_update_params.vport_active_flg = 1; /* @DPDK */ if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) { /* TBD: Check SRIOV enabled for VF */ vport_update_params.update_tx_switching_flg = 1; vport_update_params.tx_switching_flg = 1; } rc = qdev->ops->vport_update(edev, &vport_update_params); if (rc) { DP_ERR(edev, "Update V-PORT failed %d\n", rc); return rc; } return 0; } static bool qede_tunn_exist(uint16_t flag) { return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag); } /* * qede_check_tunn_csum_l4: * Returns: * 1 : If L4 csum is enabled AND if the validation has failed. 
/* Validate the L3 (IP header) checksum of a non-tunneled packet when the
 * FW parser reported an IP header error.
 *
 * @m: received mbuf; packet_type is (re)derived from @flag as a side effect
 *     when the error bit is set
 * @flag: pars_flags.flags field from the fast-path Rx CQE
 * Returns nonzero if the checksum is bad (or the errored header is IPv6,
 * which this path cannot re-verify), 0 otherwise.
 */
static inline uint8_t
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
	struct ipv4_hdr *ip;
	uint16_t pkt_csum;
	uint16_t calc_csum;
	uint16_t val;

	/* Isolate the FW's "IP header error" indication. */
	val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);

	if (unlikely(val)) {
		m->packet_type = qede_rx_cqe_to_pkt_type(flag);
		if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
			/* Re-verify in software: temporarily zero the stored
			 * checksum field (rte_ipv4_cksum expects it zeroed),
			 * recompute, then restore the original value.
			 */
			ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
						     sizeof(struct ether_hdr));
			pkt_csum = ip->hdr_checksum;
			ip->hdr_checksum = 0;
			calc_csum = rte_ipv4_cksum(ip);
			ip->hdr_checksum = pkt_csum;
			return (calc_csum != pkt_csum);
		} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
			/* IPv6 has no header checksum to recheck; treat the
			 * FW-reported error as final.
			 */
			return 1;
		}
	}
	return 0;
}
rxq->sw_rx_prod++; } static inline void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *qdev, uint8_t count) { struct qede_rx_entry *curr_cons; for (; count > 0; count--) { curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)]; qede_reuse_page(qdev, rxq, curr_cons); qede_rx_bd_ring_consume(rxq); } } static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags) { uint16_t val; /* Lookup table */ static const uint32_t ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = { [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4, [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6, [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, }; /* Bits (0..3) provides L3/L4 protocol type */ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK << PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) | (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK << PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags; if (val < QEDE_PKT_TYPE_MAX) return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER; else return RTE_PTYPE_UNKNOWN; } static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags) { uint32_t val; /* Lookup table */ static const uint32_t ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = { [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN, [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE, [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE, [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER, [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] = RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER, [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER, [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER, [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] = 
/* Chain the trailing buffer descriptors of a scattered (jumbo) packet
 * onto the head mbuf.
 *
 * @p_rxq: Rx queue the BDs were posted on
 * @rx_mb: head mbuf (first BD, already consumed by the caller)
 * @num_segs: number of remaining BDs to attach
 * @pkt_len: bytes of payload remaining after the first BD
 * Returns 0 on success; -EINVAL (after recycling the unconsumed BDs) if
 * pkt_len runs out while BDs are still expected.
 */
static inline int
qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
		     uint8_t num_segs, uint16_t pkt_len)
{
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	/* NOTE(review): edev looks unused in this body — possibly only
	 * referenced by logging macros in some build configs; verify.
	 */
	struct ecore_dev *edev = &qdev->edev;
	register struct rte_mbuf *seg1 = NULL;	/* tail of the chain so far */
	register struct rte_mbuf *seg2 = NULL;	/* segment being appended */
	uint16_t sw_rx_index;
	uint16_t cur_size;

	seg1 = rx_mb;
	while (num_segs) {
		/* Each BD holds at most rx_buf_size bytes. */
		cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
							pkt_len;
		if (unlikely(!cur_size)) {
			/* Ran out of payload with BDs still outstanding:
			 * return the unconsumed BDs to the ring and fail.
			 */
			PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
				   " left for mapping jumbo", num_segs);
			qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
			return -EINVAL;
		}
		/* Take the next posted mbuf from the SW ring and consume
		 * its BD.
		 */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
		qede_rx_bd_ring_consume(rxq);
		pkt_len -= cur_size;
		seg2->data_len = cur_size;
		seg1->next = seg2;
		seg1 = seg1->next;
		num_segs--;
		rxq->rx_segs++;	/* per-segment statistics counter */
	}
	return 0;
}
pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len); pad = fp_cqe->placement_offset; assert((len + pad) <= rx_mb->buf_len); PMD_RX_LOG(DEBUG, rxq, "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x" " len = %u, parsing_flags = %d", cqe_type, fp_cqe->bitfields, rte_le_to_cpu_16(fp_cqe->vlan_tag), len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags)); /* If this is an error packet then drop it */ parse_flag = rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags); rx_mb->ol_flags = 0; if (qede_tunn_exist(parse_flag)) { PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet"); if (unlikely(qede_check_tunn_csum_l4(parse_flag))) { PMD_RX_LOG(ERR, rxq, "L4 csum failed, flags = 0x%x", parse_flag); rxq->rx_hw_errors++; rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; } else { tunn_parse_flag = fp_cqe->tunnel_pars_flags.flags; rx_mb->packet_type = qede_rx_cqe_to_tunn_pkt_type( tunn_parse_flag); } } else { PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet"); if (unlikely(qede_check_notunn_csum_l4(parse_flag))) { PMD_RX_LOG(ERR, rxq, "L4 csum failed, flags = 0x%x", parse_flag); rxq->rx_hw_errors++; rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; } else if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) { PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x", parse_flag); rxq->rx_hw_errors++; rx_mb->ol_flags |= PKT_RX_IP_CKSUM_BAD; } else { rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag); } } PMD_RX_LOG(INFO, rxq, "packet_type 0x%x", rx_mb->packet_type); if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) { PMD_RX_LOG(ERR, rxq, "New buffer allocation failed," "dropping incoming packet"); qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num); rte_eth_devices[rxq->port_id]. 
data->rx_mbuf_alloc_failed++; rxq->rx_alloc_errors++; break; } qede_rx_bd_ring_consume(rxq); if (fp_cqe->bd_num > 1) { PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs" " len on first: %04x Total Len: %04x", fp_cqe->bd_num, len, pkt_len); num_segs = fp_cqe->bd_num - 1; seg1 = rx_mb; if (qede_process_sg_pkts(p_rxq, seg1, num_segs, pkt_len - len)) goto next_cqe; for (j = 0; j < num_segs; j++) { if (qede_alloc_rx_buffer(rxq)) { PMD_RX_LOG(ERR, rxq, "Buffer allocation failed"); rte_eth_devices[rxq->port_id]. data->rx_mbuf_alloc_failed++; rxq->rx_alloc_errors++; break; } rxq->rx_segs++; } } rxq->rx_segs++; /* for the first segment */ /* Prefetch next mbuf while processing current one. */ preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf); /* Update rest of the MBUF fields */ rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; rx_mb->nb_segs = fp_cqe->bd_num; rx_mb->data_len = len; rx_mb->pkt_len = pkt_len; rx_mb->port = rxq->port_id; htype = (uint8_t)GET_FIELD(fp_cqe->bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); if (qdev->rss_enable && htype) { rx_mb->ol_flags |= PKT_RX_RSS_HASH; rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash); PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x", rx_mb->hash.rss); } rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); if (CQE_HAS_VLAN(parse_flag)) { rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag); rx_mb->ol_flags |= PKT_RX_VLAN_PKT; } if (CQE_HAS_OUTER_VLAN(parse_flag)) { /* FW does not provide indication of Outer VLAN tag, * which is always stripped, so vlan_tci_outer is set * to 0. Here vlan_tag represents inner VLAN tag. 
/* Release the Tx packet at the current SW consumer index: consume one
 * PBL element per mbuf segment, credit nb_tx_avail, and free the mbuf
 * chain.
 *
 * @edev: device handle (unused here beyond the signature)
 * @txq: Tx queue being completed
 * Returns 0 on success, -1 if the SW ring slot unexpectedly holds no mbuf
 * (ring accounting is out of sync with the HW consumer).
 */
static inline int
qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
	uint16_t nb_segs, idx = TX_CONS(txq);
	/* NOTE(review): tx_data_bd appears unused in this body. */
	struct eth_tx_bd *tx_data_bd;
	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;

	if (unlikely(!mbuf)) {
		PMD_TX_LOG(ERR, txq, "null mbuf");
		PMD_TX_LOG(ERR, txq,
			   "tx_desc %u tx_avail %u tx_cons %u tx_prod %u",
			   txq->nb_tx_desc, txq->nb_tx_avail, idx,
			   TX_PROD(txq));
		return -1;
	}
	nb_segs = mbuf->nb_segs;
	while (nb_segs) {
		/* It's like consuming rxbuf in recv() */
		ecore_chain_consume(&txq->tx_pbl);
		txq->nb_tx_avail++;
		nb_segs--;
	}
	rte_pktmbuf_free(mbuf);
	txq->sw_tx_ring[idx].mbuf = NULL;
	return 0;
}
struct eth_tx_2nd_bd *bd2 = NULL; struct eth_tx_3rd_bd *bd3 = NULL; struct eth_tx_bd *tx_bd = NULL; dma_addr_t mapping; uint8_t nb_segs = 1; /* min one segment per packet */ /* Check for scattered buffers */ while (m_seg) { if (nb_segs == 1) { bd2 = (struct eth_tx_2nd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(bd2, 0, sizeof(*bd2)); mapping = rte_mbuf_data_dma_addr(m_seg); QEDE_BD_SET_ADDR_LEN(bd2, mapping, m_seg->data_len); PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len); } else if (nb_segs == 2) { bd3 = (struct eth_tx_3rd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(bd3, 0, sizeof(*bd3)); mapping = rte_mbuf_data_dma_addr(m_seg); QEDE_BD_SET_ADDR_LEN(bd3, mapping, m_seg->data_len); PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len); } else { tx_bd = (struct eth_tx_bd *) ecore_chain_produce(&txq->tx_pbl); memset(tx_bd, 0, sizeof(*tx_bd)); mapping = rte_mbuf_data_dma_addr(m_seg); QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len); PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len); } nb_segs++; m_seg = m_seg->next; } /* Return total scattered buffers */ return nb_segs; } uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct qede_tx_queue *txq = p_txq; struct qede_dev *qdev = txq->qdev; struct ecore_dev *edev = &qdev->edev; struct qede_fastpath *fp; struct eth_tx_1st_bd *bd1; struct rte_mbuf *mbuf; struct rte_mbuf *m_seg = NULL; uint16_t nb_tx_pkts; uint16_t bd_prod; uint16_t idx; uint16_t tx_count; uint16_t nb_frags; uint16_t nb_pkt_sent = 0; fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id]; if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) { PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u", nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh); (void)qede_process_tx_compl(edev, txq); } nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)); if (unlikely(nb_tx_pkts == 0)) { PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u", nb_pkts, 
txq->nb_tx_avail); return 0; } tx_count = nb_tx_pkts; while (nb_tx_pkts--) { /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = TX_PROD(txq); mbuf = *tx_pkts++; txq->sw_tx_ring[idx].mbuf = mbuf; bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); bd1->data.bd_flags.bitfields = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; /* FW 8.10.x specific change */ bd1->data.bitfields = (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; /* Map MBUF linear data for DMA and set in the first BD */ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf), mbuf->data_len); PMD_TX_LOG(INFO, txq, "BD1 len %04x", mbuf->data_len); if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) { PMD_TX_LOG(INFO, txq, "Tx tunnel packet"); /* First indicate its a tunnel pkt */ bd1->data.bd_flags.bitfields |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; /* Legacy FW had flipped behavior in regard to this bit * i.e. it needed to set to prevent FW from touching * encapsulated packets when it didn't need to. 
*/ if (unlikely(txq->is_legacy)) bd1->data.bitfields ^= 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; /* Outer IP checksum offload */ if (mbuf->ol_flags & PKT_TX_OUTER_IP_CKSUM) { PMD_TX_LOG(INFO, txq, "OuterIP csum offload"); bd1->data.bd_flags.bitfields |= ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; } /* Outer UDP checksum offload */ bd1->data.bd_flags.bitfields |= ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK << ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; } /* Descriptor based VLAN insertion */ if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x", mbuf->vlan_tci); bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci); bd1->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; } /* Offload the IP checksum in the hardware */ if (mbuf->ol_flags & PKT_TX_IP_CKSUM) { PMD_TX_LOG(INFO, txq, "IP csum offload"); bd1->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; } /* L4 checksum offload (tcp or udp) */ if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { PMD_TX_LOG(INFO, txq, "L4 csum offload"); bd1->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; /* IPv6 + extn. 
-> later */ } /* Handle fragmented MBUF */ m_seg = mbuf->next; /* Encode scatter gather buffer descriptors if required */ nb_frags = qede_encode_sg_bd(txq, m_seg, bd1); bd1->data.nbds = nb_frags; txq->nb_tx_avail -= nb_frags; txq->sw_tx_prod++; rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf); bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); nb_pkt_sent++; txq->xmit_pkts++; PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x", bd1->data.nbds, mbuf->pkt_len); } /* Write value of prod idx into bd_prod */ txq->tx_db.data.bd_prod = bd_prod; rte_wmb(); rte_compiler_barrier(); DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw); rte_wmb(); /* Check again for Tx completions */ (void)qede_process_tx_compl(edev, txq); PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d", nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id()); return nb_pkt_sent; } static void qede_init_fp_queue(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct qede_fastpath *fp; uint8_t i, rss_id, txq_index, tc; int rxq = 0, txq = 0; for_each_queue(i) { fp = &qdev->fp_array[i]; if (fp->type & QEDE_FASTPATH_RX) { fp->rxq = eth_dev->data->rx_queues[i]; fp->rxq->queue_id = rxq++; } if (fp->type & QEDE_FASTPATH_TX) { for (tc = 0; tc < qdev->num_tc; tc++) { txq_index = tc * QEDE_TSS_COUNT(qdev) + txq; fp->txqs[tc] = eth_dev->data->tx_queues[txq_index]; fp->txqs[tc]->queue_id = txq_index; if (qdev->dev_info.is_legacy) fp->txqs[tc]->is_legacy = true; } txq++; } } } int qede_dev_start(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; struct qed_link_output link_output; struct qede_fastpath *fp; int rc; DP_INFO(edev, "Device state is %d\n", qdev->state); if (qdev->state == QEDE_DEV_START) { DP_INFO(edev, "Port is already started\n"); return 0; } if (qdev->state == QEDE_DEV_CONFIG) qede_init_fp_queue(eth_dev); rc = qede_start_queues(eth_dev, true); if (rc) { DP_ERR(edev, 
/* Wait for a Tx queue to drain (SW producer == SW consumer), reaping
 * completions while polling.
 *
 * Polls up to ~1000 iterations with a 1 ms delay each. If the queue is
 * still stuck and @allow_drain is set, asks the MCP firmware to drain
 * and retries once (recursing with allow_drain=false). After draining,
 * waits an extra 2 ms for the HW to put the last packets on the wire.
 *
 * @qdev: qede device
 * @txq: Tx queue to drain
 * @allow_drain: permit one MCP-assisted drain attempt on timeout
 * Returns 0 on success, the MCP drain error code, or -ENODEV on timeout.
 */
static int qede_drain_txq(struct qede_dev *qdev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	struct ecore_dev *edev = &qdev->edev;
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		qede_process_tx_compl(edev, txq);
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev, false,
					  "Tx queue[%u] is stuck,"
					  "requesting MCP to drain\n",
					  txq->queue_id);
				rc = qdev->ops->common->drain(edev);
				if (rc)
					return rc;
				/* One retry only: recurse with the drain
				 * option disabled so we cannot loop.
				 */
				return qede_drain_txq(qdev, txq, false);
			}
			DP_NOTICE(edev, false,
				  "Timeout waiting for tx queue[%d]:"
				  "PROD=%d, CONS=%d\n",
				  txq->queue_id, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		DELAY(1000);
		rte_compiler_barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	DELAY(2000);

	return 0;
}
If needed, request drain from MCP */ for_each_queue(i) { struct qede_fastpath *fp = &qdev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { for (tc = 0; tc < qdev->num_tc; tc++) { struct qede_tx_queue *txq = fp->txqs[tc]; rc = qede_drain_txq(qdev, txq, true); if (rc) return rc; } } } /* Stop all Queues in reverse order */ for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) { struct qed_stop_rxq_params rx_params; /* Stop the Tx Queue(s) */ if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) { for (tc = 0; tc < qdev->num_tc; tc++) { struct qed_stop_txq_params tx_params; u8 val; tx_params.rss_id = i; val = qdev->fp_array[i].txqs[tc]->queue_id; tx_params.tx_queue_id = val; DP_INFO(edev, "Stopping tx queues\n"); rc = qdev->ops->q_tx_stop(edev, &tx_params); if (rc) { DP_ERR(edev, "Failed to stop TXQ #%d\n", tx_params.tx_queue_id); return rc; } } } /* Stop the Rx Queue */ if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) { memset(&rx_params, 0, sizeof(rx_params)); rx_params.rss_id = i; rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id; rx_params.eq_completion_only = 1; DP_INFO(edev, "Stopping rx queues\n"); rc = qdev->ops->q_rx_stop(edev, &rx_params); if (rc) { DP_ERR(edev, "Failed to stop RXQ #%d\n", i); return rc; } } } return 0; } int qede_reset_fp_rings(struct qede_dev *qdev) { struct qede_fastpath *fp; struct qede_tx_queue *txq; uint8_t tc; uint16_t id, i; for_each_queue(id) { fp = &qdev->fp_array[id]; if (fp->type & QEDE_FASTPATH_RX) { DP_INFO(&qdev->edev, "Reset FP chain for RSS %u\n", id); qede_rx_queue_release_mbufs(fp->rxq); ecore_chain_reset(&fp->rxq->rx_bd_ring); ecore_chain_reset(&fp->rxq->rx_comp_ring); fp->rxq->sw_rx_prod = 0; fp->rxq->sw_rx_cons = 0; *fp->rxq->hw_cons_ptr = 0; for (i = 0; i < fp->rxq->nb_rx_desc; i++) { if (qede_alloc_rx_buffer(fp->rxq)) { DP_ERR(&qdev->edev, "RX buffer allocation failed\n"); return -ENOMEM; } } } if (fp->type & QEDE_FASTPATH_TX) { for (tc = 0; tc < qdev->num_tc; tc++) { txq = fp->txqs[tc]; qede_tx_queue_release_mbufs(txq); 
ecore_chain_reset(&txq->tx_pbl); txq->sw_tx_cons = 0; txq->sw_tx_prod = 0; *txq->hw_cons_ptr = 0; } } } qede_reset_fp_rings(qdev); return 0; } /* This function frees all memory of a single fp */ void qede_free_mem_load(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct qede_fastpath *fp; uint16_t txq_idx; uint8_t id; uint8_t tc; for_each_queue(id) { fp = &qdev->fp_array[id]; if (fp->type & QEDE_FASTPATH_RX) { if (!fp->rxq) continue; qede_rx_queue_release(fp->rxq); eth_dev->data->rx_queues[id] = NULL; } else { for (tc = 0; tc < qdev->num_tc; tc++) { if (!fp->txqs[tc]) continue; txq_idx = fp->txqs[tc]->queue_id; qede_tx_queue_release(fp->txqs[tc]); eth_dev->data->tx_queues[txq_idx] = NULL; } } } } void qede_dev_stop(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; DP_INFO(edev, "port %u\n", eth_dev->data->port_id); if (qdev->state != QEDE_DEV_START) { DP_INFO(edev, "Device not yet started\n"); return; } if (qede_stop_queues(qdev)) DP_ERR(edev, "Didn't succeed to close queues\n"); DP_INFO(edev, "Stopped queues\n"); qdev->ops->fastpath_stop(edev); /* Bring the link down */ qede_dev_set_link_state(eth_dev, false); qdev->state = QEDE_DEV_STOP; DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n"); } uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq, __rte_unused struct rte_mbuf **pkts, __rte_unused uint16_t nb_pkts) { return 0; }
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/ixgbe/ixgbe_fdir.c
/*- * BSD LICENSE * * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdint.h> #include <stdarg.h> #include <errno.h> #include <sys/queue.h> #include <rte_interrupts.h> #include <rte_log.h> #include <rte_debug.h> #include <rte_pci.h> #include <rte_ether.h> #include <rte_ethdev.h> #include <rte_malloc.h> #include "ixgbe_logs.h" #include "base/ixgbe_api.h" #include "base/ixgbe_common.h" #include "ixgbe_ethdev.h" /* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */ #define FDIRCTRL_PBALLOC_MASK 0x03 /* For calculating memory required for FDIR filters */ #define PBALLOC_SIZE_SHIFT 15 /* Number of bits used to mask bucket hash for different pballoc sizes */ #define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */ #define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */ #define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */ #define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */ #define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */ #define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */ #define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */ #define IXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */ #define IXGBE_MAX_FLX_SOURCE_OFF 62 #define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT) #define IXGBE_FDIRCMD_CMD_INTERVAL_US 10 #define IXGBE_FDIR_FLOW_TYPES ( \ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)) #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \ uint8_t ipv6_addr[16]; \ uint8_t i; \ rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\ (ipv6m) = 0; \ for (i = 0; i < sizeof(ipv6_addr); i++) { \ if (ipv6_addr[i] == UINT8_MAX) \ (ipv6m) |= 1 << i; \ else if (ipv6_addr[i] != 0) { \ PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \ return -EINVAL; 
\ } \ } \ } while (0) #define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \ uint8_t ipv6_addr[16]; \ uint8_t i; \ for (i = 0; i < sizeof(ipv6_addr); i++) { \ if ((ipv6m) & (1 << i)) \ ipv6_addr[i] = UINT8_MAX; \ else \ ipv6_addr[i] = 0; \ } \ rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\ } while (0) #define DEFAULT_VXLAN_PORT 4789 #define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash); static int fdir_set_input_mask(struct rte_eth_dev *dev, const struct rte_eth_fdir_masks *input_mask); static int fdir_set_input_mask_82599(struct rte_eth_dev *dev); static int fdir_set_input_mask_x550(struct rte_eth_dev *dev); static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl); static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl); static int ixgbe_fdir_filter_to_atr_input( const struct rte_eth_fdir_filter *fdir_filter, union ixgbe_atr_input *input, enum rte_fdir_mode mode); static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, uint32_t key); static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input, enum rte_fdir_pballoc_type pballoc); static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, enum rte_fdir_pballoc_type pballoc); static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, uint8_t queue, uint32_t fdircmd, uint32_t fdirhash, enum rte_fdir_mode mode); static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, uint32_t fdirhash); static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, bool del, bool update); static int ixgbe_fdir_flush(struct rte_eth_dev *dev); static void ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info); static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct 
/**
 * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
 * It adds extra configuration of fdirctrl that is common for all filter
 * types.
 *
 * Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 * Returns 0 on success, -ETIMEDOUT if INIT_DONE is not observed in time.
 **/
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
		return -ETIMEDOUT;
	}
	return 0;
}
*/ static inline int configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) { *fdirctrl = 0; switch (conf->pballoc) { case RTE_FDIR_PBALLOC_64K: /* 8k - 1 signature filters */ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; break; case RTE_FDIR_PBALLOC_128K: /* 16k - 1 signature filters */ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; break; case RTE_FDIR_PBALLOC_256K: /* 32k - 1 signature filters */ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; break; default: /* bad value */ PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value"); return -EINVAL; }; /* status flags: write hash & swindex in the rx descriptor */ switch (conf->status) { case RTE_FDIR_NO_REPORT_STATUS: /* do nothing, default mode */ break; case RTE_FDIR_REPORT_STATUS: /* report status when the packet matches a fdir rule */ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; break; case RTE_FDIR_REPORT_STATUS_ALWAYS: /* always report status */ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS; break; default: /* bad value */ PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value"); return -EINVAL; }; *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) << IXGBE_FDIRCTRL_FLEX_SHIFT; if (conf->mode >= RTE_FDIR_MODE_PERFECT && conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL) *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); } return 0; } /** * Reverse the bits in FDIR registers that store 2 x 16 bit masks. * * @hi_dword: Bits 31:16 mask to be bit swapped. * @lo_dword: Bits 15:0 mask to be bit swapped. * * Flow director uses several registers to store 2 x 16 bit masks with the * bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the * mask affects the MS bit/byte of the target. 
This function reverses the
 *  bits in these masks.
 * **/
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
	uint32_t mask = hi_dword << 16;

	mask |= lo_dword;
	/* Classic bit-reversal ladder: swap adjacent bits, then 2-bit pairs,
	 * then nibbles, then bytes.  Stopping at the byte swap leaves each
	 * 16-bit half fully bit-reversed in place (no halfword swap).
	 */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 *
 * Programs the global FDIR mask registers from the masks previously stored
 * in the per-device ixgbe_hw_fdir_info.  Returns IXGBE_SUCCESS or -EINVAL
 * for an unsupported vlan_tci_mask.
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/*
	 * mask VM pool and DIPv6 since they are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
	volatile uint32_t *reg;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 */
	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;

	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		/* only full, priority-only, id-only or empty masks allowed */
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(info->mask.dst_port_mask),
			rte_be_to_cpu_16(info->mask.src_port_mask));

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (little-endian)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);

	/* Store source and destination IPv4 masks (big-endian),
	 * can not use IXGBE_WRITE_REG.
	 */
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
	*reg = ~(info->mask.src_ipv4_mask);
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
	*reg = ~(info->mask.dst_ipv4_mask);

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks (bit reversed)
		 */
		fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
			    info->mask.src_ipv6_mask;

		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
	}

	return IXGBE_SUCCESS;
}

/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/* mask VM pool and DIPv6 since they are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
			 IXGBE_FDIRM_FLEX;
	uint32_t fdiripv6m;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	uint16_t mac_mask;

	PMD_INIT_FUNC_TRACE();

	/* set the default UDP port for VxLAN */
	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);

	/* some bits must be set for mac vlan or tunnel mode */
	fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		/* only full, priority-only, id-only or empty masks allowed */
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* outer IPv6 destination is always masked in these modes */
	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
	fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		/* mac-vlan mode does not match on tunnel type or TNI/VNI */
		fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
				IXGBE_FDIRIP6M_TNI_VNI;

	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		mac_mask = info->mask.mac_addr_byte_mask;
		fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
				& IXGBE_FDIRIP6M_INNER_MAC;

		switch (info->mask.tunnel_type_mask) {
		case 0:
			/* Mask tunnel type */
			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			break;
		case 1:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
			return -EINVAL;
		}

		switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
		case 0x0:
			/* Mask vxlan id */
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
			return -EINVAL;
		}
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
	/* ports and IPv4 addresses never participate in these modes */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

	return IXGBE_SUCCESS;
}

/*
 * Save the masks used by signature/perfect modes into the per-device
 * fdir info so fdir_set_input_mask_82599() can program them later.
 */
static int
ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
				  const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint16_t dst_ipv6m = 0;
	uint16_t src_ipv6m = 0;

	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
	info->mask.src_port_mask = input_mask->src_port_mask;
	info->mask.dst_port_mask = input_mask->dst_port_mask;
	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
	/* IPv6 masks are compressed to a per-16-bit-word bitmap */
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
	info->mask.src_ipv6_mask = src_ipv6m;
	info->mask.dst_ipv6_mask = dst_ipv6m;

	return IXGBE_SUCCESS;
}

/*
 * Save the masks used by mac-vlan/tunnel modes into the per-device
 * fdir info so fdir_set_input_mask_x550() can program them later.
 */
static int
ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
				 const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
	info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
	info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
	info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;

	return IXGBE_SUCCESS;
}

/* Dispatch mask storage by the configured fdir mode. */
static int
ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
			    const struct rte_eth_fdir_masks *input_mask)
{
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
	    mode <= RTE_FDIR_MODE_PERFECT)
		return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		return ixgbe_fdir_store_input_mask_x550(dev, input_mask);

	PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
	return -ENOTSUP;
}

/* Program the previously stored masks into HW, by fdir mode. */
int
ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
	    mode <= RTE_FDIR_MODE_PERFECT)
		return fdir_set_input_mask_82599(dev);
	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		return fdir_set_input_mask_x550(dev);

	PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
	return -ENOTSUP;
}

/* Store the masks, then program them into hardware. */
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		    const struct rte_eth_fdir_masks *input_mask)
{
	int ret;

	ret = ixgbe_fdir_store_input_mask(dev, input_mask);
	if (ret)
		return ret;

	return ixgbe_fdir_set_input_mask(dev);
}

/*
 * ixgbe_set_fdir_flex_conf - check that the flex payload and mask
 * configuration arguments are valid, and apply them (flexbytes offset in
 * *fdirctrl, FLEX mask bit in FDIRM, stored mask/offset in fdir info).
 */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint32_t fdirm;
	uint16_t flexbytes = 0;
	uint16_t i;

	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

	if (conf == NULL) {
		PMD_DRV_LOG(ERR, "NULL pointer.");
		return -EINVAL;
	}

	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
			PMD_DRV_LOG(ERR, "unsupported payload type.");
			return -EINVAL;
		}
		/* HW matches one aligned 16-bit word: offset must be even,
		 * the two bytes consecutive, and within the supported range.
		 */
		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
		    (flex_cfg->src_offset[1] ==
		     flex_cfg->src_offset[0] + 1) &&
		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
			*fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
			*fdirctrl |=
				(flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
					IXGBE_FDIRCTRL_FLEX_SHIFT;
		} else {
			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
			return -EINVAL;
		}
	}

	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
			return -EINVAL;
		}
		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
					((flex_mask->mask[1]) & 0xFF));
		/* only all-ones (match) or all-zeroes (ignore) supported */
		if (flexbytes == UINT16_MAX)
			fdirm &= ~IXGBE_FDIRM_FLEX;
		else if (flexbytes != 0) {
			/* IXGBE_FDIRM_FLEX is set by default when set mask */
			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
			return -EINVAL;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
	info->flex_bytes_offset = (uint8_t)((*fdirctrl &
					IXGBE_FDIRCTRL_FLEX_MASK) >>
					IXGBE_FDIRCTRL_FLEX_SHIFT);
	return 0;
}

/*
 * Full flow-director initialization for supported MACs: compute FDIRCTRL,
 * shrink the Rx packet buffer to make room for the filter table, program
 * the input masks and flex configuration, then enable the unit.
 */
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err;
	uint32_t fdirctrl, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type != ixgbe_mac_82599EB &&
		hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOSYS;

	/* x550 supports mac-vlan and tunnel mode but other NICs not */
	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a &&
	    mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced. The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD mask");
		return err;
	}
	err = ixgbe_set_fdir_flex_conf(dev,
		&dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
		return err;
	}

	err = fdir_enable_82599(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on enabling FD.");
		return err;
	}
	return 0;
}

/*
 * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
 * by the IXGBE driver code.
 */
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
	input->formatted.flex_bytes = (uint16_t)(
		(fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));

	/* map the DPDK flow type to the ATR flow type */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
		break;
	default:
		/* mac-vlan/tunnel filters carry no L3/L4 flow type */
		break;
	}

	/* copy the addresses/ports; UDP/TCP cases deliberately fall through
	 * to the IP-address copy below.
	 */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp4_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp4_flow.dst_port;
		/* FALLTHROUGH */
	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.src_ip[0] =
			fdir_filter->input.flow.ip4_flow.src_ip;
		input->formatted.dst_ip[0] =
			fdir_filter->input.flow.ip4_flow.dst_ip;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp6_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp6_flow.dst_port;
		/* FALLTHROUGH */
	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		rte_memcpy(input->formatted.src_ip,
			   fdir_filter->input.flow.ipv6_flow.src_ip,
			   sizeof(input->formatted.src_ip));
		rte_memcpy(input->formatted.dst_ip,
			   fdir_filter->input.flow.ipv6_flow.dst_ip,
			   sizeof(input->formatted.dst_ip));
		break;
	default:
		break;
	}

	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
		input->formatted.tunnel_type =
			fdir_filter->input.flow.tunnel_flow.tunnel_type;
		input->formatted.tni_vni =
			fdir_filter->input.flow.tunnel_flow.tunnel_id;
	}

	return 0;
}

/*
 * The below function is taken from the FreeBSD IXGBE drivers release
 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use 16bits.
 *
 * The newer driver has optimised functions for calculating bucket and
 * signature hashes. However they don't support IPv6 type packets for signature
 * filters so are not used here.
 *
 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
 * set.
 *
 * Compute the hashes for SW ATR
 *  @stream: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 **/
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
				 uint32_t key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 *
	 */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 hash_result = 0;
	u8 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		common_hash_dword ^= atr_input->dword_stream[i];

	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* process the remaining 30 bits in the key 2 bits at a time */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	return hash_result;
}

/*
 * Compute the bucket hash for a perfect-match filter, truncated to the
 * width implied by the packet-buffer allocation.
 */
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc)
{
	if (pballoc == RTE_FDIR_PBALLOC_256K)
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_FDIR_PBALLOC_128K)
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_128KB_HASH_MASK;
	else
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_64KB_HASH_MASK;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: output; last FDIRCMD value read
 *
 * Returns 0 when the command bits cleared, -ETIMEDOUT otherwise.
 */
static inline int
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return 0;
		rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
	}

	return -ETIMEDOUT;
}

/*
 * Calculate the hash value needed for signature-match filters. In the FreeBSD
 * driver, this is done by the optimised function
 * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
 * doesn't support calculating a hash for an IPv6 filter.
 */
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc)
{
	uint32_t bucket_hash, sig_hash;

	/* bucket hash width depends on the packet-buffer allocation */
	if (pballoc == RTE_FDIR_PBALLOC_256K)
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_FDIR_PBALLOC_128K)
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_128KB_HASH_MASK;
	else
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_64KB_HASH_MASK;

	sig_hash = ixgbe_atr_compute_hash_82599(input,
			IXGBE_ATR_SIGNATURE_HASH_KEY);

	/* signature hash occupies the SW-index field of FDIRHASH */
	return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
}

/*
 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
 * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
 * added, and IPv6 support also added. The hash value is also pre-calculated
 * as the pballoc value is needed to do it.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
		uint8_t queue, uint32_t fdircmd, uint32_t fdirhash,
		enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirvlan;
	u32 addr_low, addr_high;
	u32 tunnel_type = 0;
	int err = 0;
	volatile uint32_t *reg;

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record the IPv4 address (big-endian)
		 * can not use IXGBE_WRITE_REG.
		 */
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
		*reg = input->formatted.src_ip[0];
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
		*reg = input->formatted.dst_ip[0];

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* for mac vlan and tunnel modes the inner MAC is packed
		 * into the FDIRSIPv6 registers instead of an IPv6 address
		 */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));

		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode */
			if (input->formatted.tunnel_type !=
				RTE_FDIR_TUNNEL_TYPE_NVGRE)
				/* top bit distinguishes VxLAN from NVGRE */
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
	}

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = input->formatted.flex_bytes;
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (uint32_t)input->formatted.vm_pool <<
			IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}

/**
 * This function is based on ixgbe_atr_add_signature_filter_82599() in
 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
 * setting extra fields in the FDIRCMD register, and removes the code that was
 * verifying the flow_type field. According to the documentation, a flow type of
 * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
 * work ok...
 *
 *  Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @queue: queue index to direct traffic to
 *  @fdircmd: any extra flags to set in fdircmd register
 *  @fdirhash: pre-calculated hash value for the filter
 **/
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}

/*
 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
 * that it can be used for removing signature and perfect filters.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");

	return err;
}

/* Look up an SW-cached filter by its ATR input key; NULL if absent. */
static inline struct ixgbe_fdir_filter *
ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
			 union ixgbe_atr_input *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Insert a filter into the SW hash table and the fdir list. */
static inline int
ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
			 struct ixgbe_fdir_filter *fdir_filter)
{
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_handle,
			       &fdir_filter->ixgbe_fdir);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir filter to hash table %d!",
			    ret);
		return ret;
	}

	fdir_info->hash_map[ret] = fdir_filter;

	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);

	return 0;
}

/* Remove a filter from the SW hash table and list, and free it. */
static inline int
ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
			 union ixgbe_atr_input *key)
{
	int ret;
	struct ixgbe_fdir_filter *fdir_filter;

	ret = rte_hash_del_key(fdir_info->hash_handle, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
		return ret;
	}

	fdir_filter = fdir_info->hash_map[ret];
	fdir_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
	rte_free(fdir_filter);

	return 0;
}

/*
 * Translate an rte_eth_fdir_filter into the driver-internal
 * ixgbe_fdir_rule (ATR input + mode/flags/queue/soft_id).
 */
static int
ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_eth_fdir_filter *fdir_filter,
			    struct ixgbe_fdir_rule *rule)
{
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	int err;

	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));

	err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
					     &rule->ixgbe_fdir,
					     fdir_mode);
	if (err)
		return err;

	rule->mode = fdir_mode;
	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	rule->queue = fdir_filter->action.rx_queue;
	rule->soft_id = fdir_filter->soft_id;

	return 0;
}

/*
 * Add, update or delete a flow director rule: computes the hardware hash,
 * keeps the SW shadow table (hash + list) in sync, and programs/erases
 * the filter in hardware.  On HW programming failure of a newly added
 * rule, the SW entry is rolled back.
 *
 * @rule: interpreted rule (see ixgbe_interpret_fdir_filter)
 * @del: delete the rule instead of adding it
 * @update: allow overwriting an existing identical rule
 */
int
ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
			  struct ixgbe_fdir_rule *rule,
			  bool del,
			  bool update)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fdircmd_flags;
	uint32_t fdirhash;
	uint8_t queue;
	bool is_perfect = FALSE;
	int err;
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	struct ixgbe_fdir_filter *node;
	bool add_node = FALSE;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	/*
	 * Sanity check for x550.
	 * When adding a new filter with flow type set to IPv4,
	 * the flow director mask should be configured before,
	 * and the L4 protocol and ports are masked.
	 */
	if ((!del) &&
	    (hw->mac.type == ixgbe_mac_X550 ||
	     hw->mac.type == ixgbe_mac_X550EM_x ||
	     hw->mac.type == ixgbe_mac_X550EM_a) &&
	    (rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV4) &&
	    (info->mask.src_port_mask != 0 ||
	     info->mask.dst_port_mask != 0)) {
		PMD_DRV_LOG(ERR, "By this device,"
			    " IPv4 is not supported without"
			    " L4 protocol and ports masked!");
		return -ENOTSUP;
	}

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	if (is_perfect) {
		if (rule->ixgbe_fdir.formatted.flow_type &
		    IXGBE_ATR_L4TYPE_IPV6_MASK) {
			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
				    " perfect mode!");
			return -ENOTSUP;
		}
		fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
							  dev->data->dev_conf.fdir_conf.pballoc);
		/* software index for perfect filters lives in the same
		 * FDIRHASH field the signature hash would occupy
		 */
		fdirhash |= rule->soft_id <<
			IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	} else
		fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
						      dev->data->dev_conf.fdir_conf.pballoc);

	if (del) {
		err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
		if (err < 0)
			return err;

		err = fdir_erase_filter_82599(hw, fdirhash);
		if (err < 0)
			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
		else
			PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
		return err;
	}
	/* add or update an fdir filter*/
	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
	if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
		if (is_perfect) {
			queue = dev->data->dev_conf.fdir_conf.drop_queue;
			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
		} else {
			PMD_DRV_LOG(ERR, "Drop option is not supported in"
				    " signature mode.");
			return -EINVAL;
		}
	} else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
		queue = (uint8_t)rule->queue;
	else
		return -EINVAL;

	node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
	if (node) {
		if (update) {
			/* same key already cached: refresh SW metadata */
			node->fdirflags = fdircmd_flags;
			node->fdirhash = fdirhash;
			node->queue = queue;
		} else {
			PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
			return -EINVAL;
		}
	} else {
		add_node = TRUE;
		node = rte_zmalloc("ixgbe_fdir",
				   sizeof(struct ixgbe_fdir_filter),
				   0);
		if (!node)
			return -ENOMEM;
		(void)rte_memcpy(&node->ixgbe_fdir,
				 &rule->ixgbe_fdir,
				 sizeof(union ixgbe_atr_input));
		node->fdirflags = fdircmd_flags;
		node->fdirhash = fdirhash;
		node->queue = queue;

		err = ixgbe_insert_fdir_filter(info, node);
		if (err < 0) {
			rte_free(node);
			return err;
		}
	}

	if (is_perfect) {
		err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash, fdir_mode);
	} else {
		err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash);
	}
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");

		/* roll back the SW entry added above */
		if (add_node)
			(void)ixgbe_remove_fdir_filter(info,
						       &rule->ixgbe_fdir);
	} else {
		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
	}

	return err;
}

/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
* @dev: pointer to the structure rte_eth_dev
 * @fdir_filter: fdir filter entry
 * @del: 1 - delete, 0 - add
 * @update: 1 - update
 */
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
			  const struct rte_eth_fdir_filter *fdir_filter,
			  bool del,
			  bool update)
{
	struct ixgbe_fdir_rule rule;
	int err;

	/* Translate the generic ethdev filter description into an ixgbe
	 * rule, then program (or remove) it in hardware and SW list. */
	err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);

	if (err)
		return err;

	return ixgbe_fdir_filter_program(dev, &rule, del, update);
}

/* Re-initialize the hardware flow-director tables and reset the
 * software add/remove statistics. Returns 0 on success, negative
 * errno on failure. */
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	int ret;

	ret = ixgbe_reinit_fdir_tables_82599(hw);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
		return ret;
	}

	info->f_add = 0;
	info->f_remove = 0;
	info->add = 0;
	info->remove = 0;

	return ret;
}

/* log2 of the base number of flow-director entries; scaled up by the
 * PBALLOC field of FDIRCTRL below. */
#define FDIRENTRIES_NUM_SHIFT 10

/* Fill *fdir_info with the current flow-director configuration:
 * mode, guaranteed entry count, the programmed masks and the flex
 * payload setup, read from FDIRCTRL and the cached SW masks. */
static void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t fdirctrl, max_num;
	uint8_t offset;

	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	/* flex byte offset is stored in units of 2 bytes in FDIRCTRL */
	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
			IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->guarant_spc = max_num;
	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
		/* signature mode hashes 4 buckets per entry */
		fdir_info->guarant_spc = max_num * 4;

	/* report the masks cached at configure time */
	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
			  fdir_info->mask.ipv6_mask.src_ip);
	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
			  fdir_info->mask.ipv6_mask.dst_ip);
	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
	fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

	/* MAC VLAN / tunnel modes do not use per-flow-type matching */
	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->flow_types_mask[0] = 0;
	else
		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;

	/* one 2-byte flex payload segment at 'offset' */
	fdir_info->flex_payload_unit = sizeof(uint16_t);
	fdir_info->max_flex_payload_segment_num = 1;
	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
	fdir_info->flex_conf.nb_payloads = 1;
	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
	fdir_info->flex_conf.nb_flexmasks = 1;
	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
	fdir_info->flex_conf.flex_mask[0].mask[0] =
		(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
	fdir_info->flex_conf.flex_mask[0].mask[1] =
		(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}

/* Read the hardware flow-director statistics registers, accumulate
 * them into the SW info cache, and report the result in *fdir_stats.
 * NOTE(review): FDIRUSTAT/FDIRFSTAT appear to be read-to-clear style
 * counters here (they are added into the running totals) — confirm
 * against the 82599 datasheet. */
static void
ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t reg, max_num;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	/* Get the information from registers */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
	info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
				     IXGBE_FDIRFREE_COLL_SHIFT);
	info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
				IXGBE_FDIRFREE_FREE_SHIFT);

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
	info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
				   IXGBE_FDIRLEN_MAXHASH_SHIFT);
	info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
				 IXGBE_FDIRLEN_MAXLEN_SHIFT);

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
		IXGBE_FDIRUSTAT_REMOVE_SHIFT;
	info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
		IXGBE_FDIRUSTAT_ADD_SHIFT;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
	info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
		IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
	info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
		IXGBE_FDIRFSTAT_FADD_SHIFT;

	/* Copy the new information in the fdir parameter */
	fdir_stats->collision = info->collision;
	fdir_stats->free = info->free;
	fdir_stats->maxhash = info->maxhash;
	fdir_stats->maxlen = info->maxlen;
	fdir_stats->remove = info->remove;
	fdir_stats->add = info->add;
	fdir_stats->f_remove = info->f_remove;
	fdir_stats->f_add = info->f_add;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(reg & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_stats->guarant_cnt = max_num - fdir_stats->free;
	else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
}

/*
 * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
* @dev: pointer to the structure rte_eth_dev
 * @filter_op:operation will be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;

	/* flow director is only implemented on these MAC types */
	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	/* FLUSH is the only operation that takes no argument */
	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
		break;
	case RTE_ETH_FILTER_UPDATE:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = ixgbe_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* restore flow director filter */
void
ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct ixgbe_fdir_filter *node;
	bool is_perfect = FALSE;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	/* replay every filter kept in the SW list into hardware;
	 * write errors are deliberately ignored (best effort) */
	if (is_perfect) {
		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
			(void)fdir_write_perfect_filter_82599(hw,
							      &node->ixgbe_fdir,
							      node->queue,
							      node->fdirflags,
							      node->fdirhash,
							      fdir_mode);
		}
	} else {
		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
			(void)fdir_add_signature_filter_82599(hw,
							      &node->ixgbe_fdir,
							      node->queue,
							      node->fdirflags,
							      node->fdirhash);
		}
	}
}

/* remove all the flow director filters */
int
ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;
	struct ixgbe_fdir_filter *filter_flag;
	int ret = 0;

	/* flush flow director */
	rte_hash_reset(fdir_info->hash_handle);
	memset(fdir_info->hash_map, 0,
	       sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
	/* remember whether the list was non-empty before draining it,
	 * so hardware is only flushed when there was something to clear */
	filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	if (filter_flag != NULL)
		ret = ixgbe_fdir_flush(dev);

	return ret;
}
/* ==== appended file: kdns/core/radtree.c (repo: vicharl/containerdns) ==== */
/* * radtree.c * radtree -- generic radix tree for binary strings. * * Copyright (c) 2001-2006, NLnet Labs. * * */ #include <assert.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <time.h> #include <stdio.h> #include <ctype.h> #include "radtree.h" #include "util.h" struct radtree* radix_tree_create(void) { struct radtree* rt = (struct radtree*)xalloc( sizeof(*rt)); rt->root = NULL; rt->count = 0; return rt; } /** delete radnodes in postorder recursion */ static void radnode_del_postorder( struct radnode* n) { unsigned i; if(!n) return; for(i=0; i<n->len; i++) { radnode_del_postorder( n->array[i].node); free(n->array[i].str); } free(n->array); free(n); } void radix_tree_clear(struct radtree* rt) { radnode_del_postorder(rt->root); rt->root = NULL; rt->count = 0; } void radix_tree_delete(struct radtree* rt) { if(!rt) return; radix_tree_clear(rt); free(rt); } /** return last elem-containing node in this subtree (excl self) */ static struct radnode* radnode_last_in_subtree(struct radnode* n) { int idx; /* try last entry in array first */ for(idx=((int)n->len)-1; idx >= 0; idx--) { if(n->array[idx].node) { /* does it have entries in its subtrees? */ if(n->array[idx].node->len > 0) { struct radnode* s = radnode_last_in_subtree( n->array[idx].node); if(s) return s; } /* no, does it have an entry itself? */ if(n->array[idx].node->elem) return n->array[idx].node; } } return NULL; } /** last in subtree, incl self */ static struct radnode* radnode_last_in_subtree_incl_self(struct radnode* n) { struct radnode* s = radnode_last_in_subtree(n); if(s) return s; if(n->elem) return n; return NULL; } /** return first elem-containing node in this subtree (excl self) */ static struct radnode* radnode_first_in_subtree(struct radnode* n) { unsigned idx; struct radnode* s; /* try every subnode */ for(idx=0; idx<n->len; idx++) { if(n->array[idx].node) { /* does it have elem itself? 
*/ if(n->array[idx].node->elem) return n->array[idx].node; /* try its subtrees */ if((s=radnode_first_in_subtree(n->array[idx].node))!=0) return s; } } return NULL; } /** Find an entry in arrays from idx-1 to 0 */ static struct radnode* radnode_find_prev_from_idx(struct radnode* n, unsigned from) { unsigned idx = from; while(idx > 0) { idx --; if(n->array[idx].node) { struct radnode* s = radnode_last_in_subtree_incl_self( n->array[idx].node); if(s) return s; } } return NULL; } /** * Find a prefix of the key, in whole-nodes. * Finds the longest prefix that corresponds to a whole radnode entry. * There may be a slightly longer prefix in one of the array elements. * @param result: the longest prefix, the entry itself if *respos==len, * otherwise an array entry, residx. * @param respos: pos in string where next unmatched byte is, if == len an * exact match has been found. If == 0 then a "" match was found. * @return false if no prefix found, not even the root "" prefix. */ static int radix_find_prefix_node(struct radtree* rt, uint8_t* k, uint16_t len, struct radnode** result, uint16_t* respos) { struct radnode* n = rt->root; uint16_t pos = 0; uint8_t byte; *respos = 0; *result = n; if(!n) return 0; while(n) { if(pos == len) { return 1; } byte = k[pos]; if(byte < n->offset) { return 1; } byte -= n->offset; if(byte >= n->len) { return 1; } pos++; if(n->array[byte].len != 0) { /* must match additional string */ if(pos+n->array[byte].len > len) { return 1; } if(memcmp(&k[pos], n->array[byte].str, n->array[byte].len) != 0) { return 1; } pos += n->array[byte].len; } n = n->array[byte].node; if(!n) return 1; *respos = pos; *result = n; } return 1; } /** grow array to at least the given size, offset unchanged */ static int radnode_array_grow( struct radnode* n, unsigned want) { unsigned ns = ((unsigned)n->capacity)*2; struct radsel* a; assert(want <= 256); /* cannot be more, range of uint8 */ if(want > ns) ns = want; if(ns > 256) ns = 256; /* we do not use realloc, because we 
want to keep the old array * in case alloc fails, so that the tree is still usable */ a = (struct radsel*)xalloc_array_zero(ns, sizeof(struct radsel)); if(!a) return 0; assert(n->len <= n->capacity); assert(n->capacity < ns); memcpy(&a[0], &n->array[0], n->len*sizeof(struct radsel)); free(n->array); n->array = a; n->capacity = ns; return 1; } /** make space in radnode array for another byte */ static int radnode_array_space( struct radnode* n, uint8_t byte) { /* is there an array? */ if(!n->array || n->capacity == 0) { n->array = (struct radsel*)xalloc( sizeof(struct radsel)); if(!n->array) return 0; memset(&n->array[0], 0, sizeof(struct radsel)); n->len = 1; n->capacity = 1; n->offset = byte; /* is the array unused? */ } else if(n->len == 0 && n->capacity != 0) { n->len = 1; n->offset = byte; memset(&n->array[0], 0, sizeof(struct radsel)); /* is it below the offset? */ } else if(byte < n->offset) { /* is capacity enough? */ unsigned idx; unsigned need = n->offset-byte; if(n->len+need > n->capacity) { /* grow array */ if(!radnode_array_grow( n, n->len+need)) return 0; } /* reshuffle items to end */ memmove(&n->array[need], &n->array[0], n->len*sizeof(struct radsel)); /* fixup pidx */ for(idx = 0; idx < n->len; idx++) { if(n->array[idx+need].node) n->array[idx+need].node->pidx = idx+need; } /* zero the first */ memset(&n->array[0], 0, need*sizeof(struct radsel)); n->len += need; n->offset = byte; /* is it above the max? */ } else if(byte-n->offset >= n->len) { /* is capacity enough? 
*/ unsigned need = (byte-n->offset) - n->len + 1; /* grow array */ if(n->len + need > n->capacity) { if(!radnode_array_grow( n, n->len+need)) return 0; } /* zero added entries */ memset(&n->array[n->len], 0, need*sizeof(struct radsel)); /* grow length */ n->len += need; } return 1; } /** create a prefix in the array strs */ static int radsel_str_create( struct radsel* r, uint8_t* k, uint16_t pos, uint16_t len) { r->str = (uint8_t*)xalloc( sizeof(uint8_t)*(len-pos)); if(!r->str) return 0; /* out of memory */ memmove(r->str, k+pos, len-pos); r->len = len-pos; return 1; } /** see if one byte string p is a prefix of another x (equality is true) */ static int bstr_is_prefix(uint8_t* p, uint16_t plen, uint8_t* x, uint16_t xlen) { /* if plen is zero, it is an (empty) prefix */ if(plen == 0) return 1; /* if so, p must be shorter */ if(plen > xlen) return 0; return (memcmp(p, x, plen) == 0); } /** number of bytes in common for the two strings */ static uint16_t bstr_common(uint8_t* x, uint16_t xlen, uint8_t* y, uint16_t ylen) { unsigned i, max = ((xlen<ylen)?xlen:ylen); for(i=0; i<max; i++) { if(x[i] != y[i]) return i; } return max; } int bstr_is_prefix_ext(uint8_t* p, uint16_t plen, uint8_t* x, uint16_t xlen) { return bstr_is_prefix(p, plen, x, xlen); } uint16_t bstr_common_ext(uint8_t* x, uint16_t xlen, uint8_t* y, uint16_t ylen) { return bstr_common(x, xlen, y, ylen); } /** allocate remainder from prefixes for a split: * plen: len prefix, l: longer bstring, llen: length of l. */ static int radsel_prefix_remainder( uint16_t plen, uint8_t* l, uint16_t llen, uint8_t** s, uint16_t* slen) { *slen = llen - plen; *s = (uint8_t*)xalloc( (*slen)*sizeof(uint8_t)); if(!*s) return 0; memmove(*s, l+plen, llen-plen); return 1; } /** radsel create a split when two nodes have shared prefix. * @param r: radsel that gets changed, it contains a node. * @param k: key byte string * @param pos: position where the string enters the radsel (e.g. r.str) * @param len: length of k. 
* @param add: additional node for the string k. * removed by called on failure. * @return false on alloc failure, no changes made. */ static int radsel_split( struct radsel* r, uint8_t* k, uint16_t pos, uint16_t len, struct radnode* add) { uint8_t* addstr = k+pos; uint16_t addlen = len-pos; if(bstr_is_prefix(addstr, addlen, r->str, r->len)) { uint8_t* split_str=NULL, *dupstr=NULL; uint16_t split_len=0; /* 'add' is a prefix of r.node */ /* also for empty addstr */ /* set it up so that the 'add' node has r.node as child */ /* so, r.node gets moved below the 'add' node, but we do * this so that the r.node stays the same pointer for its * key name */ assert(addlen != r->len); assert(addlen < r->len); if(r->len-addlen > 1) { /* shift one because a char is in the lookup array */ if(!radsel_prefix_remainder( addlen+1, r->str, r->len, &split_str, &split_len)) return 0; } if(addlen != 0) { dupstr = (uint8_t*)xalloc( addlen*sizeof(uint8_t)); if(!dupstr) { free(split_str); return 0; } memcpy(dupstr, addstr, addlen); } if(!radnode_array_space( add, r->str[addlen])) { free(split_str); free(dupstr); return 0; } /* alloc succeeded, now link it in */ add->parent = r->node->parent; add->pidx = r->node->pidx; add->array[0].node = r->node; add->array[0].str = split_str; add->array[0].len = split_len; r->node->parent = add; r->node->pidx = 0; r->node = add; free(r->str); r->str = dupstr; r->len = addlen; } else if(bstr_is_prefix(r->str, r->len, addstr, addlen)) { uint8_t* split_str = NULL; uint16_t split_len = 0; /* r.node is a prefix of 'add' */ /* set it up so that the 'r.node' has 'add' as child */ /* and basically, r.node is already completely fine, * we only need to create a node as its child */ assert(addlen != r->len); assert(r->len < addlen); if(addlen-r->len > 1) { /* shift one because a character goes into array */ if(!radsel_prefix_remainder( r->len+1, addstr, addlen, &split_str, &split_len)) return 0; } if(!radnode_array_space( r->node, addstr[r->len])) { free(split_str); 
return 0;
		}
		/* alloc succeeded, now link it in */
		add->parent = r->node;
		add->pidx = addstr[r->len] - r->node->offset;
		r->node->array[add->pidx].node = add;
		r->node->array[add->pidx].str = split_str;
		r->node->array[add->pidx].len = split_len;
	} else {
		/* okay we need to create a new node that chooses between
		 * the nodes 'add' and r.node
		 * We do this so that r.node stays the same pointer for its
		 * key name. */
		struct radnode* com;
		uint8_t* common_str=NULL, *s1_str=NULL, *s2_str=NULL;
		uint16_t common_len, s1_len=0, s2_len=0;
		common_len = bstr_common(r->str, r->len, addstr, addlen);
		assert(common_len < r->len);
		assert(common_len < addlen);
		/* create the new node for choice */
		com = (struct radnode*)xalloc_zero( sizeof(*com));
		if(!com) return 0; /* out of memory */
		/* create the two substrings for subchoices */
		if(r->len-common_len > 1) {
			/* shift by one char because it goes in lookup array */
			if(!radsel_prefix_remainder( common_len+1, r->str,
				r->len, &s1_str, &s1_len)) {
				free(com);
				return 0;
			}
		}
		if(addlen-common_len > 1) {
			if(!radsel_prefix_remainder( common_len+1, addstr,
				addlen, &s2_str, &s2_len)) {
				free(com);
				free(s1_str);
				return 0;
			}
		}
		/* create the shared prefix to go in r */
		if(common_len > 0) {
			common_str = (uint8_t*)xalloc(
				common_len*sizeof(uint8_t));
			if(!common_str) {
				free(com);
				free(s1_str);
				free(s2_str);
				return 0;
			}
			memcpy(common_str, addstr, common_len);
		}
		/* make space in the common node array */
		if(!radnode_array_space( com, r->str[common_len]) ||
			!radnode_array_space( com, addstr[common_len])) {
			free(com->array);
			free(com);
			free(common_str);
			free(s1_str);
			free(s2_str);
			return 0;
		}
		/* allocs succeeded, proceed to link it all up */
		com->parent = r->node->parent;
		com->pidx = r->node->pidx;
		r->node->parent = com;
		r->node->pidx = r->str[common_len]-com->offset;
		add->parent = com;
		add->pidx = addstr[common_len]-com->offset;
		com->array[r->node->pidx].node = r->node;
		com->array[r->node->pidx].str = s1_str;
		com->array[r->node->pidx].len = s1_len;
		com->array[add->pidx].node = add;
		com->array[add->pidx].str = s2_str;
		com->array[add->pidx].len = s2_len;
		free(r->str);
		r->str = common_str;
		r->len = common_len;
		r->node = com;
	}
	return 1;
}

/* Insert elem under key k (len bytes). Returns the node holding the
 * element, or NULL on out of memory or if the key already exists. */
struct radnode* radix_insert(struct radtree* rt, uint8_t* k, uint16_t len,
	void* elem)
{
	struct radnode* n;
	uint16_t pos = 0;
	/* create new element to add */
	struct radnode* add = (struct radnode*)xalloc_zero(sizeof(*add));
	if(!add) return NULL; /* out of memory */
	add->elem = elem;
	/* find out where to add it */
	if(!radix_find_prefix_node(rt, k, len, &n, &pos)) {
		/* new root */
		assert(rt->root == NULL);
		if(len == 0) {
			rt->root = add;
		} else {
			/* add a root to point to new node */
			n = (struct radnode*)xalloc_zero(sizeof(*n));
			/* NOTE(review): 'add' appears to be leaked if this
			 * allocation fails — confirm and fix upstream */
			if(!n) return NULL;
			if(!radnode_array_space(n, k[0])) {
				free(n->array);
				free(n);
				free(add);
				return NULL;
			}
			add->parent = n;
			add->pidx = 0;
			n->array[0].node = add;
			if(len > 1) {
				if(!radsel_prefix_remainder(1, k, len,
					&n->array[0].str, &n->array[0].len)) {
					free(n->array);
					free(n);
					free(add);
					return NULL;
				}
			}
			rt->root = n;
		}
	} else if(pos == len) {
		/* found an exact match */
		if(n->elem) {
			/* already exists, failure */
			free(add);
			return NULL;
		}
		n->elem = elem;
		free(add);
		add = n;
	} else {
		/* n is a node which can accommodate */
		uint8_t byte;
		assert(pos < len);
		byte = k[pos];

		/* see if it falls outside of array */
		if(byte < n->offset || byte-n->offset >= n->len) {
			/* make space in the array for it; adjusts offset */
			if(!radnode_array_space(n, byte)) {
				free(add);
				return NULL;
			}
			assert(byte>=n->offset && byte-n->offset<n->len);
			byte -= n->offset;
			/* see if more prefix needs to be split off */
			if(pos+1 < len) {
				if(!radsel_str_create(&n->array[byte], k,
					pos+1, len)) {
					free(add);
					return NULL;
				}
			}
			/* insert the new node in the new bucket */
			add->parent = n;
			add->pidx = byte;
			n->array[byte].node = add;
		/* so a bucket exists and byte falls in it */
		} else if(n->array[byte-n->offset].node == NULL) {
			/* use existing bucket */
			byte -= n->offset;
			if(pos+1 < len) {
				/* split off more prefix */
				if(!radsel_str_create(&n->array[byte], k,
					pos+1, len)) {
					free(add);
					return NULL;
				}
			}
			/* insert the new node in the new bucket */
			add->parent = n;
			add->pidx = byte;
			n->array[byte].node = add;
		} else {
			/* use bucket but it has a shared prefix,
			 * split that out and create a new intermediate
			 * node to split out between the two.
			 * One of the two might exactmatch the new
			 * intermediate node */
			if(!radsel_split(&n->array[byte-n->offset], k, pos+1,
				len, add)) {
				free(add);
				return NULL;
			}
		}
	}

	rt->count ++;
	return add;
}

/** Delete a radnode */
static void radnode_delete( struct radnode* n)
{
	unsigned i;
	if(!n) return;
	for(i=0; i<n->len; i++) {
		/* safe to free NULL str */
		free(n->array[i].str);
	}
	free(n->array);
	free(n);
}

/** Cleanup node with one child, it is removed and joined into parent[x] str */
static int radnode_cleanup_onechild( struct radnode* n, struct radnode* par)
{
	uint8_t* join;
	uint16_t joinlen;
	uint8_t pidx = n->pidx;
	struct radnode* child = n->array[0].node;
	/* node had one child, merge them into the parent. */
	/* keep the child node, so its pointers stay valid. */

	/* at parent, append child->str to array str */
	assert(pidx < par->len);
	joinlen = par->array[pidx].len + n->array[0].len + 1;
	join = (uint8_t*)xalloc( joinlen*sizeof(uint8_t));
	if(!join) {
		/* cleanup failed due to out of memory */
		/* the tree is inefficient, with node n still existing */
		return 0;
	}
	/* we know that .str and join are malloced, thus aligned */
	if(par->array[pidx].str)
		memcpy(join, par->array[pidx].str, par->array[pidx].len);
	/* the array lookup is gone, put its character in the lookup string*/
	join[par->array[pidx].len] = child->pidx + n->offset;
	/* but join+len may not be aligned */
	if(n->array[0].str)
		memmove(join+par->array[pidx].len+1, n->array[0].str,
			n->array[0].len);
	free(par->array[pidx].str);
	par->array[pidx].str = join;
	par->array[pidx].len = joinlen;
	/* and set the node to our child. */
	par->array[pidx].node = child;
	child->parent = par;
	child->pidx = pidx;
	/* we are unlinked, delete our node */
	radnode_delete( n);
	return 1;
}

/** remove array of nodes */
static void radnode_array_clean_all( struct radnode* n)
{
	n->offset = 0;
	n->len = 0;
	/* shrink capacity */
	free(n->array);
	n->array = NULL;
	n->capacity = 0;
}

/** see if capacity can be reduced for the given node array */
static void radnode_array_reduce_if_needed( struct radnode* n)
{
	if(n->len <= n->capacity/2 && n->len != n->capacity) {
		struct radsel* a = (struct radsel*)xalloc_array_zero(
			sizeof(*a), n->len);
		if(!a) return;
		memcpy(a, n->array, sizeof(*a)*n->len);
		free(n->array);
		n->array = a;
		n->capacity = n->len;
	}
}

/** remove NULL nodes from front of array */
static void radnode_array_clean_front( struct radnode* n)
{
	/* move them up and adjust offset */
	unsigned idx, shuf = 0;
	/* remove until a nonNULL entry */
	while(shuf < n->len && n->array[shuf].node == NULL)
		shuf++;
	if(shuf == 0)
		return;
	if(shuf == n->len) {
		/* the array is empty, the tree is inefficient */
		radnode_array_clean_all( n);
		return;
	}
	assert(shuf < n->len);
	assert((int)shuf <= 255-(int)n->offset);
	memmove(&n->array[0], &n->array[shuf],
		(n->len - shuf)*sizeof(struct radsel));
	n->offset += shuf;
	n->len -= shuf;
	for(idx=0; idx<n->len; idx++)
		if(n->array[idx].node)
			n->array[idx].node->pidx = idx;
	/* see if capacity can be reduced */
	radnode_array_reduce_if_needed( n);
}

/** remove NULL nodes from end of array */
static void radnode_array_clean_end( struct radnode* n)
{
	/* shorten it */
	unsigned shuf = 0;
	/* remove until a nonNULL entry */
	while(shuf < n->len && n->array[n->len-1-shuf].node == NULL)
		shuf++;
	if(shuf == 0)
		return;
	if(shuf == n->len) {
		/* the array is empty, the tree is inefficient */
		radnode_array_clean_all( n);
		return;
	}
	assert(shuf < n->len);
	n->len -= shuf;
	/* array elements can stay where they are */
	/* see if capacity can be reduced */
	radnode_array_reduce_if_needed( n);
}

/** clean up radnode leaf, where we know it has a parent */
static void radnode_cleanup_leaf( struct radnode* n, struct radnode* par)
{
	uint8_t pidx;
	/* node was a leaf */
	/* delete leaf node, but store parent+idx */
	pidx = n->pidx;
	radnode_delete( n);

	/* set parent+idx entry to NULL str and node.*/
	assert(pidx < par->len);
	free(par->array[pidx].str);
	par->array[pidx].str = NULL;
	par->array[pidx].len = 0;
	par->array[pidx].node = NULL;

	/* see if par offset or len must be adjusted */
	if(par->len == 1) {
		/* removed final element from array */
		radnode_array_clean_all( par);
	} else if(pidx == 0) {
		/* removed first element from array */
		radnode_array_clean_front( par);
	} else if(pidx == par->len-1) {
		/* removed last element from array */
		radnode_array_clean_end( par);
	}
}

/**
 * Cleanup a radix node that was made smaller, see if it can
 * be merged with others.
 * @param rt: tree to remove root if needed.
 * @param n: node to cleanup
 * @return false on alloc failure.
 */
static int radnode_cleanup(struct radtree* rt, struct radnode* n)
{
	while(n) {
		if(n->elem) {
			/* cannot delete node with a data element */
			return 1;
		} else if(n->len == 1 && n->parent) {
			return radnode_cleanup_onechild(n, n->parent);
		} else if(n->len == 0) {
			struct radnode* par = n->parent;
			if(!par) {
				/* root deleted */
				radnode_delete(n);
				rt->root = NULL;
				return 1;
			}
			/* remove and delete the leaf node */
			radnode_cleanup_leaf(n, par);
			/* see if parent can now be cleaned up */
			n = par;
		} else {
			/* node cannot be cleaned up */
			return 1;
		}
	}
	/* ENOTREACH */
	return 1;
}

/* Remove the element stored at node n and merge/prune nodes that
 * become redundant. */
void radix_delete(struct radtree* rt, struct radnode* n)
{
	if(!n) return;
	n->elem = NULL;
	rt->count --;
	if(!radnode_cleanup(rt, n)) {
		/* out of memory in cleanup. the elem ptr is NULL, but
		 * the radix tree could be inefficient.
*/ } } struct radnode* radix_search(struct radtree* rt, uint8_t* k, uint16_t len) { struct radnode* n = rt->root; uint16_t pos = 0; uint8_t byte; while(n) { if(pos == len) return n->elem?n:NULL; byte = k[pos]; if(byte < n->offset) return NULL; byte -= n->offset; if(byte >= n->len) return NULL; pos++; if(n->array[byte].len != 0) { /* must match additional string */ if(pos+n->array[byte].len > len) return NULL; /* no match */ if(memcmp(&k[pos], n->array[byte].str, n->array[byte].len) != 0) return NULL; /* no match */ pos += n->array[byte].len; } n = n->array[byte].node; } return NULL; } /** return self or a previous element */ static int ret_self_or_prev(struct radnode* n, struct radnode** result) { if(n->elem) *result = n; else *result = radix_prev(n); return 0; } int radix_find_less_equal(struct radtree* rt, uint8_t* k, uint16_t len, struct radnode** result) { struct radnode* n = rt->root; uint16_t pos = 0; uint8_t byte; int r; if(!n) { /* empty tree */ *result = NULL; return 0; } while(pos < len) { byte = k[pos]; if(byte < n->offset) { /* so the previous is the element itself */ /* or something before this element */ return ret_self_or_prev(n, result); } byte -= n->offset; if(byte >= n->len) { /* so, the previous is the last of array, or itself */ /* or something before this element */ if((*result=radnode_last_in_subtree_incl_self(n))==0) *result = radix_prev(n); return 0; } pos++; if(!n->array[byte].node) { /* no match */ /* Find an entry in arrays from byte-1 to 0 */ *result = radnode_find_prev_from_idx(n, byte); if(*result) return 0; /* this entry or something before it */ return ret_self_or_prev(n, result); } if(n->array[byte].len != 0) { /* must match additional string */ if(pos+n->array[byte].len > len) { /* the additional string is longer than key*/ if( (memcmp(&k[pos], n->array[byte].str, len-pos)) <= 0) { /* and the key is before this node */ *result = radix_prev(n->array[byte].node); } else { /* the key is after the additional * string, thus everything 
in that * subtree is smaller. */ *result=radnode_last_in_subtree_incl_self(n->array[byte].node); /* if somehow that is NULL, * then we have an inefficient tree: * byte+1 is larger than us, so find * something in byte-1 and before */ if(!*result) *result = radix_prev(n->array[byte].node); } return 0; /* no match */ } if( (r=memcmp(&k[pos], n->array[byte].str, n->array[byte].len)) < 0) { *result = radix_prev(n->array[byte].node); return 0; /* no match */ } else if(r > 0) { /* the key is larger than the additional * string, thus everything in that subtree * is smaller */ *result=radnode_last_in_subtree_incl_self(n->array[byte].node); /* if we have an inefficient tree */ if(!*result) *result = radix_prev(n->array[byte].node); return 0; /* no match */ } pos += n->array[byte].len; } n = n->array[byte].node; } if(n->elem) { /* exact match */ *result = n; return 1; } /* there is a node which is an exact match, but it has no element */ *result = radix_prev(n); return 0; } struct radnode* radix_first(struct radtree* rt) { struct radnode* n; if(!rt || !rt->root) return NULL; n = rt->root; if(n->elem) return n; return radix_next(n); } struct radnode* radix_last(struct radtree* rt) { if(!rt || !rt->root) return NULL; return radnode_last_in_subtree_incl_self(rt->root); } struct radnode* radix_next(struct radnode* n) { if(!n) return NULL; if(n->len) { /* go down */ struct radnode* s = radnode_first_in_subtree(n); if(s) return s; } /* go up - the parent->elem is not useful, because it is before us */ while(n->parent) { unsigned idx = n->pidx; n = n->parent; idx++; for(; idx < n->len; idx++) { /* go down the next branch */ if(n->array[idx].node) { struct radnode* s; /* node itself */ if(n->array[idx].node->elem) return n->array[idx].node; /* or subtree */ s = radnode_first_in_subtree( n->array[idx].node); if(s) return s; } } } return NULL; } struct radnode* radix_prev(struct radnode* n) { if(!n) return NULL; /* must go up, since all array nodes are after this node */ 
while(n->parent) { uint8_t idx = n->pidx; struct radnode* s; n = n->parent; assert(n->len > 0); /* since we are a child */ /* see if there are elements in previous branches there */ s = radnode_find_prev_from_idx(n, idx); if(s) return s; /* the current node is before the array */ if(n->elem) return n; } return NULL; } /** convert one character from domain-name to radname */ static uint8_t char_d2r(uint8_t c) { if(c < 'A') return c+1; /* make space for 00 */ else if(c <= 'Z') return c-'A'+'a'; /* lowercase */ else return c; } /** convert one character from radname to domain-name (still lowercased) */ static uint8_t char_r2d(uint8_t c) { assert(c != 0); /* end of label */ if(c <= 'A') return c-1; else return c; } /** copy and convert a range of characters */ static void cpy_d2r(uint8_t* to, const uint8_t* from, int len) { int i; for(i=0; i<len; i++) to[i] = char_d2r(from[i]); } /** copy and convert a range of characters */ static void cpy_r2d(uint8_t* to, uint8_t* from, uint8_t len) { uint8_t i; for(i=0; i<len; i++) to[i] = char_r2d(from[i]); } /* radname code: domain to radix-bstring */ void radomain_name_d2r(uint8_t* k, uint16_t* len, const uint8_t* dname, size_t dlen) { /* the domain name is converted as follows, * to preserve the normal (NSEC) ordering of domain names. * lowercased, and 'end-of-label' is a '00' byte, * bytes 00-'A' are +1 moved to make space for 00 byte. * final root label is not appended (string ends). * because the only allowed empty label is the final root label, * we can also remove the last 00 label-end. * The total result length is one-or-two less than the dname. 
*
	 * examples (numbers are bytes, letters are ascii):
	 * - root: dname: 0, radname: ''
	 * - nl.: dname: 3nl0, radname: 'nl'
	 * - labs.nl: dname 4labs3nl0, radname: 'nl0labs'
	 * - x.labs.nl: dname 1x4labs3nl0, radname: 'nl0labs0x'
	 */
	/* conversion by putting the label starts on a stack */
	const uint8_t* labstart[130]; /* fits: dlen<=256 allows <=128 labels */
	unsigned int lab = 0, kpos, dpos = 0;
	/* sufficient space */
	assert(k && dname);
	assert(dlen <= 256); /* and therefore not more than 128 labels */
	assert(*len >= dlen);
	assert(dlen > 0); /* even root label has dlen=1 */
	/* root */
	if(dlen == 1) {
		assert(dname[0] == 0);
		*len = 0;
		return;
	}
	/* walk through domain name and remember label positions */
	do {
		/* compression pointers not allowed */
		if((dname[dpos] & 0xc0)) {
			*len = 0;
			return; /* format error */
		}
		labstart[lab++] = &dname[dpos];
		if(dpos + dname[dpos] + 1 >= dlen) {
			*len = 0;
			return; /* format error */
		}
		/* skip the label contents */
		dpos += dname[dpos];
		dpos ++;
	} while(dname[dpos] != 0);
	/* exit condition makes root label not in labelstart stack */
	/* because the root was handled before, we know there is some text */
	assert(lab > 0);
	lab-=1;
	/* the last label (closest to the root) goes first in the radname,
	 * see the examples above */
	kpos = *labstart[lab];
	cpy_d2r(k, labstart[lab]+1, kpos);
	/* if there are more labels, copy them over */
	while(lab) {
		/* put 'end-of-label' 00 to end previous label */
		k[kpos++]=0;
		/* append the label */
		lab--;
		cpy_d2r(k+kpos, labstart[lab]+1, *labstart[lab]);
		kpos += *labstart[lab];
	}
	/* done */
	assert(kpos == dlen-2); /* no rootlabel, one less label-marker */
	*len = kpos;
}

/* radname code: radix-bstring to domain.
 * writes the (lowercased) wire-format name into dname and sets *dlen;
 * the caller must supply at least len+2 bytes of room (asserted). */
void radomain_name_r2d(uint8_t* k, uint16_t len, uint8_t* dname, size_t* dlen)
{
	/* find labels and push on stack */
	uint8_t* labstart[130];
	uint8_t lablen[130];
	unsigned int lab = 0, dpos, kpos = 0;
	/* sufficient space */
	assert(k && dname);
	assert((size_t)*dlen >= (size_t)len+2);
	assert(len <= 256);
	/* root label */
	if(len == 0) {
		assert(*dlen > 0);
		dname[0]=0;
		*dlen=1;
		return;
	}
	/* find labels */
	while(kpos < len) {
		lablen[lab]=0;
		labstart[lab]=&k[kpos];
		/* skip to next label */
		while(kpos < len && k[kpos] != 0) {
			lablen[lab]++;
			kpos++;
		}
		lab++;
		/* skip 00 byte for label-end */
		if(kpos < len) {
			assert(k[kpos] == 0);
			kpos++;
		}
	}
	/* copy the labels over to the domain name */
	dpos = 0;
	while(lab) {
		lab--;
		/* label length */
		dname[dpos++] = lablen[lab];
		/* label content */
		cpy_r2d(dname+dpos, labstart[lab], lablen[lab]);
		dpos += lablen[lab];
	}
	/* append root label */
	dname[dpos++] = 0;
	/* assert the domain name is wellformed */
	assert((int)dpos == (int)len+2);
	assert(dname[dpos-1] == 0); /* ends with root label */
	*dlen = dpos;
}

/** insert by domain name.
 * NOTE(review): radomain_name_d2r reports a malformed dname by setting
 * the length to 0, which is the same encoding as the root name; a
 * malformed name would therefore be inserted at the root node --
 * confirm that callers only pass validated wire-format names. */
struct radnode* radomain_name_insert(struct radtree* rt, const uint8_t* d,
	size_t max, void* elem)
{
	/* convert and insert */
	uint8_t radname[300];
	uint16_t len = (uint16_t)sizeof(radname);
	if(max > sizeof(radname))
		return NULL; /* too long */
	radomain_name_d2r(radname, &len, d, max);
	return radix_insert(rt, radname, len, elem);
}

/** delete by domain name; a no-op if the name is not in the tree */
void radomain_name_delete(struct radtree* rt, const uint8_t* d, size_t max)
{
	/* search and remove */
	struct radnode* n = radomain_name_search(rt, d, max);
	if(n)
		radix_delete(rt, n);
}

/* search for exact match of domain name, converted to radname in tree.
 * converts on the fly (no temporary radname buffer); returns the node
 * that holds an element, or NULL if the name is not present.
 * NOTE(review): dereferences rt->root without a NULL-rt check, unlike
 * radix_first/radix_last -- confirm callers always pass a tree. */
struct radnode* radomain_name_search(struct radtree* rt, const uint8_t* d,
	size_t max)
{
	/* stack of labels in the domain name */
	const uint8_t* labstart[130];
	unsigned int lab, dpos, lpos;
	struct radnode* n = rt->root;
	uint8_t byte;
	uint16_t i;
	uint8_t b;
	/* search for root?
it is '' */
	if(max < 1)
		return NULL;
	if(d[0] == 0) {
		if(!n) return NULL;
		return n->elem?n:NULL;
	}
	/* find labels stack in domain name */
	lab = 0;
	dpos = 0;
	/* must have one label, since root is specialcased */
	do {
		if((d[dpos] & 0xc0))
			return NULL; /* compression ptrs not allowed error */
		labstart[lab++] = &d[dpos];
		if(dpos + d[dpos] + 1 >= max)
			return NULL; /* format error: outside of bounds */
		/* skip the label contents */
		dpos += d[dpos];
		dpos ++;
	} while(d[dpos] != 0);
	/* exit condition makes that root label is not in the labstarts */
	/* now: dpos+1 is length of domain name. lab is number of labels-1 */
	/* start processing at the last label */
	lab-=1;
	lpos = 0;
	while(n) {
		/* fetch next byte this label */
		if(lpos < *labstart[lab])
			/* lpos+1 to skip labelstart, lpos++ to move forward */
			byte = char_d2r(labstart[lab][++lpos]);
		else {
			if(lab == 0)
				/* last label - we're done */
				return n->elem?n:NULL;
			/* next label, search for byte 00 */
			lpos = 0;
			lab--;
			byte = 0;
		}
		/* find that byte in the array */
		if(byte < n->offset)
			return NULL;
		byte -= n->offset;
		if(byte >= n->len)
			return NULL;
		if(n->array[byte].len != 0) {
			/* must match additional string */
			/* see how many bytes we need and start matching them*/
			for(i=0; i<n->array[byte].len; i++) {
				/* next byte to match */
				if(lpos < *labstart[lab])
					b = char_d2r(labstart[lab][++lpos]);
				else {
					/* if last label, no match since
					 * we are in the additional string */
					if(lab == 0)
						return NULL;
					/* next label, search for byte 00 */
					lpos = 0;
					lab--;
					b = 0;
				}
				if(n->array[byte].str[i] != b)
					return NULL; /* not matched */
			}
		}
		n = n->array[byte].node;
	}
	return NULL;
}

/* find domain name or smaller or equal domain name in radix tree.
 * converts the wire-format dname to radname bytes on the fly while
 * walking the tree (no temporary buffer).
 * @param rt: the tree.
 * @param d: domain name in wire format.
 * @param max: number of bytes of d that may be examined.
 * @param result: set to the exact-match node, to the closest smaller
 *	element, or to NULL if no element is smaller.
 * @return 1 on exact match (result holds the element), 0 otherwise. */
int radomain_name_find_less_equal(struct radtree* rt, const uint8_t* d,
	size_t max, struct radnode** result)
{
	/* stack of labels in the domain name */
	const uint8_t* labstart[130];
	unsigned int lab, dpos, lpos;
	struct radnode* n = rt->root;
	uint8_t byte;
	uint16_t i;
	uint8_t b;
	/* empty tree */
	if(!n) {
		*result = NULL;
		return 0;
	}
	/* search for root? it is '' */
	if(max < 1) {
		*result = NULL;
		return 0; /* parse error, out of bounds */
	}
	if(d[0] == 0) {
		if(n->elem) {
			*result = n;
			return 1;
		}
		/* no smaller element than the root */
		*result = NULL;
		return 0;
	}
	/* find labels stack in domain name */
	lab = 0;
	dpos = 0;
	/* must have one label, since root is specialcased */
	do {
		if((d[dpos] & 0xc0)) {
			*result = NULL;
			return 0; /* compression ptrs not allowed error */
		}
		labstart[lab++] = &d[dpos];
		if(dpos + d[dpos] + 1 >= max) {
			*result = NULL; /* format error: outside of bounds */
			return 0;
		}
		/* skip the label contents */
		dpos += d[dpos];
		dpos ++;
	} while(d[dpos] != 0);
	/* exit condition makes that root label is not in the labstarts */
	/* now: dpos+1 is length of domain name. lab is number of labels-1 */
	/* start processing at the last label */
	lab-=1;
	lpos = 0;
	while(1) {
		/* fetch next byte this label */
		if(lpos < *labstart[lab])
			/* lpos+1 to skip labelstart, lpos++ to move forward */
			byte = char_d2r(labstart[lab][++lpos]);
		else {
			if(lab == 0) {
				/* last label - we're done */
				/* exact match */
				if(n->elem) {
					*result = n;
					return 1;
				}
				/* there is a node which is an exact match,
				 * but there no element in it */
				*result = radix_prev(n);
				return 0;
			}
			/* next label, search for byte 0 the label separator */
			lpos = 0;
			lab--;
			byte = 0;
		}
		/* find that byte in the array */
		if(byte < n->offset)
			/* so the previous is the element itself */
			/* or something before this element */
			return ret_self_or_prev(n, result);
		byte -= n->offset;
		if(byte >= n->len) {
			/* so, the previous is the last of array, or itself */
			/* or something before this element */
			*result = radnode_last_in_subtree_incl_self(n);
			if(!*result)
				*result = radix_prev(n);
			return 0;
		}
		if(!n->array[byte].node) {
			/* no match */
			/* Find an entry in arrays from byte-1 to 0 */
			*result = radnode_find_prev_from_idx(n, byte);
			if(*result)
				return 0;
			/* this entry or something before it */
			return ret_self_or_prev(n, result);
		}
		if(n->array[byte].len != 0) {
			/* must match additional string */
			/* see how many bytes we need and start matching them*/
			for(i=0; i<n->array[byte].len; i++) {
				/* next byte to match */
				if(lpos < *labstart[lab])
					b = char_d2r(labstart[lab][++lpos]);
				else {
					/* if last label, no match since
					 * we are in the additional string */
					if(lab == 0) {
						/* dname ended, thus before
						 * this array element */
						*result =radix_prev(
							n->array[byte].node);
						return 0;
					}
					/* next label, search for byte 00 */
					lpos = 0;
					lab--;
					b = 0;
				}
				if(b < n->array[byte].str[i]) {
					*result =radix_prev(
						n->array[byte].node);
					return 0;
				} else if(b > n->array[byte].str[i]) {
					/* the key is after the additional,
					 * so everything in its subtree is
					 * smaller */
					*result = radnode_last_in_subtree_incl_self(n->array[byte].node);
					/* if that is NULL, we have an
					 * inefficient tree, find in byte-1*/
					if(!*result)
						*result = radix_prev(n->array[byte].node);
					return 0;
				}
			}
		}
		n = n->array[byte].node;
	}
	/* ENOTREACH */
	return 0;
}
vicharl/containerdns
kdns/deps/libmicrohttpd/src/examples/demo_https.c
<filename>kdns/deps/libmicrohttpd/src/examples/demo_https.c /* This file is part of libmicrohttpd Copyright (C) 2013 <NAME> (and other contributing authors) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file demo_https.c * @brief complex demonstration site: create directory index, offer * upload via form and HTTP POST, download with mime type detection * and error reporting (403, etc.) --- and all of this with * high-performance settings (large buffers, thread pool). * If you want to benchmark MHD, this code should be used to * run tests against. Note that the number of threads may need * to be adjusted depending on the number of available cores. * Logic is identical to demo.c, just adds HTTPS support. * @author <NAME> */ #include "platform.h" #include <microhttpd.h> #include <unistd.h> #include <pthread.h> #include <sys/types.h> #include <sys/stat.h> #include <dirent.h> #ifdef MHD_HAVE_LIBMAGIC #include <magic.h> #endif /* MHD_HAVE_LIBMAGIC */ #include <limits.h> #include <ctype.h> #if defined(CPU_COUNT) && (CPU_COUNT+0) < 2 #undef CPU_COUNT #endif #if !defined(CPU_COUNT) #define CPU_COUNT 2 #endif /** * Number of threads to run in the thread pool. Should (roughly) match * the number of cores on your system. 
*/ #define NUMBER_OF_THREADS CPU_COUNT #ifdef MHD_HAVE_LIBMAGIC /** * How many bytes of a file do we give to libmagic to determine the mime type? * 16k might be a bit excessive, but ought not hurt performance much anyway, * and should definitively be on the safe side. */ #define MAGIC_HEADER_SIZE (16 * 1024) #endif /* MHD_HAVE_LIBMAGIC */ /** * Page returned for file-not-found. */ #define FILE_NOT_FOUND_PAGE "<html><head><title>File not found</title></head><body>File not found</body></html>" /** * Page returned for internal errors. */ #define INTERNAL_ERROR_PAGE "<html><head><title>Internal error</title></head><body>Internal error</body></html>" /** * Page returned for refused requests. */ #define REQUEST_REFUSED_PAGE "<html><head><title>Request refused</title></head><body>Request refused (file exists?)</body></html>" /** * Head of index page. */ #define INDEX_PAGE_HEADER "<html>\n<head><title>Welcome</title></head>\n<body>\n"\ "<h1>Upload</h1>\n"\ "<form method=\"POST\" enctype=\"multipart/form-data\" action=\"/\">\n"\ "<dl><dt>Content type:</dt><dd>"\ "<input type=\"radio\" name=\"category\" value=\"books\">Book</input>"\ "<input type=\"radio\" name=\"category\" value=\"images\">Image</input>"\ "<input type=\"radio\" name=\"category\" value=\"music\">Music</input>"\ "<input type=\"radio\" name=\"category\" value=\"software\">Software</input>"\ "<input type=\"radio\" name=\"category\" value=\"videos\">Videos</input>\n"\ "<input type=\"radio\" name=\"category\" value=\"other\" checked>Other</input></dd>"\ "<dt>Language:</dt><dd>"\ "<input type=\"radio\" name=\"language\" value=\"no-lang\" checked>none</input>"\ "<input type=\"radio\" name=\"language\" value=\"en\">English</input>"\ "<input type=\"radio\" name=\"language\" value=\"de\">German</input>"\ "<input type=\"radio\" name=\"language\" value=\"fr\">French</input>"\ "<input type=\"radio\" name=\"language\" value=\"es\">Spanish</input></dd>\n"\ "<dt>File:</dt><dd>"\ "<input type=\"file\" 
name=\"upload\"/></dd></dl>"\ "<input type=\"submit\" value=\"Send!\"/>\n"\ "</form>\n"\ "<h1>Download</h1>\n"\ "<ol>\n" /** * Footer of index page. */ #define INDEX_PAGE_FOOTER "</ol>\n</body>\n</html>" /** * NULL-terminated array of supported upload categories. Should match HTML * in the form. */ static const char * const categories[] = { "books", "images", "music", "software", "videos", "other", NULL, }; /** * Specification of a supported language. */ struct Language { /** * Directory name for the language. */ const char *dirname; /** * Long name for humans. */ const char *longname; }; /** * NULL-terminated array of supported upload categories. Should match HTML * in the form. */ static const struct Language languages[] = { { "no-lang", "No language specified" }, { "en", "English" }, { "de", "German" }, { "fr", "French" }, { "es", "Spanish" }, { NULL, NULL }, }; /** * Response returned if the requested file does not exist (or is not accessible). */ static struct MHD_Response *file_not_found_response; /** * Response returned for internal errors. */ static struct MHD_Response *internal_error_response; /** * Response returned for '/' (GET) to list the contents of the directory and allow upload. */ static struct MHD_Response *cached_directory_response; /** * Response returned for refused uploads. */ static struct MHD_Response *request_refused_response; /** * Mutex used when we update the cached directory response object. */ static pthread_mutex_t mutex; #ifdef MHD_HAVE_LIBMAGIC /** * Global handle to MAGIC data. */ static magic_t magic; #endif /* MHD_HAVE_LIBMAGIC */ /** * Mark the given response as HTML for the brower. * * @param response response to mark */ static void mark_as_html (struct MHD_Response *response) { (void) MHD_add_response_header (response, MHD_HTTP_HEADER_CONTENT_TYPE, "text/html"); } /** * Replace the existing 'cached_directory_response' with the * given response. 
* * @param response new directory response */ static void update_cached_response (struct MHD_Response *response) { (void) pthread_mutex_lock (&mutex); if (NULL != cached_directory_response) MHD_destroy_response (cached_directory_response); cached_directory_response = response; (void) pthread_mutex_unlock (&mutex); } /** * Context keeping the data for the response we're building. */ struct ResponseDataContext { /** * Response data string. */ char *buf; /** * Number of bytes allocated for 'buf'. */ size_t buf_len; /** * Current position where we append to 'buf'. Must be smaller or equal to 'buf_len'. */ size_t off; }; /** * Create a listing of the files in 'dirname' in HTML. * * @param rdc where to store the list of files * @param dirname name of the directory to list * @return MHD_YES on success, MHD_NO on error */ static int list_directory (struct ResponseDataContext *rdc, const char *dirname) { char fullname[PATH_MAX]; struct stat sbuf; DIR *dir; struct dirent *de; if (NULL == (dir = opendir (dirname))) return MHD_NO; while (NULL != (de = readdir (dir))) { if ('.' == de->d_name[0]) continue; if (sizeof (fullname) <= (size_t) snprintf (fullname, sizeof (fullname), "%s/%s", dirname, de->d_name)) continue; /* ugh, file too long? how can this be!? */ if (0 != stat (fullname, &sbuf)) continue; /* ugh, failed to 'stat' */ if (! S_ISREG (sbuf.st_mode)) continue; /* not a regular file, skip */ if (rdc->off + 1024 > rdc->buf_len) { void *r; if ( (2 * rdc->buf_len + 1024) < rdc->buf_len) break; /* more than SIZE_T _index_ size? Too big for us */ rdc->buf_len = 2 * rdc->buf_len + 1024; if (NULL == (r = realloc (rdc->buf, rdc->buf_len))) break; /* out of memory */ rdc->buf = r; } rdc->off += snprintf (&rdc->buf[rdc->off], rdc->buf_len - rdc->off, "<li><a href=\"/%s\">%s</a></li>\n", fullname, de->d_name); } (void) closedir (dir); return MHD_YES; } /** * Re-scan our local directory and re-build the index. 
*/ static void update_directory () { static size_t initial_allocation = 32 * 1024; /* initial size for response buffer */ struct MHD_Response *response; struct ResponseDataContext rdc; unsigned int language_idx; unsigned int category_idx; const struct Language *language; const char *category; char dir_name[128]; struct stat sbuf; rdc.buf_len = initial_allocation; if (NULL == (rdc.buf = malloc (rdc.buf_len))) { update_cached_response (NULL); return; } rdc.off = snprintf (rdc.buf, rdc.buf_len, "%s", INDEX_PAGE_HEADER); for (language_idx = 0; NULL != languages[language_idx].dirname; language_idx++) { language = &languages[language_idx]; if (0 != stat (language->dirname, &sbuf)) continue; /* empty */ /* we ensured always +1k room, filenames are ~256 bytes, so there is always still enough space for the header without need for an additional reallocation check. */ rdc.off += snprintf (&rdc.buf[rdc.off], rdc.buf_len - rdc.off, "<h2>%s</h2>\n", language->longname); for (category_idx = 0; NULL != categories[category_idx]; category_idx++) { category = categories[category_idx]; snprintf (dir_name, sizeof (dir_name), "%s/%s", language->dirname, category); if (0 != stat (dir_name, &sbuf)) continue; /* empty */ /* we ensured always +1k room, filenames are ~256 bytes, so there is always still enough space for the header without need for an additional reallocation check. */ rdc.off += snprintf (&rdc.buf[rdc.off], rdc.buf_len - rdc.off, "<h3>%s</h3>\n", category); if (MHD_NO == list_directory (&rdc, dir_name)) { free (rdc.buf); update_cached_response (NULL); return; } } } /* we ensured always +1k room, filenames are ~256 bytes, so there is always still enough space for the footer without need for a final reallocation check. 
*/ rdc.off += snprintf (&rdc.buf[rdc.off], rdc.buf_len - rdc.off, "%s", INDEX_PAGE_FOOTER); initial_allocation = rdc.buf_len; /* remember for next time */ response = MHD_create_response_from_buffer (rdc.off, rdc.buf, MHD_RESPMEM_MUST_FREE); mark_as_html (response); #if FORCE_CLOSE (void) MHD_add_response_header (response, MHD_HTTP_HEADER_CONNECTION, "close"); #endif update_cached_response (response); } /** * Context we keep for an upload. */ struct UploadContext { /** * Handle where we write the uploaded file to. */ int fd; /** * Name of the file on disk (used to remove on errors). */ char *filename; /** * Language for the upload. */ char *language; /** * Category for the upload. */ char *category; /** * Post processor we're using to process the upload. */ struct MHD_PostProcessor *pp; /** * Handle to connection that we're processing the upload for. */ struct MHD_Connection *connection; /** * Response to generate, NULL to use directory. */ struct MHD_Response *response; }; /** * Append the 'size' bytes from 'data' to '*ret', adding * 0-termination. If '*ret' is NULL, allocate an empty string first. * * @param ret string to update, NULL or 0-terminated * @param data data to append * @param size number of bytes in 'data' * @return MHD_NO on allocation failure, MHD_YES on success */ static int do_append (char **ret, const char *data, size_t size) { char *buf; size_t old_len; if (NULL == *ret) old_len = 0; else old_len = strlen (*ret); buf = malloc (old_len + size + 1); if (NULL == buf) return MHD_NO; memcpy (buf, *ret, old_len); if (NULL != *ret) free (*ret); memcpy (&buf[old_len], data, size); buf[old_len + size] = '\0'; *ret = buf; return MHD_YES; } /** * Iterator over key-value pairs where the value * maybe made available in increments and/or may * not be zero-terminated. Used for processing * POST data. 
* * @param cls user-specified closure * @param kind type of the value, always MHD_POSTDATA_KIND when called from MHD * @param key 0-terminated key for the value * @param filename name of the uploaded file, NULL if not known * @param content_type mime-type of the data, NULL if not known * @param transfer_encoding encoding of the data, NULL if not known * @param data pointer to size bytes of data at the * specified offset * @param off offset of data in the overall value * @param size number of bytes in data available * @return MHD_YES to continue iterating, * MHD_NO to abort the iteration */ static int process_upload_data (void *cls, enum MHD_ValueKind kind, const char *key, const char *filename, const char *content_type, const char *transfer_encoding, const char *data, uint64_t off, size_t size) { struct UploadContext *uc = cls; int i; (void)kind; /* Unused. Silent compiler warning. */ (void)content_type; /* Unused. Silent compiler warning. */ (void)transfer_encoding; /* Unused. Silent compiler warning. */ (void)off; /* Unused. Silent compiler warning. 
*/ if (0 == strcmp (key, "category")) return do_append (&uc->category, data, size); if (0 == strcmp (key, "language")) return do_append (&uc->language, data, size); if (0 != strcmp (key, "upload")) { fprintf (stderr, "Ignoring unexpected form value `%s'\n", key); return MHD_YES; /* ignore */ } if (NULL == filename) { fprintf (stderr, "No filename, aborting upload\n"); return MHD_NO; /* no filename, error */ } if ( (NULL == uc->category) || (NULL == uc->language) ) { fprintf (stderr, "Missing form data for upload `%s'\n", filename); uc->response = request_refused_response; return MHD_NO; } if (-1 == uc->fd) { char fn[PATH_MAX]; if ( (NULL != strstr (filename, "..")) || (NULL != strchr (filename, '/')) || (NULL != strchr (filename, '\\')) ) { uc->response = request_refused_response; return MHD_NO; } /* create directories -- if they don't exist already */ #ifdef WINDOWS (void) mkdir (uc->language); #else (void) mkdir (uc->language, S_IRWXU); #endif snprintf (fn, sizeof (fn), "%s/%s", uc->language, uc->category); #ifdef WINDOWS (void) mkdir (fn); #else (void) mkdir (fn, S_IRWXU); #endif /* open file */ snprintf (fn, sizeof (fn), "%s/%s/%s", uc->language, uc->category, filename); for (i=strlen (fn)-1;i>=0;i--) if (! 
isprint ((int) fn[i])) fn[i] = '_'; uc->fd = open (fn, O_CREAT | O_EXCL #if O_LARGEFILE | O_LARGEFILE #endif | O_WRONLY, S_IRUSR | S_IWUSR); if (-1 == uc->fd) { fprintf (stderr, "Error opening file `%s' for upload: %s\n", fn, strerror (errno)); uc->response = request_refused_response; return MHD_NO; } uc->filename = strdup (fn); } if ( (0 != size) && (size != (size_t) write (uc->fd, data, size)) ) { /* write failed; likely: disk full */ fprintf (stderr, "Error writing to file `%s': %s\n", uc->filename, strerror (errno)); uc->response = internal_error_response; close (uc->fd); uc->fd = -1; if (NULL != uc->filename) { unlink (uc->filename); free (uc->filename); uc->filename = NULL; } return MHD_NO; } return MHD_YES; } /** * Function called whenever a request was completed. * Used to clean up 'struct UploadContext' objects. * * @param cls client-defined closure, NULL * @param connection connection handle * @param con_cls value as set by the last call to * the MHD_AccessHandlerCallback, points to NULL if this was * not an upload * @param toe reason for request termination */ static void response_completed_callback (void *cls, struct MHD_Connection *connection, void **con_cls, enum MHD_RequestTerminationCode toe) { struct UploadContext *uc = *con_cls; (void)cls; /* Unused. Silent compiler warning. */ (void)connection; /* Unused. Silent compiler warning. */ (void)toe; /* Unused. Silent compiler warning. */ if (NULL == uc) return; /* this request wasn't an upload request */ if (NULL != uc->pp) { MHD_destroy_post_processor (uc->pp); uc->pp = NULL; } if (-1 != uc->fd) { (void) close (uc->fd); if (NULL != uc->filename) { fprintf (stderr, "Upload of file `%s' failed (incomplete or aborted), removing file.\n", uc->filename); (void) unlink (uc->filename); } } if (NULL != uc->filename) free (uc->filename); free (uc); } /** * Return the current directory listing. 
* * @param connection connection to return the directory for * @return MHD_YES on success, MHD_NO on error */ static int return_directory_response (struct MHD_Connection *connection) { int ret; (void) pthread_mutex_lock (&mutex); if (NULL == cached_directory_response) ret = MHD_queue_response (connection, MHD_HTTP_INTERNAL_SERVER_ERROR, internal_error_response); else ret = MHD_queue_response (connection, MHD_HTTP_OK, cached_directory_response); (void) pthread_mutex_unlock (&mutex); return ret; } /** * Main callback from MHD, used to generate the page. * * @param cls NULL * @param connection connection handle * @param url requested URL * @param method GET, PUT, POST, etc. * @param version HTTP version * @param upload_data data from upload (PUT/POST) * @param upload_data_size number of bytes in "upload_data" * @param ptr our context * @return #MHD_YES on success, #MHD_NO to drop connection */ static int generate_page (void *cls, struct MHD_Connection *connection, const char *url, const char *method, const char *version, const char *upload_data, size_t *upload_data_size, void **ptr) { struct MHD_Response *response; int ret; int fd; struct stat buf; (void)cls; /* Unused. Silent compiler warning. */ (void)version; /* Unused. Silent compiler warning. */ if (0 != strcmp (url, "/")) { /* should be file download */ #ifdef MHD_HAVE_LIBMAGIC char file_data[MAGIC_HEADER_SIZE]; ssize_t got; #endif /* MHD_HAVE_LIBMAGIC */ const char *mime; if (0 != strcmp (method, MHD_HTTP_METHOD_GET)) return MHD_NO; /* unexpected method (we're not polite...) */ fd = -1; if ( (NULL == strstr (&url[1], "..")) && ('/' != url[1]) ) { fd = open (&url[1], O_RDONLY); if ( (-1 != fd) && ( (0 != fstat (fd, &buf)) || (! 
S_ISREG (buf.st_mode)) ) ) { (void) close (fd); fd = -1; } } if (-1 == fd) return MHD_queue_response (connection, MHD_HTTP_NOT_FOUND, file_not_found_response); #ifdef MHD_HAVE_LIBMAGIC /* read beginning of the file to determine mime type */ got = read (fd, file_data, sizeof (file_data)); (void) lseek (fd, 0, SEEK_SET); if (-1 != got) mime = magic_buffer (magic, file_data, got); else #endif /* MHD_HAVE_LIBMAGIC */ mime = NULL; if (NULL == (response = MHD_create_response_from_fd (buf.st_size, fd))) { /* internal error (i.e. out of memory) */ (void) close (fd); return MHD_NO; } /* add mime type if we had one */ if (NULL != mime) (void) MHD_add_response_header (response, MHD_HTTP_HEADER_CONTENT_TYPE, mime); ret = MHD_queue_response (connection, MHD_HTTP_OK, response); MHD_destroy_response (response); return ret; } if (0 == strcmp (method, MHD_HTTP_METHOD_POST)) { /* upload! */ struct UploadContext *uc = *ptr; if (NULL == uc) { if (NULL == (uc = malloc (sizeof (struct UploadContext)))) return MHD_NO; /* out of memory, close connection */ memset (uc, 0, sizeof (struct UploadContext)); uc->fd = -1; uc->connection = connection; uc->pp = MHD_create_post_processor (connection, 64 * 1024 /* buffer size */, &process_upload_data, uc); if (NULL == uc->pp) { /* out of memory, close connection */ free (uc); return MHD_NO; } *ptr = uc; return MHD_YES; } if (0 != *upload_data_size) { if (NULL == uc->response) (void) MHD_post_process (uc->pp, upload_data, *upload_data_size); *upload_data_size = 0; return MHD_YES; } /* end of upload, finish it! 
*/ MHD_destroy_post_processor (uc->pp); uc->pp = NULL; if (-1 != uc->fd) { close (uc->fd); uc->fd = -1; } if (NULL != uc->response) { return MHD_queue_response (connection, MHD_HTTP_FORBIDDEN, uc->response); } else { update_directory (); return return_directory_response (connection); } } if (0 == strcmp (method, MHD_HTTP_METHOD_GET)) { return return_directory_response (connection); } /* unexpected request, refuse */ return MHD_queue_response (connection, MHD_HTTP_FORBIDDEN, request_refused_response); } #ifndef MINGW /** * Function called if we get a SIGPIPE. Does nothing. * * @param sig will be SIGPIPE (ignored) */ static void catcher (int sig) { (void)sig; /* Unused. Silent compiler warning. */ /* do nothing */ } /** * setup handlers to ignore SIGPIPE. */ static void ignore_sigpipe (void) { struct sigaction oldsig; struct sigaction sig; sig.sa_handler = &catcher; sigemptyset (&sig.sa_mask); #ifdef SA_INTERRUPT sig.sa_flags = SA_INTERRUPT; /* SunOS */ #else sig.sa_flags = SA_RESTART; #endif if (0 != sigaction (SIGPIPE, &sig, &oldsig)) fprintf (stderr, "Failed to install SIGPIPE handler: %s\n", strerror (errno)); } #endif /* test server key */ const char srv_signed_key_pem[] = "-----BEGIN RSA PRIVATE KEY-----\n" "<KEY>" <KEY>" "<KEY>" "<KEY>" "<KEY>" "<KEY>" <KEY>" "<KEY>" "<KEY>" "bpeNWl2l/HSN3VtUN6aCAKbN/X3o0GavCCMn5Fa85uJFsab4ss/uP+2PusU71+zP\n" "sBm6p/2IbGvF5k3VPDA7X5YX61sukRjRBihY8xSnNYx1UcoOsX6AiPnbhifD8+xQ\n" "Tlf8oJUCgYEA0BTfzqNpr9Wxw5/QXaSdw7S/0eP5a0C/nwURvmfSzuTD4equzbEN\n" "d+dI/s2JMxrdj/I4uoAfUXRGaabevQIjFzC9uyE3LaOyR2zhuvAzX+vVcs6bSXeU\n" "<KEY>" "<KEY>" "<KEY>" "<KEY>" "2lMhEM8az/K58kJ4WXSwOLtr6MD/WjNT2tkcy0puEJLm6BFCd6A6pLn9jaKou/92\n" "SfltZjJPb3GUlp9zn5tAAeSSi7YMViBrfuFiHObij5LorefBXISLjuYbMwL03MgH\n" "Ocl2JtA2ywMp2KFXs8GQWQKBgFyIVv5ogQrbZ0pvj31xr9HjqK6d01VxIi+tOmpB\n" "4ocnOLEcaxX12BzprW55ytfOCVpF1jHD/imAhb3YrHXu0fwe6DXYXfZV4SSG2vB7\n" "IB9z14KBN5qLHjNGFpMQXHSMek+b/ftTU0ZnPh9uEM5D3YqRLVd7GcdUhHvG8P8Q\n" 
"C9aXAoGBAJtID6h8wOGMP0XYX5YYnhlC7dOLfk8UYrzlp3xhqVkzKthTQTj6wx9R\n" "GtC4k7U1ki8oJsfcIlBNXd768fqDVWjYju5rzShMpo8OCTS6ipAblKjCxPPVhIpv\n" "tWPlbSn1qj6wylstJ5/3Z+ZW5H4wIKp5jmLiioDhcP0L/Ex3Zx8O\n" "-----END RSA PRIVATE KEY-----\n"; /* test server CA signed certificates */ const char srv_signed_cert_pem[] = "-----BEGIN CERTIFICATE-----\n" "MIIDGzCCAgWgAwIBAgIES0KCvTALBgkqhkiG9w0BAQUwFzEVMBMGA1UEAxMMdGVz\n" "dF9jYV9jZXJ0MB4XDTEwMDEwNTAwMDcyNVoXDTQ1MDMxMjAwMDcyNVowFzEVMBMG\n" "A1UEAxMMdGVzdF9jYV9jZXJ0MIIBHzALBgkqhkiG9w0BAQEDggEOADCCAQkCggEA\n" "vfTdv+3fgvVTKRnP/HVNG81cr8TrUP/iiyuve/THMzvFXhCW+K03KwEku55QvnUn\n" "dwBfU/ROzLlv+5hotgiDRNFT3HxurmhouySBrJNJv7qWp8ILq4sw32vo0fbMu5BZ\n" "F49bUXK9L3kW2PdhTtSQPWHEzNrCxO+YgCilKHkY3vQNfdJ020Q5EAAEseD1YtWC\n" "IpRvJzYlZMpjYB1ubTl24kwrgOKUJYKqM4jmF4DVQp4oOK/6QYGGh1QmHRPAy3CB\n" "II6sbb+sZT9cAqU6GYQVB35lm4XAgibXV6KgmpVxVQQ69U6xyoOl204xuekZOaG9\n" "RUPId74Rtmwfi1TLbBzo2wIDAQABo3YwdDAMBgNVHRMBAf8EAjAAMBMGA1UdJQQM\n" "<KEY>" "<KEY>" "CSqGSIb3DQEBBQOCAQEAHVWPxazupbOkG7Did+dY9z2z6RjTzYvurTtEKQgzM2Vz\n" "GQBA+3pZ3c5mS97fPIs9hZXfnQeelMeZ2XP1a+9vp35bJjZBBhVH+pqxjCgiUflg\n" "A3Zqy0XwwVCgQLE2HyaU3DLUD/aeIFK5gJaOSdNTXZLv43K8kl4cqDbMeRpVTbkt\n" "YmG4AyEOYRNKGTqMEJXJoxD5E3rBUNrVI/XyTjYrulxbNPcMWEHKNeeqWpKDYTFo\n" "Bb01PCthGXiq/4A2RLAFosadzRa8SBpoSjPPfZ0b2w4MJpReHqKbR5+T2t6hzml6\n" "4ToyOKPDmamiTuN5KzLN3cw7DQlvWMvqSOChPLnA3Q==\n" "-----END CERTIFICATE-----\n"; /** * Entry point to demo. Note: this HTTP server will make all * files in the current directory and its subdirectories available * to anyone. Press ENTER to stop the server once it has started. 
* * @param argc number of arguments in argv * @param argv first and only argument should be the port number * @return 0 on success */ int main (int argc, char *const *argv) { struct MHD_Daemon *d; unsigned int port; if ( (argc != 2) || (1 != sscanf (argv[1], "%u", &port)) || (UINT16_MAX < port) ) { fprintf (stderr, "%s PORT\n", argv[0]); return 1; } #ifndef MINGW ignore_sigpipe (); #endif #ifdef MHD_HAVE_LIBMAGIC magic = magic_open (MAGIC_MIME_TYPE); (void) magic_load (magic, NULL); #endif /* MHD_HAVE_LIBMAGIC */ (void) pthread_mutex_init (&mutex, NULL); file_not_found_response = MHD_create_response_from_buffer (strlen (FILE_NOT_FOUND_PAGE), (void *) FILE_NOT_FOUND_PAGE, MHD_RESPMEM_PERSISTENT); mark_as_html (file_not_found_response); request_refused_response = MHD_create_response_from_buffer (strlen (REQUEST_REFUSED_PAGE), (void *) REQUEST_REFUSED_PAGE, MHD_RESPMEM_PERSISTENT); mark_as_html (request_refused_response); internal_error_response = MHD_create_response_from_buffer (strlen (INTERNAL_ERROR_PAGE), (void *) INTERNAL_ERROR_PAGE, MHD_RESPMEM_PERSISTENT); mark_as_html (internal_error_response); update_directory (); d = MHD_start_daemon (MHD_USE_AUTO | MHD_USE_INTERNAL_POLLING_THREAD | MHD_USE_ERROR_LOG | MHD_USE_TLS, port, NULL, NULL, &generate_page, NULL, MHD_OPTION_CONNECTION_MEMORY_LIMIT, (size_t) (256 * 1024), #if PRODUCTION MHD_OPTION_PER_IP_CONNECTION_LIMIT, (unsigned int) (64), #endif MHD_OPTION_CONNECTION_TIMEOUT, (unsigned int) (120 /* seconds */), MHD_OPTION_THREAD_POOL_SIZE, (unsigned int) NUMBER_OF_THREADS, MHD_OPTION_NOTIFY_COMPLETED, &response_completed_callback, NULL, MHD_OPTION_HTTPS_MEM_KEY, srv_signed_key_pem, MHD_OPTION_HTTPS_MEM_CERT, srv_signed_cert_pem, MHD_OPTION_END); if (NULL == d) return 1; fprintf (stderr, "HTTP server running. 
Press ENTER to stop the server\n"); (void) getc (stdin); MHD_stop_daemon (d); MHD_destroy_response (file_not_found_response); MHD_destroy_response (request_refused_response); MHD_destroy_response (internal_error_response); update_cached_response (NULL); (void) pthread_mutex_destroy (&mutex); #ifdef MHD_HAVE_LIBMAGIC magic_close (magic); #endif /* MHD_HAVE_LIBMAGIC */ return 0; } /* end of demo_https.c */
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/sfc/sfc_rx.c
/*- * Copyright (c) 2016 Solarflare Communications Inc. * All rights reserved. * * This software was jointly developed between OKTET Labs (under contract * for Solarflare) and Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <rte_mempool.h> #include "efx.h" #include "sfc.h" #include "sfc_debug.h" #include "sfc_log.h" #include "sfc_ev.h" #include "sfc_rx.h" #include "sfc_tweak.h" /* * Maximum number of Rx queue flush attempt in the case of failure or * flush timeout */ #define SFC_RX_QFLUSH_ATTEMPTS (3) /* * Time to wait between event queue polling attempts when waiting for Rx * queue flush done or failed events. 
*/ #define SFC_RX_QFLUSH_POLL_WAIT_MS (1) /* * Maximum number of event queue polling attempts when waiting for Rx queue * flush done or failed events. It defines Rx queue flush attempt timeout * together with SFC_RX_QFLUSH_POLL_WAIT_MS. */ #define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000) void sfc_rx_qflush_done(struct sfc_rxq *rxq) { rxq->state |= SFC_RXQ_FLUSHED; rxq->state &= ~SFC_RXQ_FLUSHING; } void sfc_rx_qflush_failed(struct sfc_rxq *rxq) { rxq->state |= SFC_RXQ_FLUSH_FAILED; rxq->state &= ~SFC_RXQ_FLUSHING; } static void sfc_rx_qrefill(struct sfc_rxq *rxq) { unsigned int free_space; unsigned int bulks; void *objs[SFC_RX_REFILL_BULK]; efsys_dma_addr_t addr[RTE_DIM(objs)]; unsigned int added = rxq->added; unsigned int id; unsigned int i; struct sfc_rx_sw_desc *rxd; struct rte_mbuf *m; uint8_t port_id = rxq->port_id; free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) - (added - rxq->completed); if (free_space < rxq->refill_threshold) return; bulks = free_space / RTE_DIM(objs); id = added & rxq->ptr_mask; while (bulks-- > 0) { if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs, RTE_DIM(objs)) < 0) { /* * It is hardly a safe way to increment counter * from different contexts, but all PMDs do it. 
*/ rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed += RTE_DIM(objs); break; } for (i = 0; i < RTE_DIM(objs); ++i, id = (id + 1) & rxq->ptr_mask) { m = objs[i]; rxd = &rxq->sw_desc[id]; rxd->mbuf = m; rte_mbuf_refcnt_set(m, 1); m->data_off = RTE_PKTMBUF_HEADROOM; m->next = NULL; m->nb_segs = 1; m->port = port_id; addr[i] = rte_pktmbuf_mtophys(m); } efx_rx_qpost(rxq->common, addr, rxq->buf_size, RTE_DIM(objs), rxq->completed, added); added += RTE_DIM(objs); } /* Push doorbell if something is posted */ if (rxq->added != added) { rxq->added = added; efx_rx_qpush(rxq->common, added, &rxq->pushed); } } static uint64_t sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags) { uint64_t mbuf_flags = 0; switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) { case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4): mbuf_flags |= PKT_RX_IP_CKSUM_GOOD; break; case EFX_PKT_IPV4: mbuf_flags |= PKT_RX_IP_CKSUM_BAD; break; default: RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0); SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_UNKNOWN); break; } switch ((desc_flags & (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) { case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP): case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP): mbuf_flags |= PKT_RX_L4_CKSUM_GOOD; break; case EFX_PKT_TCP: case EFX_PKT_UDP: mbuf_flags |= PKT_RX_L4_CKSUM_BAD; break; default: RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0); SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_UNKNOWN); break; } return mbuf_flags; } static uint32_t sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags) { return RTE_PTYPE_L2_ETHER | ((desc_flags & EFX_PKT_IPV4) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) | ((desc_flags & EFX_PKT_IPV6) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) | ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) | ((desc_flags & EFX_PKT_UDP) ? 
RTE_PTYPE_L4_UDP : 0); } static void sfc_rx_set_rss_hash(struct sfc_rxq *rxq, unsigned int flags, struct rte_mbuf *m) { #if EFSYS_OPT_RX_SCALE uint8_t *mbuf_data; if ((rxq->flags & SFC_RXQ_RSS_HASH) == 0) return; mbuf_data = rte_pktmbuf_mtod(m, uint8_t *); if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common, EFX_RX_HASHALG_TOEPLITZ, mbuf_data); m->ol_flags |= PKT_RX_RSS_HASH; } #endif } uint16_t sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { struct sfc_rxq *rxq = rx_queue; unsigned int completed; unsigned int prefix_size = rxq->prefix_size; unsigned int done_pkts = 0; boolean_t discard_next = B_FALSE; struct rte_mbuf *scatter_pkt = NULL; if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0)) return 0; sfc_ev_qpoll(rxq->evq); completed = rxq->completed; while (completed != rxq->pending && done_pkts < nb_pkts) { unsigned int id; struct sfc_rx_sw_desc *rxd; struct rte_mbuf *m; unsigned int seg_len; unsigned int desc_flags; id = completed++ & rxq->ptr_mask; rxd = &rxq->sw_desc[id]; m = rxd->mbuf; desc_flags = rxd->flags; if (discard_next) goto discard; if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD)) goto discard; if (desc_flags & EFX_PKT_PREFIX_LEN) { uint16_t tmp_size; int rc __rte_unused; rc = efx_pseudo_hdr_pkt_length_get(rxq->common, rte_pktmbuf_mtod(m, uint8_t *), &tmp_size); SFC_ASSERT(rc == 0); seg_len = tmp_size; } else { seg_len = rxd->size - prefix_size; } rte_pktmbuf_data_len(m) = seg_len; rte_pktmbuf_pkt_len(m) = seg_len; if (scatter_pkt != NULL) { if (rte_pktmbuf_chain(scatter_pkt, m) != 0) { rte_pktmbuf_free(scatter_pkt); goto discard; } /* The packet to deliver */ m = scatter_pkt; } if (desc_flags & EFX_PKT_CONT) { /* The packet is scattered, more fragments to come */ scatter_pkt = m; /* Futher fragments have no prefix */ prefix_size = 0; continue; } /* Scattered packet is done */ scatter_pkt = NULL; /* The first fragment of the packet has prefix */ prefix_size = rxq->prefix_size; 
m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags); m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags); /* * Extract RSS hash from the packet prefix and * set the corresponding field (if needed and possible) */ sfc_rx_set_rss_hash(rxq, desc_flags, m); m->data_off += prefix_size; *rx_pkts++ = m; done_pkts++; continue; discard: discard_next = ((desc_flags & EFX_PKT_CONT) != 0); rte_mempool_put(rxq->refill_mb_pool, m); rxd->mbuf = NULL; } /* pending is only moved when entire packet is received */ SFC_ASSERT(scatter_pkt == NULL); rxq->completed = completed; sfc_rx_qrefill(rxq); return done_pkts; } unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index) { struct sfc_rxq *rxq; SFC_ASSERT(sw_index < sa->rxq_count); rxq = sa->rxq_info[sw_index].rxq; if (rxq == NULL || (rxq->state & SFC_RXQ_RUNNING) == 0) return 0; sfc_ev_qpoll(rxq->evq); return rxq->pending - rxq->completed; } int sfc_rx_qdesc_done(struct sfc_rxq *rxq, unsigned int offset) { if ((rxq->state & SFC_RXQ_RUNNING) == 0) return 0; sfc_ev_qpoll(rxq->evq); return offset < (rxq->pending - rxq->completed); } static void sfc_rx_qpurge(struct sfc_rxq *rxq) { unsigned int i; struct sfc_rx_sw_desc *rxd; for (i = rxq->completed; i != rxq->added; ++i) { rxd = &rxq->sw_desc[i & rxq->ptr_mask]; rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf); rxd->mbuf = NULL; } } static void sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index) { struct sfc_rxq *rxq; unsigned int retry_count; unsigned int wait_count; rxq = sa->rxq_info[sw_index].rxq; SFC_ASSERT(rxq->state & SFC_RXQ_STARTED); /* * Retry Rx queue flushing in the case of flush failed or * timeout. In the worst case it can delay for 6 seconds. 
*/ for (retry_count = 0; ((rxq->state & SFC_RXQ_FLUSHED) == 0) && (retry_count < SFC_RX_QFLUSH_ATTEMPTS); ++retry_count) { if (efx_rx_qflush(rxq->common) != 0) { rxq->state |= SFC_RXQ_FLUSH_FAILED; break; } rxq->state &= ~SFC_RXQ_FLUSH_FAILED; rxq->state |= SFC_RXQ_FLUSHING; /* * Wait for Rx queue flush done or failed event at least * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied * by SFC_RX_QFLUSH_POLL_ATTEMPTS). */ wait_count = 0; do { rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS); sfc_ev_qpoll(rxq->evq); } while ((rxq->state & SFC_RXQ_FLUSHING) && (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS)); if (rxq->state & SFC_RXQ_FLUSHING) sfc_err(sa, "RxQ %u flush timed out", sw_index); if (rxq->state & SFC_RXQ_FLUSH_FAILED) sfc_err(sa, "RxQ %u flush failed", sw_index); if (rxq->state & SFC_RXQ_FLUSHED) sfc_info(sa, "RxQ %u flushed", sw_index); } sfc_rx_qpurge(rxq); } int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index) { struct sfc_rxq_info *rxq_info; struct sfc_rxq *rxq; struct sfc_evq *evq; int rc; sfc_log_init(sa, "sw_index=%u", sw_index); SFC_ASSERT(sw_index < sa->rxq_count); rxq_info = &sa->rxq_info[sw_index]; rxq = rxq_info->rxq; SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED); evq = rxq->evq; rc = sfc_ev_qstart(sa, evq->evq_index); if (rc != 0) goto fail_ev_qstart; rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type, &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */, evq->common, &rxq->common); if (rc != 0) goto fail_rx_qcreate; efx_rx_qenable(rxq->common); rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0; rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING); sfc_rx_qrefill(rxq); if (sw_index == 0) { rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, (sa->rss_channels > 1) ? 
B_TRUE : B_FALSE); if (rc != 0) goto fail_mac_filter_default_rxq_set; } /* It seems to be used by DPDK for debug purposes only ('rte_ether') */ sa->eth_dev->data->rx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED; return 0; fail_mac_filter_default_rxq_set: sfc_rx_qflush(sa, sw_index); fail_rx_qcreate: sfc_ev_qstop(sa, evq->evq_index); fail_ev_qstart: return rc; } void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index) { struct sfc_rxq_info *rxq_info; struct sfc_rxq *rxq; sfc_log_init(sa, "sw_index=%u", sw_index); SFC_ASSERT(sw_index < sa->rxq_count); rxq_info = &sa->rxq_info[sw_index]; rxq = rxq_info->rxq; if (rxq->state == SFC_RXQ_INITIALIZED) return; SFC_ASSERT(rxq->state & SFC_RXQ_STARTED); /* It seems to be used by DPDK for debug purposes only ('rte_ether') */ sa->eth_dev->data->rx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED; rxq->state &= ~SFC_RXQ_RUNNING; if (sw_index == 0) efx_mac_filter_default_rxq_clear(sa->nic); sfc_rx_qflush(sa, sw_index); rxq->state = SFC_RXQ_INITIALIZED; efx_rx_qdestroy(rxq->common); sfc_ev_qstop(sa, rxq->evq->evq_index); } static int sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc, const struct rte_eth_rxconf *rx_conf) { const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc); int rc = 0; if (rx_conf->rx_thresh.pthresh != 0 || rx_conf->rx_thresh.hthresh != 0 || rx_conf->rx_thresh.wthresh != 0) { sfc_err(sa, "RxQ prefetch/host/writeback thresholds are not supported"); rc = EINVAL; } if (rx_conf->rx_free_thresh > rx_free_thresh_max) { sfc_err(sa, "RxQ free threshold too large: %u vs maximum %u", rx_conf->rx_free_thresh, rx_free_thresh_max); rc = EINVAL; } if (rx_conf->rx_drop_en == 0) { sfc_err(sa, "RxQ drop disable is not supported"); rc = EINVAL; } return rc; } static unsigned int sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool) { uint32_t data_off; uint32_t order; /* The mbuf object itself is always cache line aligned */ order = rte_bsf32(RTE_CACHE_LINE_SIZE); /* Data offset from mbuf 
object start */ data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) + RTE_PKTMBUF_HEADROOM; order = MIN(order, rte_bsf32(data_off)); return 1u << (order - 1); } static uint16_t sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start); const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end); uint16_t buf_size; unsigned int buf_aligned; unsigned int start_alignment; unsigned int end_padding_alignment; /* Below it is assumed that both alignments are power of 2 */ SFC_ASSERT(rte_is_power_of_2(nic_align_start)); SFC_ASSERT(rte_is_power_of_2(nic_align_end)); /* * mbuf is always cache line aligned, double-check * that it meets rx buffer start alignment requirements. */ /* Start from mbuf pool data room size */ buf_size = rte_pktmbuf_data_room_size(mb_pool); /* Remove headroom */ if (buf_size <= RTE_PKTMBUF_HEADROOM) { sfc_err(sa, "RxQ mbuf pool %s object data room size %u is smaller than headroom %u", mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM); return 0; } buf_size -= RTE_PKTMBUF_HEADROOM; /* Calculate guaranteed data start alignment */ buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool); /* Reserve space for start alignment */ if (buf_aligned < nic_align_start) { start_alignment = nic_align_start - buf_aligned; if (buf_size <= start_alignment) { sfc_err(sa, "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC", mb_pool->name, rte_pktmbuf_data_room_size(mb_pool), RTE_PKTMBUF_HEADROOM, start_alignment); return 0; } buf_aligned = nic_align_start; buf_size -= start_alignment; } else { start_alignment = 0; } /* Make sure that end padding does not write beyond the buffer */ if (buf_aligned < nic_align_end) { /* * Estimate space which can be lost. If guarnteed buffer * size is odd, lost space is (nic_align_end - 1). 
More * accurate formula is below. */ end_padding_alignment = nic_align_end - MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1)); if (buf_size <= end_padding_alignment) { sfc_err(sa, "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC", mb_pool->name, rte_pktmbuf_data_room_size(mb_pool), RTE_PKTMBUF_HEADROOM, start_alignment, end_padding_alignment); return 0; } buf_size -= end_padding_alignment; } else { /* * Start is aligned the same or better than end, * just align length. */ buf_size = P2ALIGN(buf_size, nic_align_end); } return buf_size; } int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); int rc; uint16_t buf_size; struct sfc_rxq_info *rxq_info; unsigned int evq_index; struct sfc_evq *evq; struct sfc_rxq *rxq; rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf); if (rc != 0) goto fail_bad_conf; buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool); if (buf_size == 0) { sfc_err(sa, "RxQ %u mbuf pool object size is too small", sw_index); rc = EINVAL; goto fail_bad_conf; } if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) && !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) { sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool " "object size is too small", sw_index); sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs " "PDU size %u plus Rx prefix %u bytes", sw_index, buf_size, (unsigned int)sa->port.pdu, encp->enc_rx_prefix_size); rc = EINVAL; goto fail_bad_conf; } SFC_ASSERT(sw_index < sa->rxq_count); rxq_info = &sa->rxq_info[sw_index]; SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries); rxq_info->entries = nb_rx_desc; rxq_info->type = sa->eth_dev->data->dev_conf.rxmode.enable_scatter ? 
EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT; evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index); rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id); if (rc != 0) goto fail_ev_qinit; evq = sa->evq_info[evq_index].evq; rc = ENOMEM; rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE, socket_id); if (rxq == NULL) goto fail_rxq_alloc; rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries), socket_id, &rxq->mem); if (rc != 0) goto fail_dma_alloc; rc = ENOMEM; rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries, sizeof(*rxq->sw_desc), RTE_CACHE_LINE_SIZE, socket_id); if (rxq->sw_desc == NULL) goto fail_desc_alloc; evq->rxq = rxq; rxq->evq = evq; rxq->ptr_mask = rxq_info->entries - 1; rxq->refill_threshold = rx_conf->rx_free_thresh; rxq->refill_mb_pool = mb_pool; rxq->buf_size = buf_size; rxq->hw_index = sw_index; rxq->port_id = sa->eth_dev->data->port_id; /* Cache limits required on datapath in RxQ structure */ rxq->batch_max = encp->enc_rx_batch_max; rxq->prefix_size = encp->enc_rx_prefix_size; #if EFSYS_OPT_RX_SCALE if (sa->hash_support == EFX_RX_HASH_AVAILABLE) rxq->flags |= SFC_RXQ_RSS_HASH; #endif rxq->state = SFC_RXQ_INITIALIZED; rxq_info->rxq = rxq; rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0); return 0; fail_desc_alloc: sfc_dma_free(sa, &rxq->mem); fail_dma_alloc: rte_free(rxq); fail_rxq_alloc: sfc_ev_qfini(sa, evq_index); fail_ev_qinit: rxq_info->entries = 0; fail_bad_conf: sfc_log_init(sa, "failed %d", rc); return rc; } void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index) { struct sfc_rxq_info *rxq_info; struct sfc_rxq *rxq; SFC_ASSERT(sw_index < sa->rxq_count); rxq_info = &sa->rxq_info[sw_index]; rxq = rxq_info->rxq; SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED); rxq_info->rxq = NULL; rxq_info->entries = 0; rte_free(rxq->sw_desc); sfc_dma_free(sa, &rxq->mem); rte_free(rxq); sfc_ev_qfini(sa, sfc_evq_index_by_rxq_sw_index(sa, sw_index)); } #if EFSYS_OPT_RX_SCALE 
efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf) { efx_rx_hash_type_t efx_hash_types = 0; if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER)) != 0) efx_hash_types |= EFX_RX_HASH_IPV4; if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0) efx_hash_types |= EFX_RX_HASH_TCPIPV4; if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0) efx_hash_types |= EFX_RX_HASH_IPV6; if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0) efx_hash_types |= EFX_RX_HASH_TCPIPV6; return efx_hash_types; } uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types) { uint64_t rss_hf = 0; if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0) rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER); if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0) rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0) rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX); if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0) rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX); return rss_hf; } #endif static int sfc_rx_rss_config(struct sfc_adapter *sa) { int rc = 0; #if EFSYS_OPT_RX_SCALE if (sa->rss_channels > 1) { rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ, sa->rss_hash_types, B_TRUE); if (rc != 0) goto finish; rc = efx_rx_scale_key_set(sa->nic, sa->rss_key, sizeof(sa->rss_key)); if (rc != 0) goto finish; rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl, sizeof(sa->rss_tbl)); } finish: #endif return rc; } int sfc_rx_start(struct sfc_adapter *sa) { unsigned int sw_index; int rc; sfc_log_init(sa, "rxq_count=%u", sa->rxq_count); rc = efx_rx_init(sa->nic); if (rc != 0) goto fail_rx_init; rc = sfc_rx_rss_config(sa); if (rc != 0) goto fail_rss_config; for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) { if ((!sa->rxq_info[sw_index].deferred_start || sa->rxq_info[sw_index].deferred_started)) { rc = sfc_rx_qstart(sa, 
sw_index); if (rc != 0) goto fail_rx_qstart; } } return 0; fail_rx_qstart: while (sw_index-- > 0) sfc_rx_qstop(sa, sw_index); fail_rss_config: efx_rx_fini(sa->nic); fail_rx_init: sfc_log_init(sa, "failed %d", rc); return rc; } void sfc_rx_stop(struct sfc_adapter *sa) { unsigned int sw_index; sfc_log_init(sa, "rxq_count=%u", sa->rxq_count); sw_index = sa->rxq_count; while (sw_index-- > 0) { if (sa->rxq_info[sw_index].rxq != NULL) sfc_rx_qstop(sa, sw_index); } efx_rx_fini(sa->nic); } static int sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) { struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index]; unsigned int max_entries; max_entries = EFX_RXQ_MAXNDESCS; SFC_ASSERT(rte_is_power_of_2(max_entries)); rxq_info->max_entries = max_entries; return 0; } static int sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode) { int rc = 0; switch (rxmode->mq_mode) { case ETH_MQ_RX_NONE: /* No special checks are required */ break; #if EFSYS_OPT_RX_SCALE case ETH_MQ_RX_RSS: if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) { sfc_err(sa, "RSS is not available"); rc = EINVAL; } break; #endif default: sfc_err(sa, "Rx multi-queue mode %u not supported", rxmode->mq_mode); rc = EINVAL; } if (rxmode->header_split) { sfc_err(sa, "Header split on Rx not supported"); rc = EINVAL; } if (rxmode->hw_vlan_filter) { sfc_err(sa, "HW VLAN filtering not supported"); rc = EINVAL; } if (rxmode->hw_vlan_strip) { sfc_err(sa, "HW VLAN stripping not supported"); rc = EINVAL; } if (rxmode->hw_vlan_extend) { sfc_err(sa, "Q-in-Q HW VLAN stripping not supported"); rc = EINVAL; } if (!rxmode->hw_strip_crc) { sfc_warn(sa, "FCS stripping control not supported - always stripped"); rxmode->hw_strip_crc = 1; } if (rxmode->enable_lro) { sfc_err(sa, "LRO not supported"); rc = EINVAL; } return rc; } /** * Initialize Rx subsystem. * * Called at device configuration stage when number of receive queues is * specified together with other device level receive configuration. 
* * It should be used to allocate NUMA-unaware resources. */ int sfc_rx_init(struct sfc_adapter *sa) { struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; unsigned int sw_index; int rc; rc = sfc_rx_check_mode(sa, &dev_conf->rxmode); if (rc != 0) goto fail_check_mode; sa->rxq_count = sa->eth_dev->data->nb_rx_queues; rc = ENOMEM; sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count, sizeof(struct sfc_rxq_info), 0, sa->socket_id); if (sa->rxq_info == NULL) goto fail_rxqs_alloc; for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) { rc = sfc_rx_qinit_info(sa, sw_index); if (rc != 0) goto fail_rx_qinit_info; } #if EFSYS_OPT_RX_SCALE sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? MIN(sa->rxq_count, EFX_MAXRSS) : 1; if (sa->rss_channels > 1) { for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index) sa->rss_tbl[sw_index] = sw_index % sa->rss_channels; } #endif return 0; fail_rx_qinit_info: rte_free(sa->rxq_info); sa->rxq_info = NULL; fail_rxqs_alloc: sa->rxq_count = 0; fail_check_mode: sfc_log_init(sa, "failed %d", rc); return rc; } /** * Shutdown Rx subsystem. * * Called at device close stage, for example, before device * reconfiguration or shutdown. */ void sfc_rx_fini(struct sfc_adapter *sa) { unsigned int sw_index; sw_index = sa->rxq_count; while (sw_index-- > 0) { if (sa->rxq_info[sw_index].rxq != NULL) sfc_rx_qfini(sa, sw_index); } sa->rss_channels = 0; rte_free(sa->rxq_info); sa->rxq_info = NULL; sa->rxq_count = 0; }
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/thunderx/base/nicvf_bsvf.h
<filename>kdns/dpdk-17.02/drivers/net/thunderx/base/nicvf_bsvf.h<gh_stars>100-1000 /* * BSD LICENSE * * Copyright (C) Cavium networks Ltd. 2016. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium networks nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __THUNDERX_NICVF_BSVF_H__ #define __THUNDERX_NICVF_BSVF_H__ #include <sys/queue.h> struct nicvf; /** * The base queue structure to hold secondary qsets. */ struct svf_entry { STAILQ_ENTRY(svf_entry) next; /**< Next element's pointer */ struct nicvf *vf; /**< Holder of a secondary qset */ }; /** * Enqueue new entry to secondary qsets. * * @param entry * Entry to be enqueued. 
*/ void nicvf_bsvf_push(struct svf_entry *entry); /** * Dequeue an entry from secondary qsets. * * @return * Dequeued entry. */ struct svf_entry * nicvf_bsvf_pop(void); /** * Check if the queue of secondary qsets is empty. * * @return * 0 on non-empty * otherwise empty */ int nicvf_bsvf_empty(void); #endif /* __THUNDERX_NICVF_BSVF_H__ */
vicharl/containerdns
kdns/dpdk-17.02/examples/ipsec-secgw/esp.c
/*- * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

/*
 * Prepare the symmetric crypto operation for an inbound (received) ESP
 * packet: validate IP version and padded payload length, then fill in the
 * cipher/auth data offsets, IV location and digest location according to
 * the SA's configured algorithms.  The crypto device performs the actual
 * decryption/verification later; esp_inbound_post() finishes the job.
 *
 * Returns 0 on success, -EINVAL on malformed input or unsupported algorithm.
 *
 * NOTE(review): get_sym_cop()/get_cnt_blk()/get_aad() are project helpers
 * (declared in ipsec.h) — presumably they return per-operation scratch areas
 * attached to the mbuf; confirm against ipsec.h.
 */
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
        struct rte_crypto_op *cop)
{
    struct ip *ip4;
    struct rte_crypto_sym_op *sym_cop;
    int32_t payload_len, ip_hdr_len;

    RTE_ASSERT(m != NULL);
    RTE_ASSERT(sa != NULL);
    RTE_ASSERT(cop != NULL);

    /* Determine the outer IP header length from the version nibble. */
    ip4 = rte_pktmbuf_mtod(m, struct ip *);
    if (likely(ip4->ip_v == IPVERSION))
        ip_hdr_len = ip4->ip_hl * 4;
    else if (ip4->ip_v == IP6_VERSION)
        /* XXX No option headers supported */
        ip_hdr_len = sizeof(struct ip6_hdr);
    else {
        RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
                ip4->ip_v);
        return -EINVAL;
    }

    /* ESP payload = total pkt - outer IP hdr - ESP hdr - IV - ICV. */
    payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
        sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

    /* Payload must be a positive multiple of the cipher block size.
     * The mask test assumes block_size is a power of two.
     */
    if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
        RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
                payload_len, sa->block_size);
        return -EINVAL;
    }

    sym_cop = get_sym_cop(cop);
    sym_cop->m_src = m;
    sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
        sa->iv_len;
    sym_cop->cipher.data.length = payload_len;

    struct cnt_blk *icb;
    uint8_t *aad;
    /* IV sits immediately after the ESP header in the packet. */
    uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));

    switch (sa->cipher_algo) {
    case RTE_CRYPTO_CIPHER_NULL:
    case RTE_CRYPTO_CIPHER_AES_CBC:
        /* CBC/NULL use the packet's IV bytes directly. */
        sym_cop->cipher.iv.data = iv;
        sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
                ip_hdr_len + sizeof(struct esp_hdr));
        sym_cop->cipher.iv.length = sa->iv_len;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
    case RTE_CRYPTO_CIPHER_AES_GCM:
        /* Counter modes build a counter block: SA salt, the 8-byte
         * IV copied out of the packet, and a 32-bit block counter
         * starting at 1 (big-endian).
         */
        icb = get_cnt_blk(m);
        icb->salt = sa->salt;
        memcpy(&icb->iv, iv, 8);
        icb->cnt = rte_cpu_to_be_32(1);
        sym_cop->cipher.iv.data = (uint8_t *)icb;
        sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
                (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
        sym_cop->cipher.iv.length = 16;
        break;
    default:
        RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
                sa->cipher_algo);
        return -EINVAL;
    }

    switch (sa->auth_algo) {
    case RTE_CRYPTO_AUTH_NULL:
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        /* HMAC covers ESP header + IV + encrypted payload. */
        sym_cop->auth.data.offset = ip_hdr_len;
        sym_cop->auth.data.length = sizeof(struct esp_hdr) +
            sa->iv_len + payload_len;
        break;
    case RTE_CRYPTO_AUTH_AES_GCM:
        /* GCM authenticates via AAD: the 8 bytes of the ESP header
         * (SPI + sequence number) that precede the IV.
         */
        aad = get_aad(m);
        memcpy(aad, iv - sizeof(struct esp_hdr), 8);
        sym_cop->auth.aad.data = aad;
        sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
                aad - rte_pktmbuf_mtod(m, uint8_t *));
        sym_cop->auth.aad.length = 8;
        break;
    default:
        RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
                sa->auth_algo);
        return -EINVAL;
    }

    /* The ICV (digest) occupies the last digest_len bytes of the packet. */
    sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
            rte_pktmbuf_pkt_len(m) - sa->digest_len);
    sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
            rte_pktmbuf_pkt_len(m) - sa->digest_len);
    sym_cop->auth.digest.length = sa->digest_len;

    return 0;
}

/*
 * Finish processing an inbound ESP packet after the crypto device has
 * decrypted/verified it: check the sequential ESP trailer padding, trim
 * padding + pad-length + next-header + ICV from the mbuf, then either
 * restore the inner transport-mode header or strip the tunnel header.
 *
 * Returns 0 on success, -1 on crypto failure, -EINVAL on bad padding/trim.
 */
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
        struct rte_crypto_op *cop)
{
    struct ip *ip4, *ip;
    struct ip6_hdr *ip6;
    uint8_t *nexthdr, *pad_len;
    uint8_t *padding;
    uint16_t i;

    RTE_ASSERT(m != NULL);
    RTE_ASSERT(sa != NULL);
    RTE_ASSERT(cop != NULL);

    if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
        RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
        return -1;
    }

    /* ESP trailer layout (before the ICV): padding bytes, pad-length
     * byte, next-header byte.  nexthdr is the last byte before the ICV.
     */
    nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
            rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
    pad_len = nexthdr - 1;

    /* RFC 4303 default padding is the sequence 1, 2, 3, ... */
    padding = pad_len - *pad_len;
    for (i = 0; i < *pad_len; i++) {
        if (padding[i] != i + 1) {
            RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
            return -EINVAL;
        }
    }

    /* Drop padding + 2 trailer bytes + ICV from the end of the mbuf. */
    if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
        RTE_LOG(ERR, IPSEC_ESP,
                "failed to remove pad_len + digest\n");
        return -EINVAL;
    }

    if (unlikely(sa->flags == TRANSPORT)) {
        /* Transport mode: slide the original IP header forward over
         * the ESP header + IV and patch protocol/length fields.
         */
        ip = rte_pktmbuf_mtod(m, struct ip *);
        ip4 = (struct ip *)rte_pktmbuf_adj(m,
                sizeof(struct esp_hdr) + sa->iv_len);
        if (likely(ip->ip_v == IPVERSION)) {
            memmove(ip4, ip, ip->ip_hl * 4);
            ip4->ip_p = *nexthdr;
            ip4->ip_len = htons(rte_pktmbuf_data_len(m));
        } else {
            ip6 = (struct ip6_hdr *)ip4;
            /* XXX No option headers supported */
            memmove(ip6, ip, sizeof(struct ip6_hdr));
            ip6->ip6_nxt = *nexthdr;
            ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
        }
    } else
        /* Tunnel mode: strip outer IP + ESP header + IV. */
        ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

    return 0;
}

/*
 * Prepare the symmetric crypto operation for an outbound (to be sent) ESP
 * packet: compute the padded payload length, append room for padding + ICV,
 * insert the ESP header (tunnel or transport mode), write the trailer
 * padding, and fill in the crypto op's cipher/auth/digest fields.
 *
 * Returns 0 on success, -EINVAL on bad input/oversized packet,
 * -ENOSPC when the mbuf lacks trailing space.
 */
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
        struct rte_crypto_op *cop)
{
    struct ip *ip4;
    struct ip6_hdr *ip6;
    struct esp_hdr *esp = NULL;
    uint8_t *padding, *new_ip, nlp;
    struct rte_crypto_sym_op *sym_cop;
    int32_t i;
    uint16_t pad_payload_len, pad_len, ip_hdr_len;

    RTE_ASSERT(m != NULL);
    RTE_ASSERT(sa != NULL);
    RTE_ASSERT(cop != NULL);

    ip_hdr_len = 0;

    /* Pick the ESP next-header value (nlp): the inner protocol in
     * transport mode, or IPIP/IPV6 when the whole packet is tunnelled.
     */
    ip4 = rte_pktmbuf_mtod(m, struct ip *);
    if (likely(ip4->ip_v == IPVERSION)) {
        if (unlikely(sa->flags == TRANSPORT)) {
            ip_hdr_len = ip4->ip_hl * 4;
            nlp = ip4->ip_p;
        } else
            nlp = IPPROTO_IPIP;
    } else if (ip4->ip_v == IP6_VERSION) {
        if (unlikely(sa->flags == TRANSPORT)) {
            /* XXX No option headers supported */
            ip_hdr_len = sizeof(struct ip6_hdr);
            ip6 = (struct ip6_hdr *)ip4;
            nlp = ip6->ip6_nxt;
        } else
            nlp = IPPROTO_IPV6;
    } else {
        RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
                ip4->ip_v);
        return -EINVAL;
    }

    /* Padded payload length */
    /* +2 accounts for the pad-length and next-header trailer bytes;
     * the result is rounded up to the cipher block size.
     */
    pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
            ip_hdr_len + 2, sa->block_size);
    pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

    RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
            sa->flags == TRANSPORT);

    /* From here on, ip_hdr_len is the OUTER header length. */
    if (likely(sa->flags == IP4_TUNNEL))
        ip_hdr_len = sizeof(struct ip);
    else if (sa->flags == IP6_TUNNEL)
        ip_hdr_len = sizeof(struct ip6_hdr);
    else if (sa->flags != TRANSPORT) {
        RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
                sa->flags);
        return -EINVAL;
    }

    /* Check maximum packet size */
    if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
            pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
        RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
        return -EINVAL;
    }

    /* Reserve trailing space for the ESP trailer and the ICV. */
    padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
    if (unlikely(padding == NULL)) {
        RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
        return -ENOSPC;
    }
    rte_prefetch0(padding);

    /* Insert the outer header (tunnel) or make room for the ESP header
     * after the existing header (transport); esp then points at the
     * ESP header location in all cases.
     */
    switch (sa->flags) {
    case IP4_TUNNEL:
        ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
                &sa->src, &sa->dst);
        esp = (struct esp_hdr *)(ip4 + 1);
        break;
    case IP6_TUNNEL:
        ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
                &sa->src, &sa->dst);
        esp = (struct esp_hdr *)(ip6 + 1);
        break;
    case TRANSPORT:
        new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
                sizeof(struct esp_hdr) + sa->iv_len);
        memmove(new_ip, ip4, ip_hdr_len);
        esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
        if (likely(ip4->ip_v == IPVERSION)) {
            ip4 = (struct ip *)new_ip;
            ip4->ip_p = IPPROTO_ESP;
            ip4->ip_len = htons(rte_pktmbuf_data_len(m));
        } else {
            ip6 = (struct ip6_hdr *)new_ip;
            ip6->ip6_nxt = IPPROTO_ESP;
            ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
        }
    }

    sa->seq++;
    esp->spi = rte_cpu_to_be_32(sa->spi);
    esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

    uint64_t *iv = (uint64_t *)(esp + 1);

    sym_cop = get_sym_cop(cop);
    sym_cop->m_src = m;
    switch (sa->cipher_algo) {
    case RTE_CRYPTO_CIPHER_NULL:
    case RTE_CRYPTO_CIPHER_AES_CBC:
        /* CBC: zero the on-wire IV field; the cipher range starts at
         * the IV and covers IV + padded payload.
         */
        memset(iv, 0, sa->iv_len);
        sym_cop->cipher.data.offset = ip_hdr_len +
            sizeof(struct esp_hdr);
        sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
    case RTE_CRYPTO_CIPHER_AES_GCM:
        /* Counter modes: the sequence number doubles as the IV. */
        *iv = sa->seq;
        sym_cop->cipher.data.offset = ip_hdr_len +
            sizeof(struct esp_hdr) + sa->iv_len;
        sym_cop->cipher.data.length = pad_payload_len;
        break;
    default:
        RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
                sa->cipher_algo);
        return -EINVAL;
    }

    /* Fill pad_len using default sequential scheme */
    /* Last two bytes of the trailer are pad-length and next-header. */
    for (i = 0; i < pad_len - 2; i++)
        padding[i] = i + 1;
    padding[pad_len - 2] = pad_len - 2;
    padding[pad_len - 1] = nlp;

    struct cnt_blk *icb = get_cnt_blk(m);
    icb->salt = sa->salt;
    icb->iv = sa->seq;
    icb->cnt = rte_cpu_to_be_32(1);
    sym_cop->cipher.iv.data = (uint8_t *)icb;
    sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
            (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
    sym_cop->cipher.iv.length = 16;

    uint8_t *aad;

    switch (sa->auth_algo) {
    case RTE_CRYPTO_AUTH_NULL:
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        /* HMAC covers ESP header + IV + padded payload. */
        sym_cop->auth.data.offset = ip_hdr_len;
        sym_cop->auth.data.length = sizeof(struct esp_hdr) +
            sa->iv_len + pad_payload_len;
        break;
    case RTE_CRYPTO_AUTH_AES_GCM:
        /* GCM AAD: the 8 ESP header bytes (SPI + seq). */
        aad = get_aad(m);
        memcpy(aad, esp, 8);
        sym_cop->auth.aad.data = aad;
        sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
                aad - rte_pktmbuf_mtod(m, uint8_t *));
        sym_cop->auth.aad.length = 8;
        break;
    default:
        RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
                sa->auth_algo);
        return -EINVAL;
    }

    /* ICV is written into the last digest_len bytes appended above. */
    sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
            rte_pktmbuf_pkt_len(m) - sa->digest_len);
    sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
            rte_pktmbuf_pkt_len(m) - sa->digest_len);
    sym_cop->auth.digest.length = sa->digest_len;

    return 0;
}

/*
 * Post-crypto step for outbound packets: nothing to rewrite, just report
 * whether the crypto operation succeeded (0) or failed (-1).
 */
int
esp_outbound_post(struct rte_mbuf *m __rte_unused,
        struct ipsec_sa *sa __rte_unused,
        struct rte_crypto_op *cop)
{
    RTE_ASSERT(m != NULL);
    RTE_ASSERT(sa != NULL);
    RTE_ASSERT(cop != NULL);

    if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
        RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
        return -1;
    }

    return 0;
}
vicharl/containerdns
kdns/dpdk-17.02/lib/librte_vhost/vhost.c
/*- * BSD LICENSE * * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/vhost.h> #include <linux/virtio_net.h> #include <stddef.h> #include <stdint.h> #include <stdlib.h> #ifdef RTE_LIBRTE_VHOST_NUMA #include <numaif.h> #endif #include <rte_ethdev.h> #include <rte_log.h> #include <rte_string_fns.h> #include <rte_memory.h> #include <rte_malloc.h> #include <rte_virtio_net.h> #include "vhost.h" #define VHOST_USER_F_PROTOCOL_FEATURES 30 /* Features supported by this lib. 
*/ #define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \ (1ULL << VIRTIO_NET_F_CTRL_RX) | \ (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \ (1ULL << VIRTIO_NET_F_MQ) | \ (1ULL << VIRTIO_F_VERSION_1) | \ (1ULL << VHOST_F_LOG_ALL) | \ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \ (1ULL << VIRTIO_NET_F_HOST_TSO4) | \ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \ (1ULL << VIRTIO_NET_F_CSUM) | \ (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ (1ULL << VIRTIO_RING_F_INDIRECT_DESC)) uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES; struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; /* device ops to add/remove device to/from data core. */ struct virtio_net_device_ops const *notify_ops; struct virtio_net * get_device(int vid) { struct virtio_net *dev = vhost_devices[vid]; if (unlikely(!dev)) { RTE_LOG(ERR, VHOST_CONFIG, "(%d) device not found.\n", vid); } return dev; } static void cleanup_vq(struct vhost_virtqueue *vq, int destroy) { if ((vq->callfd >= 0) && (destroy != 0)) close(vq->callfd); if (vq->kickfd >= 0) close(vq->kickfd); } /* * Unmap any memory, close any file descriptors and * free any memory owned by a device. */ void cleanup_device(struct virtio_net *dev, int destroy) { uint32_t i; vhost_backend_cleanup(dev); for (i = 0; i < dev->virt_qp_nb; i++) { cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ], destroy); cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ], destroy); } } /* * Release virtqueues and device memory. 
*/ static void free_device(struct virtio_net *dev) { uint32_t i; struct vhost_virtqueue *rxq, *txq; for (i = 0; i < dev->virt_qp_nb; i++) { rxq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ]; txq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ]; rte_free(rxq->shadow_used_ring); rte_free(txq->shadow_used_ring); /* rxq and txq are allocated together as queue-pair */ rte_free(rxq); } rte_free(dev); } static void init_vring_queue(struct vhost_virtqueue *vq, int qp_idx) { memset(vq, 0, sizeof(struct vhost_virtqueue)); vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD; /* Backends are set to -1 indicating an inactive device. */ vq->backend = -1; /* always set the default vq pair to enabled */ if (qp_idx == 0) vq->enabled = 1; TAILQ_INIT(&vq->zmbuf_list); } static void init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx) { uint32_t base_idx = qp_idx * VIRTIO_QNUM; init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx); init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx); } static void reset_vring_queue(struct vhost_virtqueue *vq, int qp_idx) { int callfd; callfd = vq->callfd; init_vring_queue(vq, qp_idx); vq->callfd = callfd; } static void reset_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx) { uint32_t base_idx = qp_idx * VIRTIO_QNUM; reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx); reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx); } int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx) { struct vhost_virtqueue *virtqueue = NULL; uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ; uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ; virtqueue = rte_malloc(NULL, sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0); if (virtqueue == NULL) { RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for virt qp:%d.\n", qp_idx); return -1; } dev->virtqueue[virt_rx_q_idx] = virtqueue; dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ; 
init_vring_queue_pair(dev, qp_idx); dev->virt_qp_nb += 1; return 0; } /* * Reset some variables in device structure, while keeping few * others untouched, such as vid, ifname, virt_qp_nb: they * should be same unless the device is removed. */ void reset_device(struct virtio_net *dev) { uint32_t i; dev->features = 0; dev->protocol_features = 0; dev->flags = 0; for (i = 0; i < dev->virt_qp_nb; i++) reset_vring_queue_pair(dev, i); } /* * Invoked when there is a new vhost-user connection established (when * there is a new virtio device being attached). */ int vhost_new_device(void) { struct virtio_net *dev; int i; dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0); if (dev == NULL) { RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for new dev.\n"); return -1; } for (i = 0; i < MAX_VHOST_DEVICE; i++) { if (vhost_devices[i] == NULL) break; } if (i == MAX_VHOST_DEVICE) { RTE_LOG(ERR, VHOST_CONFIG, "Failed to find a free slot for new device.\n"); rte_free(dev); return -1; } vhost_devices[i] = dev; dev->vid = i; return i; } /* * Invoked when there is the vhost-user connection is broken (when * the virtio device is being detached). */ void vhost_destroy_device(int vid) { struct virtio_net *dev = get_device(vid); if (dev == NULL) return; if (dev->flags & VIRTIO_DEV_RUNNING) { dev->flags &= ~VIRTIO_DEV_RUNNING; notify_ops->destroy_device(vid); } cleanup_device(dev, 1); free_device(dev); vhost_devices[vid] = NULL; } void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len) { struct virtio_net *dev; unsigned int len; dev = get_device(vid); if (dev == NULL) return; len = if_len > sizeof(dev->ifname) ? 
sizeof(dev->ifname) : if_len; strncpy(dev->ifname, if_name, len); dev->ifname[sizeof(dev->ifname) - 1] = '\0'; } void vhost_enable_dequeue_zero_copy(int vid) { struct virtio_net *dev = get_device(vid); if (dev == NULL) return; dev->dequeue_zero_copy = 1; } int rte_vhost_get_numa_node(int vid) { #ifdef RTE_LIBRTE_VHOST_NUMA struct virtio_net *dev = get_device(vid); int numa_node; int ret; if (dev == NULL) return -1; ret = get_mempolicy(&numa_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR); if (ret < 0) { RTE_LOG(ERR, VHOST_CONFIG, "(%d) failed to query numa node: %d\n", vid, ret); return -1; } return numa_node; #else RTE_SET_USED(vid); return -1; #endif } uint32_t rte_vhost_get_queue_num(int vid) { struct virtio_net *dev = get_device(vid); if (dev == NULL) return 0; return dev->virt_qp_nb; } int rte_vhost_get_ifname(int vid, char *buf, size_t len) { struct virtio_net *dev = get_device(vid); if (dev == NULL) return -1; len = RTE_MIN(len, sizeof(dev->ifname)); strncpy(buf, dev->ifname, len); buf[len - 1] = '\0'; return 0; } uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id) { struct virtio_net *dev; struct vhost_virtqueue *vq; dev = get_device(vid); if (!dev) return 0; vq = dev->virtqueue[queue_id]; if (!vq->enabled) return 0; return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx; } int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable) { struct virtio_net *dev = get_device(vid); if (dev == NULL) return -1; if (enable) { RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n"); return -1; } dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY; return 0; } uint64_t rte_vhost_feature_get(void) { return VHOST_FEATURES; } int rte_vhost_feature_disable(uint64_t feature_mask) { VHOST_FEATURES = VHOST_FEATURES & ~feature_mask; return 0; } int rte_vhost_feature_enable(uint64_t feature_mask) { if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) { VHOST_FEATURES = VHOST_FEATURES | feature_mask; return 
0; } return -1; } /* * Register ops so that we can add/remove device to data core. */ int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops) { notify_ops = ops; return 0; }
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/i40e/i40e_rxtx_vec_neon.c
/*- * BSD LICENSE * * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. * Copyright(c) 2016, Linaro Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <arm_neon.h>

#pragma GCC diagnostic ignored "-Wcast-qual"

/*
 * Refill the RX descriptor ring with fresh mbufs, two at a time, using
 * NEON stores, then bump the hardware tail pointer.  On mempool exhaustion
 * near ring wrap, descriptors are pointed at a fake mbuf so the NIC never
 * writes into freed memory, and the alloc-failed counter is incremented.
 */
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
    int i;
    uint16_t rx_id;
    volatile union i40e_rx_desc *rxdp;
    struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
    struct rte_mbuf *mb0, *mb1;
    uint64x2_t dma_addr0, dma_addr1;
    uint64x2_t zero = vdupq_n_u64(0);
    uint64_t paddr;
    uint8x8_t p;

    rxdp = rxq->rx_ring + rxq->rxrearm_start;

    /* Pull 'n' more MBUFs into the software ring */
    if (unlikely(rte_mempool_get_bulk(rxq->mp,
                    (void *)rxep,
                    RTE_I40E_RXQ_REARM_THRESH) < 0)) {
        if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
            rxq->nb_rx_desc) {
            for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
                rxep[i].mbuf = &rxq->fake_mbuf;
                vst1q_u64((uint64_t *)&rxdp[i].read, zero);
            }
        }
        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
            RTE_I40E_RXQ_REARM_THRESH;
        return;
    }

    p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);

    /* Initialize the mbufs in vector, process 2 mbufs in one loop */
    for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
        mb0 = rxep[0].mbuf;
        mb1 = rxep[1].mbuf;

        /* Flush mbuf with pkt template.
         * Data to be rearmed is 6 bytes long.
         * Though, RX will overwrite ol_flags that are coming next
         * anyway. So overwrite whole 8 bytes with one load:
         * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
         */
        vst1_u8((uint8_t *)&mb0->rearm_data, p);
        paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
        dma_addr0 = vdupq_n_u64(paddr);

        /* flush desc with pa dma_addr */
        vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

        vst1_u8((uint8_t *)&mb1->rearm_data, p);
        paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
        dma_addr1 = vdupq_n_u64(paddr);
        vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
    }

    rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
    if (rxq->rxrearm_start >= rxq->nb_rx_desc)
        rxq->rxrearm_start = 0;

    rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

    rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
            (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

    /* Update the tail pointer on the NIC */
    I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet
 */
#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE

/*
 * Translate the status/error bits of 4 RX descriptors into mbuf ol_flags
 * (VLAN, RSS/FDIR, L3/L4 checksum) using NEON table lookups.
 */
static inline void
desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
{
    uint32x4_t vlan0, vlan1, rss, l3_l4e;

    /* mask everything except RSS, flow director and VLAN flags
     * bit2 is for VLAN tag, bit11 for flow director indication
     * bit13:12 for RSS indication.
     */
    const uint32x4_t rss_vlan_msk = {
            0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};

    /* map rss and vlan type to rss hash and vlan flag */
    const uint8x16_t vlan_flags = {
            0, 0, 0, 0,
            PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0};

    const uint8x16_t rss_flags = {
            0, PKT_RX_FDIR, 0, 0,
            0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
            0, 0, 0, 0,
            0, 0, 0, 0};

    const uint8x16_t l3_l4e_flags = {
            0,
            PKT_RX_IP_CKSUM_BAD,
            PKT_RX_L4_CKSUM_BAD,
            PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
            PKT_RX_EIP_CKSUM_BAD,
            PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
            PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
            PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
                PKT_RX_IP_CKSUM_BAD,
            0, 0, 0, 0, 0, 0, 0, 0};

    /* Gather the status words of the 4 descriptors into one vector. */
    vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
            vreinterpretq_u32_u64(descs[2])).val[1];
    vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
            vreinterpretq_u32_u64(descs[3])).val[1];
    vlan0 = vzipq_u32(vlan0, vlan1).val[0];

    vlan1 = vandq_u32(vlan0, rss_vlan_msk);
    vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
                        vreinterpretq_u8_u32(vlan1)));

    rss = vshrq_n_u32(vlan1, 11);
    rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
                        vreinterpretq_u8_u32(rss)));

    l3_l4e = vshrq_n_u32(vlan1, 22);
    l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
                        vreinterpretq_u8_u32(l3_l4e)));

    vlan0 = vorrq_u32(vlan0, rss);
    vlan0 = vorrq_u32(vlan0, l3_l4e);

    rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
    rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
    rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
    rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
}
#else
#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
#endif

#define PKTLEN_SHIFT 10

#define I40E_VPMD_DESC_DD_MASK	0x0001000100010001ULL

/*
 * Extract the 8-bit ptype field from 4 descriptors and map it to the DPDK
 * packet_type of each mbuf.
 */
static inline void
desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
{
    int i;
    uint8_t ptype;
    uint8x16_t tmp;

    for (i = 0; i < 4; i++) {
        tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
        ptype = vgetq_lane_u8(tmp, 8);
        rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
    }
}

/*
 * Core vectorized receive: rearm the ring if needed, then scan descriptors
 * 4 at a time, copying mbuf pointers and converting descriptor fields to
 * mbuf metadata with NEON shuffles.  When split_packet is non-NULL, one
 * EOP flag byte per packet is recorded for scattered-RX reassembly.
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts, uint8_t *split_packet)
{
    volatile union i40e_rx_desc *rxdp;
    struct i40e_rx_entry *sw_ring;
    uint16_t nb_pkts_recd;
    int pos;
    uint64_t var;

    /* mask to shuffle from desc. to mbuf */
    uint8x16_t shuf_msk = {
        0xFF, 0xFF,   /* pkt_type set as unknown */
        0xFF, 0xFF,   /* pkt_type set as unknown */
        14, 15,       /* octet 15~14, low 16 bits pkt_len */
        0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
        14, 15,       /* octet 15~14, 16 bits data_len */
        2, 3,         /* octet 2~3, low 16 bits vlan_macip */
        4, 5, 6, 7    /* octet 4~7, 32bits rss */
        };

    uint8x16_t eop_check = {
        0x02, 0x00, 0x02, 0x00,
        0x02, 0x00, 0x02, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00
        };

    uint16x8_t crc_adjust = {
        0, 0,         /* ignore pkt_type field */
        rxq->crc_len, /* sub crc on pkt_len */
        0,            /* ignore high-16bits of pkt_len */
        rxq->crc_len, /* sub crc on data_len */
        0, 0, 0       /* ignore non-length fields */
        };

    /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
    nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

    /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
    nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

    /* Just the act of getting into the function from the application is
     * going to cost about 7 cycles
     */
    rxdp = rxq->rx_ring + rxq->rx_tail;

    rte_prefetch_non_temporal(rxdp);

    /* See if we need to rearm the RX queue - gives the prefetch a bit
     * of time to act
     */
    if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
        i40e_rxq_rearm(rxq);

    /* Before we start moving massive data around, check to see if
     * there is actually a packet available
     */
    if (!(rxdp->wb.qword1.status_error_len &
            rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
        return 0;

    /* Cache is empty -> need to scan the buffer rings, but first move
     * the next 'n' mbufs into the cache
     */
    sw_ring = &rxq->sw_ring[rxq->rx_tail];

    /* A. load 4 packet in one loop
     * [A*. mask out 4 unused dirty field in desc]
     * B. copy 4 mbuf point from swring to rx_pkts
     * C. calc the number of DD bits among the 4 packets
     * [C*. extract the end-of-packet bit, if requested]
     * D. fill info. from desc to mbuf
     */
    for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
            pos += RTE_I40E_DESCS_PER_LOOP,
            rxdp += RTE_I40E_DESCS_PER_LOOP) {
        uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
        uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
        uint16x8x2_t sterr_tmp1, sterr_tmp2;
        uint64x2_t mbp1, mbp2;
        uint16x8_t staterr;
        uint16x8_t tmp;
        uint64_t stat;

        int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};

        /* B.1 load 1 mbuf point */
        mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
        /* Read desc statuses backwards to avoid race condition */
        /* A.1 load 4 pkts desc */
        descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
        rte_rmb();

        /* B.2 copy 2 mbuf point into rx_pkts */
        vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

        /* B.1 load 1 mbuf point */
        mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

        descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
        /* B.1 load 2 mbuf point */
        descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
        descs[0] = vld1q_u64((uint64_t *)(rxdp));

        /* B.2 copy 2 mbuf point into rx_pkts */
        vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

        if (split_packet) {
            rte_mbuf_prefetch_part2(rx_pkts[pos]);
            rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
            rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
            rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
        }

        /* avoid compiler reorder optimization */
        rte_compiler_barrier();

        /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
        uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
                        len_shl);
        descs[3] = vreinterpretq_u64_u32(len3);
        uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
                        len_shl);
        descs[2] = vreinterpretq_u64_u32(len2);

        /* D.1 pkt 3,4 convert format from desc to pktmbuf */
        pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
        pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

        /* C.1 4=>2 filter staterr info only */
        sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
                    vreinterpretq_u16_u64(descs[3]));
        /* C.1 4=>2 filter staterr info only */
        sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
                    vreinterpretq_u16_u64(descs[2]));

        /* C.2 get 4 pkts staterr value */
        staterr = vzipq_u16(sterr_tmp1.val[1],
                    sterr_tmp2.val[1]).val[0];
        stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);

        desc_to_olflags_v(descs, &rx_pkts[pos]);

        /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
        pkt_mb4 = vreinterpretq_u8_u16(tmp);
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
        pkt_mb3 = vreinterpretq_u8_u16(tmp);

        /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
        uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
                        len_shl);
        descs[1] = vreinterpretq_u64_u32(len1);
        uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
                        len_shl);
        descs[0] = vreinterpretq_u64_u32(len0);

        /* D.1 pkt 1,2 convert format from desc to pktmbuf */
        pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
        pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

        /* D.3 copy final 3,4 data to rx_pkts */
        vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
            pkt_mb4);
        vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
            pkt_mb3);

        /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
        pkt_mb2 = vreinterpretq_u8_u16(tmp);
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
        pkt_mb1 = vreinterpretq_u8_u16(tmp);

        /* C* extract and record EOP bit */
        if (split_packet) {
            uint8x16_t eop_shuf_mask = {
                    0x00, 0x02, 0x04, 0x06,
                    0xFF, 0xFF, 0xFF, 0xFF,
                    0xFF, 0xFF, 0xFF, 0xFF,
                    0xFF, 0xFF, 0xFF, 0xFF};
            uint8x16_t eop_bits;

            /* and with mask to extract bits, flipping 1-0 */
            eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
            eop_bits = vandq_u8(eop_bits, eop_check);
            /* the staterr values are not in order, as the count
             * count of dd bits doesn't care. However, for end of
             * packet tracking, we do care, so shuffle. This also
             * compresses the 32-bit values to 8-bit
             */
            eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);

            /* store the resulting 32-bit value */
            vst1q_lane_u32((uint32_t *)split_packet,
                    vreinterpretq_u32_u8(eop_bits), 0);
            split_packet += RTE_I40E_DESCS_PER_LOOP;

            /* zero-out next pointers */
            rx_pkts[pos]->next = NULL;
            rx_pkts[pos + 1]->next = NULL;
            rx_pkts[pos + 2]->next = NULL;
            rx_pkts[pos + 3]->next = NULL;
        }

        rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);

        /* D.3 copy final 1,2 data to rx_pkts */
        vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
            pkt_mb2);
        vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
            pkt_mb1);
        desc_to_ptype_v(descs, &rx_pkts[pos]);
        /* C.4 calc available number of desc */
        var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
        nb_pkts_recd += var;
        if (likely(var != RTE_I40E_DESCS_PER_LOOP))
            break;
    }

    /* Update our internal tail pointer */
    rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
    rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
    rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

    return nb_pkts_recd;
}

/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
    return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/* vPMD receive routine that reassembles scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
    struct i40e_rx_queue *rxq = rx_queue;
    uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

    /* get some new buffers */
    uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
            split_flags);
    if (nb_bufs == 0)
        return 0;

    /* happy day case, full burst + no packets to be joined */
    const uint64_t *split_fl64 = (uint64_t *)split_flags;

    if (rxq->pkt_first_seg == NULL &&
            split_fl64[0] == 0 &&
            split_fl64[1] == 0 &&
            split_fl64[2] == 0 &&
            split_fl64[3] == 0)
        return nb_bufs;

    /* reassemble any packets that need reassembly*/
    unsigned i = 0;

    if (rxq->pkt_first_seg == NULL) {
        /* find the first split flag, and only reassemble then*/
        while (i < nb_bufs && !split_flags[i])
            i++;
        if (i == nb_bufs)
            return nb_bufs;
    }
    return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
        &split_flags[i]);
}

/* Write one TX descriptor (buffer address + command/length qword). */
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
        struct rte_mbuf *pkt, uint64_t flags)
{
    uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
            ((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
            ((uint64_t)pkt->data_len <<
                I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

    uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
    vst1q_u64((uint64_t *)txdp, descriptor);
}

/* Write nb_pkts consecutive TX descriptors with the same command flags. */
static inline void
vtx(volatile struct i40e_tx_desc *txdp,
        struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
{
    int i;

    for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
        vtx1(txdp, *pkt, flags);
}

/*
 * Vectorized transmit burst: free completed mbufs if below the free
 * threshold, fill descriptors (handling ring wrap in two passes), set the
 * RS bit at tx_next_rs intervals, and bump the hardware tail register.
 * Returns the number of packets actually queued.
 */
uint16_t
i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
    struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
    volatile struct i40e_tx_desc *txdp;
    struct i40e_tx_entry *txep;
    uint16_t n, nb_commit, tx_id;
    uint64_t flags = I40E_TD_CMD;
    uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
    int i;

    /* cross rx_thresh boundary is not allowed */
    nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

    if (txq->nb_tx_free < txq->tx_free_thresh)
        i40e_tx_free_bufs(txq);

    nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
    if (unlikely(nb_pkts == 0))
        return 0;

    tx_id = txq->tx_tail;
    txdp = &txq->tx_ring[tx_id];
    txep = &txq->sw_ring[tx_id];

    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

    /* n = descriptors left before the ring wraps */
    n = (uint16_t)(txq->nb_tx_desc - tx_id);
    if (nb_commit >= n) {
        tx_backlog_entry(txep, tx_pkts, n);

        for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
            vtx1(txdp, *tx_pkts, flags);

        /* last descriptor before wrap carries the RS bit */
        vtx1(txdp, *tx_pkts++, rs);

        nb_commit = (uint16_t)(nb_commit - n);

        tx_id = 0;
        txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

        /* avoid reach the end of ring */
        txdp = &txq->tx_ring[tx_id];
        txep = &txq->sw_ring[tx_id];
    }

    tx_backlog_entry(txep, tx_pkts, nb_commit);

    vtx(txdp, tx_pkts, nb_commit, flags);

    tx_id = (uint16_t)(tx_id + nb_commit);
    if (tx_id > txq->tx_next_rs) {
        txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
            rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
                    I40E_TXD_QW1_CMD_SHIFT);
        txq->tx_next_rs =
            (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
    }

    txq->tx_tail = tx_id;

    I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

    return nb_pkts;
}

/* Thin wrappers delegating to the common (shared) vector-PMD helpers. */
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
    _i40e_rx_queue_release_mbufs_vec(rxq);
}

int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
    return i40e_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
    return 0;
}

int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
    return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
vicharl/containerdns
kdns/dpdk-17.02/drivers/net/qede/base/ecore_sp_commands.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"

/* Acquire and initialize a slow-path queue (SPQ) entry for a ramrod:
 * fills the element header (CID, command, protocol), sets the completion
 * mode/callback and zeroes the ramrod payload. On success *pp_ent holds
 * the ready-to-fill entry.
 */
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
                                           u8 cmd,
                                           u8 protocol,
                                           struct ecore_sp_init_data *p_data)
{
    u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    enum _ecore_status_t rc;

    if (!pp_ent)
        return ECORE_INVAL;

    /* Get an SPQ entry */
    rc = ecore_spq_get_entry(p_hwfn, pp_ent);
    if (rc != ECORE_SUCCESS)
        return rc;

    /* Fill the SPQ entry */
    p_ent = *pp_ent;
    p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
    p_ent->elem.hdr.cmd_id = cmd;
    p_ent->elem.hdr.protocol_id = protocol;
    p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
    p_ent->comp_mode = p_data->comp_mode;
    p_ent->comp_done.done = 0;

    /* NOTE(review): the error returns below leave the SPQ entry acquired
     * above un-released — confirm whether callers (or ecore_spq) reclaim
     * it on failure.
     */
    switch (p_ent->comp_mode) {
    case ECORE_SPQ_MODE_EBLOCK:
        /* Caller blocks on this entry's own completion flag. */
        p_ent->comp_cb.cookie = &p_ent->comp_done;
        break;

    case ECORE_SPQ_MODE_BLOCK:
        if (!p_data->p_comp_data)
            return ECORE_INVAL;

        p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
        break;

    case ECORE_SPQ_MODE_CB:
        /* Asynchronous completion via caller-supplied callback (may be
         * omitted for fire-and-forget).
         */
        if (!p_data->p_comp_data)
            p_ent->comp_cb.function = OSAL_NULL;
        else
            p_ent->comp_cb = *p_data->p_comp_data;
        break;

    default:
        DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                  p_ent->comp_mode);
        return ECORE_INVAL;
    }

    DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
               "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
               opaque_cid, cmd, protocol,
               (unsigned long)&p_ent->ramrod,
               D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                       ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                       "MODE_CB"));
    OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

    return ECORE_SUCCESS;
}

/* Translate the ecore tunnel-classification enum into the firmware HSI
 * value; unknown values fall back to MAC/VLAN classification.
 */
static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
{
    switch (type) {
    case ECORE_TUNN_CLSS_MAC_VLAN:
        return TUNNEL_CLSS_MAC_VLAN;
    case ECORE_TUNN_CLSS_MAC_VNI:
        return TUNNEL_CLSS_MAC_VNI;
    case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
        return TUNNEL_CLSS_INNER_MAC_VLAN;
    case ECORE_TUNN_CLSS_INNER_MAC_VNI:
        return TUNNEL_CLSS_INNER_MAC_VNI;
    case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
        return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
    default:
        return TUNNEL_CLSS_MAC_VLAN;
    }
}

/* Compute the effective tunnel mode for a PF-update request: for each
 * tunnel type, take the requested bit when it is in the update mask,
 * otherwise keep the currently cached device setting. On BB A0 silicon
 * Geneve is unsupported and is stripped from the request. The result is
 * written back into p_src->tunn_mode.
 */
static void
ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunn_update_params *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
    unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
    unsigned long update_mask = p_src->tunn_mode_update_mask;
    unsigned long tunn_mode = p_src->tunn_mode;
    unsigned long new_tunn_mode = 0;

    if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
    } else {
        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
    } else {
        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
    } else {
        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
    }

    /* BB A0 chips have no Geneve support; drop the request early. */
    if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
        if (p_src->update_geneve_udp_port)
            DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
        p_src->update_geneve_udp_port = 0;
        p_src->tunn_mode = new_tunn_mode;
        return;
    }

    if (p_src->update_geneve_udp_port) {
        p_tunn_cfg->set_geneve_udp_port_flg = 1;
        p_tunn_cfg->geneve_udp_port =
            OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
    } else {
        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN,
                          &cached_tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
    } else {
        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN,
                          &cached_tunn_mode))
            OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
    }

    p_src->tunn_mode = new_tunn_mode;
}

/* Build the pf_update ramrod's tunnel configuration from the (already
 * fixed-up) update parameters: classification types, UDP ports and Tx
 * enables per tunnel type. Geneve fields are skipped on BB A0.
 */
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunn_update_params *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
    unsigned long tunn_mode = p_src->tunn_mode;
    enum tunnel_clss type;

    ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
    p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
    p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

    type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
    p_tunn_cfg->tunnel_clss_vxlan = type;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
    p_tunn_cfg->tunnel_clss_l2gre = type;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
    p_tunn_cfg->tunnel_clss_ipgre = type;

    if (p_src->update_vxlan_udp_port) {
        p_tunn_cfg->set_vxlan_udp_port_flg = 1;
        p_tunn_cfg->vxlan_udp_port =
            OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_l2gre = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_ipgre = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_vxlan = 1;

    /* No Geneve on BB A0 silicon. */
    if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
        if (p_src->update_geneve_udp_port)
            DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
        p_src->update_geneve_udp_port = 0;
        return;
    }

    if (p_src->update_geneve_udp_port) {
        p_tunn_cfg->set_geneve_udp_port_flg = 1;
        p_tunn_cfg->geneve_udp_port =
            OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_l2geneve = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_ipgeneve = 1;

    type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
    p_tunn_cfg->tunnel_clss_l2geneve = type;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
    p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

/* Program the hardware Rx engines (GRE/VXLAN/Geneve enables) to match the
 * given tunnel-mode bitmap; Geneve programming is skipped on BB A0.
 */
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   unsigned long tunn_mode)
{
    u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
    u8 l2geneve_enable = 0, ipgeneve_enable = 0;

    if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
        l2gre_enable = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
        ipgre_enable = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
        vxlan_enable = 1;

    ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
    ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);

    if (ECORE_IS_BB_A0(p_hwfn->p_dev))
        return;

    if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
        l2geneve_enable = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
        ipgeneve_enable = 1;

    ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
                            ipgeneve_enable);
}

/* Build the pf_start ramrod's tunnel configuration from the start-time
 * parameters (no-op when p_src is NULL). Mirrors the pf_update variant
 * but without the update-mask fix-up step.
 */
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
                               struct ecore_tunn_start_params *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
{
    unsigned long tunn_mode;
    enum tunnel_clss type;

    if (!p_src)
        return;

    tunn_mode = p_src->tunn_mode;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
    p_tunn_cfg->tunnel_clss_vxlan = type;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
    p_tunn_cfg->tunnel_clss_l2gre = type;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
    p_tunn_cfg->tunnel_clss_ipgre = type;

    if (p_src->update_vxlan_udp_port) {
        p_tunn_cfg->set_vxlan_udp_port_flg = 1;
        p_tunn_cfg->vxlan_udp_port =
            OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_l2gre = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_ipgre = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_vxlan = 1;

    /* No Geneve on BB A0 silicon. */
    if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
        if (p_src->update_geneve_udp_port)
            DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
        p_src->update_geneve_udp_port = 0;
        return;
    }

    if (p_src->update_geneve_udp_port) {
        p_tunn_cfg->set_geneve_udp_port_flg = 1;
        p_tunn_cfg->geneve_udp_port =
            OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
    }

    if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_l2geneve = 1;

    if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
        p_tunn_cfg->tx_enable_ipgeneve = 1;

    type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
    p_tunn_cfg->tunnel_clss_l2geneve = type;
    type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
    p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

/* Send the COMMON_RAMROD_PF_START ramrod that brings the PF up in firmware:
 * event-queue/consolidation-queue addresses, MF mode, tunnel configuration,
 * SR-IOV VF range and HSI version. On success, also programs the hardware
 * tunnel mode and caches it on the device.
 */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_tunn_start_params *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
{
    struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
    u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
    u8 sb_index = p_hwfn->p_eq->eq_sb_index;
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    struct ecore_sp_init_data init_data;
    enum _ecore_status_t rc = ECORE_NOTIMPL;
    u8 page_cnt;

    /* update initial eq producer */
    ecore_eq_prod_update(p_hwfn,
                         ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

    /* Initialize the SPQ entry for the ramrod */
    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
    init_data.cid = ecore_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

    rc = ecore_sp_init_request(p_hwfn, &p_ent,
                               COMMON_RAMROD_PF_START,
                               PROTOCOLID_COMMON, &init_data);
    if (rc != ECORE_SUCCESS)
        return rc;

    /* Fill the ramrod data */
    p_ramrod = &p_ent->ramrod.pf_start;
    p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
    p_ramrod->event_ring_sb_index = sb_index;
    p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

    /* For easier debugging */
    p_ramrod->dont_log_ramrods = 0;
    p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

    switch (mode) {
    case ECORE_MF_DEFAULT:
    case ECORE_MF_NPAR:
        p_ramrod->mf_mode = MF_NPAR;
        break;
    case ECORE_MF_OVLAN:
        p_ramrod->mf_mode = MF_OVLAN;
        break;
    default:
        DP_NOTICE(p_hwfn, true,
                  "Unsupported MF mode, init as DEFAULT\n");
        p_ramrod->mf_mode = MF_NPAR;
    }
    p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

    /* Place EQ address in RAMROD */
    DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                   p_hwfn->p_eq->chain.pbl.p_phys_table);
    page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
    p_ramrod->event_ring_num_pages = page_cnt;
    DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                   p_hwfn->p_consq->chain.pbl.p_phys_table);

    ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                   &p_ramrod->tunnel_config);

    if (IS_MF_SI(p_hwfn))
        p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

    switch (p_hwfn->hw_info.personality) {
    case ECORE_PCI_ETH:
        p_ramrod->personality = PERSONALITY_ETH;
        break;
    default:
        DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                  p_hwfn->hw_info.personality);
        p_ramrod->personality = PERSONALITY_ETH;
    }

    if (p_hwfn->p_dev->p_iov_info) {
        struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

        p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
        p_ramrod->num_vfs = (u8)p_iov->total_vfs;
    }
    /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
     * version is available.
     */
    p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
    p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

    DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
               "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
               sb, sb_index, p_ramrod->outer_tag);

    rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

    if (p_tunn) {
        /* Reflect the negotiated tunnel mode into hardware and cache. */
        ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
                               p_tunn->tunn_mode);
        p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
    }

    return rc;
}

/* Send a pf_update ramrod carrying the current DCBx results to firmware;
 * completion is delivered via callback (CB mode).
 */
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    struct ecore_sp_init_data init_data;
    enum _ecore_status_t rc = ECORE_NOTIMPL;

    /* Get SPQ entry */
    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
    init_data.cid = ecore_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = ECORE_SPQ_MODE_CB;

    rc = ecore_sp_init_request(p_hwfn, &p_ent,
                               COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                               &init_data);
    if (rc != ECORE_SUCCESS)
        return rc;

    ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                    &p_ent->ramrod.pf_update);

    return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Send a rate-limiter update ramrod, copying the QCN/DCQCN parameters from
 * `params` into the firmware structure (with CPU-to-LE conversion for the
 * multi-byte fields). Note: "timeuot" is the firmware HSI field spelling.
 */
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
                                        struct ecore_rl_update_params *params)
{
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    enum _ecore_status_t rc = ECORE_NOTIMPL;
    struct rl_update_ramrod_data *rl_update;
    struct ecore_sp_init_data init_data;

    /* Get SPQ entry */
    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
    init_data.cid = ecore_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

    rc = ecore_sp_init_request(p_hwfn, &p_ent,
                               COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
                               &init_data);
    if (rc != ECORE_SUCCESS)
        return rc;

    rl_update = &p_ent->ramrod.rl_update;

    rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
    rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
    rl_update->rl_init_flg = params->rl_init_flg;
    rl_update->rl_start_flg = params->rl_start_flg;
    rl_update->rl_stop_flg = params->rl_stop_flg;
    rl_update->rl_id_first = params->rl_id_first;
    rl_update->rl_id_last = params->rl_id_last;
    rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
    rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
    rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
    rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
    rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
    rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
    rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
    rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
        params->dcqcn_timeuot_us);
    rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

    return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set pf update ramrod command params */
/* Send a pf_update ramrod carrying a tunnel reconfiguration, then (on
 * success) program the new VXLAN/Geneve destination UDP ports and the
 * hardware tunnel mode, caching the mode on the device.
 */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                            struct ecore_tunn_update_params *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
{
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    struct ecore_sp_init_data init_data;
    enum _ecore_status_t rc = ECORE_NOTIMPL;

    /* Get SPQ entry */
    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
    init_data.cid = ecore_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_data;

    rc = ecore_sp_init_request(p_hwfn, &p_ent,
                               COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                               &init_data);
    if (rc != ECORE_SUCCESS)
        return rc;

    ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                    &p_ent->ramrod.pf_update.tunnel_config);

    rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
    if (rc != ECORE_SUCCESS)
        return rc;

    if (p_tunn->update_vxlan_udp_port)
        ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                  p_tunn->vxlan_udp_port);
    if (p_tunn->update_geneve_udp_port)
        ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                   p_tunn->geneve_udp_port);

    ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
    p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;

    return rc;
}

/* Send the COMMON_RAMROD_PF_STOP ramrod to tear the PF down in firmware;
 * blocks (EBLOCK) until the firmware completes it.
 */
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    struct ecore_sp_init_data init_data;
    enum _ecore_status_t rc = ECORE_NOTIMPL;

    /* Get SPQ entry */
    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
    init_data.cid = ecore_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

    rc = ecore_sp_init_request(p_hwfn, &p_ent,
                               COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                               &init_data);
    if (rc != ECORE_SUCCESS)
        return rc;

    return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Send an empty ("heartbeat") ramrod; used to verify the firmware slow
 * path is still responsive.
 */
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
    struct ecore_spq_entry *p_ent = OSAL_NULL;
    struct ecore_sp_init_data init_data;
    enum _ecore_status_t rc;

    /* Get SPQ entry */
    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
    init_data.cid = ecore_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

    rc = ecore_sp_init_request(p_hwfn, &p_ent,
                               COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                               &init_data);
    if (rc != ECORE_SUCCESS)
        return rc;

    return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
vicharl/containerdns
kdns/src/domain_update.h
#ifndef __DOMAIN_UPDATE_H__
#define __DOMAIN_UPDATE_H__

#include "db_update.h"

/* Textual DNS-service status values reported by the info-exchange API. */
#define DNS_STATUS_INIT "init"
#define DNS_STATUS_RUN "running"

/* Start the domain-info exchange web service on web_port; when ssl_enable
 * is non-zero, serve over TLS using the given key/cert PEM files.
 */
void domian_info_exchange_run(uint16_t web_port, int ssl_enable, char *key_pem_file, char *cert_pem_file);

/* Delete the given zones from the domain list.
 * NOTE(review): return-value semantics (0 on success?) are not visible
 * here — confirm against the implementation.
 */
int domain_list_del_zones(char *del_zones);

/* One-time initialization of master-side domain info state. */
void domain_info_master_init(void);

#endif
vicharl/containerdns
kdns/src/hashMap.h
<filename>kdns/src/hashMap.h #ifndef _DNSHASHMAP_H_ #define _DNSHASHMAP_H_ #define HASH_NODE_FIND 1 typedef struct hashNode_ { char *key; void *data; int fingerprint; struct hashNode_ *next; } hashNode; typedef struct hashMap_ { hashNode **hashBuckets; rte_rwlock_t *locks; unsigned int bucketsSize; // 2^x -1 unsigned int lockSize; // 2^x -1 unsigned int (*hashFun)(char *key); int (*equalFun)(char *key, hashNode *node, void *check); // check the k-v int (*queryFun)(hashNode *node, void *arg); // check the node int (*checkExpiredFun)(hashNode *node, void *arg); int (*getAllNodeFun)(hashNode *node, void *arg); } hashMap; unsigned int elfHashDomain(char *str); void hmap_update(hashMap *map, char *key, void *check, void *new_data); int hmap_check_expired(hashMap *map, void *arg); int hmap_get_all(hashMap *map, void *arg); int hmap_lookup(hashMap *map, char *key, void *check, void *arg); void hmap_del(hashMap *map, char *key, void *check); void hmap_del_all(hashMap *map); hashMap *hmap_create(int bucketsSize, int lockSize, unsigned int (*hashFun)(char *key), int (*equalFun)(char *key, hashNode *node, void *check), int (*queryFun)(hashNode *node, void *arg), int (*checkExpiredFun)(hashNode *node, void *arg), int (*getAllNodeFun)(hashNode *node, void *arg)); #endif /*_DNSHASHMAP_H_*/