text
stringlengths
9
39.2M
dir
stringlengths
26
295
lang
stringclasses
185 values
created_date
timestamp[us]
updated_date
timestamp[us]
repo_name
stringlengths
1
97
repo_full_name
stringlengths
7
106
star
int64
1k
183k
len_tokens
int64
1
13.8M
```css .feedback-div, .feedback-btn, .update-btn { display: inline-block; } @keyframes rotating { from { transform: rotate(0deg); } to { transform: rotate(-360deg); } } .rotating { animation: rotating 2s linear infinite; } ```
/content/code_sandbox/app/src/app/feedback/feedback.component.css
css
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
65
```css .request-div { /*border-left: 1px solid #ddd;*/ /*border-right: 1px solid #ddd;*/ /*border-top: 1px solid #ddd;*/ display: flex; flex-direction: row; margin-top: 10px; margin-bottom: 10px; padding-top: 10px; position: relative; animation: from-top 0.8s ease 1; } @keyframes from-top { 0% {top: -40px; opacity: 0;} 100% {top: 0px; opacity: 1;} } .request--icon { width: 80px; min-width: 80px; height: 80px; margin-bottom: 10px; margin-top: 10px; margin-right: 10px; background-size: cover; border: 10px solid #1a1a1a; border-radius: 50px; } .request--progress--div { width: 100%; height: 100%; display: flex; flex-direction: column; justify-content: center; text-align: center; border-radius: 50px; } .request--progress--div.displayed { background-color: rgba(0, 0, 0, 0.5); } /*.request--progress { font-weight: 600; font-size: 20px; }*/ .request--eta { font-size: 14px; color: #bbb; } .request--core { padding: 10px; padding-top: 20px; } .request--title { font-size: 18px; font-weight: 500; } .request--actions { width: 80px; } .request--artist { font-size: 14px; color: #ddd; font-weight: 100; } .request--length { margin-top: 8px; font-size: 12px; color: #ddd; font-weight: 100; } .request--sub { font-size: 11px; font-weight: 100; margin-top: 5px; } .request--sub--running { color:transparent; -webkit-background-clip: text; background-clip: text; } .request-subs-open { color: #ff8a65; font-weight: 100; font-style: italic; font-size: 12px; cursor: pointer; width: 80px; } .request-subs-opened { padding-bottom: 8px; border-bottom: 1px solid #ff8a65; } .abort--icon { font-size: 30px; margin-bottom: -6px; } .abort--legend { font-weight: 100; text-transform: uppercase; font-size: 12px; } ```
/content/code_sandbox/app/src/app/request/request.component.css
css
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
624
```html <div class="feedback-div"> <div class="feedback-btn clickable tooltip" (click)="wantFeedback()"> <i class="icon-mail"></i> <span class="tooltiptext" i18n>An issue, an idea?</span> </div> <div class="update-btn" (click)="installUpdate()" *ngIf="updateAvailable" [ngClass]="{'clickable': (updateDownloaded && alltomp3.numberActive === 0)}"> <div class="tooltip"> <div [ngClass]="{'rotating': !updateDownloaded}"><i class="icon-cycle"></i></div> <span class="tooltiptext" i18n *ngIf="!updateDownloaded">An update is downloading</span> <span class="tooltiptext" i18n *ngIf="updateDownloaded">An update has been downloaded and will be installed at the next app launch<span *ngIf="alltomp3.numberActive === 0">, or click to relaunch the app now</span></span> </div> </div> </div> ```
/content/code_sandbox/app/src/app/feedback/feedback.component.html
html
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
241
```html <div class="help-div-arrow"> <div class="arrow thisarrow"></div> </div> <div class="help-div-title" i18n>Enter:</div> <div class="help-div-container"> <div class="help-div-possibility"> <div class="possibility-title" i18n>Songs</div> <ul> <li *ngFor="let proposal of helpProposalsSongs" (click)="selectProposal(proposal)">{{ proposal }}</li> </ul> </div> <div class="help-div-sep-div"> <div class="help-div-sep-title" i18n>OR</div> </div> <div class="help-div-possibility"> <div class="possibility-title">URLs</div> <ul> <li *ngFor="let proposal of helpProposalsURLs" (click)="selectProposal(proposal)">{{ proposal }}</li> </ul> </div> </div> ```
/content/code_sandbox/app/src/app/help/help.component.html
html
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
215
```css .help-div-title { text-align: center; width: 65px; margin: 0 auto; margin-top: 60px; padding: 3px; text-transform: uppercase; color: #ff8a65; font-weight: bold; border-bottom: 1px solid white; border-top: 1px solid #ff8a65; } .help-div-container { display: flex; flex-direction: row; } .help-div-possibility { flex: 1 1 0; min-width: 0; padding: 10px; padding-bottom: 0px; } .possibility-title { text-align: center; font-variant: small-caps; color: #ff8a65; margin-bottom: 10px; } .help-div-possibility ul { padding: 0; margin: 0; list-style-type: none; } .help-div-possibility li { padding: 0; margin: 0; border: 1px solid #ff8a65; padding: 5px; margin-top: 5px; margin-bottom: 5px; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; display: block; cursor: pointer; } .help-div-possibility li:hover { background-color: #222; } .help-div-sep-div { border-left: 1px solid white; width: 1px; flex-basis: 0; } .help-div-sep-title { position: relative; width: 11px; height: 11px; border-radius: 10px; border: 1px solid white; background-color: #111; text-align: center; font-size: 10px; padding: 4px; left: -10px; top: 10px; } .help-div-arrow { text-align: center; } .arrow { border-left: 1px solid #ff8a65; display: block; } .arrow:before { content: ''; height: 8px; width: 8px; border-radius: 4px; background-color: #ff8a65; display: block; position: relative; top: -4px; left: -4px; } .arrow:after { content: ''; height: 180px; border-left: 1px solid #ff8a65; display: block; position: absolute; bottom: -147px; left: 68px; transform: rotate(-50deg); } .thisarrow { display: inline-block; position: relative; top: -50px; left: -140px; height: 60px; /*transform: rotate(30deg);*/ } ```
/content/code_sandbox/app/src/app/help/help.component.css
css
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
628
```css .suggestion { padding: 8px; cursor: pointer; background-color: #1a1a1a; color: white; font-weight: 200; font-size: 14px; } .suggestion--active { background-color: #222; } .suggestion:hover { background-color: #222; } .suggestion--last { border-bottom-right-radius: 15px; border-bottom-left-radius: 15px; } ```
/content/code_sandbox/app/src/app/suggestion/suggestion.component.css
css
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
100
```css .news { margin-bottom: 30px; padding: 15px; background-color: #1a1a1a; border-radius: 15px; } .news-title { font-weight: 100; font-size: 16px; margin-bottom: 6px; } .news-content { font-weight: 100; font-size: 12px; } .news-header { margin-top: 40px; text-align: center; color: #ff8a65; font-weight: 100; font-size: 20px; margin-bottom: 20px; } ```
/content/code_sandbox/app/src/app/news/news.component.css
css
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
135
```html <div *ngIf="releaseNotes.length > 0" class="news-header" i18n> Update's changes </div> <div *ngFor="let v of releaseNotes" class="news"> <div class="news-title"> Version {{ v.version }} </div> <div class="news-content" [innerHTML]="v.notes"></div> </div> <div *ngIf="news.length > 0" class="news-header" i18n> News </div> <div *ngFor="let n of news" class="news"> <div class="news-title"> {{ n.title }} </div> <div class="news-content" [innerHTML]="n.content"></div> </div> ```
/content/code_sandbox/app/src/app/news/news.component.html
html
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
158
```html <div class="suggestion" [ngClass]="{'suggestion--active': active, 'suggestion--last': last}"> <i [ngClass]="{'icon-vinyl': (type == 'album'), 'icon-note': (type == 'track')}"></i> {{ suggestion.title }} - <b>{{ suggestion.artistName }}</b> </div> ```
/content/code_sandbox/app/src/app/suggestion/suggestion.component.html
html
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
76
```css .open-path { cursor: pointer; } ```
/content/code_sandbox/app/src/app/saving-path/saving-path.component.css
css
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
10
```html <div class="legend" i18n>Songs are downloaded in <span (click)="openPath()" class="label">{{ path }}</span> <i class="icon-edit open-path" (click)="changePath()"></i></div> ```
/content/code_sandbox/app/src/app/saving-path/saving-path.component.html
html
2016-07-29T12:27:38
2024-08-14T13:08:01
alltomp3-app
AllToMP3/alltomp3-app
1,313
50
```yaml # .readthedocs.yml # Required version: 2 mkdocs: configuration: mkdocs.yml ```
/content/code_sandbox/readthedocs.yml
yaml
2016-07-10T15:18:14
2024-08-15T16:38:12
ue5-style-guide
Allar/ue5-style-guide
5,102
25
```yaml site_name: Linter and Style Guide Documentation nav: - Home: index.md - Getting Started: gettingstarted.md - How Does Linting Work?: howitworks.md - Unreal Engine Marketplace Guidelines: unrealguidelines.md - Gamemakin LLC Style Guide: style.md - TODO: todo.md theme: readthedocs ```
/content/code_sandbox/mkdocs.yml
yaml
2016-07-10T15:18:14
2024-08-15T16:38:12
ue5-style-guide
Allar/ue5-style-guide
5,102
79
```html <!DOCTYPE html> <html> <head lang="en"> <meta charset="UTF-8"> <title>AlloyFinger</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no"/> <style> html,body{ margin: 0; padding: 0; border: 0; background-color: #ccc; text-align: center; font: 14px / 1.5 "Helvetica Neue",Helvetica,Arial,"Microsoft Yahei","Hiragino Sans GB","Heiti SC","WenQuanYi Micro Hei",sans-serif; } .header{ background-color: #333; height: 40px; color:white; text-align: left; text-indent: 20px; font-weight: bold; font-size: 20px; line-height: 40px; } .title{ height: 30px; color:#333333; font-size: 20px; line-height: 30px; } .example img{ width:160px ; } .imgBox{ margin-bottom: 10px; border-bottom: 1px solid #333333; } .swipeBox{ height:160px; width: 160px; margin: 0 auto; overflow: hidden; font-size: 0; position: relative; border: 2px solid #ccc; box-sizing: border-box; } .scroll{ width: 480px; height: 160px; white-space: nowrap; } .nuclear-nav { position: absolute; bottom: 6px; right: 10px; } .nuclear-nav a { display: inline-block; background-color: white; cursor: pointer; width: 10px; height: 10px; -moz-border-radius: 5px; -webkit-border-radius: 5px; border-radius: 5px; margin-right: 5px; border: 1px solid #808080; } .nuclear-nav a.active { background-color: #ffd800; } .longTapBox{ position: relative; margin: 0 auto; width: 160px; height: 160px; overflow: hidden; -webkit-user-select: none; -webkit-user-drag: none; -webkit-touch-callout: none; user-select: none; user-drag: none; touch-callout: none; -webkit-tap-highlight-color: rgba(0, 0, 0, 0); } .longTapBox img{ pointer-events: none; } .overlay2,.overlay{ background-color: rgba(70, 70, 70, 0.8); position: absolute; top:0; left: 0; width: 160px; height: 160px; display: none; } .overlay2 img, .overlay img{ width: 40px; height: 40px; position: absolute; top: 60px; left: 60px; } .pb6{ padding-bottom: 6px; } .ribbon { top: 3.2em; right: -3.7em; -webkit-transform: rotate(45deg); 
-moz-transform: rotate(45deg); -ms-transform: rotate(45deg); -o-transform: rotate(45deg); transform: rotate(45deg); color:#fff; display: block; padding: .6em 3.5em; position: fixed; text-align: center; text-decoration: none; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; background-color: green; z-index: 10000; } .btn { letter-spacing: 3px; display: inline-block; color: white; width: 270px; height: 45px; font-size: 24px; line-height: 45px; background-color: green; -moz-border-radius: 2px; -webkit-border-radius: 2px; border-radius: 2px; text-decoration: none; } .footer{ height: 120px; } .linkCanvasBox{ margin-top: 10px; } .linkCanvasBox .btn{ font-size: 16px; } </style> </head> <body> <a href="path_to_url" class="ribbon">Fork me on Github</a> <div class="header">AlloyFinger</div> <div class="example"> <div class="title">pinch</div> <div class="imgBox"> <img id="pinchImg" src="asset/test.png" /> </div> <div class="title">rotate</div> <div class="imgBox"> <img id="rotateImg" src="asset/test.png" /> </div> <div class="title">pinch+rotate</div> <div class="imgBox"> <img id="pinchRotateImg" src="asset/test.png" /> </div> <div class="title">pressMove</div> <div class="imgBox"> <img id="pressMoveImg" src="asset/test.png" /> </div> <div class="title">doubleTap</div> <div class="imgBox"> <img id="doubleTapImg" src="asset/test.png" /> </div> <div class="title">swipe</div> <div class="imgBox pb6"> <div class="swipeBox"> <div class="scroll" id="swipeScroll"> <img src="asset/test.png" /> <img src="asset/test2.png" /> <img src="asset/test3.png" /> </div> <div class="nuclear-nav"> <a data-index="0" class="active"></a> <a data-index="1" class=" "></a> <a data-index="2" class=" "></a> </div> </div> </div> <div class="title">longTap</div> <div class="imgBox pb6"> <div class=" longTapBox" id="longTapBox"> <img id="longTapImg" src="asset/test.png" /> <div class="overlay" id="overlay"> <img src="asset/yes.png"/> </div> </div> </div> <div 
class="title">tap</div> <div class="imgBox pb6"> <div class=" longTapBox" id="tapBox"> <img src="asset/test.png" /> <div class="overlay2" id="overlay2"> <img src="asset/yes.png"/> </div> </div> </div> </div> <div class="footer"> <div> <a class="btn" href="path_to_url"></a></div> </div> <script src="asset/transform.js"></script> <script src="alloy_finger.js"></script> <script src="asset/to.js"></script> <script> var pinchImg = document.getElementById("pinchImg"); Transform(pinchImg); var initScale = 1; new AlloyFinger(pinchImg, { multipointStart: function () { initScale = pinchImg.scaleX; }, pinch: function (evt) { pinchImg.scaleX = pinchImg.scaleY = initScale * evt.zoom; } }); var rotateImg = document.getElementById("rotateImg"); Transform(rotateImg); new AlloyFinger(rotateImg, { rotate:function(evt){ rotateImg.rotateZ += evt.angle; } }); var pinchRotateImg = document.getElementById("pinchRotateImg"); Transform(pinchRotateImg); new AlloyFinger(pinchRotateImg, { rotate:function(evt){ pinchRotateImg.rotateZ += evt.angle; }, multipointStart: function () { initScale = pinchRotateImg.scaleX; }, pinch: function (evt) { pinchRotateImg.scaleX = pinchRotateImg.scaleY = initScale * evt.zoom; } }); var pressMoveImg = document.getElementById("pressMoveImg"); Transform(pressMoveImg); new AlloyFinger(pressMoveImg, { pressMove:function(evt){ pressMoveImg.translateX += evt.deltaX; pressMoveImg.translateY += evt.deltaY; evt.preventDefault(); } }); function ease(x) { return Math.sqrt(1 - Math.pow(x - 1, 2)); } var doubleTapImg = document.getElementById("doubleTapImg"); Transform(doubleTapImg); new AlloyFinger(doubleTapImg, { doubleTap:function(){ if(doubleTapImg.scaleX===1){ new To(doubleTapImg, "scaleX", 2, 500, ease); new To(doubleTapImg, "scaleY", 2, 500, ease); }else if(doubleTapImg.scaleX===2){ new To(doubleTapImg, "scaleX", 1, 500, ease); new To(doubleTapImg, "scaleY", 1, 500, ease); } } }); var swipeScroll = document.getElementById("swipeScroll"), currentIndex=0; 
Transform(swipeScroll); function activeNav(index){ var items = document.querySelectorAll(".nuclear-nav a"), i = 0, len = items.length; for (; i < len; i++) { if (i === index) { items[i].classList.add("active"); } else { items[i].classList.remove("active"); } } } new AlloyFinger(swipeScroll, { touchMove:function(evt) { if (Math.abs(evt.deltaX) >= Math.abs(evt.deltaY)) { evt.preventDefault(); } }, swipe:function(evt){ if(evt.direction==="Left"){ if(currentIndex<2) { currentIndex++; new To(swipeScroll, "translateX", -160 * currentIndex, 500, ease, function () { activeNav(currentIndex); }); } }else if(evt.direction==="Right"){ if(currentIndex>0) { currentIndex--; new To(swipeScroll, "translateX", -160 * currentIndex, 500, ease, function () { activeNav(currentIndex); }); } } } }); var longTapBox = document.getElementById("longTapBox"); Transform(longTapBox); var overlay=document.getElementById("overlay"); new AlloyFinger(longTapBox, { longTap:function(evt){ evt.preventDefault(); toggleDom(overlay); } }); var tapBox = document.getElementById("tapBox"); Transform(tapBox); var overlay2=document.getElementById("overlay2"); new AlloyFinger(tapBox, { tap:function(){ toggleDom(overlay2); }, singleTap:function(){ console.log("singleTap") }, doubleTap:function(){ console.log("doubleTap") }, pointStart:function(){ console.log("pointStart") } }); function toggleDom(dom){ var displayValue=window.getComputedStyle(dom,null)["display"]; if(displayValue==="none"){ dom.style.display="block"; }else{ dom.style.display="none"; } } </script> </body> </html> ```
/content/code_sandbox/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
2,608
```html <!DOCTYPE html> <html> <head> <title>Tap State</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no" /> <style> #test { -webkit-tap-highlight-color: rgba(0,0,0,0); background-color: green; } #test.active { background-color: red; } .ribbon { top: 3.2em; right: -4.7em; -webkit-transform: rotate(45deg); -moz-transform: rotate(45deg); -ms-transform: rotate(45deg); -o-transform: rotate(45deg); transform: rotate(45deg); color:#fff; display: block; padding: .6em 3.5em; position: fixed; text-align: center; text-decoration: none; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; background-color: #6c7211; z-index: 10000; } </style> </head> <body style="text-align: center; font-family: Segoe UI Light,Segoe UI,Microsoft Jhenghei,Mirco Yahei,'sans-serif'; background-color: black; color: white;"> <a href="path_to_url" class="ribbon">Fork me on Github</a> <div id="test" style="width: 280px; height: 200px; margin: 0 auto; line-height: 200px; font-size: 30px;">Tap Me</div> <div id="result" style="font-size: 18px;"></div> <script src="../../alloy_finger.js"></script> <div> <script> var result = document.querySelector("#result"), testDiv = document.querySelector("#test"), html = ""; new AlloyFinger(testDiv, { touchStart: function () { html += ""; result.innerHTML = html; addClass(testDiv, "active"); }, touchMove: function () { removeClass(testDiv, "active"); }, touchEnd: function () { removeClass(testDiv, "active"); }, touchCancel: function () { removeClass(testDiv, "active"); }, tap: function () { html += "tap<br/>"; result.innerHTML = html; } }) function hasClass(ele, cls) { return !!ele.className.match(new RegExp('(\\s|^)' + cls + '(\\s|$)')); } function addClass(ele, cls) { if (!hasClass(ele, cls)) ele.className += " " + cls; } function removeClass(ele, cls) { if (hasClass(ele, cls)) { var reg = new RegExp('(\\s|^)' + cls + '(\\s|$)'); ele.className = 
ele.className.replace(reg, ' '); } } </script> </div> </body> </html> ```
/content/code_sandbox/example/tap_state/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
672
```html <!DOCTYPE html> <html> <head> <title>Event Test</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no" /> <style> #test { -webkit-tap-highlight-color: rgba(0,0,0,0); background-color: green; } </style> </head> <body style="text-align: center; font-family: Segoe UI Light,Segoe UI,Microsoft Jhenghei,Mirco Yahei,'sans-serif'; background-color: black; color: white;height: 1000px;"> <div id="test" style="width: 280px; height: 200px; margin: 0 auto; line-height: 200px; font-size: 30px;">Touch Me</div> <div id="result" style="font-size: 18px;"></div> <script src="../../alloy_finger.js"></script> <div> <script> var result = document.querySelector("#result"), testDiv = document.querySelector("#test"), html = ""; new AlloyFinger(testDiv, { touchStart: function () { html = ""; html += "start<br/>"; result.innerHTML = html; }, touchEnd: function () { html += "end<br/>"; result.innerHTML = html; }, tap: function () { html += "tap<br/>"; result.innerHTML = html; }, rotate: function (evt) { html += "rotate [" + evt.angle + "]<br/>"; result.innerHTML = html; }, pinch: function (evt) { html += "pinch [" + evt.scale + "]<br/>"; result.innerHTML = html; }, pressMove: function (evt) { html += "pressMove [" + evt.deltaX.toFixed(4) + "|" + evt.deltaY.toFixed(4) + "]<br/>"; result.innerHTML = html; }, swipe: function (evt) { html += "swipe [" + evt.direction+"]<br/>"; result.innerHTML = html; }, twoFingerPressMove: function (evt) { html += "twoFingerPressMove [" + evt.deltaX.toFixed(4) + "|" + evt.deltaY.toFixed(4) + "]<br/>"; result.innerHTML = html; evt.preventDefault(); } }) </script> </div> </body> </html> ```
/content/code_sandbox/example/test/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
537
```javascript /* AlloyFinger v0.1.15 * By dntzhang * Github: path_to_url */ ; (function () { function getLen(v) { return Math.sqrt(v.x * v.x + v.y * v.y); } function dot(v1, v2) { return v1.x * v2.x + v1.y * v2.y; } function getAngle(v1, v2) { var mr = getLen(v1) * getLen(v2); if (mr === 0) return 0; var r = dot(v1, v2) / mr; if (r > 1) r = 1; return Math.acos(r); } function cross(v1, v2) { return v1.x * v2.y - v2.x * v1.y; } function getRotateAngle(v1, v2) { var angle = getAngle(v1, v2); if (cross(v1, v2) > 0) { angle *= -1; } return angle * 180 / Math.PI; } var HandlerAdmin = function(el) { this.handlers = []; this.el = el; }; HandlerAdmin.prototype.add = function(handler) { this.handlers.push(handler); } HandlerAdmin.prototype.del = function(handler) { if(!handler) this.handlers = []; for(var i=this.handlers.length; i>=0; i--) { if(this.handlers[i] === handler) { this.handlers.splice(i, 1); } } } HandlerAdmin.prototype.dispatch = function() { for(var i=0,len=this.handlers.length; i<len; i++) { var handler = this.handlers[i]; if(typeof handler === 'function') handler.apply(this.el, arguments); } } function wrapFunc(el, handler) { var handlerAdmin = new HandlerAdmin(el); handlerAdmin.add(handler); return handlerAdmin; } var AlloyFinger = function (el, option) { this.element = typeof el == 'string' ? 
document.querySelector(el) : el; this.start = this.start.bind(this); this.move = this.move.bind(this); this.end = this.end.bind(this); this.cancel = this.cancel.bind(this); this.element.addEventListener("touchstart", this.start, false); this.element.addEventListener("touchmove", this.move, false); this.element.addEventListener("touchend", this.end, false); this.element.addEventListener("touchcancel", this.cancel, false); this.preV = { x: null, y: null }; this.pinchStartLen = null; this.zoom = 1; this.isDoubleTap = false; var noop = function () { }; this.rotate = wrapFunc(this.element, option.rotate || noop); this.touchStart = wrapFunc(this.element, option.touchStart || noop); this.multipointStart = wrapFunc(this.element, option.multipointStart || noop); this.multipointEnd = wrapFunc(this.element, option.multipointEnd || noop); this.pinch = wrapFunc(this.element, option.pinch || noop); this.swipe = wrapFunc(this.element, option.swipe || noop); this.tap = wrapFunc(this.element, option.tap || noop); this.doubleTap = wrapFunc(this.element, option.doubleTap || noop); this.longTap = wrapFunc(this.element, option.longTap || noop); this.singleTap = wrapFunc(this.element, option.singleTap || noop); this.pressMove = wrapFunc(this.element, option.pressMove || noop); this.twoFingerPressMove = wrapFunc(this.element, option.twoFingerPressMove || noop); this.touchMove = wrapFunc(this.element, option.touchMove || noop); this.touchEnd = wrapFunc(this.element, option.touchEnd || noop); this.touchCancel = wrapFunc(this.element, option.touchCancel || noop); this._cancelAllHandler = this.cancelAll.bind(this); window.addEventListener('scroll', this._cancelAllHandler); this.delta = null; this.last = null; this.now = null; this.tapTimeout = null; this.singleTapTimeout = null; this.longTapTimeout = null; this.swipeTimeout = null; this.x1 = this.x2 = this.y1 = this.y2 = null; this.preTapPosition = { x: null, y: null }; }; AlloyFinger.prototype = { start: function (evt) { if (!evt.touches) 
return; this.now = Date.now(); this.x1 = evt.touches[0].pageX; this.y1 = evt.touches[0].pageY; this.delta = this.now - (this.last || this.now); this.touchStart.dispatch(evt, this.element); if (this.preTapPosition.x !== null) { this.isDoubleTap = (this.delta > 0 && this.delta <= 250 && Math.abs(this.preTapPosition.x - this.x1) < 30 && Math.abs(this.preTapPosition.y - this.y1) < 30); if (this.isDoubleTap) clearTimeout(this.singleTapTimeout); } this.preTapPosition.x = this.x1; this.preTapPosition.y = this.y1; this.last = this.now; var preV = this.preV, len = evt.touches.length; if (len > 1) { this._cancelLongTap(); this._cancelSingleTap(); var v = { x: evt.touches[1].pageX - this.x1, y: evt.touches[1].pageY - this.y1 }; preV.x = v.x; preV.y = v.y; this.pinchStartLen = getLen(preV); this.multipointStart.dispatch(evt, this.element); } this._preventTap = false; this.longTapTimeout = setTimeout(function () { this.longTap.dispatch(evt, this.element); this._preventTap = true; }.bind(this), 750); }, move: function (evt) { if (!evt.touches) return; var preV = this.preV, len = evt.touches.length, currentX = evt.touches[0].pageX, currentY = evt.touches[0].pageY; this.isDoubleTap = false; if (len > 1) { var sCurrentX = evt.touches[1].pageX, sCurrentY = evt.touches[1].pageY var v = { x: evt.touches[1].pageX - currentX, y: evt.touches[1].pageY - currentY }; if (preV.x !== null) { if (this.pinchStartLen > 0) { evt.zoom = getLen(v) / this.pinchStartLen; this.pinch.dispatch(evt, this.element); } evt.angle = getRotateAngle(v, preV); this.rotate.dispatch(evt, this.element); } preV.x = v.x; preV.y = v.y; if (this.x2 !== null && this.sx2 !== null) { evt.deltaX = (currentX - this.x2 + sCurrentX - this.sx2) / 2; evt.deltaY = (currentY - this.y2 + sCurrentY - this.sy2) / 2; } else { evt.deltaX = 0; evt.deltaY = 0; } this.twoFingerPressMove.dispatch(evt, this.element); this.sx2 = sCurrentX; this.sy2 = sCurrentY; } else { if (this.x2 !== null) { evt.deltaX = currentX - this.x2; evt.deltaY = 
currentY - this.y2; //move //(10),tap var movedX = Math.abs(this.x1 - this.x2), movedY = Math.abs(this.y1 - this.y2); if(movedX > 10 || movedY > 10){ this._preventTap = true; } } else { evt.deltaX = 0; evt.deltaY = 0; } this.pressMove.dispatch(evt, this.element); } this.touchMove.dispatch(evt, this.element); this._cancelLongTap(); this.x2 = currentX; this.y2 = currentY; if (len > 1) { evt.preventDefault(); } }, end: function (evt) { if (!evt.changedTouches) return; this._cancelLongTap(); var self = this; if (evt.touches.length < 2) { this.multipointEnd.dispatch(evt, this.element); this.sx2 = this.sy2 = null; } //swipe if ((this.x2 && Math.abs(this.x1 - this.x2) > 30) || (this.y2 && Math.abs(this.y1 - this.y2) > 30)) { evt.direction = this._swipeDirection(this.x1, this.x2, this.y1, this.y2); this.swipeTimeout = setTimeout(function () { self.swipe.dispatch(evt, self.element); }, 0) } else { this.tapTimeout = setTimeout(function () { if(!self._preventTap){ self.tap.dispatch(evt, self.element); } // trigger double tap immediately if (self.isDoubleTap) { self.doubleTap.dispatch(evt, self.element); self.isDoubleTap = false; } }, 0) if (!self.isDoubleTap) { self.singleTapTimeout = setTimeout(function () { self.singleTap.dispatch(evt, self.element); }, 250); } } this.touchEnd.dispatch(evt, this.element); this.preV.x = 0; this.preV.y = 0; this.zoom = 1; this.pinchStartLen = null; this.x1 = this.x2 = this.y1 = this.y2 = null; }, cancelAll: function () { this._preventTap = true clearTimeout(this.singleTapTimeout); clearTimeout(this.tapTimeout); clearTimeout(this.longTapTimeout); clearTimeout(this.swipeTimeout); }, cancel: function (evt) { this.cancelAll() this.touchCancel.dispatch(evt, this.element); }, _cancelLongTap: function () { clearTimeout(this.longTapTimeout); }, _cancelSingleTap: function () { clearTimeout(this.singleTapTimeout); }, _swipeDirection: function (x1, x2, y1, y2) { return Math.abs(x1 - x2) >= Math.abs(y1 - y2) ? (x1 - x2 > 0 ? 
'Left' : 'Right') : (y1 - y2 > 0 ? 'Up' : 'Down') }, on: function(evt, handler) { if(this[evt]) { this[evt].add(handler); } }, off: function(evt, handler) { if(this[evt]) { this[evt].del(handler); } }, destroy: function() { if(this.singleTapTimeout) clearTimeout(this.singleTapTimeout); if(this.tapTimeout) clearTimeout(this.tapTimeout); if(this.longTapTimeout) clearTimeout(this.longTapTimeout); if(this.swipeTimeout) clearTimeout(this.swipeTimeout); this.element.removeEventListener("touchstart", this.start); this.element.removeEventListener("touchmove", this.move); this.element.removeEventListener("touchend", this.end); this.element.removeEventListener("touchcancel", this.cancel); this.rotate.del(); this.touchStart.del(); this.multipointStart.del(); this.multipointEnd.del(); this.pinch.del(); this.swipe.del(); this.tap.del(); this.doubleTap.del(); this.longTap.del(); this.singleTap.del(); this.pressMove.del(); this.twoFingerPressMove.del() this.touchMove.del(); this.touchEnd.del(); this.touchCancel.del(); this.preV = this.pinchStartLen = this.zoom = this.isDoubleTap = this.delta = this.last = this.now = this.tapTimeout = this.singleTapTimeout = this.longTapTimeout = this.swipeTimeout = this.x1 = this.x2 = this.y1 = this.y2 = this.preTapPosition = this.rotate = this.touchStart = this.multipointStart = this.multipointEnd = this.pinch = this.swipe = this.tap = this.doubleTap = this.longTap = this.singleTap = this.pressMove = this.touchMove = this.touchEnd = this.touchCancel = this.twoFingerPressMove = null; window.removeEventListener('scroll', this._cancelAllHandler); return null; } }; if (typeof module !== 'undefined' && typeof exports === 'object') { module.exports = AlloyFinger; } else { window.AlloyFinger = AlloyFinger; } })(); ```
/content/code_sandbox/alloy_finger.js
javascript
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
2,810
```html <!DOCTYPE html> <html> <head> <title>Event Test</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no" /> <style> #test { -webkit-tap-highlight-color: rgba(0,0,0,0); background-color: green; } </style> </head> <body style="text-align: center; font-family: Segoe UI Light,Segoe UI,Microsoft Jhenghei,Mirco Yahei,'sans-serif'; background-color: black; color: white;"> <div id="test" style="width: 280px; height: 200px; margin: 0 auto; line-height: 200px; font-size: 30px;">Touch Me</div> <div id="result" style="font-size: 18px;"></div> <script src="../../alloy_finger.js"></script> <div> <script> var result = document.querySelector("#result"), testDiv = document.querySelector("#test"), html = ""; var af = new AlloyFinger(testDiv, { touchStart: function () { html = ""; html += "start<br/>"; result.innerHTML = html; }, touchEnd: function () { html += "end<br/>"; result.innerHTML = html; }, tap: function () { html += "tap<br/>"; result.innerHTML = html; } }); var onLongTap = function() { html += "longTap<br/>"; result.innerHTML = html; } af.on('longTap', onLongTap); af.on('doubleTap', function() { html += "doubleTap<br/>"; result.innerHTML = html; af.off('longTap', onLongTap); }); </script> </div> </body> </html> ```
/content/code_sandbox/example/on_off/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
412
```html <!DOCTYPE html> <html> <head> <title></title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no"/> <style> .ribbon { top: 3.2em; right: -4.7em; -webkit-transform: rotate(45deg); -moz-transform: rotate(45deg); -ms-transform: rotate(45deg); -o-transform: rotate(45deg); transform: rotate(45deg); color:#fff; display: block; padding: .6em 3.5em; position: fixed; text-align: center; text-decoration: none; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; background-color: green; z-index: 10000; } </style> </head> <body> <a href="path_to_url" class="ribbon">Fork me on Github</a> <script src="../../asset/transform.js"></script> <script src="../../alloy_finger.js"></script> <img src="../../asset/test.png" id="testImg" alt="" /> <script> var el = document.getElementById("testImg"); Transform(el); var initScale = 1; var gesture = new AlloyFinger(el, { rotate: function (evt) { el.rotateZ += evt.angle; }, pinchStart: function () { initScale = el.scaleX; }, pinch: function (evt) { el.scaleX = el.scaleY = initScale * evt.zoom; }, pressMove: function (evt) { el.translateX += evt.deltaX; el.translateY += evt.deltaY; }, tap: function (evt) { //console.log(el.scaleX + "_" + el.scaleY + "_" + el.rotateZ + "_" + el.translateX + "_" + el.translateY); //console.log("tap"); }, doubleTap: function (evt) { //console.log("doubleTap"); }, longTap: function (evt) { //console.log("longTap"); }, swipe: function (evt) { //console.log("swipe" + evt.direction); } }); </script> </body> </html> ```
/content/code_sandbox/example/simple/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
524
```html <!DOCTYPE html> <html> <head> <meta charset="utf-8"> <title>Canvas+AlloyFinger</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no"/> <style> .ribbon { top: 3.2em; right: -4.7em; -webkit-transform: rotate(45deg); -moz-transform: rotate(45deg); -ms-transform: rotate(45deg); -o-transform: rotate(45deg); transform: rotate(45deg); color:#fff; display: block; padding: .6em 3.5em; position: fixed; text-align: center; text-decoration: none; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; background-color: green; z-index: 10000; } </style> </head> <body> <a href="path_to_url" class="ribbon">Fork me on Github</a> <script src="../../asset/alloy_paper.js"></script> <script src="../../alloy_finger.js"></script> <div style="text-align: center;"><canvas id="ourCanvas" width="300" height="600" style="border: 1px solid black;"></canvas></div> <script> ;(function (AlloyPaper) { var Stage = AlloyPaper.Stage, Bitmap = AlloyPaper.Bitmap,Loader=AlloyPaper.Loader; var stage = new Stage("#ourCanvas"); stage.autoUpdate=false; var ld = new Loader(); ld.loadRes([ { id: "test", src: "../../asset/test.png" }, { id: "test2", src: "../../asset/test2.png" } ]); ld.complete(function () { var bmp = new Bitmap(ld.get("test")); bmp.originX = 0.5; bmp.originY = 0.5; bmp.x = stage.width / 2; bmp.y =150; stage.add(bmp); var bmp2 = new Bitmap(ld.get("test2")); bmp2.originX = 0.5; bmp2.originY = 0.5; bmp2.x = stage.width / 2; bmp2.y =450; stage.add(bmp2); stage.update(); var initScale = 1; new AlloyFinger(bmp, { multipointStart: function () { initScale = bmp.scaleX; }, rotate: function (evt) { bmp.rotation += evt.angle; stage.update(); }, pinch: function (evt) { bmp.scaleX = bmp.scaleY = initScale * evt.scale; stage.update(); }, pressMove: function (evt) { bmp.x += evt.deltaX; bmp.y += evt.deltaY; evt.preventDefault(); stage.update(); } }); new AlloyFinger(bmp2, { multipointStart: function () { 
initScale = bmp2.scaleX; }, rotate: function (evt) { bmp2.rotation += evt.angle; stage.update(); }, pinch: function (evt) { bmp2.scaleX = bmp2.scaleY = initScale * evt.scale; stage.update(); }, pressMove: function (evt) { bmp2.x += evt.deltaX; bmp2.y += evt.deltaY; evt.preventDefault(); stage.update(); } }); }); })(AlloyPaper) </script> </body> </html> ```
/content/code_sandbox/example/canvas/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
784
```html <!DOCTYPE html> <html> <head> <title>Event Test</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no" /> <style> #test { -webkit-tap-highlight-color: rgba(0,0,0,0); background-color: green; } </style> </head> <body style="text-align: center; font-family: Segoe UI Light,Segoe UI,Microsoft Jhenghei,Mirco Yahei,'sans-serif'; background-color: black; color: white;"> <div id="test" style="width: 280px; height: 200px; margin: 0 auto; line-height: 200px; font-size: 30px;">Touch Me</div> <div id="result" style="font-size: 18px;"></div> <script src="../../alloy_finger.js"></script> <div> <script> var result = document.querySelector("#result"), testDiv = document.querySelector("#test"), html = ""; var af = new AlloyFinger(testDiv, { touchStart: function () { html = ""; html += "start<br/>"; result.innerHTML = html; }, touchEnd: function () { html += "end<br/>"; result.innerHTML = html; }, tap: function () { html += "tap<br/>"; result.innerHTML = html; } }); var onLongTap = function() { html += "destroy<br/>"; result.innerHTML = html; af = af.destroy(); } af.on('longTap', onLongTap); </script> </div> </body> </html> ```
/content/code_sandbox/example/destroy/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
380
```html <!DOCTYPE html> <html> <head> <title>AlloyFinger</title> <meta name="viewport" content="width=device-width,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no"/> <style> .ribbon { top: 3.2em; right: -4.7em; -webkit-transform: rotate(45deg); -moz-transform: rotate(45deg); -ms-transform: rotate(45deg); -o-transform: rotate(45deg); transform: rotate(45deg); color:#fff; display: block; padding: .6em 3.5em; position: fixed; text-align: center; text-decoration: none; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; background-color: green; z-index: 10000; } </style> </head> <body> <a href="path_to_url" class="ribbon">Fork me on Github</a> <script src="../../asset/transform.js"></script> <script src="../../alloy_finger.js"></script> <script src="../../asset/image_loaded.js"></script> <script src="../../asset/to.js"></script> <div id="imgBox" style="position:fixed;width: 100%;height: 100%;left:0;top:0; background:black;display: none;"> <img src="../../asset/cover.jpg" id="testImg" alt="" style="width: 100%;position: absolute; " /> </div> <script> var topPx; imageLoaded("#testImg",function(w,h){ document.querySelector("#imgBox").style.display="block"; topPx=window.innerHeight/2-(h*window.innerWidth/w)/2; this.style.top=topPx+"px"; }); function ease(x) { return Math.sqrt(1 - Math.pow(x - 1, 2)); } var el = document.getElementById("testImg"); Transform(el); var initScale = 1; new AlloyFinger(el, { multipointStart: function () { To.stopAll(); initScale = el.scaleX; }, rotate: function (evt) { el.rotateZ += evt.angle; }, pinch: function (evt) { el.scaleX = el.scaleY = initScale * evt.zoom; }, multipointEnd: function () { To.stopAll(); if (el.scaleX < 1) { new To(el, "scaleX", 1, 500, ease); new To(el, "scaleY", 1, 500, ease); } if (el.scaleX > 2) { new To(el, "scaleX", 2, 500, ease); new To(el, "scaleY", 2, 500, ease); } var rotation = el.rotateZ % 360; if (rotation < 0)rotation = 360 + rotation; el.rotateZ=rotation; if 
(rotation > 0 && rotation < 45) { new To(el, "rotateZ", 0, 500, ease); } else if (rotation >= 315) { new To(el, "rotateZ", 360, 500, ease); } else if (rotation >= 45 && rotation < 135) { new To(el, "rotateZ", 90, 500, ease); } else if (rotation >= 135 && rotation < 225) { new To(el, "rotateZ", 180, 500, ease); } else if (rotation >= 225 && rotation < 315) { new To(el, "rotateZ", 270, 500, ease); } }, pressMove: function (evt) { el.translateX += evt.deltaX; el.translateY += evt.deltaY; evt.preventDefault(); }, tap: function (evt) { //console.log(el.scaleX + "_" + el.scaleY + "_" + el.rotateZ + "_" + el.translateX + "_" + el.translateY); //console.log("tap"); }, doubleTap: function (evt) { To.stopAll(); if (el.scaleX > 1.5) { new To(el, "scaleX", 1, 500, ease); new To(el, "scaleY", 1, 500, ease); new To(el, "translateX", 0, 500, ease); new To(el, "translateY", 0, 500, ease); } else { var box = el.getBoundingClientRect(); var y = box.height - (( evt.changedTouches[0].pageY - topPx) * 2) - (box.height / 2 - ( evt.changedTouches[0].pageY - topPx)); var x = box.width - (( evt.changedTouches[0].pageX) * 2) - (box.width / 2 - ( evt.changedTouches[0].pageX)); new To(el, "scaleX", 2, 500, ease); new To(el, "scaleY", 2, 500, ease); new To(el, "translateX", x, 500, ease); new To(el, "translateY", y, 500, ease); } //console.log("doubleTap"); }, longTap: function (evt) { //console.log("longTap"); }, swipe: function (evt) { //console.log("swipe" + evt.direction); } }); </script> </body> </html> ```
/content/code_sandbox/example/picture/index.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
1,244
```jsx /* AlloyFinger v0.1.0 * By dntzhang * Reedited by nemoliao * Github: path_to_url */ import React, { Component } from 'react'; export default class AlloyFinger extends Component { constructor(props) { super(props); this.preV = { x: null, y: null }; this.pinchStartLen = null; this.scale = 1; this.isSingleTap = false; this.isDoubleTap = false; this.delta = null; this.last = null; this.now = null; this.end = null; this.multiTouch = false; this.tapTimeout = null; this.longTapTimeout = null; this.singleTapTimeout = null; this.swipeTimeout=null; this.x1 = this.x2 = this.y1 = this.y2 = null; this.preTapPosition={x: null, y: null}; // Disable taps after longTap this.afterLongTap = false; this.afterLongTapTimeout = null; } getLen(v) { return Math.sqrt(v.x * v.x + v.y * v.y); } dot(v1, v2) { return v1.x * v2.x + v1.y * v2.y; } getAngle(v1, v2) { var mr = this.getLen(v1) * this.getLen(v2); if (mr === 0) return 0; var r = this.dot(v1, v2) / mr; if (r > 1) r = 1; return Math.acos(r); } cross(v1, v2) { return v1.x * v2.y - v2.x * v1.y; } getRotateAngle(v1, v2) { var angle = this.getAngle(v1, v2); if (this.cross(v1, v2) > 0) { angle *= -1; } return angle * 180 / Math.PI; } _resetState() { this.setState({ x: null, y: null, swiping: false, start: 0 }); } _emitEvent(name, ...arg) { if (this.props[name]) { this.props[name](...arg); } } _handleTouchStart (evt) { this._emitEvent('onTouchStart', evt); if (!evt.touches) return; this.now = Date.now(); this.x1 = evt.touches[0].pageX; this.y1 = evt.touches[0].pageY; this.delta = this.now - (this.last || this.now); if (this.preTapPosition.x!==null) { this.isDoubleTap = (this.delta > 0 && this.delta <= 250&&Math.abs(this.preTapPosition.x-this.x1)<30&&Math.abs(this.preTapPosition.y-this.y1)<30); } this.preTapPosition.x=this.x1; this.preTapPosition.y=this.y1; this.last = this.now; var preV = this.preV, len = evt.touches.length; if (len > 1) { this._cancelLongTap(); this._cancelSingleTap(); var v = { x: evt.touches[1].pageX - this.x1, y: 
evt.touches[1].pageY - this.y1 }; preV.x = v.x; preV.y = v.y; this.pinchStartLen = this.getLen(preV); this._emitEvent('onMultipointStart', evt); } else { this.isSingleTap = true; } this.longTapTimeout = setTimeout(() => { this._emitEvent('onLongTap', evt); this.afterLongTap = true; this.afterLongTapTimeout = setTimeout(() => { this.afterLongTap = false; }, 1000); }, 750); } _handleTouchMove(evt) { this._emitEvent('onTouchMove', evt); var preV = this.preV, len = evt.touches.length, currentX = evt.touches[0].pageX, currentY = evt.touches[0].pageY; this.isSingleTap = false; this.isDoubleTap = false; if (len > 1) { var v = { x: evt.touches[1].pageX - currentX, y: evt.touches[1].pageY - currentY }; if (preV.x !== null) { if (this.pinchStartLen > 0) { evt.center = { x: (evt.touches[1].pageX + currentX) / 2, y: (evt.touches[1].pageY + currentY) / 2 }; evt.scale = evt.zoom = this.getLen(v) / this.pinchStartLen; this._emitEvent('onPinch', evt); } evt.angle = this.getRotateAngle(v, preV); this._emitEvent('onRotate', evt); } preV.x = v.x; preV.y = v.y; this.multiTouch = true; } else { if (this.x2 !== null) { evt.deltaX = currentX - this.x2; evt.deltaY = currentY - this.y2; } else { evt.deltaX = 0; evt.deltaY = 0; } this._emitEvent('onPressMove', evt); } this._cancelLongTap(); this.x2 = currentX; this.y2 = currentY; if (len > 1) { evt.preventDefault(); } } _handleTouchCancel(evt) { this._emitEvent('onTouchCancel', evt); clearInterval(this.singleTapTimeout); clearInterval(this.tapTimeout); clearInterval(this.longTapTimeout); clearInterval(this.swipeTimeout); } _handleTouchEnd(evt) { this._emitEvent('onTouchEnd', evt); this.end = Date.now(); this._cancelLongTap(); if (this.multiTouch === true && evt.touches.length < 2) { this._emitEvent('onMultipointEnd', evt); } evt.origin = [this.x1, this.y1]; if (this.multiTouch === false) { if ((this.x2 && Math.abs(this.x1 - this.x2) > 30) || (this.y2 && Math.abs(this.y1 - this.y2) > 30)) { evt.direction = this._swipeDirection(this.x1, 
this.x2, this.y1, this.y2); evt.distance = Math.abs(this.x1 - this.x2); this.swipeTimeout = setTimeout(() => { this._emitEvent('onSwipe', evt); }, 0); } else { if (this.afterLongTap) { clearTimeout(this.afterLongTapTimeout); this.afterLongTap = false; } else { this.tapTimeout = setTimeout(() => { this._emitEvent('onTap', evt); if (this.isDoubleTap) { this._emitEvent('onDoubleTap', evt); clearTimeout(this.singleTapTimeout); this.isDoubleTap = false; } else if (this.isSingleTap) { this.singleTapTimeout = setTimeout(()=>{ this._emitEvent('onSingleTap', evt); }, 250); this.isSingleTap = false; } }, 0); } } } this.preV.x = 0; this.preV.y = 0; this.scale = 1; this.pinchStartLen = null; this.x1 = this.x2 = this.y1 = this.y2 = null; this.multiTouch = false; } _cancelLongTap () { clearTimeout(this.longTapTimeout); } _cancelSingleTap () { clearTimeout(this.singleTapTimeout); } _swipeDirection (x1, x2, y1, y2) { if (Math.abs(x1 - x2) > 80 || this.end-this.now < 250) { return Math.abs(x1 - x2) >= Math.abs(y1 - y2) ? (x1 - x2 > 0 ? 'Left' : 'Right') : (y1 - y2 > 0 ? 'Up' : 'Down'); } else { return 'Nochange'; } } render() { return React.cloneElement(React.Children.only(this.props.children), { onTouchStart: this._handleTouchStart.bind(this), onTouchMove: this._handleTouchMove.bind(this), onTouchCancel: this._handleTouchCancel.bind(this), onTouchEnd: this._handleTouchEnd.bind(this) }); } } ```
/content/code_sandbox/react/AlloyFinger.jsx
jsx
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
1,919
```javascript function imageLoaded(selector,onload){ var img=new Image() ; var dom=document.querySelector(selector); img.onload=function(){ //real_width,real_height onload.call(dom,this.width,this.height); img.onload=null; img=null; }; img.src=dom.getAttribute("src"); } ```
/content/code_sandbox/asset/image_loaded.js
javascript
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
65
```javascript /* transformjs * By dntzhang * Github: path_to_url */ ;(function () { var Matrix3D = function (n11, n12, n13, n14, n21, n22, n23, n24, n31, n32, n33, n34, n41, n42, n43, n44) { this.elements =window.Float32Array ? new Float32Array(16) : []; var te = this.elements; te[0] = (n11 !== undefined) ? n11 : 1; te[4] = n12 || 0; te[8] = n13 || 0; te[12] = n14 || 0; te[1] = n21 || 0; te[5] = (n22 !== undefined) ? n22 : 1; te[9] = n23 || 0; te[13] = n24 || 0; te[2] = n31 || 0; te[6] = n32 || 0; te[10] = (n33 !== undefined) ? n33 : 1; te[14] = n34 || 0; te[3] = n41 || 0; te[7] = n42 || 0; te[11] = n43 || 0; te[15] = (n44 !== undefined) ? n44 : 1; }; Matrix3D.DEG_TO_RAD = Math.PI / 180; Matrix3D.prototype = { set: function (n11, n12, n13, n14, n21, n22, n23, n24, n31, n32, n33, n34, n41, n42, n43, n44) { var te = this.elements; te[0] = n11; te[4] = n12; te[8] = n13; te[12] = n14; te[1] = n21; te[5] = n22; te[9] = n23; te[13] = n24; te[2] = n31; te[6] = n32; te[10] = n33; te[14] = n34; te[3] = n41; te[7] = n42; te[11] = n43; te[15] = n44; return this; }, identity: function () { this.set( 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ); return this; }, multiplyMatrices: function (a, be) { var ae = a.elements; var te = this.elements; var a11 = ae[0], a12 = ae[4], a13 = ae[8], a14 = ae[12]; var a21 = ae[1], a22 = ae[5], a23 = ae[9], a24 = ae[13]; var a31 = ae[2], a32 = ae[6], a33 = ae[10], a34 = ae[14]; var a41 = ae[3], a42 = ae[7], a43 = ae[11], a44 = ae[15]; var b11 = be[0], b12 = be[1], b13 = be[2], b14 = be[3]; var b21 = be[4], b22 = be[5], b23 = be[6], b24 = be[7]; var b31 = be[8], b32 = be[9], b33 = be[10], b34 = be[11]; var b41 = be[12], b42 = be[13], b43 = be[14], b44 = be[15]; te[0] = a11 * b11 + a12 * b21 + a13 * b31 + a14 * b41; te[4] = a11 * b12 + a12 * b22 + a13 * b32 + a14 * b42; te[8] = a11 * b13 + a12 * b23 + a13 * b33 + a14 * b43; te[12] = a11 * b14 + a12 * b24 + a13 * b34 + a14 * b44; te[1] = a21 * b11 + a22 * b21 + a23 * b31 + a24 * b41; te[5] = 
a21 * b12 + a22 * b22 + a23 * b32 + a24 * b42; te[9] = a21 * b13 + a22 * b23 + a23 * b33 + a24 * b43; te[13] = a21 * b14 + a22 * b24 + a23 * b34 + a24 * b44; te[2] = a31 * b11 + a32 * b21 + a33 * b31 + a34 * b41; te[6] = a31 * b12 + a32 * b22 + a33 * b32 + a34 * b42; te[10] = a31 * b13 + a32 * b23 + a33 * b33 + a34 * b43; te[14] = a31 * b14 + a32 * b24 + a33 * b34 + a34 * b44; te[3] = a41 * b11 + a42 * b21 + a43 * b31 + a44 * b41; te[7] = a41 * b12 + a42 * b22 + a43 * b32 + a44 * b42; te[11] = a41 * b13 + a42 * b23 + a43 * b33 + a44 * b43; te[15] = a41 * b14 + a42 * b24 + a43 * b34 + a44 * b44; return this; }, // 90Math.cos0 _rounded: function(value,i){ i= Math.pow(10, i || 15); // default return Math.round(value*i)/i; }, appendTransform: function (x, y, z, scaleX, scaleY, scaleZ, rotateX, rotateY, rotateZ,skewX,skewY, originX, originY, originZ) { var rx = rotateX * Matrix3D.DEG_TO_RAD; var cosx =this._rounded( Math.cos(rx)); var sinx = this._rounded(Math.sin(rx)); var ry = rotateY * Matrix3D.DEG_TO_RAD; var cosy =this._rounded( Math.cos(ry)); var siny = this._rounded(Math.sin(ry)); var rz = rotateZ * Matrix3D.DEG_TO_RAD; var cosz =this._rounded( Math.cos(rz * -1)); var sinz =this._rounded( Math.sin(rz * -1)); this.multiplyMatrices(this, [ 1, 0, 0, x, 0, cosx, sinx, y, 0, -sinx, cosx, z, 0, 0, 0, 1 ]); this.multiplyMatrices(this, [ cosy, 0, siny, 0, 0, 1, 0, 0, -siny, 0, cosy, 0, 0, 0, 0, 1 ]); this.multiplyMatrices(this,[ cosz * scaleX, sinz * scaleY, 0, 0, -sinz * scaleX, cosz * scaleY, 0, 0, 0, 0, 1 * scaleZ, 0, 0, 0, 0, 1 ]); if(skewX||skewY){ this.multiplyMatrices(this,[ this._rounded(Math.cos(skewX* Matrix3D.DEG_TO_RAD)), this._rounded( Math.sin(skewX* Matrix3D.DEG_TO_RAD)), 0, 0, -1*this._rounded(Math.sin(skewY* Matrix3D.DEG_TO_RAD)), this._rounded( Math.cos(skewY* Matrix3D.DEG_TO_RAD)), 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ]); } if (originX || originY || originZ) { this.elements[12] -= originX * this.elements[0] + originY * this.elements[4] + originZ * 
this.elements[8]; this.elements[13] -= originX * this.elements[1] + originY * this.elements[5] + originZ * this.elements[9]; this.elements[14] -= originX * this.elements[2] + originY * this.elements[6] + originZ * this.elements[10]; } return this; } }; function observe(target, props, callback) { for (var i = 0, len = props.length; i < len; i++) { var prop = props[i]; watch(target, prop, callback); } } function watch(target, prop, callback) { Object.defineProperty(target, prop, { get: function () { return this["__" + prop]; }, set: function (value) { if (value !== this["__" + prop]) { this["__" + prop] = value; callback(); } } }); } window.Transform = function (element,notPerspective) { observe( element, ["translateX", "translateY", "translateZ", "scaleX", "scaleY", "scaleZ", "rotateX", "rotateY", "rotateZ", "skewX", "skewY", "originX", "originY", "originZ"], function () { var mtx = element.matrix3D.identity().appendTransform(element.translateX, element.translateY, element.translateZ, element.scaleX, element.scaleY, element.scaleZ, element.rotateX, element.rotateY, element.rotateZ, element.skewX, element.skewY, element.originX, element.originY, element.originZ); element.style.transform = element.style.msTransform = element.style.OTransform = element.style.MozTransform = element.style.webkitTransform =(notPerspective?"":"perspective(" + (element.perspective===undefined?500:element.perspective) + "px) ")+ "matrix3d(" + Array.prototype.slice.call(mtx.elements).join(",") + ")"; }); element.matrix3D = new Matrix3D(); if (!notPerspective) { observe( element, ["perspective"], function () { element.style.transform = element.style.msTransform = element.style.OTransform = element.style.MozTransform = element.style.webkitTransform = "perspective(" + element.perspective + "px) matrix3d(" + Array.prototype.slice.call(element.matrix3D.elements).join(",") + ")"; }); element.perspective = 500; } element.scaleX = element.scaleY = element.scaleZ = 1; //imagex\y\ztranslate 
element.translateX = element.translateY = element.translateZ = element.rotateX = element.rotateY = element.rotateZ = element.skewX = element.skewY = element.originX = element.originY = element.originZ = 0; } })(); ```
/content/code_sandbox/transformjs/transform.js
javascript
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
2,568
```javascript /* transformjs * By dntzhang */ ;(function () { var Matrix3D = function (n11, n12, n13, n14, n21, n22, n23, n24, n31, n32, n33, n34, n41, n42, n43, n44) { this.elements =window.Float32Array ? new Float32Array(16) : []; var te = this.elements; te[0] = (n11 !== undefined) ? n11 : 1; te[4] = n12 || 0; te[8] = n13 || 0; te[12] = n14 || 0; te[1] = n21 || 0; te[5] = (n22 !== undefined) ? n22 : 1; te[9] = n23 || 0; te[13] = n24 || 0; te[2] = n31 || 0; te[6] = n32 || 0; te[10] = (n33 !== undefined) ? n33 : 1; te[14] = n34 || 0; te[3] = n41 || 0; te[7] = n42 || 0; te[11] = n43 || 0; te[15] = (n44 !== undefined) ? n44 : 1; }; Matrix3D.DEG_TO_RAD = Math.PI / 180; Matrix3D.prototype = { set: function (n11, n12, n13, n14, n21, n22, n23, n24, n31, n32, n33, n34, n41, n42, n43, n44) { var te = this.elements; te[0] = n11; te[4] = n12; te[8] = n13; te[12] = n14; te[1] = n21; te[5] = n22; te[9] = n23; te[13] = n24; te[2] = n31; te[6] = n32; te[10] = n33; te[14] = n34; te[3] = n41; te[7] = n42; te[11] = n43; te[15] = n44; return this; }, identity: function () { this.set( 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ); return this; }, multiplyMatrices: function (a, be) { var ae = a.elements; var te = this.elements; var a11 = ae[0], a12 = ae[4], a13 = ae[8], a14 = ae[12]; var a21 = ae[1], a22 = ae[5], a23 = ae[9], a24 = ae[13]; var a31 = ae[2], a32 = ae[6], a33 = ae[10], a34 = ae[14]; var a41 = ae[3], a42 = ae[7], a43 = ae[11], a44 = ae[15]; var b11 = be[0], b12 = be[1], b13 = be[2], b14 = be[3]; var b21 = be[4], b22 = be[5], b23 = be[6], b24 = be[7]; var b31 = be[8], b32 = be[9], b33 = be[10], b34 = be[11]; var b41 = be[12], b42 = be[13], b43 = be[14], b44 = be[15]; te[0] = a11 * b11 + a12 * b21 + a13 * b31 + a14 * b41; te[4] = a11 * b12 + a12 * b22 + a13 * b32 + a14 * b42; te[8] = a11 * b13 + a12 * b23 + a13 * b33 + a14 * b43; te[12] = a11 * b14 + a12 * b24 + a13 * b34 + a14 * b44; te[1] = a21 * b11 + a22 * b21 + a23 * b31 + a24 * b41; te[5] = a21 * b12 + a22 * b22 + 
a23 * b32 + a24 * b42; te[9] = a21 * b13 + a22 * b23 + a23 * b33 + a24 * b43; te[13] = a21 * b14 + a22 * b24 + a23 * b34 + a24 * b44; te[2] = a31 * b11 + a32 * b21 + a33 * b31 + a34 * b41; te[6] = a31 * b12 + a32 * b22 + a33 * b32 + a34 * b42; te[10] = a31 * b13 + a32 * b23 + a33 * b33 + a34 * b43; te[14] = a31 * b14 + a32 * b24 + a33 * b34 + a34 * b44; te[3] = a41 * b11 + a42 * b21 + a43 * b31 + a44 * b41; te[7] = a41 * b12 + a42 * b22 + a43 * b32 + a44 * b42; te[11] = a41 * b13 + a42 * b23 + a43 * b33 + a44 * b43; te[15] = a41 * b14 + a42 * b24 + a43 * b34 + a44 * b44; return this; }, // 90Math.cos0 _rounded: function(value,i){ i= Math.pow(10, i || 15); // default return Math.round(value*i)/i; }, appendTransform: function (x, y, z, scaleX, scaleY, scaleZ, rotateX, rotateY, rotateZ,skewX,skewY, originX, originY, originZ) { var rx = rotateX * Matrix3D.DEG_TO_RAD; var cosx =this._rounded( Math.cos(rx)); var sinx = this._rounded(Math.sin(rx)); var ry = rotateY * Matrix3D.DEG_TO_RAD; var cosy =this._rounded( Math.cos(ry)); var siny = this._rounded(Math.sin(ry)); var rz = rotateZ * Matrix3D.DEG_TO_RAD; var cosz =this._rounded( Math.cos(rz * -1)); var sinz =this._rounded( Math.sin(rz * -1)); this.multiplyMatrices(this, [ 1, 0, 0, x, 0, cosx, sinx, y, 0, -sinx, cosx, z, 0, 0, 0, 1 ]); this.multiplyMatrices(this, [ cosy, 0, siny, 0, 0, 1, 0, 0, -siny, 0, cosy, 0, 0, 0, 0, 1 ]); this.multiplyMatrices(this,[ cosz * scaleX, sinz * scaleY, 0, 0, -sinz * scaleX, cosz * scaleY, 0, 0, 0, 0, 1 * scaleZ, 0, 0, 0, 0, 1 ]); if(skewX||skewY){ this.multiplyMatrices(this,[ this._rounded(Math.cos(skewX* Matrix3D.DEG_TO_RAD)), this._rounded( Math.sin(skewX* Matrix3D.DEG_TO_RAD)), 0, 0, -1*this._rounded(Math.sin(skewY* Matrix3D.DEG_TO_RAD)), this._rounded( Math.cos(skewY* Matrix3D.DEG_TO_RAD)), 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ]); } if (originX || originY || originZ) { this.elements[12] -= originX * this.elements[0] + originY * this.elements[4] + originZ * this.elements[8]; this.elements[13] 
-= originX * this.elements[1] + originY * this.elements[5] + originZ * this.elements[9]; this.elements[14] -= originX * this.elements[2] + originY * this.elements[6] + originZ * this.elements[10]; } return this; } }; function observe(target, props, callback) { for (var i = 0, len = props.length; i < len; i++) { var prop = props[i]; watch(target, prop, callback); } } function watch(target, prop, callback) { Object.defineProperty(target, prop, { get: function () { return this["__" + prop]; }, set: function (value) { if (value !== this["__" + prop]) { this["__" + prop] = value; callback(); } } }); } window.Transform = function (element) { observe( element, ["translateX", "translateY", "translateZ", "scaleX", "scaleY", "scaleZ" , "rotateX", "rotateY", "rotateZ","skewX","skewY", "originX", "originY", "originZ"], function () { var mtx = element.matrix3D.identity().appendTransform( element.translateX, element.translateY, element.translateZ, element.scaleX, element.scaleY, element.scaleZ, element.rotateX, element.rotateY, element.rotateZ,element.skewX,element.skewY, element.originX, element.originY, element.originZ); element.style.transform = element.style.msTransform = element.style.OTransform = element.style.MozTransform = element.style.webkitTransform = "perspective("+element.perspective+"px) matrix3d(" + Array.prototype.slice.call(mtx.elements).join(",") + ")"; }); observe( element, [ "perspective"], function () { element.style.transform = element.style.msTransform = element.style.OTransform = element.style.MozTransform = element.style.webkitTransform = "perspective("+element.perspective+"px) matrix3d(" + Array.prototype.slice.call(element.matrix3D.elements).join(",") + ")"; }); element.matrix3D = new Matrix3D(); element.perspective = 500; element.scaleX = element.scaleY = element.scaleZ = 1; //imagex\y\ztranslate element.translateX = element.translateY = element.translateZ = element.rotateX = element.rotateY = element.rotateZ =element.skewX=element.skewY= 
element.originX = element.originY = element.originZ = 0; } })(); ```
/content/code_sandbox/asset/transform.js
javascript
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
2,527
```html <!DOCTYPE html> <html xmlns="path_to_url"> <head> <title></title> <style> #ctt .test { width: 100px; height: 100px; color: white; line-height: 100px; text-align: center; display: inline-block; margin-top: 30px; position: absolute; left: 0; top: 0; } </style> </head> <body> <div style="text-align: center;position:relative;left:100px;top:100px;" id="ctt"> <div id="test1" class="test" style="background-color: green; "> transformjs </div> <div id="test2" class="test" style="background-color: red;left:200px; "> transformjs </div> <div id="test3" class="test" style="background-color: blue;left:400px "> transformjs </div> <div id="test4" class="test" style="background-color: #ff6a00;left:600px "> transformjs </div> <div id="test5" class="test" style="background-color:#485f0f;left:400px;top:200px; "> transformjs </div> <div id="test6" class="test" style="background-color:#485f0f;left:0;top:200px; "> transformjs </div> <div id="test7" class="test" style="background-color:#291996;left:200px;top:200px; "> transformjs </div> <div id="test8" class="test" style="background-color:#291996;left:600px;top:200px; "> transformjs </div> <div id="test9" class="test" style="background-color:#c71585;left:200px;top:400px; "> transformjs </div> <div id="test10" class="test" style="background-color:#32cd32;left:400px;top:400px; "> transformjs </div> </div> <script src="transform.js"></script> <script> //----------------how to use--------------------------------- var element1 = document.querySelector("#test1"); var element2 = document.querySelector("#test2"); var element3 = document.querySelector("#test3"); var element4 = document.querySelector("#test4"); var element5 = document.querySelector("#test5"); var element6= document.querySelector("#test6"); var element7 = document.querySelector("#test7"); var element8 = document.querySelector("#test8"); var element9 = document.querySelector("#test9"); var element10 = document.querySelector("#test10"); Transform(element1); 
Transform(element2); Transform(element3); Transform(element4); Transform(element5); Transform(element6); Transform(element7); Transform(element8); Transform(element9); Transform(element10); element8.originZ = -150; element4.originX = -50; element4.originY = -50; var step = 0.02, xStep = 3; setInterval(function () { element1.rotateZ++; element2.rotateY++; element3.rotateX++; element4.rotateZ++; element5.rotateY++; element5.rotateX++; element7.translateX < -50 && (xStep *= -1); element7.translateX > 50 && (xStep *= -1); element7.translateX += xStep; element6.scaleX < 0.5 && (step *= -1); element6.scaleX > 1.5 && (step *= -1); element6.scaleX += step; element6.scaleY += step; element8.rotateY++; element8.rotateX++; element9.skewX++; element10.skewY++; }, 15); //------------------------------------------------------------ </script> </body> </html> ```
/content/code_sandbox/asset/a.html
html
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
873
```javascript /* Alloy Game Engine * By AlloyTeam path_to_url * Github: path_to_url */ ;(function (root, factory) { if (typeof define === 'function' && define.amd) { define([], factory); } else if (typeof exports === 'object') { module.exports = factory(); } else { root.AlloyPaper = factory(); } }(this, function () { 'use strict'; // The base Class implementation (does nothing) var Class = function () { }; // Create a new Class that inherits from this class Class.extend = function (prop) { var _super = this.prototype; var prototype = Object.create(_super); // Copy the properties over onto the new prototype for (var name in prop) { if (name != "statics") { // Check if we're overwriting an existing function prototype[name] = typeof prop[name] == "function" && typeof _super[name] == "function" ? (function (temp_name, fn) { return function () { var tmp = this._super; // Add a new ._super() method that is the same method // but on the super-class this._super = _super[temp_name]; // The method only need to be bound temporarily, so we // remove it when we're done executing var ret = fn.apply(this, arguments); this._super = tmp; return ret; }; })(name, prop[name]) : prop[name]; } } // The dummy class constructor function _Class() { // All construction is actually done in the init method this.ctor.apply(this, arguments); } // for (var key in this) { if (this.hasOwnProperty(key) && key != "extend") _Class[key] = this[key]; } // Populate our constructed prototype object _Class.prototype = prototype; _Class.prototype._super = Object.create(_super); // if (prop.statics) { for (var key in prop.statics) { if (prop.statics.hasOwnProperty(key)) { _Class[key] = prop.statics[key]; if (key == "ctor") { // _Class[key](); } } } } // Enforce the constructor to be what we expect _Class.prototype.constructor = _Class; // And make this class extendable _Class.extend = Class.extend; return _Class; }; window.Class = Class; //AlloyPaper var AlloyPaper={}; AlloyPaper.DefaultCursor = "default"; 
AlloyPaper.Cache = {}; AlloyPaper.TWEEN = Class.extend({ "statics": { "ctor": function() { if (Date.now === undefined) { Date.now = function() { return new Date().valueOf(); }; } this._tweens = []; }, "REVISION": "14", "getAll": function() { return this._tweens; }, "removeAll": function() { this._tweens = []; }, "add": function(tween) { this._tweens.push(tween); }, "remove": function(tween) { var i = this._tweens.indexOf(tween); if (i !== -1) { this._tweens.splice(i, 1); } }, "update": function(time) { if (this._tweens.length === 0) return false; var i = 0; time = time !== undefined ? time : typeof window !== "undefined" && window.performance !== undefined && window.performance.now !== undefined ? window.performance.now() : Date.now(); while (i < this._tweens.length) { if (this._tweens[i].update(time)) { i++; } else { this._tweens.splice(i, 1); } } return true; }, "Tween": function(object) { var _object = object; var _valuesStart = {}; var _valuesEnd = {}; var _valuesStartRepeat = {}; var _duration = 1e3; var _repeat = 0; var _yoyo = false; var _isPlaying = false; var _reversed = false; var _delayTime = 0; var _startTime = null; var _easingFunction = AlloyPaper.TWEEN.Easing.Linear.None; var _interpolationFunction = AlloyPaper.TWEEN.Interpolation.Linear; var _chainedTweens = []; var _onStartCallback = null; var _onStartCallbackFired = false; var _onUpdateCallback = null; var _onCompleteCallback = null; var _onStopCallback = null; var _paused = false, _passTime = null; for (var field in object) { _valuesStart[field] = parseFloat(object[field], 10); } this.toggle = function() { if (_paused) { this.play(); } else { this.pause(); } }; this.pause = function() { _paused = true; var pauseTime = typeof window !== "undefined" && window.performance !== undefined && window.performance.now !== undefined ? 
window.performance.now() : Date.now(); _passTime = pauseTime - _startTime; }; this.play = function() { _paused = false; var nowTime = typeof window !== "undefined" && window.performance !== undefined && window.performance.now !== undefined ? window.performance.now() : Date.now(); _startTime = nowTime - _passTime; }; this.to = function(properties, duration) { if (duration !== undefined) { _duration = duration; } _valuesEnd = properties; return this; }; this.start = function(time) { AlloyPaper.TWEEN.add(this); _isPlaying = true; _onStartCallbackFired = false; _startTime = time !== undefined ? time : typeof window !== "undefined" && window.performance !== undefined && window.performance.now !== undefined ? window.performance.now() : Date.now(); _startTime += _delayTime; for (var property in _valuesEnd) { if (_valuesEnd[property] instanceof Array) { if (_valuesEnd[property].length === 0) { continue; } _valuesEnd[property] = [_object[property]].concat(_valuesEnd[property]); } _valuesStart[property] = _object[property]; if (_valuesStart[property] instanceof Array === false) { _valuesStart[property] *= 1; } _valuesStartRepeat[property] = _valuesStart[property] || 0; } return this; }; this.stop = function() { if (!_isPlaying) { return this; } AlloyPaper.TWEEN.remove(this); _isPlaying = false; if (_onStopCallback !== null) { _onStopCallback.call(_object); } this.stopChainedTweens(); return this; }; this.stopChainedTweens = function() { for (var i = 0, numChainedTweens = _chainedTweens.length; i < numChainedTweens; i++) { _chainedTweens[i].stop(); } }; this.delay = function(amount) { _delayTime = amount; return this; }; this.repeat = function(times) { _repeat = times; return this; }; this.yoyo = function(yoyo) { _yoyo = yoyo; return this; }; this.easing = function(easing) { _easingFunction = easing; return this; }; this.interpolation = function(interpolation) { _interpolationFunction = interpolation; return this; }; this.chain = function() { _chainedTweens = arguments; 
return this; }; this.onStart = function(callback) { _onStartCallback = callback; return this; }; this.onUpdate = function(callback) { _onUpdateCallback = callback; return this; }; this.onComplete = function(callback) { _onCompleteCallback = callback; return this; }; this.onStop = function(callback) { _onStopCallback = callback; return this; }; this.update = function(time) { if (_paused) return true; var property; if (time < _startTime) { return true; } if (_onStartCallbackFired === false) { if (_onStartCallback !== null) { _onStartCallback.call(_object); } _onStartCallbackFired = true; } var elapsed = (time - _startTime) / _duration; elapsed = elapsed > 1 ? 1 : elapsed; var value = _easingFunction(elapsed); for (property in _valuesEnd) { var start = _valuesStart[property] || 0; var end = _valuesEnd[property]; if (end instanceof Array) { _object[property] = _interpolationFunction(end, value); } else { if (typeof end === "string") { end = start + parseFloat(end, 10); } if (typeof end === "number") { _object[property] = start + (end - start) * value; } } } if (_onUpdateCallback !== null) { _onUpdateCallback.call(_object, value); } if (elapsed == 1) { if (_repeat > 0) { if (isFinite(_repeat)) { _repeat--; } for (property in _valuesStartRepeat) { if (typeof _valuesEnd[property] === "string") { _valuesStartRepeat[property] = _valuesStartRepeat[property] + parseFloat(_valuesEnd[property], 10); } if (_yoyo) { var tmp = _valuesStartRepeat[property]; _valuesStartRepeat[property] = _valuesEnd[property]; _valuesEnd[property] = tmp; } _valuesStart[property] = _valuesStartRepeat[property]; } if (_yoyo) { _reversed = !_reversed; } _startTime = time + _delayTime; return true; } else { if (_onCompleteCallback !== null) { _onCompleteCallback.call(_object); } for (var i = 0, numChainedTweens = _chainedTweens.length; i < numChainedTweens; i++) { _chainedTweens[i].start(time); } return false; } } return true; }; }, "Easing": { "Linear": { "None": function(k) { return k; } }, 
"Quadratic": { "In": function(k) { return k * k; }, "Out": function(k) { return k * (2 - k); }, "InOut": function(k) { if ((k *= 2) < 1) return.5 * k * k; return -.5 * (--k * (k - 2) - 1); } }, "Cubic": { "In": function(k) { return k * k * k; }, "Out": function(k) { return --k * k * k + 1; }, "InOut": function(k) { if ((k *= 2) < 1) return.5 * k * k * k; return.5 * ((k -= 2) * k * k + 2); } }, "Quartic": { "In": function(k) { return k * k * k * k; }, "Out": function(k) { return 1 - --k * k * k * k; }, "InOut": function(k) { if ((k *= 2) < 1) return.5 * k * k * k * k; return -.5 * ((k -= 2) * k * k * k - 2); } }, "Quintic": { "In": function(k) { return k * k * k * k * k; }, "Out": function(k) { return --k * k * k * k * k + 1; }, "InOut": function(k) { if ((k *= 2) < 1) return.5 * k * k * k * k * k; return.5 * ((k -= 2) * k * k * k * k + 2); } }, "Sinusoidal": { "In": function(k) { return 1 - Math.cos(k * Math.PI / 2); }, "Out": function(k) { return Math.sin(k * Math.PI / 2); }, "InOut": function(k) { return.5 * (1 - Math.cos(Math.PI * k)); } }, "Exponential": { "In": function(k) { return k === 0 ? 0 : Math.pow(1024, k - 1); }, "Out": function(k) { return k === 1 ? 
1 : 1 - Math.pow(2, -10 * k); }, "InOut": function(k) { if (k === 0) return 0; if (k === 1) return 1; if ((k *= 2) < 1) return.5 * Math.pow(1024, k - 1); return.5 * (-Math.pow(2, -10 * (k - 1)) + 2); } }, "Circular": { "In": function(k) { return 1 - Math.sqrt(1 - k * k); }, "Out": function(k) { return Math.sqrt(1 - --k * k); }, "InOut": function(k) { if ((k *= 2) < 1) return -.5 * (Math.sqrt(1 - k * k) - 1); return.5 * (Math.sqrt(1 - (k -= 2) * k) + 1); } }, "Elastic": { "In": function(k) { var s, a = .1, p = .4; if (k === 0) return 0; if (k === 1) return 1; if (!a || a < 1) { a = 1; s = p / 4; } else s = p * Math.asin(1 / a) / (2 * Math.PI); return -(a * Math.pow(2, 10 * (k -= 1)) * Math.sin((k - s) * (2 * Math.PI) / p)); }, "Out": function(k) { var s, a = .1, p = .4; if (k === 0) return 0; if (k === 1) return 1; if (!a || a < 1) { a = 1; s = p / 4; } else s = p * Math.asin(1 / a) / (2 * Math.PI); return a * Math.pow(2, -10 * k) * Math.sin((k - s) * (2 * Math.PI) / p) + 1; }, "InOut": function(k) { var s, a = .1, p = .4; if (k === 0) return 0; if (k === 1) return 1; if (!a || a < 1) { a = 1; s = p / 4; } else s = p * Math.asin(1 / a) / (2 * Math.PI); if ((k *= 2) < 1) return -.5 * (a * Math.pow(2, 10 * (k -= 1)) * Math.sin((k - s) * (2 * Math.PI) / p)); return a * Math.pow(2, -10 * (k -= 1)) * Math.sin((k - s) * (2 * Math.PI) / p) * .5 + 1; } }, "Back": { "In": function(k) { var s = 1.70158; return k * k * ((s + 1) * k - s); }, "Out": function(k) { var s = 1.70158; return --k * k * ((s + 1) * k + s) + 1; }, "InOut": function(k) { var s = 1.70158 * 1.525; if ((k *= 2) < 1) return.5 * (k * k * ((s + 1) * k - s)); return.5 * ((k -= 2) * k * ((s + 1) * k + s) + 2); } }, "Bounce": { "In": function(k) { return 1 - AlloyPaper.TWEEN.Easing.Bounce.Out(1 - k); }, "Out": function(k) { if (k < 1 / 2.75) { return 7.5625 * k * k; } else if (k < 2 / 2.75) { return 7.5625 * (k -= 1.5 / 2.75) * k + .75; } else if (k < 2.5 / 2.75) { return 7.5625 * (k -= 2.25 / 2.75) * k + .9375; } 
else { return 7.5625 * (k -= 2.625 / 2.75) * k + .984375; } }, "InOut": function(k) { if (k < .5) return AlloyPaper.TWEEN.Easing.Bounce.In(k * 2) * .5; return AlloyPaper.TWEEN.Easing.Bounce.Out(k * 2 - 1) * .5 + .5; } } }, "Interpolation": { "Linear": function(v, k) { var m = v.length - 1, f = m * k, i = Math.floor(f), fn = AlloyPaper.TWEEN.Interpolation.Utils.Linear; if (k < 0) return fn(v[0], v[1], f); if (k > 1) return fn(v[m], v[m - 1], m - f); return fn(v[i], v[i + 1 > m ? m : i + 1], f - i); }, "Bezier": function(v, k) { var b = 0, n = v.length - 1, pw = Math.pow, bn = AlloyPaper.TWEEN.Interpolation.Utils.Bernstein, i; for (i = 0; i <= n; i++) { b += pw(1 - k, n - i) * pw(k, i) * v[i] * bn(n, i); } return b; }, "CatmullRom": function(v, k) { var m = v.length - 1, f = m * k, i = Math.floor(f), fn = AlloyPaper.TWEEN.Interpolation.Utils.CatmullRom; if (v[0] === v[m]) { if (k < 0) i = Math.floor(f = m * (1 + k)); return fn(v[(i - 1 + m) % m], v[i], v[(i + 1) % m], v[(i + 2) % m], f - i); } else { if (k < 0) return v[0] - (fn(v[0], v[0], v[1], v[1], -f) - v[0]); if (k > 1) return v[m] - (fn(v[m], v[m], v[m - 1], v[m - 1], f - m) - v[m]); return fn(v[i ? i - 1 : 0], v[i], v[m < i + 1 ? m : i + 1], v[m < i + 2 ? 
m : i + 2], f - i); } }, "Utils": { "Linear": function(p0, p1, t) { return (p1 - p0) * t + p0; }, "Bernstein": function(n, i) { var fc = AlloyPaper.TWEEN.Interpolation.Utils.getFactorial(); return fc(n) / fc(i) / fc(n - i); }, "getFactorial": function() { return function() { var a = [1]; return function(n) { var s = 1, i; if (a[n]) return a[n]; for (i = n; i > 1; i--) s *= i; return a[n] = s; }; }(); }, "CatmullRom": function(p0, p1, p2, p3, t) { var v0 = (p2 - p0) * .5, v1 = (p3 - p1) * .5, t2 = t * t, t3 = t * t2; return (2 * p1 - 2 * p2 + v0 + v1) * t3 + (-3 * p1 + 3 * p2 - 2 * v0 - v1) * t2 + v0 * t + p1; } } } } }); //begin-------------------AlloyPaper.Dom---------------------begin AlloyPaper.Dom = Class.extend({ "statics": { "get": function(selector) { this.element = document.querySelector(selector); return this; }, "on": function(type, fn) { this.element.addEventListener(type, fn, false); return this; } } }); //end-------------------AlloyPaper.Dom---------------------end //begin-------------------AlloyPaper.FPS---------------------begin AlloyPaper.FPS = Class.extend({ "statics": { "get": function() { if (!this.instance) this.instance = new this(); this.instance._computeFPS(); return this.instance; } }, "ctor": function() { this.last = new Date(); this.current = null; this.lastMeasured=new Date(); this.fpsList = []; this.totalValue = 0; this.value = 60; }, "_computeFPS": function() { this.current = new Date(); if (this.current - this.last > 0) { var fps = Math.ceil(1e3 / (this.current - this.last)); this.fpsList.push(fps); this.totalValue += fps; this.last = this.current; } if (this.current - this.lastMeasured > 1000) { this.value =Math.ceil( this.totalValue / this.fpsList.length); this.totalValue = 0; this.fpsList.length = 0; this.lastMeasured = this.current; } } }); //end-------------------AlloyPaper.FPS---------------------end AlloyPaper.Keyboard = Class.extend({ "statics": { "ctor": function() { var KeyboardJS = {}, locales = {}, locale, map, macros, 
activeKeys = [], bindings = [], activeBindings = [], activeMacros = [], aI, usLocale; usLocale = { map: { "3": ["cancel"], "8": ["backspace"], "9": ["tab"], "12": ["clear"], "13": ["enter"], "16": ["shift"], "17": ["ctrl"], "18": ["alt", "menu"], "19": ["pause", "break"], "20": ["capslock"], "27": ["escape", "esc"], "32": ["space", "spacebar"], "33": ["pageup"], "34": ["pagedown"], "35": ["end"], "36": ["home"], "37": ["left"], "38": ["up"], "39": ["right"], "40": ["down"], "41": ["select"], "42": ["printscreen"], "43": ["execute"], "44": ["snapshot"], "45": ["insert", "ins"], "46": ["delete", "del"], "47": ["help"], "91": ["command", "windows", "win", "super", "leftcommand", "leftwindows", "leftwin", "leftsuper"], "92": ["command", "windows", "win", "super", "rightcommand", "rightwindows", "rightwin", "rightsuper"], "145": ["scrolllock", "scroll"], "186": ["semicolon", ";"], "187": ["equal", "equalsign", "="], "188": ["comma", ","], "189": ["dash", "-"], "190": ["period", "."], "191": ["slash", "forwardslash", "/"], "192": ["graveaccent", "`"], "219": ["openbracket", "["], "220": ["backslash", "\\"], "221": ["closebracket", "]"], "222": ["apostrophe", "'"], "48": ["zero", "0"], "49": ["one", "1"], "50": ["two", "2"], "51": ["three", "3"], "52": ["four", "4"], "53": ["five", "5"], "54": ["six", "6"], "55": ["seven", "7"], "56": ["eight", "8"], "57": ["nine", "9"], "96": ["numzero", "num0"], "97": ["numone", "num1"], "98": ["numtwo", "num2"], "99": ["numthree", "num3"], "100": ["numfour", "num4"], "101": ["numfive", "num5"], "102": ["numsix", "num6"], "103": ["numseven", "num7"], "104": ["numeight", "num8"], "105": ["numnine", "num9"], "106": ["nummultiply", "num*"], "107": ["numadd", "num+"], "108": ["numenter"], "109": ["numsubtract", "num-"], "110": ["numdecimal", "num."], "111": ["numdivide", "num/"], "144": ["numlock", "num"], "112": ["f1"], "113": ["f2"], "114": ["f3"], "115": ["f4"], "116": ["f5"], "117": ["f6"], "118": ["f7"], "119": ["f8"], "120": ["f9"], 
"121": ["f10"], "122": ["f11"], "123": ["f12"] }, macros: [["shift + `", ["tilde", "~"]], ["shift + 1", ["exclamation", "exclamationpoint", "!"]], ["shift + 2", ["at", "@"]], ["shift + 3", ["number", "#"]], ["shift + 4", ["dollar", "dollars", "dollarsign", "$"]], ["shift + 5", ["percent", "%"]], ["shift + 6", ["caret", "^"]], ["shift + 7", ["ampersand", "and", "&"]], ["shift + 8", ["asterisk", "*"]], ["shift + 9", ["openparen", "("]], ["shift + 0", ["closeparen", ")"]], ["shift + -", ["underscore", "_"]], ["shift + =", ["plus", "+"]], ["shift + (", ["opencurlybrace", "opencurlybracket", "{"]], ["shift + )", ["closecurlybrace", "closecurlybracket", "}"]], ["shift + \\", ["verticalbar", "|"]], ["shift + ;", ["colon", ":"]], ["shift + '", ["quotationmark", '"']], ["shift + !,", ["openanglebracket", "<"]], ["shift + .", ["closeanglebracket", ">"]], ["shift + /", ["questionmark", "?"]]] }; for (aI = 65; aI <= 90; aI += 1) { usLocale.map[aI] = String.fromCharCode(aI + 32); usLocale.macros.push(["shift + " + String.fromCharCode(aI + 32) + ", capslock + " + String.fromCharCode(aI + 32), [String.fromCharCode(aI)]]); } registerLocale("us", usLocale); getSetLocale("us"); enable(); KeyboardJS.enable = enable; KeyboardJS.disable = disable; KeyboardJS.activeKeys = getActiveKeys; KeyboardJS.releaseKey = removeActiveKey; KeyboardJS.pressKey = addActiveKey; KeyboardJS.on = createBinding; KeyboardJS.clear = removeBindingByKeyCombo; KeyboardJS.clear.key = removeBindingByKeyName; KeyboardJS.locale = getSetLocale; KeyboardJS.locale.register = registerLocale; KeyboardJS.macro = createMacro; KeyboardJS.macro.remove = removeMacro; KeyboardJS.key = {}; KeyboardJS.key.name = getKeyName; KeyboardJS.key.code = getKeyCode; KeyboardJS.combo = {}; KeyboardJS.combo.active = isSatisfiedCombo; KeyboardJS.combo.parse = parseKeyCombo; KeyboardJS.combo.stringify = stringifyKeyCombo; function enable() { if (window.addEventListener) { window.document.addEventListener("keydown", keydown, false); 
window.document.addEventListener("keyup", keyup, false); window.addEventListener("blur", reset, false); window.addEventListener("webkitfullscreenchange", reset, false); window.addEventListener("mozfullscreenchange", reset, false); } else if (window.attachEvent) { window.document.attachEvent("onkeydown", keydown); window.document.attachEvent("onkeyup", keyup); window.attachEvent("onblur", reset); } } function disable() { reset(); if (window.removeEventListener) { window.document.removeEventListener("keydown", keydown, false); window.document.removeEventListener("keyup", keyup, false); window.removeEventListener("blur", reset, false); window.removeEventListener("webkitfullscreenchange", reset, false); window.removeEventListener("mozfullscreenchange", reset, false); } else if (window.detachEvent) { window.document.detachEvent("onkeydown", keydown); window.document.detachEvent("onkeyup", keyup); window.detachEvent("onblur", reset); } } function reset(event) { activeKeys = []; pruneMacros(); pruneBindings(event); } function keydown(event) { var keyNames, keyName, kI; keyNames = getKeyName(event.keyCode); if (keyNames.length < 1) { return; } event.isRepeat = false; for (kI = 0; kI < keyNames.length; kI += 1) { keyName = keyNames[kI]; if (getActiveKeys().indexOf(keyName) != -1) event.isRepeat = true; addActiveKey(keyName); } executeMacros(); executeBindings(event); } function keyup(event) { var keyNames, kI; keyNames = getKeyName(event.keyCode); if (keyNames.length < 1) { return; } for (kI = 0; kI < keyNames.length; kI += 1) { removeActiveKey(keyNames[kI]); } pruneMacros(); pruneBindings(event); } function getKeyName(keyCode) { return map[keyCode] || []; } function getKeyCode(keyName) { var keyCode; for (keyCode in map) { if (!map.hasOwnProperty(keyCode)) { continue; } if (map[keyCode].indexOf(keyName) > -1) { return keyCode; } } return false; } function createMacro(combo, injectedKeys) { if (typeof combo !== "string" && (typeof combo !== "object" || typeof combo.push !== 
"function")) { throw new Error("Cannot create macro. The combo must be a string or array."); } if (typeof injectedKeys !== "object" || typeof injectedKeys.push !== "function") { throw new Error("Cannot create macro. The injectedKeys must be an array."); } macros.push([combo, injectedKeys]); } function removeMacro(combo) { var macro, mI; if (typeof combo !== "string" && (typeof combo !== "object" || typeof combo.push !== "function")) { throw new Error("Cannot remove macro. The combo must be a string or array."); } for (mI = 0; mI < macros.length; mI += 1) { macro = macros[mI]; if (compareCombos(combo, macro[0])) { removeActiveKey(macro[1]); macros.splice(mI, 1); break; } } } function executeMacros() { var mI, combo, kI; for (mI = 0; mI < macros.length; mI += 1) { combo = parseKeyCombo(macros[mI][0]); if (activeMacros.indexOf(macros[mI]) === -1 && isSatisfiedCombo(combo)) { activeMacros.push(macros[mI]); for (kI = 0; kI < macros[mI][1].length; kI += 1) { addActiveKey(macros[mI][1][kI]); } } } } function pruneMacros() { var mI, combo, kI; for (mI = 0; mI < activeMacros.length; mI += 1) { combo = parseKeyCombo(activeMacros[mI][0]); if (isSatisfiedCombo(combo) === false) { for (kI = 0; kI < activeMacros[mI][1].length; kI += 1) { removeActiveKey(activeMacros[mI][1][kI]); } activeMacros.splice(mI, 1); mI -= 1; } } } function createBinding(keyCombo, keyDownCallback, keyUpCallback) { var api = {}, binding, subBindings = [], bindingApi = {}, kI, subCombo; if (typeof keyCombo === "string") { keyCombo = parseKeyCombo(keyCombo); } for (kI = 0; kI < keyCombo.length; kI += 1) { binding = {}; subCombo = stringifyKeyCombo([keyCombo[kI]]); if (typeof subCombo !== "string") { throw new Error("Failed to bind key combo. 
The key combo must be string."); } binding.keyCombo = subCombo; binding.keyDownCallback = []; binding.keyUpCallback = []; if (keyDownCallback) { binding.keyDownCallback.push(keyDownCallback); } if (keyUpCallback) { binding.keyUpCallback.push(keyUpCallback); } bindings.push(binding); subBindings.push(binding); } api.clear = clear; api.on = on; return api; function clear() { var bI; for (bI = 0; bI < subBindings.length; bI += 1) { bindings.splice(bindings.indexOf(subBindings[bI]), 1); } } function on(eventName) { var api = {}, callbacks, cI, bI; if (typeof eventName !== "string") { throw new Error("Cannot bind callback. The event name must be a string."); } if (eventName !== "keyup" && eventName !== "keydown") { throw new Error('Cannot bind callback. The event name must be a "keyup" or "keydown".'); } callbacks = Array.prototype.slice.apply(arguments, [1]); for (cI = 0; cI < callbacks.length; cI += 1) { if (typeof callbacks[cI] === "function") { if (eventName === "keyup") { for (bI = 0; bI < subBindings.length; bI += 1) { subBindings[bI].keyUpCallback.push(callbacks[cI]); } } else if (eventName === "keydown") { for (bI = 0; bI < subBindings.length; bI += 1) { subBindings[bI].keyDownCallback.push(callbacks[cI]); } } } } api.clear = clear; return api; function clear() { var cI, bI; for (cI = 0; cI < callbacks.length; cI += 1) { if (typeof callbacks[cI] === "function") { if (eventName === "keyup") { for (bI = 0; bI < subBindings.length; bI += 1) { subBindings[bI].keyUpCallback.splice(subBindings[bI].keyUpCallback.indexOf(callbacks[cI]), 1); } } else { for (bI = 0; bI < subBindings.length; bI += 1) { subBindings[bI].keyDownCallback.splice(subBindings[bI].keyDownCallback.indexOf(callbacks[cI]), 1); } } } } } } } function removeBindingByKeyCombo(keyCombo) { var bI, binding, keyName; for (bI = 0; bI < bindings.length; bI += 1) { binding = bindings[bI]; if (compareCombos(keyCombo, binding.keyCombo)) { bindings.splice(bI, 1); bI -= 1; } } } function 
removeBindingByKeyName(keyName) { var bI, kI, binding; if (keyName) { for (bI = 0; bI < bindings.length; bI += 1) { binding = bindings[bI]; for (kI = 0; kI < binding.keyCombo.length; kI += 1) { if (binding.keyCombo[kI].indexOf(keyName) > -1) { bindings.splice(bI, 1); bI -= 1; break; } } } } else { bindings = []; } } function executeBindings(event) { var bI, sBI, binding, bindingKeys, remainingKeys, cI, killEventBubble, kI, bindingKeysSatisfied, index, sortedBindings = [], bindingWeight; remainingKeys = [].concat(activeKeys); for (bI = 0; bI < bindings.length; bI += 1) { bindingWeight = extractComboKeys(bindings[bI].keyCombo).length; if (!sortedBindings[bindingWeight]) { sortedBindings[bindingWeight] = []; } sortedBindings[bindingWeight].push(bindings[bI]); } for (sBI = sortedBindings.length - 1; sBI >= 0; sBI -= 1) { if (!sortedBindings[sBI]) { continue; } for (bI = 0; bI < sortedBindings[sBI].length; bI += 1) { binding = sortedBindings[sBI][bI]; bindingKeys = extractComboKeys(binding.keyCombo); bindingKeysSatisfied = true; for (kI = 0; kI < bindingKeys.length; kI += 1) { if (remainingKeys.indexOf(bindingKeys[kI]) === -1) { bindingKeysSatisfied = false; break; } } if (bindingKeysSatisfied && isSatisfiedCombo(binding.keyCombo)) { activeBindings.push(binding); for (kI = 0; kI < bindingKeys.length; kI += 1) { index = remainingKeys.indexOf(bindingKeys[kI]); if (index > -1) { remainingKeys.splice(index, 1); kI -= 1; } } for (cI = 0; cI < binding.keyDownCallback.length; cI += 1) { if (binding.keyDownCallback[cI](event, getActiveKeys(), binding.keyCombo) === false) { killEventBubble = true; } } if (killEventBubble === true) { event.preventDefault(); event.stopPropagation(); } } } } } function pruneBindings(event) { var bI, cI, binding, killEventBubble; for (bI = 0; bI < activeBindings.length; bI += 1) { binding = activeBindings[bI]; if (isSatisfiedCombo(binding.keyCombo) === false) { for (cI = 0; cI < binding.keyUpCallback.length; cI += 1) { if 
(binding.keyUpCallback[cI](event, getActiveKeys(), binding.keyCombo) === false) { killEventBubble = true; } } if (killEventBubble === true) { event.preventDefault(); event.stopPropagation(); } activeBindings.splice(bI, 1); bI -= 1; } } } function compareCombos(keyComboArrayA, keyComboArrayB) { var cI, sI, kI; keyComboArrayA = parseKeyCombo(keyComboArrayA); keyComboArrayB = parseKeyCombo(keyComboArrayB); if (keyComboArrayA.length !== keyComboArrayB.length) { return false; } for (cI = 0; cI < keyComboArrayA.length; cI += 1) { if (keyComboArrayA[cI].length !== keyComboArrayB[cI].length) { return false; } for (sI = 0; sI < keyComboArrayA[cI].length; sI += 1) { if (keyComboArrayA[cI][sI].length !== keyComboArrayB[cI][sI].length) { return false; } for (kI = 0; kI < keyComboArrayA[cI][sI].length; kI += 1) { if (keyComboArrayB[cI][sI].indexOf(keyComboArrayA[cI][sI][kI]) === -1) { return false; } } } } return true; } function isSatisfiedCombo(keyCombo) { var cI, sI, stage, kI, stageOffset = 0, index, comboMatches; keyCombo = parseKeyCombo(keyCombo); for (cI = 0; cI < keyCombo.length; cI += 1) { comboMatches = true; stageOffset = 0; for (sI = 0; sI < keyCombo[cI].length; sI += 1) { stage = [].concat(keyCombo[cI][sI]); for (kI = stageOffset; kI < activeKeys.length; kI += 1) { index = stage.indexOf(activeKeys[kI]); if (index > -1) { stage.splice(index, 1); stageOffset = kI; } } if (stage.length !== 0) { comboMatches = false; break; } } if (comboMatches) { return true; } } return false; } function extractComboKeys(keyCombo) { var cI, sI, kI, keys = []; keyCombo = parseKeyCombo(keyCombo); for (cI = 0; cI < keyCombo.length; cI += 1) { for (sI = 0; sI < keyCombo[cI].length; sI += 1) { keys = keys.concat(keyCombo[cI][sI]); } } return keys; } function parseKeyCombo(keyCombo) { var s = keyCombo, i = 0, op = 0, ws = false, nc = false, combos = [], combo = [], stage = [], key = ""; if (typeof keyCombo === "object" && typeof keyCombo.push === "function") { return keyCombo; } if (typeof 
keyCombo !== "string") { throw new Error('Cannot parse "keyCombo" because its type is "' + typeof keyCombo + '". It must be a "string".'); } while (s.charAt(i) === " ") { i += 1; } while (true) { if (s.charAt(i) === " ") { while (s.charAt(i) === " ") { i += 1; } ws = true; } else if (s.charAt(i) === ",") { if (op || nc) { throw new Error("Failed to parse key combo. Unexpected , at character index " + i + "."); } nc = true; i += 1; } else if (s.charAt(i) === "+") { if (key.length) { stage.push(key); key = ""; } if (op || nc) { throw new Error("Failed to parse key combo. Unexpected + at character index " + i + "."); } op = true; i += 1; } else if (s.charAt(i) === ">") { if (key.length) { stage.push(key); key = ""; } if (stage.length) { combo.push(stage); stage = []; } if (op || nc) { throw new Error("Failed to parse key combo. Unexpected > at character index " + i + "."); } op = true; i += 1; } else if (i < s.length - 1 && s.charAt(i) === "!" && (s.charAt(i + 1) === ">" || s.charAt(i + 1) === "," || s.charAt(i + 1) === "+")) { key += s.charAt(i + 1); op = false; ws = false; nc = false; i += 2; } else if (i < s.length && s.charAt(i) !== "+" && s.charAt(i) !== ">" && s.charAt(i) !== "," && s.charAt(i) !== " ") { if (op === false && ws === true || nc === true) { if (key.length) { stage.push(key); key = ""; } if (stage.length) { combo.push(stage); stage = []; } if (combo.length) { combos.push(combo); combo = []; } } op = false; ws = false; nc = false; while (i < s.length && s.charAt(i) !== "+" && s.charAt(i) !== ">" && s.charAt(i) !== "," && s.charAt(i) !== " ") { key += s.charAt(i); i += 1; } } else { i += 1; continue; } if (i >= s.length) { if (key.length) { stage.push(key); key = ""; } if (stage.length) { combo.push(stage); stage = []; } if (combo.length) { combos.push(combo); combo = []; } break; } } return combos; } function stringifyKeyCombo(keyComboArray) { var cI, ccI, output = []; if (typeof keyComboArray === "string") { return keyComboArray; } if (typeof 
keyComboArray !== "object" || typeof keyComboArray.push !== "function") { throw new Error("Cannot stringify key combo."); } for (cI = 0; cI < keyComboArray.length; cI += 1) { output[cI] = []; for (ccI = 0; ccI < keyComboArray[cI].length; ccI += 1) { output[cI][ccI] = keyComboArray[cI][ccI].join(" + "); } output[cI] = output[cI].join(" > "); } return output.join(" "); } function getActiveKeys() { return [].concat(activeKeys); } function addActiveKey(keyName) { if (keyName.match(/\s/)) { throw new Error("Cannot add key name " + keyName + " to active keys because it contains whitespace."); } if (activeKeys.indexOf(keyName) > -1) { return; } activeKeys.push(keyName); } function removeActiveKey(keyName) { var keyCode = getKeyCode(keyName); if (keyCode === "91" || keyCode === "92") { activeKeys = []; } else { activeKeys.splice(activeKeys.indexOf(keyName), 1); } } function registerLocale(localeName, localeMap) { if (typeof localeName !== "string") { throw new Error("Cannot register new locale. The locale name must be a string."); } if (typeof localeMap !== "object") { throw new Error("Cannot register " + localeName + " locale. The locale map must be an object."); } if (typeof localeMap.map !== "object") { throw new Error("Cannot register " + localeName + " locale. The locale map is invalid."); } if (!localeMap.macros) { localeMap.macros = []; } locales[localeName] = localeMap; } function getSetLocale(localeName) { if (localeName) { if (typeof localeName !== "string") { throw new Error("Cannot set locale. The locale name must be a string."); } if (!locales[localeName]) { throw new Error("Cannot set locale to " + localeName + " because it does not exist. 
If you would like to submit a " + localeName + " locale map for KeyboardJS please submit it at path_to_url"); } map = locales[localeName].map; macros = locales[localeName].macros; locale = localeName; } return locale; } this.Keyboard = KeyboardJS; }, "on": function(keyCombo, onDownCallback, onUpCallback) { this.Keyboard.on(keyCombo, onDownCallback, onUpCallback); }, "getActiveKeys": function() { return this.Keyboard.activeKeys(); } } }); //begin-------------------AlloyPaper.Loader---------------------begin AlloyPaper.Loader = Class.extend({ "ctor": function() { this.res = {}; this.loadedCount = 0; this.resCount = -1; this.FILE_PATTERN = /(\w+:\/{2})?((?:\w+\.){2}\w+)?(\/?[\S]+\/|\/)?([\w\-%\.]+)(?:\.)(\w+)?(\?\S+)?/i; this.ns = 6; this.sounds = []; for (var i = 0; i < this.ns; i++) this.sounds.push([]); this.playing = []; }, "get": function(id) { return this.res[id]; }, "loadRes": function(arr) { this.resCount = arr.length; for (var i = 0; i < arr.length; i++) { var type=this._getTypeByExtension(arr[i].src.match(this.FILE_PATTERN)[5]); if (type === "audio") { this.loadAudio(arr[i].id, arr[i].src); } else if (type === "js") { this.loadScript(arr[i].src); } else if (type === "img") { this.loadImage(arr[i].id, arr[i].src); } } }, "loadImage": function(id, src) { var img = document.createElement("img"); var self = this; img.onload = function() { self._handleLoad(this, id); img.onreadystatechange = null; }; img.onreadystatechange = function() { if (img.readyState == "loaded" || img.readyState == "complete") { self._handleLoad(this, id); img.onload = null; } }; img.onerror = function() {}; img.src = src; }, "loadAudio": function(id, src) { var tag = document.createElement("audio"); tag.autoplay = false; this.res[id] = tag; tag.src = null; tag.preload = "auto"; tag.onerror = function() {}; tag.onstalled = function() {}; var self = this; var _audioCanPlayHandler = function() { self.playing[id] = 0; for (var i = 0; i < self.ns; i++) { self.sounds[i][id] = new Audio(src); } 
            self.loadedCount++;
            // progress callback is optional — invoke only when registered via progress()
            self.handleProgress&&self.handleProgress(self.loadedCount, self.resCount);
            self._clean(this);
            this.removeEventListener && this.removeEventListener("canplaythrough", _audioCanPlayHandler, false);
            self.checkComplete();
        };
        tag.addEventListener("canplaythrough", _audioCanPlayHandler, false);
        tag.src = src;
        if (tag.load != null) {
            tag.load();
        }
    },
    // Load and execute an external JavaScript file by appending a <script>
    // element to <head>. Counts toward the resource total via _handleLoad.
    "loadScript": function (url) {
        var script = document.createElement("script");
        script.type = "text/javascript";
        var self = this;
        if (script.readyState) { //IE: poll readyState instead of onload
            script.onreadystatechange = function () {
                if (script.readyState == "loaded" || script.readyState == "complete") {
                    script.onreadystatechange = null;
                    self._handleLoad();
                }
            };
        } else { //Others: standard onload event
            script.onload = function () {
                self._handleLoad();
            };
        }
        script.src = url;
        document.getElementsByTagName("head")[0].appendChild(script);
    },
    // Fire the completion callback once every queued resource has loaded.
    "checkComplete": function() {
        if (this.loadedCount === this.resCount) {
            this.handleComplete();
        }
    },
    // Register the all-resources-loaded callback.
    "complete": function(fn) {
        this.handleComplete = fn;
    },
    // Register the per-resource progress callback(loadedCount, resCount).
    "progress": function(fn) {
        this.handleProgress = fn;
    },
    // Play a preloaded sound by id. Rotates through the this.ns pre-created
    // Audio elements for that id so the same sound can overlap itself.
    // `volume` defaults to 1 when omitted.
    "playSound": function (id, volume) {
        var sound = this.sounds[this.playing[id]][id];
        sound.volume = volume === undefined ? 1 : volume;
        sound.play();
        ++this.playing[id];
        if (this.playing[id] >= this.ns) this.playing[id] = 0;
    },
    // Shared load handler for images/scripts: stores the element under its id
    // (when given), bumps the counter, reports progress, checks completion.
    "_handleLoad": function (currentImg, id) {
        if (currentImg) {
            this._clean(currentImg);
            this.res[id] = currentImg;
        }
        this.loadedCount++;
        if (this.handleProgress) this.handleProgress(this.loadedCount, this.resCount);
        this.checkComplete();
    },
    // Map a file extension to a loader category ("img" | "audio" | "js");
    // returns undefined for unknown extensions.
    "_getTypeByExtension": function(extension) {
        switch (extension) {
            case "jpeg":
            case "jpg":
            case "gif":
            case "png":
            case "webp":
            case "bmp":
                return "img";
            case "ogg":
            case "mp3":
            case "wav":
                return "audio";
            case "js":
                return "js";
        }
    },
    // Drop event handlers from a media/img element once loading is settled.
    "_clean": function(tag) {
        tag.onload = null;
        tag.onstalled = null;
        tag.onprogress = null;
        tag.onerror = null;
    }
});
//end-------------------AlloyPaper.Loader---------------------end
//begin-------------------AlloyPaper.Matrix2D---------------------begin
// 2D affine transform matrix with layout:
//   [ a  c  tx ]
//   [ b  d  ty ]
AlloyPaper.Matrix2D = Class.extend({
    "statics": {
        // Math.PI / 180 — degrees-to-radians conversion factor
        "DEG_TO_RAD": 0.017453292519943295
    },
    // Initialize components; omitted a/d default to 1 (identity scale),
    // the other components default to 0.
    "ctor": function(a, b, c, d, tx, ty) {
        this.a = a == null ? 1 : a;
        this.b = b || 0;
        this.c = c || 0;
        this.d = d == null ?
        1 : d;
        this.tx = tx || 0;
        this.ty = ty || 0;
        return this;
    },
    // Reset to the identity matrix; returns this for chaining.
    "identity": function() {
        this.a = this.d = 1;
        this.b = this.c = this.tx = this.ty = 0;
        return this;
    },
    // Append (post-multiply) a full display-object transform: translation
    // (x, y), optional skew (degrees), rotation (degrees) and scale, then
    // offset by the registration point (regX, regY). Returns this.
    "appendTransform": function(x, y, scaleX, scaleY, rotation, skewX, skewY, regX, regY) {
        if (rotation % 360) {
            var r = rotation * AlloyPaper.Matrix2D.DEG_TO_RAD;
            var cos = Math.cos(r);
            var sin = Math.sin(r);
        } else {
            // var hoisting makes cos/sin function-scoped, so these
            // assignments are still local to this method
            cos = 1;
            sin = 0;
        }
        if (skewX || skewY) {
            // with skew, apply the skewed translation first, then
            // rotation+scale as a separate appended matrix
            skewX *= AlloyPaper.Matrix2D.DEG_TO_RAD;
            skewY *= AlloyPaper.Matrix2D.DEG_TO_RAD;
            this.append(Math.cos(skewY), Math.sin(skewY), -Math.sin(skewX), Math.cos(skewX), x, y);
            this.append(cos * scaleX, sin * scaleX, -sin * scaleY, cos * scaleY, 0, 0);
        } else {
            this.append(cos * scaleX, sin * scaleX, -sin * scaleY, cos * scaleY, x, y);
        }
        if (regX || regY) {
            // shift back by the transformed registration point
            this.tx -= regX * this.a + regY * this.c;
            this.ty -= regX * this.b + regY * this.d;
        }
        return this;
    },
    // Append (post-multiply) the matrix [a c tx; b d ty] onto this one;
    // returns this for chaining.
    "append": function(a, b, c, d, tx, ty) {
        var a1 = this.a;
        var b1 = this.b;
        var c1 = this.c;
        var d1 = this.d;
        this.a = a * a1 + b * c1;
        this.b = a * b1 + b * d1;
        this.c = c * a1 + d * c1;
        this.d = c * b1 + d * d1;
        this.tx = tx * a1 + ty * c1 + this.tx;
        this.ty = tx * b1 + ty * d1 + this.ty;
        return this;
    },
    // Set all six components unconditionally (no defaulting); returns this.
    "initialize": function(a, b, c, d, tx, ty) {
        this.a = a;
        this.b = b;
        this.c = c;
        this.d = d;
        this.tx = tx;
        this.ty = ty;
        return this;
    },
    // Like ctor: set components with identity defaults for a/d; returns this.
    "setValues": function(a, b, c, d, tx, ty) {
        this.a = a == null ? 1 : a;
        this.b = b || 0;
        this.c = c || 0;
        this.d = d == null ?
1 : d; this.tx = tx || 0; this.ty = ty || 0; return this; }, "copy": function(matrix) { return this.setValues(matrix.a, matrix.b, matrix.c, matrix.d, matrix.tx, matrix.ty); } }); //end-------------------AlloyPaper.Matrix2D---------------------end (function () { var observe = function (target, arr, callback) { var _observe = function (target, arr, callback) { if (!target.$observer) target.$observer = this; var $observer = target.$observer; var eventPropArr = []; if (observe.isArray(target)) { $observer.mock(target); } for (var prop in target) { if (target.hasOwnProperty(prop)) { if (callback) { if (observe.isArray(arr) && observe.isInArray(arr, prop)) { eventPropArr.push(prop); $observer.watch(target, prop); } else if (observe.isString(arr) && prop == arr) { eventPropArr.push(prop); $observer.watch(target, prop); } } else { eventPropArr.push(prop); $observer.watch(target, prop); } } } $observer.target = target; if (!$observer.propertyChangedHandler) $observer.propertyChangedHandler = []; var propChanged = callback ? 
callback : arr; $observer.propertyChangedHandler.push({ all: !callback, propChanged: propChanged, eventPropArr: eventPropArr }); };
_observe.prototype = {
// Dispatches registered change handlers when a watched property's value actually changed,
// then re-wraps the new value with getters/setters if it is itself an object.
"onPropertyChanged": function (prop, value, oldValue, target, path) { if (value !== oldValue && this.propertyChangedHandler) { var rootName = observe._getRootName(prop, path); for (var i = 0, len = this.propertyChangedHandler.length; i < len; i++) { var handler = this.propertyChangedHandler[i]; if (handler.all || observe.isInArray(handler.eventPropArr, rootName) || rootName.indexOf("Array-") === 0) { handler.propChanged.call(this.target, prop, value, oldValue, path); } } } if (prop.indexOf("Array-") !== 0 && typeof value === "object") { this.watch(target, prop, target.$observeProps.$observerPath); } },
// Shadows Array.prototype methods on an observed array instance; mutating methods
// (those listed in observe.triggerStr) re-watch elements and emit an "Array-<method>" change.
"mock": function (target) { var self = this; observe.methods.forEach(function (item) { target[item] = function () { var old = Array.prototype.slice.call(this, 0); var result = Array.prototype[item].apply(this, Array.prototype.slice.call(arguments)); if (new RegExp("\\b" + item + "\\b").test(observe.triggerStr)) { for (var cprop in this) { if (this.hasOwnProperty(cprop) && !observe.isFunction(this[cprop])) { self.watch(this, cprop, this.$observeProps.$observerPath); } } //todo
self.onPropertyChanged("Array-" + item, this, old, this, this.$observeProps.$observerPath); } return result; }; }); },
// Replaces `target[prop]` with an accessor pair backed by target.$observeProps[prop];
// `path` tracks the nesting chain ("#" at the root, "#-child" one level down, ...).
"watch": function (target, prop, path) { if (prop === "$observeProps" || prop === "$observer") return; if (observe.isFunction(target[prop])) return; if (!target.$observeProps) target.$observeProps = {}; if (path !== undefined) { target.$observeProps.$observerPath = path; } else { target.$observeProps.$observerPath = "#"; } var self = this; var currentValue = target.$observeProps[prop] = target[prop]; Object.defineProperty(target, prop, { get: function () { return this.$observeProps[prop]; }, set: function (value) { var old = this.$observeProps[prop]; this.$observeProps[prop] = value; self.onPropertyChanged(prop, value, old, this, target.$observeProps.$observerPath); } }); if (typeof currentValue == "object") { if (observe.isArray(currentValue)) { this.mock(currentValue); } for (var cprop in currentValue) { if (currentValue.hasOwnProperty(cprop)) { this.watch(currentValue, cprop, target.$observeProps.$observerPath + "-" + prop); } } } } };
return new _observe(target, arr, callback) };
// Array methods that get shadowed on observed arrays; triggerStr is the subset that mutates.
observe.methods = ["concat", "every", "filter", "forEach", "indexOf", "join", "lastIndexOf", "map", "pop", "push", "reduce", "reduceRight", "reverse", "shift", "slice", "some", "sort", "splice", "unshift", "toLocaleString", "toString", "size"];
observe.triggerStr = ["concat", "pop", "push", "reverse", "shift", "sort", "splice", "unshift", "size"].join(",");
observe.isArray = function (obj) { return Object.prototype.toString.call(obj) === '[object Array]'; };
observe.isString = function (obj) { return typeof obj === "string"; };
observe.isInArray = function (arr, item) { for (var i = arr.length; --i > -1;) { if (item === arr[i]) return true; } return false; };
observe.isFunction = function (obj) { return Object.prototype.toString.call(obj) == '[object Function]'; };
// Keeps objA[aProp] and objB[bProp] mutually synchronised by observing both sides.
observe.twoWay = function (objA, aProp, objB, bProp) { if (typeof objA[aProp] === "object" && typeof objB[bProp] === "object") { observe(objA, aProp, function (name, value) { objB[bProp] = this[aProp]; })
observe(objB, bProp, function (name, value) { objA[aProp] = this[bProp]; })
} else { observe(objA, aProp, function (name, value) { objB[bProp] = value; })
observe(objB, bProp, function (name, value) { objA[aProp] = value; })
} }
// Root property name for a nested change: the segment right after "#" in the path.
observe._getRootName = function (prop, path) { if (path === "#") { return prop; } return path.split("-")[1]; }
// Adds a new property to an already-observed object and starts watching it.
observe.add = function (obj, prop, value) { obj[prop] = value; var $observer = obj.$observer; $observer.watch(obj, prop); }
// NOTE(review): extends Array.prototype globally; "size" truncates/extends via .length.
Array.prototype.size = function (length) { this.length = length; }
AlloyPaper.Observe = observe; })();
//begin-------------------AlloyPaper.RAF---------------------begin
AlloyPaper.RAF =
Class.extend({ "statics": { "ctor": function() { var requestAnimFrame = function() { return window.requestAnimationFrame || window.webkitRequestAnimationFrame || window.mozRequestAnimationFrame || window.oRequestAnimationFrame || window.msRequestAnimationFrame || function(callback, element) { window.setTimeout(callback, 1e3 / 60); }; }(); var requestInterval = function(fn, delay) { if (!window.requestAnimationFrame && !window.webkitRequestAnimationFrame && !(window.mozRequestAnimationFrame && window.mozCancelRequestAnimationFrame) && !window.oRequestAnimationFrame && !window.msRequestAnimationFrame) return window.setInterval(fn, delay); var start = new Date().getTime(), handle = {}; function loop() { var current = new Date().getTime(), delta = current - start; if (delta >= delay) { fn.call(); start = new Date().getTime(); } handle.value = requestAnimFrame(loop); } handle.value = requestAnimFrame(loop); return handle; }; var clearRequestInterval = function(handle) { if (handle) { setTimeout(function() { window.cancelAnimationFrame ? window.cancelAnimationFrame(handle.value) : window.webkitCancelAnimationFrame ? window.webkitCancelAnimationFrame(handle.value) : window.webkitCancelRequestAnimationFrame ? window.webkitCancelRequestAnimationFrame(handle.value) : window.mozCancelRequestAnimationFrame ? window.mozCancelRequestAnimationFrame(handle.value) : window.oCancelRequestAnimationFrame ? window.oCancelRequestAnimationFrame(handle.value) : window.msCancelRequestAnimationFrame ? 
window.msCancelRequestAnimationFrame(handle.value) : clearInterval(handle); }, 0); } }; this.requestInterval = requestInterval; this.clearRequestInterval = clearRequestInterval; } } }); //end-------------------AlloyPaper.RAF---------------------end //begin-------------------AlloyPaper.To---------------------begin AlloyPaper.To = Class.extend({ "statics": { "ctor": function () { this.bounceOut = AlloyPaper.TWEEN.Easing.Bounce.Out, this.linear = AlloyPaper.TWEEN.Easing.Linear.None, this.quadraticIn = AlloyPaper.TWEEN.Easing.Quadratic.In, this.quadraticOut = AlloyPaper.TWEEN.Easing.Quadratic.Out, this.quadraticInOut = AlloyPaper.TWEEN.Easing.Quadratic.InOut, this.cubicIn = AlloyPaper.TWEEN.Easing.Cubic.In, this.cubicOut = AlloyPaper.TWEEN.Easing.Cubic.Out, this.cubicInOut = AlloyPaper.TWEEN.Easing.Cubic.InOut, this.quarticIn = AlloyPaper.TWEEN.Easing.Quartic.In, this.quarticOut = AlloyPaper.TWEEN.Easing.Quartic.Out, this.quarticInOut = AlloyPaper.TWEEN.Easing.Quartic.InOut, this.quinticIn = AlloyPaper.TWEEN.Easing.Quintic.In, this.quinticOut = AlloyPaper.TWEEN.Easing.Quintic.Out, this.quinticInOut = AlloyPaper.TWEEN.Easing.Quintic.InOut, this.sinusoidalIn = AlloyPaper.TWEEN.Easing.Sinusoidal.In, this.sinusoidalOut = AlloyPaper.TWEEN.Easing.Sinusoidal.Out, this.sinusoidalInOut = AlloyPaper.TWEEN.Easing.Sinusoidal.InOut, this.exponentialIn = AlloyPaper.TWEEN.Easing.Exponential.In, this.exponentialOut = AlloyPaper.TWEEN.Easing.Exponential.Out, this.exponentialInOut = AlloyPaper.TWEEN.Easing.Exponential.InOut, this.circularIn = AlloyPaper.TWEEN.Easing.Circular.In, this.circularOut = AlloyPaper.TWEEN.Easing.Circular.Out, this.circularInOut = AlloyPaper.TWEEN.Easing.Circular.InOut, this.elasticIn = AlloyPaper.TWEEN.Easing.Elastic.In, this.elasticOut = AlloyPaper.TWEEN.Easing.Elastic.Out, this.elasticInOut = AlloyPaper.TWEEN.Easing.Elastic.InOut, this.backIn = AlloyPaper.TWEEN.Easing.Back.In, this.backOut = AlloyPaper.TWEEN.Easing.Back.Out, this.backInOut = 
AlloyPaper.TWEEN.Easing.Back.InOut, this.bounceIn = AlloyPaper.TWEEN.Easing.Bounce.In, this.bounceOut = AlloyPaper.TWEEN.Easing.Bounce.Out, this.bounceInOut = AlloyPaper.TWEEN.Easing.Bounce.InOut, this.interpolationLinear = AlloyPaper.TWEEN.Interpolation.Linear, this.interpolationBezier = AlloyPaper.TWEEN.Interpolation.Bezier, this.interpolationCatmullRom = AlloyPaper.TWEEN.Interpolation.CatmullRom; },
// Factory: builds a To bound to `element` and registers it on the element's Stage (if attached).
"get": function (element) { var to = new this(element); var stage = this.getStage(element); stage && stage.toList.push(to); return to; },
// Walks up the parent chain to the owning Stage; returns undefined for detached elements.
"getStage": function (element) { if (!element.parent) return; if (element.parent instanceof AlloyPaper.Stage) { return element.parent; } else { return this.getStage(element.parent); } } },
// Instance state: cmds is the queued command list, index the cursor into it; a 15ms
// interval pumps AlloyPaper.TWEEN.update() while the sequence runs (cleared in start()).
"ctor": function(element) { this.element = element; this.cmds = []; this.index = 0; this.tweens = []; this._pause = false; this.loop = setInterval(function() { AlloyPaper.TWEEN.update(); }, 15); this.cycleCount = 0; },
// Opens a new "to" command; subsequent property calls attach [prop, args] entries to it.
"to": function() { this.cmds.push(["to"]); return this; },
// Generic property entry: animate `prop` to `value` over `time` ms with optional easing.
"set": function(prop, value, time, ease) { this.cmds[this.cmds.length - 1].push([prop, [value, time, ease]]); return this; },
// Chainable per-property shorthands; each forwards its (value, duration, ease) arguments.
"x": function() { this.cmds[this.cmds.length - 1].push(["x", arguments]); return this; },
"y": function() { this.cmds[this.cmds.length - 1].push(["y", arguments]); return this; },
"z": function() { this.cmds[this.cmds.length - 1].push(["z", arguments]); return this; },
"rotation": function() { this.cmds[this.cmds.length - 1].push(["rotation", arguments]); return this; },
"scaleX": function() { this.cmds[this.cmds.length - 1].push(["scaleX", arguments]); return this; },
"scaleY": function() { this.cmds[this.cmds.length - 1].push(["scaleY", arguments]); return this; },
"skewX": function() { this.cmds[this.cmds.length - 1].push(["skewX", arguments]); return this; },
"skewY": function() { this.cmds[this.cmds.length - 1].push(["skewY", arguments]); return this; },
"originX": function() { this.cmds[this.cmds.length - 1].push(["originX", arguments]); return this; },
"originY": function() { this.cmds[this.cmds.length - 1].push(["originY", arguments]); return this; },
"alpha": function() { this.cmds[this.cmds.length - 1].push(["alpha", arguments]); return this; },
// Lifecycle callbacks attached to the most recent command.
"begin": function(fn) { this.cmds[this.cmds.length - 1].begin = fn; return this; },
"progress": function(fn) { this.cmds[this.cmds.length - 1].progress = fn; return this; },
"end": function(fn) { this.cmds[this.cmds.length - 1].end = fn; return this; },
// Queue a delay, a nested To sequence, or a jump-back/repeat command.
"wait": function() { this.cmds.push(["wait", arguments]); return this; },
"then": function() { this.cmds.push(["then", arguments]); return this; },
"cycle": function() { this.cmds.push(["cycle", arguments]); return this; },
// Preset animations expressed as canned command lists ({"0": targetValue, "1": durationMs}).
"rubber": function() { this.cmds = this.cmds.concat([["to", ["scaleX", { "0": 1.25, "1": 300}], ["scaleY", { "0": .75, "1": 300}]], ["to", ["scaleX", { "0": .75, "1": 100}], ["scaleY", { "0": 1.25, "1": 100}]], ["to", ["scaleX", { "0": 1.15, "1": 100}], ["scaleY", { "0": .85, "1": 100}]], ["to", ["scaleX", { "0": .95, "1": 150}], ["scaleY", { "0": 1.05, "1": 150}]], ["to", ["scaleX", { "0": 1.05, "1": 100}], ["scaleY", { "0": .95, "1": 100}]], ["to", ["scaleX", { "0": 1, "1": 250}], ["scaleY", { "0": 1, "1": 250}]]]); return this; },
"bounceIn": function() { this.cmds = this.cmds.concat([["to", ["scaleX", { "0": 0, "1": 0}], ["scaleY", { "0": 0, "1": 0}]], ["to", ["scaleX", { "0": 1.35, "1": 200}], ["scaleY", { "0": 1.35, "1": 200}]], ["to", ["scaleX", { "0": .9, "1": 100}], ["scaleY", { "0": .9, "1": 100}]], ["to", ["scaleX", { "0": 1.1, "1": 100}], ["scaleY", { "0": 1.1, "1": 100}]], ["to", ["scaleX", { "0": .95, "1": 100}], ["scaleY", { "0": .95, "1": 100}]], ["to", ["scaleX", { "0": 1, "1": 100}], ["scaleY", { "0": 1, "1": 100}]]]); return this; },
"flipInX": function() { this.cmds = this.cmds.concat([["to", ["rotateX", { "0": -90, "1": 0}]], ["to", ["rotateX", { "0": 20, "1": 300}]], ["to", ["rotateX", { "0": -20, "1": 300}]], ["to", ["rotateX", { "0": 10, "1": 300}]], ["to",
["rotateX", { "0": -5, "1": 300}]], ["to", ["rotateX", { "0": 0, "1": 300}]]]); return this; }, "zoomOut": function() { this.cmds = this.cmds.concat([["to", ["scaleX", { "0": 0, "1": 400}], ["scaleY", { "0": 0, "1": 400}]]]); return this; }, "start": function() { if (this._pause) return; var len = this.cmds.length; if (this.index < len) { this.exec(this.cmds[this.index], this.index == len - 1); } else { clearInterval(this.loop); } return this; }, "pause": function() { this._pause = true; for (var i = 0, len = this.tweens.length; i < len; i++) { this.tweens[i].pause(); } if (this.currentTask == "wait") { this.timeout -= new Date() - this.currentTaskBegin; this.currentTaskBegin = new Date(); } }, "toggle": function() { if (this._pause) { this.play(); } else { this.pause(); } }, "play": function() { this._pause = false; for (var i = 0, len = this.tweens.length; i < len; i++) { this.tweens[i].play(); } var self = this; if (this.currentTask == "wait") { setTimeout(function() { if (self._pause) return; self.index++; self.start(); if (self.index == self.cmds.length && self.complete) self.complete(); }, this.timeout); } }, "stop": function () { for (var i = 0, len = this.tweens.length; i < len; i++) { this.tweens[i].pause(); AlloyPaper.TWEEN.remove(this.tweens[i]); } this.cmds.length = 0; }, "exec": function(cmd, last) { var len = cmd.length, self = this; this.currentTask = cmd[0]; switch (this.currentTask) { case "to": self.stepCompleteCount = 0; for (var i = 1; i < len; i++) { var task = cmd[i]; var ease = task[1][2]; var target = {}; var prop = task[0]; target[prop] = task[1][0]; var t = new AlloyPaper.TWEEN.Tween(this.element).to(target, task[1][1]).onStart(function() { if (cmd.start) cmd.start(); }).onUpdate(function() { if (cmd.progress) cmd.progress.call(self.element); self.element[prop] = this[prop]; }).easing(ease ? 
ease : AlloyPaper.To.linear).onComplete(function() { self.stepCompleteCount++; if (self.stepCompleteCount == len - 1) { if (cmd.end) cmd.end.call(self.element); if (last && self.complete) self.complete(); self.index++; self.start(); } }).start(); this.tweens.push(t); } break; case "wait": this.currentTaskBegin = new Date(); this.timeout = cmd[1][0]; setTimeout(function() { if (self._pause) return; self.index++; self.start(); if (cmd.end) cmd.end.call(self.element); if (last && self.complete) self.complete(); }, cmd[1][0]); break; case "then": var arg = cmd[1][0]; arg.index = 0; arg.complete = function() { self.index++; self.start(); if (last && self.complete) self.complete(); }; arg.start(); break; case "cycle": var count = cmd[1][1]; if (count && self.cycleCount == count) { self.index++; self.start(); if (last && self.complete) self.complete(); } else { self.cycleCount++; self.index = cmd[1][0]; self.start(); } break; } } }); //end-------------------AlloyPaper.To---------------------end //begin-------------------AlloyPaper.UID---------------------begin AlloyPaper.UID = Class.extend({ "statics": { "_nextID": 0, "_nextCacheID": 1, "get": function() { return this._nextID++; }, "getCacheID": function() { return this._nextCacheID++; } } }); //end-------------------AlloyPaper.UID---------------------end //begin-------------------AlloyPaper.Util---------------------begin AlloyPaper.Util = Class.extend({ "statics": { "random": function(min, max) { return min + Math.floor(Math.random() * (max - min + 1)); } } }); //end-------------------AlloyPaper.Util---------------------end //begin-------------------AlloyPaper.Vector2---------------------begin AlloyPaper.Vector2 = Class.extend({ "ctor": function(x, y) { this.x = x; this.y = y; }, "copy": function() { return new AlloyPaper.Vector2(this.x, this.y); }, "length": function() { return Math.sqrt(this.x * this.x + this.y * this.y); }, "sqrLength": function() { return this.x * this.x + this.y * this.y; }, "normalize": function() 
{ var inv = 1 / this.length(); return new AlloyPaper.Vector2(this.x * inv, this.y * inv); },
"negate": function() { return new AlloyPaper.Vector2(-this.x, -this.y); },
// In-place addition (returns undefined, unlike the other arithmetic methods).
"add": function(v) { this.x += v.x; this.y += v.y; },
"subtract": function(v) { return new AlloyPaper.Vector2(this.x - v.x, this.y - v.y); },
"multiply": function(f) { return new AlloyPaper.Vector2(this.x * f, this.y * f); },
"divide": function(f) { var invf = 1 / f; return new AlloyPaper.Vector2(this.x * invf, this.y * invf); },
"dot": function(v) { return this.x * v.x + this.y * v.y; } });
//end-------------------AlloyPaper.Vector2---------------------end
//begin-------------------AlloyPaper.Renderer---------------------begin
// Render coordinator for a Stage: picks a WebGL or 2D-canvas backend by feature
// detection, flattens the display tree into a culled draw list each frame, and
// delegates per-object drawing to the chosen engine.
AlloyPaper.Renderer = Class.extend({ "ctor": function (stage, openWebGL) { this.stage = stage; this.objs = []; this.width = this.stage.width; this.height = this.stage.height; this.mainCanvas = this.stage.canvas; var canvasSupport = !! window.CanvasRenderingContext2D, webglSupport = function() { try { var canvas = document.createElement("canvas"); return !!(window.WebGLRenderingContext && (canvas.getContext("webgl") || canvas.getContext("experimental-webgl"))); } catch (e) { return false; } }(); if (webglSupport && openWebGL) { this.renderingEngine = new AlloyPaper.WebGLRenderer(this.stage.canvas); } else { if (canvasSupport) { this.renderingEngine = new AlloyPaper.CanvasRenderer(this.stage.canvas); } else { throw "your browser does not support canvas and webgl "; } } this.mainCtx = this.renderingEngine.ctx; },
// One frame: rebuild the draw list (computeMatrix fills this.objs), clear, then draw in order.
"update": function() { var objs = this.objs, ctx = this.mainCtx, engine = this.renderingEngine; objs.length = 0; this.computeMatrix(); engine.clear(); var l = objs.length; for (var m = 0; m < l; m++) { engine.renderObj(ctx, objs[m]); } },
"computeMatrix": function() { for (var i = 0, len = this.stage.children.length; i < len; i++) { this._computeMatrix(this.stage.children[i]); } },
// Caches the resolved composite operation and cumulative alpha on the object for the draw pass.
"initComplex": function(o) { o.complexCompositeOperation = this._getCompositeOperation(o); o.complexAlpha = this._getAlpha(o, 1); },
// Recursively concatenates parent matrices into o._matrix; leaves are AABB-culled
// against the stage (Graphics/Text are always drawn — no AABB check for them).
// Shapes ignore their own scaleX/scaleY here (scale baked elsewhere).
"_computeMatrix": function(o, mtx) { if (!o.isVisible()) { return; } if (mtx) { o._matrix.initialize(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); } else { o._matrix.initialize(1, 0, 0, 1, 0, 0); } if (o instanceof AlloyPaper.Shape) { o._matrix.appendTransform(o.x, o.y, 1, 1, o.rotation, o.skewX, o.skewY, o.regX, o.regY); } else { o._matrix.appendTransform(o.x, o.y, o.scaleX, o.scaleY, o.rotation, o.skewX, o.skewY, o.regX, o.regY); } if (o instanceof AlloyPaper.Container) { var list = o.children, len = list.length, i = 0; for (; i < len; i++) { this._computeMatrix(list[i], o._matrix); } } else { if (o instanceof AlloyPaper.Graphics || o instanceof AlloyPaper.Text) { this.objs.push(o); this.initComplex(o); } else { o.initAABB(); if (this.isInStage(o)) { this.objs.push(o); this.initComplex(o); } } } },
// Nearest explicitly-set compositeOperation up the parent chain (undefined if none).
"_getCompositeOperation": function(o) { if (o.compositeOperation) return o.compositeOperation; if (o.parent) return this._getCompositeOperation(o.parent); },
// Product of alphas from o up to the root.
"_getAlpha": function(o, alpha) { var result = o.alpha * alpha; if (o.parent) { return this._getAlpha(o.parent, result); } return result; },
"isInStage": function(o) { return this.collisionBetweenAABB(o.AABB, this.stage.AABB); },
// Axis-aligned bounding-box overlap test; AABB layout is [x, y, width, height].
"collisionBetweenAABB": function(AABB1, AABB2) { var maxX = AABB1[0] + AABB1[2]; if (maxX < AABB2[0]) return false; var minX = AABB1[0]; if (minX > AABB2[0] + AABB2[2]) return false; var maxY = AABB1[1] + AABB1[3]; if (maxY < AABB2[1]) return false; var minY = AABB1[1]; if (minY > AABB2[1] + AABB2[3]) return false; return true; } });
//end-------------------AlloyPaper.Renderer---------------------end
//begin-------------------AlloyPaper.CanvasRenderer---------------------begin
// 2D-canvas backend: draws the flattened display list and performs event hit-testing,
// either cheaply via AABBs or precisely by re-rendering into a tiny scratch context.
AlloyPaper.CanvasRenderer = Class.extend({ "ctor": function(canvas) { if (canvas) { this.canvas = canvas; this.ctx = this.canvas.getContext("2d"); this.height = this.canvas.height; this.width = this.canvas.width; } },
// Cheap hit test: walks children top-most first, using bounding boxes only.
"hitAABB": function(ctx, o, evt, type) { var list =
o.children.slice(0), l = list.length; for (var i = l - 1; i >= 0; i--) { var child = list[i]; if (!this.isbindingEvent(child)) continue; var target = this._hitAABB(ctx, child, evt, type); if (target) return target; } }, "_hitAABB": function(ctx, o, evt, type) { if (!o.isVisible()) { return; } if (o instanceof AlloyPaper.Container) { var list = o.children.slice(0), l = list.length; for (var i = l - 1; i >= 0; i--) { var child = list[i]; var target = this._hitAABB(ctx, child, evt, type); if (target) return target; } } else { if (o.AABB && this.checkPointInAABB(evt.stageX, evt.stageY, o.AABB)) { this._bubbleEvent(o, type, evt); return o; } } }, "hitRender": function(ctx, o, evt, type) { var mtx = o._hitMatrix; var list = o.children.slice(0), l = list.length; for (var i = l - 1; i >= 0; i--) { var child = list[i]; mtx.initialize(1, 0, 0, 1, 0, 0); mtx.appendTransform(o.x - evt.stageX, o.y - evt.stageY, o.scaleX, o.scaleY, o.rotation, o.skewX, o.skewY, o.regX, o.regY); if (!this.isbindingEvent(child)) continue; ctx.save(); var target = this._hitRender(ctx, child, mtx, evt, type); ctx.restore(); if (target) return target; } }, "_hitRender": function(ctx, o, mtx, evt, type) { ctx.clearRect(0, 0, 2, 2); if (!o.isVisible()) { return; } if (mtx) { o._hitMatrix.initialize(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); } else { o._hitMatrix.initialize(1, 0, 0, 1, 0, 0); } mtx = o._hitMatrix; if (o instanceof AlloyPaper.Shape) { mtx.appendTransform(o.x, o.y, 1, 1, o.rotation, o.skewX, o.skewY, o.regX, o.regY); } else { mtx.appendTransform(o.x, o.y, o.scaleX, o.scaleY, o.rotation, o.skewX, o.skewY, o.regX, o.regY); } var mmyCanvas = o.cacheCanvas || o.txtCanvas || o.shapeCanvas; if (mmyCanvas) { ctx.globalAlpha = o.complexAlpha; ctx.globalCompositeOperation = o.complexCompositeOperation; ctx.setTransform(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); ctx.drawImage(mmyCanvas, 0, 0); } else if (o instanceof AlloyPaper.Container) { var list = o.children.slice(0), l = list.length; for 
(var i = l - 1; i >= 0; i--) { ctx.save(); var target = this._hitRender(ctx, list[i], mtx, evt, type); if (target) return target; ctx.restore(); } } else if (o instanceof AlloyPaper.Bitmap || o instanceof AlloyPaper.Sprite) { ctx.globalAlpha = o.complexAlpha; ctx.globalCompositeOperation = o.complexCompositeOperation; var rect = o.rect; ctx.setTransform(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); ctx.drawImage(o.img, rect[0], rect[1], rect[2], rect[3], 0, 0, rect[2], rect[3]); } else if (o instanceof AlloyPaper.Graphics) { ctx.setTransform(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); o.draw(ctx); } if (ctx.getImageData(0, 0, 1, 1).data[3] > 1 && !(o instanceof AlloyPaper.Container)) { this._bubbleEvent(o, type, evt); return o; } }, "_bubbleEvent": function(o, type, event) { var result = o.execEvent(type, event); if (result !== false) { if (o.parent && o.parent.events && o.parent.events[type] && o.parent.events[type].length > 0 && o.parent.baseInstanceof !== "Stage") { this._bubbleEvent(o.parent, type, event); } } }, "isbindingEvent": function(obj) { if (Object.keys(obj.events).length !== 0) return true; if (obj instanceof AlloyPaper.Container) { for (var i = 0, len = obj.children.length; i < len; i++) { var child = obj.children[i]; if (child instanceof AlloyPaper.Container) { return this.isbindingEvent(child); } else { if (Object.keys(child.events).length !== 0) return true; } } } return false; }, "clear": function() { this.ctx.clearRect(0, 0, this.width, this.height); }, "renderObj": function(ctx, o) { var mtx = o._matrix; ctx.save(); ctx.globalAlpha = o.complexAlpha; ctx.globalCompositeOperation = o.complexCompositeOperation; o.shadow && this._applyShadow(ctx, o.shadow); var mmyCanvas = o.cacheCanvas || o.txtCanvas || o.shapeCanvas; if (mmyCanvas) { ctx.setTransform(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); ctx.drawImage(mmyCanvas, 0, 0); } else if (o instanceof AlloyPaper.Bitmap || o instanceof AlloyPaper.Sprite) { if (o._clipFn) { ctx.beginPath(); 
o._clipFn.call(ctx); ctx.closePath(); ctx.clip(); } var rect = o.rect; ctx.setTransform(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); ctx.drawImage(o.img, rect[0], rect[1], rect[2], rect[3], 0, 0, rect[2], rect[3]); } else if (o instanceof AlloyPaper.Graphics || o instanceof AlloyPaper.Text) { ctx.setTransform(mtx.a, mtx.b, mtx.c, mtx.d, mtx.tx, mtx.ty); o.draw(ctx); } ctx.restore(); }, "_applyShadow" : function(ctx, shadow) { ctx.shadowColor = shadow.color || "transparent"; ctx.shadowOffsetX = shadow.offsetX||0; ctx.shadowOffsetY = shadow.offsetY||0; ctx.shadowBlur = shadow.blur||0; }, "clearBackUpCanvasCache": function() {}, "checkPointInAABB": function(x, y, AABB) { var minX = AABB[0]; if (x < minX) return false; var minY = AABB[1]; if (y < minY) return false; var maxX = minX + AABB[2]; if (x > maxX) return false; var maxY = minY + AABB[3]; if (y > maxY) return false; return true; } }); //end-------------------AlloyPaper.CanvasRenderer---------------------end AlloyPaper.WebGLRenderer = Class.extend({ "ctor": function(canvas) { this.surface = canvas; this.snapToPixel = true; this.canvasRenderer = new AlloyPaper.CanvasRenderer(); this.textureCache = {}; this.textureCanvasCache = {}; this.initSurface(this.surface); }, "initSurface": function(surface) { var options = { depth: false, alpha: true, preserveDrawingBuffer: true, antialias: false, premultipliedAlpha: true }; var ctx = undefined; try { ctx = surface.ctx = surface.getContext("webgl", options) || surface.getContext("experimental-webgl", options); ctx.viewportWidth = surface.width; ctx.viewportHeight = surface.height; } catch (e) {} if (!ctx) { alert("Could not initialise WebGL. 
Make sure you've updated your browser, or try a different one like Google Chrome."); } var textureShader = ctx.createShader(ctx.FRAGMENT_SHADER); ctx.shaderSource(textureShader, "" + "precision mediump float;\n" + "varying vec3 vTextureCoord;\n" + "varying float vAlpha;\n" + "uniform float uAlpha;\n" + "uniform sampler2D uSampler0;\n" + "void main(void) { \n" + "vec4 color = texture2D(uSampler0, vTextureCoord.st); \n" + "gl_FragColor = vec4(color.rgb, color.a * vAlpha);\n" + "}"); ctx.compileShader(textureShader); if (!ctx.getShaderParameter(textureShader, ctx.COMPILE_STATUS)) { alert(ctx.getShaderInfoLog(textureShader)); } var vertexShader = ctx.createShader(ctx.VERTEX_SHADER); ctx.shaderSource(vertexShader, "" + "attribute vec2 aVertexPosition;\n" + "attribute vec3 aTextureCoord;\n" + "attribute float aAlpha;\n" + "uniform bool uSnapToPixel;\n" + "const mat4 pMatrix = mat4(" + 2 / ctx.viewportWidth + ",0,0,0, 0," + -2 / ctx.viewportHeight + ",0,0, 0,0,-2, 0, -1,1,-1,1); \n" + "varying vec3 vTextureCoord;\n" + "varying float vAlpha;\n" + "void main(void) { \n" + "vTextureCoord = aTextureCoord; \n" + "vAlpha = aAlpha; \n" + "gl_Position = pMatrix * vec4(aVertexPosition.x,aVertexPosition.y,0.0, 1.0);\n" + "}"); ctx.compileShader(vertexShader); if (!ctx.getShaderParameter(vertexShader, ctx.COMPILE_STATUS)) { alert(ctx.getShaderInfoLog(vertexShader)); } var program = surface.shader = ctx.createProgram(); ctx.attachShader(program, vertexShader); ctx.attachShader(program, textureShader); ctx.linkProgram(program); if (!ctx.getProgramParameter(program, ctx.LINK_STATUS)) { alert("Could not initialise shaders"); } ctx.enableVertexAttribArray(program.vertexPositionAttribute = ctx.getAttribLocation(program, "aVertexPosition")); ctx.enableVertexAttribArray(program.uvCoordAttribute = ctx.getAttribLocation(program, "aTextureCoord")); ctx.enableVertexAttribArray(program.colorAttribute = ctx.getAttribLocation(program, "aAlpha")); program.alphaUniform = 
ctx.getUniformLocation(program, "uAlpha"); program.snapToUniform = ctx.getUniformLocation(program, "uSnapToPixel"); ctx.useProgram(program); this._vertexDataCount = 5; this._degToRad = Math.PI / 180; if (window.Float32Array) { this.vertices = new window.Float32Array(this._vertexDataCount * 4); } else { this.vertices = new Array(this._vertexDataCount * 4); } this.arrayBuffer = ctx.createBuffer(); this.indexBuffer = ctx.createBuffer(); ctx.bindBuffer(ctx.ARRAY_BUFFER, this.arrayBuffer); ctx.bindBuffer(ctx.ELEMENT_ARRAY_BUFFER, this.indexBuffer); var byteCount = this._vertexDataCount * 4; ctx.vertexAttribPointer(program.vertexPositionAttribute, 2, ctx.FLOAT, 0, byteCount, 0); ctx.vertexAttribPointer(program.uvCoordAttribute, 2, ctx.FLOAT, 0, byteCount, 2 * 4); ctx.vertexAttribPointer(program.colorAttribute, 1, ctx.FLOAT, 0, byteCount, 4 * 4); if (window.Uint16Array) { this.indices = new window.Uint16Array(6); } else { this.indices = new Array(6); } for (var i = 0, l = this.indices.length; i < l; i += 6) { var j = i * 4 / 6; this.indices.set([j, j + 1, j + 2, j, j + 2, j + 3], i); } ctx.bufferData(ctx.ARRAY_BUFFER, this.vertices, ctx.STREAM_DRAW); ctx.bufferData(ctx.ELEMENT_ARRAY_BUFFER, this.indices, ctx.STATIC_DRAW); ctx.viewport(0, 0, ctx.viewportWidth, ctx.viewportHeight); ctx.colorMask(true, true, true, true); ctx.blendFuncSeparate(ctx.SRC_ALPHA, ctx.ONE_MINUS_SRC_ALPHA, ctx.SRC_ALPHA, ctx.ONE); ctx.enable(ctx.BLEND); ctx.disable(ctx.DEPTH_TEST); surface.init = true; this.ctx = ctx; }, "_initTexture": function(src, ctx) { if (!this.textureCache[src.src]) { src.glTexture = ctx.createTexture(); src.glTexture.image = src; ctx.activeTexture(ctx.TEXTURE0); ctx.bindTexture(ctx.TEXTURE_2D, src.glTexture); ctx.texImage2D(ctx.TEXTURE_2D, 0, ctx.RGBA, ctx.RGBA, ctx.UNSIGNED_BYTE, src.glTexture.image); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_MAG_FILTER, ctx.LINEAR); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_MIN_FILTER, ctx.LINEAR); ctx.texParameteri(ctx.TEXTURE_2D, 
ctx.TEXTURE_WRAP_S, ctx.CLAMP_TO_EDGE); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_WRAP_T, ctx.CLAMP_TO_EDGE); this.textureCache[src.src] = src.glTexture; ctx.uniform1i(ctx.getUniformLocation(ctx.canvas.shader, "uSampler0"), 0); } else { src.glTexture = this.textureCache[src.src]; ctx.activeTexture(ctx.TEXTURE0); ctx.bindTexture(ctx.TEXTURE_2D, src.glTexture); } }, "_initCache": function(o, src, ctx) { if (!this.textureCanvasCache[o.cacheID]) { this.textureCanvasCache[this._preCacheId] = null; src.glTexture = ctx.createTexture(); src.glTexture.image = src; ctx.activeTexture(ctx.TEXTURE0); ctx.bindTexture(ctx.TEXTURE_2D, src.glTexture); ctx.texImage2D(ctx.TEXTURE_2D, 0, ctx.RGBA, ctx.RGBA, ctx.UNSIGNED_BYTE, src.glTexture.image); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_MAG_FILTER, ctx.LINEAR); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_MIN_FILTER, ctx.LINEAR); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_WRAP_S, ctx.CLAMP_TO_EDGE); ctx.texParameteri(ctx.TEXTURE_2D, ctx.TEXTURE_WRAP_T, ctx.CLAMP_TO_EDGE); ctx.uniform1i(ctx.getUniformLocation(ctx.canvas.shader, "uSampler0"), 0); this.textureCanvasCache[o.cacheID] = src.glTexture; } else { src.glTexture = this.textureCanvasCache[o.cacheID]; ctx.activeTexture(ctx.TEXTURE0); ctx.bindTexture(ctx.TEXTURE_2D, src.glTexture); } }, "updateCache": function(ctx, o, w, h) { ctx.clearRect(0, 0, w + 1, h + 1); this.renderCache(ctx, o); }, "renderCache": function(ctx, o) { if (!o.isVisible()) { return; } if (o instanceof AlloyPaper.Container || o instanceof AlloyPaper.Stage) { var list = o.children.slice(0); for (var i = 0, l = list.length; i < l; i++) { ctx.save(); this.canvasRenderer.render(ctx, list[i]); ctx.restore(); } } else if (o instanceof AlloyPaper.Bitmap || o instanceof AlloyPaper.Sprite) { var rect = o.rect; ctx.drawImage(o.img, rect[0], rect[1], rect[2], rect[3], 0, 0, rect[2], rect[3]); } else if (o.txtCanvas) { ctx.drawImage(o.txtCanvas, 0, 0); } else if (o.shapeCanvas) { ctx.drawImage(o.shapeCanvas, 0, 
0); } },
"clear": function() { this.ctx.clear(this.ctx.COLOR_BUFFER_BIT); },
// Draws one object as a textured quad: picks the texture source (cache canvas, else
// Bitmap/Sprite frame), transforms the four corners by o._matrix on the CPU, packs
// (x, y, u, v, alpha) per vertex, and issues a single 6-index draw call.
// "lighter" composite maps to additive blending.
"renderObj": function(ctx, o) { var mtx = o._matrix, leftSide = 0, topSide = 0, rightSide = 0, bottomSide = 0; var uFrame = 0, vFrame = 0, u = 1, v = 1, img = 0; if (o.complexCompositeOperation === "lighter") { ctx.blendFunc(ctx.SRC_ALPHA, ctx.ONE); } else { ctx.blendFunc(ctx.SRC_ALPHA, ctx.ONE_MINUS_SRC_ALPHA); } var mmyCanvas = o.cacheCanvas || o.txtCanvas || o.shapeCanvas; if (mmyCanvas) { this._initCache(o, mmyCanvas, ctx); rightSide = leftSide + mmyCanvas.width; bottomSide = topSide + mmyCanvas.height; } else if (o instanceof AlloyPaper.Bitmap || o instanceof AlloyPaper.Sprite) { var rect = o.rect; img = o.img; this._initTexture(img, ctx); rightSide = leftSide + rect[2]; bottomSide = topSide + rect[3]; u = rect[2] / img.width; v = rect[3] / img.height; uFrame = rect[0] / img.width; vFrame = rect[1] / img.height; } var a = mtx.a, b = mtx.b, c = mtx.c, d = mtx.d, tx = mtx.tx, ty = mtx.ty, lma = leftSide * a, lmb = leftSide * b, tmc = topSide * c, tmd = topSide * d, rma = rightSide * a, rmb = rightSide * b, bmc = bottomSide * c, bmd = bottomSide * d; var alpha = o.complexAlpha; this.vertices.set([lma + tmc + tx, lmb + tmd + ty, uFrame, vFrame, alpha, lma + bmc + tx, lmb + bmd + ty, uFrame, vFrame + v, alpha, rma + bmc + tx, rmb + bmd + ty, uFrame + u, vFrame + v, alpha, rma + tmc + tx, rmb + tmd + ty, uFrame + u, vFrame, alpha], 0); ctx.bufferSubData(ctx.ARRAY_BUFFER, 0, this.vertices); ctx.drawElements(ctx.TRIANGLES, 6, ctx.UNSIGNED_SHORT, 0); },
// Drops the texture uploaded for cacheID 1 (the backup cache slot).
"clearBackUpCanvasCache": function() { this.textureCanvasCache[1] = null; } });
//begin-------------------AlloyPaper.DisplayObject---------------------begin
// Base class for everything drawable: transform properties, visibility, event map,
// and reactive originX/originY/filter/scale properties wired up through _watch.
AlloyPaper.DisplayObject = Class.extend({ "ctor": function() { this.alpha = this.scaleX = this.scaleY = this.scale = 1; this.x = this.y = this.rotation = this.originX = this.originY = this.skewX = this.skewY = this.width = this.height = this.regX = this.regY = 0; this.textureReady = true; this.visible = true; this._matrix = new AlloyPaper.Matrix2D(); this._hitMatrix = new AlloyPaper.Matrix2D(); this.events = {}; this.id = AlloyPaper.UID.get(); this.cacheID = 0; this.baseInstanceof = "DisplayObject"; this.tickFPS = 60; var self = this;
// originX/originY accept a pixel string ("10") or a 0..1 fraction of width/height; both update regX/regY.
this._watch(this, "originX", function(prop, value) { if (typeof value === "string") { self.regX = parseInt(value); } else { self.regX = self.width * value; } }); this._watch(this, "originY", function(prop, value) { if (typeof value === "string") { self.regY = parseInt(value); } else { self.regY = self.height * value; } }); this._watch(this, "filter", function(prop, value) { self.setFilter.apply(self, value); });
// Setting `scale` fans out to both scaleX and scaleY.
this._watch(this, "scale", function(prop, value) { this.scaleX = this.scaleY = this.scale; }); this.cursor = "default"; this.onHover(function () {
//this._setCursor(this, this.cursor);
}, function () { this._setCursor(this, AlloyPaper.DefaultCursor); }); },
// Wraps one property name (or an array of names) in get/set accessors backed by a
// "__<name>" shadow field; the setter invokes onPropertyChanged(propName, value).
"_watch": function(target, prop, onPropertyChanged) { if (typeof prop === "string") { target["__" + prop] = this[prop]; Object.defineProperty(target, prop, { get: function() { return this["__" + prop]; }, set: function(value) { this["__" + prop] = value; onPropertyChanged.apply(target, [prop, value]); } }); } else { for (var i = 0, len = prop.length; i < len; i++) { var propName = prop[i]; target["__" + propName] = this[propName]; (function(propName) { Object.defineProperty(target, propName, { get: function() { return this["__" + propName]; }, set: function(value) { this["__" + propName] = value; onPropertyChanged.apply(target, [propName, value]); } }); })(propName); } } },
// Drawable only when visible, non-transparent, non-zero scale, and its texture has loaded.
"isVisible": function() { return !!(this.visible && this.alpha > 0 && this.scaleX != 0 && this.scaleY != 0 && this.textureReady); },
// Registers an event handler; multiple handlers per type are kept in order.
"on": function(type, fn) { this.events[type] || (this.events[type] = []); this.events[type].push(fn); },
// Removes a previously registered handler (continues past this chunk).
"off": function (type, fn) { var fns=this.events[type]; if (fns) { var i = 0, len = fns.length; for (; i < len; i++) { if (fns[i] === fn) { fns.splice(i,
1); break; } } } }, "execEvent": function (type, event) { if (this.events) { var fns = this.events[type], result = true; if (fns) { for (var i = 0, len = fns.length; i < len; i++) { result = fns[i].call(this, event); } } return result; } }, "_setCursor": function (obj, type) { if (obj) { if (obj.parent instanceof AlloyPaper.Stage) { obj.parent.setCursor(type); } else { this._setCursor(obj.parent, type); } } }, "clone": function() { var o = new AlloyPaper.DisplayObject(); this.cloneProps(o); return o; }, "cloneProps": function(o) { o.visible = this.visible; o.alpha = this.alpha; o.originX = this.originX; o.originY = this.originY; o.rotation = this.rotation; o.scaleX = this.scaleX; o.scaleY = this.scaleY; o.skewX = this.skewX; o.skewY = this.skewY; o.x = this.x; o.y = this.y; o.regX = this.regX; o.regY = this.regY; }, "cache": function() { if (!this.cacheCanvas) { this.cacheCanvas = document.createElement("canvas"); var bound = this.getBound(); this.cacheCanvas.width = bound.width; this.cacheCanvas.height = bound.height; this.cacheCtx = this.cacheCanvas.getContext("2d"); } this.cacheID = AlloyPaper.UID.getCacheID(); this.updateCache(this.cacheCtx, this, bound.width, bound.width); }, "uncache": function() { this.cacheCanvas = null; this.cacheCtx = null; this.cacheID = null; }, "setFilter": function(r, g, b, a) { if (this.width === 0 || this.height === 0) return; this.uncache(); this.cache(); var imageData = this.cacheCtx.getImageData(0, 0, this.cacheCanvas.width, this.cacheCanvas.height); var pix = imageData.data; for (var i = 0, n = pix.length; i < n; i += 4) { if (pix[i + 3] > 0) { pix[i] *= r; pix[i + 1] *= g; pix[i + 2] *= b; pix[i + 3] *= a; } } this.cacheCtx.putImageData(imageData, 0, 0); }, "getBound": function() { return { width: this.width, height: this.height }; }, "toCenter": function() { this.originX = .5; this.originY = .5; this.x = this.parent.width / 2; this.y = this.parent.height / 2; }, "destroy": function() { this.cacheCanvas = null; this.cacheCtx = 
null; this.cacheID = null; this._matrix = null; this.events = null; if (this.parent) { this.parent.remove(this); } },
// Computes this.AABB = [minX, minY, w, h] (axis-aligned bounds of the transformed unit rect)
// and this.rectPoints (the four transformed corners) from the current matrix.
"initAABB": function() { var x, y, width = this.width, height = this.height, mtx = this._matrix; var x_a = width * mtx.a, x_b = width * mtx.b; var y_c = height * mtx.c, y_d = height * mtx.d; var tx = mtx.tx, ty = mtx.ty; var minX = tx, maxX = tx, minY = ty, maxY = ty; if ((x = x_a + tx) < minX) { minX = x; } else if (x > maxX) { maxX = x; } if ((x = x_a + y_c + tx) < minX) { minX = x; } else if (x > maxX) { maxX = x; } if ((x = y_c + tx) < minX) { minX = x; } else if (x > maxX) { maxX = x; } if ((y = x_b + ty) < minY) { minY = y; } else if (y > maxY) { maxY = y; } if ((y = x_b + y_d + ty) < minY) { minY = y; } else if (y > maxY) { maxY = y; } if ((y = y_d + ty) < minY) { minY = y; } else if (y > maxY) { maxY = y; } this.AABB = [minX, minY, maxX - minX, maxY - minY]; this.rectPoints = [{ x: tx, y: ty},{ x: x_a + tx, y: x_b + ty},{ x: x_a + y_c + tx, y: x_b + y_d + ty},{ x: y_c + tx, y: y_d + ty}]; },
// Clears the cache canvas (1px margin) and redraws this object into it.
"updateCache": function(ctx, o, w, h) { ctx.clearRect(0, 0, w + 1, h + 1); this.renderCache(ctx, o); },
// Canvas-2D draw of o into ctx for caching; note this variant recurses via this.render
// (unlike the WebGL renderer's renderCache, which uses this.canvasRenderer.render).
"renderCache": function(ctx, o) { if (!o.isVisible()) { return; } if (o instanceof AlloyPaper.Container || o instanceof AlloyPaper.Stage) { var list = o.children.slice(0); for (var i = 0, l = list.length; i < l; i++) { ctx.save(); this.render(ctx, list[i]); ctx.restore(); } } else if (o instanceof AlloyPaper.Bitmap || o instanceof AlloyPaper.Sprite) { var rect = o.rect; ctx.drawImage(o.img, rect[0], rect[1], rect[2], rect[3], 0, 0, rect[2], rect[3]); } else if (o.txtCanvas) { ctx.drawImage(o.txtCanvas, 0, 0); } else if (o.shapeCanvas) { ctx.drawImage(o.shapeCanvas, 0, 0); } },
// Convenience registrars; mouse down/up are folded into the unified press* event names.
"onClick": function(fn) { this.on("click", fn); }, "onMouseDown": function(fn) { this.on("pressdown", fn); }, "onMouseMove": function(fn) { this.on("mousemove", fn); }, "onMouseUp": function(fn) { this.on("pressup", fn); }, "onMouseOver": function(fn) { this.on("mouseover", fn);
}, "onMouseOut": function(fn) { this.on("mouseout", fn); }, "onHover": function(over, out) { this.on("mouseover", over); this.on("mouseout", out); }, "onPressDown": function(fn) { this.on("pressdown", fn); }, "onPressMove": function(fn) { this.on("pressmove", fn); }, "onPressUp": function(fn) { this.on("pressup", fn); }, "onMouseWheel": function(fn) { this.on("mousewheel", fn); }, "onTouchStart": function(fn) { this.on("pressdown", fn); }, "onTouchMove": function(fn) { this.on("pressmove", fn); }, "onTouchEnd": function(fn) { this.on("pressup", fn); }, "onTouchCancel": function () { this.on("touchcancel", fn); }, "onDbClick": function(fn) { this.on("dblclick", fn); }, "addEventListener": function (type, handler) { this.on(this._normalizeEventType(type), handler); }, "removeEventListener": function (type, handler) { this.off(this._normalizeEventType(type), handler); }, "_normalizeEventType": function (type) { var newType = { "touchstart": "pressdown", "touchmove": "pressmove", "touchend": "pressup" }[type]; if (newType) return newType; return type; } }); //end-------------------AlloyPaper.DisplayObject---------------------end AlloyPaper.Bitmap = AlloyPaper.DisplayObject.extend({ "ctor": function(img) { this._super(); Object.defineProperty(this, "rect", { get: function () { return this["__rect"]; }, set: function (value) { this["__rect"] = value; this.width = value[2]; this.height = value[3]; this.regX = value[2] * this.originX; this.regY = value[3] * this.originY; } }); if (arguments.length === 0) return; if (typeof img == "string") { this._initWithSrc(img); this.imgSrc = img; } else { this._init(img); this.imgSrc = img.src; } }, "_initWithSrc": function(img) { var cacheImg = AlloyPaper.Cache[img]; if (cacheImg) { this._init(cacheImg); } else { var self = this; this.textureReady = false; this.img = document.createElement("img"); this.img.crossOrigin = "Anonymous"; this.img.onload = function () { if (!self.rect) self.rect = [0, 0, self.img.width, self.img.height]; 
AlloyPaper.Cache[img] = self.img; self.textureReady = true; self.imageLoadHandle && self.imageLoadHandle(); if (self.filter) self.filter = self.filter; }; this.img.src = img; } },
// Adopts an already-loaded image element directly.
"_init": function(img) { if (!img) return; this.img = img; this.img.crossOrigin = "Anonymous"; this.width = img.width; this.height = img.height; this.rect = [0, 0, img.width, img.height]; },
// Swaps the backing image (URL or element) after construction.
"useImage": function(img) { if (typeof img == "string") { this._initWithSrc(img); } else { this._init(img); this.imageLoadHandle && this.imageLoadHandle(); } },
"onImageLoad": function(fn) { this.imageLoadHandle = fn; },
// Clones from the live element when loaded, otherwise from the original URL.
"clone": function () { if (this.textureReady) { var o = new AlloyPaper.Bitmap(this.img); o.rect = this.rect.slice(0); this.cloneProps(o); return o; } else { var o = new AlloyPaper.Bitmap(this.imgSrc); this.rect&&(o.rect = this.rect.slice(0)); this.cloneProps(o); return o; } },
// clip stores a path callback; flipX/flipY are intentionally empty here (no-ops).
"clip": function (fn) { this._clipFn = fn; }, "flipX": function() {}, "flipY": function() {} });
//begin-------------------AlloyPaper.Container---------------------begin
// Container: a DisplayObject holding an ordered child list.
AlloyPaper.Container = AlloyPaper.DisplayObject.extend({ "ctor": function() { this._super(); this.children = []; this.baseInstanceof = "Container"; },
// Appends one or more children (varargs) and re-parents them to this container.
"add": function(obj) { var len = arguments.length; if (len > 1) { for (var i = 0; i < len; i++) { var item = arguments[i]; if (item) { this.children.push(item); item.parent = this; } } } else { if (obj) { this.children.push(obj); obj.parent = this; } } },
// Removes one or more children, matched by id; detached children get parent = null.
"remove": function(obj) { var len = arguments.length, childLen = this.children.length; if (len > 1) { for (var j = 0; j < len; j++) { var currentObj = arguments[j]; for (var k = childLen; --k >= 0;) { if (currentObj&&this.children[k].id == currentObj.id) { currentObj.parent = null; this.children.splice(k, 1); break; } } } } else { for (var i = childLen; --i >= 0;) { if (obj&&this.children[i].id == obj.id) { obj.parent = null; this.children.splice(i, 1); break; } } } },
// Deep clone: clones every child, preserving z-order.
"clone": function() { var o = new AlloyPaper.Container(); this.cloneProps(o); var arr = o.children = []; for (var i = this.children.length - 1; i > -1; i--) { var clone = this.children[i].clone(); arr.unshift(clone); } return o; },
"removeAll": function() { var kids = this.children; while (kids.length) { kids.pop().parent = null; } },
// Recursively destroys all children, then this container (via _super).
"destroy": function() { this._super(); var kids = this.children; while (kids.length) { var kid = kids.pop(); kid.destroy(); kid = null; } },
// Z-order helpers: swap by index, swap by reference, or raise a child to the top.
"swapChildrenAt": function(index1, index2) { var kids = this.children; var o1 = kids[index1]; var o2 = kids[index2]; if (!o1 || !o2) { return; } kids[index1] = o2; kids[index2] = o1; },
"swapChildren": function(child1, child2) { var kids = this.children; var index1, index2; for (var i = 0, l = kids.length; i < l; i++) { if (kids[i] == child1) { index1 = i; } if (kids[i] == child2) { index2 = i; } if (index1 != null && index2 != null) { break; } } if (i == l) { return; } kids[index1] = child2; kids[index2] = child1; },
"swapToTop": function(child) { this.swapChildren(child, this.children[this.children.length - 1]); } });
//end-------------------AlloyPaper.Container---------------------end
//begin-------------------AlloyPaper.Graphics---------------------begin
// Graphics: records 2D-context commands into this.cmds and replays them in draw().
// assMethod lists the commands that are property assignments rather than method calls.
AlloyPaper.Graphics = AlloyPaper.DisplayObject.extend({ "ctor": function() { this._super(); this.cmds = []; this.assMethod = ["fillStyle", "strokeStyle", "lineWidth"]; },
// Replays the recorded commands against ctx; assignment-style commands set the property,
// everything else is applied as a method call with the recorded arguments.
"draw": function(ctx) { for (var i = 0, len = this.cmds.length; i < len; i++) { var cmd = this.cmds[i]; if (this.assMethod.join("-").match(new RegExp("\\b" + cmd[0] + "\\b", "g"))) { ctx[cmd[0]] = cmd[1][0]; } else { ctx[cmd[0]].apply(ctx, Array.prototype.slice.call(cmd[1])); } } },
// Chainable command recorders mirroring the CanvasRenderingContext2D API; clear() drops them all.
"clearRect": function(x, y, width, height) { this.cmds.push(["clearRect", arguments]); return this; }, "clear": function() { this.cmds.length = 0; return this; }, "strokeRect": function() { this.cmds.push(["strokeRect", arguments]); return this; }, "fillRect": function() { this.cmds.push(["fillRect", arguments]); return this; }, "beginPath": function() { this.cmds.push(["beginPath", arguments]); return this; }, "arc": function() { this.cmds.push(["arc", arguments]); return this; }, "closePath": function() { this.cmds.push(["closePath", arguments]); return this; }, "fillStyle": function() { this.cmds.push(["fillStyle", arguments]); return this; }, "fill": function() { this.cmds.push(["fill", arguments]); return this; }, "strokeStyle": function() { this.cmds.push(["strokeStyle", arguments]); return this; }, "lineWidth": function() { this.cmds.push(["lineWidth", arguments]); return this; }, "stroke": function() { this.cmds.push(["stroke", arguments]); return this; }, "moveTo": function() { this.cmds.push(["moveTo", arguments]); return this; }, "lineTo": function() { this.cmds.push(["lineTo", arguments]); return this; }, "bezierCurveTo": function() { this.cmds.push(["bezierCurveTo", arguments]); return this; }, "clone": function() {} });
//end-------------------AlloyPaper.Graphics---------------------end
//begin-------------------AlloyPaper.Label---------------------begin
// Label: renders (optionally word-wrapped) text into a power-of-two canvas texture.
// value/fontSize/color/fontFamily are reactive and re-render the text when assigned.
AlloyPaper.Label = AlloyPaper.DisplayObject.extend({ "ctor": function(option) { this._super(); this.value = option.value; this.fontSize = option.fontSize; this.fontFamily = option.fontFamily; this.color = option.color; this.textAlign = "center"; this.textBaseline = "top"; this.fontWeight = option.fontWeight || ""; this.maxWidth = option.maxWidth || 2e3; this.square = option.square || false; this.txtCanvas = document.createElement("canvas"); this.txtCtx = this.txtCanvas.getContext("2d"); this.setDrawOption(); this.shadow = option.shadow; this._watch(this, ["value", "fontSize", "color", "fontFamily"], function() { this.setDrawOption(); }); },
// Re-renders the text canvas from current properties and refreshes width/height/cacheID.
"setDrawOption": function() { var drawOption = this.getDrawOption({ txt: this.value, maxWidth: this.maxWidth, square: this.square, size: this.fontSize, alignment: this.textAlign, color: this.color || "black", fontFamily: this.fontFamily, fontWeight: this.fontWeight, shadow: this.shadow }); this.cacheID = AlloyPaper.UID.getCacheID(); this.width = drawOption.calculatedWidth; this.height = drawOption.calculatedHeight; },
// Measures/wraps the text, sizes the canvas to powers of two, and fills each line;
// returns option augmented with calculatedWidth/calculatedHeight and the per-line cmd list.
"getDrawOption": function(option) { var canvas = this.txtCanvas; var ctx = this.txtCtx; var canvasX, canvasY; var textX, textY; var text = []; var textToWrite = option.txt; var maxWidth = option.maxWidth; var squareTexture = option.square; var textHeight = option.size; var textAlignment = option.alignment; var textColour = option.color; var fontFamily = option.fontFamily; var fontWeight = option.fontWeight; ctx.font = textHeight + "px " + fontFamily; if (maxWidth && this.measureText(ctx, textToWrite) > maxWidth) { maxWidth = this.createMultilineText(ctx, textToWrite, maxWidth, text); canvasX = this.getPowerOfTwo(maxWidth); } else { text.push(textToWrite); canvasX = this.getPowerOfTwo(ctx.measureText(textToWrite).width); } canvasY = this.getPowerOfTwo(textHeight * (text.length + 1)); if (squareTexture) { canvasX > canvasY ? canvasY = canvasX : canvasX = canvasY; } option.calculatedWidth = canvasX; option.calculatedHeight = canvasY; canvas.width = canvasX; canvas.height = canvasY; switch (textAlignment) { case "left": textX = 0; break; case "center": textX = canvasX / 2; break; case "right": textX = canvasX; break; } textY = canvasY / 2; ctx.fillStyle = textColour; ctx.textAlign = textAlignment; ctx.textBaseline = "middle"; ctx.font = fontWeight + " " + textHeight + "px " + fontFamily; if (option.shadow) { ctx.shadowColor = option.shadow.color || "transparent"; ctx.shadowOffsetX = option.shadow.offsetX || 0; ctx.shadowOffsetY = option.shadow.offsetY || 0; ctx.shadowBlur = option.shadow.blur || 0; } var offset = (canvasY - textHeight * (text.length + 1)) * .5; option.cmd = []; for (var i = 0; i < text.length; i++) { if (text.length > 1) { textY = (i + 1) * textHeight + offset; } option.cmd.push({ text: text[i], x: textX, y: textY }); ctx.fillText(text[i], textX, textY); } return option; },
// Smallest power of two >= value (optionally starting from pow); body continues on the next line.
"getPowerOfTwo": function(value, pow) { var temp_pow = pow || 1;
while (temp_pow < value) { temp_pow *= 2; } return temp_pow; }, "measureText": function(ctx, textToMeasure) { return ctx.measureText(textToMeasure).width; }, "createMultilineText": function(ctx, textToWrite, maxWidth, text) { textToWrite = textToWrite.replace("\n", " "); var currentText = textToWrite; var futureText; var subWidth = 0; var maxLineWidth; var wordArray = textToWrite.split(" "); var wordsInCurrent, wordArrayLength; wordsInCurrent = wordArrayLength = wordArray.length; while (this.measureText(ctx, currentText) > maxWidth && wordsInCurrent > 1) { wordsInCurrent--; currentText = futureText = ""; for (var i = 0; i < wordArrayLength; i++) { if (i < wordsInCurrent) { currentText += wordArray[i]; if (i + 1 < wordsInCurrent) { currentText += " "; } } else { futureText += wordArray[i]; if (i + 1 < wordArrayLength) { futureText += " "; } } } } text.push(currentText); maxLineWidth = this.measureText(ctx, currentText); if (futureText) { subWidth = this.createMultilineText(ctx, futureText, maxWidth, text); if (subWidth > maxLineWidth) { maxLineWidth = subWidth; } } return maxLineWidth; }, "draw": function(ctx) { ctx.fillStyle = this.color; ctx.font = this.font; ctx.textAlign = this.textAlign || "left"; ctx.textBaseline = this.textBaseline || "top"; ctx.fillText(this.text, 0, 0); } }); //end-------------------AlloyPaper.Label---------------------end //begin-------------------AlloyPaper.Particle---------------------begin AlloyPaper.Particle = AlloyPaper.Bitmap.extend({ "ctor": function(option) { this._super(option.texture); this.originX = .5; this.originY = .5; this.position = option.position; this.x = this.position.x; this.y = this.position.y; this.rotation = option.rotation || 0; this.velocity = option.velocity; this.acceleration = option.acceleration || new AlloyPaper.Vector2(0, 0); this.rotatingSpeed = option.rotatingSpeed || 0; this.rotatingAcceleration = option.rotatingAcceleration || 0; this.hideSpeed = option.hideSpeed || .01; this.zoomSpeed = option.hideSpeed 
|| .01; this.isAlive = true; this.img = option.texture; this.img.src = ""; }, "tick": function() { this.velocity.add(this.acceleration); this.position.add(this.velocity.multiply(.1)); this.rotatingSpeed += this.rotatingAcceleration; this.rotation += this.rotatingSpeed; this.alpha -= this.hideSpeed; this.x = this.position.x; this.y = this.position.y; } }); //end-------------------AlloyPaper.Particle---------------------end //begin-------------------AlloyPaper.ParticleSystem---------------------begin AlloyPaper.ParticleSystem = AlloyPaper.Container.extend({ "ctor": function(option) { this._super(); this.speed = option.speed; this.angle = option.angle; this.angleRange = option.angleRange; this.emitArea = option.emitArea; this.gravity = option.gravity || { x: 0, y: 0 }; this.filter = option.filter; this.compositeOperation = "lighter"; this.emitCount = option.emitCount; this.maxCount = option.maxCount || 1e3; this.emitX = option.emitX; this.emitY = option.emitY; if (typeof option.texture === "string") { if (AlloyPaper.Cache[option.texture]) { this.texture = AlloyPaper.Cache[option.texture]; this.generateFilterTexture(this.texture); } else { this.bitmap = new AlloyPaper.Bitmap(); this.bitmap._parent = this; this.bitmap.onImageLoad(function() { this._parent.texture = this.img; this._parent.generateFilterTexture(this.img); delete this._parent; }); this.bitmap.useImage(option.texture); } } else { this.texture = option.texture; this.generateFilterTexture(option.texture); } this.totalCount = option.totalCount; this.emittedCount = 0; this.tickFPS = 60; this.hideSpeed = option.hideSpeed || .01; }, "generateFilterTexture": function(texture) { var bitmap = new AlloyPaper.Bitmap(texture); bitmap.filter = this.filter; this.filterTexture = bitmap.cacheCanvas; }, "changeFilter": function (filter) { var bitmap = new AlloyPaper.Bitmap(this.texture); bitmap.filter = filter; this.filterTexture = bitmap.cacheCanvas; }, "emit": function() { var angle = (this.angle + 
AlloyPaper.Util.random(-this.angleRange / 2, this.angleRange / 2)) * Math.PI / 180; var halfX = this.emitArea[0] / 2, harfY = this.emitArea[1] / 2; var particle = new AlloyPaper.Particle({ position: new AlloyPaper.Vector2(this.emitX + AlloyPaper.Util.random(-halfX, halfX), this.emitY + AlloyPaper.Util.random(-harfY, harfY)), velocity: new AlloyPaper.Vector2(this.speed * Math.cos(angle), this.speed * Math.sin(angle)), texture: this.filterTexture, acceleration: this.gravity, hideSpeed: this.hideSpeed }); this.add(particle); this.emittedCount++; },
// Emits new particles up to maxCount, ticks live ones, and removes faded ones; once the
// totalCount budget is exhausted and all particles are gone, destroys the whole system.
"tick": function() { if (this.filterTexture) { var len = this.children.length; if (this.totalCount && this.emittedCount > this.totalCount) { if (len === 0) this.destroy(); } else { if (len < this.maxCount) { for (var k = 0; k < this.emitCount; k++) { this.emit(); } } } for (var i = 0; i < len; i++) { var item = this.children[i]; if (item.isVisible()) { item.tick(); } else { this.remove(item); i--; len--; } } } } });
//end-------------------AlloyPaper.ParticleSystem---------------------end
//begin----------------- AlloyPaper.ParticleExplosion -------------------begin
// One-shot wrapper: lets ps emit for 1s, then stops emission and, once every particle has
// faded, detaches itself and fires the callback.
AlloyPaper.ParticleExplosion = AlloyPaper.Container.extend({ ctor: function (ps, callback) { this._super(); this.ps = ps; this.add(ps); this.callback = callback; this.tickFPS = 0; setTimeout(function () { this.ps.maxCount = 0; this.tickFPS = 60; }.bind(this), 1000); }, tick: function () { if (this.ps.children.length === 0) { this.tickFPS = 0; this.parent.remove(this); this.callback(); } } });
//end-----------------AlloyPaper.ParticleExplosion-------------------end
//begin-------------------AlloyPaper.Shape---------------------begin
// Shape: like Graphics but replays recorded commands into its own backing canvas on end();
// scaleX/scaleY are reactive and resize/rescale the backing canvas before replaying.
AlloyPaper.Shape = AlloyPaper.DisplayObject.extend({ "ctor": function(width, height, debug) { this._super(); this.cmds = []; this.assMethod = ["fillStyle", "strokeStyle", "lineWidth"]; this.width = width; this.height = height; this._width = width; this._height = height; this.shapeCanvas = document.createElement("canvas"); this.shapeCanvas.width = this.width; this.shapeCanvas.height = this.height; this.shapeCtx = this.shapeCanvas.getContext("2d"); if (debug) { this.fillStyle("red"); this.fillRect(0, 0, width, height); } this._watch(this, "scaleX", function(prop, value) { this.width = this._width * value; this.height = this._height * this.scaleY; this.shapeCanvas.width = this.width; this.shapeCanvas.height = this.height; this.shapeCtx.scale(value, this.scaleY); this.end(); }); this._watch(this, "scaleY", function(prop, value) { this.width = this._width * this.scaleX; this.height = this._height * value; this.shapeCanvas.width = this.width; this.shapeCanvas.height = this.height; this.shapeCtx.scale(this.scaleX, value); this.end(); }); },
// Replays the recorded commands into shapeCtx; remembers the previous cacheID in _preCacheId
// so the WebGL renderer can evict the stale texture, and stamps a fresh cacheID.
"end": function() { this._preCacheId = this.cacheID; this.cacheID = AlloyPaper.UID.getCacheID(); var ctx = this.shapeCtx; for (var i = 0, len = this.cmds.length; i < len; i++) { var cmd = this.cmds[i]; if (this.assMethod.join("-").match(new RegExp("\\b" + cmd[0] + "\\b", "g"))) { ctx[cmd[0]] = cmd[1][0]; } else { ctx[cmd[0]].apply(ctx, Array.prototype.slice.call(cmd[1])); } } },
// Immediate clears (these act on the canvas right away and invalidate the texture cache).
"clearRect": function(x, y, width, height) { this.cacheID = AlloyPaper.UID.getCacheID(); this.shapeCtx.clearRect(x, y, width, height); },
"clear": function() { this.cacheID = AlloyPaper.UID.getCacheID(); this.cmds.length = 0; this.shapeCtx.clearRect(0, 0, this.width, this.height); },
// Chainable command recorders mirroring the CanvasRenderingContext2D API (replayed by end()).
"strokeRect": function() { this.cmds.push(["strokeRect", arguments]); return this; }, "fillRect": function() { this.cmds.push(["fillRect", arguments]); return this; }, "beginPath": function() { this.cmds.push(["beginPath", arguments]); return this; }, "arc": function() { this.cmds.push(["arc", arguments]); return this; }, "closePath": function() { this.cmds.push(["closePath", arguments]); return this; }, "fillStyle": function() { this.cmds.push(["fillStyle", arguments]); return this; }, "fill": function() { this.cmds.push(["fill", arguments]); return this; }, "strokeStyle": function() { this.cmds.push(["strokeStyle", arguments]); return this; }, "lineWidth": function() { this.cmds.push(["lineWidth", arguments]); return this; }, "stroke": function() { this.cmds.push(["stroke", arguments]); return this; }, "moveTo": function() { this.cmds.push(["moveTo", arguments]); return this; }, "lineTo": function() { this.cmds.push(["lineTo", arguments]); return this; }, "bezierCurveTo": function() { this.cmds.push(["bezierCurveTo", arguments]); return this; }, "clone": function() {} });
//end-------------------AlloyPaper.Shape---------------------end
//begin-------------------AlloyPaper.Sprite---------------------begin
// Sprite: frame-animated bitmap driven by option.frames / option.animations; stays invisible
// until all listed images are loaded.
// NOTE(review): the inner (function(){ ... })() IIFE is invoked without binding `this`, so
// `bmp._sprite = this` and `this.option.imgs[i]` inside it do not refer to the sprite (and `i`
// is stale by load time) — looks broken for uncached URL images; confirm against usage.
AlloyPaper.Sprite = AlloyPaper.DisplayObject.extend({ "ctor": function(option) { this._super(); this.option = option; this.x = option.x || 0; this.y = option.y || 0; this.currentFrameIndex = 0; this.animationFrameIndex = 0; this.currentAnimation = option.currentAnimation || null; this.rect = [0, 0, 10, 10]; this.visible = false; this.bitmaps = []; this._loadedCount = 0; var len = this.option.imgs.length; for (var i = 0; i < len; i++) { var urlOrImg = this.option.imgs[i]; if (typeof urlOrImg === "string") { if (AlloyPaper.Cache[urlOrImg]) { this.bitmaps.push(new AlloyPaper.Bitmap(AlloyPaper.Cache[urlOrImg])); this._loadedCount++; } else { (function(){ var bmp = new AlloyPaper.Bitmap(); bmp._sprite = this; bmp.onImageLoad(function() { bmp._sprite._loadedCount++; if (bmp._sprite._loadedCount === len) { bmp._sprite.visible = true; delete bmp._sprite; } }); bmp.useImage(this.option.imgs[i]); this.bitmaps.push(bmp); })(); } } else { this._loadedCount++; this.bitmaps.push(new AlloyPaper.Bitmap(urlOrImg)); } } if (this._loadedCount === len) { this.visible = true; } this.img = this.bitmaps[0].img; this.interval = 1e3 / option.framerate; this.loop = null; this.paused = false; this.animationEnd = option.animationEnd || null; if (this.currentAnimation) { this.gotoAndPlay(this.currentAnimation); } this.tickAnimationEnd = option.tickAnimationEnd || null; },
"play": function() { this.paused = false; }, "pause": function () { this.paused = true; }, "reset": function() { this.currentFrameIndex = 0; this.animationFrameIndex = 0; },
// Starts the named animation on a setInterval loop; when `times` is given, stops after that
// many full passes, fires animationEnd, and removes the sprite from its parent. Frame entries
// are [sx, sy, sw, sh, originX?, originY?, imageIndex?].
"gotoAndPlay": function(animation, times) { this.paused = false; this.reset(); clearInterval(this.loop); this.currentAnimation = animation; var self = this; var playTimes = 0; this.loop = setInterval(function() { if (!self.paused) { var opt = self.option; var frames = opt.animations[self.currentAnimation].frames, len = frames.length; self.animationFrameIndex++; if (self.animationFrameIndex > len - 1) { playTimes++; self.animationFrameIndex = 0; if (self.tickAnimationEnd) { self.tickAnimationEnd(); } if (times && playTimes == times) { if (self.animationEnd) self.animationEnd(); self.paused = true; clearInterval(self.loop); self.parent.remove(self); } } self.rect = opt.frames[frames[self.animationFrameIndex]]; self.width = self.rect[2]; self.height = self.rect[3]; var rect = self.rect, rectLen = rect.length; rectLen > 4 && (self.regX = rect[2] * rect[4]); rectLen > 5 && (self.regY = rect[3] * rect[5]); rectLen > 6 && (self.img = self.bitmaps[rect[6]].img); } }, this.interval); },
// Shows the first frame of the named animation without starting the loop.
"gotoAndStop": function(animation) { this.reset(); clearInterval(this.loop); var self = this; self.currentAnimation = animation; var opt = self.option; var frames = opt.animations[self.currentAnimation].frames; self.rect = opt.frames[frames[self.animationFrameIndex]]; self.width = self.rect[2]; self.height = self.rect[3]; var rect = self.rect, rectLen = rect.length; rectLen > 4 && (self.regX = rect[2] * rect[4]); rectLen > 5 && (self.regY = rect[3] * rect[5]); rectLen > 6 && (self.img = self.bitmaps[rect[6]].img); } });
//end-------------------AlloyPaper.Sprite---------------------end
// Stage: root container bound to a canvas element; owns the render loop, hit testing and
// DOM event plumbing (ctor continues on the next chunk line).
AlloyPaper.Stage = AlloyPaper.Container.extend({ "ctor": function(canvas, openWebGL) { this._super(); this.canvas = typeof canvas == "string" ?
// Stage ctor (continued): render loop wiring, fps/moveFPS reactive properties, DOM listeners.
document.querySelector(canvas) : canvas; this.width = this.canvas.width; this.height = this.canvas.height; this.AABB = [0, 0, this.width, this.height]; this.hitAABB = true; this.hitRenderer = new AlloyPaper.CanvasRenderer(); this.hitCanvas = document.createElement("canvas"); this.hitCanvas.width = 1; this.hitCanvas.height = 1; this.stageRenderer = new AlloyPaper.Renderer(this, openWebGL); this.hitCtx = this.hitCanvas.getContext("2d"); this._scaleX = this._scaleY = null; this.offset = this._getXY(this.canvas); this.overObj = null; this._paused = false; this.fps = 63; this.interval = Math.floor(1e3 / this.fps); this.toList = []; this.tickFns = []; this.beginTick = null; this.endTick = null; var self = this; self.loop = setInterval(function() { if (self._paused) return; self.beginTick && self.beginTick(); self._tick(self); self.endTick && self.endTick(); }, self.interval); Object.defineProperty(this, "useRequestAnimFrame", { set: function(value) { this._useRequestAnimFrame = value; if (value) { clearInterval(self.loop); self.loop = AlloyPaper.RAF.requestInterval(function() { self._tick(self); }, self.interval); } else { AlloyPaper.RAF.clearRequestInterval(self.loop); self.loop = setInterval(function() { self._tick(self); }, self.interval); } }, get: function() { return this._useRequestAnimFrame; } }); this._watch(this, "fps", function(prop, value) { this.interval = Math.floor(1e3 / value); var self = this; if (this.useRequestAnimFrame) { clearInterval(this.loop); try { AlloyPaper.RAF.clearRequestInterval(this.loop); } catch (e) {} this.loop = AlloyPaper.RAF.requestInterval(function() { self._tick(self); }, this.interval); } else { AlloyPaper.RAF.clearRequestInterval(this.loop); try { clearInterval(this.loop); } catch (e) {} this.loop = setInterval(function() { self._tick(self); }, this.interval); } }); this._initDebug(); this._pressmoveObjs = null; this.baseInstanceof = "Stage"; this.overObj = null; this._moveInterval = 16; this._preMoveTime = new Date(); this._currentMoveTime = new Date(); Object.defineProperty(this, "moveFPS", { set: function(value) { this._moveFPS = value; this._moveInterval = 1e3 / value; }, get: function() { return this._moveFPS; } }); this.canvas.addEventListener("mousemove", this._handleMouseMove.bind(this), false); this.canvas.addEventListener("click", this._handleClick.bind(this), false); this.canvas.addEventListener("mousedown", this._handleMouseDown.bind(this), false); this.canvas.addEventListener("mouseup", this._handleMouseUp.bind(this), false); this.canvas.addEventListener("dblclick", this._handleDblClick.bind(this), false); this.addEvent(this.canvas, "mousewheel", this._handleMouseWheel.bind(this)); this.canvas.addEventListener("touchmove", this._handleMouseMove.bind(this), false); this.canvas.addEventListener("touchstart", this._handleMouseDown.bind(this), false); this.canvas.addEventListener("touchend", this._handleMouseUp.bind(this), false); this.canvas.addEventListener("touchcancel", this._handleTouchCancel.bind(this), false); document.addEventListener("DOMContentLoaded", this.adjustLayout.bind(this), false); window.addEventListener("load", this.adjustLayout.bind(this), false); window.addEventListener("resize", this.adjustLayout.bind(this), false); this.autoUpdate = true; this.scaleType = "normal"; this.setCursor(AlloyPaper.DefaultCursor); },
// Recomputes the canvas page offset (and screen scaling, if enabled) after layout changes.
"adjustLayout": function() { this.offset = this._getXY(this.canvas); this.style=this._getStyle(); if (this._scaleX) { this.scaleToScreen(this._scaleX, this._scaleY); } },
// Pause/resume the whole stage: the tick loop, all sprites, and all tweens.
"pause": function () { this._paused = true; this._pauseSprite(this); this._pauseTween(); },
"play": function () { this._paused = false; this._playSprite(this); this._playTween(); },
"_pauseSprite": function (obj) { for (var i = 0, len = obj.children.length; i < len; i++) { var child = obj.children[i]; if (child instanceof AlloyPaper.Container) { this._pauseSprite(child); } else if (child instanceof AlloyPaper.Sprite) { child.pause(); } } },
"_pauseTween": function () { for (var i = 0, len = this.toList.length; i < len; i++) { this.toList[i].pause(); } },
"_playSprite": function (obj) { for (var i = 0, len = obj.children.length; i < len; i++) { var child = obj.children[i]; if (child instanceof AlloyPaper.Container) { this._playSprite(child); } else if (child instanceof AlloyPaper.Sprite) { child.play(); } } },
"_playTween": function () { for (var i = 0, len = this.toList.length; i < len; i++) { this.toList[i].play(); } },
"toggle": function () { if (this._paused) { this.play(); } else { this.pause(); } },
// Debug overlay: a fixed div toggled through the reactive `debug` property.
"openDebug": function() {}, "closeDebug": function() {}, "_initDebug": function() { this.debugDiv = document.createElement("div"); this.debugDiv.style.cssText = "display:none;position:absolute;z-index:2000;left:0;bottom:0;background-color:yellow;font-size:16px;"; document.body.appendChild(this.debugDiv); Object.defineProperty(this, "debug", { set: function(value) { this._debug = value; if (this._debug) { this.debugDiv.style.display = "block"; } else { this.debugDiv.style.display = "none"; } }, get: function() { return this._debug; } }); },
// Dispatches wheel events to stage-level listeners and bubbles to the hovered object.
"_handleMouseWheel": function(event) { this._correctionEvent(event, event.type); var callbacks = this.events["mousewheel"]; if (callbacks) { for (var i = 0, len = callbacks.length; i < len; i++) { var callback = callbacks[i]; callback(event); } } if (this.overObj) { this.hitRenderer._bubbleEvent(this.overObj, "mousewheel", event); } },
"update": function() { this.stageRenderer.update(); },
// Normalizes mouse/touch coordinates onto stage space (evt.stageX/evt.stageY) and fires
// stage-level listeners for `type`. Touch events take the first active touch point.
"_correctionEvent": function (evt, type) { //this.adjustLayout();
if (evt.touches||evt.changedTouches) { var firstTouch = evt.touches[0] || evt.changedTouches[0]; if (firstTouch) { evt.stageX = firstTouch.pageX; evt.stageY = firstTouch.pageY; } } else { evt.stageX = evt.pageX; evt.stageY = evt.pageY; } //if (this.scaleType !== "normal") {
var p = this._correction(evt.stageX, evt.stageY); evt.stageX = Math.round(p.x); evt.stageY = Math.round(p.y); //}
var callbacks = this.events[type]; if (callbacks) { for (var i = 0, len = callbacks.length; i < len; i++) { var callback = callbacks[i]; callback(evt); } } },
"_handleClick": function(evt) { this._correctionEvent(evt, evt.type); this._getObjectUnderPoint(evt, evt.type); },
// Throttled move handler: drives pressmove on the captured target and synthesizes
// mouseover/mouseout/mousemove transitions against the previously hovered object.
"_handleMouseMove": function(evt) { this._currentMoveTime = new Date(); if (this._currentMoveTime - this._preMoveTime > this._moveInterval / 2) { this._correctionEvent(evt, evt.type); if (this._pressmoveObjs) { var pressmoveHandle = this._pressmoveObjs.events["pressmove"]; pressmoveHandle && this._pressmoveObjs.execEvent("pressmove", evt); } var child = this._getObjectUnderPoint(evt, "mousemove"); if (child) { if (this.overObj) { if (child.id != this.overObj.id) { this.hitRenderer._bubbleEvent(this.overObj, "mouseout", evt); this.hitRenderer._bubbleEvent(child, "mouseover", evt); this.overObj = child; } else { this.hitRenderer._bubbleEvent(child, "mousemove", evt); } this._setCursorByOverObject(child); } else { this.overObj = child; this.hitRenderer._bubbleEvent(child, "mouseover", evt); } } else { if (this.overObj) { this.hitRenderer._bubbleEvent(this.overObj, "mouseout", evt); this.overObj = null; } } this._preMoveTime = this._currentMoveTime; } },
// Finds the nearest ancestor (including o) with a pressmove handler and captures it.
"_getPressmoveTarget": function(o) { if (o.events["pressmove"]) { this._pressmoveObjs = o; } if (o.parent) this._getPressmoveTarget(o.parent); },
"_handleMouseDown": function(evt) { this._correctionEvent(evt, "pressdown"); var child = this._getObjectUnderPoint(evt, "pressdown"); if (child) { this._getPressmoveTarget(child); } },
"_handleMouseUp": function(evt) { this._pressmoveObjs = null; this._correctionEvent(evt, "pressup"); this._getObjectUnderPoint(evt, "pressup"); },
"_handleTouchCancel": function (evt) { this._pressmoveObjs = null; this._correctionEvent(evt, "touchcancel"); this._getObjectUnderPoint(evt, "touchcancel"); },
"_handleDblClick": function(evt) { this._correctionEvent(evt, evt.type); this._getObjectUnderPoint(evt, evt.type); },
// Hit test: fast AABB path by default, pixel-accurate render path otherwise.
"_getObjectUnderPoint": function(evt, type) { if (this.hitAABB) { return this.hitRenderer.hitAABB(this.hitCtx, this, evt, type); } else { return this.hitRenderer.hitRender(this.hitCtx, this, evt, type); } },
// Page position of an element: getBoundingClientRect plus document scroll, with an
// offsetParent-walk fallback for old engines.
"_getXY": function(el) { var _t = 0, _l = 0; if (document.documentElement.getBoundingClientRect && el.getBoundingClientRect) { var box = el.getBoundingClientRect(); _l = box.left; _t = box.top; } else { while (el.offsetParent) { _t += el.offsetTop; _l += el.offsetLeft; el = el.offsetParent; } return [_l, _t]; } return [_l + Math.max(document.documentElement.scrollLeft, document.body.scrollLeft), _t + Math.max(document.documentElement.scrollTop, document.body.scrollTop)]; },
// Per-frame driver: ticks the container (rate-limited via _tickInterval bookkeeping when
// present), then each child the same way, recursing into containers (cut off at chunk end).
"_tick": function(container) { if (container && container.tick && container.tickFPS > 0) { this._initInterval(container); if (!container.hasOwnProperty("_tickInterval")) { container.tick(); } else { container._tickIntervalCurrent = new Date(); if (!container._tickIntervalLast) { container._tickIntervalLast = new Date(); container._tickIntervalPrev = new Date(); } var itv = (container._tickIntervalCurrent - container._tickIntervalLast) +( container._tickIntervalCurrent - container._tickIntervalPrev); if (itv > container._tickInterval) { container.tick(); container._tickIntervalLast = container._tickIntervalCurrent; } container._tickIntervalPrev= new Date(); } } var children = container.children, len = children.length; for (var i = 0; i < len; i++) { var child = children[i]; if (child) { if (child.tick && child.tickFPS > 0) { this._initInterval(child); if (!child.hasOwnProperty("_tickInterval")) { child.tick(); } else { child._tickIntervalCurrent = new Date(); if (!child._tickIntervalLast){ child._tickIntervalLast = new Date(); child._tickIntervalPrev = new Date(); } var itv =( child._tickIntervalCurrent - child._tickIntervalLast)+(child._tickIntervalCurrent-child._tickIntervalPrev); if (itv > child._tickInterval) { child.tick(); child._tickIntervalLast = child._tickIntervalCurrent; } child._tickIntervalPrev= new Date(); } } if (child.baseInstanceof ==
"Container") { this._tick(child); } } } }, "_initInterval": function(obj) { if (obj.hasOwnProperty("tickFPS")) { obj._tickInterval = 1e3 / obj.tickFPS; } }, "tick": function () { for (var i = 0, len = this.tickFns.length; i < len; i++) { var fn = this.tickFns[i]; if (!fn.hasOwnProperty("_ARE_PrevDate")) { fn(); continue; } fn._ARE_CurrentDate = new Date(); var interval = (fn._ARE_CurrentDate - fn._ARE_PrevDate) + (fn._ARE_CurrentDate - fn._ARE_LastDate); if (interval > fn._ARE_Interval) { fn(); fn._ARE_PrevDate = fn._ARE_CurrentDate; } fn._ARE_LastDate = fn._ARE_CurrentDate; } if(this.autoUpdate)this.update(); if (this.debug) { this.getFPS(); this.debugDiv.innerHTML = "fps : " + this.fpsValue + " <br/>object count : " + this.getTotalCount() + " <br/>rendering mode : " + this.getRenderingMode() + " <br/>inner object count : " + this.stageRenderer.objs.length; } }, "onTick": function(fn,interval) { this.tickFns.push(fn); if (interval !== undefined) { fn._ARE_PrevDate = new Date(); fn._ARE_CurrentDate = new Date(); fn._ARE_LastDate = new Date(); fn._ARE_Interval = interval; } }, "setFPS": function(fps) { this.interval = Math.floor(1e3 / fps); }, "onKeyboard": function(keyCombo, onDownCallback, onUpCallback) { AlloyPaper.Keyboard.on(keyCombo, onDownCallback, onUpCallback); }, "getActiveKeys": function() { return AlloyPaper.Keyboard.getActiveKeys(); }, "scaleToScreen": function (scaleX, scaleY) { this.scaleType = "screen"; if (scaleX === 1 && scaleY === 1) { document.body.style.overflow = "hidden"; document.documentElement.style.overflow = "hidden"; } document.body.style.margin = 0; document.documentElement.style.margin = 0; document.body.style.border = 0; document.documentElement.style.border = 0; document.body.style.padding = 0; document.documentElement.style.padding = 0; document.body.style.width = "100%"; document.documentElement.style.width = "100%"; document.body.style.height = "100%"; document.documentElement.style.height = "100%"; this._scaleX = scaleX; 
this._scaleY = scaleY; var canvas = this.canvas; canvas.style.position = "absolute"; canvas.style.width = scaleX * 100 + "%"; canvas.style.height = scaleY * 100 + "%"; canvas.style.left = 100 * (1 - scaleX) / 2 + "%"; canvas.style.top = 100 * (1 - scaleY) / 2 + "%"; canvas.style.border = "0px solid #ccc"; this.offset = this._getXY(this.canvas); this.style=this._getStyle(); }, "scaleToBox": function (w, h) { this.scaleType = "box"; if (w === window.innerWidth && h === window.innerHeight) { document.body.style.overflow = "hidden"; document.documentElement.style.overflow = "hidden"; } var canvas = this.canvas; canvas.style.position = "absolute"; canvas.style.width = w + "px"; canvas.style.height = h + "px"; canvas.style.left = (window.innerWidth - w) / 2 + "px"; canvas.style.top = (window.innerHeight - h) / 2 + "px"; canvas.style.border = "0px solid #ccc"; this.offset = this._getXY(this.canvas); this.style=this._getStyle(); }, "correctingXY": function (x, y) { if (this.scaleType === "box") { return { x: x * this.width / parseInt( this.canvas.style.width), y: y * this.height / parseInt(this.canvas.style.height) }; } else { return { x: x * this.width / (window.innerWidth * this._scaleX), y: y * this.height / (window.innerHeight * this._scaleY) }; } }, "getTotalCount": function() { var count = 0; var self = this; function getCount(child) { if (child.baseInstanceof == "Container" || child.baseInstanceof == "Stage") { for (var i = 0, len = child.children.length; i < len; i++) { var subChild = child.children[i]; if (subChild instanceof AlloyPaper.Container) { getCount(subChild); } else { count++; } } } else { count++; } } getCount(this); return count; }, "getRenderingMode": function() { if (this.stageRenderer.renderingEngine instanceof AlloyPaper.CanvasRenderer) { return "Canvas"; } return "WebGL"; }, "getFPS": function() { var fps = AlloyPaper.FPS.get(); this.fpsValue = fps.value; }, "addEvent": function(el, type, fn, capture) { if (type === "mousewheel" && 
document.mozHidden !== undefined) { type = "DOMMouseScroll"; } el.addEventListener(type, function(event) { var type = event.type; if (type == "DOMMouseScroll" || type == "mousewheel") { event.delta = event.wheelDelta ? event.wheelDelta / 120 : -(event.detail || 0) / 3; } fn.call(this, event); }, capture || false); }, "setCursor": function(type) { this.canvas.style.cursor = type; }, "_setCursorByOverObject": function (obj) { if (obj.cursor !== "default") { this.setCursor(obj.cursor); } else { if (obj.parent) { this._setCursorByOverObject(obj.parent); } } }, "destroy": function () { this._super(); this.canvas.parentNode.removeChild(this.canvas); if (this.useRequestAnimFrame) { AlloyPaper.RAF.clearRequestInterval(this.loop); } else { clearInterval(this.loop); } }, "_getStyle":function() { var style = window.getComputedStyle(this.canvas, null); return { boxSizing: style.boxSizing, borderTopWidth: parseInt(style.borderTopWidth), borderLeftWidth: parseInt(style.borderLeftWidth), width:parseInt(style.width), height:parseInt(style.height) }; }, "_correction":function(pageX,pageY){ var x=pageX-this.offset[0]-this.style.borderLeftWidth, y=pageY-this.offset[1]-this.style.borderTopWidth, canvasWidth=this.style.width, canvasHeight=this.style.height; if(this.style.boxSizing==="border-box"){ canvasWidth-=this.style.borderLeftWidth; canvasHeight-=this.style.borderTopWidth; } return {x: this.width*x/canvasWidth,y:this.height*y/canvasHeight}; } }); //begin-------------------AlloyPaper.Text---------------------begin AlloyPaper.Text = AlloyPaper.DisplayObject.extend({ "ctor": function(value, font, color) { this._super(); this.value = value; this.font = font; this.color = color; this.textAlign = "left"; this.textBaseline = "top"; }, "draw": function(ctx) { ctx.fillStyle = this.color; ctx.font = this.font; ctx.textAlign = this.textAlign || "left"; ctx.textBaseline = this.textBaseline || "top"; ctx.fillText(this.value, 0, 0); }, "clone": function() { var t = new 
AlloyPaper.Text(this.text, this.font, this.color); this.cloneProps(t); return t; }, "getWidth": function () { var measureCtx = document.createElement("canvas").getContext("2d"); measureCtx.font = this.font; var width = measureCtx.measureText(this.value).width; measureCtx = null; return width; } }); //end-------------------AlloyPaper.Text---------------------end return AlloyPaper; })); ```
/content/code_sandbox/asset/alloy_paper.js
javascript
2016-05-25T23:17:03
2024-08-16T06:23:01
AlloyFinger
AlloyTeam/AlloyFinger
3,420
37,345
```javascript /*! * AlloyLever v1.0.4 By dntzhang * Github: path_to_url */ ;(function (root, factory) { if(typeof exports === 'object' && typeof module === 'object') module.exports = factory() else if(typeof define === 'function' && define.amd) define([], factory) else if(typeof exports === 'object') exports["AlloyLever"] = factory() else root["AlloyLever"] = factory() })(this, function() { var AlloyLever = {} AlloyLever.settings = { cdn:'//s.url.cn/qqun/qun/qqweb/m/qun/confession/js/vconsole.min.js', reportUrl: null, reportPrefix: '', reportKey: 'msg', otherReport: null, entry: null } AlloyLever.store = [] var methodList = ['log', 'info', 'warn', 'debug', 'error']; methodList.forEach(function(item) { var method = console[item]; console[item] = function() { AlloyLever.store.push({ logType: item, logs: arguments }); method.apply(console, arguments); } }); AlloyLever.logs = [] AlloyLever.config = function(config){ for(var i in config){ if(config.hasOwnProperty(i)){ AlloyLever.settings[i] = config[i] } } if(config.entry){ window.addEventListener('load', function() { AlloyLever.entry(config.entry) }) } var parameter = getParameter('vconsole') if(parameter) { if (parameter === 'show') { AlloyLever.vConsole(true) } else { AlloyLever.vConsole(false) } } } AlloyLever.vConsole = function(show){ loadScript(AlloyLever.settings.cdn, function() { //support vconsole3.0 if (typeof vConsole === 'undefined') { window.vConsole = new VConsole({ defaultPlugins: ['system', 'network', 'element', 'storage'], maxLogNumber: 5000 }) } var i = 0, len = AlloyLever.store.length for (; i < len; i++) { var item = AlloyLever.store[i] //console[item.type].apply(console, item.logs) //prevent twice log item.noOrigin = true window.vConsole.pluginList.default.printLog(item) } if(show) { try { window.vConsole.show() } catch (e) { } window.addEventListener('load', function () { window.vConsole.show() }) } }) } var parameter = getParameter('vconsole') if (parameter) { if (parameter === 'show') { 
AlloyLever.vConsole(true) } else { AlloyLever.vConsole(false) } } AlloyLever.entry = function(selector) { var count = 0, entry = document.querySelector(selector) if(entry) { entry.addEventListener('click', function () { count++ if (count > 5) { count = -10000 AlloyLever.vConsole(true) } }) } } window.onerror = function(msg, url, line, col, error) { var newMsg = msg if (error && error.stack) { newMsg = processStackMsg(error) } if (isOBJByType(newMsg, "Event")) { newMsg += newMsg.type ? ("--" + newMsg.type + "--" + (newMsg.target ? (newMsg.target.tagName + "::" + newMsg.target.src) : "")) : "" } newMsg = (newMsg + "" || "").substr(0,500) AlloyLever.logs.push({ msg: newMsg, target: url, rowNum: line, colNum: col }) if (msg.toLowerCase().indexOf('script error') > -1) { console.error('Script Error: See Browser Console for Detail') } else { console.error(newMsg) } var ss = AlloyLever.settings if(ss.reportUrl) { var src = ss.reportUrl + (ss.reportUrl.indexOf('?')>-1?'&':'?') + ss.reportKey + '='+( ss.reportPrefix?('[' + ss.reportPrefix +']'):'')+ newMsg+'&t='+new Date().getTime() if(ss.otherReport) { for (var i in ss.otherReport) { if (ss.otherReport.hasOwnProperty(i)) { src += '&' + i + '=' + ss.otherReport[i] } } } new Image().src = src } } function loadScript(src, callback){ var s, r, t r = false s = document.createElement('script') s.type = 'text/javascript' s.src = src s.onload = s.onreadystatechange = function() { //console.log( this.readyState ); //uncomment this line to see which ready states are called. if ( !r && (!this.readyState || this.readyState == 'complete') ) { r = true callback() } } t = document.getElementsByTagName('script')[0] t.parentNode.insertBefore(s, t) } function getParameter(n) { var m = window.location.hash.match(new RegExp('(?:#|&)' + n + '=([^&]*)(&|$)')), result = !m ? 
'' : decodeURIComponent(m[1]) return result ||getParameterByName(n) } function getParameterByName(name, url) { if (!url) url = window.location.href name = name.replace(/[\[\]]/g, "\\$&") var regex = new RegExp("[?&]" + name + "(=([^&#]*)|&|#|$)"), results = regex.exec(url) if (!results) return null if (!results[2]) return '' return decodeURIComponent(results[2].replace(/\+/g, " ")) } function isOBJByType(o, type) { return Object.prototype.toString.call(o) === "[object " + (type || "Object") + "]" } function processStackMsg (error) { var stack = error.stack .replace(/\n/gi, "") .split(/\bat\b/) .slice(0, 9) .join("@") .replace(/\?[^:]+/gi, "") var msg = error.toString() if (stack.indexOf(msg) < 0) { stack = msg + "@" + stack } return stack } function getCookie(name){ var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)") if(arr=document.cookie.match(reg)) return unescape(arr[2]) else return null } AlloyLever.getCookie = getCookie AlloyLever.getParameter= getParameter AlloyLever.loadScript = loadScript return AlloyLever }); ```
/content/code_sandbox/alloy-lever.js
javascript
2016-05-09T03:17:27
2024-07-29T05:45:29
AlloyLever
AlloyTeam/AlloyLever
1,382
1,542
```html <!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=0, minimum-scale=1.0, maximum-scale=1.0"> <title>AlloyLever - 1kb</title> <style> html,body{ padding:0; margin: 0; overflow: hidden; } canvas{ padding:0; margin: 0; border: 0; } #container{ text-align: center; } </style> <style> body { background-color: #1e1d23; /*opacity: 0;*/ display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-orient: vertical; -webkit-box-direction: normal; -ms-flex-direction: column; flex-direction: column; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; text-transform: uppercase; font-size: 12px; letter-spacing: 2px; -webkit-tap-highlight-color: rgba(0,0,0,0); -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } /*body.ready { opacity: 1; }*/ .icon-text path, .icon-text polygon { /*opacity: 0;*/ } .info { text-align: center; } .description { /*opacity: 0;*/ color: white; font-size: 15px; margin-bottom:100px ; } .links { display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-orient: vertical; -webkit-box-direction: normal; -ms-flex-direction: column; flex-direction: column; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; -webkit-box-align: center; -ms-flex-align: center; align-items: center; margin-top: 20px; } @media (min-width: 700px) { .links { -webkit-box-orient: horizontal; -webkit-box-direction: normal; -ms-flex-direction: row; flex-direction: row; } } .button { /*opacity: 0;*/ position: relative; display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-orient: vertical; -webkit-box-direction: normal; -ms-flex-direction: column; flex-direction: column; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; width: 180px; height: 60px; text-decoration: none; } .button.blue { color: #5E89FB; } .button.green { color: #18F38C; } .button.red { 
color: #FF1461; } .button svg { position: absolute; left: 0; top: 0; right: 0; bottom: 0; stroke-width: 1; fill: none; fill-rule: evenodd; stroke: currentColor; } .button.green svg { -webkit-transform: rotate(180deg); transform: rotate(180deg); } .button path { scale: .9; transform-origin: 50% 50%; } .credits { /*opacity: 0;*/ margin-top: 20px; color: #979797; } .credits a { position: relative; color: currentColor; text-decoration: none; } .credits a:after { content: ""; /*opacity: 0;*/ display: block; position: absolute; left: 0; bottom: -3px; width: 100%; border-bottom: 1px solid currentColor; transition: opacity .75s ease; } /*.credits a:hover:after { opacity: 1; transition: opacity .25s ease; }*/ .credits a, .credits span { transition: color .75s ease; } .credits a:hover, .highlighted { color: white; transition: color .25s ease; } #container{ color: white; height: 40px; line-height: 40px; font-size: 40px; margin-top:200px ; } @media only screen and (max-width: 500px) { #container { margin-top:100px ; font-size: 35px; } .description { font-size: 13px; margin-bottom:10px ; } } </style> </head> <body> <div id="container"> AlloyLever </div> <div class="info"> <p class="description"> 1kb </p> <div class="links"> <a class="button blue" href="path_to_url#" target="_blank"> <svg viewBox="0 0 180 60"> <path d="M10,10 C10,10 50,9.98999977 90,9.98999977 C130,9.98999977 170,10 170,10 C170,10 170.009995,20 170.009995,30 C170.009995,40 170,50 170,50 C170,50 130,50.0099983 90,50.0099983 C50,50.0099983 10,50 10,50 C10,50 9.98999977,40 9.98999977,30 C9.98999977,20 10,10 10,10 Z"></path> </svg> <span>Docs</span> </a> <a class="button blue" id="entry" href="javascript:" > <svg viewBox="0 0 180 60"> <path d="M10,10 C10,10 50,9.98999977 90,9.98999977 C130,9.98999977 170,10 170,10 C170,10 170.009995,20 170.009995,30 C170.009995,40 170,50 170,50 C170,50 130,50.0099983 90,50.0099983 C50,50.0099983 10,50 10,50 C10,50 9.98999977,40 9.98999977,30 C9.98999977,20 10,10 10,10 Z"></path> 
</svg> <span>6</span> </a> <a class="button green" href="path_to_url" target="_blank"> <svg viewBox="0 0 180 60"> <path d="M10,10 C10,10 50,9.98999977 90,9.98999977 C130,9.98999977 170,10 170,10 C170,10 170.009995,20 170.009995,30 C170.009995,40 170,50 170,50 C170,50 130,50.0099983 90,50.0099983 C50,50.0099983 10,50 10,50 C10,50 9.98999977,40 9.98999977,30 C9.98999977,20 10,10 10,10 Z"></path> </svg> <span>GitHub</span> </a> </div> <p class="credits"> v<span class="version">1.0.0</span> <span class="date">2017</span> AlloyTeam.com </p> </div> <script src="alloy-lever.js"></script> <script> AlloyLever.config({ cdn:'//s.url.cn/qqun/qun/qqweb/m/qun/confession/js/vconsole.min.js', reportUrl: "//a.qq.com", reportPrefix: 'abc', reportKey: 'msg', otherReport: { uin: 100000 }, entry:"#entry" }) </script> <pre> <code> AlloyLever.config({ cdn:'//s.url.cn/qqun/qun/qqweb/m/qun/confession/js/vconsole.min.js', reportUrl: "//a.qq.com", reportPrefix: 'abc', reportKey: 'msg', otherReport: { uin: 491862102 }, entry:"#entry" }) console.log('log') console.info('info') console.debug('debug') console.warn('warn') console.error('error') </code> </pre> <script> console.log('log') console.info('info') console.debug('debug') console.warn('warn') console.error('error') // //AlloyLever.entry('#entry') // //AlloyLever.entry('#entry2') </script> </body> </html> ```
/content/code_sandbox/cn.html
html
2016-05-09T03:17:27
2024-07-29T05:45:29
AlloyLever
AlloyTeam/AlloyLever
1,382
2,093
```html <!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=0, minimum-scale=1.0, maximum-scale=1.0"> <title>AlloyLever - 1kb js library contains development debugging, error monitoring and reporting, user problem localization features</title> <script src="alloy-lever.js"></script> <script> AlloyLever.config({ cdn:'//s.url.cn/qqun/qun/qqweb/m/qun/confession/js/vconsole.min.js', reportUrl: "//a.qq.com", reportPrefix: 'abc', reportKey: 'msg', otherReport: { uin: 100000 }, entry:"#entry" }) </script> <style> html,body{ padding:0; margin: 0; overflow: hidden; } canvas{ padding:0; margin: 0; border: 0; } #container{ text-align: center; } </style> <style> body { background-color: #1e1d23; /*opacity: 0;*/ display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-orient: vertical; -webkit-box-direction: normal; -ms-flex-direction: column; flex-direction: column; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; text-transform: uppercase; font-size: 12px; letter-spacing: 2px; -webkit-tap-highlight-color: rgba(0,0,0,0); -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } /*body.ready { opacity: 1; }*/ .icon-text path, .icon-text polygon { /*opacity: 0;*/ } .info { text-align: center; } .description { /*opacity: 0;*/ color: white; font-size: 15px; margin-bottom:100px ; } .links { display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-orient: vertical; -webkit-box-direction: normal; -ms-flex-direction: column; flex-direction: column; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; -webkit-box-align: center; -ms-flex-align: center; align-items: center; margin-top: 20px; } @media (min-width: 700px) { .links { -webkit-box-orient: horizontal; -webkit-box-direction: normal; -ms-flex-direction: row; flex-direction: row; } } .button { /*opacity: 0;*/ position: relative; display: 
-webkit-box; display: -ms-flexbox; display: flex; -webkit-box-orient: vertical; -webkit-box-direction: normal; -ms-flex-direction: column; flex-direction: column; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; width: 180px; height: 60px; text-decoration: none; } .button.blue { color: #5E89FB; } .button.green { color: #18F38C; } .button.red { color: #FF1461; } .button svg { position: absolute; left: 0; top: 0; right: 0; bottom: 0; stroke-width: 1; fill: none; fill-rule: evenodd; stroke: currentColor; } .button.green svg { -webkit-transform: rotate(180deg); transform: rotate(180deg); } .button path { scale: .9; transform-origin: 50% 50%; } .credits { /*opacity: 0;*/ margin-top: 20px; color: #979797; } .credits a { position: relative; color: currentColor; text-decoration: none; } .credits a:after { content: ""; /*opacity: 0;*/ display: block; position: absolute; left: 0; bottom: -3px; width: 100%; border-bottom: 1px solid currentColor; transition: opacity .75s ease; } /*.credits a:hover:after { opacity: 1; transition: opacity .25s ease; }*/ .credits a, .credits span { transition: color .75s ease; } .credits a:hover, .highlighted { color: white; transition: color .25s ease; } #container{ color: white; height: 40px; line-height: 40px; font-size: 40px; margin-top:200px ; } @media only screen and (max-width: 500px) { #container { margin-top:100px ; font-size: 35px; } .description { font-size: 13px; margin-bottom:10px ; } } </style> </head> <body> <div id="container"> AlloyLever </div> <div class="info"> <p class="description"> 1kb js library contains development debugging, error monitoring and reporting, user problem localization features </p> <div class="links"> <a class="button blue" href="path_to_url#usage" target="_blank"> <svg viewBox="0 0 180 60"> <path d="M10,10 C10,10 50,9.98999977 90,9.98999977 C130,9.98999977 170,10 170,10 C170,10 170.009995,20 170.009995,30 C170.009995,40 170,50 170,50 C170,50 130,50.0099983 90,50.0099983 
C50,50.0099983 10,50 10,50 C10,50 9.98999977,40 9.98999977,30 C9.98999977,20 10,10 10,10 Z"></path> </svg> <span>Docs</span> </a> <a class="button blue" id="entry" href="javascript:" > <svg viewBox="0 0 180 60"> <path d="M10,10 C10,10 50,9.98999977 90,9.98999977 C130,9.98999977 170,10 170,10 C170,10 170.009995,20 170.009995,30 C170.009995,40 170,50 170,50 C170,50 130,50.0099983 90,50.0099983 C50,50.0099983 10,50 10,50 C10,50 9.98999977,40 9.98999977,30 C9.98999977,20 10,10 10,10 Z"></path> </svg> <span>Click me 6 times!</span> </a> <a class="button green" href="path_to_url" target="_blank"> <svg viewBox="0 0 180 60"> <path d="M10,10 C10,10 50,9.98999977 90,9.98999977 C130,9.98999977 170,10 170,10 C170,10 170.009995,20 170.009995,30 C170.009995,40 170,50 170,50 C170,50 130,50.0099983 90,50.0099983 C50,50.0099983 10,50 10,50 C10,50 9.98999977,40 9.98999977,30 C9.98999977,20 10,10 10,10 Z"></path> </svg> <span>GitHub</span> </a> </div> <p class="credits"> v<span class="version">1.0.2</span> <span class="date">2017</span> AlloyTeam.com </p> </div> <pre> <code> AlloyLever.config({ cdn:'//s.url.cn/qqun/qun/qqweb/m/qun/confession/js/vconsole.min.js', reportUrl: "//a.qq.com", reportPrefix: 'abc', reportKey: 'msg', otherReport: { uin: 491862102 }, entry:"#entry" }) console.log('this is log content') console.info('this is info content') console.debug('this is debug content') console.warn('this is warn content') console.error('this is error content') </code> </pre> <script> console.log('this is log content') console.info('this is info content') console.debug('this is debug content') console.warn('this is warn content') console.error('this is error content') // //AlloyLever.entry('#entry') // //AlloyLever.entry('#entry2') </script> </body> </html> ```
/content/code_sandbox/index.html
html
2016-05-09T03:17:27
2024-07-29T05:45:29
AlloyLever
AlloyTeam/AlloyLever
1,382
2,164
```shell # Gatekeeper - DDoS protection system. # # This program is free software: you can redistribute it and/or modify # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # along with this program. If not, see <path_to_url # All the dependency projects are maintained in this directory. cd dependencies # Setup DPDK. cd dpdk meson build cd build ninja sudo ninja install # Gatekeeper is being staticly linked with DPDK, so # ldconfig(8) is not needed to make DPDK's libraries available system wide. # sudo ldconfig # Install kernel modules. sudo modprobe vfio-pci # Make modules persist across reboots. Since multiple # users can run this script, don't re-add these modules # if someone else already made them persistent. if ! grep -q "vfio-pci" /etc/modules; then echo "vfio-pci" | sudo tee -a /etc/modules fi # Setup LuaJIT. cd ../../luajit-2.0 # Build and install. make sudo make install # Setup BIRD. cd ../bird # Build and install. autoreconf ./configure make sudo make install cd ../../ # Build interface name -> PCI address map. gcc generate_if_map.c -o generate_if_map -Wall ./generate_if_map lua/if_map.lua # Build client. cd gkctl gcc main.c -o gkctl -Wall -Wextra -pedantic cd .. # Build BPF programs. cd bpf make make copy cd .. sudo mkdir -p /var/run/gatekeeper/ sudo chown -R $USER:$GROUPS /var/run/gatekeeper/ sudo chmod -R 700 /var/run/gatekeeper/ ```
/content/code_sandbox/setup.sh
shell
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
428
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdio.h> #include <string.h> #include <argp.h> #include <unistd.h> #include <ifaddrs.h> #include <sys/ioctl.h> #include <net/if.h> #include <linux/ethtool.h> #include <linux/sockios.h> /* Argp's global variables. */ const char *argp_program_version = "generate-if-map 1.0"; /* Arguments. */ static char adoc[] = "<IF_MAP_LUA>"; static char doc[] = "generate-if-map -- generate the mapping " "network interface device name to its bus information"; static struct argp_option options[] = { {"gatekeeper-if-map", 'o', "FILE", 0, "Output the mapping to a Lua FILE", 1}, { 0 } }; struct args { char *filename; }; static error_t parse_opt(int key, char *arg, struct argp_state *state) { struct args *args = state->input; switch (key) { case 'o': args->filename = arg; break; case ARGP_KEY_INIT: args->filename = NULL; break; case ARGP_KEY_ARG: if (args->filename) argp_error(state, "Wrong number of arguments; only one is allowed"); args->filename = arg; break; case ARGP_KEY_END: if (!args->filename) argp_error(state, "The mapping Lua file was not specified"); break; default: return ARGP_ERR_UNKNOWN; } return 0; } static struct argp argp = {options, parse_opt, adoc, doc, NULL, NULL, NULL}; int main(int argc, char **argv) { FILE *f; struct ifaddrs *addrs, *iter; int sock; int ret; struct args args = { /* Defaults. */ .filename = "./lua/if_map.lua", }; /* Read parameters. 
*/ argp_parse(&argp, argc, argv, 0, NULL, &args); f = fopen(args.filename, "w"); if (f == NULL) { perror("fopen"); return -1; } ret = getifaddrs(&addrs); if (ret == -1) { perror("getifaddrs"); goto file; } sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock == -1) { perror("socket"); goto addrs; } fprintf(f, "return {\n"); iter = addrs; while (iter != NULL) { struct ifreq ifr; struct ethtool_cmd cmd; struct ethtool_drvinfo drvinfo; /* * Use AF_PACKET to only get each interface once, * and skip the loopback interface. */ if (iter->ifa_addr == NULL || iter->ifa_addr->sa_family != AF_PACKET || strcmp(iter->ifa_name, "lo") == 0) goto next; memset(&ifr, 0, sizeof(ifr)); memset(&cmd, 0, sizeof(cmd)); memset(&drvinfo, 0, sizeof(drvinfo)); strcpy(ifr.ifr_name, iter->ifa_name); ifr.ifr_data = (void *)&drvinfo; drvinfo.cmd = ETHTOOL_GDRVINFO; if (ioctl(sock, SIOCETHTOOL, &ifr) < 0) { perror("ioctl"); goto next; } fprintf(f, "\t[\"%s\"] = \"%s\",\n", iter->ifa_name, drvinfo.bus_info); next: iter = iter->ifa_next; } fprintf(f, "}\n"); close(sock); addrs: freeifaddrs(addrs); file: fclose(f); return ret; } ```
/content/code_sandbox/generate_if_map.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
918
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* * This files contains all definitions that a BPF program needs to * implement a flow handler associated a flow entry. */ #ifndef _GATEKEEPER_FLOW_BPF_H_ #define _GATEKEEPER_FLOW_BPF_H_ #include <stdint.h> #include <stdbool.h> /* * Helper macro to place BPF programs, maps, and licenses in * different sections of an ELF BPF file. */ #define SEC(NAME) __attribute__((section(NAME), used)) /* * Priority used for DSCP field of encapsulated packets: * 0 for legacy packets; 1 for granted packets; * 2 for capability renew; 3-63 for request packets. */ #define PRIORITY_GRANTED (1) #define PRIORITY_RENEW_CAP (2) #define PRIORITY_REQ_MIN (3) #define PRIORITY_MAX (63) /* Memory reserved for a GK BPF program in between runs. */ struct gk_bpf_cookie { uint64_t mem[8]; }; /* Possible returns of the function init of a GK BPF program. */ enum gk_bpf_init_return { /* The cookie was successfully initialized. */ GK_BPF_INIT_RET_OK, /* * The initialization of a given cookie failed. * * This is not a regular return since failing to initialize a cookie * implies in not fulfilling a policy decision. Thus, this return * should only be returned under extreme conditions. */ GK_BPF_INIT_RET_ERROR }; /* The context of a GK BPF program for function init. */ struct gk_bpf_init_ctx { uint64_t now; }; /* Possible returns of the function pkt of a GK BPF program. */ enum gk_bpf_pkt_return { /* * The packet is always forwarded toward the grantor server. * The packet is accounted as at state GK_GRANTED. 
*/ GK_BPF_PKT_RET_FORWARD, /* The packet is dropped, but account as at state GK_DECLINED. */ GK_BPF_PKT_RET_DECLINE, /* Some error happened during processing. The packet will be dropped. */ GK_BPF_PKT_RET_ERROR }; /* * The context of a GK BPF program for function pkt. * * The GK block guarantees that @now < @expire_at, that is, * the BPF state has not expired. */ struct gk_bpf_pkt_ctx { uint64_t now; uint64_t expire_at; uint16_t l3_proto; uint8_t l4_proto; bool fragmented; }; /* * The define GK_BPF_INTERNAL, used below, should only be defined * in bk/bpf.c. */ #ifndef GK_BPF_INTERNAL #define GK_BPF_INTERNAL extern #endif /* Symbols available to the BPF functions init() and pkt(). */ extern uint64_t cycles_per_sec; extern uint64_t cycles_per_ms; /* Symbols available to the BPF function init(). */ GK_BPF_INTERNAL struct gk_bpf_cookie *init_ctx_to_cookie( struct gk_bpf_init_ctx *ctx); /* Symbols available to the BPF function pkt(). */ GK_BPF_INTERNAL struct gk_bpf_cookie *pkt_ctx_to_cookie( struct gk_bpf_pkt_ctx *ctx); GK_BPF_INTERNAL struct rte_mbuf *pkt_ctx_to_pkt(struct gk_bpf_pkt_ctx *ctx); GK_BPF_INTERNAL int gk_bpf_prep_for_tx(struct gk_bpf_pkt_ctx *ctx, int priority, int direct_if_possible); #endif /* _GATEKEEPER_FLOW_BPF_H_ */ ```
/content/code_sandbox/include/gatekeeper_flow_bpf.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
840
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_GT_H_ #define _GATEKEEPER_GT_H_ #include <stdint.h> #include <rte_ip.h> #include <rte_tcp.h> #include <rte_udp.h> #include <rte_atomic.h> #include <rte_ip_frag.h> #include "gatekeeper_config.h" #define CTYPE_STRUCT_GT_CONFIG_PTR "struct gt_config *" struct gt_packet_headers { uint16_t outer_ethertype; uint16_t inner_ip_ver; uint8_t l4_proto; uint8_t priority; uint8_t outer_ecn; /* Length of packet after IP headers (L4 + L5 + payload). */ uint16_t upper_len; void *l2_hdr; void *outer_l3_hdr; void *inner_l3_hdr; void *l4_hdr; /* Field indicating whether the associated packet is fragmented. */ bool frag; /* * The fields below are for internal use. * Configuration files should not refer to them. */ /* Fields for parsing fragmented packets. */ uint32_t l2_outer_l3_len; uint32_t inner_l3_len; struct ipv6_extension_fragment *frag_hdr; }; /* * Data for building policy decision notification packets for * Gatekeeper servers seen by GT block instances. */ struct ggu_notify_pkt { /* The IP address of the Gatekeeper server. */ struct ipaddr ipaddr; /* * The notification packet of policy decisions * being built to send to this Gatekeeper server. */ struct rte_mbuf *buf; }; /* Structures for each GT instance. */ struct gt_instance { /* RX queue on the front interface. */ uint16_t rx_queue; /* TX queue on the front interface. */ uint16_t tx_queue; /* The lua state that belongs to the instance. */ lua_State *lua_state; /* The neighbor hash tables that stores the Ethernet cached headers. 
*/ struct neighbor_hash_table neigh; struct neighbor_hash_table neigh6; /* * The fragment table maintains information about already * received fragments of the packet. */ struct rte_ip_frag_tbl *frag_tbl; /* * Fixed array of packet buffers for policy decision * notification packets for the last @max_ggu_pkts * Gatekeeper servers seen by this GT block instance. */ struct ggu_notify_pkt *ggu_pkts; /* The number of valid entries in @ggu_pkts. */ unsigned int num_ggu_pkts; struct mailbox mb; /* The packet mbuf pool for the GT instance. */ struct rte_mempool *mp; } __rte_cache_aligned; /* Configuration for the GT functional block. */ struct gt_config { /* The UDP source and destination port numbers for GK-GT Unit. */ uint16_t ggu_src_port; uint16_t ggu_dst_port; /* The maximum number of neighbor entries for the GT. */ int max_num_ipv6_neighbors; /* Timeout for scanning the fragmentation table in ms. */ uint32_t frag_scan_timeout_ms; /* Number of buckets in the fragmentation table. */ uint32_t frag_bucket_num; /* Number of entries per bucket. It should be a power of two. */ uint32_t frag_bucket_entries; /* * Maximum number of entries that could be stored in * the fragmentation table. */ uint32_t frag_max_entries; /* Maximum TTL numbers are in ms. */ uint32_t frag_max_flow_ttl_ms; /* The maximum number of packets to retrieve/transmit. */ uint16_t max_pkt_burst; /* * Number of iterations of packets processed by each GT * block before flushing all policy decision notification * packets. Set to 1 to flush after every RX iteration. */ unsigned int batch_interval; /* * Maximum number of Gatekeeper servers for which to * keep policy decision notification packet buffers. */ unsigned int max_ggu_notify_pkts; /* Parameters to setup the mailbox instance. */ unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; /* Log level for GT block. */ uint32_t log_level; /* Log ratelimit interval in ms for GT block. 
*/ uint32_t log_ratelimit_interval_ms; /* Log ratelimit burst size for GT block. */ uint32_t log_ratelimit_burst; /* * An option to disable packet reassembling * at Grantor servers. */ bool reassembling_enabled; /* * The fields below are for internal use. * Configuration files should not refer to them. */ /* Base directory of the Lua policies. */ char *lua_base_directory; /* Lua policy configuration file for Grantor. */ char *lua_policy_file; rte_atomic32_t ref_cnt; /* The lcore ids at which each instance runs. */ unsigned int *lcores; /* The number of lcore ids in @lcores. */ int num_lcores; /* The network interface configuration. */ struct net_config *net; /* The gt instances. */ struct gt_instance *instances; }; /* Define the possible command operations for GT block. */ enum gt_cmd_op { GT_UPDATE_POLICY, GT_UPDATE_POLICY_INCREMENTALLY, }; /* Currently, the Dynamic config is the only writer of GT mailboxes. */ struct gt_cmd_entry { enum gt_cmd_op op; union { lua_State *lua_state; /* GT_UPDATE_POLICY */ /* GT_UPDATE_POLICY_INCREMENTALLY */ struct { size_t len; char *lua_bytecode; int is_returned; } bc; } u; }; struct gt_config *alloc_gt_conf(void); int gt_conf_put(struct gt_config *gt_conf); int run_gt(struct net_config *net_conf, struct gt_config *gt_conf, const char *lua_base_directory, const char *lua_policy_file); int l_update_gt_lua_states(lua_State *L); static inline void gt_conf_hold(struct gt_config *gt_conf) { rte_atomic32_inc(&gt_conf->ref_cnt); } /* * Key in the registry of a Lua state used to run policies that points to * the lcore_id where the policy runs. */ #define GT_LUA_LCORE_ID_NAME "lcore_id" #endif /* _GATEKEEPER_GT_H_ */ ```
/content/code_sandbox/include/gatekeeper_gt.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,466
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <signal.h> #include <time.h> #include <inttypes.h> #include <argp.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/prctl.h> #include <sys/types.h> #include <unistd.h> #include <rte_eal.h> #include <rte_log.h> #include <rte_common.h> #include <rte_launch.h> #include <rte_cycles.h> #include <rte_timer.h> #include "gatekeeper_main.h" #include "gatekeeper_config.h" #include "gatekeeper_net.h" #include "gatekeeper_launch.h" /* Indicates whether the program needs to exit or not. */ volatile int exiting = false; /* * These metrics are system dependent, and * initialized via time_resolution_init() function. */ uint64_t cycles_per_sec; uint64_t cycles_per_ms; double cycles_per_ns; uint64_t picosec_per_cycle; const char *log_file_name_format; const char *log_base_dir; mode_t log_file_mode; /* Argp's global variables. */ const char *argp_program_version = "Gatekeeper 1.0"; /* Arguments. 
*/ static char adoc[] = ""; static char doc[] = "Gatekeeper -- the first open source " "DDoS protection system"; static struct argp_option options[] = { {"lua-base-dir", 'd', "DIR", 0, "Base directory DIR for Gatekeeper Lua files", 0}, {"gatekeeper-config-file", 'f', "FILE", 0, "Lua configuration FILE to initialize Gatekeeper", 0}, {"log-file-name-format", 'l', "FORMAT", 0, "The name format of log files", 0}, {"log-base-dir", 'g', "DIR", 0, "Base directory DIR for Gatekeeper log files", 0}, {"log-file-mode", 'm', "MODE", 0, "The mode of log files", 0}, { 0 } }; struct args { const char *lua_base_dir; const char *gatekeeper_config_file; const char *log_file_name_format; const char *log_base_dir; mode_t log_file_mode; }; #define MAX_MODE (07777) static error_t parse_opt(int key, char *arg, struct argp_state *state) { struct args *args = state->input; switch (key) { case 'd': args->lua_base_dir = arg; break; case 'f': args->gatekeeper_config_file = arg; break; case 'l': args->log_file_name_format = arg; break; case 'g': args->log_base_dir = arg; break; case 'm': { long mode; char *end; /* Assuming the file mode is in octal. */ mode = strtol(arg, &end, 8); if (!*arg || *end) { argp_error(state, "the log file mode \"%s\" is not an number", arg); } RTE_BUILD_BUG_ON(LONG_MIN >= 0 || MAX_MODE >= LONG_MAX); if (mode < 0 || MAX_MODE < mode) { argp_error(state, "the log file mode \"%s\" is out of range", arg); } args->log_file_mode = mode; break; } default: return ARGP_ERR_UNKNOWN; } return 0; } static struct argp argp = {options, parse_opt, adoc, doc, NULL, NULL, NULL}; char * rte_strdup(const char *type, const char *s) { int len = s == NULL ? 
0 : strlen(s) + 1; char *res = rte_malloc(type, len, 0); if (unlikely(res == NULL)) return NULL; return strcpy(res, s); } FILE *log_file; static void cleanup_log(void) { if (log_file != NULL) { if (log_file != stderr) fclose(log_file); log_file = NULL; } } int gatekeeper_log_init(void) { int ret; int log_fd; time_t now; struct tm *p_tm, time_info; char log_file_name[128]; char log_file_path[512]; FILE *new_log_file; if (log_file == NULL) { /* * Initialize log_file with stderr to guarantee that log_file * has a valid value even before a file is open. */ log_file = stderr; } now = time(NULL); if (unlikely(now == ((time_t) -1))) { G_LOG(ERR, "%s(): time() failed with errno=%i: %s\n", __func__, errno, strerror(errno)); return -1; } p_tm = localtime_r(&now, &time_info); if (unlikely(p_tm == NULL)) { G_LOG(ERR, "%s(): localtime_r() failed with errno=%i: %s\n", __func__, errno, strerror(errno)); return -1; } RTE_VERIFY(p_tm == &time_info); #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wformat-nonliteral" ret = strftime(log_file_name, sizeof(log_file_name), log_file_name_format, &time_info); #pragma GCC diagnostic pop if (unlikely(!(ret > 0 && ret < (int)sizeof(log_file_name)))) { G_LOG(ERR, "%s(): Failed to call strftime() to format the log file name\n", __func__); return -1; } ret = snprintf(log_file_path, sizeof(log_file_path), "%s/%s", log_base_dir, log_file_name); if (unlikely(!(ret > 0 && ret < (int)sizeof(log_file_path)))) { G_LOG(ERR, "%s(): Failed to call snprintf() to fill up the log file path\n", __func__); return -1; } log_fd = open(log_file_path, O_CREAT | O_WRONLY, log_file_mode); if (log_fd < 0) { G_LOG(ERR, "%s(): Failed to get log file descriptor %s - %s\n", __func__, log_file_path, strerror(errno)); return -1; } new_log_file = fdopen(log_fd, "a"); if (new_log_file == NULL) { G_LOG(ERR, "%s(): Failed to open log file %s - %s\n", __func__, log_file_path, strerror(errno)); close(log_fd); return -1; } ret = rte_openlog_stream(new_log_file); if 
(ret != 0) { fclose(new_log_file); return -1; } if (log_file != stderr) fclose(log_file); log_file = new_log_file; return log_fd; } /* Obtain the system time resolution. */ static int time_resolution_init(void) { int ret; uint64_t diff_ns; uint64_t cycles; uint64_t tsc_start; struct timespec tp_start; tsc_start = rte_rdtsc(); ret = clock_gettime(CLOCK_MONOTONIC_RAW, &tp_start); if (ret < 0) return ret; while (1) { uint64_t tsc_now; struct timespec tp_now; ret = clock_gettime(CLOCK_MONOTONIC_RAW, &tp_now); tsc_now = rte_rdtsc(); if (ret < 0) return ret; diff_ns = (uint64_t)(tp_now.tv_sec - tp_start.tv_sec) * ONE_SEC_IN_NANO_SEC + (tp_now.tv_nsec - tp_start.tv_nsec); if (diff_ns >= ONE_SEC_IN_NANO_SEC) { cycles = tsc_now - tsc_start; break; } } cycles_per_sec = cycles * ONE_SEC_IN_NANO_SEC / diff_ns; cycles_per_ms = cycles_per_sec / 1000; cycles_per_ns = (typeof(cycles_per_ns))cycles / diff_ns; picosec_per_cycle = 1000 * diff_ns / cycles; G_LOG(NOTICE, "cycles/second = %" PRIu64 ", cycles/millisecond = %" PRIu64 ", cycles/nanoseconds = %f, picosec/cycle = %" PRIu64 "\n", cycles_per_sec, cycles_per_ms, cycles_per_ns, picosec_per_cycle); return 0; } static void signal_handler(int signum) { if (signum == SIGINT) fprintf(stderr, "caught SIGINT\n"); else if (signum == SIGTERM) fprintf(stderr, "caught SIGTERM\n"); else fprintf(stderr, "caught unknown signal (%d)\n", signum); exiting = true; } static int run_signal_handler(void) { int ret = -1; sig_t pipe_handler; struct sigaction new_action; struct sigaction old_int_action; struct sigaction old_term_action; new_action.sa_handler = signal_handler; sigemptyset(&new_action.sa_mask); new_action.sa_flags = 0; ret = sigaction(SIGINT, &new_action, &old_int_action); if (ret < 0) goto out; ret = sigaction(SIGTERM, &new_action, &old_term_action); if (ret < 0) goto int_action; pipe_handler = signal(SIGPIPE, SIG_IGN); if (pipe_handler == SIG_ERR) { fprintf(stderr, "Error: failed to ignore SIGPIPE - %s\n", strerror(errno)); goto 
term_action; } goto out; term_action: sigaction(SIGTERM, &old_term_action, NULL); int_action: sigaction(SIGINT, &old_int_action, NULL); out: return ret; } int main(int argc, char **argv) { struct args args = { /* Defaults. */ .lua_base_dir = "./lua", .gatekeeper_config_file = "main_config.lua", .log_file_name_format = "gatekeeper_%Y_%m_%d_%H_%M.log", .log_base_dir = ".", .log_file_mode = S_IRUSR | S_IWUSR, }; int ret, log_fd; /* * Functional blocks have their own log level, and all log entries of * Gatekeeper use BLOCK_LOGTYPE. Thus, setting BLOCK_LOGTYPE to DEBUG * level instructs DPDK to log everything that the blocks decide to * log. */ ret = rte_log_set_level(BLOCK_LOGTYPE, RTE_LOG_DEBUG); if (ret < 0) rte_exit(EXIT_FAILURE, "Error while setting log level of BLOCK_LOGTYPE\n"); /* * rte_eal_init(), which is called next, creates all threads that * Gatekeeper will use before the code finds out if Gatekeeper is * going to run as root or not. When Gatekeeper runs with a * non-root user, Gatekeeper has to assign capabilities to the * functional blocks, which, in turn, is only possible if the flag * PR_SET_KEEPCAPS is already set. */ RTE_VERIFY(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) == 0); ret = rte_eal_init(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); argc -= ret; argv += ret; /* Parse Gatekeeper arguments (after the EAL ones). */ ret = argp_parse(&argp, argc, argv, 0, NULL, &args); if (ret != 0) rte_exit(EXIT_FAILURE, "Invalid Gatekeeper parameters\n"); log_file_name_format = args.log_file_name_format; log_base_dir = args.log_base_dir; log_file_mode = args.log_file_mode; /* * ATTENTION: this is the only place at which the macro RTE_LOG() * should be used, everywhere else should use one of Gatekeeper's * log macros. * * The log level ALERT is used here to guarantee that * the message will be logged. 
* While the log level EMERG would raise this guarantee, * it is not used here to avoid broadcasting the message throughout * the system. */ RTE_LOG(ALERT, EAL, "See files in %s for further log\n", log_base_dir); ret = gatekeeper_log_init(); if (ret < 0) goto out; log_fd = ret; /* Used by the LLS block. */ rte_timer_subsystem_init(); /* Given the nature of signal, it's okay to not have a cleanup for them. */ ret = run_signal_handler(); if (ret < 0) goto log; /* * Given the nature of 'clock_gettime()' call, it's okay to not have a * cleanup for them. */ ret = time_resolution_init(); if (ret < 0) goto log; ret = config_gatekeeper(args.lua_base_dir, args.gatekeeper_config_file); if (ret < 0) { G_LOG(ERR, "Failed to configure Gatekeeper\n"); goto net; } /* * Finalize any network configuration, such as building ACL tries, * after blocks have had a chance to make use of network state * during stage 2. This is needed because there is no stage 3 for * the network configuration. * * Finalize any user configuration, such as changing file permissions * and dropping user privileges. */ ret = launch_at_stage2(finalize_stage2, (void *)(intptr_t)log_fd); if (ret < 0) goto net; ret = launch_gatekeeper(); if (ret < 0) exiting = true; rte_eal_mp_wait_lcore(); net: gatekeeper_free_network(); log: cleanup_log(); out: return ret; } ```
/content/code_sandbox/main/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
3,089
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <lua.h> #include <sys/time.h> #include <rte_atomic.h> #include "gatekeeper_gk.h" #include "gatekeeper_gt.h" #ifndef _GATEKEEPER_CONFIG_H_ #define _GATEKEEPER_CONFIG_H_ /* DPDK restricts hash tables to be at least of size 8. */ #define HASH_TBL_MIN_SIZE 8 #define RETURN_MSG_MAX_LEN 256 /* Configuration for the Dynamic Config functional block. */ struct dynamic_config { /* The lcore id that the block is running on. */ unsigned int lcore_id; /* Reference to the gk configuration struct. */ struct gk_config *gk; /* Reference to the gt configuration struct. */ struct gt_config *gt; /* Log level for Dynamic Configuration block. */ uint32_t log_level; /* Log ratelimit interval in ms for Dynamic Configuration block. */ uint32_t log_ratelimit_interval_ms; /* Log ratelimit burst size for Dynamic Configuration block. */ uint32_t log_ratelimit_burst; /* Parameters to setup the mailbox instance. */ unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; /* * The fields below are for internal use. * Configuration files should not refer to them. */ /* The server socket descriptor. */ int sock_fd; /* The file path that the Unix socket will use. */ char *server_path; /* Specify the receiving timeouts until reporting an error. */ struct timeval rcv_time_out; /* The directory for Lua files of dynamic configuration. */ char *lua_dy_base_dir; /* The Lua file for initializing dynamic configuration. 
*/ char *dynamic_config_file; struct mailbox mb; /* * The callee that finished processing the return message * needs to increment this counter, so that the Dynamic config block * can finish its operation. */ rte_atomic16_t num_returned_instances; }; /* Define the possible command operations for Dynamic config block. */ enum dy_cmd_op { GT_UPDATE_POLICY_RETURN, }; /* Currently, the GT blocks are the only writers of Dynamic config mailbox. */ struct dy_cmd_entry { enum dy_cmd_op op; union { /* GT_UPDATE_POLICY_RETURN */ struct { unsigned int gt_lcore; unsigned int length; char return_msg[RETURN_MSG_MAX_LEN]; } gt; } u; }; int config_gatekeeper(const char *lua_base_dir, const char *gatekeeper_config_file); int set_lua_path(lua_State *L, const char *path); struct dynamic_config *get_dy_conf(void); void set_dyc_timeout(unsigned sec, unsigned usec, struct dynamic_config *dy_conf); int run_dynamic_config(struct net_config *net_conf, struct gk_config *gk_conf, struct gt_config *gt_conf, const char *server_path, const char *lua_dy_base_dir, const char *dynamic_config_file, struct dynamic_config *dy_conf, int mode); #endif /* _GATEKEEPER_CONFIG_H_ */ ```
/content/code_sandbox/include/gatekeeper_config.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
748
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_MAILBOX_H_ #define _GATEKEEPER_MAILBOX_H_ #include <rte_ring.h> #include <rte_mempool.h> #include "gatekeeper_main.h" struct mailbox { struct rte_ring *ring; struct rte_mempool *pool; }; /* * For optimum memory usage, the maximum number of elements for * rte_ring_create() is defined as (2^mailbox_max_entries_exp), while * the maximum number of elements for rte_mempool_create() is * defined as (2^mailbox_max_entries_exp - 1). */ int init_mailbox( const char *tag, int mailbox_max_entries_exp, unsigned int ele_size, unsigned int cache_size, unsigned int lcore_id, struct mailbox *mb); void *mb_alloc_entry(struct mailbox *mb); int mb_send_entry(struct mailbox *mb, void *obj); void destroy_mailbox(struct mailbox *mb); static inline int mb_dequeue_burst(struct mailbox *mb, void **obj_table, unsigned n) { return rte_ring_sc_dequeue_burst(mb->ring, obj_table, n, NULL); } static inline void mb_free_entry(struct mailbox *mb, void *obj) { rte_mempool_put(mb->pool, obj); } static inline void mb_free_entry_bulk(struct mailbox *mb, void * const *obj_table, unsigned int n) { rte_mempool_put_bulk(mb->pool, obj_table, n); } #endif /* _GATEKEEPER_MAILBOX_H_ */ ```
/content/code_sandbox/include/gatekeeper_mailbox.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
417
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef SEQLOCK_H #define SEQLOCK_H /* * The code of this file is mostly a copy of the Linux kernel, * and replace the Linux spinlock with DPDK's rte_spinlock_t. * It supports the use of hardware memory transactions (HTM) in DPDK. */ #include <rte_log.h> #include <rte_atomic.h> #include <rte_spinlock.h> #include "gatekeeper_main.h" /* * Reader/writer consistent mechanism without starving writers. This type of * lock is for data where the reader wants a consistent set of information * and is willing to retry if the information changes. Sequence readers which * never block a writer but they may have to retry if a writer is in progress * by detecting change in sequence number. Writers do not wait for a sequence * reader. * * Sequential locks may not work well for data that contains pointers, because * any writer could invalidate a pointer that a reader was following. * * Expected non-blocking reader usage: * do { * seq = read_seqbegin(&foo); * ... * } while (read_seqretry(&foo, seq)); */ static inline void __read_once_size(const volatile void *p, void *res, int size) { switch (size) { case 1: *(uint8_t *)res = *(const volatile uint8_t*)p; break; case 2: *(uint16_t *)res = *(const volatile uint16_t *)p; break; case 4: *(uint32_t *)res = *(const volatile uint32_t *)p; break; case 8: *(uint64_t *)res = *(const volatile uint64_t *)p; break; default: G_LOG(WARNING, "seqlock: data access exceeds word size and won't be atomic\n"); break; } } /* * Prevent the compiler from merging or refetching reads or writes. 
The * compiler is also forbidden from reordering successive instances of * READ_ONCE (see below), but only when the compiler is aware of some * particular ordering. One way to make the compiler aware of ordering * is to put the two invocations of READ_ONCE in different C statements. * * READ_ONCE will also work on aggregate data types like structs or unions. * If the size of the accessed data type exceeds the word size of the machine * (e.g., 32 bits or 64 bits) READ_ONCE() will fall back to memcpy and print a * compile-time warning. * * Its two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, * and (2) Ensuring that the compiler does not fold, spindle, or otherwise * mutilate accesses that either do not require ordering or that interact * with an explicit memory barrier or atomic instruction that provides the * required ordering. */ #define READ_ONCE(x) \ ({ union { typeof(x) __val; char __c[1]; } __u; \ __read_once_size(&(x), __u.__c, sizeof(x)); rte_rmb(); __u.__val; }) /* * Version using sequence counter only. * This can be used when code has its own mutex protecting the * updating starting before the write_seqcountbeqin() and ending * after the write_seqcount_end(). */ typedef struct seqcount { unsigned sequence; } seqcount_t; /* * __read_seqcount_begin - begin a seq-read critical section (without barrier). * @s: pointer to seqcount_t * Returns: count to be passed to read_seqcount_retry. * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is * provided before actually loading any of the variables that are to be * protected in this critical section. * * Use carefully, only in critical code, and comment how the barrier is * provided. 
*/ static inline unsigned __read_seqcount_begin(const seqcount_t *s) { unsigned ret; repeat: ret = READ_ONCE(s->sequence); if (unlikely(ret & 1)) { rte_pause(); goto repeat; } return ret; } /* * read_seqcount_begin - begin a seq-read critical section. * @s: pointer to seqcount_t * Returns: count to be passed to read_seqcount_retry. * * read_seqcount_begin opens a read critical section of the given seqcount. * Validity of the critical section is tested by checking read_seqcount_retry * function. */ static inline unsigned read_seqcount_begin(const seqcount_t *s) { unsigned ret = __read_seqcount_begin(s); rte_smp_rmb(); return ret; } /* * __read_seqcount_retry - end a seq-read critical section (without barrier). * @s: pointer to seqcount_t * @start: count, from read_seqcount_begin * Returns: 1 if retry is required, else 0. * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is * provided before actually loading any of the variables that are to be * protected in this critical section. * * Use carefully, only in critical code, and comment how the barrier is * provided. */ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) { return unlikely(s->sequence != start); } /* * read_seqcount_retry - end a seq-read critical section. * @s: pointer to seqcount_t * @start: count, from read_seqcount_begin * Returns: 1 if retry is required, else 0. * * read_seqcount_retry closes a read critical section of the given seqcount. * If the critical section was invalid, it must be ignored (and typically * retried). 
*/ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) { rte_smp_rmb(); return __read_seqcount_retry(s, start); } static inline void write_seqcount_begin(seqcount_t *s) { s->sequence++; rte_smp_wmb(); } static inline void write_seqcount_end(seqcount_t *s) { rte_smp_wmb(); s->sequence++; } typedef struct { struct seqcount seqcount; rte_spinlock_t lock; } seqlock_t; static inline void seqlock_init(seqlock_t *sl) { sl->seqcount.sequence = 0; rte_spinlock_init(&sl->lock); } /* * Read side functions for starting and finalizing a read side section. */ static inline unsigned read_seqbegin(const seqlock_t *sl) { return read_seqcount_begin(&sl->seqcount); } static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { return read_seqcount_retry(&sl->seqcount, start); } /* * Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. */ static inline void write_seqlock(seqlock_t *sl) { rte_spinlock_lock_tm(&sl->lock); write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock(seqlock_t *sl) { write_seqcount_end(&sl->seqcount); rte_spinlock_unlock_tm(&sl->lock); } #endif /* SEQLOCK_H */ ```
/content/code_sandbox/include/seqlock.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,701
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_LAUNCH_H_ #define _GATEKEEPER_LAUNCH_H_ #include <rte_launch.h> /* * Postpone the execution of f(arg) until the Lua configuration finishes, * but before the network devices start. * * If f() returns a non-zero, the inilization terminates in error. * * ATTENTION: * This initilization stage is perfect for allocation of queues in * the network devices. HOWEVER, if you're going to allocate any queue, * DO NOT call this function, but net_launch_at_stage1() instead! * * RETURN * Return 0 if success; otherwise -1. */ int launch_at_stage1(lcore_function_t *f, void *arg); /* Drop the @n last entries of stage1. */ void pop_n_at_stage1(int n); /* * Once stage 1 finishes, the network devices are started, and * stage 2 begins. * * According to the DPDK documentation, any functions from rte_ethdev.h * must be called after the network devices are started, which includes * filters functions in general. * Therefore, this initilization stage is perfect for registering filters in * the network devices. * * RETURN * Return 0 if success; otherwise -1. */ int launch_at_stage2(lcore_function_t *f, void *arg); /* Drop the @n last entries of stage2. */ void pop_n_at_stage2(int n); /* * Once stage 2 finishes, stage 3 begins. * * This initilization stage runs f(arg) on lcore_id. * * RETURN * Return 0 if success; otherwise -1. */ int launch_at_stage3(const char *name, lcore_function_t *f, void *arg, unsigned int lcore_id); /* Drop the @n last entries of stage3. */ void pop_n_at_stage3(int n); int launch_gatekeeper(void); #endif /* _GATEKEEPER_LAUNCH_H_ */ ```
/content/code_sandbox/include/gatekeeper_launch.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
520
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_GK_RIB_H_ #define _GATEKEEPER_GK_RIB_H_ #include <stdint.h> #include <stdbool.h> #include <setjmp.h> #include <rte_mempool.h> typedef unsigned __int128 uint128_t; /* * Internal representation of an address. * Unless explicitly noticed, the bits are in host order. */ typedef uint128_t rib_address_t; #define RIB_MAX_ADDRESS_LENGTH ((int)sizeof(rib_address_t) * 8) typedef uint64_t rib_prefix_bits_t; #define RIB_MAX_PREFIX_BITS ((int)sizeof(rib_prefix_bits_t) * 8) struct rib_node { /* * Bits of the prefix to be matched. * These bits are kept in host order, * so they can be directly operated on. */ rib_prefix_bits_t pfx_bits; /* The number of bits present in @pfx_bits. */ uint8_t matched_bits; /* True if there is a value at @next_hop. */ bool has_nh; /* Next hop if the prefix matches all @matched_bits bits. */ uint32_t next_hop; /* * The branches after this node. * @branch[false] is the branch that follows this node when the first * not-matched bit is zero, and @branch[true] when the first * not-matched bit is one. * @branch[false] or @branch[true] is NULL when those branches * do not exist. */ struct rib_node *branch[2]; }; struct rib_head { /* Maximum length of a network address. */ uint8_t max_length; /* * Version of the RIB. * @version changes every time the RIB is edited. * The main purpose of this field is to support iterators. */ uint64_t version; /* * Root of the prefix tree. * * When the field @has_nh is true, the RIB has the default prefix * (i.e. the zero-length prefix). 
When the default prefix is present, * its next hop is at field @next_hop. * * This is the only node in the prefix tree that has field * @matched_bits equal to zero, all other nodes have a value * greater than zero at this field. */ struct rib_node root_node; /* Memory pool for instances of struct rib_node. */ struct rte_mempool *mp_nodes; }; static inline uint8_t rib_get_max_length(const struct rib_head *rib) { return rib->max_length; } /* * Create a new RIB. * * @name of the internal memory pool used to allocate the nodes of * the prefix tree. * * @socket_id is the NUMA node on which internal memory is allocated. * The value can be SOCKET_ID_ANY if there is no NUMA constraint. * * @max_length is the maximum length of a network address. * @max_length must be a multiple of 8 and not greater than * RIB_MAX_ADDRESS_LENGTH. * Typical values: 32 for IPv4 and 128 for IPv6. * * @max_rules is the maximum number of rules (i.e. a prefix and a next hop) * that this RIB is expected to have. If the RIB has space for more rules, * it will take extra rules. Inspite of the name, this parameter is meant * to mean the minimum number of rules that the RIB will support. * The chosen name is meant to match the name of the field max_rules in * struct rte_lpm_config and struct rte_lpm6_config. */ int rib_create(struct rib_head *rib, const char *name, int socket_id, uint8_t max_length, uint32_t max_rules); /* Free all resources associated to @rib but the memory pointed by it. */ void rib_free(struct rib_head *rib); /* * Add a rule to the RIB. * @address is in network order (big endian). * @address == NULL is equivalent to the all-zero address. * RETURN * -EEXIST if prefix already exist in @rib. * 0 if it successfully adds the new rule. */ int rib_add(struct rib_head *rib, const uint8_t *address, uint8_t depth, uint32_t next_hop); /* * Delete a rule from the RIB. * @address is in network order (big endian). * @address == NULL is equivalent to the all-zero address. 
* RETURN * -ENOENT if the prefix does not exist in @rib. * 0 if it successfully deletes the rule. */ int rib_delete(struct rib_head *rib, const uint8_t *address, uint8_t depth); /* * Look an address up on the RIB. * @address is in network order (big endian). * @address == NULL is equivalent to the all-zero address. * RETURN * 0 on lookup hit. * -ENOENT on lookup miss. * A negative value on failure. * */ int rib_lookup(const struct rib_head *rib, const uint8_t *address, uint32_t *pnext_hop); /* * Check if a rule is present in the RIB, and provide its next hop if it is. * @address is in network order (big endian). * @address == NULL is equivalent to the all-zero address. * RETURN * 1 if the rule exists. * 0 if it does not. * A negative value on failure. */ int rib_is_rule_present(const struct rib_head *rib, const uint8_t *address, uint8_t depth, uint32_t *pnext_hop); /* * Extra information about struct rib_node that can be computed as * one navigates a RIB. */ struct rib_node_info { /* Prefix in host order that has been matched up to the current node. */ rib_address_t haddr_matched; /* Bit mask for field @haddr_matched. */ rib_address_t haddr_mask; /* Number of bits set in field @haddr_mask. */ int depth; /* * Number of bits missing in field @haddr_mask to reach * the maximum mask. */ int missing_bits; }; struct rib_iterator_rule { rib_address_t address_no; /* Address in network order. */ uint8_t depth; uint32_t next_hop; }; static inline uint32_t ipv4_from_rib_addr(rib_address_t address_no) { uint32_t *p = (typeof(p))&address_no; return *p; } struct rib_longer_iterator_state { /* RIB with which the iterator is associated. */ const struct rib_head *rib; /* * RIB scope of the iterator. */ /* Version of the RIB for which field @start_node is valid. */ uint64_t version; /* Node where the iterator starts. */ const struct rib_node *start_node; /* Information associated with field @start_node. 
*/ struct rib_node_info start_info; /* The minimum depth of prefix in field @next_address; the scope. */ uint8_t min_depth; /* * If true, do not enumerate prefixes longer than the child prefixes * of the parent prefix. */ bool stop_at_children; /* * The following fields are used in between calls of * rib_longer_iterator_next(). */ /* * The next prefix to be returned by rib_longer_iterator_next(). * * If the prefix does not exist in the RIB, * the prefix immediately greater will be returned. */ rib_address_t next_address; /* The depth of the prefix in field @next_address. */ uint8_t next_depth; /* The iterator has finished. */ bool has_ended; /* * The following fields are set and only valid while the execution is * in rib_longer_iterator_next(). */ /* When true, keep looking for prefixes greater than @next_address. */ bool ignore_next_address; /* A return for rib_longer_iterator_next() has been found. */ bool found_return; /* * Pointer to the struct rib_iterator_rule that will receive * the found rule; the output. */ struct rib_iterator_rule *rule; /* Long jump to unwind the recursion. */ jmp_buf jmp_found; }; /* * Initialize @state. * * The first call of rib_longer_iterator_next() returns a rule whose prefix * is at least as deeper as @depth. * * Rules are returned such that prefixes are in increasing order * (e.g. 10.2/16 > 10.1/16). Longer prefixes are greater than * shorter sub-prefixes (e.g. 10.2/16 > 10/8). * Notice that 10.2/16 is greater than 10.1.255.255/32. * * Passing @address = NULL (or any other value) and @depth = 0 and * @stop_at_children = false iterates over the whole RIB; * including the default rule (i.e. the zero-length prefix). * * @address is in network order (big endian). * @address == NULL is equivalent to the all-zero address. * * When @stop_at_children is true, only the prefix @address/@depth * (if it exists) and its children prefixes are enumerated. 
* In a RIB with 10/8, 10.1/16, 10.2/16, 10.2.2/24, the longer iterator * will list all prefixes when @address = 10.X.X.X and @depth = 8 and * @stop_at_children = false, but will not list 10.2.2/24 when * @stop_at_children = true. * * The parent prefix is @address/@depth. * * If the RIB changes (i.e. rules are added or deleted) * between the call of this function and the call of * rib_longer_iterator_next(), or between two consecutive calls of * rib_longer_iterator_next(), the iterator will enumerate the changes that * are within the scope of the iterator (i.e at least as deeper than * the initial prefix) and that are after the next rule. */ int rib_longer_iterator_state_init(struct rib_longer_iterator_state *state, const struct rib_head *rib, const uint8_t *address, uint8_t depth, bool stop_at_children); /* * When a rule is found, this function updates @rule and returns zero. * Otherwise, this function returns -ENOENT. */ int rib_longer_iterator_next(struct rib_longer_iterator_state *state, struct rib_iterator_rule *rule); /* * Make the prefix @address/@depth the prefix that the following call of * rib_longer_iterator_next() will return. * * If the prefix @address/@depth is not present in @rib, the following call of * rib_longer_iterator_next() will return the prefix immediately following * the prefix @address/@depth. If there is no prefix following the prefix * @address/@depth, the following call of rib_longer_iterator_next() will * return -ENOENT. * * The prefix @address/@depth must be within the scope of the prefix passed * to rib_longer_iterator_state_init(). For example, if the iterator was * initialized with 10.0.0.0/8, 11.0.0.0/8 is out of scope. * * This function can be successful even after rib_longer_iterator_next() * has returned -ENOENT. * * RETURN * -EINVAL If the prefix @address/@depth is not within scope. * 0 If the call is successful. 
*/ int rib_longer_iterator_seek(struct rib_longer_iterator_state *state, const uint8_t *address, uint8_t depth); /* * This function is an efficient equivalent of calling * rib_longer_iterator_seek() on the prefix @address/@depth, AND skipping all * prefixes within the scope of the prefix @address/@depth. * * The prefix @address/@depth must be within the scope of the iterator, but * it does not have to be present in @rib. * * RETURN * -EINVAL If the prefix @address/@depth is not within scope. * 0 If the call is successful. */ int rib_longer_iterator_skip_branch(struct rib_longer_iterator_state *state, const uint8_t *address, uint8_t depth); /* * Free all resources associated to @state but the memory pointed by it. * * CAUTION: This function should only be called on initialized states. */ static inline void rib_longer_iterator_end(struct rib_longer_iterator_state *state) { /* At the current version of the code, there is nothing to do here. */ RTE_SET_USED(state); } struct rib_shorter_iterator_state { /* RIB with which the iterator is associated. */ const struct rib_head *rib; /* Version of the RIB for which field @cur_node is valid. */ uint64_t version; /* Current node of the iteration. */ const struct rib_node *cur_node; /* Information associated with field @cur_node. */ struct rib_node_info info; /* Deepest prefix to consider. */ rib_address_t haddr; uint8_t depth; /* The iterator has finished. */ bool has_ended; }; /* * Initialize @state. * * The first call of rib_shorter_iterator_next() returns a rule whose prefix * is at most as deep as @depth and includes @address. * * The prefix @address/@depth is returned if its rule is present in @rib. * * This iterator is an efficient version of the following loop: * * int i; * for (i = 0; i <= @depth; i++) { * uint32_t next_hop; * if (rib_is_rule_present(rib, address, i, &next_hop) != 1) * continue; * * => These entries are the ones that this iterator returns. * } * * @address is in network order (big endian). 
* @address == NULL is equivalent to the all-zero address. * * If the RIB changes (i.e. rules are added or deleted) * between the call of this function and the call of * rib_shorter_iterator_next(), or between two consecutive calls of * rib_shorter_iterator_next(), the iterator will return -EFAULT. */ int rib_shorter_iterator_state_init(struct rib_shorter_iterator_state *state, const struct rib_head *rib, const uint8_t *address, uint8_t depth); /* * When a rule is found, this function updates @rule and returns zero. * Otherwise, this function returns -ENOENT. */ int rib_shorter_iterator_next(struct rib_shorter_iterator_state *state, struct rib_iterator_rule *rule); /* * Free all resources associated to @state but the memory pointed by it. * * CAUTION: This function should only be called on initialized states. */ static inline void rib_shorter_iterator_end(struct rib_shorter_iterator_state *state) { /* At the current version of the code, there is nothing to do here. */ RTE_SET_USED(state); } #endif /* _GATEKEEPER_GK_RIB_H_ */ ```
/content/code_sandbox/include/gatekeeper_rib.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
3,365
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_LLS_H_ #define _GATEKEEPER_LLS_H_ #include <lua.h> #include <netinet/in.h> #include <rte_arp.h> #include <rte_ip.h> #include "gatekeeper_log_ratelimit.h" #include "gatekeeper_mailbox.h" #include "gatekeeper_net.h" #include "gatekeeper_ratelimit.h" /* Requests that can be made to the LLS block. */ enum lls_req_ty { /* Express interest in a map by registering a callback function. */ LLS_REQ_HOLD, /* * Remove a previously-submitted hold, if not already deleted * by virtue of a callback function signaling it should not * be invoked again. */ LLS_REQ_PUT, /* Request to handle ARP packets received from another block. */ LLS_REQ_ARP, /* Request to handle ICMP packets received from another block. */ LLS_REQ_ICMP, /* Request to handle ICMPv6 packets received from another block. */ LLS_REQ_ICMP6, }; /* Replies that come from the LLS block. */ enum lls_reply_ty { /* The reply represents a map resolution (or update to one). */ LLS_REPLY_RESOLUTION, /* * The reply is a notification that the hold is * removed, so the requester can free state as needed. */ LLS_REPLY_FREE, }; /* Map that is returned to blocks that request resolution. */ struct lls_map { /* Ethernet address of this map. */ struct rte_ether_addr ha; /* Port on which this map exists. */ uint16_t port_id; /* Whether this map has been marked as stale. */ int stale; /* IP address for this map, in network ordering. */ struct ipaddr addr; }; /* * Format of callback function for requesting LLS maps. 
* * The LLS block invokes the callbacks, so each block should ensure * that the callback function deals with any race conditions and * is aware that the blocks may reside in different NUMA nodes. * * If the requesting block wants future updates, it should set * *@pcall_again to true before returning. Otherwise, by default * the LLS block will remove the interest from the block. If * *@pcall_again is set to true, then the block may release all * resources attached to the callback before returning. * * When @ty is LLS_REPLY_FREE, @pcall_again is NULL to indicate * that this will be the last callback for this hold. */ typedef void (*lls_req_cb)(const struct lls_map *map, void *arg, enum lls_reply_ty ty, int *pcall_again); /* A hold for an LLS map. */ struct lls_hold { /* Callback function for replies from the LLS block. */ lls_req_cb cb; /* Optional argument to @cb. */ void *arg; /* The lcore that requested this hold. */ unsigned int lcore_id; }; struct lls_record { /* IP --> Ethernet address map for this record. */ struct lls_map map; /* Timestamp of the last update to the map. */ time_t ts; /* * Number of requests to hold this map. Blocks * should only request a hold for a map once * to avoid multiple entries for an lcore in @holds. */ uint32_t num_holds; /* Holds for @map. */ struct lls_hold holds[RTE_MAX_LCORE]; }; /* For dumping LLS entries using the Dynamic Config. */ struct lls_dump_entry { /* Whether this entry is stale. */ bool stale; /* The port on which this entry resides. */ uint16_t port_id; /* The IP address of the entry. */ struct ipaddr addr; /* The MAC address of the entry. */ struct rte_ether_addr ha; }; struct lls_cache { /* Timeout value (in seconds) to mark entries as stale. */ uint32_t front_timeout_sec; uint32_t back_timeout_sec; /* Name string (needed for cache hash). */ const char *name; /* Array of cache records indexed using @hash. */ struct lls_record *records; /* Hash instance that maps IP address keys to LLS cache records. 
*/ struct rte_hash *hash; /* Returns whether the cache is enabled for @iface. */ int (*iface_enabled)(struct net_config *net, struct gatekeeper_if *iface); /* * Returns whether @addr is in the same subnet as the * relevant address for this cache assigned to @iface. */ int (*ip_in_subnet)(struct gatekeeper_if *iface, const struct ipaddr *addr); /* * Function to transmit a request out of @iface to resolve * IP address @addr to an Ethernet address. * * If @ha is NULL, then broadcast (IPv4) or multicast (IPv6). * Otherwise, unicast to @ha. */ void (*xmit_req)(struct gatekeeper_if *iface, const struct ipaddr *addr, const struct rte_ether_addr *ha, uint16_t tx_queue); }; struct lls_config { /* lcore that the LLS block runs on. */ unsigned int lcore_id; /* The maximum number of packets to retrieve/transmit. */ uint16_t front_max_pkt_burst; uint16_t back_max_pkt_burst; /* The maximum number of ARP or ND packets submitted by GK or GT. */ unsigned int mailbox_max_pkt_sub; /* Parameters to setup the mailbox instance. */ unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; /* Number of records that a LLS cache can hold. */ unsigned int max_num_cache_records; /* Length of time (in seconds) to wait between scans of the cache. */ unsigned int cache_scan_interval_sec; /* Log level for LLS block. */ uint32_t log_level; /* Log ratelimit interval in ms for LLS block. */ uint32_t log_ratelimit_interval_ms; /* Log ratelimit burst size for LLS block. */ uint32_t log_ratelimit_burst; /* The rate and burst size of the icmp messages. */ uint32_t front_icmp_msgs_per_sec; uint32_t front_icmp_msgs_burst; uint32_t back_icmp_msgs_per_sec; uint32_t back_icmp_msgs_burst; /* * The fields below are for internal use. * Configuration files should not refer to them. */ struct net_config *net; /* Mailbox to hold requests from other blocks. */ struct mailbox requests; /* Cache of entries that map IPv4 addresses to Ethernet addresses. 
*/ struct lls_cache arp_cache; /* Cache of entries that map IPv6 addresses to Ethernet addresses. */ struct lls_cache nd_cache; /* Timer to scan over LLS cache(s). */ struct rte_timer scan_timer; /* Timer to create new log files. */ struct rte_timer log_timer; /* Receive and transmit queues for both interfaces. */ uint16_t rx_queue_front; uint16_t tx_queue_front; uint16_t rx_queue_back; uint16_t tx_queue_back; /* RX methods for both interfaces. */ uint8_t rx_method_front; uint8_t rx_method_back; unsigned int total_pkt_burst; /* The packet mbuf pool for the LLS block. */ struct rte_mempool *mp; /* Data structures used to limit the rate of icmp messages. */ struct token_bucket_ratelimit_state front_icmp_rs; struct token_bucket_ratelimit_state back_icmp_rs; }; /* * In LLS, it was a conscious design decision to push the task of * avoiding duplicates to the individual blocks, instead of * putting it in the LLS cache. * * For example, the CPS block maintains * a linked list of ARP and ND holds that it has made so that * it does not issue duplicate requests. * * For GK and GT blocks: GK blocks avoid the duplicates with the * help of their hash tables of neighbors that leaves inside of the LPM table. * And each GT block has a hash table of neighbors. */ /* * Interface for functional blocks to resolve IPv4 --> Ethernet addresses. * * To obtain a map, a functional block running on @lcore_id should invoke * hold_arp() with a callback function @cb and an optional @arg. When * a resolution is available, @cb will be invoked by the LLS block to * deliver a struct lls_map (and @arg) to the functional block. * * For every map requested through hold_arp(), functional blocks should * also indicate in an invocation of the callback that they do not wish * for it to be called again and/or call put_arp(). 
* * Blocks should not repeatedly call hold_arp() for an already-requested * map without first releasing the map by indicating the callback should * not be called again and/or by calling put_arp() to clear its request * from the LLS. */ int hold_arp(lls_req_cb cb, void *arg, struct in_addr *ip_be, unsigned int lcore_id); int put_arp(struct in_addr *ip_be, unsigned int lcore_id); /* * A Gratuitous ARP is an ARP request that serves as an announcement of * a neighbor's mapping. The sender and target IP address should be the same, * AND the target Ethernet address should be the same as the sender Ethernet * address OR zero. */ static inline int is_garp_pkt(const struct rte_arp_hdr *arp_hdr) { return (arp_hdr->arp_data.arp_sip == arp_hdr->arp_data.arp_tip) && (rte_is_zero_ether_addr(&arp_hdr->arp_data.arp_tha) || rte_is_same_ether_addr(&arp_hdr->arp_data.arp_tha, &arp_hdr->arp_data.arp_sha)); } #define rte_ipv6_icmpv6_cksum rte_ipv6_udptcp_cksum struct icmpv6_hdr { /* The type of this ICMPv6 packet. */ uint8_t type; /* An additional value to describe the message, dependent on @type. */ uint8_t code; /* Checksum over the entire ICMPv6 message. */ uint16_t cksum; } __attribute__((__packed__)); struct nd_neigh_msg { /* * For Neighbor Solicitations, @flags is reserved and should be 0. * * For Neighbor Advertisements, the most significant three bits * of @flags should be: router (msb), solicited, and override. * The other 29 bits of @flags are reserved and should be 0. */ uint32_t flags; /* IPv6 address of the target of the ND messages. */ uint8_t target[16]; /* Any ND options, if present. */ uint8_t opts[0]; } __attribute__((__packed__)); /* ND options as defined by RFC 4861. */ enum { /* Link-layer address of sender, optional in Solicitations. */ ND_OPT_SOURCE_LL_ADDR = 1, /* Link-layer address of the target, optional in Advertisements. */ ND_OPT_TARGET_LL_ADDR = 2, /* Other options exist but are not supported here. 
*/ ND_OPT_MAX, }; struct nd_opts { /* Pointers to each option present in an ICMPv6 packet. */ struct nd_opt_hdr *opt_array[ND_OPT_MAX]; }; struct nd_opt_hdr { /* Type of the option. */ uint8_t type; /* Length of option (including @type and @len) in units of 64 bits. */ uint8_t len; } __attribute__((__packed__)); /* Used for both ND_OPT_SOURCE_LL_ADDR and ND_OPT_TARGET_LL_ADDR. */ struct nd_opt_lladdr { /* Type of the option. */ uint8_t type; /* Length of option (including @type and @len) in units of 64 bits. */ uint8_t len; /* Hardware address corresponding to @type. */ struct rte_ether_addr ha; } __attribute__((__packed__)); #define ND_NEIGH_HDR_MIN_LEN (sizeof(struct nd_neigh_msg)) #define ND_NEIGH_PKT_MIN_LEN(l2_len) (l2_len + \ sizeof(struct rte_ipv6_hdr) + sizeof(struct icmpv6_hdr) + \ ND_NEIGH_HDR_MIN_LEN) /* Minimum size of a Neighbor Discovery packet with a link-layer option. */ #define ND_NEIGH_PKT_LLADDR_MIN_LEN(l2_len) (ND_NEIGH_PKT_MIN_LEN(l2_len) + \ sizeof(struct nd_opt_lladdr)) /* * Minimum size of an IPv4 ICMP packet. * * Note that the minimum ICMP header size is 8 bytes (RFC 792), * but DPDK's struct rte_icmp_hdr includes other fields. */ #define ICMP_PKT_MIN_LEN(l2_len) (l2_len + sizeof(struct rte_ipv4_hdr) + 8) /* * Minimum size of an ICMPv6 packet. * * Note that the minimum ICMPv6 header size is the four bytes * defined in struct icmpv6_hdr (RFC 4443). */ #define ICMPV6_PKT_MIN_LEN(l2_len) (l2_len + sizeof(struct rte_ipv6_hdr) + \ sizeof(struct icmpv6_hdr)) /* Flags for Neighbor Advertisements. */ #define LLS_ND_NA_SOLICITED 0x40000000 #define LLS_ND_NA_OVERRIDE 0x20000000 /* The IPv6 all nodes multicast address. */ static const struct in6_addr ip6_allnodes_mc_addr = { .s6_addr = { 0xFF, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 } }; /* * ICMP message types. */ /* ICMP Destination Unreachable, Fragmentation required, and DF flag set. 
*/ #define ICMP_DEST_UNREACHABLE_TYPE (3) #define ICMP_FRAG_REQ_DF_CODE (4) /* * ICMPv6 message types. */ /* ICMPv6 Packet Too Big. */ #define ICMPV6_PACKET_TOO_BIG_TYPE (2) #define ICMPV6_PACKET_TOO_BIG_CODE (0) /* ICMPv6 Neighbor Discovery Neighbor Solicitation. */ #define ND_NEIGHBOR_SOLICITATION_TYPE (135) #define ND_NEIGHBOR_SOLICITATION_CODE (0) /* ICMPv6 Neighbor Discovery Neighbor Advertisement. */ #define ND_NEIGHBOR_ADVERTISEMENT_TYPE (136) #define ND_NEIGHBOR_ADVERTISEMENT_CODE (0) static inline int pkt_is_nd_neighbor(uint8_t type, uint8_t code) { return (type == ND_NEIGHBOR_SOLICITATION_TYPE && code == ND_NEIGHBOR_SOLICITATION_CODE) || (type == ND_NEIGHBOR_ADVERTISEMENT_TYPE && code == ND_NEIGHBOR_ADVERTISEMENT_CODE); } static inline int arp_enabled(struct lls_config *lls_conf) { return lls_conf->arp_cache.iface_enabled(lls_conf->net, &lls_conf->net->front) || lls_conf->arp_cache.iface_enabled(lls_conf->net, &lls_conf->net->back); } static inline int nd_enabled(struct lls_config *lls_conf) { return lls_conf->nd_cache.iface_enabled(lls_conf->net, &lls_conf->net->front) || lls_conf->nd_cache.iface_enabled(lls_conf->net, &lls_conf->net->back); } /* * Interface for functional blocks to resolve IPv6 --> Ethernet addresses. * * Functionality is the same as for hold_arp() and put_arp(); see * comments above. */ int hold_nd(lls_req_cb cb, void *arg, struct in6_addr *ip_be, unsigned int lcore_id); int put_nd(struct in6_addr *ip_be, unsigned int lcore_id); static inline int ipv6_addrs_equal(const uint8_t *addr1, const uint8_t *addr2) { const uint64_t *paddr1 = (const uint64_t *)addr1; const uint64_t *paddr2 = (const uint64_t *)addr2; return (paddr1[0] == paddr2[0]) && (paddr1[1] == paddr2[1]); } /* Submit ARP packets to the LLS block (hardware filtering is not available). 
*/ void submit_arp(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface); struct lls_config *get_lls_conf(void); int run_lls(struct net_config *net_conf, struct lls_config *lls_conf); int l_list_lls_arp(lua_State *L); int l_list_lls_nd(lua_State *L); unsigned short icmp_cksum(void *buf, unsigned int size); #endif /* _GATEKEEPER_LLS_H_ */ ```
/content/code_sandbox/include/gatekeeper_lls.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
3,809
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_ACL_H_ #define _GATEKEEPER_ACL_H_ #include "gatekeeper_config.h" #include "gatekeeper_l2.h" #include "gatekeeper_net.h" struct acl_search { /* The number of packets held for classification. */ unsigned int num; /* Fixed field here. */ const uint8_t **data; /* List of references to each packet's mbuf. */ struct rte_mbuf **mbufs; }; /* Declare and initialize a struct acl_search. */ #define DEFINE_ACL_SEARCH(name, num_pkts) \ const uint8_t *name##_data_array[(num_pkts)]; \ struct rte_mbuf *name##_mbufs_array[(num_pkts)]; \ struct acl_search name = { \ .num = 0, \ .data = name##_data_array, \ .mbufs = name##_mbufs_array, \ } /* Classify batches of packets in @acl and invoke callback functions. */ int process_acl(struct gatekeeper_if *iface, unsigned int lcore_id, struct acl_search *acl, struct acl_state *astate, int acl_enabled, const char *proto_name); /* Free ACLs. */ void destroy_acls(struct acl_state *astate); /* This function expects that the mbuf includes the Ethernet header. */ static inline void add_pkt_acl(struct acl_search *acl, struct rte_mbuf *pkt) { /* pkt_in_skip_l2() was already called by GK or GT. 
*/ acl->data[acl->num] = rte_pktmbuf_mtod_offset(pkt, uint8_t *, pkt_in_l2_hdr_len(pkt)); acl->mbufs[acl->num] = pkt; acl->num++; } static inline bool ipv4_acl_enabled(const struct gatekeeper_if *iface) { return iface->ipv4_acls.enabled; } static inline bool ipv6_acl_enabled(const struct gatekeeper_if *iface) { return iface->ipv6_acls.enabled; } static inline int process_pkts_acl(struct gatekeeper_if *iface, unsigned int lcore, struct acl_search *acl, uint16_t proto) { if (acl->num == 0) return 0; switch (proto) { case RTE_ETHER_TYPE_IPV4: return process_acl(iface, lcore, acl, &iface->ipv4_acls, ipv4_acl_enabled(iface), "IPv4"); case RTE_ETHER_TYPE_IPV6: return process_acl(iface, lcore, acl, &iface->ipv6_acls, ipv6_acl_enabled(iface), "IPv6"); default: rte_panic("%s: called on unknown protocol %hu\n", __func__, proto); } } /* * IPv4 ACLs. */ /* Fields that can be checked in an IPv4 ACL rule. */ enum { PROTO_FIELD_IPV4, DST_FIELD_IPV4, SRCP_FIELD_IPV4, DSTP_FIELD_IPV4, NUM_FIELDS_IPV4, }; extern struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4]; RTE_ACL_RULE_DEF(ipv4_acl_rule, RTE_DIM(ipv4_defs)); /* Allocate IPv4 ACLs. */ int init_ipv4_acls(struct gatekeeper_if *iface); /* Register an IPv4 ACL rule and callback functions. */ int register_ipv4_acl(struct ipv4_acl_rule *rule, acl_cb_func cb_f, ext_cb_func ext_cb_f, struct gatekeeper_if *iface); /* Build the ACL trie. This should be invoked after all ACL rules are added. */ int build_ipv4_acls(struct gatekeeper_if *iface); /* * IPv6 ACLs. */ /* Fields that can be checked in an IPv6 ACL rule. */ enum { PROTO_FIELD_IPV6, DST1_FIELD_IPV6, DST2_FIELD_IPV6, DST3_FIELD_IPV6, DST4_FIELD_IPV6, SRCP_FIELD_IPV6, DSTP_FIELD_IPV6, TYPE_FIELD_ICMPV6, NUM_FIELDS_IPV6, }; extern struct rte_acl_field_def ipv6_defs[NUM_FIELDS_IPV6]; RTE_ACL_RULE_DEF(ipv6_acl_rule, RTE_DIM(ipv6_defs)); /* Allocate IPv6 ACLs. */ int init_ipv6_acls(struct gatekeeper_if *iface); /* Register an IPv6 ACL rule and callback functions. 
*/ int register_ipv6_acl(struct ipv6_acl_rule *rule, acl_cb_func cb_f, ext_cb_func ext_cb_f, struct gatekeeper_if *iface); /* Build the ACL trie. This should be invoked after all ACL rules are added. */ int build_ipv6_acls(struct gatekeeper_if *iface); #endif /* _GATEKEEPER_ACL_H_ */ ```
/content/code_sandbox/include/gatekeeper_acl.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,104
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <path_to_url>.
 */

#ifndef _GATEKEEPER_GK_FIB_H_
#define _GATEKEEPER_GK_FIB_H_

#include <rte_atomic.h>

#include "gatekeeper_rib.h"

/*
 * An 8-bit table: 256 next-hop entries, one for each possible value of
 * the next byte of the address beyond the 24 bits covered by @tbl24.
 */
struct fib_tbl8 {
	rte_atomic32_t nh[0x100];
};

/* No-next-hop constant. */
#define FIB_NO_NH (0x7FFFFFFF)

/* Marks a slot of @tbl8_pool that does not currently hold a free table. */
#define FIB_TBL8_FREE_INDEX ((uint32_t)-1)

struct fib_head {
	/* RIB associated with the FIB. */
	struct rib_head rib;

	/* Length of address in bytes. */
	uint8_t addr_len_bytes;

	/* Total number of allocated 8-bit tables. */
	uint32_t num_tbl8s;

	/*
	 * Index of the first free 8-bit table in @tbl8_pool.
	 *
	 * If the pool is empty,
	 * tbl8_pool[first_free_tbl8_idx] == FIB_TBL8_FREE_INDEX.
	 */
	uint32_t first_free_tbl8_idx;

	/*
	 * First free index in @tbl8_pool.
	 *
	 * If the pool is full,
	 * tbl8_pool[first_free_idx] != FIB_TBL8_FREE_INDEX.
	 */
	uint32_t first_free_idx;

	/* Allocated 8-bit tables. */
	struct fib_tbl8 *tbl8s;

	/*
	 * Pool of free 8-bit tables.
	 *
	 * The pool is implemented from scratch as a circular list over
	 * an array to *practically* guarantee that freed 8-bit tables are
	 * always consistent, so there is no need to synchronize readers.
	 */
	uint32_t *tbl8_pool;

	/* Table for the 24 most significant bits. */
	rte_atomic32_t tbl24[0x1000000];
};

/*
 * Create a new FIB.
 *
 * @name is the prefix of the names of the internal memory pools.
 *
 * @socket_id is the NUMA node on which internal memory is allocated.
 * The value can be SOCKET_ID_ANY if there is no NUMA constraint.
 *
 * @max_length is the maximum length of a network address.
 * @max_length must be a multiple of 8, greater than or equal to 32, and
 * less than or equal to RIB_MAX_ADDRESS_LENGTH.
 * Typical values: 32 for IPv4 and 128 for IPv6.
 *
 * @max_rules is the maximum number of rules (i.e. a prefix and a next hop)
 * that this FIB is expected to have. If the FIB has space for more rules,
 * it will take extra rules. In spite of the name, this parameter is meant
 * to mean the minimum number of rules that the FIB will support.
 *
 * @num_tbl8s is the number of TBL8s to be allocated.
 * @num_tbl8s must be less than FIB_TBL8_FREE_INDEX.
 * TBL8s are used for network prefixes that are longer than 24 bits.
 */
int fib_create(struct fib_head *fib, const char *name, int socket_id,
	uint8_t max_length, uint32_t max_rules, uint32_t num_tbl8s);

/* Free all resources associated to @fib but the memory pointed by it. */
void fib_free(struct fib_head *fib);

/*
 * Return RIB associated to @fib.
 *
 * NOTE: Callers should only make read-only accesses to the returned RIB.
 */
static inline struct rib_head *
fib_get_rib(struct fib_head *fib)
{
	return &fib->rib;
}

/* DO NOT CALL THIS FUNCTION, CALL fib_add() INSTEAD. */
int __fib_add(struct fib_head *fib, const uint8_t *address, uint8_t depth,
	uint32_t next_hop, bool failsafe);

/*
 * Add a rule to the FIB.
 *
 * @address is in network order (big endian).
 * @address == NULL is equivalent to the all-zero address.
 *
 * NOTES
 *	The most significant bit of @next_hop is not available.
 *
 *	The value FIB_NO_NH is reserved to designate that there is
 *	no next hop.
 *
 * RETURN
 *	-EINVAL if @next_hop >= FIB_NO_NH.
 *	-EEXIST if prefix already exist in @fib.
 *	0 if it successfully adds the new rule.
 */
static inline int
fib_add(struct fib_head *fib, const uint8_t *address, uint8_t depth,
	uint32_t next_hop)
{
	return __fib_add(fib, address, depth, next_hop, true);
}

/* DO NOT CALL THIS FUNCTION, CALL fib_delete() INSTEAD. */
int __fib_delete(struct fib_head *fib, const uint8_t *address, uint8_t depth,
	bool failsafe);

/*
 * Delete a rule from the FIB.
 *
 * @address is in network order (big endian).
 * @address == NULL is equivalent to the all-zero address.
 *
 * RETURN
 *	-ENOENT if the prefix does not exist in @fib.
 *	0 if it successfully deletes the rule.
 */
static inline int
fib_delete(struct fib_head *fib, const uint8_t *address, uint8_t depth)
{
	return __fib_delete(fib, address, depth, true);
}

/*
 * Look an address up on the FIB.
 *
 * @address is in network order (big endian).
 * @address == NULL is equivalent to the all-zero address.
 *
 * The next hop of the longest rule for @address is saved in @pnext_hop.
 *
 * RETURN
 *	0 on lookup hit.
 *	-ENOENT on lookup miss.
 *	A negative value on failure.
 */
int fib_lookup(const struct fib_head *fib, const uint8_t *address,
	uint32_t *pnext_hop);

/*
 * Look multiple addresses up on the FIB.
 *
 * Each @addresses[i] is in network order (big endian).
 * @addresses[i] == NULL is equivalent to the all-zero address.
 *
 * If the lookup of @addresses[i] fails, next_hops[i] = FIB_NO_NH.
 *
 * This function is an optimized version of the following code:
 *	unsigned int i;
 *	for (i = 0; i < n; i++)
 *		if (fib_lookup(fib, addresses[i], &next_hops[i]) != 0)
 *			next_hops[i] = FIB_NO_NH;
 */
void fib_lookup_bulk(const struct fib_head *fib, const uint8_t **addresses,
	uint32_t *next_hops, unsigned int n);

#endif /* _GATEKEEPER_GK_FIB_H_ */
```
/content/code_sandbox/include/gatekeeper_fib.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,493
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <path_to_url>.
 */

#ifndef _GATEKEEPER_L2_H_
#define _GATEKEEPER_L2_H_

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

#include "gatekeeper_main.h"
#include "gatekeeper_net.h"

/* Smallest value of the Ethernet type/length field that is an EtherType. */
#define ETHERNET_II_ETHERTYPES (0x0600)

/*
 * Log a frame whose EtherType Gatekeeper does not handle.
 *
 * @name identifies the caller in the log message; @ether_type is the
 * type/length field value being reported.
 */
static inline void
log_unknown_l2(const char *name, uint16_t ether_type)
{
	/*
	 * If this field is >= 0x0600, it is an EtherType
	 * field from the Ethernet II standard.
	 *
	 * If this field is <= 0x05DC, it is a length
	 * field from the 802.3 standard. Any other
	 * value is invalid. We only log this when in
	 * debug mode.
	 */
	if (ether_type < ETHERNET_II_ETHERTYPES) {
		G_LOG(DEBUG,
			"l2: %s: invalid Ethernet field or frame not Ethernet II:%" PRIu16 "\n",
			name, ether_type);
	} else {
		G_LOG(NOTICE, "l2: %s: unknown EtherType %" PRIu16 "\n",
			name, ether_type);
	}
}

/*
 * Return the L2 header length of a received packet.
 *
 * WARNING
 *	Note that in order to use this function, @pkt must have first gone
 *	through pkt_in_skip_l2() or another function to set its packet type.
 */
static inline size_t
pkt_in_l2_hdr_len(struct rte_mbuf *pkt)
{
	return pkt->l2_type != RTE_PTYPE_L2_ETHER_VLAN
		? sizeof(struct rte_ether_hdr)
		: sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr);
}

/*
 * Skip the L2 header of the packet, skipping over any VLAN
 * headers if present. A pointer to the next header is returned.
 *
 * Uses @iface->l2_len_out, the precomputed egress L2 length for the
 * interface, rather than inspecting the frame itself.
 */
static inline void *
pkt_out_skip_l2(struct gatekeeper_if *iface, struct rte_ether_hdr *eth_hdr)
{
	return ((uint8_t *)eth_hdr) + iface->l2_len_out;
}

/*
 * Skip the L2 header of the packet, skipping over any VLAN
 * headers if present. The EtherType of the next header is returned
 * (in network order).
 *
 * As a side effect, sets @pkt->l2_type so that pkt_in_l2_hdr_len()
 * can later recover the L2 header length.
 */
static inline uint16_t
pkt_in_skip_l2(struct rte_mbuf *pkt, struct rte_ether_hdr *eth_hdr,
	void **next_hdr)
{
	RTE_VERIFY(next_hdr != NULL);

	if (likely(eth_hdr->ether_type !=
			rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
		/* No VLAN tag: the next header follows the Ethernet header. */
		*next_hdr = &eth_hdr[1];
		pkt->l2_type = RTE_PTYPE_UNKNOWN;
		return eth_hdr->ether_type;
	} else {
		/* VLAN-tagged: the real EtherType is in the VLAN header. */
		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)&eth_hdr[1];
		*next_hdr = &vlan_hdr[1];
		pkt->l2_type = RTE_PTYPE_L2_ETHER_VLAN;
		return vlan_hdr->eth_proto;
	}
}

/*
 * Given an Ethernet header and room to put a VLAN header,
 * set the EtherType field and the VLAN header fields
 * using the given VLAN tag.
 *
 * @vlan_tag_be is already in network order; @eth_proto is in host order
 * and is converted here.
 */
static inline void
fill_vlan_hdr(struct rte_ether_hdr *eth_hdr, uint16_t vlan_tag_be,
	uint16_t eth_proto)
{
	struct rte_vlan_hdr *vlan_hdr = (struct rte_vlan_hdr *)&eth_hdr[1];
	eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	vlan_hdr->vlan_tci = vlan_tag_be;
	vlan_hdr->eth_proto = rte_cpu_to_be_16(eth_proto);
}

struct rte_ether_hdr *adjust_pkt_len(struct rte_mbuf *pkt,
	struct gatekeeper_if *iface, int bytes_to_add);
int verify_l2_hdr(struct gatekeeper_if *iface, struct rte_ether_hdr *eth_hdr,
	uint32_t l2_type, const char *proto_name, uint16_t vlan_tag_be);

#endif /* _GATEKEEPER_L2_H_ */
```
/content/code_sandbox/include/gatekeeper_l2.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
974
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_NET_H_ #define _GATEKEEPER_NET_H_ #include <netinet/in.h> #include <stdbool.h> #include <stdint.h> #include <sys/capability.h> #include <rte_acl.h> #include <rte_eth_bond.h> #include <rte_ethdev.h> #include <rte_timer.h> #include <rte_ether.h> #include "gatekeeper_flow.h" /* To mark whether Gatekeeper/Grantor server configures IPv4 or IPv6. */ #define CONFIGURED_IPV4 (1) #define CONFIGURED_IPV6 (2) #define IPv6_DEFAULT_VTC_FLOW (0x60000000) #define MAX_INET_ADDRSTRLEN (INET6_ADDRSTRLEN) struct ipaddr { /* The network layer protocol of the nexthop. */ uint16_t proto; /* The IP address of the nexthop. */ union { struct in_addr v4; struct in6_addr v6; } ip; }; /* * The minimum and maximum sizes of the (secret) RSS hash key. * * These constants MUST be multiples of 4 since functions such * rte_convert_rss_key() expects so. */ #define GATEKEEPER_RSS_MIN_KEY_LEN (40) #define GATEKEEPER_RSS_MAX_KEY_LEN (128) /* * The maximum number of "rte_eth_rss_reta_entry64" structures can be used to * configure the Redirection Table of the Receive Side Scaling (RSS) feature. * Notice, each "rte_eth_rss_reta_entry64" structure can configure 64 entries * of the table. To configure more than 64 entries supported by hardware, * an array of this structure is needed. */ #define GATEKEEPER_RETA_MAX_SIZE \ (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE) struct gatekeeper_rss_config { uint16_t reta_size; struct rte_eth_rss_reta_entry64 reta_conf[GATEKEEPER_RETA_MAX_SIZE]; }; /* Maximum number of ACL classification types. 
*/ #define GATEKEEPER_ACL_MAX (16) /* * Some blocks can receive packets via different * methods, such as via the NIC or mailboxes. These * methods can change depending on hardware support. */ enum { /* Receive packets from a NIC. */ RX_METHOD_NIC = 0x1, /* * Receive packets from a mailbox. * * Note: All packets that are matched through * an ACL are delivered through mailboxes to * the block that processes them. */ RX_METHOD_MB = 0x2 }; /* * Format of function called when a rule matches in the IPv6 ACL. * Need forward declaration because acl_cb_func and struct gatekeeper_if * are circularly defined. */ struct gatekeeper_if *iface; typedef int (*acl_cb_func)(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface); /* Format of function called when no rule matches in the IPv6 ACL. */ typedef int (*ext_cb_func)(struct rte_mbuf *pkt, struct gatekeeper_if *iface); struct acl_state { /* Per-socket ACLs used for classifying packets. */ struct rte_acl_ctx *acls[RTE_MAX_NUMA_NODES]; /* * Callback functions for each ACL rule type. * * On error, these functions should return a negative value * and free all packets that have not already been handled. */ acl_cb_func funcs[GATEKEEPER_ACL_MAX]; /* * Callback functions for each ACL rule type with * extension headers. * * Returning values: 0 means a match and a negative value * means an error or that there was no match. */ ext_cb_func ext_funcs[GATEKEEPER_ACL_MAX]; /* Number of ACL types installed in @funcs. */ unsigned int func_count; /* * Whether this ACL is enabled. * * Set to true by ipv{4,6}_pkt_filter_add() when an ACL is * determined to be needed. */ bool enabled; }; /* * A Gatekeeper interface is specified by a set of PCI addresses * that map to DPDK port numbers. If multiple ports are specified, * then the ports are bonded. */ struct gatekeeper_if { /* The ports (in PCI address format) that compose this interface. */ char **pci_addrs; /* The number of ports that in this interface (length of @pci_addrs). 
*/ uint8_t num_ports; /* Name of the interface. Needed for setting/getting bonded port. */ char *name; /* Number of RX and TX queues for this interface. */ uint16_t num_rx_queues; uint16_t num_tx_queues; /* * The total burst size of any functional block that * does not have hardware support for this interface. */ uint16_t total_pkt_burst; /* Timeouts for cache entries (in seconds) for Link Layer Support. */ uint32_t arp_cache_timeout_sec; uint32_t nd_cache_timeout_sec; /* The type of bonding used for this interface, if needed. */ uint32_t bonding_mode; /* * Whether @ipv4_vlan_tag/@ipv6_vlan_tag should be applied to egress * traffic. */ int vlan_insert; /* * Maximum permitted length of packets sent from and received on * this interface. It is used to configure both the MTU of the * device and the maximum RX packet length offload feature. * * Notes: * * The value here must conform to DPDK's limits (typically * 64-16128 bytes) and also to whatever limits are imposed by * the specific NIC being used. * * Before adjusting this value, you should take into account * the hardware capabilities and the configured mbuf segment size * in Gatekeeper. By default, the mbuf segment size and MTU are * both set to 2048. * * Gatekeeper servers do not fragment packets on the back interface. * If the back network does not support frame sizes sent by Gatekeeper, * the packet will be dropped. For example, if Gatekeeper receives * a frame close to 1500 bytes and encapsulates it (resulting in a * frame above 1500 bytes), then the back interface may be able to * transmit it but the network may drop it. */ uint16_t mtu; /* The maximum packet lifetime. */ uint8_t ipv6_default_hop_limits; /* * According to the 82599 datasheet, a receive descriptor is * a data structure that contains the receive data buffer address * and fields for hardware to store packet information. 
Upon receipt * of a packet for this device, hardware stores the packet data into * the indicated buffer and writes the length, status and errors to * the receive descriptor. * * Each packet buffer is indicated by a descriptor. */ uint16_t num_rx_desc; /* * Similar to the receive descriptor, each packet buffer is * indicated by a descriptor as well. */ uint16_t num_tx_desc; /* * Whether IPv4 UDP checksums should be enabled in hardware. * Technically, this only affects the front interface, since * only the GT block (on Grantor servers) uses UDP checksums. */ bool ipv4_hw_udp_cksum; /* * Whether IPv6 UDP checksums should be enabled in hardware. * Technically, this only affects the front interface, since * only the GT block (on Grantor servers) uses UDP checksums. */ bool ipv6_hw_udp_cksum; /* Whether IPv4 (L3) checksums should be enabled in hardware. */ bool ipv4_hw_cksum; /* * This field decides if the flag GRND_RANDOM is passed to getradom(2) * while initializing field @rss_key. */ bool guarantee_random_entropy; /* * Some NICs do not support the RSS hash functions * ETH_RSS_IPV4 amd ETH_RSS_IPV6 (i.e. RSS hash for IPv4 or IPv6 * non-fragmented packets). But they may support the hash functions * ETH_RSS_NONFRAG_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_UDP, * ETH_RSS_NONFRAG_IPV6_TCP, and ETH_RSS_NONFRAG_IPV6_UDP, and * setting the input set of the hash these hash functions. * An example of this behavior is the PMD i40e. * * Enabling the parameter below, Gatekeeper will try the alternative * RSS hash. * * Currently, this parameter only works for PMD i40e. * * If the interface is bonded, all ports in the bond must either * need this parameter disabled or enabled. */ bool alternative_rss_hash; /* * The fields below are for internal use. * Configuration files should not refer to them. */ /* Link layer header length for egress packets from this interface. */ size_t l2_len_out; /* VLAN tag to be applied to all outbound IPv4 packets, in network order. 
*/ uint16_t ipv4_vlan_tag_be; /* VLAN tag to be applied to all outbound IPv6 packets, in network order. */ uint16_t ipv6_vlan_tag_be; /* Ethernet address of this interface. */ struct rte_ether_addr eth_addr; /* DPDK port IDs corresponding to each address in @pci_addrs. */ uint16_t *ports; /* * The DPDK port ID for this interface. * * If @ports only has one element, then @id is that port. * If @ports has multiple elements, then @id is the DPDK * *bonded* port ID representing all of those ports. */ uint8_t id; /* The RX and TX queue assignments on this interface for each lcore. */ int16_t rx_queues[RTE_MAX_LCORE]; int16_t tx_queues[RTE_MAX_LCORE]; /* * The next RX and TX queues to be assigned on this interface. * We need atomic here in case multiple blocks are trying to * configure their queues on the same interface at the same time. */ rte_atomic16_t rx_queue_id; rte_atomic16_t tx_queue_id; /* * Specify the IPv4 and IPv6 addresses of this interface. * Notice that, while one address must always be there, * there may not be the second address. */ uint8_t configured_proto; /* IPv4 address and network mask. */ struct in_addr ip4_addr; struct in_addr ip4_mask; uint8_t ip4_addr_plen; /* * Global IPv6 address and network mask. * * This is the address/mask given by configuration * and used for global routing. */ struct in6_addr ip6_addr; struct in6_addr ip6_mask; uint8_t ip6_addr_plen; /* * Addresses related to Neighbor Discovery. */ /* * Link-local IPv6 address and network mask. * * ND messages can be sent from, and to, link-local IPv6 * addresses that are only routable inside the local * network. We are also responsible for responding to * resolution requests for the link-local address. It is * automatically generated. */ struct in6_addr ll_ip6_addr; struct in6_addr ll_ip6_mask; /* * IPv6 solicited-node multicast addresses. 
* * If a resolution is unknown, an ND Solicitation is sent * to a solicited-node multicast address to reduce the * number of hosts in the broadcast domain that receive * the Solicitation. Two of these multicast addresses are * automatically generated: one that covers the global IPv6 * address and one that covers the IPv6 link-local address. */ struct in6_addr ip6_mc_addr; struct in6_addr ll_ip6_mc_addr; /* * IPv6 multicast Ethernet addresses. * * For packets that use a solicited-node multicast address * for the IPv6 destination field, the Ethernet destination * field should also use a special IPv6 multicast address. * Two such addresses are automatically generated: they cover * the global and link-local solicited-node multicast addresses. */ struct rte_ether_addr eth_mc_addr; struct rte_ether_addr ll_eth_mc_addr; /* Timer to transmit from LLS block to fulfill LACP TX requirement. */ struct rte_timer lacp_timer; /* ACLs and associated callback functions for matching packets. */ struct acl_state ipv4_acls; struct acl_state ipv6_acls; /* Whether this interface supports RSS. */ bool rss; /* Whether the interface has been initialized. */ bool alive; /* Length of the RSS key in bytes. */ uint8_t rss_key_len; /* * RSS hash key. * * The secret key of the RSS hash (RSK) must be random in order * to prevent hackers from knowing it. */ uint8_t rss_key[GATEKEEPER_RSS_MAX_KEY_LEN]; /* @rss_key ready for use with rte_softrss_be(). */ uint8_t rss_key_be[GATEKEEPER_RSS_MAX_KEY_LEN]; }; uint32_t rss_flow_hash(const struct gatekeeper_if *iface, const struct ip_flow *flow); /* * The atomic counters for @rx_queue_id and @tx_queue_id are * signed, so we get about 2^15 possible queues available for use, * which is much more than is needed. * * Use this constant as an out-of-band value to represent that * a queue has not been allocated; if one of the atomic counters * reaches this value, we have exceeded the number of possible * queues. 
*/ #define GATEKEEPER_QUEUE_UNALLOCATED (INT16_MIN) enum queue_type { QUEUE_TYPE_RX, QUEUE_TYPE_TX, QUEUE_TYPE_MAX, }; int get_queue_id(struct gatekeeper_if *iface, enum queue_type ty, unsigned int lcore, struct rte_mempool *mp); /* Configuration for the Network. */ struct net_config { /* * Set to zero (false) when a back interface is * not needed, such as when running gatekeeper * for Grantor. */ int back_iface_enabled; /* * The NUMA nodes used in the host. Element i is true * if NUMA node i is being used; otherwise it is false. */ bool *numa_used; /* Log level for all non-block related activity. */ uint32_t log_level; /* How often the log file should be rotated. The unit is second. */ uint32_t rotate_log_interval_sec; /* * The fields below are for internal use. * Configuration files should not refer to them. */ struct gatekeeper_if front; struct gatekeeper_if back; /* The total number of NUMA nodes in the host. */ uint32_t numa_nodes; /* The ID of the user that will run Gatekeeper after it boots. */ uid_t pw_uid; /* The group ID of the user that will run Gatekeeper after it boots. */ gid_t pw_gid; }; /* * Initializes an array of 16 bytes that represents the IPv6 solicited * node multicast address. Users of this macro need to pass the IPv6 * address as an array of 16 bytes, the last three of which are used * as the last three bytes of the multicast address as well. */ #define IPV6_SN_MC_ADDR(ipv6) { \ 0xFF, 0x02, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x01, \ 0xFF, ipv6[13], ipv6[14], ipv6[15], \ } /* Call lacp_enabled() instead this function wherever possible. */ static inline int __lacp_enabled(const struct gatekeeper_if *iface) { return iface->bonding_mode == BONDING_MODE_8023AD; } static inline int lacp_enabled(struct net_config *net, struct gatekeeper_if *iface) { /* When @iface is the back, need to make sure it's enabled. */ if (iface == &net->back) return net->back_iface_enabled && __lacp_enabled(iface); /* @iface is the front interface. 
*/ return __lacp_enabled(iface); } int lua_init_iface(struct gatekeeper_if *iface, const char *iface_name, const char **pci_addrs, uint8_t num_pci_addrs, const char **ip_cidrs, uint8_t num_ip_cidrs, uint16_t ipv4_vlan_tag, uint16_t ipv6_vlan_tag); int get_ip_type(const char *ip_addr); int convert_str_to_ip(const char *ip_addr, struct ipaddr *res); int convert_ip_to_str(const struct ipaddr *ip_addr, char *res, int n); int ethertype_flow_add(struct gatekeeper_if *iface, uint16_t ether_type, uint16_t queue_id); /* * Add a filter for IPv4 packets based on the destination IP address, * source and destination ports, and protocol. * * The destination IP address as well as all ports and masks * should all be big endian when passed to this function. Although * ntuple filters use big endian values, ACLs use host ordering, * so this function converts these values to host ordering when * the ACL is used. * * @dst_ip_be: destination IP address to match against * @src_port_be: L4 source port to match against * @src_port_mask_be: mask for @src_port_be; set to 0xFFFF to match * against @src_port_be, or set to 0 to not match against @src_port_be * @dst_port_be: L4 destination port to match against * @dst_port_mask_be: mask for @dst_port_be; set to 0xFFFF to match * against @dst_port_be, or set to 0 to not match against @dst_port_be * @proto: next header protocol to match against * * Filters can be installed using whatever methods are available, * including ntuple filters (if supported by hardware) or ACLs * (as a software backup). Depending on the method used, the block * that uses the filter may need to query the NIC or a mailbox, etc. * Therefore, this function inserts the RX method needed into * @rx_method by logical OR'ing it into the existing value. 
* * @queue_id: the RX queue ID to which matching packets are * steered (if ntuple filters are supported) * @cb_f: the function that is invoked on matching packets * (if the ACL is being used) * @ext_cb_f: the function that is invoked on potential * matching packets that checks for variable-length/extension * headers (if the ACL is being used) */ int ipv4_pkt_filter_add(struct gatekeeper_if *iface, rte_be32_t dst_ip_be, rte_be16_t src_port_be, rte_be16_t src_port_mask_be, rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be, uint8_t proto, uint16_t queue_id, acl_cb_func cb_f, ext_cb_func ext_cb_f, uint8_t *rx_method); /* * Add a filter for IPv6 packets based on the destination IP address, * source and destination ports, and protocol. * * The destination IP address as well as all ports and masks * should all be big endian when passed to this function. Although * ntuple filters use big endian values, ACLs use host ordering, * so this function converts these values to host ordering when * the ACL is used. * * @dst_ip_be_ptr32: pointer to destination IP address to match against * @src_port_be: L4 source port to match against * @src_port_mask_be: mask for @src_port_be; set to 0xFFFF to match * against @src_port_be, or set to 0 to not match against @src_port_be * @dst_port_be: L4 destination port to match against * @dst_port_mask_be: mask for @dst_port_be; set to 0xFFFF to match * against @dst_port_be, or set to 0 to not match against @dst_port_be * @proto: next header protocol to match against * * Filters can be installed using whatever methods are available, * including ntuple filters (if supported by hardware) or ACLs * (as a software backup). Depending on the method used, the block * that uses the filter may need to query the NIC or a mailbox, etc. * Therefore, this function inserts the RX method needed into * @rx_method by logical OR'ing it into the existing value. 
* * @queue_id: the RX queue ID to which matching packets are * steered (if ntuple filters are supported) * @cb_f: the function that is invoked on matching packets * (if the ACL is being used) * @ext_cb_f: the function that is invoked on potential * matching packets that checks for variable-length/extension * headers (if the ACL is being used) */ int ipv6_pkt_filter_add(struct gatekeeper_if *iface, const rte_be32_t *dst_ip_be_ptr32, rte_be16_t src_port_be, rte_be16_t src_port_mask_be, rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be, uint8_t proto, uint16_t queue_id, acl_cb_func cb_f, ext_cb_func ext_cb_f, uint8_t *rx_method); /* * Drop all Linux capabilities (capabilities(7)) * except for those needed by a block. * * @caps contains the @ncap number of entries that * the block wants to keep. */ int needed_caps(int ncap, const cap_value_t *caps); struct net_config *get_net_conf(void); struct gatekeeper_if *get_if_front(struct net_config *net_conf); struct gatekeeper_if *get_if_back(struct net_config *net_conf); int gatekeeper_setup_rss(uint16_t port_id, uint16_t *queues, uint16_t num_queues); int gatekeeper_get_rss_config(uint16_t port_id, struct gatekeeper_rss_config *rss_conf); int gatekeeper_setup_user(struct net_config *net_conf, const char *user); int gatekeeper_init_network(struct net_config *net_conf); void gatekeeper_free_network(void); bool ipv4_configured(struct net_config *net_conf); bool ipv6_configured(struct net_config *net_conf); unsigned int calculate_mempool_config_para(const char *block_name, struct net_config *net_conf, unsigned int total_pkt_burst); struct rte_mempool *create_pktmbuf_pool(const char *block_name, unsigned int lcore, unsigned int num_mbuf); /* * No cleanup for this step, since DPDK * doesn't offer a way to deallocate pools. 
*/ static inline void destroy_mempool(__attribute__((unused)) struct rte_mempool *mp) { return; } static inline bool ipv4_if_configured(const struct gatekeeper_if *iface) { return !!(iface->configured_proto & CONFIGURED_IPV4); } static inline bool ipv6_if_configured(const struct gatekeeper_if *iface) { return !!(iface->configured_proto & CONFIGURED_IPV6); } /* * Compute the IPv4 checksum, either in hardware or software, depending * on the capabilities of the NIC and the configuration. * * The pkt->l2_len and pkt->l3_len must be set before calling this function, * although this is strictly only needed for hardware checksums. */ static inline void set_ipv4_checksum(struct gatekeeper_if *iface, struct rte_mbuf *pkt, struct rte_ipv4_hdr *ipv4) { /* * The IP header checksum field must be set to 0 before * computing the checksum (in hardware or software). */ ipv4->hdr_checksum = 0; pkt->ol_flags |= RTE_MBUF_F_TX_IPV4; if (likely(iface->ipv4_hw_cksum)) pkt->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM; else ipv4->hdr_checksum = rte_ipv4_cksum(ipv4); } static inline int max_prefix_len(int ip_type) { RTE_VERIFY(ip_type == AF_INET || ip_type == AF_INET6); return ip_type == AF_INET ? sizeof(struct in_addr) * 8 : sizeof(struct in6_addr) * 8; } static inline void ip4_prefix_mask(uint8_t prefix_len, struct in_addr *ip4_mask) { RTE_VERIFY(prefix_len <= 32 && ip4_mask != NULL); /* * Need to be careful in case @prefix_len == 0, * since in that case we will be shifting by 32 * bits, which is undefined for a 32-bit quantity. * So shift using a 0ULL (at least 64 bits), and then * cast back down to a 32-bit unsigned integer * implicitly using rte_cpu_to_be_32(). */ ip4_mask->s_addr = rte_cpu_to_be_32(~0ULL << (32 - prefix_len)); } static inline void ip6_prefix_mask(uint8_t prefix_len, struct in6_addr *ip6_mask) { uint64_t *paddr; RTE_VERIFY(prefix_len <= 128 && ip6_mask != NULL); /* * No portable way to do the same trick as IPv4, * so make @prefix_len == 0 into its own case. 
* Then, the other two cases shift by at most 63 bits. */ paddr = (uint64_t *)ip6_mask->s6_addr; if (prefix_len == 0) { paddr[0] = 0ULL; paddr[1] = 0ULL; } else if (prefix_len <= 64) { paddr[0] = rte_cpu_to_be_64(~0ULL << (64 - prefix_len)); paddr[1] = 0ULL; } else { paddr[0] = ~0ULL; paddr[1] = rte_cpu_to_be_64(~0ULL << (128 - prefix_len)); } } static inline bool ip4_same_subnet(uint32_t addr1, uint32_t addr2, uint32_t ip4_mask) { return !((addr1 ^ addr2) & ip4_mask); } static inline bool ip6_same_subnet(const struct in6_addr *addr1, const struct in6_addr *addr2, const struct in6_addr *ip6_mask) { const uint64_t *paddr_p1 = (const uint64_t *)addr1; const uint64_t *paddr_p2 = (const uint64_t *)addr2; const uint64_t *pmask_p = (const uint64_t *)ip6_mask; return (!((paddr_p1[0] ^ paddr_p2[0]) & pmask_p[0]) && !((paddr_p1[1] ^ paddr_p2[1]) & pmask_p[1])); } static inline int drop_packet(struct rte_mbuf *pkt) { rte_pktmbuf_free(pkt); return 0; } void send_pkts(uint8_t port, uint16_t tx_queue, uint16_t num_pkts, struct rte_mbuf **bufs); /* * Postpone the execution of f(arg) until the Lua configuration finishes, * but before the network devices start. * * This initilization stage is perfect for allocation of queues in * the network devices. * * If you do not need to allocate any queue, you can may call * net_launch_at_stage1() instead. * * front_rx_queues, front_tx_queues, back_rx_queues, and back_tx_queues are * the number of queues on the front and back interfaces of the receiving and * transmitting types. * * If the back interface is not enabled, the parameters back_rx_queues and * back_tx_queues are ignored. * * RETURN * Return 0 if success; otherwise -1. */ int net_launch_at_stage1(struct net_config *net, int front_rx_queues, int front_tx_queues, int back_rx_queues, int back_tx_queues, lcore_function_t *f, void *arg); /* * Do any processing necessary to end stage 2 -- the last part of the * network configuration that happens before individual lcores are * launched. 
This is useful for any network configuration that requires * input from the individual blocks in stage 2. */ int finalize_stage2(void *arg); #endif /* _GATEKEEPER_NET_H_ */ ```
/content/code_sandbox/include/gatekeeper_net.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
6,403
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <path_to_url>.
 */

#ifndef _GATEKEEPER_SOL_H_
#define _GATEKEEPER_SOL_H_

#include <stdint.h>

#include <rte_atomic.h>

#include "list.h"

/*
 * The maximum priority that a packet can be assigned.
 *
 * Packets are assigned priorities [0, 63] due to
 * the limits of the IP DSCP field.
 */
#define GK_MAX_REQ_PRIORITY (63)

/*
 * XXX #36 The DPDK packet scheduler uses __rte_cache_aligned
 * on member @memory and on the struct as a whole. Should
 * it be used here?
 */
struct req_queue {
	/* Length of the priority queue. */
	uint32_t len;

	/* The highest priority of any packet currently in the queue. */
	uint16_t highest_priority;
	/* The lowest priority of any packet currently in the queue. */
	uint16_t lowest_priority;

	/*
	 * The head of the priority queue, referencing the node
	 * that contains the packet with the highest priority.
	 */
	struct list_head head;

	/* Array of pointers to the last packets of each priority. */
	struct rte_mbuf *priorities[GK_MAX_REQ_PRIORITY + 1];

	/*
	 * Token bucket algorithm state.
	 */

	/* Capacity of the token bucket (the max number of credits). */
	uint64_t tb_max_credit_bytes;
	/* Number of credits currently in the token bucket. */
	uint64_t tb_credit_bytes;

	/*
	 * CPU cycles per byte for the request queue,
	 * approximated as a rational a/b.
	 */
	uint64_t cycles_per_byte_a;
	uint64_t cycles_per_byte_b;

	/*
	 * The floor function of CPU cycles per byte, which is useful
	 * to quickly determine whether we have enough cycles to
	 * add some number of credits before executing a division.
	 */
	uint64_t cycles_per_byte_floor;

	/* Current CPU time measured in CPU cycles. */
	uint64_t time_cpu_cycles;
};

/* Structures for each SOL instance. */
struct sol_instance {
	/*
	 * Ring into which GK instances enqueue request packets
	 * to be serviced and sent out by the Solicitor.
	 */
	struct rte_ring *ring;

	/* TX queue on the back interface. */
	uint16_t tx_queue_back;

	/* Priority queue for request packets. */
	struct req_queue req_queue;
} __rte_cache_aligned;

/* Configuration for the Solicitor functional block. */
struct sol_config {
	/* Maximum number of requests to store in priority queue at once. */
	unsigned int pri_req_max_len;

	/* Maximum request enqueue/dequeue size. */
	unsigned int enq_burst_size;
	unsigned int deq_burst_size;

	/* Token bucket rate approximation error. */
	double tb_rate_approx_err;

	/*
	 * Bandwidth of request channel in Mbps.
	 *
	 * Used only in the case when the Solicitor
	 * block cannot read the back interface's
	 * available bandwidth, such as is the case
	 * with the Amazon ENA. Should be calculated
	 * by the operator.
	 *
	 * Should be set to 0 if not needed.
	 */
	double req_channel_bw_mbps;

	/* Log level for SOL block. */
	uint32_t log_level;
	/* Log ratelimit interval in ms for SOL block. */
	uint32_t log_ratelimit_interval_ms;
	/* Log ratelimit burst size for SOL block. */
	uint32_t log_ratelimit_burst;

	/*
	 * The fields below are for internal use.
	 * Configuration files should not refer to them.
	 */

	/*
	 * Number of references to this struct.
	 *
	 * The resources associated to this struct are only freed
	 * when field @ref_cnt reaches zero.
	 *
	 * Use sol_conf_hold() and sol_conf_put() to acquire and release
	 * a reference to this struct.
	 */
	rte_atomic32_t ref_cnt;

	/* The lcore ids at which each instance runs. */
	unsigned int *lcores;

	/* The number of lcore ids in @lcores. */
	int num_lcores;

	struct sol_instance *instances;
	struct net_config *net;
};

struct sol_config *alloc_sol_conf(void);
int run_sol(struct net_config *net_conf, struct sol_config *sol_conf);
int gk_solicitor_enqueue_bulk(struct sol_instance *instance,
	struct rte_mbuf **pkts, uint16_t num_pkts);

/* Acquire a reference to @sol_conf; release it with sol_conf_put(). */
static inline void
sol_conf_hold(struct sol_config *sol_conf)
{
	rte_atomic32_inc(&sol_conf->ref_cnt);
}

int sol_conf_put(struct sol_config *sol_conf);

/*
 * Return the SOL private area of @pkt.
 *
 * NOTE(review): struct sol_mbuf_priv is not defined in this header, yet
 * set_prio() below dereferences its @priority field. Its definition must
 * come from elsewhere before these inlines are used -- TODO confirm.
 */
static inline struct sol_mbuf_priv *
mbuf_to_sol_priv(struct rte_mbuf *pkt)
{
	return rte_mbuf_to_priv(pkt);
}

/* Record the request priority of @pkt in its private area. */
static inline void
set_prio(struct rte_mbuf *pkt, uint8_t priority)
{
	mbuf_to_sol_priv(pkt)->priority = priority;
}

#endif /* _GATEKEEPER_SOL_H_ */
```
/content/code_sandbox/include/gatekeeper_sol.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,154
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef MEMBLOCK_H
#define MEMBLOCK_H

#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <stddef.h>

/*
 * A memblock is a simple region ("bump") allocator: allocations advance
 * @next through a fixed buffer, and memblock_free_all()/memblock_sfree_all()
 * release everything at once by resetting @next to the start of the buffer.
 * Individual allocations cannot be freed.
 */
struct memblock_head {
	/* Next free byte in the block. */
	char *next;
	/* One past the last usable byte of the block. */
	char const *end;
};

/*
 * Round @size up to the allocator's alignment
 * (the larger of sizeof(double) and sizeof(void *)).
 */
static inline size_t memblock_align(size_t size)
{
	const size_t alignment = RTE_MAX(sizeof(double), sizeof(void *));
	const size_t mask = alignment - 1;
	RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(alignment));
	return unlikely(size & mask) ? (size & ~mask) + alignment : size;
}

/*
 * Define and initialize a memblock of @size bytes on the stack
 * (or at any automatic scope) under the variable name @name.
 *
 * NOTE(review): @head.end is initialized to name.block +
 * memblock_align(size), which points past the end of char block[size]
 * whenever @size is not already aligned. Presumably callers always pass
 * aligned sizes, or the array should be sized memblock_align(size) --
 * confirm against the definition of memblock_alloc().
 */
#define MEMBLOCK_DEF_INIT(name, size) \
	struct { \
		struct memblock_head head; \
		char block[size]; \
	} name = { \
		.head.next = name.block, \
		.head.end = name.block + memblock_align(size) \
	}

/* Obtain the struct memblock_head of a MEMBLOCK_DEF_INIT() variable. */
#define memblock_from_stack(memblock) (&(memblock).head)

/* memblock_alloc() convenience wrapper for MEMBLOCK_DEF_INIT() variables. */
#define memblock_salloc(memblock, size) \
	memblock_alloc(memblock_from_stack(memblock), size)

/* memblock_calloc() convenience wrapper for MEMBLOCK_DEF_INIT() variables. */
#define memblock_scalloc(memblock, num, size) \
	memblock_calloc(memblock_from_stack(memblock), num, size)

/*
 * Release all allocations of a MEMBLOCK_DEF_INIT() variable at once
 * by resetting the bump pointer to the start of the block.
 */
#define memblock_sfree_all(memblock) \
	do { \
		typeof(memblock) *b = &(memblock); \
		b->head.next = b->block; \
	} while (0)

/* Allocate a memblock of @size bytes from the heap (see rte_malloc). */
struct memblock_head *memblock_alloc_block(size_t size);

/* Free a memblock obtained from memblock_alloc_block(). */
static inline void memblock_free_block(struct memblock_head *head)
{
	rte_free(head);
}

void *memblock_alloc(struct memblock_head *head, size_t size);
void *memblock_calloc(struct memblock_head *head, size_t num, size_t size);
void memblock_free_all(struct memblock_head *head);

#endif /* MEMBLOCK_H */
```
/content/code_sandbox/include/memblock.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
505
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GATEKEEPER_QID_H_
#define _GATEKEEPER_QID_H_

/*
 * Fix: this header uses uint32_t but did not include <stdint.h>, so it
 * was not self-contained (it compiled only when the includer happened to
 * pull stdint.h in first). Sibling headers (e.g. gatekeeper_varip.h,
 * gatekeeper_ratelimit.h) already include it directly.
 */
#include <stdint.h>

/*
 * A QID manages a fixed pool of integer IDs (e.g. indexes into a table):
 * qid_pop() hands out an available ID and qid_push() returns one to the pool.
 */
struct qid {
	/* The LIFO stack of IDs (indexes) available for use. */
	uint32_t *ids;

	/* The length of @ids. */
	uint32_t len;

	/*
	 * The index of the top of the stack.
	 * If the stack is empty, @top is @len.
	 */
	uint32_t top;
};

/*
 * Initialize @qid to manage @len IDs, allocating on NUMA node @socket.
 * @name identifies the allocation.
 */
int qid_init(struct qid *qid, uint32_t len, const char *name, int socket);

/* Release the resources held by @qid. */
void qid_free(struct qid *qid);

/* Return @id to the pool of available IDs. */
int qid_push(struct qid *qid, uint32_t id);

/* Obtain an available ID in *@p_id. */
int qid_pop(struct qid *qid, uint32_t *p_id);

#endif /* _GATEKEEPER_QID_H_ */
```
/content/code_sandbox/include/gatekeeper_qid.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
269
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GATEKEEPER_IPIP_H_
#define _GATEKEEPER_IPIP_H_

#include <rte_ether.h>

#include "gatekeeper_main.h"
#include "gatekeeper_flow.h"
#include "gatekeeper_net.h"

/* IPv4 version number pre-shifted into the high nibble of version_ihl. */
#define IP_VERSION (0x40)
/* Default IP header length == five 32-bits words. */
#define IP_HDRLEN (0x05)
/* From RFC 1340. */
#define IP_DEFTTL (64)
/* Combined version/IHL byte for a default (option-less) IPv4 header. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
/* Don't Fragment flag (network byte order). */
#define IP_DN_FRAGMENT_FLAG (0x0040)

/*
 * Encapsulates the packet to send to Grantor with the given
 * priority. Adjusts the size of the Ethernet header, if
 * needed, for a VLAN header.
 */
int encapsulate(struct rte_mbuf *pkt, uint8_t priority,
	struct gatekeeper_if *iface, struct ipaddr *gt_addr);

#endif /* _GATEKEEPER_IPIP_H_ */
```
/content/code_sandbox/include/gatekeeper_ipip.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
303
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GATEKEEPER_VARIP_H_
#define _GATEKEEPER_VARIP_H_

#include <stdint.h>
#include <stdbool.h>

#include <rte_ip.h>

/*
 * All functional blocks that parse packets beyond the IP header
 * must be aware that variable IP headers are possible, and should
 * use the functionality provided by this library.
 */

/*
 * Return the length of the IPv4 header in bytes:
 * the IHL field (low nibble of version_ihl) counts 32-bit words,
 * so it is multiplied by 4 via the left shift.
 */
static inline uint8_t ipv4_hdr_len(struct rte_ipv4_hdr *ip4hdr)
{
	return ((ip4hdr->version_ihl & 0xf) << 2);
}

/* Return a pointer to the first byte after the IPv4 header (and options). */
static inline uint8_t *ipv4_skip_exthdr(struct rte_ipv4_hdr *ip4hdr)
{
	return ((uint8_t *)ip4hdr + ipv4_hdr_len(ip4hdr));
}

/*
 * Skip any extension headers.
 *
 * This function parses (potentially truncated) extension headers.
 * @nexthdrp should be a reference to the type of the header
 * that comes after the IPv6 header, which may or may not be
 * an IPv6 extension header.
 *
 * It skips all well-known exthdrs, and returns an offset to the start
 * of the unparsable area i.e. the first header with unknown type.
 * If it is not -1, *nexthdr is updated by type/protocol of this header.
 *
 * NOTES: - If packet terminated with NEXTHDR_NONE it returns -1.
 *        - If packet is truncated, so that all parsed headers are skipped,
 *          it returns -1.
 *        - First fragment header is skipped, not-first ones
 *          are considered as unparsable.
 *        - ESP is unparsable for now and considered like
 *          normal payload protocol.
 */
int ipv6_skip_exthdr(const struct rte_ipv6_hdr *ip6hdr, int remaining_len,
	uint8_t *nexthdrp);

#endif /* _GATEKEEPER_VARIP_H_ */
```
/content/code_sandbox/include/gatekeeper_varip.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
499
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _LIST_H_ #define _LIST_H_ #include <rte_common.h> /* * The code of this file is mostly a copy of the Linux kernel. */ #undef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) struct list_head { struct list_head *next, *prev; }; #define LIST_HEAD_INIT(name) { &(name), &(name) } static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list; list->prev = list; } static inline int list_initiated(const struct list_head *head) { return head->next != NULL && head->prev != NULL; } /** * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) /** * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) /** * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. 
*/ #define list_last_entry(ptr, type, member) \ list_entry((ptr)->prev, type, member) /** * list_next_entry - get the next element in list * @pos: the type * to cursor * @member: the name of the list_head within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) /** * list_prev_entry - get the prev element in list * @pos: the type * to cursor * @member: the name of the list_head within the struct. */ #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_safe - iterate over list of given type safe * against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member), \ n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** * list_empty - tests whether a list is empty * @head: the list to test. */ static inline int list_empty(const struct list_head *head) { return head->next == head; } /** * list_is_singular - tests whether a list has just one entry. * @head: the list to test. */ static inline int list_is_singular(const struct list_head *head) { return !list_empty(head) && (head->next == head->prev); } /* * Insert a new entry between two known consecutive entries. 
* * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_add(struct list_head *_new, struct list_head *prev, struct list_head *next) { next->prev = _new; _new->next = next; _new->prev = prev; prev->next = _new; } /** * list_add - add a new entry * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. */ static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } /** * list_add_tail - add a new entry * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. */ static inline void list_add_tail(struct list_head *_new, struct list_head *head) { __list_add(_new, head->prev, head); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_del(struct list_head *prev, struct list_head *next) { next->prev = prev; prev->next = next; } #define LIST_POISON1 ((void *) 0x00100100) #define LIST_POISON2 ((void *) 0x00200200) /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->next = (struct list_head*)LIST_POISON1; entry->prev = (struct list_head*)LIST_POISON2; } #endif /* _LIST_H_ */ ```
/content/code_sandbox/include/list.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,467
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GATEKEEPER_GGU_H_
#define _GATEKEEPER_GGU_H_

#include "gatekeeper_mailbox.h"
#include "gatekeeper_net.h"
#include "gatekeeper_flow.h"
#include "gatekeeper_flow_bpf.h"

/* Version of the GGU policy-decision packet format (see struct ggu_common_hdr). */
#define GGU_PD_VER (1)

/* Configuration for the GK-GT Unit functional block. */
struct ggu_config {
	unsigned int lcore_id;

	/* The UDP source and destination port numbers for GGU. */
	uint16_t ggu_src_port;
	uint16_t ggu_dst_port;

	/* The maximum number of packets to retrieve/transmit. */
	uint16_t max_pkt_burst;

	/* Parameters to setup the mailbox instance. */
	unsigned int mailbox_max_entries_exp;
	unsigned int mailbox_mem_cache_size;
	unsigned int mailbox_burst_size;

	/* Log level for GK-GT Unit block. */
	uint32_t log_level;
	/* Log ratelimit interval in ms for GK-GT Unit block. */
	uint32_t log_ratelimit_interval_ms;
	/* Log ratelimit burst size for GK-GT Unit block. */
	uint32_t log_ratelimit_burst;

	/*
	 * The fields below are for internal use.
	 * Configuration files should not refer to them.
	 */

	/* The maximum number of packets submitted to GGU mailbox. */
	uint16_t mailbox_max_pkt_burst;

	/* RX queue on the back interface. */
	uint16_t rx_queue_back;
	/* RX method on the back interface. */
	uint8_t rx_method_back;

	struct net_config *net;
	struct gk_config *gk;

	/* Mailbox to hold requests from other blocks. */
	struct mailbox mailbox;

	unsigned int total_pkt_burst;

	/* The packet mbuf pool for the GGU block. */
	struct rte_mempool *mp;
};

/* Enumeration of policy decisions the GGU block can process. */
enum {
	/* Decline an IPv4 flow. */
	GGU_DEC_IPV4_DECLINED = 0,
	/* Decline an IPv6 flow. */
	GGU_DEC_IPV6_DECLINED,
	/* Grant an IPv4 flow. */
	GGU_DEC_IPV4_GRANTED,
	/* Grant an IPv6 flow. */
	GGU_DEC_IPV6_GRANTED,
	/* An IPv4 GK BPF flow. */
	GGU_DEC_IPV4_BPF,
	/* An IPv6 GK BPF flow. */
	GGU_DEC_IPV6_BPF,
	__MAX_GGU_DEC
};

/* On the wire policy decision to be processed by the GGU. */
struct ggu_decision {
	/* The policy decision type. */
	uint8_t type;
	/* Reserved for alignment. */
	uint8_t res1;
	uint16_t res2;
	/* The IP flow relevant to this policy decision. */
	uint8_t ip_flow[0];
	/* Parameters for this policy decision would follow the IP flow. */
} __attribute__((packed));

/*
 * Packets that flow between the GT block and the GGU.
 *
 * Packet format: Ethernet header(s) + IP header + UDP header + Data.
 * The UDP payload data is of the following format:
 *
 * version: a constant indicating the version of the format, in this case 1.
 * res1 and res2: reserved space to keep the fields 32-bit aligned.
 * [policy decision(s)]: a list of one or more policy decisions.
 *
 * Each policy decision (struct ggu_decision) is of the following format:
 *
 * +---------------------------------+
 * |  Type   |  Res1   |    Res2     |
 * +---------------------------------+
 * |                                 |
 * |  IP flow (source, destination)  |
 * |                                 |
 * +---------------------------------+
 * |                                 |
 * |       Decision Parameters       |
 * |              ...                |
 * +---------------------------------+
 *
 * The decision type is from an enumerated set that both the GT block
 * and the GGU must understand in order for the decision to be
 * processed. For example, an action may grant an IPv4 flow.
 *
 * The IP flow is either a combination of two IPv4 addresses or two
 * IPv6 addresses to represent a (source, destination) flow.
 *
 * Each decision optionally ends with decision-specific parameters.
 */
struct ggu_common_hdr {
	/* Version of packet format. */
	uint8_t version;
	/* Reserved for alignment. */
	uint8_t res1;
	uint16_t res2;
	/* List of one or more policy decisions. */
	struct ggu_decision decisions[0];
} __attribute__((packed));

/* Parameters for declaring a flow granted. */
struct ggu_granted {
	/* Rate limit: kibibyte/second. */
	uint32_t tx_rate_kib_sec;
	/*
	 * How much time (unit: second) a GK block waits
	 * before it expires the capability.
	 */
	uint32_t cap_expire_sec;
	/*
	 * The first value of send_next_renewal_at at
	 * flow entry comes from next_renewal_ms.
	 */
	uint32_t next_renewal_ms;
	/*
	 * How many milliseconds (unit) GK must wait
	 * before sending the next capability renewal
	 * request.
	 */
	uint32_t renewal_step_ms;
} __attribute__ ((packed));

/* Parameters for declaring a flow declined. */
struct ggu_declined {
	/*
	 * How much time (unit: second) a GK block waits
	 * before it expires the declined capability.
	 */
	uint32_t expire_sec;
} __attribute__ ((packed));

/*
 * Parameters for declaring a BPF flow in the GGU protocol.
 *
 * struct ggu_bpf_wire enables GT blocks to send only
 * the beginnings of cookies to save space in the GGU message.
 * The beginnings of cookies are expected to be the initialized part of
 * the cookie; the non-initialized part is zeroed.
 *
 * See struct ggu_bpf for an equivalent struct with full-length cookie.
 */
struct ggu_bpf_wire {
	/*
	 * How much time (unit: second) a GK block waits
	 * before it expires the BPF state.
	 */
	uint32_t expire_sec;
	/* Index of the BPF program associated to this state. */
	uint8_t program_index;
	/* Reserved for alignment. */
	uint16_t reserved;
	/*
	 * Length of the cookie in 4 bytes. That is, the length of the cookie
	 * is @cookie_len_4by * 4. This preserves the 32-bit alignment.
	 */
	uint8_t cookie_len_4by;
	/* Initialized part of the cookie. */
	uint8_t cookie[0];
} __attribute__ ((packed));

/* Parameters for declaring a BPF flow. */
struct ggu_bpf {
	/*
	 * How much time (unit: second) a GK block waits
	 * before it expires the BPF state.
	 */
	uint32_t expire_sec;
	/* Index of the BPF program associated to this state. */
	uint8_t program_index;
	/* Reserved for alignment. */
	uint8_t reserved;
	/*
	 * Number of used bytes in the cookie starting at
	 * the beginning of the cookie.
	 * These bytes are the initialization parameters
	 * of the corresponding BPF state.
	 * The remaining bytes are expected to be zero.
	 */
	uint16_t cookie_len;
	/*
	 * Initialized memory to be passed to
	 * the BPF program each time it is executed.
	 */
	struct gk_bpf_cookie cookie;
} __attribute__ ((packed));

/* An in-memory (decoded) policy decision. */
struct ggu_policy {
	uint8_t state;
	struct ip_flow flow;

	/*
	 * The policy decision sent to a GK block must have
	 * enough information to fill out the fields of
	 * struct flow_entry at the corresponding state.
	 */
	union {
		/* Decision is to grant the flow. */
		struct ggu_granted granted;
		/* Decision is to decline the flow. */
		struct ggu_declined declined;
		/* Decision is to run a BPF program on the flow. */
		struct ggu_bpf bpf;
	} params;
};

/*
 * When iterating over policy decisions in a GGU packet, this
 * function can be applied to the policy and some argument.
 * A function of this type should be passed to ggu_policy_iterator().
 */
typedef void (*ggu_policy_fn)(struct ggu_policy *policy, void *arg);

/*
 * Iterate over a GGU notification packet's decision list, beginning
 * with @ggu_decision and extending throughout @decision_list_len.
 *
 * For each policy decision found, apply @policy_fn to it along
 * with an optional @policy_arg.
 */
void ggu_policy_iterator(struct ggu_decision *ggu_decision,
	unsigned int decision_list_len, ggu_policy_fn policy_fn,
	void *policy_arg);

struct ggu_config *alloc_ggu_conf(unsigned int lcore);
int run_ggu(struct net_config *net_conf,
	struct gk_config *gk_conf, struct ggu_config *ggu_conf);
int cleanup_ggu(struct ggu_config *ggu_conf);

#endif /* _GATEKEEPER_GGU_H_ */
```
/content/code_sandbox/include/gatekeeper_ggu.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,039
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GATEKEEPER_LOG_RATELIMIT_H_
#define _GATEKEEPER_LOG_RATELIMIT_H_

#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>	/* Needed for rte_atomic32_t. */

/*
 * At startup, log ratelimiting is disabled so that all
 * startup logs are captured. Before the blocks start,
 * the launch routine calls this function to enable log
 * ratelimiting during operation.
 */
void log_ratelimit_enable(void);

/*
 * Check whether a log entry will be permitted, according to the level
 * of the log entry and the configured level of the system's log.
 * Note that even when this test passes, log entries may not occur
 * due to the rate limiting system.
 */
bool check_log_allowed(uint32_t level);

/*
 * @lcore_id: initialize the log_ratelimit_state data for @lcore_id.
 *
 * This will allow to enforce a rate limit on log entries:
 * no more than @log_ratelimit_state.burst callbacks in
 * every @log_ratelimit_state.interval milliseconds.
 *
 * Note that, to avoid performance degradation caused by locks, the
 * implementation assumes that each lcore will maintain a separate
 * struct log_ratelimit_state to rate limit the log entries.
 */
void log_ratelimit_state_init(unsigned int lcore_id, uint32_t interval,
	uint32_t burst, uint32_t log_level, const char *block_name);

/**
 * Generates and ratelimits a log message.
 *
 * The message will be sent in the stream defined by the previous call
 * to rte_openlog_stream() if it is not ratelimited.
 *
 * The level argument determines if the log should be displayed or
 * not, depending on the global rte_logs variable.
 *
 * @param level
 *   Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
 * @param logtype
 *   The log type, for example, RTE_LOGTYPE_EAL.
 * @param format
 *   The format string, as in printf(3), followed by the variable arguments
 *   required by the format.
 * @return
 *   - 0: Success.
 *   - Negative on error.
 */
int gatekeeper_log_ratelimit(uint32_t level, uint32_t logtype,
	const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
	__attribute__((cold))
#endif
#endif
	__attribute__((format(printf, 3, 4)));

/* Same contract as gatekeeper_log_ratelimit(); used by the main thread. */
int gatekeeper_log_main(uint32_t level, uint32_t logtype,
	const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
	__attribute__((cold))
#endif
#endif
	__attribute__((format(printf, 3, 4)));

/* Functions to set the log level for each functional block as well as lcore. */
int set_log_level_per_block(const char *block_name, uint32_t log_level);
int set_log_level_per_lcore(unsigned int lcore_id, uint32_t log_level);

/* Per-lcore ratelimit bookkeeping; one instance per lcore (see above). */
struct log_ratelimit_state {
	uint64_t interval_cycles;
	uint32_t burst;
	uint32_t printed;
	uint32_t suppressed;
	uint64_t end;
	rte_atomic32_t log_level;
	char block_name[16];
} __rte_cache_aligned;

/* Per-thread cache of the formatted date/time string. */
struct log_thread_time {
	char str_date_time[32];
	uint64_t update_time_at;
};

/*
 * Only use these variables in file lib/log_ratelimit.c and in macros
 * G_LOG() and MAIN_LOG().
 */
RTE_DECLARE_PER_LCORE(struct log_thread_time, _log_thread_time);
extern struct log_ratelimit_state log_ratelimit_states[RTE_MAX_LCORE];
extern bool log_ratelimit_enabled;

/* Get the block name for the corresponding lcore. */
static inline const char *get_block_name(unsigned int lcore_id)
{
	return lcore_id < RTE_MAX_LCORE
		? log_ratelimit_states[lcore_id].block_name
		: "NO-lcore";
}

#endif /* _GATEKEEPER_LOG_RATELIMIT_H_ */
```
/content/code_sandbox/include/gatekeeper_log_ratelimit.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,005
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GATEKEEPER_GK_RT_H_
#define _GATEKEEPER_GK_RT_H_

#include <lauxlib.h>

#include <rte_ether.h>
#include <rte_hash.h>
#include <rte_spinlock.h>
#include <rte_atomic.h>

#include "gatekeeper_net.h"
#include "gatekeeper_fib.h"
#include "gatekeeper_qid.h"
#include "seqlock.h"

enum gk_fib_action {
	/* Forward the packet to the corresponding Grantor. */
	GK_FWD_GRANTOR,

	/*
	 * Forward the packet to the corresponding gateway
	 * in the front network.
	 */
	GK_FWD_GATEWAY_FRONT_NET,

	/*
	 * Forward the packet to the corresponding gateway
	 * in the back network.
	 */
	GK_FWD_GATEWAY_BACK_NET,

	/*
	 * The destination address is a neighbor in the front network.
	 * Forward the packet to the destination directly.
	 */
	GK_FWD_NEIGHBOR_FRONT_NET,

	/*
	 * The destination address is a neighbor in the back network.
	 * Forward the packet to the destination directly.
	 */
	GK_FWD_NEIGHBOR_BACK_NET,

	/* Drop the packet. */
	GK_DROP,

	/* Invalid forward action. */
	GK_FIB_MAX,
};

/*
 * The Ethernet header cache.
 * Fields @stale and @eth_hdr.d_addr are protected by the lock of
 * the cached entry.
 */
struct ether_cache {
	/*
	 * The sequential lock to deal with the
	 * concurrency between GK and LLS on the cached
	 * Ethernet header.
	 *
	 * Notice that, the LLS block will only modify
	 * the @stale and @eth_hdr.d_addr fields.
	 * Therefore, @lock only applies to these two fields.
	 */
	seqlock_t lock;

	/*
	 * The count of how many times the LPM tables refer to it,
	 * so a neighbor entry can go away only when no one is referring to it.
	 * Notice, this field is atomic because it's handled by
	 * the LLS block and the GK blocks.
	 */
	rte_atomic32_t ref_cnt;

	/*
	 * The fields below the field @fields_to_clear are zeroed
	 * when the entry is released.
	 */
	int fields_to_clear[0];

	/* Indicate whether the MAC address is stale or not. */
	bool stale;

	/* The IP address of the nexthop. */
	struct ipaddr ip_addr;

	/* The whole link-layer header. */
	struct {
		/* Ethernet header (required). */
		struct rte_ether_hdr eth_hdr;
		/* VLAN header (optional). */
		struct rte_vlan_hdr vlan_hdr;
	} __attribute__((packed)) l2_hdr;
};

struct neighbor_hash_table {
	int tbl_size;

	struct rte_hash *hash_table;

	/* The tables that store the Ethernet headers. */
	struct ether_cache *cache_tbl;
};

struct grantor_entry {
	/* The Grantor IP address. */
	struct ipaddr gt_addr;

	/* The cached Ethernet header of the next hop. */
	struct ether_cache *eth_cache;
};

struct grantor_set {
	/* Protocol of the Grantor IPs. */
	uint8_t proto;

	/* Number of structs grantor_entry that start at @entries. */
	uint16_t num_entries;

	/* List of Grantors and their next hops' cached Ethernet headers. */
	struct grantor_entry entries[0];
};

/*
 * Route properties.
 *
 * Gatekeeper does not act on these properties. They are needed to
 * support routing daemons that expect them.
 */
struct route_properties {
	/*
	 * Routing table protocol -- origin of the route.
	 * RTPROT_STATIC for routes added by user.
	 * RTPROT_BIRD for routes added by BIRD daemon, etc.
	 */
	uint8_t rt_proto;

	uint32_t priority;
};

/* The gk forward information base (fib). */
struct gk_fib {
	/* The fib action. */
	enum gk_fib_action action;

	union {
		/*
		 * The nexthop information when the action is
		 * GK_FWD_GATEWAY_*_NET.
		 */
		struct {
			/* The cached Ethernet header. */
			struct ether_cache *eth_cache;

			struct route_properties props;
		} gateway;

		/* Route information when the action is GK_FWD_GRANTOR. */
		struct {
			/*
			 * Set of Grantors that packets to this
			 * destination should be load balanced to.
			 */
			struct grantor_set *set;
		} grantor;

		/*
		 * When the action is GK_FWD_NEIGHBOR_*_NET, it stores all
		 * the neighbors' Ethernet headers in a hash table.
		 * The entries can be accessed according to its IP address.
		 */
		struct neighbor_hash_table neigh;

		/* Route information when the action is GK_DROP. */
		struct {
			struct route_properties props;
		} drop;
	} u;
};

/* The global LPM table of Gatekeeper servers (not Grantor servers). */
struct gk_lpm {
	/* Use a spin lock to edit the FIB table. */
	rte_spinlock_t lock;

	/* The IPv4 RIB and FIB. */
	struct fib_head fib;
	/* The IPv4 FIB table that decides the actions on packets. */
	struct gk_fib *fib_tbl;
	/* The data structure for managing available IPv4 FIB entries. */
	struct qid qid;

	/* The IPv6 RIB and FIB. */
	struct fib_head fib6;
	/* The IPv6 FIB table that decides the actions on packets. */
	struct gk_fib *fib_tbl6;
	/* The data structure for managing available IPv6 FIB entries. */
	struct qid qid6;
};

/* Obtain the IPv4 RIB of @ltbl. */
static inline struct rib_head *rib4_from_ltbl(struct gk_lpm *ltbl)
{
	return fib_get_rib(&ltbl->fib);
}

/* Obtain the IPv6 RIB of @ltbl. */
static inline struct rib_head *rib6_from_ltbl(struct gk_lpm *ltbl)
{
	return fib_get_rib(&ltbl->fib6);
}

struct ip_prefix {
	/* Textual form of the prefix. */
	const char *str;
	/* Network address of the prefix. */
	struct ipaddr addr;
	/* Prefix length. */
	int len;
};

/*
 * Since GK_FWD_GRANTOR entries can have multiple Grantor IPs
 * for load balancing (and therefore multiple next hops),
 * we group together this information into an address set.
 */
struct fib_dump_addr_set {
	/*
	 * The Grantor IP address. Only applicable for
	 * FIB entries of type GK_FWD_GRANTOR.
	 */
	struct ipaddr grantor_ip;
	/* The next hop (gateway) IP address. */
	struct ipaddr nexthop_ip;
	/* The MAC address of @nexthop_ip. */
	struct rte_ether_addr d_addr;
	/* Whether the resolution for @nexthop_ip to @d_addr is invalid. */
	bool stale;
};

struct gk_fib_dump_entry {
	/* The IP prefix. */
	struct ipaddr addr;
	/* The prefix length of @addr. */
	int prefix_len;
	/* The FIB action. */
	enum gk_fib_action action;
	/* Unique ID of this FIB entry. */
	unsigned int fib_id;

	/*
	 * The number of entries starting at @addr_sets.
	 *
	 * - For GK_FWD_GRANTOR, this value is the number
	 *   of (Grantor, gateway) pairs.
	 * - For GK_DROP, this is 0.
	 * - For all other @action values, this is 1.
	 */
	unsigned int num_addr_sets;

	/*
	 * Address sets.
	 *
	 * When @action is GK_FWD_GRANTOR, all fields
	 * are valid (Grantor IP, next hop IP, next hop MAC, stale),
	 * and there can be multiple address sets.
	 *
	 * When @action is GK_DROP, there are no address sets.
	 *
	 * When @action is anything else, only the fields related
	 * to the next hop are valid (next hop IP, next hop MAC, stale),
	 * and there should only be one address set.
	 */
	struct fib_dump_addr_set addr_sets[0];
};

struct gk_neighbor_dump_entry {
	bool stale;

	/* The fib action. */
	enum gk_fib_action action;

	/* The IP address of the neighbor. */
	struct ipaddr neigh_ip;
	/* The MAC address of @neigh_ip. */
	struct rte_ether_addr d_addr;
};

struct gk_config;

int clear_ether_cache(struct ether_cache *eth_cache);
uint32_t custom_ipv4_hash_func(const void *key,
	uint32_t length, uint32_t initval);
int setup_neighbor_tbl(unsigned int socket_id, int identifier,
	int ip_ver, int ht_size, struct neighbor_hash_table *neigh,
	rte_hash_function hash_func);
int setup_gk_lpm(struct gk_config *gk_conf, unsigned int socket_id);
void destroy_neigh_hash_table(struct neighbor_hash_table *neigh);
int parse_ip_prefix(const char *ip_prefix, struct ipaddr *res);
int add_fib_entry_numerical(const struct ip_prefix *prefix_info,
	struct ipaddr *gt_addrs, struct ipaddr *gw_addrs,
	unsigned int num_addrs, enum gk_fib_action action,
	const struct route_properties *props, struct gk_config *gk_conf);
int add_fib_entry_numerical_locked(const struct ip_prefix *prefix_info,
	struct ipaddr *gt_addrs, struct ipaddr *gw_addrs,
	unsigned int num_addrs, enum gk_fib_action action,
	const struct route_properties *props, struct gk_config *gk_conf);
int add_fib_entry(const char *prefix, const char *gt_ip, const char *gw_ip,
	enum gk_fib_action action, struct gk_config *gk_conf);
int del_fib_entry_numerical(const struct ip_prefix *prefix_info,
	struct gk_config *gk_conf);
int del_fib_entry_numerical_locked(const struct ip_prefix *prefix_info,
	struct gk_config *gk_conf);
int del_fib_entry(const char *ip_prefix, struct gk_config *gk_conf);

/* Lua bindings (registered with the Lua state of the dynamic config). */
int l_list_gk_fib4(lua_State *L);
int l_list_gk_fib6(lua_State *L);
int l_list_gk_neighbors4(lua_State *L);
int l_list_gk_neighbors6(lua_State *L);
int l_ether_format_addr(lua_State *L);
int l_ip_format_addr(lua_State *L);
int l_add_grantor_entry_lb(lua_State *L);
int l_update_grantor_entry_lb(lua_State *L);

#define CTYPE_STRUCT_GK_CONFIG_PTR "struct gk_config *"

/*
 * Look up the cached Ethernet header of the neighbor identified by @key
 * in @neigh_tbl. Returns NULL when the neighbor is not present.
 */
static inline struct ether_cache *
lookup_ether_cache(struct neighbor_hash_table *neigh_tbl, void *key)
{
	struct ether_cache *eth_cache;
	int ret = rte_hash_lookup_data(neigh_tbl->hash_table, key,
		(void **)&eth_cache);
	if (ret < 0)
		return NULL;
	return eth_cache;
}

#endif /* _GATEKEEPER_GK_RT_H_ */
```
/content/code_sandbox/include/gatekeeper_rt.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,457
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_FLOW_H_ #define _GATEKEEPER_FLOW_H_ #include <stdio.h> #include <stdint.h> #include <netinet/in.h> struct ip_flow { /* IPv4 or IPv6. */ uint16_t proto; union { struct { struct in_addr src; struct in_addr dst; } v4; struct { struct in6_addr src; struct in6_addr dst; } v6; } f; }; int flow_cmp(const struct ip_flow *flow1, const struct ip_flow *flow2); static inline bool flow_equal(const struct ip_flow *flow1, const struct ip_flow *flow2) { return flow_cmp(flow1, flow2) == 0; } void print_flow_err_msg(const struct ip_flow *flow, int32_t index, const char *err_msg); #endif /* _GATEKEEPER_FLOW_H_ */ ```
/content/code_sandbox/include/gatekeeper_flow.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
290
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_RATELIMIT_H_ #define _GATEKEEPER_RATELIMIT_H_ #include <stdint.h> #include <stdbool.h> /* * The code of this file is mostly a copy of the Linux kernel. */ struct token_bucket_ratelimit_state { uint32_t rate; uint32_t burst; uint32_t credit; uint64_t stamp; }; void tb_ratelimit_state_init(struct token_bucket_ratelimit_state *tbrs, uint32_t rate, uint32_t burst); uint32_t tb_ratelimit_allow_n(uint32_t n, struct token_bucket_ratelimit_state *tbrs); static inline bool tb_ratelimit_allow(struct token_bucket_ratelimit_state *tbrs) { return tb_ratelimit_allow_n(1, tbrs); } #endif /* _GATEKEEPER_RATELIMIT_H_ */ ```
/content/code_sandbox/include/gatekeeper_ratelimit.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
277
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_MAIN_H_ #define _GATEKEEPER_MAIN_H_ #include <stdint.h> #ifdef RTE_MACHINE_CPUFLAG_SSE4_2 #include <rte_hash_crc.h> #define DEFAULT_HASH_FUNC rte_hash_crc #else #include <rte_jhash.h> #define DEFAULT_HASH_FUNC rte_jhash #endif #include <rte_lcore.h> #include "gatekeeper_log_ratelimit.h" #include "list.h" #define BLOCK_LOGTYPE RTE_LOGTYPE_USER1 #define G_LOG_PREFIX "%s/%u %s %s " #define G_LOG_MAIN "Main" #define G_LOG(level, fmt, ...) \ do { \ unsigned int __g_log_lcore_id = rte_lcore_id(); \ gatekeeper_log_ratelimit(RTE_LOG_ ## level, \ BLOCK_LOGTYPE, G_LOG_PREFIX fmt, \ likely(log_ratelimit_enabled) \ ? log_ratelimit_states[__g_log_lcore_id]\ .block_name \ : G_LOG_MAIN, \ __g_log_lcore_id, \ RTE_PER_LCORE(_log_thread_time).str_date_time, \ #level \ __VA_OPT__(,) __VA_ARGS__); \ } while (0) #define G_LOG_CHECK(level) check_log_allowed(RTE_LOG_ ## level) /* * This macro should only be called in contexts other than logical cores * because it is independent of functional blocks and is not rate limited. * * From logical cores, call G_LOG(). */ #define MAIN_LOG(level, fmt, ...) 
\ gatekeeper_log_main(RTE_LOG_ ## level, BLOCK_LOGTYPE, \ G_LOG_PREFIX fmt, G_LOG_MAIN, rte_gettid(), \ RTE_PER_LCORE(_log_thread_time).str_date_time, \ #level \ __VA_OPT__(,) __VA_ARGS__) extern volatile int exiting; #define ONE_SEC_IN_NANO_SEC (1000000000L) extern uint64_t cycles_per_sec; extern uint64_t cycles_per_ms; extern double cycles_per_ns; extern uint64_t picosec_per_cycle; extern FILE *log_file; struct sol_mbuf_priv { uint8_t priority; struct list_head list; }; char *rte_strdup(const char *type, const char *s); int gatekeeper_log_init(void); #endif /* _GATEKEEPER_MAIN_H_ */ ```
/content/code_sandbox/include/gatekeeper_main.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
634
```objective-c /* * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. * * This library is modelled strictly after Ralf S. Engelschalls article at * path_to_url So most of the credit must * go to Ralf S. 
Engelschall <rse@engelschall.com>. * * This coroutine library is very much stripped down. You should either * build your own process abstraction using it or - better - just use GNU * Portable Threads, path_to_url * */ /* * 2006-10-26 Include stddef.h on OS X to work around one of its bugs. * Reported by Michael_G_Schwern. * 2006-11-26 Use _setjmp instead of setjmp on GNU/Linux. * 2007-04-27 Set unwind frame info if gcc 3+ and ELF is detected. * Use _setjmp instead of setjmp on _XOPEN_SOURCE >= 600. * 2007-05-02 Add assembly versions for x86 and amd64 (to avoid reliance * on SIGUSR2 and sigaltstack in Crossfire). * 2008-01-21 Disable CFI usage on anything but GNU/Linux. * 2008-03-02 Switched to 2-clause BSD license with GPL exception. * 2008-04-04 New (but highly unrecommended) pthreads backend. * 2008-04-24 Reinstate CORO_LOSER (had wrong stack adjustments). * 2008-10-30 Support assembly method on x86 with and without frame pointer. * 2008-11-03 Use a global asm statement for CORO_ASM, idea by pippijn. * 2008-11-05 Hopefully fix misaligned stacks with CORO_ASM/SETJMP. * 2008-11-07 rbp wasn't saved in CORO_ASM on x86_64. * introduce coro_destroy, which is a nop except for pthreads. * speed up CORO_PTHREAD. Do no longer leak threads either. * coro_create now allows one to create source coro_contexts. * do not rely on makecontext passing a void * correctly. * try harder to get _setjmp/_longjmp. * major code cleanup/restructuring. * 2008-11-10 the .cfi hacks are no longer needed. * 2008-11-16 work around a freebsd pthread bug. * 2008-11-19 define coro_*jmp symbols for easier porting. * 2009-06-23 tentative win32-backend support for mingw32 (Yasuhiro Matsumoto). * 2010-12-03 tentative support for uclibc (which lacks all sorts of things). * 2011-05-30 set initial callee-saved-registers to zero with CORO_ASM. * use .cfi_undefined rip on linux-amd64 for better backtraces. * 2011-06-08 maybe properly implement weird windows amd64 calling conventions. 
* 2011-07-03 rely on __GCC_HAVE_DWARF2_CFI_ASM for cfi detection. * 2011-08-08 cygwin trashes stacks, use pthreads with double stack on cygwin. * 2012-12-04 reduce misprediction penalty for x86/amd64 assembly switcher. * 2012-12-05 experimental fiber backend (allocates stack twice). * 2012-12-07 API version 3 - add coro_stack_alloc/coro_stack_free. * 2012-12-21 valgrind stack registering was broken. * 2015-12-05 experimental asm be for arm7, based on a patch by Nick Zavaritsky. * use __name__ for predefined symbols, as in libecb. * enable guard pages on arm, aarch64 and mips. * 2016-08-27 try to disable _FORTIFY_SOURCE with CORO_SJLJ, as it * breaks setjmp/longjmp. Also disable CORO_ASM for asm by default, * as it was reported to crash. * 2016-11-18 disable cfi_undefined again - backtraces might be worse, but * compile compatibility is improved. * 2018-08-14 use a completely different pthread strategy that should allow * sharing of coroutines among different threads. this would * undefined behaviour before as mutexes would be unlocked on * a different thread. overall, this might be slower than * using a pipe for synchronisation, but pipes eat fd's... */ #ifndef CORO_H #define CORO_H #ifdef __cplusplus extern "C" { #endif /* * This library consists of only three files * coro.h, coro.c and LICENSE (and optionally README) * * It implements what is known as coroutines, in a hopefully * portable way. * * All compiletime symbols must be defined both when including coro.h * (using libcoro) as well as when compiling coro.c (the implementation). * * You can manually specify which flavour you want. If you don't define * any of these, libcoro tries to choose a safe and fast default: * * -DCORO_UCONTEXT * * This flavour uses SUSv2's get/set/swap/makecontext functions that * unfortunately only some unices support, and is quite slow. * * -DCORO_SJLJ * * This flavour uses SUSv2's setjmp/longjmp and sigaltstack functions to * do it's job. 
Coroutine creation is much slower than UCONTEXT, but * context switching is a bit cheaper. It should work on almost all unices. * * -DCORO_LINUX * * CORO_SJLJ variant. * Old GNU/Linux systems (<= glibc-2.1) only work with this implementation * (it is very fast and therefore recommended over other methods, but * doesn't work with anything newer). * * -DCORO_LOSER * * CORO_SJLJ variant. * Microsoft's highly proprietary platform doesn't support sigaltstack, and * this selects a suitable workaround for this platform. It might not work * with your compiler though - it has only been tested with MSVC 6. * * -DCORO_FIBER * * Slower, but probably more portable variant for the Microsoft operating * system, using fibers. Ignores the passed stack and allocates it internally. * Also, due to bugs in cygwin, this does not work with cygwin. * * -DCORO_IRIX * * CORO_SJLJ variant. * For SGI's version of Microsoft's NT ;) * * -DCORO_ASM * * Hand coded assembly, known to work only on a few architectures/ABI: * GCC + arm7/x86/IA32/amd64/x86_64 + GNU/Linux and a few BSDs. Fastest * choice, if it works. * * -DCORO_PTHREAD * * Use the pthread API. You have to provide <pthread.h> and -lpthread. * This is likely the slowest backend, and it also does not support fork(), * so avoid it at all costs. * * If you define neither of these symbols, coro.h will try to autodetect * the best/safest model. To help with the autodetection, you should check * (e.g. using autoconf) and define the following symbols: HAVE_UCONTEXT_H * / HAVE_SETJMP_H / HAVE_SIGALTSTACK. */ /* * Changes when the API changes incompatibly. * This is ONLY the API version - there is no ABI compatibility between releases. * * Changes in API version 2: * replaced bogus -DCORO_LOOSE with grammatically more correct -DCORO_LOSER * Changes in API version 3: * introduced stack management (CORO_STACKALLOC) */ #define CORO_VERSION 3 #include <stddef.h> /* * This is the type for the initialization function of a new coroutine. 
*/ typedef void (*coro_func)(void *); /* * A coroutine state is saved in the following structure. Treat it as an * opaque type. errno and sigmask might be saved, but don't rely on it, * implement your own switching primitive if you need that. */ typedef struct coro_context coro_context; /* * This function creates a new coroutine. Apart from a pointer to an * uninitialised coro_context, it expects a pointer to the entry function * and the single pointer value that is given to it as argument. * * Allocating/deallocating the stack is your own responsibility. * * As a special case, if coro, arg, sptr and ssze are all zero, * then an "empty" coro_context will be created that is suitable * as an initial source for coro_transfer. * * This function is not reentrant, but putting a mutex around it * will work. */ void coro_create (coro_context *ctx, /* an uninitialised coro_context */ coro_func coro, /* the coroutine code to be executed */ void *arg, /* a single pointer passed to the coro */ void *sptr, /* start of stack area */ size_t ssze); /* size of stack area in bytes */ /* * The following prototype defines the coroutine switching function. It is * sometimes implemented as a macro, so watch out. * * This function is thread-safe and reentrant. */ #if 0 void coro_transfer (coro_context *prev, coro_context *next); #endif /* * The following prototype defines the coroutine destroy function. It * is sometimes implemented as a macro, so watch out. It also serves no * purpose unless you want to use the CORO_PTHREAD backend, where it is * used to clean up the thread. You are responsible for freeing the stack * and the context itself. * * This function is thread-safe and reentrant. 
*/ #if 0 void coro_destroy (coro_context *ctx); #endif /*****************************************************************************/ /* optional stack management */ /*****************************************************************************/ /* * You can disable all of the stack management functions by * defining CORO_STACKALLOC to 0. Otherwise, they are enabled by default. * * If stack management is enabled, you can influence the implementation via these * symbols: * * -DCORO_USE_VALGRIND * * If defined, then libcoro will include valgrind/valgrind.h and register * and unregister stacks with valgrind. * * -DCORO_GUARDPAGES=n * * libcoro will try to use the specified number of guard pages to protect against * stack overflow. If n is 0, then the feature will be disabled. If it isn't * defined, then libcoro will choose a suitable default. If guardpages are not * supported on the platform, then the feature will be silently disabled. */ #ifndef CORO_STACKALLOC # define CORO_STACKALLOC 1 #endif #if CORO_STACKALLOC /* * The only allowed operations on these struct members is to read the * "sptr" and "ssze" members to pass it to coro_create, to read the "sptr" * member to see if it is false, in which case the stack isn't allocated, * and to set the "sptr" member to 0, to indicate to coro_stack_free to * not actually do anything. */ struct coro_stack { void *sptr; size_t ssze; #ifdef CORO_USE_VALGRIND int valgrind_id; #endif }; /* * Try to allocate a stack of at least the given size and return true if * successful, or false otherwise. * * The size is *NOT* specified in bytes, but in units of sizeof (void *), * i.e. the stack is typically 4(8) times larger on 32 bit(64 bit) platforms * then the size passed in. * * If size is 0, then a "suitable" stack size is chosen (usually 1-2MB). */ int coro_stack_alloc (struct coro_stack *stack, unsigned int size); /* * Free the stack allocated by coro_stack_alloc again. 
It is safe to * call this function on the coro_stack structure even if coro_stack_alloc * failed. */ void coro_stack_free (struct coro_stack *stack); #endif /* * That was it. No other user-serviceable parts below here. */ /*****************************************************************************/ #if !defined CORO_LOSER && !defined CORO_UCONTEXT \ && !defined CORO_SJLJ && !defined CORO_LINUX \ && !defined CORO_IRIX && !defined CORO_ASM \ && !defined CORO_PTHREAD && !defined CORO_FIBER # if defined WINDOWS && (defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_AMD64) # define CORO_ASM 1 # elif defined WINDOWS || defined _WIN32 # define CORO_LOSER 1 /* you don't win with windoze */ # elif __linux && (__i386__ || (__x86_64__ && !__ILP32__)) /*|| (__arm__ && __ARM_ARCH == 7)), not working */ # define CORO_ASM 1 # elif defined HAVE_UCONTEXT_H # define CORO_UCONTEXT 1 # elif defined HAVE_SETJMP_H && defined HAVE_SIGALTSTACK # define CORO_SJLJ 1 # else error unknown or unsupported architecture # endif #endif /*****************************************************************************/ #ifdef CORO_UCONTEXT # include <ucontext.h> struct coro_context { ucontext_t uc; }; # define coro_transfer(p,n) swapcontext (&((p)->uc), &((n)->uc)) # define coro_destroy(ctx) (void *)(ctx) #elif defined (CORO_SJLJ) || defined (CORO_LOSER) || defined (CORO_LINUX) || defined (CORO_IRIX) # if defined(CORO_LINUX) && !defined(_GNU_SOURCE) # define _GNU_SOURCE /* for glibc */ # endif /* try to disable well-meant but buggy checks in some libcs */ # ifdef _FORTIFY_SOURCE # undef _FORTIFY_SOURCE # undef __USE_FORTIFY_LEVEL /* helps some more when too much has been included already */ # endif # if !CORO_LOSER # include <unistd.h> # endif /* solaris is hopelessly borked, it expands _XOPEN_UNIX to nothing */ # if __sun # undef _XOPEN_UNIX # define _XOPEN_UNIX 1 # endif # include <setjmp.h> # if _XOPEN_UNIX > 0 || defined (_setjmp) # define coro_jmp_buf jmp_buf # define 
coro_setjmp(env) _setjmp (env) # define coro_longjmp(env) _longjmp ((env), 1) # elif CORO_LOSER # define coro_jmp_buf jmp_buf # define coro_setjmp(env) setjmp (env) # define coro_longjmp(env) longjmp ((env), 1) # else # define coro_jmp_buf sigjmp_buf # define coro_setjmp(env) sigsetjmp (env, 0) # define coro_longjmp(env) siglongjmp ((env), 1) # endif struct coro_context { coro_jmp_buf env; }; # define coro_transfer(p,n) do { if (!coro_setjmp ((p)->env)) coro_longjmp ((n)->env); } while (0) # define coro_destroy(ctx) (void *)(ctx) #elif CORO_ASM struct coro_context { void **sp; /* must be at offset 0 */ }; #if defined (__i386__) || defined (__x86_64__) void __attribute__ ((__noinline__, __regparm__(2))) #else void __attribute__ ((__noinline__)) #endif coro_transfer (coro_context *prev, coro_context *next); # define coro_destroy(ctx) (void)(ctx) #elif CORO_PTHREAD # include <pthread.h> extern pthread_mutex_t coro_mutex; struct coro_context { int flags; pthread_cond_t cv; }; void coro_transfer (coro_context *prev, coro_context *next); void coro_destroy (coro_context *ctx); #elif CORO_FIBER struct coro_context { void *fiber; /* only used for initialisation */ coro_func coro; void *arg; }; void coro_transfer (coro_context *prev, coro_context *next); void coro_destroy (coro_context *ctx); #endif #ifdef __cplusplus } #endif #endif ```
/content/code_sandbox/include/coro.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
4,099
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_CPS_H_ #define _GATEKEEPER_CPS_H_ #include <net/if.h> #include <rte_timer.h> #include "gatekeeper_gk.h" #include "gatekeeper_gt.h" #include "gatekeeper_mailbox.h" #include "gatekeeper_log_ratelimit.h" #include "list.h" #include "coro.h" /* KNI stands for Kernel Network Interface. */ struct cps_kni{ /* * Virtio-user interface. * This is the interface that the CPS block uses to receive and * to transmit packets. */ char cps_name[IF_NAMESIZE]; uint16_t cps_portid; /* * TAP interface. * This is the interface available to the kernel. */ char krn_name[IF_NAMESIZE]; unsigned int krn_ifindex; }; /* Configuration for the Control Plane Support functional block. */ struct cps_config { /* lcore that the CPS block runs on. */ unsigned int lcore_id; /* Log level for CPS block. */ uint32_t log_level; /* Log ratelimit interval in ms for CPS block. */ uint32_t log_ratelimit_interval_ms; /* Log ratelimit burst size for CPS block. */ uint32_t log_ratelimit_burst; /* The maximum number of packets to retrieve/transmit. */ uint16_t front_max_pkt_burst; uint16_t back_max_pkt_burst; /* Length of the KNI queues in number of packets. */ uint16_t kni_queue_size; /* Maximum number of route update packets to serve at once. */ unsigned int max_rt_update_pkts; /* * Period between scans of the outstanding * resolution requests from KNIs. */ unsigned int scan_interval_sec; /* Parameters to setup the mailbox instance. 
*/ unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; /* Netlink port ID for communicating with routing daemon. */ uint32_t nl_pid; /* Parameters to setup the ARP requests mempool. */ unsigned int arp_max_entries_exp; /* Parameters to setup the ND requests mempool. */ unsigned int nd_max_entries_exp; /* * The fields below are for internal use. * Configuration files should not refer to them. */ /* The maximum number of packets submitted to CPS mailbox. */ unsigned int mailbox_max_pkt_burst; struct net_config *net; struct lls_config *lls; /* Kernel NIC interfaces for control plane messages. */ struct cps_kni front_kni; struct cps_kni back_kni; /* Mailbox to hold requests from other blocks. */ struct mailbox mailbox; /* Receive and transmit queues for both interfaces. */ uint16_t rx_queue_front; uint16_t tx_queue_front; uint16_t rx_queue_back; uint16_t tx_queue_back; /* The RX methods for both interfaces. */ uint8_t rx_method_front; uint8_t rx_method_back; /* Unanswered resolution requests from the KNIs. */ struct list_head arp_requests; struct list_head nd_requests; /* Timer to scan over outstanding resolution requests. */ struct rte_timer scan_timer; /* * Netlink socket for receiving from the routing daemon. * Bound to @nl_pid so that userspace routing daemons * can be configured to update Gatekeeper. */ struct mnl_socket *rd_nl; /* * Coroutines for * @coro_root: The main thread of CPS. * @coro_rd: The communication with routing daemons. */ struct coro_context coro_root; struct coro_context coro_rd; struct coro_stack coro_rd_stack; struct gk_config *gk; struct gt_config *gt; unsigned int total_pkt_burst; /* The packet mbuf pool for the CPS block. */ struct rte_mempool *mp; /* The ARP requests pool for the CPS block. */ struct rte_mempool *arp_mp; /* The ND requests pool for the CPS block. */ struct rte_mempool *nd_mp; }; /* Information needed to submit packets directly to KNI. 
*/ struct cps_direct_req { /* Number of packets stored in @pkts. */ unsigned int num_pkts; /* Interface that received @pkts. */ struct gatekeeper_if *iface; /* Packets to submit to KNI. */ struct rte_mbuf *pkts[0]; }; /* * Information needed for the LLS block to submit a request for * the CPS block to send ARP/ND replies back to the KNI. To do so, * the CPS block needs to know the IP and hardware address of the * map, as well as the interface on which this map was received. */ struct cps_arp_req { uint32_t ip; struct rte_ether_addr ha; struct gatekeeper_if *iface; }; struct cps_nd_req { uint8_t ip[16]; struct rte_ether_addr ha; struct gatekeeper_if *iface; }; /* Requests that can be made to the CPS block. */ enum cps_req_ty { /* Request to directly hand packets received by other blocks to KNI. */ CPS_REQ_DIRECT, /* Request to handle a response to an ARP packet. */ CPS_REQ_ARP, /* Request to handle a response to an ND packet. */ CPS_REQ_ND, }; /* Request submitted to the CPS block. */ struct cps_request { /* Type of request. */ enum cps_req_ty ty; int end_of_header[0]; union { /* If @ty is CPS_REQ_DIRECT, use @direct. */ struct cps_direct_req direct; /* If @ty is CPS_REQ_ARP, use @arp. */ struct cps_arp_req arp; /* If @ty is CPS_REQ_ND, use @nd. */ struct cps_nd_req nd; } u; }; int cps_submit_direct(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface); struct cps_config *get_cps_conf(void); int run_cps(struct net_config *net_conf, struct gk_config *gk_conf, struct gt_config *gt_conf, struct cps_config *cps_conf, struct lls_config *lls_conf); #endif /* _GATEKEEPER_CPS_H_ */ ```
/content/code_sandbox/include/gatekeeper_cps.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,479
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_GK_H_ #define _GATEKEEPER_GK_H_ #include <rte_atomic.h> #include <rte_bpf.h> #include "gatekeeper_rt.h" #include "gatekeeper_net.h" #include "gatekeeper_ipip.h" #include "gatekeeper_ggu.h" #include "gatekeeper_mailbox.h" #include "gatekeeper_sol.h" #include "gatekeeper_ratelimit.h" #include "gatekeeper_log_ratelimit.h" #include "gatekeeper_hash.h" /* Store information about a packet. */ struct ipacket { /* Flow identifier for this packet. */ struct ip_flow flow; /* Pointer to the packet itself. */ struct rte_mbuf *pkt; /* * Pointer to the l3 header. * * NOTICE * extract_packet_info() only guarantees * the length of the L3 header without extensions. */ void *l3_hdr; }; /* Structure for the GK basic measurements. */ struct gk_measurement_metrics { /* Total number of packets received. */ uint64_t tot_pkts_num; /* Total size in bytes of packets received. */ uint64_t tot_pkts_size; /* Number of packets forwarded through the granted channel. */ uint64_t pkts_num_granted; /* Size in bytes of packets forwarded through the granted channel. */ uint64_t pkts_size_granted; /* Number of packets forwarded through the request channel. */ uint64_t pkts_num_request; /* Size in bytes of packets forwarded through the request channel. */ uint64_t pkts_size_request; /* * Number of packets dropped because it has been rejected due to * a policy decision. While all packets of flows in declined state are * counted here, packets of flows in granted state may be counted here * too when these packets exceed the allocated bandwidth. 
*/ uint64_t pkts_num_declined; /* Size in bytes of packets dropped because it has been rejected. */ uint64_t pkts_size_declined; /* * Total number of packets dropped. * Declined packets are counted here as well. */ uint64_t tot_pkts_num_dropped; /* Total size in bytes of packets dropped. */ uint64_t tot_pkts_size_dropped; /* * Total number of packets distributed to other blocks. * It includes ARP packets handled to the LLS block, * and packets handed off to the IPv4 and IPv6 ACLs. */ uint64_t tot_pkts_num_distributed; /* Total size in bytes of packets distributed to other blocks. */ uint64_t tot_pkts_size_distributed; }; /* Structures for each GK instance. */ struct gk_instance { struct hs_hash ip_flow_hash_table; struct flow_entry *ip_flow_entry_table; /* RX queue on the front interface. */ uint16_t rx_queue_front; /* TX queue on the front interface. */ uint16_t tx_queue_front; /* RX queue on the back interface. */ uint16_t rx_queue_back; /* TX queue on the back interface. */ uint16_t tx_queue_back; struct mailbox mb; /* Data structure used for the GK basic measurements. */ struct gk_measurement_metrics traffic_stats; /* Data structures used to limit the rate of icmp messages. */ struct token_bucket_ratelimit_state front_icmp_rs; struct token_bucket_ratelimit_state back_icmp_rs; unsigned int num_scan_del; /* The memory pool used for packet buffers in this instance. */ struct rte_mempool *mp; struct sol_instance *sol_inst; /* Size of @ip_flow_entry_table. */ uint32_t ip_flow_entry_table_size; /* Number of items currently in @ip_flow_entry_table. */ uint32_t ip_flow_ht_num_items; } __rte_cache_aligned; #define GK_MAX_BPF_FLOW_HANDLERS (UINT8_MAX + 1) typedef uint64_t (*rte_bpf_jitted_func_t)(void *); struct gk_bpf_flow_handler { /* Required program to initialize cookies. */ struct rte_bpf *f_init; rte_bpf_jitted_func_t f_init_jit; /* Required program to decide the fate of a packet. 
*/ struct rte_bpf *f_pkt; rte_bpf_jitted_func_t f_pkt_jit; }; /* Configuration for the GK functional block. */ struct gk_config { /* The size of the flow hash table. */ unsigned int flow_ht_size; /* The maximum number of probes for an empty bucket in the table. */ unsigned int flow_ht_max_probes; /* * Factor by which to scale the number of buckets. * This allows the number of buckets to purposely * be greater than the number of entries to allow the hash * table to more easily accommodate a higher occupancy. */ double flow_ht_scale_num_bucket; /* * DPDK LPM library implements the DIR-24-8 algorithm * using two types of tables: * (1) tbl24 is a table with 2^24 entries. * (2) tbl8 is a table with 2^8 entries. * * To configure an LPM component instance, one needs to specify: * @max_rules: the maximum number of rules to support. Note that the maximum * number of LPM FIB entries should be equal to the maximum number of rules. * @number_tbl8s: the number of tbl8 tables. * * Here, it supports both IPv4 and IPv6 configuration. */ unsigned int max_num_ipv4_rules; unsigned int num_ipv4_tbl8s; unsigned int max_num_ipv6_rules; unsigned int num_ipv6_tbl8s; /* The maximum number of neighbor entries for the LPM FIB. */ unsigned int max_num_ipv6_neighbors; /* * Number of iterations of the GK block's main loop * between scanning entries of the flow table. Set to * 0 to scan an entry every iteration of the loop. */ unsigned int flow_table_scan_iter; /* * When the flow hash table is full, Gatekeeper will * enable the insertion again only after cleaning up * a number of expired flow entries. */ unsigned int scan_del_thresh; /* The maximum number of packets to retrieve/transmit. */ uint16_t front_max_pkt_burst; uint16_t back_max_pkt_burst; /* The rate and burst size of the icmp messages. */ uint32_t front_icmp_msgs_per_sec; uint32_t front_icmp_msgs_burst; uint32_t back_icmp_msgs_per_sec; uint32_t back_icmp_msgs_burst; /* Parameters to setup the mailbox instance. 
*/ unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; /* Log level for GK block. */ uint32_t log_level; /* Log ratelimit interval in ms for GK block. */ uint32_t log_ratelimit_interval_ms; /* Log ratelimit burst size for GK block. */ uint32_t log_ratelimit_burst; /* Time for logging the basic measurements in ms. */ unsigned int basic_measurement_logging_ms; /* Batch size for dumping GK FIB. */ uint8_t fib_dump_batch_size; /* * The fields below are for internal use. * Configuration files should not refer to them. */ /* * Number of references to this struct. * * The resources associated to this struct are only freed * when field @ref_cnt reaches zero. * * Use gk_conf_hold() and gk_conf_put() to acquire and release * a reference to this struct. */ rte_atomic32_t ref_cnt; /* The lcore ids at which each instance runs. */ unsigned int *lcores; /* Mapping the GK instances to the SOL instances. */ unsigned int *gk_sol_map; /* The number of lcore ids in @lcores. */ int num_lcores; /* * Array that maps a front interface * RX queue ID to the associated GK * instance index. Queue IDs not * associated to a GK instance return -1. */ int *queue_id_to_instance; struct gk_instance *instances; struct net_config *net; struct sol_config *sol_conf; /* * The LPM table used by the GK instances. * We assume that all the GK instances are * on the same numa node, so that only one global * LPM table is maintained. */ struct gk_lpm lpm_tbl; /* The RSS configuration for the front interface. */ struct gatekeeper_rss_config rss_conf_front; /* The RSS configuration for the back interface. */ struct gatekeeper_rss_config rss_conf_back; /* BPF programs available for policies to associate to flow entries. */ struct gk_bpf_flow_handler flow_handlers[GK_MAX_BPF_FLOW_HANDLERS]; }; /* A flow entry can be in one of the following states: */ enum { GK_REQUEST, GK_GRANTED, GK_DECLINED, GK_BPF }; /* * A Gatekeeper flow entry. 
* * Note: it's important to keep @flow as the first entry of * the struct since it is the key used for flow table lookups. * When bulk lookups are performed, @flow is prefetched, which * also brings into the cache the bytes after the key that * complete the cache line into cache. See also the function * type definition hs_hash_key_addr_t. */ struct flow_entry { /* IP flow information. */ struct ip_flow flow; /* RSS hash value of the IP flow. */ uint32_t flow_hash_val; /* The state of the entry. */ uint8_t state; /* Whether this entry is currently in use in ip_flow_entry_table. */ bool in_use; /* * This field was moved from u.bpf.program_index below to solve the * structure padding issue that prevents us from adding new fields. * * More specifically, before this move, the sizeof(struct flow_entry) * is 128, so we cannot add new fields due to the compilation check * in gk/main.c (i.e., RTE_BUILD_BUG_ON(sizeof(*fe) > 128)). * * Index of the BPF program associated to the GK_BPF state. */ uint8_t program_index; /* * The fib entry that instructs where * to send the packets for this flow entry. */ struct gk_fib *grantor_fib; /* * The time at which this flow entry expires (in cycles). */ uint64_t expire_at; union { struct { /* The time the last packet of the entry was seen. */ uint64_t last_packet_seen_at; /* * The priority associated to * the last packet of the entry. */ uint8_t last_priority; /* * The number of packets that the entry is allowed * to send with @last_priority without waiting * the amount of time necessary to be granted * @last_priority. */ uint8_t allowance; } request; struct { /* When @budget_byte is reset. */ uint64_t budget_renew_at; /* * When @budget_byte is reset, reset it to * @tx_rate_kib_cycle * 1024 bytes. */ uint32_t tx_rate_kib_cycle; /* * How many bytes @src can still send in current cycle. */ uint64_t budget_byte; /* * When GK should send the next renewal to * the corresponding grantor. 
*/ uint64_t send_next_renewal_at; /* * How many cycles (unit) GK must wait before * sending the next capability renewal request. */ uint64_t renewal_step_cycle; } granted; struct { /* * Memory to be passed to the BPF proram each time * it is executed. */ struct gk_bpf_cookie cookie; } bpf; } u; }; /* Define the possible command operations for GK block. */ enum gk_cmd_op { GK_ADD_POLICY_DECISION, GK_SYNCH_WITH_LPM, GK_FLUSH_FLOW_TABLE, GK_LOG_FLOW_STATE, GK_FLUSH_BPF, GK_CMD_OP_MAX, }; struct gk_add_policy { struct ggu_policy policy; uint32_t flow_hash_val; }; struct gk_synch_request { struct gk_fib *fib; bool update_only; rte_atomic32_t *done_counter; }; struct gk_flush_request { struct ip_prefix src; struct ip_prefix dst; }; struct gk_log_flow { struct ip_flow flow; uint32_t flow_hash_val; }; struct gk_flush_bpf { uint8_t program_index; rte_atomic32_t *done_counter; }; /* * Structure for each command. * * Notice that, the writers of a GK mailbox: the GK-GT unit and Dynamic config. */ struct gk_cmd_entry { enum gk_cmd_op op; union { /* GGU policy to be added with GK_ADD_POLICY_DECISION op. */ struct gk_add_policy ggu; /* FIB entry to synchronize with GK_SYNCH_WITH_LPM op. */ struct gk_synch_request synch; /* Flow table flush request with GK_FLUSH_FLOW_TABLE op. */ struct gk_flush_request flush; /* Flow state logging request with GK_LOG_FLOW_STATE op. */ struct gk_log_flow log; /* Flow table flush request with GK_FLUSH_BPF op. 
*/ struct gk_flush_bpf flush_bpf; } u; }; struct gk_config *alloc_gk_conf(void); int gk_conf_put(struct gk_config *gk_conf); int run_gk(struct net_config *net_conf, struct gk_config *gk_conf, struct sol_config *sol_conf); struct mailbox *get_responsible_gk_mailbox( uint32_t flow_hash_val, const struct gk_config *gk_conf); int gk_flush_flow_table(const char *src_prefix, const char *dst_prefix, struct gk_config *gk_conf); int gk_log_flow_state(const char *src_addr, const char *dst_addr, struct gk_config *gk_conf); int pkt_copy_cached_eth_header(struct rte_mbuf *pkt, struct ether_cache *eth_cache, size_t l2_len_out); static inline void gk_conf_hold(struct gk_config *gk_conf) { rte_atomic32_inc(&gk_conf->ref_cnt); } int gk_init_bpf_cookie(const struct gk_config *gk_conf, uint8_t program_index, struct gk_bpf_cookie *cookie); static inline struct grantor_entry * choose_grantor_per_flow(struct flow_entry *fe) { return &fe->grantor_fib->u.grantor.set->entries[ fe->flow_hash_val % fe->grantor_fib->u.grantor.set->num_entries ]; } typedef void (*fill_in_gk_cmd_entry_t)(struct gk_cmd_entry *entry, rte_atomic32_t *done_counter, void *arg); void synchronize_gk_instances(struct gk_config *gk_conf, fill_in_gk_cmd_entry_t fill_f, void *arg); #endif /* _GATEKEEPER_GK_H_ */ ```
/content/code_sandbox/include/gatekeeper_gk.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
3,438
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_HASH_H_ #define _GATEKEEPER_HASH_H_ #include <stdint.h> #include "gatekeeper_qid.h" #define HS_HASH_MISS ((uint32_t)~0) /* * Format of callback function for comparing keys. * * In order to avoid keeping track of keys inside of the hash table * itself, the caller must provide a function that compares two keys. */ typedef int (*hs_hash_key_cmp_t)(const void *key1, const void *key2, size_t key_len, const void *data); /* * Format of callback function for getting key addresses. * * While it is not required, the key should be the first field of the * struct that holds the value of the hash table. The reason for this * recommendation is that the memory prefetch is done on the key, so * if the key is the first field, the bytes after the key that complete * the cache line are also prefetched. * * This helps perform prefetching when doing bulk lookups. */ typedef const void *(*hs_hash_key_addr_t)(uint32_t user_idx, const void *data); /* Type of function that can be used for calculating the hash value. */ typedef uint32_t (*hs_hash_function)(const void *key, uint32_t key_len, uint32_t init_val, const void *data); /* * The struct hs_hash_bucket defined below is so small (i.e., 64 bits) that * increasing the number of buckets for the same number of entries in order * to allocate 100% of the entries is more effective than implementing anything * more sophisticated. 
For example, the paper *Hopscotch Hashing* by * Maurice Herlihy, Nir Shavit, and Moran Tzafrir ("the Hopscotch paper") * implemented neighborhoods as a linked list instead of as a bitmap (as here) * to reach almost 100% occupancy of the buckets. * * Any other implementation is very likely going to add at least 32 bits to * struct hs_hash_bucket due to memory alignment. Those extra 32 bits mean a * 50% growth in memory consumption for the same number of buckets. This 50% * increase in memory consumption means that our version of Hopscotch can have * 50% more buckets for the same amount of memory. * * By increasing the number of buckets by only 25% relative to the number of * entries, and having an occupancy of only 4/5 = 80% of the buckets, we can * allocate 100% (i.e., 1.25 * 4/5 = 1) of the entries. * * A typical flow table of a GK instance has at least 250M entries. This means * that there are at least ceiling(log_2(250M)) = 28 bits to track * neighborhoods. Using Lemma 6 of the Hopscotch paper, and assuming that all * 28 entries belong to the same neighborhood (the worst case), the expected * occupancy of the buckets is 1 - 1 / (sqrt(2 * 28 - 1) > 86.5% which is * safely greater than 80%. * * An upper bound for the occupancy of the buckets is to be obtained by setting * field @max_probes of struct hs_hash equal to 80 (i.e., 10 cache lines of 64 * bytes) and assuming that all those buckets can be allocated. By Lemma 6 * again, the occupancy of the buckets is 1 - 1 / (sqrt(2 * 80 - 1) > 92%. * * Since hs_hash_create() aligns the number of buckets to the next power of 2, * the number of buckets can be double the number of entries. In this extreme * case -- namely, 50% occupancy of the buckets -- not only is 100% allocation * of the entries virtually guaranteed (i.e., 2 * 0.5 = 1), but each * neighborhood has at most 3 entries; according to Lemma 6 and assuming all * entries belong to the same neighborhood (1 + (1/(1 - 50%))^2)/2 = 2.5. 
* Therefore, even the smallest hash table with 8 entries (see hs_hash_create()) * has enough bits (i.e., 3 bits) to track all entries in a neighborhood. * Moreover, having at most 3 entries per neighborhood implies that most * neighborhoods are placed in a single cache line; the exception happens when * a neighborhood falls between cache lines. * * As a reference for future changes to this library, it's worth noticing that * one can free the most significant bit of the field @idx by removing the most * significant bit from HS_HASH_MISS and adjusting the code to free the bit. * This is possible because the largest index is (HS_HASH_MAX_NUM_ENTRIES - 1). */ struct hs_hash_bucket { /* High bits of the hash and neighborhood. */ uint32_t hh_nbh; /* The index of this bucket in the entries array. */ uint32_t user_idx; }; struct hs_hash { /* Hopscotch hash buckets. */ struct hs_hash_bucket *buckets; /* Number of buckets in @buckets array. */ uint32_t num_buckets; /* Maximum number of neighborhoods to probe to find an empty bucket. */ uint32_t max_probes; /* * The size of the neighborhood portion of the field * @hh_nbh of struct hs_hash_bucket. */ uint8_t neighborhood_size; /* * The mask for the neighborhood portion of * the field @hh_nbh of struct hs_hash_bucket. */ uint32_t neighborhood_mask; /* * The mask for the high bits of the hash portion of * the field @hh_nbh of struct hs_hash_bucket. */ uint32_t high_hash_mask; /* IDs for the client's array of entries. */ struct qid entry_qid; /* Hash function. */ hs_hash_function hash_func; /* Length of hash key. */ uint32_t key_len; /* Initial value used by @hash_func. */ uint32_t hash_func_init_val; /* User-defined data for @hash_func. */ void *hash_func_data; /* Function used to compare keys. */ hs_hash_key_cmp_t key_cmp_fn; /* Data to be passed to @key_cmp_fn. */ void *key_cmp_fn_data; /* Function used to compare keys. */ hs_hash_key_addr_t key_addr_fn; /* Data to be passed to @key_addr_fn. 
*/ void *key_addr_fn_data; }; struct hs_hash_parameters { /* Name of the hash. */ const char *name; /* Length of client's array of entries. */ uint32_t num_entries; /* Maximum number of probes for an empty bucket. */ uint32_t max_probes; /* Factor by which to scale the number of buckets. */ double scale_num_bucket; /* NUMA socket ID for memory. */ int socket_id; /* Length of hash key. */ uint32_t key_len; /* Hash function. */ hs_hash_function hash_func; /* Initial value used by @hash_func. */ uint32_t hash_func_init_val; /* User-defined data for @hash_func. */ void *hash_func_data; /* Function used to compare keys. */ hs_hash_key_cmp_t key_cmp_fn; /* Data to be passed to @key_cmp_fn. */ void *key_cmp_fn_data; /* Function used to compare keys. */ hs_hash_key_addr_t key_addr_fn; /* Data to be passed to @key_addr_fn. */ void *key_addr_fn_data; }; int hs_hash_create(struct hs_hash *h, const struct hs_hash_parameters *params); void hs_hash_free(struct hs_hash *h); int hs_hash_add_key_with_hash(struct hs_hash *h, const void *key, uint32_t hash, uint32_t *p_user_idx); int hs_hash_del_key_with_hash(struct hs_hash *h, const void *key, uint32_t hash, uint32_t *p_user_idx); static inline uint32_t hs_hash_hash(const struct hs_hash *h, const void *key) { return h->hash_func(key, h->key_len, h->hash_func_init_val, h->hash_func_data); } int hs_hash_lookup_with_hash(const struct hs_hash *h, const void *key, uint32_t hash, uint32_t *p_user_idx); int hs_hash_lookup_with_hash_bulk(const struct hs_hash *h, const void **keys, const uint32_t *hashes, uint32_t n, uint32_t *user_indexes); int hs_hash_iterate(const struct hs_hash *h, uint32_t *next, uint32_t *p_user_idx); void hs_hash_prefetch_bucket_non_temporal(const struct hs_hash *h, uint32_t hash); #endif /* _GATEKEEPER_HASH_H_ */ ```
/content/code_sandbox/include/gatekeeper_hash.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,027
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_LLS_ARP_H_ #define _GATEKEEPER_LLS_ARP_H_ #include "gatekeeper_lls.h" #include "gatekeeper_net.h" /* Whether ARP is enabled on this interface. */ int iface_arp_enabled(struct net_config *net, struct gatekeeper_if *iface); /* Return whether @addr is in the same subnet as @iface's IPv4 address. */ int ipv4_in_subnet(struct gatekeeper_if *iface, const struct ipaddr *addr); /* Transmit an ARP request packet. */ void xmit_arp_req(struct gatekeeper_if *iface, const struct ipaddr *addr, const struct rte_ether_addr *ha, uint16_t tx_queue); /* * Process an ARP packet that arrived on @iface. * * Returns 0 if the packet was transmitted (and already freed), * -1 if it does not need to be transmitted (and needs to be freed). */ int process_arp(struct lls_config *lls_conf, struct gatekeeper_if *iface, uint16_t tx_queue, struct rte_mbuf *buf, struct rte_ether_hdr *eth_hdr, struct rte_arp_hdr *arp_hdr); #endif /* _GATEKEEPER_LLS_ARP_H_ */ ```
/content/code_sandbox/lls/arp.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
356
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdbool.h> #include <arpa/inet.h> #include <rte_hash.h> #include <rte_icmp.h> #include "gatekeeper_cps.h" #include "gatekeeper_l2.h" #include "gatekeeper_lls.h" #include "gatekeeper_varip.h" #include "cache.h" #include "arp.h" #include "nd.h" static void lls_send_request(struct lls_config *lls_conf, struct lls_cache *cache, const struct ipaddr *addr, const struct rte_ether_addr *ha) { struct gatekeeper_if *front = &lls_conf->net->front; struct gatekeeper_if *back = &lls_conf->net->back; if (cache->iface_enabled(lls_conf->net, front) && cache->ip_in_subnet(front, addr)) cache->xmit_req(&lls_conf->net->front, addr, ha, lls_conf->tx_queue_front); if (cache->iface_enabled(lls_conf->net, back) && cache->ip_in_subnet(back, addr)) cache->xmit_req(&lls_conf->net->back, addr, ha, lls_conf->tx_queue_back); } static void lls_cache_dump(struct lls_cache *cache) { uint32_t iter = 0; int32_t index; const void *key; void *data; G_LOG(DEBUG, "LLS cache (%s)\n=====================\n", cache->name); index = rte_hash_iterate(cache->hash, &key, &data, &iter); while (index >= 0) { struct lls_record *record = &cache->records[index]; struct lls_map *map = &record->map; char ip_str[MAX_INET_ADDRSTRLEN]; int ret = convert_ip_to_str(&map->addr, ip_str, sizeof(ip_str)); if (unlikely(ret < 0)) { G_LOG(DEBUG, "Couldn't convert cache record's IP address to string\n"); goto next; } if (map->stale) { G_LOG(DEBUG, "%s: unresolved (%u holds)\n", ip_str, record->num_holds); } else { G_LOG(DEBUG, "%s: %02"PRIx8":%02"PRIx8":%02"PRIx8":%02"PRIx8":%02"PRIx8":%02"PRIx8" (port %hhu) (%u 
holds)\n", ip_str, map->ha.addr_bytes[0], map->ha.addr_bytes[1], map->ha.addr_bytes[2], map->ha.addr_bytes[3], map->ha.addr_bytes[4], map->ha.addr_bytes[5], map->port_id, record->num_holds); } next: index = rte_hash_iterate(cache->hash, &key, &data, &iter); } } static void lls_update_subscribers(struct lls_record *record) { unsigned int i; for (i = 0; i < record->num_holds; i++) { int call_again = false; record->holds[i].cb(&record->map, record->holds[i].arg, LLS_REPLY_RESOLUTION, &call_again); if (call_again) continue; /* Delete hold; keep all holds in beginning of array. */ record->num_holds--; if (i < record->num_holds) { rte_memcpy(&record->holds[i], &record->holds[record->num_holds], sizeof(record->holds[i])); /* * This cancels out the update of the for loop so we * can redo update of hold at position @i, if needed. */ i--; } } } static int lls_add_record(struct lls_cache *cache, const struct ipaddr *addr) { int ret = rte_hash_add_key(cache->hash, &addr->ip); if (unlikely(ret == -EINVAL || ret == -ENOSPC)) { char ip_str[MAX_INET_ADDRSTRLEN]; int ret2 = convert_ip_to_str(addr, ip_str, sizeof(ip_str)); G_LOG(ERR, "%s, could not add record for %s\n", ret == -EINVAL ? "Invalid params" : "No space", ret2 < 0 ? cache->name : ip_str); } else RTE_VERIFY(ret >= 0); return ret; } static void lls_del_record(struct lls_cache *cache, const struct ipaddr *addr) { int32_t ret = rte_hash_del_key(cache->hash, &addr->ip); if (unlikely(ret == -ENOENT || ret == -EINVAL)) { char ip_str[MAX_INET_ADDRSTRLEN]; int ret2 = convert_ip_to_str(addr, ip_str, sizeof(ip_str)); G_LOG(ERR, "%s, record for %s not deleted\n", ret == -ENOENT ? "No map found" : "Invalid params", ret2 < 0 ? 
cache->name : ip_str); } } static void lls_process_hold(struct lls_config *lls_conf, struct lls_hold_req *hold_req) { struct lls_cache *cache = hold_req->cache; struct lls_record *record; int ret = rte_hash_lookup(cache->hash, &hold_req->addr.ip); if (ret == -ENOENT) { ret = lls_add_record(cache, &hold_req->addr); if (ret < 0) return; record = &cache->records[ret]; record->map.stale = true; record->map.addr = hold_req->addr; record->ts = time(NULL); RTE_VERIFY(record->ts >= 0); record->holds[0] = hold_req->hold; record->num_holds = 1; /* Try to resolve record using broadcast. */ lls_send_request(lls_conf, cache, &hold_req->addr, NULL); if (lls_conf->log_level == RTE_LOG_DEBUG) lls_cache_dump(cache); return; } else if (unlikely(ret == -EINVAL)) { char ip_str[MAX_INET_ADDRSTRLEN]; ret = convert_ip_to_str(&hold_req->addr, ip_str, sizeof(ip_str)); G_LOG(ERR, "Invalid params, could not get %s map; hold failed\n", ret < 0 ? cache->name : ip_str); return; } RTE_VERIFY(ret >= 0); record = &cache->records[ret]; if (!record->map.stale) { int call_again = false; /* Alert requester this map is ready. */ hold_req->hold.cb(&record->map, hold_req->hold.arg, LLS_REPLY_RESOLUTION, &call_again); if (!call_again) return; } record->holds[record->num_holds++] = hold_req->hold; if (lls_conf->log_level == RTE_LOG_DEBUG) lls_cache_dump(cache); } static void lls_process_put(struct lls_config *lls_conf, struct lls_put_req *put_req) { struct lls_cache *cache = put_req->cache; struct lls_record *record; unsigned int i; int ret = rte_hash_lookup(cache->hash, &put_req->addr.ip); if (ret == -ENOENT) { /* * Not necessarily an error: the block may have indicated * it did not want its callback to be called again, and * all holds have been released on that entry. */ return; } else if (unlikely(ret == -EINVAL)) { char ip_str[MAX_INET_ADDRSTRLEN]; ret = convert_ip_to_str(&put_req->addr, ip_str, sizeof(ip_str)); G_LOG(ERR, "Invalid params, could not get %s map; put failed\n", ret < 0 ? 
cache->name : ip_str); return; } RTE_VERIFY(ret >= 0); record = &cache->records[ret]; for (i = 0; i < record->num_holds; i++) { if (put_req->lcore_id == record->holds[i].lcore_id) break; } /* Requesting lcore not found in holds. */ if (i == record->num_holds) return; /* * Alert the requester that its hold will be removed, so it * may free any state that is keeping track of that hold. * * Technically the hold will be removed in the step * below, but alerting the requester first removes the need * to copy the hold into a temporary variable, remove * the hold from record->holds, and then alert the * requester using the temporary variable. This is OK since * there's only one writer. */ record->holds[i].cb(&record->map, record->holds[i].arg, LLS_REPLY_FREE, NULL); /* Keep all holds in beginning of array. */ record->num_holds--; if (i < record->num_holds) rte_memcpy(&record->holds[i], &record->holds[record->num_holds], sizeof(record->holds[i])); if (lls_conf->log_level == RTE_LOG_DEBUG) lls_cache_dump(cache); } void lls_process_mod(struct lls_config *lls_conf, struct lls_mod_req *mod_req) { struct lls_cache *cache = mod_req->cache; struct lls_record *record; int changed_ha = false; int changed_port = false; int changed_stale = false; int ret = rte_hash_lookup(cache->hash, &mod_req->addr.ip); if (ret == -ENOENT) { ret = lls_add_record(cache, &mod_req->addr); if (ret < 0) return; /* Fill-in new record. */ record = &cache->records[ret]; rte_ether_addr_copy(&mod_req->ha, &record->map.ha); record->map.port_id = mod_req->port_id; record->map.stale = false; record->map.addr = mod_req->addr; record->ts = mod_req->ts; record->num_holds = 0; if (lls_conf->log_level == RTE_LOG_DEBUG) lls_cache_dump(cache); return; } else if (unlikely(ret == -EINVAL)) { char ip_str[MAX_INET_ADDRSTRLEN]; ret = convert_ip_to_str(&mod_req->addr, ip_str, sizeof(ip_str)); G_LOG(ERR, "Invalid params, could not get %s map; mod failed\n", ret < 0 ? 
cache->name : ip_str); return; } RTE_VERIFY(ret >= 0); record = &cache->records[ret]; if (!rte_is_same_ether_addr(&mod_req->ha, &record->map.ha)) { rte_ether_addr_copy(&mod_req->ha, &record->map.ha); changed_ha = true; } if (record->map.port_id != mod_req->port_id) { record->map.port_id = mod_req->port_id; changed_port = true; } if (record->map.stale) { record->map.stale = false; changed_stale = true; } record->ts = mod_req->ts; if (changed_ha || changed_port || changed_stale) { lls_update_subscribers(record); if (lls_conf->log_level == RTE_LOG_DEBUG) lls_cache_dump(cache); } } /* * According to RFC 792, the ICMP checksum is computed * over all of the words in the ICMP "header." This is ambiguous * because ICMP messages can have a "header" portion and * a "data" portion. More accurately, the checksum is computed * over the header *and* data portion, as described by * the formats of the individual ICMP message types in RFC 792. * * @buf is a pointer to the start of the ICMP header. * @size is the length of the ICMP message, including * both the header and data portion. */ unsigned short icmp_cksum(void *buf, unsigned int size) { unsigned short *buffer = buf; unsigned long cksum = 0; while (size > 1) { cksum += *buffer++; size -= sizeof(*buffer); } if (size) cksum += *(unsigned char *)buffer; cksum = (cksum >> 16) + (cksum & 0xffff); cksum += (cksum >> 16); return (unsigned short)(~cksum); } static void submit_icmp_packets(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface, struct lls_config *lls_conf) { struct token_bucket_ratelimit_state *rs = iface == &lls_conf->net->front ? 
&lls_conf->front_icmp_rs : &lls_conf->back_icmp_rs; unsigned int num_granted_pkts = tb_ratelimit_allow_n(num_pkts, rs); cps_submit_direct(pkts, num_granted_pkts, iface); rte_pktmbuf_free_bulk(&pkts[num_granted_pkts], num_pkts - num_granted_pkts); } static void process_icmp_pkts(struct lls_config *lls_conf, struct lls_icmp_req *icmp) { struct rte_mbuf *kni_pkts[icmp->num_pkts]; unsigned int num_kni_pkts = 0; int i; for (i = 0; i < icmp->num_pkts; i++) { struct rte_mbuf *pkt = icmp->pkts[i]; struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv4_hdr *ip4hdr; struct rte_icmp_hdr *icmphdr; size_t l2_len = pkt_in_l2_hdr_len(pkt); pkt_in_skip_l2(pkt, eth_hdr, (void **)&ip4hdr); if (unlikely(pkt->data_len < (ICMP_PKT_MIN_LEN(l2_len) + ipv4_hdr_len(ip4hdr) - sizeof(*ip4hdr)))) { rte_pktmbuf_free(pkt); continue; } /* * We must check whether the packet is fragmented here because * although match_icmp() checks for it, the ACL rule does not. */ if (unlikely(rte_ipv4_frag_pkt_is_fragmented(ip4hdr))) { kni_pkts[num_kni_pkts++] = pkt; continue; } /* * We don't need to make sure the next header is ICMP * because both match_icmp() and the ACL rule already check. 
*/ icmphdr = (struct rte_icmp_hdr *)ipv4_skip_exthdr(ip4hdr); if (icmphdr->icmp_type == ICMP_DEST_UNREACHABLE_TYPE && icmphdr->icmp_code == ICMP_FRAG_REQ_DF_CODE) { char src_ip_buf[INET_ADDRSTRLEN]; const char *src_ip_or_err = inet_ntop(AF_INET, &ip4hdr->src_addr, src_ip_buf, sizeof(src_ip_buf)); if (unlikely(!src_ip_or_err)) src_ip_or_err = "(could not convert IP to string)"; G_LOG(ERR, "Received \"Fragmentation required, and DF flag set\" ICMP packet on the %s interface from source IP %s; check MTU along path\n", icmp->iface->name, src_ip_or_err); } kni_pkts[num_kni_pkts++] = pkt; } if (num_kni_pkts > 0) { submit_icmp_packets(kni_pkts, num_kni_pkts, icmp->iface, lls_conf); } } static void process_icmp6_pkts(struct lls_config *lls_conf, struct lls_icmp6_req *icmp6) { struct rte_mbuf *kni_pkts[icmp6->num_pkts]; unsigned int num_kni_pkts = 0; int i; for (i = 0; i < icmp6->num_pkts; i++) { struct rte_mbuf *pkt = icmp6->pkts[i]; /* * The ICMPv6 header offset in terms of the * beginning of the IPv6 header. */ int icmpv6_offset; uint8_t nexthdr; struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv6_hdr *ip6hdr; struct icmpv6_hdr *icmp6_hdr; size_t l2_len = pkt_in_l2_hdr_len(pkt); pkt_in_skip_l2(pkt, eth_hdr, (void **)&ip6hdr); /* * We must check whether the packet is fragmented here because * although match_icmp6() checks for it, the ACL rule does not. */ if (unlikely(rte_ipv6_frag_get_ipv6_fragment_header(ip6hdr) != NULL)) { kni_pkts[num_kni_pkts++] = pkt; continue; } /* * We don't need to make sure the next header is ICMPv6 * because both match_icmp6() and the ACL rule already check. * We also don't need to verify that the header extensions * were not malformed, since if there were extension headers * then match_icmp6() would have already verified them. But * we can at least add an assertion to catch bugs. 
*/ icmpv6_offset = ipv6_skip_exthdr(ip6hdr, pkt->data_len - l2_len, &nexthdr); RTE_VERIFY(icmpv6_offset >= 0); icmp6_hdr = (struct icmpv6_hdr *)((uint8_t *)ip6hdr + icmpv6_offset); if (pkt_is_nd_neighbor(icmp6_hdr->type, icmp6_hdr->code)) { if (process_nd(lls_conf, icmp6->iface, pkt) == -1) rte_pktmbuf_free(pkt); continue; } if (icmp6_hdr->type == ICMPV6_PACKET_TOO_BIG_TYPE && icmp6_hdr->code == ICMPV6_PACKET_TOO_BIG_CODE) { char src_ip_buf[INET6_ADDRSTRLEN]; const char *src_ip_or_err = inet_ntop(AF_INET6, &ip6hdr->src_addr, src_ip_buf, sizeof(src_ip_buf)); if (unlikely(!src_ip_or_err)) src_ip_or_err = "(could not convert IP to string)"; G_LOG(ERR, "Received \"Packet Too Big\" ICMPv6 packet on %s interface from source IP %s; check MTU along path\n", icmp6->iface->name, src_ip_or_err); } kni_pkts[num_kni_pkts++] = pkt; } if (num_kni_pkts > 0) { submit_icmp_packets(kni_pkts, num_kni_pkts, icmp6->iface, lls_conf); } } unsigned int lls_process_reqs(struct lls_config *lls_conf) { unsigned int mailbox_burst_size = lls_conf->mailbox_burst_size; struct lls_request *reqs[mailbox_burst_size]; unsigned int count = mb_dequeue_burst(&lls_conf->requests, (void **)reqs, mailbox_burst_size); unsigned int i; for (i = 0; i < count; i++) { switch (reqs[i]->ty) { case LLS_REQ_HOLD: lls_process_hold(lls_conf, &reqs[i]->u.hold); break; case LLS_REQ_PUT: lls_process_put(lls_conf, &reqs[i]->u.put); break; case LLS_REQ_ARP: { struct lls_arp_req *arp = &reqs[i]->u.arp; uint16_t tx_queue = (arp->iface == &lls_conf->net->front) ? 
lls_conf->tx_queue_front : lls_conf->tx_queue_back; int i; for (i = 0; i < arp->num_pkts; i++) { struct rte_mbuf *pkt = arp->pkts[i]; struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_arp_hdr *arp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_arp_hdr *, pkt_in_l2_hdr_len(pkt)); if (process_arp(lls_conf, arp->iface, tx_queue, pkt, eth_hdr, arp_hdr) == -1) rte_pktmbuf_free(pkt); } break; } case LLS_REQ_ICMP: process_icmp_pkts(lls_conf, &reqs[i]->u.icmp); break; case LLS_REQ_ICMP6: process_icmp6_pkts(lls_conf, &reqs[i]->u.icmp6); break; default: G_LOG(ERR, "Unrecognized request type (%d)\n", reqs[i]->ty); break; } mb_free_entry(&lls_conf->requests, reqs[i]); } return count; } struct lls_map * lls_cache_get(struct lls_cache *cache, const struct ipaddr *addr) { int ret = rte_hash_lookup(cache->hash, &addr->ip); if (ret < 0) return NULL; return &cache->records[ret].map; } void lls_cache_scan(struct lls_config *lls_conf, struct lls_cache *cache) { uint32_t iter = 0; int32_t index; const void *key; void *data; struct gatekeeper_if *front = &lls_conf->net->front; struct gatekeeper_if *back = &lls_conf->net->back; time_t now = time(NULL); RTE_VERIFY(now >= 0); index = rte_hash_iterate(cache->hash, (void *)&key, &data, &iter); while (index >= 0) { struct lls_record *record = &cache->records[index]; struct ipaddr *addr = &record->map.addr; uint32_t timeout; /* * If a map is already stale, continue to * try to resolve it while there's interest. */ if (record->map.stale) { if (record->num_holds > 0) lls_send_request(lls_conf, cache, addr, NULL); else lls_del_record(cache, addr); goto next; } if (record->map.port_id == front->id) timeout = cache->front_timeout_sec; else if (lls_conf->net->back_iface_enabled && record->map.port_id == back->id) timeout = cache->back_timeout_sec; else { char ip_str[MAX_INET_ADDRSTRLEN]; int ret = convert_ip_to_str(addr, ip_str, sizeof(ip_str)); G_LOG(ERR, "Map for %s has an invalid port %hhu\n", ret < 0 ? 
cache->name : ip_str, record->map.port_id); lls_del_record(cache, addr); goto next; } if (now - record->ts >= timeout) { record->map.stale = true; lls_update_subscribers(record); if (record->num_holds > 0) lls_send_request(lls_conf, cache, addr, &record->map.ha); } else if (timeout > lls_conf->cache_scan_interval_sec && (now - record->ts >= timeout - lls_conf-> cache_scan_interval_sec)) { /* * If the record is close to being stale, * preemptively send a unicast probe. */ if (record->num_holds > 0) lls_send_request(lls_conf, cache, addr, &record->map.ha); } next: index = rte_hash_iterate(cache->hash, (void *)&key, &data, &iter); } if (get_lls_conf()->log_level == RTE_LOG_DEBUG) lls_cache_dump(cache); } void lls_cache_destroy(struct lls_cache *cache) { rte_hash_free(cache->hash); cache->hash = NULL; rte_free(cache->records); cache->records = NULL; } int lls_cache_init(struct lls_config *lls_conf, struct lls_cache *cache, uint32_t key_len) { unsigned int socket_id = rte_lcore_to_socket_id(lls_conf->lcore_id); struct rte_hash_parameters lls_cache_params = { .name = cache->name, .entries = lls_conf->max_num_cache_records < HASH_TBL_MIN_SIZE ? HASH_TBL_MIN_SIZE : lls_conf->max_num_cache_records, .reserved = 0, .key_len = key_len, .hash_func = DEFAULT_HASH_FUNC, .hash_func_init_val = 0, .socket_id = socket_id, /* * Enable concurrency control for race conditions * between writers (LLS) and readers (Dynamic Config). */ .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY, }; cache->records = rte_calloc_socket("lls_records", lls_conf->max_num_cache_records, sizeof(*cache->records), 0, socket_id); if (cache->records == NULL) { G_LOG(ERR, "Could not allocate %s cache records\n", cache->name); return -1; } cache->hash = rte_hash_create(&lls_cache_params); if (cache->hash == NULL) { G_LOG(ERR, "Could not create %s cache hash\n", cache->name); goto records; } return 0; records: lls_cache_destroy(cache); return -1; } ```
/content/code_sandbox/lls/cache.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
5,836
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_LLS_ND_H_ #define _GATEKEEPER_LLS_ND_H_ #include "gatekeeper_lls.h" #include "gatekeeper_net.h" /* Whether ND is enabled on this interface. */ int iface_nd_enabled(struct net_config *net, struct gatekeeper_if *iface); /* Return whether @addr is in the same subnet as @iface's IPv6 address. */ int ipv6_in_subnet(struct gatekeeper_if *iface, const struct ipaddr *addr); /* Transmit an ND request packet. */ void xmit_nd_req(struct gatekeeper_if *iface, const struct ipaddr *addr, const struct rte_ether_addr *ha, uint16_t tx_queue); /* * Process an ND neighbor packet that arrived on @iface. * * Returns 0 if the packet was transmitted (and already freed), * -1 if it does not need to be transmitted (and needs to be freed). */ int process_nd(struct lls_config *lls_conf, struct gatekeeper_if *iface, struct rte_mbuf *buf); #endif /* _GATEKEEPER_LLS_ND_H_ */ ```
/content/code_sandbox/lls/nd.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
330
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <arpa/inet.h> #include <rte_arp.h> #include "gatekeeper_l2.h" #include "arp.h" #include "cache.h" int iface_arp_enabled(struct net_config *net, struct gatekeeper_if *iface) { /* When @iface is the back, need to make sure it's enabled. */ if (iface == &net->back) return net->back_iface_enabled && ipv4_if_configured(iface); /* @iface is the front interface. */ return ipv4_if_configured(iface); } int ipv4_in_subnet(struct gatekeeper_if *iface, const struct ipaddr *addr) { return ip4_same_subnet(iface->ip4_addr.s_addr, addr->ip.v4.s_addr, iface->ip4_mask.s_addr); } void xmit_arp_req(struct gatekeeper_if *iface, const struct ipaddr *addr, const struct rte_ether_addr *ha, uint16_t tx_queue) { struct rte_mbuf *created_pkt; struct rte_ether_hdr *eth_hdr; struct rte_arp_hdr *arp_hdr; size_t pkt_size; struct lls_config *lls_conf = get_lls_conf(); int ret; created_pkt = rte_pktmbuf_alloc(lls_conf->mp); if (created_pkt == NULL) { G_LOG(ERR, "Could not alloc a packet for an ARP request\n"); return; } pkt_size = iface->l2_len_out + sizeof(struct rte_arp_hdr); created_pkt->data_len = pkt_size; created_pkt->pkt_len = pkt_size; /* Set-up Ethernet header. */ eth_hdr = rte_pktmbuf_mtod(created_pkt, struct rte_ether_hdr *); rte_ether_addr_copy(&iface->eth_addr, &eth_hdr->src_addr); if (ha == NULL) memset(&eth_hdr->dst_addr, 0xFF, RTE_ETHER_ADDR_LEN); else rte_ether_addr_copy(ha, &eth_hdr->dst_addr); /* Set-up VLAN header. 
*/ if (iface->vlan_insert) fill_vlan_hdr(eth_hdr, iface->ipv4_vlan_tag_be, RTE_ETHER_TYPE_ARP); else eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP); /* Set-up ARP header. */ arp_hdr = pkt_out_skip_l2(iface, eth_hdr); arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER); arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN; arp_hdr->arp_plen = sizeof(struct in_addr); arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REQUEST); rte_ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_sha); arp_hdr->arp_data.arp_sip = iface->ip4_addr.s_addr; memset(&arp_hdr->arp_data.arp_tha, 0, RTE_ETHER_ADDR_LEN); arp_hdr->arp_data.arp_tip = addr->ip.v4.s_addr; ret = rte_eth_tx_burst(iface->id, tx_queue, &created_pkt, 1); if (ret <= 0) { rte_pktmbuf_free(created_pkt); G_LOG(ERR, "Could not transmit an ARP request\n"); } } int process_arp(struct lls_config *lls_conf, struct gatekeeper_if *iface, uint16_t tx_queue, struct rte_mbuf *buf, struct rte_ether_hdr *eth_hdr, struct rte_arp_hdr *arp_hdr) { struct ipaddr addr = { .proto = RTE_ETHER_TYPE_IPV4, .ip.v4.s_addr = arp_hdr->arp_data.arp_sip, }; struct lls_mod_req mod_req; uint16_t pkt_len; size_t l2_len; int ret; if (unlikely(!ipv4_if_configured(iface))) return -1; /* pkt_in_skip_l2() already called by LLS. 
*/ l2_len = pkt_in_l2_hdr_len(buf); pkt_len = rte_pktmbuf_data_len(buf); if (pkt_len < l2_len + sizeof(*arp_hdr)) { G_LOG(ERR, "%s interface received ARP packet of size %hu bytes, but it should be at least %zu bytes\n", iface->name, pkt_len, l2_len + sizeof(*arp_hdr)); return -1; } ret = verify_l2_hdr(iface, eth_hdr, buf->l2_type, "ARP", iface->ipv4_vlan_tag_be); if (ret < 0) return ret; if (unlikely(arp_hdr->arp_hardware != rte_cpu_to_be_16( RTE_ARP_HRD_ETHER) || arp_hdr->arp_protocol != rte_cpu_to_be_16( RTE_ETHER_TYPE_IPV4) || arp_hdr->arp_hlen != RTE_ETHER_ADDR_LEN || arp_hdr->arp_plen != sizeof(struct in_addr))) return -1; /* If sip is not in the same subnet as our IP address, drop. */ if (!ipv4_in_subnet(iface, &addr)) return -1; /* Update cache with source resolution, regardless of operation. */ mod_req.cache = &lls_conf->arp_cache; mod_req.addr = addr; rte_ether_addr_copy(&arp_hdr->arp_data.arp_sha, &mod_req.ha); mod_req.port_id = iface->id; mod_req.ts = time(NULL); RTE_VERIFY(mod_req.ts >= 0); lls_process_mod(lls_conf, &mod_req); /* * If it's a Gratuitous ARP or if the target address * is not us, then no response is needed. */ if (is_garp_pkt(arp_hdr) || (iface->ip4_addr.s_addr != arp_hdr->arp_data.arp_tip)) return -1; switch (rte_be_to_cpu_16(arp_hdr->arp_opcode)) { case RTE_ARP_OP_REQUEST: { uint16_t num_tx; /* * We are reusing the frame, but an ARP reply always goes out * the same interface that received it. Therefore, the L2 * space of the frame is the same. If needed, the correct * VLAN tag was set in verify_l2_hdr(). */ /* Set-up Ethernet header. */ rte_ether_addr_copy(&eth_hdr->src_addr, &eth_hdr->dst_addr); rte_ether_addr_copy(&iface->eth_addr, &eth_hdr->src_addr); /* Set-up ARP header. 
*/ arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY); rte_ether_addr_copy(&arp_hdr->arp_data.arp_sha, &arp_hdr->arp_data.arp_tha); arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip; rte_ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_sha); arp_hdr->arp_data.arp_sip = iface->ip4_addr.s_addr; /* Need to transmit reply. */ num_tx = rte_eth_tx_burst(iface->id, tx_queue, &buf, 1); if (unlikely(num_tx != 1)) { G_LOG(NOTICE, "ARP reply failed\n"); return -1; } return 0; } case RTE_ARP_OP_REPLY: /* * No further action required. Could check to make sure * arp_hdr->arp_data.arp_tha is equal to arp->ether_addr, * but there's nothing that can be done if it's wrong anyway. */ return -1; default: G_LOG(NOTICE, "%s received an ARP packet with an unknown operation (%hu)\n", __func__, rte_be_to_cpu_16(arp_hdr->arp_opcode)); return -1; } } ```
/content/code_sandbox/lls/arp.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,852
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <stdbool.h> #include <unistd.h> #include <rte_cycles.h> #include <rte_ethdev.h> #include <rte_icmp.h> #include "gatekeeper_config.h" #include "gatekeeper_l2.h" #include "gatekeeper_launch.h" #include "gatekeeper_lls.h" #include "gatekeeper_varip.h" #include "arp.h" #include "cache.h" #include "nd.h" /* * When using LACP, the requirement must be met: * * - RX/TX burst functions must be invoked at least once every 100ms. * To do so, the RX burst function is called with every iteration * of the loop in lls_proc(), and lls_lacp_announce() fulfills the * TX burst requirement on a timer that runs slightly more frequently * than every 100ms, defined below. */ #define LLS_LACP_ANNOUNCE_INTERVAL_MS 99 /* * XXX #64 Don't alert user of LLS transmission failures while LACP * is still configuring, and warn the user if LACP is taking an * unusually long time to configure (since this could mean the * link partner does not have LACP configured). 
*/ static struct lls_config lls_conf = { .arp_cache = { .name = "arp", .iface_enabled = iface_arp_enabled, .ip_in_subnet = ipv4_in_subnet, .xmit_req = xmit_arp_req, }, .nd_cache = { .name = "nd", .iface_enabled = iface_nd_enabled, .ip_in_subnet = ipv6_in_subnet, .xmit_req = xmit_nd_req, }, }; struct lls_config * get_lls_conf(void) { return &lls_conf; } static int cleanup_lls(void) { struct net_config *net_conf = lls_conf.net; if (lacp_enabled(net_conf, &net_conf->back)) rte_timer_stop(&net_conf->back.lacp_timer); if (lacp_enabled(net_conf, &net_conf->front)) rte_timer_stop(&net_conf->front.lacp_timer); if (nd_enabled(&lls_conf)) lls_cache_destroy(&lls_conf.nd_cache); if (arp_enabled(&lls_conf)) lls_cache_destroy(&lls_conf.arp_cache); rte_timer_stop(&lls_conf.log_timer); rte_timer_stop(&lls_conf.scan_timer); destroy_mailbox(&lls_conf.requests); destroy_mempool(lls_conf.mp); return 0; } int hold_arp(lls_req_cb cb, void *arg, struct in_addr *ipv4, unsigned int lcore_id) { struct lls_request *req; if (unlikely(!arp_enabled(&lls_conf))) { G_LOG(ERR, "%s(lcore=%u): ARP service is not enabled\n", __func__, lcore_id); return -ENOTSUP; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) return -ENOENT; *req = (typeof(*req)) { .ty = LLS_REQ_HOLD, .u.hold = { .cache = &lls_conf.arp_cache, .addr = { .proto = RTE_ETHER_TYPE_IPV4, .ip.v4 = *ipv4, }, .hold = { .cb = cb, .arg = arg, .lcore_id = lcore_id, }, }, }; return mb_send_entry(&lls_conf.requests, req); } int put_arp(struct in_addr *ipv4, unsigned int lcore_id) { struct lls_request *req; if (unlikely(!arp_enabled(&lls_conf))) { G_LOG(ERR, "%s(lcore=%u): ARP service is not enabled\n", __func__, lcore_id); return -ENOTSUP; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) return -ENOENT; *req = (typeof(*req)) { .ty = LLS_REQ_PUT, .u.put = { .cache = &lls_conf.arp_cache, .addr = { .proto = RTE_ETHER_TYPE_IPV4, .ip.v4 = *ipv4, }, .lcore_id = lcore_id, }, }; return 
mb_send_entry(&lls_conf.requests, req); } int hold_nd(lls_req_cb cb, void *arg, struct in6_addr *ipv6, unsigned int lcore_id) { struct lls_request *req; if (unlikely(!nd_enabled(&lls_conf))) { G_LOG(ERR, "%s(lcore=%u): ND service is not enabled\n", __func__, lcore_id); return -ENOTSUP; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) return -ENOENT; *req = (typeof(*req)) { .ty = LLS_REQ_HOLD, .u.hold = { .cache = &lls_conf.nd_cache, .addr = { .proto = RTE_ETHER_TYPE_IPV6, .ip.v6 = *ipv6, }, .hold = { .cb = cb, .arg = arg, .lcore_id = lcore_id, }, }, }; return mb_send_entry(&lls_conf.requests, req); } int put_nd(struct in6_addr *ipv6, unsigned int lcore_id) { struct lls_request *req; if (unlikely(!nd_enabled(&lls_conf))) { G_LOG(ERR, "%s(lcore=%u): ND service is not enabled\n", __func__, lcore_id); return -ENOTSUP; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) return -ENOENT; *req = (typeof(*req)) { .ty = LLS_REQ_PUT, .u.put = { .cache = &lls_conf.nd_cache, .addr = { .proto = RTE_ETHER_TYPE_IPV6, .ip.v6 = *ipv6, }, .lcore_id = lcore_id, }, }; return mb_send_entry(&lls_conf.requests, req); } void submit_arp(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface) { struct lls_request *req; int ret; if (unlikely(num_pkts > lls_conf.mailbox_max_pkt_sub)) { G_LOG(ERR, "%s(): too many packets: num_pkts=%u > lls_conf.mailbox_max_pkt_sub=%u\n", __func__, num_pkts, lls_conf.mailbox_max_pkt_sub); goto free_pkts; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) goto free_pkts; *req = (typeof(*req)) { .ty = LLS_REQ_ARP, .u.arp = { .num_pkts = num_pkts, .iface = iface, }, }; rte_memcpy(req->u.arp.pkts, pkts, sizeof(req->u.arp.pkts[0]) * num_pkts); ret = mb_send_entry(&lls_conf.requests, req); if (unlikely(ret < 0)) goto free_pkts; return; free_pkts: rte_pktmbuf_free_bulk(pkts, num_pkts); } static int submit_icmp(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface) { 
struct lls_request *req; int ret; if (unlikely(num_pkts > lls_conf.mailbox_max_pkt_sub)) { G_LOG(ERR, "%s(): too many packets: num_pkts=%u > lls_conf.mailbox_max_pkt_sub=%u\n", __func__, num_pkts, lls_conf.mailbox_max_pkt_sub); ret = -EINVAL; goto free_pkts; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) { ret = -ENOENT; goto free_pkts; } *req = (typeof(*req)) { .ty = LLS_REQ_ICMP, .u.icmp = { .num_pkts = num_pkts, .iface = iface, }, }; rte_memcpy(req->u.icmp.pkts, pkts, sizeof(req->u.icmp.pkts[0]) * num_pkts); ret = mb_send_entry(&lls_conf.requests, req); if (unlikely(ret < 0)) goto free_pkts; return 0; free_pkts: rte_pktmbuf_free_bulk(pkts, num_pkts); return ret; } /* * Match the packet if it fails to be classifed by ACL rules. * * Return values: 0 for successful match, and -ENOENT for no matching. */ static int match_icmp(struct rte_mbuf *pkt, struct gatekeeper_if *iface) { const uint16_t BE_ETHER_TYPE_IPv4 = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv4_hdr *ip4hdr; uint16_t ether_type_be = pkt_in_skip_l2(pkt, eth_hdr, (void **)&ip4hdr); size_t l2_len = pkt_in_l2_hdr_len(pkt); if (unlikely(ether_type_be != BE_ETHER_TYPE_IPv4)) return -ENOENT; if (pkt->data_len < ICMP_PKT_MIN_LEN(l2_len)) return -ENOENT; if (ip4hdr->dst_addr != iface->ip4_addr.s_addr) return -ENOENT; if (ip4hdr->next_proto_id != IPPROTO_ICMP) return -ENOENT; if (pkt->data_len < (ICMP_PKT_MIN_LEN(l2_len) + ipv4_hdr_len(ip4hdr) - sizeof(*ip4hdr))) return -ENOENT; if (rte_ipv4_frag_pkt_is_fragmented(ip4hdr)) { G_LOG(WARNING, "Received fragmented ICMP packets destined to this server at %s\n", __func__); return -ENOENT; } return 0; } static int submit_icmp6(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface) { struct lls_request *req; int ret; if (unlikely(num_pkts > lls_conf.mailbox_max_pkt_sub)) { G_LOG(ERR, "%s(): too many packets: num_pkts=%u > 
lls_conf.mailbox_max_pkt_sub=%u\n", __func__, num_pkts, lls_conf.mailbox_max_pkt_sub); ret = -EINVAL; goto free_pkts; } req = mb_alloc_entry(&lls_conf.requests); if (unlikely(req == NULL)) { ret = -ENOENT; goto free_pkts; } *req = (typeof(*req)) { .ty = LLS_REQ_ICMP6, .u.icmp6 = { .num_pkts = num_pkts, .iface = iface, }, }; rte_memcpy(req->u.icmp6.pkts, pkts, sizeof(req->u.icmp6.pkts[0]) * num_pkts); ret = mb_send_entry(&lls_conf.requests, req); if (unlikely(ret < 0)) goto free_pkts; return 0; free_pkts: rte_pktmbuf_free_bulk(pkts, num_pkts); return ret; } /* * Match the packet if it fails to be classifed by ACL rules. * * Return values: 0 for successful match, and -ENOENT for no matching. */ static int match_icmp6(struct rte_mbuf *pkt, struct gatekeeper_if *iface) { /* * The ICMPv6 header offset in terms of the * beginning of the IPv6 header. */ int icmpv6_offset; uint8_t nexthdr; const uint16_t BE_ETHER_TYPE_IPv6 = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv6_hdr *ip6hdr; uint16_t ether_type_be = pkt_in_skip_l2(pkt, eth_hdr, (void **)&ip6hdr); size_t l2_len = pkt_in_l2_hdr_len(pkt); if (unlikely(ether_type_be != BE_ETHER_TYPE_IPv6)) return -ENOENT; if (pkt->data_len < ICMPV6_PKT_MIN_LEN(l2_len)) return -ENOENT; if ((memcmp(ip6hdr->dst_addr, &iface->ip6_addr, sizeof(iface->ip6_addr)) != 0) && (memcmp(ip6hdr->dst_addr, &iface->ll_ip6_addr, sizeof(iface->ll_ip6_addr)) != 0) && (memcmp(ip6hdr->dst_addr, &iface->ip6_mc_addr, sizeof(iface->ip6_mc_addr)) != 0) && (memcmp(ip6hdr->dst_addr, &iface->ll_ip6_mc_addr, sizeof(iface->ll_ip6_mc_addr)) != 0) && (memcmp(ip6hdr->dst_addr, &ip6_allnodes_mc_addr, sizeof(ip6_allnodes_mc_addr)) != 0)) return -ENOENT; if (rte_ipv6_frag_get_ipv6_fragment_header(ip6hdr) != NULL) { G_LOG(WARNING, "Received fragmented ICMPv6 packets destined to this server at %s\n", __func__); return -ENOENT; } icmpv6_offset = ipv6_skip_exthdr(ip6hdr, pkt->data_len - 
l2_len, &nexthdr); if (icmpv6_offset < 0 || nexthdr != IPPROTO_ICMPV6) return -ENOENT; if (pkt->data_len < (ICMPV6_PKT_MIN_LEN(l2_len) + icmpv6_offset - sizeof(*ip6hdr))) return -ENOENT; return 0; } static void rotate_log(__attribute__((unused)) struct rte_timer *timer, __attribute__((unused)) void *arg) { gatekeeper_log_init(); } static void lls_scan(__attribute__((unused)) struct rte_timer *timer, void *arg) { struct lls_config *lls_conf = (struct lls_config *)arg; if (arp_enabled(lls_conf)) lls_cache_scan(lls_conf, &lls_conf->arp_cache); if (nd_enabled(lls_conf)) lls_cache_scan(lls_conf, &lls_conf->nd_cache); } static void lls_lacp_announce(__attribute__((unused)) struct rte_timer *timer, void *arg) { struct gatekeeper_if *iface = (struct gatekeeper_if *)arg; uint16_t tx_queue = iface == &lls_conf.net->front ? lls_conf.tx_queue_front : lls_conf.tx_queue_back; /* * This function returns 0 when no packets are transmitted or * when there's an error. Since we're asking for no packets to * be transmitted, we can't differentiate between success and * failure, so we don't check. However, if this fails repeatedly, * the LACP bonding driver will log an error. 
*/ rte_eth_tx_burst(iface->id, tx_queue, NULL, 0); } static inline int lacp_timer_reset(struct lls_config *lls_conf, struct gatekeeper_if *iface) { return rte_timer_reset(&iface->lacp_timer, (uint64_t)((LLS_LACP_ANNOUNCE_INTERVAL_MS / 1000.0) * rte_get_timer_hz()), PERIODICAL, lls_conf->lcore_id, lls_lacp_announce, iface); } static void fillup_lls_dump_entry(struct lls_dump_entry *dentry, struct lls_map *map) { dentry->stale = map->stale; dentry->port_id = map->port_id; dentry->addr = map->addr; rte_ether_addr_copy(&map->ha, &dentry->ha); } #define CTYPE_STRUCT_LLS_DUMP_ENTRY_PTR "struct lls_dump_entry *" static void list_lls(lua_State *L, struct lls_cache *cache) { uint32_t next = 0; const void *key; void *data; int32_t index; void *cdata; uint32_t correct_ctypeid_lls_dentry = luaL_get_ctypeid(L, CTYPE_STRUCT_LLS_DUMP_ENTRY_PTR); index = rte_hash_iterate(cache->hash, (void *)&key, &data, &next); while (index >= 0) { struct lls_dump_entry dentry; struct lls_record *record = &cache->records[index]; fillup_lls_dump_entry(&dentry, &record->map); lua_pushvalue(L, 2); lua_insert(L, 3); cdata = luaL_pushcdata(L, correct_ctypeid_lls_dentry, sizeof(struct lls_dump_entry *)); *(struct lls_dump_entry **)cdata = &dentry; lua_insert(L, 4); lua_call(L, 2, 1); index = rte_hash_iterate(cache->hash, (void *)&key, &data, &next); } } static void list_arp(lua_State *L, struct lls_config *lls_conf) { if (!ipv4_configured(lls_conf->net)) return; list_lls(L, &lls_conf->arp_cache); } static void list_nd(lua_State *L, struct lls_config *lls_conf) { if (!ipv6_configured(lls_conf->net)) return; list_lls(L, &lls_conf->nd_cache); } typedef void (*list_lls_fn)(lua_State *L, struct lls_config *lls_conf); #define CTYPE_STRUCT_LLS_CONFIG_PTR "struct lls_config *" static void list_lls_for_lua(lua_State *L, list_lls_fn f) { uint32_t ctypeid; uint32_t correct_ctypeid_lls_config = luaL_get_ctypeid(L, CTYPE_STRUCT_LLS_CONFIG_PTR); struct lls_config *lls_conf; /* First argument must be of type 
CTYPE_STRUCT_LLS_CONFIG_PTR. */ void *cdata = luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_LLS_CONFIG_PTR); if (ctypeid != correct_ctypeid_lls_config) luaL_error(L, "Expected `%s' as first argument", CTYPE_STRUCT_LLS_CONFIG_PTR); /* Second argument must be a Lua function. */ luaL_checktype(L, 2, LUA_TFUNCTION); /* Third argument should be a Lua value. */ if (lua_gettop(L) != 3) luaL_error(L, "Expected three arguments, however it got %d arguments", lua_gettop(L)); lls_conf = *(struct lls_config **)cdata; f(L, lls_conf); lua_remove(L, 1); lua_remove(L, 1); } int l_list_lls_arp(lua_State *L) { list_lls_for_lua(L, list_arp); return 1; } int l_list_lls_nd(lua_State *L) { list_lls_for_lua(L, list_nd); return 1; } static int process_pkts(struct lls_config *lls_conf, struct gatekeeper_if *iface, uint16_t rx_queue, uint16_t tx_queue, uint16_t max_pkt_burst) { struct rte_mbuf *bufs[max_pkt_burst]; uint16_t num_rx = rte_eth_rx_burst(iface->id, rx_queue, bufs, max_pkt_burst); int num_tx = 0; uint16_t i; for (i = 0; i < num_rx; i++) { struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *); void *next_hdr; uint16_t ether_type; /* * The destination MAC address should be the broadcast * address or match the interface's Ethernet address, * because for round robin and LACP bonding the * slave interfaces assume the MAC address of the * bonded interface. * * See: path_to_url#configuration * * XXX #74 Is this check needed? By default, the NIC only * accepts the assigned MAC address, broadcast address, * and any MAC added (for example, for IPv6 Ethernet multicast). 
*/ if (unlikely(!rte_is_broadcast_ether_addr(&eth_hdr->dst_addr) && !rte_is_same_ether_addr(&eth_hdr->dst_addr, &iface->eth_mc_addr) && !rte_is_same_ether_addr(&eth_hdr->dst_addr, &iface->ll_eth_mc_addr) && !rte_is_same_ether_addr(&eth_hdr->dst_addr, &iface->eth_addr))) goto free_buf; ether_type = rte_be_to_cpu_16(pkt_in_skip_l2(bufs[i], eth_hdr, &next_hdr)); switch (ether_type) { case RTE_ETHER_TYPE_ARP: if (process_arp(lls_conf, iface, tx_queue, bufs[i], eth_hdr, next_hdr) == -1) goto free_buf; /* ARP reply was sent, so no free is needed. */ num_tx++; continue; /* * Both back and front interfaces cannot * see ND packets received here. * All ND packets come from the IPv6 filter. */ default: /* * The log level of the following log entry cannot be * ERR because NICs typically send unmatched patckets * to queue 0, which the LLS block often serves. * * The log level cannot be WARNING either because * Gatekeeper servers have to tolerate unwanted * traffic at some vantage points and LLS blocks * typically run at WARNING level. */ G_LOG(NOTICE, "%s interface should not be seeing a packet with EtherType 0x%04hx\n", iface->name, ether_type); goto free_buf; } free_buf: rte_pktmbuf_free(bufs[i]); } return num_tx; } static int lls_proc(void *arg) { struct lls_config *lls_conf = (struct lls_config *)arg; struct net_config *net_conf = lls_conf->net; struct gatekeeper_if *front = &net_conf->front; struct gatekeeper_if *back = &net_conf->back; uint64_t prev_tsc = rte_rdtsc(), cur_tsc, diff_tsc; uint64_t timer_resolution_cycles = net_conf->rotate_log_interval_sec * cycles_per_sec; G_LOG(NOTICE, "The LLS block is running at tid = %u\n", gettid()); if (needed_caps(0, NULL) < 0) { G_LOG(ERR, "Could not set needed capabilities\n"); exiting = true; } while (likely(!exiting)) { /* Read in packets on front and back interfaces. 
*/ int num_tx; if (lls_conf->rx_method_front & RX_METHOD_NIC) { num_tx = process_pkts(lls_conf, front, lls_conf->rx_queue_front, lls_conf->tx_queue_front, lls_conf->front_max_pkt_burst); if ((num_tx > 0) && lacp_enabled(net_conf, front)) { if (lacp_timer_reset(lls_conf, front) < 0) G_LOG(NOTICE, "Can't reset front LACP timer to skip cycle\n"); } } if (net_conf->back_iface_enabled && lls_conf->rx_method_back & RX_METHOD_NIC) { num_tx = process_pkts(lls_conf, back, lls_conf->rx_queue_back, lls_conf->tx_queue_back, lls_conf->back_max_pkt_burst); if ((num_tx > 0) && lacp_enabled(net_conf, back)) { if (lacp_timer_reset(lls_conf, back) < 0) G_LOG(NOTICE, "Can't reset back LACP timer to skip cycle\n"); } } /* * Process any requests. The RX method does not * matter here, since the mailbox is always used * for ARP/ND hold requests from other blocks. */ if (likely(lls_process_reqs(lls_conf) == 0)) { /* * If there are no requests to go through, then do a * scan of the cache (if enough time has passed). * * XXX #151 In theory, many new LLS changes could starve * the ability to scan, but this will not likely * happen. In fact, we may want to reduce the amount * of times this is called, since reading the HPET * timer is inefficient. See the timer application. * * Also invoke the TX burst function to fulfill * the LACP requirement. * * XXX #151 The LACP requirement could be starved if * the LLS block receives a lot of requests but * we are unable to answer them -- i.e. the * number of requests > 0 for a sustained * period but we never invoke the TX burst. 
*/ rte_timer_manage(); prev_tsc = rte_rdtsc(); continue; } cur_tsc = rte_rdtsc(); diff_tsc = cur_tsc - prev_tsc; if (diff_tsc >= timer_resolution_cycles) { rte_timer_manage(); prev_tsc = cur_tsc; } } G_LOG(NOTICE, "The LLS block is exiting\n"); return cleanup_lls(); } static int register_icmp_filter(struct gatekeeper_if *iface, uint16_t rx_queue, uint8_t *rx_method) { int ret = ipv4_pkt_filter_add(iface, iface->ip4_addr.s_addr, 0, 0, 0, 0, IPPROTO_ICMP, rx_queue, submit_icmp, match_icmp, rx_method); if (ret < 0) { G_LOG(ERR, "Could not add IPv4 ICMP filter on %s iface\n", iface->name); return ret; } return 0; } static int register_icmp6_filters(struct gatekeeper_if *iface, uint16_t rx_queue, uint8_t *rx_method) { /* All of the IPv6 addresses that a Gatekeeper interface supports. */ const struct in6_addr *ip6_addrs[] = { &iface->ip6_addr, &iface->ll_ip6_addr, &iface->ip6_mc_addr, &iface->ll_ip6_mc_addr, /* * The all nodes multicast address is only used to ignore * router solitication/advertisement messages so that they * do not clutter the Gatekeeper log. */ &ip6_allnodes_mc_addr, }; unsigned int i; int ret; for (i = 0; i < RTE_DIM(ip6_addrs); i++) { ret = ipv6_pkt_filter_add(iface, (rte_be32_t *)&ip6_addrs[i]->s6_addr, 0, 0, 0, 0, IPPROTO_ICMPV6, rx_queue, submit_icmp6, match_icmp6, rx_method); if (ret < 0) { G_LOG(ERR, "Could not add IPv6 ICMP filter on %s iface\n", iface->name); return ret; } } return 0; } static int assign_lls_queue_ids(struct lls_config *lls_conf) { int ret; /* * Take the packets created for processing requests * from mailbox as well as ARP and ND cache tables scan. */ unsigned int total_pkt_burst = lls_conf->total_pkt_burst + lls_conf->mailbox_burst_size + 2 * lls_conf->max_num_cache_records; unsigned int num_mbuf; /* The front NIC doesn't have hardware support. */ if (!lls_conf->net->front.rss) total_pkt_burst -= lls_conf->front_max_pkt_burst; /* The back NIC is enabled and doesn't have hardware support. 
*/ if (lls_conf->net->back_iface_enabled && !lls_conf->net->back.rss) total_pkt_burst -= lls_conf->back_max_pkt_burst; num_mbuf = calculate_mempool_config_para("lls", lls_conf->net, total_pkt_burst); lls_conf->mp = create_pktmbuf_pool("lls", lls_conf->lcore_id, num_mbuf); if (lls_conf->mp == NULL) { ret = -1; goto fail; } /* * LLS should only get its own RX queue if RSS is enabled, * even if EtherType filter is not enabled. * * If RSS is disabled, then the network configuration can * tell that it should ignore all other blocks' requests * for queues and just allocate one RX queue. * * If RSS is enabled, then LLS has already informed the * network configuration that it will be using a queue. * The network configuration will crash if LLS doesn't * configure that queue, so it still should, even if * EtherType filter is not supported and LLS will not use it. */ if (lls_conf->net->front.rss) { ret = get_queue_id(&lls_conf->net->front, QUEUE_TYPE_RX, lls_conf->lcore_id, lls_conf->mp); if (ret < 0) goto fail; lls_conf->rx_queue_front = ret; } ret = get_queue_id(&lls_conf->net->front, QUEUE_TYPE_TX, lls_conf->lcore_id, NULL); if (ret < 0) goto fail; lls_conf->tx_queue_front = ret; if (lls_conf->net->back_iface_enabled) { if (lls_conf->net->back.rss) { ret = get_queue_id(&lls_conf->net->back, QUEUE_TYPE_RX, lls_conf->lcore_id, lls_conf->mp); if (ret < 0) goto fail; lls_conf->rx_queue_back = ret; } ret = get_queue_id(&lls_conf->net->back, QUEUE_TYPE_TX, lls_conf->lcore_id, NULL); if (ret < 0) goto fail; lls_conf->tx_queue_back = ret; } return 0; fail: G_LOG(ERR, "Cannot assign queues\n"); return ret; } #define ARP_REQ_SIZE(num_pkts) (offsetof(struct lls_request, end_of_header) + \ sizeof(struct lls_arp_req) + sizeof(struct rte_mbuf *) * num_pkts) #define ICMP_REQ_SIZE(num_pkts) (offsetof(struct lls_request, end_of_header) + \ sizeof(struct lls_icmp_req) + sizeof(struct rte_mbuf *) * num_pkts) #define ICMP6_REQ_SIZE(num_pkts) (offsetof(struct lls_request, end_of_header) +\ 
sizeof(struct lls_icmp6_req) + sizeof(struct rte_mbuf *) * num_pkts) static int lls_stage1(void *arg) { struct lls_config *lls_conf = arg; int ele_size = RTE_MAX(sizeof(struct lls_request), RTE_MAX(ARP_REQ_SIZE(lls_conf->mailbox_max_pkt_sub), RTE_MAX(ICMP_REQ_SIZE(lls_conf->mailbox_max_pkt_sub), ICMP6_REQ_SIZE(lls_conf->mailbox_max_pkt_sub)))); int ret = assign_lls_queue_ids(lls_conf); if (ret < 0) return ret; /* * Since run_lls() in lua/lls.lua will be called before lua/gk.lua * or lua/gt.lua, if we put init_mailbox() in run_lls(), then we have * already initialized LLS' mailbox with the initial * lls_conf.mailbox_max_pkt_sub specified in lua/lls.lua, even if we * change the value of lls_conf.mailbox_max_pkt_sub in lua/gk.lua or * lua/gt.lua, it won't change the size of the entries in LLS mailbox. * * To initialize the LLS mailbox only after we get the final * configuration by considering GK or GT blocks, we initialize * LLS mailbox here. */ return init_mailbox("lls_req", lls_conf->mailbox_max_entries_exp, ele_size, lls_conf->mailbox_mem_cache_size, lls_conf->lcore_id, &lls_conf->requests); } static int lls_stage2(void *arg) { struct lls_config *lls_conf = arg; struct net_config *net_conf = lls_conf->net; int ret; if (lls_conf->arp_cache.iface_enabled(net_conf, &net_conf->front)) { ret = ethertype_flow_add(&net_conf->front, RTE_ETHER_TYPE_ARP, lls_conf->rx_queue_front); if (ret < 0 && net_conf->front.rss && lls_conf->rx_queue_front != 0) { /* * If EtherType flows are not supported but RSS is, * the LLS block should be listening on queue 0. This * is because RSS on most NICs seems to default to * sending ARP (and other non-IP packets) to queue 0. * The LLS block can then simply discard any other * non-ARP and non-IP packets that it receives. * * On the Elastic Network Adapter (ENA) on Amazon, * non-IP packets seem to be given to the first * queue configured for RSS. 
Therefore, LLS does not * need to run on queue 0 in that case, but there's * no easy way of detecting this case at runtime. */ G_LOG(ERR, "If EtherType filters are not supported, the LLS block needs to listen on queue 0 on the front iface\n"); return -1; } if (ret >= 0) { /* ARP packets can be received from the NIC. */ lls_conf->rx_method_front |= RX_METHOD_NIC; } else { /* * EtherType flows cannot be used, perhaps because * they are not supported by hardware, RSS is not * supported by hardware, or the particular protocol * (ARP) is not permitted. In this case, ARP packets * will be received via mailboxes. */ lls_conf->rx_method_front |= RX_METHOD_MB; } ret = register_icmp_filter(&net_conf->front, lls_conf->rx_queue_front, &lls_conf->rx_method_front); if (ret < 0) return ret; } if (lls_conf->arp_cache.iface_enabled(net_conf, &net_conf->back)) { /* See comments above about return values. */ ret = ethertype_flow_add(&net_conf->back, RTE_ETHER_TYPE_ARP, lls_conf->rx_queue_back); if (ret < 0 && net_conf->back.rss && lls_conf->rx_queue_back != 0) { G_LOG(ERR, "If EtherType flows are not supported, the LLS block must listen on queue 0 on the back iface\n"); return -1; } if (ret >= 0) { /* ARP packets can be received from the NIC. */ lls_conf->rx_method_back |= RX_METHOD_NIC; } else { /* ARP packets will be received via mailboxes. 
*/ lls_conf->rx_method_back |= RX_METHOD_MB; } ret = register_icmp_filter(&net_conf->back, lls_conf->rx_queue_back, &lls_conf->rx_method_back); if (ret < 0) return ret; } if (lls_conf->nd_cache.iface_enabled(net_conf, &net_conf->front)) { ret = register_icmp6_filters(&net_conf->front, lls_conf->rx_queue_front, &lls_conf->rx_method_front); if (ret < 0) return ret; } if (lls_conf->nd_cache.iface_enabled(net_conf, &net_conf->back)) { ret = register_icmp6_filters(&net_conf->back, lls_conf->rx_queue_back, &lls_conf->rx_method_back); if (ret < 0) return ret; } return 0; } int run_lls(struct net_config *net_conf, struct lls_config *lls_conf) { int ret; uint16_t front_inc, back_inc = 0; if (net_conf == NULL || lls_conf == NULL) { ret = -1; goto out; } log_ratelimit_state_init(lls_conf->lcore_id, lls_conf->log_ratelimit_interval_ms, lls_conf->log_ratelimit_burst, lls_conf->log_level, "LLS"); tb_ratelimit_state_init(&lls_conf->front_icmp_rs, lls_conf->front_icmp_msgs_per_sec, lls_conf->front_icmp_msgs_burst); tb_ratelimit_state_init(&lls_conf->back_icmp_rs, lls_conf->back_icmp_msgs_per_sec, lls_conf->back_icmp_msgs_burst); if (!(lls_conf->front_max_pkt_burst > 0 && (net_conf->back_iface_enabled == 0 || (net_conf->back_iface_enabled && lls_conf->back_max_pkt_burst > 0)))) { ret = -1; goto out; } front_inc = lls_conf->front_max_pkt_burst; net_conf->front.total_pkt_burst += front_inc; if (net_conf->back_iface_enabled) { back_inc = lls_conf->back_max_pkt_burst; net_conf->back.total_pkt_burst += back_inc; } lls_conf->total_pkt_burst = front_inc + back_inc; ret = net_launch_at_stage1(net_conf, 1, 1, 1, 1, lls_stage1, lls_conf); if (ret < 0) goto burst; ret = launch_at_stage2(lls_stage2, lls_conf); if (ret < 0) goto stage1; ret = launch_at_stage3("lls", lls_proc, lls_conf, lls_conf->lcore_id); if (ret < 0) goto stage2; /* * Do LLS cache scan every @lls_conf->cache_scan_interval_sec * seconds. 
*/ rte_timer_init(&lls_conf->scan_timer); ret = rte_timer_reset(&lls_conf->scan_timer, lls_conf->cache_scan_interval_sec * rte_get_timer_hz(), PERIODICAL, lls_conf->lcore_id, lls_scan, lls_conf); if (ret < 0) { G_LOG(ERR, "Cannot set LLS scan timer\n"); goto stage3; } /* Rotate log file every rotate_log_interval_sec seconds. */ rte_timer_init(&lls_conf->log_timer); ret = rte_timer_reset(&lls_conf->log_timer, net_conf->rotate_log_interval_sec * rte_get_timer_hz(), PERIODICAL, lls_conf->lcore_id, rotate_log, NULL); if (ret < 0) { G_LOG(ERR, "Cannot set Gatekeeper log timer\n"); goto scan_timer; } lls_conf->net = net_conf; if (arp_enabled(lls_conf)) { ret = lls_cache_init(lls_conf, &lls_conf->arp_cache, sizeof(struct in_addr)); if (ret < 0) { G_LOG(ERR, "ARP cache cannot be started\n"); goto log_timer; } /* Set timeouts for front and back (if needed). */ if (lls_conf->arp_cache.iface_enabled(net_conf, &net_conf->front)) lls_conf->arp_cache.front_timeout_sec = net_conf->front.arp_cache_timeout_sec; if (lls_conf->arp_cache.iface_enabled(net_conf, &net_conf->back)) lls_conf->arp_cache.back_timeout_sec = lls_conf->net->back.arp_cache_timeout_sec; } if (nd_enabled(lls_conf)) { ret = lls_cache_init(lls_conf, &lls_conf->nd_cache, sizeof(struct in6_addr)); if (ret < 0) { G_LOG(ERR, "ND cache cannot be started\n"); goto arp; } /* Set timeouts for front and back (if needed). */ if (lls_conf->nd_cache.iface_enabled(net_conf, &net_conf->front)) lls_conf->nd_cache.front_timeout_sec = net_conf->front.nd_cache_timeout_sec; if (lls_conf->nd_cache.iface_enabled(net_conf, &net_conf->back)) lls_conf->nd_cache.back_timeout_sec = lls_conf->net->back.nd_cache_timeout_sec; } /* Set per-interface LACP timers, if needed. 
*/ if (lacp_enabled(net_conf, &net_conf->front)) { rte_timer_init(&net_conf->front.lacp_timer); ret = lacp_timer_reset(lls_conf, &net_conf->front); if (ret < 0) { G_LOG(ERR, "Cannot set LACP timer on front interface\n"); goto nd; } } if (lacp_enabled(net_conf, &net_conf->back)) { rte_timer_init(&net_conf->back.lacp_timer); ret = lacp_timer_reset(lls_conf, &net_conf->back); if (ret < 0) { G_LOG(ERR, "Cannot set LACP timer on back interface\n"); goto lacp; } } return 0; lacp: if (lacp_enabled(net_conf, &net_conf->front)) rte_timer_stop(&net_conf->front.lacp_timer); nd: if (nd_enabled(lls_conf)) lls_cache_destroy(&lls_conf->nd_cache); arp: if (arp_enabled(lls_conf)) lls_cache_destroy(&lls_conf->arp_cache); log_timer: rte_timer_stop(&lls_conf->log_timer); scan_timer: rte_timer_stop(&lls_conf->scan_timer); stage3: pop_n_at_stage3(1); stage2: pop_n_at_stage2(1); stage1: pop_n_at_stage1(1); burst: net_conf->front.total_pkt_burst -= front_inc; net_conf->back.total_pkt_burst -= back_inc; out: return ret; } ```
/content/code_sandbox/lls/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
9,686
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_LLS_CACHE_H_ #define _GATEKEEPER_LLS_CACHE_H_ #include "gatekeeper_lls.h" /* Information needed to add a hold to a record. */ struct lls_hold_req { /* Cache that holds (or will hold) this map. */ struct lls_cache *cache; /* IP address for this request. */ struct ipaddr addr; /* Hold that this is requesting. */ struct lls_hold hold; }; /* Information needed to drop a struct lls_hold from a record. */ struct lls_put_req { /* Cache that (possibly) has this hold. */ struct lls_cache *cache; /* IP address for this request. */ struct ipaddr addr; /* The lcore that requested this put. */ unsigned int lcore_id; }; /* Information needed to submit ARP packets to the LLS block. */ struct lls_arp_req { /* Number of packets stored in @pkts. */ int num_pkts; /* Interface that received @pkt. */ struct gatekeeper_if *iface; /* ARP neighbor packets. */ struct rte_mbuf *pkts[0]; }; /* Information needed to submit ND packets to the LLS block. */ struct lls_nd_req { /* Number of packets stored in @pkts. */ int num_pkts; /* Interface that received @pkt. */ struct gatekeeper_if *iface; /* ND neighbor packets. */ struct rte_mbuf *pkts[0]; }; /* Information needed to submit ICMP packets to the LLS block. */ struct lls_icmp_req { /* Number of packets stored in @pkts. */ int num_pkts; /* Interface that received @pkt. */ struct gatekeeper_if *iface; /* ICMP packets. */ struct rte_mbuf *pkts[0]; }; /* Information needed to submit ICMPv6 packets to the LLS block. */ struct lls_icmp6_req { /* Number of packets stored in @pkts. 
*/ int num_pkts; /* Interface that received @pkt. */ struct gatekeeper_if *iface; /* ICMPv6 packets. */ struct rte_mbuf *pkts[0]; }; /* A modification to an LLS map. */ struct lls_mod_req { /* Cache that holds (or will hold) this map. */ struct lls_cache *cache; /* IP address for this modification. */ struct ipaddr addr; /* * Ethernet address of modification, possibly * not different from existing address in record. */ struct rte_ether_addr ha; /* * Port of modification, possibly not * different from existing port ID in record. */ uint16_t port_id; /* Timestamp of this modification. */ time_t ts; }; /* Request submitted to the LLS block. */ struct lls_request { /* Type of request. */ enum lls_req_ty ty; int end_of_header[0]; union { /* If @ty is LLS_REQ_HOLD, use @hold. */ struct lls_hold_req hold; /* If @ty is LLS_REQ_PUT, use @put. */ struct lls_put_req put; /* If @ty is LLS_REQ_ARP, use @arp. */ struct lls_arp_req arp; /* If @ty is LLS_REQ_ICMP, use @icmp. */ struct lls_icmp_req icmp; /* If @ty is LLS_REQ_ICMP6, use @icmp6. */ struct lls_icmp6_req icmp6; } u; }; int lls_cache_init(struct lls_config *lls_conf, struct lls_cache *cache, uint32_t key_len); void lls_cache_destroy(struct lls_cache *cache); /* Process any requests to the LLS block. */ unsigned int lls_process_reqs(struct lls_config *lls_conf); /* * Modify a cache entry without going through the mailbox. * * NOTE * This should only be used by the LLS block itself. Other * requests to modify the cache should go through lls_req(). */ void lls_process_mod(struct lls_config *lls_conf, struct lls_mod_req *mod); /* * Fetch a map according to the key in @addr. * * NOTE * This should only be used by the LLS block itself. Other * requests to get maps should go through hold requests. */ struct lls_map *lls_cache_get(struct lls_cache *cache, const struct ipaddr *addr); /* Scan the cache and send requests or remove entries as needed. 
*/ void lls_cache_scan(struct lls_config *lls_conf, struct lls_cache *cache); #endif /* _GATEKEEPER_LLS_CACHE_H_ */ ```
/content/code_sandbox/lls/cache.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,129
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <arpa/inet.h> #include <stdbool.h> #include <rte_ip.h> #include <rte_ip_frag.h> #include "gatekeeper_l2.h" #include "gatekeeper_varip.h" #include "cache.h" #include "nd.h" /* * Neighbor Discovery. * * This is an implementation of ND as defined in RFC 4861: * path_to_url * * However, Gatekeeper ND is only currently used for address * resolution, for both our global or link-local IPv6 address. * We do not handle any router or redirection messages. * * Only supporting address resolution has consequences. These * include but are not limited to: * * - We do not implement Duplicate Address Detection, although * we operate normally when we receive ND packets from hosts * who may be trying to participate in DAD and transmit with * an unspecified IPv6 source address. * - We do not use the Router flag in our own ND advertisements, * although we do not fail when we receive ND advertisements * with any particular setting of the Router flag. * - We do not support any ND header options EXCEPT for Source * Link-Layer Address and Target Link-layer Address, although * we do not fail when we receive an ND packet with a different * option. * - We do not maintain a neighbor cache as specified by RFC 4861, * which specifies various states (STALE, INCOMPLETE, REACHABLE) * for neighbor cache entries. */ /* * Returns whether a given IPv6 address is the unspecified address, * which can be used for duplicate address detection. 
*/ static inline int ipv6_addr_unspecified(const uint8_t *ip6_addr) { const uint64_t *paddr = (const uint64_t *)ip6_addr; return (paddr[0] | paddr[1]) == 0ULL; } /* * Returns whether a given IPv6 address is a solicited-node * multicast address. */ static inline int ipv6_addr_solicited_node_mc(const uint8_t *ip6_addr) { const uint64_t *paddr = (const uint64_t *)ip6_addr; return ((paddr[0] ^ rte_cpu_to_be_64(0xff02000000000000UL)) | ((paddr[1] ^ rte_cpu_to_be_64(0x00000001ff000000UL)) & rte_cpu_to_be_64(0xffffffffff000000UL))) == 0ULL; } /* Returns whether a given IPv6 address is generally a multicast address. */ static inline int ipv6_addr_multicast(const uint8_t *ip6_addr) { return ((*(const uint32_t *)ip6_addr) & rte_cpu_to_be_32(0xFF000000)) == rte_cpu_to_be_32(0xFF000000); } int iface_nd_enabled(struct net_config *net, struct gatekeeper_if *iface) { /* When @iface is the back, need to make sure it's enabled. */ if (iface == &net->back) return net->back_iface_enabled && ipv6_if_configured(iface); /* @iface is the front interface. */ return ipv6_if_configured(iface); } int ipv6_in_subnet(struct gatekeeper_if *iface, const struct ipaddr *addr) { /* Check for both link-local and global subnets. */ return (ip6_same_subnet(&iface->ll_ip6_addr, &addr->ip.v6, &iface->ll_ip6_mask) || ip6_same_subnet(&iface->ip6_addr, &addr->ip.v6, &iface->ip6_mask)); } /* * RFC 4861, Section 7.2.2: Sending Neighbor Solicitations. * * We do not follow the requirement from the RFC to retain a small * queue of packets waiting for resolution. It states that this * queue is required, but also says that packets can be dropped from * this queue due to overflow. * * Therefore, the expectation is that retransmissions due to resolution * may need to happen, so we do not maintain this queue at all and expect * any interested clients will have already called hold_nd() anyway. 
* * Also, we do not stop trying to resolve an address while there are * holds on the entry, and do not return ICMPv6 destination unreachable * indications as required by the RFC. */ void xmit_nd_req(struct gatekeeper_if *iface, const struct ipaddr *addr, const struct rte_ether_addr *ha, uint16_t tx_queue) { struct lls_config *lls_conf = get_lls_conf(); const uint8_t *ipv6_addr = addr->ip.v6.s6_addr; struct rte_ether_hdr *eth_hdr; struct rte_ipv6_hdr *ipv6_hdr; struct icmpv6_hdr *icmpv6_hdr; struct nd_neigh_msg *nd_msg; struct nd_opt_lladdr *nd_opt; size_t l2_len; struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(lls_conf->mp); if (created_pkt == NULL) { G_LOG(ERR, "Could not alloc a packet for an ND Neighbor Solicitation\n"); return; } /* Solicitation will include source link layer address. */ l2_len = iface->l2_len_out; created_pkt->data_len = ND_NEIGH_PKT_LLADDR_MIN_LEN(l2_len); created_pkt->pkt_len = created_pkt->data_len; /* Set-up Ethernet header. */ eth_hdr = rte_pktmbuf_mtod(created_pkt, struct rte_ether_hdr *); rte_ether_addr_copy(&iface->eth_addr, &eth_hdr->src_addr); if (ha == NULL) { /* * Need to use IPv6 multicast Ethernet address. * Technically, the last four bytes of this * address should be the same as the solicited-node * multicast address formed using @ipv6_addr, but * this is equivalent to 0xFF followed by the * last three bytes of @ipv6_addr. */ struct rte_ether_addr eth_mc_daddr = { { 0x33, 0x33, 0xFF, ipv6_addr[13], ipv6_addr[14], ipv6_addr[15], } }; rte_ether_addr_copy(&eth_mc_daddr, &eth_hdr->dst_addr); } else rte_ether_addr_copy(ha, &eth_hdr->dst_addr); /* Set-up VLAN header. */ if (iface->vlan_insert) fill_vlan_hdr(eth_hdr, iface->ipv6_vlan_tag_be, RTE_ETHER_TYPE_IPV6); else eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); /* Set-up IPv6 header. 
*/ ipv6_hdr = pkt_out_skip_l2(iface, eth_hdr); ipv6_hdr->vtc_flow = rte_cpu_to_be_32(IPv6_DEFAULT_VTC_FLOW); ipv6_hdr->payload_len = rte_cpu_to_be_16(created_pkt->data_len - (l2_len + sizeof(*ipv6_hdr))); ipv6_hdr->proto = IPPROTO_ICMPV6; /* * The IP Hop Limit field must be 255 as required by * RFC 4861, sections 7.1.1 and 7.1.2. */ ipv6_hdr->hop_limits = 255; rte_memcpy(ipv6_hdr->src_addr, iface->ll_ip6_addr.s6_addr, sizeof(ipv6_hdr->src_addr)); if (ha == NULL) { /* Need to use IPv6 solicited-node multicast address. */ uint8_t ip6_mc_daddr[16] = IPV6_SN_MC_ADDR(ipv6_addr); rte_memcpy(ipv6_hdr->dst_addr, ip6_mc_daddr, sizeof(ipv6_hdr->dst_addr)); } else rte_memcpy(ipv6_hdr->dst_addr, ipv6_addr, sizeof(ipv6_hdr->dst_addr)); /* Set-up ICMPv6 header. */ icmpv6_hdr = (struct icmpv6_hdr *)&ipv6_hdr[1]; icmpv6_hdr->type = ND_NEIGHBOR_SOLICITATION_TYPE; icmpv6_hdr->code = ND_NEIGHBOR_SOLICITATION_CODE; icmpv6_hdr->cksum = 0; /* Calculated below. */ /* Set-up ND header with options. */ nd_msg = (struct nd_neigh_msg *)&icmpv6_hdr[1]; nd_msg->flags = 0; rte_memcpy(nd_msg->target, ipv6_addr, sizeof(nd_msg->target)); nd_opt = (struct nd_opt_lladdr *)&nd_msg[1]; nd_opt->type = ND_OPT_SOURCE_LL_ADDR; nd_opt->len = 1; rte_ether_addr_copy(&iface->eth_addr, &nd_opt->ha); icmpv6_hdr->cksum = rte_ipv6_icmpv6_cksum(ipv6_hdr, icmpv6_hdr); if (rte_eth_tx_burst(iface->id, tx_queue, &created_pkt, 1) <= 0) { rte_pktmbuf_free(created_pkt); G_LOG(ERR, "Could not send an ND Neighbor Solicitation\n"); } } /* * Parse the options of an ND packet, looking specifically for * ND_OPT_SOURCE_LL_ADDR (in Neighbor Solicitations) and * ND_OPT_TARGET_LL_ADDR (in Neighbor Advertisements). * * Returns NULL if there are partial options or if the length * field of any option is set to zero. Otherwise, returns the * given @ndopts structure which contains an array of pointers * to the relevant options. 
*/ static struct nd_opts * parse_nd_opts(struct nd_opts *ndopts, uint8_t *opt, uint16_t opt_len) { struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt; memset(ndopts, 0, sizeof(*ndopts)); while (opt_len) { int len_bytes; if (opt_len < sizeof(*nd_opt)) return NULL; /* @nd_opt->len is the number of 64-bit chunks. */ len_bytes = nd_opt->len << 3; if (opt_len < len_bytes || len_bytes == 0) return NULL; switch (nd_opt->type) { case ND_OPT_SOURCE_LL_ADDR: case ND_OPT_TARGET_LL_ADDR: if (ndopts->opt_array[nd_opt->type]) G_LOG(INFO, "Multiple options of type %d in an ND Neighbor packet\n", nd_opt->type); else ndopts->opt_array[nd_opt->type] = nd_opt; break; default: /* * No support for Prefix Information, Redirected Header, * MTU, Route Information, or any other ND option. */ break; } opt_len -= len_bytes; nd_opt = (struct nd_opt_hdr *)((uint8_t *)nd_opt + len_bytes); } return ndopts; } /* * RFC 4861, Section 7.2.3: Receipt of Neighbor Solicitations. * * If the newly-received source link-layer address differs from the * one already in the cache, we do not set the entry to stale as * required by the RFC. */ static int process_nd_neigh_solicitation(struct lls_config *lls_conf, struct rte_mbuf *buf, struct rte_ether_hdr *eth_hdr, struct rte_ipv6_hdr *ipv6_hdr, struct icmpv6_hdr *icmpv6_hdr, uint16_t pkt_len, size_t l2_len, uint16_t icmpv6_len, struct gatekeeper_if *iface, uint16_t tx_queue) { struct nd_neigh_msg *nd_msg = (struct nd_neigh_msg *)&icmpv6_hdr[1]; struct nd_opt_lladdr *nd_opt; struct nd_opts ndopts; int src_unspec = ipv6_addr_unspecified(ipv6_hdr->src_addr); struct rte_ether_addr *src_eth_addr = NULL; size_t min_len; int ret; /* * Most of the checks required by RFC 4861, Section 7.1.1 * have already been done by nd_pkt_valid(). */ /* * RFC 4861, Section 7.1.1. * * Target Address must not be a multicast address. */ if (ipv6_addr_multicast(nd_msg->target)) return -1; /* * RFC 4861, Section 7.1.1. 
* * If the IP source address is the unspecified address, the IP * destination address must be a solicited-node multicast address. */ if (src_unspec && !ipv6_addr_solicited_node_mc(ipv6_hdr->dst_addr)) return -1; /* * RFC 4861, Section 7.2.3. * * The Target Address must be a "valid" unicast or anycast address * assigned to the receiving interface. For us, this could be * our global or link-local IPv6 address. * * We do not implement an ND proxy service or Duplicate Address * Detection, so we don't need to check for the Target Address * for those. */ if (!ipv6_addrs_equal(iface->ip6_addr.s6_addr, nd_msg->target) && !ipv6_addrs_equal(iface->ll_ip6_addr.s6_addr, nd_msg->target)) return -1; /* Process any ND neighbor options and save them in @ndopts. */ if (parse_nd_opts(&ndopts, nd_msg->opts, icmpv6_len - (sizeof(*icmpv6_hdr) + sizeof(*nd_msg))) == NULL) return -1; if (ndopts.opt_array[ND_OPT_SOURCE_LL_ADDR] != NULL) { struct lls_mod_req mod_req = { .cache = &lls_conf->nd_cache, .addr.proto = RTE_ETHER_TYPE_IPV6, .port_id = iface->id, .ts = time(NULL), }; /* * RFC 4861, Section 7.1.1. * * If the source address is unspecified, there must * not be the source link layer address option. */ if (src_unspec) return -1; RTE_VERIFY(mod_req.ts >= 0); nd_opt = (struct nd_opt_lladdr *) ndopts.opt_array[ND_OPT_SOURCE_LL_ADDR]; /* Update resolution of source of Solicitation. */ rte_memcpy(mod_req.addr.ip.v6.s6_addr, ipv6_hdr->src_addr, sizeof(mod_req.addr.ip.v6.s6_addr)); rte_ether_addr_copy(&nd_opt->ha, &mod_req.ha); lls_process_mod(lls_conf, &mod_req); /* Save source address to use in advertisement. */ src_eth_addr = &nd_opt->ha; } else { /* * If source link layer address is not in the options, * get the source resolution, if we have it. 
*/ struct ipaddr addr = { .proto = RTE_ETHER_TYPE_IPV6 }; rte_memcpy(addr.ip.v6.s6_addr, ipv6_hdr->src_addr, sizeof(addr.ip.v6.s6_addr)); struct lls_map *map = lls_cache_get(&lls_conf->nd_cache, &addr); if (map != NULL) src_eth_addr = &map->ha; } /* Make sure buffer is correct size. */ min_len = ND_NEIGH_PKT_LLADDR_MIN_LEN(l2_len); RTE_VERIFY(RTE_MBUF_DEFAULT_BUF_SIZE >= min_len); if (pkt_len > min_len) { if (rte_pktmbuf_trim(buf, pkt_len - min_len) < 0) { G_LOG(ERR, "Could not trim packet to correct size for response to a Neighbor Solicitation\n"); return -1; } } else if (pkt_len < min_len) { if (rte_pktmbuf_append(buf, min_len - pkt_len) == NULL) { G_LOG(ERR, "Could not append space to packet to correct size for response to a Neighbor Solicitation\n"); return -1; } } ret = verify_l2_hdr(iface, eth_hdr, buf->l2_type, "ND", iface->ipv6_vlan_tag_be); if (ret < 0) return ret; if (src_eth_addr != NULL) { /* * RFC 4861, Section 7.2.4: * Sending Solicited Neighbor Advertisements. * * Since we re-use the buffer, we skip over * any fields whose value should stay the * same from the Neighbor Solicitation. * Since the reply always goes out the same * interface that received it, the L2 space * of the packet is the same. If needed, the * correct VLAN tag was set in verify_l2_hdr(). */ /* Set-up Ethernet header. */ rte_ether_addr_copy(&iface->eth_addr, &eth_hdr->src_addr); rte_ether_addr_copy(src_eth_addr, &eth_hdr->dst_addr); /* Set-up IPv6 header. 
*/ nd_msg->flags = 0; ipv6_hdr->payload_len = rte_cpu_to_be_16(min_len - (l2_len + sizeof(*ipv6_hdr))); if (src_unspec) { struct in6_addr all_nodes_addr = { .s6_addr = { 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }, }; rte_memcpy(ipv6_hdr->dst_addr, all_nodes_addr.s6_addr, sizeof(ipv6_hdr->dst_addr)); } else rte_memcpy(ipv6_hdr->dst_addr, ipv6_hdr->src_addr, sizeof(ipv6_hdr->dst_addr)); /* * This can be any address from our interface, * but the Linux implementation seems to use * whatever the target is (as long as we own * that address), so we'll use it too. */ rte_memcpy(ipv6_hdr->src_addr, nd_msg->target, sizeof(ipv6_hdr->src_addr)); /* Set-up ICMPv6 header. */ icmpv6_hdr->type = ND_NEIGHBOR_ADVERTISEMENT_TYPE; icmpv6_hdr->code = ND_NEIGHBOR_ADVERTISEMENT_CODE; icmpv6_hdr->cksum = 0; /* Calculated below. */ /* Set up ND Advertisement header with target LL addr option. */ if (src_unspec) nd_msg->flags = rte_cpu_to_be_32(LLS_ND_NA_OVERRIDE); else nd_msg->flags = rte_cpu_to_be_32( LLS_ND_NA_OVERRIDE | LLS_ND_NA_SOLICITED); nd_opt = (struct nd_opt_lladdr *)&nd_msg[1]; nd_opt->type = ND_OPT_TARGET_LL_ADDR; nd_opt->len = 1; rte_ether_addr_copy(&iface->eth_addr, &nd_opt->ha); icmpv6_hdr->cksum = rte_ipv6_icmpv6_cksum(ipv6_hdr, icmpv6_hdr); if (rte_eth_tx_burst(iface->id, tx_queue, &buf, 1) <= 0) { G_LOG(ERR, "Could not send an ND Neighbor Advertisement in response to a Solicitation\n"); return -1; } } else { /* * Can't respond to the original solicitation * until we resolve the address of the source. */ /* * RFC 4861, Section 7.2.2: Sending Neighbor Solicitations. * * Use the same approach as xmit_nd_req(), but don't use * that function directly since we already have a buffer * that has some of the fields correctly filled-in. * Since the new solicitation always goes out the same * interface that received the original, the L2 space * of the packet is the same. 
If needed, the correct VLAN * tag was set in verify_l2_hdr(). */ uint8_t ip6_mc_daddr[16] = IPV6_SN_MC_ADDR(ipv6_hdr->src_addr); struct rte_ether_addr eth_mc_daddr = { { 0x33, 0x33, ip6_mc_daddr[12], ip6_mc_daddr[13], ip6_mc_daddr[14], ip6_mc_daddr[15], } }; /* * The RFC doesn't mention this case specificatlly, * but if the source IP address was unspecified and * we don't already have a resolution for it, we * don't know where to send a solicitation. */ if (src_unspec) return -1; rte_ether_addr_copy(&iface->eth_addr, &eth_hdr->src_addr); rte_ether_addr_copy(&eth_mc_daddr, &eth_hdr->dst_addr); /* Set-up IPv6 header. */ ipv6_hdr->payload_len = rte_cpu_to_be_16(min_len - (l2_len + sizeof(*ipv6_hdr))); rte_memcpy(nd_msg->target, ipv6_hdr->src_addr, sizeof(nd_msg->target)); rte_memcpy(ipv6_hdr->dst_addr, ip6_mc_daddr, sizeof(ipv6_hdr->dst_addr)); rte_memcpy(ipv6_hdr->src_addr, iface->ll_ip6_addr.s6_addr, sizeof(ipv6_hdr->src_addr)); /* Set-up ICMPv6 header. */ icmpv6_hdr->cksum = 0; /* Calculated below. */ /* Set up ND Solicitation header with source LL addr option. */ nd_msg->flags = 0; nd_opt = (struct nd_opt_lladdr *)&nd_msg[1]; nd_opt->type = ND_OPT_SOURCE_LL_ADDR; nd_opt->len = 1; rte_ether_addr_copy(&iface->eth_addr, &nd_opt->ha); icmpv6_hdr->cksum = rte_ipv6_icmpv6_cksum(ipv6_hdr, icmpv6_hdr); if (rte_eth_tx_burst(iface->id, tx_queue, &buf, 1) <= 0) { G_LOG(ERR, "Could not send an ND Neighbor Solicitation in response to a Solicitation\n"); return -1; } } return 0; } /* * RFC 4861, Section 7.2.5: Receipt of Neighbor Advertisements. * * The RFC states that there is no need to create an entry if * none exists, but we do. * * We do not adhere to the meanings of the Router, Solicited, * or Override flags when it comes to updating an entry (although * we do use the Solicited flag in a validity test), because * we make no distinction between cache entry states such as * STALE, INCOMPLETE, and UNREACHABLE as described by the RFC. 
* We don't care about whether an entry is a router or whether an * announcement was solicited, we do not implement Neighbor * Unreachability Detection, and we always update an entry * even when the Override flag is not set. */ static int process_nd_neigh_advertisement(struct lls_config *lls_conf, struct rte_ipv6_hdr *ipv6_hdr, struct icmpv6_hdr *icmpv6_hdr, uint16_t icmpv6_len, struct gatekeeper_if *iface) { struct nd_neigh_msg *nd_msg = (struct nd_neigh_msg *)&icmpv6_hdr[1]; struct nd_opt_lladdr *nd_opt; struct nd_opts ndopts; /* * Most of the checks required by RFC 4861, Section 7.1.2 * have already been done by nd_pkt_valid(). */ /* * RFC 4861, Section 7.1.2. * * Target Address must not be a multicast address. */ if (ipv6_addr_multicast(nd_msg->target)) return -1; /* * RFC 4861, Section 7.1.2. * * If the IP Destination Address is a multicast address * the Solicited flag must be zero. */ if (ipv6_addr_multicast(ipv6_hdr->dst_addr) && (nd_msg->flags & rte_cpu_to_be_32(LLS_ND_NA_SOLICITED))) return -1; /* Process any ND neighbor options and save them in @ndopts. */ if (parse_nd_opts(&ndopts, nd_msg->opts, icmpv6_len - (sizeof(*icmpv6_hdr) + sizeof(*nd_msg))) == NULL) return -1; if (ndopts.opt_array[ND_OPT_TARGET_LL_ADDR] != NULL) { struct lls_mod_req mod_req = { .cache = &lls_conf->nd_cache, .addr.proto = RTE_ETHER_TYPE_IPV6, .port_id = iface->id, .ts = time(NULL), }; RTE_VERIFY(mod_req.ts >= 0); nd_opt = (struct nd_opt_lladdr *) ndopts.opt_array[ND_OPT_TARGET_LL_ADDR]; rte_memcpy(mod_req.addr.ip.v6.s6_addr, nd_msg->target, sizeof(mod_req.addr.ip.v6.s6_addr)); rte_ether_addr_copy(&nd_opt->ha, &mod_req.ha); lls_process_mod(lls_conf, &mod_req); } /* Always need to free the packet. */ return -1; } /* * Perform sanity checks to make sure this is a valid ND neighbor packet. * By RFC 4861, sections 7.1.1 and 7.1.2, these checks are required for both * Solicitations and Advertisements. 
*/ static int nd_pkt_valid(struct rte_ipv6_hdr *ipv6_hdr, struct icmpv6_hdr *icmpv6_hdr, uint16_t icmpv6_len) { return ipv6_hdr->hop_limits == 255 && rte_be_to_cpu_16(ipv6_hdr->payload_len) == icmpv6_len && icmpv6_hdr->code == 0 && rte_ipv6_icmpv6_cksum(ipv6_hdr, icmpv6_hdr) == 0; } int process_nd(struct lls_config *lls_conf, struct gatekeeper_if *iface, struct rte_mbuf *buf) { /* * The ICMPv6 header offset in terms of the * beginning of the IPv6 header. */ int icmpv6_offset; uint8_t nexthdr; struct rte_ether_hdr *eth_hdr; struct rte_ipv6_hdr *ipv6_hdr; struct icmpv6_hdr *icmpv6_hdr; uint16_t tx_queue = iface == &lls_conf->net->front ? lls_conf->tx_queue_front : lls_conf->tx_queue_back; uint16_t pkt_len; size_t l2_len; uint16_t icmpv6_len; if (unlikely(!ipv6_if_configured(iface))) return -1; /* pkt_in_skip_l2() was already called by GK or GT. */ l2_len = pkt_in_l2_hdr_len(buf); pkt_len = rte_pktmbuf_data_len(buf); if (pkt_len < ND_NEIGH_PKT_MIN_LEN(l2_len)) { G_LOG(NOTICE, "ND packet received is %"PRIx16" bytes but should be at least %lu bytes in %s\n", pkt_len, ND_NEIGH_PKT_MIN_LEN(l2_len), __func__); return -1; } ipv6_hdr = rte_pktmbuf_mtod_offset(buf, struct rte_ipv6_hdr *, l2_len); if (rte_ipv6_frag_get_ipv6_fragment_header(ipv6_hdr) != NULL) { G_LOG(WARNING, "Received fragmented ND packets destined to this server at %s\n", __func__); return -1; } icmpv6_offset = ipv6_skip_exthdr(ipv6_hdr, buf->data_len - l2_len, &nexthdr); if (icmpv6_offset < 0 || nexthdr != IPPROTO_ICMPV6) return -1; if (pkt_len < (ND_NEIGH_PKT_MIN_LEN(l2_len) + icmpv6_offset - sizeof(*ipv6_hdr))) { G_LOG(NOTICE, "ND packet received is %"PRIx16" bytes but should be at least %lu bytes in %s\n", pkt_len, ND_NEIGH_PKT_MIN_LEN(l2_len) + icmpv6_offset - sizeof(*ipv6_hdr), __func__); return -1; } icmpv6_hdr = (struct icmpv6_hdr *) ((uint8_t *)ipv6_hdr + icmpv6_offset); icmpv6_len = pkt_len - (l2_len + icmpv6_offset); if (unlikely(!nd_pkt_valid(ipv6_hdr, icmpv6_hdr, icmpv6_len))) return -1; eth_hdr 
= rte_pktmbuf_mtod(buf, struct rte_ether_hdr *); switch (icmpv6_hdr->type) { case ND_NEIGHBOR_SOLICITATION_TYPE: if (icmpv6_hdr->code != ND_NEIGHBOR_SOLICITATION_CODE) goto log; return process_nd_neigh_solicitation(lls_conf, buf, eth_hdr, ipv6_hdr, icmpv6_hdr, pkt_len, l2_len, icmpv6_len, iface, tx_queue); case ND_NEIGHBOR_ADVERTISEMENT_TYPE: if (icmpv6_hdr->code != ND_NEIGHBOR_ADVERTISEMENT_CODE) goto log; return process_nd_neigh_advertisement(lls_conf, ipv6_hdr, icmpv6_hdr, icmpv6_len, iface); default: log: G_LOG(NOTICE, "%s received an ICMPv6 packet that's not a Neighbor Solicitation or Neighbor Advertisement (type=%hhu, code=%hhu)\n", __func__, icmpv6_hdr->type, icmpv6_hdr->code); return -1; } rte_panic("Reached the end of %s without hitting a switch case\n", __func__); return 0; } ```
/content/code_sandbox/lls/nd.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
6,853
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <math.h> #include <unistd.h> #include <rte_approx.h> #include <rte_sched.h> #include "gatekeeper_gk.h" #include "gatekeeper_launch.h" #include "gatekeeper_log_ratelimit.h" #include "gatekeeper_sol.h" /* * Gatekeeper request priority queue implementation. * * To implement the request priority queue, we maintain a linked list of * packets listed in order of highest priority to lowest priority. We keep * an array where each index represents a priority, and each element of * that array holds a reference to the last packet of that priority. * This allows us to quickly insert new packets of any priority into * the linked list and drop the packet of lowest priority if necessary. */ static inline struct list_head * mbuf_to_list(struct rte_mbuf *m) { return &mbuf_to_sol_priv(m)->list; } /* * This function doesn't require that the list field of * a request to be initialized. */ static void insert_new_priority_req(struct req_queue *req_queue, struct rte_mbuf *req, uint8_t priority) { uint8_t next; /* This should be the first request of @priority. */ RTE_VERIFY(req_queue->priorities[priority] == NULL); req_queue->priorities[priority] = req; /* This is the first packet in the queue. */ if (req_queue->len == 0) { list_add(mbuf_to_list(req), &req_queue->head); req_queue->highest_priority = priority; req_queue->lowest_priority = priority; return; } /* Not the first packet, but still insert at the head of the queue. 
*/ if (priority > req_queue->highest_priority) { list_add(mbuf_to_list(req), &req_queue->head); req_queue->highest_priority = priority; return; } /* * Insert in middle or end of queue. */ if (priority < req_queue->lowest_priority) req_queue->lowest_priority = priority; /* * This function only inserts a request for a priority that * does not currently exist, and there is at least one * request of a higher priority already in the queue. */ RTE_VERIFY(priority != req_queue->highest_priority); for (next = priority + 1; next <= req_queue->highest_priority; next++) { if (req_queue->priorities[next] != NULL) { list_add(mbuf_to_list(req), mbuf_to_list(req_queue->priorities[next])); return; } } rte_panic("sol: %s failed to insert a request of a new priority\n", __func__); } /* * Get the rte_mbuf struct for this entry. * XXX #52 This function should be part of DPDK. */ static inline struct rte_mbuf * rte_priv_to_mbuf(void *ptr) { return RTE_PTR_SUB(ptr, sizeof(struct rte_mbuf)); } static inline struct rte_mbuf * list_to_mbuf(struct list_head *ptr) { return rte_priv_to_mbuf(list_entry(ptr, struct sol_mbuf_priv, list)); } /* * Get the first rte_mbuf element from a list. * Note, that list is expected to be not empty. */ static inline struct rte_mbuf * list_first_entry_m(struct list_head *ptr) { return list_to_mbuf(ptr->next); } /* * Get the last rte_mbuf element from a list. * Note, that list is expected to be not empty. */ static inline struct rte_mbuf * list_last_entry_m(struct list_head *ptr) { return list_to_mbuf(ptr->prev); } /* Get the prev rte_mbuf element in list. */ static inline struct rte_mbuf * list_prev_entry_m(struct rte_mbuf *pos) { return list_to_mbuf(mbuf_to_list(pos)->prev); } /* Get the next rte_mbuf element in list. 
*/ static inline struct rte_mbuf * list_next_entry_m(struct rte_mbuf *pos) { return list_to_mbuf(mbuf_to_list(pos)->next); } static inline uint8_t get_prio(struct rte_mbuf *pkt) { return mbuf_to_sol_priv(pkt)->priority; } static void drop_lowest_priority_pkt(struct req_queue *req_queue) { struct rte_mbuf *lowest_pr_pkt = list_last_entry_m(&req_queue->head); struct rte_mbuf *next_lowest_pr_pkt; uint8_t lowest_prio = get_prio(lowest_pr_pkt); uint8_t next_lowest_prio; RTE_VERIFY(req_queue->len > 0); if (unlikely(req_queue->len == 1)) { req_queue->priorities[lowest_prio] = NULL; req_queue->highest_priority = 0; req_queue->lowest_priority = GK_MAX_REQ_PRIORITY; goto drop; } next_lowest_pr_pkt = list_prev_entry_m(lowest_pr_pkt); next_lowest_prio = get_prio(next_lowest_pr_pkt); /* The lowest priority packet was the only one of that priority. */ if (lowest_prio != next_lowest_prio) { req_queue->priorities[lowest_prio] = NULL; req_queue->lowest_priority = next_lowest_prio; goto drop; } req_queue->priorities[lowest_prio] = next_lowest_pr_pkt; drop: list_del(mbuf_to_list(lowest_pr_pkt)); rte_pktmbuf_free(lowest_pr_pkt); req_queue->len--; } /* * This function doesn't require that the list field of * a request to be initialized. */ static void enqueue_req(struct sol_config *sol_conf, struct sol_instance *instance, struct rte_mbuf *req) { struct req_queue *req_queue = &instance->req_queue; uint8_t priority = get_prio(req); if (unlikely(priority > GK_MAX_REQ_PRIORITY)) { G_LOG(WARNING, "Trying to enqueue a request with priority %hhu, but should be in range [0, %d]. Overwrite the priority to PRIORITY_REQ_MIN (%hhu)\n", priority, GK_MAX_REQ_PRIORITY, PRIORITY_REQ_MIN); set_prio(req, PRIORITY_REQ_MIN); priority = PRIORITY_REQ_MIN; } if (req_queue->len >= sol_conf->pri_req_max_len) { /* New packet is lowest priority, so drop it. 
*/ if (req_queue->lowest_priority >= priority) { rte_pktmbuf_free(req); return; } drop_lowest_priority_pkt(req_queue); } if (req_queue->priorities[priority] == NULL) { /* Insert request of a priority we don't yet have. */ insert_new_priority_req(req_queue, req, priority); } else { /* Append request to end of the appropriate priority. */ list_add(mbuf_to_list(req), mbuf_to_list( req_queue->priorities[priority])); req_queue->priorities[priority] = req; } req_queue->len++; } static void enqueue_reqs(struct sol_config *sol_conf, struct sol_instance *instance) { struct rte_mbuf *reqs[sol_conf->enq_burst_size]; int num_reqs = rte_ring_sc_dequeue_burst(instance->ring, (void **)reqs, sol_conf->enq_burst_size, NULL); int i; for (i = 0; i < num_reqs; i++) enqueue_req(sol_conf, instance, reqs[i]); } static inline void credits_update(struct req_queue *req_queue) { uint64_t curr_cycles = rte_rdtsc(); uint64_t avail_cycles = curr_cycles - req_queue->time_cpu_cycles; #if __WORDSIZE == 64 ldiv_t avail_bytes; #elif __WORDSIZE == 32 lldiv_t avail_bytes; #else #error "unexpected value for __WORDSIZE macro" #endif /* Not enough cycles have passed to update the number of credits. */ if (avail_cycles <= req_queue->cycles_per_byte_floor) return; #if __WORDSIZE == 64 avail_bytes = ldiv(avail_cycles * req_queue->cycles_per_byte_b, req_queue->cycles_per_byte_a); #elif __WORDSIZE == 32 avail_bytes = lldiv(avail_cycles * req_queue->cycles_per_byte_b, req_queue->cycles_per_byte_a); #else #error "unexpected value for __WORDSIZE macro" #endif req_queue->tb_credit_bytes += avail_bytes.quot; if (req_queue->tb_credit_bytes > req_queue->tb_max_credit_bytes) req_queue->tb_credit_bytes = req_queue->tb_max_credit_bytes; /* * If there are spare cycles (that were not converted to credits * because of rounding), keep them for the next iteration. 
*/ req_queue->time_cpu_cycles = curr_cycles - avail_bytes.rem / req_queue->cycles_per_byte_b; } static inline int credits_check(struct req_queue *req_queue, struct rte_mbuf *pkt) { /* Need to include Ethernet frame overhead (preamble, gap, etc.) */ uint32_t pkt_len = pkt->pkt_len + RTE_SCHED_FRAME_OVERHEAD_DEFAULT; if (pkt_len > req_queue->tb_credit_bytes) return false; req_queue->tb_credit_bytes -= pkt_len; return true; } /* * Iterate over list of rte_mbufs safe against removal of list entry. */ #define list_for_each_entry_safe_m(pos, n, head) \ for (pos = list_first_entry_m(head), \ n = list_next_entry_m(pos); \ mbuf_to_list(pos) != (head); \ pos = n, n = list_next_entry_m(n)) static void dequeue_reqs(struct sol_config *sol_conf, struct sol_instance *instance, uint8_t tx_port) { struct req_queue *req_queue = &instance->req_queue; struct rte_mbuf *entry, *next; struct rte_mbuf *pkts_out[sol_conf->deq_burst_size]; uint32_t nb_pkts_out = 0; uint16_t total_sent = 0; /* Get an up-to-date view of our credits. */ credits_update(req_queue); list_for_each_entry_safe_m(entry, next, &req_queue->head) { uint8_t entry_prio; if (!credits_check(req_queue, entry)) { /* * The library log_ratelimit will throtle * the log rate of the log entry below when * Gatekeeper servers are under attacks. */ G_LOG(NOTICE, "Out of request bandwidth\n"); goto out; } entry_prio = get_prio(entry); if (req_queue->len == 1 || (entry_prio != get_prio(next))) req_queue->priorities[entry_prio] = NULL; list_del(mbuf_to_list(entry)); req_queue->len--; pkts_out[nb_pkts_out++] = entry; if (nb_pkts_out >= sol_conf->deq_burst_size) break; } out: if (list_empty(&req_queue->head)) { req_queue->highest_priority = 0; req_queue->lowest_priority = GK_MAX_REQ_PRIORITY; } else { struct rte_mbuf *first = list_first_entry_m(&req_queue->head); req_queue->highest_priority = get_prio(first); } /* We cannot drop the packets, so re-send. 
*/ while (nb_pkts_out > 0) { uint16_t sent = rte_eth_tx_burst(tx_port, instance->tx_queue_back, pkts_out + total_sent, nb_pkts_out); total_sent += sent; nb_pkts_out -= sent; } } static inline double mbits_to_bytes(double mbps) { return mbps * (1000 * 1000 / 8); } /* * @sol_conf is allocated using rte_calloc_socket(), so initializations * to 0 are not strictly necessary in this function. */ static int req_queue_init(struct sol_config *sol_conf) { double max_credit_bytes_precise; double cycles_per_byte_precise; uint64_t cycles_per_byte_floor; uint64_t now; uint32_t a, b; int ret, i; /* Convert to bytes per second. */ max_credit_bytes_precise = mbits_to_bytes(sol_conf->req_channel_bw_mbps); max_credit_bytes_precise /= sol_conf->num_lcores; /* * Compute the number of cycles needed to credit the request queue * with bytes. Represent this ratio of cycles per byte using two * numbers -- a numerator and denominator. * * The function rte_approx() can only approximate a floating-point * number between (0, 1). Therefore, approximate only the fractional * part of the cycles per byte using rte_approx(), and then add * the integer number of cycles per byte to the numerator. */ cycles_per_byte_precise = cycles_per_sec / max_credit_bytes_precise; cycles_per_byte_floor = cycles_per_byte_precise; ret = rte_approx( cycles_per_byte_precise - cycles_per_byte_floor, sol_conf->tb_rate_approx_err, &a, &b); if (ret < 0) { G_LOG(ERR, "%s(): could not approximate the request queue's allocated bandwidth\n", __func__); return ret; } /* Add integer number of cycles per byte to numerator. 
*/ a += cycles_per_byte_floor * b; G_LOG(NOTICE, "%s(): cycles per byte (%f) represented as a rational: %u / %u\n", __func__, cycles_per_byte_precise, a, b); now = rte_rdtsc(); for (i = 0; i < sol_conf->num_lcores; i++) { struct req_queue *req_queue = &sol_conf->instances[i].req_queue; INIT_LIST_HEAD(&req_queue->head); req_queue->len = 0; req_queue->highest_priority = 0; req_queue->lowest_priority = GK_MAX_REQ_PRIORITY; /* Initialize token bucket as full. */ req_queue->tb_max_credit_bytes = round(max_credit_bytes_precise); req_queue->tb_credit_bytes = req_queue->tb_max_credit_bytes; /* * Initialize the number of cycles needed to credit * the request queue with bytes. */ req_queue->cycles_per_byte_floor = cycles_per_byte_floor; req_queue->cycles_per_byte_a = a; req_queue->cycles_per_byte_b = b; req_queue->time_cpu_cycles = now; } return 0; } static int cleanup_sol(struct sol_config *sol_conf) { int i; if (sol_conf->instances == NULL) goto free_sol_conf; for (i = 0; i < sol_conf->num_lcores; i++) { struct sol_instance *inst = &sol_conf->instances[i]; struct req_queue *req_queue = &inst->req_queue; struct rte_mbuf *entry, *next; if (list_initiated(&req_queue->head)) { list_for_each_entry_safe_m(entry, next, &req_queue->head) { list_del(mbuf_to_list(entry)); rte_pktmbuf_free(entry); req_queue->len--; } if (unlikely(req_queue->len > 0)) { G_LOG(CRIT, "%s(): bug: removing all requests from the priority queue on cleanup leaves the queue length at %"PRIu32" at lcore %u\n", __func__, req_queue->len, sol_conf->lcores[i]); } } rte_ring_free(inst->ring); inst->ring = NULL; } rte_free(sol_conf->instances); sol_conf->instances = NULL; free_sol_conf: rte_free(sol_conf); return 0; } int sol_conf_put(struct sol_config *sol_conf) { /* * Atomically decrements the atomic counter by one and returns true * if the result is 0, or false in all other cases. 
*/ if (rte_atomic32_dec_and_test(&sol_conf->ref_cnt)) return cleanup_sol(sol_conf); return 0; } static int get_block_idx(struct sol_config *sol_conf, unsigned int lcore_id) { int i; for (i = 0; i < sol_conf->num_lcores; i++) if (sol_conf->lcores[i] == lcore_id) return i; rte_panic("Unexpected condition: lcore %u is not running a sol block\n", lcore_id); return 0; } static int sol_proc(void *arg) { struct sol_config *sol_conf = (struct sol_config *)arg; unsigned int lcore = rte_lcore_id(); unsigned int block_idx = get_block_idx(sol_conf, lcore); struct sol_instance *instance = &sol_conf->instances[block_idx]; uint8_t tx_port_back = sol_conf->net->back.id; G_LOG(NOTICE, "The Solicitor block is running at tid = %u\n", gettid()); if (needed_caps(0, NULL) < 0) { G_LOG(ERR, "Could not set needed capabilities\n"); exiting = true; } sol_conf_hold(sol_conf); while (likely(!exiting)) { enqueue_reqs(sol_conf, instance); dequeue_reqs(sol_conf, instance, tx_port_back); } G_LOG(NOTICE, "The Solicitor block is exiting\n"); return sol_conf_put(sol_conf); } static int sol_stage1(void *arg) { struct sol_config *sol_conf = arg; int i; sol_conf->instances = rte_calloc_socket(__func__, sol_conf->num_lcores, sizeof(struct sol_instance), 0, rte_lcore_to_socket_id(sol_conf->lcores[0])); if (sol_conf->instances == NULL) goto cleanup; for (i = 0; i < sol_conf->num_lcores; i++) { unsigned int lcore = sol_conf->lcores[i]; struct sol_instance *inst_ptr = &sol_conf->instances[i]; char ring_name[64]; int ret = snprintf(ring_name, sizeof(ring_name), "sol_reqs_ring_%u", i); RTE_VERIFY(ret > 0 && ret < (int)sizeof(ring_name)); inst_ptr->ring = rte_ring_create(ring_name, rte_align32pow2(sol_conf->pri_req_max_len), rte_lcore_to_socket_id(lcore), RING_F_SC_DEQ); if (inst_ptr->ring == NULL) { G_LOG(ERR, "sol: can't create ring sol_reqs_ring at lcore %u\n", lcore); goto cleanup; } ret = get_queue_id(&sol_conf->net->back, QUEUE_TYPE_TX, lcore, NULL); if (ret < 0) { G_LOG(ERR, "Cannot assign a TX queue 
for the back interface for lcore %u\n", lcore); goto cleanup; } inst_ptr->tx_queue_back = ret; /* * @inst_ptr->req_queue is initialized at * sol_stage2()/req_queue_init(). */ } return 0; cleanup: cleanup_sol(sol_conf); return -1; } static int sol_stage2(void *arg) { struct sol_config *sol_conf = arg; int ret = req_queue_init(sol_conf); if (ret < 0) goto cleanup; return 0; cleanup: cleanup_sol(sol_conf); return ret; } int run_sol(struct net_config *net_conf, struct sol_config *sol_conf) { int ret, i; uint16_t front_inc; if (unlikely(net_conf == NULL || sol_conf == NULL)) { G_LOG(ERR, "%s(): net_conf = %p or sol_conf = %p cannot be NULL\n", __func__, net_conf, sol_conf); ret = -EINVAL; goto out; } for (i = 0; i < sol_conf->num_lcores; i++) { log_ratelimit_state_init(sol_conf->lcores[i], sol_conf->log_ratelimit_interval_ms, sol_conf->log_ratelimit_burst, sol_conf->log_level, "SOL"); } if (unlikely(!net_conf->back_iface_enabled)) { G_LOG(ERR, "%s(): back interface is required\n", __func__); ret = -EINVAL; goto out; } if (unlikely(sol_conf->pri_req_max_len == 0)) { G_LOG(ERR, "%s(): the parameter pri_req_max_len = %u must be greater than 0\n", __func__, sol_conf->pri_req_max_len); ret = -EINVAL; goto out; } if (unlikely(sol_conf->enq_burst_size == 0 || sol_conf->deq_burst_size == 0)) { G_LOG(ERR, "%s(): the paramters enq_burst_size = %u and deq_burst_size = %u must both be greater than 0\n", __func__, sol_conf->enq_burst_size, sol_conf->deq_burst_size); ret = -EINVAL; goto out; } if (unlikely(sol_conf->enq_burst_size > sol_conf->pri_req_max_len || sol_conf->deq_burst_size > sol_conf->pri_req_max_len)) { G_LOG(ERR, "%s(): the paramters enq_burst_size = %u and deq_burst_size = %u must both be less than or equal to the parameter pri_req_max_len = %u\n", __func__, sol_conf->enq_burst_size, sol_conf->deq_burst_size, sol_conf->pri_req_max_len); ret = -EINVAL; goto out; } if (unlikely(sol_conf->req_channel_bw_mbps <= 0)) { G_LOG(ERR, "%s(): the parameter req_channel_bw_mbps = 
%f must be greater than 0\n", __func__, sol_conf->req_channel_bw_mbps); ret = -EINVAL; goto out; } if (unlikely(sol_conf->num_lcores <= 0)) { G_LOG(ERR, "%s(): the parameter num_lcores = %i must be greater than 0\n", __func__, sol_conf->num_lcores); ret = -EINVAL; goto out; } /* * Need to account for the packets in the following scenarios: * * (1) sol_conf->pri_req_max_len packets may sit at the ring; * (2) sol_conf->pri_req_max_len packet may sit at the actually queue; * (3) enqueue_reqs() temporarily adds sol_conf->enq_burst_size * more packets; * (4) sol_conf->deq_burst_size does not count because dequeue_reqs() * only reduces the number of packets, that is, it does not add. * * Although the packets are going to the back interface, * they are allocated at the front interface. */ front_inc = (2 * sol_conf->pri_req_max_len + sol_conf->enq_burst_size) * sol_conf->num_lcores; net_conf->front.total_pkt_burst += front_inc; ret = net_launch_at_stage1(net_conf, 0, 0, 0, sol_conf->num_lcores, sol_stage1, sol_conf); if (ret < 0) goto burst; ret = launch_at_stage2(sol_stage2, sol_conf); if (ret < 0) goto stage1; for (i = 0; i < sol_conf->num_lcores; i++) { unsigned int lcore = sol_conf->lcores[i]; ret = launch_at_stage3("sol", sol_proc, sol_conf, lcore); if (ret < 0) { pop_n_at_stage3(i); goto stage2; } } sol_conf->net = net_conf; rte_atomic32_init(&sol_conf->ref_cnt); return 0; stage2: pop_n_at_stage2(1); stage1: pop_n_at_stage1(1); burst: net_conf->front.total_pkt_burst -= front_inc; out: return ret; } /* * There should be only one sol_config instance. * Return an error if trying to allocate the second instance. * * Use rte_calloc_socket() to zero-out the instance and initialize the * request queue list to guarantee that cleanup_sol() won't fail * during initialization. 
*/ struct sol_config * alloc_sol_conf(void) { struct sol_config *sol_conf; static rte_atomic16_t num_sol_conf_alloc = RTE_ATOMIC16_INIT(0); if (rte_atomic16_test_and_set(&num_sol_conf_alloc) != 1) { G_LOG(ERR, "Trying to allocate the second instance of struct sol_config\n"); return NULL; } sol_conf = rte_calloc("sol_config", 1, sizeof(struct sol_config), 0); if (sol_conf == NULL) { rte_atomic16_clear(&num_sol_conf_alloc); G_LOG(ERR, "Failed to allocate the first instance of struct sol_config\n"); return NULL; } return sol_conf; } int gk_solicitor_enqueue_bulk(struct sol_instance *instance, struct rte_mbuf **pkts, uint16_t num_pkts) { unsigned int num_enqueued = rte_ring_mp_enqueue_bulk(instance->ring, (void **)pkts, num_pkts, NULL); if (unlikely(num_enqueued < num_pkts)) { G_LOG(ERR, "Failed to enqueue a bulk of %hu requests - only %u requests are enqueued\n", num_pkts, num_enqueued); } return num_enqueued; } ```
/content/code_sandbox/sol/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
5,705
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <rte_icmp.h> #include "gatekeeper_acl.h" #include "gatekeeper_lls.h" /* Maximum number of rules installed per ACL. */ #define MAX_NUM_ACL_RULES (32) /* Result returned when the ACL does not find a matching rule. */ #define ACL_NO_MATCH (0) /* Callback function for when there's no classification match. */ static int drop_unmatched_pkts(struct rte_mbuf **pkts, unsigned int num_pkts, __attribute__((unused)) struct gatekeeper_if *iface) { /* * WARNING * A packet has reached a Gatekeeper server, * and Gatekeeper doesn't know what to do with * this packet. If attackers are able to send * these packets, they may be able to slow * Gatekeeper down since Gatekeeper does a lot of * processing to eventually discard these packets. */ if (unlikely(G_LOG_CHECK(DEBUG))) { unsigned int i; for (i = 0; i < num_pkts; i++) { G_LOG(DEBUG, "acl: a packet failed to match any ACL rules, the whole packet is dumped below:\n"); rte_pktmbuf_dump(log_file, pkts[i], pkts[i]->pkt_len); } } rte_pktmbuf_free_bulk(pkts, num_pkts); return 0; } int process_acl(struct gatekeeper_if *iface, unsigned int lcore_id, struct acl_search *acl, struct acl_state *astate, int acl_enabled, const char *proto_name) { struct rte_mbuf *pkts[astate->func_count][acl->num]; int num_pkts[astate->func_count]; unsigned int socket_id = rte_lcore_to_socket_id(lcore_id); unsigned int i; int ret; /* The classification results for each packet. 
*/ uint32_t res[acl->num]; if (!acl_enabled) { ret = 0; goto drop_acl_pkts; } ret = rte_acl_classify(astate->acls[socket_id], acl->data, res, acl->num, 1); if (unlikely(ret < 0)) { G_LOG(ERR, "acl: invalid arguments given to %s rte_acl_classify()\n", proto_name); goto drop_acl_pkts; } /* Split packets into separate buffers -- one for each type. */ memset(num_pkts, 0, sizeof(num_pkts)); for (i = 0; i < acl->num; i++) { int type = res[i]; if (type == ACL_NO_MATCH) { unsigned int j; /* * @j starts at 1 to skip ACL_NO_MATCH, * which has no matching function. */ for (j = 1; j < astate->func_count; j++) { /* Skip over ACLs without matching function. */ if (astate->ext_funcs[j] == NULL) continue; ret = astate->ext_funcs[j]( acl->mbufs[i], iface); if (ret == 0) { type = j; break; } } } pkts[type][num_pkts[type]++] = acl->mbufs[i]; } /* Transmit separate buffers to registered ACL functions. */ for (i = 0; i < astate->func_count; i++) { if (num_pkts[i] == 0) continue; ret = astate->funcs[i](pkts[i], num_pkts[i], iface); if (unlikely(ret < 0)) { /* * Each ACL function is responsible for * freeing packets not already handled. */ G_LOG(WARNING, "acl: %s ACL function %d failed on %s iface\n", proto_name, i, iface->name); } } ret = 0; goto out; drop_acl_pkts: rte_pktmbuf_free_bulk(acl->mbufs, acl->num); out: acl->num = 0; return ret; } void destroy_acls(struct acl_state *astate) { unsigned int numa_nodes = get_net_conf()->numa_nodes; unsigned int i; for (i = 0; i < numa_nodes; i++) { if (astate->acls[i] != NULL) { rte_acl_free(astate->acls[i]); astate->acls[i] = NULL; } } astate->enabled = false; } /* * IPv4 ACLs. */ /* * Input indices for the IPv4-related ACL fields. Fields are given * unique identifiers, but since the DPDK ACL library processes * each packet in four-byte chunks, the fields need to be grouped * into four-byte input indices. Therefore, adjacent fields may * share the same input index. 
For example, TCP and UDP ports are * two-byte contiguous fields forming four consecutive bytes, so * they could have the same input index. */ enum { PROTO_INPUT_IPV4, DST_INPUT_IPV4, /* Source/destination ports are grouped together. */ PORTS_INPUT_IPV4, TYPE_INPUT_ICMP, NUM_INPUTS_IPV4, }; /* * All IPv4 fields involved in classification; not all fields must * be specified for every rule. Fields must be grouped into sets of * four bytes, except for the first field. */ struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = { { .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint8_t), .field_index = PROTO_FIELD_IPV4, .input_index = PROTO_INPUT_IPV4, .offset = offsetof(struct rte_ipv4_hdr, next_proto_id), }, { .type = RTE_ACL_FIELD_TYPE_MASK, .size = sizeof(uint32_t), .field_index = DST_FIELD_IPV4, .input_index = DST_INPUT_IPV4, .offset = offsetof(struct rte_ipv4_hdr, dst_addr), }, /* * The source and destination ports are the first and second * fields in TCP and UDP, so they are the four bytes directly * following the IPv4 header. */ { .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint16_t), .field_index = SRCP_FIELD_IPV4, .input_index = PORTS_INPUT_IPV4, .offset = sizeof(struct rte_ipv4_hdr), }, { .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint16_t), .field_index = DSTP_FIELD_IPV4, .input_index = PORTS_INPUT_IPV4, .offset = sizeof(struct rte_ipv4_hdr) + sizeof(uint16_t), }, }; static int register_cb_fs(struct acl_state *acls, acl_cb_func cb_f, ext_cb_func ext_cb_f) { unsigned int i, func_count = acls->func_count; if (ext_cb_f == NULL) { /* * Since @ext_cb_f is NULL, * we only care to where the packets are going. * Combining types is important to group more packets for * each call of @cb_f. 
*/ for (i = 0; i < func_count; i++) { if (acls->funcs[i] == cb_f) return i; } goto new_type; } for (i = 0; i < func_count; i++) { if (acls->ext_funcs[i] == ext_cb_f) { if (acls->funcs[i] == cb_f) return i; G_LOG(ERR, "acl: an extension callback function is being used twice, but has different callback functions associated to it\n"); return -1; } } new_type: if (func_count >= GATEKEEPER_ACL_MAX) { G_LOG(ERR, "acl: cannot install more ACL types\n"); return -1; } acls->funcs[func_count] = cb_f; acls->ext_funcs[func_count] = ext_cb_f; acls->func_count++; return func_count; } /* * For each ACL rule set, register a match function that parses * the unmatched IPv4 packets, and direct them to the corresponding * blocks or drop them. This functionality is for the ext_cb_f parameter * and that it's necessary because of variable IP headers that * may not match the ACLs. * * WARNING * You must only register filters that are not subject to * the control of attackers. Otherwise, attackers can overwhelm * Gatekeeper servers since the current implementation of these filters * is not very efficient due to the variable header of IP. */ int register_ipv4_acl(struct ipv4_acl_rule *ipv4_rule, acl_cb_func cb_f, ext_cb_func ext_cb_f, struct gatekeeper_if *iface) { unsigned int i, numa_nodes = get_net_conf()->numa_nodes; int index = register_cb_fs(&iface->ipv4_acls, cb_f, ext_cb_f); if (index < 0) { G_LOG(ERR, "acl: cannot add IPv4 ACL type on interface %s\n", iface->name); return -1; } /* Assign an ID to this rule. 
*/ ipv4_rule->data.userdata = index; for (i = 0; i < numa_nodes; i++) { int ret; if (iface->ipv4_acls.acls[i] == NULL) continue; ret = rte_acl_add_rules(iface->ipv4_acls.acls[i], (struct rte_acl_rule *)ipv4_rule, 1); if (ret < 0) { G_LOG(ERR, "acl: failed to add IPv4 ACL rule on interface %s on socket %d\n", iface->name, i); return ret; } } return 0; } int build_ipv4_acls(struct gatekeeper_if *iface) { struct rte_acl_config acl_build_params; unsigned int numa_nodes = get_net_conf()->numa_nodes; unsigned int i; memset(&acl_build_params, 0, sizeof(acl_build_params)); acl_build_params.num_categories = 1; acl_build_params.num_fields = RTE_DIM(ipv4_defs); rte_memcpy(&acl_build_params.defs, ipv4_defs, sizeof(ipv4_defs)); for (i = 0; i < numa_nodes; i++) { int ret; if (iface->ipv4_acls.acls[i] == NULL) continue; ret = rte_acl_build(iface->ipv4_acls.acls[i], &acl_build_params); if (ret < 0) { G_LOG(ERR, "acl: failed to build IPv4 ACL for the %s iface\n", iface->name); return ret; } } return 0; } int init_ipv4_acls(struct gatekeeper_if *iface) { struct net_config *net_conf = get_net_conf(); unsigned int i; for (i = 0; i < net_conf->numa_nodes; i++) { char acl_name[64]; struct rte_acl_param acl_params = { .socket_id = i, .rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv4_defs)), .max_rule_num = MAX_NUM_ACL_RULES, }; int ret; if (!net_conf->numa_used[i]) continue; ret = snprintf(acl_name, sizeof(acl_name), "%s_%u_v4", iface->name, i); RTE_VERIFY(ret > 0 && ret < (int)sizeof(acl_name)); acl_params.name = acl_name; iface->ipv4_acls.acls[i] = rte_acl_create(&acl_params); if (iface->ipv4_acls.acls[i] == NULL) { unsigned int j; G_LOG(ERR, "acl: failed to create IPv4 ACL for the %s iface on socket %d\n", iface->name, i); for (j = 0; j < i; j++) { rte_acl_free(iface->ipv4_acls.acls[i]); iface->ipv4_acls.acls[i] = NULL; } return -1; } } /* Add drop function for packets that cannot be classified. 
*/ RTE_BUILD_BUG_ON(ACL_NO_MATCH != 0); iface->ipv4_acls.funcs[ACL_NO_MATCH] = drop_unmatched_pkts; iface->ipv4_acls.ext_funcs[ACL_NO_MATCH] = NULL; iface->ipv4_acls.func_count = 1; iface->ipv4_acls.enabled = true; return 0; } /* * IPv6 ACLs. */ /* * Input indices for the IPv6-related ACL fields. Fields are given * unique identifiers, but since the DPDK ACL library processes * each packet in four-byte chunks, the fields need to be grouped * into four-byte input indices. Therefore, adjacent fields may * share the same input index. For example, TCP and UDP ports are * two-byte contiguous fields forming four consecutive bytes, so * they could have the same input index. */ enum { PROTO_INPUT_IPV6, DST1_INPUT_IPV6, DST2_INPUT_IPV6, DST3_INPUT_IPV6, DST4_INPUT_IPV6, /* Source/destination ports are grouped together. */ PORTS_INPUT_IPV6, TYPE_INPUT_ICMPV6, NUM_INPUTS_IPV6, }; /* * All IPv6 fields involved in classification; not all fields must * be specified for every rule. Fields must be grouped into sets of * four bytes, except for the first field. 
*/ struct rte_acl_field_def ipv6_defs[NUM_FIELDS_IPV6] = { { .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint8_t), .field_index = PROTO_FIELD_IPV6, .input_index = PROTO_INPUT_IPV6, .offset = offsetof(struct rte_ipv6_hdr, proto), }, { .type = RTE_ACL_FIELD_TYPE_MASK, .size = sizeof(uint32_t), .field_index = DST1_FIELD_IPV6, .input_index = DST1_INPUT_IPV6, .offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]), }, { .type = RTE_ACL_FIELD_TYPE_MASK, .size = sizeof(uint32_t), .field_index = DST2_FIELD_IPV6, .input_index = DST2_INPUT_IPV6, .offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]), }, { .type = RTE_ACL_FIELD_TYPE_MASK, .size = sizeof(uint32_t), .field_index = DST3_FIELD_IPV6, .input_index = DST3_INPUT_IPV6, .offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]), }, { .type = RTE_ACL_FIELD_TYPE_MASK, .size = sizeof(uint32_t), .field_index = DST4_FIELD_IPV6, .input_index = DST4_INPUT_IPV6, .offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]), }, /* * The source and destination ports are the first and second * fields in TCP and UDP, so they are the four bytes directly * following the IPv6 header. */ { .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint16_t), .field_index = SRCP_FIELD_IPV6, .input_index = PORTS_INPUT_IPV6, .offset = sizeof(struct rte_ipv6_hdr), }, { .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint16_t), .field_index = DSTP_FIELD_IPV6, .input_index = PORTS_INPUT_IPV6, .offset = sizeof(struct rte_ipv6_hdr) + sizeof(uint16_t), }, { /* Enforce grouping into four bytes. */ .type = RTE_ACL_FIELD_TYPE_BITMASK, .size = sizeof(uint32_t), .field_index = TYPE_FIELD_ICMPV6, .input_index = TYPE_INPUT_ICMPV6, .offset = sizeof(struct rte_ipv6_hdr) + offsetof(struct icmpv6_hdr, type), }, }; /* * For each ACL rule set, register a match function that parses * the unmatched IPv6 packets, and direct them to the corresponding * blocks or drop them. 
This functionality is for the ext_cb_f parameter * and that it's necessary because of variable IP headers that * may not match the ACLs. * * WARNING * You must only register filters that are not subject to * the control of attackers. Otherwise, attackers can overwhelm * Gatekeeper servers since the current implementation of these filters * is not very efficient due to the variable header of IP. */ int register_ipv6_acl(struct ipv6_acl_rule *ipv6_rule, acl_cb_func cb_f, ext_cb_func ext_cb_f, struct gatekeeper_if *iface) { unsigned int i, numa_nodes = get_net_conf()->numa_nodes; int index = register_cb_fs(&iface->ipv6_acls, cb_f, ext_cb_f); if (index < 0) { G_LOG(ERR, "acl: cannot add IPv6 ACL type on interface %s\n", iface->name); return -1; } /* Assign an ID to this rule. */ ipv6_rule->data.userdata = index; for (i = 0; i < numa_nodes; i++) { int ret; if (iface->ipv6_acls.acls[i] == NULL) continue; ret = rte_acl_add_rules(iface->ipv6_acls.acls[i], (struct rte_acl_rule *)ipv6_rule, 1); if (ret < 0) { G_LOG(ERR, "acl: failed to add IPv6 ACL rule on interface %s on socket %d\n", iface->name, i); return ret; } } return 0; } int build_ipv6_acls(struct gatekeeper_if *iface) { struct rte_acl_config acl_build_params; unsigned int numa_nodes = get_net_conf()->numa_nodes; unsigned int i; memset(&acl_build_params, 0, sizeof(acl_build_params)); acl_build_params.num_categories = 1; acl_build_params.num_fields = RTE_DIM(ipv6_defs); rte_memcpy(&acl_build_params.defs, ipv6_defs, sizeof(ipv6_defs)); for (i = 0; i < numa_nodes; i++) { int ret; if (iface->ipv6_acls.acls[i] == NULL) continue; ret = rte_acl_build(iface->ipv6_acls.acls[i], &acl_build_params); if (ret < 0) { G_LOG(ERR, "acl: failed to build IPv6 ACL for the %s iface\n", iface->name); return ret; } } return 0; } int init_ipv6_acls(struct gatekeeper_if *iface) { struct net_config *net_conf = get_net_conf(); unsigned int i; for (i = 0; i < net_conf->numa_nodes; i++) { char acl_name[64]; struct rte_acl_param acl_params = { 
.socket_id = i, .rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv6_defs)), .max_rule_num = MAX_NUM_ACL_RULES, }; int ret; if (!net_conf->numa_used[i]) continue; ret = snprintf(acl_name, sizeof(acl_name), "%s_%u_v6", iface->name, i); RTE_VERIFY(ret > 0 && ret < (int)sizeof(acl_name)); acl_params.name = acl_name; iface->ipv6_acls.acls[i] = rte_acl_create(&acl_params); if (iface->ipv6_acls.acls[i] == NULL) { unsigned int j; G_LOG(ERR, "acl: failed to create IPv6 ACL for the %s iface on socket %d\n", iface->name, i); for (j = 0; j < i; j++) { rte_acl_free(iface->ipv6_acls.acls[i]); iface->ipv6_acls.acls[i] = NULL; } return -1; } } /* Add drop function for packets that cannot be classified. */ RTE_BUILD_BUG_ON(ACL_NO_MATCH != 0); iface->ipv6_acls.funcs[ACL_NO_MATCH] = drop_unmatched_pkts; iface->ipv6_acls.ext_funcs[ACL_NO_MATCH] = NULL; iface->ipv6_acls.func_count = 1; iface->ipv6_acls.enabled = true; return 0; } ```
/content/code_sandbox/lib/acl.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
4,651
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include "gatekeeper_l2.h" #include "gatekeeper_main.h" /* * Return the difference in the size of the L2 header between * packet received (@pkt) and a packet that will be transmitted * on @iface. This determines what changes have to be made to the * L2 space of the packet. * * A negative number indicates that bytes need to be removed from * the L2 space, and a positive number indicates that bytes need to * be added to the L2 space. * * WARNING * Note that in order to use this function, @pkt must have first gone * through pkt_in_skip_l2() or another function to set its packet type. */ static inline int in_to_out_l2_diff(struct gatekeeper_if *iface, struct rte_mbuf *pkt) { return iface->l2_len_out - pkt_in_l2_hdr_len(pkt); } /* * Adjust a packet's length. * * The parameter @bytes_to_add represents the number of bytes to add for higher * layers, if any, such as for an encapsulating network header. The function * then also takes into account how many bytes are necessary for the L2 header. * If @bytes_to_add is negative, bytes are removed from the packet. */ struct rte_ether_hdr * adjust_pkt_len(struct rte_mbuf *pkt, struct gatekeeper_if *iface, int bytes_to_add) { struct rte_ether_hdr *eth_hdr; bytes_to_add += in_to_out_l2_diff(iface, pkt); if (bytes_to_add > 0) { eth_hdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, bytes_to_add); if (eth_hdr == NULL) { G_LOG(ERR, "l2: not enough headroom space in the first segment\n"); return NULL; } } else if (bytes_to_add < 0) { /* * @bytes_to_add is negative, so its magnitude is * the number of bytes we need to *remove*. 
*/ eth_hdr = (struct rte_ether_hdr *)rte_pktmbuf_adj(pkt, -bytes_to_add); if (eth_hdr == NULL) { G_LOG(ERR, "l2: could not remove headroom space\n"); return NULL; } } else eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); return eth_hdr; } /* * Verify a packet's L2 header with respect to * the interface on which it was received. */ int verify_l2_hdr(struct gatekeeper_if *iface, struct rte_ether_hdr *eth_hdr, uint32_t l2_type, const char *proto_name, uint16_t vlan_tag_be) { if (iface->vlan_insert) { struct rte_vlan_hdr *vlan_hdr; /* * Drop packets that don't have room for VLAN, since * we would have to make space for a new header. */ if (unlikely(l2_type != RTE_PTYPE_L2_ETHER_VLAN)) { G_LOG(WARNING, "l2: %s interface incorrectly received an %s packet without a VLAN header\n", iface->name, proto_name); return -1; } /* Clear priority and CFI fields. */ vlan_hdr = (struct rte_vlan_hdr *)&eth_hdr[1]; RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(RTE_ETHER_MAX_VLAN_ID + 1)); vlan_hdr->vlan_tci &= rte_cpu_to_be_16(RTE_ETHER_MAX_VLAN_ID); /* Drop packets whose VLAN tags are not correct. */ if (unlikely(vlan_hdr->vlan_tci != vlan_tag_be)) { /* * The log level below cannot be low due to * loose filters in some vantage points, that is, * Gatekeeper receives many packets for other VLANs * during normal operation. */ G_LOG(INFO, "l2: %s interface received an %s packet with an incorrect VLAN tag (0x%02x but should be 0x%02x)\n", iface->name, proto_name, rte_be_to_cpu_16(vlan_hdr->vlan_tci), rte_be_to_cpu_16(vlan_tag_be)); return -1; } } else if (unlikely(l2_type != RTE_PTYPE_UNKNOWN)) { /* * Drop packets that have a VLAN header when we're not expecting * one, since we would have to remove space in the header. */ G_LOG(WARNING, "l2: %s interface incorrectly received an %s packet with a VLAN header\n", iface->name, proto_name); return -1; } return 0; } ```
/content/code_sandbox/lib/l2.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,142
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#include <arpa/inet.h>

#include <rte_thash.h>
#include <rte_debug.h>
#include <rte_ether.h>

#include "gatekeeper_net.h"
#include "gatekeeper_main.h"
#include "gatekeeper_flow.h"

/*
 * Three-way comparison of two IP flows, suitable as an ordering function.
 *
 * Flows of different protocols never compare equal: an IPv4 flow orders
 * before any non-IPv4 flow. Flows of the same family are compared
 * bytewise over their address pairs with memcmp(), so the result is
 * negative, zero, or positive like memcmp().
 */
int
flow_cmp(const struct ip_flow *flow1, const struct ip_flow *flow2)
{
	if (flow1->proto != flow2->proto)
		return flow1->proto == RTE_ETHER_TYPE_IPV4 ? -1 : 1;

	if (flow1->proto == RTE_ETHER_TYPE_IPV4)
		return memcmp(&flow1->f.v4, &flow2->f.v4, sizeof(flow1->f.v4));

	return memcmp(&flow1->f.v6, &flow2->f.v6, sizeof(flow1->f.v6));
}

/*
 * Fallback logger used when @flow->proto is neither IPv4 nor IPv6:
 * dump the raw bytes of the (largest) v6 member of the address union
 * in hexadecimal so the bogus flow can still be inspected.
 */
static void
print_invalid_flow_err_msg(const struct ip_flow *flow, const char *index_str,
	const char *err_msg)
{
	const uint64_t *src = (const uint64_t *)&flow->f.v6.src;
	const uint64_t *dst = (const uint64_t *)&flow->f.v6.dst;

	/* Each IPv6 address must be exactly two 64-bit words. */
	RTE_BUILD_BUG_ON(sizeof(flow->f.v6.src) != 16);
	RTE_BUILD_BUG_ON(sizeof(flow->f.v6.dst) != 16);

	G_LOG(ERR, "INVALID Flow {proto = %i, f.v6.src = 0x%016"PRIx64
		"%016"PRIx64", f.v6.dst = 0x%016"PRIx64"%016"PRIx64"}%s: %s\n",
		flow->proto,
		rte_be_to_cpu_64(src[0]), rte_be_to_cpu_64(src[1]),
		rte_be_to_cpu_64(dst[0]), rte_be_to_cpu_64(dst[1]),
		index_str, err_msg);
}

/* Placeholder printed when inet_ntop() fails on an address. */
#define INVALID_IP_ADDR_STRING "<ERROR>"

/*
 * Log an error message for @flow at level ERR.
 *
 * @index is interpreted as follows:
 *   - index >= 0: the flow's position, appended as " at index %i";
 *   - index == -ENOENT: no index information, nothing is appended;
 *   - any other negative value: treated as a negative errno and
 *     appended as " error index (%i)" with its magnitude.
 */
void
print_flow_err_msg(const struct ip_flow *flow, int32_t index,
	const char *err_msg)
{
	char src[INET6_ADDRSTRLEN];
	char dst[INET6_ADDRSTRLEN];
	char index_str[64];
	int ret;

	/* The buffers must be able to hold either address family or
	 * the fallback string.
	 */
	RTE_BUILD_BUG_ON(INET6_ADDRSTRLEN < INET_ADDRSTRLEN);
	RTE_BUILD_BUG_ON(sizeof(src) < sizeof(INVALID_IP_ADDR_STRING));
	RTE_BUILD_BUG_ON(sizeof(dst) < sizeof(INVALID_IP_ADDR_STRING));

	/* Skip all the formatting work if ERR messages are filtered out. */
	if (unlikely(!G_LOG_CHECK(ERR)))
		return;

	/* Fill @index_str out. */
	if (index >= 0) {
		ret = snprintf(index_str, sizeof(index_str), " at index %i",
			index);
		RTE_VERIFY(ret > 0 && ret < (int)sizeof(index_str));
	} else if (likely(index == -ENOENT)) {
		/* Empty string. */
		index_str[0] = '\0';
	} else {
		ret = snprintf(index_str, sizeof(index_str),
			" error index (%i)", -index);
		RTE_VERIFY(ret > 0 && ret < (int)sizeof(index_str));
	}

	if (flow->proto == RTE_ETHER_TYPE_IPV4) {
		if (unlikely(inet_ntop(AF_INET, &flow->f.v4.src, src,
				sizeof(src)) == NULL)) {
			G_LOG(ERR, "%s(): failed to convert source IPv4 address to a string (errno=%i): %s\n",
				__func__, errno, strerror(errno));
			strcpy(src, INVALID_IP_ADDR_STRING);
		}

		if (unlikely(inet_ntop(AF_INET, &flow->f.v4.dst, dst,
				sizeof(dst)) == NULL)) {
			G_LOG(ERR, "%s(): failed to convert destination IPv4 address to a string (errno=%i): %s\n",
				__func__, errno, strerror(errno));
			strcpy(dst, INVALID_IP_ADDR_STRING);
		}
	} else if (likely(flow->proto == RTE_ETHER_TYPE_IPV6)) {
		if (unlikely(inet_ntop(AF_INET6, flow->f.v6.src.s6_addr, src,
				sizeof(src)) == NULL)) {
			G_LOG(ERR, "%s(): failed to convert source IPv6 address to a string (errno=%i): %s\n",
				__func__, errno, strerror(errno));
			strcpy(src, INVALID_IP_ADDR_STRING);
		}

		if (unlikely(inet_ntop(AF_INET6, flow->f.v6.dst.s6_addr, dst,
				sizeof(dst)) == NULL)) {
			G_LOG(ERR, "%s(): failed to convert destination IPv6 address to a string (errno=%i): %s\n",
				__func__, errno, strerror(errno));
			strcpy(dst, INVALID_IP_ADDR_STRING);
		}
	} else {
		/* Unknown protocol: dump the raw flow instead. */
		return print_invalid_flow_err_msg(flow, index_str, err_msg);
	}

	G_LOG(ERR, "Flow (src: %s, dst: %s)%s: %s\n",
		src, dst, index_str, err_msg);
}
```
/content/code_sandbox/lib/flow.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,151
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <rte_malloc.h> #include "gatekeeper_main.h" #include "gatekeeper_qid.h" /* * *Q*uick *Id*entifiers library. * * A LIFO stack of consecutive IDs. One use case is that the * stack holds the available indexes of entries in a pre-allocated * memory pool. * * The IDs are initially placed on the stack from left-to-right, * with the top of stack initially being the leftmost element: * * [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] * ^--- top of stack */ int qid_init(struct qid *qid, uint32_t len, const char *name, int socket) { uint32_t i; qid->ids = rte_malloc_socket(name, len * sizeof(*qid->ids), 0, socket); if (qid->ids == NULL) { G_LOG(ERR, "%s(%s): insufficient memory to create QID\n", __func__, name); return -ENOMEM; } for (i = 0; i < len; i++) qid->ids[i] = i; qid->len = len; qid->top = 0; return 0; } void qid_free(struct qid *qid) { rte_free(qid->ids); qid->ids = NULL; } int qid_push(struct qid *qid, uint32_t id) { if (unlikely(qid->top == 0)) return -ENOSPC; if (unlikely(id >= qid->len)) return -EINVAL; qid->ids[--qid->top] = id; return 0; } int qid_pop(struct qid *qid, uint32_t *p_id) { if (unlikely(qid->top >= qid->len)) return -ENOENT; if (unlikely(p_id == NULL)) return -EINVAL; *p_id = qid->ids[qid->top++]; return 0; } ```
/content/code_sandbox/lib/qid.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
537
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#include <netinet/ip.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>

#include "gatekeeper_ipip.h"
#include "gatekeeper_l2.h"

/*
 * The Full-functionality Option for setting ECN bits in IP-in-IP packets.
 * RFC 3168, section 9.1.1.
 *
 * If the ECN codepoint of the inside header is CE, set the ECN codepoint of
 * the outside header to ECT(0). Otherwise (the inside ECN is not-ECT or ECT),
 * copy the ECN codepoint of the inside header to the outside header.
 */
static inline uint8_t
in_to_out_ecn(uint8_t inner_tos)
{
	return (inner_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE
		? IPTOS_ECN_ECT0
		: inner_tos & IPTOS_ECN_MASK;
}

/*
 * Encapsulate @pkt in an outer IP header of the same family as the
 * Grantor address @gt_addr, to be transmitted on @iface.
 *
 * @priority is written into the DSCP field of the outer header
 * (shifted left by 2 into the IPv4 TOS byte, and left by 22 into the
 * IPv6 vtc_flow word); the ECN codepoint of the outer header is derived
 * from the inner header per in_to_out_ecn().
 *
 * The outer source address is the interface's own address and the outer
 * destination is @gt_addr. Returns 0 on success, -1 on failure (no
 * headroom for the new headers, or an unknown address family).
 */
int
encapsulate(struct rte_mbuf *pkt, uint8_t priority,
	struct gatekeeper_if *iface, struct ipaddr *gt_addr)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *outer_ip4hdr;
	struct rte_ipv6_hdr *outer_ip6hdr;

	if (gt_addr->proto == RTE_ETHER_TYPE_IPV4) {
		struct rte_ipv4_hdr *inner_ip4hdr;

		/* Allocate space for outer IPv4 header and L2 header. */
		eth_hdr = adjust_pkt_len(pkt, iface,
			sizeof(struct rte_ipv4_hdr));
		if (eth_hdr == NULL) {
			G_LOG(ERR, "ipip: could not adjust IPv4 packet length\n");
			return -1;
		}

		outer_ip4hdr = pkt_out_skip_l2(iface, eth_hdr);
		/* The former outermost header now sits right after ours. */
		inner_ip4hdr = (struct rte_ipv4_hdr *)&outer_ip4hdr[1];

		/* Fill up the outer IP header. */
		outer_ip4hdr->version_ihl = IP_VHL_DEF;
		outer_ip4hdr->type_of_service = (priority << 2) |
			in_to_out_ecn(inner_ip4hdr->type_of_service);
		outer_ip4hdr->packet_id = 0;
		/* Don't-fragment: the outer packet is never fragmented. */
		outer_ip4hdr->fragment_offset = IP_DN_FRAGMENT_FLAG;
		outer_ip4hdr->time_to_live = IP_DEFTTL;
		outer_ip4hdr->next_proto_id = IPPROTO_IPIP;
		/* The source address is the Gatekeeper server IP address. */
		outer_ip4hdr->src_addr = iface->ip4_addr.s_addr;
		/* The destination address is the Grantor server IP address. */
		outer_ip4hdr->dst_addr = gt_addr->ip.v4.s_addr;
		outer_ip4hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			iface->l2_len_out);

		pkt->l3_len = sizeof(struct rte_ipv4_hdr);

		set_ipv4_checksum(iface, pkt, outer_ip4hdr);
	} else if (likely(gt_addr->proto == RTE_ETHER_TYPE_IPV6)) {
		struct rte_ipv6_hdr *inner_ip6hdr;

		/* Allocate space for new IPv6 header and L2 header. */
		eth_hdr = adjust_pkt_len(pkt, iface,
			sizeof(struct rte_ipv6_hdr));
		if (eth_hdr == NULL) {
			G_LOG(ERR, "ipip: could not adjust IPv6 packet length\n");
			return -1;
		}

		outer_ip6hdr = pkt_out_skip_l2(iface, eth_hdr);
		inner_ip6hdr = (struct rte_ipv6_hdr *)&outer_ip6hdr[1];

		/*
		 * Fill up the outer IP header.
		 *
		 * In vtc_flow, the DSCP field starts at bit 22 and the ECN
		 * codepoint occupies bits 20-21, hence the shifts below:
		 * the inner ECN is extracted with >> 20 and the computed
		 * outer ECN is put back with << 20.
		 */
		outer_ip6hdr->vtc_flow = rte_cpu_to_be_32(
			IPv6_DEFAULT_VTC_FLOW | (priority << 22) |
			(in_to_out_ecn(rte_be_to_cpu_32(
				inner_ip6hdr->vtc_flow) >> 20) << 20));
		outer_ip6hdr->proto = IPPROTO_IPV6;
		outer_ip6hdr->hop_limits = iface->ipv6_default_hop_limits;

		rte_memcpy(outer_ip6hdr->src_addr, iface->ip6_addr.s6_addr,
			sizeof(outer_ip6hdr->src_addr));
		rte_memcpy(outer_ip6hdr->dst_addr, gt_addr->ip.v6.s6_addr,
			sizeof(outer_ip6hdr->dst_addr));

		/* IPv6 payload length excludes the fixed 40-byte header. */
		outer_ip6hdr->payload_len = rte_cpu_to_be_16(pkt->pkt_len -
			(sizeof(struct rte_ipv6_hdr) + iface->l2_len_out));
	} else
		return -1;

	return 0;
}
```
/content/code_sandbox/lib/ipip.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,129
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#include <rte_log.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "gatekeeper_main.h"
#include "gatekeeper_mailbox.h"

/*
 * Initialize mailbox @mb for the lcore @lcore_id: a multi-producer,
 * single-consumer ring of up to 2^@mailbox_max_entries_exp slots plus
 * a mempool of entries of @ele_size bytes, both allocated on the
 * NUMA node of @lcore_id. @tag namespaces the ring/pool names.
 *
 * Returns 0 on success and -1 on failure.
 */
int
init_mailbox(const char *tag, int mailbox_max_entries_exp,
	unsigned int ele_size, unsigned int cache_size, unsigned int lcore_id,
	struct mailbox *mb)
{
	int ret;
	char ring_name[128];
	char pool_name[128];
	unsigned int socket_id = rte_lcore_to_socket_id(lcore_id);

	ret = snprintf(ring_name, sizeof(ring_name), "%s_mailbox_ring_%u",
		tag, lcore_id);
	RTE_VERIFY(ret > 0 && ret < (int)sizeof(ring_name));
	mb->ring = (struct rte_ring *)rte_ring_create(ring_name,
		1 << mailbox_max_entries_exp, socket_id, RING_F_SC_DEQ);
	if (mb->ring == NULL) {
		/*
		 * NOTE(review): the "(len = %d)" argument below is the
		 * return of snprintf() (the name length), not the ring
		 * size — confirm whether the ring size was intended.
		 */
		G_LOG(ERR, "mailbox: can't create ring %s (len = %d) at lcore %u\n",
			ring_name, ret, lcore_id);
		ret = -1;
		goto out;
	}

	ret = snprintf(pool_name, sizeof(pool_name), "%s_mailbox_pool_%d",
		tag, lcore_id);
	RTE_VERIFY(ret > 0 && ret < (int)sizeof(pool_name));
	/*
	 * The pool holds one fewer entry than the ring capacity
	 * (2^n - 1 entries for a ring created with 2^n slots).
	 */
	mb->pool = rte_mempool_create(pool_name,
		(1 << mailbox_max_entries_exp) - 1,
		ele_size, cache_size, 0, NULL, NULL, NULL, NULL,
		socket_id, 0);
	if (mb->pool == NULL) {
		G_LOG(ERR, "mailbox: can't create mempool %s (len = %d) at lcore %u\n",
			pool_name, ret, lcore_id);
		ret = -1;
		goto free_ring;
	}

	ret = 0;
	goto out;

free_ring:
	rte_ring_free(mb->ring);
out:
	return ret;
}

/*
 * Get a free entry from the mailbox's mempool.
 * Returns NULL (after logging) when the pool is exhausted.
 */
void *
mb_alloc_entry(struct mailbox *mb)
{
	void *obj = NULL;
	int ret = rte_mempool_get(mb->pool, &obj);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(): failed to get a new mailbox entry (errno=%i): %s\n",
			__func__, -ret, rte_strerror(-ret));
		return NULL;
	}
	return obj;
}

/*
 * Enqueue @obj (previously obtained from mb_alloc_entry()) on the
 * mailbox ring. On any enqueue failure the entry is returned to the
 * pool, so the caller must not touch @obj after a non-zero return.
 * EDQUOT (ring high-water mark exceeded) still counts as success
 * because the object was enqueued anyway.
 */
int
mb_send_entry(struct mailbox *mb, void *obj)
{
	int ret = rte_ring_mp_enqueue(mb->ring, obj);
	switch (-ret) {
	case EDQUOT:
		G_LOG(WARNING, "%s(): high water mark exceeded; the object has been enqueued\n",
			__func__);
		ret = 0;
		break;

	case ENOBUFS:
		G_LOG(ERR, "%s(): quota exceeded; the object has NOT been enqueued\n",
			__func__);
		mb_free_entry(mb, obj);
		break;

	default:
		if (likely(ret == 0))
			break;
		mb_free_entry(mb, obj);
		G_LOG(CRIT, "%s(): bug: unexpected error (errno=%i): %s\n",
			__func__, -ret, rte_strerror(-ret));
		break;
	}
	return ret;
}

/*
 * Free the mailbox's ring and mempool. Safe to call with a NULL @mb
 * or with partially initialized members.
 */
void
destroy_mailbox(struct mailbox *mb)
{
	if (mb) {
		if (mb->ring)
			rte_ring_free(mb->ring);
		if (mb->pool)
			rte_mempool_free(mb->pool);
	}
}
```
/content/code_sandbox/lib/mailbox.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
866
```c #include <rte_ip.h> #include <rte_mbuf.h> #include "gatekeeper_varip.h" /* NextHeader field of IPv6 header. */ /* Hop-by-hop option header. */ #define NEXTHDR_HOP (0) /* Routing header. */ #define NEXTHDR_ROUTING (43) /* Fragmentation/reassembly header. */ #define NEXTHDR_FRAGMENT (44) /* Authentication header. */ #define NEXTHDR_AUTH (51) /* No next header. */ #define NEXTHDR_NONE (59) /* Destination options header. */ #define NEXTHDR_DEST (60) static inline bool ipv6_ext_hdr(uint8_t nexthdr) { /* Find out if nexthdr is an extension header or a protocol. */ return (nexthdr == NEXTHDR_HOP) || (nexthdr == NEXTHDR_ROUTING) || (nexthdr == NEXTHDR_FRAGMENT) || (nexthdr == NEXTHDR_AUTH) || (nexthdr == NEXTHDR_NONE) || (nexthdr == NEXTHDR_DEST); } struct ipv6_opt_hdr { uint8_t nexthdr; uint8_t hdrlen; } __attribute__((packed)); int ipv6_skip_exthdr(const struct rte_ipv6_hdr *ip6hdr, int remaining_len, uint8_t *nexthdrp) { int start = sizeof(struct rte_ipv6_hdr); uint8_t nexthdr = ip6hdr->proto; while (ipv6_ext_hdr(nexthdr)) { int hdrlen; const struct ipv6_opt_hdr *hp; if (start + (int)sizeof(struct ipv6_opt_hdr) > remaining_len) return -1; hp = (const struct ipv6_opt_hdr *) ((const uint8_t *)ip6hdr + start); switch (nexthdr) { case NEXTHDR_NONE: return -1; break; case NEXTHDR_FRAGMENT: hdrlen = 8; break; case NEXTHDR_AUTH: hdrlen = ((hp->hdrlen + 2) << 2); break; default: hdrlen = ((hp->hdrlen + 1) << 3); break; } nexthdr = hp->nexthdr; start += hdrlen; if (start > remaining_len) return -1; } *nexthdrp = nexthdr; return start; } ```
/content/code_sandbox/lib/varip.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
556
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <math.h> #include <rte_cycles.h> #include <rte_common.h> #include "gatekeeper_main.h" #include "gatekeeper_ratelimit.h" void tb_ratelimit_state_init(struct token_bucket_ratelimit_state *tbrs, uint32_t rate, uint32_t burst) { tbrs->rate = rate; tbrs->burst = burst; tbrs->credit = burst; tbrs->stamp = 0; } uint32_t tb_ratelimit_allow_n(uint32_t n, struct token_bucket_ratelimit_state *tbrs) { uint32_t credit, incr = 0; uint64_t now = rte_rdtsc(), delta; uint32_t rc = 0; delta = RTE_MIN(now - tbrs->stamp, cycles_per_sec); /* Check if token bucket is empty and cannot be refilled. */ if (!tbrs->credit) { if (delta < cycles_per_sec / tbrs->burst) return rc; } if (delta >= cycles_per_sec / tbrs->burst) { incr = round((double)(tbrs->rate * delta) / cycles_per_sec); if (incr) tbrs->stamp = now; } credit = RTE_MIN(tbrs->credit + incr, tbrs->burst); if (credit > 0) { rc = RTE_MIN(credit, n); credit -= rc; } tbrs->credit = credit; return rc; } ```
/content/code_sandbox/lib/ratelimit.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
425
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <math.h> #include <rte_hash_crc.h> #include <rte_malloc.h> #include <rte_prefetch.h> #include "gatekeeper_main.h" #include "gatekeeper_hash.h" #define HS_HASH_MAX_NUM_ENTRIES ((uint32_t)1 << 31) static inline bool is_in_use(const struct hs_hash_bucket *bucket) { return bucket->user_idx != HS_HASH_MISS; } static inline bool is_neighborhood_full(const struct hs_hash *h, const struct hs_hash_bucket *bucket) { return (bucket->hh_nbh & h->neighborhood_mask) == h->neighborhood_mask; } static inline uint32_t bucket_difference(const struct hs_hash *h, uint32_t bucket1, uint32_t bucket2) { if (likely(bucket1 <= bucket2)) return bucket2 - bucket1; return bucket2 + (h->num_buckets - bucket1); } static inline uint32_t cycle_buckets(const struct hs_hash *h, uint32_t hash) { return hash & (h->num_buckets - 1); } static inline uint32_t hs_jhash(const void *key, uint32_t key_len, uint32_t init_val, __attribute__((unused)) const void *data) { return rte_jhash(key, key_len, init_val); } static inline uint32_t hs_hash_crc(const void *key, uint32_t key_len, uint32_t init_val, __attribute__((unused)) const void *data) { return rte_hash_crc(key, key_len, init_val); } static inline hs_hash_function default_hash_func(void) { hs_hash_function default_hash_func = hs_jhash; #if defined(RTE_ARCH_X86) default_hash_func = hs_hash_crc; #elif defined(RTE_ARCH_ARM64) if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32)) default_hash_func = hs_hash_crc; #endif return default_hash_func; } static void init_buckets(struct hs_hash_bucket *buckets, uint32_t num_buckets) { uint32_t i; for (i = 0; 
i < num_buckets; i++) { buckets[i].hh_nbh = 0; buckets[i].user_idx = HS_HASH_MISS; } } int hs_hash_create(struct hs_hash *h, const struct hs_hash_parameters *params) { struct hs_hash_bucket *buckets; uint32_t num_buckets, scaled_num_entries; char hash_name[128]; int ret; RTE_BUILD_BUG_ON(HS_HASH_MISS <= HS_HASH_MAX_NUM_ENTRIES); if (unlikely(h == NULL)) { G_LOG(ERR, "%s(): hash data structure not allocated\n", __func__); return -EINVAL; } if (unlikely(params == NULL)) { G_LOG(ERR, "%s(): no parameters\n", __func__); return -EINVAL; } if (unlikely(params->name == NULL)) { G_LOG(ERR, "%s(): must provide name in struct hs_hash_parameters\n", __func__); return -EINVAL; } if (unlikely(params->key_len == 0)) { G_LOG(ERR, "%s(%s): given key length 0\n", __func__, params->name); return -EINVAL; } if (unlikely(params->key_cmp_fn == NULL)) { G_LOG(ERR, "%s(%s): must be given a key_cmp_fn in struct hs_hash_parameters\n", __func__, params->name); return -EINVAL; } if (unlikely(params->key_addr_fn == NULL)) { G_LOG(ERR, "%s(%s): must be given a key_addr_fn in struct hs_hash_parameters\n", __func__, params->name); return -EINVAL; } if (unlikely(params->num_entries == 0)) { G_LOG(ERR, "%s(%s): must be given a positive value for the number of entries in struct hs_hash_parameters\n", __func__, params->name); return -EINVAL; } if (unlikely(params->scale_num_bucket <= 0)) { G_LOG(ERR, "%s(%s): must be given a positive value for the number of buckets scale factor in struct hs_hash_parameters\n", __func__, params->name); return -EINVAL; } scaled_num_entries = round(params->num_entries * params->scale_num_bucket); if (unlikely(scaled_num_entries == 0)) { G_LOG(ERR, "%s(%s): number of entries (%u*%f=%u) must be > 0 in struct hs_hash_parameters\n", __func__, params->name, params->num_entries, params->scale_num_bucket, scaled_num_entries); return -EINVAL; } if (unlikely(scaled_num_entries > HS_HASH_MAX_NUM_ENTRIES)) { /* * If we allow @params->num_entries to be any * greater, 
rte_align32pow2() could return 0. */ G_LOG(ERR, "%s(%s): number of entries (%u*%f=%u) must be <= max entries (%u) in struct hs_hash_parameters\n", __func__, params->name, params->num_entries, params->scale_num_bucket, scaled_num_entries, HS_HASH_MAX_NUM_ENTRIES); return -EINVAL; } /* * Making the number of buckets a power of two makes * the bucket mask of struct hs_hash possible. * * However, we allow the entries array to be shorter * than the number of buckets, so that we can utilize * additional buckets without having to bump the number * of entries up to the next power of 2. */ num_buckets = rte_align32pow2(RTE_MAX(8, scaled_num_entries)); ret = snprintf(hash_name, sizeof(hash_name), "HSHT_buckets_%s", params->name); RTE_VERIFY(ret > 0 && ret < (int)sizeof(hash_name)); /* * Zeroed allocation is not needed here, since the initial * state of the buckets is to have a non-zero index HS_HASH_MISS. */ buckets = rte_malloc_socket(hash_name, num_buckets * sizeof(*buckets), RTE_CACHE_LINE_SIZE, params->socket_id); if (unlikely(buckets == NULL)) { G_LOG(ERR, "%s(%s): buckets memory allocation failed\n", __func__, params->name); return -ENOMEM; } ret = snprintf(hash_name, sizeof(hash_name), "HSHT_qid_%s", params->name); RTE_VERIFY(ret > 0 && ret < (int)sizeof(hash_name)); ret = qid_init(&h->entry_qid, params->num_entries, hash_name, params->socket_id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to create QID for managing hash table entries (errno=%i): %s\n", __func__, params->name, -ret, strerror(-ret)); goto free_buckets; } h->buckets = buckets; h->num_buckets = num_buckets; init_buckets(h->buckets, h->num_buckets); h->max_probes = RTE_MIN(params->max_probes, num_buckets); h->neighborhood_size = rte_log2_u32(num_buckets); h->neighborhood_mask = num_buckets - 1; h->high_hash_mask = ~h->neighborhood_mask; h->hash_func = (params->hash_func == NULL) ? 
default_hash_func() : params->hash_func; h->key_len = params->key_len; h->hash_func_init_val = params->hash_func_init_val; h->hash_func_data = params->hash_func_data; h->key_cmp_fn = params->key_cmp_fn; h->key_cmp_fn_data = params->key_cmp_fn_data; h->key_addr_fn = params->key_addr_fn; h->key_addr_fn_data = params->key_addr_fn_data; return 0; free_buckets: rte_free(buckets); return ret; } void hs_hash_free(struct hs_hash *h) { if (unlikely(h == NULL)) return; h->key_addr_fn_data = NULL; h->key_addr_fn = NULL; h->key_cmp_fn_data = NULL; h->key_cmp_fn = NULL; h->hash_func_data = NULL; h->hash_func_init_val = 0; h->key_len = 0; h->hash_func = NULL; h->high_hash_mask = 0; h->neighborhood_mask = 0; h->neighborhood_size = 0; h->max_probes = 0; qid_free(&h->entry_qid); h->num_buckets = 0; rte_free(h->buckets); h->buckets = NULL; } /* * Finds the index of an empty bucket and stores it in @p_empty_idx * and returns 0 on success. If there is no empty bucket available, * returns -ENOENT. */ static int find_empty_bucket(const struct hs_hash *h, uint32_t start_idx, uint32_t *p_empty_idx) { uint32_t i; for (i = 0; i < h->max_probes; i++) { uint32_t idx = cycle_buckets(h, start_idx + i); if (!is_in_use(&h->buckets[idx])) { *p_empty_idx = idx; return 0; } } return -ENOENT; } static inline void toggle_neighbor(struct hs_hash *h, uint32_t bucket_idx, uint32_t neigh_distance) { if (unlikely(neigh_distance >= h->neighborhood_size)) { G_LOG(CRIT, "%s(): bug: neigh_distance=%u >= neighborhood_size=%u\n", __func__, neigh_distance, h->neighborhood_size); return; } h->buckets[bucket_idx].hh_nbh ^= (uint32_t)1 << neigh_distance; } /* * Swaps an entry in a neighborhood into an empty slot. * * @neigh_idx is the index of the neighborhood in question. * @empty_idx is the index of the empty slot in the neighborhood. * @empty_neigh_bit is the bit that represents @empty_idx in the neighborhood. * @to_swap_idx is the index of the entry to be swapped. 
* @to_swap_neigh_bit is the bit that represents @to_swap_idx in the * neighborhood. */ static void swap_value_into_empty_bucket(struct hs_hash *h, uint32_t neigh_idx, uint32_t empty_idx, uint32_t empty_neigh_bit, uint32_t to_swap_idx, uint32_t to_swap_neigh_bit) { h->buckets[empty_idx].hh_nbh &= h->neighborhood_mask; h->buckets[empty_idx].hh_nbh |= h->buckets[to_swap_idx].hh_nbh & h->high_hash_mask; h->buckets[empty_idx].user_idx = h->buckets[to_swap_idx].user_idx; /* Add the previously empty bucket to this neighborhood. */ toggle_neighbor(h, neigh_idx, empty_neigh_bit); h->buckets[to_swap_idx].hh_nbh &= h->neighborhood_mask; h->buckets[to_swap_idx].user_idx = HS_HASH_MISS; /* Remove the previously used bucket from this neighborhood. */ toggle_neighbor(h, neigh_idx, to_swap_neigh_bit); } static bool swap_empty_bucket_closer(struct hs_hash *h, uint32_t *p_empty_idx) { uint8_t i; /* * Can only let i be less than h->neighborhood_size - 1, since the * entry at exactly h->neighborhood_size - 1 is the empty one. */ for (i = 0; i < h->neighborhood_size - 1; i++) { /* * Start looking a neighborhood "back" from @empty_idx, * which might cause the index to temporarily go negative, * hence the assignment to @tmp_64idx. * * In the cases where the index becomes negative, we wrap * back around to the *end* of the buckets array by adding * h->num_buckets. * * Either way, ultimately @tmp_64idx holds a proper 32-bit * index. */ int64_t tmp_64idx = (int64_t)*p_empty_idx - h->neighborhood_size + 1 + i; if (unlikely(tmp_64idx < 0)) tmp_64idx += h->num_buckets; uint32_t current_idx = cycle_buckets(h, tmp_64idx); uint32_t neighborhood = h->buckets[current_idx].hh_nbh & h->neighborhood_mask; uint32_t empty_distance, next_bit, swap_idx; /* * There is no need to test if * is_in_use(&h->buckets[current_idx]) is false because * hs_hash_add_key_with_hash(), the only caller, calls * this function after testing all buckets are in use. 
*/ if (neighborhood == 0) continue; empty_distance = h->neighborhood_size - 1 - i; next_bit = rte_bsf32(neighborhood); if (unlikely(next_bit >= empty_distance)) continue; swap_idx = cycle_buckets(h, current_idx + next_bit); swap_value_into_empty_bucket(h, current_idx, *p_empty_idx, empty_distance, swap_idx, next_bit); *p_empty_idx = swap_idx; return true; } /* Could not swap an empty bucket closer. */ return false; } static inline bool hashes_equal(const struct hs_hash *h, uint32_t key1_hash, uint32_t key2_hash_idx) { return !((h->buckets[key2_hash_idx].hh_nbh ^ key1_hash) & h->high_hash_mask); } static inline bool keys_equal(const struct hs_hash *h, const void *key1, uint32_t key2_hash_idx) { return h->key_cmp_fn(key1, h->key_addr_fn(h->buckets[key2_hash_idx].user_idx, h->key_addr_fn_data), h->key_len, h->key_cmp_fn_data) == 0; } /* * Returns whether @key1 is equal to the existing key * represented by the bucket at index @key2_hash_idx. */ static bool hashes_and_keys_equal(const struct hs_hash *h, const void *key1, uint32_t key1_hash, uint32_t key2_hash_idx) { return hashes_equal(h, key1_hash, key2_hash_idx) && likely(keys_equal(h, key1, key2_hash_idx)); } static int find_in_neighborhood(const struct hs_hash *h, const void *key, uint32_t hash, uint32_t hash_idx, uint32_t *p_val_idx) { uint32_t neighborhood = h->buckets[hash_idx].hh_nbh & h->neighborhood_mask; while (neighborhood != 0) { uint32_t next_bit = rte_bsf32(neighborhood); hash_idx = cycle_buckets(h, hash_idx + next_bit); neighborhood >>= next_bit; neighborhood ^= 1; if (hashes_and_keys_equal(h, key, hash, hash_idx)) { *p_val_idx = hash_idx; return 0; } } return -ENOENT; } int hs_hash_add_key_with_hash(struct hs_hash *h, const void *key, uint32_t hash, uint32_t *p_user_idx) { uint32_t hash_idx, val_idx, new_user_idx, empty_idx; int ret, ret2; if (unlikely(h == NULL || key == NULL || p_user_idx == NULL)) return -EINVAL; hash_idx = cycle_buckets(h, hash); ret = find_in_neighborhood(h, key, hash, hash_idx, 
&val_idx); if (unlikely(ret == 0)) { /* * Prioritize returning the fact that the key * already exists, since this gives the client * a better opportunity to act on it than -ENOSPC. */ *p_user_idx = h->buckets[val_idx].user_idx; return -EEXIST; } if (unlikely(is_neighborhood_full(h, &h->buckets[hash_idx]))) return -ENOSPC; ret = qid_pop(&h->entry_qid, &new_user_idx); if (unlikely(ret < 0)) { /* Likely no more room in the client's entries array. */ return ret; } ret = find_empty_bucket(h, hash_idx, &empty_idx); if (unlikely(ret < 0)) { /* No free buckets within range. */ goto push_qid; } do { uint32_t bucket_diff = bucket_difference(h, hash_idx, empty_idx); if (likely(bucket_diff < h->neighborhood_size)) { h->buckets[empty_idx].hh_nbh &= h->neighborhood_mask; h->buckets[empty_idx].hh_nbh |= hash & h->high_hash_mask; h->buckets[empty_idx].user_idx = new_user_idx; toggle_neighbor(h, hash_idx, bucket_diff); *p_user_idx = new_user_idx; return 0; } /* * Swap an empty bucket closer to try to move it into range * of the neighborhood. * * Note that it's not possible for the function to set * @empty_idx to be before @hash_idx because above we make * sure that the difference between the buckets is at least * a neighborhood. Therefore, since swap_empty_bucket_closer() * looks at most a neighborhood before the empty index, it * can't find an empty index that comes before @hash_idx. */ } while (swap_empty_bucket_closer(h, &empty_idx)); /* Couldn't swap an empty bucket close enough. */ ret = -ENOSPC; push_qid: ret2 = qid_push(&h->entry_qid, new_user_idx); if (unlikely(ret2 < 0)) { G_LOG(ERR, "%s(): failed to push QID %u (errno=%i): %s\n", __func__, new_user_idx, -ret2, strerror(-ret2)); } return ret; } /* * @nbh_idx is the index of bucket where the value was hashed to. * @val_idx is the index of the bucket where the value is. 
*/ static void delete_from_neighborhood(struct hs_hash *h, uint32_t nbh_idx, uint32_t val_idx) { int ret = qid_push(&h->entry_qid, h->buckets[val_idx].user_idx); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to push QID %u (errno=%i): %s\n", __func__, h->buckets[val_idx].user_idx, -ret, strerror(-ret)); } h->buckets[val_idx].user_idx = HS_HASH_MISS; h->buckets[val_idx].hh_nbh &= h->neighborhood_mask; toggle_neighbor(h, nbh_idx, bucket_difference(h, nbh_idx, val_idx)); } int hs_hash_del_key_with_hash(struct hs_hash *h, const void *key, uint32_t hash, uint32_t *p_user_idx) { uint32_t hash_idx, val_idx, neighborhood; int ret; if (unlikely(h == NULL || key == NULL || p_user_idx == NULL)) return -EINVAL; hash_idx = cycle_buckets(h, hash); ret = find_in_neighborhood(h, key, hash, hash_idx, &val_idx); if (unlikely(ret < 0)) return ret; *p_user_idx = h->buckets[val_idx].user_idx; delete_from_neighborhood(h, hash_idx, val_idx); neighborhood = h->buckets[hash_idx].hh_nbh & h->neighborhood_mask; if (likely(neighborhood != 0)) { /* * Swap the farthest bucket of the neighborhood * to the empty spot to improve locality. */ uint32_t farthest_bit = rte_fls_u32(neighborhood) - 1; uint32_t val_bit = bucket_difference(h, hash_idx, val_idx); if (likely(farthest_bit > val_bit)) { swap_value_into_empty_bucket(h, hash_idx, val_idx, val_bit, cycle_buckets(h, hash_idx + farthest_bit), farthest_bit); } } return 0; } int hs_hash_lookup_with_hash(const struct hs_hash *h, const void *key, uint32_t hash, uint32_t *p_user_idx) { uint32_t hash_idx, val_idx; int ret; if (unlikely(h == NULL || key == NULL || p_user_idx == NULL)) return -EINVAL; hash_idx = cycle_buckets(h, hash); ret = find_in_neighborhood(h, key, hash, hash_idx, &val_idx); if (likely(ret == 0)) *p_user_idx = h->buckets[val_idx].user_idx; return ret; } /* * Bulk lookup is implemented using the G-Opt technique. The logic * is basically an unrolled version of the loop in find_in_neighborhood(). 
 *
 * For details on the G-Opt technique, see at least Section 3.2 of
 * the paper Raising the Bar for Using GPUs in Software Packet Processing
 * by Anuj Kalia, Dong Zhou, Michael Kaminsky, and David G. Andersen.
 * published in 12th USENIX Symposium on Networked Systems Design and
 * Implementation (aka NSDI 2015).
 */

/* Switch to the next of the @n in-flight lookups (round robin). */
#define G_SW()				\
	do {				\
		i = (i + 1) % n;	\
		goto *g_labels[i];	\
	} while (0)

/* Prefetch, Save label, and Switch lookup. */
#define G_PSS(addr, label)		\
	do {				\
		rte_prefetch0(addr);	\
		g_labels[i] = &&label;	\
		G_SW();			\
	} while (0)

/* Return the start address of the cache line that holds bucket @idx. */
static inline void *
bucket_cache_line(const struct hs_hash *h, uint32_t idx)
{
	return (void *)((uintptr_t)&h->buckets[idx] &
		(~((uintptr_t)RTE_CACHE_LINE_MASK)));
}

/*
 * Look up @n keys in one pass, hiding memory latency by interleaving the
 * lookups with prefetches (G-Opt; see the comment above). For each i,
 * user_indexes[i] receives the user index of keys[i], or HS_HASH_MISS
 * when keys[i] is not in the hash.
 *
 * RETURN 0 on success (including when some keys miss); -EINVAL on NULL
 * parameters; -1 on an internal bug.
 */
int
hs_hash_lookup_with_hash_bulk(const struct hs_hash *h, const void **keys,
	const uint32_t *hashes, uint32_t n, uint32_t *user_indexes)
{
	/* Lookup state. */
	uint32_t entry_idx[n];
	void *prv_cache_line[n];
	uint32_t neighborhoods[n];
	uint32_t next_bit;
	void *cache_line;

	/* G-Opt state. */
	void *g_labels[n];
	uint32_t i, g_count;

	if (unlikely(n == 0))
		return 0;

	if (unlikely(h == NULL || keys == NULL || hashes == NULL ||
			user_indexes == NULL)) {
		return -EINVAL;
	}

	/* Every lookup starts at the first stage. */
	for (i = 0; i < n; i++)
		g_labels[i] = &&g_label_0;
	i = 0;
	g_count = 0;

g_label_0:
	entry_idx[i] = cycle_buckets(h, hashes[i]);
	prv_cache_line[i] = bucket_cache_line(h, entry_idx[i]);
	/* Prefetch the cache line for the initial bucket. */
	G_PSS(prv_cache_line[i], g_label_1);

g_label_1:
	/* This only needs to be initialized once for every index. */
	neighborhoods[i] = h->buckets[entry_idx[i]].hh_nbh &
		h->neighborhood_mask;

	/* Loop over the entries of the neighborhood begins here. */
g_label_2:
	if (neighborhoods[i] == 0) {
		/*
		 * Either there were no entries in this neighborhood to begin
		 * with, or we have checked all of the entries in the
		 * neighborhood and their hashes or keys didn't match.
		 */
		user_indexes[i] = HS_HASH_MISS;
		goto g_done;
	}

	/* Find a non-empty entry in this neighborhood.
	 */
	next_bit = rte_bsf32(neighborhoods[i]);
	entry_idx[i] = cycle_buckets(h, entry_idx[i] + next_bit);
	cache_line = bucket_cache_line(h, entry_idx[i]);
	/* Consume the bit just found. */
	neighborhoods[i] >>= next_bit;
	neighborhoods[i] ^= 1;

	if (unlikely(prv_cache_line[i] != cache_line)) {
		/*
		 * This bucket is not in cache. We can use unlikely
		 * here because we set up the hash for 100% occupancy,
		 * leading to entries being on the same cache line
		 * most of the time.
		 */
		prv_cache_line[i] = cache_line;
		G_PSS(cache_line, g_label_3);
	}

g_label_3:
	if (!hashes_equal(h, hashes[i], entry_idx[i])) {
		/* Go try to find another entry in the neighborhood. */
		goto g_label_2;
	}

	/* Prefetch the key of the entry to compare keys. */
	user_indexes[i] = h->buckets[entry_idx[i]].user_idx;
	G_PSS(h->key_addr_fn(user_indexes[i], h->key_addr_fn_data),
		g_label_4);

g_label_4:
	if (unlikely(!keys_equal(h, keys[i], entry_idx[i]))) {
		/*
		 * Go try to find another entry in the neighborhood.
		 * Unlikely because if the hashes were equal, then
		 * it's likely that the keys are also equal.
		 */
		goto g_label_2;
	}
	goto g_done;

g_skip:
	/* This lookup already finished; just pass the turn along. */
	G_SW();

g_done:
	g_count++;
	g_labels[i] = &&g_skip;
	if (likely(g_count < n))
		G_SW();

	if (unlikely(g_count > n)) {
		G_LOG(CRIT, "%s(): bug: g_count=%u > n=%u\n",
			__func__, g_count, n);
		return -1;
	}
	return 0;
}

/*
 * Return the next in-use entry of @h at or after bucket index @*next.
 *
 * On success, *@p_user_idx receives the entry's user index and @*next is
 * advanced past the returned bucket, so consecutive calls enumerate each
 * entry exactly once. On failure, *@p_user_idx is set to HS_HASH_MISS.
 *
 * RETURN 0 on success; -EINVAL on NULL parameters; -ENOENT when the scan
 * reaches the end of the bucket array.
 */
int
hs_hash_iterate(const struct hs_hash *h, uint32_t *next, uint32_t *p_user_idx)
{
	uint32_t i;
	int ret;

	if (unlikely(p_user_idx == NULL)) {
		/*
		 * Since we can't populate @p_user_idx with HS_HASH_MISS,
		 * the only thing to do is return the error.
		 */
		return -EINVAL;
	}

	if (unlikely(h == NULL || next == NULL)) {
		ret = -EINVAL;
		goto no_entry;
	}

	if (unlikely(*next >= h->num_buckets)) {
		ret = -ENOENT;
		goto no_entry;
	}

	/* Scan forward for the next in-use bucket. */
	i = *next;
	while (likely(!is_in_use(&h->buckets[i]))) {
		i++;
		if (unlikely(i >= h->num_buckets)) {
			ret = -ENOENT;
			*next = i;
			goto no_entry;
		}
	}

	*p_user_idx = h->buckets[i].user_idx;
	*next = i + 1;
	return 0;

no_entry:
	*p_user_idx = HS_HASH_MISS;
	return ret;
}

/*
 * Prefetch the bucket of @hash without polluting the caches
 * (non-temporal hint); useful when the bucket is read only once.
 */
void
hs_hash_prefetch_bucket_non_temporal(const struct hs_hash *h, uint32_t hash)
{
	rte_prefetch_non_temporal(&h->buckets[cycle_buckets(h, hash)]);
}
```
/content/code_sandbox/lib/hash.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
5,965
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdbool.h> #include <string.h> #include <rte_malloc.h> #include <rte_lcore.h> #include <rte_debug.h> #include <rte_log.h> #include "gatekeeper_launch.h" #include "list.h" #include "gatekeeper_main.h" static struct launch_heads { struct list_head stage1; struct list_head stage2; struct list_head stage3; } launch_heads = { .stage1 = LIST_HEAD_INIT(launch_heads.stage1), .stage2 = LIST_HEAD_INIT(launch_heads.stage2), .stage3 = LIST_HEAD_INIT(launch_heads.stage3), }; struct stage1_entry { struct list_head list; lcore_function_t *f; void *arg; }; int launch_at_stage1(lcore_function_t *f, void *arg) { struct stage1_entry *entry; entry = rte_malloc(__func__, sizeof(*entry), 0); if (entry == NULL) { G_LOG(ERR, "launch: %s: DPDK ran out of memory", __func__); return -1; } entry->f = f; entry->arg = arg; list_add_tail(&entry->list, &launch_heads.stage1); return 0; } static int launch_stage1(void) { struct stage1_entry *entry, *next; list_for_each_entry_safe(entry, next, &launch_heads.stage1, list) { int ret = entry->f(entry->arg); if (ret != 0) return ret; list_del(&entry->list); rte_free(entry); } return 0; } void pop_n_at_stage1(int n) { while (n > 0 && !list_empty(&launch_heads.stage1)) { struct stage1_entry *last = list_last_entry(&launch_heads.stage1, struct stage1_entry, list); list_del(&last->list); rte_free(last); n--; } } struct stage2_entry { struct list_head list; lcore_function_t *f; void *arg; }; int launch_at_stage2(lcore_function_t *f, void *arg) { struct stage2_entry *entry; entry = rte_malloc(__func__, sizeof(*entry), 0); if (entry == NULL) { 
G_LOG(ERR, "launch: %s: DPDK ran out of memory", __func__); return -1; } entry->f = f; entry->arg = arg; list_add_tail(&entry->list, &launch_heads.stage2); return 0; } static int launch_stage2(void) { struct stage2_entry *entry, *next; list_for_each_entry_safe(entry, next, &launch_heads.stage2, list) { int ret = entry->f(entry->arg); if (ret != 0) return ret; list_del(&entry->list); rte_free(entry); } return 0; } void pop_n_at_stage2(int n) { while (n > 0 && !list_empty(&launch_heads.stage2)) { struct stage2_entry *last = list_last_entry(&launch_heads.stage2, struct stage2_entry, list); list_del(&last->list); rte_free(last); n--; } } struct stage3_entry { struct list_head list; char *name; lcore_function_t *f; void *arg; unsigned int lcore_id; }; int launch_at_stage3(const char *name, lcore_function_t *f, void *arg, unsigned int lcore_id) { struct stage3_entry *entry; char *name_cpy; name_cpy = rte_strdup(__func__, name); if (name_cpy == NULL) goto fail; entry = rte_malloc(__func__, sizeof(*entry), 0); if (entry == NULL) { G_LOG(ERR, "launch: %s: DPDK ran out of memory", __func__); goto name_cpy; } entry->name = name_cpy; entry->f = f; entry->arg = arg; entry->lcore_id = lcore_id; list_add_tail(&entry->list, &launch_heads.stage3); return 0; name_cpy: rte_free(name_cpy); fail: return -1; } static inline void free_stage3_entry(struct stage3_entry *entry) { rte_free(entry->name); rte_free(entry); } static int launch_stage3(void) { unsigned int main_id = rte_get_main_lcore(); struct stage3_entry *entry, *next; RTE_VERIFY(main_id == rte_lcore_id()); list_for_each_entry_safe(entry, next, &launch_heads.stage3, list) { int ret; if (entry->lcore_id == main_id) { /* * Postpone the execution of this call since * this thread is running on the main lcore. 
*/ continue; } ret = rte_eal_remote_launch(entry->f, entry->arg, entry->lcore_id); if (ret != 0) { G_LOG(ERR, "launch: lcore %u failed to launch %s\n", entry->lcore_id, entry->name); return ret; } list_del(&entry->list); free_stage3_entry(entry); } return 0; } static int run_main_if_applicable(void) { unsigned int main_id = rte_get_main_lcore(); struct stage3_entry *first; int ret; RTE_VERIFY(main_id == rte_lcore_id()); if (list_empty(&launch_heads.stage3)) return 0; if (!list_is_singular(&launch_heads.stage3)) { G_LOG(ERR, "launch: list of stage 3 functions should not contain multiple main lcore entries\n"); return -1; } first = list_first_entry(&launch_heads.stage3, struct stage3_entry, list); if (first->lcore_id != main_id) { G_LOG(ERR, "launch: list of stage 3 functions should not contain non-main lcore entries in %s\n", __func__); return -1; } list_del(&first->list); ret = first->f(first->arg); free_stage3_entry(first); return ret; } void pop_n_at_stage3(int n) { while (n > 0 && !list_empty(&launch_heads.stage3)) { struct stage3_entry *last = list_last_entry(&launch_heads.stage3, struct stage3_entry, list); list_del(&last->list); free_stage3_entry(last); n--; } } int launch_gatekeeper(void) { int ret; ret = launch_stage1(); if (ret != 0) return -1; ret = launch_stage2(); if (ret != 0) return -1; ret = launch_stage3(); if (ret != 0) return -1; return run_main_if_applicable(); } ```
/content/code_sandbox/lib/launch.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,578
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#include <rte_malloc.h>
#include <rte_prefetch.h>

#include "gatekeeper_main.h"
#include "gatekeeper_fib.h"

/* Atomically set entries [@first, @last] of @array to @val. */
static inline void
write_atomics(rte_atomic32_t *array, uint32_t first, uint32_t last, int val)
{
	uint32_t i;
	for (i = first; i <= last; i++)
		rte_atomic32_set(&array[i], val);
}

/*
 * Initialize @fib on NUMA node @socket_id for addresses of @max_length
 * bits (a multiple of 8 in [32, RIB_MAX_ADDRESS_LENGTH]), with room for
 * @max_rules RIB rules and @num_tbl8s 8-bit tables. @name seeds the
 * names of the internal allocations.
 *
 * RETURN 0 on success; -EINVAL on bad parameters or a too-long @name;
 * -ENOMEM when an allocation fails; or rib_create()'s error.
 */
int
fib_create(struct fib_head *fib, const char *name, int socket_id,
	uint8_t max_length, uint32_t max_rules, uint32_t num_tbl8s)
{
	char rib_name[256], tbl8s_name[256], pool_name[256];
	int ret;
	uint32_t i;

	/*
	 * Check input.
	 */

	if (unlikely(max_length % 8 != 0)) {
		G_LOG(ERR, "%s(): max_length=%u is not a multiple of 8\n",
			__func__, max_length);
		return -EINVAL;
	}

	/*
	 * Having max_length >= 32 guarantees that there's at least one level
	 * of tbl8s. This assumption simplifies the code that implements
	 * fib_add() and fib_del().
	 */
	if (unlikely(max_length < 32 ||
			RIB_MAX_ADDRESS_LENGTH < max_length)) {
		G_LOG(ERR, "%s(): max_length=%u must be in [32, %u]\n",
			__func__, max_length, RIB_MAX_ADDRESS_LENGTH);
		return -EINVAL;
	}

	if (unlikely(max_rules == 0)) {
		G_LOG(ERR, "%s(): max_rules=%u must be greater than zero\n",
			__func__, max_rules);
		return -EINVAL;
	}

	/*
	 * NOTE(review): the message below claims the valid interval is
	 * [0, FIB_TBL8_FREE_INDEX), but the check also rejects 0;
	 * the message should arguably read (0, %u) — confirm upstream.
	 */
	if (unlikely(num_tbl8s == 0 || num_tbl8s >= FIB_TBL8_FREE_INDEX)) {
		G_LOG(ERR, "%s(): num_tbl8s=%u must be in [0, %u)\n",
			__func__, num_tbl8s, FIB_TBL8_FREE_INDEX);
		return -EINVAL;
	}

	/* Derive the names of the internal allocations from @name. */
	ret = snprintf(rib_name, sizeof(rib_name), "%s_RIB", name);
	if (unlikely(ret <= 0 || ret >= (int)sizeof(rib_name))) {
		G_LOG(ERR, "%s(rib_name): name=`%s' is too long\n",
			__func__, name);
		return -EINVAL;
	}

	ret = snprintf(tbl8s_name, sizeof(tbl8s_name), "%s_TBL8s", name);
	if (unlikely(ret <= 0 || ret >= (int)sizeof(tbl8s_name))) {
		G_LOG(ERR, "%s(tbl8s_name): name=`%s' is too long\n",
			__func__, name);
		return -EINVAL;
	}

	ret = snprintf(pool_name, sizeof(pool_name), "%s_TBL8s_pool", name);
	if (unlikely(ret <= 0 || ret >= (int)sizeof(pool_name))) {
		G_LOG(ERR, "%s(pool_name): name=`%s' is too long\n",
			__func__, name);
		return -EINVAL;
	}

	/*
	 * Initialize internal RIB.
	 */

	ret = rib_create(&fib->rib, rib_name, socket_id, max_length,
		max_rules);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(): failed to create RIB %s\n",
			__func__, rib_name);
		goto out;
	}

	fib->addr_len_bytes = max_length / 8;

	/*
	 * Initialize 8-bit tables.
	 */

	fib->tbl8s = rte_malloc_socket(tbl8s_name,
		sizeof(*fib->tbl8s) * num_tbl8s, 0, socket_id);
	if (unlikely(fib->tbl8s == NULL)) {
		ret = -ENOMEM;
		goto free_rib;
	}
	fib->num_tbl8s = num_tbl8s;

	/* Every entry of every tbl8 starts with no next hop. */
	for (i = 0; i < num_tbl8s; i++) {
		write_atomics(fib->tbl8s[i].nh, 0,
			RTE_DIM(fib->tbl8s[i].nh) - 1, FIB_NO_NH);
	}

	/*
	 * Initialize pool of 8-bit tables.
	 */

	fib->tbl8_pool = rte_malloc_socket(pool_name,
		sizeof(*fib->tbl8_pool) * num_tbl8s, 0, socket_id);
	if (unlikely(fib->tbl8_pool == NULL)) {
		ret = -ENOMEM;
		goto free_tbl8s;
	}
	/* The pool is a circular queue; all tbl8s start free. */
	fib->first_free_tbl8_idx = 0;
	fib->first_free_idx = 0;
	for (i = 0; i < num_tbl8s; i++)
		fib->tbl8_pool[i] = i;

	/* Initialize 24-bit table. */
	write_atomics(fib->tbl24, 0, RTE_DIM(fib->tbl24) - 1, FIB_NO_NH);

	return 0;

free_tbl8s:
	rte_free(fib->tbl8s);
	fib->tbl8s = NULL;
free_rib:
	rib_free(&fib->rib);
out:
	return ret;
}

/*
 * Release everything fib_create() allocated. Safe to call on a @fib that
 * was never initialized (all pointers NULL), mirroring free(3).
 */
void
fib_free(struct fib_head *fib)
{
	if (likely(fib->tbl8_pool != NULL)) {
		/*
		 * This function may be called on an uninitialized @fib.
		 * For example, when a Gatekeeper server is set to work with
		 * either IPv4 or IPv6 only.
		 * In this case, this function shall follow the example of
		 * free(3) that ignores NULL.
		 */
		/*
		 * NOTE(review): poisoning slot 0 before freeing presumably
		 * helps catch use-after-free of the pool — confirm intent.
		 */
		fib->tbl8_pool[0] = FIB_TBL8_FREE_INDEX;
		rte_free(fib->tbl8_pool);
		fib->tbl8_pool = NULL;
	}
	fib->first_free_tbl8_idx = 0;
	fib->first_free_idx = 0;

	fib->num_tbl8s = 0;
	rte_free(fib->tbl8s);
	fib->tbl8s = NULL;

	rib_free(&fib->rib);
}

/* Index into tbl24: the first 24 bits of @address, big endian. */
static inline uint32_t
get_tbl24_idx(const uint8_t *address)
{
	return address[0] << 16 | address[1] << 8 | address[2];
}

/*
 * When this bit is set in a next-hop word, the low bits are the index of
 * a tbl8 rather than a next hop.
 */
#define FIB_EXTENDED_NH (0x80000000)

/* Does @nh point to a tbl8 instead of holding a next hop? */
static inline bool
is_nh_extended(uint32_t nh)
{
	return !!(nh & FIB_EXTENDED_NH);
}

/* Extract the tbl8 index from an extended next-hop word @nh. */
static inline uint32_t
get_tbl8_idx(uint32_t nh)
{
	RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(FIB_EXTENDED_NH));
	RTE_BUILD_BUG_ON(FIB_EXTENDED_NH - 1 != FIB_NO_NH);
	return nh & FIB_NO_NH;
}

/* Declare a buffer large enough for address_to_str() output. */
#define ADDR_STR_VAR(name, addr_len) char name[2 * addr_len + 1]

/* Write the @addr_len bytes of @address to @str as lowercase hex. */
static void
address_to_str(char *str, const uint8_t *address, uint8_t addr_len)
{
	static const char hex_to_char[] = {
		'0', '1', '2', '3', '4', '5', '6', '7',
		'8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
	};
	unsigned int i, j;

	RTE_BUILD_BUG_ON(RTE_DIM(hex_to_char) != 16);

	for (i = 0, j = 0; i < addr_len; i++) {
		uint8_t byte = address[i];
		str[j++] = hex_to_char[(byte & 0xF0) >> 4];
		str[j++] = hex_to_char[(byte & 0x0F)];
	}
	str[j] = '\0';
}

/*
 * A contiguous range [@first, @last] of table entries;
 * @empty means the range holds no entries at all.
 */
struct prefix_range {
	bool empty;
uint32_t first;
	uint32_t last;
};

/* Return a mask of the @n lowest bits (all ones when @n >= 32). */
static inline uint32_t
n_one_bits32(uint8_t n)
{
	if (unlikely(n >= 32))
		return (uint32_t)-1;
	return (1 << n) - 1;
}

/* Set @range to cover a whole tbl8: all 256 entries. */
static inline void
set_range8_full(struct prefix_range *range)
{
	range->empty = false;
	range->first = 0x00;
	range->last = 0xFF;
}

/*
 * Compute the range of tbl8 entries (at level @next_byte) covered by
 * @address/@depth.
 *
 * RETURN true if the prefix was truncated.
 * NOTE the returned @range is never empty.
 */
static bool
set_range8(struct prefix_range *range, const uint8_t *address, uint8_t depth,
	uint8_t next_byte)
{
	bool truncated = false;
	int mask_depth, free_bits;

	mask_depth = next_byte * 8;
	if (mask_depth >= depth) {
		/* The prefix ends before this byte; it covers everything. */
		set_range8_full(range);
		return truncated;
	}

	/* How many bits of this byte does the prefix constrain? */
	mask_depth = depth - mask_depth;
	if (mask_depth > 8) {
		mask_depth = 8;
		truncated = true;
	}

	range->empty = false;
	free_bits = 8 - mask_depth;
	range->first = address[next_byte] &
		(n_one_bits32(mask_depth) << free_bits);
	range->last = range->first | n_one_bits32(free_bits);
	return truncated;
}

/*
 * Subtract @range from @acc_range. When @range splits @acc_range in the
 * middle, the left part is written out to @array with @next_hop and
 * @acc_range keeps only the right part.
 *
 * RETURN true if @acc_range was reduced.
 */
static bool
exclude_range(rte_atomic32_t *array, struct prefix_range *acc_range,
	const struct prefix_range *range, uint32_t next_hop)
{
	if (unlikely(range->empty || acc_range->empty)) {
		/* There is nothing to do. */
		return false;
	}

	/* @range is at the left of @acc_range. */
	if (range->first <= acc_range->first) {
		if (range->last < acc_range->first) {
			/* There is nothing to do. */
			return false;
		}
		if (range->last < acc_range->last)
			acc_range->first = range->last + 1;
		else
			acc_range->empty = true;
		return true;
	}

	/* @range is at the right of @acc_range. */
	if (range->last >= acc_range->last) {
		if (range->first > acc_range->last) {
			/* There is nothing to do. */
			return false;
		}
		if (range->first > acc_range->first)
			acc_range->last = range->first - 1;
		else
			acc_range->empty = true;
		return true;
	}

	/* @range is at the middle of @acc_range.
	 */
	write_atomics(array, acc_range->first, range->first - 1, next_hop);
	acc_range->first = range->last + 1;
	return true;
}

/*
 * Scan the RIB rules that are prefixes of (or equal to) @address/@depth.
 * *@pparent_nexthop receives the next hop of the longest strictly-shorter
 * rule, and *@pchild_nexthop the next hop of @address/@depth itself;
 * either is FIB_NO_NH when no such rule exists.
 *
 * RETURN 0 on success, or a negative errno from the RIB iterator.
 */
static int
get_parent_child_nexthops(const struct fib_head *fib, const uint8_t *address,
	uint8_t depth, uint32_t *pparent_nexthop, uint32_t *pchild_nexthop)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	struct rib_shorter_iterator_state state;
	uint32_t parent_nexthop, child_nexthop;
	int ret = rib_shorter_iterator_state_init(&state, &fib->rib,
		address, depth);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, address, fib->addr_len_bytes);
		G_LOG(ERR, "%s(%s/%u): failed to initialize RIB iterator (errno=%i): %s\n",
			__func__, addr_str, depth, -ret, strerror(-ret));
		return ret;
	}

	parent_nexthop = FIB_NO_NH;
	child_nexthop = FIB_NO_NH;
	while (true) {
		struct rib_iterator_rule rule;
		ret = rib_shorter_iterator_next(&state, &rule);
		if (unlikely(ret < 0)) {
			if (likely(ret == -ENOENT)) {
				/* End of iteration; report what was found. */
				*pparent_nexthop = parent_nexthop;
				*pchild_nexthop = child_nexthop;
				ret = 0;
				break;
			}

			address_to_str(addr_str, address,
				fib->addr_len_bytes);
			G_LOG(ERR, "%s(%s/%u): RIB iterator failed (errno=%i): %s\n",
				__func__, addr_str, depth,
				-ret, strerror(-ret));
			break;
		}

		if (likely(rule.depth < depth))
			parent_nexthop = rule.next_hop;
		else if (likely(rule.depth == depth))
			child_nexthop = rule.next_hop;
		else {
			address_to_str(addr_str, address,
				fib->addr_len_bytes);
			G_LOG(CRIT, "%s(%s/%u): bug: rule.depth=%i > depth=%i\n",
				__func__, addr_str, depth,
				rule.depth, depth);
			ret = -EFAULT;
			break;
		}
	}
	rib_shorter_iterator_end(&state);
	return ret;
}

/*
 * The following definitions and results drive the code.
 *
 * Definition Len(P):
 *	Let P be a prefix in a given FIB,
 *	Len(P) is the length (AKA depth) of P.
 *
 * Definition Nh(P):
 *	Let P be a prefix in a given FIB,
 *	Nh(P) is the next hop of P.
 *
 * Definition P1 includes P2:
 *	Let P1 and P2 be prefixes in a given FIB,
 *	P1 includes P2 if, and only if,
 *	1. Len(P1) < Len(P2) and
 *	2. the first Len(P1) bits of P2 are equal to P1.
 *	Notice that 1.
forces P1 and P2 to be different.
 *	For example, P1 == 10.0.0.0/8 includes P2 == 10.10.0.0/16.
 *
 * Definition P1 is the parent of P2 (or P2 is a child of P1):
 *	Let P1 and P2 be prefixes in a given FIB F,
 *	P1 is the parent of P2 if, and only if,
 *	1. P1 includes P2 and
 *	2. there is no prefix P in F such that
 *	P1 includes P and P includes P2.
 *	Notice that a prefix can only have one parent (if it exists),
 *	whereas a prefix can have zero or more children.
 *
 * Definition Rt(T):
 *	Let T be a tbl8 in a given FIB F,
 *	Rt(T) is the prefix of the entry where T is rooted.
 *	Notice that Rt(T) may not be in F.
 *	For example, let T be the tbl8 in which the prefix 10.0.0.1/32
 *	resides, Rt(T) == 10.0.0.0/24.
 *
 * Definition Nh(T):
 *	Let T be a tbl8 in a given FIB F.
 *	If Rt(T) is in F, Nh(T) is Nh(Rt(T)).
 *	If Rt(T) is NOT in F but the parent prefix P of Rt(T) is in F,
 *	Nh(T) is Nh(P).
 *	Otherwise, Nh(T) is FIB_NO_NH.
 *
 * Definition P justifies T:
 *	A prefix P justifies (or requires) the allocation of
 *	a tbl8 T in a given FIB if, and only if,
 *	1. Rt(T) includes P and
 *	2. Nh(P) != Nh(T).
 *	Notice that this definition is not ideal, but a compromise.
 *	For example, consider a FIB with the following prefixes:
 *	Nh(0.0.0.0/0) == A, Nh(10.0.0.0/25) == B, Nh(10.0.0.0/26) == A,
 *	and Nh(10.0.0.64/26) == A.
 *	The definition here requires the tbl8 T whose Rt(T) is 10.0.0.0/24
 *	Nh(T) == A to be allocated since Rt(T) includes 10.0.0.0/25 and
 *	Nh(T) != Nh(10.0.0.0/25). Nevertheless, NOT allocating T
 *	would produce correct lookups.
 *
 * The all-the-same theorem.
 *
 *	Given a FIB F, let T be a tbl8 such that
 *	there is NO prefix P in F that justifies the allocation of T.
 *	For all prefixes P in F such that Rt(T) includes P, Nh(P) == Nh(T).
 *
 *	NOTE
 *		While this theorem is not currently being used in the code,
 *		it is a good illustration of the use of the definitions above.
 *
 * Proof:
 *
 *	If there is no prefixes P in F such that Rt(T) includes P,
 *	the theorem cannot be made false.
 *
 *	Let P be a prefix in F such that Rt(T) includes P.
 *	Since no prefix justifies T (hypothesis), from the definition
 *	"P justifies T", Nh(P) == Nh(T).
 */

/*
 * Compute Nh(T) for the tbl8 T with Rt(T) == @tbl8_address/@tbl8_depth
 * (see the definition of Nh(T) above): the next hop of Rt(T) itself when
 * it is in the RIB, otherwise the next hop of its parent.
 *
 * RETURN 0 on success, or get_parent_child_nexthops()'s error.
 */
static int
nh_of_tbl8(const struct fib_head *fib, const uint8_t *tbl8_address,
	uint8_t tbl8_depth, uint32_t *ptbl8_nexthop)
{
	uint32_t parent_nexthop, child_nexthop;
	int ret = get_parent_child_nexthops(fib, tbl8_address, tbl8_depth,
		&parent_nexthop, &child_nexthop);
	if (unlikely(ret < 0))
		return ret;
	*ptbl8_nexthop = child_nexthop != FIB_NO_NH
		? child_nexthop : parent_nexthop;
	return 0;
}

/*
 * Rt(T) == @tbl8_address/@tbl8_depth.
 * Nh(T) == @tbl8_nexthop.
 *
 * Scan the RIB rules under Rt(T) looking for one that justifies T
 * (see the definition of "P justifies T" above).
 *
 * RETURN
 *	< 0	If it fails.
 *	false (0) If the tbl8 T is NOT needed.
 *	true (1) if the tbl8 T is needed.
 */
static int
is_tbl8_needed(struct fib_head *fib, const uint8_t *tbl8_address,
	uint8_t tbl8_depth, uint32_t tbl8_nexthop)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	struct rib_longer_iterator_state state;
	int ret = rib_longer_iterator_state_init(&state, &fib->rib,
		tbl8_address, tbl8_depth, false);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, tbl8_address, fib->addr_len_bytes);
		G_LOG(ERR, "%s(%s/%u): failed to initialize RIB iterator (errno=%i): %s\n",
			__func__, addr_str, tbl8_depth, -ret, strerror(-ret));
		return ret;
	}

	while (true) {
		struct rib_iterator_rule rule;
		ret = rib_longer_iterator_next(&state, &rule);
		if (unlikely(ret < 0)) {
			if (likely(ret == -ENOENT)) {
				/* No rule justifies T. */
				ret = false;
				break;
			}

			address_to_str(addr_str, tbl8_address,
				fib->addr_len_bytes);
			G_LOG(ERR, "%s(%s/%u): RIB iterator failed (errno=%i): %s\n",
				__func__, addr_str, tbl8_depth,
				-ret, strerror(-ret));
			break;
		}

		if (likely(rule.depth > tbl8_depth)) {
			if (rule.next_hop != tbl8_nexthop) {
				/*
				 * The longer iterator already guarantees that
				 * Rt(T) includes the prefix
				 * @rule.address_no/@rule.depth, so this prefix
				 * justifies T.
				 */
				ret = true;
				break;
			}
		} else if (likely(rule.depth == tbl8_depth)) {
			/*
			 * Ignore prefix @tbl8_address/@tbl8_depth since
			 * it cannot justify T.
			 */
		} else {
			address_to_str(addr_str, tbl8_address,
				fib->addr_len_bytes);
			G_LOG(CRIT, "%s(%s/%u): bug: rule.depth=%i < tbl8_depth=%i\n",
				__func__, addr_str, tbl8_depth,
				rule.depth, tbl8_depth);
			ret = -EFAULT;
			break;
		}
	}
	rib_longer_iterator_end(&state);
	return ret;
}

/*
 * Take a free tbl8 index from the circular pool.
 * RETURN 0 on success; -ENOSPC when the pool is exhausted.
 */
static int
tbl8_get(struct fib_head *fib, uint32_t *ptbl8_idx)
{
	uint32_t tbl8_idx_candidate = fib->tbl8_pool[fib->first_free_tbl8_idx];
	if (unlikely(tbl8_idx_candidate == FIB_TBL8_FREE_INDEX))
		return -ENOSPC; /* No more free TBL8. */
	/* Mark the slot consumed and advance the head of the pool. */
	fib->tbl8_pool[fib->first_free_tbl8_idx] = FIB_TBL8_FREE_INDEX;
	fib->first_free_tbl8_idx = (fib->first_free_tbl8_idx + 1) %
		fib->num_tbl8s;
	*ptbl8_idx = tbl8_idx_candidate;
	return 0;
}

/* Return @tbl8_idx to the circular pool; logs (but tolerates) misuse. */
static void
tbl8_put(struct fib_head *fib, uint32_t tbl8_idx)
{
	if (unlikely(tbl8_idx == FIB_TBL8_FREE_INDEX)) {
		G_LOG(CRIT, "%s(): bug: called to release FIB_TBL8_FREE_INDEX\n",
			__func__);
		return;
	}

	if (unlikely(fib->tbl8_pool[fib->first_free_idx] !=
			FIB_TBL8_FREE_INDEX)) {
		G_LOG(CRIT, "%s(): bug: pool overflow\n", __func__);
		return;
	}

	fib->tbl8_pool[fib->first_free_idx] = tbl8_idx;
	fib->first_free_idx = (fib->first_free_idx + 1) % fib->num_tbl8s;
}

/* Recursively return @tbl8_idx and every tbl8 under it to the pool. */
static void
free_tbl8(struct fib_head *fib, uint32_t tbl8_idx)
{
	/* Do not update @tbl8 to avoid disrupting concurrent readers. */
	const struct fib_tbl8 *tbl8 = &fib->tbl8s[tbl8_idx];
	int i;

	for (i = 0; i < (typeof(i))RTE_DIM(tbl8->nh); i++) {
		uint32_t nh = rte_atomic32_read(&tbl8->nh[i]);
		if (is_nh_extended(nh)) {
			/*
			 * As long as the FIB is not corrupted,
			 * free_tbl8() is never called twice on
			 * the same @tbl8_idx because each @tbl8_idx represents
			 * a unique range of the address space.
			 */
			free_tbl8(fib, get_tbl8_idx(nh));
		}
	}

	tbl8_put(fib, tbl8_idx);
}

static int build_fib_tbl8(struct fib_head *fib, rte_atomic32_t *root,
	const uint8_t *tbl8_address, uint8_t next_byte,
	uint32_t tbl8_nexthop);

/*
 * RETURN
 *	< 0	Failure.
 *	false (0) Range updating is not needed.
 *	true (1) Range updating may be needed.
*/
/*
 * Decide the fate of the tbl8 T rooted at @root, with
 * Rt(T) == @tbl8_address/(@next_byte * 8): release it (or just write
 * Nh(T)) when no rule justifies it, build it when it is justified but
 * absent, or hand it back (*@ptbl8, *@ptbl8_nexthop) for range updating
 * when it is justified and already present.
 */
static int
check_tbl8(struct fib_head *fib, rte_atomic32_t *root,
	const uint8_t *tbl8_address, uint8_t next_byte,
	struct fib_tbl8 **ptbl8, uint32_t *ptbl8_nexthop)
{
	uint32_t tbl8_nexthop, nh;
	uint8_t tbl8_depth;
	int ret;

	tbl8_depth = next_byte * 8;

	ret = nh_of_tbl8(fib, tbl8_address, tbl8_depth, &tbl8_nexthop);
	if (unlikely(ret < 0))
		return ret;

	ret = is_tbl8_needed(fib, tbl8_address, tbl8_depth, tbl8_nexthop);
	if (unlikely(ret < 0))
		return ret;

	nh = rte_atomic32_read(root);

	if (!ret) {
		/* The tbl8 is NOT needed. */
		/* Publish Nh(T) first, then release the old subtree. */
		rte_atomic32_set(root, tbl8_nexthop);
		if (is_nh_extended(nh))
			free_tbl8(fib, get_tbl8_idx(nh));
		return false;
	}

	/*
	 * The tbl8 is needed.
	 */

	if (!is_nh_extended(nh)) {
		/* The tbl8 does not exist yet; build and install it. */
		ret = build_fib_tbl8(fib, root, tbl8_address, next_byte,
			tbl8_nexthop);
		if (unlikely(ret < 0))
			return ret;
		return false;
	}

	/* Range updating may be needed. */
	*ptbl8 = &fib->tbl8s[get_tbl8_idx(nh)];
	*ptbl8_nexthop = tbl8_nexthop;
	return true;
}

/*
 * Allocate and fill a new tbl8 T with Rt(T) == @tbl8_address/
 * (@next_byte * 8) and default next hop @tbl8_nexthop, recursing for
 * rules that go deeper than this level, and install it at @root only
 * once it is fully built (so concurrent readers never see a partial
 * table).
 *
 * RETURN 0 on success; -ENOSPC when the tbl8 pool is exhausted;
 * or a RIB iterator/recursion error.
 */
static int
build_fib_tbl8(struct fib_head *fib, rte_atomic32_t *root,
	const uint8_t *tbl8_address, uint8_t next_byte,
	uint32_t tbl8_nexthop)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	uint32_t tbl8_idx;
	struct fib_tbl8 *tbl8;
	uint8_t tbl8_depth;
	struct rib_longer_iterator_state state;

	/*
	 * Allocate a tbl8.
	 */

	int ret = tbl8_get(fib, &tbl8_idx);
	if (unlikely(ret < 0))
		return ret;
	tbl8 = &fib->tbl8s[tbl8_idx];
	/* Start every entry at the default next hop of this subtree. */
	write_atomics(tbl8->nh, 0, RTE_DIM(tbl8->nh) - 1, tbl8_nexthop);

	tbl8_depth = next_byte * 8;

	ret = rib_longer_iterator_state_init(&state, &fib->rib,
		tbl8_address, tbl8_depth, false);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, tbl8_address, fib->addr_len_bytes);
		G_LOG(ERR, "%s(%s/%u): failed to initialize RIB iterator (errno=%i): %s\n",
			__func__, addr_str, tbl8_depth, -ret, strerror(-ret));
		tbl8_put(fib, tbl8_idx);
		return ret;
	}

	while (true) {
		struct rib_iterator_rule rule;
		ret = rib_longer_iterator_next(&state, &rule);
		if (unlikely(ret < 0)) {
			if (likely(ret == -ENOENT)) {
				ret = 0;
				break;
			}

			address_to_str(addr_str, tbl8_address,
				fib->addr_len_bytes);
			G_LOG(ERR, "%s(%s/%u): RIB iterator failed (errno=%i): %s\n",
				__func__, addr_str, tbl8_depth,
				-ret, strerror(-ret));
			break;
		}

		if (likely(rule.depth > tbl8_depth)) {
			const uint8_t *rule_address =
				(uint8_t *)&rule.address_no;
			struct prefix_range rule_range;
			if (set_range8(&rule_range, rule_address, rule.depth,
					next_byte)) {
				/* The rule goes deeper. */
				struct fib_tbl8 *ignore_tbl8;
				uint32_t ignore_tbl8_nexthop;
				/* Recurse to build the next level. */
				ret = check_tbl8(fib,
					&tbl8->nh[rule_range.first],
					rule_address, next_byte + 1,
					&ignore_tbl8, &ignore_tbl8_nexthop);
				if (unlikely(ret < 0))
					break;
				if (unlikely(ret)) {
					/*
					 * A freshly built subtree can never
					 * need further range updates.
					 */
					address_to_str(addr_str, rule_address,
						fib->addr_len_bytes);
					G_LOG(CRIT, "%s(%s/%u): bug: tbl8 still requires updates\n",
						__func__, addr_str,
						rule.depth);
				}
				/* The whole branch was just handled. */
				ret = rib_longer_iterator_skip_branch(&state,
					rule_address, tbl8_depth + 8);
				if (unlikely(ret < 0))
					break;
			} else {
				write_atomics(tbl8->nh, rule_range.first,
					rule_range.last, rule.next_hop);
			}
		} else if (likely(rule.depth == tbl8_depth)) {
			/* Ignore prefix @tbl8_address/@tbl8_depth.
			 */
		} else {
			address_to_str(addr_str, tbl8_address,
				fib->addr_len_bytes);
			G_LOG(CRIT, "%s(%s/%u): bug: rule.depth=%i < tbl8_depth=%i\n",
				__func__, addr_str, tbl8_depth,
				rule.depth, tbl8_depth);
			ret = -EFAULT;
			break;
		}
	}
	rib_longer_iterator_end(&state);

	if (likely(ret == 0)) {
		/* Insert @tbl8 to FIB. */
		rte_atomic32_set(root, FIB_EXTENDED_NH | tbl8_idx);
		return 0;
	}

	free_tbl8(fib, tbl8_idx);
	return ret;
}

static int update_tbl8_nh(struct fib_head *fib, rte_atomic32_t *root,
	const uint8_t *tbl8_address, uint8_t next_byte);

/*
 * Remove all sub-prefixes of @range, and update @range of @tbl8 with
 * @range_nexthop.
 */
static int
update_tbl8_range(struct fib_head *fib, struct fib_tbl8 *tbl8,
	uint8_t next_byte, struct prefix_range range,
	const uint8_t *range_address, uint8_t range_depth,
	uint32_t range_nexthop)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	struct rib_longer_iterator_state state;
	int ret = rib_longer_iterator_state_init(&state, &fib->rib,
		range_address, range_depth, true);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, range_address, fib->addr_len_bytes);
		G_LOG(ERR, "%s(%s/%u): failed to initialize RIB iterator (errno=%i): %s\n",
			__func__, addr_str, range_depth, -ret, strerror(-ret));
		return ret;
	}

	while (true) {
		struct rib_iterator_rule rule;
		ret = rib_longer_iterator_next(&state, &rule);
		if (unlikely(ret < 0)) {
			if (likely(ret == -ENOENT)) {
				if (!range.empty) {
					/* Write remaining range.
					 */
					write_atomics(tbl8->nh, range.first,
						range.last, range_nexthop);
				}
				ret = 0;
				break;
			}

			address_to_str(addr_str, range_address,
				fib->addr_len_bytes);
			G_LOG(ERR, "%s(%s/%u): RIB iterator failed (errno=%i): %s\n",
				__func__, addr_str, range_depth,
				-ret, strerror(-ret));
			break;
		}

		if (likely(rule.depth > range_depth)) {
			const uint8_t *rule_address =
				(uint8_t *)&rule.address_no;
			struct prefix_range rule_range;
			bool dig = set_range8(&rule_range, rule_address,
				rule.depth, next_byte);
			/* Carve the rule's entries out of the range. */
			if (unlikely(!exclude_range(tbl8->nh, &range,
					&rule_range, range_nexthop))) {
				address_to_str(addr_str, range_address,
					fib->addr_len_bytes);
				G_LOG(CRIT, "%s(%s/%u): bug: missing exclusion\n",
					__func__, addr_str, range_depth);
			}
			if (dig) {
				/* The rule goes deeper; recurse. */
				ret = update_tbl8_nh(fib,
					&tbl8->nh[rule_range.first],
					rule_address, next_byte + 1);
				if (unlikely(ret < 0))
					break;
				ret = rib_longer_iterator_skip_branch(&state,
					rule_address, (next_byte + 1) * 8);
				if (unlikely(ret < 0))
					break;
			}
		} else if (likely(rule.depth == range_depth)) {
			/* Ignore prefix @range_address/@range_depth. */
		} else {
			address_to_str(addr_str, range_address,
				fib->addr_len_bytes);
			G_LOG(CRIT, "%s(%s/%u): bug: rule.depth=%i < iter_depth=%i\n",
				__func__, addr_str, range_depth,
				rule.depth, range_depth);
			ret = -EFAULT;
			break;
		}
	}
	rib_longer_iterator_end(&state);
	return ret;
}

/*
 * @root is where the index of the tbl8 T will reside, or resides.
 *
 * Rt(T) = @tbl8_address/(@next_byte * 8).
 *
 * @next_byte is the index of @address to be evaluated.
 */
static int
update_tbl8_nh(struct fib_head *fib, rte_atomic32_t *root,
	const uint8_t *tbl8_address, uint8_t next_byte)
{
	struct fib_tbl8 *tbl8;
	uint32_t tbl8_nexthop;
	struct prefix_range range;
	int ret = check_tbl8(fib, root, tbl8_address, next_byte, &tbl8,
		&tbl8_nexthop);
	if (ret <= 0)
		return ret;
	/* The tbl8 exists and is justified; refresh all of its entries. */
	set_range8_full(&range);
	return update_tbl8_range(fib, tbl8, next_byte, range, tbl8_address,
		next_byte * 8, tbl8_nexthop);
}

/*
 * @root is where the index of the tbl8 T will reside, or resides.
 *
 * Rt(T) = @address/(@next_byte * 8).
 *
 * @next_byte is the index of @address to be evaluated.
 *
 * The prefix P == @address/@depth is the prefix being updated.
 * Nh(P) == @next_hop.
 *
 * P must be in @fib->rib if it's being added or having its @next_hop updated,
 * whereas P must not be in @fib->rib if it's being removed.
 *
 * Note that @depth > (@next_byte * 8) must be true.
 */
static int
update_tbl8_rule(struct fib_head *fib, rte_atomic32_t *root,
	uint8_t next_byte, const uint8_t *address, uint8_t depth,
	uint32_t next_hop)
{
	struct fib_tbl8 *tbl8;
	uint32_t tbl8_nexthop;
	struct prefix_range range;
	int ret;

	if (unlikely(depth <= next_byte * 8))
		return -EINVAL;

	ret = check_tbl8(fib, root, address, next_byte, &tbl8,
		&tbl8_nexthop);
	if (ret <= 0)
		return ret;

	if (set_range8(&range, address, depth, next_byte)) {
		/* The prefix goes deeper. */
		return update_tbl8_rule(fib, &tbl8->nh[range.first],
			next_byte + 1, address, depth, next_hop);
	}

	if (depth >= (fib->addr_len_bytes * 8)) {
		/* A full-length prefix maps to exactly one entry. */
		if (unlikely(range.empty || range.first != range.last)) {
			G_LOG(CRIT, "%s(): bug: range.empty=%i, range.first=%u, range.last=%u\n",
				__func__, range.empty,
				range.first, range.last);
			return -EFAULT;
		}
		/*
		 * Avoid the iterator below for the common case of
		 * @depth being the maximum length.
		 */
		rte_atomic32_set(&tbl8->nh[range.first], next_hop);
		return 0;
	}

	return update_tbl8_range(fib, tbl8, next_byte, range, address,
		depth, next_hop);
}

/*
 * Compute the range of tbl24 entries covered by @address/@depth.
 * RETURN true if the prefix was truncated.
 */
static bool
set_range24(struct prefix_range *range, const uint8_t *address,
	uint8_t depth)
{
	bool truncated = false;
	int free_bits;

	if (unlikely(depth > 24)) {
		depth = 24;
		truncated = true;
	}

	range->empty = false;
	free_bits = 24 - depth;
	range->first = get_tbl24_idx(address) &
		(n_one_bits32(depth) << free_bits);
	range->last = range->first | n_one_bits32(free_bits);
	return truncated;
}

/*
 * Propagate a change of prefix @address/@depth (now mapping to
 * @next_hop) from the RIB into the tbl24/tbl8 lookup structure.
 *
 * IMPORTANT:
 *	P must have already been added/removed from the RIB before calling
 *	this function.
 */
static int
update_fib(struct fib_head *fib, const uint8_t *address, uint8_t depth,
	uint32_t next_hop)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	struct prefix_range range;
	struct rib_longer_iterator_state state;
	int ret;

	if (set_range24(&range, address, depth)) {
		/* The prefix goes deeper. */
		return update_tbl8_rule(fib,
			&fib->tbl24[get_tbl24_idx(address)], 3,
			address, depth, next_hop);
	}

	/*
	 * Remove all sub-prefixes of @range, and update @range of tbl24.
	 */

	ret = rib_longer_iterator_state_init(&state, &fib->rib,
		address, depth, true);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, address, fib->addr_len_bytes);
		G_LOG(ERR, "%s(%s/%u): failed to initialize RIB iterator (errno=%i): %s\n",
			__func__, addr_str, depth, -ret, strerror(-ret));
		return ret;
	}

	while (true) {
		struct rib_iterator_rule rule;
		ret = rib_longer_iterator_next(&state, &rule);
		if (unlikely(ret < 0)) {
			if (likely(ret == -ENOENT)) {
				if (!range.empty) {
					/* Write remaining range. */
					write_atomics(fib->tbl24, range.first,
						range.last, next_hop);
				}
				ret = 0;
				break;
			}

			address_to_str(addr_str, address,
				fib->addr_len_bytes);
			G_LOG(ERR, "%s(%s/%u): RIB iterator failed (errno=%i): %s\n",
				__func__, addr_str, depth,
				-ret, strerror(-ret));
			break;
		}

		if (likely(rule.depth > depth)) {
			struct prefix_range rule_range;
			bool dig = set_range24(&rule_range,
				(uint8_t *)&rule.address_no, rule.depth);
			/* Carve the rule's entries out of the range. */
			if (unlikely(!exclude_range(fib->tbl24, &range,
					&rule_range, next_hop))) {
				address_to_str(addr_str, address,
					fib->addr_len_bytes);
				G_LOG(CRIT, "%s(%s/%u): bug: missing exclusion\n",
					__func__, addr_str, depth);
			}
			if (dig) {
				/* The rule goes deeper than 24 bits. */
				const uint8_t *dig_tbl8_address =
					(uint8_t *)&rule.address_no;
				ret = update_tbl8_nh(fib,
					&fib->tbl24[rule_range.first],
					dig_tbl8_address, 3);
				if (unlikely(ret < 0))
					break;
				ret = rib_longer_iterator_skip_branch(&state,
					dig_tbl8_address, 24);
				if (unlikely(ret < 0))
					break;
			}
		} else if (likely(rule.depth == depth)) {
			/* Ignore prefix @address/@depth.
			 */
		} else {
			address_to_str(addr_str, address,
				fib->addr_len_bytes);
			G_LOG(CRIT, "%s(%s/%u): bug: rule.depth=%i < depth=%i\n",
				__func__, addr_str, depth,
				rule.depth, depth);
			ret = -EFAULT;
			break;
		}
	}
	rib_longer_iterator_end(&state);
	return ret;
}

/*
 * Add (or update) prefix @address/@depth with @next_hop: insert it into
 * the RIB, then propagate the change into the lookup tables. When
 * @failsafe is true, a no-op update is skipped and a failed table update
 * is rolled back by deleting the prefix again.
 *
 * RETURN 0 on success; -EINVAL on bad @depth or @next_hop; -EFAULT on a
 * corrupted RIB; otherwise rib_add()'s or update_fib()'s error.
 */
int
__fib_add(struct fib_head *fib, const uint8_t *address, uint8_t depth,
	uint32_t next_hop, bool failsafe)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	int ret, ret2;
	uint32_t parent_nexthop, child_nexthop;

	if (unlikely(depth > rib_get_max_length(&fib->rib)))
		return -EINVAL;

	if (unlikely(next_hop >= FIB_NO_NH)) {
		address_to_str(addr_str, address, fib->addr_len_bytes);
		G_LOG(ERR, "%s(%s/%u): next_hop=%u must be less than FIB_NO_NH=%u\n",
			__func__, addr_str, depth, next_hop, FIB_NO_NH);
		return -EINVAL;
	}

	ret = rib_add(&fib->rib, address, depth, next_hop);
	if (ret < 0)
		return ret;

	ret = get_parent_child_nexthops(fib, address, depth,
		&parent_nexthop, &child_nexthop);
	if (unlikely(ret < 0 || next_hop != child_nexthop)) {
		/* The rule just added must be visible in the RIB. */
		address_to_str(addr_str, address, fib->addr_len_bytes);
		G_LOG(CRIT, "%s(%s/%u): bug: RIB is corrupted: ret=%i, next_hop=%u, child_nexthop=%u\n",
			__func__, addr_str, depth,
			ret, next_hop, child_nexthop);

		/* Free rule in RIB. */
		ret = rib_delete(&fib->rib, address, depth);
		if (unlikely(ret < 0)) {
			G_LOG(CRIT, "%s(%s/%u): bug: failed to remove prefix just added to RIB (errno=%i): %s\n",
				__func__, addr_str, depth,
				-ret, strerror(-ret));
		}
		return -EFAULT;
	}

	/* Only avoid update_fib() when @failsafe is true. */
	if (unlikely(failsafe && parent_nexthop == next_hop)) {
		/* There's nothing to update in the FIB. */
		return 0;
	}

	ret = update_fib(fib, address, depth, next_hop);
	if (likely(ret == 0))
		return 0; /* It's done. */

	address_to_str(addr_str, address, fib->addr_len_bytes);
	G_LOG(ERR, "%s(%s/%u): update_fib() failed (errno=%i): %s\n",
		__func__, addr_str, depth, -ret, strerror(-ret));

	if (!failsafe)
		return ret;

	/* Try to recover @fib to a safe state.
	 */
	ret2 = __fib_delete(fib, address, depth, false);
	if (unlikely(ret2 < 0)) {
		G_LOG(CRIT, "%s(%s/%u): bug: __fib_delete() cannot restore FIB (errno=%i): %s\n",
			__func__, addr_str, depth, -ret2, strerror(-ret2));
	}
	return ret;
}

/*
 * Delete prefix @address/@depth: remove it from the RIB, then propagate
 * the parent's next hop into the lookup tables. When @failsafe is true,
 * a no-op update is skipped.
 *
 * RETURN 0 on success; -EINVAL on bad @depth; -ENOENT when the prefix is
 * not present; -EFAULT on a corrupted RIB; otherwise rib_delete()'s or
 * update_fib()'s error.
 */
int
__fib_delete(struct fib_head *fib, const uint8_t *address, uint8_t depth,
	bool failsafe)
{
	ADDR_STR_VAR(addr_str, fib->addr_len_bytes);
	uint32_t parent_nexthop, child_nexthop;
	int ret, ret2;

	if (unlikely(depth > rib_get_max_length(&fib->rib)))
		return -EINVAL;

	ret = get_parent_child_nexthops(fib, address, depth,
		&parent_nexthop, &child_nexthop);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, address, fib->addr_len_bytes);
		G_LOG(CRIT, "%s(%s/%u): bug: RIB is corrupted (errno=%i): %s\n",
			__func__, addr_str, depth, -ret, strerror(-ret));
		return -EFAULT;
	}

	if (child_nexthop == FIB_NO_NH) {
		/* There's nothing to do. */
		return -ENOENT;
	}

	/* Free rule in RIB. */
	ret = rib_delete(&fib->rib, address, depth);
	if (unlikely(ret < 0)) {
		address_to_str(addr_str, address, fib->addr_len_bytes);
		G_LOG(CRIT, "%s(%s/%u): bug: failed to remove prefix from RIB (errno=%i): %s\n",
			__func__, addr_str, depth, -ret, strerror(-ret));
		return ret;
	}

	/* Only avoid update_fib() when @failsafe is true. */
	if (unlikely(failsafe && parent_nexthop == child_nexthop)) {
		/* There's nothing to update in the FIB. */
		return 0;
	}

	ret = update_fib(fib, address, depth, parent_nexthop);
	if (likely(ret == 0))
		return 0; /* It's done. */

	address_to_str(addr_str, address, fib->addr_len_bytes);
	G_LOG(ERR, "%s(%s/%u): update_fib() failed (errno=%i): %s\n",
		__func__, addr_str, depth, -ret, strerror(-ret));

	if (!failsafe)
		return ret;

	/* Try to recover @fib to a safe state.
*/ ret2 = __fib_add(fib, address, depth, child_nexthop, false); if (unlikely(ret2 < 0)) { G_LOG(CRIT, "%s(%s/%u): bug: __fib_add() cannot restore FIB (errno=%i): %s\n", __func__, addr_str, depth, -ret, strerror(-ret)); } return ret; } int fib_lookup(const struct fib_head *fib, const uint8_t *address, uint32_t *pnext_hop) { ADDR_STR_VAR(addr_str, fib->addr_len_bytes); uint32_t nh_candidate = rte_atomic32_read(&fib->tbl24[get_tbl24_idx(address)]); unsigned int i = 3; while (is_nh_extended(nh_candidate)) { uint32_t next_tbl8_idx = get_tbl8_idx(nh_candidate); if (unlikely(next_tbl8_idx >= fib->num_tbl8s)) { address_to_str(addr_str, address, fib->addr_len_bytes); G_LOG(CRIT, "%s(%s): bug: next_tbl8_idx=%u >= num_tbl8s=%u\n", __func__, addr_str, next_tbl8_idx, fib->num_tbl8s); goto bug; } if (unlikely(i >= fib->addr_len_bytes)) { address_to_str(addr_str, address, fib->addr_len_bytes); G_LOG(CRIT, "%s(%s): bug: i=%u >= addr_len_bytes=%u\n", __func__, addr_str, i, fib->addr_len_bytes); goto bug; } nh_candidate = rte_atomic32_read( &fib->tbl8s[next_tbl8_idx].nh[address[i++]]); } *pnext_hop = nh_candidate; return nh_candidate != FIB_NO_NH ? 0 : -ENOENT; bug: *pnext_hop = FIB_NO_NH; return -EFAULT; } /* * fib_lookup_bulk() was implemented using the G-Opt technique. * * For details on the G-Opt technique, see al least Section 3.2 of * the paper Raising the Bar for Using GPUs in Software Packet Processing * by Anuj Kalia, Dong Zhou, Michael Kaminsky, and David G. Andersen. * published on 12th USENIX Symposium on Networked Systems Design and * Implementation (aka NSDI 2015). */ #define G_SW() \ do { \ i = (i + 1) % n; \ goto *g_labels[i]; \ } while (0) /* Prefetch, Save label, and Switch lookup. */ #define G_PSS(addr, label) \ do { \ rte_prefetch0(addr); \ g_labels[i] = &&label; \ G_SW(); \ } while (0) void fib_lookup_bulk(const struct fib_head *fib, const uint8_t **addresses, uint32_t *next_hops, unsigned int n) { /* Variables used atomically. 
*/ ADDR_STR_VAR(addr_str, fib->addr_len_bytes); uint32_t nh_candidate, next_tbl8_idx; /* Lookup state. */ const rte_atomic32_t *atomic_nh_candidate[n]; uint8_t addr_idx[n]; /* G-Opt state. */ void *g_labels[n]; unsigned int i, g_count; if (unlikely(n == 0)) return; for (i = 0; i < n; i++) g_labels[i] = &&g_label_0; i = 0; g_count = 0; g_label_0: /* Basis step: look up the 24-bit table. */ atomic_nh_candidate[i] = &fib->tbl24[get_tbl24_idx(addresses[i])]; addr_idx[i] = 3; G_PSS(atomic_nh_candidate[i], g_label_1); g_label_1: /* Inductive step: look up the 8-bit tables. */ nh_candidate = rte_atomic32_read(atomic_nh_candidate[i]); if (likely(!is_nh_extended(nh_candidate))) { next_hops[i] = nh_candidate; goto g_done; } next_tbl8_idx = get_tbl8_idx(nh_candidate); if (unlikely(next_tbl8_idx >= fib->num_tbl8s)) { address_to_str(addr_str, addresses[i], fib->addr_len_bytes); G_LOG(CRIT, "%s(%s): bug: next_tbl8_idx=%u >= num_tbl8s=%u\n", __func__, addr_str, next_tbl8_idx, fib->num_tbl8s); goto bug; } if (unlikely(addr_idx[i] >= fib->addr_len_bytes)) { address_to_str(addr_str, addresses[i], fib->addr_len_bytes); G_LOG(CRIT, "%s(%s): bug: addr_idx=%u >= addr_len_bytes=%u\n", __func__, addr_str, addr_idx[i], fib->addr_len_bytes); goto bug; } atomic_nh_candidate[i] = &fib->tbl8s[next_tbl8_idx].nh[addresses[i][addr_idx[i]++]]; G_PSS(atomic_nh_candidate[i], g_label_1); g_skip: G_SW(); bug: next_hops[i] = FIB_NO_NH; g_done: g_count++; g_labels[i] = &&g_skip; if (likely(g_count < n)) G_SW(); if (unlikely(g_count > n)) { G_LOG(CRIT, "%s(): bug: g_count=%u > n=%u\n", __func__, g_count, n); } } ```
/content/code_sandbox/lib/fib.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
11,048
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <rte_byteorder.h> #include <rte_errno.h> #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN #include <rte_memcpy.h> #endif #include "gatekeeper_main.h" #include "gatekeeper_rib.h" static int __read_addr(uint8_t length, rib_address_t *cpu_addr, const uint8_t *address) { if (unlikely(address == NULL)) { *cpu_addr = 0; return 0; } switch (length) { case 32: *cpu_addr = rte_be_to_cpu_32(*((uint32_t *)address)); break; case 128: { #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN uint64_t *dst = (uint64_t *)cpu_addr; uint64_t *src = (uint64_t *)address; dst[0] = rte_be_to_cpu_64(src[1]); dst[1] = rte_be_to_cpu_64(src[0]); #else /* RTE_BIG_ENDIAN */ RTE_BUILD_BUG_ON(sizeof(*cpu_addr) != sizeof(uint128_t)); rte_mov128((uint8_t *)cpu_addr, address); #endif break; } default: G_LOG(ERR, "%s(): length=%u is not implemented\n", __func__, length); return -EINVAL; } return 0; } static inline int read_addr(const struct rib_head *rib, rib_address_t *cpu_addr, const uint8_t *address) { return __read_addr(rib->max_length, cpu_addr, address); } static int __write_addr(uint8_t length, uint8_t *address, rib_address_t cpu_addr) { switch (length) { case 32: *((uint32_t *)address) = rte_cpu_to_be_32(cpu_addr); break; case 128: { #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN uint64_t *dst = (uint64_t *)address; uint64_t *src = (uint64_t *)&cpu_addr; dst[0] = rte_cpu_to_be_64(src[1]); dst[1] = rte_cpu_to_be_64(src[0]); #else /* RTE_BIG_ENDIAN */ RTE_BUILD_BUG_ON(sizeof(cpu_addr) != sizeof(uint128_t)); rte_mov128(address, (uint8_t *)&cpu_addr); #endif break; } default: G_LOG(ERR, "%s(): length=%u is not 
implemented\n", __func__, length); return -EINVAL; } return 0; } static inline int write_addr(const struct rib_head *rib, uint8_t *address, rib_address_t cpu_addr) { return __write_addr(rib->max_length, address, cpu_addr); } int rib_create(struct rib_head *rib, const char *name, int socket_id, uint8_t max_length, uint32_t max_rules) { rib_address_t dummy; int ret; unsigned int n; if (unlikely(max_length > RIB_MAX_ADDRESS_LENGTH)) { G_LOG(ERR, "%s(): max_length=%u is greater than RIB_MAX_ADDRESS_LENGTH=%i\n", __func__, max_length, RIB_MAX_ADDRESS_LENGTH); return -EINVAL; } if (unlikely((max_length & 0x7) > 0)) { G_LOG(ERR, "%s(): max_length=%u is not a multiple of 8\n", __func__, max_length); return -EINVAL; } ret = __read_addr(max_length, &dummy, (const uint8_t *)&dummy); if (unlikely(ret < 0)) return ret; ret = __write_addr(max_length, (uint8_t *)&dummy, 0); if (unlikely(ret < 0)) return ret; memset(rib, 0, sizeof(*rib)); rib->max_length = max_length; /* * Number of nodes needed to store a max-length prefix. * Adding (RIB_MAX_PREFIX_BITS - 1) is equivalent to rouding up * the result since it's an integer division. */ n = (max_length + RIB_MAX_PREFIX_BITS - 1) / RIB_MAX_PREFIX_BITS; /* * rib_add() needs at most a new internal node when adding * a prefix to the RIB. */ n++; /* * Loose upper bound on the number of nodes needed to have * @max_rules rules. */ n *= max_rules; rib->mp_nodes = rte_mempool_create(name, n, sizeof(struct rib_node), 0, 0, NULL, NULL, NULL, NULL, socket_id, /* Save memory; struct rib_node is small. */ MEMPOOL_F_NO_SPREAD | MEMPOOL_F_NO_CACHE_ALIGN | /* No synchronization; single writer. */ MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET | /* Nodes are not used for I/O. 
*/ MEMPOOL_F_NO_IOVA_CONTIG); if (unlikely(rib->mp_nodes == NULL)) { ret = rte_errno; G_LOG(ERR, "%s(): cannot create memory pool (errno=%i): %s\n", __func__, ret, rte_strerror(ret)); return -ret; } return 0; } void rib_free(struct rib_head *rib) { rib->root_node.has_nh = false; rib->root_node.branch[false] = NULL; rib->root_node.branch[true] = NULL; rib->version++; rte_mempool_free(rib->mp_nodes); rib->mp_nodes = NULL; } static inline void info_init(struct rib_node_info *info, const struct rib_head *rib) { info->haddr_matched = 0; info->haddr_mask = 0; info->depth = 0; info->missing_bits = rib->max_length; } static rib_address_t lshift(rib_address_t x, uint8_t count) { RTE_BUILD_BUG_ON((typeof(count))-1 < RIB_MAX_ADDRESS_LENGTH); if (unlikely(count > RIB_MAX_ADDRESS_LENGTH)) { G_LOG(CRIT, "%s(): bug: count == %i is greater than %i\n", __func__, count, RIB_MAX_ADDRESS_LENGTH); count = RIB_MAX_ADDRESS_LENGTH; } /* * The result of the left shift operator (i.e. <<) is undefined if * the right operand is negative, or greater than or equal to * the number of bits in the type of the left expression. */ if (unlikely(count == RIB_MAX_ADDRESS_LENGTH)) return 0; return x << count; } static inline rib_address_t n_one_bits(uint8_t n) { return lshift(1, n) - 1; } static void info_update(struct rib_node_info *info, const struct rib_node *cur_node) { rib_address_t lsb_mask; /* Mask for the least-significant bits. */ /* Update @info->missing_bits. */ info->missing_bits -= cur_node->matched_bits; RTE_VERIFY(info->missing_bits >= 0); /* Update @info->depth. */ info->depth += cur_node->matched_bits; /* Update @info->haddr_mask. */ lsb_mask = n_one_bits(cur_node->matched_bits); info->haddr_mask |= lshift(lsb_mask, info->missing_bits); /* Update @info->haddr_matched. 
*/ RTE_VERIFY((cur_node->pfx_bits & ~lsb_mask) == 0); info->haddr_matched |= lshift(cur_node->pfx_bits, info->missing_bits); } static inline bool info_haddr_matches(const struct rib_node_info *info, rib_address_t haddr) { return (haddr & info->haddr_mask) == info->haddr_matched; } static inline bool test_bit_n(rib_address_t haddr, uint8_t bit) { return !!(haddr & lshift(1, bit)); } static int next_bit(const struct rib_node_info *info, rib_address_t haddr) { if (unlikely(info->missing_bits <= 0)) { G_LOG(CRIT, "%s(): bug: missing_bits == %i is not positive\n", __func__, info->missing_bits); return -EINVAL; } return test_bit_n(haddr, info->missing_bits - 1); } static inline const struct rib_node * next_node(const struct rib_node *cur_node, const struct rib_node_info *info, rib_address_t haddr) { int ret = next_bit(info, haddr); if (unlikely(ret < 0)) return NULL; return cur_node->branch[ret]; } int rib_lookup(const struct rib_head *rib, const uint8_t *address, uint32_t *pnext_hop) { rib_address_t haddr; int ret = read_addr(rib, &haddr, address); bool has_nh = false; uint32_t next_hop; struct rib_node_info info; const struct rib_node *cur_node; if (unlikely(ret < 0)) return ret; info_init(&info, rib); cur_node = &rib->root_node; do { info_update(&info, cur_node); if (!info_haddr_matches(&info, haddr)) break; /* One more match. 
*/ if (cur_node->has_nh) { has_nh = true; next_hop = cur_node->next_hop; } if (info.missing_bits == 0) { RTE_VERIFY(cur_node->branch[false] == NULL); RTE_VERIFY(cur_node->branch[true] == NULL); break; } cur_node = next_node(cur_node, &info, haddr); } while (cur_node != NULL); if (has_nh) { *pnext_hop = next_hop; return 0; } return -ENOENT; } int rib_is_rule_present(const struct rib_head *rib, const uint8_t *address, uint8_t depth, uint32_t *pnext_hop) { rib_address_t haddr; int ret; struct rib_node_info info; const struct rib_node *cur_node; if (unlikely(depth > rib->max_length)) return -EINVAL; ret = read_addr(rib, &haddr, address); if (unlikely(ret < 0)) return ret; /* * There is no need to mask @haddr because it is always accessed * within its mask. */ info_init(&info, rib); cur_node = &rib->root_node; do { info_update(&info, cur_node); if (info.depth > depth || !info_haddr_matches(&info, haddr)) break; /* One more match. */ if (info.depth == depth) { if (cur_node->has_nh) { *pnext_hop = cur_node->next_hop; return 1; } break; } cur_node = next_node(cur_node, &info, haddr); } while (cur_node != NULL); return 0; } static struct rib_node * zalloc_node(const struct rib_head *rib) { struct rib_node *new_node; int ret = rte_mempool_get(rib->mp_nodes, (void **)&new_node); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to allocate a node (errno=%i): %s\n", __func__, -ret, rte_strerror(-ret)); return NULL; } return memset(new_node, 0, sizeof(*new_node)); } static int split_cur_node(const struct rib_head *rib, struct rib_node **anchor_cur_node, const struct rib_node_info *info, rib_address_t haddr, uint8_t depth) { struct rib_node *new_node, *old_node; rib_prefix_bits_t new_prefix, first_mismatch_bit; int missing_bits, testing_bits, mismatch_bits; /* Create a new node for the split. */ new_node = zalloc_node(rib); if (unlikely(new_node == NULL)) return -ENOMEM; /* * Find the prefix of @new_node. 
*/ RTE_BUILD_BUG_ON(sizeof(haddr) != sizeof(uint128_t)); RTE_BUILD_BUG_ON(sizeof(new_prefix) != sizeof(uint64_t)); old_node = *anchor_cur_node; missing_bits = rib->max_length - RTE_MIN(info->depth, depth); testing_bits = info->depth <= depth ? old_node->matched_bits : depth - (info->depth - old_node->matched_bits); if (unlikely(testing_bits < 1 || old_node->matched_bits < testing_bits)) { G_LOG(CRIT, "%s(): bug: testing_bits == %i must be in [1, %i]\n", __func__, testing_bits, old_node->matched_bits); goto bug; } new_prefix = (haddr >> missing_bits) & n_one_bits(testing_bits); first_mismatch_bit = rte_align64prevpow2(new_prefix ^ (old_node->pfx_bits >> (old_node->matched_bits - testing_bits))); mismatch_bits = first_mismatch_bit == 0 ? 0 : rte_bsf64(first_mismatch_bit) + 1; if (unlikely(testing_bits <= mismatch_bits)) { G_LOG(CRIT, "%s(): bug: there should be at least one matched bit; testing_bits == %i and mismatch_bits == %i\n", __func__, testing_bits, mismatch_bits); goto bug; } new_node->pfx_bits = new_prefix >> mismatch_bits; new_node->matched_bits = testing_bits - mismatch_bits; /* Update the prefix of the old node. */ if (unlikely(old_node->matched_bits <= new_node->matched_bits)) { G_LOG(CRIT, "%s(): bug: over matching; old_node->matched_bits == %i, testing_bits == %i, mismatch_bits == %i\n", __func__, old_node->matched_bits, testing_bits, mismatch_bits); goto bug; } old_node->matched_bits -= new_node->matched_bits; old_node->pfx_bits &= n_one_bits(old_node->matched_bits); /* Link the old and new nodes. 
*/ new_node->branch[ test_bit_n(old_node->pfx_bits, old_node->matched_bits - 1)] = old_node; *anchor_cur_node = new_node; return 0; bug: rte_mempool_put(rib->mp_nodes, new_node); return -EFAULT; } static inline struct rib_node ** next_p_node(struct rib_node **anchor_cur_node, const struct rib_node_info *info, rib_address_t haddr) { int ret = next_bit(info, haddr); if (unlikely(ret < 0)) return NULL; return &(*anchor_cur_node)->branch[ret]; } static void free_tail(const struct rib_head *rib, struct rib_node *cur_node) { while (cur_node != NULL) { struct rib_node *n_node = cur_node->branch[false]; if (n_node == NULL) n_node = cur_node->branch[true]; rte_mempool_put(rib->mp_nodes, cur_node); cur_node = n_node; } } /* * If successful, @p_anchor_cur_node is updated to refer to * the last node of the tail. */ static int add_haddr_tail(const struct rib_head *rib, struct rib_node ***p_anchor_cur_node, struct rib_node_info *info, const rib_address_t haddr, const uint8_t depth) { struct rib_node **saved_anchor_cur_node = *p_anchor_cur_node; struct rib_node **anchor_cur_node = saved_anchor_cur_node; struct rib_node **prv_anchor_cur_node = NULL; int ret; if (unlikely(p_anchor_cur_node == NULL || *p_anchor_cur_node == NULL || **p_anchor_cur_node != NULL)) { G_LOG(CRIT, "%s(): bug: no location to save tail\n", __func__); return -EINVAL; } if (unlikely(info->depth >= depth)) { G_LOG(CRIT, "%s(): bug: invalid call, info->depth == %i and depth == %i\n", __func__, info->depth, depth); return -EINVAL; } do { struct rib_node *new_node = zalloc_node(rib); if (unlikely(new_node == NULL)) { ret = -ENOMEM; goto error; } *anchor_cur_node = new_node; new_node->matched_bits = RTE_MIN(depth - info->depth, RIB_MAX_PREFIX_BITS); new_node->pfx_bits = (haddr >> (info->missing_bits - new_node->matched_bits)) & n_one_bits(new_node->matched_bits); info_update(info, new_node); prv_anchor_cur_node = anchor_cur_node; if (info->depth == depth) break; anchor_cur_node = next_p_node(anchor_cur_node, info, 
haddr); if (unlikely(anchor_cur_node == NULL)) { ret = -EFAULT; goto error; } } while (info->depth < depth); if (unlikely(info->depth != depth)) { G_LOG(CRIT, "%s(): bug: something went wrong, info->depth == %i and depth == %i\n", __func__, info->depth, depth); ret = -EFAULT; goto error; } *p_anchor_cur_node = prv_anchor_cur_node; return 0; error: free_tail(rib, *saved_anchor_cur_node); *saved_anchor_cur_node = NULL; return ret; } int rib_add(struct rib_head *rib, const uint8_t *address, const uint8_t depth, const uint32_t next_hop) { rib_address_t haddr; int ret; struct rib_node_info info; struct rib_node *fake_root, **anchor_cur_node; if (unlikely(depth > rib->max_length)) return -EINVAL; ret = read_addr(rib, &haddr, address); if (unlikely(ret < 0)) return ret; /* * There is no need to mask @haddr because it is always accessed * within its mask. */ info_init(&info, rib); /* @fake_root is only used to bootstrap the loop. */ fake_root = &rib->root_node; anchor_cur_node = &fake_root; do { struct rib_node_info prv_info = info; info_update(&info, *anchor_cur_node); if (info.depth > depth || !info_haddr_matches(&info, haddr)) { /* * If execution is here, @haddr and * @(*anchor_cur_node)->pfx_bits match at least * the most significant bit of * @(*anchor_cur_node)->pfx_bits. * * Proof: * * If @*anchor_cur_node points to @rib->root_node, * the test in this if statement is false, * so the execution cannot be here. Therefore, * if the execution is here, @*anchor_cur_node must * point to a node that is not @rib->root_node. * * All nodes but @rib->root_node make * @(*anchor_cur_node)->matched_bits > 0 true. * Therefore, whenever next_p_node() returns, * @*anchor_cur_node matches at least the most * significant bit of @(*anchor_cur_node)->pfx_bits * with @haddr. * * Since that @*anchor_cur_node does not point to * @rib->root_node, the loop has reached next_p_node() * at least once. 
*/ ret = split_cur_node(rib, anchor_cur_node, &info, haddr, depth); if (unlikely(ret < 0)) return ret; /* * If there is an error after here, the newly split * node will be left in @rib, so iterators must * be aware of the change. */ rib->version++; /* Back track to the new node. */ info = prv_info; info_update(&info, *anchor_cur_node); } /* One more match. */ if (info.depth == depth) { if ((*anchor_cur_node)->has_nh) return -EEXIST; goto add_rule; } anchor_cur_node = next_p_node(anchor_cur_node, &info, haddr); if (unlikely(anchor_cur_node == NULL)) return -EFAULT; } while (*anchor_cur_node != NULL); ret = add_haddr_tail(rib, &anchor_cur_node, &info, haddr, depth); if (unlikely(ret < 0)) return ret; add_rule: (*anchor_cur_node)->has_nh = true; (*anchor_cur_node)->next_hop = next_hop; rib->version++; return 0; } struct rib_delete_state { /* Parameters of rib_delete(). */ struct rib_head *rib; rib_address_t haddr; uint8_t depth; /* Long jump to unwind the recursion. */ jmp_buf jmp_end; }; static inline bool is_node_root(struct rib_head *rib, struct rib_node *cur_node) { return cur_node == &rib->root_node; } static unsigned int count_children(struct rib_node *cur_node, struct rib_node ***p_anchor_of_single_child) { if (cur_node->branch[false] != NULL) { if (cur_node->branch[true] != NULL) { *p_anchor_of_single_child = NULL; return 2; } *p_anchor_of_single_child = &cur_node->branch[false]; return 1; } if (cur_node->branch[true] != NULL) { *p_anchor_of_single_child = &cur_node->branch[true]; return 1; } *p_anchor_of_single_child = NULL; return 0; } static void __rib_delete(struct rib_delete_state *state, struct rib_node **anchor_cur_node, struct rib_node_info info) { struct rib_node **anchor_of_single_child; unsigned int children; if (*anchor_cur_node == NULL) longjmp(state->jmp_end, -ENOENT); info_update(&info, *anchor_cur_node); if (info.depth > state->depth || !info_haddr_matches(&info, state->haddr)) longjmp(state->jmp_end, -ENOENT); /* One more match. 
*/ if (info.depth == state->depth) { if (!(*anchor_cur_node)->has_nh) longjmp(state->jmp_end, -ENOENT); (*anchor_cur_node)->has_nh = false; } else { __rib_delete(state, next_p_node(anchor_cur_node, &info, state->haddr), info); } /* * Try to merge @(*anchor_cur_node) downstream. */ if (is_node_root(state->rib, *anchor_cur_node) || (*anchor_cur_node)->has_nh) goto done; children = count_children(*anchor_cur_node, &anchor_of_single_child); if (children >= 2) goto done; if (children == 0) { /* @(*anchor_cur_node) is a leaf node. */ rte_mempool_put(state->rib->mp_nodes, *anchor_cur_node); *anchor_cur_node = NULL; return; /* Allow further compression of @state->rib. */ } /* @children == 1 */ if ((*anchor_cur_node)->matched_bits + (*anchor_of_single_child)->matched_bits > RIB_MAX_PREFIX_BITS) { /* @(*anchor_cur_node) cannot merge downstream, try upstream. */ return; } (*anchor_of_single_child)->pfx_bits |= (*anchor_cur_node)->pfx_bits << (*anchor_of_single_child)->matched_bits; (*anchor_of_single_child)->matched_bits += (*anchor_cur_node)->matched_bits; rte_mempool_put(state->rib->mp_nodes, *anchor_cur_node); *anchor_cur_node = *anchor_of_single_child; return; /* Allow further compression of @state->rib. */ done: longjmp(state->jmp_end, true); } int rib_delete(struct rib_head *rib, const uint8_t *address, uint8_t depth) { struct rib_delete_state state; int ret; if (unlikely(depth > rib->max_length)) return -EINVAL; ret = read_addr(rib, &state.haddr, address); if (unlikely(ret < 0)) return ret; /* * There is no need to mask @state.haddr because it is always accessed * within its mask. 
*/ state.rib = rib; state.depth = depth; ret = setjmp(state.jmp_end); if (ret == 0) { struct rib_node *fake_root = &rib->root_node; struct rib_node_info info; info_init(&info, rib); __rib_delete(&state, &fake_root, info); goto done; } if (ret < 0) return ret; done: rib->version++; return 0; } static inline void mask_haddr(const struct rib_head *rib, rib_address_t *haddr, uint8_t depth) { *haddr &= ~n_one_bits(rib->max_length - depth); } static inline bool is_haddr_in_scope(const struct rib_node_info *info, rib_address_t haddr, uint8_t depth) { rib_address_t shorter_mask = lshift(info->haddr_mask, info->depth - depth); RTE_VERIFY(depth <= info->depth); return !((haddr ^ info->haddr_matched) & shorter_mask); } static void scope_longer_iterator(struct rib_longer_iterator_state *state) { struct rib_node_info prv_info, info; const struct rib_node *cur_node; info_init(&info, state->rib); cur_node = &state->rib->root_node; do { prv_info = info; info_update(&info, cur_node); if (info.depth >= state->min_depth) { if (!is_haddr_in_scope(&info, state->next_address, state->min_depth)) break; /* Found the scope. */ goto scope; } /* It is not deep enough into the prefix tree. */ if (!info_haddr_matches(&info, state->next_address)) break; cur_node = next_node(cur_node, &info, state->next_address); } while (cur_node != NULL); /* * There is no prefix with @state->min_depth for * @state->next_address. 
*/ cur_node = NULL; scope: state->version = state->rib->version; state->start_node = cur_node; state->start_info = prv_info; } int rib_longer_iterator_state_init(struct rib_longer_iterator_state *state, const struct rib_head *rib, const uint8_t *address, uint8_t depth, bool stop_at_children) { int ret; if (unlikely(depth > rib->max_length)) return -EINVAL; ret = read_addr(rib, &state->next_address, address); if (unlikely(ret < 0)) return ret; /* * It is necessary to mask @state->next_address because * the iterator compares it with the prefixes of the branches of * the RIB to avoid previously visited prefixes. And these comparisons * are done without masking. */ mask_haddr(rib, &state->next_address, depth); state->rib = rib; state->min_depth = depth; state->stop_at_children = stop_at_children; state->next_depth = depth; state->has_ended = false; scope_longer_iterator(state); return 0; } static void __rib_longer_iterator_next(struct rib_longer_iterator_state *state, const struct rib_node *cur_node, struct rib_node_info info) { if (cur_node == NULL) return; info_update(&info, cur_node); if (!state->ignore_next_address) { /* * Invariants: * * 1. Only the body of this if statement sets * @state->ignore_next_address true. * * Proof: Inspect the code. * * 2. When @state->ignore_next_address is set true, * no recursive call already in the stack is affected, * that is, changes its execution. * * Proof: * * Assume that @state->ignore_next_address is false; * otherwise setting it can not affect recursive calls * already in the stack. * * All recursive calls already in the stack are either * (A) within the body of this if statement or (B) after it. * * (A) The recursive calls already in the stack and within * the body of this if statement do not test * @state->ignore_next_address again; inspect the code. * * (B) The recursive calls already in the stack and after * the body of this if statement have no reference to * @state->ignore_next_address thanks to invariant 1. 
* * Notice that a recursive call already in the stack may * make another recursive call that will be affected, but * this is a *new* recursive call. */ if (info.depth < state->next_depth && info_haddr_matches(&info, state->next_address)) { int next_b = next_bit(&info, state->next_address); if (unlikely(next_b < 0)) return; __rib_longer_iterator_next(state, cur_node->branch[next_b], info); /* * There may or may not be a rule for the prefix * @state->next_address/@state->next_depth, but * one needs to find the immediately greater prefix. * * One does not need to check @cur_node->has_nh * because (info.depth < state->next_depth) means * that the rule of @cur_node is before the prefix * @state->next_address/@state->next_depth. */ if (next_b == 0) { __rib_longer_iterator_next(state, cur_node->branch[true], info); } return; } /* * Notice that if @cur_node corresponds to the prefix * @state->next_address/@state->next_depth, but * @cur_node->has_nh is false because the prefix was removed, * @state->found_return can only become true in another * point of the recursion. * * This is why @state->ignore_next_address and * @state->found_return may have different values. */ state->ignore_next_address = true; if (info.haddr_matched < state->next_address) { /* * There is no rule for the prefix * @state->next_address/@state->next_depth or * after it on this branch. */ return; } } /* Any prefix from here on is a match. */ if (cur_node->has_nh) { if (state->found_return) { state->next_address = info.haddr_matched; state->next_depth = info.depth; longjmp(state->jmp_found, true); } state->found_return = true; RTE_VERIFY(write_addr(state->rib, (uint8_t *)&state->rule->address_no, info.haddr_matched) == 0); state->rule->depth = info.depth; state->rule->next_hop = cur_node->next_hop; /* Still missing the next rule after the found rule. */ /* * The test info.depth > state->min_depth is needed to * avoid only listing the parent prefix if it exists. 
*/ if (state->stop_at_children && info.depth > state->min_depth) return; } __rib_longer_iterator_next(state, cur_node->branch[false], info); __rib_longer_iterator_next(state, cur_node->branch[true], info); } int rib_longer_iterator_next(struct rib_longer_iterator_state *state, struct rib_iterator_rule *rule) { if (unlikely(state->has_ended)) return -ENOENT; /* Set fields used during recursion. */ state->ignore_next_address = false; state->found_return = false; state->rule = rule; if (setjmp(state->jmp_found) == 0) { if (state->version != state->rib->version) scope_longer_iterator(state); /* Start recursion. */ __rib_longer_iterator_next(state, state->start_node, state->start_info); state->has_ended = true; if (state->found_return) { /* It found the last rule. */ goto found; } return -ENOENT; } found: /* A rule was found. */ return 0; } static inline bool is_within_scope(const struct rib_head *rib, rib_address_t haddr, uint8_t depth, rib_address_t test_haddr) { return !((haddr ^ test_haddr) & lshift(n_one_bits(depth), rib->max_length - depth)); } int __rib_longer_iterator_seek(struct rib_longer_iterator_state *state, rib_address_t haddr, uint8_t depth) { if (unlikely(depth > state->rib->max_length)) return -EINVAL; if (unlikely(depth < state->min_depth)) return -EINVAL; if (unlikely(!is_within_scope(state->rib, state->next_address, state->min_depth, haddr))) return -EINVAL; /* * It is necessary to mask @state->next_address because * the iterator compares it with the prefixes of the branches of * the RIB to avoid previously visited prefixes. And these comparisons * are done without masking. 
*/ mask_haddr(state->rib, &haddr, depth); state->next_address = haddr; state->next_depth = depth; state->has_ended = false; return 0; } int rib_longer_iterator_seek(struct rib_longer_iterator_state *state, const uint8_t *address, uint8_t depth) { rib_address_t haddr; int ret = read_addr(state->rib, &haddr, address); if (unlikely(ret < 0)) return ret; return __rib_longer_iterator_seek(state, haddr, depth); } int rib_longer_iterator_skip_branch(struct rib_longer_iterator_state *state, const uint8_t *address, uint8_t depth) { rib_address_t haddr, haddr2, backup_next_address; uint8_t backup_next_depth; bool backup_has_ended; struct rib_iterator_rule rule; int ret; if (unlikely(depth > state->rib->max_length)) return -EINVAL; ret = read_addr(state->rib, &haddr, address); if (unlikely(ret < 0)) return ret; /* Obtain the last possible prefix of the branch. */ haddr |= n_one_bits(state->rib->max_length - depth); backup_next_address = state->next_address; backup_next_depth = state->next_depth; backup_has_ended = state->has_ended; ret = __rib_longer_iterator_seek(state, haddr, state->rib->max_length); if (unlikely(ret < 0)) { G_LOG(CRIT, "%s(): bug: failed to seek (errno=%i): %s\n", __func__, -ret, strerror(-ret)); return -EFAULT; } ret = rib_longer_iterator_next(state, &rule); if (ret == -ENOENT) { /* * There's no more rules after the prefix @haddr/@depth, so * there is nothing else to do. */ return 0; } if (unlikely(ret < 0)) { G_LOG(CRIT, "%s(): bug: failed to iterate forward (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto restore; } ret = read_addr(state->rib, &haddr2, (uint8_t *)&rule.address_no); if (unlikely(ret < 0)) { G_LOG(CRIT, "%s(): bug: cannot read rule (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto restore; } if (unlikely(rule.depth == state->rib->max_length && haddr == haddr2)) { /* * The last possible prefix of the branch is present in @rib. * Therefore, the iterator has already skipped the branch. 
*/ return 0; } /* @rule has the first rule after the branch, so go back to it. */ ret = __rib_longer_iterator_seek(state, haddr2, rule.depth); if (unlikely(ret < 0)) { G_LOG(CRIT, "%s(): bug: cannot go back (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto restore; } return 0; restore: state->next_address = backup_next_address; state->next_depth = backup_next_depth; state->has_ended = backup_has_ended; return -EFAULT; } int rib_shorter_iterator_state_init(struct rib_shorter_iterator_state *state, const struct rib_head *rib, const uint8_t *address, uint8_t depth) { int ret; if (unlikely(depth > rib->max_length)) return -EINVAL; ret = read_addr(rib, &state->haddr, address); if (unlikely(ret < 0)) return ret; /* * There is no need to mask @haddr because it is always accessed * within its mask. */ state->rib = rib; state->version = rib->version; state->cur_node = &rib->root_node; info_init(&state->info, rib); state->depth = depth; state->has_ended = false; return 0; } int rib_shorter_iterator_next(struct rib_shorter_iterator_state *state, struct rib_iterator_rule *rule) { bool found_return = false; if (unlikely(state->has_ended)) return -ENOENT; if (unlikely(state->version != state->rib->version)) return -EFAULT; do { info_update(&state->info, state->cur_node); if (state->info.depth > state->depth || !info_haddr_matches(&state->info, state->haddr)) goto end; /* One more match. */ if (state->cur_node->has_nh) { RTE_VERIFY(write_addr(state->rib, (uint8_t *)&rule->address_no, state->info.haddr_matched) == 0); rule->depth = state->info.depth; rule->next_hop = state->cur_node->next_hop; found_return = true; } if (state->info.depth == state->depth) goto end; state->cur_node = next_node(state->cur_node, &state->info, state->haddr); if (state->cur_node == NULL) goto end; } while (!found_return); goto out; end: state->has_ended = true; out: return found_return ? 0 : -ENOENT; } ```
/content/code_sandbox/lib/rib.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
8,662
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <string.h> #include <memblock.h> struct memblock_head * memblock_alloc_block(size_t size) { const size_t head_length = memblock_align(sizeof(struct memblock_head)); struct memblock_head *block; /* Avoid wasting bytes that wouldn't be used due to misalignment. */ size = memblock_align(size); block = rte_malloc("memblock", head_length + size, 0); if (unlikely(block == NULL)) return NULL; block->next = ((char *)block) + head_length; block->end = block->next + size; return block; } void memblock_free_all(struct memblock_head *head) { const size_t head_length = memblock_align(sizeof(struct memblock_head)); head->next = ((char *)head) + head_length; } void * memblock_alloc(struct memblock_head *head, size_t size) { char *block; char *next; if (unlikely(size == 0)) return NULL; size = memblock_align(size); block = head->next; next = block + size; if (unlikely(next > head->end)) return NULL; head->next = next; return block; } void * memblock_calloc(struct memblock_head *head, size_t num, size_t size) { size_t tot_size = num * size; void *ret = memblock_alloc(head, tot_size); if (unlikely(ret == NULL)) return NULL; return memset(ret, 0, tot_size); } ```
/content/code_sandbox/lib/memblock.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
409
```c /* * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. * * This library is modelled strictly after Ralf S. Engelschalls article at * path_to_url So most of the credit must * go to Ralf S. Engelschall <rse@engelschall.com>. 
*/ #include "coro.h" #include <stddef.h> #include <string.h> /*****************************************************************************/ /* ucontext/setjmp/asm backends */ /*****************************************************************************/ #if defined (CORO_UCONTEXT) || defined (CORO_SJLJ) || defined (CORO_LOSER) || defined (CORO_LINUX) || defined (CORO_IRIX) || defined (CORO_ASM) # ifdef CORO_UCONTEXT # include <stddef.h> # endif # if !defined(STACK_ADJUST_PTR) # ifdef __sgi /* IRIX is decidedly NON-unix */ # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) # define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8) # elif (defined (__i386__) && defined (CORO_LINUX)) || (defined (_M_IX86) && defined (CORO_LOSER)) # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss)) # define STACK_ADJUST_SIZE(sp,ss) (ss) # elif (defined (__amd64__) && defined (CORO_LINUX)) || ((defined (_M_AMD64) || defined (_M_IA64)) && defined (CORO_LOSER)) # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) # define STACK_ADJUST_SIZE(sp,ss) (ss) # else # define STACK_ADJUST_PTR(sp,ss) (sp) # define STACK_ADJUST_SIZE(sp,ss) (ss) # endif # endif # include <stdlib.h> # ifdef CORO_SJLJ # include <stdio.h> # include <signal.h> # include <unistd.h> # endif static coro_func coro_init_func; static void *coro_init_arg; static coro_context *new_coro, *create_coro; static void coro_init (void) { volatile coro_func func = coro_init_func; volatile void *arg = coro_init_arg; coro_transfer (new_coro, create_coro); #if defined (__GCC_HAVE_DWARF2_CFI_ASM) && defined (__amd64) /*asm (".cfi_startproc");*/ /*asm (".cfi_undefined rip");*/ #endif #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" func ((void *)arg); #pragma GCC diagnostic pop #if __GCC_HAVE_DWARF2_CFI_ASM && __amd64 /*asm (".cfi_endproc");*/ #endif /* the new coro returned. bad. 
just abort() for now */ abort (); } # ifdef CORO_SJLJ static volatile int trampoline_done; /* trampoline signal handler */ static void trampoline (int sig) { if (coro_setjmp (new_coro->env)) coro_init (); /* start it */ else trampoline_done = 1; } # endif # if CORO_ASM #if (defined __arm__) && \ (defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ \ || __ARM_ARCH == 7) #define CORO_ARM 1 #endif #if defined (_WIN32) || defined (__CYGWIN__) #define CORO_WIN_TIB 1 #endif asm ( "\t.text\n" #if defined (_WIN32) || defined (__CYGWIN__) "\t.globl _coro_transfer\n" "_coro_transfer:\n" #else "\t.globl coro_transfer\n" "coro_transfer:\n" #endif /* windows, of course, gives a shit on the amd64 ABI and uses different registers */ /* path_to_url */ #ifdef __amd64 #if defined (_WIN32) || defined (__CYGWIN__) #define NUM_SAVED 29 "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */ "\tmovaps %xmm6, (%rsp)\n" "\tmovaps %xmm7, 16(%rsp)\n" "\tmovaps %xmm8, 32(%rsp)\n" "\tmovaps %xmm9, 48(%rsp)\n" "\tmovaps %xmm10, 64(%rsp)\n" "\tmovaps %xmm11, 80(%rsp)\n" "\tmovaps %xmm12, 96(%rsp)\n" "\tmovaps %xmm13, 112(%rsp)\n" "\tmovaps %xmm14, 128(%rsp)\n" "\tmovaps %xmm15, 144(%rsp)\n" "\tpushq %rsi\n" "\tpushq %rdi\n" "\tpushq %rbp\n" "\tpushq %rbx\n" "\tpushq %r12\n" "\tpushq %r13\n" "\tpushq %r14\n" "\tpushq %r15\n" #if CORO_WIN_TIB "\tpushq %fs:0x0\n" "\tpushq %fs:0x8\n" "\tpushq %fs:0xc\n" #endif "\tmovq %rsp, (%rcx)\n" "\tmovq (%rdx), %rsp\n" #if CORO_WIN_TIB "\tpopq %fs:0xc\n" "\tpopq %fs:0x8\n" "\tpopq %fs:0x0\n" #endif "\tpopq %r15\n" "\tpopq %r14\n" "\tpopq %r13\n" "\tpopq %r12\n" "\tpopq %rbx\n" "\tpopq %rbp\n" "\tpopq %rdi\n" "\tpopq %rsi\n" "\tmovaps (%rsp), %xmm6\n" "\tmovaps 16(%rsp), %xmm7\n" "\tmovaps 32(%rsp), %xmm8\n" "\tmovaps 48(%rsp), %xmm9\n" "\tmovaps 64(%rsp), %xmm10\n" "\tmovaps 80(%rsp), %xmm11\n" "\tmovaps 96(%rsp), %xmm12\n" "\tmovaps 112(%rsp), %xmm13\n" "\tmovaps 128(%rsp), %xmm14\n" "\tmovaps 
144(%rsp), %xmm15\n" "\taddq $168, %rsp\n" #else #define NUM_SAVED 6 "\tpushq %rbp\n" "\tpushq %rbx\n" "\tpushq %r12\n" "\tpushq %r13\n" "\tpushq %r14\n" "\tpushq %r15\n" "\tmovq %rsp, (%rdi)\n" "\tmovq (%rsi), %rsp\n" "\tpopq %r15\n" "\tpopq %r14\n" "\tpopq %r13\n" "\tpopq %r12\n" "\tpopq %rbx\n" "\tpopq %rbp\n" #endif "\tpopq %rcx\n" "\tjmpq *%rcx\n" #elif __i386__ #define NUM_SAVED 4 "\tpushl %ebp\n" "\tpushl %ebx\n" "\tpushl %esi\n" "\tpushl %edi\n" #if CORO_WIN_TIB #undef NUM_SAVED #define NUM_SAVED 7 "\tpushl %fs:0\n" "\tpushl %fs:4\n" "\tpushl %fs:8\n" #endif "\tmovl %esp, (%eax)\n" "\tmovl (%edx), %esp\n" #if CORO_WIN_TIB "\tpopl %fs:8\n" "\tpopl %fs:4\n" "\tpopl %fs:0\n" #endif "\tpopl %edi\n" "\tpopl %esi\n" "\tpopl %ebx\n" "\tpopl %ebp\n" "\tpopl %ecx\n" "\tjmpl *%ecx\n" #elif CORO_ARM /* untested, what about thumb, neon, iwmmxt? */ #if __ARM_PCS_VFP "\tvpush {d8-d15}\n" #define NUM_SAVED (9 + 8 * 2) #else #define NUM_SAVED 9 #endif "\tpush {r4-r11,lr}\n" "\tstr sp, [r0]\n" "\tldr sp, [r1]\n" "\tpop {r4-r11,lr}\n" #if __ARM_PCS_VFP "\tvpop {d8-d15}\n" #endif "\tmov r15, lr\n" #elif __mips__ && 0 /* untested, 32 bit only */ #define NUM_SAVED (12 + 8 * 2) /* TODO: n64/o64, lw=>ld */ "\t.set nomips16\n" "\t.frame $sp,112,$31\n" #if __mips_soft_float "\taddiu $sp,$sp,-44\n" #else "\taddiu $sp,$sp,-112\n" "\ts.d $f30,88($sp)\n" "\ts.d $f28,80($sp)\n" "\ts.d $f26,72($sp)\n" "\ts.d $f24,64($sp)\n" "\ts.d $f22,56($sp)\n" "\ts.d $f20,48($sp)\n" #endif "\tsw $28,40($sp)\n" "\tsw $31,36($sp)\n" "\tsw $fp,32($sp)\n" "\tsw $23,28($sp)\n" "\tsw $22,24($sp)\n" "\tsw $21,20($sp)\n" "\tsw $20,16($sp)\n" "\tsw $19,12($sp)\n" "\tsw $18,8($sp)\n" "\tsw $17,4($sp)\n" "\tsw $16,0($sp)\n" "\tsw $sp,0($4)\n" "\tlw $sp,0($5)\n" #if !__mips_soft_float "\tl.d $f30,88($sp)\n" "\tl.d $f28,80($sp)\n" "\tl.d $f26,72($sp)\n" "\tl.d $f24,64($sp)\n" "\tl.d $f22,56($sp)\n" "\tl.d $f20,48($sp)\n" #endif "\tlw $28,40($sp)\n" "\tlw $31,36($sp)\n" "\tlw $fp,32($sp)\n" "\tlw $23,28($sp)\n" 
"\tlw $22,24($sp)\n" "\tlw $21,20($sp)\n" "\tlw $20,16($sp)\n" "\tlw $19,12($sp)\n" "\tlw $18,8($sp)\n" "\tlw $17,4($sp)\n" "\tlw $16,0($sp)\n" "\tj $31\n" #if __mips_soft_float "\taddiu $sp,$sp,44\n" #else "\taddiu $sp,$sp,112\n" #endif #else #error unsupported architecture #endif ); # endif void coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize) { coro_context nctx; # ifdef CORO_SJLJ stack_t ostk, nstk; struct sigaction osa, nsa; sigset_t nsig, osig; # endif if (!coro) return; coro_init_func = coro; coro_init_arg = arg; new_coro = ctx; create_coro = &nctx; # ifdef CORO_SJLJ /* we use SIGUSR2. first block it, then fiddle with it. */ sigemptyset (&nsig); sigaddset (&nsig, SIGUSR2); sigprocmask (SIG_BLOCK, &nsig, &osig); nsa.sa_handler = trampoline; sigemptyset (&nsa.sa_mask); nsa.sa_flags = SA_ONSTACK; if (sigaction (SIGUSR2, &nsa, &osa)) { perror ("sigaction"); abort (); } /* set the new stack */ nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. 
*/ nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize); nstk.ss_flags = 0; if (sigaltstack (&nstk, &ostk) < 0) { perror ("sigaltstack"); abort (); } trampoline_done = 0; kill (getpid (), SIGUSR2); sigfillset (&nsig); sigdelset (&nsig, SIGUSR2); while (!trampoline_done) sigsuspend (&nsig); sigaltstack (0, &nstk); nstk.ss_flags = SS_DISABLE; if (sigaltstack (&nstk, 0) < 0) perror ("sigaltstack"); sigaltstack (0, &nstk); if (~nstk.ss_flags & SS_DISABLE) abort (); if (~ostk.ss_flags & SS_DISABLE) sigaltstack (&ostk, 0); sigaction (SIGUSR2, &osa, 0); sigprocmask (SIG_SETMASK, &osig, 0); # elif defined (CORO_LOSER) coro_setjmp (ctx->env); #if __CYGWIN__ && __i386__ ctx->env[8] = (long) coro_init; ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long); #elif __CYGWIN__ && __x86_64__ ctx->env[7] = (long) coro_init; ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long); #elif defined __MINGW32__ ctx->env[5] = (long) coro_init; ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long); #elif defined _M_IX86 ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init; ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); #elif defined _M_AMD64 ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init; ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); #elif defined _M_IA64 ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init; ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); #else #error "microsoft libc or architecture not supported" #endif # elif defined (CORO_LINUX) coro_setjmp (ctx->env); #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP) ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init; ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__) ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init; 
ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long); #elif defined (__GNU_LIBRARY__) && defined (__i386__) ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init; ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long); #elif defined (__GNU_LIBRARY__) && defined (__x86_64__) ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init; ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long); #else #error "linux libc or architecture not supported" #endif # elif defined (CORO_IRIX) coro_setjmp (ctx->env, 0); ctx->env[JB_PC] = (__uint64_t)coro_init; ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); # elif CORO_ASM #if defined (__i386__) || defined (__x86_64__) ctx->sp = (void **)(ssize + (char *)sptr); #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" *--ctx->sp = (void *)abort; /* needed for alignment only */ #pragma GCC diagnostic pop *--ctx->sp = (void *)coro_init; #ifdef CORO_WIN_TIB *--ctx->sp = 0; /* ExceptionList */ *--ctx->sp = (char *)sptr + ssize; /* StackBase */ *--ctx->sp = sptr; /* StackLimit */ #endif #elif CORO_ARM /* return address stored in lr register, don't push anything */ #else #error unsupported architecture #endif ctx->sp -= NUM_SAVED; memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED); #if defined (__i386__) || defined (__x86_64__) /* done already */ #elif defined (CORO_ARM) ctx->sp[0] = coro; /* r4 */ ctx->sp[1] = arg; /* r5 */ ctx->sp[8] = (char *)coro_init; /* lr */ #else #error unsupported architecture #endif # elif CORO_UCONTEXT getcontext (&(ctx->uc)); ctx->uc.uc_link = 0; ctx->uc.uc_stack.ss_sp = sptr; ctx->uc.uc_stack.ss_size = (size_t)ssize; ctx->uc.uc_stack.ss_flags = 0; makecontext (&(ctx->uc), (void (*)())coro_init, 0); # endif coro_transfer (create_coro, new_coro); } /*****************************************************************************/ /* pthread backend */ 
/*****************************************************************************/ #elif CORO_PTHREAD /* this mutex will be locked by the running coroutine */ pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER; struct coro_init_args { coro_func func; void *arg; coro_context *self, *main; }; static void * coro_init (void *args_) { struct coro_init_args *args = (struct coro_init_args *)args_; coro_func func = args->func; void *arg = args->arg; coro_transfer (args->self, args->main); func (arg); return 0; } void coro_transfer (coro_context *prev, coro_context *next) { pthread_mutex_lock (&coro_mutex); next->flags = 1; pthread_cond_signal (&next->cv); prev->flags = 0; while (!prev->flags) pthread_cond_wait (&prev->cv, &coro_mutex); if (prev->flags == 2) { pthread_mutex_unlock (&coro_mutex); pthread_cond_destroy (&prev->cv); pthread_detach (pthread_self ()); pthread_exit (0); } pthread_mutex_unlock (&coro_mutex); } void coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize) { static coro_context nctx; static int once; if (!once) { once = 1; pthread_cond_init (&nctx.cv, 0); } pthread_cond_init (&ctx->cv, 0); if (coro) { pthread_attr_t attr; struct coro_init_args args; pthread_t id; args.func = coro; args.arg = arg; args.self = ctx; args.main = &nctx; pthread_attr_init (&attr); #if __UCLIBC__ /* exists, but is borked */ /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/ #elif __CYGWIN__ /* POSIX, not here */ pthread_attr_setstacksize (&attr, (size_t)ssize); #else pthread_attr_setstack (&attr, sptr, (size_t)ssize); #endif pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS); pthread_create (&id, &attr, coro_init, &args); coro_transfer (args.main, args.self); } } void coro_destroy (coro_context *ctx) { pthread_mutex_lock (&coro_mutex); ctx->flags = 2; pthread_cond_signal (&ctx->cv); pthread_mutex_unlock (&coro_mutex); } /*****************************************************************************/ /* fiber backend */ 
/*****************************************************************************/ #elif CORO_FIBER #define WIN32_LEAN_AND_MEAN #if _WIN32_WINNT < 0x0400 #undef _WIN32_WINNT #define _WIN32_WINNT 0x0400 #endif #include <windows.h> VOID CALLBACK coro_init (PVOID arg) { coro_context *ctx = (coro_context *)arg; ctx->coro (ctx->arg); } void coro_transfer (coro_context *prev, coro_context *next) { if (!prev->fiber) { prev->fiber = GetCurrentFiber (); if (prev->fiber == 0 || prev->fiber == (void *)0x1e00) prev->fiber = ConvertThreadToFiber (0); } SwitchToFiber (next->fiber); } void coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize) { ctx->fiber = 0; ctx->coro = coro; ctx->arg = arg; if (!coro) return; ctx->fiber = CreateFiber (ssize, coro_init, ctx); } void coro_destroy (coro_context *ctx) { DeleteFiber (ctx->fiber); } #else #error unsupported backend #endif /*****************************************************************************/ /* stack management */ /*****************************************************************************/ #if CORO_STACKALLOC #include <stdlib.h> #ifndef _WIN32 # include <unistd.h> #endif #ifdef CORO_USE_VALGRIND # include <valgrind/valgrind.h> #endif #ifdef _POSIX_MAPPED_FILES # include <sys/mman.h> # define CORO_MMAP 1 # ifndef MAP_ANONYMOUS # ifdef MAP_ANON # define MAP_ANONYMOUS MAP_ANON # else # undef CORO_MMAP # endif # endif # include <limits.h> #else # undef CORO_MMAP #endif #if _POSIX_MEMORY_PROTECTION # ifndef CORO_GUARDPAGES # define CORO_GUARDPAGES 4 # endif #else # undef CORO_GUARDPAGES #endif #if !CORO_MMAP # undef CORO_GUARDPAGES #endif #if !defined (__i386__) && !defined (__x86_64__) && !defined (__powerpc__) && !defined (__arm__) && !defined (__aarch64__) && !defined (__m68k__) && !defined (__alpha__) && !defined (__mips__) && !defined (__sparc64__) # undef CORO_GUARDPAGES #endif #ifndef CORO_GUARDPAGES # define CORO_GUARDPAGES 0 #endif #ifndef PAGESIZE #if !CORO_MMAP #define PAGESIZE 4096 
#else static size_t coro_pagesize (void) { static size_t pagesize; if (!pagesize) pagesize = sysconf (_SC_PAGESIZE); return pagesize; } #define PAGESIZE coro_pagesize () #endif #endif int coro_stack_alloc (struct coro_stack *stack, unsigned int size) { if (!size) size = 256 * 1024; stack->sptr = 0; stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE; #ifdef CORO_FIBER stack->sptr = (void *)stack; return 1; #else size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE; void *base; #if CORO_MMAP /* mmap supposedly does allocate-on-write for us */ base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (base == (void *)-1) { /* some systems don't let us have executable heap */ /* we assume they won't need executable stack in that case */ base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (base == (void *)-1) return 0; } #if CORO_GUARDPAGES mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE); #endif base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE); #else base = malloc (ssze); if (!base) return 0; #endif #ifdef CORO_USE_VALGRIND stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, ((char *)base) + ssze - CORO_GUARDPAGES * PAGESIZE); #endif stack->sptr = base; return 1; #endif } void coro_stack_free (struct coro_stack *stack) { #ifdef CORO_FIBER /* nop */ #else #ifdef CORO_USE_VALGRIND VALGRIND_STACK_DEREGISTER (stack->valgrind_id); #endif #if CORO_MMAP if (stack->sptr) munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE), stack->ssze + CORO_GUARDPAGES * PAGESIZE); #else free (stack->sptr); #endif #endif } #endif ```
/content/code_sandbox/lib/coro.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
6,880
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#include <stdbool.h>
#include <string.h>
#include <math.h>

#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_memory.h>

#include "gatekeeper_main.h"
#include "gatekeeper_log_ratelimit.h"

/* Per-lcore cache of the formatted wall-clock time used in log prefixes. */
RTE_DEFINE_PER_LCORE(struct log_thread_time, _log_thread_time);

/* One independent rate-limit state per lcore, indexed by rte_lcore_id(). */
struct log_ratelimit_state log_ratelimit_states[RTE_MAX_LCORE];

/* False until log_ratelimit_enable() is called; see that function. */
bool log_ratelimit_enabled;

/*
 * Turn rate limiting on. Until this is called (i.e. during startup),
 * gatekeeper_log_ratelimit() lets every entry through.
 */
void
log_ratelimit_enable(void)
{
	log_ratelimit_enabled = true;
}

/*
 * Return true if an entry of priority @level passes the calling lcore's
 * configured log level. Lower numeric values are higher priority, hence
 * the <= comparison.
 */
bool
check_log_allowed(uint32_t level)
{
	struct log_ratelimit_state *lrs = &log_ratelimit_states[rte_lcore_id()];
	return level <= (uint32_t)rte_atomic32_read(&lrs->log_level);
}

/* Placeholder timestamp when the wall clock cannot be read/formatted. */
#define NO_TIME_STR "NO TIME"

/*
 * Refresh the calling lcore's cached "YYYY-MM-DD HH:MM:SS" timestamp
 * string, at most once per second of TSC time (@now is in TSC cycles).
 * On any failure the cached string falls back to NO_TIME_STR.
 */
static void
update_str_date_time(uint64_t now)
{
	struct log_thread_time *ttime = &RTE_PER_LCORE(_log_thread_time);
	struct timespec tp;
	struct tm *p_tm, time_info;
	uint64_t diff_ns;
	int ret;

	/* The fallback string must fit in the cached buffer. */
	RTE_BUILD_BUG_ON(sizeof(NO_TIME_STR) > sizeof(ttime->str_date_time));

	if (likely(now < ttime->update_time_at)) {
		/* Fast path, that is, high log rate. */
		return;
	}

	ret = clock_gettime(CLOCK_REALTIME, &tp);
	if (unlikely(ret < 0)) {
		/* Things are bad; fail safe. */
		goto no_tp;
	}
	/* @tp is available from now on. */

	p_tm = localtime_r(&tp.tv_sec, &time_info);
	if (unlikely(p_tm != &time_info))
		goto no_time;

	ret = strftime(ttime->str_date_time, sizeof(ttime->str_date_time),
		"%Y-%m-%d %H:%M:%S", &time_info);
	if (unlikely(ret == 0))
		goto no_time;

	goto next_update;

no_tp:
	tp.tv_nsec = 0;
no_time:
	strcpy(ttime->str_date_time, NO_TIME_STR);
next_update:
	/*
	 * Schedule the next refresh at the next whole second. A tv_nsec
	 * outside [0, 1s) would be a C library bug; fall back to waiting
	 * one full second in that case.
	 */
	diff_ns = likely(tp.tv_nsec >= 0 && tp.tv_nsec < ONE_SEC_IN_NANO_SEC)
		? (ONE_SEC_IN_NANO_SEC - tp.tv_nsec)
		: ONE_SEC_IN_NANO_SEC; /* C library bug! */
	ttime->update_time_at = now +
		(typeof(now))round(diff_ns * cycles_per_ns);
}

/*
 * Start a new rate-limit interval for @lrs at TSC time @now: report (at
 * NOTICE level) how many entries the previous interval suppressed, and
 * zero the printed/suppressed counters.
 */
static void
log_ratelimit_reset(struct log_ratelimit_state *lrs, uint64_t now)
{
	lrs->printed = 0;
	if (lrs->suppressed > 0) {
		update_str_date_time(now);
		rte_log(RTE_LOG_NOTICE, BLOCK_LOGTYPE, G_LOG_PREFIX
			"%u log entries were suppressed during the last ratelimit interval\n",
			lrs->block_name, rte_lcore_id(),
			RTE_PER_LCORE(_log_thread_time).str_date_time,
			"NOTICE", lrs->suppressed);
	}
	lrs->suppressed = 0;
	lrs->end = now + lrs->interval_cycles;
}

/*
 * Initialize the rate-limit state of @lcore_id: at most @burst entries
 * are logged every @interval milliseconds at levels up to @log_level,
 * attributed to functional block @block_name (must fit in the state's
 * block_name buffer, enforced by RTE_VERIFY).
 */
void
log_ratelimit_state_init(unsigned int lcore_id, uint32_t interval,
	uint32_t burst, uint32_t log_level, const char *block_name)
{
	struct log_ratelimit_state *lrs;

	RTE_VERIFY(lcore_id < RTE_MAX_LCORE);
	lrs = &log_ratelimit_states[lcore_id];
	RTE_VERIFY(strlen(block_name) < sizeof(lrs->block_name));

	/* Convert the interval from milliseconds to TSC cycles. */
	lrs->interval_cycles = interval * cycles_per_ms;
	lrs->burst = burst;
	lrs->suppressed = 0;
	rte_atomic32_set(&lrs->log_level, log_level);
	strcpy(lrs->block_name, block_name);
	log_ratelimit_reset(lrs, rte_rdtsc());
}

/*
 * Rate limiting log entries.
 *
 * Returns:
 *	- true means go ahead and do it.
 *	- false means callbacks will be suppressed.
 */
static bool
log_ratelimit_allow(struct log_ratelimit_state *lrs, uint64_t now)
{
	/* unlikely() reason: all logs are rate-limited in production. */
	if (unlikely(lrs->interval_cycles == 0))
		return true;

	/*
	 * unlikely() reason: there is only one
	 * reset every @lrs->interval_cycles.
	 */
	if (unlikely(lrs->end < now))
		log_ratelimit_reset(lrs, now);

	if (lrs->burst > lrs->printed) {
		lrs->printed++;
		return true;
	}

	lrs->suppressed++;
	return false;
}

/*
 * Rate-limited logging entry point for worker lcores. Emits the entry
 * through rte_vlog() when the level passes the lcore's log level and
 * the burst budget allows it; returns 0 when the entry is suppressed,
 * otherwise rte_vlog()'s return value.
 */
int
gatekeeper_log_ratelimit(uint32_t level, uint32_t logtype,
	const char *format, ...)
{
	uint64_t now = rte_rdtsc(); /* Freeze current time. */
	struct log_ratelimit_state *lrs =
		&log_ratelimit_states[rte_lcore_id()];
	va_list ap;
	int ret;

	/*
	 * unlikely() reason: @log_ratelimit_enabled is only false during
	 * startup.
	 */
	if (unlikely(!log_ratelimit_enabled))
		goto log;

	if (level <= (uint32_t)rte_atomic32_read(&lrs->log_level) &&
			log_ratelimit_allow(lrs, now))
		goto log;

	return 0;

log:
	update_str_date_time(now);
	va_start(ap, format);
	ret = rte_vlog(level, logtype, format, ap);
	va_end(ap);
	return ret;
}

/*
 * Unconditional (never rate-limited) logging entry point; it only
 * refreshes the cached timestamp before forwarding to rte_vlog().
 */
int
gatekeeper_log_main(uint32_t level, uint32_t logtype,
	const char *format, ...)
{
	va_list ap;
	int ret;

	update_str_date_time(rte_rdtsc());
	va_start(ap, format);
	ret = rte_vlog(level, logtype, format, ap);
	va_end(ap);
	return ret;
}

/*
 * Set the log level of every lcore whose state carries @block_name.
 * Returns the number of lcores updated (0 when the name matches none).
 */
int
set_log_level_per_block(const char *block_name, uint32_t log_level)
{
	int n = 0;
	for (int i = 0; i < RTE_MAX_LCORE; i++) {
		if (strcmp(log_ratelimit_states[i].block_name,
				block_name) == 0) {
			rte_atomic32_set(&log_ratelimit_states[i].log_level,
				log_level);
			n++;
		}
	}
	return n;
}

/*
 * Set the log level of a single lcore. Returns 0 on success or -1 when
 * @lcore_id is out of range.
 */
int
set_log_level_per_lcore(unsigned int lcore_id, uint32_t log_level)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		return -1;
	}
	rte_atomic32_set(&log_ratelimit_states[lcore_id].log_level,
		log_level);
	return 0;
}
```
/content/code_sandbox/lib/log_ratelimit.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,498
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdbool.h> #include <unistd.h> #include <sys/socket.h> #include <netdb.h> #include <arpa/inet.h> #include <sys/random.h> #include <sys/types.h> #include <pwd.h> #include <grp.h> #include <net/if.h> #include <rte_mbuf.h> #include <rte_thash.h> #include <rte_errno.h> #include <rte_ethdev.h> #include <rte_eth_bond.h> #include <rte_malloc.h> #include <rte_pmd_i40e.h> #include "gatekeeper_acl.h" #include "gatekeeper_main.h" #include "gatekeeper_net.h" #include "gatekeeper_config.h" #include "gatekeeper_launch.h" static struct net_config config; /* * Add a filter that steers packets to queues based on their EtherType. * * Tecnically, the DPDK rte_flow API allows filters to be specified * on any field in an Ethernet header, but in practice, drivers implement * the RTE_FLOW_ITEM_TYPE_ETH using the EtherType filters available * in hardware. Typically, EtherType filters only support destination * MAC addresses and the EtherType field. We choose to only allow * the EtherType field to be specified, since the destination MAC * address may be extraneous anyway (#74). * * @ether_type should be passed in host ordering, but is converted * to big endian ordering before being added as a filter, as * required by the rte_flow API. Individual device drivers can then * convert it to whatever endianness is needed. 
*/
int
ethertype_flow_add(struct gatekeeper_if *iface, uint16_t ether_type,
    uint16_t queue_id)
{
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_action_queue queue = { .index = queue_id };
    struct rte_flow_action action[] = {
        {
            .type = RTE_FLOW_ACTION_TYPE_QUEUE,
            .conf = &queue,
        },
        {
            .type = RTE_FLOW_ACTION_TYPE_END,
        }
    };
    /* Match solely on the EtherType field (see header comment above). */
    struct rte_flow_item_eth eth_spec = {
        .type = rte_cpu_to_be_16(ether_type),
    };
    struct rte_flow_item_eth eth_mask = {
        .type = 0xFFFF,
    };
    struct rte_flow_item pattern[] = {
        {
            .type = RTE_FLOW_ITEM_TYPE_ETH,
            .spec = &eth_spec,
            .mask = &eth_mask,
        },
        {
            .type = RTE_FLOW_ITEM_TYPE_END,
        },
    };
    struct rte_flow_error error;
    struct rte_flow *flow;
    int ret;

    if (!iface->rss) {
        /*
         * If RSS is not supported, then data plane packets
         * could be assigned to RX queues that are serviced
         * by non-data plane blocks (e.g., LLS).
         */
        G_LOG(NOTICE, "%s(%s): cannot use EtherType filters when RSS is not supported\n",
            __func__, iface->name);
        return -1;
    }

    /* Ask the driver whether it can implement this flow before creating. */
    ret = rte_flow_validate(iface->id, &attr, pattern, action, &error);
    if (ret < 0) {
        /*
         * A negative errno value was returned
         * (and also put in rte_errno).
         */
        G_LOG(NOTICE, "%s(%s): cannot validate EtherType=0x%x flow, errno=%i (%s), rte_flow_error_type=%i: %s\n",
            __func__, iface->name, ether_type, -ret,
            rte_strerror(-ret), error.type, error.message);
        return -1;
    }

    flow = rte_flow_create(iface->id, &attr, pattern, action, &error);
    if (flow == NULL) {
        /* rte_errno is set to a positive errno value. */
        G_LOG(ERR, "%s(%s): cannot create EtherType=0x%x flow, errno=%i (%s), rte_flow_error_type=%i: %s\n",
            __func__, iface->name, ether_type, rte_errno,
            rte_strerror(rte_errno), error.type, error.message);
        return -1;
    }

    G_LOG(NOTICE, "%s(%s): EtherType=0x%x flow supported\n",
        __func__, iface->name, ether_type);
    return 0;
}

#define STR_NOIP "NO IP"

/*
 * Add a hardware flow that steers IPv4 packets matching the given
 * destination address, L4 protocol (TCP or UDP only), and L4 port
 * spec/mask pairs to @queue_id. All address/port parameters are in
 * big endian (network) order, as the _be suffixes indicate.
 *
 * Returns 0 on success and -1 when the NIC cannot support the flow.
 */
static int
ipv4_flow_add(struct gatekeeper_if *iface, rte_be32_t dst_ip_be,
    rte_be16_t src_port_be, rte_be16_t src_port_mask_be,
    rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be,
    uint8_t proto, uint16_t queue_id)
{
    struct rte_flow_attr attr = { .ingress = 1, };
    struct rte_flow_action_queue queue = { .index = queue_id };
    struct rte_flow_action action[] = {
        {
            .type = RTE_FLOW_ACTION_TYPE_QUEUE,
            .conf = &queue,
        },
        {
            .type = RTE_FLOW_ACTION_TYPE_END,
        }
    };
    struct rte_flow_item_eth eth_spec = {
        .type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
    };
    struct rte_flow_item_eth eth_mask = {
        .type = 0xFFFF,
    };
    struct rte_flow_item_ipv4 ip_spec = {
        .hdr = {
            .dst_addr = dst_ip_be,
            .next_proto_id = proto,
        }
    };
    struct rte_flow_item_ipv4 ip_mask = {
        .hdr = {
            .dst_addr = 0xFFFFFFFF,
            .next_proto_id = 0xFF,
        }
    };
    struct rte_flow_item pattern[] = {
        {
            .type = RTE_FLOW_ITEM_TYPE_ETH,
            .spec = &eth_spec,
            .mask = &eth_mask,
        },
        {
            .type = RTE_FLOW_ITEM_TYPE_IPV4,
            .spec = &ip_spec,
            .mask = &ip_mask,
        },
        {
            /* Placeholder; filled in below with the TCP or UDP item. */
        },
        {
            .type = RTE_FLOW_ITEM_TYPE_END,
        },
    };
    struct rte_flow *flow;
    struct rte_flow_item_tcp tcp_spec;
    struct rte_flow_item_tcp tcp_mask;
    struct rte_flow_item_udp udp_spec;
    struct rte_flow_item_udp udp_mask;
    struct rte_flow_error error;
    int ret;
    const char *str_proto = "NO PROTO";
    char str_dst_ip[INET_ADDRSTRLEN], str_flow[256];

    if (!iface->rss) {
        /*
         * IPv4 flows can only be used if supported by the NIC
         * (to steer matching packets) and if RSS is supported
         * (to steer non-matching packets elsewhere).
         */
        G_LOG(NOTICE, "%s(%s): cannot use IPv4 flows when RSS is not supported\n",
            __func__, iface->name);
        return -1;
    }

    if (proto == IPPROTO_TCP) {
        memset(&tcp_spec, 0, sizeof(tcp_spec));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        tcp_spec.hdr.src_port = src_port_be;
        tcp_mask.hdr.src_port = src_port_mask_be;
        tcp_spec.hdr.dst_port = dst_port_be;
        tcp_mask.hdr.dst_port = dst_port_mask_be;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_TCP;
        pattern[2].spec = &tcp_spec;
        pattern[2].mask = &tcp_mask;
        str_proto = "TCP";
    } else if (proto == IPPROTO_UDP) {
        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = src_port_be;
        udp_mask.hdr.src_port = src_port_mask_be;
        udp_spec.hdr.dst_port = dst_port_be;
        udp_mask.hdr.dst_port = dst_port_mask_be;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[2].spec = &udp_spec;
        pattern[2].mask = &udp_mask;
        str_proto = "UDP";
    } else {
        G_LOG(ERR, "%s(%s): unexpected L4 protocol %hu for IPv4 flow\n",
            __func__, iface->name, proto);
        return -1;
    }

    /* Get a human-readable description of the flow (for logging). */
    if (unlikely(inet_ntop(AF_INET, &dst_ip_be, str_dst_ip,
            sizeof(str_dst_ip)) == NULL)) {
        G_LOG(ERR, "%s(%s): inet_ntop() failed, errno=%i: %s\n",
            __func__, iface->name, errno, strerror(errno));
        RTE_BUILD_BUG_ON(sizeof(STR_NOIP) > sizeof(str_dst_ip));
        strcpy(str_dst_ip, STR_NOIP);
    }
    ret = snprintf(str_flow, sizeof(str_flow),
        "DstIP=%s %s SrcPort=%i/0x%x DstPort=%i/0x%x",
        str_dst_ip, str_proto,
        rte_be_to_cpu_16(src_port_be),
        rte_be_to_cpu_16(src_port_mask_be),
        rte_be_to_cpu_16(dst_port_be),
        rte_be_to_cpu_16(dst_port_mask_be));
    RTE_VERIFY(ret > 0 && ret < (int)sizeof(str_flow));

    ret = rte_flow_validate(iface->id, &attr, pattern, action, &error);
    if (ret < 0) {
        /*
         * A negative errno value was returned
         * (and also put in rte_errno).
         */
        G_LOG(NOTICE, "%s(%s, %s): cannot validate IPv4 flow, errno=%i (%s), rte_flow_error_type=%i: %s\n",
            __func__, iface->name, str_flow, -ret,
            rte_strerror(-ret), error.type, error.message);
        return -1;
    }

    flow = rte_flow_create(iface->id, &attr, pattern, action, &error);
    if (flow == NULL) {
        /* rte_errno is set to a positive errno value. */
        G_LOG(ERR, "%s(%s, %s): cannot create IPv4 flow, errno=%i (%s), rte_flow_error_type=%i: %s\n",
            __func__, iface->name, str_flow, rte_errno,
            rte_strerror(rte_errno), error.type, error.message);
        return -1;
    }

    G_LOG(NOTICE, "%s(%s, %s): IPv4 flow supported\n",
        __func__, iface->name, str_flow);
    return 0;
}

/*
 * Fill an IPv4 software ACL rule equivalent to the hardware flow above.
 * Port values/masks are converted to host order as the ACL library expects.
 */
static void
ipv4_fill_acl_rule(struct ipv4_acl_rule *rule, rte_be32_t dst_ip_be,
    rte_be16_t src_port_be, rte_be16_t src_port_mask_be,
    rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be,
    uint8_t proto)
{
    rule->data.category_mask = 0x1;
    rule->data.priority = 1;
    /* Userdata is filled in in register_ipv4_acl(). */

    rule->field[PROTO_FIELD_IPV4].value.u8 = proto;
    rule->field[PROTO_FIELD_IPV4].mask_range.u8 = 0xFF;

    /* Exact-match on the destination address (/32). */
    rule->field[DST_FIELD_IPV4].value.u32 = rte_be_to_cpu_32(dst_ip_be);
    rule->field[DST_FIELD_IPV4].mask_range.u32 = 32;

    rule->field[SRCP_FIELD_IPV4].value.u16 = rte_be_to_cpu_16(src_port_be);
    rule->field[SRCP_FIELD_IPV4].mask_range.u16 =
        rte_be_to_cpu_16(src_port_mask_be);
    rule->field[DSTP_FIELD_IPV4].value.u16 = rte_be_to_cpu_16(dst_port_be);
    rule->field[DSTP_FIELD_IPV4].mask_range.u16 =
        rte_be_to_cpu_16(dst_port_mask_be);
}

/*
 * Register an IPv4 packet filter: try a hardware flow first (TCP/UDP only)
 * and fall back to a software ACL when the NIC cannot take the flow.
 * The chosen mechanism is recorded in @rx_method (RX_METHOD_NIC or
 * RX_METHOD_MB).
 */
int
ipv4_pkt_filter_add(struct gatekeeper_if *iface, rte_be32_t dst_ip_be,
    rte_be16_t src_port_be, rte_be16_t src_port_mask_be,
    rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be,
    uint8_t proto, uint16_t queue_id,
    acl_cb_func cb_f, ext_cb_func ext_cb_f, uint8_t *rx_method)
{
    struct ipv4_acl_rule ipv4_rule = { };
    int ret;

    if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) {
        ret = ipv4_flow_add(iface, dst_ip_be,
            src_port_be, src_port_mask_be,
            dst_port_be, dst_port_mask_be,
            proto, queue_id);
        if (ret < 0) {
            G_LOG(NOTICE, "Cannot register IPv4 flow on the %s interface; falling back to software filters\n",
                iface->name);
            goto acl;
        }
        *rx_method |= RX_METHOD_NIC;
        return 0;
    }

acl:
    /* Lazily initialize the IPv4 ACLs on first use. */
    if (!ipv4_acl_enabled(iface)) {
        ret = init_ipv4_acls(iface);
        if (ret < 0)
            return ret;
    }

    ipv4_fill_acl_rule(&ipv4_rule, dst_ip_be,
        src_port_be, src_port_mask_be,
        dst_port_be, dst_port_mask_be, proto);
    ret = register_ipv4_acl(&ipv4_rule, cb_f, ext_cb_f, iface);
    if (ret < 0) {
        G_LOG(ERR, "Cannot register IPv4 ACL on the %s interface\n",
            iface->name);
        return ret;
    }
    *rx_method |= RX_METHOD_MB;
    return 0;
}

/*
 * Fill an IPv6 software ACL rule. The 128-bit destination address is
 * passed as four big-endian 32-bit words and matched exactly.
 */
static void
ipv6_fill_acl_rule(struct ipv6_acl_rule *rule,
    const rte_be32_t *dst_ip_be_ptr32,
    rte_be16_t src_port_be, rte_be16_t src_port_mask_be,
    rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be,
    uint8_t proto)
{
    int i;

    rule->data.category_mask = 0x1;
    rule->data.priority = 1;
    /* Userdata is filled in in register_ipv6_acl(). */

    rule->field[PROTO_FIELD_IPV6].value.u8 = proto;
    rule->field[PROTO_FIELD_IPV6].mask_range.u8 = 0xFF;

    for (i = DST1_FIELD_IPV6; i <= DST4_FIELD_IPV6; i++) {
        rule->field[i].value.u32 = rte_be_to_cpu_32(*dst_ip_be_ptr32);
        rule->field[i].mask_range.u32 = 32;
        dst_ip_be_ptr32++;
    }

    rule->field[SRCP_FIELD_IPV6].value.u16 = rte_be_to_cpu_16(src_port_be);
    rule->field[SRCP_FIELD_IPV6].mask_range.u16 =
        rte_be_to_cpu_16(src_port_mask_be);
    rule->field[DSTP_FIELD_IPV6].value.u16 = rte_be_to_cpu_16(dst_port_be);
    rule->field[DSTP_FIELD_IPV6].mask_range.u16 =
        rte_be_to_cpu_16(dst_port_mask_be);
}

/*
 * Register an IPv6 packet filter. Unlike the IPv4 variant, this always
 * uses a software ACL; @queue_id is therefore unused.
 */
int
ipv6_pkt_filter_add(struct gatekeeper_if *iface,
    const rte_be32_t *dst_ip_be_ptr32,
    rte_be16_t src_port_be, rte_be16_t src_port_mask_be,
    rte_be16_t dst_port_be, rte_be16_t dst_port_mask_be,
    uint8_t proto, __attribute__((unused)) uint16_t queue_id,
    acl_cb_func cb_f, ext_cb_func ext_cb_f, uint8_t *rx_method)
{
    /*
     * XXX #466 The ntuple filter does not consistently
     * work with IPv6 destination addresses, so we
     * completely disable its usage and use an ACL instead.
*/
    struct ipv6_acl_rule ipv6_rule = { };
    int ret;

    /* Lazily initialize the IPv6 ACLs on first use. */
    if (!ipv6_acl_enabled(iface)) {
        ret = init_ipv6_acls(iface);
        if (ret < 0)
            return ret;
    }

    ipv6_fill_acl_rule(&ipv6_rule, dst_ip_be_ptr32, src_port_be,
        src_port_mask_be, dst_port_be, dst_port_mask_be, proto);
    ret = register_ipv6_acl(&ipv6_rule, cb_f, ext_cb_f, iface);
    if (ret < 0) {
        G_LOG(ERR, "Could not register IPv6 ACL on the %s interface\n",
            iface->name);
        return ret;
    }
    *rx_method |= RX_METHOD_MB;
    return 0;
}

/*
 * Count NUMA nodes as the highest socket id (plus one) among all
 * enabled lcores.
 */
static uint32_t
find_num_numa_nodes(void)
{
    unsigned int i;
    uint32_t nb_numa_nodes = 0;

    RTE_LCORE_FOREACH(i) {
        uint32_t socket_id = rte_lcore_to_socket_id(i);
        if (nb_numa_nodes <= socket_id)
            nb_numa_nodes = socket_id + 1;
    }

    return nb_numa_nodes;
}

/*
 * Set up one RX or TX queue of @iface. @mp is only used for RX queues
 * (the mbuf pool packets are received into).
 */
static int
configure_queue(const struct gatekeeper_if *iface, uint16_t queue_id,
    enum queue_type ty, struct rte_mempool *mp)
{
    uint16_t port_id = iface->id;
    /*
     * The bonding driver (see file
     * dependencies/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c) passes
     * rte_eth_dev_socket_id(port_id) for the parameter socket_id
     * of rte_eth_rx_queue_setup() and rte_eth_tx_queue_setup().
     *
     * If @numa_node is not equal to rte_eth_dev_socket_id(port_id),
     * the function rte_eth_dma_zone_reserve() will fail
     * when the driver of the NIC calls it.
     *
     * Although this issue is only raised while using the bonding driver,
     * it makes sense to have the RX and TX queues on the same
     * NUMA socket to which the underlying Ethernet device is connected.
*/
    unsigned int numa_node = rte_eth_dev_socket_id(port_id);
    int ret;

    switch (ty) {
    case QUEUE_TYPE_RX:
        ret = rte_eth_rx_queue_setup(port_id, queue_id,
            iface->num_rx_desc, numa_node, NULL, mp);
        if (unlikely(ret < 0)) {
            G_LOG(ERR, "%s(%s): failed to configure RX queue %u (errno=%i): %s\n",
                __func__, iface->name, queue_id,
                -ret, rte_strerror(-ret));
            return ret;
        }
        break;
    case QUEUE_TYPE_TX:
        ret = rte_eth_tx_queue_setup(port_id, queue_id,
            iface->num_tx_desc, numa_node, NULL);
        if (unlikely(ret < 0)) {
            G_LOG(ERR, "%s(%s): failed to configure TX queue %u (errno=%d): %s\n",
                __func__, iface->name, queue_id,
                -ret, rte_strerror(-ret));
            return ret;
        }
        break;
    default:
        G_LOG(CRIT, "%s(%s): bug: unsupported queue type (%d)\n",
            __func__, iface->name, ty);
        return -EINVAL;
    }

    return 0;
}

/* An interface is bonded when it has multiple ports or uses 802.3ad. */
static inline int
iface_bonded(const struct gatekeeper_if *iface)
{
    return iface->num_ports > 1 ||
        iface->bonding_mode == BONDING_MODE_8023AD;
}

/*
 * Get a queue identifier for a given functional block instance (lcore),
 * using a certain interface for either RX or TX.
 */
int
get_queue_id(struct gatekeeper_if *iface, enum queue_type ty,
    unsigned int lcore, struct rte_mempool *mp)
{
    int16_t *queues, new_queue_id;
    int ret;

    if (unlikely(lcore >= RTE_MAX_LCORE || ty >= QUEUE_TYPE_MAX))
        return -EINVAL;

    queues = (ty == QUEUE_TYPE_RX) ? iface->rx_queues : iface->tx_queues;

    /* A queue was already assigned to this lcore; reuse it. */
    if (queues[lcore] != GATEKEEPER_QUEUE_UNALLOCATED)
        goto queue;

    /* Get next queue identifier. */
    new_queue_id = rte_atomic16_add_return(ty == QUEUE_TYPE_RX ?
        &iface->rx_queue_id : &iface->tx_queue_id, 1);
    if (unlikely(new_queue_id == GATEKEEPER_QUEUE_UNALLOCATED)) {
        G_LOG(ERR, "%s(%s): exhausted all %s queues; this is likely a bug\n",
            __func__, iface->name,
            (ty == QUEUE_TYPE_RX) ? "RX" : "TX");
        return -ENOSPC;
    }
    queues[lcore] = new_queue_id;

    ret = configure_queue(iface, new_queue_id, ty, mp);
    if (unlikely(ret < 0))
        return ret;

queue:
    return queues[lcore];
}

/*
 * Guarantee that rte_eth_dev_callback_register() and
 * rte_eth_dev_callback_unregister() are called with the exact same parameters.
 */
static int lsc_event_callback(uint16_t port_id, enum rte_eth_event_type event,
    void *cb_arg, void *ret_param);

static inline int
register_callback_for_lsc(struct gatekeeper_if *iface, uint16_t port_id)
{
    return rte_eth_dev_callback_register(port_id,
        RTE_ETH_EVENT_INTR_LSC, lsc_event_callback, iface);
}

static inline int
unregister_callback_for_lsc(struct gatekeeper_if *iface, uint16_t port_id)
{
    return rte_eth_dev_callback_unregister(port_id,
        RTE_ETH_EVENT_INTR_LSC, lsc_event_callback, iface);
}

/* Unregister LSC callbacks and close every member port of @iface. */
static void
close_iface_ports(struct gatekeeper_if *iface)
{
    uint8_t i;

    for (i = 0; i < iface->num_ports; i++) {
        uint16_t port_id = iface->ports[i];
        /*
         * It's safe to unregister a callback that hasn't been
         * registered before.
         */
        unregister_callback_for_lsc(iface, port_id);
        rte_eth_dev_close(port_id);
    }
}

/*
 * Stages of interface teardown; each value implies all teardown done by
 * the values listed after it (see the fallthrough switch in destroy_iface()).
 */
enum iface_destroy_cmd {
    /* Destroy only the data allocated by Lua. */
    IFACE_DESTROY_LUA,
    /* Destroy the data associated with initializing the ports. */
    IFACE_DESTROY_PORTS,
    /* Destroy the data initialized by the first phase of net config. */
    IFACE_DESTROY_INIT,
    /* Destroy data associated with running ports (stop them). */
    IFACE_DESTROY_STOP,
    /* Destroy all data for this interface. */
    IFACE_DESTROY_ALL,
};

/*
 * Write the DPDK device name of the bonded device of @iface into
 * @port_name (at least IF_NAMESIZE bytes).
 */
static int
bonded_if_name(char *port_name, const struct gatekeeper_if *iface)
{
    /*
     * The name of the bonded device must start with the name of
     * the bonding driver. Otherwise, DPDK cannot identify
     * the correct driver.
     *
     * The ID of the first port is used instead of the name of
     * the interface (i.e. iface->name) because IF_NAMESIZE is
     * small.
*/
    int ret = snprintf(port_name, IF_NAMESIZE, "net_bonding%u",
        iface->ports[0]);
    if (unlikely(ret < 0)) {
        /*
         * NOTE(review): snprintf() does not return a negative errno,
         * so strerror(-ret) below is best-effort only.
         */
        G_LOG(ERR, "%s(%s): snprintf() failed (errno=%i): %s\n",
            __func__, iface->name, -ret, strerror(-ret));
        return ret;
    }
    if (unlikely(ret >= IF_NAMESIZE)) {
        G_LOG(ERR, "%s(%s): port name is too long (len=%i)\n",
            __func__, iface->name, ret);
        return -ENOSPC;
    }
    return 0;
}

/*
 * Tear down @iface up to the stage given by @cmd. The switch cases fall
 * through intentionally: each stage performs its own cleanup and then the
 * cleanup of every earlier stage. A no-op if the interface is not alive.
 */
static void
destroy_iface(struct gatekeeper_if *iface, enum iface_destroy_cmd cmd)
{
    if (!iface->alive)
        return;

    switch (cmd) {
    case IFACE_DESTROY_ALL:
        /* Destroy the ACLs for each socket. */
        if (ipv6_acl_enabled(iface))
            destroy_acls(&iface->ipv6_acls);
        if (ipv4_acl_enabled(iface))
            destroy_acls(&iface->ipv4_acls);
        /* FALLTHROUGH */
    case IFACE_DESTROY_STOP:
        rte_eth_dev_stop(iface->id);
        /* FALLTHROUGH */
    case IFACE_DESTROY_INIT:
        /* FALLTHROUGH */
    case IFACE_DESTROY_PORTS:
        /* Stop and close bonded port, if needed. */
        if (iface_bonded(iface)) {
            char if_name[IF_NAMESIZE];
            int ret;

            /*
             * It's safe to unregister a callback that hasn't been
             * registered before.
             */
            unregister_callback_for_lsc(iface, iface->id);

            ret = bonded_if_name(if_name, iface);
            if (likely(ret == 0))
                rte_eth_bond_free(if_name);
        }

        /* Close and free interface ports. */
        close_iface_ports(iface);
        rte_free(iface->ports);
        iface->ports = NULL;
        /* FALLTHROUGH */
    case IFACE_DESTROY_LUA: {
        /* Free PCI addresses. */
        uint8_t i;
        for (i = 0; i < iface->num_ports; i++)
            rte_free(iface->pci_addrs[i]);
        rte_free(iface->pci_addrs);
        iface->pci_addrs = NULL;

        /* Free interface name. */
        rte_free(iface->name);
        iface->name = NULL;

        iface->alive = false;
        break;
    }
    default:
        rte_panic("Unexpected condition\n");
        break;
    }
}

/*
 * Classify the string @ip_addr: returns AF_INET for IPv4, AF_INET6 for
 * IPv6, and AF_UNSPEC when the string is not a numeric IP address.
 */
int
get_ip_type(const char *ip_addr)
{
    int ret;
    struct addrinfo hint;
    struct addrinfo *res = NULL;

    memset(&hint, 0, sizeof(hint));
    hint.ai_family = PF_UNSPEC;
    /* Numeric addresses only; no DNS lookups. */
    hint.ai_flags = AI_NUMERICHOST;

    ret = getaddrinfo(ip_addr, NULL, &hint, &res);
    if (ret) {
        G_LOG(ERR, "net: invalid ip address %s; %s\n",
            ip_addr, gai_strerror(ret));
        return AF_UNSPEC;
    }

    /*
     * Fixed garbled log message; it previously read
     * "%s is an is unknown address format %d".
     */
    if (res->ai_family != AF_INET && res->ai_family != AF_INET6)
        G_LOG(ERR, "net: %s is an unknown address format %d\n",
            ip_addr, res->ai_family);

    ret = res->ai_family;
    freeaddrinfo(res);
    return ret;
}

/*
 * Parse the string @ip_addr into @res, setting res->proto to the
 * corresponding EtherType. Returns 0 on success and -1 on failure.
 */
int
convert_str_to_ip(const char *ip_addr, struct ipaddr *res)
{
    int ip_type = get_ip_type(ip_addr);
    if (ip_type == AF_INET) {
        if (inet_pton(AF_INET, ip_addr, &res->ip.v4) != 1)
            return -1;
        res->proto = RTE_ETHER_TYPE_IPV4;
    } else if (likely(ip_type == AF_INET6)) {
        if (inet_pton(AF_INET6, ip_addr, &res->ip.v6) != 1)
            return -1;
        res->proto = RTE_ETHER_TYPE_IPV6;
    } else
        return -1;
    return 0;
}

/*
 * Write the textual form of @ip_addr into @res (capacity @n bytes).
 * Returns 0 on success and -1 on failure.
 */
int
convert_ip_to_str(const struct ipaddr *ip_addr, char *res, int n)
{
    if (ip_addr->proto == RTE_ETHER_TYPE_IPV4) {
        if (unlikely(inet_ntop(AF_INET, &ip_addr->ip.v4, res, n)
                == NULL)) {
            G_LOG(ERR, "%s(): failed to convert an IPv4 address to string (errno=%i): %s\n",
                __func__, errno, strerror(errno));
            return -1;
        }
        return 0;
    }

    if (likely(ip_addr->proto == RTE_ETHER_TYPE_IPV6)) {
        if (unlikely(inet_ntop(AF_INET6, &ip_addr->ip.v6, res, n)
                == NULL)) {
            G_LOG(ERR, "%s(): failed to convert an IPv6 address to string (errno=%i): %s\n",
                __func__, errno, strerror(errno));
            return -1;
        }
        return 0;
    }

    G_LOG(CRIT, "%s(): unexpected condition: unknown IP type %hu\n",
        __func__, ip_addr->proto);
    return -1;
}

/* Verify that @vlan_tag fits in the 12-bit VLAN ID space. */
static int
check_vlan_tag(const char *iface_name, uint16_t vlan_tag)
{
    if (vlan_tag > RTE_ETHER_MAX_VLAN_ID) {
        G_LOG(ERR, "net: VLAN ID %d of interface %s is too big; the maximum VLAN ID is %d\n",
            vlan_tag, iface_name,
            RTE_ETHER_MAX_VLAN_ID);
        return -1;
    }
    return 0;
}

/*
 * First-phase (Lua-driven) initialization of @iface: copies the interface
 * name and PCI addresses, parses up to one IPv4 and one IPv6 CIDR, and
 * records VLAN tags when VLAN insertion is enabled.
 * Returns 0 on success; on failure, frees everything it allocated and
 * returns -1.
 */
int
lua_init_iface(struct gatekeeper_if *iface, const char *iface_name,
    const char **pci_addrs, uint8_t num_pci_addrs, const char **ip_cidrs,
    uint8_t num_ip_cidrs, uint16_t ipv4_vlan_tag, uint16_t ipv6_vlan_tag)
{
    uint8_t i, j;

    if (num_ip_cidrs < 1 || num_ip_cidrs > 2) {
        G_LOG(ERR, "net: an interface has at least 1 IP address, also at most 1 IPv4 and 1 IPv6 address.\n");
        return -1;
    }

    iface->num_ports = num_pci_addrs;

    iface->name = rte_malloc("iface_name", strlen(iface_name) + 1, 0);
    if (iface->name == NULL) {
        G_LOG(ERR, "net: %s: Out of memory for iface name\n", __func__);
        return -1;
    }
    strcpy(iface->name, iface_name);

    iface->pci_addrs = rte_calloc("pci_addrs", num_pci_addrs,
        sizeof(*pci_addrs), 0);
    if (iface->pci_addrs == NULL) {
        G_LOG(ERR, "net: %s: Out of memory for PCI array\n", __func__);
        goto name;
    }

    for (i = 0; i < num_pci_addrs; i++) {
        iface->pci_addrs[i] = rte_malloc(NULL,
            strlen(pci_addrs[i]) + 1, 0);
        if (iface->pci_addrs[i] == NULL) {
            G_LOG(ERR, "net: %s: Out of memory for PCI address %s\n",
                __func__, pci_addrs[i]);
            for (j = 0; j < i; j++)
                rte_free(iface->pci_addrs[j]);
            rte_free(iface->pci_addrs);
            iface->pci_addrs = NULL;
            goto name;
        }
        strcpy(iface->pci_addrs[i], pci_addrs[i]);
    }

    for (i = 0; i < num_ip_cidrs; i++) {
        /* Need to make copy to tokenize.
*/ size_t ip_cidr_len = strlen(ip_cidrs[i]); char ip_cidr_copy[ip_cidr_len + 1]; char *ip_addr; char *saveptr; char *prefix_len_str; char *end; long prefix_len; int gk_type; int max_prefix; strcpy(ip_cidr_copy, ip_cidrs[i]); ip_addr = strtok_r(ip_cidr_copy, "/", &saveptr); if (ip_addr == NULL) goto pci_addrs; gk_type = get_ip_type(ip_addr); if (gk_type == AF_INET && inet_pton(AF_INET, ip_addr, &iface->ip4_addr) == 1) { iface->configured_proto |= CONFIGURED_IPV4; } else if (gk_type == AF_INET6 && inet_pton(AF_INET6, ip_addr, &iface->ip6_addr) == 1) { /* * According to RFC 6164, addresses with all zeros * in the rightmost 64 bits SHOULD NOT be assigned as * unicast addresses; addresses in which the rightmost * 64 bits are assigned the highest 128 values * (i.e., ffff:ffff:ffff:ff7f to ffff:ffff:ffff:ffff) * SHOULD NOT be used as unicast addresses. */ uint64_t addr2 = rte_be_to_cpu_64(((rte_be64_t *)iface->ip6_addr.s6_addr)[1]); if (addr2 == 0 || addr2 >= 0xffffffffffffff7f) { G_LOG(ERR, "net: the rightmost 64 bits of the IP address %016" PRIx64 " SHOULD NOT be assigned to the interface\n", addr2); goto pci_addrs; } iface->configured_proto |= CONFIGURED_IPV6; } else goto pci_addrs; prefix_len_str = strtok_r(NULL, "\0", &saveptr); if (prefix_len_str == NULL) goto pci_addrs; prefix_len = strtol(prefix_len_str, &end, 10); if (prefix_len_str == end || !*prefix_len_str || *end) { G_LOG(ERR, "net: prefix length \"%s\" is not a number\n", prefix_len_str); goto pci_addrs; } if ((prefix_len == LONG_MAX || prefix_len == LONG_MIN) && errno == ERANGE) { G_LOG(ERR, "net: prefix length \"%s\" caused underflow or overflow\n", prefix_len_str); goto pci_addrs; } max_prefix = max_prefix_len(gk_type) - 1; if (prefix_len < 0 || prefix_len > max_prefix) { G_LOG(ERR, "net: invalid prefix length \"%s\" on %s; must be in range [0, %d] to provide enough addresses for a valid deployment\n", prefix_len_str, ip_addr, max_prefix); goto pci_addrs; } if (gk_type == AF_INET) { 
ip4_prefix_mask(prefix_len, &iface->ip4_mask); iface->ip4_addr_plen = prefix_len; } else if (gk_type == AF_INET6) { ip6_prefix_mask(prefix_len, &iface->ip6_mask); iface->ip6_addr_plen = prefix_len; } } iface->l2_len_out = sizeof(struct rte_ether_hdr); if (iface->vlan_insert) { if (check_vlan_tag(iface_name, ipv4_vlan_tag) != 0 || check_vlan_tag(iface_name, ipv6_vlan_tag) != 0) goto pci_addrs; iface->ipv4_vlan_tag_be = rte_cpu_to_be_16(ipv4_vlan_tag); iface->ipv6_vlan_tag_be = rte_cpu_to_be_16(ipv6_vlan_tag); iface->l2_len_out += sizeof(struct rte_vlan_hdr); } return 0; pci_addrs: for (i = 0; i < num_pci_addrs; i++) rte_free(iface->pci_addrs[i]); rte_free(iface->pci_addrs); iface->pci_addrs = NULL; name: rte_free(iface->name); iface->name = NULL; return -1; } struct net_config * get_net_conf(void) { return &config; } struct gatekeeper_if * get_if_front(struct net_config *net_conf) { return &net_conf->front; } struct gatekeeper_if * get_if_back(struct net_config *net_conf) { return net_conf->back_iface_enabled ? &net_conf->back : NULL; } static int i40e_clear_inset_field(struct rte_pmd_i40e_inset *inset, uint8_t field_idx) { int ret = rte_pmd_i40e_inset_field_clear(&inset->inset, field_idx); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): cannot clear field %i\n", __func__, field_idx); } return ret; } static int i40e_disable_ports_from_inset(uint16_t port_id, uint8_t pctype_id) { struct rte_pmd_i40e_inset inset; /* Obtain the current RSS hash inset for @pctype_id. */ int ret = rte_pmd_i40e_inset_get(port_id, pctype_id, &inset, INSET_HASH); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(port_id=%i, pctype=%i): cannot get inset (errno=%i): %s\n", __func__, port_id, pctype_id, -ret, rte_strerror(-ret)); return ret; } /* * Remove source port (i.e. first two bytes) of @pctype_id. * * Field index obtained at "Table 7-12. Default field vector table" * of "Intel Ethernet Controller X710/XXV710/XL710 Datasheet". * * The field index below works for @pctype_id equals to TCP or UDP. 
*/
    ret = i40e_clear_inset_field(&inset, 29);
    if (unlikely(ret < 0))
        return ret;

    /*
     * Remove destination port (i.e. third and fourth bytes) of
     * @pctype_id.
     */
    ret = i40e_clear_inset_field(&inset, 30);
    if (unlikely(ret < 0))
        return ret;

    /* Write the modified input set back to the hardware. */
    ret = rte_pmd_i40e_inset_set(port_id, pctype_id, &inset, INSET_HASH);
    if (unlikely(ret < 0)) {
        G_LOG(ERR, "%s(port_id=%i, pctype=%i): cannot set inset (errno=%i): %s\n",
            __func__, port_id, pctype_id,
            -ret, rte_strerror(-ret));
    }
    return ret;
}

/* Apply i40e_disable_ports_from_inset() to each of the @n pctypes. */
static int
i40e_disable_pctypes_ports_from_inset(uint16_t port_id, uint8_t *pctypes,
    uint8_t n)
{
    int i;

    for (i = 0; i < n; i++) {
        int ret = i40e_disable_ports_from_inset(port_id, pctypes[i]);
        if (unlikely(ret < 0))
            return ret;
    }
    return 0;
}

static int
i40e_disable_ipv4_tcp_udp_ports_from_inset(uint16_t port_id)
{
    /*
     * PCTYPES obtained at "Table 7-5. Packet classifier types and
     * its input sets" of "Intel Ethernet Controller X710/XXV710/XL710
     * Datasheet".
     */
    uint8_t pctypes[] = {
        31, /* Non-fragmented IPv4, UDP. */
        33, /* Non-fragmented IPv4, TCP. */
    };
    return i40e_disable_pctypes_ports_from_inset(port_id, pctypes,
        RTE_DIM(pctypes));
}

static int
i40e_disable_ipv6_tcp_udp_ports_from_inset(uint16_t port_id)
{
    /*
     * PCTYPES obtained at "Table 7-5. Packet classifier types and
     * its input sets" of "Intel Ethernet Controller X710/XXV710/XL710
     * Datasheet".
     */
    uint8_t pctypes[] = {
        41, /* Non-fragmented IPv6, UDP. */
        43, /* Non-fragmented IPv6, TCP. */
    };
    return i40e_disable_pctypes_ports_from_inset(port_id, pctypes,
        RTE_DIM(pctypes));
}

/*
 * Fill iface->rss_key with random bytes; when guarantee_random_entropy
 * is set, draw from the blocking /dev/random pool instead of /dev/urandom.
 */
static int
randomize_rss_key(struct gatekeeper_if *iface)
{
    uint16_t final_set_count;
    unsigned int flags = iface->guarantee_random_entropy ? GRND_RANDOM : 0;
    /*
     * To validate if the key generated is reasonable, the
     * number of bits set to 1 in the key must be greater than
     * 10% and less than 90% of the total bits in the key.
     * min_num_set_bits and max_num_set_bits represent the lower
     * and upper bound for the key.
*/
    const uint16_t min_num_set_bits = iface->rss_key_len * 8 * 0.1;
    const uint16_t max_num_set_bits = iface->rss_key_len * 8 * 0.9;

    /* Keep drawing keys until the popcount falls within [10%, 90%]. */
    do {
        int number_of_bytes = 0;
        uint8_t i;

        /*
         * When the last parameter of the system call getrandom()
         * (i.e flags) is zero, getrandom() uses the /dev/urandom pool.
         */
        do {
            /* getrandom() may return fewer bytes than asked. */
            int ret = getrandom(iface->rss_key + number_of_bytes,
                iface->rss_key_len - number_of_bytes, flags);
            if (ret < 0)
                return ret;
            number_of_bytes += ret;
        } while (number_of_bytes < iface->rss_key_len);

        final_set_count = 0;
        for (i = 0; i < iface->rss_key_len; i++) {
            final_set_count +=
                __builtin_popcount(iface->rss_key[i]);
        }
    } while (final_set_count < min_num_set_bits ||
            final_set_count > max_num_set_bits);
    return 0;
}

/*
 * Split up RTE_ETH_RSS_IP into IPv4-related and IPv6-related hash functions.
 * For each type of IP being used in Gatekeeper, check the supported
 * hashes of the device. If none are supported, disable RSS.
 * If RTE_ETH_RSS_IPV{4,6} is not supported, issue a warning since we expect
 * this to be a common and critical hash function. Some devices (i40e
 * and AVF) do not support the RTE_ETH_RSS_IPV{4,6} hashes, but the hashes
 * they do support may be enough.
 */
#define GATEKEEPER_IPV4_RSS_HF ( \
    RTE_ETH_RSS_IPV4 | \
    RTE_ETH_RSS_FRAG_IPV4 | \
    RTE_ETH_RSS_NONFRAG_IPV4_OTHER)

#define GATEKEEPER_IPV6_RSS_HF ( \
    RTE_ETH_RSS_IPV6 | \
    RTE_ETH_RSS_FRAG_IPV6 | \
    RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
    RTE_ETH_RSS_IPV6_EX)

/*
 * Negotiate the RSS configuration of @iface against the capabilities
 * reported in @dev_info, filling @port_conf accordingly. Falls back to
 * a single RX queue (RSS disabled) when the device cannot support RSS.
 */
static int
check_if_rss(struct gatekeeper_if *iface,
    const struct rte_eth_dev_info *dev_info,
    struct rte_eth_conf *port_conf)
{
    uint8_t rss_hash_key[GATEKEEPER_RSS_MAX_KEY_LEN];
    struct rte_eth_rss_conf __rss_conf = {
        .rss_key = rss_hash_key,
        .rss_key_len = sizeof(rss_hash_key),
    };
    uint64_t rss_off = dev_info->flow_type_rss_offloads;
    int ret;

    RTE_BUILD_BUG_ON((GATEKEEPER_IPV4_RSS_HF | GATEKEEPER_IPV6_RSS_HF) !=
        RTE_ETH_RSS_IP);

    /*
     * Set up device RSS.
     *
     * Assume all ports support RSS until shown otherwise.
* If not, RSS will be disabled and only one queue is used.
     *
     * Check each port for the RSS hash functions it supports,
     * and configure each to use the intersection of supported
     * hash functions.
     */
    iface->rss = true;
    port_conf->rx_adv_conf.rss_conf.rss_hf = 0;
    if (ipv4_if_configured(iface)) {
        port_conf->rx_adv_conf.rss_conf.rss_hf |=
            GATEKEEPER_IPV4_RSS_HF;
        if (iface->alternative_rss_hash)
            port_conf->rx_adv_conf.rss_conf.rss_hf |=
                RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                RTE_ETH_RSS_NONFRAG_IPV4_UDP;
    }
    if (ipv6_if_configured(iface)) {
        port_conf->rx_adv_conf.rss_conf.rss_hf |=
            GATEKEEPER_IPV6_RSS_HF;
        if (iface->alternative_rss_hash)
            port_conf->rx_adv_conf.rss_conf.rss_hf |=
                RTE_ETH_RSS_NONFRAG_IPV6_TCP |
                RTE_ETH_RSS_NONFRAG_IPV6_UDP;
    }

    /* Probe whether the device can report an RSS configuration at all. */
    ret = rte_eth_dev_rss_hash_conf_get(iface->id, &__rss_conf);
    if (unlikely(ret == -ENOTSUP)) {
        G_LOG(WARNING, "%s(%s): interface did not return RSS configuration\n",
            __func__, iface->name);
        goto disable_rss;
    }
    /* Do not use @__rss_conf from now on. See issue #624 for details. */
    if (unlikely(ret < 0)) {
        G_LOG(ERR, "%s(%s): failed to get RSS hash configuration (errno=%i): %s\n",
            __func__, iface->name, -ret, rte_strerror(-ret));
        return ret;
    }
    RTE_VERIFY(ret == 0);

    /* This interface doesn't support RSS, so disable RSS. */
    if (unlikely(rss_off == 0)) {
        G_LOG(WARNING, "%s(%s): interface does not support RSS\n",
            __func__, iface->name);
        goto disable_rss;
    }

    /* Does Gatekeeper support the key length of @dev_info? */
    if (unlikely(dev_info->hash_key_size < GATEKEEPER_RSS_MIN_KEY_LEN ||
            dev_info->hash_key_size > GATEKEEPER_RSS_MAX_KEY_LEN ||
            dev_info->hash_key_size % 4 != 0)) {
        G_LOG(WARNING, "%s(%s): interface requires an RSS hash key of %i bytes; Gatekeeper only supports keys of [%i, %i] bytes long that are multiple of 4\n",
            __func__, iface->name, dev_info->hash_key_size,
            GATEKEEPER_RSS_MIN_KEY_LEN, GATEKEEPER_RSS_MAX_KEY_LEN);
        goto disable_rss;
    }
    iface->rss_key_len = dev_info->hash_key_size;

    if (unlikely(iface->alternative_rss_hash && iface_bonded(iface))) {
        G_LOG(ERR, "%s(%s): the parameter alternative_rss_hash cannot be true when the interface is bonded\n",
            __func__, iface->name);
        return -EINVAL;
    }

    /* Check IPv4 RSS hashes. */
    if (port_conf->rx_adv_conf.rss_conf.rss_hf & GATEKEEPER_IPV4_RSS_HF) {
        if (unlikely((rss_off & GATEKEEPER_IPV4_RSS_HF) == 0)) {
            G_LOG(WARNING, "%s(%s): interface does not support any IPv4 RSS hash\n",
                __func__, iface->name);
            goto disable_rss;
        }
        if (iface->alternative_rss_hash) {
            /* i40e-specific: hash on addresses only, not ports. */
            ret = i40e_disable_ipv4_tcp_udp_ports_from_inset(
                iface->id);
            if (unlikely(ret < 0)) {
                G_LOG(ERR, "%s(%s): i40e_disable_ipv4_tcp_udp_ports_from_inset() failed (errno=%i): %s\n",
                    __func__, iface->name,
                    -ret, rte_strerror(-ret));
                goto disable_rss;
            }
        } else if (unlikely((rss_off & RTE_ETH_RSS_IPV4) == 0)) {
            G_LOG(WARNING, "%s(%s): interface does not support the ETH_RSS_IPV4 hash function. The device may not hash packets to the correct queues; you may try the parameter alternative_rss_hash\n",
                __func__, iface->name);
        }
    }

    /* Check IPv6 RSS hashes. */
    if (port_conf->rx_adv_conf.rss_conf.rss_hf & GATEKEEPER_IPV6_RSS_HF) {
        if (unlikely((rss_off & GATEKEEPER_IPV6_RSS_HF) == 0)) {
            G_LOG(WARNING, "%s(%s): interface does not support any IPv6 RSS hash\n",
                __func__, iface->name);
            goto disable_rss;
        }
        if (iface->alternative_rss_hash) {
            ret = i40e_disable_ipv6_tcp_udp_ports_from_inset(
                iface->id);
            if (unlikely(ret < 0)) {
                G_LOG(ERR, "%s(%s): i40e_disable_ipv6_tcp_udp_ports_from_inset() failed (errno=%i): %s\n",
                    __func__, iface->name,
                    -ret, rte_strerror(-ret));
                goto disable_rss;
            }
        } else if (unlikely((rss_off & RTE_ETH_RSS_IPV6) == 0)) {
            G_LOG(WARNING, "%s(%s): interface does not support the ETH_RSS_IPV6 hash function. The device may not hash packets to the correct queues; you may try the parameter alternative_rss_hash\n",
                __func__, iface->name);
        }
    }

    /*
     * Any missing hash that will cause RSS to definitely fail
     * or are likely to cause RSS to fail are handled above.
     * Here, also log if the device doesn't support any of the requested
     * hashes, including the hashes considered non-essential.
     */
    if ((rss_off & port_conf->rx_adv_conf.rss_conf.rss_hf) !=
            port_conf->rx_adv_conf.rss_conf.rss_hf) {
        G_LOG(WARNING, "%s(%s): interface only supports RSS hash functions 0x%"PRIx64", but Gatekeeper asks for 0x%"PRIx64"\n",
            __func__, iface->name, rss_off,
            port_conf->rx_adv_conf.rss_conf.rss_hf);
    }
    port_conf->rx_adv_conf.rss_conf.rss_hf &= rss_off;

    ret = randomize_rss_key(iface);
    if (unlikely(ret < 0)) {
        G_LOG(ERR, "%s(%s): failed to initialize RSS key (errno=%i): %s\n",
            __func__, iface->name, -ret, strerror(-ret));
        return ret;
    }

    /* Convert RSS key. */
    RTE_VERIFY(iface->rss_key_len % 4 == 0);
    rte_convert_rss_key((uint32_t *)iface->rss_key,
        (uint32_t *)iface->rss_key_be, iface->rss_key_len);

    port_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
    port_conf->rx_adv_conf.rss_conf.rss_key = iface->rss_key;
    port_conf->rx_adv_conf.rss_conf.rss_key_len = iface->rss_key_len;
    return 0;

disable_rss:
    iface->rss = false;
    port_conf->rx_adv_conf.rss_conf.rss_hf = 0;
    iface->num_rx_queues = 1;
    G_LOG(WARNING, "%s(%s): the interface does not have RSS capabilities; the GK or GT block will receive all packets and send them to the other blocks as needed. Gatekeeper or Grantor should only be run with one lcore dedicated to GK or GT in this mode; restart with only one GK or GT lcore if necessary\n",
        __func__, iface->name);
    return 0;
}

/*
 * Validate the configured MTU against the device limits and enable
 * multi-segment TX buffers when the MTU exceeds the default mbuf size.
 */
static int
check_if_mtu(struct gatekeeper_if *iface,
    const struct rte_eth_dev_info *dev_info,
    struct rte_eth_conf *port_conf)
{
    /*
     * Set up device MTU.
     *
     * If greater than the size of the mbufs, then add the
     * multi-segment buffer flag.
*/
    port_conf->rxmode.mtu = iface->mtu;
    if (iface->mtu > RTE_MBUF_DEFAULT_BUF_SIZE)
        port_conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

    if (unlikely(dev_info->min_mtu > port_conf->rxmode.mtu)) {
        G_LOG(ERR, "%s(%s): the minimum MTU %u is larger than the configured MTU %"PRIu32"\n",
            __func__, iface->name,
            dev_info->min_mtu, port_conf->rxmode.mtu);
        return -EINVAL;
    }
    if (unlikely(dev_info->max_mtu < port_conf->rxmode.mtu)) {
        G_LOG(ERR, "%s(%s): the maximum MTU %u is smaller than the configured MTU %"PRIu32"\n",
            __func__, iface->name,
            dev_info->max_mtu, port_conf->rxmode.mtu);
        return -EINVAL;
    }

    /* Drop the multi-segment flag if the device cannot offload it. */
    if (unlikely((port_conf->txmode.offloads &
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
            !(dev_info->tx_offload_capa &
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS))) {
        G_LOG(NOTICE, "%s(%s): interface does not support offloading multi-segment TX buffers\n",
            __func__, iface->name);
        port_conf->txmode.offloads &= ~RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
    }

    return 0;
}

/*
 * Enable hardware IPv4/UDP checksum offloads when configured and
 * supported; otherwise fall back to software checksums.
 */
static int
check_if_checksums(struct gatekeeper_if *iface,
    const struct rte_eth_dev_info *dev_info,
    struct rte_eth_conf *port_conf)
{
    /*
     * Set up checksumming.
     *
     * Gatekeeper and Grantor do IPv4 checksumming in hardware,
     * if available.
     *
     * Grantor also does UDP checksumming in hardware, if available.
     *
     * In both cases, we set up the devices to assume that
     * IPv4 and UDP checksumming are supported unless querying
     * the device shows otherwise.
     *
     * Note that the IPv4 checksum field is only computed over
     * the IPv4 header and the UDP checksum is computed over an IPv4
     * pseudoheader (i.e. not the direct bytes of the IPv4 header).
     * Therefore, even though offloading checksum computations can cause
     * checksum fields to be set to 0 or an intermediate value during
     * processing, the IPv4 and UDP checksum operations do not overlap,
     * and can be configured as hardware or software independently.
*/ if (ipv4_if_configured(iface) && iface->ipv4_hw_cksum) port_conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; if (!config.back_iface_enabled && (iface->ipv4_hw_udp_cksum || iface->ipv6_hw_udp_cksum)) port_conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM; if (unlikely((port_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) && !(dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))) { G_LOG(NOTICE, "%s(%s): interface does not support offloading IPv4 checksumming; using software IPv4 checksums\n", __func__, iface->name); port_conf->txmode.offloads &= ~RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; iface->ipv4_hw_cksum = false; } if (unlikely((port_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) && !(dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { G_LOG(NOTICE, "%s(%s): interface does not support offloading UDP checksumming; using software UDP checksums\n", __func__, iface->name); port_conf->txmode.offloads &= ~RTE_ETH_TX_OFFLOAD_UDP_CKSUM; iface->ipv4_hw_udp_cksum = false; iface->ipv6_hw_udp_cksum = false; } return 0; } static int check_if_interruption(struct gatekeeper_if *iface, const struct rte_eth_dev_info *dev_info, struct rte_eth_conf *port_conf) { RTE_SET_USED(iface); /* * Do not log the fact that an interface does not support the LSC * interruption since this is already logged in monitor_port(). 
*/ port_conf->intr_conf.lsc = !!(*dev_info->dev_flags & RTE_ETH_DEV_INTR_LSC); return 0; } static int check_if_offloads(struct gatekeeper_if *iface, struct rte_eth_conf *port_conf) { struct rte_eth_dev_info dev_info; int ret = rte_eth_dev_info_get(iface->id, &dev_info); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot obtain interface information (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } ret = check_if_rss(iface, &dev_info, port_conf); if (unlikely(ret < 0)) return ret; ret = check_if_mtu(iface, &dev_info, port_conf); if (unlikely(ret < 0)) return ret; ret = check_if_checksums(iface, &dev_info, port_conf); if (unlikely(ret < 0)) return ret; ret = check_if_interruption(iface, &dev_info, port_conf); if (unlikely(ret < 0)) return ret; return 0; } int gatekeeper_setup_rss(uint16_t port_id, uint16_t *queues, uint16_t num_queues) { int ret = 0; uint32_t i; struct rte_eth_dev_info dev_info; struct rte_eth_rss_reta_entry64 reta_conf[GATEKEEPER_RETA_MAX_SIZE]; /* Get RSS redirection table (RETA) information. */ ret = rte_eth_dev_info_get(port_id, &dev_info); if (ret < 0) { G_LOG(ERR, "%s(): cannot obtain information on port %hu (errno=%i): %s\n", __func__, port_id, -ret, rte_strerror(-ret)); goto out; } if (dev_info.reta_size == 0) { G_LOG(ERR, "net: failed to setup RSS at port %hhu (invalid RETA size = 0)\n", port_id); ret = -1; goto out; } if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) { G_LOG(ERR, "net: failed to setup RSS at port %hhu (invalid RETA size = %u)\n", port_id, dev_info.reta_size); ret = -1; goto out; } /* Setup RSS RETA contents. */ memset(reta_conf, 0, sizeof(reta_conf)); for (i = 0; i < dev_info.reta_size; i++) { uint32_t idx = i / RTE_ETH_RETA_GROUP_SIZE; uint32_t shift = i % RTE_ETH_RETA_GROUP_SIZE; uint32_t queue_idx = i % num_queues; /* Select all fields to set. */ reta_conf[idx].mask = ~0LL; reta_conf[idx].reta[shift] = (uint16_t)queues[queue_idx]; } /* RETA update. 
*/ ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size); if (ret == -ENOTSUP) { G_LOG(ERR, "net: failed to setup RSS at port %hhu hardware doesn't support\n", port_id); ret = -1; goto out; } else if (ret == -EINVAL) { G_LOG(ERR, "net: failed to setup RSS at port %hhu (RETA update with bad redirection table parameter)\n", port_id); ret = -1; goto out; } /* RETA query. */ ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, dev_info.reta_size); if (ret == -ENOTSUP) { G_LOG(ERR, "net: failed to setup RSS at port %hhu hardware doesn't support\n", port_id); ret = -1; } else if (ret == -EINVAL) { G_LOG(ERR, "net: failed to setup RSS at port %hhu (RETA query with bad redirection table parameter)\n", port_id); ret = -1; } out: return ret; } int gatekeeper_get_rss_config(uint16_t port_id, struct gatekeeper_rss_config *rss_conf) { uint16_t i; struct rte_eth_dev_info dev_info; /* Get RSS redirection table (RETA) information. */ int ret = rte_eth_dev_info_get(port_id, &dev_info); if (ret < 0) { G_LOG(ERR, "%s(): cannot obtain information on port %hu (errno=%i): %s\n", __func__, port_id, -ret, rte_strerror(-ret)); goto out; } rss_conf->reta_size = dev_info.reta_size; if (rss_conf->reta_size == 0 || rss_conf->reta_size > RTE_ETH_RSS_RETA_SIZE_512) { G_LOG(ERR, "net: failed to setup RSS at port %hhu (invalid RETA size = %hu)\n", port_id, rss_conf->reta_size); ret = -1; goto out; } for (i = 0; i < dev_info.reta_size; i++) { uint32_t idx = i / RTE_ETH_RETA_GROUP_SIZE; /* Select all fields to query. */ rss_conf->reta_conf[idx].mask = ~0LL; } /* RETA query. 
*/ ret = rte_eth_dev_rss_reta_query(port_id, rss_conf->reta_conf, rss_conf->reta_size); if (ret == -ENOTSUP) { G_LOG(ERR, "net: failed to query RSS configuration at port %hhu hardware doesn't support\n", port_id); ret = -1; } else if (ret == -EINVAL) { G_LOG(ERR, "net: failed to query RSS configuration at port %hhu (RETA query with bad redirection table parameter)\n", port_id); ret = -1; } out: return ret; } int gatekeeper_setup_user(struct net_config *net_conf, const char *user) { struct passwd *pw; if (user == NULL) { net_conf->pw_uid = 0; net_conf->pw_gid = 0; return 0; } if ((pw = getpwnam(user)) == NULL) { G_LOG(ERR, "%s: failed to call getpwnam() for user %s - %s\n", __func__, user, strerror(errno)); return -1; } net_conf->pw_uid = pw->pw_uid; net_conf->pw_gid = pw->pw_gid; return 0; } static int create_bond(struct gatekeeper_if *iface) { char dev_name[IF_NAMESIZE]; unsigned int i; int ret2, ret = bonded_if_name(dev_name, iface); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot name bonded port (errno=%i): %s\n", __func__, iface->name, -ret, strerror(-ret)); return ret; } ret = rte_eth_bond_create(dev_name, iface->bonding_mode, rte_eth_dev_socket_id(iface->ports[0])); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to create bonded port (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } iface->id = ret; if (iface->num_ports > 1) { /* * The default balancing policy is BALANCE_XMIT_POLICY_LAYER2; * see bond_alloc() in file * dependencies/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c * This mode does not fit Gatekeeper since the next hops for * Gatekeeper typically are only a couple routers. * * Use BALANCE_XMIT_POLICY_LAYER23 instead of * BALANCE_XMIT_POLICY_LAYER34 to lower the cost per packet. 
*/ ret = rte_eth_bond_xmit_policy_set(iface->id, BALANCE_XMIT_POLICY_LAYER23); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to set transmission policy (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); goto close_bond; } } if (__lacp_enabled(iface)) { /* * If LACP is enabled, enable multicast addresses. * Otherwise, rx_burst_8023ad() of DPDK's bonding driver * (see rte_eth_bond_pmd.c) is going to discard * multicast Ethernet packets such as ARP and * ND packets. */ ret = rte_eth_allmulticast_enable(iface->id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot enable multicast on bond device (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); goto close_bond; } } /* Add members to bond. */ for (i = 0; i < iface->num_ports; i++) { ret = rte_eth_bond_member_add(iface->id, iface->ports[i]); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to add member %u (errno=%i): %s\n", __func__, iface->name, iface->ports[i], -ret, rte_strerror(-ret)); goto close_bond; } } if (__lacp_enabled(iface) && iface->num_ports > 1) { /* * XXX #686 Ensure that all members can receive packets * destined to the MAC address of the bond. * * This must come after adding members. Otherwise, * rte_eth_dev_mac_addr_add() unfortunately does nothing. 
*/ struct rte_ether_addr if_macaddr; ret = rte_eth_macaddr_get(iface->id, &if_macaddr); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot get MAC address (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); goto close_bond; } ret = rte_eth_dev_mac_addr_add(iface->id, &if_macaddr, 0); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot add interface MAC address (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); goto close_bond; } } return 0; close_bond: ret2 = rte_eth_bond_free(dev_name); if (unlikely(ret2 < 0)) { G_LOG(WARNING, "%s(%s): rte_eth_bond_free() failed (errno=%i): %s\n", __func__, iface->name, -ret2, rte_strerror(-ret2)); } return ret; } #define MAX_LOG_IF_NAME (IF_NAMESIZE + 16) static void log_if_name(char *if_name, size_t len, const struct gatekeeper_if *iface, uint16_t port_id) { if (unlikely(len == 0)) { G_LOG(CRIT, "%s(%s/%u): bug: len == 0\n", __func__, iface->name, port_id); return; } if (iface->id != port_id) { int ret = snprintf(if_name, len, "%s/%u", iface->name, port_id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s/%u): snprintf() failed (errno=%i): %s\n", __func__, iface->name, port_id, -ret, strerror(-ret)); /* Fall back to interface name. */ } else if (unlikely((typeof(len))ret >= len)) { G_LOG(CRIT, "%s(%s/%u): bug: len = %lu <= %i\n", __func__, iface->name, port_id, len, ret); /* Fall back to interface name. */ } else { /* Success. */ return; } } strncpy(if_name, iface->name, len); if (unlikely(if_name[len - 1] != '\0')) { G_LOG(CRIT, "%s(%s/%u): bug: len = %lu < strlen(iface->name) = %lu\n", __func__, iface->name, port_id, len, strlen(iface->name)); if_name[len - 1] = '\0'; } } /* * ATTENTION: This function is called in the interrupt host thread, * which is not associated to an lcore, therefore it must call MAIN_LOG() * instead of G_LOG(). 
/*
 * Render @count member-port identifiers into @str_members as a
 * comma-separated list (e.g. "0, 1, 2").
 *
 * On any failure the buffer is left as a valid (possibly empty or
 * truncated) NUL-terminated string.
 *
 * ATTENTION: This function is called in the interrupt host thread,
 * which is not associated to an lcore, therefore it must call MAIN_LOG()
 * instead of G_LOG().
 */
static void
get_str_members(char *str_members, size_t size, uint16_t *members,
	uint16_t count)
{
	/* @total tracks how many bytes (excluding '\0') are already written. */
	unsigned int i, total = 0;

	if (unlikely(size <= 0))
		return;
	str_members[0] = '\0';

	for (i = 0; i < count; i++) {
		size_t remainder = size - total;
		/* Append ", " after every entry except the last one. */
		int ret = snprintf(str_members + total, remainder, "%u%s",
			members[i], i + 1 < count ? ", " : "");
		if (unlikely(ret < 0)) {
			MAIN_LOG(ERR, "%s(): snprintf() failed (errno=%i): %s\n",
				__func__, errno, strerror(errno));
			return;
		}
		total += ret;
		/*
		 * snprintf() returns the length it *wanted* to write;
		 * ret >= remainder means the output was truncated.
		 */
		if (unlikely((size_t)ret >= remainder)) {
			MAIN_LOG(CRIT, "%s(): bug: str_members' size must be more than %u bytes\n",
				__func__,
				total + 1 /* Accounting for '\0'. */);
			/* Ensure the truncated buffer stays NUL-terminated. */
			str_members[size - 1] = '\0';
			return;
		}
	}
}
rte_eth_bond_active_members_get(iface->id, members, RTE_DIM(members)); if (unlikely(ret < 0)) { RTE_BUILD_BUG_ON(sizeof(STR_ERROR_MEMBERS) > sizeof(str_members)); MAIN_LOG(ERR, "%s(%s): cannot get active members (errno=%i): %s\n", __func__, if_name, -ret, rte_strerror(-ret)); strcpy(str_members, STR_ERROR_MEMBERS); } else { get_str_members(str_members, sizeof(str_members), members, ret); } MAIN_LOG(NOTICE, "%s(%s): active members: %s; %s\n", __func__, if_name, str_members, link_status_text); return 0; } MAIN_LOG(NOTICE, "%s(%s): %s\n", __func__, if_name, link_status_text); if (iface_bonded(iface)) { /* * A member's link changed, but the link of the bond may not * change. Thus, force an "event" for the bonded interface. * * The following call works because this callback is added * _after_ the bonded interface is created, so * the bonded interface receives this event _before_ * this callback and updates its state. */ lsc_event_callback(iface->id, event, cb_arg, NULL); } return 0; } /* * RETURN * false @port_id does not support the LSC interruption. * No monitoring is added. * true @port_id is being monitored from now on. * <0 An errror happened. 
/*
 * Register link-state-change (LSC) monitoring on the ports of @iface.
 *
 * When every member port reports its own link events, the bonded port
 * itself needs no separate monitor, because each member's callback
 * forwards the event to the bonded interface.
 *
 * RETURN
 *	0 on success; a negative value on failure (every callback that
 *	was registered is unregistered again).
 */
static int
monitor_links(struct gatekeeper_if *iface)
{
	bool all_members_monitored = true;
	unsigned int done;
	int ret;

	for (done = 0; done < iface->num_ports; done++) {
		ret = monitor_port(iface, iface->ports[done]);
		if (unlikely(ret < 0))
			goto rollback;
		if (!ret)
			all_members_monitored = false;
	}

	/*
	 * Fall back to monitoring the bonded interface itself only when
	 * at least one member lacks LSC support.
	 */
	if (iface_bonded(iface) && !all_members_monitored) {
		ret = monitor_port(iface, iface->id);
		if (unlikely(ret < 0))
			goto rollback;
	}

	return 0;

rollback:
	while (done > 0)
		unregister_callback_for_lsc(iface, iface->ports[--done]);
	return ret;
}
*/ for (i = 0; i < RTE_MAX_LCORE; i++) { iface->rx_queues[i] = GATEKEEPER_QUEUE_UNALLOCATED; iface->tx_queues[i] = GATEKEEPER_QUEUE_UNALLOCATED; } rte_atomic16_set(&iface->rx_queue_id, -1); rte_atomic16_set(&iface->tx_queue_id, -1); iface->ports = rte_calloc("ports", iface->num_ports, sizeof(*iface->ports), 0); if (unlikely(iface->ports == NULL)) { G_LOG(ERR, "%s(%s): out of memory for ports\n", __func__, iface->name); destroy_iface(iface, IFACE_DESTROY_LUA); return -ENOMEM; } /* Initialize all ports on this interface. */ for (i = 0; i < iface->num_ports; i++) { ret = rte_eth_dev_get_port_by_name(iface->pci_addrs[i], &iface->ports[i]); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to map PCI %s to a port (errno=%i): %s\n", __func__, iface->name, iface->pci_addrs[i], -ret, rte_strerror(-ret)); goto free_ports; } } /* Initialize bonded port, if needed. */ if (!iface_bonded(iface)) { RTE_VERIFY(iface->num_ports == 1); iface->id = iface->ports[0]; } else { ret = create_bond(iface); if (unlikely(ret < 0)) goto free_ports; } /* Make sure the interface supports hardware offloads. 
/*
 * Derive the interface's link-local IPv6 address (and its /64 mask)
 * from its MAC address.
 */
static inline void
gen_ipv6_link_local(struct gatekeeper_if *iface)
{
	/* Link-local IPv6 calculation according to RFC 4291. */
	struct in6_addr *addr = &iface->ll_ip6_addr;
	uint64_t *pmask = (uint64_t *)iface->ll_ip6_mask.s6_addr;

	/* Prefix fe80::/64. */
	addr->s6_addr[0] = 0xFE;
	addr->s6_addr[1] = 0x80;
	memset(addr->s6_addr + 2, 0, 6);

	/*
	 * Modified EUI-64 interface identifier: first three MAC bytes,
	 * then the fixed bytes 0xFF 0xFE, then the last three MAC bytes.
	 */
	rte_memcpy(addr->s6_addr + 8, iface->eth_addr.addr_bytes, 3);
	addr->s6_addr[11] = 0xFF;
	addr->s6_addr[12] = 0xFE;
	rte_memcpy(addr->s6_addr + 13, iface->eth_addr.addr_bytes + 3, 3);
	/* Flip the universal/local bit of the identifier (RFC 4291). */
	addr->s6_addr[8] ^= 2;

	/* /64 network mask: upper 64 bits set, lower 64 bits clear. */
	pmask[0] = ~0ULL;
	pmask[1] = 0ULL;
}
*/ uint8_t ip6_mc_addr[16] = IPV6_SN_MC_ADDR(iface->ip6_addr.s6_addr); struct rte_ether_addr eth_mc_addr = { .addr_bytes = { 0x33, 0x33, ip6_mc_addr[12], ip6_mc_addr[13], ip6_mc_addr[14], ip6_mc_addr[15], }, }; rte_memcpy(iface->ip6_mc_addr.s6_addr, ip6_mc_addr, sizeof(iface->ip6_mc_addr.s6_addr)); rte_ether_addr_copy(&eth_mc_addr, &iface->eth_mc_addr); /* * Generate a link-local address, and then use it to * generate a solicited-node multicast address for * that link-local address. */ gen_ipv6_link_local(iface); { uint8_t ll_ip6_mc_addr[16] = IPV6_SN_MC_ADDR(iface->ll_ip6_addr.s6_addr); struct rte_ether_addr ll_eth_mc_addr = { .addr_bytes = { 0x33, 0x33, ll_ip6_mc_addr[12], ll_ip6_mc_addr[13], ll_ip6_mc_addr[14], ll_ip6_mc_addr[15], }, }; struct rte_ether_addr mc_addrs[2] = { eth_mc_addr, ll_eth_mc_addr }; rte_memcpy(iface->ll_ip6_mc_addr.s6_addr, ll_ip6_mc_addr, sizeof(iface->ll_ip6_mc_addr.s6_addr)); rte_ether_addr_copy(&ll_eth_mc_addr, &iface->ll_eth_mc_addr); /* Add to list of accepted MAC addresses. 
*/ rte_eth_dev_set_mc_addr_list(iface->id, mc_addrs, 2); } } static int check_if_rss_key_update(const struct gatekeeper_if *iface) { struct rte_eth_dev_info dev_info; uint8_t rss_hash_key[GATEKEEPER_RSS_MAX_KEY_LEN]; struct rte_eth_rss_conf rss_conf = { .rss_key = rss_hash_key, .rss_key_len = sizeof(rss_hash_key), }; int ret; if (!iface->rss) return 0; ret = rte_eth_dev_info_get(iface->id, &dev_info); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot obtain interface information (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } ret = rte_eth_dev_rss_hash_conf_get(iface->id, &rss_conf); switch (ret) { case 0: break; case -ENODEV: G_LOG(WARNING, "%s(%s): failed to get RSS hash configuration: interface identifier is invalid\n", __func__, iface->name); return ret; case -EIO: G_LOG(WARNING, "%s(%s): failed to get RSS hash configuration: device is removed\n", __func__, iface->name); return ret; case -ENOTSUP: G_LOG(WARNING, "%s(%s): failed to get RSS hash configuration: hardware does not support RSS\n", __func__, iface->name); return ret; default: G_LOG(WARNING, "%s(%s): failed to get RSS hash configuration (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } /* * XXX #624 Use @dev_info.hash_key_size instead of * @rss_conf.rss_key_len to avoid a bug in DPDK. 
*/ if (unlikely(dev_info.hash_key_size != iface->rss_key_len || memcmp(rss_conf.rss_key, iface->rss_key, iface->rss_key_len) != 0)) { G_LOG(WARNING, "%s(%s): the obtained RSS hash configuration does not match the expected RSS configuration\n", __func__, iface->name); return -EINVAL; } return 0; } static uint32_t count_macs(const char *if_name, const struct rte_ether_addr *macaddrs, uint32_t max_mac_addrs) { bool ended = false; uint32_t i, count = 0; for (i = 0; i < max_mac_addrs; i++) { if (rte_is_zero_ether_addr(&macaddrs[i])) { ended = true; continue; } if (unlikely(ended)) { G_LOG(ERR, "%s(%s): MAC " RTE_ETHER_ADDR_PRT_FMT " at index %" PRIu32 " comes after the last index; count = %" PRIu32 "\n", __func__, if_name, RTE_ETHER_ADDR_BYTES(&macaddrs[i]), i, count); break; } count++; } return count; } static void report_macs(const char *if_name, uint16_t port_id, uint32_t max_mac_addrs) { struct rte_ether_addr macaddrs[max_mac_addrs]; uint32_t i, count; int ret = rte_eth_macaddrs_get(port_id, macaddrs, max_mac_addrs); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot get MAC addresses (errno=%i): %s\n", __func__, if_name, -ret, rte_strerror(-ret)); return; } count = count_macs(if_name, macaddrs, max_mac_addrs); for (i = 0; i < count; i++) { G_LOG(INFO, "%s(%s): MAC [%u/%u] " RTE_ETHER_ADDR_PRT_FMT "\n", __func__, if_name, i + 1, count, RTE_ETHER_ADDR_BYTES(&macaddrs[i])); } } static void report_port_macs(const struct gatekeeper_if *iface, uint16_t port_id) { char if_name[MAX_LOG_IF_NAME]; struct rte_eth_dev_info dev_info; int ret; log_if_name(if_name, sizeof(if_name), iface, port_id); ret = rte_eth_dev_info_get(port_id, &dev_info); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot obtain interface information (errno=%i): %s\n", __func__, if_name, -ret, rte_strerror(-ret)); return; } report_macs(if_name, port_id, dev_info.max_mac_addrs); } static void report_if_macs(const struct gatekeeper_if *iface) { unsigned int i; if (iface_bonded(iface)) 
/*
 * Start the DPDK device behind @iface and finish address setup that is
 * only possible on a running port (RSS key check, MAC fetch, IPv6
 * multicast addresses).
 *
 * On failure the interface is torn down with the destruction level
 * that matches how far startup got (IFACE_DESTROY_INIT before the
 * device started, IFACE_DESTROY_STOP afterwards).
 *
 * RETURN
 *	0 on success; a negative value on failure.
 */
static int
start_iface(struct gatekeeper_if *iface)
{
	int ret = rte_eth_dev_start(iface->id);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(%s): failed to start interface (errno=%i): %s\n",
			__func__, iface->name, -ret, rte_strerror(-ret));
		destroy_iface(iface, IFACE_DESTROY_INIT);
		return ret;
	}

	/*
	 * If we try to update/get the RSS hash configuration before
	 * the start of the NICs, no meaningful operations will be
	 * done; even the return values indicate no errors.
	 *
	 * After checking the source code of DPDK library,
	 * it turns out that RSS is disabled in the MRQC register
	 * before we start the NICs.
	 *
	 * Only after the NICs start, we can check whether the RSS hash
	 * is configured correctly or not.
	 */
	ret = check_if_rss_key_update(iface);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(%s): port does not have the correct RSS hash key (errno=%i): %s\n",
			__func__, iface->name, -ret, strerror(-ret));
		goto stop;
	}

	ret = rte_eth_macaddr_get(iface->id, &iface->eth_addr);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(%s): cannot get MAC address (errno=%i): %s\n",
			__func__, iface->name, -ret, rte_strerror(-ret));
		goto stop;
	}

	/* IPv6 multicast setup needs the MAC address fetched above. */
	if (ipv6_if_configured(iface))
		setup_ipv6_addrs(iface);

	report_if_macs(iface);

	return 0;

stop:
	destroy_iface(iface, IFACE_DESTROY_STOP);
	return ret;
}
*/ uint32_t total_rx_desc = if_rx_desc(&net_conf->front); /* * The total number of transmit descriptors to allocate per lcore * for the transmit ring of the front interface. */ uint32_t total_tx_desc = if_tx_desc(&net_conf->front); uint32_t max_num_pkt; unsigned int num_mbuf; if (net_conf->back_iface_enabled) { /* Account for the back interface. */ total_rx_desc += if_rx_desc(&net_conf->back); total_tx_desc += if_tx_desc(&net_conf->back); } /* * The number of elements in the mbuf pool. * * Need to provision enough memory for the worst case. * It's the number of RX descriptors, the number of TX descriptors, * and the number of packet burst buffers. */ max_num_pkt = total_rx_desc + total_tx_desc + total_pkt_burst; /* * The optimum size (in terms of memory usage) for a mempool is when * it is a power of two minus one. */ num_mbuf = rte_align32pow2(max_num_pkt) - 1; G_LOG(NOTICE, "%s(%s): total_pkt_burst = %u packets, total_rx_desc = %u descriptors, total_tx_desc = %u descriptors, max_num_pkt = %u packets, num_mbuf = %u packets\n", __func__, block_name, total_pkt_burst, total_rx_desc, total_tx_desc, max_num_pkt, num_mbuf); return num_mbuf; } struct rte_mempool * create_pktmbuf_pool(const char *block_name, unsigned int lcore, unsigned int num_mbuf) { struct rte_mempool *mp; char pool_name[64]; int ret = snprintf(pool_name, sizeof(pool_name), "pktmbuf_pool_%s_%u", block_name, lcore); RTE_VERIFY(ret > 0 && ret < (int)sizeof(pool_name)); mp = rte_pktmbuf_pool_create_by_ops(pool_name, num_mbuf, 0, sizeof(struct sol_mbuf_priv), RTE_MBUF_DEFAULT_BUF_SIZE, rte_lcore_to_socket_id(lcore), "ring_mp_sc"); if (mp == NULL) { G_LOG(ERR, "net: failed to allocate mbuf for block %s at lcore %u\n", block_name, lcore); if (rte_errno == E_RTE_NO_CONFIG) G_LOG(ERR, "function could not get pointer to rte_config structure\n"); else if (rte_errno == E_RTE_SECONDARY) G_LOG(ERR, "function was called from a secondary process instance\n"); else if (rte_errno == EINVAL) G_LOG(ERR, "cache size 
provided is too large\n"); else if (rte_errno == ENOSPC) G_LOG(ERR, "the maximum number of memzones has already been allocated\n"); else if (rte_errno == EEXIST) G_LOG(ERR, "a memzone with the same name already exists\n"); else if (rte_errno == ENOMEM) G_LOG(ERR, "no appropriate memory area found in which to create memzone\n"); else G_LOG(ERR, "unknown error creating mbuf pool\n"); return NULL; } return mp; } static int init_iface_stage1(void *arg) { struct gatekeeper_if *iface = arg; return init_iface(iface); } static int start_network_stage2(void *arg) { struct net_config *net = arg; int ret = start_iface(&net->front); if (unlikely(ret < 0)) goto fail; if (net->back_iface_enabled) { ret = start_iface(&net->back); if (unlikely(ret < 0)) goto destroy_front; } return 0; destroy_front: destroy_iface(&net->front, IFACE_DESTROY_STOP); fail: G_LOG(ERR, "%s(): failed to start Gatekeeper network\n", __func__); return ret; } static int copy_amb_to_inh(cap_t cap_p) { cap_value_t i; for (i = 0; i <= CAP_LAST_CAP; i++) { char *cap_name; int old_errno; int ret; int value = cap_get_ambient(i); if (value < 0) { old_errno = errno; cap_name = cap_to_name(i); if (cap_name == NULL) { G_LOG(WARNING, "%s(): could not get string for capability %u (%s) while reporting that it is not supported by the running kernel (%s)\n", __func__, i, strerror(errno), strerror(old_errno)); continue; } G_LOG(WARNING, "%s(): capability %s (%u) not supported by the running kernel: %s\n", __func__, cap_name, i, strerror(old_errno)); cap_free(cap_name); continue; } ret = cap_set_flag(cap_p, CAP_INHERITABLE, 1, &i, value ? 
/*
 * Log the process's current capability sets at DEBUG level, prefixed
 * with @context. When the kernel supports ambient capabilities, those
 * are logged too (copied into a scratch state's inheritable set first,
 * because cap_to_text() cannot render the ambient set directly).
 *
 * Best effort: failures are logged and the function simply returns.
 */
static void
log_proc_caps(const char *context)
{
	cap_t cap_p = cap_get_proc();
	char *cap_output, *amb_output;
	int ret;

	if (cap_p == NULL) {
		G_LOG(ERR, "%s(): cannot get capabilities: %s\n",
			__func__, strerror(errno));
		return;
	}

	/* cap_to_text() allocates; must be released with cap_free(). */
	cap_output = cap_to_text(cap_p, NULL);
	if (cap_output == NULL) {
		G_LOG(ERR, "%s(): cannot get text string of capabilities: %s\n",
			__func__, strerror(errno));
		goto proc;
	}

	if (!CAP_AMBIENT_SUPPORTED()) {
		G_LOG(DEBUG, "%s: %s\n", context, cap_output);
		goto cap;
	}

	/* Log ambient capabilities. */
	cap_clear(cap_p);
	ret = copy_amb_to_inh(cap_p);
	if (ret < 0)
		goto cap;

	amb_output = cap_to_text(cap_p, NULL);
	if (amb_output == NULL) {
		G_LOG(ERR, "%s(): cannot get text string of ambient capabilities: %s\n",
			__func__, strerror(errno));
		goto cap;
	}

	G_LOG(DEBUG, "%s: %s\t(ambient as inheritable): %s\n",
		context, cap_output, amb_output);

	cap_free(amb_output);
cap:
	cap_free(cap_output);
proc:
	cap_free(cap_p);
}
*/ if (config.pw_uid == 0) return 0; log_proc_caps("Capabilities before setting"); cap_p = cap_init(); if (cap_p == NULL) { G_LOG(ERR, "%s(): could not create a capability state in working storage: %s\n", __func__, strerror(errno)); return -1; } if (ncap > 0) { ret = cap_set_flag(cap_p, CAP_PERMITTED, ncap, caps, CAP_SET); if (ret != 0) { G_LOG(ERR, "%s(): could not set CAP_PERMITTED for %d capabilities: %s\n", __func__, ncap, strerror(errno)); goto free; } ret = cap_set_flag(cap_p, CAP_EFFECTIVE, ncap, caps, CAP_SET); if (ret != 0) { G_LOG(ERR, "%s(): could not set CAP_EFFECTIVE for %d capabilities: %s\n", __func__, ncap, strerror(errno)); goto free; } } ret = cap_set_proc(cap_p); if (ret != 0) { G_LOG(ERR, "%s(): could not set capabilities for process: %s\n", __func__, strerror(errno)); goto free; } free: cap_free(cap_p); if (ret < 0) return ret; if (CAP_AMBIENT_SUPPORTED()) { ret = cap_reset_ambient(); if (ret != 0) { G_LOG(ERR, "%s(): could not reset ambient capabilities: %s\n", __func__, strerror(errno)); } } log_proc_caps("Capabilities after setting"); return ret; } static int set_groups(const char *user, gid_t gid) { int ret; int old_num_gids, num_gids = 0; gid_t *gids; /* Fetch number of groups this user is a member of. */ ret = getgrouplist(user, gid, NULL, &num_gids); if (ret != -1) { G_LOG(ERR, "%s: getgrouplist indicates user %s is not in any groups, but belongs to at least %d\n", __func__, user, gid); return -1; } RTE_VERIFY(num_gids >= 0); if (num_gids == 0) { /* User belongs to no groups. 
*/ ret = cap_setgroups(gid, 0, NULL); if (ret == -1) { G_LOG(ERR, "%s: could not assign empty group set with cap_setgroups: %s\n", __func__, strerror(errno)); return -1; } return 0; } gids = rte_malloc("gids", num_gids * sizeof(*gids), 0); if (gids == NULL) { G_LOG(ERR, "%s: could not allocate memory for the %d groups of user %s\n", __func__, num_gids, user); return -1; } old_num_gids = num_gids; ret = getgrouplist(user, gid, gids, &num_gids); if (ret != old_num_gids) { G_LOG(ERR, "%s: expected %d groups but received %d from getgrouplist\n", __func__, old_num_gids, ret); ret = -1; goto free; } ret = cap_setgroups(gid, num_gids, gids); if (ret == -1) { G_LOG(ERR, "%s: could not set the groups of user %s with cap_setgroups: %s\n", __func__, user, strerror(errno)); } free: rte_free(gids); return ret; } static int change_user(void) { struct passwd *pw; int ret; errno = 0; pw = getpwuid(config.pw_uid); if (pw == NULL) { G_LOG(ERR, "%s: failed to get the passwd struct for uid %u - %s\n", __func__, config.pw_uid, errno != 0 ? strerror(errno) : "user not found"); return -1; } G_LOG(DEBUG, "Ambient capabilities supported: %s\n", CAP_AMBIENT_SUPPORTED() ? 
/*
 * Stage-2 finalization: compile the configured ACLs and then drop root
 * privileges when a non-root user was configured.
 *
 * @arg carries the log file descriptor (as an intptr_t) so its
 * ownership can be transferred to the unprivileged user while the
 * process is still privileged.
 *
 * RETURN
 *	0 on success; a non-zero value on failure.
 */
int
finalize_stage2(void *arg)
{
	int ret;

	/* Build the IPv4/IPv6 ACLs of whichever interfaces enable them. */
	if (ipv4_acl_enabled(&config.front)) {
		ret = build_ipv4_acls(&config.front);
		if (ret < 0)
			return ret;
	}

	if (ipv4_acl_enabled(&config.back)) {
		ret = build_ipv4_acls(&config.back);
		if (ret < 0)
			return ret;
	}

	if (ipv6_acl_enabled(&config.front)) {
		ret = build_ipv6_acls(&config.front);
		if (ret < 0)
			return ret;
	}

	if (ipv6_acl_enabled(&config.back)) {
		ret = build_ipv6_acls(&config.back);
		if (ret < 0)
			return ret;
	}

	if (config.pw_uid != 0) {
		int log_fd = (intptr_t)arg;
		/*
		 * Hand the log file over to the target user before
		 * change_user(): the chown is impossible afterwards.
		 */
		ret = fchown(log_fd, config.pw_uid, config.pw_gid);
		if (ret != 0) {
			G_LOG(ERR, "Failed to change the owner of the file (with descriptor %d) to user with uid %u and gid %u - %s\n",
				log_fd, config.pw_uid, config.pw_gid,
				strerror(errno));
			return ret;
		}

		ret = change_user();
		if (ret != 0)
			return ret;
	}

	G_LOG(NOTICE, "Gatekeeper pid = %u\n", getpid());

	/* Enable rate-limited logging now that startup is complete. */
	log_ratelimit_enable();
	return 0;
}
net->front.ip4_mask.s_addr : net->back.ip4_mask.s_addr; return ip4_same_subnet(net->front.ip4_addr.s_addr, net->back.ip4_addr.s_addr, ip4_mask); } static bool ipv6_test_same_subnet(struct net_config *net) { const struct in6_addr *ip6_mask = net->front.ip6_addr_plen <= net->back.ip6_addr_plen ? &net->front.ip6_mask : &net->back.ip6_mask; return ip6_same_subnet(&net->front.ip6_addr, &net->back.ip6_addr, ip6_mask); } /* Initialize the network. */ int gatekeeper_init_network(struct net_config *net_conf) { int num_ports; int ret = -1; if (net_conf == NULL) return -1; if (net_conf->back_iface_enabled) { if (ipv4_if_configured(&net_conf->front) != ipv4_if_configured(&net_conf->back)) { G_LOG(ERR, "net: front and back interfaces must either both support IPv4 or neither support IPv4\n"); return -1; } if (ipv6_if_configured(&net_conf->front) != ipv6_if_configured(&net_conf->back)) { G_LOG(ERR, "net: front and back interfaces must either both support IPv6 or neither support IPv6\n"); return -1; } if (ipv4_if_configured(&net_conf->front) && ipv4_if_configured(&net_conf->back) && ipv4_test_same_subnet(net_conf)) { G_LOG(ERR, "net: the IPv4 addresses of the front and back interfaces cannot belong to the same subnet\n"); return -1; } if (ipv6_if_configured(&net_conf->front) && ipv6_if_configured(&net_conf->back) && ipv6_test_same_subnet(net_conf)) { G_LOG(ERR, "net: the IPv6 addresses of the front and back interfaces cannot belong to the same subnet\n"); return -1; } } net_conf->numa_nodes = find_num_numa_nodes(); net_conf->numa_used = rte_calloc("numas", net_conf->numa_nodes, sizeof(*net_conf->numa_used), 0); if (net_conf->numa_used == NULL) { G_LOG(ERR, "net: %s: out of memory for NUMA used array\n", __func__); return -1; } /* Check port limits. */ num_ports = net_conf->front.num_ports + (net_conf->back_iface_enabled ? 
net_conf->back.num_ports : 0); if (num_ports > rte_eth_dev_count_avail()) { G_LOG(ERR, "net: there are only %i network ports available to DPDK/Gatekeeper, but configuration is using %i ports\n", rte_eth_dev_count_avail(), num_ports); ret = -1; goto numa; } net_conf->front.total_pkt_burst = 0; net_conf->back.total_pkt_burst = 0; /* Initialize interfaces. */ ret = launch_at_stage1(init_iface_stage1, &net_conf->front); if (ret < 0) goto numa; ret = launch_at_stage2(start_network_stage2, net_conf); if (ret < 0) goto destroy_front; if (net_conf->back_iface_enabled) { ret = launch_at_stage1(init_iface_stage1, &net_conf->back); if (ret < 0) goto do_not_start_net; } goto out; do_not_start_net: pop_n_at_stage2(1); destroy_front: pop_n_at_stage1(1); numa: rte_free(net_conf->numa_used); net_conf->numa_used = NULL; out: return ret; } void gatekeeper_free_network(void) { if (config.back_iface_enabled) destroy_iface(&config.back, IFACE_DESTROY_ALL); destroy_iface(&config.front, IFACE_DESTROY_ALL); rte_free(config.numa_used); config.numa_used = NULL; } int net_launch_at_stage1(struct net_config *net, int front_rx_queues, int front_tx_queues, int back_rx_queues, int back_tx_queues, lcore_function_t *f, void *arg) { int ret = launch_at_stage1(f, arg); if (ret < 0) return ret; RTE_VERIFY(front_rx_queues >= 0); RTE_VERIFY(front_tx_queues >= 0); net->front.num_rx_queues += front_rx_queues; net->front.num_tx_queues += front_tx_queues; if (net->back_iface_enabled) { RTE_VERIFY(back_rx_queues >= 0); RTE_VERIFY(back_tx_queues >= 0); net->back.num_rx_queues += back_rx_queues; net->back.num_tx_queues += back_tx_queues; } return 0; } bool ipv4_configured(struct net_config *net_conf) { if (net_conf->back_iface_enabled) { return ipv4_if_configured(&net_conf->front) && ipv4_if_configured(&net_conf->back); } return ipv4_if_configured(&net_conf->front); } bool ipv6_configured(struct net_config *net_conf) { if (net_conf->back_iface_enabled) { return ipv6_if_configured(&net_conf->front) && 
ipv6_if_configured(&net_conf->back); } return ipv6_if_configured(&net_conf->front); } void send_pkts(uint8_t port, uint16_t tx_queue, uint16_t num_pkts, struct rte_mbuf **bufs) { uint16_t i, num_tx_succ; if (num_pkts == 0) return; /* Send burst of TX packets, to second port of pair. */ num_tx_succ = rte_eth_tx_burst(port, tx_queue, bufs, num_pkts); /* XXX #71 Do something better here! For now, free any unsent packets. */ if (unlikely(num_tx_succ < num_pkts)) { for (i = num_tx_succ; i < num_pkts; i++) drop_packet(bufs[i]); } } /* * Optimized generic implementation of RSS hash function. * If you want the calculated hash value matches NIC RSS value, * you have to use special converted key with rte_convert_rss_key() fn. * @param input_tuple * Pointer to input tuple with network order. * @param input_len * Length of input_tuple in 4-bytes chunks. * @param *rss_key * Pointer to RSS hash key. * @return * Calculated hash value. */ static inline uint32_t gk_softrss_be(const uint32_t *input_tuple, uint32_t input_len, const uint8_t *rss_key) { uint32_t i; uint32_t j; uint32_t ret = 0; for (j = 0; j < input_len; j++) { /* * Need to use little endian, * since it takes ordering as little endian in both bytes and bits. */ uint32_t val = rte_be_to_cpu_32(input_tuple[j]); for (i = 0; i < 32; i++) if (val & (1 << (31 - i))) { /* * The cast (uint64_t) is needed because when * @i == 0, the expression requires a 32-bit * shift of a 32-bit unsigned integer, * what is undefined. * The C standard only defines bit shifting * up to the bit-size of the integer minus one. * Finally, the cast (uint32_t) avoid promoting * the expression before the bit-or (i.e. `|`) * to uint64_t. 
*/ ret ^= ((const uint32_t *)rss_key)[j] << i | (uint32_t)((uint64_t) (((const uint32_t *)rss_key) [j + 1]) >> (32 - i)); } } return ret; } uint32_t rss_flow_hash(const struct gatekeeper_if *iface, const struct ip_flow *flow) { if (flow->proto == RTE_ETHER_TYPE_IPV4) { RTE_BUILD_BUG_ON(sizeof(flow->f.v4) % sizeof(uint32_t) != 0); return gk_softrss_be((uint32_t *)&flow->f, (sizeof(flow->f.v4)/sizeof(uint32_t)), iface->rss_key_be); } if (likely(flow->proto == RTE_ETHER_TYPE_IPV6)) { RTE_BUILD_BUG_ON(sizeof(flow->f.v6) % sizeof(uint32_t) != 0); return gk_softrss_be((uint32_t *)&flow->f, (sizeof(flow->f.v6)/sizeof(uint32_t)), iface->rss_key_be); } rte_panic("%s(): unknown protocol: %i\n", __func__, flow->proto); return 0; } ```
/content/code_sandbox/lib/net.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
24,701
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_GT_LUA_LPM_H_ #define _GATEKEEPER_GT_LUA_LPM_H_ #include <lualib.h> void lualpm_openlib(lua_State *L); #endif /* _GATEKEEPER_GT_LUA_LPM_H_ */ ```
/content/code_sandbox/gt/lua_lpm.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
156
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <stdbool.h> #include <arpa/inet.h> #include <lualib.h> #include <lauxlib.h> #include <netinet/ip.h> #include <math.h> #include <unistd.h> #include <rte_log.h> #include <rte_ether.h> #include <rte_lcore.h> #include <rte_malloc.h> #include <rte_random.h> #include <rte_cycles.h> #include <rte_common.h> #include <rte_byteorder.h> #include "gatekeeper_lls.h" #include "gatekeeper_acl.h" #include "gatekeeper_ggu.h" #include "gatekeeper_ipip.h" #include "gatekeeper_gk.h" #include "gatekeeper_gt.h" #include "gatekeeper_main.h" #include "gatekeeper_net.h" #include "gatekeeper_launch.h" #include "gatekeeper_l2.h" #include "gatekeeper_varip.h" #include "lua_lpm.h" static int get_block_idx(struct gt_config *gt_conf, unsigned int lcore_id) { int i; for (i = 0; i < gt_conf->num_lcores; i++) if (gt_conf->lcores[i] == lcore_id) return i; rte_panic("Unexpected condition: lcore %u is not running a gt block\n", lcore_id); return 0; } static int gt_setup_rss(struct gt_config *gt_conf) { int i; uint16_t port_in = gt_conf->net->front.id; uint16_t gt_queues[gt_conf->num_lcores]; for (i = 0; i < gt_conf->num_lcores; i++) gt_queues[i] = gt_conf->instances[i].rx_queue; return gatekeeper_setup_rss(port_in, gt_queues, gt_conf->num_lcores); } static int gt_parse_incoming_pkt(struct rte_mbuf *pkt, struct gt_packet_headers *info) { uint8_t inner_ip_ver; uint16_t parsed_len; int outer_ipv6_hdr_len = 0; struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv4_hdr *outer_ipv4_hdr = NULL; struct rte_ipv6_hdr 
*outer_ipv6_hdr = NULL; struct rte_ipv4_hdr *inner_ipv4_hdr = NULL; struct rte_ipv6_hdr *inner_ipv6_hdr = NULL; info->frag = false; info->l2_hdr = eth_hdr; info->outer_ethertype = rte_be_to_cpu_16(pkt_in_skip_l2(pkt, eth_hdr, &info->outer_l3_hdr)); parsed_len = pkt_in_l2_hdr_len(pkt); switch (info->outer_ethertype) { case RTE_ETHER_TYPE_IPV4: if (pkt->data_len < parsed_len + sizeof(struct rte_ipv4_hdr)) return -1; outer_ipv4_hdr = (struct rte_ipv4_hdr *)info->outer_l3_hdr; if (outer_ipv4_hdr->next_proto_id != IPPROTO_IPIP) return -1; parsed_len += ipv4_hdr_len(outer_ipv4_hdr); info->priority = (outer_ipv4_hdr->type_of_service >> 2); info->outer_ecn = outer_ipv4_hdr->type_of_service & IPTOS_ECN_MASK; break; case RTE_ETHER_TYPE_IPV6: { uint32_t vtc_flow; uint8_t encapsulated_proto; if (pkt->data_len < parsed_len + sizeof(struct rte_ipv6_hdr)) return -1; outer_ipv6_hdr = (struct rte_ipv6_hdr *)info->outer_l3_hdr; outer_ipv6_hdr_len = ipv6_skip_exthdr(outer_ipv6_hdr, pkt->data_len - parsed_len, &encapsulated_proto); if (outer_ipv6_hdr_len < 0) { G_LOG(ERR, "Failed to parse the packet's outer IPv6 extension headers\n"); return -1; } if (encapsulated_proto != IPPROTO_IPV6) return -1; parsed_len += outer_ipv6_hdr_len; vtc_flow = rte_be_to_cpu_32(outer_ipv6_hdr->vtc_flow); info->priority = ((vtc_flow >> 20) & 0xFF) >> 2; info->outer_ecn = (vtc_flow >> 20) & IPTOS_ECN_MASK; break; } default: return -1; } if (pkt->data_len < parsed_len + sizeof(struct rte_ipv4_hdr)) return -1; if (outer_ipv4_hdr != NULL) { inner_ipv4_hdr = (struct rte_ipv4_hdr *)ipv4_skip_exthdr(outer_ipv4_hdr); } else { inner_ipv4_hdr = (struct rte_ipv4_hdr *)( (uint8_t *)outer_ipv6_hdr + outer_ipv6_hdr_len); } inner_ip_ver = (inner_ipv4_hdr->version_ihl & 0xF0) >> 4; info->inner_l3_hdr = inner_ipv4_hdr; if (inner_ip_ver == 4) { info->inner_ip_ver = RTE_ETHER_TYPE_IPV4; info->l4_proto = inner_ipv4_hdr->next_proto_id; info->l4_hdr = ipv4_skip_exthdr(inner_ipv4_hdr); if 
(rte_ipv4_frag_pkt_is_fragmented(inner_ipv4_hdr)) { info->frag = true; info->l2_outer_l3_len = parsed_len; info->inner_l3_len = ipv4_hdr_len(inner_ipv4_hdr); info->frag_hdr = NULL; } parsed_len += ipv4_hdr_len(inner_ipv4_hdr); } else if (likely(inner_ip_ver == 6)) { int inner_ipv6_len; if (pkt->data_len < parsed_len + sizeof(struct rte_ipv6_hdr)) return -1; inner_ipv6_hdr = (struct rte_ipv6_hdr *)info->inner_l3_hdr; inner_ipv6_len = ipv6_skip_exthdr(inner_ipv6_hdr, pkt->data_len - parsed_len, &info->l4_proto); if (inner_ipv6_len < 0) { G_LOG(ERR, "Failed to parse the packet's inner IPv6 extension headers\n"); return -1; } info->inner_ip_ver = RTE_ETHER_TYPE_IPV6; info->l4_hdr = (uint8_t *)inner_ipv6_hdr + inner_ipv6_len; info->frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(inner_ipv6_hdr); if (info->frag_hdr != NULL) { info->frag = true; info->l2_outer_l3_len = parsed_len; info->inner_l3_len = inner_ipv6_len; } parsed_len += inner_ipv6_len; } else return -1; info->upper_len = pkt->data_len - parsed_len; return 0; } static struct rte_mbuf * gt_reassemble_incoming_pkt(struct rte_mbuf *pkt, uint64_t tms, struct gt_packet_headers *info, struct rte_ip_frag_death_row *death_row, struct gt_instance *instance) { /* Prepare mbuf: setup l2_len/l3_len. */ pkt->l2_len = info->l2_outer_l3_len; pkt->l3_len = info->inner_l3_len; if (info->inner_ip_ver == RTE_ETHER_TYPE_IPV4) { /* Process this IPv4 fragment. */ return rte_ipv4_frag_reassemble_packet( instance->frag_tbl, death_row, pkt, tms, info->inner_l3_hdr); } if (likely(info->inner_ip_ver == RTE_ETHER_TYPE_IPV6)) { /* Process this IPv6 fragment. 
*/ return rte_ipv6_frag_reassemble_packet( instance->frag_tbl, death_row, pkt, tms, info->inner_l3_hdr, info->frag_hdr); } rte_panic("%s() at lcore %u: unexpected condition: packet with unknown IP version %hu\n", __func__, rte_lcore_id(), info->inner_ip_ver); return NULL; } #define CTYPE_STRUCT_GT_PACKET_HEADERS_PTR "struct gt_packet_headers *" #define CTYPE_STRUCT_GGU_POLICY_PTR "struct ggu_policy *" struct lua_lookup_arg { struct gt_packet_headers *pkt_info; struct ggu_policy *policy; bool result; }; /* * ATTENTION * ALL Lua calls, including the lua_call(), may raise an exception, * so this function must be called with lua_cpcall(). */ static int l_lookup_policy_decision(lua_State *L) { struct lua_lookup_arg *arg = lua_touserdata(L, 1); uint32_t correct_ctypeid_gt_packet_headers; uint32_t correct_ctypeid_ggu_policy; void *gt_pkt_hdr_cdata; void *ggu_policy_cdata; lua_getglobal(L, "lookup_policy"); correct_ctypeid_gt_packet_headers = luaL_get_ctypeid(L, CTYPE_STRUCT_GT_PACKET_HEADERS_PTR); gt_pkt_hdr_cdata = luaL_pushcdata(L, correct_ctypeid_gt_packet_headers, sizeof(struct gt_packet_headers *)); *(struct gt_packet_headers **)gt_pkt_hdr_cdata = arg->pkt_info; correct_ctypeid_ggu_policy = luaL_get_ctypeid(L, CTYPE_STRUCT_GGU_POLICY_PTR); ggu_policy_cdata = luaL_pushcdata(L, correct_ctypeid_ggu_policy, sizeof(struct ggu_policy *)); *(struct ggu_policy **)ggu_policy_cdata = arg->policy; lua_call(L, 2, 1); arg->result = lua_toboolean(L, -1); return 0; } static uint64_t lua_mem(lua_State *L) { return (uint64_t)lua_gc(L, LUA_GCCOUNT, 0) * 1024 + lua_gc(L, LUA_GCCOUNTB, 0); } static int lookup_policy_decision(struct gt_packet_headers *pkt_info, struct ggu_policy *policy, struct gt_instance *instance) { struct lua_lookup_arg arg = { .pkt_info = pkt_info, .policy = policy, .result = false, }; bool first_time_running = true; /* * Make @policy invalid, so caller can identify when @policy has not * been filled in. 
*/ policy->state = -1; policy->flow.proto = pkt_info->inner_ip_ver; if (pkt_info->inner_ip_ver == RTE_ETHER_TYPE_IPV4) { struct rte_ipv4_hdr *ip4_hdr = pkt_info->inner_l3_hdr; policy->flow.f.v4.src.s_addr = ip4_hdr->src_addr; policy->flow.f.v4.dst.s_addr = ip4_hdr->dst_addr; } else if (likely(pkt_info->inner_ip_ver == RTE_ETHER_TYPE_IPV6)) { struct rte_ipv6_hdr *ip6_hdr = pkt_info->inner_l3_hdr; rte_memcpy(policy->flow.f.v6.src.s6_addr, ip6_hdr->src_addr, sizeof(policy->flow.f.v6.src.s6_addr)); rte_memcpy(policy->flow.f.v6.dst.s6_addr, ip6_hdr->dst_addr, sizeof(policy->flow.f.v6.dst.s6_addr)); } else { G_LOG(CRIT, "%s(): unexpected condition: non-IP packet with Ethernet type: %i\n", __func__, pkt_info->inner_ip_ver); return -EINVAL; } memset(&policy->params, 0, sizeof(policy->params)); while (true) { uint64_t mem_before, mem_after; int ret = lua_cpcall(instance->lua_state, l_lookup_policy_decision, &arg); if (likely(ret == 0)) break; mem_before = lua_mem(instance->lua_state); G_LOG(ERR, "%s(): Lua function lookup_policy() failed%s: %s. Memory allocated in Lua: %" PRIu64 " bytes\n", __func__, first_time_running ? "" : " AGAIN", lua_tostring(instance->lua_state, -1), mem_before); /* * Do not test for (ret != LUA_ERRMEM) because the policy * may have tried to catch the exception. If so, the error * code LUA_ERRMEM may have been lost. For example, * the error code goes from LUA_ERRMEM to LUA_ERRRUN if * the policy produces another error while handling the * original out-of-memory error. */ if (unlikely(!first_time_running)) return -EFAULT; first_time_running = false; lua_gc(instance->lua_state, LUA_GCCOLLECT, 0); mem_after = lua_mem(instance->lua_state); if (mem_after >= mem_before) { G_LOG(ERR, "%s(): cannot retry Lua function lookup_policy() because there is no memory to release. 
There was %" PRIu64 " bytes before running Lua's garbage collector, and there is %" PRIu64 " bytes afterwards\n", __func__, mem_before, mem_after); return -ENOMEM; } /* * Although the next log entry is not an error per se, * it has the log level ERR instead of WARNING to guarantee * that it follows the previous log entry. */ G_LOG(ERR, "%s(): retrying Lua function lookup_policy()... There was %" PRIu64 " bytes before running Lua's garbage collector, and there is %" PRIu64 " bytes afterwards\n", __func__, mem_before, mem_after); } lua_settop(instance->lua_state, 0); return arg.result; } static int lookup_frag_punish_policy_decision(struct gt_packet_headers *pkt_info, struct ggu_policy *policy, struct gt_instance *instance) { void *ggu_policy_cdata; uint32_t correct_ctypeid_ggu_policy = luaL_get_ctypeid( instance->lua_state, CTYPE_STRUCT_GGU_POLICY_PTR); policy->flow.proto = pkt_info->inner_ip_ver; if (pkt_info->inner_ip_ver == RTE_ETHER_TYPE_IPV4) { struct rte_ipv4_hdr *ip4_hdr = pkt_info->inner_l3_hdr; policy->flow.f.v4.src.s_addr = ip4_hdr->src_addr; policy->flow.f.v4.dst.s_addr = ip4_hdr->dst_addr; } else if (likely(pkt_info->inner_ip_ver == RTE_ETHER_TYPE_IPV6)) { struct rte_ipv6_hdr *ip6_hdr = pkt_info->inner_l3_hdr; rte_memcpy(policy->flow.f.v6.src.s6_addr, ip6_hdr->src_addr, sizeof(policy->flow.f.v6.src.s6_addr)); rte_memcpy(policy->flow.f.v6.dst.s6_addr, ip6_hdr->dst_addr, sizeof(policy->flow.f.v6.dst.s6_addr)); } else { G_LOG(CRIT, "%s(): unexpected condition: non-IP packet with Ethernet type: %i\n", __func__, pkt_info->inner_ip_ver); return -1; } lua_getglobal(instance->lua_state, "lookup_frag_punish_policy"); ggu_policy_cdata = luaL_pushcdata(instance->lua_state, correct_ctypeid_ggu_policy, sizeof(struct ggu_policy *)); *(struct ggu_policy **)ggu_policy_cdata = policy; if (lua_pcall(instance->lua_state, 1, 0, 0) != 0) { G_LOG(ERR, "Error running Lua function lookup_frag_punish_policy(): %s\n", lua_tostring(instance->lua_state, -1)); return -1; } return 0; 
} static inline bool is_valid_dest_addr(struct gt_config *gt_conf, struct gt_packet_headers *pkt_info) { return (pkt_info->outer_ethertype == RTE_ETHER_TYPE_IPV4 && ((struct rte_ipv4_hdr *) pkt_info->outer_l3_hdr)->dst_addr == gt_conf->net->front.ip4_addr.s_addr) || (pkt_info->outer_ethertype == RTE_ETHER_TYPE_IPV6 && memcmp(((struct rte_ipv6_hdr *) pkt_info->outer_l3_hdr)->dst_addr, gt_conf->net->front.ip6_addr.s6_addr, sizeof(gt_conf->net->front.ip6_addr)) == 0); } static void print_ip_err_msg(struct gt_packet_headers *pkt_info) { char src[128]; char dst[128]; if (pkt_info->outer_ethertype == RTE_ETHER_TYPE_IPV4) { if (inet_ntop(AF_INET, &((struct rte_ipv4_hdr *) pkt_info->outer_l3_hdr)->src_addr, src, sizeof(src)) == NULL) { G_LOG(ERR, "%s: failed to convert a number to an IPv4 address (%s)\n", __func__, strerror(errno)); return; } if (inet_ntop(AF_INET, &((struct rte_ipv4_hdr *) pkt_info->outer_l3_hdr)->dst_addr, dst, sizeof(dst)) == NULL) { G_LOG(ERR, "%s: failed to convert a number to an IPv4 address (%s)\n", __func__, strerror(errno)); return; } } else { if (inet_ntop(AF_INET6, &((struct rte_ipv6_hdr *) pkt_info->outer_l3_hdr)->src_addr, src, sizeof(src)) == NULL) { G_LOG(ERR, "%s: failed to convert a number to an IPv6 address (%s)\n", __func__, strerror(errno)); return; } if (inet_ntop(AF_INET6, &((struct rte_ipv6_hdr *) pkt_info->outer_l3_hdr)->dst_addr, dst, sizeof(dst)) == NULL) { G_LOG(ERR, "%s: failed to convert a number to an IPv6 address (%s)\n", __func__, strerror(errno)); return; } } G_LOG(ALERT, "Receiving a packet with IP source address %s, and destination address %s, whose destination IP address is not the Grantor server itself\n", src, dst); } static void gt_arp_and_nd_req_cb(const struct lls_map *map, void *arg, __attribute__((unused))enum lls_reply_ty ty, int *pcall_again) { struct ether_cache *eth_cache = arg; if (pcall_again == NULL) { clear_ether_cache(eth_cache); return; } /* * Deal with concurrency control by sequential lock * on the 
nexthop entry. */ write_seqlock(&eth_cache->lock); rte_ether_addr_copy(&map->ha, &eth_cache->l2_hdr.eth_hdr.dst_addr); eth_cache->stale = map->stale; write_sequnlock(&eth_cache->lock); *pcall_again = true; } /* * Fill up the Ethernet cached header. * Note that the destination MAC address should be filled up by LLS. */ static int gt_fill_up_ether_cache_locked(struct ether_cache *eth_cache, uint16_t inner_ip_ver, void *ip_dst, struct gatekeeper_if *iface) { int ret; unsigned lcore_id = rte_lcore_id(); uint16_t vlan_tag_be; eth_cache->stale = true; eth_cache->ip_addr.proto = inner_ip_ver; if (inner_ip_ver == RTE_ETHER_TYPE_IPV4) { vlan_tag_be = iface->ipv4_vlan_tag_be; rte_memcpy(&eth_cache->ip_addr.ip.v4, ip_dst, sizeof(eth_cache->ip_addr.ip.v4)); } else { vlan_tag_be = iface->ipv6_vlan_tag_be; rte_memcpy(&eth_cache->ip_addr.ip.v6, ip_dst, sizeof(eth_cache->ip_addr.ip.v6)); } if (iface->vlan_insert) fill_vlan_hdr(&eth_cache->l2_hdr.eth_hdr, vlan_tag_be, inner_ip_ver); else { eth_cache->l2_hdr.eth_hdr.ether_type = rte_cpu_to_be_16(inner_ip_ver); } rte_ether_addr_copy(&iface->eth_addr, &eth_cache->l2_hdr.eth_hdr.src_addr); rte_atomic32_set(&eth_cache->ref_cnt, 1); if (inner_ip_ver == RTE_ETHER_TYPE_IPV4) { ret = hold_arp(gt_arp_and_nd_req_cb, eth_cache, ip_dst, lcore_id); } else { ret = hold_nd(gt_arp_and_nd_req_cb, eth_cache, ip_dst, lcore_id); } if (ret < 0) clear_ether_cache(eth_cache); return ret; } static int drop_cache_entry_randomly(struct neighbor_hash_table *neigh, uint16_t ip_ver) { int ret; uint32_t entry_id = rte_rand() % neigh->tbl_size; struct ether_cache *eth_cache; uint32_t entry_start_idx = entry_id; while (true) { eth_cache = &neigh->cache_tbl[entry_id]; if (rte_atomic32_read(&eth_cache->ref_cnt) == 0) { entry_id = (entry_id + 1) % neigh->tbl_size; eth_cache = NULL; } else break; if (entry_start_idx == entry_id) break; } if (eth_cache == NULL) return -1; if (ip_ver == RTE_ETHER_TYPE_IPV4) { ret = put_arp(&eth_cache->ip_addr.ip.v4, rte_lcore_id()); if 
(ret < 0) return ret; ret = rte_hash_del_key(neigh->hash_table, &eth_cache->ip_addr.ip.v4); if (ret < 0) { G_LOG(CRIT, "Failed to delete an Ethernet cache entry from the IPv4 neighbor table at %s, we are not trying to recover from this failure\n", __func__); } return ret; } if (likely(ip_ver == RTE_ETHER_TYPE_IPV6)) { ret = put_nd(&eth_cache->ip_addr.ip.v6, rte_lcore_id()); if (ret < 0) return ret; ret = rte_hash_del_key(neigh->hash_table, &eth_cache->ip_addr.ip.v6); if (ret < 0) { G_LOG(CRIT, "Failed to delete an Ethernet cache entry from the IPv6 neighbor table at %s, we are not trying to recover from this failure\n", __func__); } return ret; } return -1; } static struct ether_cache * get_new_ether_cache(struct neighbor_hash_table *neigh) { int i; for (i = 0; i < neigh->tbl_size; i++) { if (rte_atomic32_read(&neigh->cache_tbl[i].ref_cnt) == 0) return &neigh->cache_tbl[i]; } return NULL; } static struct ether_cache * gt_neigh_get_ether_cache(struct neighbor_hash_table *neigh, uint16_t inner_ip_ver, void *ip_dst, struct gatekeeper_if *iface) { int ret; struct ether_cache *eth_cache = lookup_ether_cache(neigh, ip_dst); if (eth_cache != NULL) return eth_cache; eth_cache = get_new_ether_cache(neigh); if (eth_cache == NULL) { ret = drop_cache_entry_randomly(neigh, inner_ip_ver); if (ret < 0) return NULL; eth_cache = get_new_ether_cache(neigh); if (eth_cache == NULL) { G_LOG(WARNING, "Failed to get a new Ethernet cache entry from the neighbor hash table at %s, the cache is overflowing\n", __func__); return NULL; } } ret = gt_fill_up_ether_cache_locked( eth_cache, inner_ip_ver, ip_dst, iface); if (ret < 0) return NULL; ret = rte_hash_add_key_data(neigh->hash_table, ip_dst, eth_cache); if (ret == 0) return eth_cache; G_LOG(ERR, "Failed to add a cache entry to the neighbor hash table at %s\n", __func__); if (inner_ip_ver == RTE_ETHER_TYPE_IPV4) put_arp(ip_dst, rte_lcore_id()); else put_nd(ip_dst, rte_lcore_id()); /* * By calling put_xxx(), the LLS block will call * 
gt_arp_and_nd_req_cb(), which, in turn, will call * clear_ether_cache(), so we can return directly here. */ return NULL; } static int decap_and_fill_eth(struct rte_mbuf *m, struct gt_config *gt_conf, struct gt_packet_headers *pkt_info, struct gt_instance *instance) { uint16_t vlan_tag_be; struct neighbor_hash_table *neigh; struct ether_cache *eth_cache; void *ip_dst; bool is_neighbor; int bytes_to_add; struct gatekeeper_if *iface = &gt_conf->net->front; if (pkt_info->inner_ip_ver == RTE_ETHER_TYPE_IPV4) { /* * The Full-functionality Option for setting ECN bits in * IP-in-IP packets. RFC 3168, section 9.1.1. * * If the outer header's ECN codepoint is CE and the inner * header's ECN codepoint is not CE, set it and clear the * checksum so that hardware can recompute it. */ struct rte_ipv4_hdr *inner_ipv4_hdr = pkt_info->inner_l3_hdr; if (((inner_ipv4_hdr->type_of_service & IPTOS_ECN_MASK) != IPTOS_ECN_CE) && (pkt_info->outer_ecn == IPTOS_ECN_CE)) { inner_ipv4_hdr->type_of_service |= IPTOS_ECN_CE; m->l3_len = ipv4_hdr_len(inner_ipv4_hdr); set_ipv4_checksum(iface, m, inner_ipv4_hdr); } neigh = &instance->neigh; ip_dst = &inner_ipv4_hdr->dst_addr; is_neighbor = ip4_same_subnet(iface->ip4_addr.s_addr, *(uint32_t *)ip_dst, iface->ip4_mask.s_addr); vlan_tag_be = iface->ipv4_vlan_tag_be; } else if (likely(pkt_info->inner_ip_ver == RTE_ETHER_TYPE_IPV6)) { /* * Since there's no checksum in the IPv6 header, skip the * extra comparisons and set the ECN bits if needed * (even if it's redundant). */ struct rte_ipv6_hdr *inner_ipv6_hdr = pkt_info->inner_l3_hdr; if (pkt_info->outer_ecn == IPTOS_ECN_CE) inner_ipv6_hdr->vtc_flow |= rte_cpu_to_be_32(IPTOS_ECN_CE << 20); neigh = &instance->neigh6; ip_dst = inner_ipv6_hdr->dst_addr; is_neighbor = ip6_same_subnet(&iface->ip6_addr, ip_dst, &iface->ip6_mask); vlan_tag_be = iface->ipv6_vlan_tag_be; } else return -1; bytes_to_add = pkt_info->outer_ethertype == RTE_ETHER_TYPE_IPV4 ? 
-sizeof(struct rte_ipv4_hdr) : -sizeof(struct rte_ipv6_hdr); if (adjust_pkt_len(m, iface, bytes_to_add) == NULL) { G_LOG(ERR, "Could not adjust packet length\n"); return -1; } if (!is_neighbor) { struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); struct rte_ether_hdr *raw_eth = pkt_info->l2_hdr; rte_ether_addr_copy(&raw_eth->src_addr, &eth_hdr->dst_addr); rte_ether_addr_copy(&raw_eth->dst_addr, &eth_hdr->src_addr); m->l2_len = iface->l2_len_out; if (iface->vlan_insert) fill_vlan_hdr(eth_hdr, vlan_tag_be, pkt_info->inner_ip_ver); else { eth_hdr->ether_type = rte_cpu_to_be_16(pkt_info->inner_ip_ver); } return 0; } /* * The destination MAC address comes from LLS block. */ eth_cache = gt_neigh_get_ether_cache(neigh, pkt_info->inner_ip_ver, ip_dst, iface); if (eth_cache == NULL) { /* * Note: the first packet to each new destination * will always be dropped, since we don't have an * Ethernet cache entry for it. */ return -1; } if (pkt_copy_cached_eth_header(m, eth_cache, iface->l2_len_out)) return -1; return 0; } static void fill_eth_hdr_reverse(struct gatekeeper_if *iface, struct rte_ether_hdr *eth_hdr, struct gt_packet_headers *pkt_info) { struct rte_ether_hdr *raw_eth = (struct rte_ether_hdr *)pkt_info->l2_hdr; rte_ether_addr_copy(&raw_eth->src_addr, &eth_hdr->dst_addr); rte_ether_addr_copy(&raw_eth->dst_addr, &eth_hdr->src_addr); if (iface->vlan_insert) { uint16_t vlan_tag_be = pkt_info->outer_ethertype == RTE_ETHER_TYPE_IPV4 ? iface->ipv4_vlan_tag_be : iface->ipv6_vlan_tag_be; fill_vlan_hdr(eth_hdr, vlan_tag_be, pkt_info->outer_ethertype); } else { eth_hdr->ether_type = rte_cpu_to_be_16(pkt_info->outer_ethertype); } } /* * When creating a new notification packet, set all of the header * fields from each layer as much as possible. Fields that need to * wait to be filled until the packet is ready to be sent are * filled in prep_notify_pkt(). 
 */
static void
fill_notify_pkt_hdr(struct rte_mbuf *notify_pkt,
	struct gt_packet_headers *pkt_info, struct gt_config *gt_conf)
{
	uint16_t ethertype = pkt_info->outer_ethertype;
	struct rte_ether_hdr *notify_eth;
	struct rte_ipv4_hdr *notify_ipv4 = NULL;
	struct rte_ipv6_hdr *notify_ipv6 = NULL;
	struct rte_udp_hdr *notify_udp;
	struct ggu_common_hdr *notify_ggu;
	struct gatekeeper_if *iface = &gt_conf->net->front;
	size_t l2_len = iface->l2_len_out;

	/*
	 * Reserve room for the L2, IP, UDP, and GGU headers with a single
	 * append; the header pointers are laid out back to back.
	 *
	 * NOTE(review): rte_pktmbuf_append() returns NULL when the mbuf
	 * lacks tailroom; this code assumes a freshly allocated mbuf always
	 * has room for the headers -- TODO confirm against the pool's
	 * data room size.
	 */
	if (ethertype == RTE_ETHER_TYPE_IPV4) {
		notify_eth = (struct rte_ether_hdr *)rte_pktmbuf_append(
			notify_pkt, l2_len + sizeof(struct rte_ipv4_hdr) +
			sizeof(struct rte_udp_hdr) +
			sizeof(struct ggu_common_hdr));
		notify_ipv4 = (struct rte_ipv4_hdr *)
			((uint8_t *)notify_eth + l2_len);
		notify_udp = (struct rte_udp_hdr *)&notify_ipv4[1];
		notify_ggu = (struct ggu_common_hdr *)&notify_udp[1];
	} else if (likely(ethertype == RTE_ETHER_TYPE_IPV6)) {
		notify_eth = (struct rte_ether_hdr *)rte_pktmbuf_append(
			notify_pkt, l2_len + sizeof(struct rte_ipv6_hdr) +
			sizeof(struct rte_udp_hdr) +
			sizeof(struct ggu_common_hdr));
		notify_ipv6 = (struct rte_ipv6_hdr *)
			((uint8_t *)notify_eth + l2_len);
		notify_udp = (struct rte_udp_hdr *)&notify_ipv6[1];
		notify_ggu = (struct ggu_common_hdr *)&notify_udp[1];
	} else
		rte_panic("Unexpected condition: gt fills up a notify packet with unknown ethernet type %hu\n",
			ethertype);

	memset(notify_ggu, 0, sizeof(*notify_ggu));
	notify_ggu->version = GGU_PD_VER;

	/* Fill up the link-layer header. */
	fill_eth_hdr_reverse(iface, notify_eth, pkt_info);
	notify_pkt->l2_len = l2_len;

	/* Fill up the IP header. */
	if (ethertype == RTE_ETHER_TYPE_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr =
			(struct rte_ipv4_hdr *)pkt_info->outer_l3_hdr;

		/* Fill up the IPv4 header. */
		notify_ipv4->version_ihl = IP_VHL_DEF;
		notify_ipv4->packet_id = 0;
		notify_ipv4->fragment_offset = IP_DN_FRAGMENT_FLAG;
		notify_ipv4->time_to_live = IP_DEFTTL;
		notify_ipv4->next_proto_id = IPPROTO_UDP;
		/* The source address is the Grantor server IP address. */
		notify_ipv4->src_addr = ipv4_hdr->dst_addr;
		/*
		 * The destination address is the
		 * Gatekeeper server IP address.
		 */
		notify_ipv4->dst_addr = ipv4_hdr->src_addr;

		notify_pkt->l3_len = sizeof(struct rte_ipv4_hdr);
		notify_pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;
		/* IPv4 checksum set in prep_notify_pkt(). */
	} else if (likely(ethertype == RTE_ETHER_TYPE_IPV6)) {
		struct rte_ipv6_hdr *ipv6_hdr =
			(struct rte_ipv6_hdr *)pkt_info->outer_l3_hdr;

		/* Fill up the outer IPv6 header. */
		notify_ipv6->vtc_flow =
			rte_cpu_to_be_32(IPv6_DEFAULT_VTC_FLOW);
		notify_ipv6->proto = IPPROTO_UDP;
		notify_ipv6->hop_limits = iface->ipv6_default_hop_limits;

		/* Source/destination are the reverse of the incoming packet. */
		rte_memcpy(notify_ipv6->src_addr, ipv6_hdr->dst_addr,
			sizeof(notify_ipv6->src_addr));
		rte_memcpy(notify_ipv6->dst_addr, ipv6_hdr->src_addr,
			sizeof(notify_ipv6->dst_addr));

		notify_pkt->l3_len = sizeof(struct rte_ipv6_hdr);
		notify_pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
	}

	/*
	 * Fill up the UDP header. The GGU ports in @gt_conf are already in
	 * network byte order; run_gt() converts them once at startup.
	 * Datagram length and checksum are set in prep_notify_pkt().
	 */
	notify_udp->src_port = gt_conf->ggu_src_port;
	notify_udp->dst_port = gt_conf->ggu_dst_port;
	notify_pkt->l4_len = sizeof(struct rte_udp_hdr);
}

/*
 * Log a policy decision that could not be delivered to Gatekeeper.
 * Callback suitable for ggu_policy_iterator(); @arg is unused.
 */
static void
print_unsent_policy(struct ggu_policy *policy,
	__attribute__((unused)) void *arg)
{
	int ret;
	char err_msg[1024];

	switch (policy->state) {
	case GK_REQUEST:
		ret = snprintf(err_msg, sizeof(err_msg),
			"%s(): GK_REQUEST is not a policy decision; there is a bug in the Lua policy\n",
			__func__);
		break;

	case GK_GRANTED:
		ret = snprintf(err_msg, sizeof(err_msg),
			"%s(): failed to send out the notification to Gatekeeper with policy decision [state: GK_GRANTED (%hhu), tx_rate_kib_sec: %u, cap_expire_sec: %u, next_renewal_ms: %u, renewal_step_ms: %u]",
			__func__, policy->state,
			policy->params.granted.tx_rate_kib_sec,
			policy->params.granted.cap_expire_sec,
			policy->params.granted.next_renewal_ms,
			policy->params.granted.renewal_step_ms);
		break;

	case GK_DECLINED:
		ret = snprintf(err_msg, sizeof(err_msg),
			"%s(): failed to send out the notification to Gatekeeper with policy decision [state: GK_DECLINED (%hhu), expire_sec: %u]",
			__func__, policy->state,
			policy->params.declined.expire_sec);
		break;

	case GK_BPF: {
		uint64_t *c = policy->params.bpf.cookie.mem;

		/* The format string below prints exactly eight words. */
		RTE_BUILD_BUG_ON(RTE_DIM(policy->params.bpf.cookie.mem) != 8);

		ret = snprintf(err_msg, sizeof(err_msg),
			"%s(): failed to send out the notification to Gatekeeper with policy decision [state: GK_BPF (%hhu), expire_sec: %u, program_index=%u, cookie="
			"%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
			", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
			", %016" PRIx64 ", %016" PRIx64 "]",
			__func__, policy->state, policy->params.bpf.expire_sec,
			policy->params.bpf.program_index,
			rte_be_to_cpu_64(c[0]), rte_be_to_cpu_64(c[1]),
			rte_be_to_cpu_64(c[2]), rte_be_to_cpu_64(c[3]),
			rte_be_to_cpu_64(c[4]), rte_be_to_cpu_64(c[5]),
			rte_be_to_cpu_64(c[6]), rte_be_to_cpu_64(c[7]));
		break;
	}

	default:
		ret = snprintf(err_msg, sizeof(err_msg),
			"%s(): unknown policy decision with state %hhu; there is a bug in the Lua policy\n",
			__func__, policy->state);
		break;
	}

	RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
	print_flow_err_msg(&policy->flow, -ENOENT, err_msg);
}

/* Print all unsent policy decisions in a notification packet. */
static void
print_unsent_policies(struct ggu_notify_pkt *ggu_pkt)
{
	/* The decision list starts right after the L2/L3/UDP/GGU headers. */
	unsigned int offset = ggu_pkt->buf->l2_len + ggu_pkt->buf->l3_len +
		sizeof(struct rte_udp_hdr) + sizeof(struct ggu_common_hdr);
	struct ggu_decision *ggu_decision = rte_pktmbuf_mtod_offset(
		ggu_pkt->buf, struct ggu_decision *, offset);
	unsigned int decision_list_len = ggu_pkt->buf->data_len - offset;

	ggu_policy_iterator(ggu_decision, decision_list_len,
		print_unsent_policy, NULL);
}

/*
 * Find the notification packet being buffered for the Gatekeeper
 * server specified in @pkt_info, if any.
 */
static struct ggu_notify_pkt *
find_notify_pkt(struct gt_config *gt_conf, struct gt_packet_headers *pkt_info,
	struct gt_instance *instance)
{
	unsigned int i;

	if (instance->num_ggu_pkts == 0)
		return NULL;

	/* Linear scan; the array is bounded by @max_ggu_notify_pkts. */
	for (i = 0; i < gt_conf->max_ggu_notify_pkts; i++) {
		struct ggu_notify_pkt *ggu_pkt = &instance->ggu_pkts[i];

		/* An entry without a buffer is an empty slot. */
		if (ggu_pkt->buf == NULL)
			continue;

		if (pkt_info->outer_ethertype != ggu_pkt->ipaddr.proto)
			continue;

		/* Match on the source address of the incoming packet. */
		if (ggu_pkt->ipaddr.proto == RTE_ETHER_TYPE_IPV4) {
			struct rte_ipv4_hdr *ipv4_hdr =
				(struct rte_ipv4_hdr *)pkt_info->outer_l3_hdr;
			if (ggu_pkt->ipaddr.ip.v4.s_addr == ipv4_hdr->src_addr)
				return ggu_pkt;
		} else if (likely(ggu_pkt->ipaddr.proto ==
				RTE_ETHER_TYPE_IPV6)) {
			struct rte_ipv6_hdr *ipv6_hdr =
				(struct rte_ipv6_hdr *)pkt_info->outer_l3_hdr;
			if (ipv6_addrs_equal(ipv6_hdr->src_addr,
					ggu_pkt->ipaddr.ip.v6.s6_addr))
				return ggu_pkt;
		}
	}

	return NULL;
}

/*
 * Finalize the IP and UDP length and checksum fields of a buffered
 * notification packet just before transmission.
 */
static void
prep_notify_pkt(struct ggu_notify_pkt *ggu_pkt, struct gatekeeper_if *iface)
{
	/*
	 * Complete the packet fields that can only be done
	 * when the packet is ready to be transmitted.
	 */
	struct rte_udp_hdr *notify_udp;
	/*
	 * Datagram length needs to be set before calling
	 * rte_ipv*_udptcp_cksum(). Although it doesn't
	 * need to be set for rte_ipv*_phdr_cksum(), do
	 * it here to avoid calculating it in multiple places.
	 */
	uint16_t dgram_len_be = rte_cpu_to_be_16(
		(uint16_t)(ggu_pkt->buf->data_len -
			ggu_pkt->buf->l2_len - ggu_pkt->buf->l3_len));

	if (ggu_pkt->ipaddr.proto == RTE_ETHER_TYPE_IPV4) {
		struct rte_ipv4_hdr *notify_ipv4 =
			rte_pktmbuf_mtod_offset(ggu_pkt->buf,
				struct rte_ipv4_hdr *, ggu_pkt->buf->l2_len);

		notify_ipv4->total_length = rte_cpu_to_be_16(
			ggu_pkt->buf->data_len - ggu_pkt->buf->l2_len);
		set_ipv4_checksum(iface, ggu_pkt->buf, notify_ipv4);

		notify_udp = rte_pktmbuf_mtod_offset(ggu_pkt->buf,
			struct rte_udp_hdr *,
			ggu_pkt->buf->l2_len + ggu_pkt->buf->l3_len);
		notify_udp->dgram_len = dgram_len_be;

		if (likely(iface->ipv4_hw_udp_cksum)) {
			/* Offload the UDP checksum. */
			ggu_pkt->buf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
			notify_udp->dgram_cksum =
				rte_ipv4_phdr_cksum(notify_ipv4,
					ggu_pkt->buf->ol_flags);
		} else {
			/*
			 * Software checksum; the field must be zero
			 * while the checksum is being computed.
			 */
			notify_udp->dgram_cksum = 0;
			notify_udp->dgram_cksum =
				rte_ipv4_udptcp_cksum(notify_ipv4, notify_udp);
		}
	} else if (likely(ggu_pkt->ipaddr.proto == RTE_ETHER_TYPE_IPV6)) {
		struct rte_ipv6_hdr *notify_ipv6 =
			rte_pktmbuf_mtod_offset(ggu_pkt->buf,
				struct rte_ipv6_hdr *, ggu_pkt->buf->l2_len);

		/*
		 * Distinct from @dgram_len_be because the IPv6
		 * payload field could in theory include the length
		 * of any extension headers.
		 */
		notify_ipv6->payload_len = rte_cpu_to_be_16(
			ggu_pkt->buf->data_len - ggu_pkt->buf->l2_len -
			sizeof(struct rte_ipv6_hdr));

		notify_udp = rte_pktmbuf_mtod_offset(ggu_pkt->buf,
			struct rte_udp_hdr *,
			ggu_pkt->buf->l2_len + ggu_pkt->buf->l3_len);
		notify_udp->dgram_len = dgram_len_be;

		if (likely(iface->ipv6_hw_udp_cksum)) {
			/* Offload the UDP checksum. */
			ggu_pkt->buf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
			notify_udp->dgram_cksum =
				rte_ipv6_phdr_cksum(notify_ipv6,
					ggu_pkt->buf->ol_flags);
		} else {
			notify_udp->dgram_cksum = 0;
			notify_udp->dgram_cksum =
				rte_ipv6_udptcp_cksum(notify_ipv6, notify_udp);
		}
	} else {
		rte_panic("%s() at lcore %u: unexpected condition: unknown IP version %hu\n",
			__func__, rte_lcore_id(), ggu_pkt->ipaddr.proto);
	}
}

/*
 * Transmit one buffered notification packet and release its slot.
 * On transmit failure, log the decisions it carried and free the mbuf.
 */
static void
send_notify_pkt(struct gt_config *gt_conf, struct gt_instance *instance,
	struct ggu_notify_pkt *ggu_pkt)
{
	prep_notify_pkt(ggu_pkt, &gt_conf->net->front);

	if (rte_eth_tx_burst(gt_conf->net->front.id, instance->tx_queue,
			&ggu_pkt->buf, 1) != 1) {
		print_unsent_policies(ggu_pkt);
		rte_pktmbuf_free(ggu_pkt->buf);
	}

	/* The slot is free regardless of whether the burst succeeded. */
	ggu_pkt->buf = NULL;
	instance->num_ggu_pkts--;
}

/* Send all saved policy decision notification packets being buffered.
 */
static void
flush_notify_pkts(struct gt_config *gt_conf, struct gt_instance *instance)
{
	unsigned int max_pkts = gt_conf->max_ggu_notify_pkts;
	struct rte_mbuf *bufs[max_pkts];
	int num_to_send = 0;
	int num_sent;
	int sent_count = 0;
	unsigned int i;

	if (instance->ggu_pkts == NULL || instance->num_ggu_pkts == 0)
		return;

	/* Collect all buffered packets into @bufs, finalizing each one. */
	for (i = 0; i < max_pkts; i++) {
		struct ggu_notify_pkt *ggu_pkt = &instance->ggu_pkts[i];

		if (ggu_pkt->buf == NULL)
			continue;

		prep_notify_pkt(ggu_pkt, &gt_conf->net->front);
		bufs[num_to_send++] = ggu_pkt->buf;
	}

	num_sent = rte_eth_tx_burst(gt_conf->net->front.id,
		instance->tx_queue, bufs, num_to_send);

	/*
	 * Release every slot. The burst consumes packets in the same
	 * order they were collected above, so the first @num_sent
	 * occupied slots were transmitted; the rest must be logged
	 * and freed here.
	 */
	for (i = 0; i < max_pkts; i++) {
		struct ggu_notify_pkt *ggu_pkt = &instance->ggu_pkts[i];

		if (ggu_pkt->buf == NULL)
			continue;

		if (unlikely(num_sent != num_to_send)) {
			if (sent_count < num_sent)
				sent_count++;
			else {
				print_unsent_policies(ggu_pkt);
				rte_pktmbuf_free(ggu_pkt->buf);
			}
		}

		ggu_pkt->buf = NULL;
		instance->num_ggu_pkts--;
	}

	RTE_VERIFY(instance->num_ggu_pkts == 0);
}

/*
 * Start building a new notification packet for the Gatekeeper
 * server indicated by @pkt_info.
 *
 * If there's no more room for a notification packet, then
 * send a random one to make room.
 */
static struct ggu_notify_pkt *
add_notify_pkt(struct gt_config *gt_conf, struct gt_instance *instance,
	struct gt_packet_headers *pkt_info)
{
	unsigned int max_pkts = gt_conf->max_ggu_notify_pkts;
	struct ggu_notify_pkt *ggu_pkt = NULL;
	unsigned int i;

	/* Find an available packet, sending a packet if necessary. */
	if (instance->num_ggu_pkts == max_pkts) {
		/* Evict a random buffered packet to make room. */
		int idx = rte_rand() % max_pkts;
		ggu_pkt = &instance->ggu_pkts[idx];
		send_notify_pkt(gt_conf, instance, ggu_pkt);
	} else {
		for (i = 0; i < max_pkts; i++) {
			if (instance->ggu_pkts[i].buf == NULL) {
				ggu_pkt = &instance->ggu_pkts[i];
				break;
			}
		}
	}
	RTE_VERIFY(ggu_pkt != NULL);

	/* Record which Gatekeeper server this packet is destined to. */
	ggu_pkt->ipaddr.proto = pkt_info->outer_ethertype;
	if (ggu_pkt->ipaddr.proto == RTE_ETHER_TYPE_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr =
			(struct rte_ipv4_hdr *)pkt_info->outer_l3_hdr;
		ggu_pkt->ipaddr.ip.v4.s_addr = ipv4_hdr->src_addr;
	} else if (likely(ggu_pkt->ipaddr.proto == RTE_ETHER_TYPE_IPV6)) {
		struct rte_ipv6_hdr *ipv6_hdr =
			(struct rte_ipv6_hdr *)pkt_info->outer_l3_hdr;
		rte_memcpy(ggu_pkt->ipaddr.ip.v6.s6_addr, ipv6_hdr->src_addr,
			sizeof(ggu_pkt->ipaddr.ip.v6.s6_addr));
	} else {
		rte_panic("%s() at lcore %u: unexpected condition: unknown IP version %hu\n",
			__func__, rte_lcore_id(), ggu_pkt->ipaddr.proto);
	}

	/* The slot stays free (buf == NULL) if allocation fails. */
	ggu_pkt->buf = rte_pktmbuf_alloc(instance->mp);
	if (ggu_pkt->buf == NULL) {
		G_LOG(ERR, "Failed to allocate notification packet\n");
		return NULL;
	}

	fill_notify_pkt_hdr(ggu_pkt->buf, pkt_info, gt_conf);
	instance->num_ggu_pkts++;
	return ggu_pkt;
}

/*
 * Return how many 4 bytes are used in @cookie.
 * All bytes after that are zeros.
 */
static unsigned int
find_cookie_len_4by(struct gk_bpf_cookie *cookie, unsigned int cookie_len)
{
	uint32_t *p = (uint32_t *)cookie;
	unsigned int n;
	int i;

	RTE_VERIFY(cookie_len <= sizeof(*cookie));

	/* Number of 4-byte words needed to hold @cookie_len bytes. */
	n = cookie_len / 4;
	if (unlikely(cookie_len % 4 != 0))
		n++;

	/* Trim trailing all-zero words. */
	for (i = n - 1; i >= 0; i--)
		if (p[i] != 0)
			return i + 1;
	return 0;
}

/*
 * To estimate the maximum size of an on-the-wire policy decision,
 * sum the size of the decision prefix (type and length fields) with
 * the size of the in-memory GGU policy struct. This is a slight
 * overestimate, which is acceptable for determining whether a
 * packet has enough room for another decision.
*/ #define GGU_MAX_DECISION_LEN (sizeof(struct ggu_decision) + \ sizeof(struct ggu_policy)) /* * Add a policy decision to a notification packet. If a notification * does not exist for this Gatekeeper server, then create one. */ static void fill_notify_pkt(struct ggu_policy *policy, struct gt_packet_headers *pkt_info, struct gt_instance *instance, struct gt_config *gt_conf) { struct ggu_notify_pkt *ggu_pkt; struct ggu_decision *ggu_decision; size_t params_offset; int cookie_len_4by = 0; if (unlikely(policy->flow.proto != RTE_ETHER_TYPE_IPV4 && policy->flow.proto != RTE_ETHER_TYPE_IPV6)) { G_LOG(ERR, "Policy decision with unknown protocol %u\n", policy->flow.proto); return; } switch (policy->state) { case GK_GRANTED: case GK_DECLINED: break; case GK_BPF: if (unlikely(policy->params.bpf.cookie_len > sizeof(policy->params.bpf.cookie))) { G_LOG(ERR, "Policy BPF decision with cookie length too long: %u\n", policy->params.bpf.cookie_len); print_unsent_policy(policy, NULL); return; } break; default: /* The state GK_REQUEST is unexpected here. */ print_unsent_policy(policy, NULL); return; } /* Get a GGU packet. */ ggu_pkt = find_notify_pkt(gt_conf, pkt_info, instance); if (ggu_pkt == NULL) { ggu_pkt = add_notify_pkt(gt_conf, instance, pkt_info); if (ggu_pkt == NULL) { print_unsent_policy(policy, NULL); return; } } /* Fill up the policy decision. 
*/ if (policy->flow.proto == RTE_ETHER_TYPE_IPV4 && policy->state == GK_DECLINED) { ggu_decision = (struct ggu_decision *) rte_pktmbuf_append(ggu_pkt->buf, sizeof(*ggu_decision) + sizeof(policy->flow.f.v4) + sizeof(policy->params.declined)); ggu_decision->type = GGU_DEC_IPV4_DECLINED; rte_memcpy(ggu_decision->ip_flow, &policy->flow.f.v4, sizeof(policy->flow.f.v4)); params_offset = sizeof(policy->flow.f.v4); } else if (policy->flow.proto == RTE_ETHER_TYPE_IPV6 && policy->state == GK_DECLINED) { ggu_decision = (struct ggu_decision *) rte_pktmbuf_append(ggu_pkt->buf, sizeof(*ggu_decision) + sizeof(policy->flow.f.v6) + sizeof(policy->params.declined)); ggu_decision->type = GGU_DEC_IPV6_DECLINED; rte_memcpy(ggu_decision->ip_flow, &policy->flow.f.v6, sizeof(policy->flow.f.v6)); params_offset = sizeof(policy->flow.f.v6); } else if (policy->flow.proto == RTE_ETHER_TYPE_IPV4 && policy->state == GK_GRANTED) { ggu_decision = (struct ggu_decision *) rte_pktmbuf_append(ggu_pkt->buf, sizeof(*ggu_decision) + sizeof(policy->flow.f.v4) + sizeof(policy->params.granted)); ggu_decision->type = GGU_DEC_IPV4_GRANTED; rte_memcpy(ggu_decision->ip_flow, &policy->flow.f.v4, sizeof(policy->flow.f.v4)); params_offset = sizeof(policy->flow.f.v4); } else if (policy->flow.proto == RTE_ETHER_TYPE_IPV6 && policy->state == GK_GRANTED) { ggu_decision = (struct ggu_decision *) rte_pktmbuf_append(ggu_pkt->buf, sizeof(*ggu_decision) + sizeof(policy->flow.f.v6) + sizeof(policy->params.granted)); ggu_decision->type = GGU_DEC_IPV6_GRANTED; rte_memcpy(ggu_decision->ip_flow, &policy->flow.f.v6, sizeof(policy->flow.f.v6)); params_offset = sizeof(policy->flow.f.v6); } else if (policy->flow.proto == RTE_ETHER_TYPE_IPV4 && policy->state == GK_BPF) { cookie_len_4by = find_cookie_len_4by(&policy->params.bpf.cookie, policy->params.bpf.cookie_len); ggu_decision = (struct ggu_decision *) rte_pktmbuf_append(ggu_pkt->buf, sizeof(*ggu_decision) + sizeof(policy->flow.f.v4) + sizeof(struct ggu_bpf_wire) + cookie_len_4by 
* 4); ggu_decision->type = GGU_DEC_IPV4_BPF; rte_memcpy(ggu_decision->ip_flow, &policy->flow.f.v4, sizeof(policy->flow.f.v4)); params_offset = sizeof(policy->flow.f.v4); } else if (likely(policy->flow.proto == RTE_ETHER_TYPE_IPV6 && policy->state == GK_BPF)) { cookie_len_4by = find_cookie_len_4by(&policy->params.bpf.cookie, policy->params.bpf.cookie_len); ggu_decision = (struct ggu_decision *) rte_pktmbuf_append(ggu_pkt->buf, sizeof(*ggu_decision) + sizeof(policy->flow.f.v6) + sizeof(struct ggu_bpf_wire) + cookie_len_4by * 4); ggu_decision->type = GGU_DEC_IPV6_BPF; rte_memcpy(ggu_decision->ip_flow, &policy->flow.f.v6, sizeof(policy->flow.f.v6)); params_offset = sizeof(policy->flow.f.v6); } else rte_panic("Unexpected condition: gt fills up a notify packet with unexpected policy state %u\n", policy->state); switch (policy->state) { case GK_GRANTED: { struct ggu_granted *granted_be = (struct ggu_granted *) (ggu_decision->ip_flow + params_offset); granted_be->tx_rate_kib_sec = rte_cpu_to_be_32( policy->params.granted.tx_rate_kib_sec); granted_be->cap_expire_sec = rte_cpu_to_be_32( policy->params.granted.cap_expire_sec); granted_be->next_renewal_ms = rte_cpu_to_be_32( policy->params.granted.next_renewal_ms); granted_be->renewal_step_ms = rte_cpu_to_be_32( policy->params.granted.renewal_step_ms); break; } case GK_DECLINED: { struct ggu_declined *declined_be = (struct ggu_declined *) (ggu_decision->ip_flow + params_offset); declined_be->expire_sec = rte_cpu_to_be_32( policy->params.declined.expire_sec); break; } case GK_BPF: { struct ggu_bpf_wire *bpf_wire_be = (struct ggu_bpf_wire *) (ggu_decision->ip_flow + params_offset); bpf_wire_be->expire_sec = rte_cpu_to_be_32( policy->params.bpf.expire_sec); bpf_wire_be->program_index = policy->params.bpf.program_index; bpf_wire_be->reserved = 0; bpf_wire_be->cookie_len_4by = cookie_len_4by; /* * It's reposibility of the BPF program to put * the cookie in network order (if needed) since Gatekeeper * does not know how the cookie is 
used. */ rte_memcpy(bpf_wire_be->cookie, &policy->params.bpf.cookie, cookie_len_4by * 4); break; } default: rte_panic("Unexpected condition: gt fills up a notify packet parameters with unexpected policy state %u\n", policy->state); } ggu_decision->res1 = 0; ggu_decision->res2 = 0; /* * If we're close to the end of the packet, possibly * without room for another decision, send it now. */ if (rte_pktmbuf_tailroom(ggu_pkt->buf) < GGU_MAX_DECISION_LEN) send_notify_pkt(gt_conf, instance, ggu_pkt); } /* * Use the @dr to notify the GK * about the punishment policies on declined flows * with fragmented packets. */ static void process_death_row(int punish, struct rte_ip_frag_death_row *death_row, struct gt_instance *instance, struct gt_config *gt_conf) { uint32_t i; for (i = 0; i < death_row->cnt; i++) { int ret; struct gt_packet_headers pkt_info; struct ggu_policy policy; if (!punish) goto free_packet; ret = gt_parse_incoming_pkt(death_row->row[i], &pkt_info); if (ret < 0) { G_LOG(WARNING, "Failed to parse the fragments at %s, and the packet doesn't trigger any policy consultation at all\n", __func__); rte_pktmbuf_free(death_row->row[i]); continue; } /* * Given the gravity of the issue, * we must send a decline to the gatekeeper server * to expire in 10 minutes and log our failsafe * action here. * Otherwise, a misconfigured grantor server can put * a large deployment at risk. */ ret = lookup_frag_punish_policy_decision( &pkt_info, &policy, instance); if (ret < 0) { policy.state = GK_DECLINED; policy.params.declined.expire_sec = 600; G_LOG(WARNING, "Failed to lookup the punishment policy for the packet fragment! Our failsafe action is to decline the flow for 10 minutes\n"); } /* * Add the policy decision to the notification * packet to be sent to the GT-GK Unit. 
*/ fill_notify_pkt(&policy, &pkt_info, instance, gt_conf); free_packet: rte_pktmbuf_free(death_row->row[i]); } death_row->cnt = 0; } static void gt_process_unparsed_incoming_pkt(struct acl_search *acl4, struct acl_search *acl6, uint16_t *num_arp, struct rte_mbuf **arp_bufs, struct rte_mbuf *pkt, uint16_t outer_ethertype) { switch (outer_ethertype) { case RTE_ETHER_TYPE_IPV4: add_pkt_acl(acl4, pkt); return; case RTE_ETHER_TYPE_IPV6: add_pkt_acl(acl6, pkt); return; case RTE_ETHER_TYPE_ARP: arp_bufs[(*num_arp)++] = pkt; return; } log_unknown_l2("gt", outer_ethertype); rte_pktmbuf_free(pkt); } static void return_message(struct gt_instance *instance) { int ret; size_t reply_len; struct dynamic_config *dy_conf = get_dy_conf(); struct dy_cmd_entry *entry; const char *reply_msg = lua_tolstring(instance->lua_state, -1, &reply_len); if (reply_msg == NULL) { G_LOG(WARNING, "New Lua update returned a NULL message\n"); goto out; } entry = mb_alloc_entry(&dy_conf->mb); if (entry == NULL) { G_LOG(ERR, "Failed to send new Lua update return to Dynamic config block at lcore %d\n", dy_conf->lcore_id); goto out; } if (unlikely(reply_len > RETURN_MSG_MAX_LEN)) { G_LOG(WARNING, "The return message length (%lu) exceeds the limit (%d)\n", reply_len, RETURN_MSG_MAX_LEN); reply_len = RETURN_MSG_MAX_LEN; } entry->op = GT_UPDATE_POLICY_RETURN; entry->u.gt.gt_lcore = rte_lcore_id(); entry->u.gt.length = reply_len; rte_memcpy(entry->u.gt.return_msg, reply_msg, reply_len); ret = mb_send_entry(&dy_conf->mb, entry); if (ret != 0) { G_LOG(ERR, "Failed to send new Lua update return to Dynamic config block at lcore %d\n", dy_conf->lcore_id); } out: rte_atomic16_inc(&dy_conf->num_returned_instances); } static void process_gt_cmd(struct gt_cmd_entry *entry, struct gt_instance *instance) { switch (entry->op) { case GT_UPDATE_POLICY: lua_close(instance->lua_state); instance->lua_state = entry->u.lua_state; G_LOG(NOTICE, "Successfully updated the Lua state\n"); break; case GT_UPDATE_POLICY_INCREMENTALLY: 
/* Load the compiled Lua bytecode, and run it. */ if ((luaL_loadbuffer(instance->lua_state, entry->u.bc.lua_bytecode, entry->u.bc.len, "incremental_update_of_gt_lua_state") != 0) || (lua_pcall(instance->lua_state, 0, !!entry->u.bc.is_returned, 0) != 0)) { G_LOG(ERR, "Failed to incrementally update Lua state: %s\n", lua_tostring(instance->lua_state, -1)); } else { G_LOG(NOTICE, "Successfully updated the Lua state incrementally\n"); } if (entry->u.bc.is_returned) { return_message(instance); lua_pop(instance->lua_state, 1); } rte_free(entry->u.bc.lua_bytecode); break; default: G_LOG(ERR, "Unknown command operation %u\n", entry->op); break; } } static void process_cmds_from_mailbox(struct gt_instance *instance, struct gt_config *gt_conf) { int i; int num_cmd; struct gt_cmd_entry *gt_cmds[gt_conf->mailbox_burst_size]; /* Load a set of commands from its mailbox ring. */ num_cmd = mb_dequeue_burst(&instance->mb, (void **)gt_cmds, gt_conf->mailbox_burst_size); for (i = 0; i < num_cmd; i++) { process_gt_cmd(gt_cmds[i], instance); mb_free_entry(&instance->mb, gt_cmds[i]); } } static inline void prefetch0_128_bytes(void *pointer) { #if RTE_CACHE_LINE_SIZE == 64 rte_prefetch0(pointer); rte_prefetch0(((char *)pointer) + RTE_CACHE_LINE_SIZE); #elif RTE_CACHE_LINE_SIZE == 128 rte_prefetch0(pointer); #else #error "Unsupported cache line size" #endif } static int gt_proc(void *arg) { unsigned int lcore = rte_lcore_id(); struct gt_config *gt_conf = (struct gt_config *)arg; unsigned int block_idx = get_block_idx(gt_conf, lcore); struct gt_instance *instance = &gt_conf->instances[block_idx]; uint64_t last_tsc = rte_rdtsc(); uint16_t port = gt_conf->net->front.id; uint16_t rx_queue = instance->rx_queue; uint16_t tx_queue = instance->tx_queue; uint64_t frag_scan_timeout_cycles = round( gt_conf->frag_scan_timeout_ms * rte_get_tsc_hz() / 1000.); unsigned int batch = 0; /* * The mbuf death row contains * packets to be freed. 
*/ struct rte_ip_frag_death_row death_row; uint16_t gt_max_pkt_burst; bool reassembling_enabled = gt_conf->reassembling_enabled; /* * GT instances need capabilities CAP_DAC_OVERRIDE and CAP_SYS_ADMIN * to allow policies to allocate more hugepages from the kernel * when dylib.update_gt_lua_states_incrementally() is called from * the dynamic configuration block. * More details on why these capabilities are needed are found in * dyn_cfg_proc(). */ cap_value_t caps[] = {CAP_DAC_OVERRIDE, CAP_SYS_ADMIN}; death_row.cnt = 0; gt_max_pkt_burst = gt_conf->max_pkt_burst; G_LOG(NOTICE, "The GT block is running at tid = %u\n", gettid()); if (needed_caps(RTE_DIM(caps), caps) < 0) { G_LOG(ERR, "Could not set needed capabilities\n"); exiting = true; } gt_conf_hold(gt_conf); while (likely(!exiting)) { int i; int ret; uint16_t num_rx; uint16_t num_tx = 0; uint16_t num_tx_succ; uint16_t num_arp = 0; uint64_t cur_tsc = rte_rdtsc(); struct rte_mbuf *rx_bufs[gt_max_pkt_burst]; struct rte_mbuf *tx_bufs[gt_max_pkt_burst]; struct rte_mbuf *arp_bufs[gt_max_pkt_burst]; DEFINE_ACL_SEARCH(acl4, gt_max_pkt_burst); DEFINE_ACL_SEARCH(acl6, gt_max_pkt_burst); /* Load a set of packets from the front NIC. */ num_rx = rte_eth_rx_burst(port, rx_queue, rx_bufs, gt_max_pkt_burst); if (unlikely(num_rx == 0)) { process_cmds_from_mailbox(instance, gt_conf); flush_notify_pkts(gt_conf, instance); continue; } /* * Note that GT blocks expect packets that are encapsulated. * * This prefetch is enough to load Ethernet header (14 bytes), * optional Ethernet VLAN header (8 bytes), and either * two IPv4 headers without options (20*2 bytes), or * two IPv6 headers without options (40*2 bytes). 
* IPv4: 14 + 8 + 20*2 = 62 * IPv6: 14 + 8 + 40*2 = 102 */ for (i = 0; i < num_rx; i++) { prefetch0_128_bytes(rte_pktmbuf_mtod_offset( rx_bufs[i], void *, 0)); } for (i = 0; i < num_rx; i++) { struct rte_mbuf *m = rx_bufs[i]; struct gt_packet_headers pkt_info; struct ggu_policy policy; /* * Only request packets and priority packets * with capabilities about to expire go through a * policy decision. * * Other packets will be fowarded directly. */ ret = gt_parse_incoming_pkt(m, &pkt_info); if (ret < 0) { gt_process_unparsed_incoming_pkt( &acl4, &acl6, &num_arp, arp_bufs, m, pkt_info.outer_ethertype); continue; } /* * If packet reassembling at Grantor servers * is enabled, and it is a fragmented packet, * then try to reassemble. */ if (reassembling_enabled && pkt_info.frag) { m = gt_reassemble_incoming_pkt( m, cur_tsc, &pkt_info, &death_row, instance); /* Process the death packets. */ process_death_row(m == NULL, &death_row, instance, gt_conf); if (m == NULL) continue; ret = gt_parse_incoming_pkt( m, &pkt_info); if (ret < 0) { gt_process_unparsed_incoming_pkt( &acl4, &acl6, &num_arp, arp_bufs, m, pkt_info.outer_ethertype); continue; } } if (unlikely(!is_valid_dest_addr(gt_conf, &pkt_info))) { print_ip_err_msg(&pkt_info); rte_pktmbuf_free(m); continue; } if (pkt_info.priority <= PRIORITY_GRANTED) { ret = decap_and_fill_eth(m, gt_conf, &pkt_info, instance); if (ret < 0) rte_pktmbuf_free(m); else tx_bufs[num_tx++] = m; continue; } /* * Lookup the policy decision. * * The policy, which is defined by a Lua script, * decides which capabilities to grant or decline, * the maximum receiving rate of the granted * capabilities, and when each decision expires. */ ret = lookup_policy_decision( &pkt_info, &policy, instance); if (ret < 0) { rte_pktmbuf_free(m); continue; } /* * Add the policy decision to the notification * packet to be sent to the GT-GK Unit. 
*/ fill_notify_pkt(&policy, &pkt_info, instance, gt_conf); if (ret != 0) { ret = decap_and_fill_eth(m, gt_conf, &pkt_info, instance); if (ret < 0) rte_pktmbuf_free(m); else tx_bufs[num_tx++] = m; } else rte_pktmbuf_free(m); } /* Send burst of TX packets, to second port of pair. */ num_tx_succ = rte_eth_tx_burst(port, tx_queue, tx_bufs, num_tx); /* * XXX #71 Do something better here! * For now, free any unsent packets. */ if (unlikely(num_tx_succ < num_tx)) { rte_pktmbuf_free_bulk(&tx_bufs[num_tx_succ], num_tx - num_tx_succ); } if (num_arp > 0) submit_arp(arp_bufs, num_arp, &gt_conf->net->front); process_pkts_acl(&gt_conf->net->front, lcore, &acl4, RTE_ETHER_TYPE_IPV4); process_pkts_acl(&gt_conf->net->front, lcore, &acl6, RTE_ETHER_TYPE_IPV6); process_cmds_from_mailbox(instance, gt_conf); if (reassembling_enabled && cur_tsc - last_tsc >= frag_scan_timeout_cycles) { RTE_VERIFY(death_row.cnt == 0); rte_ip_frag_table_del_expired_entries( instance->frag_tbl, &death_row, cur_tsc); /* Process the death packets. 
*/ process_death_row(true, &death_row, instance, gt_conf); last_tsc = rte_rdtsc(); } batch = (batch + 1) % gt_conf->batch_interval; if (batch == 0) flush_notify_pkts(gt_conf, instance); } G_LOG(NOTICE, "The GT block is exiting\n"); return gt_conf_put(gt_conf); } struct gt_config * alloc_gt_conf(void) { return rte_calloc("gt_config", 1, sizeof(struct gt_config), 0); } static inline void cleanup_gt_instance(struct gt_config *gt_conf, struct gt_instance *instance) { destroy_mempool(instance->mp); destroy_mailbox(&instance->mb); flush_notify_pkts(gt_conf, instance); rte_free(instance->ggu_pkts); instance->ggu_pkts = NULL; if (instance->frag_tbl != NULL) { rte_ip_frag_table_destroy(instance->frag_tbl); instance->frag_tbl = NULL; } destroy_neigh_hash_table(&instance->neigh6); destroy_neigh_hash_table(&instance->neigh); lua_close(instance->lua_state); instance->lua_state = NULL; } static int cleanup_gt(struct gt_config *gt_conf) { int i; for (i = 0; i < gt_conf->num_lcores; i++) cleanup_gt_instance(gt_conf, &gt_conf->instances[i]); rte_free(gt_conf->lua_policy_file); rte_free(gt_conf->lua_base_directory); rte_free(gt_conf->instances); rte_free(gt_conf->lcores); rte_free(gt_conf); return 0; } int gt_conf_put(struct gt_config *gt_conf) { /* * Atomically decrements the atomic counter (v) by one and returns true * if the result is 0, or false in all other cases. */ if (rte_atomic32_dec_and_test(&gt_conf->ref_cnt)) return cleanup_gt(gt_conf); return 0; } /* XXX #143 Search for another comment on this issue for an explanation. 
 */
#if 0
static void *
alloc_lua_mem_in_dpdk(void *ud, void *ptr, __attribute__((unused))size_t osize,
	size_t nsize)
{
	/* Lua allocator contract: nsize == 0 means free. */
	if (nsize == 0) {
		rte_free(ptr);
		return NULL;
	}
	if (ptr == NULL) {
		/* Fresh allocation on the NUMA socket passed via @ud. */
		int socket = (intptr_t)ud;
		return rte_malloc_socket(__func__, nsize, 0, socket);
	}
	return rte_realloc(ptr, nsize, 0);
}
#endif

/*
 * Create a Lua state for one GT instance: record the lcore id in the
 * Lua registry, open the standard and LPM libraries, and load and run
 * the policy entry script. Returns NULL on any failure.
 */
static lua_State *
alloc_and_setup_lua_state(struct gt_config *gt_conf, unsigned int lcore_id)
{
	int ret;
	char lua_entry_path[128];
	lua_State *lua_state;

	ret = snprintf(lua_entry_path, sizeof(lua_entry_path), "%s/%s",
		gt_conf->lua_base_directory, gt_conf->lua_policy_file);
	RTE_VERIFY(ret > 0 && ret < (int)sizeof(lua_entry_path));

	/*
	 * XXX #143 LuaJIT does not currently support
	 * lua_newstate() on 64-bit targets.
	 *
	 * Once lua_newstate() is available, the following call should
	 * replace the call to luaL_newstate() below:
	 * lua_state = lua_newstate(alloc_lua_mem_in_dpdk,
	 *	(void *)(intptr_t)rte_lcore_to_socket_id(lcore_id));
	 */
	lua_state = luaL_newstate();
	if (lua_state == NULL) {
		G_LOG(ERR, "Failed to create new Lua state at %s\n",
			__func__);
		goto out;
	}

	/* Add lcore_id information to the registry of @lua_state. */
	lua_pushstring(lua_state, GT_LUA_LCORE_ID_NAME);
	lua_pushnumber(lua_state, lcore_id);
	lua_settable(lua_state, LUA_REGISTRYINDEX);

	luaL_openlibs(lua_state);
	lualpm_openlib(lua_state);
	set_lua_path(lua_state, gt_conf->lua_base_directory);

	ret = luaL_loadfile(lua_state, lua_entry_path);
	if (ret != 0) {
		G_LOG(ERR, "%s\n", lua_tostring(lua_state, -1));
		goto clean_lua_state;
	}

	/* Run the loaded chunk. */
	ret = lua_pcall(lua_state, 0, 0, 0);
	if (ret != 0) {
		G_LOG(ERR, "%s\n", lua_tostring(lua_state, -1));
		goto clean_lua_state;
	}

	return lua_state;

clean_lua_state:
	lua_close(lua_state);
out:
	return NULL;
}

/*
 * Set up one GT instance: its Lua state, IPv4/IPv6 neighbor tables,
 * fragmentation table (when reassembling is enabled), GGU packet
 * array, and mailbox. Returns 0 on success and a negative value on
 * failure, undoing all partial work via cleanup_gt_instance().
 */
static int
config_gt_instance(struct gt_config *gt_conf, unsigned int lcore_id)
{
	int ret;
	unsigned int block_idx = get_block_idx(gt_conf, lcore_id);
	/* Maximum TTL in cycles for each fragmented packet. */
	uint64_t frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
		MS_PER_S * gt_conf->frag_max_flow_ttl_ms;
	struct gt_instance *instance = &gt_conf->instances[block_idx];

	instance->lua_state = alloc_and_setup_lua_state(gt_conf, lcore_id);
	if (instance->lua_state == NULL) {
		G_LOG(ERR, "Failed to create new Lua state at lcore %u\n",
			lcore_id);
		ret = -1;
		goto out;
	}

	/*
	 * NOTE(review): the neighbor tables are allocated on the socket of
	 * lcores[0], not of @lcore_id, and the table id is derived from
	 * lcore_id * RTE_MAX_LCORE -- presumably to keep ids unique across
	 * instances; verify intent.
	 */
	if (ipv4_if_configured(&gt_conf->net->front)) {
		ret = setup_neighbor_tbl(
			rte_lcore_to_socket_id(gt_conf->lcores[0]),
			lcore_id * RTE_MAX_LCORE + 0, RTE_ETHER_TYPE_IPV4,
			(1 << (32 - gt_conf->net->front.ip4_addr_plen)),
			&instance->neigh, custom_ipv4_hash_func);
		if (ret < 0)
			goto cleanup;
	}

	if (ipv6_if_configured(&gt_conf->net->front)) {
		ret = setup_neighbor_tbl(
			rte_lcore_to_socket_id(gt_conf->lcores[0]),
			lcore_id * RTE_MAX_LCORE + 1, RTE_ETHER_TYPE_IPV6,
			gt_conf->max_num_ipv6_neighbors, &instance->neigh6,
			DEFAULT_HASH_FUNC);
		if (ret < 0)
			goto cleanup;
	}

	/* The DPDK fragment table requires a power-of-2 bucket size. */
	if (!rte_is_power_of_2(gt_conf->frag_bucket_entries)) {
		G_LOG(ERR,
			"Configuration error - the number of entries per bucket should be a power of 2, while it is %u\n",
			gt_conf->frag_bucket_entries);
		ret = -1;
		goto cleanup;
	}

	if (gt_conf->frag_max_entries > gt_conf->frag_bucket_num *
			gt_conf->frag_bucket_entries) {
		G_LOG(ERR,
			"Configuration error - the maximum number of entries should be less than or equal to %u, while it is %u\n",
			gt_conf->frag_bucket_num *
			gt_conf->frag_bucket_entries,
			gt_conf->frag_max_entries);
		ret = -1;
		goto cleanup;
	}

	if (gt_conf->reassembling_enabled) {
		/* Setup the fragmentation table. */
		instance->frag_tbl = rte_ip_frag_table_create(
			gt_conf->frag_bucket_num,
			gt_conf->frag_bucket_entries,
			gt_conf->frag_max_entries, frag_cycles,
			rte_lcore_to_socket_id(lcore_id));
		if (instance->frag_tbl == NULL) {
			G_LOG(ERR,
				"Failed to create fragmentation table at lcore %u\n",
				lcore_id);
			ret = -1;
			goto cleanup;
		}
	}

	/* Fixed array of buffered GGU notification packets. */
	instance->num_ggu_pkts = 0;
	instance->ggu_pkts = rte_calloc_socket(__func__,
		gt_conf->max_ggu_notify_pkts, sizeof(struct ggu_notify_pkt),
		0, rte_lcore_to_socket_id(lcore_id));
	if (instance->ggu_pkts == NULL) {
		G_LOG(ERR,
			"Failed to allocate fixed array of Gatekeeper notification packets on lcore %u\n",
			lcore_id);
		ret = -1;
		goto cleanup;
	}

	ret = init_mailbox("gt", gt_conf->mailbox_max_entries_exp,
		sizeof(struct gt_cmd_entry), gt_conf->mailbox_mem_cache_size,
		lcore_id, &instance->mb);
	if (ret < 0)
		goto cleanup;

	goto out;

cleanup:
	cleanup_gt_instance(gt_conf, instance);
out:
	return ret;
}

/*
 * Create the mempool and RX/TX queues of every GT instance and
 * configure each instance. On failure, tears down the instances
 * that had already been set up.
 */
static int
init_gt_instances(struct gt_config *gt_conf)
{
	int i;
	int ret;
	int num_succ_instances = 0;
	struct gt_instance *inst_ptr;

	/*
	 * (1) Need gt_conf->max_pkt_burst to read those packets
	 * from the queue of the NIC.
	 *
	 * (2) Need gt_conf->frag_max_entries for the fragment packets.
	 *
	 * Take the GGU packets into account as well.
	 *
	 * (3) The GGU packets that GT normally sends out.
	 *
	 * (4) As the GT blocks call process_death_row() to process
	 * the expired packets. In the worst case, process_death_row()
	 * needs to notify Gatekeeper the decisions about all the packets
	 * in the fragmentation table via GGU packets. However, the number
	 * of GGU packets is limited by gt_conf->max_ggu_notify_pkts.
	 */
	unsigned int num_mbuf = calculate_mempool_config_para("gt",
		gt_conf->net, gt_conf->max_pkt_burst +
		gt_conf->frag_max_entries + gt_conf->max_pkt_burst +
		gt_conf->max_ggu_notify_pkts +
		/*
		 * One cannot divide the sum below per gt_conf->num_lcores
		 * because, though unlikely, it might happen that
		 * all packets go to a single instance.
		 */
		gt_conf->net->front.total_pkt_burst);

	/* Set up queue identifiers now for RSS, before instances start. */
	for (i = 0; i < gt_conf->num_lcores; i++) {
		unsigned int lcore = gt_conf->lcores[i];
		inst_ptr = &gt_conf->instances[i];

		inst_ptr->mp = create_pktmbuf_pool("gt", lcore, num_mbuf);
		if (inst_ptr->mp == NULL) {
			ret = -1;
			goto free_gt_instance;
		}

		ret = get_queue_id(&gt_conf->net->front, QUEUE_TYPE_RX,
			lcore, inst_ptr->mp);
		if (ret < 0) {
			G_LOG(ERR, "Cannot assign an RX queue for the front interface for lcore %u\n",
				lcore);
			goto free_gt_instance;
		}
		inst_ptr->rx_queue = ret;

		ret = get_queue_id(&gt_conf->net->front, QUEUE_TYPE_TX,
			lcore, NULL);
		if (ret < 0) {
			G_LOG(ERR, "Cannot assign a TX queue for the front interface for lcore %u\n",
				lcore);
			goto free_gt_instance;
		}
		inst_ptr->tx_queue = ret;

		/*
		 * Set up the lua state, neighbor tables, and
		 * fragmentation table for each instance, and
		 * initialize the policy tables.
		 */
		ret = config_gt_instance(gt_conf, lcore);
		if (ret < 0)
			goto free_gt_instance;

		num_succ_instances++;
	}

	ret = 0;
	goto out;

free_gt_instance:
	/* Only tear down the instances that were fully configured. */
	for (i = 0; i < num_succ_instances; i++)
		cleanup_gt_instance(gt_conf, &gt_conf->instances[i]);
out:
	return ret;
}

/* Stage-1 network setup: allocate and initialize all GT instances. */
static int
gt_stage1(void *arg)
{
	int ret;
	struct gt_config *gt_conf = arg;

	gt_conf->instances = rte_calloc_socket(__func__, gt_conf->num_lcores,
		sizeof(struct gt_instance), 0,
		rte_lcore_to_socket_id(gt_conf->lcores[0]));
	if (gt_conf->instances == NULL) {
		ret = -1;
		goto out;
	}

	ret = init_gt_instances(gt_conf);
	if (ret < 0)
		goto instance;

	goto out;

instance:
	rte_free(gt_conf->instances);
	gt_conf->instances = NULL;
	rte_free(gt_conf->lcores);
	gt_conf->lcores = NULL;
out:
	return ret;
}

/* Stage-2 network setup: configure RSS on the front interface. */
static int
gt_stage2(void *arg)
{
	struct gt_config *gt_conf = arg;
	int ret;

	if (gt_conf->net->front.rss) {
		ret = gt_setup_rss(gt_conf);
		if (ret < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	cleanup_gt(gt_conf);
	return ret;
}

/*
 * Validate the GT configuration, register the setup stages, and launch
 * one gt_proc() per configured lcore. Returns 0 on success; on failure,
 * unwinds whatever had been registered and frees the Lua path strings.
 */
int
run_gt(struct net_config *net_conf, struct gt_config *gt_conf,
	const char *lua_base_directory, const char *lua_policy_file)
{
	int ret = -1, i;

	if (net_conf == NULL || gt_conf == NULL ||
			lua_base_directory == NULL || lua_policy_file == NULL)
		goto out;

	for (i = 0; i < gt_conf->num_lcores; i++) {
		log_ratelimit_state_init(gt_conf->lcores[i],
			gt_conf->log_ratelimit_interval_ms,
			gt_conf->log_ratelimit_burst,
			gt_conf->log_level, "GT");
	}

	gt_conf->lua_base_directory = rte_strdup("lua_base_directory",
		lua_base_directory);
	if (gt_conf->lua_base_directory == NULL) {
		ret = -1;
		goto out;
	}

	gt_conf->lua_policy_file = rte_strdup("lua_policy_file",
		lua_policy_file);
	if (gt_conf->lua_policy_file == NULL)
		goto policy_dir;

	if (!(gt_conf->max_pkt_burst > 0))
		goto gt_config_file;

	if (gt_conf->batch_interval == 0) {
		G_LOG(ERR, "Batch interval (%u) must be greater than 0\n",
			gt_conf->batch_interval);
		goto gt_config_file;
	}

	if (gt_conf->max_ggu_notify_pkts == 0) {
		G_LOG(ERR, "Max number of GGU notification packets (%u) must be greater than 0\n",
			gt_conf->max_ggu_notify_pkts);
		goto gt_config_file;
	}

	gt_conf->net = net_conf;

	if (gt_conf->num_lcores <= 0)
		goto gt_config_file;

	ret = net_launch_at_stage1(net_conf, gt_conf->num_lcores,
		gt_conf->num_lcores, 0, 0, gt_stage1, gt_conf);
	if (ret < 0)
		goto gt_config_file;

	ret = launch_at_stage2(gt_stage2, gt_conf);
	if (ret < 0)
		goto stage1;

	for (i = 0; i < gt_conf->num_lcores; i++) {
		unsigned int lcore = gt_conf->lcores[i];
		ret = launch_at_stage3("gt", gt_proc, gt_conf, lcore);
		if (ret < 0) {
			pop_n_at_stage3(i);
			goto stage2;
		}
	}

	/*
	 * Convert port numbers in CPU order to network order
	 * to avoid recomputation for each packet.
	 */
	gt_conf->ggu_src_port = rte_cpu_to_be_16(gt_conf->ggu_src_port);
	gt_conf->ggu_dst_port = rte_cpu_to_be_16(gt_conf->ggu_dst_port);

	rte_atomic32_init(&gt_conf->ref_cnt);
	return 0;

stage2:
	pop_n_at_stage2(1);
stage1:
	pop_n_at_stage1(1);
gt_config_file:
	rte_free(gt_conf->lua_policy_file);
	gt_conf->lua_policy_file = NULL;
policy_dir:
	rte_free(gt_conf->lua_base_directory);
	gt_conf->lua_base_directory = NULL;
out:
	return ret;
}

/*
 * Lua-facing entry point: replace the Lua state of every GT instance
 * with a freshly loaded one, delivered through each instance's mailbox.
 */
int
l_update_gt_lua_states(lua_State *L)
{
	int i;
	uint32_t ctypeid;
	struct gt_config *gt_conf;
	uint32_t correct_ctypeid_gt_config = luaL_get_ctypeid(L,
		CTYPE_STRUCT_GT_CONFIG_PTR);

	/* First argument must be of type CTYPE_STRUCT_GT_CONFIG_PTR. */
	void *cdata = luaL_checkcdata(L, 1, &ctypeid,
		CTYPE_STRUCT_GT_CONFIG_PTR);
	if (ctypeid != correct_ctypeid_gt_config)
		luaL_error(L, "Expected `%s' as first argument",
			CTYPE_STRUCT_GT_CONFIG_PTR);

	gt_conf = *(struct gt_config **)cdata;

	for (i = 0; i < gt_conf->num_lcores; i++) {
		int ret;
		struct gt_cmd_entry *entry;
		struct gt_instance *instance = &gt_conf->instances[i];
		unsigned int lcore_id = gt_conf->lcores[i];
		lua_State *lua_state =
			alloc_and_setup_lua_state(gt_conf, lcore_id);
		if (lua_state == NULL) {
			/*
			 * NOTE(review): luaL_error() performs a longjmp and
			 * never returns, so the `continue` statements below
			 * are unreachable -- presumably defensive.
			 */
			luaL_error(L, "gt: failed to allocate new lua state to GT block %d at lcore %d\n",
				i, lcore_id);
			continue;
		}

		entry = mb_alloc_entry(&instance->mb);
		if (entry == NULL) {
			lua_close(lua_state);
			luaL_error(L, "gt: failed to allocate a mailbox entry to send new lua state to GT block %d at lcore %d\n",
				i, lcore_id);
			continue;
		}

		entry->op = GT_UPDATE_POLICY;
		entry->u.lua_state = lua_state;

		ret = mb_send_entry(&instance->mb, entry);
		if (ret != 0) {
			lua_close(lua_state);
			luaL_error(L, "gt: failed to send new lua state to GT block %d at lcore %d\n",
				i, lcore_id);
		}
	}

	return 0;
}

/*
 * The prototype is needed, otherwise there will be a compilation error:
 * no previous prototype for 'gt_cpu_to_be_16' [-Werror=missing-prototypes]
 */
uint16_t gt_cpu_to_be_16(uint16_t x);
uint32_t gt_cpu_to_be_32(uint32_t x);

uint16_t
gt_be_to_cpu_16(uint16_t x); uint32_t gt_be_to_cpu_32(uint32_t x); unsigned int gt_lcore_id(void); /* * This function is only meant to be used in Lua policies. * If you need it in Gatekeeper's C code, use rte_cpu_to_be_16() */ uint16_t gt_cpu_to_be_16(uint16_t x) { return rte_cpu_to_be_16(x); } /* * This function is only meant to be used in Lua policies. * If you need it in Gatekeeper's C code, use rte_cpu_to_be_32() */ uint32_t gt_cpu_to_be_32(uint32_t x) { return rte_cpu_to_be_32(x); } /* * This function is only meant to be used in Lua policies. * If you need it in Gatekeeper's C code, use rte_be_to_cpu_16() */ uint16_t gt_be_to_cpu_16(uint16_t x) { return rte_be_to_cpu_16(x); } /* * This function is only meant to be used in Lua policies. * If you need it in Gatekeeper's C code, use rte_be_to_cpu_32() */ uint32_t gt_be_to_cpu_32(uint32_t x) { return rte_be_to_cpu_32(x); } /* * This function is only meant to be used in Lua policies. * If you need it in Gatekeeper's C code, use rte_lcore_id() */ unsigned int gt_lcore_id(void) { return rte_lcore_id(); } ```
/content/code_sandbox/gt/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
20,156
```lua
-- Network configuration for Gatekeeper and Grantor servers.
--
-- @gatekeeper_server: true when configuring a Gatekeeper server (both the
-- front and the back interfaces are brought up); false/nil when configuring
-- a Grantor server (front interface only).
--
-- Returns the initialized net_conf C struct.
return function (gatekeeper_server)

	--
	-- Change these parameters to configure the network.
	--

	-- These parameters should likely be initially changed.
	local log_level = staticlib.c.RTE_LOG_DEBUG
	-- User that Gatekeeper runs as after it boots; nil skips the
	-- gatekeeper_setup_user() call below.
	local user --= "gatekeeper"

	local front_ports = {"enp133s0f0"}
	local front_ips = {"10.0.1.1/24", "2001:db8:1::1/48"}
	local front_bonding_mode = staticlib.c.BONDING_MODE_ROUND_ROBIN
	local front_ipv4_vlan_tag = 0x123
	local front_ipv6_vlan_tag = front_ipv4_vlan_tag
	local front_vlan_insert = true
	local front_mtu = 1500

	local back_ports = {"enp133s0f1"}
	local back_ips = {"10.0.2.1/24", "2001:db8:2::1/48"}
	local back_bonding_mode = staticlib.c.BONDING_MODE_ROUND_ROBIN
	local back_ipv4_vlan_tag = 0x456
	local back_ipv6_vlan_tag = back_ipv4_vlan_tag
	local back_vlan_insert = true
	local back_mtu = 2048

	-- XXX #155 These parameters should only be changed for
	-- performance reasons.
	local front_arp_cache_timeout_sec = 7200 -- (2 hours)
	local front_nd_cache_timeout_sec = 7200 -- (2 hours)
	-- Gatekeeper servers get more RX descriptors on the front interface
	-- than Grantor servers do.
	local front_num_rx_desc = gatekeeper_server and 512 or 128
	local front_num_tx_desc = 128
	local back_arp_cache_timeout_sec = 7200 -- (2 hours)
	local back_nd_cache_timeout_sec = 7200 -- (2 hours)
	local back_num_rx_desc = 128
	local back_num_tx_desc = 128

	-- These variables are unlikely to need to be changed.
	local guarantee_random_entropy = false
	local front_ipv6_default_hop_limits = 255
	local back_ipv6_default_hop_limits = 255
	local rotate_log_interval_sec = 60 * 60 -- (1 hour)
	local front_ipv4_hw_udp_cksum = true
	local front_ipv6_hw_udp_cksum = true
	local back_ipv4_hw_udp_cksum = true
	local back_ipv6_hw_udp_cksum = true
	local front_ipv4_hw_cksum = true
	local back_ipv4_hw_cksum = true
	local front_alternative_rss_hash = false
	local back_alternative_rss_hash = false

	--
	-- End configuration of the network.
	--

	local net_conf = staticlib.c.get_net_conf()
	net_conf.log_level = log_level
	net_conf.rotate_log_interval_sec = rotate_log_interval_sec

	-- Only Gatekeeper servers have a back interface.
	local back_iface_enabled = gatekeeper_server
	if back_iface_enabled then
		staticlib.check_ifaces(front_ports, back_ports)
	end

	local front_iface = staticlib.c.get_if_front(net_conf)
	front_iface.arp_cache_timeout_sec = front_arp_cache_timeout_sec
	front_iface.nd_cache_timeout_sec = front_nd_cache_timeout_sec
	front_iface.bonding_mode = front_bonding_mode
	front_iface.vlan_insert = front_vlan_insert
	front_iface.mtu = front_mtu
	front_iface.ipv6_default_hop_limits = front_ipv6_default_hop_limits
	front_iface.num_rx_desc = front_num_rx_desc
	front_iface.num_tx_desc = front_num_tx_desc
	front_iface.ipv4_hw_udp_cksum = front_ipv4_hw_udp_cksum
	front_iface.ipv6_hw_udp_cksum = front_ipv6_hw_udp_cksum
	front_iface.ipv4_hw_cksum = front_ipv4_hw_cksum
	front_iface.guarantee_random_entropy = guarantee_random_entropy
	front_iface.alternative_rss_hash = front_alternative_rss_hash
	local ret = staticlib.init_iface(front_iface, "front", front_ports,
		front_ips, front_ipv4_vlan_tag, front_ipv6_vlan_tag)
	if ret < 0 then
		error("Failed to initialize the front interface")
	end

	net_conf.back_iface_enabled = back_iface_enabled
	if back_iface_enabled then
		local back_iface = staticlib.c.get_if_back(net_conf)
		back_iface.arp_cache_timeout_sec = back_arp_cache_timeout_sec
		back_iface.nd_cache_timeout_sec = back_nd_cache_timeout_sec
		back_iface.bonding_mode = back_bonding_mode
		back_iface.vlan_insert = back_vlan_insert
		back_iface.mtu = back_mtu
		back_iface.ipv6_default_hop_limits = back_ipv6_default_hop_limits
		back_iface.num_rx_desc = back_num_rx_desc
		back_iface.num_tx_desc = back_num_tx_desc
		back_iface.ipv4_hw_udp_cksum = back_ipv4_hw_udp_cksum
		back_iface.ipv6_hw_udp_cksum = back_ipv6_hw_udp_cksum
		back_iface.ipv4_hw_cksum = back_ipv4_hw_cksum
		back_iface.guarantee_random_entropy = guarantee_random_entropy
		back_iface.alternative_rss_hash = back_alternative_rss_hash
		ret = staticlib.init_iface(back_iface, "back", back_ports,
			back_ips, back_ipv4_vlan_tag, back_ipv6_vlan_tag)
		if ret < 0 then
			error("Failed to initialize the back interface")
		end
	end

	-- Setup the user that Gatekeeper runs on after it boots.
	if user ~= nil then
		ret = staticlib.c.gatekeeper_setup_user(net_conf, user)
		if ret < 0 then
			error("Failed to setup the user")
		end
	end

	-- Initialize the network.
	ret = staticlib.c.gatekeeper_init_network(net_conf)
	if ret < 0 then
		-- Fixed misspelled error message ("initilize").
		error("Failed to initialize the network")
	end

	return net_conf
end
```
/content/code_sandbox/lua/net.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,222
```lua
-- Configuration of the GT (Grantor) functional block.
return function (net_conf, lls_conf, numa_table)

	--
	-- Configure the variables below for the GT block.
	--

	-- These parameters should likely be initially changed.
	local n_lcores = 2
	local log_level = staticlib.c.RTE_LOG_DEBUG
	local lua_policy_file = "examples/policy.lua"
	local lua_base_directory = "./lua"

	-- XXX #155 These parameters should only be changed for
	-- performance reasons.
	local mailbox_max_entries_exp = 7
	local mailbox_mem_cache_size = 0
	local mailbox_burst_size = 32
	local log_ratelimit_interval_ms = 5000
	local log_ratelimit_burst = 10
	local max_pkt_burst = 32
	local max_num_ipv6_neighbors = 1024
	local frag_bucket_num = 0x1000
	local frag_bucket_entries = 4
	local frag_max_entries = 0x1000
	local frag_max_flow_ttl_ms = 1000 -- (1 second)
	local frag_scan_timeout_ms = 2 * 60 * 1000 -- (2 minutes)
	local batch_interval = 2
	local max_ggu_notify_pkts = 8
	local reassembling_enabled = false

	-- These variables are unlikely to need to be changed.
	local ggu_src_port = 0xA0A0
	local ggu_dst_port = 0xB0B0

	--
	-- End configuration of GT block.
	--

	local gt_conf = staticlib.c.alloc_gt_conf()
	if gt_conf == nil then
		error("Failed to allocate gt_conf")
	end

	gt_conf.log_level = log_level
	gt_conf.mailbox_max_entries_exp = mailbox_max_entries_exp
	gt_conf.mailbox_mem_cache_size = mailbox_mem_cache_size
	gt_conf.mailbox_burst_size = mailbox_burst_size
	gt_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms
	gt_conf.log_ratelimit_burst = log_ratelimit_burst
	gt_conf.reassembling_enabled = reassembling_enabled
	gt_conf.max_num_ipv6_neighbors = max_num_ipv6_neighbors
	gt_conf.frag_bucket_num = frag_bucket_num
	gt_conf.frag_bucket_entries = frag_bucket_entries
	gt_conf.frag_max_entries = frag_max_entries
	gt_conf.frag_max_flow_ttl_ms = frag_max_flow_ttl_ms
	-- Spread the full scan timeout over the buckets,
	-- rounding to the nearest integer.
	gt_conf.frag_scan_timeout_ms = math.floor(
		frag_scan_timeout_ms / gt_conf.frag_bucket_num + 0.5)
	gt_conf.batch_interval = batch_interval
	gt_conf.max_ggu_notify_pkts = max_ggu_notify_pkts
	gt_conf.ggu_src_port = ggu_src_port
	gt_conf.ggu_dst_port = ggu_dst_port
	gt_conf.max_pkt_burst =
		staticlib.get_front_burst_config(max_pkt_burst, net_conf)

	-- LLS must be able to accept at least as many ARP/ND packet
	-- submissions from GK or GT as the GT block's maximum burst,
	-- so raise its limit to GT's burst size if needed.
	lls_conf.mailbox_max_pkt_sub =
		math.max(lls_conf.mailbox_max_pkt_sub, gt_conf.max_pkt_burst)

	local gt_lcores = staticlib.convert_numa_table_to_array(
		staticlib.alloc_lcores_evenly_from_all_numa_nodes(
			numa_table, n_lcores, 0))
	staticlib.gt_assign_lcores(gt_conf, gt_lcores)

	local ret = staticlib.c.run_gt(net_conf, gt_conf,
		lua_base_directory, lua_policy_file)
	if ret < 0 then
		error("Failed to run gt block(s)")
	end

	return gt_conf
end
```
/content/code_sandbox/lua/gt.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
785
```lua
-- Configuration of the Dynamic Config functional block.
return function (net_conf, gk_conf, gt_conf, numa_table)

	--
	-- Configure the variables below for the Dynamic Config block.
	--

	-- These parameters should likely be initially changed.
	local log_level = staticlib.c.RTE_LOG_DEBUG

	-- XXX #155 These parameters should only be changed for
	-- performance reasons.
	local log_ratelimit_interval_ms = 5000
	local log_ratelimit_burst = 10
	local mailbox_max_entries_exp = 7
	local mailbox_mem_cache_size = 0
	local mailbox_burst_size = 32

	-- These variables are unlikely to need to be changed.
	local server_path = "/var/run/gatekeeper/dyn_cfg.socket"
	local lua_dy_base_dir = "./lua"
	local lua_dy_lib = "gatekeeper/dylib.lua"
	local rcv_timeout_sec = 30
	local rcv_timeout_usec = 0
	-- Socket file permissions: read/write for the owner only.
	local mode = staticlib.c.S_IRUSR + staticlib.c.S_IWUSR

	--
	-- End configuration of Dynamic Config block.
	--

	local dy_conf = staticlib.c.get_dy_conf()
	if dy_conf == nil then
		error("Failed to allocate dy_conf")
	end

	dy_conf.lcore_id = staticlib.alloc_an_lcore(numa_table)

	dy_conf.log_level = log_level
	dy_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms
	dy_conf.log_ratelimit_burst = log_ratelimit_burst
	dy_conf.mailbox_max_entries_exp = mailbox_max_entries_exp
	dy_conf.mailbox_mem_cache_size = mailbox_mem_cache_size
	dy_conf.mailbox_burst_size = mailbox_burst_size

	staticlib.c.set_dyc_timeout(rcv_timeout_sec, rcv_timeout_usec, dy_conf)

	local ret = staticlib.c.run_dynamic_config(net_conf, gk_conf, gt_conf,
		server_path, lua_dy_base_dir, lua_dy_lib, dy_conf, mode)
	if ret < 0 then
		error("Failed to run dynamic config block")
	end

	return dy_conf
end
```
/content/code_sandbox/lua/dyn_cfg.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
433
```lua
-- Configuration of the SOL functional block.
return function (net_conf, sol_lcores)

	--
	-- Configure the variables below for the SOL block.
	--

	-- These parameters should likely be initially changed.
	local log_level = staticlib.c.RTE_LOG_DEBUG
	local destination_bw_gbps = 1

	-- XXX #155 These parameters should only be changed for
	-- performance reasons.
	local log_ratelimit_interval_ms = 5000
	local log_ratelimit_burst = 10
	local pri_req_max_len = 512
	local req_bw_rate = 0.05
	local enq_burst_size = 32
	local deq_burst_size = 32

	-- These variables are unlikely to need to be changed.
	local tb_rate_approx_err = 1e-7

	--
	-- End configuration of SOL block.
	--

	local sol_conf = staticlib.c.alloc_sol_conf()
	if sol_conf == nil then
		error("Failed to allocate sol_conf")
	end

	staticlib.sol_assign_lcores(sol_conf, sol_lcores)

	sol_conf.log_level = log_level
	sol_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms
	sol_conf.log_ratelimit_burst = log_ratelimit_burst
	sol_conf.pri_req_max_len = pri_req_max_len
	sol_conf.enq_burst_size = enq_burst_size
	sol_conf.deq_burst_size = deq_burst_size
	sol_conf.tb_rate_approx_err = tb_rate_approx_err
	-- Request channel bandwidth in Mbps: a req_bw_rate fraction of
	-- the destination's bandwidth (converted from Gbps to Mbps).
	sol_conf.req_channel_bw_mbps = destination_bw_gbps * 1000 * req_bw_rate

	local ret = staticlib.c.run_sol(net_conf, sol_conf)
	if ret < 0 then
		error("Failed to run sol block")
	end

	return sol_conf
end
```
/content/code_sandbox/lua/sol.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
372
```lua
-- Configuration of the GGU functional block.
return function (net_conf, gk_conf, lcore)

	--
	-- Configure the variables below for the GGU block.
	--

	-- These parameters should likely be initially changed.
	local log_level = staticlib.c.RTE_LOG_DEBUG

	-- XXX #155 These parameters should only be changed for
	-- performance reasons.
	local mailbox_max_entries_exp = 7
	local mailbox_mem_cache_size = 0
	local mailbox_burst_size = 32
	local log_ratelimit_interval_ms = 5000
	local log_ratelimit_burst = 10
	local max_pkt_burst = 32

	-- These variables are unlikely to need to be changed.
	-- NOTE: these port numbers must agree with the ones
	-- configured for the GT block (see gt.lua).
	local ggu_src_port = 0xA0A0
	local ggu_dst_port = 0xB0B0

	--
	-- End configuration of GGU block.
	--

	local ggu_conf = staticlib.c.alloc_ggu_conf(lcore)
	if ggu_conf == nil then
		error("Failed to allocate ggu_conf")
	end

	ggu_conf.log_level = log_level
	ggu_conf.mailbox_max_entries_exp = mailbox_max_entries_exp
	ggu_conf.mailbox_mem_cache_size = mailbox_mem_cache_size
	ggu_conf.mailbox_burst_size = mailbox_burst_size
	ggu_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms
	ggu_conf.log_ratelimit_burst = log_ratelimit_burst
	ggu_conf.max_pkt_burst = max_pkt_burst
	ggu_conf.ggu_src_port = ggu_src_port
	ggu_conf.ggu_dst_port = ggu_dst_port

	local ret = staticlib.c.run_ggu(net_conf, gk_conf, ggu_conf)
	if ret < 0 then
		error("Failed to run ggu block")
	end

	return ggu_conf
end
```
/content/code_sandbox/lua/ggu.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
382
```c
/* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */
#include <arpa/inet.h>
#include <lauxlib.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include "gatekeeper_fib.h"
#include "gatekeeper_gt.h"
#include "lua_lpm.h"
/*
 * lpmlib.str_to_prefix(prefix_str): parse an IPv4 CIDR string.
 * Returns two values to Lua: the address in network byte order and
 * the prefix length. Raises a Lua error on malformed or non-IPv4 input.
 */
static int l_str_to_prefix(lua_State *L) { int ret; struct ipaddr ip_addr; /* First argument must be an IP prefix string. */ const char *prefix_str = luaL_checkstring(L, 1); if (lua_gettop(L) != 1) luaL_error(L, "Expected one argument, however it got %d arguments", lua_gettop(L)); ret = parse_ip_prefix(prefix_str, &ip_addr); if (ret < 0 || ip_addr.proto != RTE_ETHER_TYPE_IPV4) luaL_error(L, "gk: failed to parse the IPv4 prefix: %s", prefix_str); lua_pushinteger(L, ip_addr.ip.v4.s_addr); lua_pushinteger(L, ret); return 2; }
#define CTYPE_STRUCT_IN6_ADDR "struct in6_addr"
#define CTYPE_STRUCT_IN6_ADDR_REF "struct in6_addr &"
#define CTYPE_STRUCT_IN6_ADDR_PTR "struct in6_addr *"
/*
 * Fetch stack slot @idx as a struct in6_addr, accepting LuaJIT FFI cdata
 * passed by value, by reference, or by pointer (probed in that order).
 * Raises a Lua error (longjmp, no return) for any other type.
 */
static struct in6_addr * get_ipv6_addr(lua_State *L, int idx) { /* Testing for type CTYPE_STRUCT_IN6_ADDR. */ uint32_t correct_ctypeid_in6_addr = luaL_get_ctypeid(L, CTYPE_STRUCT_IN6_ADDR); uint32_t ctypeid; void *cdata = luaL_checkcdata(L, idx, &ctypeid, CTYPE_STRUCT_IN6_ADDR); if (ctypeid == correct_ctypeid_in6_addr) return cdata; /* Testing for type CTYPE_STRUCT_IN6_ADDR_REF. */ correct_ctypeid_in6_addr = luaL_get_ctypeid(L, CTYPE_STRUCT_IN6_ADDR_REF); cdata = luaL_checkcdata(L, idx, &ctypeid, CTYPE_STRUCT_IN6_ADDR_REF); if (likely(ctypeid == correct_ctypeid_in6_addr)) return *(struct in6_addr **)cdata; /* Testing for type CTYPE_STRUCT_IN6_ADDR_PTR. */ correct_ctypeid_in6_addr = luaL_get_ctypeid(L, CTYPE_STRUCT_IN6_ADDR_PTR); cdata = luaL_checkcdata(L, idx, &ctypeid, CTYPE_STRUCT_IN6_ADDR_PTR); if (likely(ctypeid == correct_ctypeid_in6_addr)) return *(struct in6_addr **)cdata; luaL_error(L, "Expected '%s', `%s', or '%s' as argument #%d", CTYPE_STRUCT_IN6_ADDR, CTYPE_STRUCT_IN6_ADDR_REF, CTYPE_STRUCT_IN6_ADDR_PTR, idx); /* Make compiler happy; the above luaL_error() doesn't return. */ return NULL; }
/*
 * lpmlib.str_to_prefix6(prefix_str): IPv6 counterpart of str_to_prefix().
 * Returns an in6_addr cdata (pushed on the Lua stack) and the prefix length.
 */
static int l_str_to_prefix6(lua_State *L) { int ret; struct ipaddr ip_addr; struct in6_addr *cdata; uint32_t correct_ctypeid_in6_addr; /* First argument must be an IP prefix string. */ const char *prefix_str = luaL_checkstring(L, 1); if (lua_gettop(L) != 1) luaL_error(L, "Expected one argument, however it got %d arguments", lua_gettop(L)); ret = parse_ip_prefix(prefix_str, &ip_addr); if (ret < 0 || ip_addr.proto != RTE_ETHER_TYPE_IPV6) luaL_error(L, "gk: failed to parse the IPv6 prefix: %s", prefix_str); correct_ctypeid_in6_addr = luaL_get_ctypeid(L, CTYPE_STRUCT_IN6_ADDR); cdata = luaL_pushcdata(L, correct_ctypeid_in6_addr, sizeof(struct in6_addr)); *cdata = ip_addr.ip.v6; lua_pushinteger(L, ret); return 2; }
#define LUA_LPM_UD_TNAME "gt_lpm_ud"
/* Lua userdata wrapping an IPv4 FIB; released by the lpm_ud_gc() __gc metamethod. */
struct lpm_lua_userdata { struct fib_head *fib; /* Parameters of @fib. */ uint32_t max_rules; uint32_t num_tbl8s; };
/*
 * lpmlib.new_lpm(max_rules, num_tbl8s): create an IPv4 LPM table on the
 * NUMA socket of the calling GT instance's lcore (read from the Lua
 * registry under GT_LUA_LCORE_ID_NAME). Returns the userdata.
 */
static int l_new_lpm(lua_State *L) { static rte_atomic32_t identifier = RTE_ATOMIC32_INIT(0); struct lpm_lua_userdata *lpm_ud; unsigned int lcore_id, socket_id; int32_t instance_id; char fib_head_name[128], fib_name[128]; int ret; if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } lpm_ud = lua_newuserdata(L, sizeof(*lpm_ud)); /* First argument must be a Lua number. */ lpm_ud->max_rules = luaL_checknumber(L, 1); /* Second argument must be a Lua number. */ lpm_ud->num_tbl8s = luaL_checknumber(L, 2); /* Get @lcore_id. */ lua_getfield(L, LUA_REGISTRYINDEX, GT_LUA_LCORE_ID_NAME); lcore_id = lua_tonumber(L, -1); lua_pop(L, 1); socket_id = rte_lcore_to_socket_id(lcore_id); /* * Obtain unique names. */ instance_id = rte_atomic32_add_return(&identifier, 1); ret = snprintf(fib_head_name, sizeof(fib_head_name), "gt_fib_ipv4_head_%u_%u", lcore_id, instance_id); RTE_VERIFY(ret > 0 && ret < (int)sizeof(fib_head_name)); ret = snprintf(fib_name, sizeof(fib_name), "gt_fib_ipv4_%u_%u", lcore_id, instance_id); RTE_VERIFY(ret > 0 && ret < (int)sizeof(fib_name)); /* * Alloc FIB. */ lpm_ud->fib = rte_malloc_socket(fib_head_name, sizeof(*lpm_ud->fib), 0, socket_id); if (unlikely(lpm_ud->fib == NULL)) { luaL_error(L, "%s(): not enough memory for a FIB head", __func__); } ret = fib_create(lpm_ud->fib, fib_name, socket_id, 32, lpm_ud->max_rules, lpm_ud->num_tbl8s); if (unlikely(ret < 0)) { rte_free(lpm_ud->fib); lpm_ud->fib = NULL; luaL_error(L, "%s(): failed to initialize an IPv4 LPM table (errno=%d): %s", __func__, -ret, strerror(-ret)); } luaL_getmetatable(L, LUA_LPM_UD_TNAME); lua_setmetatable(L, -2); return 1; }
/* lpmlib.lpm_add(lpm, ip, depth, label): add an IPv4 prefix -> label rule. */
static int l_lpm_add(lua_State *L) { int ret; /* First argument must be of type struct lpm_lua_userdata *. */ struct lpm_lua_userdata *lpm_ud = luaL_checkudata(L, 1, LUA_LPM_UD_TNAME); /* * Second argument must be a Lua number. * @ip must be in network order. */ uint32_t ip = luaL_checknumber(L, 2); /* Third argument must be a Lua number. */ uint8_t depth = luaL_checknumber(L, 3); /* Fourth argument must be a Lua number. */ uint32_t label = luaL_checknumber(L, 4); if (unlikely(lua_gettop(L) != 4)) { luaL_error(L, "%s(): expected four arguments, however it got %d arguments", __func__, lua_gettop(L)); } ret = fib_add(lpm_ud->fib, (uint8_t *)&ip, depth, label); if (unlikely(ret < 0)) { luaL_error(L, "%s(): failed to add network policy [ip: %d, depth: %d, label: %d] (errno=%d): %s", __func__, ip, depth, label, -ret, strerror(-ret)); } return 0; }
/* lpmlib.lpm_del(lpm, ip, depth): remove an IPv4 prefix; returns fib_delete()'s result. */
static int l_lpm_del(lua_State *L) { /* First argument must be of type struct lpm_lua_userdata *. */ struct lpm_lua_userdata *lpm_ud = luaL_checkudata(L, 1, LUA_LPM_UD_TNAME); /* * Second argument must be a Lua number. * @ip must be in network order. * */ uint32_t ip = luaL_checknumber(L, 2); /* Third argument must be a Lua number. */ uint8_t depth = luaL_checknumber(L, 3); if (unlikely(lua_gettop(L) != 3)) { luaL_error(L, "%s(): expected three arguments, however it got %d arguments", __func__, lua_gettop(L)); } lua_pushinteger(L, fib_delete(lpm_ud->fib, (uint8_t *)&ip, depth)); return 1; }
/* lpmlib.lpm_lookup(lpm, ip): longest-prefix match; returns the label, or a negative errno on miss/failure. */
static int l_lpm_lookup(lua_State *L) { /* First argument must be of type struct lpm_lua_userdata *. */ struct lpm_lua_userdata *lpm_ud = luaL_checkudata(L, 1, LUA_LPM_UD_TNAME); uint32_t label; int ret; /* * Second argument must be a Lua number. * @ip must be in network order. */ uint32_t ip = luaL_checknumber(L, 2); if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } ret = fib_lookup(lpm_ud->fib, (uint8_t *)&ip, &label); lua_pushinteger(L, ret >= 0 ? (lua_Integer)label : ret); return 1; }
/*
 * Look @address up in both the RIB and the FIB of @fib and verify that the
 * two agree (same label, or both miss with -ENOENT). Returns 0 on agreement;
 * raises a Lua error otherwise (the trailing return -EFAULT is unreachable).
 */
static int debug_lookup(lua_State *L, struct fib_head *fib, const uint8_t *address) { uint32_t rib_label, fib_label; int rib_ret, fib_ret; rib_ret = rib_lookup(fib_get_rib(fib), address, &rib_label); if (unlikely(rib_ret < 0 && rib_ret != -ENOENT)) { luaL_error(L, "%s(): RIB lookup failed (errno=%d): %s", __func__, -rib_ret, strerror(-rib_ret)); } fib_ret = fib_lookup(fib, address, &fib_label); if (unlikely(fib_ret < 0 && fib_ret != -ENOENT)) { luaL_error(L, "%s(): RIB lookup (ret=%d, label=%d); FIB lookup failed (errno=%d): %s", __func__, rib_ret, rib_label, -fib_ret, strerror(-fib_ret)); } if (rib_ret == 0) { if (likely(fib_ret == 0 && rib_label == fib_label)) return 0; } else { if (likely(rib_ret == -ENOENT && fib_ret == -ENOENT)) return 0; } luaL_error(L, "%s(): RIB lookup (ret=%d, label=%d) != FIB lookup (ret=%d, label=%d)", __func__, rib_ret, rib_label, fib_ret, fib_label); return -EFAULT; }
/* lpmlib.lpm_debug_lookup(lpm, ip): cross-check RIB vs. FIB for an IPv4 address. */
static int l_lpm_debug_lookup(lua_State *L) { /* First argument must be of type struct lpm_lua_userdata *. */ struct lpm_lua_userdata *lpm_ud = luaL_checkudata(L, 1, LUA_LPM_UD_TNAME); /* * Second argument must be a Lua number. * @ip must be in network order. */ uint32_t ip = luaL_checknumber(L, 2); if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } lua_pushinteger(L, debug_lookup(L, lpm_ud->fib, (uint8_t *)&ip)); return 1; }
/* lpmlib.ip_mask_addr(ip, depth): mask an IPv4 address to @depth bits and return it as a dotted-quad string. */
static int l_ip_mask_addr(lua_State *L) { uint32_t masked_ip; struct in_addr mask; char buf[INET_ADDRSTRLEN]; /* * First argument must be a Lua number. * @ip must be in network order. */ uint32_t ip = luaL_checknumber(L, 1); /* Second argument must be a Lua number. */ uint8_t depth = luaL_checknumber(L, 2); if (unlikely(depth > 32)) { luaL_error(L, "%s(): depth=%d must be in [0, 32]", __func__, depth); } if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } ip4_prefix_mask(depth, &mask); masked_ip = htonl(ntohl(ip) & rte_be_to_cpu_32(mask.s_addr)); if (unlikely(inet_ntop(AF_INET, &masked_ip, buf, sizeof(buf)) == NULL)) { luaL_error(L, "%s(): failed to convert a number to an IPv4 address (errno=%d): %s", __func__, errno, strerror(errno)); } lua_pushstring(L, buf); return 1; }
/* lpmlib.lpm_get_paras(lpm): return the (max_rules, num_tbl8s) the table was created with. */
static int l_lpm_get_paras(lua_State *L) { /* First argument must be of type struct lpm_lua_userdata *. */ struct lpm_lua_userdata *lpm_ud = luaL_checkudata(L, 1, LUA_LPM_UD_TNAME); if (unlikely(lua_gettop(L) != 1)) { luaL_error(L, "%s(): expected one argument, however it got %d arguments", __func__, lua_gettop(L)); } lua_pushinteger(L, lpm_ud->max_rules); lua_pushinteger(L, lpm_ud->num_tbl8s); return 2; }
#define LUA_LPM6_UD_TNAME "gt_lpm6_ud"
/* * This struct is currently identical to struct lpm_lua_userdata. * These structs are kept independent of each other to enable a possible * divergence in the future as have happened in the past. */
struct lpm6_lua_userdata { struct fib_head *fib; /* Parameters of @fib. */ uint32_t max_rules; uint32_t num_tbl8s; };
/* lpmlib.new_lpm6(max_rules, num_tbl8s): IPv6 counterpart of new_lpm() (128-bit key length). */
static int l_new_lpm6(lua_State *L) { static rte_atomic32_t identifier6 = RTE_ATOMIC32_INIT(0); struct lpm6_lua_userdata *lpm6_ud; unsigned int lcore_id, socket_id; int32_t instance_id; char fib_head_name[128], fib_name[128]; int ret; if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } lpm6_ud = lua_newuserdata(L, sizeof(*lpm6_ud)); /* First argument must be a Lua number. */ lpm6_ud->max_rules = luaL_checknumber(L, 1); /* Second argument must be a Lua number. */ lpm6_ud->num_tbl8s = luaL_checknumber(L, 2); /* Get @lcore_id. */ lua_getfield(L, LUA_REGISTRYINDEX, GT_LUA_LCORE_ID_NAME); lcore_id = lua_tonumber(L, -1); lua_pop(L, 1); socket_id = rte_lcore_to_socket_id(lcore_id); /* * Obtain unique names. */ instance_id = rte_atomic32_add_return(&identifier6, 1); ret = snprintf(fib_head_name, sizeof(fib_head_name), "gt_fib_ipv6_head_%u_%u", lcore_id, instance_id); RTE_VERIFY(ret > 0 && ret < (int)sizeof(fib_head_name)); ret = snprintf(fib_name, sizeof(fib_name), "gt_fib_ipv6_%u_%u", lcore_id, instance_id); RTE_VERIFY(ret > 0 && ret < (int)sizeof(fib_name)); /* * Alloc FIB. */ lpm6_ud->fib = rte_malloc_socket(fib_head_name, sizeof(*lpm6_ud->fib), 0, socket_id); if (unlikely(lpm6_ud->fib == NULL)) { luaL_error(L, "%s(): not enough memory for a FIB head", __func__); } ret = fib_create(lpm6_ud->fib, fib_name, socket_id, 128, lpm6_ud->max_rules, lpm6_ud->num_tbl8s); if (unlikely(ret < 0)) { rte_free(lpm6_ud->fib); lpm6_ud->fib = NULL; luaL_error(L, "%s(): failed to initialize a IPv6 LPM table (errno=%d): %s", __func__, -ret, strerror(-ret)); } luaL_getmetatable(L, LUA_LPM6_UD_TNAME); lua_setmetatable(L, -2); return 1; }
/* lpmlib.lpm6_add(lpm6, in6_addr, depth, label): add an IPv6 prefix -> label rule. */
static int l_lpm6_add(lua_State *L) { int ret; /* First argument must be of type struct lpm6_lua_userdata *. */ struct lpm6_lua_userdata *lpm6_ud = luaL_checkudata(L, 1, LUA_LPM6_UD_TNAME); /* Second argument must be a struct in6_add. */ struct in6_addr *ipv6_addr = get_ipv6_addr(L, 2); /* Third argument must be a Lua number. */ uint8_t depth = luaL_checknumber(L, 3); /* Fourth argument must be a Lua number. */ uint32_t label = luaL_checknumber(L, 4); if (unlikely(lua_gettop(L) != 4)) { luaL_error(L, "%s(): expected four arguments, however it got %d arguments", __func__, lua_gettop(L)); } ret = fib_add(lpm6_ud->fib, ipv6_addr->s6_addr, depth, label); if (unlikely(ret < 0)) { char addr_buf[INET6_ADDRSTRLEN]; if (unlikely(inet_ntop(AF_INET6, ipv6_addr, addr_buf, sizeof(addr_buf)) == NULL)) { luaL_error(L, "%s(): failed to add a network policy to the lpm6 table (errno=%d): %s", __func__, -ret, strerror(-ret)); } luaL_error(L, "%s(%s/%d): failed to add a network policy to the lpm6 table (errno=%d): %s", __func__, addr_buf, depth, -ret, strerror(-ret)); } return 0; }
/* lpmlib.lpm6_del(lpm6, in6_addr, depth): remove an IPv6 prefix; returns fib_delete()'s result. */
static int l_lpm6_del(lua_State *L) { /* First argument must be of type struct lpm6_lua_userdata *. */ struct lpm6_lua_userdata *lpm6_ud = luaL_checkudata(L, 1, LUA_LPM6_UD_TNAME); /* Second argument must be a struct in6_add. */ struct in6_addr *ipv6_addr = get_ipv6_addr(L, 2); /* Third argument must be a Lua number. */ uint8_t depth = luaL_checknumber(L, 3); if (unlikely(lua_gettop(L) != 3)) { luaL_error(L, "%s(): expected three arguments, however it got %d arguments", __func__, lua_gettop(L)); } lua_pushinteger(L, fib_delete(lpm6_ud->fib, ipv6_addr->s6_addr, depth)); return 1; }
/* lpmlib.lpm6_lookup(lpm6, in6_addr): longest-prefix match; returns the label, or a negative errno. */
static int l_lpm6_lookup(lua_State *L) { /* First argument must be of type struct lpm6_lua_userdata *. */ struct lpm6_lua_userdata *lpm6_ud = luaL_checkudata(L, 1, LUA_LPM6_UD_TNAME); uint32_t label; int ret; /* Second argument must be a struct in6_add. */ struct in6_addr *ipv6_addr = get_ipv6_addr(L, 2); if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } ret = fib_lookup(lpm6_ud->fib, ipv6_addr->s6_addr, &label); lua_pushinteger(L, ret >= 0 ? (lua_Integer)label : ret); return 1; }
/* lpmlib.lpm6_debug_lookup(lpm6, in6_addr): cross-check RIB vs. FIB for an IPv6 address. */
static int l_lpm6_debug_lookup(lua_State *L) { /* First argument must be of type struct lpm6_lua_userdata *. */ struct lpm6_lua_userdata *lpm6_ud = luaL_checkudata(L, 1, LUA_LPM6_UD_TNAME); /* Second argument must be a struct in6_add. */ struct in6_addr *ipv6_addr = get_ipv6_addr(L, 2); if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } lua_pushinteger(L, debug_lookup(L, lpm6_ud->fib, ipv6_addr->s6_addr)); return 1; }
/* * Takes an array of uint8_t (IPv6 address) and masks it using the depth. */
/* NOTE(review): masks through uint64_t pointers, which assumes @ip is 8-byte aligned -- confirm all callers pass suitably aligned buffers (e.g. a local struct in6_addr). */
static void ip6_mask_addr(uint8_t *ip, uint8_t depth) { struct in6_addr mask; uint64_t *paddr = (uint64_t *)ip; const uint64_t *pmask = (const uint64_t *)mask.s6_addr; ip6_prefix_mask(depth, &mask); paddr[0] &= pmask[0]; paddr[1] &= pmask[1]; }
/* Copy ipv6 address. */
static inline void ip6_copy_addr(uint8_t *dst, const uint8_t *src) { rte_memcpy(dst, src, sizeof(struct in6_addr)); }
/* lpmlib.ip6_mask_addr(in6_addr, depth): mask an IPv6 address to @depth bits and return it as a string. */
static int l_ip6_mask_addr(lua_State *L) { struct in6_addr masked_ip; char buf[INET6_ADDRSTRLEN]; /* First argument must be a struct in6_add. */ struct in6_addr *ipv6_addr = get_ipv6_addr(L, 1); /* Second argument must be a Lua number. */ uint8_t depth = luaL_checknumber(L, 2); if (unlikely(depth > 128)) { luaL_error(L, "%s(): depth=%d must be in [0, 128]", __func__, depth); } if (unlikely(lua_gettop(L) != 2)) { luaL_error(L, "%s(): expected two arguments, however it got %d arguments", __func__, lua_gettop(L)); } ip6_copy_addr(masked_ip.s6_addr, ipv6_addr->s6_addr); ip6_mask_addr(masked_ip.s6_addr, depth); if (unlikely(inet_ntop(AF_INET6, masked_ip.s6_addr, buf, sizeof(buf)) == NULL)) { luaL_error(L, "%s(): failed to convert a number to an IPv6 address (errno=%d): %s", __func__, errno, strerror(errno)); } lua_pushstring(L, buf); return 1; }
/* lpmlib.lpm6_get_paras(lpm6): return the (max_rules, num_tbl8s) the table was created with. */
static int l_lpm6_get_paras(lua_State *L) { /* First argument must be of type struct lpm6_lua_userdata *. */ struct lpm6_lua_userdata *lpm6_ud = luaL_checkudata(L, 1, LUA_LPM6_UD_TNAME); if (lua_gettop(L) != 1) luaL_error(L, "Expected one argument, however it got %d arguments", lua_gettop(L)); lua_pushinteger(L, lpm6_ud->max_rules); lua_pushinteger(L, lpm6_ud->num_tbl8s); return 2; }
/* Functions exported to Lua policies as the "lpmlib" module. */
static const struct luaL_reg lpmlib_lua_c_funcs [] = { {"str_to_prefix", l_str_to_prefix}, {"new_lpm", l_new_lpm}, {"lpm_add", l_lpm_add}, {"lpm_del", l_lpm_del}, {"lpm_lookup", l_lpm_lookup}, {"ip_mask_addr", l_ip_mask_addr}, {"lpm_get_paras", l_lpm_get_paras}, {"lpm_debug_lookup", l_lpm_debug_lookup}, {"str_to_prefix6", l_str_to_prefix6}, {"new_lpm6", l_new_lpm6}, {"lpm6_add", l_lpm6_add}, {"lpm6_del", l_lpm6_del}, {"lpm6_lookup", l_lpm6_lookup}, {"ip6_mask_addr", l_ip6_mask_addr}, {"lpm6_get_paras", l_lpm6_get_paras}, {"lpm6_debug_lookup", l_lpm6_debug_lookup}, {NULL, NULL} /* Sentinel. */ };
/* __gc metamethod for IPv4 LPM userdata: release the FIB and its memory. */
/* NOTE(review): if fib_create() failed in l_new_lpm(), @fib is NULL here; rte_free(NULL) is a no-op, but confirm fib_free() also tolerates NULL. */
static int lpm_ud_gc(lua_State *L) { struct lpm_lua_userdata *lpm_ud = lua_touserdata(L, 1); fib_free(lpm_ud->fib); rte_free(lpm_ud->fib); return 0; }
/* __gc metamethod for IPv6 LPM userdata; same caveat as lpm_ud_gc(). */
static int lpm6_ud_gc(lua_State *L) { struct lpm6_lua_userdata *lpm6_ud = lua_touserdata(L, 1); fib_free(lpm6_ud->fib); rte_free(lpm6_ud->fib); return 0; }
/* Register both userdata metatables (with their __gc handlers) and the "lpmlib" module in @L. */
void lualpm_openlib(lua_State *L) { luaL_newmetatable(L, LUA_LPM_UD_TNAME); lua_pushstring(L, "__gc"); lua_pushcfunction(L, lpm_ud_gc); lua_settable(L, -3); luaL_newmetatable(L, LUA_LPM6_UD_TNAME); lua_pushstring(L, "__gc"); lua_pushcfunction(L, lpm6_ud_gc); lua_settable(L, -3); luaL_register(L, "lpmlib", lpmlib_lua_c_funcs); }
```
/content/code_sandbox/gt/lua_lpm.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
5,748
```lua return function (net_conf, gk_conf, gt_conf, lls_conf, numa_table) -- -- Configure the variables below for the CPS block. -- -- These parameters should likely be initially changed. local log_level = staticlib.c.RTE_LOG_DEBUG -- XXX #155 These parameters should only be changed for performance reasons. local mailbox_max_entries_exp = 7 local mailbox_mem_cache_size = 0 local mailbox_burst_size = 32 local log_ratelimit_interval_ms = 5000 local log_ratelimit_burst = 10 local front_max_pkt_burst = 32 local back_max_pkt_burst = 32 local arp_max_entries_exp = 10 local nd_max_entries_exp = 10 -- These variables are unlikely to need to be changed. local kni_queue_size = 1024 local max_rt_update_pkts = 8 local scan_interval_sec = 5 -- -- End configuration of CPS block. -- local cps_conf = staticlib.c.get_cps_conf() if cps_conf == nil then error("Failed to allocate cps_conf") end cps_conf.lcore_id = staticlib.alloc_an_lcore(numa_table) cps_conf.log_level = log_level cps_conf.mailbox_max_entries_exp = mailbox_max_entries_exp cps_conf.mailbox_mem_cache_size = mailbox_mem_cache_size cps_conf.mailbox_burst_size = mailbox_burst_size cps_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms cps_conf.log_ratelimit_burst = log_ratelimit_burst cps_conf.front_max_pkt_burst = front_max_pkt_burst cps_conf.back_max_pkt_burst = back_max_pkt_burst cps_conf.kni_queue_size = kni_queue_size cps_conf.max_rt_update_pkts = max_rt_update_pkts cps_conf.scan_interval_sec = scan_interval_sec -- Netlink port ID to receive updates and scans from routing daemon. cps_conf.nl_pid = 0x6A7E cps_conf.arp_max_entries_exp = arp_max_entries_exp cps_conf.nd_max_entries_exp = nd_max_entries_exp local ret = staticlib.c.run_cps(net_conf, gk_conf, gt_conf, cps_conf, lls_conf) if ret < 0 then error("Failed to run cps block") end return cps_conf end ```
/content/code_sandbox/lua/cps.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
522
```lua return function (net_conf, numa_table) -- -- Configure the variables below for the LLS block. -- -- These parameters should likely be initially changed. local log_level = staticlib.c.RTE_LOG_DEBUG -- XXX #155 These parameters should only be changed for performance reasons. local mailbox_max_entries_exp = 7 local mailbox_mem_cache_size = 0 local mailbox_burst_size = 32 local mailbox_max_pkt_sub = 32 local log_ratelimit_interval_ms = 5000 local log_ratelimit_burst = 10 local front_max_pkt_burst = 32 local back_max_pkt_burst = 32 local front_icmp_msgs_per_sec = 1000 local front_icmp_msgs_burst = 50 local back_icmp_msgs_per_sec = 1000 local back_icmp_msgs_burst = 50 -- These variables are unlikely to need to be changed. local max_num_cache_records = 1024 local cache_scan_interval_sec = 10 -- -- End configuration of LLS block. -- local lls_conf = staticlib.c.get_lls_conf() if lls_conf == nil then error("Failed to allocate lls_conf") end lls_conf.log_level = log_level lls_conf.mailbox_max_entries_exp = mailbox_max_entries_exp lls_conf.mailbox_mem_cache_size = mailbox_mem_cache_size lls_conf.mailbox_burst_size = mailbox_burst_size lls_conf.mailbox_max_pkt_sub = mailbox_max_pkt_sub lls_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms lls_conf.log_ratelimit_burst = log_ratelimit_burst lls_conf.front_max_pkt_burst = front_max_pkt_burst lls_conf.back_max_pkt_burst = back_max_pkt_burst lls_conf.front_icmp_msgs_per_sec = front_icmp_msgs_per_sec lls_conf.front_icmp_msgs_burst = front_icmp_msgs_burst lls_conf.back_icmp_msgs_per_sec = back_icmp_msgs_per_sec lls_conf.back_icmp_msgs_burst = back_icmp_msgs_burst lls_conf.max_num_cache_records = max_num_cache_records lls_conf.cache_scan_interval_sec = cache_scan_interval_sec lls_conf.lcore_id = staticlib.alloc_an_lcore(numa_table) local ret = staticlib.c.run_lls(net_conf, lls_conf) if ret < 0 then error("Failed to run lls block") end return lls_conf end ```
/content/code_sandbox/lua/lls.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
547
```lua return function (net_conf, lls_conf, sol_conf, gk_lcores, gk_sol_map) -- -- Configure the variables below for the GK block. -- -- These parameters should likely be initially changed. local log_level = staticlib.c.RTE_LOG_DEBUG local bpf_base_directory = "./lua/bpf" local bpf_programs = { [0] = "granted.bpf", [1] = "declined.bpf", [2] = "grantedv2.bpf", [3] = "web.bpf", [4] = "tcp-services.bpf", } -- XXX #155 These parameters should only be changed for performance reasons. local mailbox_max_entries_exp = 14 local mailbox_mem_cache_size = 0 local mailbox_burst_size = 32 local log_ratelimit_interval_ms = 5000 local log_ratelimit_burst = 10 local max_pkt_burst_front = 32 local max_pkt_burst_back = 32 local flow_ht_size = 1024 local flow_ht_max_probes = 80 -- 10 cache lines local flow_ht_scale_num_bucket = 1.25 local flow_table_scan_iter = 0 local scan_del_thresh = flow_ht_size * 0.1 local max_num_ipv4_rules = 1024 local num_ipv4_tbl8s = 256 local max_num_ipv6_rules = 1024 local num_ipv6_tbl8s = 65536 local max_num_ipv6_neighbors = 65536 local basic_measurement_logging_ms = 60 * 1000 -- (1 minute) local front_icmp_msgs_per_sec = 1000 local front_icmp_msgs_burst = 50 local back_icmp_msgs_per_sec = 1000 local back_icmp_msgs_burst = 50 local fib_dump_batch_size = 32 -- These variables are unlikely to need to be changed. local bpf_enable_jit = true -- -- End configuration of GK block. 
-- local gk_conf = staticlib.c.alloc_gk_conf() if gk_conf == nil then error("Failed to allocate gk_conf") end local num_lcores = #gk_lcores staticlib.gk_assign_lcores(gk_conf, gk_lcores) staticlib.gk_assign_sol_map(gk_conf, gk_sol_map) gk_conf.log_level = log_level gk_conf.mailbox_max_entries_exp = mailbox_max_entries_exp gk_conf.mailbox_mem_cache_size = mailbox_mem_cache_size gk_conf.mailbox_burst_size = mailbox_burst_size gk_conf.log_ratelimit_interval_ms = log_ratelimit_interval_ms gk_conf.log_ratelimit_burst = log_ratelimit_burst gk_conf.flow_ht_size = flow_ht_size gk_conf.flow_ht_max_probes = flow_ht_max_probes gk_conf.flow_ht_scale_num_bucket = flow_ht_scale_num_bucket gk_conf.max_num_ipv4_rules = max_num_ipv4_rules gk_conf.num_ipv4_tbl8s = num_ipv4_tbl8s gk_conf.max_num_ipv6_rules = max_num_ipv6_rules gk_conf.num_ipv6_tbl8s = num_ipv6_tbl8s gk_conf.max_num_ipv6_neighbors = max_num_ipv6_neighbors gk_conf.flow_table_scan_iter = flow_table_scan_iter gk_conf.basic_measurement_logging_ms = basic_measurement_logging_ms gk_conf.scan_del_thresh = scan_del_thresh gk_conf.front_icmp_msgs_per_sec = math.floor(front_icmp_msgs_per_sec / num_lcores + 0.5) gk_conf.front_icmp_msgs_burst = front_icmp_msgs_burst gk_conf.back_icmp_msgs_per_sec = math.floor(back_icmp_msgs_per_sec / num_lcores + 0.5) gk_conf.back_icmp_msgs_burst = back_icmp_msgs_burst gk_conf.front_max_pkt_burst = staticlib.get_front_burst_config(max_pkt_burst_front, net_conf) gk_conf.back_max_pkt_burst = staticlib.get_back_burst_config(max_pkt_burst_back, net_conf) gk_conf.fib_dump_batch_size = fib_dump_batch_size -- The maximum number of ARP or ND packets in LLS submitted by -- GK or GT. The code below makes sure that the parameter should -- be at least the same with the maximum configured value of GK. lls_conf.mailbox_max_pkt_sub = math.max(lls_conf.mailbox_max_pkt_sub, gk_conf.front_max_pkt_burst, gk_conf.back_max_pkt_burst) -- Load BPF programs. 
for program_index, program_name in pairs(bpf_programs) do local filename = bpf_base_directory .. "/" .. program_name local ret = staticlib.c.gk_load_bpf_flow_handler(gk_conf, program_index, filename, bpf_enable_jit) if ret < 0 then error("Failed to load BPF program: " .. filename) end end local ret = staticlib.c.run_gk(net_conf, gk_conf, sol_conf) if ret < 0 then error("Failed to run gk block(s)") end return gk_conf end ```
/content/code_sandbox/lua/gk.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,151
```lua -- The gatekeeper module is already loaded, but it only contains -- C functions statically linked. -- Unsetting its package.loaded entry allows Lua to load -- the Lua functions of the module. package.loaded["staticlib"] = nil require "gatekeeper/staticlib" function gatekeeper_init() -- When gatekeeper_server is true, -- Gatekeeper will run as a Gatekeeper server. -- Otherwise, it will run as a grantor server. local gatekeeper_server = true -- Set the global log level to one of -- RTE_LOG_{EMERG,ALERT,CRIT,ERR,WARNING,NOTICE,INFO,DEBUG}. -- All logs equal to or to the left will be output. local global_log_level = staticlib.c.RTE_LOG_DEBUG staticlib.c.rte_log_set_global_level(global_log_level) local netf = require("net") local net_conf = netf(gatekeeper_server) local numa_table = staticlib.get_numa_table(net_conf) local n_fixed_lcores = gatekeeper_server and 4 or 3 local aux_numa_table = staticlib.alloc_lcores_evenly_from_all_numa_nodes(numa_table, n_fixed_lcores, 0) -- LLS should be the first block initialized, since it should have -- queue IDs of 0 so that when ARP filters are not supported ARP -- packets are steered to the LLS block by the NIC. This occurs because -- many NICs direct non-IP packets to queue 0. This is not necessary -- when running Gatekeeper on Amazon, since the ENA distributes non-IP -- packets to the first queue configured for RSS. local llsf = require("lls") local lls_conf = llsf(net_conf, aux_numa_table) local gk_conf local gt_conf if gatekeeper_server == true then -- The following expression to set the number of -- GK block instances is a good recommendation, -- but it may not be optimal for all cases. 
local n_gk_lcores = 2 * staticlib.count_numa_nodes(numa_table) if n_gk_lcores <= 0 then error("No GK block allocated for Gatekeeper server") end local n_sol_lcores_per_socket = 1 local lcores_table = staticlib.alloc_lcores_evenly_from_all_numa_nodes(numa_table, n_gk_lcores, n_sol_lcores_per_socket) local gk_lcores = staticlib.convert_numa_table_to_array( staticlib.alloc_lcores_evenly_from_all_numa_nodes(lcores_table, n_gk_lcores, 0)) local sol_lcores = staticlib.convert_numa_table_to_array(lcores_table) local gk_sol_map = staticlib.gk_sol_map(gk_lcores, sol_lcores) local solf = require("sol") local sol_conf = solf(net_conf, sol_lcores) local gkf = require("gk") gk_conf = gkf(net_conf, lls_conf, sol_conf, gk_lcores, gk_sol_map) local gguf = require("ggu") local ggu_lcore = staticlib.alloc_an_lcore(aux_numa_table) local ggu_conf = gguf(net_conf, gk_conf, ggu_lcore) else local gtf = require("gt") gt_conf = gtf(net_conf, lls_conf, numa_table) end local cpsf = require("cps") local cps_conf = cpsf(net_conf, gk_conf, gt_conf, lls_conf, aux_numa_table) local dyf = require("dyn_cfg") local dy_conf = dyf(net_conf, gk_conf, gt_conf, aux_numa_table) -- A return value of 1 is required for success. return 1 end ```
/content/code_sandbox/lua/main_config.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
839
```lua -- Gatekeeper - DDoS protection system. -- -- This program is free software: you can redistribute it and/or modify -- (at your option) any later version. -- -- This program is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- -- along with this program. If not, see <path_to_url module("stdcdefs", package.seeall) -- -- C structs exported through FFI -- local ffi = require("ffi") -- Structs ffi.cdef[[ struct in_addr { uint32_t s_addr; }; struct in6_addr { unsigned char s6_addr[16]; }; ]] c = ffi.C ```
/content/code_sandbox/lua/gatekeeper/stdcdefs.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
163
```lua -- Gatekeeper - DDoS protection system. -- -- This program is free software: you can redistribute it and/or modify -- (at your option) any later version. -- -- This program is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- -- along with this program. If not, see <path_to_url module("lpmlib", package.seeall) function lpm_add_tbl8s(ip_addr, prefix_len, prefixes) local masked_ip if prefix_len <= 24 then return 0 end -- For a prefix with length longer than 24, one tbl8 -- is needed according to the addition description in -- DPDK LPM library: -- path_to_url#addition masked_ip = lpmlib.ip_mask_addr(ip_addr, 24) if prefixes[masked_ip] then return 0 end prefixes[masked_ip] = true return 1 end function lpm6_add_tbl8s(ip6_addr, prefix_len, prefixes) local depth = 24 local ret = 0 -- For a prefix with length longer than 24, one tbl8 -- is needed every 8 bits. If the prefix length is not -- a multiple of 8, then prefix expansion will be performed -- on that tbl8 entry. More details can be found on -- the addition description in DPDK LPM6 library: -- path_to_url#addition while depth < prefix_len do local prefix = lpmlib.ip6_mask_addr(ip6_addr, depth) .. "/" .. depth if not prefixes[prefix] then prefixes[prefix] = true ret = ret + 1 end depth = depth + 8 end return ret end ```
/content/code_sandbox/lua/gatekeeper/lpmlib.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
421