#!C:\Users\sandro.ferreira\PycharmProjects\InstagramBot\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
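# The wrapper above is auto-generated by setuptools from entry-point metadata:
# load_entry_point() resolves the callable registered under the named group and
# invokes it. A minimal sketch of declaring such a console script; the package
# name "example_tool" and its main() are hypothetical, for illustration only.
from setuptools import setup

setup(
    name="example-tool",
    version="0.1",
    py_modules=["example_tool"],
    entry_points={
        # "command = module:callable" -- setuptools generates a wrapper script
        # (like the one above) that imports and calls example_tool.main
        "console_scripts": ["example-tool = example_tool:main"],
    },
)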
"""ist440 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import include, path urlpatterns = [ path('admin/', admin.site.urls), path('', include('application.urls')), ]
function RADTODEGREE(a){return 180*a/Math.PI}function DEGREETORAD(a){return a*Math.PI/180}function Layer(){this.renderables=new Array,this.opacity=1,this.hasTransformations=!0,this.transformation=new Transformation,this.visible=!0}function PreloadImages(){this.assets=new Array,this.funcUpdate=void 0,this.funcComplete=void 0,this.funcCompleteObj=void 0,this.funcCompleteName=void 0,this.counter=0,this.counterLoaded=0}function Renderable(){this.transformation=new Transformation,this.shape=RenderableShape.Circle,this.visible=!0,this.mouseover=!1,this.OnCamera=!0,this.opacity=void 0,this.isClickable=!1,this.width=this.height=0,this.updateFunc=void 0,this.updateFuncObj=void 0,this.updateFuncName=void 0}function Scene(){this.camera={transformation:new Transformation},this.isTranslated=!1,this.isScaled=!1,this.opacity=1,this.opacityState=1,this.sceneOpacityChanged=!1,this.layers=new Array(new Layer),this.mouse=new Vector2D,this.usingMouse=!0,this.element=void 0,this.isCulling=!0,this.sceneType=void 0,this.initTime=(new Date).getTime(),this.currentTime=0}function SceneCanvas(a){Scene.call(this),this.element="string"==typeof a?document.getElementById(a):a,this.context=this.element.getContext("2d"),this.CreateEventListeners()}function Sprite(){Renderable.call(this),this.textures=[],this.textureInUse=0,this.clippingX=0,this.clippingY=0,this.clippingWidth=0,this.clippingHeight=0,this.isClipping=!1}function SpriteAnimated(a){Sprite.call(this),this.isPlaying=this.isLooping=this.isPaused=!1,this.repeat=1,this.timerPauseLength=this.timeStart=0,this._frame=0,this._internalRepeat=1,this.yoyo=!1,this.frameSpeed=a,this.startFunc=void 0,this.startFuncObj=void 0,this.startFuncName=void 0,this.endFunc=void 0,this.endFuncObj=void 0,this.endFuncName=void 0,this.startedPause=!1,this.startedPauseRecovery=!1,this.startedAnimation=!1,this.frameLimit=void 0}function SpriteAnimatedAtlas(a){SpriteAnimated.call(this,a)}function Text2D(){Renderable.call(this),this.font=void 0,this.lineHeight=0,this.text=[]}function Transformation(){this.position=new Vector2D,this.scale=new Vector2D(1,1),this.angle=0,this.rotationPivot=new Vector2D}function Vector2D(a,b){this.x=a||0,this.y=b||0}Layer.prototype={Add:function(a){this.renderables.push(a)},Remove:function(a){for(var b=0;b<this.renderables.length;b++)if(this.renderables[b]==a){this.renderables.splice(b,1);break}},SetOpacity:function(a){this.opacity=a},GetOpacity:function(){return this.opacity},Visible:function(a){this.visible=a}},PreloadImages.prototype={OnUpdate:function(a,b){return b?(this.funcUpdateObj=a,this.funcUpdateName=b):this.funcUpdate=a,this},OnComplete:function(a,b){return b?(this.funcCompleteObj=a,this.funcCompleteName=b):this.funcComplete=a,this},NumberOfImagesToLoad:function(){return this.counter},NumberOfLoadedImages:function(){return this.counterLoaded},_PreLoadImage:function(a){var b=new Image;b.src=a;var c=this;b.onload=function(d){c._LoadedImage(b,a)}},_LoadedImage:function(a,b){this.assets[b]=a,this.counterLoaded++,this.counter!=this.counterLoaded&&(void 0!==this.funcUpdate?this.funcUpdate():void 0!==this.funcUpdateObj&&this.funcUpdateObj[this.funcUpdateName]()),this.counter==this.counterLoaded&&(void 0!==this.funcComplete?this.funcComplete():void 0!==this.funcCompleteObj&&this.funcCompleteObj[this.funcCompleteName]())},Load:function(a){return this.counter++,this._PreLoadImage(a),this},GetImage:function(a){return this.assets[a]}},Renderable.prototype={SetShape:function(a){this.shape=a},SetVisible:function(a){this.visible=a},IsVisible:function(){return this.visible},SetOnCamera:function(a){this.OnCamera=a},IsOnCamera:function(){return this.OnCamera},SetOpacity:function(a){this.opacity=a},GetOpacity:function(){return this.opacity},SetClickable:function(a){this.isClickable=a},GetWidth:function(){return this.width},GetHeight:function(){return this.height},Draw:function(a){},_Draw:function(a){a.save(),a.translate(this.transformation.position.x,this.transformation.position.y*-1),a.rotate(this.transformation.angle),a.translate(this.transformation.rotationPivot.x,this.transformation.rotationPivot.y),this.Draw(a),a.restore()},_OnClick:function(a,b){},_OnMouseMove:function(a,b){},_OnUpdate:function(){this.updateFunc&&this.updateFunc(),this.updateFuncObj&&this.updateFuncObj[this.updateFuncName]()},_update:function(a){this.Update(a),void 0!=this.updateFunc&&this.updateFunc(),void 0!=this.updateFuncObj&&this.updateFuncObj[this.updateFuncName]()},Update:function(a){},OnClick:function(){},OnMouseOver:function(){},OnMouseOut:function(){},OnUpdate:function(a,b){void 0===b?this.updateFunc=a:(this.updateFuncObj=a,this.updateFuncName=b)}};var RenderableShape={Circle:1,Quad:2};Scene.prototype={constructor:Scene,Update:function(){var a=(new Date).getTime();if(this.time=a,this.currentTime=(this.time-this.initTime)/1e3,1!=this.camera.transformation.scale.x||1!=this.camera.transformation.scale.y)var b=.5*(this.GetWidth()-this.GetWidth()/this.camera.transformation.scale.x),c=.5*(this.GetHeight()-this.GetHeight()/this.camera.transformation.scale.y),d=b+this.mouse.x*(this.GetWidth()-2*b)/this.GetWidth(),e=c+this.mouse.y*(this.GetHeight()-2*c)/this.GetHeight();for(var f=0;f<this.layers.length;f++)if(this.layers[f].visible)for(var g=0;g<this.layers[f].renderables.length;g++)this.layers[f].renderables[g]._update(this.currentTime),this.usingMouse&&void 0!=this.layers[f].renderables[g]&&this.layers[f].renderables[g].IsVisible()&&this.layers[f].renderables[g].isClickable&&(this.layers[f].renderables[g].IsOnCamera()?1!=this.camera.transformation.scale.x||1!=this.camera.transformation.scale.y?this.layers[f].renderables[g]._OnMouseMove(d-this.camera.transformation.position.x,e-this.camera.transformation.position.y):this.layers[f].renderables[g]._OnMouseMove(this.mouse.x-this.camera.transformation.position.x,this.mouse.y-this.camera.transformation.position.y):this.layers[f].renderables[g]._OnMouseMove(this.mouse.x,this.mouse.y))},SetGlobalOpacity:function(a){this.opacity=a},GetLayer:function(a){return void 0===a&&(a=0),this.layers[a]},Resize:function(a,b){this.element.width=a,this.element.height=b},Draw:function(){},Add:function(a,b){if(void 0===b&&(b=0),this.layers.length<b+1)for(var c=0;c<=b;c++)void 0===this.layers[c]&&(this.layers[c]=new Layer);this.layers[b].Add(a)},Remove:function(a,b){void 0===b&&(b=0),this.layers[b].Remove(a),0==this.layers[b].renderables.length&&this.layers.splice(b,1)},GetWidth:function(){return this.element.width},GetHeight:function(){return this.element.height},GetTime:function(){return this.currentTime},DisableMouse:function(){this.usingMouse=!1},EnableCulling:function(){this.isCulling=!0},DisableCulling:function(){this.isCulling=!1},EnableMouse:function(){this.usingMouse=!0},_Culling:function(a){if(!this.isCulling)return!0;var b=this.camera.transformation.position;a.IsOnCamera()||(b={x:0,y:0});var
c=a.transformation.position.x-.5*a.GetWidth()*a.transformation.scale.x,d=a.transformation.position.x+.5*a.GetWidth()*a.transformation.scale.x,e=-a.transformation.position.y-.5*a.GetHeight()*a.transformation.scale.y,f=-a.transformation.position.y+.5*a.GetHeight()*a.transformation.scale.y;return!(c+b.x>this.GetWidth()||d+b.x<-this.GetWidth()||this.GetHeight()<e+b.y||-this.GetHeight()>f+b.y)},_OnClick:function(){if(this.usingMouse)for(var a=this.layers.length-1;a>=0;a--)if(this.layers[a].visible)for(var b=this.layers[a].renderables.length-1;b>=0;b--)if(this.layers[a].renderables[b].IsVisible()&&this.layers[a].renderables[b].isClickable)if(this.layers[a].renderables[b].IsOnCamera()){if(this.layers[a].renderables[b]._OnClick(this.mouse.x-this.camera.transformation.position.x,this.mouse.y+this.camera.transformation.position.y))return!0}else if(this.layers[a].renderables[b]._OnClick(this.mouse.x,this.mouse.y))return!0;return!1},_OnMouseMove:function(a,b){if(this.usingMouse){if(1!=this.camera.transformation.scale.x||1!=this.camera.transformation.scale.y)var c=.5*(this.GetWidth()-this.GetWidth()/this.camera.transformation.scale.x),d=.5*(this.GetHeight()-this.GetHeight()/this.camera.transformation.scale.y),e=c+a*(this.GetWidth()-2*c)/this.GetWidth(),f=d+b*(this.GetHeight()-2*d)/this.GetHeight();for(var g=this.layers.length-1;g>=0;g--)if(this.layers[g].visible)for(var h=this.layers[g].renderables.length-1;h>=0;h--)this.layers[g].renderables[h].IsVisible()&&this.layers[g].renderables[h].isClickable&&(this.layers[g].renderables[h].IsOnCamera()?1!=this.camera.transformation.scale.x||1!=this.camera.transformation.scale.y?this.layers[g].renderables[h]._OnMouseMove(e-this.camera.transformation.position.x,f-this.camera.transformation.position.y):this.layers[g].renderables[h]._OnMouseMove(a-this.camera.transformation.position.x,b-this.camera.transformation.position.y):this.layers[g].renderables[h]._OnMouseMove(a,b))}},_MouseClick:function(){this._OnClick()},_MouseMove:function(a){var b=a.clientX,c=a.clientY;b-=this.element.offsetLeft,c-=this.element.offsetTop,this.mouse.x=b,this.mouse.y=c,this._OnMouseMove(this.mouse.x,this.mouse.y)}},SceneCanvas.prototype=Object.create(Scene.prototype),SceneCanvas.prototype.constructor=SceneCanvas,SceneCanvas.prototype.CreateEventListeners=function(){this.element.addEventListener("mousedown",this,!1),this.element.addEventListener("mousemove",this,!1),this.handleEvent=function(a){switch(a.type){case"mousemove":this._MouseMove(a);break;case"mousedown":this._MouseClick(a)}}},SceneCanvas.prototype.Draw=function(){var a=this.context;a.clearRect(0,0,this.element.width,this.element.height),a.save(),a.translate(.5*this.GetWidth(),.5*this.GetHeight());for(var b=0;b<this.layers.length;b++){this.opacityState=this.opacity,a.globalAlpha=this.opacity,this.sceneOpacityChanged=1!=this.opacity,a.save(),this.layers[b].hasTransformations&&(a.save(),a.translate(this.layers[b].transformation.position.x,this.layers[b].transformation.position.y),a.rotate(this.layers[b].transformation.angle),a.scale(this.layers[b].transformation.scale.x,this.layers[b].transformation.scale.y));for(var c=0;c<this.layers[b].renderables.length;c++)!this.layers[b].renderables[c].IsOnCamera()&&this.layers[b].visible&&this.layers[b].renderables[c].IsVisible()&&this._Culling(this.layers[b].renderables[c])&&(1!=this.layers[b].GetOpacity()&&(this.opacityState=this.layers[b].GetOpacity(),a.globalAlpha=this.layers[b].GetOpacity(),this.layerOpacityChanged=!0),this.sceneOpacityChanged||void 
0===this.layers[b].renderables[c].GetOpacity()||(this.opacityState=this.layers[b].renderables[c].GetOpacity(),a.globalAlpha=this.layers[b].renderables[c].GetOpacity()),this.layers[b].renderables[c]._Draw(a),this.opacity!=this.opacityState&&(this.opacityState=this.opacity,a.globalAlpha=this.opacity,this.sceneOpacityChanged=1!=this.opacity));a.save(),1==this.camera.transformation.scale.x&&1==this.camera.transformation.scale.y||(a.translate(-this.GetWidth()*(.5*this.camera.transformation.scale.x-.5),-this.GetHeight()*(.5*this.camera.transformation.scale.y-.5)),a.scale(this.camera.transformation.scale.x,this.camera.transformation.scale.y)),a.translate(this.camera.transformation.position.x,this.camera.transformation.position.y),a.rotate(-this.camera.transformation.angle),a.translate(this.camera.transformation.rotationPivot.x,this.camera.transformation.rotationPivot.y);for(var c=0;c<this.layers[b].renderables.length;c++)this.layers[b].renderables[c].IsOnCamera()&&this.layers[b].visible&&(1!=this.layers[b].GetOpacity()&&(this.opacityState=this.layers[b].GetOpacity(),a.globalAlpha=this.layers[b].GetOpacity(),this.layerOpacityChanged=!0),this.layers[b].renderables[c].IsVisible()&&this._Culling(this.layers[b].renderables[c])&&(this.sceneOpacityChanged||void 0===this.layers[b].renderables[c].GetOpacity()||(this.opacityState=this.layers[b].renderables[c].GetOpacity(),a.globalAlpha=this.layers[b].renderables[c].GetOpacity()),this.layers[b].renderables[c]._Draw(a),this.opacity!=this.opacityState&&(this.opacityState=this.opacity,a.globalAlpha=this.opacity,this.sceneOpacityChanged=1!=this.opacity)));a.restore(),this.layers[b].hasTransformations&&a.restore(),a.restore()}a.restore(),a.restore()},Sprite.prototype=Object.create(Renderable.prototype),Sprite.prototype.constructor=Sprite,Sprite.prototype.LoadTexture=function(a){this.textures.push(a)},Sprite.prototype.SetClipping=function(a,b,c,d){this.clippingX=a,this.clippingY=b,this.clippingWidth=c,this.clippingHeight=d,this.isClipping=!0},Sprite.prototype.DisableClipping=function(){this.isClipping=!1},Sprite.prototype.GetWidth=function(){return 0==this.textures.length?0:this.isClipping?this.clippingWidth:this.textures[this.textureInUse].width},Sprite.prototype.GetHeight=function(){return 0==this.textures.length?0:this.isClipping?this.clippingHeight:this.textures[this.textureInUse].height},Sprite.prototype.Draw=function(a){this.textures.length>0&&a.drawImage(this.textures[this.textureInUse],this.isClipping?this.clippingX:0,this.isClipping?this.clippingY:0,this.isClipping?this.clippingWidth:this.textures[this.textureInUse].width,this.isClipping?this.clippingHeight:this.textures[this.textureInUse].height,.5*-this.transformation.scale.x*(this.isClipping?this.clippingWidth:this.textures[this.textureInUse].width),.5*-this.transformation.scale.y*(this.isClipping?this.clippingHeight:this.textures[this.textureInUse].height),this.isClipping?this.clippingWidth*this.transformation.scale.x:this.transformation.scale.x*this.textures[this.textureInUse].width,this.isClipping?this.clippingHeight*this.transformation.scale.y:this.transformation.scale.y*this.textures[this.textureInUse].height)},Sprite.prototype._OnClick=function(a,b){return this.mouseover&&this.OnClick(),this.mouseover},Sprite.prototype._OnMouseMove=function(a,b){switch(this.shape){case RenderableShape.Circle:var 
c=this.transformation.position.x-a,d=this.transformation.position.y-b,e=this.GetWidth()*this.transformation.scale.x*.5;c*c+d*d<e*e?(this.mouseover=!0,this.OnMouseOver()):(this.mouseover&&this.OnMouseOut(),this.mouseover=!1);break;case RenderableShape.Quad:Math.abs(a-this.transformation.position.x)<.5*this.transformation.scale.x*this.GetWidth()&&Math.abs(b-this.transformation.position.y)<.5*this.transformation.scale.y*this.GetHeight()?(this.mouseover=!0,this.OnMouseOver()):(this.mouseover&&this.OnMouseOut(),this.mouseover=!1)}},SpriteAnimated.prototype=Object.create(Sprite.prototype),SpriteAnimated.prototype.constructor=SpriteAnimated,SpriteAnimated.prototype.OnStart=function(a,b){void 0===b?this.startFunc=a:(this.startFuncObj=a,this.startFuncName=b)},SpriteAnimated.prototype.OnComplete=function(a,b){void 0===b?this.endFunc=a:(this.endFuncObj=a,this.endFuncName=b)},SpriteAnimated.prototype._OnStart=function(){this.startFunc&&this.startFunc(),this.startFuncObj&&this.startFuncObj[this.startFuncName]()},SpriteAnimated.prototype._OnComplete=function(){this.endFunc&&this.endFunc(),this.endFuncObj&&this.endFuncObj[this.endFuncName]()},SpriteAnimated.prototype.IsPlaying=function(){return this.isPlaying},SpriteAnimated.prototype.GetNumberOfFrames=function(){return(this.frameLimit?this.frameLimit:this.textures.length)-(this.frameInit?this.frameInit:0)},SpriteAnimated.prototype.SetInitialFrame=function(a){this.frameInit=a},SpriteAnimated.prototype.SetFinalFrame=function(a){this.frameLimit=a},SpriteAnimated.prototype._update=function(a){if(this.startedAnimation&&(this.startedAnimation=!1,this.timeStart=a),this.startedPause&&(this.isPaused=!0,this.startedPause=!1,this.timerPauseStart=a),this.startedPauseRecovery&&(this.startedPauseRecovery=!1,this.timerPauseEnd=a,this.isPaused=!1,this.timerPauseLength+=this.timerPauseEnd-this.timerPauseStart),this.isPlaying&&!this.isPaused){var b=1/this.frameSpeed,c=a-(this.timeStart+this.timerPauseLength),d=this.GetNumberOfFrames()*(this.yoyo?2:1),e=Math.abs(Math.ceil(c/b)-1);e<d?this.yoyo&&e>=this.GetNumberOfFrames()?this._frame=this.GetNumberOfFrames()-(e-this.GetNumberOfFrames()+1):this._frame=e:this.isLooping?(this.timeStart=a,this.timerPauseLength=0,this._frame=0):this.repeat>0&&this._internalRepeat<this.repeat?(this.timeStart=a,this.timerPauseLength=0,this._frame=0,this._internalRepeat++):(this._internalRepeat=1,this.isPlaying=!1,this._frame=this.yoyo?0:d-1,this._OnComplete()),this._OnUpdate()}this.textureInUse=(this.reverse?this.GetNumberOfFrames()-this._frame-1:this._frame)+(this.frameInit?this.frameInit:0),this.Update(a)},SpriteAnimated.prototype.Play=function(a,b){this.repeat=void 0===a?0:a,this.reverse=void 
0!==b,this.isLooping=this.repeat<=0,this.isPlaying=!0,this._OnStart(),this.startedAnimation=!0},SpriteAnimated.prototype.YoYo=function(a){this.yoyo=a},SpriteAnimated.prototype.Pause=function(){this.isPaused?this.startedPauseRecovery=!0:this.startedPause=!0},SpriteAnimated.prototype.Stop=function(){this.isPlaying=!1,this._frame=0},SpriteAnimatedAtlas.prototype=Object.create(SpriteAnimated.prototype),SpriteAnimatedAtlas.prototype.constructor=SpriteAnimatedAtlas,SpriteAnimatedAtlas.prototype.LoadTexture=function(a,b,c,d){SpriteAnimated.prototype.LoadTexture.call(this,a),this.numberOfFrames=d,this.framePerColumn=b,this.framePerLine=c,this.frameWidth=a.width/b,this.frameHeight=a.height/c},SpriteAnimatedAtlas.prototype.GetNumberOfFrames=function(){return(this.frameLimit?this.frameLimit:this.numberOfFrames)-(this.frameInit?this.frameInit:0)},SpriteAnimatedAtlas.prototype._update=function(a){SpriteAnimated.prototype._update.call(this,a);var b=this.textureInUse;this.textureInUse=0,this.SetClipping(b%this.framePerColumn*this.frameWidth,Math.floor(b/this.framePerColumn)*this.frameHeight,this.frameWidth,this.frameHeight)},Text2D.prototype=Object.create(Renderable.prototype),Text2D.prototype.constructor=Text2D,Text2D.prototype.SetFont=function(a){this.font=a},Text2D.prototype.SetColor=function(a){this.color=a},Text2D.prototype.SetText=function(a){this.text=a.split("\n");var b=document.createElement("canvas");b.width=1e4,b.height=1e4;var c=b.getContext("2d");c.font=this.font;var d=c.measureText(this.text[0]);this.width=this.height=d.width},Text2D.prototype.SetLineHeight=function(a){this.lineHeight=a},Text2D.prototype.Draw=function(a){a.translate(.5*-this.GetWidth(),0),a.font=this.font,a.fillStyle=this.color;for(var b=0;b<this.text.length;b++)a.fillText(this.text[b],0,b*this.lineHeight)},Transformation.prototype={SetRotation:function(a){this.angle=a||0},ClearTransforms:function(){this.position=new Vector2D,this.scale=new Vector2D(1,1),this.angle=0,this.rotationPivot=new Vector2D}},Vector2D.prototype={SetX:function(a){this.x=a||0},SetY:function(a){this.y=a||0},GetX:function(){return this.x},GetY:function(){return this.y},MagnitudeSQR:function(){return this.x*this.x+this.y*this.y},Magnitude:function(){return Math.sqrt(this.MagnitudeSQR())},Add:function(a){return a instanceof Vector2D?new Vector2D(this.x+a.x,this.y+a.y):new Vector2D(this.x+a,this.y+a)},Sub:function(a){return a instanceof Vector2D?new Vector2D(this.x-a.x,this.y-a.y):new Vector2D(this.x-a,this.y-a)},Mul:function(a){return a instanceof Vector2D?new Vector2D(this.x*a.x,this.y*a.y):new Vector2D(this.x*a,this.y*a)},Div:function(a){return a instanceof Vector2D?new Vector2D(this.x/a.x,this.y/a.y):new Vector2D(this.x/a,this.y/a)},DistanceSQR:function(a){return this.Sub(a).MagnitudeSQR()},Distance:function(a){return this.Sub(a).Magnitude()},Negate:function(){this.x=-this.x,this.y=-this.y},Abs:function(){return new Vector2D(Math.abs(this.x),Math.abs(this.y))},Normalize:function(){var a=this.Magnitude();return 0==a&&(a=1),new Vector2D(this.x/a,this.y/a)}};
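// Usage sketch for the engine above (not part of the library): preload a
// texture, attach a sprite to a canvas-backed scene, and drive the usual
// update/draw loop. The canvas id "game" and the image path are hypothetical.
var scene = new SceneCanvas("game");
var loader = new PreloadImages();
loader.Load("assets/player.png").OnComplete(function() {
    var sprite = new Sprite();
    sprite.LoadTexture(loader.GetImage("assets/player.png"));
    sprite.transformation.position = new Vector2D(100, 50);
    scene.Add(sprite, 0); // add to layer 0
    (function loop() {
        scene.Update(); // advances animations and mouse picking
        scene.Draw();   // renders every visible layer to the canvas
        requestAnimationFrame(loop);
    })();
});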
import axios from 'axios'

const baseUrl = "/api"

export default {
  getMyMessage() {
    return axios({ method: 'get', url: `${baseUrl}/sm/student/me` })
  },
  getMyMessageById(id) {
    return axios({ method: 'get', url: `${baseUrl}/studentMessage/${id}` })
  },
  upDateUser(params) {
    return axios({ method: 'PUT', url: `${baseUrl}/sm/student/edit`, data: params })
  },
  outLogin() {
    return axios({ method: 'GET', url: `${baseUrl}/loginout` })
  },
  getpersonalCompatePageList(params, pageNum) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/compateStudent/page/${pageNum}`, data: params })
  },
  deletepersonalCompate(params) {
    return axios({ method: 'delete', url: `${baseUrl}/sm/compateStudent/delete`, params: params })
  },
  getCompateStudentPageList(params, pageNum) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/compateStudent/page/${pageNum}`, data: params })
  },
  getScoreList(params, pageNum) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/studentScore/page/${pageNum}`, data: params })
  },
  addContestantScore(params) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/studentScore/add`, data: params })
  },
  addMessageToUser(params) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/message/refereeSendCompateMessage`, data: params })
  },
  addAppeal(params) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/compateAppeal/add`, data: params })
  },
  getMessageNum(params) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/message/count`, data: params })
  },
  getMessagePageList(params, pageNum) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/message/page/${pageNum}`, data: params })
  },
  changeRead(params) {
    return axios({ method: 'get', url: `${baseUrl}/sm/message/isRead`, params: params })
  },
  addMedia(params) {
    return axios({ method: 'post', url: `${baseUrl}/sm/compateMedia/add`, data: params })
  },
  getCompateEquipmentList(pageNum) {
    return axios({ method: 'POST', url: `${baseUrl}/sm/compateEquipment/page/${pageNum}` })
  }
}
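// Usage sketch for the API module above; the import path './api' and the
// response shape are assumptions for illustration.
import api from './api'

api.getMyMessage()
  .then(res => {
    // axios resolves with the full response object; the payload is res.data
    console.log(res.data)
  })
  .catch(err => console.error(err))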
module.exports = new Date(2019, 5, 28) // JS Date months are 0-indexed: this is 28 June 2019
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.

"""Index syncing extension."""

from __future__ import absolute_import, print_function

from flask import current_app
from werkzeug.utils import cached_property

from . import config
from .cli import index_cmd
from .utils import build_alias_name, obj_or_import_string


class InvenioIndexMigrator(object):
    """Invenio index sync extension."""

    def __init__(self, app=None, **kwargs):
        """Extension initialization.

        :param app: An instance of :class:`~flask.app.Flask`.
        """
        self._clients = {}
        if app:
            self.init_app(app, **kwargs)

    @cached_property
    def recipes(self):
        """Get all configured migration recipes."""
        recipes_config = current_app.config.get('INDEX_MIGRATOR_RECIPES', {})
        for recipe_id, recipe_cfg in recipes_config.items():
            recipe_cfg['cls'] = obj_or_import_string(recipe_cfg['cls'])
        return recipes_config

    @cached_property
    def config_index(self):
        """Return migration index."""
        return build_alias_name(
            current_app.config['INDEX_MIGRATOR_INDEX_NAME']
        )

    def init_app(self, app, **kwargs):
        """Flask application initialization.

        :param app: An instance of :class:`~flask.app.Flask`.
        """
        self.init_config(app)
        app.cli.add_command(index_cmd)
        app.extensions['invenio-index-migrator'] = self

    @staticmethod
    def init_config(app):
        """Initialize configuration.

        :param app: An instance of :class:`~flask.app.Flask`.
        """
        for k in dir(config):
            if k.startswith('INDEX_MIGRATOR_'):
                app.config.setdefault(k, getattr(config, k))
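# Wiring sketch for the extension above (not from the source). The recipe id,
# class path and params below are hypothetical placeholders; 'cls' may be a
# class or an import string, resolved by obj_or_import_string on first access.
from flask import Flask

app = Flask(__name__)
app.config['INDEX_MIGRATOR_RECIPES'] = {
    'records-migration': {
        'cls': 'my_site.migrations:RecordsMigrationRecipe',
        'params': {},
    },
}
InvenioIndexMigrator(app)  # registers the migration CLI commands on the app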
'''A module for demonstrating exception handling.'''

import sys


def convert(s):
    '''Convert s to an integer.'''
    try:
        return int(s)
    except (ValueError, TypeError) as e:
        print("Conversion error: {}".format(str(e)), file=sys.stderr)
        raise  # re-raise so callers still see the failure
    finally:
        print("done!")  # runs whether or not the conversion succeeded


print(convert("10"))
print(convert("10A"))  # prints the error, then re-raises ValueError
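# Because convert() re-raises, the final line above ends the script with a
# traceback. A sketch (not part of the original demo) of absorbing the error at
# the call site instead:
def safe_convert(s):
    try:
        return convert(s)
    except (ValueError, TypeError):
        return None  # swallow the re-raised error and signal failure


print(safe_convert("10A"))  # -> None, after "Conversion error: ..." and "done!"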
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } import React, { Component } from 'react'; import { findDOMNode } from 'react-dom'; import styled from 'styled-components'; import { Box } from '../Box'; var ResizerBox = styled(Box).withConfig({ displayName: 'Resizer__ResizerBox', componentId: 'sc-8l808w-0' })(['cursor:col-resize;']); export var Resizer = function (_Component) { _inherits(Resizer, _Component); function Resizer() { var _temp, _this, _ret; _classCallCheck(this, Resizer); for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } return _ret = (_temp = (_this = _possibleConstructorReturn(this, _Component.call.apply(_Component, [this].concat(args))), _this), _this.state = {}, _this.ref = React.createRef(), _this.onMouseDown = function (event) { if (_this.ref.current) { var element = findDOMNode(_this.ref.current); var rect = element.getBoundingClientRect(); _this.setState({ start: event.clientX, width: rect.width }, function () { document.addEventListener('mousemove', _this.onMouseMove); document.addEventListener('mouseup', _this.onMouseUp); }); } }, _this.onMouseMove = function (event) { var property = _this.props.property; var width = _this.state.width; // We determined 12 empirically as being wide enough to hit but // not too wide to cause false hits. var nextWidth = Math.max(12, width + (event.clientX - _this.state.start)); _this.props.onResize(property)(nextWidth); }, _this.onMouseUp = function () { document.removeEventListener('mouseup', _this.onMouseUp); document.removeEventListener('mousemove', _this.onMouseMove); _this.setState({ start: undefined, width: undefined }); }, _temp), _possibleConstructorReturn(_this, _ret); } Resizer.prototype.render = function render() { var _props = this.props, children = _props.children, onResize = _props.onResize, theme = _props.theme; var start = this.state.start; if (onResize) { return React.createElement( Box, { ref: this.ref, direction: 'row', fill: true }, children, React.createElement(ResizerBox, _extends({ flex: false }, theme.dataTable.resize, { onMouseDown: this.onMouseDown, onMouseMove: start ? this.onMouseMove : undefined, onMouseUp: start ? this.onMouseUp : undefined })) ); } return children; }; return Resizer; }(Component);
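// Usage sketch for the Resizer above (not from the source): onResize is
// curried per property -- onResize(property) returns a handler that receives
// the next width in pixels. The column name and theme shape are assumptions.
var headerContent = 'Name'; // hypothetical header cell to wrap
React.createElement(
  Resizer,
  {
    property: 'name',
    onResize: function (property) {
      return function (nextWidth) {
        console.log(property + ' resized to ' + nextWidth + 'px');
      };
    },
    theme: { dataTable: { resize: { border: { side: 'end', color: 'border' } } } },
  },
  headerContent
);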
// Globals var yScale; var xScale; var margin; var barWidth; var barOffset; var div; var height; var currAggregateBy; var data function loadChart(aggregateBy, preserveMenuLabel) { // Convert dates to format for rendering chart // data = [{ "2016-09-20": { 1.0: 2, 2.0:1 } }]; if (currAggregateBy == aggregateBy) { var rangeStartText = document.getElementById('dropdownMenu1').innerHTML; var rangeEndText = document.getElementById('dropdownMenu2').innerHTML; var rangeStart, rangeEnd; if (rangeStartText.indexOf("Range") == -1) { // a time has been selected if (aggregateBy.indexOf("Hour") == -1) { // Aggregate by days rangeStart = getDateFromOption(rangeStartText.substring(0, 10)); } else { // Hour rangeStart = new Date(0,0,0, parseInt(rangeStartText.substring(0, 5))); } } if (rangeEndText.indexOf("Range") == -1) { // a time has been selected if (aggregateBy.indexOf("Hour") == -1) { // Aggregate by days rangeEnd = getDateFromOption(rangeEndText.substring(0, 10)); } else { // Hour rangeEnd = new Date(0,0,0, parseInt(rangeEndText.substring(0, 5))); } } } var dates = _.groupBy(rawData.rows, aggregateBy); var datesForOptions = []; var newDates = {}; _.forEach(dates, function(value, key) { var keyTime; if (aggregateBy.indexOf("Date") != -1) { keyTime = new Date(key); } else { keyTime = new Date(0, 0, 0, key); } datesForOptions.push(keyTime); if (rangeStart) { if (keyTime < rangeStart) return; } if (rangeEnd) { if (keyTime > rangeEnd) return; } newDates[key] = 0; _.forEach(value, function(v) { newDates[key] += parseInt(v["Amount"]); }) }); if (!document.getElementById("rangestart").getElementsByTagName("li").length || currAggregateBy != aggregateBy) { setTimeRangeOptions(datesForOptions); } data = _.map(newDates, function(value, prop) { var newVal = {}; newVal[prop] = value; return newVal; }); // Add missing dates in range with a value of 0 var sortedDates; var minDate; var minDateDate; var maxDate; var maxDateDate; var workingDate; if (aggregateBy.indexOf("Hour") == -1) { // Aggregate by days sortedDates = _.sortBy(Object.keys(newDates)); minDate = sortedDates[0]; minDateDate = new Date(minDate); minDateDate.setHours(24); // Handles different timezones maxDate = sortedDates[sortedDates.length - 1]; maxDateDate = new Date(maxDate); maxDateDate.setHours(24); // Handles different timezones workingDate = new Date(minDate); workingDate.setHours(24); // Handles different timezones } else { // Hours sortedDates = _.sortBy(Object.keys(newDates), [function(o) { return parseInt(o); }]) // Convert hours to dates for comparison minDate = sortedDates[0] + ":"; minDateDate = getDateFromOption(minDate); maxDate = sortedDates[sortedDates.length - 1] + ":"; maxDateDate = getDateFromOption(maxDate); workingDate = getDateFromOption(minDate); } while (workingDate < maxDateDate) { var workingDateStr; if (aggregateBy.indexOf("Hour") == -1) { // Aggregate by days workingDateStr = workingDate.toISOString().substring(0,10); } else { // Hours workingDateStr = workingDate.getDate() + ""; } if (!_.includes(sortedDates, workingDateStr)) { var newDate = {}; newDate[workingDateStr] = 0; // Calculate date difference to get index var insertAtIndex = (workingDate - minDateDate) / (1000*60*60*24); data.splice(insertAtIndex, 0, newDate); } workingDate.setDate(workingDate.getDate() + 1); } renderChart(data, aggregateBy, preserveMenuLabel); } function renderChart(data, aggregateBy, preserveMenuLabel) { currAggregateBy = aggregateBy; // Set graph dimensions margin = { top: 20, right: 20, bottom: 50, left: 40 }; height = 400; var width = 
100 + (data.length * 30); // Max Width (for 30 days) is 1000 barWidth = 20; barOffset = 10; // Set scales var maxHeight = 0; _.forEach(data, function(d) { var key = Object.keys(d)[0]; if (d[key] > maxHeight) maxHeight = d[key]; }); yScale = d3.scaleLinear() .domain([0, maxHeight]) .range([0, height - margin.top - margin.bottom]); xScale = d3.scaleLinear() .domain([0, data.length]) .range([0, width - margin.left - margin.right]); // Create tooltip div = d3.select("body").append("div") .attr("class", "tooltip") .attr("id", "tooltip") .style("opacity", 0); // Initialize chart var chart = d3.select('.chart'); chart .attr('width', width) .attr('height', height); // Create bars for the tsunami counts var mag0rects = chart.append('g').attr('class', 'mag0rects'); var magnitude0 = "0.0"; mag0rects .selectAll('rect') .data(data) .enter() .append('rect') .attr('class', function(d) { return getTsunamiCountFillClass(d); }) .attr('width', barWidth) .attr('height', function(data) { return getHeight(data, magnitude0); }) .attr('x', getX) .attr('y', function(data, i) { return getY(data, i, magnitude0); }) .on("mouseover", function(data) { showTooltip(data, magnitude0); }) .on("mouseout", hideTooltip); // Add X-axis line var timeFormat = "%Y-%m-%d"; if (aggregateBy == "QuakeHour") timeFormat = "%H:00"; var minDate = new Date(9999,11,31); var maxDate = new Date(1900,0,1); var allDates = []; _.forEach(data, function(d, key) { if (aggregateBy != "QuakeHour") { var currDate = new Date(Object.keys(d)[0]); } else { var currDate = new Date(0, 0, 0, Object.keys(d)[0]); } currDate.setDate(currDate.getDate() + 1); if (aggregateBy != "QuakeHour") currDate.setHours(0); allDates.push(currDate); if (currDate > maxDate) maxDate = currDate; if (currDate < minDate) minDate = currDate; }); // Add one lower date so that the tick labels do not begin at the origin var minMinusOne = new Date(minDate); if (aggregateBy != "QuakeHour") { minMinusOne.setDate(minMinusOne.getDate() - 1); } else { minMinusOne.setHours(minMinusOne.getHours() - 1); } var xTimeDomain = [minMinusOne, maxDate]; var xTimeScale = d3.scaleTime() .domain(xTimeDomain) .range([margin.left, width - margin.right]); var xAxis = d3.axisBottom(xTimeScale) .ticks(data.length + 1, timeFormat) .tickValues(allDates); chart.append("g") .attr("transform", "translate(0," + (height - margin.bottom) + ")") .call(xAxis) .selectAll("text") .attr("transform", "rotate(-45)"); // Add Y-axis line var yAxisScale = d3.scaleLinear() .domain([maxHeight, 0]) .range([margin.top, height - margin.bottom]); var yAxis = d3.axisLeft(yAxisScale) .ticks(maxHeight / 50, "s"); chart .append("g") .attr("transform", "translate(" + margin.left + ", 0)") .call(yAxis); // Add legend var legend = d3.selectAll(".legend") .attr("height", 40) .attr("width", 780); legend .selectAll('text') .data(['Amount Legend:']) .enter() .append('text') .attr('y', 25) .text(function(d) { return d; }) legend .selectAll('rect') .data(["0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0"]) .enter() .append('rect') .attr('class', function(d) { return getTsunamiMagnitudeFillClass(d) }) .attr('x', function(d, i) { return 150 + (i * 70)} ) .attr('width', '65') .attr('height', '20'); legend .selectAll('text') .data(['---', '1 - 99', '100 - 149', '150 - 199', '200 - 249', '250 - 299', '300 - 349', '350 - 399', '400 - 449', '450 +']) .enter() .append('text') .attr('x', function(d, i) { return 150 + ((i - 1) * 70)}) .attr('y', 35) .text(function(d) { return d; }) .style('fill', 'black'); } function getHeight(data, 
magnitude) { var key = Object.keys(data)[0]; if (!data[key]) return 0; return yScale(data[key]); } function getX(data, i) { return margin.left + xScale(i + 1) - (barWidth / 2); } function getY(data, i, magnitude) { var key = Object.keys(data)[0]; return height - margin.bottom - yScale(parseInt(data[key])); } function showTooltip(d, magnitude) { var key = Object.keys(d)[0]; document.getElementById('tooltip').innerHTML = d[key]; if (!d[key]) return; div.transition() .style("opacity", 1) .style("left", (d3.event.pageX) + "px") .style("top", (d3.event.pageY - 28) + "px"); } function clearChart() { d3.select('.chart') .selectAll('g') .remove(); d3.selectAll('.tooltip') .remove(); } function getTsunamiCountFillClass(d) { var key = Object.keys(d)[0]; var countInt = parseInt(d[key]); if (countInt < 100) return "tsunami0"; if (countInt < 150) return "tsunami1"; if (countInt < 200) return "tsunami2"; if (countInt < 250) return "tsunami3"; if (countInt < 300) return "tsunami4"; if (countInt < 350) return "tsunami5"; if (countInt < 400) return "tsunami6"; if (countInt < 450) return "tsunami7"; if (countInt >= 450) return "tsunami8plus"; } // Assign the handler itself; invoking it here would run it before the body has loaded document.getElementsByTagName("body")[0].onload = loadTsunamiMagnitudeData;
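// Usage sketch (not from the source): loadChart() groups the global rawData by
// the given field and redraws. The field name "QuakeDate" is inferred from the
// aggregateBy checks above and may differ in the real dataset.
// clearChart();
// loadChart("QuakeDate", false);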
/** * SendinBlue API * SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | * * OpenAPI spec version: 3.0.0 * Contact: contact@sendinblue.com * * NOTE: This class is auto generated by the swagger code generator program. * https://github.com/swagger-api/swagger-codegen.git * * Swagger Codegen version: 2.3.1 * * Do not edit the class manually. * */ (function(root, factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define(['ApiClient'], factory); } else if (typeof module === 'object' && module.exports) { // CommonJS-like environments that support module.exports, like Node. module.exports = factory(require('../ApiClient')); } else { // Browser globals (root is window) if (!root.SibApiV3Sdk) { root.SibApiV3Sdk = {}; } root.SibApiV3Sdk.ErrorModel = factory(root.SibApiV3Sdk.ApiClient); } }(this, function(ApiClient) { 'use strict'; /** * The ErrorModel model module. * @module model/ErrorModel * @version 8.2.0 */ /** * Constructs a new <code>ErrorModel</code>. * @alias module:model/ErrorModel * @class * @param code {module:model/ErrorModel.CodeEnum} Error code displayed in case of a failure * @param message {String} Readable message associated to the failure */ var exports = function(code, message) { var _this = this; _this['code'] = code; _this['message'] = message; }; /** * Constructs a <code>ErrorModel</code> from a plain JavaScript object, optionally creating a new instance. * Copies all relevant properties from <code>data</code> to <code>obj</code> if supplied or a new instance if not. * @param {Object} data The plain JavaScript object bearing properties of interest. * @param {module:model/ErrorModel} obj Optional instance to populate. * @return {module:model/ErrorModel} The populated <code>ErrorModel</code> instance. */ exports.constructFromObject = function(data, obj) { if (data) { obj = obj || new exports(); if (data.hasOwnProperty('code')) { obj['code'] = ApiClient.convertToType(data['code'], 'String'); } if (data.hasOwnProperty('message')) { obj['message'] = ApiClient.convertToType(data['message'], 'String'); } } return obj; } /** * Error code displayed in case of a failure * @member {module:model/ErrorModel.CodeEnum} code */ exports.prototype['code'] = undefined; /** * Readable message associated to the failure * @member {String} message */ exports.prototype['message'] = undefined; /** * Allowed values for the <code>code</code> property. 
* @enum {String} * @readonly */ exports.CodeEnum = { /** * value: "invalid_parameter" * @const */ "invalid_parameter": "invalid_parameter", /** * value: "missing_parameter" * @const */ "missing_parameter": "missing_parameter", /** * value: "out_of_range" * @const */ "out_of_range": "out_of_range", /** * value: "campaign_processing" * @const */ "campaign_processing": "campaign_processing", /** * value: "campaign_sent" * @const */ "campaign_sent": "campaign_sent", /** * value: "document_not_found" * @const */ "document_not_found": "document_not_found", /** * value: "reseller_permission_denied" * @const */ "reseller_permission_denied": "reseller_permission_denied", /** * value: "not_enough_credits" * @const */ "not_enough_credits": "not_enough_credits", /** * value: "permission_denied" * @const */ "permission_denied": "permission_denied", /** * value: "duplicate_parameter" * @const */ "duplicate_parameter": "duplicate_parameter", /** * value: "duplicate_request" * @const */ "duplicate_request": "duplicate_request", /** * value: "method_not_allowed" * @const */ "method_not_allowed": "method_not_allowed", /** * value: "unauthorized" * @const */ "unauthorized": "unauthorized", /** * value: "account_under_validation" * @const */ "account_under_validation": "account_under_validation", /** * value: "not_acceptable" * @const */ "not_acceptable": "not_acceptable" }; return exports; }));
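// Usage sketch for the ErrorModel above (an assumption, not from the source);
// in practice the model is populated from an API error payload. The package
// name 'sib-api-v3-sdk' is the SDK's published npm name.
var SibApiV3Sdk = require('sib-api-v3-sdk');

var err = SibApiV3Sdk.ErrorModel.constructFromObject({
  code: 'document_not_found',
  message: 'Campaign with id 42 does not exist',
});
console.log(err.code, err.message);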
import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import ( SquadResult, SquadV1Processor, SquadV2Processor, ) try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) ALL_MODELS = sum( (tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), (), ) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def load_and_cache_examples( args, model_path1, tokenizer, evaluate=False, output_examples=False ): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, model_path1.split("/"))).pop(), str(args.max_seq_length), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ( (evaluate and not args.predict_file) or (not evaluate and not args.train_file) ): try: import tensorflow_datasets as tfds except ImportError: raise ImportError( "If not data_dir is specified, tensorflow_datasets needs to be installed." 
) if args.version_2_with_negative: logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset( tfds_examples, evaluate=evaluate ) else: processor = ( SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() ) if evaluate: examples = processor.get_dev_examples( args.data_dir, filename=args.predict_file ) else: examples = processor.get_train_examples( args.data_dir, filename=args.train_file ) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save( {"features": features, "dataset": dataset, "examples": examples}, cached_features_file, ) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def evaluate(args, model_path1, model1, model2, model3, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples( args, model_path1, tokenizer, evaluate=True, output_examples=True ) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader( dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model1, torch.nn.DataParallel): model1 = torch.nn.DataParallel(model1) if args.n_gpu > 1 and not isinstance(model2, torch.nn.DataParallel): model2 = torch.nn.DataParallel(model2) if args.n_gpu > 1 and not isinstance(model3, torch.nn.DataParallel): model3 = torch.nn.DataParallel(model3) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model1.eval() model2.eval() model3.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } feature_indices = batch[3] outputs1 = model1(**inputs) outputs2 = model2(**inputs) outputs3 = model3(**inputs) # print("outputs1", outputs1) for i, feature_index in enumerate(feature_indices): # TODO: i and feature_index are the same number! Simplify by removing enumerate? 
eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output1 = [to_list(output1[i]) for output1 in outputs1] # print("output1", output1) # print("len(output1)", len(output1[0])) output2 = [to_list(output2[i]) for output2 in outputs2] output3 = [to_list(output3[i]) for output3 in outputs3] start_logits1, end_logits1 = output1 start_logits2, end_logits2 = output2 start_logits3, end_logits3 = output3 weights = [0.4, 0.2, 0.4] start_logits = [ weights[0] * log1 + weights[1] * log2 + weights[2] * log3 for log1, log2, log3 in zip(start_logits1, start_logits2, start_logits3) ] end_logits = [ weights[0] * log1 + weights[1] * log2 + weights[2] * log3 for log1, log2, log3 in zip(end_logits1, end_logits2, end_logits3) ] # print("start_logits1", start_logits1[0]) # print("start_logits2", start_logits2[0]) # print("start_logits3", start_logits3[0]) # print("start_logits", start_logits[0]) result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info( " Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset), ) # Compute predictions output_prediction_file = os.path.join( args.output_dir, "predictions_{}.json".format(prefix) ) output_nbest_file = os.path.join( args.output_dir, "nbest_predictions_{}.json".format(prefix) ) if args.version_2_with_negative: output_null_log_odds_file = os.path.join( args.output_dir, "null_odds_{}.json".format(prefix) ) else: output_null_log_odds_file = None predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. results = squad_evaluate(examples, predictions) return results def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_TYPES), ) # parser.add_argument( # "--model_name_or_path", # default=None, # type=str, # required=True, # help="Path to pre-trained model or shortcut name selected in the list: " # + ", ".join(ALL_MODELS), # ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. 
If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.", ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.", ) parser.add_argument( "--do_train", action="store_true", help="Whether to run training." ) parser.add_argument( "--do_eval", action="store_true", help="Whether to run eval on the dev set." ) parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.", ) parser.add_argument( "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument( "--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--weight_decay", default=0.0, type=float, help="Weight decay if we apply some." ) parser.add_argument( "--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer." ) parser.add_argument( "--max_grad_norm", default=1.0, type=float, help="Max gradient norm." ) parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument( "--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps." ) parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. 
This is needed because the start "
        "and end predictions are not conditioned on one another.",
    )
    parser.add_argument(
        "--verbose_logging",
        action="store_true",
        help="If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.",
    )
    parser.add_argument(
        "--lang_id",
        default=0,
        type=int,
        help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
    )
    parser.add_argument(
        "--logging_steps", type=int, default=500, help="Log every X updates steps."
    )
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save checkpoint every X updates steps.",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument(
        "--no_cuda", action="store_true", help="Do not use CUDA even when it is available"
    )
    parser.add_argument(
        "--overwrite_output_dir",
        action="store_true",
        help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument(
        "--seed", type=int, default=42, help="random seed for initialization"
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="local_rank for distributed training on gpus",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--server_ip", type=str, default="", help="Can be used for distant debugging."
    )
    parser.add_argument(
        "--server_port", type=str, default="", help="Can be used for distant debugging."
    )
    parser.add_argument(
        "--threads",
        type=int,
        default=1,
        help="number of threads used to convert examples to features",
    )
    # parser.add_argument(
    #     "--gru_layers", type=int, default=1, help="number of layers in GRU"
    # )
    # parser.add_argument(
    #     "--gru_hidden_size", type=int, default=256, help="hidden size in GRU"
    # )
    args = parser.parse_args()

    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be larger than the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )

    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome.".format(args.output_dir)
        )

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(
            address=(args.server_ip, args.server_port), redirect_output=True
        )
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
        )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    args.model_type = args.model_type.lower()

    model_path1 = "roberta_large_adv_output2/DuReader/checkpoint-3000/"
    model_path2 = "roberta_large_brightmart_output/DuReader/checkpoint-3000/"
    model_path3 = "roberta_large_adv_output/DuReader/checkpoint-3000/"

    # config1 = AutoConfig.from_pretrained(
    #     model_path1,
    #     cache_dir=args.cache_dir if args.cache_dir else None,
    # )
    tokenizer = AutoTokenizer.from_pretrained(
        model_path1,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # tokenizer2 = AutoTokenizer.from_pretrained(
    #     model_path2,
    #     do_lower_case=args.do_lower_case,
    #     cache_dir=args.cache_dir if args.cache_dir else None,
    # )
    # tokenizer3 = AutoTokenizer.from_pretrained(
    #     model_path3,
    #     do_lower_case=args.do_lower_case,
    #     cache_dir=args.cache_dir if args.cache_dir else None,
    # )
    # model1 = AutoModelForQuestionAnswering.from_pretrained(
    #     model_path1,
    #     from_tf=bool(".ckpt" in args.model_name_or_path),
    #     config=config,
    #     cache_dir=args.cache_dir if args.cache_dir else None,
    # )

    if args.local_rank == 0:
        # Make sure the other processes in distributed training wait until the first one has downloaded model & vocab
        torch.distributed.barrier()

    # model1.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex

            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )

    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
    results = {}

    logger.info("Loading checkpoint %s for evaluation", model_path1)
    global_step = ""

    # Reload the model
    model1 = AutoModelForQuestionAnswering.from_pretrained(
        model_path1
    )  # , force_download=True)
    model1.to(args.device)
    model2 = AutoModelForQuestionAnswering.from_pretrained(
        model_path2
    )  # , force_download=True)
    model2.to(args.device)
    model3 = AutoModelForQuestionAnswering.from_pretrained(
        model_path3
    )  # , force_download=True)
    model3.to(args.device)

    # Evaluate
    result = evaluate(args, model_path1, model1, model2, model3, tokenizer, prefix="")

    result = dict(
        (k + ("_{}".format(global_step) if global_step else ""), v)
        for k, v in result.items()
    )
    results.update(result)

    # logger.info("Results: {}".format(results))

    return results


if __name__ == "__main__":
    main()
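# The `evaluate(args, model_path1, model1, model2, model3, tokenizer, ...)` call
# above presumably combines the three checkpoints into an ensemble. A minimal
# hedged sketch of one common way to do that for extractive QA -- averaging
# start/end logits across models -- using plain torch and dummy tensors; the
# repo's actual evaluate() may combine the models differently.
import torch

def ensemble_qa_logits(model_outputs):
    # Each element is a (start_logits, end_logits) pair of shape (batch, seq_len).
    starts = torch.stack([s for s, _ in model_outputs]).mean(dim=0)
    ends = torch.stack([e for _, e in model_outputs]).mean(dim=0)
    return starts, ends

# Dummy logits standing in for three model forward passes on one batch.
outputs = [(torch.randn(2, 8), torch.randn(2, 8)) for _ in range(3)]
start_logits, end_logits = ensemble_qa_logits(outputs)
print(start_logits.shape, end_logits.shape)  # torch.Size([2, 8]) twice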
import os
import json


class Struct:
    def __init__(self, **args):
        self._urls = {}
        self._urls.update(args)

    def get_url(self, key):
        return self._urls[key]

    def __getattr__(self, key):
        return self._urls[key]

    def __str__(self):
        return str(self._urls)

    def __repr__(self):
        return str(self._urls)


try:
    nar_path = os.environ["NARRATIVEDIR"]
    with open(os.path.join(nar_path, "config.json")) as f:
        config_json = f.read()
    config = json.loads(config_json)
    url_config = config[config['config']]  # fun, right?
    URLS = Struct(**url_config)
except Exception:
    # Fall back to production defaults if the narrative config is unavailable.
    url_dict = {
        "workspace": "https://kbase.us/services/ws/",
        "invocation": "https://kbase.us/services/invocation",
        "fba": "https://kbase.us/services/KBaseFBAModeling",
        "genomeCmp": "https://kbase.us/services/genome_comparison/jsonrpc",
        "trees": "https://kbase.us/services/trees",
        "log_proxy_port": 32001,
        "log_proxy_host": "172.17.42.1"
    }
    URLS = Struct(**url_dict)
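# A short usage sketch of the Struct above. The keys shown exist in the
# fallback url_dict; when a real config.json is present the values differ.
print(URLS.workspace)              # attribute access -> "https://kbase.us/services/ws/"
print(URLS.get_url("invocation"))  # explicit lookup by key
print(URLS)                        # __str__ dumps the whole mapping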
/* * Document : compCharts.js * Author : pixelcave * Description: Custom javascript code used in Charts page */ var CompCharts = function() { // Get random number function from a given range var getRandomInt = function(min, max) { return Math.floor(Math.random() * (max - min + 1)) + min; }; return { init: function() { /* Mini Line Charts with jquery.sparkline plugin, for more examples you can check out http://omnipotent.net/jquery.sparkline/#s-about */ var miniChartLineOptions = { type: 'line', width: '120px', height: '65px', tooltipOffsetX: -25, tooltipOffsetY: 20, lineColor: '#de815c', fillColor: '#de815c', spotColor: '#555555', minSpotColor: '#555555', maxSpotColor: '#555555', highlightSpotColor: '#555555', highlightLineColor: '#555555', spotRadius: 3, tooltipPrefix: '', tooltipSuffix: ' Tickets', tooltipFormat: '{{prefix}}{{y}}{{suffix}}' }; $('#mini-chart-line1').sparkline('html', miniChartLineOptions); miniChartLineOptions['lineColor'] = '#5ccdde'; miniChartLineOptions['fillColor'] = '#5ccdde'; miniChartLineOptions['tooltipPrefix'] = '$ '; miniChartLineOptions['tooltipSuffix'] = ''; $('#mini-chart-line2').sparkline('html', miniChartLineOptions); /* Mini Bar Charts with jquery.sparkline plugin, for more examples you can check out http://omnipotent.net/jquery.sparkline/#s-about */ var miniChartBarOptions = { type: 'bar', barWidth: 7, barSpacing: 6, height: '65px', tooltipOffsetX: -25, tooltipOffsetY: 20, barColor: '#de815c', tooltipPrefix: '', tooltipSuffix: ' Tickets', tooltipFormat: '{{prefix}}{{value}}{{suffix}}' }; $('#mini-chart-bar1').sparkline('html', miniChartBarOptions); miniChartBarOptions['barColor'] = '#5ccdde'; miniChartBarOptions['tooltipPrefix'] = '$ '; miniChartBarOptions['tooltipSuffix'] = ''; $('#mini-chart-bar2').sparkline('html', miniChartBarOptions); // Randomize easy pie charts values var random; $('.toggle-pies').click(function() { $('.pie-chart').each(function() { random = getRandomInt(1, 100); $(this).data('easyPieChart').update(random); }); }); /* * Flot Charts Jquery plugin is used for charts * * For more examples or getting extra plugins you can check http://www.flotcharts.org/ * Plugins included in this template: pie, resize, stack, time */ // Get the elements where we will attach the charts var chartClassic = $('#chart-classic'); var chartStacked = $('#chart-stacked'); var chartPie = $('#chart-pie'); var chartBars = $('#chart-bars'); // Data for the charts var dataEarnings = [[1, 1900], [2, 2300], [3, 3200], [4, 2500], [5, 4200], [6, 3100], [7, 3600], [8, 2500], [9, 4600], [10, 3700], [11, 4200], [12, 5200]]; var dataSales = [[1, 850], [2, 750], [3, 1500], [4, 900], [5, 1500], [6, 1150], [7, 1500], [8, 900], [9, 1800], [10, 1700], [11, 1900], [12, 2550]]; var dataTickets = [[1, 130], [2, 330], [3, 220], [4, 350], [5, 150], [6, 275], [7, 280], [8, 380], [9, 120], [10, 330], [11, 190], [12, 410]]; var dataSalesBefore = [[1, 200], [4, 350], [7, 700], [10, 950], [13, 800], [16, 1050], [19, 1200], [22, 750], [25, 980], [28, 1300], [31, 1350], [34, 1200]]; var dataSalesAfter = [[2, 450], [5, 700], [8, 980], [11, 1200], [14, 1350], [17, 1200], [20, 1530], [23, 1750], [26, 1300], [29, 1620], [32, 1750], [35, 1750]]; var dataMonths = [[1, 'Jan'], [2, 'Feb'], [3, 'Mar'], [4, 'Apr'], [5, 'May'], [6, 'Jun'], [7, 'Jul'], [8, 'Aug'], [9, 'Sep'], [10, 'Oct'], [11, 'Nov'], [12, 'Dec']]; var dataMonthsBars = [[2, 'Jan'], [5, 'Feb'], [8, 'Mar'], [11, 'Apr'], [14, 'May'], [17, 'Jun'], [20, 'Jul'], [23, 'Aug'], [26, 'Sep'], [29, 'Oct'], [32, 'Nov'], [35, 'Dec']]; // Classic 
Chart $.plot(chartClassic, [ { label: 'Earnings', data: dataEarnings, lines: {show: true, fill: true, fillColor: {colors: [{opacity: .6}, {opacity: .6}]}}, points: {show: true, radius: 5} }, { label: 'Sales', data: dataSales, lines: {show: true, fill: true, fillColor: {colors: [{opacity: .2}, {opacity: .2}]}}, points: {show: true, radius: 5} }, { label: 'Tickets', data: dataTickets, lines: {show: true, fill: true, fillColor: {colors: [{opacity: .2}, {opacity: .2}]}}, points: {show: true, radius: 5} } ], { colors: ['#5ccdde', '#454e59', '#ffffff'], legend: {show: true, position: 'nw', backgroundOpacity: 0}, grid: {borderWidth: 0, hoverable: true, clickable: true}, yaxis: {tickColor: '#f5f5f5', ticks: 3}, xaxis: {ticks: dataMonths, tickColor: '#f5f5f5'} } ); // Creating and attaching a tooltip to the classic chart var previousPoint = null, ttlabel = null; chartClassic.bind('plothover', function(event, pos, item) { if (item) { if (previousPoint !== item.dataIndex) { previousPoint = item.dataIndex; $('#chart-tooltip').remove(); var x = item.datapoint[0], y = item.datapoint[1]; if (item.seriesIndex === 0) { ttlabel = '$ <strong>' + y + '</strong>'; } else if (item.seriesIndex === 1) { ttlabel = '<strong>' + y + '</strong> sales'; } else { ttlabel = '<strong>' + y + '</strong> tickets'; } $('<div id="chart-tooltip" class="chart-tooltip">' + ttlabel + '</div>') .css({top: item.pageY - 45, left: item.pageX + 5}).appendTo("body").show(); } } else { $('#chart-tooltip').remove(); previousPoint = null; } }); // Stacked Chart $.plot(chartStacked, [{label: 'Tickets', data: dataTickets}, {label: 'Sales', data: dataSales}, {label: 'Earnings', data: dataEarnings}], { colors: ['#aaaaaa', '#454e59', '#5ccdde'], series: {stack: true, lines: {show: true, fill: true}}, lines: {show: true, lineWidth: 0, fill: true, fillColor: {colors: [{opacity: .6}, {opacity: .6}]}}, legend: {show: true, position: 'nw', sorted: true, backgroundOpacity: 0}, grid: {borderWidth: 0}, yaxis: {tickColor: '#f5f5f5', ticks: 3}, xaxis: {ticks: dataMonths, tickColor: '#f5f5f5'} } ); // Pie Chart $.plot(chartPie, [ {label: 'Sales', data: 30}, {label: 'Tickets', data: 10}, {label: 'Earnings', data: 60} ], { colors: ['#454e59', '#5cafde', '#5ccdde'], legend: {show: false}, series: { pie: { show: true, radius: 1, label: { show: true, radius: 2/3, formatter: function(label, pieSeries) { return '<div class="chart-pie-label">' + label + '<br>' + Math.round(pieSeries.percent) + '%</div>'; }, background: {opacity: .75, color: '#000000'} } } } } ); // Bars Chart $.plot(chartBars, [ { label: 'Sales Before', data: dataSalesBefore, bars: {show: true, lineWidth: 0, fillColor: {colors: [{opacity: .6}, {opacity: .6}]}} }, { label: 'Sales After', data: dataSalesAfter, bars: {show: true, lineWidth: 0, fillColor: {colors: [{opacity: .6}, {opacity: .6}]}} } ], { colors: ['#5ccdde', '#454e59'], legend: {show: true, position: 'nw', backgroundOpacity: 0}, grid: {borderWidth: 0}, yaxis: {ticks: 3, tickColor: '#f5f5f5'}, //xaxis: {ticks: dataMonthsBars, tickColor: '#f5f5f5'} } ); } }; }();
import axios from 'axios'
import moment from 'moment'
import { apiConfig } from '../../../config/apiConfig'

const BASE_URL = apiConfig.BASE_URL

export default {
  // Pages should probably start at largest page and go down in number
  // this way links stay around and it's easy to link to a particular page.
  // perhaps even including a limit and redirect to max if something
  // too large is provided (in the url).
  fetchListData: ({ commit, dispatch, state, getters }, { type, category, page = 1, createdAtBefore, createdAfter, tags, search }) => {
    if (!createdAtBefore && !createdAfter) createdAtBefore = moment().toISOString()
    const token = getters.getToken
    commit('setActiveType', { type })

    const options = {}
    if (token) {
      options.headers = { 'Authorization': 'Bearer ' + token }
    }

    let url = `${BASE_URL}/posts?page=${page}&type=${type}`
    if (createdAtBefore) url += `&createdAtBefore=${createdAtBefore}`
    if (createdAfter) url += `&createdAfter=${createdAfter}`
    if (search) url += `&search=${search}`
    if (category) url += `&categories=${category}`
    if (tags) {
      const tagString = tags.join(',')
      url += `&tags=${tagString}`
    }

    return axios.get(url, options)
      .then((response) => {
        commit('setList', { type, posts: response.data })
        commit('setPosts', { posts: response.data })
        return { posts: response.data, maxPage: 4 }
      })
      .catch((error) => {
        // @TODO: Add pretty pop up here
        console.log(error.response)
        alert(error.response.data.message)
      })
  },
  fetchRecommendations: ({ commit, dispatch, state, getters }, { page = 1, category, createdAtBefore, type }) => {
    const token = getters.getToken
    commit('setActiveType', { type })

    let url = `${BASE_URL}/posts/recommendations?page=${page}`
    if (createdAtBefore) url += `&createdAtBefore=${createdAtBefore}`
    if (category) url += `&categories=${category}`

    return axios.get(url, { headers: { 'Authorization': 'Bearer ' + token } })
      .then((response) => {
        commit('setList', { type, posts: response.data })
        commit('setPosts', { posts: response.data })
        return { posts: response.data, maxPage: 4 }
      })
      .catch((error) => {
        // @TODO: Add pretty pop up here
        console.log(error)
        // alert(error.message)
        // alert(error.response.data.message)
      })
  },
  fetchArticle: ({ commit, state, getters }, { id }) => {
    console.log('fetch article', id)
    const options = {}
    const token = getters.getToken
    if (token) {
      options.headers = { 'Authorization': 'Bearer ' + token }
    }
    return axios.get(`${BASE_URL}/posts/${id}`, options)
      .then((response) => {
        var post = response.data
        commit('setPosts', { posts: [post] })
        return { post }
      })
      .catch((error) => {
        // @TODO: Add pretty pop up here
        console.log(error.response)
        alert(error.response.data.message)
      })
  },
  upvote: ({ commit, getters, state }, { id }) => {
    const token = getters.getToken
    if (!token) {
      alert('You must login to vote')
      return
    }
    commit('upVote', { articleId: id })
    const article = state.posts[id]
    return axios.post(`${BASE_URL}/posts/${article._id}/upvote`, {}, { headers: { 'Authorization': 'Bearer ' + token } })
  },
  downvote: ({ commit, getters, state }, { id }) => {
    const token = getters.getToken
    if (!token) {
      alert('You must login to vote')
      return
    }
    commit('downVote', { articleId: id })
    const article = state.posts[id]
    return axios.post(`${BASE_URL}/posts/${article._id}/downvote`, {}, { headers: { 'Authorization': 'Bearer ' + token } })
  }
}
const mix = require('laravel-mix'); /* |-------------------------------------------------------------------------- | Mix Asset Management |-------------------------------------------------------------------------- | | Mix provides a clean, fluent API for defining some Webpack build steps | for your Laravel application. By default, we are compiling the Sass | file for the application as well as bundling up all the JS files. | */ mix // .js('resources/js/app.js', 'public/js') .sass('resources/sass/app.scss', 'public/css') .browserSync({ proxy: '127.0.0.1:8000' });
#
#    Metrix++, Copyright 2009-2019, Metrix++ Project
#    Link: https://github.com/metrixplusplus/metrixplusplus
#
#    This file is a part of Metrix++ Tool.
#

from metrixpp.mpp import api

import re

class Plugin(api.Plugin,
             api.IConfigurable,
             api.Child,
             api.MetricPluginMixin):

    def declare_configuration(self, parser):
        parser.add_option("--myext.magic.numbers", "--mmn",
            action="store_true", default=False,
            help="Enables collection of magic numbers metric [default: %default]")
        # Add new option
        parser.add_option("--myext.magic.numbers.simplier", "--mmns",
            action="store_true", default=False,
            help="If set, 0, -1 and 1 numbers are not counted [default: %default]")

    def configure(self, options):
        self.is_active_numbers = options.__dict__['myext.magic.numbers']
        # remember the option here
        self.is_active_numbers_simplier = options.__dict__['myext.magic.numbers.simplier']

    def initialize(self):
        pattern_to_search_java = re.compile(
            r'''((const(\s+[_$a-zA-Z][_$a-zA-Z0-9]*)+\s*[=]\s*)[-+]?[0-9]+\b)|(\b[0-9]+\b)''')
        pattern_to_search_cpp_cs = re.compile(
            r'''((const(\s+[_a-zA-Z][_a-zA-Z0-9]*)+\s*[=]\s*)[-+]?[0-9]+\b)|(\b[0-9]+\b)''')
        pattern_to_search = re.compile(
            r'''\b[0-9]+\b''')
        self.declare_metric(self.is_active_numbers,
                            self.Field('numbers', int, non_zero=True),
                            {
                                'std.code.java': (pattern_to_search_java, self.NumbersCounter),
                                'std.code.cpp': (pattern_to_search_cpp_cs, self.NumbersCounter),
                                'std.code.cs': (pattern_to_search_cpp_cs, self.NumbersCounter),
                                '*': pattern_to_search
                            },
                            marker_type_mask=api.Marker.T.CODE,
                            region_type_mask=api.Region.T.ANY)

        super(Plugin, self).initialize(fields=self.get_fields(),
                                       # remember option settings in data file properties
                                       # in order to detect changes in settings on iterative re-run
                                       properties=[self.Property('number.simplier', self.is_active_numbers_simplier)])

        if self.is_active():
            self.subscribe_by_parents_interface(api.ICode)

    class NumbersCounter(api.MetricPluginMixin.IterIncrementCounter):
        def increment(self, match):
            if (match.group(0).startswith('const')
                    or (self.plugin.is_active_numbers_simplier
                        and match.group(0) in ['0', '1', '-1', '+1'])):
                return 0
            return 1
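# A self-contained sketch of what the generic fallback pattern above flags as
# a magic number; the sample source line is made up for illustration.
import re

pattern_to_search = re.compile(r'\b[0-9]+\b')
sample = "timeout = base_delay * 3 + 42  # retry tuning"
print(pattern_to_search.findall(sample))  # ['3', '42'] -- each would count once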
class NoReturn: """Do not store the return value in the object store. If a task returns this object, then Ray will not store this object in the object store. Calling `ray.get` on the task's return ObjectIDs may block indefinitely unless the task manually stores an object for the corresponding ObjectID. """ def __init__(self): raise TypeError("The `NoReturn` object should not be instantiated")
# Think about how to improve on this basis
# Naive recursion: try every coin and keep the cheapest way to reach the target.
def mincoin(coins, target):
    if target == 0:
        return 0
    coins.sort()
    best = float('inf')  # infinity marks "target not reachable with these coins"
    for coin in coins:
        if coin > target:
            break  # coins are sorted, so no later coin fits either
        best = min(best, 1 + mincoin(coins, target - coin))
    return best

print(mincoin([1, 2], 3))  # -> 2 (1 + 2)
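# The recursion above recomputes the same subtargets exponentially many times.
# One possible improvement, sketched here, is the standard bottom-up coin-change
# table, which does the same work in O(len(coins) * target) time.
def mincoin_dp(coins, target):
    INF = float('inf')
    best = [0] + [INF] * target  # best[t] = fewest coins summing to t
    for t in range(1, target + 1):
        for coin in coins:
            if coin <= t and best[t - coin] + 1 < best[t]:
                best[t] = best[t - coin] + 1
    return best[target]

print(mincoin_dp([1, 2], 3))  # -> 2, matching the recursive version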
from pathlib import Path import pytest from souschef.recipe import Recipe @pytest.fixture def path_data() -> Path: return Path(__file__).parent / "data" @pytest.fixture(scope="function") def pure_yaml_with_comments(path_data): return Recipe(load_file=path_data / "pure.yaml", show_comments=True) @pytest.fixture(scope="function") def pure_yaml(path_data): return Recipe(load_file=path_data / "pure.yaml", show_comments=False) @pytest.fixture(scope="function") def simple_yaml(path_data): return Recipe(load_file=path_data / "simple.yaml") @pytest.fixture def comment_yaml(path_data): return Recipe(load_file=path_data / "comment.yaml") @pytest.fixture def simple_full_recipe(path_data): return Recipe(load_file=path_data / "simple_full_recipe.yaml") @pytest.fixture def multiple_jinja_var_same_line(path_data): return Recipe(load_file=path_data / "multiple_jinja_var_same_line.yaml")
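# A hedged sketch of how pytest injects one of the fixtures above into a test.
# Recipe's public API isn't shown in this file, so the assertion is kept
# deliberately weak.
def test_pure_yaml_loads(pure_yaml):
    # pytest constructs the Recipe via the pure_yaml fixture before calling us.
    assert pure_yaml is not None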
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2019 Shigeki Karita
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

"""Decoder definition."""

import torch

from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import PositionwiseFeedForward
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.scorer_interface import ScorerInterface


class Decoder(ScorerInterface, torch.nn.Module):
    """Transformer decoder module.

    :param int odim: output dim
    :param int attention_dim: dimension of attention
    :param int attention_heads: the number of heads of multi head attention
    :param int linear_units: the number of units of position-wise feed forward
    :param int num_blocks: the number of decoder blocks
    :param float dropout_rate: dropout rate
    :param float attention_dropout_rate: dropout rate for attention
    :param str or torch.nn.Module input_layer: input layer type
    :param bool use_output_layer: whether to use output layer
    :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
    :param bool normalize_before: whether to use layer_norm before the first block
    :param bool concat_after: whether to concat attention layer's input and output
        if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x)))
        if False, no additional linear will be applied. i.e. x -> x + att(x)
    """

    def __init__(self, odim,
                 attention_dim=256,
                 attention_heads=4,
                 linear_units=2048,
                 num_blocks=6,
                 dropout_rate=0.1,
                 positional_dropout_rate=0.1,
                 self_attention_dropout_rate=0.0,
                 src_attention_dropout_rate=0.0,
                 input_layer="embed",
                 use_output_layer=True,
                 pos_enc_class=PositionalEncoding,
                 normalize_before=True,
                 concat_after=False):
        """Construct a Decoder object."""
        torch.nn.Module.__init__(self)
        if input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(odim, attention_dim),
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        elif input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(odim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise NotImplementedError("only `embed`, `linear` or torch.nn.Module is supported.")
        self.normalize_before = normalize_before
        self.decoders = repeat(
            num_blocks,
            lambda: DecoderLayer(
                attention_dim,
                MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate),
                MultiHeadedAttention(attention_heads, attention_dim, src_attention_dropout_rate),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after
            )
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, odim)
        else:
            self.output_layer = None

    def forward(self, tgt, tgt_mask, memory, memory_mask, return_penultimate_state=False):
        """Forward decoder.
        :param torch.Tensor tgt: input token ids, int64 (batch, maxlen_out) if input_layer == "embed"
                                 input tensor (batch, maxlen_out, #mels) in the other cases
        :param torch.Tensor tgt_mask: input token mask, (batch, maxlen_out)
                                      dtype=torch.uint8 in PyTorch 1.2-
                                      dtype=torch.bool in PyTorch 1.2+ (1.2 included)
        :param torch.Tensor memory: encoded memory, float32 (batch, maxlen_in, feat)
        :param torch.Tensor memory_mask: encoded memory mask, (batch, maxlen_in)
                                         dtype=torch.uint8 in PyTorch 1.2-
                                         dtype=torch.bool in PyTorch 1.2+ (1.2 included)
        :return x: decoded token score before softmax (batch, maxlen_out, token) if use_output_layer is True,
                   final block outputs (batch, maxlen_out, attention_dim) in the other cases
        :rtype: torch.Tensor
        :return tgt_mask: score mask before softmax (batch, maxlen_out)
        :rtype: torch.Tensor
        """
        x = self.embed(tgt)
        x, tgt_mask, memory, memory_mask = self.decoders(x, tgt_mask, memory, memory_mask)
        if self.normalize_before:
            x = self.after_norm(x)
        # to utilize penultimate_state, we need to store it
        if return_penultimate_state:
            penultimate_state = x
        if self.output_layer is not None:
            x = self.output_layer(x)
        if return_penultimate_state:
            return x, tgt_mask, penultimate_state
        return x, tgt_mask

    def forward_one_step(self, tgt, tgt_mask, memory, cache=None):
        """Forward one step.

        :param torch.Tensor tgt: input token ids, int64 (batch, maxlen_out)
        :param torch.Tensor tgt_mask: input token mask, (batch, maxlen_out)
                                      dtype=torch.uint8 in PyTorch 1.2-
                                      dtype=torch.bool in PyTorch 1.2+ (1.2 included)
        :param torch.Tensor memory: encoded memory, float32 (batch, maxlen_in, feat)
        :param List[torch.Tensor] cache: cached output list of (batch, max_time_out-1, size)
        :return y, cache: NN output value and cache per `self.decoders`.
            `y.shape` is (batch, token): the log-probabilities for the last position only
        :rtype: Tuple[torch.Tensor, List[torch.Tensor]]
        """
        x = self.embed(tgt)
        if cache is None:
            cache = self.init_state()
        new_cache = []
        for c, decoder in zip(cache, self.decoders):
            x, tgt_mask, memory, memory_mask = decoder(x, tgt_mask, memory, None, cache=c)
            new_cache.append(x)

        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.output_layer is not None:
            y = torch.log_softmax(self.output_layer(y), dim=-1)

        return y, new_cache

    # beam search API (see ScorerInterface)
    def init_state(self, x=None):
        """Get an initial state for decoding."""
        return [None for i in range(len(self.decoders))]

    def score(self, ys, state, x):
        """Score."""
        ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
        logp, state = self.forward_one_step(ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state)
        return logp.squeeze(0), state
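# A minimal sketch, independent of ESPnet's subsequent_mask helper, of the
# causal mask this decoder relies on: position i may only attend to
# positions <= i. Plain torch only; the size is illustrative.
import torch

def causal_mask(size: int) -> torch.Tensor:
    # Lower-triangular boolean matrix: row i is True for columns 0..i.
    return torch.tril(torch.ones(size, size, dtype=torch.bool))

print(causal_mask(4))
# tensor([[ True, False, False, False],
#         [ True,  True, False, False],
#         [ True,  True,  True, False],
#         [ True,  True,  True,  True]])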
'''
    .. warning:: These drivetrain models are not particularly realistic, and
                 if you are using a tank drive style drivetrain you should use
                 the :class:`.TankModel` instead.

    Based on input from various drive motors, these helper functions
    simulate moving the robot in various ways. Many thanks to
    `Ether <http://www.chiefdelphi.com/forums/member.php?u=34863>`_
    for assistance with the motion equations.

    When specifying the robot speed to the below functions, the following
    may help you determine the approximate speed of your robot:

    * Slow: 4ft/s
    * Typical: 5 to 7ft/s
    * Fast: 8 to 12ft/s

    Obviously, to get the best simulation results, you should try to
    estimate the speed of your robot accurately.

    Here's an example usage of the drivetrains::

        from pyfrc.physics import drivetrains

        class PhysicsEngine:

            def __init__(self, physics_controller):
                self.physics_controller = physics_controller
                self.drivetrain = drivetrains.TwoMotorDrivetrain(deadzone=drivetrains.linear_deadzone(0.2))

            def update_sim(self, hal_data, now, tm_diff):
                # TODO: get motor values from hal_data

                speed, rotation = self.drivetrain.get_vector(l_motor, r_motor)
                self.physics_controller.drive(speed, rotation, tm_diff)

                # optional: compute encoder
                # l_encoder = self.drivetrain.l_speed * tm_diff
'''

import math
import typing

DeadzoneCallable = typing.Callable[[float], float]


def linear_deadzone(deadzone: float) -> DeadzoneCallable:
    '''
        Real motors won't actually move unless you give them some minimum amount
        of input. This computes an output speed for a motor and causes it to
        'not move' if the input isn't high enough. Additionally, the output is
        adjusted linearly to compensate.

        Example: For a deadzone of 0.2:

        * Input of 0.0 will result in 0.0
        * Input of 0.2 will result in 0.0
        * Input of 0.3 will result in ~0.12
        * Input of 1.0 will result in 1.0

        This returns a function that computes the deadzone. You should pass the
        returned function to one of the drivetrain simulation functions as the
        ``deadzone`` parameter.

        :param deadzone: Minimum input required for the motor to move (between 0 and 1);
                         the returned function takes a motor input between -1 and 1
    '''
    assert 0.0 < deadzone < 1.0
    scale_param = 1.0 - deadzone

    def _linear_deadzone(motor_input):
        abs_motor_input = abs(motor_input)
        if abs_motor_input < deadzone:
            return 0.0
        else:
            return math.copysign((abs_motor_input - deadzone) / scale_param, motor_input)

    return _linear_deadzone


class TwoMotorDrivetrain:
    '''
        Two center-mounted motors with a simple drivetrain. The
        motion equations are as follows::

            FWD = (L+R)/2
            RCW = (L-R)/W

        * L is forward speed of the left wheel(s), all in sync
        * R is forward speed of the right wheel(s), all in sync
        * W is wheelbase in feet

        If you called "SetInvertedMotor" on any of your motors in RobotDrive,
        then you will need to multiply that motor's value by -1.

        .. note:: WPILib RobotDrive assumes that to make the robot go forward,
                  the left motor must be set to -1, and the right to +1

        .. versionadded:: 2018.2.0
    '''

    def __init__(self, x_wheelbase: float = 2, speed: float = 5, deadzone: DeadzoneCallable = None):
        '''
            :param x_wheelbase: The distance in feet between right and left wheels.
            :param speed: Speed of robot in feet per second (see above)
            :param deadzone: A function that adjusts the output of the motor (see :func:`linear_deadzone`)
        '''
        self.x_wheelbase = x_wheelbase
        self.speed = speed
        self.deadzone = deadzone

        # Use these to compute encoder data after calling get_vector
        self.l_speed = 0
        self.r_speed = 0

    def get_vector(self, l_motor: float, r_motor: float) -> typing.Tuple[float, float]:
        '''
            Given motor values, computes the resulting (speed, rotation)
            vector for your robot

            :param l_motor: Left motor value (-1 to 1); -1 is forward
            :param r_motor: Right motor value (-1 to 1); 1 is forward

            :returns: speed of robot (ft/s), clockwise rotation of robot (radians/s)
        '''
        if self.deadzone:
            l_motor = self.deadzone(l_motor)
            r_motor = self.deadzone(r_motor)

        l = -l_motor * self.speed
        r = r_motor * self.speed

        # Motion equations
        fwd = (l + r) * 0.5
        rcw = (l - r) / float(self.x_wheelbase)

        self.l_speed = l
        self.r_speed = r

        return fwd, rcw


def two_motor_drivetrain(l_motor, r_motor, x_wheelbase=2, speed=5, deadzone=None):
    '''
        .. deprecated:: 2018.2.0
           Use :class:`TwoMotorDrivetrain` instead
    '''
    return TwoMotorDrivetrain(x_wheelbase, speed, deadzone).get_vector(l_motor, r_motor)


class FourMotorDrivetrain:
    '''
        Four motors, each side chained together. The motion equations are
        as follows::

            FWD = (L+R)/2
            RCW = (L-R)/W

        * L is forward speed of the left wheel(s), all in sync
        * R is forward speed of the right wheel(s), all in sync
        * W is wheelbase in feet

        If you called "SetInvertedMotor" on any of your motors in RobotDrive,
        then you will need to multiply that motor's value by -1.

        .. note:: WPILib RobotDrive assumes that to make the robot go forward,
                  the left motors must be set to -1, and the right to +1

        .. versionadded:: 2018.2.0
    '''

    #: Use this to compute encoder data after get_vector is called
    l_speed = 0
    r_speed = 0

    def __init__(self, x_wheelbase: float = 2, speed: float = 5, deadzone: DeadzoneCallable = None):
        '''
            :param x_wheelbase: The distance in feet between right and left wheels.
            :param speed: Speed of robot in feet per second (see above)
            :param deadzone: A function that adjusts the output of the motor (see :func:`linear_deadzone`)
        '''
        self.x_wheelbase = x_wheelbase
        self.speed = speed
        self.deadzone = deadzone

    def get_vector(self, lr_motor: float, rr_motor: float, lf_motor: float, rf_motor: float) -> typing.Tuple[float, float]:
        '''
            :param lr_motor: Left rear motor value (-1 to 1); -1 is forward
            :param rr_motor: Right rear motor value (-1 to 1); 1 is forward
            :param lf_motor: Left front motor value (-1 to 1); -1 is forward
            :param rf_motor: Right front motor value (-1 to 1); 1 is forward

            :returns: speed of robot (ft/s), clockwise rotation of robot (radians/s)
        '''
        if self.deadzone:
            lf_motor = self.deadzone(lf_motor)
            lr_motor = self.deadzone(lr_motor)
            rf_motor = self.deadzone(rf_motor)
            rr_motor = self.deadzone(rr_motor)

        l = -(lf_motor + lr_motor) * 0.5 * self.speed
        r = (rf_motor + rr_motor) * 0.5 * self.speed

        # Motion equations
        fwd = (l + r) * 0.5
        rcw = (l - r) / float(self.x_wheelbase)

        self.l_speed = l
        self.r_speed = r

        return fwd, rcw


def four_motor_drivetrain(lr_motor, rr_motor, lf_motor, rf_motor, x_wheelbase=2, speed=5, deadzone=None):
    '''
        .. deprecated:: 2018.2.0
           Use :class:`FourMotorDrivetrain` instead
    '''
    return FourMotorDrivetrain(x_wheelbase, speed, deadzone).get_vector(lr_motor, rr_motor, lf_motor, rf_motor)


class MecanumDrivetrain:
    '''
        Four motors, each with a mecanum wheel attached to it.
        If you called "SetInvertedMotor" on any of your motors in RobotDrive,
        then you will need to multiply that motor's value by -1.

        .. note:: WPILib RobotDrive assumes that to make the robot go forward,
                  all motors are set to +1

        .. versionadded:: 2018.2.0
    '''

    #: Use this to compute encoder data after get_vector is called
    lr_speed = 0
    rr_speed = 0
    lf_speed = 0
    rf_speed = 0

    def __init__(self, x_wheelbase: float = 2, y_wheelbase: float = 3, speed: float = 5, deadzone: DeadzoneCallable = None):
        '''
            :param x_wheelbase: The distance in feet between right and left wheels.
            :param y_wheelbase: The distance in feet between forward and rear wheels.
            :param speed: Speed of robot in feet per second (see above)
            :param deadzone: A function that adjusts the output of the motor (see :func:`linear_deadzone`)
        '''
        self.x_wheelbase = x_wheelbase
        self.y_wheelbase = y_wheelbase
        self.speed = speed
        self.deadzone = deadzone

    def get_vector(self, lr_motor: float, rr_motor: float, lf_motor: float, rf_motor: float) -> typing.Tuple[float, float, float]:
        '''
            Given motor values, computes the resulting (x speed, y speed, rotation)
            vector for your robot

            :param lr_motor: Left rear motor value (-1 to 1); 1 is forward
            :param rr_motor: Right rear motor value (-1 to 1); 1 is forward
            :param lf_motor: Left front motor value (-1 to 1); 1 is forward
            :param rf_motor: Right front motor value (-1 to 1); 1 is forward

            :returns: Speed of robot in x (ft/s), Speed of robot in y (ft/s),
                      clockwise rotation of robot (radians/s)
        '''
        #
        # From http://www.chiefdelphi.com/media/papers/download/2722 pp7-9
        # [F] [omega](r) = [V]
        #
        # F is
        #  .25   .25   .25  .25
        # -.25   .25  -.25  .25
        # -.25k -.25k  .25k .25k
        #
        # omega is
        # [lf lr rr rf]

        if self.deadzone:
            lf_motor = self.deadzone(lf_motor)
            lr_motor = self.deadzone(lr_motor)
            rf_motor = self.deadzone(rf_motor)
            rr_motor = self.deadzone(rr_motor)

        # Calculate speed of each wheel
        lr = lr_motor * self.speed
        rr = rr_motor * self.speed
        lf = lf_motor * self.speed
        rf = rf_motor * self.speed

        # Calculate K
        k = abs(self.x_wheelbase / 2.0) + abs(self.y_wheelbase / 2.0)

        # Calculate resulting motion
        Vy = .25 * (lf + lr + rr + rf)
        Vx = .25 * (lf + -lr + rr + -rf)
        Vw = (.25 / k) * (lf + lr + -rr + -rf)

        self.lr_speed = lr
        self.rr_speed = rr
        self.lf_speed = lf
        self.rf_speed = rf

        return Vx, Vy, Vw


def mecanum_drivetrain(lr_motor, rr_motor, lf_motor, rf_motor, x_wheelbase=2, y_wheelbase=3, speed=5, deadzone=None):
    '''
        .. deprecated:: 2018.2.0
           Use :class:`MecanumDrivetrain` instead
    '''
    return MecanumDrivetrain(x_wheelbase, y_wheelbase, speed, deadzone).get_vector(lr_motor, rr_motor, lf_motor, rf_motor)


def four_motor_swerve_drivetrain(lr_motor, rr_motor, lf_motor, rf_motor,
                                 lr_angle, rr_angle, lf_angle, rf_angle,
                                 x_wheelbase=2, y_wheelbase=2, speed=5, deadzone=None):
    '''
        Four motors that can be rotated in any direction.

        If any motors are inverted, then you will need to multiply that motor's
        value by -1.
        :param lr_motor: Left rear motor value (-1 to 1); 1 is forward
        :param rr_motor: Right rear motor value (-1 to 1); 1 is forward
        :param lf_motor: Left front motor value (-1 to 1); 1 is forward
        :param rf_motor: Right front motor value (-1 to 1); 1 is forward

        :param lr_angle: Left rear motor angle in degrees (0 to 360 measured clockwise from forward position)
        :param rr_angle: Right rear motor angle in degrees (0 to 360 measured clockwise from forward position)
        :param lf_angle: Left front motor angle in degrees (0 to 360 measured clockwise from forward position)
        :param rf_angle: Right front motor angle in degrees (0 to 360 measured clockwise from forward position)

        :param x_wheelbase: The distance in feet between right and left wheels.
        :param y_wheelbase: The distance in feet between forward and rear wheels.
        :param speed: Speed of robot in feet per second (see above)
        :param deadzone: A function that adjusts the output of the motor (see :func:`linear_deadzone`)

        :returns: Speed of robot in x (ft/s), Speed of robot in y (ft/s),
                  clockwise rotation of robot (radians/s)
    '''
    if deadzone:
        lf_motor = deadzone(lf_motor)
        lr_motor = deadzone(lr_motor)
        rf_motor = deadzone(rf_motor)
        rr_motor = deadzone(rr_motor)

    # Calculate speed of each wheel
    lr = lr_motor * speed
    rr = rr_motor * speed
    lf = lf_motor * speed
    rf = rf_motor * speed

    # Calculate angle in radians
    lr_rad = math.radians(lr_angle)
    rr_rad = math.radians(rr_angle)
    lf_rad = math.radians(lf_angle)
    rf_rad = math.radians(rf_angle)

    # Calculate wheelbase radius
    wheelbase_radius = math.hypot(x_wheelbase / 2.0, y_wheelbase / 2.0)

    # Calculates the Vx and Vy components
    # Sin and Cos are swapped because forward is 0 degrees on swerve wheels
    Vx = (math.sin(lr_rad) * lr) + (math.sin(rr_rad) * rr) + (math.sin(lf_rad) * lf) + (math.sin(rf_rad) * rf)
    Vy = (math.cos(lr_rad) * lr) + (math.cos(rr_rad) * rr) + (math.cos(lf_rad) * lf) + (math.cos(rf_rad) * rf)

    # Adjusts the angle corresponding to a diameter that is perpendicular to the radius (add or subtract 45deg)
    lr_rad = (lr_rad + (math.pi / 4)) % (2 * math.pi)
    rr_rad = (rr_rad - (math.pi / 4)) % (2 * math.pi)
    lf_rad = (lf_rad - (math.pi / 4)) % (2 * math.pi)
    rf_rad = (rf_rad + (math.pi / 4)) % (2 * math.pi)

    # Finds the rotational velocity by summing each wheel's tangential contribution
    Vw = wheelbase_radius * ((math.cos(lr_rad) * lr) + (math.cos(rr_rad) * -rr) + (math.cos(lf_rad) * lf) + (math.cos(rf_rad) * -rf))

    Vx *= 0.25
    Vy *= 0.25
    Vw *= 0.25

    return Vx, Vy, Vw

# TODO: holonomic, etc
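# A quick hedged usage sketch of TwoMotorDrivetrain outside the pyfrc physics
# loop; the motor values are made up, the arithmetic follows get_vector above.
drivetrain = TwoMotorDrivetrain(x_wheelbase=2, speed=5)

# Full forward: WPILib convention is left = -1, right = +1.
print(drivetrain.get_vector(-1.0, 1.0))  # (5.0, 0.0) -> 5 ft/s straight ahead

# Spin in place: both motors set to +1.
print(drivetrain.get_vector(1.0, 1.0))   # (0.0, -5.0) -> pure counter-clockwise spin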
#ifndef __scanner_h__
#define __scanner_h__

#include <string>
#include <iostream>
#include <fstream>
#include <iterator>

#include "chunk.h"

namespace PsqlChunks
{

    class ChunkScanner
    {
        protected:
            std::istream & strm;
            Chunk chunkCache;
            linenumber_t line_number;

            enum Content {
                SEP,        // separator
                FILE_MARKER,// file marker from previous concat
                COMMENT,    // sql single line comment
                COMMENT_END,
                COMMENT_START,
                EMPTY,      // empty or only whitespace
                OTHER       // anything, probably sql
            };

            enum State {
                CAPTURE_SQL,
                CAPTURE_START_COMMENT,
                CAPTURE_END_COMMENT,
                NEW_CHUNK,
                END_CHUNK,
                IGNORE,
                COPY_CACHED
            };

            bool hasMarker(const std::string &, const std::string &, size_t , size_t &);
            Content classifyLine( std::string &, size_t &);

            // state machine variables
            Content stm_last_cls;
            State stm_state;
            linenumber_t last_nonempty_line;

        public:

            ChunkScanner(std::istream &);
            ~ChunkScanner();

            /** read next chunk
             *
             * returns false on failure
             */
            bool nextChunk( Chunk& );

            bool eof();
    };


    /**
     *
     * Usage:
     *   for (ChunkIterator cit(is); cit != ChunkIterator(); ++cit) {
     *       std::cout << *cit << std::endl;
     *   }
     */
    class ChunkIterator : public std::iterator<std::input_iterator_tag, Chunk>
    {
        private:
            ChunkScanner * scanner;
            Chunk chunk;

            ChunkIterator(const ChunkIterator&);
            ChunkIterator& operator=(const ChunkIterator&);

            inline void fetchChunk()
            {
                if (scanner && !scanner->nextChunk(chunk)) {
                    delete scanner;
                    scanner = NULL;
                }
            }

        public:
            ChunkIterator(std::istream& is)
                : scanner(NULL), chunk()
            {
                scanner = new ChunkScanner(is);
                fetchChunk();
            };

            ChunkIterator()
                : scanner(NULL), chunk()
            {};

            ~ChunkIterator()
            {
                delete scanner;
            };

            Chunk& operator*()
            {
                return chunk;
            }

            ChunkIterator& operator++()
            {
                fetchChunk();
                return *this;
            }

            bool operator==(const ChunkIterator& rhs) const
            {
                return (!rhs.scanner) == (!scanner);
            }

            bool operator!=(const ChunkIterator& rhs) const
            {
                return !(rhs == *this);
            }
    };

};

#endif
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports.FIRST_GROUP_PATTERN = undefined; var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; // This is a port of Google Android `libphonenumber`'s // `phonenumberutil.js` of 17th November, 2016. // // https://github.com/googlei18n/libphonenumber/commits/master/javascript/i18n/phonenumbers/phonenumberutil.js exports.default = format; exports.format_national_number_using_format = format_national_number_using_format; exports.choose_format_for_number = choose_format_for_number; exports.changeInternationalFormatStyle = changeInternationalFormatStyle; exports.formatIDDSameCountryCallingCodeNumber = formatIDDSameCountryCallingCodeNumber; var _common = require('./common'); var _parse = require('./parse'); var _parse2 = _interopRequireDefault(_parse); var _IDD = require('./IDD'); var _metadata = require('./metadata'); var _metadata2 = _interopRequireDefault(_metadata); var _RFC = require('./RFC3966'); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var defaultOptions = { formatExtension: function formatExtension(number, extension, metadata) { return '' + number + metadata.ext() + extension; } // Formats a phone number // // Example use cases: // // ```js // format('8005553535', 'RU', 'INTERNATIONAL') // format('8005553535', 'RU', 'INTERNATIONAL', metadata) // format({ phone: '8005553535', country: 'RU' }, 'INTERNATIONAL') // format({ phone: '8005553535', country: 'RU' }, 'INTERNATIONAL', metadata) // format('+78005553535', 'NATIONAL') // format('+78005553535', 'NATIONAL', metadata) // ``` // };function format(arg_1, arg_2, arg_3, arg_4, arg_5) { var _sort_out_arguments = sort_out_arguments(arg_1, arg_2, arg_3, arg_4, arg_5), input = _sort_out_arguments.input, format_type = _sort_out_arguments.format_type, options = _sort_out_arguments.options, metadata = _sort_out_arguments.metadata; if (input.country) { // Validate `input.country`. if (!metadata.hasCountry(input.country)) { throw new Error('Unknown country: ' + input.country); } metadata.country(input.country); } else if (input.countryCallingCode) { metadata.chooseCountryByCountryCallingCode(input.countryCallingCode); } else return input.phone || ''; var countryCallingCode = metadata.countryCallingCode(); var nationalNumber = options.v2 ? input.nationalNumber : input.phone; // This variable should have been declared inside `case`s // but Babel has a bug and it says "duplicate variable declaration". var number = void 0; switch (format_type) { case 'INTERNATIONAL': // Legacy argument support. // (`{ country: ..., phone: '' }`) if (!nationalNumber) { return '+' + countryCallingCode; } number = format_national_number(nationalNumber, 'INTERNATIONAL', metadata); number = '+' + countryCallingCode + ' ' + number; return add_extension(number, input.ext, metadata, options.formatExtension); case 'E.164': // `E.164` doesn't define "phone number extensions". 
return '+' + countryCallingCode + nationalNumber; case 'RFC3966': return (0, _RFC.formatRFC3966)({ number: '+' + countryCallingCode + nationalNumber, ext: input.ext }); case 'IDD': if (!options.fromCountry) { return; // throw new Error('`fromCountry` option not passed for IDD-prefixed formatting.') } var IDDPrefix = (0, _IDD.getIDDPrefix)(options.fromCountry, metadata.metadata); if (!IDDPrefix) { return; } if (options.humanReadable) { var formattedForSameCountryCallingCode = countryCallingCode && formatIDDSameCountryCallingCodeNumber(nationalNumber, metadata.countryCallingCode(), options.fromCountry, metadata); if (formattedForSameCountryCallingCode) { number = formattedForSameCountryCallingCode; } else { number = IDDPrefix + ' ' + countryCallingCode + ' ' + format_national_number(nationalNumber, 'INTERNATIONAL', metadata); } return add_extension(number, input.ext, metadata, options.formatExtension); } return '' + IDDPrefix + countryCallingCode + nationalNumber; case 'NATIONAL': // Legacy argument support. // (`{ country: ..., phone: '' }`) if (!nationalNumber) { return ''; } number = format_national_number(nationalNumber, 'NATIONAL', metadata); return add_extension(number, input.ext, metadata, options.formatExtension); } } // This was originally set to $1 but there are some countries for which the // first group is not used in the national pattern (e.g. Argentina) so the $1 // group does not match correctly. Therefore, we use \d, so that the first // group actually used in the pattern will be matched. var FIRST_GROUP_PATTERN = exports.FIRST_GROUP_PATTERN = /(\$\d)/; function format_national_number_using_format(number, format, useInternationalFormat, includeNationalPrefixForNationalFormat, metadata) { var formattedNumber = number.replace(new RegExp(format.pattern()), useInternationalFormat ? format.internationalFormat() : format.nationalPrefixFormattingRule() && (!format.nationalPrefixIsOptionalWhenFormatting() || includeNationalPrefixForNationalFormat) ? format.format().replace(FIRST_GROUP_PATTERN, format.nationalPrefixFormattingRule()) : format.format()); if (useInternationalFormat) { return changeInternationalFormatStyle(formattedNumber); } return formattedNumber; } function format_national_number(number, format_as, metadata) { var format = choose_format_for_number(metadata.formats(), number); if (!format) { return number; } return format_national_number_using_format(number, format, format_as === 'INTERNATIONAL', true, metadata); } function choose_format_for_number(available_formats, national_number) { for (var _iterator = available_formats, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { var _ref; if (_isArray) { if (_i >= _iterator.length) break; _ref = _iterator[_i++]; } else { _i = _iterator.next(); if (_i.done) break; _ref = _i.value; } var _format = _ref; // Validate leading digits if (_format.leadingDigitsPatterns().length > 0) { // The last leading_digits_pattern is used here, as it is the most detailed var last_leading_digits_pattern = _format.leadingDigitsPatterns()[_format.leadingDigitsPatterns().length - 1]; // If leading digits don't match then move on to the next phone number format if (national_number.search(last_leading_digits_pattern) !== 0) { continue; } } // Check that the national number matches the phone number format regular expression if ((0, _common.matches_entirely)(national_number, _format.pattern())) { return _format; } } } // Removes brackets and replaces dashes with spaces. // // E.g. 
"(999) 111-22-33" -> "999 111 22 33" // // For some reason Google's metadata contains `<intlFormat/>`s with brackets and dashes. // Meanwhile, there's no single opinion about using punctuation in international phone numbers. // // For example, Google's `<intlFormat/>` for USA is `+1 213-373-4253`. // And here's a quote from WikiPedia's "North American Numbering Plan" page: // https://en.wikipedia.org/wiki/North_American_Numbering_Plan // // "The country calling code for all countries participating in the NANP is 1. // In international format, an NANP number should be listed as +1 301 555 01 00, // where 301 is an area code (Maryland)." // // I personally prefer the international format without any punctuation. // For example, brackets are remnants of the old age, meaning that the // phone number part in brackets (so called "area code") can be omitted // if dialing within the same "area". // And hyphens were clearly introduced for splitting local numbers into memorizable groups. // For example, remembering "5553535" is difficult but "555-35-35" is much simpler. // Imagine a man taking a bus from home to work and seeing an ad with a phone number. // He has a couple of seconds to memorize that number until it passes by. // If it were spaces instead of hyphens the man wouldn't necessarily get it, // but with hyphens instead of spaces the grouping is more explicit. // I personally think that hyphens introduce visual clutter, // so I prefer replacing them with spaces in international numbers. // In the modern age all output is done on displays where spaces are clearly distinguishable // so hyphens can be safely replaced with spaces without losing any legibility. // function changeInternationalFormatStyle(local) { return local.replace(new RegExp('[' + _common.VALID_PUNCTUATION + ']+', 'g'), ' ').trim(); } // Sort out arguments function sort_out_arguments(arg_1, arg_2, arg_3, arg_4, arg_5) { var input = void 0; var format_type = void 0; var options = void 0; var metadata = void 0; // Sort out arguments. // If the phone number is passed as a string. // `format('8005553535', ...)`. if (typeof arg_1 === 'string') { // If country code is supplied. // `format('8005553535', 'RU', 'NATIONAL', [options], metadata)`. if (typeof arg_3 === 'string') { format_type = arg_3; if (arg_5) { options = arg_4; metadata = arg_5; } else { metadata = arg_4; } input = (0, _parse2.default)(arg_1, { defaultCountry: arg_2, extended: true }, metadata); } // Just an international phone number is supplied // `format('+78005553535', 'NATIONAL', [options], metadata)`. else { if (typeof arg_2 !== 'string') { throw new Error('`format` argument not passed to `formatNumber(number, format)`'); } format_type = arg_2; if (arg_4) { options = arg_3; metadata = arg_4; } else { metadata = arg_3; } input = (0, _parse2.default)(arg_1, { extended: true }, metadata); } } // If the phone number is passed as a parsed number object. // `format({ phone: '8005553535', country: 'RU' }, 'NATIONAL', [options], metadata)`. else if (is_object(arg_1)) { input = arg_1; format_type = arg_2; if (arg_4) { options = arg_3; metadata = arg_4; } else { metadata = arg_3; } } else throw new TypeError('A phone number must either be a string or an object of shape { phone, [country] }.'); if (format_type === 'International') { format_type = 'INTERNATIONAL'; } else if (format_type === 'National') { format_type = 'NATIONAL'; } // Validate `format_type`. 
switch (format_type) { case 'E.164': case 'INTERNATIONAL': case 'NATIONAL': case 'RFC3966': case 'IDD': break; default: throw new Error('Unknown format type argument passed to "format()": "' + format_type + '"'); } // Apply default options. if (options) { options = _extends({}, defaultOptions, options); } else { options = defaultOptions; } return { input: input, format_type: format_type, options: options, metadata: new _metadata2.default(metadata) }; } // Babel transforms `typeof` into some "branches" // so istanbul will show this as "branch not covered". /* istanbul ignore next */ var is_object = function is_object(_) { return (typeof _ === 'undefined' ? 'undefined' : _typeof(_)) === 'object'; }; function add_extension(number, ext, metadata, formatExtension) { return ext ? formatExtension(number, ext, metadata) : number; } function formatIDDSameCountryCallingCodeNumber(number, toCountryCallingCode, fromCountry, toCountryMetadata) { var fromCountryMetadata = new _metadata2.default(toCountryMetadata.metadata); fromCountryMetadata.country(fromCountry); // If calling within the same country calling code. if (toCountryCallingCode === fromCountryMetadata.countryCallingCode()) { // For NANPA regions, return the national format for these regions // but prefix it with the country calling code. if (toCountryCallingCode === '1') { return toCountryCallingCode + ' ' + format_national_number(number, 'NATIONAL', toCountryMetadata); } // If regions share a country calling code, the country calling code need // not be dialled. This also applies when dialling within a region, so this // if clause covers both these cases. Technically this is the case for // dialling from La Reunion to other overseas departments of France (French // Guiana, Martinique, Guadeloupe), but not vice versa - so we don't cover // this edge case for now and for those cases return the version including // country calling code. Details here: // http://www.petitfute.com/voyage/225-info-pratiques-reunion // return format_national_number(number, 'NATIONAL', toCountryMetadata); } } //# sourceMappingURL=format.js.map
# -*- coding: utf-8 -*- # vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
parcelRequire=function(e,r,n,t){var i="function"==typeof parcelRequire&&parcelRequire,o="function"==typeof require&&require;function u(n,t){if(!r[n]){if(!e[n]){var f="function"==typeof parcelRequire&&parcelRequire;if(!t&&f)return f(n,!0);if(i)return i(n,!0);if(o&&"string"==typeof n)return o(n);var c=new Error("Cannot find module '"+n+"'");throw c.code="MODULE_NOT_FOUND",c}p.resolve=function(r){return e[n][1][r]||r},p.cache={};var l=r[n]=new u.Module(n);e[n][0].call(l.exports,p,l,l.exports,this)}return r[n].exports;function p(e){return u(p.resolve(e))}}u.isParcelRequire=!0,u.Module=function(e){this.id=e,this.bundle=u,this.exports={}},u.modules=e,u.cache=r,u.parent=i,u.register=function(r,n){e[r]=[function(e,r){r.exports=n},{}]};for(var f=0;f<n.length;f++)u(n[f]);if(n.length){var c=u(n[n.length-1]);"object"==typeof exports&&"undefined"!=typeof module?module.exports=c:"function"==typeof define&&define.amd?define(function(){return c}):t&&(this[t]=c)}return u}({"yLQN":[function(require,module,exports) { function n(n,e){postMessage({action:"consoleLogIfRunning",payload:{msg:n,chan:e}})}function e(){postMessage({action:"execEnded"})}function t(t){return fetch(t,{method:"POST",headers:{Accept:"application/json","Content-Type":"application/json"}}).then(function(e){if(e.body)return new Promise(function(t,a){var o=e.body.getReader(),c=new TextDecoder("utf-8"),i=!0;!function e(){o.read().then(function(a){var o=a.done,r=a.value;o?t({hasLastNewLine:i}):(c.decode(r).split("\n").map(function(e){if(0!==e.trim().length){var t=JSON.parse(e),a=JSON.parse(t.data),o=a.lastIndexOf("\n");i=o===a.length-1,n(a,t.chan)}}),e())})}()});var t=!0;return e.text().then(function(e){return e.split("\n").map(function(e){if(0!==e.trim().length){var a=JSON.parse(e),o=JSON.parse(a.data),c=o.lastIndexOf("\n");t=c===o.length-1,n(o,a.chan)}}),{hasLastNewLine:t}})}).then(function(t){t.hasLastNewLine||n("%\n","forcednl"),n("--- Done.\n\n","info"),e()}).catch(function(t){n("\n--- An error occurred during execution.\n\n","stderr"),e()})}onmessage=function(n){switch(n.data.action){case"exec":t(n.data.url)}}; },{}]},{},["yLQN"], null) //# sourceMappingURL=/worker.b574a43a.map
const VERSION_INFO_KEY = 'VERSION_INFO'; export class FooterDao { constructor(RESOURCE, ArtifactoryDaoFactory, ArtifactoryStorage) { this.storage = ArtifactoryStorage; this._resource = ArtifactoryDaoFactory() .setPath(RESOURCE.FOOTER) .getInstance(); } get(force = false) { if (!this.cached || force) { this.cached = this._resource.get().$promise .then(info => this._info = info); } return this.cached; } getInfo() { return this._info; } }
/************************************************************************************************************************ Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************************************************************/ #ifndef CLIENT_SESSION_H #define CLIENT_SESSION_H #include <stdbool.h> #include <stdint.h> #include "awa/client.h" #include "lwm2m_definition.h" #include "session_common.h" #include "map.h" #ifdef __cplusplus extern "C" { #endif bool ClientSession_IsConnected(const AwaClientSession * session); IPCChannel * ClientSession_GetChannel(const AwaClientSession * session); DefinitionRegistry * ClientSession_GetDefinitionRegistry(const AwaClientSession * session); AwaError ClientSession_CheckResourceTypeFromPath(const AwaClientSession * session, const char * path, AwaResourceType expected); const AwaResourceDefinition * ClientSession_GetResourceDefinitionFromPath(const AwaClientSession * session, const char * path); SessionCommon * ClientSession_GetSessionCommon(const AwaClientSession * session); MapType * ClientSession_GetSubscribers(const AwaClientSession * session); #ifdef __cplusplus } #endif #endif // CLIENT_SESSION_H
## # Copyright : Copyright (c) MOSEK ApS, Denmark. All rights reserved. # # File : opt_server_async.py # # Purpose : Demonstrates how to use MOSEK OptServer # to solve optimization problem asynchronously ## import mosek import sys import time def streamprinter(msg): sys.stdout.write(msg) sys.stdout.flush() if len(sys.argv) != 5: print("Missing argument, syntax is:") print(" opt-server-async inputfile host port numpolls") else: filename = sys.argv[1] host = sys.argv[2] port = sys.argv[3] numpolls = int(sys.argv[4]) token = None with mosek.Env() as env: with env.Task(0, 0) as task: print("reading task from file") task.readdata(filename) print("Solve the problem remotely (async)") token = task.asyncoptimize(host, port) print("Task token: %s" % token) with env.Task(0, 0) as task: task.readdata(filename) task.set_Stream(mosek.streamtype.log, streamprinter) i = 0 while i < numpolls: time.sleep(0.1) print("poll %d..." % i) respavailable, res, trm = task.asyncpoll(host, port, token) print("done!") if respavailable: print("solution available!") respavailable, res, trm = task.asyncgetresult(host, port, token) task.solutionsummary(mosek.streamtype.log) break i = i + 1 if i == numpolls: print("max number of polls reached, stopping host.") task.asyncstop(host, port, token)
goog.provide('ol.interaction.Select'); goog.provide('ol.interaction.SelectEvent'); goog.provide('ol.interaction.SelectEventType'); goog.provide('ol.interaction.SelectFilterFunction'); goog.require('goog.array'); goog.require('goog.asserts'); goog.require('goog.events'); goog.require('goog.events.Event'); goog.require('goog.functions'); goog.require('goog.object'); goog.require('ol.CollectionEventType'); goog.require('ol.Feature'); goog.require('ol.array'); goog.require('ol.events.condition'); goog.require('ol.geom.GeometryType'); goog.require('ol.interaction.Interaction'); goog.require('ol.layer.Vector'); goog.require('ol.source.Vector'); /** * @enum {string} */ ol.interaction.SelectEventType = { /** * Triggered when feature(s) has been (de)selected. * @event ol.interaction.SelectEvent#select * @api */ SELECT: 'select' }; /** * A function that takes an {@link ol.Feature} or {@link ol.render.Feature} and * an {@link ol.layer.Layer} and returns `true` if the feature may be selected * or `false` otherwise. * @typedef {function((ol.Feature|ol.render.Feature), ol.layer.Layer): * boolean} * @api */ ol.interaction.SelectFilterFunction; /** * @classdesc * Events emitted by {@link ol.interaction.Select} instances are instances of * this type. * * @param {string} type The event type. * @param {Array.<ol.Feature>} selected Selected features. * @param {Array.<ol.Feature>} deselected Deselected features. * @param {ol.MapBrowserEvent} mapBrowserEvent Associated * {@link ol.MapBrowserEvent}. * @implements {oli.SelectEvent} * @extends {goog.events.Event} * @constructor */ ol.interaction.SelectEvent = function(type, selected, deselected, mapBrowserEvent) { goog.base(this, type); /** * Selected features array. * @type {Array.<ol.Feature>} * @api */ this.selected = selected; /** * Deselected features array. * @type {Array.<ol.Feature>} * @api */ this.deselected = deselected; /** * Associated {@link ol.MapBrowserEvent}. * @type {ol.MapBrowserEvent} * @api */ this.mapBrowserEvent = mapBrowserEvent; }; goog.inherits(ol.interaction.SelectEvent, goog.events.Event); /** * @classdesc * Interaction for selecting vector features. By default, selected features are * styled differently, so this interaction can be used for visual highlighting, * as well as selecting features for other actions, such as modification or * output. There are three ways of controlling which features are selected: * using the browser event as defined by the `condition` and optionally the * `toggle`, `add`/`remove`, and `multi` options; a `layers` filter; and a * further feature filter using the `filter` option. * * @constructor * @extends {ol.interaction.Interaction} * @param {olx.interaction.SelectOptions=} opt_options Options. * @fires ol.interaction.SelectEvent * @api stable */ ol.interaction.Select = function(opt_options) { goog.base(this, { handleEvent: ol.interaction.Select.handleEvent }); var options = opt_options ? opt_options : {}; /** * @private * @type {ol.events.ConditionType} */ this.condition_ = options.condition ? options.condition : ol.events.condition.singleClick; /** * @private * @type {ol.events.ConditionType} */ this.addCondition_ = options.addCondition ? options.addCondition : ol.events.condition.never; /** * @private * @type {ol.events.ConditionType} */ this.removeCondition_ = options.removeCondition ? options.removeCondition : ol.events.condition.never; /** * @private * @type {ol.events.ConditionType} */ this.toggleCondition_ = options.toggleCondition ? 
options.toggleCondition : ol.events.condition.shiftKeyOnly; /** * @private * @type {boolean} */ this.multi_ = options.multi ? options.multi : false; /** * @private * @type {ol.interaction.SelectFilterFunction} */ this.filter_ = options.filter ? options.filter : goog.functions.TRUE; var layerFilter; if (options.layers) { if (goog.isFunction(options.layers)) { layerFilter = options.layers; } else { var layers = options.layers; layerFilter = /** * @param {ol.layer.Layer} layer Layer. * @return {boolean} Include. */ function(layer) { return ol.array.includes(layers, layer); }; } } else { layerFilter = goog.functions.TRUE; } /** * @private * @type {function(ol.layer.Layer): boolean} */ this.layerFilter_ = layerFilter; /** * An association between selected feature (key) * and layer (value) * @private * @type {Object.<number, ol.layer.Layer>} */ this.featureLayerAssociation_ = {}; /** * @private * @type {ol.layer.Vector} */ this.featureOverlay_ = new ol.layer.Vector({ source: new ol.source.Vector({ useSpatialIndex: false, features: options.features, wrapX: options.wrapX }), style: options.style ? options.style : ol.interaction.Select.getDefaultStyleFunction(), updateWhileAnimating: true, updateWhileInteracting: true }); var features = this.featureOverlay_.getSource().getFeaturesCollection(); goog.events.listen(features, ol.CollectionEventType.ADD, this.addFeature_, false, this); goog.events.listen(features, ol.CollectionEventType.REMOVE, this.removeFeature_, false, this); }; goog.inherits(ol.interaction.Select, ol.interaction.Interaction); /** * @param {ol.Feature|ol.render.Feature} feature Feature. * @param {ol.layer.Layer} layer Layer. * @private */ ol.interaction.Select.prototype.addFeatureLayerAssociation_ = function(feature, layer) { var key = goog.getUid(feature); this.featureLayerAssociation_[key] = layer; }; /** * Get the selected features. * @return {ol.Collection.<ol.Feature>} Features collection. * @api stable */ ol.interaction.Select.prototype.getFeatures = function() { return this.featureOverlay_.getSource().getFeaturesCollection(); }; /** * Returns the associated {@link ol.layer.Vector vectorlayer} of * the (last) selected feature. * @param {ol.Feature|ol.render.Feature} feature Feature * @return {ol.layer.Vector} Layer. * @api */ ol.interaction.Select.prototype.getLayer = function(feature) { goog.asserts.assertInstanceof(feature, ol.Feature, 'feature should be an ol.Feature'); var key = goog.getUid(feature); return /** @type {ol.layer.Vector} */ (this.featureLayerAssociation_[key]); }; /** * Handles the {@link ol.MapBrowserEvent map browser event} and may change the * selected state of features. * @param {ol.MapBrowserEvent} mapBrowserEvent Map browser event. * @return {boolean} `false` to stop event propagation. * @this {ol.interaction.Select} * @api */ ol.interaction.Select.handleEvent = function(mapBrowserEvent) { if (!this.condition_(mapBrowserEvent)) { return true; } var add = this.addCondition_(mapBrowserEvent); var remove = this.removeCondition_(mapBrowserEvent); var toggle = this.toggleCondition_(mapBrowserEvent); var set = !add && !remove && !toggle; var map = mapBrowserEvent.map; var features = this.featureOverlay_.getSource().getFeaturesCollection(); var /** @type {!Array.<ol.Feature>} */ deselected = []; var /** @type {!Array.<ol.Feature>} */ selected = []; var change = false; if (set) { // Replace the currently selected feature(s) with the feature(s) at the // pixel, or clear the selected feature(s) if there is no feature at // the pixel. 
map.forEachFeatureAtPixel(mapBrowserEvent.pixel, /** * @param {ol.Feature|ol.render.Feature} feature Feature. * @param {ol.layer.Layer} layer Layer. */ function(feature, layer) { if (this.filter_(feature, layer)) { selected.push(feature); this.addFeatureLayerAssociation_(feature, layer); return !this.multi_; } }, this, this.layerFilter_); if (selected.length > 0 && features.getLength() == 1 && features.item(0) == selected[0]) { // No change } else { change = true; if (features.getLength() !== 0) { deselected = Array.prototype.concat(features.getArray()); features.clear(); } features.extend(selected); // Modify object this.featureLayerAssociation_ if (selected.length === 0) { goog.object.clear(this.featureLayerAssociation_); } else { if (deselected.length > 0) { deselected.forEach(function(feature) { this.removeFeatureLayerAssociation_(feature); }, this); } } } } else { // Modify the currently selected feature(s). map.forEachFeatureAtPixel(mapBrowserEvent.pixel, /** * @param {ol.Feature|ol.render.Feature} feature Feature. * @param {ol.layer.Layer} layer Layer. */ function(feature, layer) { if (!ol.array.includes(features.getArray(), feature)) { if (add || toggle) { if (this.filter_(feature, layer)) { selected.push(feature); this.addFeatureLayerAssociation_(feature, layer); } } } else { if (remove || toggle) { deselected.push(feature); this.removeFeatureLayerAssociation_(feature); } } }, this, this.layerFilter_); var i; for (i = deselected.length - 1; i >= 0; --i) { features.remove(deselected[i]); } features.extend(selected); if (selected.length > 0 || deselected.length > 0) { change = true; } } if (change) { this.dispatchEvent( new ol.interaction.SelectEvent(ol.interaction.SelectEventType.SELECT, selected, deselected, mapBrowserEvent)); } return ol.events.condition.pointerMove(mapBrowserEvent); }; /** * Remove the interaction from its current map, if any, and attach it to a new * map, if any. Pass `null` to just remove the interaction from the current map. * @param {ol.Map} map Map. * @api stable */ ol.interaction.Select.prototype.setMap = function(map) { var currentMap = this.getMap(); var selectedFeatures = this.featureOverlay_.getSource().getFeaturesCollection(); if (!goog.isNull(currentMap)) { selectedFeatures.forEach(currentMap.unskipFeature, currentMap); } goog.base(this, 'setMap', map); this.featureOverlay_.setMap(map); if (!goog.isNull(map)) { selectedFeatures.forEach(map.skipFeature, map); } }; /** * @return {ol.style.StyleFunction} Styles. */ ol.interaction.Select.getDefaultStyleFunction = function() { var styles = ol.style.createDefaultEditingStyles(); goog.array.extend(styles[ol.geom.GeometryType.POLYGON], styles[ol.geom.GeometryType.LINE_STRING]); goog.array.extend(styles[ol.geom.GeometryType.GEOMETRY_COLLECTION], styles[ol.geom.GeometryType.LINE_STRING]); return function(feature, resolution) { return styles[feature.getGeometry().getType()]; }; }; /** * @param {ol.CollectionEvent} evt Event. * @private */ ol.interaction.Select.prototype.addFeature_ = function(evt) { var feature = evt.element; var map = this.getMap(); goog.asserts.assertInstanceof(feature, ol.Feature, 'feature should be an ol.Feature'); if (!goog.isNull(map)) { map.skipFeature(feature); } }; /** * @param {ol.CollectionEvent} evt Event. 
* @private */ ol.interaction.Select.prototype.removeFeature_ = function(evt) { var feature = evt.element; var map = this.getMap(); goog.asserts.assertInstanceof(feature, ol.Feature, 'feature should be an ol.Feature'); if (!goog.isNull(map)) { map.unskipFeature(feature); } }; /** * @param {ol.Feature|ol.render.Feature} feature Feature. * @private */ ol.interaction.Select.prototype.removeFeatureLayerAssociation_ = function(feature) { var key = goog.getUid(feature); delete this.featureLayerAssociation_[key]; };
/* eslint-disable no-shadow */
import '../../utils/dotenv';
import pm2 from 'pm2';
import mkdir from '../../utils/logsFolder';

const company = 'coronavirus';
const path = mkdir(company);

pm2.connect(err => {
  if (err) {
    console.error(err);
    process.exit(2);
  }

  pm2.start(
    [
      {
        name: `${company}-bot`,
        namespace: company,
        script: './dist/clientes/coronavirus/services/BingTracker.js',
        exec_mode: 'fork',
        watch: true,
        env_production: {
          NODE_ENV: 'production',
        },
        log_date_format: 'DD-MM-YYYY HH:mm:ss',
        error_file: `${path}/${company}-error.log`,
        out_file: `${path}/${company}-out.log`,
      },
    ],
    err => {
      pm2.disconnect();
      if (err) throw err;
    }
  );
});
var Neutralino;(()=>{"use strict";var e={885:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.open=t.getConfig=t.keepAlive=t.killProcess=t.exit=void 0;const n=i(69);t.exit=function(e){return n.request({url:"app.exit",type:n.RequestType.POST,data:{code:e},isNativeMethod:!0})},t.killProcess=function(){return n.request({url:"app.killProcess",type:n.RequestType.GET,isNativeMethod:!0})},t.keepAlive=function(){return n.request({url:"app.keepAlive",type:n.RequestType.GET,isNativeMethod:!0})},t.getConfig=function(){return n.request({url:"app.getConfig",type:n.RequestType.GET,isNativeMethod:!0})},t.open=function(e){return n.request({url:"app.open",type:n.RequestType.POST,data:e,isNativeMethod:!0})}},308:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.getRamUsage=void 0;const n=i(69);t.getRamUsage=function(){return n.request({url:"computer.getRamUsage",type:n.RequestType.GET,isNativeMethod:!0})}},199:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.log=t.LoggerType=void 0;const n=i(69);var r;(r=t.LoggerType||(t.LoggerType={})).WARN="WARN",r.ERROR="ERROR",r.INFO="INFO",t.log=function(e){return n.request({url:"debug.log",type:n.RequestType.POST,data:e,isNativeMethod:!0})}},284:(e,t)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.dispatch=t.off=t.on=void 0,t.on=function(e,t){return new Promise(((i,n)=>{window.addEventListener(e,t),i()}))},t.off=function(e,t){return new Promise(((i,n)=>{window.removeEventListener(e,t),i()}))},t.dispatch=function(e,t){return new Promise(((i,n)=>{let r=new CustomEvent(e,{detail:t});window.dispatchEvent(r),i()}))}},543:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.readDirectory=t.removeFile=t.readBinaryFile=t.readFile=t.writeBinaryFile=t.writeFile=t.removeDirectory=t.createDirectory=void 0;const n=i(69);t.createDirectory=function(e){return n.request({url:"filesystem.createDirectory",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.removeDirectory=function(e){return n.request({url:"filesystem.removeDirectory",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.writeFile=function(e){return n.request({url:"filesystem.writeFile",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.writeBinaryFile=function(e){let t=new Uint8Array(e.data),i="";for(let e of t)i+=String.fromCharCode(e);return n.request({url:"filesystem.writeBinaryFile",type:n.RequestType.POST,data:{fileName:e.fileName,data:window.btoa(i)},isNativeMethod:!0})},t.readFile=function(e){return n.request({url:"filesystem.readFile",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.readBinaryFile=function(e){return new Promise(((t,i)=>{n.request({url:"filesystem.readBinaryFile",type:n.RequestType.POST,data:e,isNativeMethod:!0}).then((e=>{let i=window.atob(e),n=i.length,r=new Uint8Array(n);for(let e=0;e<n;e++)r[e]=i.charCodeAt(e);t({data:r.buffer})})).catch((e=>{i(e)}))}))},t.removeFile=function(e){return n.request({url:"filesystem.removeFile",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.readDirectory=function(e){return n.request({url:"filesystem.readDirectory",type:n.RequestType.POST,data:e,isNativeMethod:!0})}},109:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.init=void 0;const n=i(473),r=i(359),o=i(306);t.init=function(){if(window.NL_MODE&&"browser"==window.NL_MODE&&n.ping.start(),void 0!==window.NL_ARGS)for(let 
e=0;e<window.NL_ARGS.length;e++)if("--debug-mode"==window.NL_ARGS[e]){r.devClient.start();break}window.NL_CVERSION=o.version}},325:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.setTray=t.showMessageBox=t.showNotification=t.showDialogSave=t.showDialogOpen=t.getEnvar=t.execCommand=t.MessageBoxType=void 0;const n=i(69);var r;(r=t.MessageBoxType||(t.MessageBoxType={})).WARN="WARN",r.ERROR="ERROR",r.INFO="INFO",r.QUESTION="QUESTION",t.execCommand=function(e){return n.request({url:"os.execCommand",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.getEnvar=function(e){return n.request({url:"os.getEnvar",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.showDialogOpen=function(e){return n.request({url:"os.dialogOpen",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.showDialogSave=function(e){return n.request({url:"os.dialogSave",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.showNotification=function(e){return n.request({url:"os.showNotification",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.showMessageBox=function(e){return n.request({url:"os.showMessageBox",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.setTray=function(e){return n.request({url:"os.setTray",type:n.RequestType.POST,data:e,isNativeMethod:!0})}},100:(e,t,i)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.getData=t.putData=void 0;const n=i(69);t.putData=function(e){return n.request({url:"storage.putData",type:n.RequestType.POST,data:e,isNativeMethod:!0})},t.getData=function(e){return n.request({url:"storage.getData",type:n.RequestType.POST,data:e,isNativeMethod:!0})}},776:function(e,t,i){var n=this&&this.__awaiter||function(e,t,i,n){return new(i||(i=Promise))((function(r,o){function s(e){try{a(n.next(e))}catch(e){o(e)}}function u(e){try{a(n.throw(e))}catch(e){o(e)}}function a(e){var t;e.done?r(e.value):(t=e.value,t instanceof i?t:new i((function(e){e(t)}))).then(s,u)}a((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:!0}),t.setSize=t.setDraggableRegion=t.move=t.setIcon=t.focus=t.isVisible=t.hide=t.show=t.isFullScreen=t.exitFullScreen=t.setFullScreen=t.minimize=t.isMaximized=t.unmaximize=t.maximize=t.setTitle=void 0;const r=i(69);t.setTitle=function(e){return r.request({url:"window.setTitle",type:r.RequestType.POST,data:{title:e},isNativeMethod:!0})},t.maximize=function(){return r.request({url:"window.maximize",type:r.RequestType.GET,isNativeMethod:!0})},t.unmaximize=function(){return r.request({url:"window.unmaximize",type:r.RequestType.GET,isNativeMethod:!0})},t.isMaximized=function(){return r.request({url:"window.isMaximized",type:r.RequestType.GET,isNativeMethod:!0})},t.minimize=function(){return r.request({url:"window.minimize",type:r.RequestType.GET,isNativeMethod:!0})},t.setFullScreen=function(){return r.request({url:"window.setFullScreen",type:r.RequestType.GET,isNativeMethod:!0})},t.exitFullScreen=function(){return r.request({url:"window.exitFullScreen",type:r.RequestType.GET,isNativeMethod:!0})},t.isFullScreen=function(){return r.request({url:"window.isFullScreen",type:r.RequestType.GET,isNativeMethod:!0})},t.show=function(){return r.request({url:"window.show",type:r.RequestType.GET,isNativeMethod:!0})},t.hide=function(){return r.request({url:"window.hide",type:r.RequestType.GET,isNativeMethod:!0})},t.isVisible=function(){return r.request({url:"window.isVisible",type:r.RequestType.GET,isNativeMethod:!0})},t.focus=function(){return r.request({url:"window.focus",type:r.RequestType.GET,isNativeMethod:!0})},t.setIcon=function(e){return 
r.request({url:"window.setIcon",type:r.RequestType.POST,isNativeMethod:!0,data:{icon:e}})},t.move=function(e,t){return r.request({url:"window.move",type:r.RequestType.POST,isNativeMethod:!0,data:{x:e,y:t}})},t.setDraggableRegion=function(e){return new Promise(((t,i)=>{let r=document.getElementById(e),o=0,s=0;function u(e){return n(this,void 0,void 0,(function*(){yield Neutralino.window.move(e.screenX-o,e.screenY-s)}))}r||i(`Unable to find dom element: #${e}`),r.addEventListener("mousedown",(e=>{o=e.clientX,s=e.clientY,r.addEventListener("mousemove",u)})),r.addEventListener("mouseup",(()=>{r.removeEventListener("mousemove",u)})),t()}))},t.setSize=function(e){return r.request({url:"window.setSize",type:r.RequestType.POST,isNativeMethod:!0,data:e})}},359:function(e,t,i){var n=this&&this.__awaiter||function(e,t,i,n){return new(i||(i=Promise))((function(r,o){function s(e){try{a(n.next(e))}catch(e){o(e)}}function u(e){try{a(n.throw(e))}catch(e){o(e)}}function a(e){var t;e.done?r(e.value):(t=e.value,t instanceof i?t:new i((function(e){e(t)}))).then(s,u)}a((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:!0}),t.devClient=void 0;const r=i(69);t.devClient={start:function(){setInterval((()=>n(this,void 0,void 0,(function*(){try{(yield r.request({url:"http://localhost:5050",type:r.RequestType.GET})).needsReload&&location.reload()}catch(e){console.error("Unable to communicate with neu devServer")}}))),1e3)}}},69:(e,t)=>{var i;Object.defineProperty(t,"__esModule",{value:!0}),t.request=t.RequestType=void 0,(i=t.RequestType||(t.RequestType={})).GET="GET",i.POST="POST",t.request=function(e){return new Promise(((t,i)=>{let n="",r=function(){let e;return e=window.XMLHttpRequest?new XMLHttpRequest:new ActiveXObject("Microsoft.XMLHTTP"),e}();r.onreadystatechange=()=>{if(4==r.readyState&&200==r.status){let e=null,n=r.responseText;n&&(e=JSON.parse(n)),e&&e.success&&(e.hasOwnProperty("returnValue")?t(e.returnValue):t(e)),e&&e.error&&i(e.error)}else 4==r.readyState&&i("Neutralino server is offline. 
Try restarting the application")},e.isNativeMethod&&(e.url="http://localhost:"+window.NL_PORT+"/__nativeMethod_"+e.url),e.data&&(n=JSON.stringify(e.data)),"GET"==e.type&&(r.open("GET",e.url,!0),r.setRequestHeader("Authorization","Basic "+window.NL_TOKEN),r.send()),"POST"==e.type&&(r.open("POST",e.url,!0),r.setRequestHeader("Content-type","application/x-www-form-urlencoded"),r.setRequestHeader("Authorization","Basic "+window.NL_TOKEN),r.send(n))}))}},473:function(e,t,i){var n=this&&this.__awaiter||function(e,t,i,n){return new(i||(i=Promise))((function(r,o){function s(e){try{a(n.next(e))}catch(e){o(e)}}function u(e){try{a(n.throw(e))}catch(e){o(e)}}function a(e){var t;e.done?r(e.value):(t=e.value,t instanceof i?t:new i((function(e){e(t)}))).then(s,u)}a((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:!0}),t.ping=void 0;const r=i(885);t.ping={start:()=>{setInterval((()=>n(void 0,void 0,void 0,(function*(){yield r.keepAlive()}))),5e3)}}},306:e=>{e.exports=JSON.parse('{"name":"neutralino-client-library","version":"1.3.0","description":"","main":"index.js","scripts":{"test":"echo \\"Error: no test specified\\" && exit 1","build":"webpack","watch":"webpack --watch"},"repository":{"type":"git","url":"git+https://github.com/neutralinojs/neutralino.js.git"},"author":"Neutralinojs","license":"MIT","bugs":{"url":"https://github.com/neutralinojs/neutralino.js/issues"},"homepage":"https://github.com/neutralinojs/neutralino.js#readme","devDependencies":{"@types/node":"^16.4.2","ts-loader":"^9.2.4","typescript":"^4.3.5","webpack":"^5.46.0","webpack-cli":"^4.7.2"},"dependencies":{}}')}},t={};function i(n){var r=t[n];if(void 0!==r)return r.exports;var o=t[n]={exports:{}};return e[n].call(o.exports,o,o.exports,i),o.exports}var n={};(()=>{var e=n;Object.defineProperty(e,"__esModule",{value:!0}),e.init=e.events=e.window=e.app=e.debug=e.storage=e.computer=e.os=e.filesystem=void 0,e.filesystem=i(543),e.os=i(325),e.computer=i(308),e.storage=i(100),e.debug=i(199),e.app=i(885),e.window=i(776),e.events=i(284);var t=i(109);Object.defineProperty(e,"init",{enumerable:!0,get:function(){return t.init}})})(),Neutralino=n})();
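# For reference, the request() helper at the end of the bundle above shows the
# wire protocol: native methods are plain HTTP calls to
# http://localhost:<NL_PORT>/__nativeMethod_<name> with a Basic Authorization
# token. A minimal sketch of the same call from Python, assuming a running
# Neutralino server and known port/token values (normally injected by the
# runtime as NL_PORT and NL_TOKEN).
import json
import urllib.request

def native_method(port, token, method, payload=None):
    # Mirrors the POST branch of request() in the bundle above.
    req = urllib.request.Request(
        'http://localhost:%d/__nativeMethod_%s' % (port, method),
        data=json.dumps(payload or {}).encode(),
        headers={
            'Authorization': 'Basic ' + token,
            'Content-type': 'application/x-www-form-urlencoded',
        },
        method='POST')
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())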
const log = require("../utils/log"); const calc = require("../utils/calc"); const { DerivativeProviders, ethToken, DerivativeStatus, DerivativeType } = require("../utils/constants"); const Fund = artifacts.require("OlympusBasicFund"); const AsyncWithdraw = artifacts.require("components/widrwaw/AsyncWithdraw"); const Marketplace = artifacts.require("Marketplace"); const MockToken = artifacts.require("MockToken"); const ComponentList = artifacts.require("ComponentList"); // Buy and sell tokens const ExchangeProvider = artifacts.require("../contracts/components/exchange/ExchangeProvider"); const MockKyberNetwork = artifacts.require("../contracts/components/exchange/exchanges/MockKyberNetwork"); const ERC20 = artifacts.require("../contracts/libs/ERC20Extended"); const fundData = { name: "OlympusBasicFund", symbol: "MBF", category: "Tests", description: "Sample of base fund", decimals: 18 }; const toTokenWei = amount => { return amount * 10 ** fundData.decimals; }; contract("BasicFund", accounts => { let fund; let market; let mockKyber; let tokens; let mockMOT; let exchange; let asyncWithdraw; let componentList; const investorA = accounts[1]; const investorB = accounts[2]; const investorC = accounts[3]; before("Set Component list", async () => { mockMOT = await MockToken.deployed(); market = await Marketplace.deployed(); mockKyber = await MockKyberNetwork.deployed(); tokens = (await mockKyber.supportedTokens()).slice(0, 2); exchange = await ExchangeProvider.deployed(); asyncWithdraw = await AsyncWithdraw.deployed(); componentList = await ComponentList.deployed(); await exchange.setMotAddress(mockMOT.address); await asyncWithdraw.setMotAddress(mockMOT.address); componentList.setComponent(DerivativeProviders.MARKET, market.address); componentList.setComponent(DerivativeProviders.EXCHANGE, exchange.address); componentList.setComponent(DerivativeProviders.WITHDRAW, asyncWithdraw.address); }); it("Create a fund", async () => { fund = await Fund.new(fundData.name, fundData.symbol, fundData.description, fundData.category, fundData.decimals); assert.equal((await fund.status()).toNumber(), 0); // new await calc.assertReverts(async () => await fund.changeStatus(DerivativeStatus.Active), "Must be still new"); await fund.initialize(componentList.address); const myProducts = await market.getOwnProducts(); assert.equal(myProducts.length, 1); assert.equal(myProducts[0], fund.address); assert.equal((await fund.status()).toNumber(), 1); // Active // The fee send is not took in account in the price but as a fee assert.equal((await fund.getPrice()).toNumber(), web3.toWei(1, "ether")); }); it("Cant call initialize twice ", async () => { await calc.assertReverts(async () => { await fund.initialize(componentList.address); }, "Shall revert"); }); it("Update component shall approve MOT ", async () => { // Set new market place const newWithdraw = await AsyncWithdraw.new(); await newWithdraw.setMotAddress(mockMOT.address); await componentList.setComponent(await fund.WITHDRAW(), newWithdraw.address); await fund.updateComponent(await fund.WITHDRAW()); assert.equal(await fund.getComponentByName(await fund.WITHDRAW()), newWithdraw.address); // Check we allowance const allowance = await mockMOT.allowance(fund.address, newWithdraw.address); assert.isAbove(allowance.toNumber(), 10 ** 32, 0, "MOT is approved for new component"); }); it("Fund shall be able to deploy", async () => { assert.equal(await fund.name(), fundData.name); assert.equal(await fund.description(), fundData.description); assert.equal(await fund.symbol(), 
fundData.symbol); assert.equal(calc.bytes32ToString(await fund.category()), fundData.category); assert.equal((await fund.fundType()).toNumber(), DerivativeType.Fund); }); it("Fund shall allow investment", async () => { let tx; // With 0 supply price is 1 eth assert.equal((await fund.totalSupply()).toNumber(), 0, "Starting supply is 0"); assert.equal((await fund.getPrice()).toNumber(), web3.toWei(1, "ether")); tx = await fund.invest({ value: web3.toWei(1, "ether"), from: investorA }); tx = await fund.invest({ value: web3.toWei(1, "ether"), from: investorB }); assert.equal((await fund.totalSupply()).toNumber(), web3.toWei(2, "ether"), "Supply is updated"); // Price is the same, as no Token value has changed assert.equal((await fund.getPrice()).toNumber(), web3.toWei(1, "ether")); assert.equal((await fund.balanceOf(investorA)).toNumber(), toTokenWei(1)); assert.equal((await fund.balanceOf(investorB)).toNumber(), toTokenWei(1)); }); it("Shall be able to request and withdraw", async () => { let tx; let tokenInWei = toTokenWei(1); assert.equal((await fund.balanceOf(investorA)).toNumber(), tokenInWei, "A has invested"); assert.equal((await fund.balanceOf(investorB)).toNumber(), tokenInWei, "B has invested"); // Request tx = await fund.withdraw({ from: investorA }); assert.equal((await fund.balanceOf(investorA)).toNumber(), 0, " A has withdrawn"); tx = await fund.withdraw({ from: investorB }); assert.equal((await fund.balanceOf(investorB)).toNumber(), 0, "B has withdrawn"); }); it("Shall be able to invest", async () => { let tx; // invest allowed await fund.invest({ value: web3.toWei(1, "ether"), from: investorA }); await fund.invest({ value: web3.toWei(1, "ether"), from: investorB }); // Request always allowed await fund.withdraw({ from: investorA }); assert.equal((await fund.balanceOf(investorA)).toNumber(), 0, " A has withdrawn"); await fund.withdraw({ from: investorB }); assert.equal((await fund.balanceOf(investorB)).toNumber(), 0, " B has withdrawn"); }); it("Buy tokens fails if ether required is not enough", async () => { // invest allowed await fund.invest({ value: web3.toWei(1.8, "ether"), from: investorA }); const balance = (await web3.eth.getBalance(fund.address)).toNumber(); assert.equal(balance, web3.toWei(1.8, "ether"), "This test must start with 1.8 eth"); const amounts = [web3.toWei(1, "ether"), web3.toWei(1, "ether")]; const rates = await Promise.all( tokens.map(async token => await mockKyber.getExpectedRate(ethToken, token, web3.toWei(0.5, "ether"))) ); await calc.assertReverts( async () => await fund.buyTokens(0x0, tokens, amounts, rates.map(rate => rate[0])), "revert if fund balance is not enough" ); }); it("Buy tokens fails if token has more than 18 decimals", async () => { const fundBalance = (await fund.getETHBalance()).toNumber(); assert.isAbove(fundBalance, 0, "This test must start with some ETH invested eth"); const token20Decimals = await MockToken.new("20 DECIMALS", "T20", 20, 10 ** 32); await calc.assertReverts( async () => await fund.buyTokens(0x0, [token20Decimals.address], [fundBalance], [0]), "Revert buy tokens with more than 18 decimals" ); }); it("Shall be able to buy tokens", async () => { // From the previous test we got 1.8 ETH const initialBalance = (await web3.eth.getBalance(fund.address)).toNumber(); assert.equal(initialBalance, web3.toWei(1.8, "ether"), "This test must start with 1.8 eth"); const rates = await Promise.all( tokens.map(async token => await mockKyber.getExpectedRate(ethToken, token, web3.toWei(0.5, "ether"))) ); const amounts = 
[web3.toWei(0.5, "ether"), web3.toWei(0.5, "ether")]; let tx; tx = await fund.buyTokens("", tokens, amounts, rates.map(rate => rate[0])); const fundTokensAndBalance = await fund.getTokens(); for (let i = 0; i < tokens.length; i++) { let erc20 = await ERC20.at(tokens[i]); let balance = await erc20.balanceOf(fund.address); assert.equal(balance, 0.5 * rates[i][0], " Fund get ERC20 correct balance"); // Check the fund data is updated correctly assert.equal(fundTokensAndBalance[0][i], tokens[i], "Token exist in fund"); assert.equal(fundTokensAndBalance[1][i].toNumber(), 0.5 * rates[i][0], "Balance is correct in th fund"); } // Price is constant assert.equal((await fund.getPrice()).toNumber(), web3.toWei(1, "ether"), "Price keeps constant after buy tokens"); // ETH balance is reduced assert.equal((await web3.eth.getBalance(fund.address)).toNumber(), web3.toWei(0.8, "ether"), "ETH balance reduced"); }); it("Shall be able to support token swap", async () => { let token0 = await ERC20.at(tokens[0]); let token1 = await ERC20.at(tokens[1]); let beforebalance0 = await token0.balanceOf(fund.address); let beforebalance1 = await token1.balanceOf(fund.address); await fund.tokenSwap("", tokens[0], tokens[1], 100 * 10 ** 18, 10 ** 18); let afterbalance0 = await token0.balanceOf(fund.address); let afterbalance1 = await token1.balanceOf(fund.address); assert.equal(beforebalance0 - afterbalance0, 100 * 10 ** 18, "Token Swap"); assert.equal(afterbalance1 - beforebalance1, 100 * 10 ** 18, "Token Swap"); }); it("Shall be able to sell tokens", async () => { let tx; // From the previous test we got 1.8 ETH const initialBalance = (await web3.eth.getBalance(fund.address)).toNumber(); assert.equal(initialBalance, web3.toWei(0.8, "ether"), "This test must start with 1.8 eth"); let fundTokensAndBalance = await fund.getTokens(); const balances = fundTokensAndBalance.map(tokenBalance => tokenBalance[1]); const sellRates = await Promise.all( tokens.map(async (token, index) => await mockKyber.getExpectedRate(token, ethToken, balances[index])) ); // We sell all tx = await fund.sellTokens("", fundTokensAndBalance[0], fundTokensAndBalance[1], sellRates.map(rate => rate[0])); fundTokensAndBalance = await fund.getTokens(); for (let i = 0; i < tokens.length; i++) { let erc20 = await ERC20.at(tokens[i]); let balance = await erc20.balanceOf(fund.address); assert.equal(balance.toNumber(), 0, "Fund get ERC20 correct balance"); // Check the fund data is updated correctly assert.equal(fundTokensAndBalance[0][i], tokens[i], "Token exist in fund"); assert.equal(fundTokensAndBalance[1][i].toNumber(), 0, "Balance is correct in the fund"); } // Price is constant assert.equal((await fund.getPrice()).toNumber(), web3.toWei(1, "ether"), "Price keeps constant after buy tokens"); // ETH balance is reduced assert.equal((await web3.eth.getBalance(fund.address)).toNumber(), web3.toWei(1.8, "ether"), "ETH balance reduced"); }); it("Shall be able to sell tokens to get enough eth for withdraw", async () => { // From the previous test we got 1.8 ETH, and investor got 1.8 Token const initialBalance = (await web3.eth.getBalance(fund.address)).toNumber(); assert.equal(initialBalance, web3.toWei(1.8, "ether"), "This test must start with 1.8 eth"); assert.equal((await fund.balanceOf(investorA)).toNumber(), toTokenWei(1.8), "A has invested with fee"); const investorABefore = await calc.ethBalance(investorA); const rates = await Promise.all( tokens.map(async token => await mockKyber.getExpectedRate(ethToken, token, web3.toWei(0.5, "ether"))) ); const amounts 
= [web3.toWei(0.9, "ether"), web3.toWei(0.9, "ether")]; await fund.buyTokens("", tokens, amounts, rates.map(rate => rate[0])); for (let i = 0; i < tokens.length; i++) { let erc20 = await ERC20.at(tokens[i]); let balance = await erc20.balanceOf(fund.address); assert.equal(balance.toNumber(), 0.9 * rates[i][0], " Fund get ERC20 correct balance"); } assert.equal( (await web3.eth.getBalance(fund.address)).toNumber(), web3.toWei(0, "ether"), "We sold all underlying tokens" ); // Request withdraw, it should sell all tokens. await fund.withdraw({ from: investorA }); // Investor has recover all his eth tokens const investorAAfter = await calc.ethBalance(investorA); assert.equal((await fund.balanceOf(investorA)).toNumber(), toTokenWei(0), "Investor redeemed all the funds"); assert(await calc.inRange(investorAAfter - investorABefore, 1.8, 0.001), "Investor A received ether"); // Price is constant assert.equal((await fund.getPrice()).toNumber(), web3.toWei(1, "ether"), "Price keeps constant after buy tokens"); }); it("Shall be able to change the status", async () => { assert.equal((await fund.status()).toNumber(), DerivativeStatus.Active, "Status Is active"); await fund.changeStatus(DerivativeStatus.Paused); assert.equal((await fund.status()).toNumber(), DerivativeStatus.Paused, " Status is paused"); await fund.changeStatus(DerivativeStatus.Active); assert.equal((await fund.status()).toNumber(), DerivativeStatus.Active, "Status Is active"); await calc.assertReverts( async () => await fund.changeStatus(DerivativeStatus.New), "Shall not be able to change to New" ); assert.equal((await fund.status()).toNumber(), DerivativeStatus.Active, " Cant change to new"); await calc.assertReverts( async () => await fund.changeStatus(DerivativeStatus.Closed), "Shall not change to Close" ); assert.equal((await fund.status()).toNumber(), DerivativeStatus.Active, " Cant change to close"); }); it("Shall be able to close a fund", async () => { await fund.invest({ value: web3.toWei(2, "ether"), from: investorC }); const initialBalance = (await web3.eth.getBalance(fund.address)).toNumber(); assert.equal((await fund.balanceOf(investorC)).toNumber(), toTokenWei(2), "C has invested"); const rates = await Promise.all( tokens.map(async token => await mockKyber.getExpectedRate(ethToken, token, web3.toWei(0.5, "ether"))) ); const amounts = [web3.toWei(1, "ether"), web3.toWei(1, "ether")]; await fund.buyTokens("", tokens, amounts, rates.map(rate => rate[0])); // ETH balance is reduced assert.equal((await web3.eth.getBalance(fund.address)).toNumber(), web3.toWei(0, "ether"), "ETH balance reduced"); await fund.close(); assert.equal((await fund.status()).toNumber(), DerivativeStatus.Closed, " Status is closed"); let fundTokensAndBalance = await fund.getTokens(); assert.equal(fundTokensAndBalance[1][0].toNumber(), 0, "token amount == 0"); assert.equal(fundTokensAndBalance[1][1].toNumber(), 0, "token amount == 0"); assert.equal((await web3.eth.getBalance(fund.address)).toNumber(), web3.toWei(2, "ether"), "ETH balance returned"); await calc.assertReverts(async () => await fund.changeStatus(DerivativeStatus.Active), "Shall not be close"); assert.equal((await fund.status()).toNumber(), DerivativeStatus.Closed, " Cant change to active "); }); it("Investor cant invest but can withdraw after close", async () => { assert.equal((await fund.balanceOf(investorC)).toNumber(), toTokenWei(2), "C starting balance"); // Investor cant invest can withdraw await calc.assertReverts( async () => await fund.invest({ value: web3.toWei(1, "ether"), from: 
investorA }), "Cant invest after close" ); // Request await fund.withdraw({ from: investorC }); assert.equal((await fund.balanceOf(investorC)).toNumber(), 0, " A has withdrawn"); }); });
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_MEDIA_ROUTER_PROVIDERS_DIAL_DIAL_INTERNAL_MESSAGE_UTIL_H_ #define CHROME_BROWSER_MEDIA_ROUTER_PROVIDERS_DIAL_DIAL_INTERNAL_MESSAGE_UTIL_H_ #include <memory> #include <string> #include "base/macros.h" #include "base/values.h" #include "chrome/browser/media/router/discovery/dial/dial_app_discovery_service.h" #include "chrome/browser/media/router/discovery/dial/parsed_dial_app_info.h" #include "components/media_router/common/mojom/media_router.mojom.h" namespace media_router { class MediaSinkInternal; // Types of internal messages that are used in a custom DIAL launch workflow. enum class DialInternalMessageType { // Cast SDK -> MR kClientConnect, kV2Message, // MR -> Cast SDK kNewSession, kReceiverAction, kError, // MR <-> Cast SDK kCustomDialLaunch, kDialAppInfo, kOther }; // Possible types of ReceiverAction taken by the user on a receiver. enum class DialReceiverAction { // The user selected a receiver with the intent of casting to it with the // sender application. kCast, // The user requested to stop the session running on a receiver. kStop }; // Parsed custom DIAL launch internal message coming from a Cast SDK client. struct DialInternalMessage { // Returns a DialInternalMessage for |message|. If |message| is not a valid // custom DIAL launch internal message, returns nullptr and sets |error| with // an error reason. static std::unique_ptr<DialInternalMessage> From(base::Value message, std::string* error); DialInternalMessage(DialInternalMessageType type, base::Optional<base::Value> body, const std::string& client_id, int sequence_number); ~DialInternalMessage(); DialInternalMessageType type; base::Optional<base::Value> body; std::string client_id; int sequence_number; DISALLOW_COPY_AND_ASSIGN(DialInternalMessage); }; // Parsed CUSTOM_DIAL_LAUNCH response from the Cast SDK client. struct CustomDialLaunchMessageBody { // Returns a CustomDialLaunchMessageBody for |message|. // This method is only valid to call if |message.type| == |kCustomDialLaunch|. static CustomDialLaunchMessageBody From(const DialInternalMessage& message); CustomDialLaunchMessageBody(); CustomDialLaunchMessageBody( bool do_launch, const base::Optional<std::string>& launch_parameter); CustomDialLaunchMessageBody(const CustomDialLaunchMessageBody& other); ~CustomDialLaunchMessageBody(); // If |true|, the DialMediaRouteProvider should handle the app launch. bool do_launch = true; // If |do_launch| is |true|, optional launch parameter to include with the // launch (POST) request. This overrides the launch parameter that was // specified in the MediaSource (if any). base::Optional<std::string> launch_parameter; }; class DialInternalMessageUtil final { public: // |hash_token|: A per-profile value used to hash sink IDs. explicit DialInternalMessageUtil(const std::string& hash_token); ~DialInternalMessageUtil(); // Returns |true| if |message| is a valid STOP_SESSION message. static bool IsStopSessionMessage(const DialInternalMessage& message); // Returns a NEW_SESSION message to be sent to the page when the user requests // an app launch. mojom::RouteMessagePtr CreateNewSessionMessage( const std::string& app_name, const std::string& client_id, const MediaSinkInternal& sink) const; // Returns a RECEIVER_ACTION / CAST message to be sent to the page when the // user requests an app launch. 
mojom::RouteMessagePtr CreateReceiverActionCastMessage( const std::string& client_id, const MediaSinkInternal& sink) const; // Returns a RECEIVER_ACTION / STOP message to be sent to the page when an app // is stopped by DialMediaRouteProvider. mojom::RouteMessagePtr CreateReceiverActionStopMessage( const std::string& client_id, const MediaSinkInternal& sink) const; // Returns a CUSTOM_DIAL_LAUNCH request message to be sent to the page. // Generates and returns the next number to associate a DIAL launch sequence // with. std::pair<mojom::RouteMessagePtr, int> CreateCustomDialLaunchMessage( const std::string& client_id, const MediaSinkInternal& sink, const ParsedDialAppInfo& app_info) const; // Creates an app info message used in a DIAL_APP_INFO response or a // CUSTOM_DIAL_LAUNCH (called via CreateCustomDialLaunchMessage() above) // message. mojom::RouteMessagePtr CreateDialAppInfoMessage( const std::string& client_id, const MediaSinkInternal& sink, const ParsedDialAppInfo& app_info, int sequence_number, DialInternalMessageType type) const; mojom::RouteMessagePtr CreateDialAppInfoErrorMessage( DialAppInfoResultCode result_code, const std::string& client_id, int sequence_number, const std::string& error_message, base::Optional<int> http_error_code = base::nullopt) const; private: base::Value CreateReceiver(const MediaSinkInternal& sink) const; base::Value CreateReceiverActionBody(const MediaSinkInternal& sink, DialReceiverAction action) const; base::Value CreateNewSessionBody(const std::string& app_name, const MediaSinkInternal& sink) const; base::Value CreateDialAppInfoBody(const MediaSinkInternal& sink, const ParsedDialAppInfo& app_info) const; // |sequence_number| is used by the Cast SDK to match up requests from the SDK // to Chrome with responses from Chrome. If a message from Chrome has no // corresponding request, then its |sequence_number| is an invalid value of // -1. base::Value CreateDialMessageCommon(DialInternalMessageType type, base::Value body, const std::string& client_id, int sequence_number = -1) const; std::string hash_token_; DISALLOW_COPY_AND_ASSIGN(DialInternalMessageUtil); }; } // namespace media_router #endif // CHROME_BROWSER_MEDIA_ROUTER_PROVIDERS_DIAL_DIAL_INTERNAL_MESSAGE_UTIL_H_
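# The header above fixes the fields of an internal message (type, body,
# client_id, sequence_number) and the -1 sentinel for messages that answer no
# specific SDK request. A rough sketch of that shape as JSON; the exact key
# casing is an illustrative assumption, not taken from the header.
import json

def make_dial_internal_message(msg_type, body, client_id, sequence_number=-1):
    # sequence_number defaults to the invalid value -1, mirroring the default
    # argument of CreateDialMessageCommon().
    return json.dumps({
        'type': msg_type,
        'message': body,
        'clientId': client_id,
        'sequenceNumber': sequence_number,
    })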
export default {
  CHANGE_LOGS_NUM: (state) => {
    // Prefix each log entry with a 1-based index, unless it is already numbered.
    return state.index.logs.map((item, i) =>
      item.indexOf('. ') === -1 ? `${i + 1}. ${item}` : item
    );
  },
};
/* See LICENSE file for copyright and license details. */
/* Default settings; can be overridden by command line. */

static int topbar = 1; /* -b option; if 0, dmenu appears at bottom */
static int fuzzy = 1;  /* -F option; if 0, dmenu doesn't use fuzzy matching */
/* -fn option overrides fonts[0]; default X11 font or font set */
static const char *fonts[] = {
	"monospace:size=10"
};
static const unsigned int bgalpha = 0xe0;
static const unsigned int fgalpha = OPAQUE;
static const char *prompt = NULL; /* -p option; prompt to the left of input field */
static const char *colors[SchemeLast][2] = {
	/*                        fg         bg       */
	[SchemeNorm]          = { "#bbbbbb", "#222222" },
	[SchemeSel]           = { "#eeeeee", "#005577" },
	[SchemeSelHighlight]  = { "#ffc978", "#005577" },
	[SchemeNormHighlight] = { "#ffc978", "#222222" },
	[SchemeOut]           = { "#000000", "#00ffff" },
};
static unsigned int alphas[SchemeLast][2] = {
	/*                        fgalpha  bgalpha */
	[SchemeNorm]          = { fgalpha, bgalpha },
	[SchemeSel]           = { fgalpha, bgalpha },
	[SchemeSelHighlight]  = { fgalpha, bgalpha },
	[SchemeNormHighlight] = { fgalpha, bgalpha },
	[SchemeOut]           = { fgalpha, bgalpha },
};
/* -l option; if nonzero, dmenu uses vertical list with given number of lines */
static unsigned int lines = 0;

/*
 * Characters not considered part of a word while deleting words
 * for example: " /?\"&[]"
 */
static const char worddelimiters[] = " ";
#!/usr/bin/python ''' --- Day 7: The Sum of Its Parts --- You find yourself standing on a snow-covered coastline; apparently, you landed a little off course. The region is too hilly to see the North Pole from here, but you do spot some Elves that seem to be trying to unpack something that washed ashore. It's quite cold out, so you decide to risk creating a paradox by asking them for directions. "Oh, are you the search party?" Somehow, you can understand whatever Elves from the year 1018 speak; you assume it's Ancient Nordic Elvish. Could the device on your wrist also be a translator? "Those clothes don't look very warm; take this." They hand you a heavy coat. "We do need to find our way back to the North Pole, but we have higher priorities at the moment. You see, believe it or not, this box contains something that will solve all of Santa's transportation problems - at least, that's what it looks like from the pictures in the instructions." It doesn't seem like they can read whatever language it's in, but you can: "Sleigh kit. Some assembly required." "'Sleigh'? What a wonderful name! You must help us assemble this 'sleigh' at once!" They start excitedly pulling more parts out of the box. The instructions specify a series of steps and requirements about which steps must be finished before others can begin (your puzzle input). Each step is designated by a single letter. For example, suppose you have the following instructions: Step C must be finished before step A can begin. Step C must be finished before step F can begin. Step A must be finished before step B can begin. Step A must be finished before step D can begin. Step B must be finished before step E can begin. Step D must be finished before step E can begin. Step F must be finished before step E can begin. Visually, these requirements look like this: -->A--->B-- / \\ \ C -->D----->E \\ / ---->F----- Your first goal is to determine the order in which the steps should be completed. If more than one step is ready, choose the step which is first alphabetically. In this example, the steps would be completed as follows: Only C is available, and so it is done first. Next, both A and F are available. A is first alphabetically, so it is done next. Then, even though F was available earlier, steps B and D are now also available, and B is the first alphabetically of the three. After that, only D and F are available. E is not available because only some of its prerequisites are complete. Therefore, D is completed next. F is the only choice, so it is done next. Finally, E is completed. So, in this example, the correct order is CABDFE. In what order should the steps in your instructions be completed? --- Part Two --- As you're about to begin construction, four of the Elves offer to help. "The sun will set soon; it'll go faster if we work together." Now, you need to account for multiple people working on steps simultaneously. If multiple steps are available, workers should still begin them in alphabetical order. Each step takes 60 seconds plus an amount corresponding to its letter: A=1, B=2, C=3, and so on. So, step A takes 60+1=61 seconds, while step Z takes 60+26=86 seconds. No time is required between steps. To simplify things for the example, however, suppose you only have help from one Elf (a total of two workers) and that each step takes 60 fewer seconds (so that step A takes 1 second and step Z takes 26 seconds). Then, using the same instructions as above, this is how each second would be spent: Second Worker 1 Worker 2 Done 0 C . 1 C . 2 C . 
3 A F C 4 B F CA 5 B F CA 6 D F CAB 7 D F CAB 8 D F CAB 9 D . CABF 10 E . CABFD 11 E . CABFD 12 E . CABFD 13 E . CABFD 14 E . CABFD 15 . . CABFDE Each row represents one second of time. The Second column identifies how many seconds have passed as of the beginning of that second. Each worker column shows the step that worker is currently doing (or . if they are idle). The Done column shows completed steps. Note that the order of the steps has changed; this is because steps now take time to finish and multiple workers can begin multiple steps simultaneously. In this example, it would take 15 seconds for two workers to complete these steps. With 5 workers and the 60+ second step durations described above, how long will it take to complete all of the steps? ''' import textwrap def get_input(filename='2018/7/input.txt'): with open(filename) as fil: for line in fil.readlines(): yield line def example_input(): for line in textwrap.dedent( '''Step C must be finished before step A can begin. Step C must be finished before step F can begin. Step A must be finished before step B can begin. Step A must be finished before step D can begin. Step B must be finished before step E can begin. Step D must be finished before step E can begin. Step F must be finished before step E can begin. ''').splitlines(): yield line def parse_input(data): ''' >>> parse_input(example_input()) [('C', 'A'), ('C', 'F'), ('A', 'B'), ('A', 'D'), ('B', 'E'), ('D', 'E'), ('F', 'E')] ''' output = [] for line in data: line = line.split() pre = line[1] nxt = line[7] output.append((pre, nxt)) return output def build_graph(steps): ''' >>> build_graph(parse_input(example_input())) {'C': ['A', 'F'], 'A': ['B', 'D'], 'F': ['E'], 'B': ['E'], 'D': ['E'], 'E': []} ''' graph = {} for pre, nxt in steps: if pre in graph: graph[pre].append(nxt) else: graph[pre] = [nxt] if not nxt in graph: graph[nxt] = [] return graph def is_blocked(node, graph): ''' >>> is_blocked('A', build_graph(parse_input(example_input()))) True >>> is_blocked('C', build_graph(parse_input(example_input()))) False ''' for a, pre in graph.items(): for p in pre: if p == node: return True return False def free_to_build(graph): ''' >>> free_to_build(build_graph(parse_input(example_input()))) ['C'] ''' free = [] for node in graph: if not is_blocked(node, graph): free += node return free def build_sleigh(graph): ''' >>> build_sleigh(build_graph(parse_input(example_input()))) 'CABDFE' ''' sequence = '' while graph: node = sorted(free_to_build(graph))[0] sequence += node del(graph[node]) return sequence def work_to_do(node, base_time=0): ''' >>> work_to_do('A') 1 >>> work_to_do('Z') 26 >>> work_to_do('A', 60) 61 >>> work_to_do('Z', 60) 86 ''' return ord(node) - ord('A') + 1 + base_time def in_progress(node, workers): for work in workers: if work['node'] == node: return True return False def build_sleigh_with_workers(num_workers, graph, base_time=0): ''' >>> build_sleigh_with_workers(2, build_graph(parse_input(example_input())), 0) (15, 'CABFDE') ''' workers = [{'node': None, 'remain': 0} for _ in range(num_workers)] #{node, sec_remaining} sequence = '' time = 0 while graph: for work in workers: if work['remain']: work['remain'] -= 1 if work['node'] and not work['remain']: #work on node is done del(graph[work['node']]) sequence += work['node'] work['node'] = None for work in workers: if not work['node']: nodes = sorted(free_to_build(graph)) for node in nodes: if not in_progress(node, workers): work['node'] = node work['remain'] = work_to_do(node, base_time) break time += 1 return 
time-1, sequence def main(): ''' >>> main() Example A: CABDFE Part A: CGKMUWXFAIHSYDNLJQTREOPZBV Example B: 15 Part B: 1046 ''' print('Example A:', build_sleigh(build_graph(parse_input(example_input())))) print('Part A:', build_sleigh(build_graph(parse_input(get_input())))) eb, _ = build_sleigh_with_workers(2, build_graph(parse_input(example_input())), 0) print('Example B:', eb) eb, _ = build_sleigh_with_workers(5, build_graph(parse_input(get_input())), 60) print('Part B:', eb) # data = get_input() if __name__ == '__main__': main()
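# Part A above is a topological sort with alphabetical tie-breaking; since
# Python 3.9 the same ordering can be sketched with the stdlib graphlib,
# keeping a sorted pool of ready nodes.
from graphlib import TopologicalSorter

def build_sleigh_graphlib(steps):
    # steps is the list of (pre, nxt) pairs produced by parse_input().
    sorter = TopologicalSorter()
    for pre, nxt in steps:
        sorter.add(nxt, pre)  # nxt depends on pre
    sorter.prepare()
    ready, order = [], ''
    while sorter.is_active():
        ready.extend(sorter.get_ready())
        ready.sort()
        node = ready.pop(0)  # alphabetically first available step
        order += node
        sorter.done(node)
    return order

# build_sleigh_graphlib(parse_input(example_input())) == 'CABDFE'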
self.insert_sample().running = False self.insert_sample().timeout_start = 1559568346.443381 cancelled = False timeout_period = 10 timeout_start = 1559569685.139272 x = 0.263365334794732 y = 0.548708348754011 self.retract_sample().running = False self.retract_sample().timeout_start = 1559569686.983505 self.pump_turn_off().running = False self.pump_turn_off().timeout_start = 1549250188.466 action = 'extract sample' extract_step = u'-510' load_step = u'1000' circulate_step = u'2000' self.pump_turn_on().running = False self.pump_turn_on().timeout_start = 1549250842.22
'use strict' const assert = require('assert') const sinon = require('sinon') const { cloneDeep } = require('lodash') const IlpPacket = require('ilp-packet') const appHelper = require('../helpers/app') const logHelper = require('../helpers/log') const logger = require('../../build/common/log') const START_DATE = 1434412800000 // June 16, 2015 00:00:00 GMT const mockPlugin = require('../mocks/mockPlugin') const mock = require('mock-require') mock('ilp-plugin-mock', mockPlugin) const env = cloneDeep(process.env) describe('Plugin Profile Mode', function () { logHelper(logger) beforeEach(async function () { process.env.DEBUG = '*' process.env.CONNECTOR_ACCOUNTS = JSON.stringify({ 'parent': { 'relation': 'parent', 'assetCode': 'CAD', 'assetScale': 4, 'plugin': 'ilp-plugin-mock', 'disableMiddleware': true }, 'test.usd-ledger': { 'relation': 'peer', 'assetCode': 'USD', 'assetScale': 4, 'plugin': 'ilp-plugin-mock', 'options': {} } }) process.env.CONNECTOR_PROFILE = 'plugin' appHelper.create(this) this.clock = sinon.useFakeTimers(START_DATE) this.accounts.setOwnAddress(this.config.ilpAddress) await this.accounts.startup() await this.backend.connect() await this.routeBroadcaster.reloadLocalRoutes() }) afterEach(async function () { this.clock.restore() process.env = cloneDeep(env) }) it('routes ILP packets sent from plugin directly to parent', async function () { const preparePacket = IlpPacket.serializeIlpPrepare({ amount: '100', executionCondition: Buffer.from('uzoYx3K6u+Nt6kZjbN6KmH0yARfhkj9e17eQfpSeB7U=', 'base64'), expiresAt: new Date(START_DATE + 2000), destination: 'mock.test2.bob', data: Buffer.alloc(0) }) const fulfillPacket = IlpPacket.serializeIlpFulfill({ fulfillment: Buffer.from('HS8e5Ew02XKAglyus2dh2Ohabuqmy3HDM8EXMLz22ok', 'base64'), data: Buffer.alloc(0) }) const stub = sinon.stub(mockPlugin.prototype, 'sendData').resolves(fulfillPacket) const ilpPrepareControllerSpy = sinon.stub(this.ilpPrepareController, 'sendIlpPacket') const result = await this.accounts.get('test.usd-ledger').getPlugin()._dataHandler(preparePacket) assert.strictEqual(result.toString('hex'), fulfillPacket.toString('hex')) sinon.assert.notCalled(ilpPrepareControllerSpy) stub.restore() ilpPrepareControllerSpy.restore() }) it('routes ILP packets sent from parent directly to plugin', async function () { const preparePacket = IlpPacket.serializeIlpPrepare({ amount: '100', executionCondition: Buffer.from('uzoYx3K6u+Nt6kZjbN6KmH0yARfhkj9e17eQfpSeB7U=', 'base64'), expiresAt: new Date(START_DATE + 2000), destination: 'mock.test2.bob', data: Buffer.alloc(0) }) const fulfillPacket = IlpPacket.serializeIlpFulfill({ fulfillment: Buffer.from('HS8e5Ew02XKAglyus2dh2Ohabuqmy3HDM8EXMLz22ok', 'base64'), data: Buffer.alloc(0) }) const stub = sinon.stub(mockPlugin.prototype, 'sendData').resolves(fulfillPacket) const ilpPrepareControllerSpy = sinon.stub(this.ilpPrepareController, 'sendIlpPacket') const result = await this.accounts.get('parent').getPlugin()._dataHandler(preparePacket) assert.strictEqual(result.toString('hex'), fulfillPacket.toString('hex')) sinon.assert.notCalled(ilpPrepareControllerSpy) stub.restore() ilpPrepareControllerSpy.restore() }) it('doesnt have any middleware in parent pipeline', async function () { const result = await this.accounts.getAccountMiddleware(this.accounts.get('parent')) assert.deepStrictEqual(result, {}) }) it('it does have middleware on plugin pipeline', async function () { const result = await this.accounts.getAccountMiddleware(this.accounts.get('test.usd-ledger')) // Note the 
this.accounts._middlewares object already holds the instantiated middleware
    assert.deepStrictEqual(result, this.accounts._middlewares)
  })
})
import morepath

from more.webassets import WebassetsApp
from more.webassets.core import webassets_injector_tween
from onegov.core.cache import lru_cache
from onegov.core.security import Public
from onegov.user.auth.core import Auth
from onegov.user.auth.provider import AUTHENTICATION_PROVIDERS, AzureADProvider
from onegov.user.auth.provider import AuthenticationProvider
from onegov.user.auth.provider import Conclusion
from onegov.user.auth.provider import provider_by_name
from webob.exc import HTTPUnauthorized
from webob.response import Response


class UserApp(WebassetsApp):
    """ Provides user integration.

    Historically it was not necessary to use this app for user integration,
    and most features are still possible without it. However, third-party
    authentication providers only work if the UserApp is integrated.

    The following configuration options are accepted:

    :authentication_providers:

        A dictionary of provider-specific configuration settings, see
        :mod:`onegov.user.auth.provider` for more information.

    """

    @property
    def providers(self):
        """ Returns a tuple of available providers. """

        return getattr(self, 'available_providers', ())

    @lru_cache(maxsize=8)
    def provider(self, name):
        return provider_by_name(self.providers, name)

    def on_login(self, request, user):
        """ Called by the auth module, whenever a successful login was
        completed.

        """

    def redirect_after_login(self, identity, request, default):
        """ Returns the path to redirect after login, given the received
        identity, the request and the default path.

        Returns a path, or None if the default path should be used.

        """
        return None

    def configure_authentication_providers(self, **cfg):

        def bound(provider):
            if 'authentication_providers' not in cfg:
                return {}

            if provider.metadata.name not in cfg['authentication_providers']:
                return {}

            return cfg['authentication_providers'][provider.metadata.name]

        available = AUTHENTICATION_PROVIDERS.values()
        available = (cls.configure(**bound(cls)) for cls in available)
        available = (obj for obj in available if obj is not None)

        self.available_providers = tuple(available)

        # enable auto login for the first provider that has it configured,
        # and only the first (others are ignored)
        for provider in self.available_providers:
            config = cfg['authentication_providers'][provider.metadata.name]

            if config.get('auto_login'):
                self.auto_login_provider = provider
                break
        else:
            self.auto_login_provider = None


@UserApp.path(
    model=AuthenticationProvider,
    path='/auth/provider/{name}')
def authentication_provider(app, name, to='/'):
    if name == 'auto':
        provider = app.auto_login_provider
    else:
        provider = app.provider(name)

    if not provider:
        return None

    # the 'to' is just held here to be able to reuse it in the view
    provider.to = to

    return provider


@UserApp.view(
    model=AuthenticationProvider,
    permission=Public)
def handle_authentication(self, request):
    response = self.authenticate_request(request)

    # the provider returned its own HTTP response
    if isinstance(response, Response):
        return response

    # the provider reached a conclusion
    if isinstance(response, Conclusion):
        ajax_request = request.path_info.endswith('/auto')

        if response:
            if not ajax_request:
                request.success(request.translate(response.note))

            return Auth.from_request(request, to=self.to)\
                .complete_login(user=response.user, request=request)
        else:
            if not ajax_request:
                request.alert(request.translate(response.note))

            # Answering with a plain 403 would be more correct, but some
            # frontend-web-servers will not show our content in this case
            # and that means we cannot help the user much.
            #
            # So we deliberately chose to be less correct, but more user
            # friendly here. Unfortunately we do not always control the
            # frontend-web-servers to mitigate this (on-premise deployments).
            if not ajax_request:
                return request.redirect(request.class_link(Auth, name='login'))

            return HTTPUnauthorized()

    # the provider returned something illegal
    raise RuntimeError(f"Invalid response from {self.name}: {response}")


@UserApp.view(
    model=AuthenticationProvider,
    permission=Public,
    name='redirect')
def handle_provider_authorisation(self, request):
    response = self.request_authorisation(request)

    if isinstance(response, Response):
        return response

    if isinstance(response, Conclusion):
        # catching the success conclusion with the ensured user
        if response:
            return Auth.from_request(request, to=self.to) \
                .complete_login(user=response.user, request=request)
        else:
            request.alert(request.translate(response.note))
            login_to = request.browser_session.pop('login_to')

            # On failure we take `to` and bring the user back to the url
            # where they started the authentication process.
            return request.redirect(
                request.class_link(Auth, {'to': login_to}, name='login')
            )

    raise RuntimeError(f"Invalid response from {self.name}: {response}")


@UserApp.view(
    model=AuthenticationProvider,
    permission=Public,
    name='logout')
def handle_provider_logout(self, request):
    """ We tell Microsoft that the user wants to log out and redirect them
    to our main logout view.

    """
    if isinstance(self, AzureADProvider):
        request.browser_session['logout_to'] = self.to
        return morepath.redirect(self.logout_url(request))

    raise NotImplementedError


@UserApp.webasset_path()
def get_js_path():
    return 'assets/js'


@UserApp.webasset('auto-login')
def get_preview_widget_asset():
    yield 'auto-login.js'


@UserApp.tween_factory(over=webassets_injector_tween)
def auto_login_tween_factory(app, handler):

    def auto_login_tween(request):
        """ Optionally injects an auto-login javascript asset.

        The auto-login javascript will call the auto-login provider and
        redirect the user if successful. This requires that the login
        provider can do the login without user interaction.

        """
        if getattr(app, 'auto_login_provider', False):
            request.include('auto-login')

        return handler(request)

    return auto_login_tween
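# The shape of the configuration consumed by configure_authentication_providers()
# above, as a sketch; 'msal' and its settings are hypothetical illustration
# values, not names taken from onegov, and `app` stands for a UserApp instance.
app.configure_authentication_providers(authentication_providers={
    'msal': {                # hypothetical provider name
        'client_id': '...',  # provider-specific settings, passed to configure()
        'auto_login': True,  # the first provider with this flag becomes
                             # app.auto_login_provider
    },
})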
import { Interpolant } from '../Interpolant.js'; import { Quaternion } from '../Quaternion.js'; /** * Spherical linear unit quaternion interpolant. */ function QuaternionLinearInterpolant( parameterPositions, sampleValues, sampleSize, resultBuffer ) { Interpolant.call( this, parameterPositions, sampleValues, sampleSize, resultBuffer ); } QuaternionLinearInterpolant.prototype = Object.assign( Object.create( Interpolant.prototype ), { constructor: QuaternionLinearInterpolant, interpolate_: function ( i1, t0, t, t1 ) { const result = this.resultBuffer, values = this.sampleValues, stride = this.valueSize, alpha = ( t - t0 ) / ( t1 - t0 ); let offset = i1 * stride; for ( let end = offset + stride; offset !== end; offset += 4 ) { Quaternion.slerpFlat( result, 0, values, offset - stride, values, offset, alpha ); } return result; } } ); export { QuaternionLinearInterpolant };
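The interpolant above delegates all the work to Quaternion.slerpFlat. A minimal NumPy sketch of the underlying spherical linear interpolation, assuming unit quaternions stored as (x, y, z, w) arrays; this is the math, not the three.js API:

import numpy as np

def slerp(q0, q1, t, eps=1e-6):
    q0, q1 = np.asarray(q0, float), np.asarray(q1, float)
    dot = np.dot(q0, q1)
    if dot < 0.0:           # take the shorter arc
        q1, dot = -q1, -dot
    if dot > 1.0 - eps:     # nearly parallel: fall back to lerp
        out = q0 + t * (q1 - q0)
        return out / np.linalg.norm(out)
    theta = np.arccos(dot)
    return (np.sin((1.0 - t) * theta) * q0 + np.sin(t * theta) * q1) / np.sin(theta)

# identity -> 180 degrees about z; halfway is 90 degrees about z
print(slerp([0, 0, 0, 1], [0, 0, 1, 0], 0.5))  # ~ [0, 0, 0.7071, 0.7071]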
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE194_Unexpected_Sign_Extension__negative_memcpy_42.c Label Definition File: CWE194_Unexpected_Sign_Extension.label.xml Template File: sources-sink-42.tmpl.c */ /* * @description * CWE: 194 Unexpected Sign Extension * BadSource: negative Set data to a fixed negative number * GoodSource: Positive integer * Sink: memcpy * BadSink : Copy strings using memcpy() with the length of data * Flow Variant: 42 Data flow: data returned from one function to another in the same source file * * */ #include "std_testcase.h" #ifndef OMITBAD static short bad_source(short data) { /* FLAW: Use a negative number */ data = -1; return data; } void CWE194_Unexpected_Sign_Extension__negative_memcpy_42_bad() { short data; /* Initialize data */ data = 0; data = bad_source(data); { char src[100]; char dest[100] = ""; memset(src, 'A', 100-1); src[100-1] = '\0'; if (data < 100) { /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative, * the sign extension could result in a very large number */ memcpy(dest, src, data); dest[data] = '\0'; /* NULL terminate */ } printLine(dest); } } #endif /* OMITBAD */ #ifndef OMITGOOD static short goodG2B_source(short data) { /* FIX: Use a positive integer less than &InitialDataSize&*/ data = 100-1; return data; } /* goodG2B uses the GoodSource with the BadSink */ static void goodG2B() { short data; /* Initialize data */ data = 0; data = goodG2B_source(data); { char src[100]; char dest[100] = ""; memset(src, 'A', 100-1); src[100-1] = '\0'; if (data < 100) { /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative, * the sign extension could result in a very large number */ memcpy(dest, src, data); dest[data] = '\0'; /* NULL terminate */ } printLine(dest); } } void CWE194_Unexpected_Sign_Extension__negative_memcpy_42_good() { goodG2B(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE194_Unexpected_Sign_Extension__negative_memcpy_42_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE194_Unexpected_Sign_Extension__negative_memcpy_42_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
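A plain-Python illustration of the CWE-194 hazard the testcase above exercises: when the negative short is used as memcpy's size_t length, the sign extension and unsigned reinterpretation yield an enormous copy length.

import struct

data = -1  # the value bad_source() returns
# reinterpret the 16-bit signed value as unsigned
print(struct.unpack('<H', struct.pack('<h', data))[0])   # 65535
# in C the short is sign-extended first, so as a 64-bit size_t it becomes:
print(struct.unpack('<Q', struct.pack('<q', data))[0])   # 18446744073709551615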
# -*- coding: utf-8 -*- """A plugin to generate a list of domains visited.""" from urllib import parse as urlparse from plaso.analysis import interface from plaso.analysis import manager class UniqueDomainsVisitedPlugin(interface.AnalysisPlugin): """A plugin to generate a list all domains visited. This plugin will extract domains from browser history events extracted by Plaso. The list produced can be used to quickly determine if there has been a visit to a site of interest, for example, a known phishing site. """ NAME = 'unique_domains_visited' _SUPPORTED_EVENT_DATA_TYPES = frozenset([ 'chrome:history:file_downloaded', 'chrome:history:page_visited', 'firefox:downloads:download', 'firefox:places:page_visited', 'macosx:lsquarantine', 'msiecf:redirected', 'msiecf:url', 'msie:webcache:container', 'opera:history', 'safari:history:visit']) # pylint: disable=unused-argument def ExamineEvent( self, analysis_mediator, event, event_data, event_data_stream): """Analyzes an event and extracts domains from it. We only evaluate straightforward web history events, not visits which can be inferred by TypedURLs, cookies or other means. Args: analysis_mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfVFS. event (EventObject): event to examine. event_data (EventData): event data. event_data_stream (EventDataStream): event data stream. """ if event_data.data_type not in self._SUPPORTED_EVENT_DATA_TYPES: return url = getattr(event_data, 'url', None) if url: parsed_url = urlparse.urlparse(url) domain = getattr(parsed_url, 'netloc', None) if domain: self._analysis_counter[domain] += 1 manager.AnalysisPluginManager.RegisterPlugin(UniqueDomainsVisitedPlugin)
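A standalone sketch of the domain extraction and counting the plugin performs, using the same urlparse-based approach; the URLs are made-up samples:

import collections
from urllib import parse as urlparse

counter = collections.Counter()
for url in ('https://example.com/a', 'https://example.com/b',
            'http://phishing.test/login'):
    domain = urlparse.urlparse(url).netloc
    if domain:
        counter[domain] += 1

print(counter.most_common())  # [('example.com', 2), ('phishing.test', 1)]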
# Generated by Django 3.1.4 on 2020-12-12 06:58 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Blog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=30)), ('description', models.TextField()), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=30)), ('text', models.TextField()), ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogs.blog')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.TextField()), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/kinesis/Kinesis_EXPORTS.h> #include <aws/kinesis/KinesisRequest.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <aws/kinesis/model/ScalingType.h> #include <utility> namespace Aws { namespace Kinesis { namespace Model { /** */ class AWS_KINESIS_API UpdateShardCountRequest : public KinesisRequest { public: UpdateShardCountRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. inline virtual const char* GetServiceRequestName() const override { return "UpdateShardCount"; } Aws::String SerializePayload() const override; Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override; /** * <p>The name of the stream.</p> */ inline const Aws::String& GetStreamName() const{ return m_streamName; } /** * <p>The name of the stream.</p> */ inline bool StreamNameHasBeenSet() const { return m_streamNameHasBeenSet; } /** * <p>The name of the stream.</p> */ inline void SetStreamName(const Aws::String& value) { m_streamNameHasBeenSet = true; m_streamName = value; } /** * <p>The name of the stream.</p> */ inline void SetStreamName(Aws::String&& value) { m_streamNameHasBeenSet = true; m_streamName = std::move(value); } /** * <p>The name of the stream.</p> */ inline void SetStreamName(const char* value) { m_streamNameHasBeenSet = true; m_streamName.assign(value); } /** * <p>The name of the stream.</p> */ inline UpdateShardCountRequest& WithStreamName(const Aws::String& value) { SetStreamName(value); return *this;} /** * <p>The name of the stream.</p> */ inline UpdateShardCountRequest& WithStreamName(Aws::String&& value) { SetStreamName(std::move(value)); return *this;} /** * <p>The name of the stream.</p> */ inline UpdateShardCountRequest& WithStreamName(const char* value) { SetStreamName(value); return *this;} /** * <p>The new number of shards.</p> */ inline int GetTargetShardCount() const{ return m_targetShardCount; } /** * <p>The new number of shards.</p> */ inline bool TargetShardCountHasBeenSet() const { return m_targetShardCountHasBeenSet; } /** * <p>The new number of shards.</p> */ inline void SetTargetShardCount(int value) { m_targetShardCountHasBeenSet = true; m_targetShardCount = value; } /** * <p>The new number of shards.</p> */ inline UpdateShardCountRequest& WithTargetShardCount(int value) { SetTargetShardCount(value); return *this;} /** * <p>The scaling type. Uniform scaling creates shards of equal size.</p> */ inline const ScalingType& GetScalingType() const{ return m_scalingType; } /** * <p>The scaling type. Uniform scaling creates shards of equal size.</p> */ inline bool ScalingTypeHasBeenSet() const { return m_scalingTypeHasBeenSet; } /** * <p>The scaling type. Uniform scaling creates shards of equal size.</p> */ inline void SetScalingType(const ScalingType& value) { m_scalingTypeHasBeenSet = true; m_scalingType = value; } /** * <p>The scaling type. Uniform scaling creates shards of equal size.</p> */ inline void SetScalingType(ScalingType&& value) { m_scalingTypeHasBeenSet = true; m_scalingType = std::move(value); } /** * <p>The scaling type. 
Uniform scaling creates shards of equal size.</p> */ inline UpdateShardCountRequest& WithScalingType(const ScalingType& value) { SetScalingType(value); return *this;} /** * <p>The scaling type. Uniform scaling creates shards of equal size.</p> */ inline UpdateShardCountRequest& WithScalingType(ScalingType&& value) { SetScalingType(std::move(value)); return *this;} private: Aws::String m_streamName; bool m_streamNameHasBeenSet; int m_targetShardCount; bool m_targetShardCountHasBeenSet; ScalingType m_scalingType; bool m_scalingTypeHasBeenSet; }; } // namespace Model } // namespace Kinesis } // namespace Aws
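For comparison with the C++ request builder above, the same operation via boto3, whose Kinesis client exposes update_shard_count with matching parameters. Stream name and target count are placeholders, and real credentials are assumed to be configured:

import boto3

client = boto3.client('kinesis')
client.update_shard_count(
    StreamName='my-stream',         # placeholder
    TargetShardCount=4,             # placeholder
    ScalingType='UNIFORM_SCALING',  # matches the ScalingType member above
)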
/* * Copyright (c) 2015-2018, Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ //! //! \file mhw_mmio_g9.h //! \brief Define the MMIO registers access of Gen9 //! \details //! #ifndef __MHW_MMIO_G9_H__ #define __MHW_MMIO_G9_H__ // CS register offsets #define CS_GENERAL_PURPOSE_REGISTER0_LO_OFFSET_G9 0x2600 #define CS_GENERAL_PURPOSE_REGISTER0_HI_OFFSET_G9 0x2604 #define CS_GENERAL_PURPOSE_REGISTER4_LO_OFFSET_G9 0x2620 #define CS_GENERAL_PURPOSE_REGISTER4_HI_OFFSET_G9 0x2624 #define CS_GENERAL_PURPOSE_REGISTER11_LO_OFFSET_G9 0x2658 #define CS_GENERAL_PURPOSE_REGISTER11_HI_OFFSET_G9 0x265C #define CS_GENERAL_PURPOSE_REGISTER12_LO_OFFSET_G9 0x2660 #define CS_GENERAL_PURPOSE_REGISTER12_HI_OFFSET_G9 0x2664 // Vebox register offsets // Used in Commen MI #define GP_REGISTER0_LO_OFFSET_G9 0x1A600 #define GP_REGISTER0_HI_OFFSET_G9 0x1A604 #define GP_REGISTER4_LO_OFFSET_G9 0x1A620 #define GP_REGISTER4_HI_OFFSET_G9 0x1A624 #define GP_REGISTER11_LO_OFFSET_G9 0x1A658 #define GP_REGISTER11_HI_OFFSET_G9 0x1A65C #define GP_REGISTER12_LO_OFFSET_G9 0x1A660 #define GP_REGISTER12_HI_OFFSET_G9 0x1A664 //VDBOX HCP register offsets #define HCP_ENC_IMAGE_STATUS_MASK_REG_OFFSET_INIT_G9 0x1E9B8 #define HCP_ENC_IMAGE_STATUS_CTRL_REG_OFFSET_INIT_G9 0x1E9BC #define HCP_ENC_BIT_STREAM_BYTE_COUNT_FRAME_REG_OFFSET_INIT_G9 0x1E9A0 #define HCP_ENC_BIT_STREAM_SE_BIT_COUNT_FRAME_REG_OFFSET_INIT_G9 0x1E9A8 #define HCP_ENC_BIT_STREAM_BYTE_COUNT_FRAME_NO_HEADER_REG_OFFSET_INIT_G9 0x1E9A4 #define HCP_ENC_QP_STATUS_COUNT_REG_OFFSET_INIT_G9 0x1E9C0 #define HCP_VP9_ENC_BITSTREAM_BYTE_COUNT_FRAME_REG_OFFSET_INIT_G9 0x1E9E0 #define HCP_VP9_ENC_BITSTREAM_BYTE_COUNT_FRAME_NO_HEADER_REG_OFFSET_INIT_G9 0x1E9E4 #define HCP_VP9_ENC_IMAGE_STATUS_MASK_REG_OFFSET_INIT_G9 0x1E9F0 #define HCP_VP9_ENC_IMAGE_STATUS_CTRL_REG_OFFSET_INIT_G9 0x1E9F4 #define HCP_DEC_STATUS_REG_OFFSET_INIT_G9 0x1E900 #define HCP_CABAC_STATUS_REG_OFFSET_INIT_G9 0x1E904 //VDBOX HCP register initial values #define HCP_ENC_SLICE_COUNT_REG_OFFSET_INIT_G9 0 #define HCP_ENC_VDENC_MODE_TIMER_REG_OFFSET_INIT_G9 0 #define CS_ENGINE_ID_OFFSET_INIT_G9 0 //VDBOX HUC #define HUC_UKERNEL_HDR_INFO_REG_OFFSET_NODE_1_INIT_G9 0x0D014 #define HUC_STATUS_REG_OFFSET_NODE_1_INIT_G9 0x0D000 #define HUC_STATUS2_REG_OFFSET_NODE_1_INIT_G9 0x0D3B0 //VDBOX MFX register offsets #define GENERAL_PURPOSE_REGISTER0_LO_OFFSET_NODE_1_INIT_G9 0x12600 #define GENERAL_PURPOSE_REGISTER0_HI_OFFSET_NODE_1_INIT_G9 0x12604 #define GENERAL_PURPOSE_REGISTER4_LO_OFFSET_NODE_1_INIT_G9 0x12620 #define 
GENERAL_PURPOSE_REGISTER4_HI_OFFSET_NODE_1_INIT_G9 0x12624 #define GENERAL_PURPOSE_REGISTER11_LO_OFFSET_NODE_1_INIT_G9 0x12658 #define GENERAL_PURPOSE_REGISTER11_HI_OFFSET_NODE_1_INIT_G9 0x1265C #define GENERAL_PURPOSE_REGISTER12_LO_OFFSET_NODE_1_INIT_G9 0x12660 #define GENERAL_PURPOSE_REGISTER12_HI_OFFSET_NODE_1_INIT_G9 0x12664 #define MFC_IMAGE_STATUS_MASK_REG_OFFSET_NODE_1_INIT_G9 0x128B4 #define MFC_IMAGE_STATUS_CTRL_REG_OFFSET_NODE_1_INIT_G9 0x128B8 #define MFC_AVC_NUM_SLICES_REG_OFFSET_NODE_1_INIT_G9 0x12954 #define MFC_QP_STATUS_COUNT_OFFSET_NODE_1_INIT_G9 0x128BC #define MFX_ERROR_FLAG_REG_OFFSET_NODE_1_INIT_G9 0x12800 #define MFX_FRAME_CRC_REG_OFFSET_NODE_1_INIT_G9 0x12850 #define MFX_MB_COUNT_REG_OFFSET_NODE_1_INIT_G9 0x12868 #define MFC_BITSTREAM_BYTECOUNT_FRAME_REG_OFFSET_NODE_1_INIT_G9 0x128A0 #define MFC_BITSTREAM_SE_BITCOUNT_FRAME_REG_OFFSET_NODE_1_INIT_G9 0x128A4 #define MFC_BITSTREAM_BYTECOUNT_SLICE_REG_OFFSET_NODE_1_INIT_G9 0x128D0 #define MFC_VP8_BITSTREAM_BYTECOUNT_FRAME_REG_OFFSET_NODE_1_INIT_G9 0x12908 #define MFC_VP8_IMAGE_STATUS_MASK_REG_OFFSET_NODE_1_INIT_G9 0x12900 #define MFC_VP8_IMAGE_STATUS_CTRL_REG_OFFSET_NODE_1_INIT_G9 0x12904 #define MFX_VP8_BRC_DQ_INDEX_REG_OFFSET_NODE_1_INIT_G9 0x12910 #define MFX_VP8_BRC_LOOP_FILTER_REG_OFFSET_NODE_1_INIT_G9 0x12914 #define MFX_VP8_BRC_CUMULATIVE_DQ_INDEX01_REG_OFFSET_NODE_1_INIT_G9 0x12918 #define MFX_VP8_BRC_CUMULATIVE_DQ_INDEX23_REG_OFFSET_NODE_1_INIT_G9 0x1291C #define MFX_VP8_BRC_CUMULATIVE_LOOP_FILTER01_REG_OFFSET_NODE_1_INIT_G9 0x12920 #define MFX_VP8_BRC_CUMULATIVE_LOOP_FILTER23_REG_OFFSET_NODE_1_INIT_G9 0x12924 #define MFX_VP8_BRC_CONVERGENCE_STATUS_REG_OFFSET_NODE_1_INIT_G9 0x12928 #define MFX_LRA0_REG_OFFSET_NODE_1_INIT_G9 0x04A50 #define MFX_LRA1_REG_OFFSET_NODE_1_INIT_G9 0x04A54 #define MFX_LRA2_REG_OFFSET_NODE_1_INIT_G9 0x04A58 #define GENERAL_PURPOSE_REGISTER0_LO_OFFSET_NODE_2_INIT_G9 0x1C600 #define GENERAL_PURPOSE_REGISTER0_HI_OFFSET_NODE_2_INIT_G9 0x1C604 #define GENERAL_PURPOSE_REGISTER4_LO_OFFSET_NODE_2_INIT_G9 0x1C620 #define GENERAL_PURPOSE_REGISTER4_HI_OFFSET_NODE_2_INIT_G9 0x1C624 #define GENERAL_PURPOSE_REGISTER11_LO_OFFSET_NODE_2_INIT_G9 0x1C658 #define GENERAL_PURPOSE_REGISTER11_HI_OFFSET_NODE_2_INIT_G9 0x1C65C #define GENERAL_PURPOSE_REGISTER12_LO_OFFSET_NODE_2_INIT_G9 0x1C660 #define GENERAL_PURPOSE_REGISTER12_HI_OFFSET_NODE_2_INIT_G9 0x1C664 #define MFC_IMAGE_STATUS_MASK_REG_OFFSET_NODE_2_INIT_G9 0x1C8B4 #define MFC_IMAGE_STATUS_CTRL_REG_OFFSET_NODE_2_INIT_G9 0x1C8B8 #define MFC_AVC_NUM_SLICES_REG_OFFSET_NODE_2_INIT_G9 0x1C954 #define MFC_QP_STATUS_COUNT_OFFSET_NODE_2_INIT_G9 0x1C8BC #define MFX_ERROR_FLAG_REG_OFFSET_NODE_2_INIT_G9 0x1C800 #define MFX_FRAME_CRC_REG_OFFSET_NODE_2_INIT_G9 0x1C850 #define MFX_MB_COUNT_REG_OFFSET_NODE_2_INIT_G9 0x1C868 #define MFC_BITSTREAM_BYTECOUNT_FRAME_REG_OFFSET_NODE_2_INIT_G9 0x1C8A0 #define MFC_BITSTREAM_SE_BITCOUNT_FRAME_REG_OFFSET_NODE_2_INIT_G9 0x1C8A4 #define MFC_BITSTREAM_BYTECOUNT_SLICE_REG_OFFSET_NODE_2_INIT_G9 0x1C8D0 #define MFC_VP8_BITSTREAM_BYTECOUNT_FRAME_REG_OFFSET_NODE_2_INIT_G9 0x1C908 #define MFC_VP8_IMAGE_STATUS_MASK_REG_OFFSET_NODE_2_INIT_G9 0x1C900 #define MFC_VP8_IMAGE_STATUS_CTRL_REG_OFFSET_NODE_2_INIT_G9 0x1C904 #define MFX_VP8_BRC_DQ_INDEX_REG_OFFSET_NODE_2_INIT_G9 0x1C910 #define MFX_VP8_BRC_LOOP_FILTER_REG_OFFSET_NODE_2_INIT_G9 0x1C914 #define MFX_VP8_BRC_CUMULATIVE_DQ_INDEX01_REG_OFFSET_NODE_2_INIT_G9 0X1C918 #define MFX_VP8_BRC_CUMULATIVE_DQ_INDEX23_REG_OFFSET_NODE_2_INIT_G9 0X1C91C #define 
MFX_VP8_BRC_CUMULATIVE_LOOP_FILTER01_REG_OFFSET_NODE_2_INIT_G9 0X1C920 #define MFX_VP8_BRC_CUMULATIVE_LOOP_FILTER23_REG_OFFSET_NODE_2_INIT_G9 0X1C924 #define MFX_VP8_BRC_CONVERGENCE_STATUS_REG_OFFSET_NODE_2_INIT_G9 0X1C928 //VDBOX MFX register initial values #define MFX_LRA0_REG_OFFSET_NODE_2_INIT_G9 0 #define MFX_LRA1_REG_OFFSET_NODE_2_INIT_G9 0 #define MFX_LRA2_REG_OFFSET_NODE_2_INIT_G9 0 // VDBOX ENCODER #define MHW_CS_GENERAL_PURPOSE_REGISTER_BASE_G9 (0x2600) #define CS_GPR_REGISTER_INDEX(index) (MHW_CS_GENERAL_PURPOSE_REGISTER_BASE_G9 + 8 * (index)) // HAL #define REG_GPR_BASE_G9 MHW_CS_GENERAL_PURPOSE_REGISTER_BASE_G9 #define REG_TIMESTAMP_BASE_G9 0x2358 // RENDER #define L3_CACHE_CNTL2_REG_OFFSET_G9 0xB020 #define L3_CACHE_CNTL3_REG_OFFSET_G9 0xB024 #define L3_CACHE_SQC_REG_OFFSET_G9 0xB100 #endif //__MHW_MMIO_G9_H__
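The CS general-purpose register offsets above follow base + 8 * index for the LO word, with the HI word at +4, which is exactly what the CS_GPR_REGISTER_INDEX macro encodes. A quick Python check against the listed constants:

BASE = 0x2600  # MHW_CS_GENERAL_PURPOSE_REGISTER_BASE_G9

def gpr_lo(index):
    return BASE + 8 * index

assert gpr_lo(0) == 0x2600
assert gpr_lo(4) == 0x2620
assert gpr_lo(11) == 0x2658
assert gpr_lo(12) == 0x2660
assert gpr_lo(11) + 4 == 0x265C  # HI word of GPR11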
import { clientLogger } from './client_logger'; export function createDataCluster(server) { const config = server.config(); const ElasticsearchClientLogging = clientLogger(server); class DataClientLogging extends ElasticsearchClientLogging { tags = ['data']; logQueries = getConfig().logQueries; } function getConfig() { if (Boolean(config.get('elasticsearch.tribe.url'))) { return config.get('elasticsearch.tribe'); } return config.get('elasticsearch'); } server.plugins.elasticsearch.createCluster( 'data', { log: DataClientLogging, ...getConfig() } ); }
from uuid import uuid4 from django.db import models from users.models.user import User class Achievement(models.Model): code = models.CharField(primary_key=True, max_length=32, null=False, unique=True) name = models.CharField(max_length=64, null=False) image = models.URLField(null=False) description = models.TextField() style = models.CharField(max_length=256, default="", null=True) index = models.IntegerField(default=0) is_visible = models.BooleanField(default=True) class Meta: db_table = "achievements" ordering = ["index"] def achievement_users(self): return User.objects\ .filter(achievements__achievement_id=self.code)\ .order_by("-achievements__created_at") class UserAchievement(models.Model): id = models.UUIDField(primary_key=True, default=uuid4, editable=False) user = models.ForeignKey(User, related_name="achievements", db_index=True, on_delete=models.CASCADE) achievement = models.ForeignKey(Achievement, related_name="users", db_index=True, on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True) class Meta: db_table = "user_achievements" unique_together = [["achievement", "user"]]
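A minimal usage sketch for the models above, as one might run it in a Django shell; the achievement code and user lookup are placeholders:

from users.models.user import User

ach = Achievement.objects.get(code="old_timer")  # placeholder code
some_user = User.objects.first()

# get_or_create keeps the grant idempotent under the unique_together constraint
UserAchievement.objects.get_or_create(user=some_user, achievement=ach)

# newest holders first, via the helper defined on Achievement
holders = ach.achievement_users()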
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MoneyQuantity) on 2019-07-29. # 2019, SMART Health IT. import sys from dataclasses import dataclass from typing import ClassVar, Optional, List from .fhirabstractbase import empty_list from .element import Element @dataclass class Quantity(Element): """ A measured or measurable amount. A measured amount (or an amount that can potentially be measured). Note that measured amounts include amounts that are not precisely quantified, including amounts involving arbitrary units and floating currencies. """ resource_type: ClassVar[str] = "Quantity" value: Optional[float] = None comparator: Optional[str] = None unit: Optional[str] = None system: Optional[str] = None code: Optional[str] = None def elementProperties(self): js = super(Quantity, self).elementProperties() js.extend([ ("value", "value", float, False, None, False), ("comparator", "comparator", str, False, None, False), ("unit", "unit", str, False, None, False), ("system", "system", str, False, None, False), ("code", "code", str, False, None, False), ]) return js
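A small usage sketch for the generated dataclass above. The values are placeholders, the UCUM system URL is the standard one for units, and it assumes Element's own fields all carry defaults, as is usual in this generated model family:

q = Quantity(value=5.0, unit="mg", system="http://unitsofmeasure.org", code="mg")
# elementProperties() yields (name, json_name, type, is_list, of_many, required)
for name, json_name, _typ, _is_list, _of_many, _required in q.elementProperties():
    print(json_name, getattr(q, name, None))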
/*! \file */ #ifndef _OSM_LUA_PROCESSING_H #define _OSM_LUA_PROCESSING_H #include <vector> #include <string> #include <sstream> #include <map> #include "geom.h" #include "osm_store.h" #include "shared_data.h" #include "output_object.h" #include "shp_mem_tiles.h" #include "osm_mem_tiles.h" #include "attribute_store.h" #include "helpers.h" #include <boost/container/flat_map.hpp> // Lua extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "kaguya.hpp" // FIXME: why is this global ? extern bool verbose; /** \brief OsmLuaProcessing - converts OSM objects into OutputObjectOsmStore objects. The input objects are generated by PbfReader. The output objects are sent to OsmMemTiles for storage. This class provides a consistent interface for Lua scripts to access. */ class OsmLuaProcessing { public: // ---- initialization routines OsmLuaProcessing( OSMStore &osmStore, const class Config &configIn, class LayerDefinition &layers, const std::string &luaFile, const class ShpMemTiles &shpMemTiles, class OsmMemTiles &osmMemTiles, AttributeStore &attributeStore); ~OsmLuaProcessing(); // ---- Helpers provided for main routine // Has this object been assigned to any layers? bool empty(); // Do we have Lua routines for non-MP relations? bool canReadRelations(); bool canWriteRelations(); // Shapefile tag remapping bool canRemapShapefiles(); kaguya::LuaTable newTable(); kaguya::LuaTable remapAttributes(kaguya::LuaTable& in_table, const std::string &layerName); // ---- Data loading methods using tag_map_t = boost::container::flat_map<std::string, std::string>; // Scan non-MP relation bool scanRelation(WayID id, const tag_map_t &tags); /// \brief We are now processing a significant node void setNode(NodeID id, LatpLon node, const tag_map_t &tags); /// \brief We are now processing a way void setWay(WayID wayId, LatpLonVec const &llVec, const tag_map_t &tags); /** \brief We are now processing a relation * (note that we store relations as ways with artificial IDs, and that * we use decrementing positive IDs to give a bit more space for way IDs) */ void setRelation(int64_t relationId, WayVec const &outerWayVec, WayVec const &innerWayVec, const tag_map_t &tags, bool isNativeMP); // ---- Metadata queries called from Lua // Get the ID of the current object std::string Id() const; // Check if there's a value for a given key bool Holds(const std::string& key) const; // Get an OSM tag for a given key (or return empty string if none) std::string Find(const std::string& key) const; // ---- Spatial queries called from Lua // Find intersecting shapefile layer std::vector<std::string> FindIntersecting(const std::string &layerName); double AreaIntersecting(const std::string &layerName); bool Intersects(const std::string &layerName); template <typename GeometryT> double intersectsArea(const std::string &layerName, GeometryT &geom) const; template <typename GeometryT> std::vector<uint> intersectsQuery(const std::string &layerName, bool once, GeometryT &geom) const; std::vector<std::string> FindCovering(const std::string &layerName); bool CoveredBy(const std::string &layerName); template <typename GeometryT> std::vector<uint> coveredQuery(const std::string &layerName, bool once, GeometryT &geom) const; // Returns whether it is closed polygon bool IsClosed() const; // Returns area double Area(); double multiPolygonArea(const MultiPolygon &mp) const; // Returns length double Length(); // Return centroid lat/lon std::vector<double> Centroid(); Point calculateCentroid(); // ---- Requests from Lua to write 
this way/node to a vector tile's Layer template<class GeometryT> bool CorrectGeometry(GeometryT &geom) { #if BOOST_VERSION >= 105800 geom::validity_failure_type failure; if (isRelation && !geom::is_valid(geom,failure)) { if (verbose) std::cout << "Relation " << originalOsmID << " has " << boost_validity_error(failure) << std::endl; } else if (isWay && !geom::is_valid(geom,failure)) { if (verbose && failure!=22) std::cout << "Way " << originalOsmID << " has " << boost_validity_error(failure) << std::endl; } if (failure==boost::geometry::failure_spikes) geom::remove_spikes(geom); if (failure == boost::geometry::failure_few_points) return false; if (failure) { std::time_t start = std::time(0); make_valid(geom); if (verbose && std::time(0)-start>3) { std::cout << (isRelation ? "Relation " : "Way ") << originalOsmID << " took " << (std::time(0)-start) << " seconds to correct" << std::endl; } } #endif return true; } // Add layer void Layer(const std::string &layerName, bool area); void LayerAsCentroid(const std::string &layerName); // Set attributes in a vector tile's Attributes table void Attribute(const std::string &key, const std::string &val); void AttributeWithMinZoom(const std::string &key, const std::string &val, const char minzoom); void AttributeNumeric(const std::string &key, const float val); void AttributeNumericWithMinZoom(const std::string &key, const float val, const char minzoom); void AttributeBoolean(const std::string &key, const bool val); void AttributeBooleanWithMinZoom(const std::string &key, const bool val, const char minzoom); void MinZoom(const double z); void ZOrder(const double z); // Relation scan support kaguya::optional<int> NextRelation(); std::string FindInRelation(const std::string &key); void Accept(); // Write error if in verbose mode void ProcessingError(const std::string &errStr) { if (verbose) { std::cerr << errStr << std::endl; } } // ---- vector_layers metadata entry void setVectorLayerMetadata(const uint_least8_t layer, const std::string &key, const uint type); std::vector<std::string> GetSignificantNodeKeys(); // ---- Cached geometries creation const Linestring &linestringCached(); const Polygon &polygonCached(); const MultiLinestring &multiLinestringCached(); const MultiPolygon &multiPolygonCached(); inline AttributeStore &getAttributeStore() { return attributeStore; } private: /// Internal: clear current cached state inline void reset() { outputs.clear(); llVecPtr = nullptr; outerWayVecPtr = nullptr; innerWayVecPtr = nullptr; linestringInited = false; multiLinestringInited = false; polygonInited = false; multiPolygonInited = false; relationAccepted = false; relationSubscript = -1; } const inline Point getPoint() { return Point(lon/10000000.0,latp/10000000.0); } OSMStore &osmStore; // global OSM store kaguya::State luaState; bool supportsRemappingShapefiles; bool supportsReadingRelations; bool supportsWritingRelations; const class ShpMemTiles &shpMemTiles; class OsmMemTiles &osmMemTiles; AttributeStore &attributeStore; // key/value store uint64_t osmID; ///< ID of OSM object (relations have decrementing way IDs) int64_t originalOsmID; ///< Original OSM object ID bool isWay, isRelation, isClosed; ///< Way, node, relation? 
bool relationAccepted; // in scanRelation, whether we're using a non-MP relation std::vector<WayID> relationList; // in processWay, list of relations this way is in int relationSubscript = -1; // in processWay, position in the relation list int32_t lon,latp; ///< Node coordinates LatpLonVec const *llVecPtr; WayVec const *outerWayVecPtr; WayVec const *innerWayVecPtr; Linestring linestringCache; bool linestringInited; Polygon polygonCache; bool polygonInited; MultiLinestring multiLinestringCache; bool multiLinestringInited; MultiPolygon multiPolygonCache; bool multiPolygonInited; const class Config &config; class LayerDefinition &layers; std::deque<std::pair<OutputObjectRef, AttributeStoreRef>> outputs; ///< All output objects that have been created boost::container::flat_map<std::string, std::string> currentTags; }; #endif //_OSM_LUA_PROCESSING_H
// Allocator traits -*- C++ -*- // Copyright (C) 2011-2019 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file ext/alloc_traits.h * This file is a GNU extension to the Standard C++ Library. */ #ifndef _EXT_ALLOC_TRAITS_H #define _EXT_ALLOC_TRAITS_H 1 #pragma GCC system_header #if __cplusplus >= 201103L # include <bits/move.h> # include <bits/alloc_traits.h> #else # include <bits/allocator.h> // for __alloc_swap #endif namespace __gnu_cxx _GLIBCXX_VISIBILITY(default) { _GLIBCXX_BEGIN_NAMESPACE_VERSION /** * @brief Uniform interface to C++98 and C++11 allocators. * @ingroup allocators */ template<typename _Alloc, typename = typename _Alloc::value_type> struct __alloc_traits #if __cplusplus >= 201103L : std::allocator_traits<_Alloc> #endif { typedef _Alloc allocator_type; #if __cplusplus >= 201103L typedef std::allocator_traits<_Alloc> _Base_type; typedef typename _Base_type::value_type value_type; typedef typename _Base_type::pointer pointer; typedef typename _Base_type::const_pointer const_pointer; typedef typename _Base_type::size_type size_type; typedef typename _Base_type::difference_type difference_type; // C++11 allocators do not define reference or const_reference typedef value_type& reference; typedef const value_type& const_reference; using _Base_type::allocate; using _Base_type::deallocate; using _Base_type::construct; using _Base_type::destroy; using _Base_type::max_size; private: template<typename _Ptr> using __is_custom_pointer = std::__and_<std::is_same<pointer, _Ptr>, std::__not_<std::is_pointer<_Ptr>>>; public: // overload construct for non-standard pointer types template<typename _Ptr, typename... _Args> static typename std::enable_if<__is_custom_pointer<_Ptr>::value>::type construct(_Alloc& __a, _Ptr __p, _Args&&... 
__args) noexcept(noexcept(_Base_type::construct(__a, std::__to_address(__p), std::forward<_Args>(__args)...))) { _Base_type::construct(__a, std::__to_address(__p), std::forward<_Args>(__args)...); } // overload destroy for non-standard pointer types template<typename _Ptr> static typename std::enable_if<__is_custom_pointer<_Ptr>::value>::type destroy(_Alloc& __a, _Ptr __p) noexcept(noexcept(_Base_type::destroy(__a, std::__to_address(__p)))) { _Base_type::destroy(__a, std::__to_address(__p)); } static _Alloc _S_select_on_copy(const _Alloc& __a) { return _Base_type::select_on_container_copy_construction(__a); } static void _S_on_swap(_Alloc& __a, _Alloc& __b) { std::__alloc_on_swap(__a, __b); } static constexpr bool _S_propagate_on_copy_assign() { return _Base_type::propagate_on_container_copy_assignment::value; } static constexpr bool _S_propagate_on_move_assign() { return _Base_type::propagate_on_container_move_assignment::value; } static constexpr bool _S_propagate_on_swap() { return _Base_type::propagate_on_container_swap::value; } static constexpr bool _S_always_equal() { return _Base_type::is_always_equal::value; } static constexpr bool _S_nothrow_move() { return _S_propagate_on_move_assign() || _S_always_equal(); } template<typename _Tp> struct rebind { typedef typename _Base_type::template rebind_alloc<_Tp> other; }; #else typedef typename _Alloc::pointer pointer; typedef typename _Alloc::const_pointer const_pointer; typedef typename _Alloc::value_type value_type; typedef typename _Alloc::reference reference; typedef typename _Alloc::const_reference const_reference; typedef typename _Alloc::size_type size_type; typedef typename _Alloc::difference_type difference_type; _GLIBCXX_NODISCARD static pointer allocate(_Alloc& __a, size_type __n) { return __a.allocate(__n); } static void deallocate(_Alloc& __a, pointer __p, size_type __n) { __a.deallocate(__p, __n); } template<typename _Tp> static void construct(_Alloc& __a, pointer __p, const _Tp& __arg) { __a.construct(__p, __arg); } static void destroy(_Alloc& __a, pointer __p) { __a.destroy(__p); } static size_type max_size(const _Alloc& __a) { return __a.max_size(); } static const _Alloc& _S_select_on_copy(const _Alloc& __a) { return __a; } static void _S_on_swap(_Alloc& __a, _Alloc& __b) { // _GLIBCXX_RESOLVE_LIB_DEFECTS // 431. Swapping containers with unequal allocators. std::__alloc_swap<_Alloc>::_S_do_it(__a, __b); } template<typename _Tp> struct rebind { typedef typename _Alloc::template rebind<_Tp>::other other; }; #endif }; _GLIBCXX_END_NAMESPACE_VERSION } // namespace __gnu_cxx #endif
env={ 'version':'1.0', 'p1open':0, 'SIDEX':100, 'SIDEY':120, 'userLang':"fr", 'defaultdir':"C:\\", }
# ------------------------------------------------------------------------------- # Name: wiof_objstor_datasync.py # Purpose: sync files between WIOF S3 bucket and OpenShift pvc # # Author: HHAY, JMONTEBE, PPLATTEN # # Created: 2021-07-21 # Notes: This is a little-tested proof of concept upload/download functionality # ------------------------------------------------------------------------------- import sys import wiof_objstor_constants import os from datetime import datetime from minio import Minio from minio.error import S3Error # update to be the directory in the pod that the PVC is mounted to pvc_directory = "/etc/datasync" # pvc_directory = "J:\\Scripts\\Testing\\Sync_directory" # copy a pvc file to bucket def copy_to_bucket(minio_client, pvc_directory, file_name): print("copying to bucket: ", file_name) # upload file, never been run minio_client.fput_object( wiof_objstor_constants.OBJSTOR_BUCKET, file_name, os.path.join(pvc_directory, file_name), ) return # copy a bucket file to pvc def copy_to_pvc(minio_client, file_name, last_modified, pvc_directory): print("copying to pvc: ", file_name, pvc_directory) minio_client.fget_object( wiof_objstor_constants.OBJSTOR_BUCKET, file_name, os.path.join(pvc_directory, file_name), ) os.utime(os.path.join(pvc_directory, file_name), (last_modified, last_modified)) def main(argv): # wiof_objstor_constants.print_constants() minio_client = Minio( endpoint=wiof_objstor_constants.OBJSTOR_ENDPOINT, access_key=wiof_objstor_constants.OBJSTOR_ACCESS_KEY, secret_key=wiof_objstor_constants.OBJSTOR_SECRET_KEY, region="US", ) # create a comparison dictionary file_dict = {} # add bucket file names and last modified timestamp to comparison dictionary bucket_files = minio_client.list_objects( wiof_objstor_constants.OBJSTOR_BUCKET, recursive=True, use_url_encoding_type=False, ) for bucket_file in bucket_files: file_name = bucket_file.object_name # Debugging purposes # print(f"file_name: {file_name}") if bucket_file.last_modified is None: print(f"file {bucket_file} missing last_modified") continue file_date = datetime.timestamp(bucket_file.last_modified) if not bucket_file.is_dir: file_dict[file_name] = { "file_name": file_name, "bucket_last_modified": file_date, } # add pvc file names and last modified timestamp to comparison dictionary for dirname, dirnames, filenames in os.walk(pvc_directory): # print path to all filenames. 
for file_name in filenames: rel_dir = os.path.relpath(dirname, pvc_directory) rel_file = file_name if rel_dir != ".": rel_file = os.path.join(rel_dir, file_name) # next line only needed for Windows as it uses backslashes instead of forward slashes if os.name == "nt": rel_file = rel_file.replace("\\", "/") file_date = os.path.getmtime(os.path.join(dirname, file_name)) if rel_file in file_dict: file_dict[rel_file]["pvc_last_modified"] = file_date else: file_dict[rel_file] = { "file_name": rel_file, "pvc_last_modified": file_date, } # put newer bucket files into pvc, and newer pvc files into bucket, adjust timestamps pvc_timestamp_sync_list = [] for file_name in file_dict: file = file_dict[file_name] if "pvc_last_modified" in file and "bucket_last_modified" in file: # both directories have a copy of the file if file["pvc_last_modified"] > file["bucket_last_modified"]: # pvc has newer file copy_to_bucket(minio_client, pvc_directory, file_name) pvc_timestamp_sync_list.append(file_name) elif file["pvc_last_modified"] < file["bucket_last_modified"]: # bucket has newer file copy_to_pvc( minio_client, file_name, file["bucket_last_modified"], pvc_directory ) # no work to do if the same last modified date elif "pvc_last_modified" in file: # file is only in the pvc copy_to_bucket(minio_client, pvc_directory, file_name) pvc_timestamp_sync_list.append(file_name) else: # file is only in the bucket copy_to_pvc( minio_client, file_name, file["bucket_last_modified"], pvc_directory ) # sync the pvc timestamps up with the new bucket files, as we can't update timestamps on bucket files print("Syncing pvc modified timestamps with object storage") bucket_files = minio_client.list_objects( wiof_objstor_constants.OBJSTOR_BUCKET, recursive=True, use_url_encoding_type=False, ) for bucket_file in bucket_files: # Debugging purposes # print(f"file_name: {bucket_file}") if bucket_file.last_modified is None: print(f"file {bucket_file} missing last_modified") continue file_name = bucket_file.object_name bucket_last_modified = datetime.timestamp(bucket_file.last_modified) if file_name in pvc_timestamp_sync_list: os.utime( os.path.join(pvc_directory, file_name), (bucket_last_modified, bucket_last_modified), ) if __name__ == "__main__": try: print("Starting copy...") main(sys.argv[1:]) print("Copy finished.") except S3Error as exc: print("error occurred.", exc)
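The newer-wins comparison in main() reduces to a small pure function; a sketch with hypothetical timestamps, where None means the file is absent on that side:

def sync_action(pvc_mtime, bucket_mtime):
    if pvc_mtime is not None and bucket_mtime is not None:
        if pvc_mtime > bucket_mtime:
            return "copy_to_bucket"
        if pvc_mtime < bucket_mtime:
            return "copy_to_pvc"
        return "noop"  # same last modified date: no work to do
    return "copy_to_bucket" if pvc_mtime is not None else "copy_to_pvc"

assert sync_action(200.0, 100.0) == "copy_to_bucket"
assert sync_action(100.0, 200.0) == "copy_to_pvc"
assert sync_action(None, 100.0) == "copy_to_pvc"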
/* MIT License * * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef __Hacl_SHA2_Vec256_H #define __Hacl_SHA2_Vec256_H #if defined(__cplusplus) extern "C" { #endif #include <string.h> #include "krml/internal/types.h" #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" #include "Hacl_SHA2_Generic.h" #include "Hacl_Krmllib.h" #include "evercrypt_targetconfig.h" #include "libintvector.h" void Hacl_SHA2_Vec256_sha224_8( uint8_t *dst0, uint8_t *dst1, uint8_t *dst2, uint8_t *dst3, uint8_t *dst4, uint8_t *dst5, uint8_t *dst6, uint8_t *dst7, uint32_t input_len, uint8_t *input0, uint8_t *input1, uint8_t *input2, uint8_t *input3, uint8_t *input4, uint8_t *input5, uint8_t *input6, uint8_t *input7 ); void Hacl_SHA2_Vec256_sha256_8( uint8_t *dst0, uint8_t *dst1, uint8_t *dst2, uint8_t *dst3, uint8_t *dst4, uint8_t *dst5, uint8_t *dst6, uint8_t *dst7, uint32_t input_len, uint8_t *input0, uint8_t *input1, uint8_t *input2, uint8_t *input3, uint8_t *input4, uint8_t *input5, uint8_t *input6, uint8_t *input7 ); void Hacl_SHA2_Vec256_sha384_4( uint8_t *dst0, uint8_t *dst1, uint8_t *dst2, uint8_t *dst3, uint32_t input_len, uint8_t *input0, uint8_t *input1, uint8_t *input2, uint8_t *input3 ); void Hacl_SHA2_Vec256_sha512_4( uint8_t *dst0, uint8_t *dst1, uint8_t *dst2, uint8_t *dst3, uint32_t input_len, uint8_t *input0, uint8_t *input1, uint8_t *input2, uint8_t *input3 ); #if defined(__cplusplus) } #endif #define __Hacl_SHA2_Vec256_H_DEFINED #endif
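A plain-Python reference for the 8-way API above, assuming each output equals an ordinary SHA-256 of the corresponding input; the vectorization batches eight independent, equal-length messages rather than changing the hash itself:

import hashlib

inputs = [bytes([i]) * 64 for i in range(8)]  # 8 equal-length messages
digests = [hashlib.sha256(m).digest() for m in inputs]
assert all(len(d) == 32 for d in digests)     # one 32-byte digest per dst pointer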
var gulp = require("gulp"); var karma = require("karma").server; gulp.task("test-browsers", ["build"], function (done) { /** * This ensures that the browser tests only run on the first job, * instead of wastefully running the browser tests on every job. */ if (process.env.TRAVIS_BUILD_NUMBER) { if (process.env.TRAVIS_JOB_NUMBER === `${process.env.TRAVIS_BUILD_NUMBER}.1`) { runKarma(done); } else { done(); } } else { runKarma(done); } }); function runKarma(done) { karma.start({ configFile: __dirname + "/../.karma.conf.js" }, done); }
'''
Created on 02.05.2021

@author: michael
'''
import textract

from Asb.ScanQualityScorer import AltoPageLayout


def process(filename):
    text = textract.process(filename)
    try:
        alto_layout = AltoPageLayout(filename)
        score = alto_layout.scan_quality
    except Exception:
        # a bare except here would also swallow KeyboardInterrupt/SystemExit
        score = None
    return text, score
from django.conf.urls import url from rest_framework import routers from areas import views urlpatterns = [ # url(r"^infos/$",views.ShengFen.as_view()), # url(r"^infos/(?P<shang>\d{6})/$", views.ShiXian.as_view()), ] router=routers.DefaultRouter() router.register(r'infos',views.ChengShi,base_name='chengshi') urlpatterns +=router.urls
later.array = {};
import "../styles/global.css"; import "../styles/boostrap.min.css"; import "react-toastify/dist/ReactToastify.css"; import { StateProvider } from "../components/context/state"; export default function App({ Component, pageProps }) { const initialState = { links: [], socialLinks: [], }; const reducer = (state, action) => { // console.log("reducre"); // console.log(state); // console.log(action); switch (action.type) { case "updateLink": return { ...state, links: action.linkdata, }; case "updateSocial": return { ...state, socialLinks: action.socialdata, }; case "deleteLink": // console.log(state.links.filter((ele) => ele.id != action.id)); return { ...state, links: state.links.filter((ele) => ele.id != action.id), }; case "deleteSocial": return { ...state, socialLinks: state.socialLinks.filter((ele) => ele.id != action.id), }; default: return state; } }; return ( <StateProvider initialState={initialState} reducer={reducer}> <Component {...pageProps} /> </StateProvider> ); }
from time import time

import cv2 as cv
import torch
from PIL import Image

from evolveface.align.detector import detect_faces
from evolveface.align.visualization_utils_opencv import show_result
from evolveface.util.extract_feature_v3 import get_embeddings
from instance.calcDistance import calcDistance
from instance.load_utils import loadModel, loadFaceData


def Recognize():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load Reco Model
    backbone = loadModel("C:/Users/Bill Kerman/PycharmProjects/FaceNetCustomized/model/backbone_ir50_ms1m_epoch120.pth", device)
    backbone.eval()

    # Load Face Data
    features, labels = loadFaceData("C:/Users/Bill Kerman/PycharmProjects/FaceNetCustomized/database/AlphaFeature/", device)

    # CAP_DSHOW is an API preference, not a device index: open camera 0 with it
    cap = cv.VideoCapture(0, cv.CAP_DSHOW)
    flag = cap.isOpened()
    font = cv.FONT_HERSHEY_SIMPLEX

    while flag:
        timeStart = time()
        flag, mat = cap.read()
        if flag:
            img = Image.fromarray(cv.cvtColor(mat, cv.COLOR_BGR2RGB))
            boundingBoxes, landmarks = detect_faces(img)
            boxedMat = show_result(mat, boundingBoxes, landmarks)

            # Recognize faces
            for boundingBox in boundingBoxes:
                cropMat = mat[int(boundingBox[1]):int(boundingBox[3]), int(boundingBox[0]):int(boundingBox[2])]
                feature = get_embeddings(cropMat, backbone, device)
                dist, label = calcDistance(srcFeature=feature, features=features, labels=labels, device=device)
                if dist < 1:
                    cv.putText(mat, label + str(round(float(1 - dist) * 100.0, 2)) + "%",
                               (int(boundingBox[0]), int(boundingBox[1])), font, 0.8, (255, 255, 255), 2)

            timeEnd = time()
            elapse = round(1.0 / (timeEnd - timeStart), 2)
            cv.putText(boxedMat, str(elapse), (5, 30), font, 1.2, (255, 0, 0), 2)
            cv.imshow("Result", boxedMat)
            if cv.waitKey(5) == 27:
                break


Recognize()
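calcDistance is imported above but its implementation is not shown; a minimal sketch of what a nearest-neighbor matcher over embedding distances could look like (an assumption, not the project's actual code):

import torch

def nearest_label(src_feature, features, labels):
    # src_feature: (1, d), features: (n, d); smaller distance = better match
    dists = torch.norm(features - src_feature, dim=1)
    idx = int(torch.argmin(dists))
    return float(dists[idx]), labels[idx]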
/* * Constants and utilities for encoding channels (Visual variables) * such as 'x', 'y', 'color'. */ import * as tslib_1 from "tslib"; import { flagKeys } from './util'; export var Channel; (function (Channel) { // Facet Channel.ROW = 'row'; Channel.COLUMN = 'column'; // Position Channel.X = 'x'; Channel.Y = 'y'; Channel.X2 = 'x2'; Channel.Y2 = 'y2'; // Geo Position Channel.LATITUDE = 'latitude'; Channel.LONGITUDE = 'longitude'; Channel.LATITUDE2 = 'latitude2'; Channel.LONGITUDE2 = 'longitude2'; // Mark property with scale Channel.COLOR = 'color'; Channel.FILL = 'fill'; Channel.STROKE = 'stroke'; Channel.SHAPE = 'shape'; Channel.SIZE = 'size'; Channel.OPACITY = 'opacity'; // Non-scale channel Channel.TEXT = 'text'; Channel.ORDER = 'order'; Channel.DETAIL = 'detail'; Channel.KEY = 'key'; Channel.TOOLTIP = 'tooltip'; Channel.HREF = 'href'; })(Channel || (Channel = {})); export var X = Channel.X; export var Y = Channel.Y; export var X2 = Channel.X2; export var Y2 = Channel.Y2; export var LATITUDE = Channel.LATITUDE; export var LATITUDE2 = Channel.LATITUDE2; export var LONGITUDE = Channel.LONGITUDE; export var LONGITUDE2 = Channel.LONGITUDE2; export var ROW = Channel.ROW; export var COLUMN = Channel.COLUMN; export var SHAPE = Channel.SHAPE; export var SIZE = Channel.SIZE; export var COLOR = Channel.COLOR; export var FILL = Channel.FILL; export var STROKE = Channel.STROKE; export var TEXT = Channel.TEXT; export var DETAIL = Channel.DETAIL; export var KEY = Channel.KEY; export var ORDER = Channel.ORDER; export var OPACITY = Channel.OPACITY; export var TOOLTIP = Channel.TOOLTIP; export var HREF = Channel.HREF; export var GEOPOSITION_CHANNEL_INDEX = { longitude: 1, longitude2: 1, latitude: 1, latitude2: 1, }; export var GEOPOSITION_CHANNELS = flagKeys(GEOPOSITION_CHANNEL_INDEX); var UNIT_CHANNEL_INDEX = tslib_1.__assign({ // position x: 1, y: 1, x2: 1, y2: 1 }, GEOPOSITION_CHANNEL_INDEX, { // color color: 1, fill: 1, stroke: 1, // other non-position with scale opacity: 1, size: 1, shape: 1, // channels without scales order: 1, text: 1, detail: 1, key: 1, tooltip: 1, href: 1 }); export function isColorChannel(channel) { return channel === 'color' || channel === 'fill' || channel === 'stroke'; } var FACET_CHANNEL_INDEX = { row: 1, column: 1 }; var CHANNEL_INDEX = tslib_1.__assign({}, UNIT_CHANNEL_INDEX, FACET_CHANNEL_INDEX); export var CHANNELS = flagKeys(CHANNEL_INDEX); var _o = CHANNEL_INDEX.order, _d = CHANNEL_INDEX.detail, SINGLE_DEF_CHANNEL_INDEX = tslib_1.__rest(CHANNEL_INDEX, ["order", "detail"]); /** * Channels that cannot have an array of channelDef. * model.fieldDef, getFieldDef only work for these channels. * * (The only two channels that can have an array of channelDefs are "detail" and "order". * Since there can be multiple fieldDefs for detail and order, getFieldDef/model.fieldDef * are not applicable for them. Similarly, selection projection won't work with "detail" and "order".) 
*/ export var SINGLE_DEF_CHANNELS = flagKeys(SINGLE_DEF_CHANNEL_INDEX); export function isChannel(str) { return !!CHANNEL_INDEX[str]; } // CHANNELS without COLUMN, ROW export var UNIT_CHANNELS = flagKeys(UNIT_CHANNEL_INDEX); // NONPOSITION_CHANNELS = UNIT_CHANNELS without X, Y, X2, Y2; var _x = UNIT_CHANNEL_INDEX.x, _y = UNIT_CHANNEL_INDEX.y, // x2 and y2 share the same scale as x and y _x2 = UNIT_CHANNEL_INDEX.x2, _y2 = UNIT_CHANNEL_INDEX.y2, _latitude = UNIT_CHANNEL_INDEX.latitude, _longitude = UNIT_CHANNEL_INDEX.longitude, _latitude2 = UNIT_CHANNEL_INDEX.latitude2, _longitude2 = UNIT_CHANNEL_INDEX.longitude2, // The rest of unit channels then have scale NONPOSITION_CHANNEL_INDEX = tslib_1.__rest(UNIT_CHANNEL_INDEX, ["x", "y", "x2", "y2", "latitude", "longitude", "latitude2", "longitude2"]); export var NONPOSITION_CHANNELS = flagKeys(NONPOSITION_CHANNEL_INDEX); // POSITION_SCALE_CHANNELS = X and Y; var POSITION_SCALE_CHANNEL_INDEX = { x: 1, y: 1 }; export var POSITION_SCALE_CHANNELS = flagKeys(POSITION_SCALE_CHANNEL_INDEX); // NON_POSITION_SCALE_CHANNEL = SCALE_CHANNELS without X, Y var // x2 and y2 share the same scale as x and y // text and tooltip have format instead of scale, // href has neither format, nor scale _t = NONPOSITION_CHANNEL_INDEX.text, _tt = NONPOSITION_CHANNEL_INDEX.tooltip, _hr = NONPOSITION_CHANNEL_INDEX.href, // detail and order have no scale _dd = NONPOSITION_CHANNEL_INDEX.detail, _k = NONPOSITION_CHANNEL_INDEX.key, _oo = NONPOSITION_CHANNEL_INDEX.order, NONPOSITION_SCALE_CHANNEL_INDEX = tslib_1.__rest(NONPOSITION_CHANNEL_INDEX, ["text", "tooltip", "href", "detail", "key", "order"]); export var NONPOSITION_SCALE_CHANNELS = flagKeys(NONPOSITION_SCALE_CHANNEL_INDEX); // Declare SCALE_CHANNEL_INDEX var SCALE_CHANNEL_INDEX = tslib_1.__assign({}, POSITION_SCALE_CHANNEL_INDEX, NONPOSITION_SCALE_CHANNEL_INDEX); /** List of channels with scales */ export var SCALE_CHANNELS = flagKeys(SCALE_CHANNEL_INDEX); export function isScaleChannel(channel) { return !!SCALE_CHANNEL_INDEX[channel]; } /** * Return whether a channel supports a particular mark type. * @param channel channel name * @param mark the mark type * @return whether the mark supports the channel */ export function supportMark(channel, mark) { return mark in getSupportedMark(channel); } /** * Return a dictionary showing whether a channel supports mark type. * @param channel * @return A dictionary mapping mark types to boolean values. */ export function getSupportedMark(channel) { switch (channel) { case COLOR: case FILL: case STROKE: case DETAIL: case KEY: case TOOLTIP: case HREF: case ORDER: // TODO: revise (order might not support rect, which is not stackable?) 
case OPACITY: case ROW: case COLUMN: return { point: true, tick: true, rule: true, circle: true, square: true, bar: true, rect: true, line: true, trail: true, area: true, text: true, geoshape: true }; case X: case Y: case LATITUDE: case LONGITUDE: return { point: true, tick: true, rule: true, circle: true, square: true, bar: true, rect: true, line: true, trail: true, area: true, text: true }; case X2: case Y2: case LATITUDE2: case LONGITUDE2: return { rule: true, bar: true, rect: true, area: true }; case SIZE: return { point: true, tick: true, rule: true, circle: true, square: true, bar: true, text: true, line: true, trail: true }; case SHAPE: return { point: true, geoshape: true }; case TEXT: return { text: true }; } } export function rangeType(channel) { switch (channel) { case X: case Y: case SIZE: case OPACITY: // X2 and Y2 use X and Y scales, so they similarly have continuous range. case X2: case Y2: return 'continuous'; case ROW: case COLUMN: case SHAPE: // TEXT, TOOLTIP, and HREF have no scale but have discrete output case TEXT: case TOOLTIP: case HREF: return 'discrete'; // Color can be either continuous or discrete, depending on scale type. case COLOR: case FILL: case STROKE: return 'flexible'; // No scale, no range type. case LATITUDE: case LONGITUDE: case LATITUDE2: case LONGITUDE2: case DETAIL: case KEY: case ORDER: return undefined; } /* istanbul ignore next: should never reach here. */ throw new Error('rangeType not implemented for ' + channel); } //# sourceMappingURL=channel.js.map
import axios from 'axios';

export default store => next => action => {
    const {dispatch, getState} = store;

    /* If the dispatched action is a function, don't process it here;
       call it directly (thunk-style) and return */
    if (typeof action === 'function') {
        action(dispatch, getState);
        return;
    }

    /* Destructure the action */
    const {
        promise,
        types,
        afterSuccess,
        ...rest
    } = action;

    /* No promise means this action isn't meant to send an ajax request,
       so just pass it straight on to the next middleware */
    if (!action.promise) {
        return next(action);
    }

    /* Unpack the action types */
    const [REQUEST, SUCCESS, FAILURE] = types;

    /* Dispatch an action when the request starts */
    next({
        ...rest,
        type: REQUEST
    });

    /* Handler for a successful request */
    const onFulfilled = result => {
        next({
            ...rest,
            result,
            type: SUCCESS
        });
        if (afterSuccess) {
            afterSuccess(dispatch, getState, result);
        }
    };

    /* Handler for a failed request */
    const onRejected = error => {
        next({
            ...rest,
            error,
            type: FAILURE
        });
    };

    return promise(axios).then(onFulfilled, onRejected).catch(error => {
        console.error('MIDDLEWARE ERROR:', error);
        onRejected(error);
    });
}
# ============================================================================== # Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE.md file in the project root # for full license information. # ============================================================================== # TODO
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from oneflow.compatible import single_client as flow from oneflow.compatible.single_client.python.oneflow_export import ( oneflow_export, experimental_api, ) from oneflow.compatible.single_client.python.nn.module import Module from oneflow.compatible.single_client.python.framework.tensor import register_tensor_op class Abs(Module): def __init__(self): super().__init__() def forward(self, x): return flow.F.abs(x) @oneflow_export("abs") @register_tensor_op("abs") @experimental_api def abs_op(x): r"""Return the absolute value of each element in input tensor:math:`y = |x|` element-wise. Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow.compatible.single_client.experimental as flow >>> import numpy as np >>> flow.enable_eager_execution() >>> x = flow.Tensor(np.array([-1, 2, -3, 4]).astype(np.float32)) >>> flow.abs(x) tensor([1., 2., 3., 4.], dtype=oneflow.float32) """ return Abs()(x) if __name__ == "__main__": import doctest doctest.testmod(raise_on_error=True)
/**
 * Auto-generated action file for "SubscriptionsManagementClient (azsadmin-DirectoryTenant)" API.
 *
 * Generated at: 2019-06-11T15:13:34.464Z
 * Mass generator version: 1.1.0
 *
 * flowground :- Telekom iPaaS / azure-com-azsadmin-directory-tenant-connector
 * Copyright © 2019, Deutsche Telekom AG
 * contact: flowground@telekom.de
 *
 * All files of this connector are licensed under the Apache 2.0 License. For details
 * see the file LICENSE on the toplevel directory.
 *
 *
 * Operation: 'DirectoryTenants_List'
 * Endpoint Path: '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Subscriptions.Admin/directoryTenants'
 * Method: 'get'
 *
 */

const Swagger = require('swagger-client');
const processWrapper = require('../services/process-wrapper');
const spec = require('../spec.json');

// this wrapper offers a simplified emitData(data) function
module.exports.process = processWrapper(processAction);

// parameter names for this call
const PARAMETERS = [
    "subscriptionId",
    "resourceGroupName",
    "api-version"
];

// mappings from connector field names to API field names
const FIELD_MAP = {
    "subscriptionId": "subscriptionId",
    "resourceGroupName": "resourceGroupName",
    "api_version": "api-version"
};

function processAction(msg, cfg) {
    var isVerbose = process.env.debug || cfg.verbose;

    if (isVerbose) {
        console.log(`---MSG: ${JSON.stringify(msg)}`);
        console.log(`---CFG: ${JSON.stringify(cfg)}`);
        console.log(`---ENV: ${JSON.stringify(process.env)}`);
    }

    const contentType = undefined;

    const body = msg.body;
    mapFieldNames(body);

    let parameters = {};
    for(let param of PARAMETERS) {
        parameters[param] = body[param];
    }

    // credentials for this operation
    let securities = {};
    securities['azure_auth'] = {token: cfg['auth_azure_auth']};

    let callParams = {
        spec: spec,
        operationId: 'DirectoryTenants_List',
        pathName: '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Subscriptions.Admin/directoryTenants',
        method: 'get',
        parameters: parameters,
        requestContentType: contentType,
        requestBody: body.requestBody,
        securities: {authorized: securities},
        server: spec.servers[cfg.server] || cfg.otherServer,
    };

    if (isVerbose) {
        let out = Object.assign({}, callParams);
        out.spec = '[omitted]';
        console.log(`--SWAGGER CALL: ${JSON.stringify(out)}`);
    }

    // Call operation via Swagger client
    return Swagger.execute(callParams).then(data => {
        // emit a single message with data
        this.emitData(data);

        // if the response contains an array of entities, you can emit them one by one:
        // data.obj.someItems.forEach((item) => {
        //     this.emitData(item);
        // }
    });
}

function mapFieldNames(obj) {
    if(Array.isArray(obj)) {
        obj.forEach(mapFieldNames);
    } else if(typeof obj === 'object' && obj) {
        Object.keys(obj).forEach(key => {
            mapFieldNames(obj[key]);

            let goodKey = FIELD_MAP[key];
            if(goodKey && goodKey !== key) {
                obj[goodKey] = obj[key];
                delete obj[key];
            }
        });
    }
}
from decouple import config PORT = 5000 TOKEN = config('PERSONAL_BOT_TOKEN', default='token') VERIFICATION_TOKEN = config('APP_VERIFICATION_TOKEN', default='token') COMMUNITY_CHANNEL = config('PERSONAL_PRIVATE_CHANNEL', default='community_channel') MENTORS_INTERNAL_CHANNEL = config('PERSONAL_PRIVATE_CHANNEL', default='mentor_channel') AIRTABLE_BASE_KEY = config('DEV_AIRTABLE_BASE_KEY', default='fake_airtable_base') AIRTABLE_API_KEY = config('DEV_AIRTABLE_TOKEN', default='fake_airtable_key') AIRTABLE_TABLE_NAME = 'Mentor Request' DB_USERNAME = config('DEV_DB_USERNAME', default='') DB_PASSWORD = config('DEV_DB_PASSWORD', default='') DB_DIALECT = config('DEV_DB_DIALECT', default='sqlite') DB_ADDR = config('DEV_DB_ADDR', default='dev.db') DB_NAME = config('DEV_DB_NAME', default='') PA_SSH_USERNAME = config('PA_SSH_USERNAME', default=None) PA_SSH_PASSWORD = config('PA_SSH_PASSWORD', default=None) PA_SSH_URL = config('PA_SSH_URL', default=None) PA_SSH_REMOTE_BIND_ADDR = config('PA_PG_IP_ADDR', cast=str, default=None) PA_SSH_REMOTE_BIND_PORT = config('PA_PG_PORT', cast=int, default=0)
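The DB_* settings above are dialect/credential fragments rather than a ready connection string. A hedged sketch of assembling them into an SQLAlchemy-style URL; build_db_url is a hypothetical helper, not part of this module:

# Hedged sketch, assuming an SQLAlchemy-style URL is what consumers expect.
def build_db_url(dialect, username, password, addr, name):
    if dialect == "sqlite":
        return "sqlite:///" + addr  # e.g. sqlite:///dev.db
    auth = "{}:{}@".format(username, password) if username else ""
    return "{}://{}{}/{}".format(dialect, auth, addr, name)

print(build_db_url("sqlite", "", "", "dev.db", ""))
print(build_db_url("postgresql", "user", "pw", "localhost:5432", "mentors"))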
import React, {Component} from 'react'; class Text extends Component { constructor(props){ super(props); this.state = {value: props.value}; this.change = this.change.bind(this); } change(event) { event.preventDefault(); var value = event.target.value; this.setState({value}, () => this.props.onChange(this.state.value)); } render(){ return( <div> <input type="number" min="1" max="1000" className="form-control" value={this.state.value} onChange={this.change}/> </div> ); } } export default Text;
from django.contrib import admin from .models import * admin.site.register(Device) admin.site.register(Sensor) admin.site.register(DataField)
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .cql3handling import simple_cql_types class CQLHelpTopics(object): def get_help_topics(self): return [ t[5:] for t in dir(self) if t.startswith('help_') ] def print_help_topic(self, topic): getattr(self, 'help_' + topic.lower())() def help_types(self): print "\n CQL types recognized by this version of cqlsh:\n" for t in simple_cql_types: print ' ' + t print """ For information on the various recognizable input formats for these types, or on controlling the formatting of cqlsh query output, see one of the following topics: HELP TIMESTAMP_INPUT HELP BLOB_INPUT HELP UUID_INPUT HELP BOOLEAN_INPUT HELP TEXT_OUTPUT HELP TIMESTAMP_OUTPUT """ def help_timestamp_input(self): print """ Timestamp input CQL supports any of the following ISO 8601 formats for timestamp specification: yyyy-mm-dd HH:mm yyyy-mm-dd HH:mm:ss yyyy-mm-dd HH:mmZ yyyy-mm-dd HH:mm:ssZ yyyy-mm-dd'T'HH:mm yyyy-mm-dd'T'HH:mmZ yyyy-mm-dd'T'HH:mm:ss yyyy-mm-dd'T'HH:mm:ssZ yyyy-mm-dd yyyy-mm-ddZ The Z in these formats refers to an RFC-822 4-digit time zone, expressing the time zone's difference from UTC. For example, a timestamp in Pacific Standard Time might be given thus: 2012-01-20 16:14:12-0800 If no time zone is supplied, the current time zone for the Cassandra server node will be used. """ def help_blob_input(self): print """ Blob input CQL blob data must be specified in a string literal as hexadecimal data. Example: to store the ASCII values for the characters in the string "CQL", use '43514c'. """ def help_uuid_input(self): print """ UUID input UUIDs may be specified in CQL using 32 hexadecimal characters, split up using dashes in the standard UUID format: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX """ def help_boolean_input(self): print """ Boolean input CQL accepts the strings 'true' and 'false' (case insensitive) as input for boolean types. """ def help_timestamp_output(self): print """ Timestamp output Cqlsh will display timestamps in the following format by default: yyyy-mm-dd HH:mm:ssZ which is a format acceptable as CQL timestamp input as well. The output format can be changed by setting the 'time_format' property in the [ui] section of the .cqlshrc file. """ def help_text_output(self): print """ Textual output When control characters, or other characters which can't be encoded in your current locale, are found in values of 'text' or 'ascii' types, they will be shown as backslash escapes. If color is enabled, any such backslash escapes will be shown in a different color from the surrounding text. Unicode code points in your data will be output intact, if the encoding for your locale is capable of decoding them.
If you prefer that non-ascii characters be shown with Python-style "\\uABCD" escape sequences, invoke cqlsh with an ASCII locale (for example, by setting the $LANG environment variable to "C"). """ help_ascii_output = help_text_output def help_create_index(self): print """ CREATE INDEX [<indexname>] ON <cfname> ( <colname> ); A CREATE INDEX statement is used to create a new, automatic secondary index on the given CQL table, for the named column. A name for the index itself can be specified before the ON keyword, if desired. A single column name must be specified inside the parentheses. It is not necessary for the column to exist on any current rows (Cassandra is schema-optional), but the column must already have a type (specified during the CREATE TABLE, or added afterwards with ALTER TABLE). """ def help_drop(self): print """ There are different variants of DROP. For more information, see one of the following: HELP DROP_KEYSPACE; HELP DROP_TABLE; HELP DROP_INDEX; """ def help_drop_keyspace(self): print """ DROP KEYSPACE <keyspacename>; A DROP KEYSPACE statement results in the immediate, irreversible removal of a keyspace, including all column families in it, and all data contained in those column families. """ def help_drop_table(self): print """ DROP TABLE <tablename>; A DROP TABLE statement results in the immediate, irreversible removal of a CQL table and the underlying column family, including all data contained in it. """ help_drop_columnfamily = help_drop_table def help_drop_index(self): print """ DROP INDEX <indexname>; A DROP INDEX statement is used to drop an existing secondary index. """ def help_truncate(self): print """ TRUNCATE <tablename>; TRUNCATE accepts a single argument for the table name, and permanently removes all data from it. """ def help_create(self): print """ There are different variants of CREATE. For more information, see one of the following: HELP CREATE_KEYSPACE; HELP CREATE_TABLE; HELP CREATE_INDEX; """ def help_use(self): print """ USE <keyspacename>; Tells cqlsh and the connected Cassandra instance that you will be working in the given keyspace. All subsequent operations on tables or indexes will be in the context of this keyspace, unless otherwise specified, until another USE command is issued or the connection terminates. As always, when a keyspace name does not work as a normal identifier or number, it can be quoted using single quotes (CQL 2) or double quotes (CQL 3). """ def help_create_table(self): print """ CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [, <colname> <type> [, ...]] ) [WITH <optionname> = <val> [AND <optionname> = <val> [...]]]; CREATE TABLE statements create a new CQL table under the current keyspace. Valid table names are strings of alphanumeric characters and underscores, which begin with a letter. Each table requires a primary key, which will correspond to the underlying columnfamily key and key validator. It's important to note that the key type you use must be compatible with the partitioner in use. For example, OrderPreservingPartitioner and CollatingOrderPreservingPartitioner both require UTF-8 keys. In cql3 mode, a table can have multiple columns composing the primary key (see HELP COMPOUND_PRIMARY_KEYS). 
For more information, see one of the following: HELP CREATE_TABLE_TYPES; HELP CREATE_TABLE_OPTIONS; """ help_create_columnfamily = help_create_table def help_compound_primary_keys(self): print """ CREATE TABLE <cfname> ( <partition_key> <type>, <clustering_key1> <type>, <clustering_key2> <type> [, ...], PRIMARY KEY (<partition_key>, <clustering_key1>, <clustering_key2>)); CREATE TABLE allows a primary key composed of multiple columns. When this is the case, specify the columns that take part in the compound key after all columns have been specified. , PRIMARY KEY( <key1>, <key2>, ... ) The partitioning key itself can be a compound key, in which case the first element of the PRIMARY KEY phrase should be parenthesized, as PRIMARY KEY ((<partition_key_part1>, <partition_key_part2>), <clustering_key>) """ def help_create_table_types(self): print """ CREATE TABLE: Specifying column types CREATE ... (KEY <type> PRIMARY KEY, othercol <type>) ... It is possible to assign columns a type during table creation. Columns configured with a type are validated accordingly when a write occurs, and intelligent CQL drivers and interfaces will be able to decode the column values correctly when receiving them. Column types are specified as a parenthesized, comma-separated list of column term and type pairs. See HELP TYPES; for the list of recognized types. """ help_create_columnfamily_types = help_create_table_types def help_create_table_options(self): print """ CREATE TABLE: Specifying columnfamily options CREATE TABLE blah (...) WITH optionname = val AND otheroption = val2; A number of optional keyword arguments can be supplied to control the configuration of a new CQL table, such as the size of the associated row and key caches for the underlying Cassandra columnfamily. Consult your CQL reference for the complete list of options and possible values. """ help_create_columnfamily_options = help_create_table_options def help_alter_alter(self): print """ ALTER TABLE: altering existing typed columns ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid; ALTER TABLE ... ALTER changes the expected storage type for a column. The column must already have a type in the column family metadata. The column may or may not already exist in current rows -- but be aware that no validation of existing data is done. The bytes stored in values for that column will remain unchanged, and if existing data is not deserializable according to the new type, this may cause your CQL driver or interface to report errors. """ def help_alter_add(self): print """ ALTER TABLE: adding a typed column ALTER TABLE addamsFamily ADD gravesite varchar; The ALTER TABLE ... ADD variant adds a typed column to a column family. The column must not already have a type in the column family metadata. See the warnings on HELP ALTER_ALTER regarding the lack of validation of existing data; they apply here as well. """ def help_alter_drop(self): print """ ALTER TABLE: dropping a typed column ALTER TABLE addamsFamily DROP gender; An ALTER TABLE ... DROP statement removes the type of a column from the column family metadata. Note that this does _not_ remove the column from current rows; it just removes the metadata saying that the bytes stored under that column are expected to be deserializable according to a certain type. """ def help_alter_with(self): print """ ALTER TABLE: changing column family properties ALTER TABLE addamsFamily WITH comment = 'Glad to be here!' AND read_repair_chance = 0.2; An ALTER TABLE ...
WITH statement makes adjustments to the table properties, as defined when the table was created (see HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for information about the supported parameter names and values). """ def help_delete_columns(self): print """ DELETE: specifying columns DELETE col1, col2, col3 FROM ... Following the DELETE keyword is an optional comma-delimited list of column name terms. When no column names are given, the remove applies to the entire row(s) matched by the WHERE clause. When column names do not parse as valid CQL identifiers, they can be quoted in single quotes (CQL 2) or double quotes (CQL 3). """ def help_delete_where(self): print """ DELETE: specifying rows DELETE ... WHERE keycol = 'some_key_value'; DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2'; DELETE ... WHERE keycol IN (key1, key2); The WHERE clause is used to determine to which row(s) a DELETE applies. The first form allows the specification of a precise row by specifying a particular primary key value (if the primary key has multiple columns, values for each must be given). The second form allows a list of key values to be specified using the IN operator and a parenthesized list of comma-delimited key values. """ def help_update_set(self): print """ UPDATE: Specifying Columns and Row UPDATE ... SET name1 = value1, name2 = value2 WHERE <key> = keyname; UPDATE ... SET name1 = value1, name2 = value2 WHERE <key> IN ('<key1>', '<key2>', ...) Rows are created or updated by supplying column names and values in term assignment format. Multiple columns can be set by separating the name/value pairs using commas. """ def help_update_counters(self): print """ UPDATE: Updating Counter Columns UPDATE ... SET name1 = name1 + <value> ... UPDATE ... SET name1 = name1 - <value> ... Counter columns can be incremented or decremented by an arbitrary numeric value through the assignment of an expression that adds or subtracts the value. """ def help_update_where(self): print """ UPDATE: Selecting rows to update UPDATE ... WHERE <keyname> = <keyval>; UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...); UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>; Each update statement requires a precise set of keys to be specified using a WHERE clause. If the table's primary key consists of multiple columns, an explicit value must be given for each for the UPDATE statement to make sense. """ def help_select_table(self): print """ SELECT: Specifying Table SELECT ... FROM [<keyspace>.]<tablename> ... The FROM clause is used to specify the CQL table applicable to a SELECT query. The keyspace in which the table exists can optionally be specified along with the table name, separated by a dot (.). This will not change the current keyspace of the session (see HELP USE). """ help_select_columnfamily = help_select_table def help_select_where(self): print """ SELECT: Filtering rows SELECT ... WHERE <key> = keyname AND name1 = value1 SELECT ... WHERE <key> >= startkey and <key> <= endkey AND name1 = value1 SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...) The WHERE clause provides for filtering the rows that appear in results. The clause can filter on a key name, or range of keys, and in the case of indexed columns, on column values. Key filters are specified using the KEY keyword or key alias name, a relational operator (one of =, >, >=, <, and <=), and a term value. When terms appear on both sides of a relational operator it is assumed the filter applies to an indexed column.
With column index filters, the term on the left of the operator is the name, the term on the right is the value to filter _on_. Note: The greater-than and less-than operators (> and <) result in key ranges that are inclusive of the terms. There is no supported notion of "strictly" greater-than or less-than; these operators are merely supported as aliases to >= and <=. """ def help_select_limit(self): print """ SELECT: Limiting results SELECT ... WHERE <clause> [LIMIT n] ... Limiting the number of rows returned can be achieved by adding the LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left unset. """ class CQL3HelpTopics(CQLHelpTopics): def help_create_keyspace(self): print """ CREATE KEYSPACE <ksname> WITH replication = {'class':'<strategy>' [,'<option>':<val>]}; The CREATE KEYSPACE statement creates a new top-level namespace (aka "keyspace"). Valid names are any string constructed of alphanumeric characters and underscores. Names which do not work as valid identifiers or integers should be quoted as string literals. Properties such as replication strategy and count are specified during creation as key-value pairs in the 'replication' map: class [required]: The name of the replication strategy class which should be used for the new keyspace. Some often-used classes are SimpleStrategy and NetworkTopologyStrategy. other options [optional]: Most strategies require additional arguments which can be supplied as key-value pairs in the 'replication' map. Examples: To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1" with a value of "1" and "DC2" with a value of "2" you would use the following statement: CREATE KEYSPACE <ksname> WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2}; To create a keyspace with SimpleStrategy and "replication_factor" option with a value of "3" you would use this statement: CREATE KEYSPACE <ksname> WITH replication = {'class':'SimpleStrategy', 'replication_factor':3}; """ def help_begin(self): print """ BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>] <insert or update or delete statement> ; [ <another insert or update or delete statement ; [...]] APPLY BATCH; BATCH supports setting a client-supplied optional global timestamp which will be used for each of the operations included in the batch. Only data modification statements (specifically, UPDATE, INSERT, and DELETE) are allowed in a BATCH statement. BATCH is _not_ an analogue for SQL transactions. _NOTE: Counter mutations are allowed only within COUNTER batches._ _NOTE: While there are no isolation guarantees, UPDATE queries are atomic within a given record._ """ help_apply = help_begin def help_select(self): print """ SELECT <selectExpr> FROM [<keyspace>.]<table> [WHERE <clause>] [ORDER BY <colname> [DESC]] [LIMIT m]; SELECT is used to read one or more records from a CQL table. It returns a set of rows matching the selection criteria specified. For more information, see one of the following: HELP SELECT_EXPR HELP SELECT_TABLE HELP SELECT_WHERE HELP SELECT_LIMIT """ def help_delete(self): print """ DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename> [USING TIMESTAMP <timestamp>] WHERE <keyname> = <keyvalue>; A DELETE is used to perform the removal of one or more columns from one or more rows. Each DELETE statement requires a precise set of row keys to be specified using a WHERE clause and the KEY keyword or key alias. 
For more information, see one of the following: HELP DELETE_USING HELP DELETE_COLUMNS HELP DELETE_WHERE """ def help_delete_using(self): print """ DELETE: the USING clause DELETE ... USING TIMESTAMP <timestamp>; <timestamp> defines the optional timestamp for the new tombstone record. It must be an integer. Cassandra timestamps are generally specified using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). """ def help_update(self): print """ UPDATE [<keyspace>.]<columnFamily> [USING [TIMESTAMP <timestamp>] [AND TTL <timeToLive>]] SET name1 = value1, name2 = value2 WHERE <keycol> = keyval [IF EXISTS]; An UPDATE is used to write one or more columns to a record in a table. No results are returned. The record's primary key must be completely and uniquely specified; that is, if the primary key includes multiple columns, all must be explicitly given in the WHERE clause. Statements begin with the UPDATE keyword followed by the name of the table to be updated. For more information, see one of the following: HELP UPDATE_USING HELP UPDATE_SET HELP UPDATE_COUNTERS HELP UPDATE_WHERE """ def help_update_using(self): print """ UPDATE: the USING clause UPDATE ... USING TIMESTAMP <timestamp>; UPDATE ... USING TTL <timeToLive>; The USING clause allows setting of certain query and data parameters. If multiple parameters need to be set, these may be joined using AND. Example: UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603 <timestamp> defines the optional timestamp for the new column value(s). It must be an integer. Cassandra timestamps are generally specified using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). <timeToLive> defines the optional time to live (TTL) in seconds for the new column value(s). It must be an integer. """ def help_insert(self): print """ INSERT INTO [<keyspace>.]<tablename> ( <colname1>, <colname2> [, <colname3> [, ...]] ) VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] ) [USING TIMESTAMP <timestamp>] [AND TTL <timeToLive>]; An INSERT is used to write one or more columns to a record in a CQL table. No results are returned. Values for all component columns in the table's primary key must be given. Also, there must be at least one non-primary-key column specified (Cassandra rows are not considered to exist with only a key and no associated columns). Unlike in SQL, the semantics of INSERT and UPDATE are identical. In either case a record is created if none existed before, and updated when it does. For more information, see one of the following: HELP UPDATE HELP UPDATE_USING """ def help_select_expr(self): print """ SELECT: Specifying Columns SELECT name1, name2, name3 FROM ... SELECT COUNT(*) FROM ... The SELECT expression determines which columns will appear in the results and takes the form of a comma separated list of names. It is worth noting that unlike the projection in a SQL SELECT, there is no guarantee that the results will contain all of the columns specified. This is because Cassandra is schema-less and there are no guarantees that a given column exists. When the COUNT aggregate function is specified as a column to fetch, a single row will be returned, with a single column named "count" whose value is the number of rows from the pre-aggregation resultset. Currently, COUNT is the only function supported by CQL. """ def help_alter_drop(self): print """ ALTER TABLE: dropping a typed column ALTER TABLE addamsFamily DROP gender; An ALTER TABLE ... DROP statement removes the type of a column from the column family metadata.
Dropped columns will immediately become unavailable in the queries and will not be included in compacted sstables in the future. If a column is readded, queries won't return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT readd previously dropped columns. Columns can't be dropped from tables defined with COMPACT STORAGE. """ def help_create(self): super(CQL3HelpTopics, self).help_create() print " HELP CREATE_USER;\n" def help_alter(self): print """ ALTER TABLE <tablename> ALTER <columnname> TYPE <type>; ALTER TABLE <tablename> ADD <columnname> <type>; ALTER TABLE <tablename> RENAME <columnname> TO <columnname> [AND <columnname> TO <columnname>] ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]]; An ALTER statement is used to manipulate table metadata. It allows you to add new typed columns, drop existing columns, change the data storage type of existing columns, or change table properties. No results are returned. See one of the following for more information: HELP ALTER_ALTER; HELP ALTER_ADD; HELP ALTER_DROP; HELP ALTER_RENAME; HELP ALTER_WITH; """ def help_alter_rename(self): print """ ALTER TABLE: renaming a column ALTER TABLE <tablename> RENAME <columnname> TO <columnname> [AND <columnname> TO <columnname>] The ALTER TABLE ... RENAME variant renames a typed column in a column family. """ def help_drop(self): super(CQL3HelpTopics, self).help_drop() print " HELP DROP_USER;\n" def help_list(self): print """ There are different variants of LIST. For more information, see one of the following: HELP LIST_USERS; HELP LIST_PERMISSIONS; """ def help_create_user(self): print """ CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER]; CREATE USER creates a new Cassandra user account. Only superusers can issue CREATE USER requests. To create a superuser account use SUPERUSER option (NOSUPERUSER is the default). WITH PASSWORD clause should only be used with password-based authenticators, e.g. PasswordAuthenticator, SimpleAuthenticator. """ def help_alter_user(self): print """ ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER]; Use ALTER USER to change a user's superuser status and/or password (only with password-based authenticators). Superusers can change a user's password or superuser status (except their own). Users cannot change their own superuser status. Ordinary users can only change their password (if the configured authenticator is password-based). """ def help_drop_user(self): print """ DROP USER <username>; DROP USER removes an existing user. You have to be logged in as a superuser to issue a DROP USER statement. A user cannot drop themselves. """ def help_list_users(self): print """ LIST USERS; List existing users and their superuser status. """ def help_grant(self): print """ GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS]) ON ALL KEYSPACES | KEYSPACE <keyspace> | [TABLE] [<keyspace>.]<table> TO <username> Grant the specified permission (or all permissions) on a resource to a user. To be able to grant a permission on some resource you have to have that permission yourself and also AUTHORIZE permission on it, or on one of its parent resources. See HELP PERMISSIONS for more info on the available permissions. 
""" def help_revoke(self): print """ REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS]) ON ALL KEYSPACES | KEYSPACE <keyspace> | [TABLE] [<keyspace>.]<table> FROM <username> Revokes the specified permission (or all permissions) on a resource from a user. To be able to revoke a permission on some resource you have to have that permission yourself and also AUTHORIZE permission on it, or on one of its parent resources. See HELP PERMISSIONS for more info on the available permissions. """ def help_list_permissions(self): print """ LIST (<permission> [PERMISSION] | ALL [PERMISSIONS]) [ON ALL KEYSPACES | KEYSPACE <keyspace> | [TABLE] [<keyspace>.]<table>] [OF <username>] [NORECURSIVE] Omitting ON <resource> part will list permissions on ALL KEYSPACES, every keyspace and table. Omitting OF <username> part will list permissions of all users. Omitting NORECURSIVE specifier will list permissions of the resource and all its parents (table, table's keyspace and ALL KEYSPACES). See HELP PERMISSIONS for more info on the available permissions. """ def help_permissions(self): print """ PERMISSIONS Cassandra has 6 permissions: ALTER: required for ALTER KEYSPCE, ALTER TABLE, CREATE INDEX, DROP INDEX AUTHORIZE: required for GRANT, REVOKE CREATE: required for CREATE KEYSPACE, CREATE TABLE DROP: required for DROP KEYSPACE, DROP TABLE MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE SELECT: required for SELECT """
from __future__ import print_function from itertools import count from composes.utils import io_utils from composes.composition.weighted_additive import WeightedAdditive from composes.semantic_space.space import Space stacked_space = io_utils.load("gastrovec.ppmi.svd20.pkl") WA = WeightedAdditive(alpha = 1, beta = 1) recipes = {} max_size = 0 with open("../corpus_collection/composition_counts.txt") as f: for line in f: words = line.split() recipes[words[0]] = words[1:] if len(words)-1 > max_size: max_size = len(words)-1 last_space = None number = count() for size in xrange(max_size,1,-1): relevant = (rec for rec in recipes if len(recipes[rec]) == size) print(size) composition = [] for recipe in relevant: old = recipes[recipe] if size == 2: name = recipe else: name = "comp_" + str(next(number)) if old[-2] in stacked_space.id2row: composition.append((old[-1],old[-2],name)) recipes[recipe].pop(-1) recipes[recipe].pop(-1) recipes[recipe].append(name) else: recipes[recipe].pop(-2) if composition: last_space = WA.compose(composition, stacked_space) if size != 2: stacked_space = Space.vstack(stacked_space, last_space) io_utils.save(last_space, "recicomp.pkl")
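The loop above folds each recipe two ingredients at a time: every pass composes the last two items into an intermediate comp_N vector and pushes the new name back, so a recipe of n items needs n-1 compositions. A hedged numpy sketch of that reduction, with plain vector addition standing in for WeightedAdditive(alpha=1, beta=1):

# Hedged sketch of the pairwise reduction; additive composition only.
import numpy as np

vectors = {"flour": np.array([1.0, 0.0]),
           "water": np.array([0.0, 1.0]),
           "salt": np.array([0.5, 0.5])}
recipe = ["flour", "water", "salt"]

counter = 0
while len(recipe) > 1:
    a, b = recipe.pop(-1), recipe.pop(-1)   # last two items, as in the script
    name = "comp_%d" % counter
    counter += 1
    vectors[name] = vectors[a] + vectors[b]  # stand-in for WA.compose
    recipe.append(name)

print(recipe[0], vectors[recipe[0]])  # comp_1 [1.5 1.5]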
import React from 'react' import { Link } from 'react-router-dom' import AddHacker from './AddHacker'; const Hackers = ({ hackers, addHacker, deleteHacker }) => { const hackerList = hackers.map(hacker => { const id = hacker.url.slice(0, -1).split('/').pop(); return ( <div className="hacker card" key={hacker.url}> <div className="card-content"> <span className="card-title"> <Link to={'/' + id} hacker={hacker}>{ hacker.name }</Link> </span> <p>Skills: { hacker.skill }</p> <div>Age: { hacker.age }</div> <button onClick={() => deleteHacker(id)}>Delete</button> </div> </div> ) }); return ( <div className="post"> <div className="hacker-list"> { hackerList } </div> <AddHacker addHacker={addHacker}/> </div> ); } export default Hackers
/* * 2007-2013 PrestaShop * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@prestashop.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade PrestaShop to newer * versions in the future. If you wish to customize PrestaShop for your * needs please refer to http://www.prestashop.com for more information. * * @author PrestaShop SA <contact@prestashop.com> * @copyright 2007-2013 PrestaShop SA * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) * International Registered Trademark & Property of PrestaShop SA */ function addProductRuleGroup() { $('#product_rule_group_table').show(); product_rule_groups_counter += 1; product_rule_counters[product_rule_groups_counter] = 0; $.get( 'ajax-tab.php', {controller:'AdminCartRules',token:currentToken,newProductRuleGroup:1,product_rule_group_id:product_rule_groups_counter}, function(content) { if (content != "") $('#product_rule_group_table').append(content); } ); } function removeProductRuleGroup(id) { $('#product_rule_group_' + id + '_tr').remove(); } function addProductRule(product_rule_group_id) { product_rule_counters[product_rule_group_id] += 1; if ($('#product_rule_type_' + product_rule_group_id).val() != 0) $.get( 'ajax-tab.php', {controller:'AdminCartRules',token:currentToken,newProductRule:1,product_rule_type:$('#product_rule_type_' + product_rule_group_id).val(),product_rule_group_id:product_rule_group_id,product_rule_id:product_rule_counters[product_rule_group_id]}, function(content) { if (content != "") $('#product_rule_table_' + product_rule_group_id).append(content); } ); } function removeProductRule(product_rule_group_id, product_rule_id) { $('#product_rule_' + product_rule_group_id + '_' + product_rule_id + '_tr').remove(); } function toggleCartRuleFilter(id) { if ($(id).prop('checked')) $('#' + $(id).attr('id') + '_div').show(400); else $('#' + $(id).attr('id') + '_div').hide(200); } function removeCartRuleOption(item) { var id = $(item).attr('id').replace('_remove', ''); $('#' + id + '_2 option:selected').remove().appendTo('#' + id + '_1'); } function addCartRuleOption(item) { var id = $(item).attr('id').replace('_add', ''); $('#' + id + '_1 option:selected').remove().appendTo('#' + id + '_2'); } function updateProductRuleShortDescription(item) { /******* For IE: put a product in condition on cart rules *******/ if(typeof String.prototype.trim !== 'function') { String.prototype.trim = function() { return this.replace(/^\s+|\s+$/g, ''); } } var id1 = $(item).attr('id').replace('_add', '').replace('_remove', ''); var id2 = id1.replace('_select', ''); var length = $('#' + id1 + '_2 option').length; if (length == 1) $('#' + id2 + '_match').val($('#' + id1 + '_2 option').first().text().trim()); else $('#' + id2 + '_match').val(length); } var restrictions = new Array('country', 'carrier', 'group', 'cart_rule', 'shop'); for (i in restrictions) { toggleCartRuleFilter($('#' + restrictions[i] + '_restriction')); $('#' + restrictions[i] + '_restriction').click(function() {toggleCartRuleFilter(this);}); $('#' + restrictions[i] + '_select_remove').click(function() {removeCartRuleOption(this);}); $('#' + 
restrictions[i] + '_select_add').click(function() {addCartRuleOption(this);}); } toggleCartRuleFilter($('#product_restriction')); $('#product_restriction').click(function() {toggleCartRuleFilter(this);}); function toggleApplyDiscount(percent, amount, apply_to) { if (percent) { $('#apply_discount_percent_div').show(400); if ($('#apply_discount_to_product').prop('checked')) toggleApplyDiscountTo(); $('#apply_discount_to_cheapest').show(); $('*[for=apply_discount_to_cheapest]').show(); $('#apply_discount_to_selection').show(); $('*[for=apply_discount_to_selection]').show(); } else { $('#apply_discount_percent_div').hide(200); $('#reduction_percent').val('0'); } if (amount) { $('#apply_discount_amount_div').show(400); if ($('#apply_discount_to_product').prop('checked')) toggleApplyDiscountTo(); $('#apply_discount_to_cheapest').hide(); $('*[for=apply_discount_to_cheapest]').hide(); $('#apply_discount_to_cheapest').removeAttr('checked'); $('#apply_discount_to_selection').hide(); $('*[for=apply_discount_to_selection]').hide(); $('#apply_discount_to_selection').removeAttr('checked'); } else { $('#apply_discount_amount_div').hide(200); $('#reduction_amount').val('0'); } if (apply_to) $('#apply_discount_to_div').show(400); else { toggleApplyDiscountTo(); $('#apply_discount_to_div').hide(200); } } function toggleApplyDiscountTo() { if ($('#apply_discount_to_product').prop('checked')) $('#apply_discount_to_product_div').show(400); else { $('#apply_discount_to_product_div').hide(200); $('#reductionProductFilter').val(''); if ($('#apply_discount_to_order').prop('checked')) $('#reduction_product').val('0'); if ($('#apply_discount_to_cheapest').prop('checked')) $('#reduction_product').val('-1'); if ($('#apply_discount_to_selection').prop('checked')) $('#reduction_product').val('-2'); } } function toggleGiftProduct() { if ($('#free_gift_on').prop('checked')) $('#free_gift_div').show(400); else { $('#gift_product').val('0'); $('#giftProductFilter').val(''); $('#free_gift_div').hide(200); } } $('#apply_discount_percent').click(function() {toggleApplyDiscount(true, false, true);}); if ($('#apply_discount_percent').prop('checked')) toggleApplyDiscount(true, false, true); $('#apply_discount_amount').click(function() {toggleApplyDiscount(false, true, true);}); if ($('#apply_discount_amount').prop('checked')) toggleApplyDiscount(false, true, true); $('#apply_discount_off').click(function() {toggleApplyDiscount(false, false, false);}); if ($('#apply_discount_off').prop('checked')) toggleApplyDiscount(false, false, false); $('#apply_discount_to_order').click(function() {toggleApplyDiscountTo();}); if ($('#apply_discount_to_order').prop('checked')) toggleApplyDiscountTo(); $('#apply_discount_to_product').click(function() {toggleApplyDiscountTo();}); if ($('#apply_discount_to_product').prop('checked')) toggleApplyDiscountTo(); $('#apply_discount_to_cheapest').click(function() {toggleApplyDiscountTo();}); if ($('#apply_discount_to_cheapest').prop('checked')) toggleApplyDiscountTo(); $('#apply_discount_to_selection').click(function() {toggleApplyDiscountTo();}); if ($('#apply_discount_to_selection').prop('checked')) toggleApplyDiscountTo(); $('#free_gift_on').click(function() {toggleGiftProduct();}); $('#free_gift_off').click(function() {toggleGiftProduct();}); toggleGiftProduct(); // Main form submit $('#cart_rule_form').submit(function() { if ($('#customerFilter').val() == '') $('#id_customer').val('0'); for (i in restrictions) { if ($('#' + restrictions[i] + '_select_1 option').length == 0) $('#' + restrictions[i] + 
'_restriction').removeAttr('checked'); else { $('#' + restrictions[i] + '_select_2 option').each(function(i) { $(this).attr('selected', true); }); } } $('.product_rule_toselect option').each(function(i) { $(this).attr('selected', true); }); }); $('#reductionProductFilter') .autocomplete( 'ajax-tab.php', { minChars: 2, max: 50, width: 500, selectFirst: false, scroll: false, dataType: 'json', formatItem: function(data, i, max, value, term) { return value; }, parse: function(data) { var mytab = new Array(); for (var i = 0; i < data.length; i++) mytab[mytab.length] = { data: data[i], value: (data[i].reference + ' ' + data[i].name).trim() }; return mytab; }, extraParams: { controller: 'AdminCartRules', token: currentToken, reductionProductFilter: 1 } } ) .result(function(event, data, formatted) { $('#reduction_product').val(data.id_product); $('#reductionProductFilter').val((data.reference + ' ' + data.name).trim()); }); $('#customerFilter') .autocomplete( 'ajax-tab.php', { minChars: 2, max: 50, width: 500, selectFirst: false, scroll: false, dataType: 'json', formatItem: function(data, i, max, value, term) { return value; }, parse: function(data) { var mytab = new Array(); for (var i = 0; i < data.length; i++) mytab[mytab.length] = { data: data[i], value: data[i].cname + ' (' + data[i].email + ')' }; return mytab; }, extraParams: { controller: 'AdminCartRules', token: currentToken, customerFilter: 1 } } ) .result(function(event, data, formatted) { $('#id_customer').val(data.id_customer); $('#customerFilter').val(data.cname + ' (' + data.email + ')'); }); function displayCartRuleTab(tab) { $('.cart_rule_tab').hide(); $('.tab-page').removeClass('selected'); $('#cart_rule_' + tab).show(); $('#cart_rule_link_' + tab).addClass('selected'); $('#currentFormTab').val(tab); } $('.cart_rule_tab').hide(); $('.tab-page').removeClass('selected'); $('#cart_rule_' + currentFormTab).show(); $('#cart_rule_link_' + currentFormTab).addClass('selected'); var date = new Date(); var hours = date.getHours(); if (hours < 10) hours = "0" + hours; var mins = date.getMinutes(); if (mins < 10) mins = "0" + mins; var secs = date.getSeconds(); if (secs < 10) secs = "0" + secs; $('.datepicker').datepicker({ prevText: '', nextText: '', dateFormat: 'yy-mm-dd ' + hours + ':' + mins + ':' + secs }); $('#giftProductFilter').typeWatch({ captureLength: 2, highlight: false, wait: 100, callback: function(){ searchProducts(); } }); var gift_product_search = $('#giftProductFilter').val(); function searchProducts() { if ($('#giftProductFilter').val() == gift_product_search) return; gift_product_search = $('#giftProductFilter').val(); $.ajax({ type: 'POST', headers: { "cache-control": "no-cache" }, url: 'ajax-tab.php' + '?rand=' + new Date().getTime(), async: true, dataType: 'json', data: { controller: 'AdminCartRules', token: currentToken, action: 'searchProducts', product_search: $('#giftProductFilter').val() }, success : function(res) { var products_found = ''; var attributes_html = ''; stock = {}; if (res.found) { $('#gift_products_err').hide(); $('#gift_products_found').show(); $.each(res.products, function() { products_found += '<option value="' + this.id_product + '">' + this.name + (this.combinations.length == 0 ? ' - ' + this.formatted_price : '') + '</option>'; attributes_html += '<select class="id_product_attribute" id="ipa_' + this.id_product + '" name="ipa_' + this.id_product + '" style="display:none">'; $.each(this.combinations, function() { attributes_html += '<option ' + (this.default_on == 1 ? 
'selected="selected"' : '') + ' value="' + this.id_product_attribute + '">' + this.attributes + ' - ' + this.formatted_price + '</option>'; }); attributes_html += '</select>'; }); $('#gift_product_list #gift_product').html(products_found); $('#gift_attributes_list #gift_attributes_list_select').html(attributes_html); displayProductAttributes(); } else { $('#products_found').hide(); $('#products_err').html(res.notfound); $('#products_err').show(); } } }); } function displayProductAttributes() { if ($('#ipa_' + $('#gift_product option:selected').val() + ' option').length === 0) $('#gift_attributes_list').hide(); else { $('#gift_attributes_list').show(); $('.id_product_attribute').hide(); $('#ipa_' + $('#gift_product option:selected').val()).show(); } }
//+------------------------------------------------------------------------- // // Microsoft Windows // // Copyright (C) Microsoft Corporation, 1998 - 1999 // // File: dxglob7obj.h // //-------------------------------------------------------------------------- #include "resource.h" // main symbols typedef HRESULT (__stdcall *DDRAWCREATE)( GUID FAR *lpGUID, LPDIRECTDRAW FAR *lplpDD, IUnknown FAR *pUnkOuter ); typedef HRESULT (__stdcall *DDCREATECLIPPER)( DWORD dwFlags, LPDIRECTDRAWCLIPPER FAR *lplpDDClipper, IUnknown FAR *pUnkOuter ); typedef HRESULT (__stdcall *DDENUMERATE)(LPDDENUMCALLBACK, LPVOID); typedef HRESULT (__stdcall *DDENUMERATEEX)(LPDDENUMCALLBACKEX, LPVOID, DWORD); typedef HRESULT (__stdcall *DIRECT3DRMCREATE)(LPDIRECT3DRM *lpCreate); typedef HRESULT (__stdcall *DSOUNDCREATE)(GUID FAR * lpGUID, LPDIRECTSOUND * ppDS, IUnknown FAR *pUnkOuter ); typedef HRESULT (__stdcall *DSOUNDCAPTURECREATE)(GUID FAR * lpGUID, LPDIRECTSOUNDCAPTURE * ppDS, IUnknown FAR *pUnkOuter ); typedef HRESULT (CALLBACK *DSOUNDENUMERATE)(LPDSENUMCALLBACK lpCallback, LPVOID lpContext ); typedef HRESULT (CALLBACK *DSOUNDCAPTUREENUMERATE)(LPDSENUMCALLBACK lpCallback, LPVOID lpContext ); typedef HRESULT (__stdcall *DIRECTPLAYCREATE)( LPGUID lpGUID, LPDIRECTPLAY *lplpDP, IUnknown *pUnk); typedef HRESULT (__stdcall *DIRECTPLAYENUMERATE)( LPDPENUMDPCALLBACK, LPVOID ); typedef HRESULT (__stdcall *DIRECTPLAYLOBBYCREATE)(LPGUID, LPDIRECTPLAYLOBBY *, IUnknown *, LPVOID, DWORD ); typedef HRESULT (__stdcall *DDRAWCREATEEX)( GUID FAR * rGuid, LPVOID *lplpDD, REFIID iid,IUnknown FAR *pUnkOuter ); typedef struct tag_EVENTTHREADINFO { HANDLE hEvent; struct tag_EVENTTHREADINFO *pNext; IStream *pStream; I_dxj_DirectXEvent *pCallback; DWORD threadID; HANDLE hThread; BOOL fEnd; HANDLE hEndEvent; } EVENTTHREADINFO; class C_dxj_DirectX7Object : public I_dxj_DirectX7, public CComCoClass<C_dxj_DirectX7Object, &CLSID__dxj_DirectX7>, public CComObjectRoot { public: C_dxj_DirectX7Object() ; virtual ~C_dxj_DirectX7Object() ; BEGIN_COM_MAP(C_dxj_DirectX7Object) COM_INTERFACE_ENTRY(I_dxj_DirectX7) END_COM_MAP() DECLARE_REGISTRY(CLSID__dxj_DirectX7, "DIRECT.DirectX6.0", "DIRECT.DirectX6.0", IDS_DIRECTX6_DESC, THREADFLAGS_BOTH) // Use DECLARE_NOT_AGGREGATABLE(C_dxj_DirectSoundResourceObject) if you don't want your object // to support aggregation DECLARE_AGGREGATABLE(C_dxj_DirectX7Object) public: HRESULT STDMETHODCALLTYPE direct3dRMCreate( /* [retval][out] */ I_dxj_Direct3dRM3 __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directDrawCreate( /* [in] */ BSTR guid, /* [retval][out] */ I_dxj_DirectDraw7 __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE getDDEnum( /* [retval][out] */ I_dxj_DirectDrawEnum __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directSoundCreate( /* [in] */ BSTR guid, /* [retval][out] */ I_dxj_DirectSound __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directSoundCaptureCreate( /* [in] */ BSTR guid, /* [retval][out] */ I_dxj_DirectSoundCapture __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE getDSEnum( /* [retval][out] */ I_dxj_DSEnum __RPC_FAR *__RPC_FAR *retVal); HRESULT STDMETHODCALLTYPE getDSCaptureEnum( /* [retval][out] */ I_dxj_DSEnum __RPC_FAR *__RPC_FAR *retVal); HRESULT STDMETHODCALLTYPE directInputCreate( /* [retval][out] */ I_dxj_DirectInput __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directPlayCreate( /* [in] */ BSTR guid, /* [retval][out] */ I_dxj_DirectPlay4 __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directPlayLobbyCreate( /* [retval][out] */ 
I_dxj_DirectPlayLobby3 __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE getDPEnum( /* [retval][out] */ I_dxj_DPEnumServiceProviders __RPC_FAR *__RPC_FAR *retval); HRESULT STDMETHODCALLTYPE colorGetAlpha( /* [in] */ long color, /* [retval][out] */ float __RPC_FAR *ret); HRESULT STDMETHODCALLTYPE colorGetBlue( /* [in] */ long color, /* [retval][out] */ float __RPC_FAR *ret); HRESULT STDMETHODCALLTYPE colorGetGreen( /* [in] */ long color, /* [retval][out] */ float __RPC_FAR *ret); HRESULT STDMETHODCALLTYPE colorGetRed( /* [in] */ long color, /* [retval][out] */ float __RPC_FAR *ret); HRESULT STDMETHODCALLTYPE createColorRGB( /* [in] */ float r, /* [in] */ float g, /* [in] */ float b, /* [retval][out] */ long __RPC_FAR *color); HRESULT STDMETHODCALLTYPE createColorRGBA( /* [in] */ float r, /* [in] */ float g, /* [in] */ float b, /* [in] */ float a, /* [retval][out] */ long __RPC_FAR *color); HRESULT STDMETHODCALLTYPE matrixFromQuaternion( /* [out] */ D3dMatrix __RPC_FAR *matrix, /* [in] */ D3dRMQuaternion __RPC_FAR *quat); HRESULT STDMETHODCALLTYPE quaternionRotation( /* [out] */ D3dRMQuaternion __RPC_FAR *quat, /* [in] */ D3dVector __RPC_FAR *axis, /* [in] */ float theta); HRESULT STDMETHODCALLTYPE quaternionMultiply( /* [out] */ D3dRMQuaternion __RPC_FAR *quat, /* [in] */ D3dRMQuaternion __RPC_FAR *quatA, /* [in] */ D3dRMQuaternion __RPC_FAR *quatB); HRESULT STDMETHODCALLTYPE quaternionSlerp( /* [out] */ D3dRMQuaternion __RPC_FAR *quat, /* [in] */ D3dRMQuaternion __RPC_FAR *quatA, /* [in] */ D3dRMQuaternion __RPC_FAR *quatB, /* [in] */ float alpha); HRESULT STDMETHODCALLTYPE vectorAdd( /* [out] */ D3dVector __RPC_FAR *v, /* [in] */ D3dVector __RPC_FAR *vA, /* [in] */ D3dVector __RPC_FAR *vB); HRESULT STDMETHODCALLTYPE vectorCrossProduct( /* [out] */ D3dVector __RPC_FAR *v, /* [in] */ D3dVector __RPC_FAR *vA, /* [in] */ D3dVector __RPC_FAR *vB); HRESULT STDMETHODCALLTYPE vectorDotProduct( /* [in] */ D3dVector __RPC_FAR *vA, /* [in] */ D3dVector __RPC_FAR *vB, /* [retval][out] */ float __RPC_FAR *ret); HRESULT STDMETHODCALLTYPE vectorModulus( /* [in] */ D3dVector __RPC_FAR *vA, /* [retval][out] */ float __RPC_FAR *ret); HRESULT STDMETHODCALLTYPE vectorNormalize( /* [out][in] */ D3dVector __RPC_FAR *v); HRESULT STDMETHODCALLTYPE vectorRandom( /* [out][in] */ D3dVector __RPC_FAR *v); HRESULT STDMETHODCALLTYPE vectorReflect( /* [out] */ D3dVector __RPC_FAR *vDest, /* [in] */ D3dVector __RPC_FAR *vRay, /* [in] */ D3dVector __RPC_FAR *vNormal); HRESULT STDMETHODCALLTYPE vectorRotate( /* [out] */ D3dVector __RPC_FAR *vDest, /* [in] */ D3dVector __RPC_FAR *vA, /* [in] */ D3dVector __RPC_FAR *vAxis, /* [in] */ float theta); HRESULT STDMETHODCALLTYPE vectorScale( /* [out] */ D3dVector __RPC_FAR *vDest, /* [in] */ D3dVector __RPC_FAR *vA, /* [in] */ float factor); HRESULT STDMETHODCALLTYPE vectorSubtract( /* [out] */ D3dVector __RPC_FAR *v, /* [in] */ D3dVector __RPC_FAR *vA, /* [in] */ D3dVector __RPC_FAR *vB); HRESULT STDMETHODCALLTYPE vectorCopy( /* [out][in] */ D3dVector __RPC_FAR *vDest, /* [in] */ D3dVector __RPC_FAR *vSrc); HRESULT STDMETHODCALLTYPE rotateXMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ float radians); HRESULT STDMETHODCALLTYPE rotateYMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ float radians); HRESULT STDMETHODCALLTYPE rotateZMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ float radians); HRESULT STDMETHODCALLTYPE viewMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ D3dVector __RPC_FAR *vFrom, /* [in] */ 
D3dVector __RPC_FAR *vTo, /* [in] */ D3dVector __RPC_FAR *vUp, /* [in] */ float roll); HRESULT STDMETHODCALLTYPE matrixMultiply( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ D3dMatrix __RPC_FAR *mA, /* [in] */ D3dMatrix __RPC_FAR *mB); HRESULT STDMETHODCALLTYPE projectionMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ float nearPlane, /* [in] */ float farplane, /* [in] */ float fov); HRESULT STDMETHODCALLTYPE copyMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest, /* [in] */ D3dMatrix __RPC_FAR *mSrc); HRESULT STDMETHODCALLTYPE identityMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest); HRESULT STDMETHODCALLTYPE zeroMatrix( /* [out][in] */ D3dMatrix __RPC_FAR *mDest); HRESULT STDMETHODCALLTYPE tickCount( /* [retval][out] */ long __RPC_FAR *retv); HRESULT STDMETHODCALLTYPE systemBpp( /* [retval][out] */ long __RPC_FAR *retv); HRESULT STDMETHODCALLTYPE directMusicLoaderCreate( /* [retval][out] */ I_dxj_DirectMusicLoader __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directMusicComposerCreate( /* [retval][out] */ I_dxj_DirectMusicComposer __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE directMusicPerformanceCreate( /* [retval][out] */ I_dxj_DirectMusicPerformance __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE getWindowRect( /* [in] */ long hwnd, /* [out][in] */ Rect __RPC_FAR *r) ; HRESULT STDMETHODCALLTYPE createEvent( /* [in] */ I_dxj_DirectXEvent __RPC_FAR *event, /* [retval][out] */ long __RPC_FAR *h) ; HRESULT STDMETHODCALLTYPE setEvent( /* [in] */ long eventId) ; HRESULT STDMETHODCALLTYPE destroyEvent( /* [in] */ long eventId) ; HRESULT STDMETHODCALLTYPE createD3DVertex(float x, float y, float z, float nx, float ny, float nz, float tu, float tv, D3dVertex *v); HRESULT STDMETHODCALLTYPE createD3DLVertex(float x, float y, float z, long color, long specular, float tu, float tv, D3dLVertex *v); HRESULT STDMETHODCALLTYPE createD3DTLVertex(float sx, float sy, float sz, float rhw, long color, long specular, float tu, float tv, D3dTLVertex *v); HRESULT STDMETHODCALLTYPE directDraw4Create( /* [in] */ BSTR guid, /* [retval][out] */ I_dxj_DirectDraw4 __RPC_FAR *__RPC_FAR *ret); HRESULT STDMETHODCALLTYPE createNewGuid(BSTR *ret); void LoadDDRAW(); void LoadDPLAY(); void LoadDSOUND(); void LoadDINPUT(); void LoadD3DRM(); private: DDRAWCREATE m_pDirectDrawCreate; DDRAWCREATEEX m_pDirectDrawCreateEx; DDCREATECLIPPER m_pDirectDrawCreateClipper; DSOUNDCREATE m_pDirectSoundCreate; DSOUNDCAPTURECREATE m_pDirectSoundCaptureCreate; DSOUNDENUMERATE m_pDirectSoundEnumerate; DSOUNDCAPTUREENUMERATE m_pDirectSoundCaptureEnumerate; DIRECTPLAYCREATE m_pDirectPlayCreate; DIRECTPLAYENUMERATE m_pDirectPlayEnumerate; DIRECTPLAYLOBBYCREATE m_pDirectPlayLobbyCreate; DIRECT3DRMCREATE m_pDirect3DRMCreate; DDENUMERATE m_pDirectDrawEnumerate; DDENUMERATEEX m_pDirectDrawEnumerateEx; EVENTTHREADINFO *m_pEventList; };
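The typedefs at the top of this header (DDRAWCREATE, DSOUNDCREATE, ...) exist so the Load* members can resolve entry points from the DirectX DLLs at runtime instead of linking against them. A hedged Python ctypes sketch of that load-then-resolve pattern, using libm/cos as a portable stand-in for ddraw.dll/DirectDrawCreate (Unix-like systems assumed):

# Hedged sketch: dynamic load + symbol lookup, analogous to
# LoadLibrary/GetProcAddress behind the header's function-pointer typedefs.
# Assumes a Unix-like system where libm can be located.
import ctypes, ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library("m"))  # dynamic load
cos = libm.cos                                     # symbol resolution
cos.restype = ctypes.c_double                      # declare the "typedef"
cos.argtypes = [ctypes.c_double]
print(cos(0.0))  # 1.0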
/* eslint-disable no-unused-vars */ import path from "path"; const config = { all: { env: process.env.NODE_ENV || "development", root: path.join(__dirname, ".."), port: process.env.PORT || 9000, ip: process.env.IP || "0.0.0.0", apiRoot: process.env.API_ROOT || "", mongo: { uri: process.env.MONGODB_URI || "mongodb://localhost/hackathon", options: { db: { safe: true }, debug: true } } } }; module.exports = Object.assign(config.all, config[config.all.env]); export default module.exports;
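Object.assign above overlays the block selected by config.all.env onto the shared defaults (without the env key, that lookup could never resolve). A small Python sketch of the same layering semantics:

# Hedged sketch of environment-layered configuration, mirroring Object.assign.
import os

config = {
    "all": {"port": int(os.environ.get("PORT", 9000)), "debug": False},
    "development": {"debug": True},
    "production": {},
}

env = os.environ.get("NODE_ENV", "development")
settings = {**config["all"], **config.get(env, {})}  # env block wins on conflicts
print(settings)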
#! /usr/bin/env python # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import logging import re import unicodedata from abc import abstractmethod from collections import Counter import numpy as np from ludwig.data.dataframe.pandas import PANDAS from ludwig.utils.math_utils import int_type from ludwig.utils.misc_utils import get_from_registry from ludwig.utils.nlp_utils import load_nlp_pipeline, process_text UNKNOWN_SYMBOL = '<UNK>' PADDING_SYMBOL = '<PAD>' PADDING_IDX = 0 SPLIT_REGEX = re.compile(r'\s+') SPACE_PUNCTUATION_REGEX = re.compile(r'\w+|[^\w\s]') COMMA_REGEX = re.compile(r'\s*,\s*') UNDERSCORE_REGEX = re.compile(r'\s*_\s*') BOOL_TRUE_STRS = {'yes', 'y', 'true', 't', '1'} def make_safe_filename(s): def safe_char(c): if c.isalnum(): return c else: return '_' return ''.join(safe_char(c) for c in s).rstrip('_') def strip_accents(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def str2bool(v): return str(v).lower() in BOOL_TRUE_STRS def match_replace(string_to_match, list_regex): """Matches strings against regular expressions. arguments: string_to_match -- the string to match returns: string_to_match -- the cleaned string matched -- the list of regular expressions that matched """ matched = [] for regex in list_regex: match = re.search(regex[0], string_to_match) if match: string_to_match = re.sub(regex[0], regex[1], string_to_match) matched.append(regex[0].pattern) return string_to_match, matched def load_vocabulary(vocab_file): with open(vocab_file, 'r', encoding='utf-8') as f: vocabulary = [] for line in f: line = line.strip() if ' ' in line: line = line.split(' ')[0] vocabulary.append(line) return vocabulary # return [line.strip() for line in f] def create_vocabulary( data, tokenizer_type='space', add_unknown=True, add_padding=True, lowercase=True, num_most_frequent=None, vocab_file=None, unknown_symbol=UNKNOWN_SYMBOL, padding_symbol=PADDING_SYMBOL, pretrained_model_name_or_path=None, processor=PANDAS, ): vocab = None tokenizer = get_from_registry( tokenizer_type, tokenizer_registry )( vocab_file=vocab_file, pretrained_model_name_or_path=pretrained_model_name_or_path, ) if tokenizer_type == 'hf_tokenizer': try: vocab = tokenizer.tokenizer.get_vocab() vocab = list(vocab.keys()) except NotImplementedError: vocab = [] for idx in range(tokenizer.tokenizer.vocab_size): vocab.append(tokenizer.tokenizer._convert_id_to_token(idx)) vocab += tokenizer.tokenizer.added_tokens_encoder.keys() pad_token = tokenizer.tokenizer.pad_token unk_token = tokenizer.tokenizer.unk_token if pad_token is None: vocab = vocab + [padding_symbol] else: padding_symbol = pad_token if unk_token is None: vocab = vocab + [unknown_symbol] else: unknown_symbol = unk_token elif vocab_file is not None: vocab = load_vocabulary(vocab_file) processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line)) processed_counts = 
processed_lines.explode().value_counts(sort=False) processed_counts = processor.compute(processed_counts) unit_counts = Counter(dict(processed_counts)) max_line_length = processor.compute(processed_lines.map(len).max()) if vocab is None: vocab = [unit for unit, count in unit_counts.most_common(num_most_frequent)] vocab_set = set(vocab) if add_unknown and tokenizer_type != 'hf_tokenizer': if unknown_symbol in vocab_set: vocab.remove(unknown_symbol) vocab = [unknown_symbol] + vocab if add_padding and tokenizer_type != 'hf_tokenizer': if padding_symbol in vocab_set: vocab.remove(padding_symbol) vocab = [padding_symbol] + vocab str2idx = {unit: i for i, unit in enumerate(vocab)} str2freq = {unit: unit_counts.get(unit) if unit in unit_counts else 0 for unit in vocab} pad_idx = None if padding_symbol in str2idx.keys(): pad_idx = str2idx[padding_symbol] return vocab, str2idx, str2freq, max_line_length, pad_idx, padding_symbol, unknown_symbol def get_sequence_vector(sequence, tokenizer_type, unit_to_id, lowercase=True): tokenizer = get_from_registry(tokenizer_type, tokenizer_registry)() format_dtype = int_type(len(unit_to_id) - 1) return _get_sequence_vector( sequence, tokenizer, tokenizer_type, format_dtype, unit_to_id, lowercase=lowercase ) def _get_sequence_vector( sequence, tokenizer, tokenizer_type, format_dtype, unit_to_id, lowercase=True, unknown_symbol=UNKNOWN_SYMBOL ): unit_sequence = tokenizer( sequence.lower() if lowercase else sequence ) unit_indices_vector = np.empty(len(unit_sequence), dtype=format_dtype) for i in range(len(unit_sequence)): curr_unit = unit_sequence[i] if tokenizer_type == 'hf_tokenizer': unit_indices_vector[i] = curr_unit else: if curr_unit in unit_to_id: unit_indices_vector[i] = unit_to_id[curr_unit] else: unit_indices_vector[i] = unit_to_id[unknown_symbol] return unit_indices_vector def build_sequence_matrix( sequences, inverse_vocabulary, tokenizer_type, length_limit, padding_symbol, padding='right', unknown_symbol=UNKNOWN_SYMBOL, lowercase=True, tokenizer_vocab_file=None, pretrained_model_name_or_path=None, processor=PANDAS, ): tokenizer = get_from_registry(tokenizer_type, tokenizer_registry)( vocab_file=tokenizer_vocab_file, pretrained_model_name_or_path=pretrained_model_name_or_path, ) format_dtype = int_type(len(inverse_vocabulary) - 1) unit_vectors = sequences.map(lambda sequence: _get_sequence_vector( sequence, tokenizer, tokenizer_type, format_dtype, inverse_vocabulary, lowercase=lowercase, unknown_symbol=unknown_symbol )) max_length = processor.compute(unit_vectors.map(len).max()) if max_length < length_limit: logging.debug('max length of sequences: {0} < limit: {1}'.format( max_length, length_limit )) max_length = length_limit def pad(vector): sequence = np.full((max_length,), inverse_vocabulary[padding_symbol], dtype=format_dtype) limit = min(vector.shape[0], max_length) if padding == 'right': sequence[:limit] = vector[:limit] else: # padding == 'left' sequence[max_length - limit:] = vector[:limit] return sequence padded = processor.map_objects(unit_vectors, pad) return padded class BaseTokenizer: @abstractmethod def __init__(self, **kwargs): pass @abstractmethod def __call__(self, text): pass class CharactersToListTokenizer(BaseTokenizer): def __call__(self, text): return text class SpaceStringToListTokenizer(BaseTokenizer): def __call__(self, text): return SPLIT_REGEX.split(text.strip()) class SpacePunctuationStringToListTokenizer(BaseTokenizer): def __call__(self, text): return SPACE_PUNCTUATION_REGEX.findall(text.strip()) class 
UnderscoreStringToListTokenizer(BaseTokenizer): def __call__(self, text): return UNDERSCORE_REGEX.split(text.strip()) class CommaStringToListTokenizer(BaseTokenizer): def __call__(self, text): return COMMA_REGEX.split(text.strip()) class UntokenizedStringToListTokenizer(BaseTokenizer): def __call__(self, text): return [text] class StrippedStringToListTokenizer(BaseTokenizer): def __call__(self, text): return [text.strip()] class EnglishTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('en')) class EnglishFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('en'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class EnglishRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('en'), filter_stopwords=True ) class EnglishLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('en'), return_lemma=True) class EnglishLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('en'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class EnglishLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('en'), return_lemma=True, filter_stopwords=True ) class ItalianTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('it')) class ItalianFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('it'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class ItalianRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('it'), filter_stopwords=True ) class ItalianLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('it'), return_lemma=True ) class ItalianLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('it'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class ItalianLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('it'), return_lemma=True, filter_stopwords=True ) class SpanishTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('es')) class SpanishFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('es'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class SpanishRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('es'), filter_stopwords=True ) class SpanishLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('es'), return_lemma=True ) class SpanishLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('es'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class SpanishLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('es'), return_lemma=True, filter_stopwords=True ) class GermanTokenizer(BaseTokenizer): def __call__(self, text): return
process_text(text, load_nlp_pipeline('de')) class GermanFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('de'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class GermanRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('de'), filter_stopwords=True ) class GermanLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('de'), return_lemma=True ) class GermanLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('de'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class GermanLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('de'), return_lemma=True, filter_stopwords=True ) class FrenchTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('fr')) class FrenchFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('fr'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class FrenchRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('fr'), filter_stopwords=True ) class FrenchLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('fr'), return_lemma=True ) class FrenchLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('fr'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class FrenchLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('fr'), return_lemma=True, filter_stopwords=True ) class PortugueseTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('pt')) class PortugueseFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pt'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class PortugueseRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pt'), filter_stopwords=True ) class PortugueseLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('pt'), return_lemma=True) class PortugueseLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pt'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class PortugueseLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pt'), return_lemma=True, filter_stopwords=True ) class DutchTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('nl')) class DutchFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nl'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class DutchRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nl'), filter_stopwords=True ) class DutchLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return 
process_text(text, load_nlp_pipeline('nl'), return_lemma=True) class DutchLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nl'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class DutchLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nl'), return_lemma=True, filter_stopwords=True ) class GreekTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('el')) class GreekFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('el'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class GreekRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('el'), filter_stopwords=True ) class GreekLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('el'), return_lemma=True) class GreekLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('el'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class GreekLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('el'), return_lemma=True, filter_stopwords=True ) class NorwegianTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('nb')) class NorwegianFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nb'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class NorwegianRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nb'), filter_stopwords=True ) class NorwegianLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('nb'), return_lemma=True) class NorwegianLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nb'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class NorwegianLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('nb'), return_lemma=True, filter_stopwords=True ) class LithuanianTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('lt')) class LithuanianFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('lt'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class LithuanianRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('lt'), filter_stopwords=True ) class LithuanianLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('lt'), return_lemma=True) class LithuanianLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('lt'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class LithuanianLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('lt'), return_lemma=True, 
filter_stopwords=True ) class DanishTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('da')) class DanishFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('da'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class DanishRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('da'), filter_stopwords=True ) class DanishLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('da'), return_lemma=True) class DanishLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('da'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class DanishLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('da'), return_lemma=True, filter_stopwords=True ) class PolishTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('pl')) class PolishFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pl'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class PolishRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pl'), filter_stopwords=True ) class PolishLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('pl'), return_lemma=True) class PolishLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pl'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class PolishLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('pl'), return_lemma=True, filter_stopwords=True ) class RomanianTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('ro')) class RomanianFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('ro'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class RomanianRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('ro'), filter_stopwords=True ) class RomanianLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('ro'), return_lemma=True) class RomanianLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('ro'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class RomanianLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('ro'), return_lemma=True, filter_stopwords=True ) class JapaneseTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('jp')) class JapaneseFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('jp'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class JapaneseRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('jp'), 
filter_stopwords=True ) class JapaneseLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('jp'), return_lemma=True) class JapaneseLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('jp'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class JapaneseLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('jp'), return_lemma=True, filter_stopwords=True ) class ChineseTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('zh')) class ChineseFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('zh'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class ChineseRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('zh'), filter_stopwords=True ) class ChineseLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('zh'), return_lemma=True) class ChineseLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('zh'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class ChineseLemmatizeRemoveStopwordsFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('zh'), return_lemma=True, filter_stopwords=True ) class MultiTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('xx')) class MultiFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('xx'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class MultiRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('xx'), filter_stopwords=True ) class MultiLemmatizeTokenizer(BaseTokenizer): def __call__(self, text): return process_text(text, load_nlp_pipeline('xx'), return_lemma=True) class MultiLemmatizeFilterTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('xx'), return_lemma=True, filter_numbers=True, filter_punctuation=True, filter_short_tokens=True ) class MultiLemmatizeRemoveStopwordsTokenizer(BaseTokenizer): def __call__(self, text): return process_text( text, load_nlp_pipeline('xx'), return_lemma=True, filter_stopwords=True ) class HFTokenizer(BaseTokenizer): def __init__(self, pretrained_model_name_or_path, **kwargs ): super().__init__() from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, ) def __call__(self, text): return self.tokenizer.encode(text, truncation=True) tokenizer_registry = { 'characters': CharactersToListTokenizer, 'space': SpaceStringToListTokenizer, 'space_punct': SpacePunctuationStringToListTokenizer, 'underscore': UnderscoreStringToListTokenizer, 'comma': CommaStringToListTokenizer, 'untokenized': UntokenizedStringToListTokenizer, 'stripped': StrippedStringToListTokenizer, 'english_tokenize': EnglishTokenizer, 'english_tokenize_filter': EnglishFilterTokenizer, 'english_tokenize_remove_stopwords': EnglishRemoveStopwordsTokenizer, 'english_lemmatize': EnglishLemmatizeTokenizer, 'english_lemmatize_filter': EnglishLemmatizeFilterTokenizer, 
'english_lemmatize_remove_stopwords': EnglishLemmatizeRemoveStopwordsTokenizer, 'italian_tokenize': ItalianTokenizer, 'italian_tokenize_filter': ItalianFilterTokenizer, 'italian_tokenize_remove_stopwords': ItalianRemoveStopwordsTokenizer, 'italian_lemmatize': ItalianLemmatizeTokenizer, 'italian_lemmatize_filter': ItalianLemmatizeFilterTokenizer, 'italian_lemmatize_remove_stopwords': ItalianLemmatizeRemoveStopwordsTokenizer, 'spanish_tokenize': SpanishTokenizer, 'spanish_tokenize_filter': SpanishFilterTokenizer, 'spanish_tokenize_remove_stopwords': SpanishRemoveStopwordsTokenizer, 'spanish_lemmatize': SpanishLemmatizeTokenizer, 'spanish_lemmatize_filter': SpanishLemmatizeFilterTokenizer, 'spanish_lemmatize_remove_stopwords': SpanishLemmatizeRemoveStopwordsTokenizer, 'german_tokenize': GermanTokenizer, 'german_tokenize_filter': GermanFilterTokenizer, 'german_tokenize_remove_stopwords': GermanRemoveStopwordsTokenizer, 'german_lemmatize': GermanLemmatizeTokenizer, 'german_lemmatize_filter': GermanLemmatizeFilterTokenizer, 'german_lemmatize_remove_stopwords': GermanLemmatizeRemoveStopwordsTokenizer, 'french_tokenize': FrenchTokenizer, 'french_tokenize_filter': FrenchFilterTokenizer, 'french_tokenize_remove_stopwords': FrenchRemoveStopwordsTokenizer, 'french_lemmatize': FrenchLemmatizeTokenizer, 'french_lemmatize_filter': FrenchLemmatizeFilterTokenizer, 'french_lemmatize_remove_stopwords': FrenchLemmatizeRemoveStopwordsTokenizer, 'portuguese_tokenize': PortugueseTokenizer, 'portuguese_tokenize_filter': PortugueseFilterTokenizer, 'portuguese_tokenize_remove_stopwords': PortugueseRemoveStopwordsTokenizer, 'portuguese_lemmatize': PortugueseLemmatizeTokenizer, 'portuguese_lemmatize_filter': PortugueseLemmatizeFilterTokenizer, 'portuguese_lemmatize_remove_stopwords': PortugueseLemmatizeRemoveStopwordsTokenizer, 'dutch_tokenize': DutchTokenizer, 'dutch_tokenize_filter': DutchFilterTokenizer, 'dutch_tokenize_remove_stopwords': DutchRemoveStopwordsTokenizer, 'dutch_lemmatize': DutchLemmatizeTokenizer, 'dutch_lemmatize_filter': DutchLemmatizeFilterTokenizer, 'dutch_lemmatize_remove_stopwords': DutchLemmatizeRemoveStopwordsTokenizer, 'greek_tokenize': GreekTokenizer, 'greek_tokenize_filter': GreekFilterTokenizer, 'greek_tokenize_remove_stopwords': GreekRemoveStopwordsTokenizer, 'greek_lemmatize': GreekLemmatizeTokenizer, 'greek_lemmatize_filter': GreekLemmatizeFilterTokenizer, 'greek_lemmatize_remove_stopwords': GreekLemmatizeRemoveStopwordsFilterTokenizer, 'norwegian_tokenize': NorwegianTokenizer, 'norwegian_tokenize_filter': NorwegianFilterTokenizer, 'norwegian_tokenize_remove_stopwords': NorwegianRemoveStopwordsTokenizer, 'norwegian_lemmatize': NorwegianLemmatizeTokenizer, 'norwegian_lemmatize_filter': NorwegianLemmatizeFilterTokenizer, 'norwegian_lemmatize_remove_stopwords': NorwegianLemmatizeRemoveStopwordsFilterTokenizer, 'lithuanian_tokenize': LithuanianTokenizer, 'lithuanian_tokenize_filter': LithuanianFilterTokenizer, 'lithuanian_tokenize_remove_stopwords': LithuanianRemoveStopwordsTokenizer, 'lithuanian_lemmatize': LithuanianLemmatizeTokenizer, 'lithuanian_lemmatize_filter': LithuanianLemmatizeFilterTokenizer, 'lithuanian_lemmatize_remove_stopwords': LithuanianLemmatizeRemoveStopwordsFilterTokenizer, 'danish_tokenize': DanishTokenizer, 'danish_tokenize_filter': DanishFilterTokenizer, 'danish_tokenize_remove_stopwords': DanishRemoveStopwordsTokenizer, 'danish_lemmatize': DanishLemmatizeTokenizer, 'danish_lemmatize_filter': DanishLemmatizeFilterTokenizer, 'danish_lemmatize_remove_stopwords': 
DanishLemmatizeRemoveStopwordsFilterTokenizer, 'polish_tokenize': PolishTokenizer, 'polish_tokenize_filter': PolishFilterTokenizer, 'polish_tokenize_remove_stopwords': PolishRemoveStopwordsTokenizer, 'polish_lemmatize': PolishLemmatizeTokenizer, 'polish_lemmatize_filter': PolishLemmatizeFilterTokenizer, 'polish_lemmatize_remove_stopwords': PolishLemmatizeRemoveStopwordsFilterTokenizer, 'romanian_tokenize': RomanianTokenizer, 'romanian_tokenize_filter': RomanianFilterTokenizer, 'romanian_tokenize_remove_stopwords': RomanianRemoveStopwordsTokenizer, 'romanian_lemmatize': RomanianLemmatizeTokenizer, 'romanian_lemmatize_filter': RomanianLemmatizeFilterTokenizer, 'romanian_lemmatize_remove_stopwords': RomanianLemmatizeRemoveStopwordsFilterTokenizer, 'japanese_tokenize': JapaneseTokenizer, 'japanese_tokenize_filter': JapaneseFilterTokenizer, 'japanese_tokenize_remove_stopwords': JapaneseRemoveStopwordsTokenizer, 'japanese_lemmatize': JapaneseLemmatizeTokenizer, 'japanese_lemmatize_filter': JapaneseLemmatizeFilterTokenizer, 'japanese_lemmatize_remove_stopwords': JapaneseLemmatizeRemoveStopwordsFilterTokenizer, 'chinese_tokenize': ChineseTokenizer, 'chinese_tokenize_filter': ChineseFilterTokenizer, 'chinese_tokenize_remove_stopwords': ChineseRemoveStopwordsTokenizer, 'chinese_lemmatize': ChineseLemmatizeTokenizer, 'chinese_lemmatize_filter': ChineseLemmatizeFilterTokenizer, 'chinese_lemmatize_remove_stopwords': ChineseLemmatizeRemoveStopwordsFilterTokenizer, 'multi_tokenize': MultiTokenizer, 'multi_tokenize_filter': MultiFilterTokenizer, 'multi_tokenize_remove_stopwords': MultiRemoveStopwordsTokenizer, 'multi_lemmatize': MultiLemmatizeTokenizer, 'multi_lemmatize_filter': MultiLemmatizeFilterTokenizer, 'multi_lemmatize_remove_stopwords': MultiLemmatizeRemoveStopwordsTokenizer, 'hf_tokenizer': HFTokenizer }
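# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal, hedged example of how the registry and vocabulary helpers above
# are typically exercised. It assumes a plain pandas Series is an acceptable
# `data` argument for create_vocabulary (the PANDAS processor works on pandas
# objects); everything else is defined in this file.
import pandas as pd

tokenizer = tokenizer_registry['space']()
assert tokenizer('hello world  again') == ['hello', 'world', 'again']

texts = pd.Series(['a b a', 'b c'])
vocab, str2idx, str2freq, max_len, pad_idx, pad_sym, unk_sym = \
    create_vocabulary(texts, tokenizer_type='space')
get_sequence_vector('a z', 'space', str2idx)  # unseen units map to <UNK>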
import demo from "./index.vue"

demo.install = (Vue) => {
  Vue.component(demo.name, demo)
}

export default demo;
#ifndef __M_SYSMANAGER_H__
#define __M_SYSMANAGER_H__

#ifdef ASYNCH
void M_continue();
void M_wait();
#endif

#ifdef SYSMAN
#define BASEDIR_BUFF 256
#define ENT_REF_SIZE 0x10000
#endif

void M_systemsinit();
void M_systemsclose();

#endif
//
//  SPUScheduledUpdateDriver.h
//  Sparkle
//
//  Created by Mayur Pawashe on 3/15/16.
//  Copyright © 2016 Sparkle Project. All rights reserved.
//

#import <Foundation/Foundation.h>
#import "SPUUpdateDriver.h"
#import "SPUUIBasedUpdateDriver.h"

NS_ASSUME_NONNULL_BEGIN

@class SUHost;
@protocol SPUUserDriver, SPUUpdaterDelegate;

@interface SPUScheduledUpdateDriver : NSObject <SPUUpdateDriver>

- (instancetype)initWithHost:(SUHost *)host applicationBundle:(NSBundle *)applicationBundle updater:(id)updater userDriver:(id <SPUUserDriver>)userDriver updaterDelegate:(nullable id <SPUUpdaterDelegate>)updaterDelegate;

@end

NS_ASSUME_NONNULL_END
import functools import operator import os import os.path import sys import numpy as np # Bamboo utilities current_file = os.path.realpath(__file__) current_dir = os.path.dirname(current_file) sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python')) import tools # ============================================== # Objects for Python data reader # ============================================== # Note: The Python data reader imports this file as a module and calls # the functions below to ingest data. # Data np.random.seed(20220118) _num_samples = 27 _sample_size = 5 _samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32) # Sample access functions def get_sample(index): return _samples[index,:] def num_samples(): return _num_samples def sample_dims(): return (_sample_size,) # ============================================== # Setup LBANN experiment # ============================================== def setup_experiment(lbann, weekly): """Construct LBANN experiment. Args: lbann (module): Module for LBANN Python frontend """ mini_batch_size = num_samples() // 2 trainer = lbann.Trainer(mini_batch_size) model = construct_model(lbann) data_reader = construct_data_reader(lbann) optimizer = lbann.NoOptimizer() return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes def construct_model(lbann): """Construct LBANN model. Args: lbann (module): Module for LBANN Python frontend """ # Input data # Note: Sum with a weights layer so that gradient checking will # verify that error signals are correct. x_weights = lbann.Weights(optimizer=lbann.SGD(), initializer=lbann.ConstantInitializer(value=0.0), name='input_weights') x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'), dims=_sample_size), lbann.WeightsLayer(weights=x_weights, dims=_sample_size)) x_lbann = x # Objects for LBANN model obj = [] metrics = [] callbacks = [] # ------------------------------------------ # Data-parallel layout # ------------------------------------------ # LBANN implementation w = lbann.Weights( optimizer=lbann.SGD(), initializer=lbann.ConstantInitializer(value=0.0), name='grid1_dataparallel_weights') w = lbann.WeightsLayer( weights=w, dims=_sample_size, data_layout='data_parallel', parallel_strategy={'grid_tag':1}) x = lbann.Identity(x_lbann, data_layout='data_parallel') y1 = lbann.Sin( x, data_layout='data_parallel', parallel_strategy={'grid_tag':2}) y2 = lbann.Square( y1, data_layout='data_parallel', parallel_strategy={'grid_tag':1}) y2 = lbann.Sum(w, y2, data_layout='data_parallel') y3 = lbann.Scale( y2, constant=2, data_layout='data_parallel', parallel_strategy={'grid_tag':2}) y = lbann.Identity( y3, data_layout='data_parallel', parallel_strategy={'grid_tag':0}) z = lbann.L2Norm2(y) obj.append(z) metrics.append(lbann.Metric(z, name='data-parallel layout')) # NumPy implementation vals = [] for i in range(num_samples()): x = get_sample(i).astype(np.float64) y = 2*np.sin(x)**2 z = tools.numpy_l2norm2(y) vals.append(z) val = np.mean(vals) tol = 8 * val * np.finfo(np.float32).eps callbacks.append(lbann.CallbackCheckMetric( metric=metrics[-1].name, lower_bound=val-tol, upper_bound=val+tol, error_on_failure=True, execution_modes='test')) # ------------------------------------------ # Model-parallel layout # ------------------------------------------ # LBANN implementation w = lbann.Weights( optimizer=lbann.SGD(), initializer=lbann.ConstantInitializer(value=0.5), name='grid2_modelparallel_weights') x = lbann.Identity(x_lbann, 
data_layout='model_parallel') y1 = lbann.Cos( x, data_layout='model_parallel', parallel_strategy={'grid_tag':1}) y2 = lbann.Scale( y1, constant=2, data_layout='model_parallel', parallel_strategy={'grid_tag':2}) y2 = lbann.FullyConnected( y2, num_neurons=1, weights=w, data_layout='model_parallel') y3 = lbann.Square( y2, data_layout='data_parallel', parallel_strategy={'grid_tag':1}) y = lbann.Identity( y3, data_layout='data_parallel', parallel_strategy={'grid_tag':0}) z = lbann.L2Norm2(y) obj.append(z) metrics.append(lbann.Metric(z, name='model-parallel layout')) # NumPy implementation vals = [] for i in range(num_samples()): x = get_sample(i).astype(np.float64) y = np.sum(np.cos(x))**2 z = tools.numpy_l2norm2(y) vals.append(z) val = np.mean(vals) tol = 8 * val * np.finfo(np.float32).eps callbacks.append(lbann.CallbackCheckMetric( metric=metrics[-1].name, lower_bound=val-tol, upper_bound=val+tol, error_on_failure=True, execution_modes='test')) # ------------------------------------------ # Gradient checking # ------------------------------------------ callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True)) # ------------------------------------------ # Construct model # ------------------------------------------ num_epochs = 0 return lbann.Model(num_epochs, layers=lbann.traverse_layer_graph(x_lbann), objective_function=obj, metrics=metrics, callbacks=callbacks) def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ # Note: The training data reader should be removed when # https://github.com/LLNL/lbann/issues/1098 is resolved. message = lbann.reader_pb2.DataReader() message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'train' ) ]) message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'test' ) ]) return message # ============================================== # Setup PyTest # ============================================== # Create test functions that can interact with PyTest for _test_func in tools.create_tests( setup_experiment, __file__, environment={'LBANN_NUM_SUBGRIDS': 2}): globals()[_test_func.__name__] = _test_func
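# --- Reference-check sketch (illustrative addition) ---
# The metric bounds in the test above come from a pure-NumPy replay of the
# layer graph. Assuming tools.numpy_l2norm2(y) is the squared L2 norm, the
# data-parallel branch reduces to:
import numpy as np

x = np.random.normal(size=5).astype(np.float64)
y = 2 * np.sin(x) ** 2   # Sin -> Square -> Scale(constant=2); weights are zero
z = np.sum(y ** 2)       # squared L2 norm, compared against the LBANN metric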
const colors = require('tailwindcss/colors')

module.exports = {
    content: [
        './resources/**/*.blade.php',
        './vendor/filament/**/*.blade.php',
    ],
    theme: {
        extend: {
            colors: {
                danger: colors.rose,
                primary: colors.blue,
                success: colors.green,
                warning: colors.yellow,
            },
        },
    },
    plugins: [
        require('@tailwindcss/forms'),
        require('@tailwindcss/typography'),
    ],
}
// Copyright 2018-2021 by Boris Feld import Mousetrap from "mousetrap"; import React from "react"; import { faSearch } from "@fortawesome/free-solid-svg-icons"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import store from "../store"; class SearchBox extends React.Component { constructor(props) { super(props); this.myRef = React.createRef(); } componentDidMount() { Mousetrap.bind(["ctrl+k"], this.focusSearchBox); } componentWillUnmount() { Mousetrap.unbind(["ctrl+k"]); } focusSearchBox = () => { this.myRef.current.focus(); return false; }; render() { return ( <div className="field is-expanded"> <p className="control has-icons-left"> <input className="input" placeholder="Search" value={store.query_text} onChange={event => store.set_query_text(event.target.value)} ref={this.myRef} /> <span className="icon is-small is-left"> <FontAwesomeIcon icon={faSearch} /> </span> </p> </div> ); } } export default SearchBox;
// Standalone cookie file. Put the ck value between the backticks;
// for multiple accounts, separate them with newlines.
let refreshtokenVal = ``

let iboxpaycookie = {
    refreshtokenVal: refreshtokenVal,
}

module.exports = iboxpaycookie
from cogdl.experiments import check_experiment from tabulate import tabulate import json import os def load_hyperparameter_config(): path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'configs.json' with open(path, 'r') as file: configuration = json.load(file) return configuration def save_hyperparameter_config(configuration): path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'configs.json' with open(path, 'w') as file: json.dump(configuration, file, indent=4, ensure_ascii=False) return configuration def result_table(dataset=None, model=None, print_test_accuracy=True): configuration = load_hyperparameter_config() model = sorted(list(set([_mk for _mk, _mv in configuration.items() for _dk, _dv in _mv.items() if _dk!='general' and 'val_acc_mean' in _dv]))) if model is None else model dataset = sorted(list(set([_dk for _mk, _mv in configuration.items() for _dk, _dv in _mv.items() if _dk!='general' and 'val_acc_mean' in _dv]))) if dataset is None else dataset col_names = ["Variant"] + dataset tab_data = [] for m in model: results = [m] for d in dataset: items = configuration.get(m, {}).get(d, {}) if 'val_acc_mean' in items: if print_test_accuracy: acc_str = "%5.2f±%4.2f / %5.2f±%4.2f" % (items['test_acc_mean']*100, items['test_acc_std']*100, items['val_acc_mean']*100, items['val_acc_std']*100) else: acc_str = "%5.2f±%4.2f" % (items['test_acc_mean']*100, items['test_acc_std']*100) else: acc_str = '' results.append(acc_str) tab_data.append(results) print(tabulate(tab_data, headers=col_names, tablefmt="github", stralign=u'center')) def result_check(dataset=None, model=None): configuration = load_hyperparameter_config() model = sorted(list(set([_mk for _mk, _mv in configuration.items() for _dk, _dv in _mv.items() if _dk!='general' and 'val_acc_mean' in _dv]))) if model is None else model dataset = sorted(list(set([_dk for _mk, _mv in configuration.items() for _dk, _dv in _mv.items() if _dk!='general' and 'val_acc_mean' in _dv]))) if dataset is None else dataset col_names = ["Variant"] + ["Record", "Experiment", "Deviation"] tab_data = [] for d in dataset: for m in model: items = configuration.get(m, {}).get(d, {}) if 'val_acc_mean' in items: record_val_acc_mean = items['val_acc_mean'] record_val_acc_std = items['val_acc_std'] record_test_acc_mean = items["test_acc_mean"] record_test_acc_std = items['test_acc_std'] result_mean = check_experiment(dataset=d, model=m, use_best_config=True) experiment_val_acc_mean = result_mean['val_acc_mean'] experiment_val_acc_std = result_mean['val_acc_std'] experiment_test_acc_mean = result_mean["test_acc_mean"] experiment_test_acc_std = result_mean['test_acc_std'] deviation_valid_acc_mean = abs(experiment_val_acc_mean - record_val_acc_mean) deviation_valid_acc_std = abs(experiment_val_acc_std - record_val_acc_std) deviation_test_acc_mean = abs(experiment_test_acc_mean - record_test_acc_mean) deviation_test_acc_std = abs(experiment_test_acc_std - record_test_acc_std) record_acc = "%5.2f±%4.2f / %5.2f±%4.2f" % (record_test_acc_mean*100, record_test_acc_std*100, record_val_acc_mean*100, record_val_acc_std*100) experiment_acc = "%5.2f±%4.2f / %5.2f±%4.2f" % (experiment_test_acc_mean*100, experiment_test_acc_std*100, experiment_val_acc_mean*100, experiment_val_acc_std*100) deviation_acc = "%4.2f±%4.2f / %4.2f±%4.2f" % (deviation_test_acc_mean*100, deviation_test_acc_std*100, deviation_valid_acc_mean*100, deviation_valid_acc_std*100) tab_data.append(["%s, %s" % (d, m), record_acc, experiment_acc, deviation_acc]) print(tabulate(tab_data, 
headers=col_names, tablefmt="github", stralign=u'center'))
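# --- Usage sketch (illustrative) ---
# The keys below are hypothetical; substitute any (model, dataset) pairs that
# exist in configs.json with recorded accuracies. Calling result_table() with
# no arguments tabulates every recorded pair.
result_table(dataset=['cora'], model=['gcn'], print_test_accuracy=True)
result_check(dataset=['cora'], model=['gcn'])  # re-runs and reports deviation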
# -*- coding: utf-8 -*- """'Current Source Density analysis (CSD) is a class of methods of analysis of extracellular electric potentials recorded at multiple sites leading to estimates of current sources generating the measured potentials. It is usually applied to the low-frequency part of the potential (called the Local Field Potential, LFP) and to simultaneous recordings or to recordings taken with fixed time reference to the onset of specific stimulus (Evoked Potentials)' (Definition by Prof. Daniel K. Wójcik for Encyclopedia of Computational Neuroscience) CSD is also called Source Localization or Source Imaging in EEG circles. Here are CSD methods for different types of electrode configurations. 1D - laminar probe like electrodes. 2D - Microelectrode Array like electrodes. 3D - UtahArray or multiple laminar probes. The following methods have been implemented so far 1D - StandardCSD, DeltaiCSD, SplineiCSD, StepiCSD, KCSD1D 2D - KCSD2D, MoIKCSD (Saline layer on top of slice) 3D - KCSD3D Each of the methods listed has some advantages. The KCSD methods for instance can handle broken or irregular electrode configurations. Keywords: LFP; CSD; Multielectrode; Laminar electrode; Barrel cortex Citation Policy: See ./current_source_density_src/README.md Contributors to this current source density estimation module are: Chaitanya Chintaluri(CC), Espen Hagen(EH) and Michał Czerwinski(MC). EH implemented the iCSD methods and StandardCSD CC implemented the kCSD methods, kCSD1D(MC and CC) CC and EH developed the interface to elephant. """ from __future__ import division, print_function, unicode_literals import neo import numpy as np import quantities as pq from scipy.integrate import simps import elephant.current_source_density_src.utility_functions as utils from elephant.current_source_density_src import KCSD, icsd from elephant.utils import deprecated_alias __all__ = [ "estimate_csd", "generate_lfp" ] utils.patch_quantities() available_1d = ['StandardCSD', 'DeltaiCSD', 'StepiCSD', 'SplineiCSD', 'KCSD1D'] available_2d = ['KCSD2D', 'MoIKCSD'] available_3d = ['KCSD3D'] kernel_methods = ['KCSD1D', 'KCSD2D', 'KCSD3D', 'MoIKCSD'] icsd_methods = ['DeltaiCSD', 'StepiCSD', 'SplineiCSD'] py_iCSD_toolbox = ['StandardCSD'] + icsd_methods @deprecated_alias(coords='coordinates') def estimate_csd(lfp, coordinates=None, method=None, process_estimate=True, **kwargs): """ Function call to compute the current source density (CSD) from extracellular potential recordings (local field potentials - LFP) using laminar electrodes or multi-contact electrodes with 2D or 3D geometries. Parameters ---------- lfp : neo.AnalogSignal positions of electrodes can be added as neo.RecordingChannel coordinate or sent externally as a func argument (See coords) coordinates : [Optional] corresponding spatial coordinates of the electrodes.
Defaults to None Otherwise looks for ChannelIndex coordinate method : string Pick a method corresponding to the setup, in this implementation For Laminar probe style (1D), use 'KCSD1D' or 'StandardCSD', or 'DeltaiCSD' or 'StepiCSD' or 'SplineiCSD' For MEA probe style (2D), use 'KCSD2D', or 'MoIKCSD' For array of laminar probes (3D), use 'KCSD3D' Defaults to None process_estimate : bool In the py_iCSD_toolbox this corresponds to the filter_csd - the parameters are passed as kwargs here ie., f_type and f_order In the kcsd methods this corresponds to cross_validate - the parameters are passed as kwargs here ie., lambdas and Rs Defaults to True kwargs : parameters to each method The parameters corresponding to the method chosen See the documentation of the individual method Default is {} - picks the best parameters, Returns ------- Estimated CSD neo.AnalogSignal object annotated with the spatial coordinates Raises ------ AttributeError No units specified for electrode spatial coordinates ValueError Invalid function arguments, wrong method name, or mismatching coordinates TypeError Invalid cv_param argument passed """ if not isinstance(lfp, neo.AnalogSignal): raise TypeError('Parameter `lfp` must be a neo.AnalogSignal object') if coordinates is None: coordinates = lfp.channel_index.coordinates else: scaled_coords = [] for coord in coordinates: try: scaled_coords.append(coord.rescale(pq.mm)) except AttributeError: raise AttributeError('No units given for electrode spatial \ coordinates') coordinates = scaled_coords if method is None: raise ValueError('Must specify a method of CSD implementation') if len(coordinates) != lfp.shape[1]: raise ValueError('Number of signals and coords is not same') for ii in coordinates: # CHECK for Dimensionality of electrodes if len(ii) > 3: raise ValueError('Invalid number of coordinate positions') dim = len(coordinates[0]) # TODO : Generic co-ordinates! if dim == 1 and (method not in available_1d): raise ValueError('Invalid method, Available options are:', available_1d) if dim == 2 and (method not in available_2d): raise ValueError('Invalid method, Available options are:', available_2d) if dim == 3 and (method not in available_3d): raise ValueError('Invalid method, Available options are:', available_3d) if method in kernel_methods: input_array = np.zeros((len(lfp), lfp[0].magnitude.shape[0])) for ii, jj in enumerate(lfp): input_array[ii, :] = jj.rescale(pq.mV).magnitude kernel_method = getattr(KCSD, method) # fetch the class 'KCSD1D' lambdas = kwargs.pop('lambdas', None) Rs = kwargs.pop('Rs', None) k = kernel_method(np.array(coordinates), input_array.T, **kwargs) if process_estimate: k.cross_validate(lambdas, Rs) estm_csd = k.values() estm_csd = np.rollaxis(estm_csd, -1, 0) output = neo.AnalogSignal(estm_csd * pq.uA / pq.mm**3, t_start=lfp.t_start, sampling_rate=lfp.sampling_rate) if dim == 1: output.annotate(x_coords=k.estm_x) elif dim == 2: output.annotate(x_coords=k.estm_x, y_coords=k.estm_y) elif dim == 3: output.annotate(x_coords=k.estm_x, y_coords=k.estm_y, z_coords=k.estm_z) elif method in py_iCSD_toolbox: coordinates = np.array(coordinates) * coordinates[0].units if method in icsd_methods: try: coordinates = coordinates.rescale(kwargs['diam'].units) except KeyError: # Then why specify as a default in icsd? 
# All iCSD methods explicitly assume a source # diameter in contrast to the stdCSD that # implicitly assumes infinite source radius raise ValueError("Parameter diam must be specified for iCSD methods: {}".format(", ".join(icsd_methods))) if 'f_type' in kwargs: if (kwargs['f_type'] != 'identity') and (kwargs['f_order'] is None): raise ValueError("The order of {} filter must be specified".format(kwargs['f_type'])) lfp = neo.AnalogSignal(np.asarray(lfp).T, units=lfp.units, sampling_rate=lfp.sampling_rate) csd_method = getattr(icsd, method) # fetch class from icsd.py file csd_estimator = csd_method(lfp=lfp.magnitude * lfp.units, coord_electrode=coordinates.flatten(), **kwargs) csd_pqarr = csd_estimator.get_csd() if process_estimate: csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr) output = neo.AnalogSignal(csd_pqarr_filtered.T, t_start=lfp.t_start, sampling_rate=lfp.sampling_rate) else: output = neo.AnalogSignal(csd_pqarr.T, t_start=lfp.t_start, sampling_rate=lfp.sampling_rate) output.annotate(x_coords=coordinates) return output @deprecated_alias(ele_xx='x_positions', ele_yy='y_positions', ele_zz='z_positions', xlims='x_limits', ylims='y_limits', zlims='z_limits', res='resolution') def generate_lfp(csd_profile, x_positions, y_positions=None, z_positions=None, x_limits=[0., 1.], y_limits=[0., 1.], z_limits=[0., 1.], resolution=50): """ Forward modelling for getting the potentials for testing Current Source Density (CSD). Parameters ---------- csd_profile : callable A function that computes the true CSD profile. Available options are (see ./csd/utility_functions.py) 1D : gauss_1d_dipole 2D : large_source_2D and small_source_2D 3D : gauss_3d_dipole x_positions : np.ndarray Positions of the x coordinates of the electrodes y_positions : np.ndarray, optional Positions of the y coordinates of the electrodes Defaults to None, use in 2D or 3D cases only z_positions : np.ndarray, optional Positions of the z coordinates of the electrodes Defaults to None, use in 3D case only x_limits : list, optional A list of [start, end]. The starting spatial coordinate and the ending for integration Defaults to [0.,1.] y_limits : list, optional A list of [start, end]. The starting spatial coordinate and the ending for integration Defaults to [0.,1.], use only in 2D and 3D case z_limits : list, optional A list of [start, end]. The starting spatial coordinate and the ending for integration Defaults to [0.,1.], use only in 3D case resolution : int, optional The resolution of the integration Defaults to 50 Returns ------- LFP : neo.AnalogSignal The potentials created by the csd profile at the electrode positions. The electrode positions are attached as RecordingChannel's coordinate. """ def integrate_1D(x0, csd_x, csd, h): m = np.sqrt((csd_x - x0) ** 2 + h ** 2) - abs(csd_x - x0) y = csd * m I = simps(y, csd_x) return I def integrate_2D(x, y, xlin, ylin, csd, h, X, Y): x = np.reshape(x, (1, 1, len(x))) y = np.reshape(y, (1, 1, len(y))) X = np.expand_dims(X, axis=2) Y = np.expand_dims(Y, axis=2) csd = np.expand_dims(csd, axis=2) m = np.sqrt((x - X) ** 2 + (y - Y) ** 2) np.clip(m, a_min=0.0000001, a_max=None, out=m) y = np.arcsinh(2 * h / m) * csd I = simps(y.T, ylin) F = simps(I, xlin) return F def integrate_3D(x, y, z, csd, xlin, ylin, zlin, X, Y, Z): m = np.sqrt((x - X) ** 2 + (y - Y) ** 2 + (z - Z) ** 2) np.clip(m, a_min=0.0000001, a_max=None, out=m) z = csd / m Iy = simps(np.transpose(z, (1, 0, 2)), zlin) Iy = simps(Iy, ylin) F = simps(Iy, xlin) return F dim = 1 if z_positions is not None: dim = 3 elif y_positions is not None: dim = 2 x = np.linspace(x_limits[0], x_limits[1], resolution) sigma = 1.0 h = 50. if dim == 1: chrg_x = x csd = csd_profile(chrg_x) pots = integrate_1D(x_positions, chrg_x, csd, h) pots /= 2. * sigma # eq.: 26 from Potworowski et al ele_pos = x_positions elif dim == 2: y = np.linspace(y_limits[0], y_limits[1], resolution) chrg_x = np.expand_dims(x, axis=1) chrg_y = np.expand_dims(y, axis=0) csd = csd_profile(chrg_x, chrg_y) pots = integrate_2D(x_positions, y_positions, x, y, csd, h, chrg_x, chrg_y) pots /= 2 * np.pi * sigma ele_pos = np.vstack((x_positions, y_positions)).T elif dim == 3: y = np.linspace(y_limits[0], y_limits[1], resolution) z = np.linspace(z_limits[0], z_limits[1], resolution) chrg_x, chrg_y, chrg_z = np.mgrid[ x_limits[0]: x_limits[1]: complex(0, resolution), y_limits[0]: y_limits[1]: complex(0, resolution), z_limits[0]: z_limits[1]: complex(0, resolution) ] csd = csd_profile(chrg_x, chrg_y, chrg_z) pots = np.zeros(len(x_positions)) for ii in range(len(x_positions)): pots[ii] = integrate_3D(x_positions[ii], y_positions[ii], z_positions[ii], csd, x, y, z, chrg_x, chrg_y, chrg_z) pots /= 4 * np.pi * sigma ele_pos = np.vstack((x_positions, y_positions, z_positions)).T ele_pos = ele_pos * pq.mm ch = neo.ChannelIndex(index=range(len(pots))) asig = neo.AnalogSignal(np.expand_dims(pots, axis=0), sampling_rate=pq.kHz, units='mV') ch.coordinates = ele_pos ch.analogsignals.append(asig) ch.create_relationship() return asig
#pragma once

#include <iostream>
#include <string>
#include <fmt/format.h>
#include <cstdint>
#include <libgen.h>
#include <spdlog/spdlog.h>

#pragma GCC system_header

namespace CustomLogger {
void initLogger(const spdlog::level::level_enum& LoggingLevel,
                const std::string& log_file_name,
                std::ostream* gui_stream = nullptr);
void closeLogger();
}

#define LOG(Severity, Format, ...) spdlog::log(Severity, Format, ##__VA_ARGS__)

#define CRIT(Format, ...) LOG(spdlog::level::critical, Format, ##__VA_ARGS__)
#define ERR(Format, ...) LOG(spdlog::level::err, Format, ##__VA_ARGS__)
#define WARN(Format, ...) LOG(spdlog::level::warn, Format, ##__VA_ARGS__)
#define INFO(Format, ...) LOG(spdlog::level::info, Format, ##__VA_ARGS__)
#define DBG(Format, ...) LOG(spdlog::level::debug, Format, ##__VA_ARGS__)
#define TRC(Format, ...) LOG(spdlog::level::trace, Format, ##__VA_ARGS__)

//#define LOGL(Severity, Format, ...) Log::Msg(Severity, fmt::format(Format, ##__VA_ARGS__), {{"file", std::string(__FILE__)}, {"line", std::int64_t(__LINE__)}})
//
//#define CRITL(Format, ...) LOGL(Log::Severity::Critical, Format, ##__VA_ARGS__)
//#define ERRL(Format, ...) LOGL(Log::Severity::Error, Format, ##__VA_ARGS__)
//#define WARNL(Format, ...) LOGL(Log::Severity::Warning, Format, ##__VA_ARGS__)
//#define INFOL(Format, ...) LOGL(Log::Severity::Info, Format, ##__VA_ARGS__)
//#define DBGL(Format, ...) LOGL(Log::Severity::Debug, Format, ##__VA_ARGS__)
exports.delete = require('keyarray-delete')
exports.get = require('keyarray-get')
exports.has = require('keyarray-has')
exports.set = require('keyarray-set')

"""
Module: 'btree' on micropython-v1.17-esp32
"""
# MCU: {'ver': 'v1.17', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.17.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.17.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any


def open(*args, **kwargs) -> Any:
    ...


DESC = 2  # type: int
INCL = 1  # type: int
define(["./when-ca391574","./Transforms-0cb18884","./Cartesian2-e7edc838","./Check-6d10d1a9","./ComponentDatatype-fd1cb55e","./FrustumGeometry-4df5fb18","./GeometryAttribute-861caa10","./GeometryAttributes-a356f820","./Math-272c9861","./RuntimeError-19cb26ba","./WebGLConstants-4739ce15","./Plane-7cca3bb7","./VertexFormat-fa3de4f4"],function(o,m,c,e,h,d,f,g,t,r,n,a,u){"use strict";var s=0,i=1;function p(e){var t,r,n=e.frustum,a=e.orientation,u=e.origin,e=o.defaultValue(e._drawNearPlane,!0);n instanceof d.PerspectiveFrustum?(t=s,r=d.PerspectiveFrustum.packedLength):n instanceof d.OrthographicFrustum&&(t=i,r=d.OrthographicFrustum.packedLength),this._frustumType=t,this._frustum=n.clone(),this._origin=c.Cartesian3.clone(u),this._orientation=m.Quaternion.clone(a),this._drawNearPlane=e,this._workerName="createFrustumOutlineGeometry",this.packedLength=2+r+c.Cartesian3.packedLength+m.Quaternion.packedLength}p.pack=function(e,t,r){r=o.defaultValue(r,0);var n=e._frustumType,a=e._frustum;return(t[r++]=n)===s?(d.PerspectiveFrustum.pack(a,t,r),r+=d.PerspectiveFrustum.packedLength):(d.OrthographicFrustum.pack(a,t,r),r+=d.OrthographicFrustum.packedLength),c.Cartesian3.pack(e._origin,t,r),r+=c.Cartesian3.packedLength,m.Quaternion.pack(e._orientation,t,r),t[r+=m.Quaternion.packedLength]=e._drawNearPlane?1:0,t};var _=new d.PerspectiveFrustum,k=new d.OrthographicFrustum,l=new m.Quaternion,y=new c.Cartesian3;return p.unpack=function(e,t,r){t=o.defaultValue(t,0);var n,a=e[t++];a===s?(n=d.PerspectiveFrustum.unpack(e,t,_),t+=d.PerspectiveFrustum.packedLength):(n=d.OrthographicFrustum.unpack(e,t,k),t+=d.OrthographicFrustum.packedLength);var u=c.Cartesian3.unpack(e,t,y);t+=c.Cartesian3.packedLength;var i=m.Quaternion.unpack(e,t,l),e=1===e[t+=m.Quaternion.packedLength];if(!o.defined(r))return new p({frustum:n,origin:u,orientation:i,_drawNearPlane:e});t=a===r._frustumType?r._frustum:void 0;return r._frustum=n.clone(t),r._frustumType=a,r._origin=c.Cartesian3.clone(u,r._origin),r._orientation=m.Quaternion.clone(i,r._orientation),r._drawNearPlane=e,r},p.createGeometry=function(e){var t=e._frustumType,r=e._frustum,n=e._origin,a=e._orientation,u=e._drawNearPlane,e=new Float64Array(24);d.FrustumGeometry._computeNearFarPlanes(n,a,t,r,e);for(var i,o,r=new g.GeometryAttributes({position:new f.GeometryAttribute({componentDatatype:h.ComponentDatatype.DOUBLE,componentsPerAttribute:3,values:e})}),c=u?2:1,s=new Uint16Array(8*(1+c)),p=u?0:1;p<2;++p)s[i=u?8*p:0]=o=4*p,s[i+1]=o+1,s[i+2]=o+1,s[i+3]=o+2,s[i+4]=o+2,s[i+5]=o+3,s[i+6]=o+3,s[i+7]=o;for(p=0;p<2;++p)s[i=8*(c+p)]=o=4*p,s[i+1]=o+4,s[i+2]=o+1,s[i+3]=o+5,s[i+4]=o+2,s[i+5]=o+6,s[i+6]=o+3,s[i+7]=o+7;return new f.Geometry({attributes:r,indices:s,primitiveType:f.PrimitiveType.LINES,boundingSphere:m.BoundingSphere.fromVertices(e)})},function(e,t){return o.defined(t)&&(e=p.unpack(e,t)),p.createGeometry(e)}});
"""new db Revision ID: 44597fd0211f Revises: Create Date: 2019-09-25 11:17:10.238454 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '44597fd0211f' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('users', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=255), nullable=True), sa.Column('email', sa.String(length=255), nullable=True), sa.Column('password_hash', sa.String(length=255), nullable=False), sa.Column('bio', sa.String(length=255), nullable=True), sa.Column('profile_pic_path', sa.String(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True) op.create_table('blogs', sa.Column('id', sa.Integer(), nullable=False), sa.Column('title', sa.String(length=50), nullable=True), sa.Column('blog', sa.String(length=120), nullable=True), sa.Column('author_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['author_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('comments', sa.Column('id', sa.Integer(), nullable=False), sa.Column('comment', sa.Text(), nullable=False), sa.Column('user_id', sa.Integer(), nullable=True), sa.Column('blog_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['blog_id'], ['blogs.id'], ), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('comments') op.drop_table('blogs') op.drop_index(op.f('ix_users_email'), table_name='users') op.drop_table('users') # ### end Alembic commands ###
from collections import defaultdict from bluesky.run_engine import Msg, RunEngineInterrupted from bluesky.plans import scan, grid_scan, count, inner_product_scan from bluesky.object_plans import AbsScanPlan from bluesky.preprocessors import run_wrapper, subs_wrapper from bluesky.plan_stubs import pause import bluesky.plans as bp import bluesky.preprocessors as bpp from bluesky.callbacks import CallbackCounter, LiveTable, LiveFit, CallbackBase from bluesky.callbacks.core import make_callback_safe, make_class_safe from bluesky.callbacks.mpl_plotting import ( LiveScatter, LivePlot, LiveGrid, LiveFitPlot, LiveRaster, LiveMesh, ) from bluesky.callbacks.broker import BrokerCallbackBase from bluesky.tests.utils import _print_redirect, MsgCollector, DocCollector from event_model import compose_run, DocumentNames import pytest import numpy as np import matplotlib.pyplot as plt from io import StringIO from unittest.mock import MagicMock from itertools import permutations import time # copied from examples.py to avoid import def stepscan(det, motor): yield Msg('open_run') for i in range(-5, 5): yield Msg('create', name='primary') yield Msg('set', motor, i) yield Msg('trigger', det) yield Msg('read', motor) yield Msg('read', det) yield Msg('save') yield Msg('close_run') def exception_raiser(name, doc): raise Exception("it's an exception that better not kill the scan!!") def test_all(RE, hw): c = CallbackCounter() RE(stepscan(hw.det, hw.motor), {'all': c}) assert c.value == 10 + 1 + 2 # events, descriptor, start and stop c = CallbackCounter() token = RE.subscribe(c) RE(stepscan(hw.det, hw.motor)) RE.unsubscribe(token) assert c.value == 10 + 1 + 2 def test_raising_ignored_or_not(RE, hw): RE.ignore_callback_exceptions = True assert RE.ignore_callback_exceptions def cb(name, doc): raise Exception # by default (with ignore... 
= True) it warns with pytest.warns(UserWarning): RE(stepscan(hw.det, hw.motor), cb) RE.ignore_callback_exceptions = False with pytest.raises(Exception): RE(stepscan(hw.det, hw.motor), cb) def test_subs_input(hw): def cb1(name, doc): pass def cb2(name, doc): pass def cb3(name, doc): pass def cb_fact4(scan): def cb4(name, doc): pass return cb4 def cb_fact5(scan): def cb5(name, doc): pass return cb5 # Test input normalization on OO plans obj_ascan = AbsScanPlan([hw.det], hw.motor, 1, 5, 4) obj_ascan.subs = cb1 assert obj_ascan.subs == {'all': [cb1], 'start': [], 'stop': [], 'descriptor': [], 'event': []} obj_ascan.subs.update({'start': [cb2]}) assert obj_ascan.subs == {'all': [cb1], 'start': [cb2], 'stop': [], 'descriptor': [], 'event': []} obj_ascan.subs = [cb2, cb3] assert obj_ascan.subs == {'all': [cb2, cb3], 'start': [], 'stop': [], 'descriptor': [], 'event': []} def test_subscribe_msg(RE, hw): assert RE.state == 'idle' c = CallbackCounter() def counting_stepscan(det, motor): yield Msg('subscribe', None, c, 'start') yield from stepscan(det, motor) RE(counting_stepscan(hw.det, hw.motor)) # should advance c assert c.value == 1 RE(counting_stepscan(hw.det, hw.motor)) # should advance c assert c.value == 2 RE(stepscan(hw.det, hw.motor)) # should not assert c.value == 2 def test_unknown_cb_raises(RE): def f(name, doc): pass with pytest.raises(KeyError): RE.subscribe(f, 'not a thing') # back-compat alias for subscribe with pytest.raises(KeyError): RE.subscribe_lossless(f, 'not a thing') with pytest.raises(KeyError): RE._subscribe_lossless(f, 'not a thing') def test_table_warns(): table = LiveTable(['field']) table('start', {}) with pytest.warns(UserWarning): table('descriptor', {'uid': 'asdf', 'name': 'primary', 'data_keys': {'field': {'dtype': 'array'}}}) def test_table_external(RE, hw, db): RE.subscribe(db.insert) RE(count([hw.img]), LiveTable(['img'])) def _compare_tables(fout, known_table): for ln, kn in zip(fout, known_table.split('\n')): # this is to strip the `\n` from the print output ln = ln.rstrip() if ln[0] == '+': # test the full line on the divider lines assert ln == kn else: # skip the 'time' column on data rows # this is easier than faking up times in the scan! assert ln[:16] == kn[:16] assert ln[26:] == kn[26:] def test_table(RE, hw): with _print_redirect() as fout: hw.det.precision = 2 hw.motor.precision = 2 hw.motor.setpoint.put(0.0) # Make dtype 'number' not 'integer'. 
        hw.det.trigger()
        assert hw.det.describe()['det']['precision'] == 2
        assert hw.motor.describe()['motor']['precision'] == 2
        assert hw.det.describe()['det']['dtype'] == 'number'
        assert hw.motor.describe()['motor']['dtype'] == 'number'

        table = LiveTable(['det', 'motor'], min_width=16, extra_pad=2,
                          separator_lines=False)
        ad_scan = bp.adaptive_scan([hw.det], 'det', hw.motor,
                                   -15.0, 5., .01, 1, .05, True)
        # use lossless sub here because rows can get dropped
        token = RE.subscribe(table)
        RE(ad_scan)
        RE.unsubscribe_lossless(token)

    fout.seek(0)
    _compare_tables(fout, KNOWN_TABLE)


KNOWN_TABLE = """+------------+--------------+----------------+----------------+
| seq_num | time | det | motor |
+------------+--------------+----------------+----------------+
| 1 | 04:17:20.6 | 0.00 | -15.00 |
| 2 | 04:17:20.6 | 0.00 | -14.51 |
| 3 | 04:17:20.7 | 0.00 | -13.91 |
| 4 | 04:17:20.8 | 0.00 | -13.23 |
| 5 | 04:17:20.9 | 0.00 | -12.49 |
| 6 | 04:17:20.9 | 0.00 | -11.70 |
| 7 | 04:17:21.0 | 0.00 | -10.86 |
| 8 | 04:17:21.1 | 0.00 | -10.00 |
| 9 | 04:17:21.2 | 0.00 | -9.10 |
| 10 | 04:17:21.2 | 0.00 | -8.19 |
| 11 | 04:17:21.3 | 0.00 | -7.25 |
| 12 | 04:17:21.4 | 0.00 | -6.31 |
| 13 | 04:17:21.5 | 0.00 | -5.35 |
| 14 | 04:17:21.5 | 0.00 | -4.39 |
| 15 | 04:17:21.6 | 0.00 | -3.41 |
| 16 | 04:17:21.7 | 0.05 | -2.44 |
| 17 | 04:17:21.8 | 0.35 | -1.45 |
| 18 | 04:17:21.8 | 0.08 | -2.27 |
| 19 | 04:17:21.9 | 0.12 | -2.07 |
| 20 | 04:17:22.0 | 0.18 | -1.86 |
| 21 | 04:17:22.1 | 0.25 | -1.66 |
| 22 | 04:17:22.1 | 0.22 | -1.73 |
| 23 | 04:17:22.2 | 0.28 | -1.59 |
| 24 | 04:17:22.3 | 0.34 | -1.46 |
| 25 | 04:17:22.4 | 0.33 | -1.49 |
| 26 | 04:17:22.4 | 0.38 | -1.38 |
| 27 | 04:17:22.5 | 0.44 | -1.28 |
| 28 | 04:17:22.6 | 0.50 | -1.18 |
| 29 | 04:17:22.7 | 0.56 | -1.08 |
| 30 | 04:17:22.7 | 0.62 | -0.98 |
| 31 | 04:17:22.8 | 0.67 | -0.89 |
| 32 | 04:17:22.9 | 0.73 | -0.80 |
| 33 | 04:17:23.0 | 0.78 | -0.71 |
| 34 | 04:17:23.1 | 0.82 | -0.62 |
| 35 | 04:17:23.1 | 0.87 | -0.53 |
| 36 | 04:17:23.2 | 0.91 | -0.44 |
| 37 | 04:17:23.3 | 0.94 | -0.34 |
| 38 | 04:17:23.4 | 0.97 | -0.23 |
| 39 | 04:17:23.5 | 0.99 | -0.11 |
| 40 | 04:17:23.5 | 1.00 | 0.04 |
| 41 | 04:17:23.6 | 0.94 | 0.36 |
| 42 | 04:17:23.7 | 0.96 | 0.30 |
| 43 | 04:17:23.8 | 0.85 | 0.56 |
| 44 | 04:17:23.9 | 0.91 | 0.42 |
| 45 | 04:17:23.9 | 0.86 | 0.56 |
| 46 | 04:17:24.0 | 0.79 | 0.69 |
| 47 | 04:17:24.1 | 0.81 | 0.66 |
| 48 | 04:17:24.2 | 0.75 | 0.75 |
| 49 | 04:17:24.3 | 0.70 | 0.85 |
+------------+--------------+----------------+----------------+
| seq_num | time | det | motor |
+------------+--------------+----------------+----------------+
| 50 | 04:17:24.3 | 0.64 | 0.94 |
| 51 | 04:17:24.4 | 0.58 | 1.04 |
| 52 | 04:17:24.5 | 0.53 | 1.13 |
| 53 | 04:17:24.6 | 0.48 | 1.22 |
| 54 | 04:17:24.7 | 0.43 | 1.30 |
| 55 | 04:17:24.7 | 0.38 | 1.39 |
| 56 | 04:17:24.8 | 0.33 | 1.48 |
| 57 | 04:17:24.9 | 0.29 | 1.57 |
| 58 | 04:17:25.0 | 0.25 | 1.66 |
| 59 | 04:17:25.0 | 0.21 | 1.76 |
| 60 | 04:17:25.1 | 0.18 | 1.87 |
| 61 | 04:17:25.2 | 0.14 | 1.98 |
| 62 | 04:17:25.3 | 0.11 | 2.10 |
| 63 | 04:17:25.4 | 0.08 | 2.24 |
| 64 | 04:17:25.4 | 0.06 | 2.39 |
| 65 | 04:17:25.5 | 0.04 | 2.58 |
| 66 | 04:17:25.6 | 0.02 | 2.82 |
| 67 | 04:17:25.7 | 0.01 | 3.16 |
| 68 | 04:17:25.8 | 0.00 | 3.62 |
| 69 | 04:17:25.8 | 0.00 | 4.20 |
| 70 | 04:17:25.9 | 0.00 | 4.85 |
+------------+--------------+----------------+----------------+"""


def test_evil_table_names(RE):
    from ophyd import Signal
    sigs = [
        Signal(value=0, name="a:b"),
        Signal(value=0, name="a,b"),
        Signal(value=0, name="a'b"),
        Signal(value=0, name="🐍"),
    ]
    table = LiveTable(
        [s.name for s in sigs], min_width=5, extra_pad=2, separator_lines=False
    )
    with _print_redirect() as fout:
        print()  # get a blank line in captured output
        RE(bpp.subs_wrapper(bp.count(sigs, num=2), table))
    reference = """
+------------+--------------+--------+--------+--------+--------+
| seq_num | time | a:b | a,b | a'b | 🐍 |
+------------+--------------+--------+--------+--------+--------+
| 1 | 12:47:09.7 | 0 | 0 | 0 | 0 |
| 2 | 12:47:09.7 | 0 | 0 | 0 | 0 |
+------------+--------------+--------+--------+--------+--------+"""
    _compare_tables(fout, reference)


def test_live_fit(RE, hw):
    try:
        import lmfit
    except ImportError:
        raise pytest.skip('requires lmfit')

    def gaussian(x, A, sigma, x0):
        return A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))

    model = lmfit.Model(gaussian)
    init_guess = {'A': 2,
                  'sigma': lmfit.Parameter('sigma', 3, min=0),
                  'x0': -0.2}
    cb = LiveFit(model, 'det', {'x': 'motor'}, init_guess, update_every=50)
    RE(scan([hw.det], hw.motor, -1, 1, 50), cb)
    # results are in cb.result.values

    expected = {'A': 1, 'sigma': 1, 'x0': 0}
    for k, v in expected.items():
        assert np.allclose(cb.result.values[k], v, atol=1e-6)


def test_live_fit_multidim(RE, hw):
    try:
        import lmfit
    except ImportError:
        raise pytest.skip('requires lmfit')

    hw.motor1.delay = 0
    hw.motor2.delay = 0
    hw.det4.exposure_time = 0

    def gaussian(x, y, A, sigma, x0, y0):
        return A * np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))

    model = lmfit.Model(gaussian, ['x', 'y'])
    init_guess = {'A': 2,
                  'sigma': lmfit.Parameter('sigma', 3, min=0),
                  'x0': -0.2,
                  'y0': 0.3}
    cb = LiveFit(model, 'det4', {'x': 'motor1', 'y': 'motor2'}, init_guess,
                 update_every=50)
    RE(grid_scan([hw.det4],
                 hw.motor1, -1, 1, 10,
                 hw.motor2, -1, 1, 10, False),
       cb)

    expected = {'A': 1, 'sigma': 1, 'x0': 0, 'y0': 0}
    for k, v in expected.items():
        assert np.allclose(cb.result.values[k], v, atol=1e-6)


def test_live_plot_from_callbacks():
    import bluesky.callbacks.core
    # We don't want the shims in callbacks.core, see #1133 for discussion
    assert not hasattr(bluesky.callbacks.core, 'LivePlot')
    # We still want the shims in callbacks.__init__
    from bluesky.callbacks import LivePlot as LivePlotFromCallbacks
    assert LivePlotFromCallbacks is LivePlot

    # Make sure we can subclass it
    class FromCallbacks(LivePlotFromCallbacks):
        ...
    FromCallbacks('det', 'motor')


def test_live_fit_plot(RE, hw):
    try:
        import lmfit
    except ImportError:
        raise pytest.skip('requires lmfit')

    def gaussian(x, A, sigma, x0):
        return A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))

    model = lmfit.Model(gaussian)
    init_guess = {'A': 2,
                  'sigma': lmfit.Parameter('sigma', 3, min=0),
                  'x0': -0.2}
    livefit = LiveFit(model, 'det', {'x': 'motor'}, init_guess,
                      update_every=50)
    lfplot = LiveFitPlot(livefit, color='r')
    lplot = LivePlot('det', 'motor', ax=plt.gca(), marker='o', ls='none')
    RE(scan([hw.det], hw.motor, -1, 1, 50), [lplot, lfplot])
    expected = {'A': 1, 'sigma': 1, 'x0': 0}
    for k, v in expected.items():
        assert np.allclose(livefit.result.values[k], v, atol=1e-6)


@pytest.mark.parametrize('int_meth, stop_num, msg_num',
                         [('stop', 1, 5),
                          ('abort', 1, 5),
                          ('halt', 1, 3)])
def test_interrupted_with_callbacks(RE, int_meth, stop_num, msg_num):
    docs = defaultdict(list)

    def collector_cb(name, doc):
        nonlocal docs
        docs[name].append(doc)

    RE.msg_hook = MsgCollector()
    with pytest.raises(RunEngineInterrupted):
        RE(subs_wrapper(run_wrapper(pause()),
                        {'all': collector_cb}))
    getattr(RE, int_meth)()

    assert len(docs['start']) == 1
    assert len(docs['event']) == 0
    assert len(docs['descriptor']) == 0
    assert len(docs['stop']) == stop_num
    assert len(RE.msg_hook.msgs) == msg_num


def test_live_grid(RE, hw):
    hw.motor1.delay = 0
    hw.motor2.delay = 0
    RE(grid_scan([hw.det4],
                 hw.motor1, -3, 3, 6,
                 hw.motor2, -5, 5, 10, False),
       LiveGrid((6, 10), 'det4'))

    # Test the deprecated name.
    with pytest.warns(UserWarning):
        RE(grid_scan([hw.det4],
                     hw.motor1, -3, 3, 6,
                     hw.motor2, -5, 5, 10, False),
           LiveRaster((6, 10), 'det4'))


def test_live_scatter(RE, hw):
    RE(grid_scan([hw.det5],
                 hw.jittery_motor1, -3, 3, 6,
                 hw.jittery_motor2, -5, 5, 10, False),
       LiveScatter('jittery_motor1', 'jittery_motor2', 'det5',
                   xlim=(-3, 3), ylim=(-5, 5)))

    # Test the deprecated name.
    with pytest.warns(UserWarning):
        RE(grid_scan([hw.det5],
                     hw.jittery_motor1, -3, 3, 6,
                     hw.jittery_motor2, -5, 5, 10, False),
           LiveMesh('jittery_motor1', 'jittery_motor2', 'det5',
                    xlim=(-3, 3), ylim=(-5, 5)))


@pytest.mark.xfail(raises=NotImplementedError,
                   reason='This tests an API databroker has removed, and needs updating.')
def test_broker_base(RE, hw, db):
    class BrokerChecker(BrokerCallbackBase):
        def __init__(self, field, *, db=None):
            super().__init__(field, db=db)

        def event(self, doc):
            super().event(doc)
            assert isinstance(doc['data'][self.fields[0]], np.ndarray)

    RE.subscribe(db.insert)
    bc = BrokerChecker(('img',), db=db)
    RE.subscribe(bc)
    RE(count([hw.img]))


def test_broker_base_no_unpack(RE, hw, db):
    class BrokerChecker(BrokerCallbackBase):
        def __init__(self, field, *, db=None):
            super().__init__(field, db=db)

        def event(self, doc):
            super().event(doc)
            assert isinstance(doc['data'][self.fields[0]], np.ndarray)

    bc = BrokerChecker(('img',), db=db)
    RE.subscribe(bc)
    RE(count([hw.direct_img]))


def test_plotting_hints(RE, hw, db):
    '''This tests the run and checks that the correct hints are created.
    Hints are mainly created to help the BestEffortCallback in plotting the
    data.
    Use a callback to do the checking.
    '''
    dc = DocCollector()
    RE.subscribe(dc.insert)

    # check that the inner product hints are passed correctly
    hint = {'dimensions': [([hw.motor1.name, hw.motor2.name, hw.motor3.name],
                            'primary')]}
    RE(inner_product_scan([hw.det], 20,
                          hw.motor1, -1, 1,
                          hw.motor2, -1, 1,
                          hw.motor3, -2, 0))
    assert dc.start[-1]['hints'] == hint

    # check that the outer product (grid_scan) hints are passed correctly
    hint = {'dimensions': [(['motor1'], 'primary'),
                           (['motor2'], 'primary'),
                           (['motor3'], 'primary')]}
    # grid_scan passes "rectilinear" gridding as well;
    # make sure this is also passed
    output_hint = hint.copy()
    output_hint['gridding'] = 'rectilinear'
    RE(grid_scan([hw.det],
                 hw.motor1, -1, 1, 2,
                 hw.motor2, -1, 1, 2, True,
                 hw.motor3, -2, 0, 2, True))
    assert dc.start[-1]['hints'] == output_hint

    # check that if gridding is supplied, it's not overwritten by grid_scan
    hint = {'dimensions': [(['motor1'], 'primary'),
                           (['motor2'], 'primary'),
                           (['motor3'], 'primary')],
            'gridding': 'rectilinear'}
    RE(grid_scan([hw.det],
                 hw.motor1, -1, 1, 2,
                 hw.motor2, -1, 1, 2, True,
                 hw.motor3, -2, 0, 2, True))
    assert dc.start[-1]['hints'] == hint


def test_broken_table():
    start_doc, descriptor_factory, *_ = compose_run()
    desc, compose_event, _ = descriptor_factory(
        name="primary",
        data_keys={
            "x": {"dtype": "integer", "source": "", "shape": []},
            "y": {"dtype": "number", "source": "", "shape": []},
        },
    )
    ev1 = compose_event(
        data={"x": 1, "y": 2.0},
        timestamps={k: time.time() for k in ("x", "y")}
    )
    ev2 = compose_event(
        data={"x": 1, "y": 2},
        timestamps={k: time.time() for k in ("x", "y")}
    )
    ev3 = compose_event(
        data={"x": 1.0, "y": 2.0},
        timestamps={k: time.time() for k in ("x", "y")}
    )
    ev4 = compose_event(
        data={"x": 1.0, "y": "aardvark"},
        timestamps={k: time.time() for k in ("x", "y")},
    )

    sio = StringIO()
    LT = LiveTable(["x", "y"], out=lambda s: sio.write(s + "\n"))
    LT("start", start_doc)
    LT("descriptor", desc)
    LT("event", ev1)
    LT("event", ev2)
    LT("event", ev3)
    LT("event", ev4)

    sio.seek(0)
    lines = sio.readlines()
    # The instance of LiveTable will include two empty separator lines by
    # default
    assert len(lines) == 9
    for ln in lines[-2:]:
        assert ln.strip() == "failed to format row"


def test_callback_safe():
    @make_callback_safe
    def test_function(to_fail):
        if to_fail:
            raise RuntimeError
        return to_fail

    assert test_function(True) is None
    assert test_function(False) is False


def test_callback_safe_logger():
    from unittest.mock import MagicMock
    from types import SimpleNamespace

    logger = SimpleNamespace(exception=MagicMock())

    @make_callback_safe(logger=logger)
    def test_function(to_fail):
        if to_fail:
            raise RuntimeError
        return to_fail

    assert test_function(True) is None
    assert logger.exception.call_count == 1
    assert test_function(False) is False
    assert logger.exception.call_count == 1


@pytest.fixture
def EvilBaseClass(request):
    class MyError(RuntimeError):
        ...
    class EvilCallback(CallbackBase):
        my_exception_type = MyError

        def event(self, doc):
            raise MyError

        def bulk_events(self, doc):
            raise MyError

        def resource(self, doc):
            raise MyError

        def datum(self, doc):
            raise MyError

        def bulk_datum(self, doc):
            raise MyError

        def descriptor(self, doc):
            raise MyError

        def start(self, doc):
            raise MyError

        def stop(self, doc):
            raise MyError

        def event_page(self, doc):
            raise MyError

        def datum_page(self, doc):
            raise MyError

    return EvilCallback


def test_callbackclass(EvilBaseClass):
    ecb = EvilBaseClass()

    for n in DocumentNames:
        with pytest.raises(EvilBaseClass.my_exception_type):
            ecb(n.name, {})


def test_callbackclass_safe(EvilBaseClass):
    @make_class_safe
    class SafeEvilBaseClass(EvilBaseClass):
        ...

    scb = SafeEvilBaseClass()
    for n in DocumentNames:
        scb(n.name, {})


def test_callbackclass_safe_logger(EvilBaseClass):
    logger = MagicMock()

    @make_class_safe(logger=logger)
    class SafeEvilBaseClass2(EvilBaseClass):
        ...

    scb = SafeEvilBaseClass2()
    for n in DocumentNames:
        scb(n.name, {})

    assert logger.exception.call_count == len(DocumentNames)


@pytest.mark.parametrize("strict", ["1", None])
@pytest.mark.parametrize(
    "documents",
    (
        list(
            set(
                tuple(sorted(x, key=lambda x: x.name))
                for x in permutations(DocumentNames, 1)
            )
        )
        + list(
            set(
                tuple(sorted(x, key=lambda x: x.name))
                for x in permutations(DocumentNames, 2)
            )
        )
        + list(
            set(
                tuple(sorted(x, key=lambda x: x.name))
                for x in permutations(DocumentNames, 3)
            )
        )
        + [list(DocumentNames)]
    ),
)
def test_callbackclass_safe_filtered(EvilBaseClass, documents, monkeypatch, strict):
    if strict is not None:
        monkeypatch.setenv("BLUESKY_DEBUG_CALLBACKS", strict)
    else:
        monkeypatch.delenv("BLUESKY_DEBUG_CALLBACKS", raising=False)
    logger = MagicMock()

    @make_class_safe(logger=logger, to_wrap=tuple(x.name for x in documents))
    class SafeEvilBaseClass2(EvilBaseClass):
        ...

    scb = SafeEvilBaseClass2()
    for n in documents:
        if strict:
            with pytest.raises(EvilBaseClass.my_exception_type):
                scb(n.name, {})
        else:
            scb(n.name, {})
    for n in set(DocumentNames) - set(documents):
        with pytest.raises(EvilBaseClass.my_exception_type):
            scb(n.name, {})

    assert logger.exception.call_count == len(documents)


def test_in_plan_qt_callback(RE, hw):
    from bluesky.callbacks.mpl_plotting import _get_teleporter

    _get_teleporter()

    def my_plan():
        motor = hw.motor
        det = hw.det

        motor.delay = 1

        plan = bp.scan([det], motor, -5, 5, 25)
        plan = subs_wrapper(
            bp.scan([det], motor, -5, 5, 25),
            LivePlot(det.name, motor.name)
        )
        return (yield from plan)

    RE(my_plan())
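

if __name__ == "__main__":
    # A minimal sketch of the safety decorator exercised above, runnable
    # outside of pytest. The logger name and the fragile callback here are
    # hypothetical; the behavior mirrors test_callback_safe_logger: a wrapped
    # callback logs its exception and returns None instead of raising.
    import logging

    demo_logger = logging.getLogger("demo")

    @make_callback_safe(logger=demo_logger)
    def fragile_cb(name, doc):
        return len(doc["data"])  # raises KeyError on non-event documents

    assert fragile_cb("event", {"data": {"det": 1.0}}) == 1
    assert fragile_cb("start", {"uid": "abc"}) is None  # logged, suppressed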
from django.db import models

# Create your models here.
from model_utils.models import TimeStampedModel
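

# A minimal sketch of a model built on the import above. The `Item` model and
# its fields are hypothetical, shown only to illustrate the intent of the
# import: subclassing TimeStampedModel contributes auto-managed `created` and
# `modified` datetime fields.
class Item(TimeStampedModel):
    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)

    def __str__(self):
        return self.name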
// import { Link } from "gatsby"
import React from "react";

import prev from "../img/cursos/taller-hacks-cover.jpg";

const BannerTaller = ({ showImage }) => {
  return (
    <>
      <div className="row no-gutters">
        <div className="col-md-6">
          <p className="text-light text-uppercase mb-0 font-sm">TALLER EN ZOOM - 17 Agosto - 6pm</p>
          <h2 className="mb-2 text-primary">
            Hacks del SAT para Freelancers (2da fecha)
          </h2>
          {showImage && (
            <img
              src={prev}
              alt="Taller Hacks del SAT"
              className="img-fluid mb-4"
              style={{ borderRadius: 24, width: "80%" }}
            />
          )}
          <p className="text-light text-primary">
            <strong>Temario</strong>
          </p>
          <ul className="mb-3">
            <li className="text-light font-sm mb-1">Cómo revisar tu Buzón Tributario.</li>
            <li className="text-light font-sm mb-1">Dónde encontrar tus documentos más importantes dentro del portal.</li>
            <li className="text-light font-sm mb-1">Cómo realizar una factura (CFDI).</li>
            <li className="text-light font-sm mb-1">¿Qué puedo deducir como freelancer?</li>
            <li className="text-light font-sm mb-1">¿Estoy declarando de manera correcta?</li>
            <li className="text-light font-sm mb-1">¿Qué se llena en una declaración y qué impuestos se presentan?</li>
            <li className="text-light font-sm mb-1">Sección de preguntas y consultas personales</li>
          </ul>
          <div className="bg-primary text-secondary p-3 font-xs me-5 mb-4">
            <ul>
              <li>El taller se impartirá por Zoom y tendrá una duración de 2hrs</li>
              <li>La liga se te enviará una vez hecha tu compra</li>
              <li>
                Incluye certificado, material de apoyo, y la liga de la grabación
                por si no pudiste asistir.
              </li>
            </ul>
          </div>
        </div>
        <div className="col-md-6">
          <p className="text-light text-primary">
            <strong>Destinado a freelancers y emprendedores (Personas Físicas)...</strong>
          </p>
          <ul className="mb-4">
            <li className="font-sm mb-1">Que llevan su propia contabilidad o quieren aprender a hacerlo.</li>
            <li className="font-sm mb-1">Apenas se incorporaron al SAT.</li>
            <li className="font-sm mb-1">Ya tienen contador/a pero quieren aprender a usar el portal.</li>
          </ul>
          <p className="text-light text-primary">
            <strong>Requisitos *</strong>
          </p>
          <ul className="mb-2">
            <li className="font-sm mb-1">
              Tener conocimientos básicos sobre el SAT (si no los tienes, puedes
              tomar nuestro curso grabado <a href="#">aquí</a>)
            </li>
            <li className="font-sm mb-1">Laptop</li>
            <li className="font-sm mb-1">E.firma y contraseña para acceder al SAT y/o tu RFC y contraseña.</li>
          </ul>
          <p className="text-primary mb-4 font-xs">*Si no cuentas con los requisitos podrás estar como espectador interactuando y tomando notas.</p>
          <div className="mb-4">
            <h4>
              Costo: <span className="text-primary">$790</span>
            </h4>
          </div>
          <div className="d-flex flex-column flex-md-row">
            <a
              className="btn btn-primary btn-lg mb-3 me-3"
              style={{ width: "100%" }}
              href="https://lahorasat.podia.com/ba5f7661-7174-48f0-af8a-c1352d91be2c"
              data-podia-embed="link"
            >
              Pagar en línea
            </a>
            <a
              className="btn btn-outline-primary btn-lg mb-3"
              style={{ width: "100%" }}
              target="_blank"
              rel="noopener noreferrer"
              href="https://wa.me/+525510722129"
            >
              Agendar por Whatsapp
            </a>
          </div>
        </div>
      </div>
    </>
  );
};

export default BannerTaller;
// Copyright (C) 2021 Igalia, S.L. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.

/*---
esid: sec-temporal.plaindatetime.prototype.since
description: The dateUntil() method on the calendar is called with a copy of the options bag
features: [Temporal]
---*/

const originalOptions = {
  largestUnit: "year",
  shouldBeCopied: {},
};
let called = false;

class Calendar extends Temporal.Calendar {
  constructor() {
    super("iso8601");
  }

  dateUntil(d1, d2, options) {
    called = true;
    assert.notSameValue(options, originalOptions, "options bag should be a copy");
    assert.sameValue(options.shouldBeCopied, originalOptions.shouldBeCopied,
      "options bag should be a shallow copy");
    return new Temporal.Duration();
  }
}
const calendar = new Calendar();
const earlier = new Temporal.PlainDateTime(2000, 5, 2, 12, 34, 56, 987, 654, 321, calendar);
const later = new Temporal.PlainDateTime(2001, 6, 3, 13, 35, 57, 988, 655, 322, calendar);
earlier.since(later, originalOptions);
assert(called, "calendar.dateUntil must be called");
'use strict';

var http = require('http');

function IoResponse(respond) {
  this.respond = respond || function() {};
}

IoResponse.prototype.json = function(body) {
  return this.respond(body);
};

IoResponse.prototype.jsonp = IoResponse.prototype.json;
IoResponse.prototype.send = IoResponse.prototype.json;
IoResponse.prototype.end = IoResponse.prototype.json;

IoResponse.prototype.status = function(statusCode) {
  return this;
};

IoResponse.prototype.sendStatus = function(statusCode) {
  var body = http.STATUS_CODES[statusCode] || String(statusCode);
  return this.respond(body);
};

module.exports = IoResponse;
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.

Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385

The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Identity Mappings in Deep Residual Networks. arXiv: 1603.05027

The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight
layer.

Typical use:

   from tensorflow.contrib.slim.nets import resnet_v2

ResNet-101 for image classification into 1000 classes:

   # inputs has shape [batch, 224, 224, 3]
   with slim.arg_scope(resnet_v2.resnet_arg_scope()):
      net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)

ResNet-101 for semantic segmentation into 21 classes:

   # inputs has shape [batch, 513, 513, 3]
   with slim.arg_scope(resnet_v2.resnet_arg_scope(is_training)):
      net, end_points = resnet_v2.resnet_v2_101(inputs,
                                                21,
                                                is_training=False,
                                                global_pool=False,
                                                output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nets import resnet_utils

slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope


@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
""" with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact') if depth == depth_in: shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None, activation_fn=None, scope='shortcut') residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1') residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2') residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3') output = shortcut + residual return slim.utils.collect_named_outputs(outputs_collections, sc.original_name_scope, output) def resnet_v2(inputs, blocks, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope=None): """Generator for v2 (preactivation) ResNet models. This function generates a family of ResNet v2 models. See the resnet_v2_*() methods for specific model instantiations, obtained by selecting different block instantiations that produce ResNets of various depths. Training for image classification on Imagenet is usually done with [224, 224] inputs, resulting in [7, 7] feature maps at the output of the last ResNet block for the ResNets defined in [1] that have nominal stride equal to 32. However, for dense prediction tasks we advise that one uses inputs with spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In this case the feature maps at the ResNet output will have spatial shape [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1] and corners exactly aligned with the input image corners, which greatly facilitates alignment of the features to the image. Using as input [225, 225] images results in [8, 8] feature maps at the output of the last ResNet block. For dense prediction tasks, the ResNet needs to run in fully-convolutional (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all have nominal stride equal to 32 and a good choice in FCN mode is to use output_stride=16 in order to increase the density of the computed features at small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. blocks: A list of length equal to the number of ResNet blocks. Each element is a resnet_utils.Block object describing the units in the block. num_classes: Number of predicted classes for classification tasks. If None we return the features before the logit layer. is_training: whether is training or not. global_pool: If True, we perform global average pooling before computing the logits. Set to True for image classification, False for dense prediction. output_stride: If None, then the output will be computed at the nominal network stride. If output_stride is not None, it specifies the requested ratio of input to output spatial resolution. include_root_block: If True, include the initial convolution followed by max-pooling, if False excludes it. If excluded, `inputs` should be the results of an activation-less convolution. spatial_squeeze: if True, logits is of shape [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. 
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and
      width_in, else both height_out and width_out equal one. If num_classes is
      None, then net is the output of the last ResNet block, potentially after
      global average pooling. If num_classes is not None, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.name + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with slim.arg_scope([slim.conv2d],
                              activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output.
        # See Appendix of [2].
        net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v2.default_image_size = 224


def resnet_v2_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v2 bottleneck block.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v2 bottleneck block.
  """
  return resnet_utils.Block(scope, bottleneck, [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': 1
  }] * (num_units - 1) + [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': stride
  }])


def resnet_v2_50(inputs,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='resnet_v2_50'):
  """ResNet-50 model of [1].
  See resnet_v2() for arg and return description.
  """
  blocks = [
      resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
      resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
      resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size


def resnet_v2_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_101'):
  """ResNet-101 model of [1].

  See resnet_v2() for arg and return description.
  """
  blocks = [
      resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
      resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
      resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size


def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_152'):
  """ResNet-152 model of [1].

  See resnet_v2() for arg and return description.
  """
  blocks = [
      resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
      resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
      resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size


def resnet_v2_200(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_200'):
  """ResNet-200 model of [2].

  See resnet_v2() for arg and return description.
  """
  blocks = [
      resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
      resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
      resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
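

if __name__ == '__main__':
  # A minimal usage sketch following the module docstring's "typical use".
  # The placeholder shape and the 1000-class head are illustrative
  # assumptions; the APIs are the TF 1.x / contrib-slim ones used above.
  inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
  with slim.arg_scope(resnet_arg_scope()):
    net, end_points = resnet_v2_50(inputs, num_classes=1000,
                                   is_training=False)
  # With spatial_squeeze=True (the default), `net` holds pre-softmax logits
  # of shape [batch, 1000]; `end_points['predictions']` is the softmax.
  print(net.get_shape())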
module.exports = function ({ config }) {
  config.module.rules.push({
    test: /(\/|\\)stories(\/|\\).*\.tsx$/,
    loaders: [
      {
        loader: require.resolve('@storybook/addon-storysource/loader'),
        options: {
          parser: 'typescript',
          prettierConfig: {
            printWidth: 120,
            singleQuote: false,
          },
        },
      },
    ],
    enforce: 'pre',
  });

  return config;
};
from marshmallow import fields, post_load, validate, validates

from .base import BaseSchema


class ItemSchema(BaseSchema):
    id = fields.Integer()
    user_id = fields.Integer()
    category_id = fields.Integer()
    name = fields.String(required=True,
                         validate=validate.Length(min=1, max=100))
    description = fields.String(required=True,
                                validate=validate.Length(min=1, max=100))

    @validates("name")
    def name_validator(self, value):
        self.ascii_validator(value)

    @validates("description")
    def description_validator(self, value):
        self.ascii_validator(value)

    @post_load
    def strip_data(self, data, **__):
        data["description"] = data["description"].strip()
        data["name"] = data["name"].strip()
        return data
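

if __name__ == "__main__":
    # A usage sketch (run via `python -m ...` so the relative import above
    # resolves). The payload below is made up, and `BaseSchema` is assumed to
    # supply the `ascii_validator` referenced by the @validates hooks.
    from marshmallow import ValidationError

    schema = ItemSchema()
    try:
        item = schema.load({
            "name": "  Garden chair  ",
            "description": "Solid teak, lightly used.",
            "category_id": 3,
        })
        assert item["name"] == "Garden chair"  # stripped by strip_data
    except ValidationError as err:
        print(err.messages)  # per-field validation errors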
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json

option_a = os.getenv('OPTION_A', "Apple")
option_b = os.getenv('OPTION_B', "Samsung")
hostname = socket.gethostname()
version = 'v2'

app = Flask(__name__)


def get_redis():
    if not hasattr(g, 'redis'):
        g.redis = Redis(host="redis", db=0, socket_timeout=5)
    return g.redis


@app.route("/", methods=['POST', 'GET'])
def hello():
    voter_id = request.cookies.get('voter_id')
    if not voter_id:
        # strip only the '0x' prefix; the former [2:-1] slice was a Python 2
        # leftover (trailing 'L') that dropped the last hex digit on Python 3
        voter_id = hex(random.getrandbits(64))[2:]

    vote = None

    if request.method == 'POST':
        redis = get_redis()
        vote = request.form['vote']
        data = json.dumps({'voter_id': voter_id, 'vote': vote})
        redis.rpush('votes', data)

    resp = make_response(render_template(
        'index.html',
        option_a=option_a,
        option_b=option_b,
        hostname=hostname,
        vote=vote,
        version=version,
    ))
    resp.set_cookie('voter_id', voter_id)
    return resp


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
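

# A matching consumer sketch (illustrative only, not part of this app): a
# separate worker process would drain the 'votes' list that hello() fills
# via RPUSH.
#
#     import json
#     from redis import Redis
#
#     redis = Redis(host="redis", db=0, socket_timeout=5)
#     while True:
#         _, raw = redis.blpop("votes")  # blocks until a vote arrives
#         ballot = json.loads(raw)
#         print(ballot["voter_id"], "voted for", ballot["vote"])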
import sqlite3

from fuzzywuzzy import fuzz


class SearchCursor:
    '''To be used with SQLite'''

    def __init__(self, db_name):
        self.db_name = db_name
        self.connection = None
        self.cursor = None

    @staticmethod
    def _similarityScore(s1, s2):
        return fuzz.token_set_ratio(s1, s2)

    def __enter__(self):
        self.connection = sqlite3.connect(self.db_name)
        self.connection.create_function("SIMILARITYSCORE", 2,
                                        self._similarityScore)
        self.cursor = self.connection.cursor()
        return self.cursor

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.cursor:
            self.cursor.close()
        if self.connection:
            self.connection.close()
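

if __name__ == "__main__":
    # Usage sketch: "catalog.db" and its `items` table are hypothetical.
    # SIMILARITYSCORE is the SQL function registered in __enter__, so fuzzy
    # ranking can be expressed directly in the query.
    query = "blu tooth speaker"
    with SearchCursor("catalog.db") as cur:
        cur.execute(
            "SELECT name, SIMILARITYSCORE(name, ?) AS score "
            "FROM items ORDER BY score DESC LIMIT 5",
            (query,),
        )
        for name, score in cur.fetchall():
            print(score, name)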
import time

import vivisect
import vivisect.cli as viv_cli
import vivisect.qt.main as viv_qt_main


def remotemain(appsrv):
    # The "appsrv" is a remote workspace...
    vw = viv_cli.VivCli()
    vw.initWorkspaceClient(appsrv)

    # If we are interactive, let's turn on extended output...
    vw.verbose = True

    viv_qt_main.main(vw)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings

import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union

from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *

__all__ = ['PrivateEndpointConnection']

warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:keyvault:PrivateEndpointConnection'.""", DeprecationWarning)


class PrivateEndpointConnection(pulumi.CustomResource):
    warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:keyvault:PrivateEndpointConnection'.""", DeprecationWarning)

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 vault_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Private endpoint connection resource.
        Latest API Version: 2019-09-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] etag: Modified whenever there is a change in the state of private endpoint connection.
        :param pulumi.Input[str] private_endpoint_connection_name: Name of the private endpoint connection associated with the key vault.
        :param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: Approval state of the private link connection.
        :param pulumi.Input[str] resource_group_name: Name of the resource group that contains the key vault.
        :param pulumi.Input[str] vault_name: The name of the key vault.
        """
        pulumi.log.warn("""PrivateEndpointConnection is deprecated: The 'latest' version is deprecated.
        Please migrate to the resource in the top-level module: 'azure-native:keyvault:PrivateEndpointConnection'.""")
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['etag'] = etag
            __props__['private_endpoint_connection_name'] = private_endpoint_connection_name
            __props__['private_link_service_connection_state'] = private_link_service_connection_state
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if vault_name is None and not opts.urn:
                raise TypeError("Missing required property 'vault_name'")
            __props__['vault_name'] = vault_name
            __props__['location'] = None
            __props__['name'] = None
            __props__['private_endpoint'] = None
            __props__['provisioning_state'] = None
            __props__['tags'] = None
            __props__['type'] = None
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:keyvault/latest:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:keyvault:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:keyvault:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:keyvault/v20180214:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:keyvault/v20180214:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:keyvault/v20190901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:keyvault/v20190901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:keyvault/v20200401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:keyvault/v20200401preview:PrivateEndpointConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PrivateEndpointConnection, __self__).__init__(
            'azure-native:keyvault/latest:PrivateEndpointConnection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
        """
        Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["etag"] = None
        __props__["location"] = None
        __props__["name"] = None
        __props__["private_endpoint"] = None
        __props__["private_link_service_connection_state"] = None
        __props__["provisioning_state"] = None
        __props__["tags"] = None
        __props__["type"] = None
        return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[Optional[str]]:
        """
        Modified whenever there is a change in the state of private endpoint connection.
""" return pulumi.get(self, "etag") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Azure location of the key vault resource. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the key vault resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateEndpoint") def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]: """ Properties of the private endpoint object. """ return pulumi.get(self, "private_endpoint") @property @pulumi.getter(name="privateLinkServiceConnectionState") def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStateResponse']]: """ Approval state of the private link connection. """ return pulumi.get(self, "private_link_service_connection_state") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ Provisioning state of the private endpoint connection. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> pulumi.Output[Mapping[str, str]]: """ Tags assigned to the key vault resource. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type of the key vault resource. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop