file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.js | var mainJs = function() {
if ( !Detector.webgl ) {
container = document.createElement( 'div' );
container =
$('<div class="addGetWebGLMessage">' +
'あなたのブラウザは最新ではありませんので、インターネット上にあるイケてる技術をみることはできません。<br>' +
'Your browser does not seem to support WebGL. Take a step to the future.' +
'<div>')
$('body').append(container);
return;
}
var container;
var camera, scene, renderer;
var group, text, plane;
var speed = 50;
var pointLight;
var targetRotation = 0;
var targetRotationOnMouseDown = 0;
var mouseX = 0;
var mouseXOnMouseDown = 0;
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
var delta = 1, clock = new THREE.Clock();
var circleShape, particleCloud, sparksEmitter, emitterPos;
var _rotation = 0;
var timeOnShapePath = 0;
var composer;
var effectBlurX, effectBlurY, hblur, vblur;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
// CAMERA
camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 1, 2000 );
camera.position.set( 0, 150, 400 );
// SCENE
scene = new THREE.Scene();
// LIGHTS
var directionalLight = new THREE.DirectionalLight( 0xffffff, 0.5 );
directionalLight.position.set( 0, -1, 1 );
directionalLight.position.normalize();
scene.add( directionalLight );
pointLight = new THREE.PointLight( 0xffffff, 2, 300 );
pointLight.position.set( 0, 0, 0 );
scene.add( pointLight );
group = new THREE.Group();
scene.add( group );
// Create particle objects for Three.js
var particlesLength = 70000;
var particles = new THREE.Geometry();
function newpos( x, y, z ) {
return new THREE.Vector3( x, y, z );
}
var Pool = {
__pools: [],
// Get a new Vector
get: function() {
if ( this.__pools.length > 0 ) {
return this.__pools.pop();
}
console.log( "pool ran out!" )
return location.reload();
},
// Release a vector back into the pool
add: function( v ) {
this.__pools.push( v );
}
};
for ( i = 0; i < particlesLength; i ++ ) {
particles.vertices.push( newpos( Math.random() * 200 - 100, Math.random() * 100 + 150, Math.random() * 50 ) );
Pool.add( i );
}
// Create pools of vectors
attributes = {
size: { type: 'f', value: [] },
pcolor: { type: 'c', value: [] }
};
var sprite = generateSprite() ;
texture = new THREE.Texture( sprite );
texture.needsUpdate = true;
uniforms = {
texture: { type: "t", value: texture }
};
// PARAMETERS
// Steadycounter
// Life
// Opacity
// Hue Speed
// Movement Speed
function generateSprite() {
var canvas = document.createElement( 'canvas' );
canvas.width = 128;
canvas.height = 128;
var context = canvas.getContext( '2d' );
// Just a square, doesnt work too bad with blur pp.
// context.fillStyle = "white";
// context.strokeStyle = "white";
// context.fillRect(0, 0, 63, 63) ;
// Heart Shapes are not too pretty here
// var x = 4, y = 0;
// context.save();
// context.scale(8, 8); // Scale so canvas render can redraw within bounds
// context.beginPath();
// context.bezierCurveTo( x + 2.5, y + 2.5, x + 2.0, y, x, y );
// context.bezierCurveTo( x - 3.0, y, x - 3.0, y + 3.5,x - 3.0,y + 3.5 );
// context.bezierCurveTo( x - 3.0, y + 5.5, x - 1.0, y + 7.7, x + 2.5, y + 9.5 );
// context.bezierCurveTo( x + 6.0, y + 7.7, x + 8.0, y + 5.5, x + 8.0, y + 3.5 );
// context.bezierCurveTo( x + 8.0, y + 3.5, x + 8.0, y, x + 5.0, y );
// context.bezierCurveTo( x + 3.5, y, x + 2.5, y + 2.5, x + 2.5, y + 2.5 );
context.beginPath();
context.arc( 64, 64, 60, 0, Math.PI * 2, false) ;
context.lineWidth = 0.5; //0.05
context.stroke();
context.restore();
var gradient = context.createRadialGradient( canvas.width / 2, canvas.height / 2, 0, canvas.width / 2, canvas.height / 2, canvas.width / 2 );
gradient.addColorStop( 0, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.2, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.4, 'rgba(200,200,200,1)' );
gradient.addColorStop( 1, 'rgba(0,0,0,1)' );
context.fillStyle = gradient;
context.fill();
return canvas;
}
var shaderMaterial = new THREE.ShaderMaterial( {
uniforms: uniforms,
attributes: attributes,
vertexShader: document.getElementById( 'vertexshader' ).textContent,
fragmentShader: document.getElementById( 'fragmentshader' ).textContent,
blending: THREE.AdditiveBlending,
depthWrite: false,
transparent: true
});
particleCloud = new THREE.PointCloud( particles, shaderMaterial );
var vertices = particleCloud.geometry.vertices;
var values_size = attributes.size.value;
var values_color = attributes.pcolor.value;
for( var v = 0; v < vertices.length; v ++ ) {
values_size[ v ] = 50;
values_color[ v ] = new THREE.Color( 0x000000 );
particles.vertices[ v ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
}
group.add( particleCloud );
particleCloud.y = 800;
// Create Particle Systems
// EMITTER STUFF
// circleShape
var x = 0, y = 0;
var circleRadius = 40;
circleShape = new THREE.Shape();
for (var i = 0; i < 16; i++) {
var pct = (i + 1) / 16;
var theta = pct * Math.PI * 2.0;
var x = circleRadius * Math.cos(theta) + 20;
var y = circleRadius * Math.sin(theta) + 50;
if (i == 0) {
circleShape.moveTo(x, y);
} else {
circleShape.lineTo(x, y);
}
}
var hue = 0;
var lightness = 0;
var setTargetParticle = function() {
var target = Pool.get();
values_size[ target ] = Math.random() * 200 + 100;
return target;
};
var onParticleCreated = function( p ) {
var position = p.position;
p.target.position = position;
var target = p.target;
if ( target ) {
hue += 0.0003 * delta;
if ( hue > 0.1 ) hue -= 0.1;
lightness += 0.0003 * delta;
if ( lightness > 0.05 ) lightness -= 0.05;
// TODO Create a PointOnShape Action/Zone in the particle engine
timeOnShapePath += 0.00035 * delta;
if ( timeOnShapePath > 1 ) timeOnShapePath -= 1;
var pointOnShape = circleShape.getPointAt( timeOnShapePath );
if (!emitterpos) emitterpos = new THREE.Vector3( 0, 0, 0 );
if (!pointOnShape) pointOnShape = circleShape.getPointAt( 0.00035 * delta );
emitterpos.x = pointOnShape.x * 5 - 100;
emitterpos.y = -pointOnShape.y * 5 + 400;
pointLight.position.x = emitterpos.x;
pointLight.position.y = emitterpos.y;
pointLight.position.z = 100;
particles.vertices[ target ] = p.position;
values_color[ target ].setHSL( hue, 0.6, lightness );
pointLight.color.setHSL( hue, 0.6, lightness );
};
};
var onParticleDead = function( particle ) {
var target = particle.target;
if ( target ) {
// Hide the particle
values_color[ target ].setRGB( 0, 0, 0 );
particles.vertices[ target ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
// Mark particle system as available by returning to pool
Pool.add( particle.target );
}
};
var engineLoopUpdate = function() {
};
sparksEmitter = new SPARKS.Emitter( new SPARKS.SteadyCounter( 800 ) );
emitterpos = new THREE.Vector3( 0, 0, 0 );
sparksEmitter.addInitializer( new SPARKS.Position( new SPARKS.PointZone( emitterpos ) ) );
sparksEmitter.addInitializer( new SPARKS.Lifetime( 1, 15 ));
sparksEmitter.addInitializer( new SPARKS.Target( null, setTargetParticle ) );
sparksEmitter.addInitializer( new SPARKS.Velocity( new SPARKS.PointZone( new THREE.Vector3( 0, -5, 1 ) ) ) );
sparksEmitter.addAction( new SPARKS.Age() );
sparksEmitter.addAction( new SPARKS.Accelerate( 0, 0, -50 ) );
sparksEmitter.addAction( new SPARKS.Move() );
sparksEmitter.addAction( new SPARKS.RandomDrift( 20, 100, 2000 ) );
sparksEmitter.addCallback( "created", onParticleCreated );
sparksEmitter.addCallback( "dead", onParticleDead );
sparksEmitter.start();
// End Particles
renderer = new THREE.WebGLRenderer();
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
// POST PROCESSING
var effectFocus = new THREE.ShaderPass( THREE.FocusShader );
var effectCopy = new THREE.ShaderPass( THREE.CopyShader );
effectFilm = new THREE.FilmPass( 0.5, 0.25, 2048, false );
var shaderBlur = THREE.TriangleBlurShader;
effectBlurX = new THREE.ShaderPass( shaderBlur, 'texture' );
effectBlurY = new THREE.ShaderPass( shaderBlur, 'texture' );
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
hblur = new THREE.ShaderPass( THREE.HorizontalBlurShader );
vblur = new THREE.ShaderPass( THREE.VerticalBlurShader);
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
effectFocus.uniforms[ 'sampleDistance' ].value = 0.99; //0.94
effectFocus.uniforms[ 'waveFactor' ].value = 0.003; //0.00125
var renderScene = new THREE.RenderPass( scene, camera );
composer = new THREE.EffectComposer( renderer );
composer.addPass( renderScene );
composer.addPass( hblur );
composer.addPass( vblur );
vblur.renderToScreen = true;
effectBlurY.renderToScreen = true;
effectFocus.renderToScreen = true;
effectCopy.renderToScreen = true;
effectFilm.renderToScreen = true;
document.addEventListener( 'touchstart', onDocumentTouchStart, false );
document.addEventListener( 'touchmove', onDocumentTouchMove, false );
document.addEventListener( 'mousemove', onDocumentMouseMove, false );
//
window.addEventListener( 'resize', onWindowResize, false );
}
function onWindowResize() {
windowHalfX = window.innerWidth / 2;
windowHalfY = window.innerHeight / 2;
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
//
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
composer.reset();
}
function onDocumentMouseDown( event ) {
event.preventDefault();
mouseXOnMouseDown = event.clientX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
if ( sparksEmitter.isRunning() ) {
sparksEmitter.stop();
} else {
sparksEmitter.start();
}
}
function onDocumentMouseMove( event ) {
mouseX = event.clientX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mou | reventDefault();
mouseXOnMouseDown = event.touches[ 0 ].pageX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
}
}
function onDocumentTouchMove( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseX = event.touches[ 0 ].pageX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.05;
}
}
//
function animate() {
requestAnimationFrame( animate );
render();
}
function render() {
delta = speed * clock.getDelta();
particleCloud.geometry.verticesNeedUpdate = true;
attributes.size.needsUpdate = true;
attributes.pcolor.needsUpdate = true;
// Pretty cool effect if you enable this
// particleCloud.rotation.y += 0.05;
group.rotation.y += ( targetRotation - group.rotation.y ) * 0.05;
renderer.clear();
// renderer.render( scene, camera );
composer.render( 0.1 );
}
}
try {
mainJs();
} catch(e) {
location.reload();
}
| seX - mouseXOnMouseDown ) * 0.02;
}
function onDocumentTouchStart( event ) {
if ( event.touches.length === 1 ) {
event.p | identifier_body |
main.js | var mainJs = function() {
if ( !Detector.webgl ) {
container = document.createElement( 'div' );
container =
$('<div class="addGetWebGLMessage">' +
'あなたのブラウザは最新ではありませんので、インターネット上にあるイケてる技術をみることはできません。<br>' +
'Your browser does not seem to support WebGL. Take a step to the future.' +
'<div>')
$('body').append(container);
return;
}
var container;
var camera, scene, renderer;
var group, text, plane;
var speed = 50;
var pointLight;
var targetRotation = 0;
var targetRotationOnMouseDown = 0;
var mouseX = 0;
var mouseXOnMouseDown = 0;
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
var delta = 1, clock = new THREE.Clock();
var circleShape, particleCloud, sparksEmitter, emitterPos;
var _rotation = 0;
var timeOnShapePath = 0;
var composer;
var effectBlurX, effectBlurY, hblur, vblur;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
// CAMERA
camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 1, 2000 );
camera.position.set( 0, 150, 400 );
// SCENE
scene = new THREE.Scene();
// LIGHTS
var directionalLight = new THREE.DirectionalLight( 0xffffff, 0.5 );
directionalLight.position.set( 0, -1, 1 );
directionalLight.position.normalize();
scene.add( directionalLight );
pointLight = new THREE.PointLight( 0xffffff, 2, 300 );
pointLight.position.set( 0, 0, 0 );
scene.add( pointLight );
group = new THREE.Group();
scene.add( group );
// Create particle objects for Three.js
var particlesLength = 70000;
var particles = new THREE.Geometry();
function newpos( x, y, z ) {
return new THREE.Vector3( x, y, z );
}
var Pool = {
__pools: [],
// Get a new Vector
get: function() {
if ( this.__pools.length > 0 ) {
return this.__pools.pop();
}
console.log( "pool ran out!" )
return location.reload();
},
// Release a vector back into the pool
add: function( v ) {
this.__pools.push( v );
}
};
for ( i = 0; i < particlesLength; i ++ ) {
particles.vertices.push( newpos( Math.random() * 200 - 100, Math.random() * 100 + 150, Math.random() * 50 ) );
Pool.add( i );
}
// Create pools of vectors
attributes = {
size: { type: 'f', value: [] },
pcolor: { type: 'c', value: [] }
};
var sprite = generateSprite() ;
texture = new THREE.Texture( sprite );
texture.needsUpdate = true;
uniforms = {
texture: { type: "t", value: texture }
};
// PARAMETERS
// Steadycounter
// Life
// Opacity
// Hue Speed
// Movement Speed
function generateSprite() {
var canvas = document.createElement( 'canvas' );
canvas.width = 128;
canvas.height = 128;
var context = canvas.getContext( '2d' );
// Just a square, doesnt work too bad with blur pp.
// context.fillStyle = "white";
// context.strokeStyle = "white";
// context.fillRect(0, 0, 63, 63) ;
// Heart Shapes are not too pretty here
// var x = 4, y = 0;
// context.save();
// context.scale(8, 8); // Scale so canvas render can redraw within bounds
// context.beginPath();
// context.bezierCurveTo( x + 2.5, y + 2.5, x + 2.0, y, x, y );
// context.bezierCurveTo( x - 3.0, y, x - 3.0, y + 3.5,x - 3.0,y + 3.5 );
// context.bezierCurveTo( x - 3.0, y + 5.5, x - 1.0, y + 7.7, x + 2.5, y + 9.5 );
// context.bezierCurveTo( x + 6.0, y + 7.7, x + 8.0, y + 5.5, x + 8.0, y + 3.5 );
// context.bezierCurveTo( x + 8.0, y + 3.5, x + 8.0, y, x + 5.0, y );
// context.bezierCurveTo( x + 3.5, y, x + 2.5, y + 2.5, x + 2.5, y + 2.5 );
context.beginPath();
context.arc( 64, 64, 60, 0, Math.PI * 2, false) ;
context.lineWidth = 0.5; //0.05
context.stroke();
context.restore();
var gradient = context.createRadialGradient( canvas.width / 2, canvas.height / 2, 0, canvas.width / 2, canvas.height / 2, canvas.width / 2 );
gradient.addColorStop( 0, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.2, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.4, 'rgba(200,200,200,1)' );
gradient.addColorStop( 1, 'rgba(0,0,0,1)' );
context.fillStyle = gradient;
context.fill();
return canvas;
}
var shaderMaterial = new THREE.ShaderMaterial( {
uniforms: uniforms,
attributes: attributes,
vertexShader: document.getElementById( 'vertexshader' ).textContent,
fragmentShader: document.getElementById( 'fragmentshader' ).textContent,
blending: THREE.AdditiveBlending,
depthWrite: false,
transparent: true
});
particleCloud = new THREE.PointCloud( particles, shaderMaterial );
var vertices = particleCloud.geometry.vertices;
var values_size = attributes.size.value;
var values_color = attributes.pcolor.value;
for( var v = 0; v < vertices.length; v ++ ) {
values_size[ v ] = 50;
values_color[ v ] = new THREE.Color( 0x000000 );
particles.vertices[ v ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
}
group.add( particleCloud );
particleCloud.y = 800;
// Create Particle Systems
// EMITTER STUFF
// circleShape
var x = 0, y = 0;
var circleRadius = 40;
circleShape = new THREE.Shape();
for (var i = 0; i < 16; i++) {
var pct = (i + 1) / 16;
var theta = pct * Math.PI * 2.0;
var x = circleRadius * Math.cos(theta) + 20;
var y = circleRadius * Math.sin(theta) + 50;
if (i == 0) {
circleShape.moveTo(x, y);
} else {
circleShape.lineTo(x, y);
}
}
var hue = 0;
var lightness = 0;
var setTargetParticle = function() {
var target = Pool.get();
values_size[ target ] = Math.random() * 200 + 100;
return target;
};
var onParticleCreated = function( p ) {
var position = p.position;
p.target.position = position;
var target = p.target;
if ( target ) { |
lightness += 0.0003 * delta;
if ( lightness > 0.05 ) lightness -= 0.05;
// TODO Create a PointOnShape Action/Zone in the particle engine
timeOnShapePath += 0.00035 * delta;
if ( timeOnShapePath > 1 ) timeOnShapePath -= 1;
var pointOnShape = circleShape.getPointAt( timeOnShapePath );
if (!emitterpos) emitterpos = new THREE.Vector3( 0, 0, 0 );
if (!pointOnShape) pointOnShape = circleShape.getPointAt( 0.00035 * delta );
emitterpos.x = pointOnShape.x * 5 - 100;
emitterpos.y = -pointOnShape.y * 5 + 400;
pointLight.position.x = emitterpos.x;
pointLight.position.y = emitterpos.y;
pointLight.position.z = 100;
particles.vertices[ target ] = p.position;
values_color[ target ].setHSL( hue, 0.6, lightness );
pointLight.color.setHSL( hue, 0.6, lightness );
};
};
var onParticleDead = function( particle ) {
var target = particle.target;
if ( target ) {
// Hide the particle
values_color[ target ].setRGB( 0, 0, 0 );
particles.vertices[ target ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
// Mark particle system as available by returning to pool
Pool.add( particle.target );
}
};
var engineLoopUpdate = function() {
};
sparksEmitter = new SPARKS.Emitter( new SPARKS.SteadyCounter( 800 ) );
emitterpos = new THREE.Vector3( 0, 0, 0 );
sparksEmitter.addInitializer( new SPARKS.Position( new SPARKS.PointZone( emitterpos ) ) );
sparksEmitter.addInitializer( new SPARKS.Lifetime( 1, 15 ));
sparksEmitter.addInitializer( new SPARKS.Target( null, setTargetParticle ) );
sparksEmitter.addInitializer( new SPARKS.Velocity( new SPARKS.PointZone( new THREE.Vector3( 0, -5, 1 ) ) ) );
sparksEmitter.addAction( new SPARKS.Age() );
sparksEmitter.addAction( new SPARKS.Accelerate( 0, 0, -50 ) );
sparksEmitter.addAction( new SPARKS.Move() );
sparksEmitter.addAction( new SPARKS.RandomDrift( 20, 100, 2000 ) );
sparksEmitter.addCallback( "created", onParticleCreated );
sparksEmitter.addCallback( "dead", onParticleDead );
sparksEmitter.start();
// End Particles
renderer = new THREE.WebGLRenderer();
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
// POST PROCESSING
var effectFocus = new THREE.ShaderPass( THREE.FocusShader );
var effectCopy = new THREE.ShaderPass( THREE.CopyShader );
effectFilm = new THREE.FilmPass( 0.5, 0.25, 2048, false );
var shaderBlur = THREE.TriangleBlurShader;
effectBlurX = new THREE.ShaderPass( shaderBlur, 'texture' );
effectBlurY = new THREE.ShaderPass( shaderBlur, 'texture' );
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
hblur = new THREE.ShaderPass( THREE.HorizontalBlurShader );
vblur = new THREE.ShaderPass( THREE.VerticalBlurShader);
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
effectFocus.uniforms[ 'sampleDistance' ].value = 0.99; //0.94
effectFocus.uniforms[ 'waveFactor' ].value = 0.003; //0.00125
var renderScene = new THREE.RenderPass( scene, camera );
composer = new THREE.EffectComposer( renderer );
composer.addPass( renderScene );
composer.addPass( hblur );
composer.addPass( vblur );
vblur.renderToScreen = true;
effectBlurY.renderToScreen = true;
effectFocus.renderToScreen = true;
effectCopy.renderToScreen = true;
effectFilm.renderToScreen = true;
document.addEventListener( 'touchstart', onDocumentTouchStart, false );
document.addEventListener( 'touchmove', onDocumentTouchMove, false );
document.addEventListener( 'mousemove', onDocumentMouseMove, false );
//
window.addEventListener( 'resize', onWindowResize, false );
}
function onWindowResize() {
windowHalfX = window.innerWidth / 2;
windowHalfY = window.innerHeight / 2;
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
//
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
composer.reset();
}
function onDocumentMouseDown( event ) {
event.preventDefault();
mouseXOnMouseDown = event.clientX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
if ( sparksEmitter.isRunning() ) {
sparksEmitter.stop();
} else {
sparksEmitter.start();
}
}
function onDocumentMouseMove( event ) {
mouseX = event.clientX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.02;
}
function onDocumentTouchStart( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseXOnMouseDown = event.touches[ 0 ].pageX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
}
}
function onDocumentTouchMove( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseX = event.touches[ 0 ].pageX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.05;
}
}
//
function animate() {
requestAnimationFrame( animate );
render();
}
function render() {
delta = speed * clock.getDelta();
particleCloud.geometry.verticesNeedUpdate = true;
attributes.size.needsUpdate = true;
attributes.pcolor.needsUpdate = true;
// Pretty cool effect if you enable this
// particleCloud.rotation.y += 0.05;
group.rotation.y += ( targetRotation - group.rotation.y ) * 0.05;
renderer.clear();
// renderer.render( scene, camera );
composer.render( 0.1 );
}
}
try {
mainJs();
} catch(e) {
location.reload();
} |
hue += 0.0003 * delta;
if ( hue > 0.1 ) hue -= 0.1; | random_line_split |
main.js | var mainJs = function() {
if ( !Detector.webgl ) {
container = document.createElement( 'div' );
container =
$('<div class="addGetWebGLMessage">' +
'あなたのブラウザは最新ではありませんので、インターネット上にあるイケてる技術をみることはできません。<br>' +
'Your browser does not seem to support WebGL. Take a step to the future.' +
'<div>')
$('body').append(container);
return;
}
var container;
var camera, scene, renderer;
var group, text, plane;
var speed = 50;
var pointLight;
var targetRotation = 0;
var targetRotationOnMouseDown = 0;
var mouseX = 0;
var mouseXOnMouseDown = 0;
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
var delta = 1, clock = new THREE.Clock();
var circleShape, particleCloud, sparksEmitter, emitterPos;
var _rotation = 0;
var timeOnShapePath = 0;
var composer;
var effectBlurX, effectBlurY, hblur, vblur;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
// CAMERA
camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 1, 2000 );
camera.position.set( 0, 150, 400 );
// SCENE
scene = new THREE.Scene();
// LIGHTS
var directionalLight = new THREE.DirectionalLight( 0xffffff, 0.5 );
directionalLight.position.set( 0, -1, 1 );
directionalLight.position.normalize();
scene.add( directionalLight );
pointLight = new THREE.PointLight( 0xffffff, 2, 300 );
pointLight.position.set( 0, 0, 0 );
scene.add( pointLight );
group = new THREE.Group();
scene.add( group );
// Create particle objects for Three.js
var particlesLength = 70000;
var particles = new THREE.Geometry();
function newpos( x, y, z ) {
return new THREE.Vector3( x, y, z );
}
var Pool = {
__pools: [],
// Get a new Vector
get: function() {
if ( this.__pools.length > 0 ) {
return this.__pools.pop();
}
console.log( "pool ran out!" )
return location.reload();
},
// Release a vector back into the pool
add: function( v ) {
this.__pools.push( v );
}
};
for ( i = 0; i < particlesLength; i ++ ) {
particles.vertices.push( newpos( Math.random() * 200 - 100, Math.random() * 100 + 150, Math.random() * 50 ) );
Pool.add( i );
}
// Create pools of vectors
attributes = {
size: { type: 'f', value: [] },
pcolor: { type: 'c', value: [] }
};
var sprite = generateSprite() ;
texture = new THREE.Texture( sprite );
texture.needsUpdate = true;
uniforms = {
texture: { type: "t", value: texture }
};
// PARAMETERS
// Steadycounter
// Life
// Opacity
// Hue Speed
// Movement Speed
function generateSprite() {
var canvas = document.createElement( 'canvas' );
canvas.width = 128;
canvas.height = 128;
var context = canvas.getContext( '2d' );
// Just a square, doesnt work too bad with blur pp.
// context.fillStyle = "white";
// context.strokeStyle = "white";
// context.fillRect(0, 0, 63, 63) ;
// Heart Shapes are not too pretty here
// var x = 4, y = 0;
// context.save();
// context.scale(8, 8); // Scale so canvas render can redraw within bounds
// context.beginPath();
// context.bezierCurveTo( x + 2.5, y + 2.5, x + 2.0, y, x, y );
// context.bezierCurveTo( x - 3.0, y, x - 3.0, y + 3.5,x - 3.0,y + 3.5 );
// context.bezierCurveTo( x - 3.0, y + 5.5, x - 1.0, y + 7.7, x + 2.5, y + 9.5 );
// context.bezierCurveTo( x + 6.0, y + 7.7, x + 8.0, y + 5.5, x + 8.0, y + 3.5 );
// context.bezierCurveTo( x + 8.0, y + 3.5, x + 8.0, y, x + 5.0, y );
// context.bezierCurveTo( x + 3.5, y, x + 2.5, y + 2.5, x + 2.5, y + 2.5 );
context.beginPath();
context.arc( 64, 64, 60, 0, Math.PI * 2, false) ;
context.lineWidth = 0.5; //0.05
context.stroke();
context.restore();
var gradient = context.createRadialGradient( canvas.width / 2, canvas.height / 2, 0, canvas.width / 2, canvas.height / 2, canvas.width / 2 );
gradient.addColorStop( 0, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.2, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.4, 'rgba(200,200,200,1)' );
gradient.addColorStop( 1, 'rgba(0,0,0,1)' );
context.fillStyle = gradient;
context.fill();
return canvas;
}
var shaderMaterial = new THREE.ShaderMaterial( {
uniforms: uniforms,
attributes: attributes,
vertexShader: document.getElementById( 'vertexshader' ).textContent,
fragmentShader: document.getElementById( 'fragmentshader' ).textContent,
blending: THREE.AdditiveBlending,
depthWrite: false,
transparent: true
});
particleCloud = new THREE.PointCloud( particles, shaderMaterial );
var vertices = particleCloud.geometry.vertices;
var values_size = attributes.size.value;
var values_color = attributes.pcolor.value;
for( var v = 0; v < vertices.length; v ++ ) {
values_size[ v ] = 50;
values_color[ v ] = new THREE.Color( 0x000000 );
particles.vertices[ v ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
}
group.add( particleCloud );
particleCloud.y = 800;
// Create Particle Systems
// EMITTER STUFF
// circleShape
var x = 0, y = 0;
var circleRadius = 40;
circleShape = new THREE.Shape();
for (var i = 0; i < 16; i++) {
var pct = (i + 1) / 16;
var theta = pct * Math.PI * 2.0;
var x = circleRadius * Math.cos(theta) + 20;
var y = circleRadius * Math.sin(theta) + 50;
if (i == 0) {
circleShape.moveTo(x, y);
} else {
circleShape.lineTo(x, y);
}
}
var hue = 0;
var lightness = 0;
var setTargetParticle = function() {
var target = Pool.get();
values_size[ target ] = Math.random() * 200 + 100;
return target;
};
var onParticleCreated = function( p ) {
var position = p.position;
p.target.position = position;
var target = p.target;
if ( target ) {
hue += 0.0003 * delta;
if ( hue > 0.1 ) hue -= 0.1;
lightness += 0.0003 * delta;
if ( lightness > 0.05 ) lightness -= 0.05;
// TODO Create a PointOnShape Action/Zone in the particle engine
timeOnShapePath += 0.00035 * delta;
if ( timeOnShapePath > 1 ) timeOnShapePath -= 1;
var pointOnShape = circleShape.getPointAt( timeOnShapePath );
if (!emitterpos) emitterpos = new THREE.Vector3( 0, 0, 0 );
if (!pointOnShape) pointOnShape = circleShape.getPointAt( 0.00035 * delta );
emitterpos.x = pointOnShape.x * 5 - 100;
emitterpos.y = -pointOnShape.y * 5 + 400;
pointLight.position.x = emitterpos.x;
pointLight.position.y = emitterpos.y;
pointLight.position.z = 100;
particles.vertices[ target ] = p.position;
values_color[ target ].setHSL( hue, 0.6, lightness );
pointLight.color.setHSL( hue, 0.6, lightness );
};
};
var onParticleDead = function( particle ) {
var target = particle.target;
if ( target ) {
// Hide the particle
values_color[ target ].setRGB( 0, 0, 0 );
particles.vertices[ target ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
// Mark particle system as available by returning to pool
Pool.add( particle.target );
}
};
var engineLoopUpdate = function() {
};
sparksEmitter = new SPARKS.Emitter( new SPARKS.SteadyCounter( 800 ) );
emitterpos = new THREE.Vector3( 0, 0, 0 );
sparksEmitter.addInitializer( new SPARKS.Position( new SPARKS.PointZone( emitterpos ) ) );
sparksEmitter.addInitializer( new SPARKS.Lifetime( 1, 15 ));
sparksEmitter.addInitializer( new SPARKS.Target( null, setTargetParticle ) );
sparksEmitter.addInitializer( new SPARKS.Velocity( new SPARKS.PointZone( new THREE.Vector3( 0, -5, 1 ) ) ) );
sparksEmitter.addAction( new SPARKS.Age() );
sparksEmitter.addAction( new SPARKS.Accelerate( 0, 0, -50 ) );
sparksEmitter.addAction( new SPARKS.Move() );
sparksEmitter.addAction( new SPARKS.RandomDrift( 20, 100, 2000 ) );
sparksEmitter.addCallback( "created", onParticleCreated );
sparksEmitter.addCallback( "dead", onParticleDead );
sparksEmitter.start();
// End Particles
renderer = new THREE.WebGLRenderer();
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
// POST PROCESSING
var effectFocus = new THREE.ShaderPass( THREE.FocusShader );
var effectCopy = new THREE.ShaderPass( THREE.CopyShader );
effectFilm = new THREE.FilmPass( 0.5, 0.25, 2048, false );
var shaderBlur = THREE.TriangleBlurShader;
effectBlurX = new THREE.ShaderPass( shaderBlur, 'texture' );
effectBlurY = new THREE.ShaderPass( shaderBlur, 'texture' );
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
hblur = new THREE.ShaderPass( THREE.HorizontalBlurShader );
vblur = new THREE.ShaderPass( THREE.VerticalBlurShader);
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
effectFocus.uniforms[ 'sampleDistance' ].value = 0.99; //0.94
effectFocus.uniforms[ 'waveFactor' ].value = 0.003; //0.00125
var renderScene = new THREE.RenderPass( scene, camera );
composer = new THREE.EffectComposer( renderer );
composer.addPass( renderScene );
composer.addPass( hblur );
composer.addPass( vblur );
vblur.renderToScreen = true;
effectBlurY.renderToScreen = true;
effectFocus.renderToScreen = true;
effectCopy.renderToScreen = true;
effectFilm.renderToScreen = true;
document.addEventListener( 'touchstart', onDocumentTouchStart, false );
document.addEventListener( 'touchmove', onDocumentTouchMove, false );
document.addEventListener( 'mousemove', onDocumentMouseMove, false );
//
window.addEventListener( 'resize', onWindowResize, false );
}
function onWindowResize() {
windowHalfX = window.innerWidth / 2;
windowHalfY = window.innerHeight / 2;
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
//
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
composer.reset();
}
function onDocumentMouseDown( event ) {
event.preventDefault();
mouseXOnMouseDown = event.clientX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
if ( sparksEmitter.isRunning() ) {
sparksEmitter.stop();
} else {
sparksEmitter.start();
}
}
function onDocumentMouseMove( event ) {
mouseX = event.clientX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.02;
}
function onDocumentTouchStart( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseXOnMouseDown = event.touches[ 0 ].pageX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
}
}
function onDocumentTouchMove( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseX = event.touches[ 0 ].pageX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.05;
}
}
//
function animate() {
requestAnimationFrame( animate );
render();
}
function render() {
| = speed * clock.getDelta();
particleCloud.geometry.verticesNeedUpdate = true;
attributes.size.needsUpdate = true;
attributes.pcolor.needsUpdate = true;
// Pretty cool effect if you enable this
// particleCloud.rotation.y += 0.05;
group.rotation.y += ( targetRotation - group.rotation.y ) * 0.05;
renderer.clear();
// renderer.render( scene, camera );
composer.render( 0.1 );
}
}
try {
mainJs();
} catch(e) {
location.reload();
}
| delta | identifier_name |
main.js | var mainJs = function() {
if ( !Detector.webgl ) {
container = document.createElement( 'div' );
container =
$('<div class="addGetWebGLMessage">' +
'あなたのブラウザは最新ではありませんので、インターネット上にあるイケてる技術をみることはできません。<br>' +
'Your browser does not seem to support WebGL. Take a step to the future.' +
'<div>')
$('body').append(container);
return;
}
var container;
var camera, scene, renderer;
var group, text, plane;
var speed = 50;
var pointLight;
var targetRotation = 0;
var targetRotationOnMouseDown = 0;
var mouseX = 0;
var mouseXOnMouseDown = 0;
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
var delta = 1, clock = new THREE.Clock();
var circleShape, particleCloud, sparksEmitter, emitterPos;
var _rotation = 0;
var timeOnShapePath = 0;
var composer;
var effectBlurX, effectBlurY, hblur, vblur;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
// CAMERA
camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 1, 2000 );
camera.position.set( 0, 150, 400 );
// SCENE
scene = new THREE.Scene();
// LIGHTS
var directionalLight = new THREE.DirectionalLight( 0xffffff, 0.5 );
directionalLight.position.set( 0, -1, 1 );
directionalLight.position.normalize();
scene.add( directionalLight );
pointLight = new THREE.PointLight( 0xffffff, 2, 300 );
pointLight.position.set( 0, 0, 0 );
scene.add( pointLight );
group = new THREE.Group();
scene.add( group );
// Create particle objects for Three.js
var particlesLength = 70000;
var particles = new THREE.Geometry();
function newpos( x, y, z ) {
return new THREE.Vector3( x, y, z );
}
var Pool = {
__pools: [],
// Get a new Vector
get: function() {
if ( this.__pools.length > 0 ) {
return this.__pools.pop();
}
console.log( "pool ran out!" )
return location.reload();
},
// Release a vector back into the pool
add: function( v ) {
this.__pools.push( v );
}
};
for ( i = 0; i < particlesLength; i ++ ) {
particles.vertices.push( newpos( Math.random() * 200 - 100, Math.random() * 100 + 150, Math.random() * 50 ) );
Pool.add( i );
}
// Create pools of vectors
attributes = {
size: { type: 'f', value: [] },
pcolor: { type: 'c', value: [] }
};
var sprite = generateSprite() ;
texture = new THREE.Texture( sprite );
texture.needsUpdate = true;
uniforms = {
texture: { type: "t", value: texture }
};
// PARAMETERS
// Steadycounter
// Life
// Opacity
// Hue Speed
// Movement Speed
function generateSprite() {
var canvas = document.createElement( 'canvas' );
canvas.width = 128;
canvas.height = 128;
var context = canvas.getContext( '2d' );
// Just a square, doesnt work too bad with blur pp.
// context.fillStyle = "white";
// context.strokeStyle = "white";
// context.fillRect(0, 0, 63, 63) ;
// Heart Shapes are not too pretty here
// var x = 4, y = 0;
// context.save();
// context.scale(8, 8); // Scale so canvas render can redraw within bounds
// context.beginPath();
// context.bezierCurveTo( x + 2.5, y + 2.5, x + 2.0, y, x, y );
// context.bezierCurveTo( x - 3.0, y, x - 3.0, y + 3.5,x - 3.0,y + 3.5 );
// context.bezierCurveTo( x - 3.0, y + 5.5, x - 1.0, y + 7.7, x + 2.5, y + 9.5 );
// context.bezierCurveTo( x + 6.0, y + 7.7, x + 8.0, y + 5.5, x + 8.0, y + 3.5 );
// context.bezierCurveTo( x + 8.0, y + 3.5, x + 8.0, y, x + 5.0, y );
// context.bezierCurveTo( x + 3.5, y, x + 2.5, y + 2.5, x + 2.5, y + 2.5 );
context.beginPath();
context.arc( 64, 64, 60, 0, Math.PI * 2, false) ;
context.lineWidth = 0.5; //0.05
context.stroke();
context.restore();
var gradient = context.createRadialGradient( canvas.width / 2, canvas.height / 2, 0, canvas.width / 2, canvas.height / 2, canvas.width / 2 );
gradient.addColorStop( 0, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.2, 'rgba(255,255,255,1)' );
gradient.addColorStop( 0.4, 'rgba(200,200,200,1)' );
gradient.addColorStop( 1, 'rgba(0,0,0,1)' );
context.fillStyle = gradient;
context.fill();
return canvas;
}
var shaderMaterial = new THREE.ShaderMaterial( {
uniforms: uniforms,
attributes: attributes,
vertexShader: document.getElementById( 'vertexshader' ).textContent,
fragmentShader: document.getElementById( 'fragmentshader' ).textContent,
blending: THREE.AdditiveBlending,
depthWrite: false,
transparent: true
});
particleCloud = new THREE.PointCloud( particles, shaderMaterial );
var vertices = particleCloud.geometry.vertices;
var values_size = attributes.size.value;
var values_color = attributes.pcolor.value;
for( var v = 0; v < vertices.length; v ++ ) {
values_size[ v ] = 50;
values_color[ v ] = new THREE.Color( 0x000000 );
particles.vertices[ v ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
}
group.add( particleCloud );
particleCloud.y = 800;
// Create Particle Systems
// EMITTER STUFF
// circleShape
var x = 0, y = 0;
var circleRadius = 40;
circleShape = new THREE.Shape();
for (var i = 0; i < 16; i++) {
var pct = (i + 1) / 16;
var theta = pct * Math.PI * 2.0;
var x = circleRadius * Math.cos(theta) + 20;
var y = circleRadius * Math.sin(theta) + 50;
if (i == 0) {
circleShape.moveTo(x, y);
} else {
circleShape.lineTo(x, y);
}
}
var hue = 0;
var lightness = 0;
var setTargetParticle = function() {
var target = Pool.get();
values_size[ target ] = Math.random() * 200 + 100;
return target;
};
var onParticleCreated = function( p ) {
var position = p.position;
p.target.position = position;
var target = p.target;
if ( target ) {
hue += 0.0003 * delta;
if ( hue > 0.1 ) hue -= 0.1;
lightness += 0.0003 | if ( target ) {
// Hide the particle
values_color[ target ].setRGB( 0, 0, 0 );
particles.vertices[ target ].set( Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY );
// Mark particle system as available by returning to pool
Pool.add( particle.target );
}
};
var engineLoopUpdate = function() {
};
sparksEmitter = new SPARKS.Emitter( new SPARKS.SteadyCounter( 800 ) );
emitterpos = new THREE.Vector3( 0, 0, 0 );
sparksEmitter.addInitializer( new SPARKS.Position( new SPARKS.PointZone( emitterpos ) ) );
sparksEmitter.addInitializer( new SPARKS.Lifetime( 1, 15 ));
sparksEmitter.addInitializer( new SPARKS.Target( null, setTargetParticle ) );
sparksEmitter.addInitializer( new SPARKS.Velocity( new SPARKS.PointZone( new THREE.Vector3( 0, -5, 1 ) ) ) );
sparksEmitter.addAction( new SPARKS.Age() );
sparksEmitter.addAction( new SPARKS.Accelerate( 0, 0, -50 ) );
sparksEmitter.addAction( new SPARKS.Move() );
sparksEmitter.addAction( new SPARKS.RandomDrift( 20, 100, 2000 ) );
sparksEmitter.addCallback( "created", onParticleCreated );
sparksEmitter.addCallback( "dead", onParticleDead );
sparksEmitter.start();
// End Particles
renderer = new THREE.WebGLRenderer();
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
// POST PROCESSING
var effectFocus = new THREE.ShaderPass( THREE.FocusShader );
var effectCopy = new THREE.ShaderPass( THREE.CopyShader );
effectFilm = new THREE.FilmPass( 0.5, 0.25, 2048, false );
var shaderBlur = THREE.TriangleBlurShader;
effectBlurX = new THREE.ShaderPass( shaderBlur, 'texture' );
effectBlurY = new THREE.ShaderPass( shaderBlur, 'texture' );
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
hblur = new THREE.ShaderPass( THREE.HorizontalBlurShader );
vblur = new THREE.ShaderPass( THREE.VerticalBlurShader);
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
effectFocus.uniforms[ 'sampleDistance' ].value = 0.99; //0.94
effectFocus.uniforms[ 'waveFactor' ].value = 0.003; //0.00125
var renderScene = new THREE.RenderPass( scene, camera );
composer = new THREE.EffectComposer( renderer );
composer.addPass( renderScene );
composer.addPass( hblur );
composer.addPass( vblur );
vblur.renderToScreen = true;
effectBlurY.renderToScreen = true;
effectFocus.renderToScreen = true;
effectCopy.renderToScreen = true;
effectFilm.renderToScreen = true;
document.addEventListener( 'touchstart', onDocumentTouchStart, false );
document.addEventListener( 'touchmove', onDocumentTouchMove, false );
document.addEventListener( 'mousemove', onDocumentMouseMove, false );
//
window.addEventListener( 'resize', onWindowResize, false );
}
function onWindowResize() {
windowHalfX = window.innerWidth / 2;
windowHalfY = window.innerHeight / 2;
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
//
hblur.uniforms[ 'h' ].value = 1 / window.innerWidth;
vblur.uniforms[ 'v' ].value = 1 / window.innerHeight;
var radius = 15;
var blurAmountX = radius / window.innerWidth;
var blurAmountY = radius / window.innerHeight;
effectBlurX.uniforms[ 'delta' ].value = new THREE.Vector2( blurAmountX, 0 );
effectBlurY.uniforms[ 'delta' ].value = new THREE.Vector2( 0, blurAmountY );
composer.reset();
}
function onDocumentMouseDown( event ) {
event.preventDefault();
mouseXOnMouseDown = event.clientX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
if ( sparksEmitter.isRunning() ) {
sparksEmitter.stop();
} else {
sparksEmitter.start();
}
}
function onDocumentMouseMove( event ) {
mouseX = event.clientX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.02;
}
function onDocumentTouchStart( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseXOnMouseDown = event.touches[ 0 ].pageX - windowHalfX;
targetRotationOnMouseDown = targetRotation;
}
}
function onDocumentTouchMove( event ) {
if ( event.touches.length === 1 ) {
event.preventDefault();
mouseX = event.touches[ 0 ].pageX - windowHalfX;
targetRotation = targetRotationOnMouseDown + ( mouseX - mouseXOnMouseDown ) * 0.05;
}
}
//
function animate() {
requestAnimationFrame( animate );
render();
}
function render() {
delta = speed * clock.getDelta();
particleCloud.geometry.verticesNeedUpdate = true;
attributes.size.needsUpdate = true;
attributes.pcolor.needsUpdate = true;
// Pretty cool effect if you enable this
// particleCloud.rotation.y += 0.05;
group.rotation.y += ( targetRotation - group.rotation.y ) * 0.05;
renderer.clear();
// renderer.render( scene, camera );
composer.render( 0.1 );
}
}
try {
mainJs();
} catch(e) {
location.reload();
}
| * delta;
if ( lightness > 0.05 ) lightness -= 0.05;
// TODO Create a PointOnShape Action/Zone in the particle engine
timeOnShapePath += 0.00035 * delta;
if ( timeOnShapePath > 1 ) timeOnShapePath -= 1;
var pointOnShape = circleShape.getPointAt( timeOnShapePath );
if (!emitterpos) emitterpos = new THREE.Vector3( 0, 0, 0 );
if (!pointOnShape) pointOnShape = circleShape.getPointAt( 0.00035 * delta );
emitterpos.x = pointOnShape.x * 5 - 100;
emitterpos.y = -pointOnShape.y * 5 + 400;
pointLight.position.x = emitterpos.x;
pointLight.position.y = emitterpos.y;
pointLight.position.z = 100;
particles.vertices[ target ] = p.position;
values_color[ target ].setHSL( hue, 0.6, lightness );
pointLight.color.setHSL( hue, 0.6, lightness );
};
};
var onParticleDead = function( particle ) {
var target = particle.target;
| conditional_block |
roles.go | package model
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"gopkg.in/yaml.v2"
)
// RoleType is the type of the role; see the constants below
type RoleType string
// These are the types of roles available
const (
RoleTypeBoshTask = RoleType("bosh-task") // A role that is a BOSH task
RoleTypeBosh = RoleType("bosh") // A role that is a BOSH job
RoleTypeDocker = RoleType("docker") // A role that is a raw Docker image
)
// FlightStage describes when a role should be executed
type FlightStage string
// These are the flight stages available
const (
FlightStagePreFlight = FlightStage("pre-flight") // A role that runs before the main jobs start
FlightStageFlight = FlightStage("flight") // A role that is a main job
FlightStagePostFlight = FlightStage("post-flight") // A role that runs after the main jobs are up
FlightStageManual = FlightStage("manual") // A role that only runs via user intervention
)
// RoleManifest represents a collection of roles
type RoleManifest struct {
Roles Roles `yaml:"roles"`
Configuration *Configuration `yaml:"configuration"`
manifestFilePath string
rolesByName map[string]*Role
}
// Role represents a collection of jobs that are colocated on a container
type Role struct {
Name string `yaml:"name"`
Jobs Jobs `yaml:"_,omitempty"`
EnvironScripts []string `yaml:"environment_scripts"`
Scripts []string `yaml:"scripts"`
PostConfigScripts []string `yaml:"post_config_scripts"`
Type RoleType `yaml:"type,omitempty"`
JobNameList []*roleJob `yaml:"jobs"`
Configuration *Configuration `yaml:"configuration"`
Run *RoleRun `yaml:"run"`
Tags []string `yaml:"tags"`
rolesManifest *RoleManifest
}
// RoleRun describes how a role should behave at runtime
type RoleRun struct {
Scaling *RoleRunScaling `yaml:"scaling"`
Capabilities []string `yaml:"capabilities"`
PersistentVolumes []*RoleRunVolume `yaml:"persistent-volumes"`
SharedVolumes []*RoleRunVolume `yaml:"shared-volumes"`
Memory int `yaml:"memory"`
VirtualCPUs int `yaml:"virtual-cpus"`
ExposedPorts []*RoleRunExposedPort `yaml:"exposed-ports"`
FlightStage FlightStage `yaml:"flight-stage"`
HealthCheck *HealthCheck `yaml:"healthcheck,omitempty"`
}
// RoleRunScaling describes how a role should scale out at runtime
type RoleRunScaling struct {
Min int32 `yaml:"min"`
Max int32 `yaml:"max"`
}
// RoleRunVolume describes a volume to be attached at runtime
type RoleRunVolume struct {
Path string `yaml:"path"`
Tag string `yaml:"tag"`
Size int `yaml:"size"`
}
// RoleRunExposedPort describes a port to be available to other roles, or the outside world
type RoleRunExposedPort struct {
Name string `yaml:"name"`
Protocol string `yaml:"protocol"`
External string `yaml:"external"`
Internal string `yaml:"internal"`
Public bool `yaml:"public"`
}
// HealthCheck describes a non-standard health check endpoint
type HealthCheck struct {
URL string `yaml:"url"` // URL for a HTTP GET to return 200~399. Cannot be used with other checks.
Headers map[string]string `yaml:"headers"` // Custom headers; only used for URL.
Command []string `yaml:"command"` // Custom command. Cannot be used with other checks.
Port int32 `yaml:"port"` // Port for a TCP probe. Cannot be used with other checks.
}
// Roles is an array of Role*
type Roles []*Role
// Configuration contains information about how to configure the
// resulting images
type Configuration struct {
Templates map[string]string `yaml:"templates"`
Variables ConfigurationVariableSlice `yaml:"variables"`
}
// ConfigurationVariable is a configuration to be exposed to the IaaS
type ConfigurationVariable struct {
Name string `yaml:"name"`
Default interface{} `yaml:"default"`
Description string `yaml:"description"`
Generator *ConfigurationVariableGenerator `yaml:"generator"`
}
// ConfigurationVariableSlice is a sortable slice of ConfigurationVariables
type ConfigurationVariableSlice []*ConfigurationVariable
// Len is the number of ConfigurationVariables in the slice
func (confVars ConfigurationVariableSlice) Len() int {
return len(confVars)
}
// Less reports whether config variable at index i sort before the one at index j
func (confVars ConfigurationVariableSlice) Less(i, j int) bool {
return strings.Compare(confVars[i].Name, confVars[j].Name) < 0
}
// Swap exchanges configuration variables at index i and index j
func (confVars ConfigurationVariableSlice) Swap(i, j int) {
confVars[i], confVars[j] = confVars[j], confVars[i]
}
// ConfigurationVariableGenerator describes how to automatically generate values
// for a configuration variable
type ConfigurationVariableGenerator struct {
ID string `yaml:"id"`
Type string `yaml:"type"`
ValueType string `yaml:"value_type"`
}
type roleJob struct {
Name string `yaml:"name"`
ReleaseName string `yaml:"release_name"`
}
// Len is the number of roles in the slice
func (roles Roles) Len() int {
return len(roles)
}
// Less reports whether role at index i sort before role at index j
func (roles Roles) Less(i, j int) bool {
return strings.Compare(roles[i].Name, roles[j].Name) < 0
}
// Swap exchanges roles at index i and index j
func (roles Roles) Swap(i, j int) {
roles[i], roles[j] = roles[j], roles[i]
}
// LoadRoleManifest loads a yaml manifest that details how jobs get grouped into roles
func LoadRoleManifest(manifestFilePath string, releases []*Release) (*RoleManifest, error) {
manifestContents, err := ioutil.ReadFile(manifestFilePath)
if err != nil {
return nil, err
}
mappedReleases := map[string]*Release{}
for _, release := range releases {
_, ok := mappedReleases[release.Name]
if ok {
return nil, fmt.Errorf("Error - release %s has been loaded more than once", release.Name)
}
mappedReleases[release.Name] = release
}
rolesManifest := RoleManifest{}
rolesManifest.manifestFilePath = manifestFilePath
if err := yaml.Unmarshal(manifestContents, &rolesManifest); err != nil {
return nil, err
}
for i := len(rolesManifest.Roles) - 1; i >= 0; i-- {
role := rolesManifest.Roles[i]
// Normalize flight stage
if role.Run != nil {
switch role.Run.FlightStage {
case "":
role.Run.FlightStage = FlightStageFlight
case FlightStagePreFlight:
case FlightStageFlight:
case FlightStagePostFlight:
case FlightStageManual:
default:
return nil, fmt.Errorf("Role %s has an invalid flight stage %s", role.Name, role.Run.FlightStage)
}
}
// Remove all roles that are not of the "bosh" or "bosh-task" type
// Default type is considered to be "bosh"
switch role.Type {
case "":
role.Type = RoleTypeBosh
case RoleTypeBosh, RoleTypeBoshTask:
continue
case RoleTypeDocker:
rolesManifest.Roles = append(rolesManifest.Roles[:i], rolesManifest.Roles[i+1:]...)
default:
return nil, fmt.Errorf("Role %s has an invalid type %s", role.Name, role.Type)
}
// Ensure that we don't have conflicting health checks
if role.Run != nil && role.Run.HealthCheck != nil {
checks := make([]string, 0, 3)
if role.Run.HealthCheck.URL != "" {
checks = append(checks, "url")
}
if len(role.Run.HealthCheck.Command) > 0 {
checks = append(checks, "command")
}
if role.Run.HealthCheck.Port != 0 {
checks = append(checks, "port")
}
if len(checks) != 1 {
return nil, fmt.Errorf("Health check for role %s should have exactly one of url, command, or port; got %v", role.Name, checks)
}
}
}
if rolesManifest.Configuration == nil {
rolesManifest.Configuration = &Configuration{}
}
if rolesManifest.Configuration.Templates == nil {
rolesManifest.Configuration.Templates = map[string]string{}
}
rolesManifest.rolesByName = make(map[string]*Role, len(rolesManifest.Roles))
for _, role := range rolesManifest.Roles {
role.rolesManifest = &rolesManifest
role.Jobs = make(Jobs, 0, len(role.JobNameList))
for _, roleJob := range role.JobNameList {
release, ok := mappedReleases[roleJob.ReleaseName]
if !ok {
return nil, fmt.Errorf("Error - release %s has not been loaded and is referenced by job %s in role %s",
roleJob.ReleaseName, roleJob.Name, role.Name)
}
job, err := release.LookupJob(roleJob.Name)
if err != nil {
return nil, err
}
role.Jobs = append(role.Jobs, job)
}
role.calculateRoleConfigurationTemplates()
rolesManifest.rolesByName[role.Name] = role
}
return &rolesManifest, nil
}
// GetRoleManifestDevPackageVersion gets the aggregate signature of all the packages
func (m *RoleManifest) GetRoleManifestDevPackageVersion(extra string) string {
// Make sure our roles are sorted, to have consistent output
roles := append(Roles{}, m.Roles...)
sort.Sort(roles)
hasher := sha1.New()
hasher.Write([]byte(extra))
for _, role := range roles {
hasher.Write([]byte(role.GetRoleDevVersion()))
}
return hex.EncodeToString(hasher.Sum(nil))
}
// LookupRole will find the given role in the role manifest
func (m *RoleManifest) LookupRole(roleName string) *Role {
return m.rolesByName[roleName]
}
// GetScriptPaths returns the paths to the startup / post configgin scripts for a role
func (r *Role) GetScriptPaths() map[string]string {
result := map[string]string{}
for _, scriptList := range [][]string{r.EnvironScripts, r.Scripts, r.PostConfigScripts} {
for _, script := range scriptList {
if filepath.IsAbs(script) {
// Absolute paths _inside_ the container; there is nothing to copy
continue
}
result[script] = filepath.Join(filepath.Dir(r.rolesManifest.manifestFilePath), script)
}
}
return result
}
// GetRoleDevVersion gets the aggregate signature of all jobs and packages
func (r *Role) GetRoleDevVersion() string {
roleSignature := ""
var packages Packages
// Jobs are *not* sorted because they are an array and the order may be
// significant, in particular for bosh-task roles.
for _, job := range r.Jobs {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, job.SHA1)
packages = append(packages, job.Packages...)
}
sort.Sort(packages)
for _, pkg := range packages |
hasher := sha1.New()
hasher.Write([]byte(roleSignature))
return hex.EncodeToString(hasher.Sum(nil))
}
// HasTag returns true if the role has a specific tag
func (r *Role) HasTag(tag string) bool {
for _, t := range r.Tags {
if t == tag {
return true
}
}
return false
}
func (r *Role) calculateRoleConfigurationTemplates() {
if r.Configuration == nil {
r.Configuration = &Configuration{}
}
if r.Configuration.Templates == nil {
r.Configuration.Templates = map[string]string{}
}
roleConfigs := map[string]string{}
for k, v := range r.rolesManifest.Configuration.Templates {
roleConfigs[k] = v
}
for k, v := range r.Configuration.Templates {
roleConfigs[k] = v
}
r.Configuration.Templates = roleConfigs
}
| {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, pkg.SHA1)
} | conditional_block |
roles.go | package model
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"gopkg.in/yaml.v2"
)
// RoleType is the type of the role; see the constants below
type RoleType string
// These are the types of roles available
const (
RoleTypeBoshTask = RoleType("bosh-task") // A role that is a BOSH task
RoleTypeBosh = RoleType("bosh") // A role that is a BOSH job
RoleTypeDocker = RoleType("docker") // A role that is a raw Docker image
)
// FlightStage describes when a role should be executed
type FlightStage string
// These are the flight stages available
const (
FlightStagePreFlight = FlightStage("pre-flight") // A role that runs before the main jobs start
FlightStageFlight = FlightStage("flight") // A role that is a main job
FlightStagePostFlight = FlightStage("post-flight") // A role that runs after the main jobs are up
FlightStageManual = FlightStage("manual") // A role that only runs via user intervention
)
// RoleManifest represents a collection of roles
type RoleManifest struct {
Roles Roles `yaml:"roles"`
Configuration *Configuration `yaml:"configuration"`
manifestFilePath string
rolesByName map[string]*Role
}
// Role represents a collection of jobs that are colocated on a container
type Role struct {
Name string `yaml:"name"`
Jobs Jobs `yaml:"_,omitempty"`
EnvironScripts []string `yaml:"environment_scripts"`
Scripts []string `yaml:"scripts"`
PostConfigScripts []string `yaml:"post_config_scripts"`
Type RoleType `yaml:"type,omitempty"`
JobNameList []*roleJob `yaml:"jobs"`
Configuration *Configuration `yaml:"configuration"`
Run *RoleRun `yaml:"run"`
Tags []string `yaml:"tags"`
rolesManifest *RoleManifest
}
// RoleRun describes how a role should behave at runtime
type RoleRun struct {
Scaling *RoleRunScaling `yaml:"scaling"`
Capabilities []string `yaml:"capabilities"`
PersistentVolumes []*RoleRunVolume `yaml:"persistent-volumes"`
SharedVolumes []*RoleRunVolume `yaml:"shared-volumes"`
Memory int `yaml:"memory"`
VirtualCPUs int `yaml:"virtual-cpus"`
ExposedPorts []*RoleRunExposedPort `yaml:"exposed-ports"`
FlightStage FlightStage `yaml:"flight-stage"`
HealthCheck *HealthCheck `yaml:"healthcheck,omitempty"`
}
// RoleRunScaling describes how a role should scale out at runtime
type RoleRunScaling struct {
Min int32 `yaml:"min"`
Max int32 `yaml:"max"`
}
// RoleRunVolume describes a volume to be attached at runtime
type RoleRunVolume struct {
Path string `yaml:"path"`
Tag string `yaml:"tag"`
Size int `yaml:"size"`
}
// RoleRunExposedPort describes a port to be available to other roles, or the outside world
type RoleRunExposedPort struct {
Name string `yaml:"name"`
Protocol string `yaml:"protocol"`
External string `yaml:"external"`
Internal string `yaml:"internal"`
Public bool `yaml:"public"`
}
// HealthCheck describes a non-standard health check endpoint
type HealthCheck struct {
URL string `yaml:"url"` // URL for a HTTP GET to return 200~399. Cannot be used with other checks.
Headers map[string]string `yaml:"headers"` // Custom headers; only used for URL.
Command []string `yaml:"command"` // Custom command. Cannot be used with other checks.
Port int32 `yaml:"port"` // Port for a TCP probe. Cannot be used with other checks.
}
// Roles is an array of Role*
type Roles []*Role
// Configuration contains information about how to configure the
// resulting images
type Configuration struct {
Templates map[string]string `yaml:"templates"`
Variables ConfigurationVariableSlice `yaml:"variables"`
}
// ConfigurationVariable is a configuration to be exposed to the IaaS
type ConfigurationVariable struct {
Name string `yaml:"name"`
Default interface{} `yaml:"default"`
Description string `yaml:"description"`
Generator *ConfigurationVariableGenerator `yaml:"generator"`
}
// ConfigurationVariableSlice is a sortable slice of ConfigurationVariables
type ConfigurationVariableSlice []*ConfigurationVariable
// Len is the number of ConfigurationVariables in the slice
func (confVars ConfigurationVariableSlice) Len() int {
return len(confVars)
}
// Less reports whether config variable at index i sort before the one at index j
func (confVars ConfigurationVariableSlice) Less(i, j int) bool {
return strings.Compare(confVars[i].Name, confVars[j].Name) < 0
}
// Swap exchanges configuration variables at index i and index j
func (confVars ConfigurationVariableSlice) Swap(i, j int) {
confVars[i], confVars[j] = confVars[j], confVars[i]
}
// ConfigurationVariableGenerator describes how to automatically generate values
// for a configuration variable
type ConfigurationVariableGenerator struct {
ID string `yaml:"id"`
Type string `yaml:"type"`
ValueType string `yaml:"value_type"`
}
type roleJob struct {
Name string `yaml:"name"`
ReleaseName string `yaml:"release_name"`
}
// Len is the number of roles in the slice
func (roles Roles) Len() int {
return len(roles)
}
// Less reports whether role at index i sort before role at index j
func (roles Roles) Less(i, j int) bool {
return strings.Compare(roles[i].Name, roles[j].Name) < 0
}
// Swap exchanges roles at index i and index j
func (roles Roles) Swap(i, j int) {
roles[i], roles[j] = roles[j], roles[i]
}
// LoadRoleManifest loads a yaml manifest that details how jobs get grouped into roles
func LoadRoleManifest(manifestFilePath string, releases []*Release) (*RoleManifest, error) {
manifestContents, err := ioutil.ReadFile(manifestFilePath)
if err != nil {
return nil, err
}
mappedReleases := map[string]*Release{}
for _, release := range releases {
_, ok := mappedReleases[release.Name]
if ok {
return nil, fmt.Errorf("Error - release %s has been loaded more than once", release.Name)
}
mappedReleases[release.Name] = release
}
rolesManifest := RoleManifest{}
rolesManifest.manifestFilePath = manifestFilePath
if err := yaml.Unmarshal(manifestContents, &rolesManifest); err != nil {
return nil, err
}
for i := len(rolesManifest.Roles) - 1; i >= 0; i-- {
role := rolesManifest.Roles[i]
// Normalize flight stage
if role.Run != nil {
switch role.Run.FlightStage {
case "":
role.Run.FlightStage = FlightStageFlight
case FlightStagePreFlight:
case FlightStageFlight:
case FlightStagePostFlight:
case FlightStageManual:
default:
return nil, fmt.Errorf("Role %s has an invalid flight stage %s", role.Name, role.Run.FlightStage)
}
}
// Remove all roles that are not of the "bosh" or "bosh-task" type
// Default type is considered to be "bosh"
switch role.Type {
case "":
role.Type = RoleTypeBosh
case RoleTypeBosh, RoleTypeBoshTask:
continue
case RoleTypeDocker:
rolesManifest.Roles = append(rolesManifest.Roles[:i], rolesManifest.Roles[i+1:]...)
default:
return nil, fmt.Errorf("Role %s has an invalid type %s", role.Name, role.Type)
}
// Ensure that we don't have conflicting health checks
if role.Run != nil && role.Run.HealthCheck != nil {
checks := make([]string, 0, 3)
if role.Run.HealthCheck.URL != "" {
checks = append(checks, "url")
}
if len(role.Run.HealthCheck.Command) > 0 {
checks = append(checks, "command")
}
if role.Run.HealthCheck.Port != 0 {
checks = append(checks, "port")
}
if len(checks) != 1 {
return nil, fmt.Errorf("Health check for role %s should have exactly one of url, command, or port; got %v", role.Name, checks)
}
}
}
if rolesManifest.Configuration == nil {
rolesManifest.Configuration = &Configuration{}
}
if rolesManifest.Configuration.Templates == nil {
rolesManifest.Configuration.Templates = map[string]string{}
}
rolesManifest.rolesByName = make(map[string]*Role, len(rolesManifest.Roles))
for _, role := range rolesManifest.Roles {
role.rolesManifest = &rolesManifest
role.Jobs = make(Jobs, 0, len(role.JobNameList))
for _, roleJob := range role.JobNameList {
release, ok := mappedReleases[roleJob.ReleaseName]
if !ok {
return nil, fmt.Errorf("Error - release %s has not been loaded and is referenced by job %s in role %s",
roleJob.ReleaseName, roleJob.Name, role.Name)
}
job, err := release.LookupJob(roleJob.Name)
if err != nil {
return nil, err
}
role.Jobs = append(role.Jobs, job)
}
role.calculateRoleConfigurationTemplates()
rolesManifest.rolesByName[role.Name] = role
}
return &rolesManifest, nil
}
// GetRoleManifestDevPackageVersion gets the aggregate signature of all the packages
func (m *RoleManifest) GetRoleManifestDevPackageVersion(extra string) string {
// Make sure our roles are sorted, to have consistent output
roles := append(Roles{}, m.Roles...)
sort.Sort(roles)
hasher := sha1.New()
hasher.Write([]byte(extra))
for _, role := range roles {
hasher.Write([]byte(role.GetRoleDevVersion()))
}
return hex.EncodeToString(hasher.Sum(nil))
}
// LookupRole will find the given role in the role manifest
func (m *RoleManifest) | (roleName string) *Role {
return m.rolesByName[roleName]
}
// GetScriptPaths returns the paths to the startup / post configgin scripts for a role
func (r *Role) GetScriptPaths() map[string]string {
result := map[string]string{}
for _, scriptList := range [][]string{r.EnvironScripts, r.Scripts, r.PostConfigScripts} {
for _, script := range scriptList {
if filepath.IsAbs(script) {
// Absolute paths _inside_ the container; there is nothing to copy
continue
}
result[script] = filepath.Join(filepath.Dir(r.rolesManifest.manifestFilePath), script)
}
}
return result
}
// GetRoleDevVersion gets the aggregate signature of all jobs and packages
func (r *Role) GetRoleDevVersion() string {
roleSignature := ""
var packages Packages
// Jobs are *not* sorted because they are an array and the order may be
// significant, in particular for bosh-task roles.
for _, job := range r.Jobs {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, job.SHA1)
packages = append(packages, job.Packages...)
}
sort.Sort(packages)
for _, pkg := range packages {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, pkg.SHA1)
}
hasher := sha1.New()
hasher.Write([]byte(roleSignature))
return hex.EncodeToString(hasher.Sum(nil))
}
// HasTag returns true if the role has a specific tag
func (r *Role) HasTag(tag string) bool {
for _, t := range r.Tags {
if t == tag {
return true
}
}
return false
}
func (r *Role) calculateRoleConfigurationTemplates() {
if r.Configuration == nil {
r.Configuration = &Configuration{}
}
if r.Configuration.Templates == nil {
r.Configuration.Templates = map[string]string{}
}
roleConfigs := map[string]string{}
for k, v := range r.rolesManifest.Configuration.Templates {
roleConfigs[k] = v
}
for k, v := range r.Configuration.Templates {
roleConfigs[k] = v
}
r.Configuration.Templates = roleConfigs
}
| LookupRole | identifier_name |
roles.go | package model
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"gopkg.in/yaml.v2"
)
// RoleType is the type of the role; see the constants below
type RoleType string
// These are the types of roles available
const (
RoleTypeBoshTask = RoleType("bosh-task") // A role that is a BOSH task
RoleTypeBosh = RoleType("bosh") // A role that is a BOSH job
RoleTypeDocker = RoleType("docker") // A role that is a raw Docker image
)
// FlightStage describes when a role should be executed
type FlightStage string
// These are the flight stages available
const (
FlightStagePreFlight = FlightStage("pre-flight") // A role that runs before the main jobs start
FlightStageFlight = FlightStage("flight") // A role that is a main job |
// RoleManifest represents a collection of roles
type RoleManifest struct {
Roles Roles `yaml:"roles"`
Configuration *Configuration `yaml:"configuration"`
manifestFilePath string
rolesByName map[string]*Role
}
// Role represents a collection of jobs that are colocated on a container
type Role struct {
Name string `yaml:"name"`
Jobs Jobs `yaml:"_,omitempty"`
EnvironScripts []string `yaml:"environment_scripts"`
Scripts []string `yaml:"scripts"`
PostConfigScripts []string `yaml:"post_config_scripts"`
Type RoleType `yaml:"type,omitempty"`
JobNameList []*roleJob `yaml:"jobs"`
Configuration *Configuration `yaml:"configuration"`
Run *RoleRun `yaml:"run"`
Tags []string `yaml:"tags"`
rolesManifest *RoleManifest
}
// RoleRun describes how a role should behave at runtime
type RoleRun struct {
Scaling *RoleRunScaling `yaml:"scaling"`
Capabilities []string `yaml:"capabilities"`
PersistentVolumes []*RoleRunVolume `yaml:"persistent-volumes"`
SharedVolumes []*RoleRunVolume `yaml:"shared-volumes"`
Memory int `yaml:"memory"`
VirtualCPUs int `yaml:"virtual-cpus"`
ExposedPorts []*RoleRunExposedPort `yaml:"exposed-ports"`
FlightStage FlightStage `yaml:"flight-stage"`
HealthCheck *HealthCheck `yaml:"healthcheck,omitempty"`
}
// RoleRunScaling describes how a role should scale out at runtime
type RoleRunScaling struct {
Min int32 `yaml:"min"`
Max int32 `yaml:"max"`
}
// RoleRunVolume describes a volume to be attached at runtime
type RoleRunVolume struct {
Path string `yaml:"path"`
Tag string `yaml:"tag"`
Size int `yaml:"size"`
}
// RoleRunExposedPort describes a port to be available to other roles, or the outside world
type RoleRunExposedPort struct {
Name string `yaml:"name"`
Protocol string `yaml:"protocol"`
External string `yaml:"external"`
Internal string `yaml:"internal"`
Public bool `yaml:"public"`
}
// HealthCheck describes a non-standard health check endpoint
type HealthCheck struct {
URL string `yaml:"url"` // URL for a HTTP GET to return 200~399. Cannot be used with other checks.
Headers map[string]string `yaml:"headers"` // Custom headers; only used for URL.
Command []string `yaml:"command"` // Custom command. Cannot be used with other checks.
Port int32 `yaml:"port"` // Port for a TCP probe. Cannot be used with other checks.
}
// Roles is an array of Role*
type Roles []*Role
// Configuration contains information about how to configure the
// resulting images
type Configuration struct {
Templates map[string]string `yaml:"templates"`
Variables ConfigurationVariableSlice `yaml:"variables"`
}
// ConfigurationVariable is a configuration to be exposed to the IaaS
type ConfigurationVariable struct {
Name string `yaml:"name"`
Default interface{} `yaml:"default"`
Description string `yaml:"description"`
Generator *ConfigurationVariableGenerator `yaml:"generator"`
}
// ConfigurationVariableSlice is a sortable slice of ConfigurationVariables
type ConfigurationVariableSlice []*ConfigurationVariable
// Len is the number of ConfigurationVariables in the slice
func (confVars ConfigurationVariableSlice) Len() int {
return len(confVars)
}
// Less reports whether config variable at index i sort before the one at index j
func (confVars ConfigurationVariableSlice) Less(i, j int) bool {
return strings.Compare(confVars[i].Name, confVars[j].Name) < 0
}
// Swap exchanges configuration variables at index i and index j
func (confVars ConfigurationVariableSlice) Swap(i, j int) {
confVars[i], confVars[j] = confVars[j], confVars[i]
}
// ConfigurationVariableGenerator describes how to automatically generate values
// for a configuration variable
type ConfigurationVariableGenerator struct {
ID string `yaml:"id"`
Type string `yaml:"type"`
ValueType string `yaml:"value_type"`
}
type roleJob struct {
Name string `yaml:"name"`
ReleaseName string `yaml:"release_name"`
}
// Len is the number of roles in the slice
func (roles Roles) Len() int {
return len(roles)
}
// Less reports whether role at index i sort before role at index j
func (roles Roles) Less(i, j int) bool {
return strings.Compare(roles[i].Name, roles[j].Name) < 0
}
// Swap exchanges roles at index i and index j
func (roles Roles) Swap(i, j int) {
roles[i], roles[j] = roles[j], roles[i]
}
// LoadRoleManifest loads a yaml manifest that details how jobs get grouped into roles
func LoadRoleManifest(manifestFilePath string, releases []*Release) (*RoleManifest, error) {
manifestContents, err := ioutil.ReadFile(manifestFilePath)
if err != nil {
return nil, err
}
mappedReleases := map[string]*Release{}
for _, release := range releases {
_, ok := mappedReleases[release.Name]
if ok {
return nil, fmt.Errorf("Error - release %s has been loaded more than once", release.Name)
}
mappedReleases[release.Name] = release
}
rolesManifest := RoleManifest{}
rolesManifest.manifestFilePath = manifestFilePath
if err := yaml.Unmarshal(manifestContents, &rolesManifest); err != nil {
return nil, err
}
for i := len(rolesManifest.Roles) - 1; i >= 0; i-- {
role := rolesManifest.Roles[i]
// Normalize flight stage
if role.Run != nil {
switch role.Run.FlightStage {
case "":
role.Run.FlightStage = FlightStageFlight
case FlightStagePreFlight:
case FlightStageFlight:
case FlightStagePostFlight:
case FlightStageManual:
default:
return nil, fmt.Errorf("Role %s has an invalid flight stage %s", role.Name, role.Run.FlightStage)
}
}
// Remove all roles that are not of the "bosh" or "bosh-task" type
// Default type is considered to be "bosh"
switch role.Type {
case "":
role.Type = RoleTypeBosh
case RoleTypeBosh, RoleTypeBoshTask:
continue
case RoleTypeDocker:
rolesManifest.Roles = append(rolesManifest.Roles[:i], rolesManifest.Roles[i+1:]...)
default:
return nil, fmt.Errorf("Role %s has an invalid type %s", role.Name, role.Type)
}
// Ensure that we don't have conflicting health checks
if role.Run != nil && role.Run.HealthCheck != nil {
checks := make([]string, 0, 3)
if role.Run.HealthCheck.URL != "" {
checks = append(checks, "url")
}
if len(role.Run.HealthCheck.Command) > 0 {
checks = append(checks, "command")
}
if role.Run.HealthCheck.Port != 0 {
checks = append(checks, "port")
}
if len(checks) != 1 {
return nil, fmt.Errorf("Health check for role %s should have exactly one of url, command, or port; got %v", role.Name, checks)
}
}
}
if rolesManifest.Configuration == nil {
rolesManifest.Configuration = &Configuration{}
}
if rolesManifest.Configuration.Templates == nil {
rolesManifest.Configuration.Templates = map[string]string{}
}
rolesManifest.rolesByName = make(map[string]*Role, len(rolesManifest.Roles))
for _, role := range rolesManifest.Roles {
role.rolesManifest = &rolesManifest
role.Jobs = make(Jobs, 0, len(role.JobNameList))
for _, roleJob := range role.JobNameList {
release, ok := mappedReleases[roleJob.ReleaseName]
if !ok {
return nil, fmt.Errorf("Error - release %s has not been loaded and is referenced by job %s in role %s",
roleJob.ReleaseName, roleJob.Name, role.Name)
}
job, err := release.LookupJob(roleJob.Name)
if err != nil {
return nil, err
}
role.Jobs = append(role.Jobs, job)
}
role.calculateRoleConfigurationTemplates()
rolesManifest.rolesByName[role.Name] = role
}
return &rolesManifest, nil
}
// GetRoleManifestDevPackageVersion gets the aggregate signature of all the packages
func (m *RoleManifest) GetRoleManifestDevPackageVersion(extra string) string {
// Make sure our roles are sorted, to have consistent output
roles := append(Roles{}, m.Roles...)
sort.Sort(roles)
hasher := sha1.New()
hasher.Write([]byte(extra))
for _, role := range roles {
hasher.Write([]byte(role.GetRoleDevVersion()))
}
return hex.EncodeToString(hasher.Sum(nil))
}
// LookupRole will find the given role in the role manifest
func (m *RoleManifest) LookupRole(roleName string) *Role {
return m.rolesByName[roleName]
}
// GetScriptPaths returns the paths to the startup / post configgin scripts for a role
func (r *Role) GetScriptPaths() map[string]string {
result := map[string]string{}
for _, scriptList := range [][]string{r.EnvironScripts, r.Scripts, r.PostConfigScripts} {
for _, script := range scriptList {
if filepath.IsAbs(script) {
// Absolute paths _inside_ the container; there is nothing to copy
continue
}
result[script] = filepath.Join(filepath.Dir(r.rolesManifest.manifestFilePath), script)
}
}
return result
}
// GetRoleDevVersion gets the aggregate signature of all jobs and packages
func (r *Role) GetRoleDevVersion() string {
roleSignature := ""
var packages Packages
// Jobs are *not* sorted because they are an array and the order may be
// significant, in particular for bosh-task roles.
for _, job := range r.Jobs {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, job.SHA1)
packages = append(packages, job.Packages...)
}
sort.Sort(packages)
for _, pkg := range packages {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, pkg.SHA1)
}
hasher := sha1.New()
hasher.Write([]byte(roleSignature))
return hex.EncodeToString(hasher.Sum(nil))
}
// HasTag returns true if the role has a specific tag
func (r *Role) HasTag(tag string) bool {
for _, t := range r.Tags {
if t == tag {
return true
}
}
return false
}
func (r *Role) calculateRoleConfigurationTemplates() {
if r.Configuration == nil {
r.Configuration = &Configuration{}
}
if r.Configuration.Templates == nil {
r.Configuration.Templates = map[string]string{}
}
roleConfigs := map[string]string{}
for k, v := range r.rolesManifest.Configuration.Templates {
roleConfigs[k] = v
}
for k, v := range r.Configuration.Templates {
roleConfigs[k] = v
}
r.Configuration.Templates = roleConfigs
} | FlightStagePostFlight = FlightStage("post-flight") // A role that runs after the main jobs are up
FlightStageManual = FlightStage("manual") // A role that only runs via user intervention
) | random_line_split |
roles.go | package model
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"gopkg.in/yaml.v2"
)
// RoleType is the type of the role; see the constants below
type RoleType string
// These are the types of roles available
const (
RoleTypeBoshTask = RoleType("bosh-task") // A role that is a BOSH task
RoleTypeBosh = RoleType("bosh") // A role that is a BOSH job
RoleTypeDocker = RoleType("docker") // A role that is a raw Docker image
)
// FlightStage describes when a role should be executed
type FlightStage string
// These are the flight stages available
const (
FlightStagePreFlight = FlightStage("pre-flight") // A role that runs before the main jobs start
FlightStageFlight = FlightStage("flight") // A role that is a main job
FlightStagePostFlight = FlightStage("post-flight") // A role that runs after the main jobs are up
FlightStageManual = FlightStage("manual") // A role that only runs via user intervention
)
// RoleManifest represents a collection of roles
type RoleManifest struct {
Roles Roles `yaml:"roles"`
Configuration *Configuration `yaml:"configuration"`
manifestFilePath string
rolesByName map[string]*Role
}
// Role represents a collection of jobs that are colocated on a container
type Role struct {
Name string `yaml:"name"`
Jobs Jobs `yaml:"_,omitempty"`
EnvironScripts []string `yaml:"environment_scripts"`
Scripts []string `yaml:"scripts"`
PostConfigScripts []string `yaml:"post_config_scripts"`
Type RoleType `yaml:"type,omitempty"`
JobNameList []*roleJob `yaml:"jobs"`
Configuration *Configuration `yaml:"configuration"`
Run *RoleRun `yaml:"run"`
Tags []string `yaml:"tags"`
rolesManifest *RoleManifest
}
// RoleRun describes how a role should behave at runtime
type RoleRun struct {
Scaling *RoleRunScaling `yaml:"scaling"`
Capabilities []string `yaml:"capabilities"`
PersistentVolumes []*RoleRunVolume `yaml:"persistent-volumes"`
SharedVolumes []*RoleRunVolume `yaml:"shared-volumes"`
Memory int `yaml:"memory"`
VirtualCPUs int `yaml:"virtual-cpus"`
ExposedPorts []*RoleRunExposedPort `yaml:"exposed-ports"`
FlightStage FlightStage `yaml:"flight-stage"`
HealthCheck *HealthCheck `yaml:"healthcheck,omitempty"`
}
// RoleRunScaling describes how a role should scale out at runtime
type RoleRunScaling struct {
Min int32 `yaml:"min"`
Max int32 `yaml:"max"`
}
// RoleRunVolume describes a volume to be attached at runtime
type RoleRunVolume struct {
Path string `yaml:"path"`
Tag string `yaml:"tag"`
Size int `yaml:"size"`
}
// RoleRunExposedPort describes a port to be available to other roles, or the outside world
type RoleRunExposedPort struct {
Name string `yaml:"name"`
Protocol string `yaml:"protocol"`
External string `yaml:"external"`
Internal string `yaml:"internal"`
Public bool `yaml:"public"`
}
// HealthCheck describes a non-standard health check endpoint
type HealthCheck struct {
URL string `yaml:"url"` // URL for a HTTP GET to return 200~399. Cannot be used with other checks.
Headers map[string]string `yaml:"headers"` // Custom headers; only used for URL.
Command []string `yaml:"command"` // Custom command. Cannot be used with other checks.
Port int32 `yaml:"port"` // Port for a TCP probe. Cannot be used with other checks.
}
// Roles is an array of Role*
type Roles []*Role
// Configuration contains information about how to configure the
// resulting images
type Configuration struct {
Templates map[string]string `yaml:"templates"`
Variables ConfigurationVariableSlice `yaml:"variables"`
}
// ConfigurationVariable is a configuration to be exposed to the IaaS
type ConfigurationVariable struct {
Name string `yaml:"name"`
Default interface{} `yaml:"default"`
Description string `yaml:"description"`
Generator *ConfigurationVariableGenerator `yaml:"generator"`
}
// ConfigurationVariableSlice is a sortable slice of ConfigurationVariables
type ConfigurationVariableSlice []*ConfigurationVariable
// Len is the number of ConfigurationVariables in the slice
func (confVars ConfigurationVariableSlice) Len() int {
return len(confVars)
}
// Less reports whether config variable at index i sort before the one at index j
func (confVars ConfigurationVariableSlice) Less(i, j int) bool {
return strings.Compare(confVars[i].Name, confVars[j].Name) < 0
}
// Swap exchanges configuration variables at index i and index j
func (confVars ConfigurationVariableSlice) Swap(i, j int) {
confVars[i], confVars[j] = confVars[j], confVars[i]
}
// ConfigurationVariableGenerator describes how to automatically generate values
// for a configuration variable
type ConfigurationVariableGenerator struct {
ID string `yaml:"id"`
Type string `yaml:"type"`
ValueType string `yaml:"value_type"`
}
type roleJob struct {
Name string `yaml:"name"`
ReleaseName string `yaml:"release_name"`
}
// Len is the number of roles in the slice
func (roles Roles) Len() int {
return len(roles)
}
// Less reports whether role at index i sort before role at index j
func (roles Roles) Less(i, j int) bool {
return strings.Compare(roles[i].Name, roles[j].Name) < 0
}
// Swap exchanges roles at index i and index j
func (roles Roles) Swap(i, j int) |
// LoadRoleManifest loads a yaml manifest that details how jobs get grouped into roles
func LoadRoleManifest(manifestFilePath string, releases []*Release) (*RoleManifest, error) {
manifestContents, err := ioutil.ReadFile(manifestFilePath)
if err != nil {
return nil, err
}
mappedReleases := map[string]*Release{}
for _, release := range releases {
_, ok := mappedReleases[release.Name]
if ok {
return nil, fmt.Errorf("Error - release %s has been loaded more than once", release.Name)
}
mappedReleases[release.Name] = release
}
rolesManifest := RoleManifest{}
rolesManifest.manifestFilePath = manifestFilePath
if err := yaml.Unmarshal(manifestContents, &rolesManifest); err != nil {
return nil, err
}
for i := len(rolesManifest.Roles) - 1; i >= 0; i-- {
role := rolesManifest.Roles[i]
// Normalize flight stage
if role.Run != nil {
switch role.Run.FlightStage {
case "":
role.Run.FlightStage = FlightStageFlight
case FlightStagePreFlight:
case FlightStageFlight:
case FlightStagePostFlight:
case FlightStageManual:
default:
return nil, fmt.Errorf("Role %s has an invalid flight stage %s", role.Name, role.Run.FlightStage)
}
}
// Remove all roles that are not of the "bosh" or "bosh-task" type
// Default type is considered to be "bosh"
switch role.Type {
case "":
role.Type = RoleTypeBosh
case RoleTypeBosh, RoleTypeBoshTask:
continue
case RoleTypeDocker:
rolesManifest.Roles = append(rolesManifest.Roles[:i], rolesManifest.Roles[i+1:]...)
default:
return nil, fmt.Errorf("Role %s has an invalid type %s", role.Name, role.Type)
}
// Ensure that we don't have conflicting health checks
if role.Run != nil && role.Run.HealthCheck != nil {
checks := make([]string, 0, 3)
if role.Run.HealthCheck.URL != "" {
checks = append(checks, "url")
}
if len(role.Run.HealthCheck.Command) > 0 {
checks = append(checks, "command")
}
if role.Run.HealthCheck.Port != 0 {
checks = append(checks, "port")
}
if len(checks) != 1 {
return nil, fmt.Errorf("Health check for role %s should have exactly one of url, command, or port; got %v", role.Name, checks)
}
}
}
if rolesManifest.Configuration == nil {
rolesManifest.Configuration = &Configuration{}
}
if rolesManifest.Configuration.Templates == nil {
rolesManifest.Configuration.Templates = map[string]string{}
}
rolesManifest.rolesByName = make(map[string]*Role, len(rolesManifest.Roles))
for _, role := range rolesManifest.Roles {
role.rolesManifest = &rolesManifest
role.Jobs = make(Jobs, 0, len(role.JobNameList))
for _, roleJob := range role.JobNameList {
release, ok := mappedReleases[roleJob.ReleaseName]
if !ok {
return nil, fmt.Errorf("Error - release %s has not been loaded and is referenced by job %s in role %s",
roleJob.ReleaseName, roleJob.Name, role.Name)
}
job, err := release.LookupJob(roleJob.Name)
if err != nil {
return nil, err
}
role.Jobs = append(role.Jobs, job)
}
role.calculateRoleConfigurationTemplates()
rolesManifest.rolesByName[role.Name] = role
}
return &rolesManifest, nil
}
// GetRoleManifestDevPackageVersion gets the aggregate signature of all the packages
func (m *RoleManifest) GetRoleManifestDevPackageVersion(extra string) string {
// Make sure our roles are sorted, to have consistent output
roles := append(Roles{}, m.Roles...)
sort.Sort(roles)
hasher := sha1.New()
hasher.Write([]byte(extra))
for _, role := range roles {
hasher.Write([]byte(role.GetRoleDevVersion()))
}
return hex.EncodeToString(hasher.Sum(nil))
}
// LookupRole will find the given role in the role manifest
func (m *RoleManifest) LookupRole(roleName string) *Role {
return m.rolesByName[roleName]
}
// GetScriptPaths returns the paths to the startup / post configgin scripts for a role
func (r *Role) GetScriptPaths() map[string]string {
result := map[string]string{}
for _, scriptList := range [][]string{r.EnvironScripts, r.Scripts, r.PostConfigScripts} {
for _, script := range scriptList {
if filepath.IsAbs(script) {
// Absolute paths _inside_ the container; there is nothing to copy
continue
}
result[script] = filepath.Join(filepath.Dir(r.rolesManifest.manifestFilePath), script)
}
}
return result
}
// GetRoleDevVersion gets the aggregate signature of all jobs and packages
func (r *Role) GetRoleDevVersion() string {
roleSignature := ""
var packages Packages
// Jobs are *not* sorted because they are an array and the order may be
// significant, in particular for bosh-task roles.
for _, job := range r.Jobs {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, job.SHA1)
packages = append(packages, job.Packages...)
}
sort.Sort(packages)
for _, pkg := range packages {
roleSignature = fmt.Sprintf("%s\n%s", roleSignature, pkg.SHA1)
}
hasher := sha1.New()
hasher.Write([]byte(roleSignature))
return hex.EncodeToString(hasher.Sum(nil))
}
// HasTag returns true if the role has a specific tag
func (r *Role) HasTag(tag string) bool {
for _, t := range r.Tags {
if t == tag {
return true
}
}
return false
}
func (r *Role) calculateRoleConfigurationTemplates() {
if r.Configuration == nil {
r.Configuration = &Configuration{}
}
if r.Configuration.Templates == nil {
r.Configuration.Templates = map[string]string{}
}
roleConfigs := map[string]string{}
for k, v := range r.rolesManifest.Configuration.Templates {
roleConfigs[k] = v
}
for k, v := range r.Configuration.Templates {
roleConfigs[k] = v
}
r.Configuration.Templates = roleConfigs
}
| {
roles[i], roles[j] = roles[j], roles[i]
} | identifier_body |
autoComplete.js |
var search_box;
var search_option;
var m_now = 0, s_now = 0, shl = 0, a_now = 0, a_on = 0, arr_on = 0, frm_on = 0;
var cn_use = "use_ac";
var wi_len = 2;
var wi_int = 500;
var max_row = 4;
var B = "block", I = "inline", N = "none", UD = "undefined";
var bak = "", old = "";
var qs_ac_list = "", qs_ac_id = "", qs_q = "", qs_m = 0, qs_ac_len = 0;
var acuse = 1; //자동완성사용여부
var cc = new Object();
var ac_layer_visibility = 0;
var goGoodsNo;
function get_nav() {
var ver = navigator.appVersion;
if (navigator.appName.indexOf("Microsoft") != -1 && ver.indexOf("MSIE 4") == -1 && ver.indexOf("MSIE 3") == -1) {
return 1;
} else if (navigator.appName.indexOf("Netscape") != -1) return 2;
else return 0;
}
//기능끄기 버튼을 눌렀을때
function ac_off() {
if ($j("#schInput").val() == "") {
popup_ac(0);
//document.all.noquery_ac_body.style.display = "none";
} else {
ac_hide();
// search_box.autocomplete = "off" ;
}
acuse = 0;
}
//기능켜기 버튼을 눌렀을때
function ac_on() {
acuse = 1;
popup_ac(1);
if ($j("#schInput").val() != "")
wd();
setTimeout("wi()", wi_int);
$j("#schInput").focus();
}
//type=0 : 모두 감춘준다.
//type=1 : 검색어가 있을때 자동완성창 보이기
//type=2 : 기능이 꺼져있을때 자동완성창 보이기
//type=3 : '검색어를 입력해달라'는 자동완성창 보이기
function popup_ac(type) {
if (type == 0) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
//검색창내 세모 이미지변경
switch_image(0);
} else if (type == 1) {
$j("#ac_body").css("display", "block");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 2) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "block");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 3) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body")[0].style.display = "block";
switch_image(1);
}
}
//인풋박스의 세모 버튼을 눌렀을때 자동완성창을 보여준다.
function show_ac() {
if (acuse == 0) {
if ($j("#ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(2);
}
else {
if ($j("#schInput").val() == "") {
if ($j("#noquery_ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(3);
}
else {
//wd();
//alert(document.all.schInput.value);
req_ipc();
}
}
}
function wd() {
search_box.onclick = req_ipc;
document.body.onclick = dis_p;
}
var dnc = 0;
function req_ipc() {
dnc = 1;
frm_on = 0;
req_ac2(1);
}
function dis_p() {
//alert(5);
if (dnc) {
dnc = 0;
return;
}
if (arr_on) {
return;
}
if (frm_on) {
return;
}
alw = 0;
ac_hide();
}
function req_ac2(me) {
if (search_box.value == "" || acuse == 0) return;
if (a_on && dnc) {
ac_hide();
return;
}
var o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else reqAC(me);
}
function showAC(res) {
eval(res);
set_cc(qs_q, qs_ac_list, qs_ac_id, qs_m);
ac_show(qs_q, qs_ac_list, qs_ac_id, qs_m);
}
function reqAC(me) {
var sv;
var ke = trim_space(search_box.value, me);
var sTarget = "공연";
ke = ke.replace(/ /g, "%20");
if (ke == "") {
ac_hide();
return;
}
$j.ajax({
async: true,
type: "POST",
url: "/Pages/Search/Ajax/get_ac_2.aspx",
data: { p: me, q: ke, t: sTarget },
dataType: "text",
success: showAC
});
}
function ac_show(aq, al, ai, am) {
if (aq && aq != "" && aq != trim_space(search_box.value, am)) return;
qs_q = aq;
qs_m = am;
qs_ac_list = al;
qs_ac_id = ai;
qs_ac_len = qs_ac_list.length;
var h = (qs_ac_len > 7) ? 7 : qs_ac_len;
h = h * 19;
print_ac();
if (qs_ac_list[0] == "" && (qs_m == 1 || qs_m == 2)) {
qs_ac_len = 1;
h = 19;
if (qs_ac_list[0] == "") h = h + 19;
}
h = 140;
scrol.style.height = h + 4;
if (qs_ac_len) {
h += 38;
//a_on=1;
} else {
//a_on=0;
}
a_on = 1;
ac_body.width = 330;
ac_body.height = h;
popup_ac(1);
if (a_on) {
set_acpos(0, 0);
scrol.scrollTop = 0;
search_box.onkeydown = ackhl;
}
}
function set_acpos(v, bookImgsrc) {
a_now = v;
setTimeout('set_ahl();', 10);
if (v > 0 && bookImgsrc) {
$j("#bookImg").show();
$j("#bookImg").attr("src", bookImgsrc);
goGoodsNo = $j("#gNo" + a_now).val();
}
}
function goGoods() {
location.href = "/Pages/Perf/Detail/Detail.aspx?IdPerf=" + (goGoodsNo);
}
function set_ahl() {
if (!a_on) return;
var o1, o2;
for (i = 0; i < qs_ac_len; i++) {
o1 = document.getElementById('ac' + (i + 1));
if ((i + 1) == a_now) {
o1.style.backgroundColor = '#e3edf5';
} else {
o1.style.backgroundColor = '';
// document.all.bookImg.src = '';
}
}
}
function Keycode(e) {
var result;
if (window.event)
result = window.event.keyCode;
else if (e)
result = e.which;
return result;
}
//키를 누를때 이벤트 검사하는 함수
function ackhl(event) {
var key = Keycode(event);
var o1, o2;
var img;
if (key == 39) {
req_ac2(1);
}
if (key == 13) {
//if (a_now>0) search_keyword(qs_ac_list[a_now]);
}
if (key == 40 || (key == 9)) {
if (!a_on) {
req_ac2(1);
return;
}
if (a_now < qs_ac_len) {
if (a_now == 0) bak = search_box.value;
a_now++;
if (a_now > max_row) scrol.scrollTop = parseInt((a_now - 1) / max_row) * max_row * 19;
o1 = document.getElementById('ac' + a_now);
//o2 = document.getElementById('acq' + a_now) ;
//img = document.getElementById("img" + a_now) ;
//old = search_box.value = o2.outerText ;
//set_acpos(a_now, img.outerText) ;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false;
}
}
if (a_on && (key == 38 || key == 9)) {
if (!a_on) return;
if (a_now <= 1) {
ac_hide();
old = search_box.value = bak;
}
else {
a_now--;
if ((qs_ac_len - a_now) + 1 > max_row) scrol.scrollTop = (qs_ac_len - (parseInt((qs_ac_len - a_now) / max_row) + 1) * 4) * 19;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false ;
}
}
}
function print_ac() {
if (qs_ac_list[0] == "") {
scrol.innerHTML = get_ac0();
}
else {
scrol.innerHTML = get_aclist();
}
//alert(scrol.innerHTML);
popup_ac(1); //자동완성창 보여줌.
//document.all.ac_body.style.display = B ;
setTimeout('set_ahl();', 10);
}
function get_aclist() {
var d = "", ds = "", l = 0, s = "", cnt = 0, pos = 0, qlen = 0, img = "";
if (qs_ac_list[0] != "") {
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-left:6px>";
s += "<tr><td width=65% valign=top>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-top:10px>";
for (i = 0; i < qs_ac_len; i++) {
var query = qs_ac_list[i].split("^");
ds = d = query[0];
goodsNo = query[2];
age_lmt_yn = query[3];
lmt_age = query[4];
img = query[1];
l = js_strlen(d);
if (l > 20) ds = js_substring(d, 0, 26) + "..";
pos = d.indexOf(search_box.value);
if (pos >= 0) {
if (pos == 0) {
ds = js_highlight(ds, search_box.value, 0);
}
else if (pos == d.length - 1) {
ds = js_highlight(ds, search_box.value, -1);
} else {
ds = js_highlight(ds, search_box.value, pos);
}
}
s += "<input type='hidden' Id='gNo" + (i + 1) + "' value='" + goodsNo + "'>";
s += "<a href='javascript:jsf_mm_Search();'>";
s += "<tr id='ac" + (i + 1) + "' onmouseover=\"set_acpos('" + (i + 1) + "', '" + img + "');\" onmouseout=\"set_acpos(0,0); \" onclick=\"set_acinput('" + (i + 1) + "')\" style=\"this.style.backgroundColor=''\" style='cursor:pointer;'>";
s += "<td style=padding-left:5px; height=22 align=left title=\"" + d + "\">" + ds + "</td>";
s += "<td height=22 align=right></td>";
s += "</tr></a>";
s += "<span id='acq" + (i + 1) + "' style='display:none'>" + d + "</span>";
s += "<span id='img" + (i + 1) + "' style='display:none'>" + img + "</span>";
}
s += "</table>";
s += "</td><td width=10px></td><td>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=\"margin-top:10px;\">";
s += "<tr><td valign='top' height=140 width=70><img border=0 id='bookImg' width='70px' height='87px' style='display:none;cursor:pointer;' onClick=\"goGoods();\"> </td></tr></table>";
s += "</td></tr></table>"
}
return s;
}
function js_makehigh_pre(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(0, t1.length)) {
d = "<font color=#4b7bcd>";
for (var i = 0, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1)
}
d += "</font>" + s.substring(i, s.length)
}
return d;
}
function js_makehigh_suf(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(s1.length - t1.length)) {
for (var i = 0, j = 0; j < s1.length - t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "<font color=#4b7bcd>";
for (var k = i, l = 0; l < t1.length; k++) {
if (s.substring(k, k + 1) != " ") l++;
d += s.substring(k, k + 1);
}
d += "</font>";
}
return d;
}
function js_makehigh_mid(s, t, pos) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
d = s.substring(0, pos);
d += "<font color=#4b7bcd>";
for (var i = pos, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "</font>" + s.substring(i, s.length);
return d;
}
function js_highlight(s, d, is_suf) {
var ret = "";
if (is_suf == 0) {
ret = js_makehigh_pre(s, d);
}
else if (is_suf == -1) {
ret = js_makehigh_suf(s, d);
}
else {
ret = js_makehigh_mid(s, d, is_suf);
}
if (ret == "") return s;
else return ret;
}
function set_acinput(v) {
if (!a_on) return;
old = search_box.value = $j('#acq' + a_now).text();
search_box.focus();
ac_hide();
}
function get_ac0() {
var s = "", ment = "";
if (qs_m == 1) ment = "추천 상품명이 없습니다.";
else if (qs_m == 2) ment = "추천 상품명이 없습니다.";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0>";
s += "<tr id=ac1 onmouseover=\"set_acpos(1,0); \" onmouseout=\"set_acpos(0,0); \" style=\"backgroundColor=''\">";
s += "<td height=20 align=left style=padding:5px;font-size:11px;>" + ment + "<img border=0 id='bookImg' style='display:none'></td></tr>";
s += "</table>";
s += "<span id=acq1 style='display:none'>" + old + "</span>";
return s;
}
function js_strlen(s) {
var i, l = 0;
for (i = 0; i < s.length; i++)
if (s.charCodeAt(i) > 127) l += 2;
else l++;
return l;
}
function js_substring(s, start, len) {
var i, l = 0; d = "";
for (i = start; i < s.length && l < len; i++) {
if (s.charCodeAt(i) > 127) l += 2;
else l++;
d += s.substr(i, 1);
}
return d;
}
function trim_space(ke, me) {
if (me != 2) {
ke = ke.replace(/^ +/g, "");
ke = ke.replace(/ +$/g, " ");
} else {
ke = ke.replace(/^ +/g, " ");
ke = ke.replace(/ +$/g, "");
}
ke = ke.replace(/ +/g, " ");
return ke;
}
function get_cc(me) {
var ke = trim_space(search_box.value, me) + me;
return cc[ke];
// alert(typeof(cc[ke]));
// return typeof(cc[ke])==UD ? null : cc[ke] ;
}
function set_cc(aq, al, ai, me) {
cc[aq + me] = new Array(aq, al, ai);
}
function ac_hide() {
if (document.getElementById("ac_body").style.display == N) return;
popup_ac(0); //hide all
a_on = a_now = 0;
}
function wi() {
if (acuse == 0) return;
var now = search_box.value;
if (now == "" && now != old) ac_hide();
if (now.length >= wi_len && now != "" && now != old) {
var o = null | ); }
}
old = now;
setTimeout("wi()", wi_int);
}
function set_mouseon(f) {
if (f == 1) arr_on = 1;
else if (f == 2) frm_on = 1;
}
function set_mouseoff(f) {
if (f == 1) arr_on = 0;
else if (f == 2) frm_on = 0;
}
//검색어입력창의 자동완성 화살표를 위, 아래로 변경한다.
//type 0 : 창이 닫혔을때 화살표 아래로.
//type 1 : 창이 펼처졌을때 위로
function switch_image(type) {
if (type == 0) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow.gif");
} else if (type == 1) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow2.gif");
}
}
function debug(msg) {
window.status = msg;
}
function s_init() {
search_box = document.getElementById("schInput");
bak = old = search_box.value;
wd();
setTimeout("wi()", wi_int);
}
$j(document).ready(function () {
setTimeout("s_init()", 2000);
});
| , me = 1;
o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else { reqAC(me | identifier_body |
autoComplete.js | var search_box;
var search_option;
var m_now = 0, s_now = 0, shl = 0, a_now = 0, a_on = 0, arr_on = 0, frm_on = 0;
var cn_use = "use_ac";
var wi_len = 2;
var wi_int = 500;
var max_row = 4;
var B = "block", I = "inline", N = "none", UD = "undefined";
var bak = "", old = "";
var qs_ac_list = "", qs_ac_id = "", qs_q = "", qs_m = 0, qs_ac_len = 0;
var acuse = 1; //자동완성사용여부
var cc = new Object();
var ac_layer_visibility = 0;
var goGoodsNo;
function get_nav() {
var ver = navigator.appVersion;
if (navigator.appName.indexOf("Microsoft") != -1 && ver.indexOf("MSIE 4") == -1 && ver.indexOf("MSIE 3") == -1) {
return 1;
} else if (navigator.appName.indexOf("Netscape") != -1) return 2;
else return 0;
}
//기능끄기 버튼을 눌렀을때
function ac_off() {
if ($j("#schInput").val() == "") {
popup_ac(0);
//document.all.noquery_ac_body.style.display = "none";
} else {
ac_hide();
// search_box.autocomplete = "off" ;
}
acuse = 0;
}
//기능켜기 버튼을 눌렀을때
function ac_on() {
acuse = 1;
popup_ac(1);
if ($j("#schInput").val() != "")
wd();
setTimeout("wi()", wi_int);
$j("#schInput").focus();
}
//type=0 : 모두 감춘준다.
//type=1 : 검색어가 있을때 자동완성창 보이기
//type=2 : 기능이 꺼져있을때 자동완성창 보이기
//type=3 : '검색어를 입력해달라'는 자동완성창 보이기
function popup_ac(type) {
if (type == 0) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
//검색창내 세모 이미지변경
switch_image(0);
} else if (type == 1) {
$j("#ac_body").css("display", "block");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 2) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "block");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 3) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body")[0].style.display = "block";
switch_image(1);
}
}
//인풋박스의 세모 버튼을 눌렀을때 자동완성창을 보여준다.
function show_ac() {
if (acuse == 0) {
if ($j("#ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(2);
}
else {
if ($j("#schInput").val() == "") {
if ($j("#noquery_ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(3);
}
else {
//wd();
//alert(document.all.schInput.value);
req_ipc();
}
}
}
function wd() {
search_box.onclick = req_ipc;
document.body.onclick = dis_p;
}
var dnc = 0;
function req_ipc() {
dnc = 1;
frm_on = 0;
req_ac2(1);
}
function dis_p() {
//alert(5);
if (dnc) {
dnc = 0;
return;
}
if (arr_on) {
return;
}
if (frm_on) {
return;
}
alw = 0;
ac_hide();
}
function req_ac2(me) {
if (search_box.value == "" || acuse == 0) return;
if (a_on && dnc) {
ac_hide();
return;
}
var o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else reqAC(me);
}
function showAC(res) {
eval(res);
set_cc(qs_q, qs_ac_list, qs_ac_id, qs_m);
ac_show(qs_q, qs_ac_list, qs_ac_id, qs_m);
}
function reqAC(me) {
var sv;
var ke = trim_space(search_box.value, me);
var sTarget = "공연";
ke = ke.replace(/ /g, "%20");
if (ke == "") {
ac_hide();
return;
}
$j.ajax({
async: true,
type: "POST",
url: "/Pages/Search/Ajax/get_ac_2.aspx",
data: { p: me, q: ke, t: sTarget },
dataType: "text",
success: showAC
});
}
function ac_show(aq, al, ai, am) {
if (aq && aq != "" && aq != trim_space(search_box.value, am)) return;
qs_q = aq;
qs_m = am;
qs_ac_list = al;
qs_ac_id = ai;
qs_ac_len = qs_ac_list.length;
var h = (qs_ac_len > 7) ? 7 : qs_ac_len;
h = h * 19;
print_ac();
if (qs_ac_list[0] == "" && (qs_m == 1 || qs_m == 2)) {
qs_ac_len = 1;
h = 19;
if (qs_ac_list[0] == "") h = h + 19;
}
h = 140;
scrol.style.height = h + 4;
if (qs_ac_len) {
h += 38;
//a_on=1;
} else {
//a_on=0;
}
a_on = 1;
ac_body.width = 330;
ac_body.height = h;
popup_ac(1);
if (a_on) {
set_acpos(0, 0);
scrol.scrollTop = 0;
search_box.onkeydown = ackhl;
}
}
function set_acpos(v, bookImgsrc) {
a_now = v;
setTimeout('set_ahl();', 10);
if (v > 0 && bookImgsrc) {
$j("#bookImg").show();
$j("#bookImg").attr("src", bookImgsrc);
goGoodsNo = $j("#gNo" + a_now).val();
}
}
function goGoods() {
location.href = "/Pages/Perf/Detail/Detail.aspx?IdPerf=" + (goGoodsNo);
}
function set_ahl() {
if (!a_on) return;
var o1, o2;
for (i = 0; i < qs_ac_len; i++) {
o1 = document.getElementById('ac' + (i + 1));
if ((i + 1) == a_now) {
o1.style.backgroundColor = '#e3edf5';
} else {
o1.style.backgroundColor = '';
// document.all.bookImg.src = '';
}
}
}
function Keycode(e) {
var result;
if (window.event)
result = window.event.keyCode;
else if (e)
result = e.which;
return result;
}
//키를 누를때 이벤트 검사하는 함수
function ackhl(event) {
var key = Keycode(event);
var o1, o2;
var img;
if (key == 39) {
req_ac2(1);
}
if (key == 13) {
//if (a_now>0) search_keyword(qs_ac_list[a_now]);
}
if (key == 40 || (key == 9)) {
if (!a_on) {
req_ac2(1);
return;
}
if (a_now < qs_ac_len) {
if (a_now == 0) bak = search_box.value;
a_now++;
if (a_now > max_row) scrol.scrollTop = parseInt((a_now - 1) / max_row) * max_row * 19;
o1 = document.getElementById('ac' + a_now);
//o2 = document.getElementById('acq' + a_now) ;
//img = document.getElementById("img" + a_now) ;
//old = search_box.value = o2.outerText ;
//set_acpos(a_now, img.outerText) ;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false;
}
}
if (a_on && (key == 38 || key == 9)) {
if (!a_on) return;
if (a_now <= 1) {
ac_hide();
old = search_box.value = bak;
}
else {
a_now--;
if ((qs_ac_len - a_now) + 1 > max_row) scrol.scrollTop = (qs_ac_len - (parseInt((qs_ac_len - a_now) / max_row) + 1) * 4) * 19;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false ;
}
}
}
function print_ac() {
if (qs_ac_list[0] == "") {
scrol.innerHTML = get_ac0();
}
else {
scrol.innerHTML = get_aclist();
}
//alert(scrol.innerHTML);
popup_ac(1); //자동완성창 보여줌.
//document.all.ac_body.style.display = B ;
setTimeout('set_ahl();', 10);
}
function get_aclist() {
var d = "", ds = "", l = 0, s = "", cnt = 0, pos = 0, qlen = 0, img = "";
if (qs_ac_list[0] != "") {
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-left:6px>";
s += "<tr><td width=65% valign=top>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-top:10px>";
for (i = 0; i < qs_ac_len; i++) {
var query = qs_ac_list[i].split("^");
ds = d = query[0];
goodsNo = query[2];
age_lmt_yn = query[3];
lmt_age = query[4];
img = query[1];
l = js_strlen(d);
if (l > 20) ds = js_substring(d, 0, 26) + "..";
pos = d.indexOf(search_box.value);
if (pos >= 0) {
if (pos == 0) {
ds = js_highlight(ds, search_box.value, 0);
}
else if (pos == d.length - 1) {
ds = js_highlight(ds, search_box.value, -1);
} else {
ds = js_highlight(ds, search_box.value, pos);
}
}
s += "<input type='hidden' Id='gNo" + (i + 1) + "' value='" + goodsNo + "'>";
s += "<a href='javascript:jsf_mm_Search();'>";
s += "<tr id='ac" + (i + 1) + "' onmouseover=\"set_acpos('" + (i + 1) + "', '" + img + "');\" onmouseout=\"set_acpos(0,0); \" onclick=\"set_acinput('" + (i + 1) + "')\" style=\"this.style.backgroundColor=''\" style='cursor:pointer;'>";
s += "<td style=padding-left:5px; height=22 align=left title=\"" + d + "\">" + ds + "</td>";
s += "<td height=22 align=right></td>";
s += "</tr></a>";
s += "<span id='acq" + (i + 1) + "' style='display:none'>" + d + "</span>";
s += "<span id='img" + (i + 1) + "' style='display:none'>" + img + "</span>";
}
s += "</table>";
s += "</td><td width=10px></td><td>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=\"margin-top:10px;\">";
s += "<tr><td valign='top' height=140 width=70><img border=0 id='bookImg' width='70px' height='87px' style='display:none;cursor:pointer;' onClick=\"goGoods();\"> </td></tr></table>";
s += "</td></tr></table>"
}
return s;
}
function js_makehigh_pre(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(0, t1.length)) {
d = "<font color=#4b7bcd>";
for (var i = 0, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1)
}
d += "</font>" + s.substring(i, s.length)
}
return d;
}
function js_makehigh_suf(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(s1.length - t1.length)) {
for (var i = 0, j = 0; j < s1.length - t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "<font color=#4b7bcd>";
for (var k = i, l = 0; l < t1.length; k++) {
if (s.substring(k, k + 1) != " ") l++;
d += s.substring(k, k + 1);
}
d += "</font>";
}
return d;
}
function js_makehigh_mid(s, t, pos) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
d = s.substring(0, pos);
d += "<font color=#4b7bcd>";
for (var i = pos, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "</font>" + s.substring(i, s.length);
return d;
}
function js_highlight(s, d, is_suf) {
var ret = "";
if (is_suf == 0) {
ret = js_makehigh_pre(s, d);
}
else if (is_suf == -1) {
ret = js_makehigh_suf(s, d);
}
else {
ret = js_makehigh_mid(s, d, is_suf);
}
if (ret == "") return s;
else return ret;
}
function set_acinput(v) {
if (!a_on) return;
old = search_box.value = $j('#acq' + a_now).text();
search_box.focus();
ac_hide();
}
function get_ac0() {
var s = "", ment = "";
if (qs_m == 1) ment = "추천 상품명이 없습니다.";
else if (qs_m == 2) ment = "추천 상품명이 없습니다.";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0>";
s += "<tr id=ac1 onmouseover=\"set_acpos(1,0); \" onmouseout=\"set_acpos(0,0); \" style=\"backgroundColor=''\">";
s += "<td height=20 align=left style=padding:5px;font-size:11px;>" + ment + "<img border=0 id='bookImg' style='display:none'></td></tr>";
s += "</table>";
s += "<span id=acq1 style='display:none'>" + old + "</span>";
return s;
}
| function js_strlen(s) {
var i, l = 0;
for (i = 0; i < s.length; i++)
if (s.charCodeAt(i) > 127) l += 2;
else l++;
return l;
}
function js_substring(s, start, len) {
var i, l = 0; d = "";
for (i = start; i < s.length && l < len; i++) {
if (s.charCodeAt(i) > 127) l += 2;
else l++;
d += s.substr(i, 1);
}
return d;
}
function trim_space(ke, me) {
if (me != 2) {
ke = ke.replace(/^ +/g, "");
ke = ke.replace(/ +$/g, " ");
} else {
ke = ke.replace(/^ +/g, " ");
ke = ke.replace(/ +$/g, "");
}
ke = ke.replace(/ +/g, " ");
return ke;
}
function get_cc(me) {
var ke = trim_space(search_box.value, me) + me;
return cc[ke];
// alert(typeof(cc[ke]));
// return typeof(cc[ke])==UD ? null : cc[ke] ;
}
function set_cc(aq, al, ai, me) {
cc[aq + me] = new Array(aq, al, ai);
}
function ac_hide() {
if (document.getElementById("ac_body").style.display == N) return;
popup_ac(0); //hide all
a_on = a_now = 0;
}
function wi() {
if (acuse == 0) return;
var now = search_box.value;
if (now == "" && now != old) ac_hide();
if (now.length >= wi_len && now != "" && now != old) {
var o = null, me = 1;
o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else { reqAC(me); }
}
old = now;
setTimeout("wi()", wi_int);
}
function set_mouseon(f) {
if (f == 1) arr_on = 1;
else if (f == 2) frm_on = 1;
}
function set_mouseoff(f) {
if (f == 1) arr_on = 0;
else if (f == 2) frm_on = 0;
}
//검색어입력창의 자동완성 화살표를 위, 아래로 변경한다.
//type 0 : 창이 닫혔을때 화살표 아래로.
//type 1 : 창이 펼처졌을때 위로
function switch_image(type) {
if (type == 0) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow.gif");
} else if (type == 1) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow2.gif");
}
}
function debug(msg) {
window.status = msg;
}
function s_init() {
search_box = document.getElementById("schInput");
bak = old = search_box.value;
wd();
setTimeout("wi()", wi_int);
}
$j(document).ready(function () {
setTimeout("s_init()", 2000);
}); | random_line_split | |
autoComplete.js |
var search_box;
var search_option;
var m_now = 0, s_now = 0, shl = 0, a_now = 0, a_on = 0, arr_on = 0, frm_on = 0;
var cn_use = "use_ac";
var wi_len = 2;
var wi_int = 500;
var max_row = 4;
var B = "block", I = "inline", N = "none", UD = "undefined";
var bak = "", old = "";
var qs_ac_list = "", qs_ac_id = "", qs_q = "", qs_m = 0, qs_ac_len = 0;
var acuse = 1; //자동완성사용여부
var cc = new Object();
var ac_layer_visibility = 0;
var goGoodsNo;
function get_nav() {
var ver = navigator.appVersion;
if (navigator.appName.indexOf("Microsoft") != -1 && ver.indexOf("MSIE 4") == -1 && ver.indexOf("MSIE 3") == -1) {
return 1;
} else if (navigator.appName.indexOf("Netscape") != -1) return 2;
else return 0;
}
//기능끄기 버튼을 눌렀을때
function ac_off() {
if ($j("#schInput").val() == "") {
popup_ac(0);
//document.all.noquery_ac_body.style.display = "none";
} else {
ac_hide();
// search_box.autocomplete = "off" ;
}
acuse = 0;
}
//기능켜기 버튼을 눌렀을때
function ac_on() {
acuse = 1;
popup_ac(1);
if ($j("#schInput").val() != "")
wd();
setTimeout("wi()", wi_int);
$j("#schInput").focus();
}
//type=0 : 모두 감춘준다.
//type=1 : 검색어가 있을때 자동완성창 보이기
//type=2 : 기능이 꺼져있을때 자동완성창 보이기
//type=3 : '검색어를 입력해달라'는 자동완성창 보이기
function popup_ac(type) {
if (type == 0) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
//검색창내 세모 이미지변경
switch_image(0);
} else if (type == 1) {
$j("#ac_body").css("display", "block");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 2) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "block");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 3) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body")[0].style.display = "block";
switch_image(1);
}
}
//인풋박스의 세모 버튼을 눌렀을때 자동완성창을 보여준다.
function show_ac() {
if (acuse == 0) {
if ($j("#ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(2);
}
else {
if ($j("#schInput").val() == "") {
if ($j("#noquery_ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(3);
}
else {
//wd();
//alert(document.all.schInput.value);
req_ipc();
}
}
}
function wd() {
search_box.onclick = req_ipc;
document.body.onclick = dis_p;
}
var dnc = 0;
function req_ipc() {
dnc = 1;
frm_on = 0;
req_ac2(1);
}
function dis_p() {
//alert(5);
if (dnc) {
dnc = 0;
re | rn;
}
if (arr_on) {
return;
}
if (frm_on) {
return;
}
alw = 0;
ac_hide();
}
function req_ac2(me) {
if (search_box.value == "" || acuse == 0) return;
if (a_on && dnc) {
ac_hide();
return;
}
var o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else reqAC(me);
}
function showAC(res) {
eval(res);
set_cc(qs_q, qs_ac_list, qs_ac_id, qs_m);
ac_show(qs_q, qs_ac_list, qs_ac_id, qs_m);
}
function reqAC(me) {
var sv;
var ke = trim_space(search_box.value, me);
var sTarget = "공연";
ke = ke.replace(/ /g, "%20");
if (ke == "") {
ac_hide();
return;
}
$j.ajax({
async: true,
type: "POST",
url: "/Pages/Search/Ajax/get_ac_2.aspx",
data: { p: me, q: ke, t: sTarget },
dataType: "text",
success: showAC
});
}
function ac_show(aq, al, ai, am) {
if (aq && aq != "" && aq != trim_space(search_box.value, am)) return;
qs_q = aq;
qs_m = am;
qs_ac_list = al;
qs_ac_id = ai;
qs_ac_len = qs_ac_list.length;
var h = (qs_ac_len > 7) ? 7 : qs_ac_len;
h = h * 19;
print_ac();
if (qs_ac_list[0] == "" && (qs_m == 1 || qs_m == 2)) {
qs_ac_len = 1;
h = 19;
if (qs_ac_list[0] == "") h = h + 19;
}
h = 140;
scrol.style.height = h + 4;
if (qs_ac_len) {
h += 38;
//a_on=1;
} else {
//a_on=0;
}
a_on = 1;
ac_body.width = 330;
ac_body.height = h;
popup_ac(1);
if (a_on) {
set_acpos(0, 0);
scrol.scrollTop = 0;
search_box.onkeydown = ackhl;
}
}
function set_acpos(v, bookImgsrc) {
a_now = v;
setTimeout('set_ahl();', 10);
if (v > 0 && bookImgsrc) {
$j("#bookImg").show();
$j("#bookImg").attr("src", bookImgsrc);
goGoodsNo = $j("#gNo" + a_now).val();
}
}
function goGoods() {
location.href = "/Pages/Perf/Detail/Detail.aspx?IdPerf=" + (goGoodsNo);
}
function set_ahl() {
if (!a_on) return;
var o1, o2;
for (i = 0; i < qs_ac_len; i++) {
o1 = document.getElementById('ac' + (i + 1));
if ((i + 1) == a_now) {
o1.style.backgroundColor = '#e3edf5';
} else {
o1.style.backgroundColor = '';
// document.all.bookImg.src = '';
}
}
}
function Keycode(e) {
var result;
if (window.event)
result = window.event.keyCode;
else if (e)
result = e.which;
return result;
}
//키를 누를때 이벤트 검사하는 함수
function ackhl(event) {
var key = Keycode(event);
var o1, o2;
var img;
if (key == 39) {
req_ac2(1);
}
if (key == 13) {
//if (a_now>0) search_keyword(qs_ac_list[a_now]);
}
if (key == 40 || (key == 9)) {
if (!a_on) {
req_ac2(1);
return;
}
if (a_now < qs_ac_len) {
if (a_now == 0) bak = search_box.value;
a_now++;
if (a_now > max_row) scrol.scrollTop = parseInt((a_now - 1) / max_row) * max_row * 19;
o1 = document.getElementById('ac' + a_now);
//o2 = document.getElementById('acq' + a_now) ;
//img = document.getElementById("img" + a_now) ;
//old = search_box.value = o2.outerText ;
//set_acpos(a_now, img.outerText) ;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false;
}
}
if (a_on && (key == 38 || key == 9)) {
if (!a_on) return;
if (a_now <= 1) {
ac_hide();
old = search_box.value = bak;
}
else {
a_now--;
if ((qs_ac_len - a_now) + 1 > max_row) scrol.scrollTop = (qs_ac_len - (parseInt((qs_ac_len - a_now) / max_row) + 1) * 4) * 19;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false ;
}
}
}
function print_ac() {
if (qs_ac_list[0] == "") {
scrol.innerHTML = get_ac0();
}
else {
scrol.innerHTML = get_aclist();
}
//alert(scrol.innerHTML);
popup_ac(1); //자동완성창 보여줌.
//document.all.ac_body.style.display = B ;
setTimeout('set_ahl();', 10);
}
function get_aclist() {
var d = "", ds = "", l = 0, s = "", cnt = 0, pos = 0, qlen = 0, img = "";
if (qs_ac_list[0] != "") {
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-left:6px>";
s += "<tr><td width=65% valign=top>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-top:10px>";
for (i = 0; i < qs_ac_len; i++) {
var query = qs_ac_list[i].split("^");
ds = d = query[0];
goodsNo = query[2];
age_lmt_yn = query[3];
lmt_age = query[4];
img = query[1];
l = js_strlen(d);
if (l > 20) ds = js_substring(d, 0, 26) + "..";
pos = d.indexOf(search_box.value);
if (pos >= 0) {
if (pos == 0) {
ds = js_highlight(ds, search_box.value, 0);
}
else if (pos == d.length - 1) {
ds = js_highlight(ds, search_box.value, -1);
} else {
ds = js_highlight(ds, search_box.value, pos);
}
}
s += "<input type='hidden' Id='gNo" + (i + 1) + "' value='" + goodsNo + "'>";
s += "<a href='javascript:jsf_mm_Search();'>";
s += "<tr id='ac" + (i + 1) + "' onmouseover=\"set_acpos('" + (i + 1) + "', '" + img + "');\" onmouseout=\"set_acpos(0,0); \" onclick=\"set_acinput('" + (i + 1) + "')\" style=\"this.style.backgroundColor=''\" style='cursor:pointer;'>";
s += "<td style=padding-left:5px; height=22 align=left title=\"" + d + "\">" + ds + "</td>";
s += "<td height=22 align=right></td>";
s += "</tr></a>";
s += "<span id='acq" + (i + 1) + "' style='display:none'>" + d + "</span>";
s += "<span id='img" + (i + 1) + "' style='display:none'>" + img + "</span>";
}
s += "</table>";
s += "</td><td width=10px></td><td>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=\"margin-top:10px;\">";
s += "<tr><td valign='top' height=140 width=70><img border=0 id='bookImg' width='70px' height='87px' style='display:none;cursor:pointer;' onClick=\"goGoods();\"> </td></tr></table>";
s += "</td></tr></table>"
}
return s;
}
function js_makehigh_pre(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(0, t1.length)) {
d = "<font color=#4b7bcd>";
for (var i = 0, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1)
}
d += "</font>" + s.substring(i, s.length)
}
return d;
}
function js_makehigh_suf(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(s1.length - t1.length)) {
for (var i = 0, j = 0; j < s1.length - t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "<font color=#4b7bcd>";
for (var k = i, l = 0; l < t1.length; k++) {
if (s.substring(k, k + 1) != " ") l++;
d += s.substring(k, k + 1);
}
d += "</font>";
}
return d;
}
function js_makehigh_mid(s, t, pos) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
d = s.substring(0, pos);
d += "<font color=#4b7bcd>";
for (var i = pos, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "</font>" + s.substring(i, s.length);
return d;
}
function js_highlight(s, d, is_suf) {
var ret = "";
if (is_suf == 0) {
ret = js_makehigh_pre(s, d);
}
else if (is_suf == -1) {
ret = js_makehigh_suf(s, d);
}
else {
ret = js_makehigh_mid(s, d, is_suf);
}
if (ret == "") return s;
else return ret;
}
function set_acinput(v) {
if (!a_on) return;
old = search_box.value = $j('#acq' + a_now).text();
search_box.focus();
ac_hide();
}
function get_ac0() {
var s = "", ment = "";
if (qs_m == 1) ment = "추천 상품명이 없습니다.";
else if (qs_m == 2) ment = "추천 상품명이 없습니다.";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0>";
s += "<tr id=ac1 onmouseover=\"set_acpos(1,0); \" onmouseout=\"set_acpos(0,0); \" style=\"backgroundColor=''\">";
s += "<td height=20 align=left style=padding:5px;font-size:11px;>" + ment + "<img border=0 id='bookImg' style='display:none'></td></tr>";
s += "</table>";
s += "<span id=acq1 style='display:none'>" + old + "</span>";
return s;
}
function js_strlen(s) {
var i, l = 0;
for (i = 0; i < s.length; i++)
if (s.charCodeAt(i) > 127) l += 2;
else l++;
return l;
}
function js_substring(s, start, len) {
var i, l = 0; d = "";
for (i = start; i < s.length && l < len; i++) {
if (s.charCodeAt(i) > 127) l += 2;
else l++;
d += s.substr(i, 1);
}
return d;
}
function trim_space(ke, me) {
if (me != 2) {
ke = ke.replace(/^ +/g, "");
ke = ke.replace(/ +$/g, " ");
} else {
ke = ke.replace(/^ +/g, " ");
ke = ke.replace(/ +$/g, "");
}
ke = ke.replace(/ +/g, " ");
return ke;
}
function get_cc(me) {
var ke = trim_space(search_box.value, me) + me;
return cc[ke];
// alert(typeof(cc[ke]));
// return typeof(cc[ke])==UD ? null : cc[ke] ;
}
function set_cc(aq, al, ai, me) {
cc[aq + me] = new Array(aq, al, ai);
}
function ac_hide() {
if (document.getElementById("ac_body").style.display == N) return;
popup_ac(0); //hide all
a_on = a_now = 0;
}
function wi() {
if (acuse == 0) return;
var now = search_box.value;
if (now == "" && now != old) ac_hide();
if (now.length >= wi_len && now != "" && now != old) {
var o = null, me = 1;
o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else { reqAC(me); }
}
old = now;
setTimeout("wi()", wi_int);
}
function set_mouseon(f) {
if (f == 1) arr_on = 1;
else if (f == 2) frm_on = 1;
}
function set_mouseoff(f) {
if (f == 1) arr_on = 0;
else if (f == 2) frm_on = 0;
}
//검색어입력창의 자동완성 화살표를 위, 아래로 변경한다.
//type 0 : 창이 닫혔을때 화살표 아래로.
//type 1 : 창이 펼처졌을때 위로
function switch_image(type) {
if (type == 0) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow.gif");
} else if (type == 1) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow2.gif");
}
}
function debug(msg) {
window.status = msg;
}
function s_init() {
search_box = document.getElementById("schInput");
bak = old = search_box.value;
wd();
setTimeout("wi()", wi_int);
}
$j(document).ready(function () {
setTimeout("s_init()", 2000);
});
| tu | identifier_name |
autoComplete.js |
var search_box;
var search_option;
var m_now = 0, s_now = 0, shl = 0, a_now = 0, a_on = 0, arr_on = 0, frm_on = 0;
var cn_use = "use_ac";
var wi_len = 2;
var wi_int = 500;
var max_row = 4;
var B = "block", I = "inline", N = "none", UD = "undefined";
var bak = "", old = "";
var qs_ac_list = "", qs_ac_id = "", qs_q = "", qs_m = 0, qs_ac_len = 0;
var acuse = 1; //자동완성사용여부
var cc = new Object();
var ac_layer_visibility = 0;
var goGoodsNo;
function get_nav() {
var ver = navigator.appVersion;
if (navigator.appName.indexOf("Microsoft") != -1 && ver.indexOf("MSIE 4") == -1 && ver.indexOf("MSIE 3") == -1) {
return 1;
} else if (navigator.appName.indexOf("Netscape") != -1) return 2;
else return 0;
}
//기능끄기 버튼을 눌렀을때
function ac_off() {
if ($j("#schInput").val() == "") {
popup_ac(0);
//document.all.noquery_ac_body.style.display = "none";
} else {
ac_hide();
// search_box.autocomplete = "off" ;
}
acuse = 0;
}
//기능켜기 버튼을 눌렀을때
function ac_on() {
acuse = 1;
popup_ac(1);
if ($j("#schInput").val() != "")
wd();
setTimeout("wi()", wi_int);
$j("#schInput").focus();
}
//type=0 : 모두 감춘준다.
//type=1 : 검색어가 있을때 자동완성창 보이기
//type=2 : 기능이 꺼져있을때 자동완성창 보이기
//type=3 : '검색어를 입력해달라'는 자동완성창 보이기
function popup_ac(type) {
if (type == 0) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
//검색창내 세모 이미지변경
switch_image(0);
} else if (type == 1) {
$j("#ac_body").css("display", "block");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else if (type == 2) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "block");
$j("#noquery_ac_body").css("display", "none");
switch_image(1);
} else | tch_image(1);
}
}
//인풋박스의 세모 버튼을 눌렀을때 자동완성창을 보여준다.
function show_ac() {
if (acuse == 0) {
if ($j("#ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(2);
}
else {
if ($j("#schInput").val() == "") {
if ($j("#noquery_ac_body")[0].style.display == "block")
popup_ac(0);
else
popup_ac(3);
}
else {
//wd();
//alert(document.all.schInput.value);
req_ipc();
}
}
}
function wd() {
search_box.onclick = req_ipc;
document.body.onclick = dis_p;
}
var dnc = 0;
function req_ipc() {
dnc = 1;
frm_on = 0;
req_ac2(1);
}
function dis_p() {
//alert(5);
if (dnc) {
dnc = 0;
return;
}
if (arr_on) {
return;
}
if (frm_on) {
return;
}
alw = 0;
ac_hide();
}
function req_ac2(me) {
if (search_box.value == "" || acuse == 0) return;
if (a_on && dnc) {
ac_hide();
return;
}
var o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else reqAC(me);
}
function showAC(res) {
eval(res);
set_cc(qs_q, qs_ac_list, qs_ac_id, qs_m);
ac_show(qs_q, qs_ac_list, qs_ac_id, qs_m);
}
function reqAC(me) {
var sv;
var ke = trim_space(search_box.value, me);
var sTarget = "공연";
ke = ke.replace(/ /g, "%20");
if (ke == "") {
ac_hide();
return;
}
$j.ajax({
async: true,
type: "POST",
url: "/Pages/Search/Ajax/get_ac_2.aspx",
data: { p: me, q: ke, t: sTarget },
dataType: "text",
success: showAC
});
}
function ac_show(aq, al, ai, am) {
if (aq && aq != "" && aq != trim_space(search_box.value, am)) return;
qs_q = aq;
qs_m = am;
qs_ac_list = al;
qs_ac_id = ai;
qs_ac_len = qs_ac_list.length;
var h = (qs_ac_len > 7) ? 7 : qs_ac_len;
h = h * 19;
print_ac();
if (qs_ac_list[0] == "" && (qs_m == 1 || qs_m == 2)) {
qs_ac_len = 1;
h = 19;
if (qs_ac_list[0] == "") h = h + 19;
}
h = 140;
scrol.style.height = h + 4;
if (qs_ac_len) {
h += 38;
//a_on=1;
} else {
//a_on=0;
}
a_on = 1;
ac_body.width = 330;
ac_body.height = h;
popup_ac(1);
if (a_on) {
set_acpos(0, 0);
scrol.scrollTop = 0;
search_box.onkeydown = ackhl;
}
}
function set_acpos(v, bookImgsrc) {
a_now = v;
setTimeout('set_ahl();', 10);
if (v > 0 && bookImgsrc) {
$j("#bookImg").show();
$j("#bookImg").attr("src", bookImgsrc);
goGoodsNo = $j("#gNo" + a_now).val();
}
}
function goGoods() {
location.href = "/Pages/Perf/Detail/Detail.aspx?IdPerf=" + (goGoodsNo);
}
function set_ahl() {
if (!a_on) return;
var o1, o2;
for (i = 0; i < qs_ac_len; i++) {
o1 = document.getElementById('ac' + (i + 1));
if ((i + 1) == a_now) {
o1.style.backgroundColor = '#e3edf5';
} else {
o1.style.backgroundColor = '';
// document.all.bookImg.src = '';
}
}
}
function Keycode(e) {
var result;
if (window.event)
result = window.event.keyCode;
else if (e)
result = e.which;
return result;
}
//키를 누를때 이벤트 검사하는 함수
function ackhl(event) {
var key = Keycode(event);
var o1, o2;
var img;
if (key == 39) {
req_ac2(1);
}
if (key == 13) {
//if (a_now>0) search_keyword(qs_ac_list[a_now]);
}
if (key == 40 || (key == 9)) {
if (!a_on) {
req_ac2(1);
return;
}
if (a_now < qs_ac_len) {
if (a_now == 0) bak = search_box.value;
a_now++;
if (a_now > max_row) scrol.scrollTop = parseInt((a_now - 1) / max_row) * max_row * 19;
o1 = document.getElementById('ac' + a_now);
//o2 = document.getElementById('acq' + a_now) ;
//img = document.getElementById("img" + a_now) ;
//old = search_box.value = o2.outerText ;
//set_acpos(a_now, img.outerText) ;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false;
}
}
if (a_on && (key == 38 || key == 9)) {
if (!a_on) return;
if (a_now <= 1) {
ac_hide();
old = search_box.value = bak;
}
else {
a_now--;
if ((qs_ac_len - a_now) + 1 > max_row) scrol.scrollTop = (qs_ac_len - (parseInt((qs_ac_len - a_now) / max_row) + 1) * 4) * 19;
old = search_box.value = $j('#acq' + a_now).text();
set_acpos(a_now, $j('#img' + a_now).text());
search_box.focus();
set_ahl();
//e.returnValue = false ;
}
}
}
function print_ac() {
if (qs_ac_list[0] == "") {
scrol.innerHTML = get_ac0();
}
else {
scrol.innerHTML = get_aclist();
}
//alert(scrol.innerHTML);
popup_ac(1); //자동완성창 보여줌.
//document.all.ac_body.style.display = B ;
setTimeout('set_ahl();', 10);
}
function get_aclist() {
var d = "", ds = "", l = 0, s = "", cnt = 0, pos = 0, qlen = 0, img = "";
if (qs_ac_list[0] != "") {
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-left:6px>";
s += "<tr><td width=65% valign=top>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=margin-top:10px>";
for (i = 0; i < qs_ac_len; i++) {
var query = qs_ac_list[i].split("^");
ds = d = query[0];
goodsNo = query[2];
age_lmt_yn = query[3];
lmt_age = query[4];
img = query[1];
l = js_strlen(d);
if (l > 20) ds = js_substring(d, 0, 26) + "..";
pos = d.indexOf(search_box.value);
if (pos >= 0) {
if (pos == 0) {
ds = js_highlight(ds, search_box.value, 0);
}
else if (pos == d.length - 1) {
ds = js_highlight(ds, search_box.value, -1);
} else {
ds = js_highlight(ds, search_box.value, pos);
}
}
s += "<input type='hidden' Id='gNo" + (i + 1) + "' value='" + goodsNo + "'>";
s += "<a href='javascript:jsf_mm_Search();'>";
s += "<tr id='ac" + (i + 1) + "' onmouseover=\"set_acpos('" + (i + 1) + "', '" + img + "');\" onmouseout=\"set_acpos(0,0); \" onclick=\"set_acinput('" + (i + 1) + "')\" style=\"this.style.backgroundColor=''\" style='cursor:pointer;'>";
s += "<td style=padding-left:5px; height=22 align=left title=\"" + d + "\">" + ds + "</td>";
s += "<td height=22 align=right></td>";
s += "</tr></a>";
s += "<span id='acq" + (i + 1) + "' style='display:none'>" + d + "</span>";
s += "<span id='img" + (i + 1) + "' style='display:none'>" + img + "</span>";
}
s += "</table>";
s += "</td><td width=10px></td><td>";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0 style=\"margin-top:10px;\">";
s += "<tr><td valign='top' height=140 width=70><img border=0 id='bookImg' width='70px' height='87px' style='display:none;cursor:pointer;' onClick=\"goGoods();\"> </td></tr></table>";
s += "</td></tr></table>"
}
return s;
}
function js_makehigh_pre(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(0, t1.length)) {
d = "<font color=#4b7bcd>";
for (var i = 0, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1)
}
d += "</font>" + s.substring(i, s.length)
}
return d;
}
function js_makehigh_suf(s, t) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
if (t1 == s1.substring(s1.length - t1.length)) {
for (var i = 0, j = 0; j < s1.length - t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "<font color=#4b7bcd>";
for (var k = i, l = 0; l < t1.length; k++) {
if (s.substring(k, k + 1) != " ") l++;
d += s.substring(k, k + 1);
}
d += "</font>";
}
return d;
}
function js_makehigh_mid(s, t, pos) {
var d = "";
var s1 = s.replace(/ /g, "");
var t1 = t.replace(/ /g, "");
t1 = t1.toLowerCase();
d = s.substring(0, pos);
d += "<font color=#4b7bcd>";
for (var i = pos, j = 0; j < t1.length; i++) {
if (s.substring(i, i + 1) != " ") j++;
d += s.substring(i, i + 1);
}
d += "</font>" + s.substring(i, s.length);
return d;
}
function js_highlight(s, d, is_suf) {
var ret = "";
if (is_suf == 0) {
ret = js_makehigh_pre(s, d);
}
else if (is_suf == -1) {
ret = js_makehigh_suf(s, d);
}
else {
ret = js_makehigh_mid(s, d, is_suf);
}
if (ret == "") return s;
else return ret;
}
function set_acinput(v) {
if (!a_on) return;
old = search_box.value = $j('#acq' + a_now).text();
search_box.focus();
ac_hide();
}
function get_ac0() {
var s = "", ment = "";
if (qs_m == 1) ment = "추천 상품명이 없습니다.";
else if (qs_m == 2) ment = "추천 상품명이 없습니다.";
s += "<table width=100% cellpadding=0 cellspacing=0 border=0>";
s += "<tr id=ac1 onmouseover=\"set_acpos(1,0); \" onmouseout=\"set_acpos(0,0); \" style=\"backgroundColor=''\">";
s += "<td height=20 align=left style=padding:5px;font-size:11px;>" + ment + "<img border=0 id='bookImg' style='display:none'></td></tr>";
s += "</table>";
s += "<span id=acq1 style='display:none'>" + old + "</span>";
return s;
}
function js_strlen(s) {
var i, l = 0;
for (i = 0; i < s.length; i++)
if (s.charCodeAt(i) > 127) l += 2;
else l++;
return l;
}
function js_substring(s, start, len) {
var i, l = 0; d = "";
for (i = start; i < s.length && l < len; i++) {
if (s.charCodeAt(i) > 127) l += 2;
else l++;
d += s.substr(i, 1);
}
return d;
}
function trim_space(ke, me) {
if (me != 2) {
ke = ke.replace(/^ +/g, "");
ke = ke.replace(/ +$/g, " ");
} else {
ke = ke.replace(/^ +/g, " ");
ke = ke.replace(/ +$/g, "");
}
ke = ke.replace(/ +/g, " ");
return ke;
}
function get_cc(me) {
var ke = trim_space(search_box.value, me) + me;
return cc[ke];
// alert(typeof(cc[ke]));
// return typeof(cc[ke])==UD ? null : cc[ke] ;
}
function set_cc(aq, al, ai, me) {
cc[aq + me] = new Array(aq, al, ai);
}
function ac_hide() {
if (document.getElementById("ac_body").style.display == N) return;
popup_ac(0); //hide all
a_on = a_now = 0;
}
function wi() {
if (acuse == 0) return;
var now = search_box.value;
if (now == "" && now != old) ac_hide();
if (now.length >= wi_len && now != "" && now != old) {
var o = null, me = 1;
o = get_cc(me);
if (o && o[1][0] != "") { ac_show(o[0], o[1], o[2], me); }
else { reqAC(me); }
}
old = now;
setTimeout("wi()", wi_int);
}
function set_mouseon(f) {
if (f == 1) arr_on = 1;
else if (f == 2) frm_on = 1;
}
function set_mouseoff(f) {
if (f == 1) arr_on = 0;
else if (f == 2) frm_on = 0;
}
//검색어입력창의 자동완성 화살표를 위, 아래로 변경한다.
//type 0 : 창이 닫혔을때 화살표 아래로.
//type 1 : 창이 펼처졌을때 위로
function switch_image(type) {
if (type == 0) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow.gif");
} else if (type == 1) {
$j("#imgIntro0").attr("src", "http://tkfile.yes24.com/img/common/ic_search_arrow2.gif");
}
}
function debug(msg) {
window.status = msg;
}
function s_init() {
search_box = document.getElementById("schInput");
bak = old = search_box.value;
wd();
setTimeout("wi()", wi_int);
}
$j(document).ready(function () {
setTimeout("s_init()", 2000);
});
| if (type == 3) {
$j("#ac_body").css("display", "none");
$j("#off_ac_body").css("display", "none");
$j("#noquery_ac_body")[0].style.display = "block";
swi | conditional_block |
Marriah_Lewis_Cinema_Project.py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 18:52:17 2021
@author: lewis
"""
import csv
import pandas as pd
import re
import statistics
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
from urllib.request import urlopen
#Creating a function that groups by, counts, creates a new column from the index, drops the index and changes the column names
def groupby_count(df, groupby_column, count_column):
|
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
#Create a function to process the string into an integer by using re.sub()
def process_num(num):
return float(re.sub(r'[^\w\s.]','',num))
#test function
num1 = float(re.sub(r'[^\w\s.]','','1,156.30'))
#print(num1)
#Create array to hold the data extracted
gross=[]
year=[]
film=[]
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
# put the data in the pandas dataframe
movie_df= pd.DataFrame({'Gross': gross,
'first': year,
'Franchise': film
})
#print(movie_df)
#print(movie_df.dtypes)
#movies_df_count = movie_df.groupby(["Franchise", "first"])["first"].count()
#print(movies_df_count)
#WIKI_df=movie_df.groupby(["first"])["first"].count()
#print(WIKI_df)
#WIKI_df.plot(kind='bar',x='first',y='count')
#plt.title("Most Movies Release count by Year(Top 68 on WIKI)",fontsize=20)
#TMDB Kaggle Data
movies_TMDB_kaggle= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv', encoding= 'ISO-8859-1')
#print(len(movies_TMDB_kaggle)) #result 4803 and 20 columns
#print(movies_TMDB_kaggle.isnull().sum()) #tagline and homepage has the most NaN, unnecessary columns
#Clean the dataframe, removed any unnecessary columns
clean_TMDB_movies= movies_TMDB_kaggle.drop(columns=['homepage', 'id', 'overview', 'status', 'tagline', 'original_title'])
#print(clean_TMDB_movies) #result 4803 rows and 14 columns
#print(clean_TMDB_movies.isnull().sum()) # NaNs in the release_date and runtime column
clean_TMDB_movies.dropna(inplace= True)
#print(clean_TMDB_movies.isnull().sum())
#Removing any movie that has a budget of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
#Removing any movie with a revenue of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
#review the profit for each movie therefore a profit column was created
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies['budget']
#Creating a percent profit column in order to compare profits.
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit']/clean_TMDB_movies['budget']*100
#print the top five
#print(clean_TMDB_movies.head())
#checking the data types
#print(clean_TMDB_movies.dtypes)
#change release_date to the date/time and separate it by month, day, and year
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies['release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies['release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
#After new columns were added it is time to concat.
cat = list(range(1,13))
#Changing the month data type from int to ordered category
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'], ordered = True, categories = cat)
#confirmation
#print(clean_TMDB_movies.month.dtype)
#print(len(clean_TMDB_movies))
#print(clean_TMDB_movies.describe())
#print(clean_TMDB_movies.revenue.describe())
#print(clean_TMDB_movies.profit.describe())
#print(clean_TMDB_movies.vote_count.describe())
#print(clean_TMDB_movies.percent_profit.describe())
#discretize the budget column
categories = ["very_low", "low", "high", "very_high"]
#saving the clean_TMDB df as a discretized df
movies_discretized = clean_TMDB_movies
#creating a budget cutoff using pandas cut function
movies_discretized["budget"] = pd.cut(movies_discretized["budget"], [0, 13000000, 30000000, 62192550, 400000000], labels = categories)
#repeat the step for revenue
#print(movies_discretized.revenue.describe())
movies_discretized["revenue"] = pd.cut(movies_discretized["revenue"], [0, 21458200, 62954020, 187976900, 2887965000], labels = categories)
#profit
categories_profit = ["negative", "low", "high", "very_high"]
movies_discretized["profit"] = pd.cut(movies_discretized["profit"], [-165710100 , 0, 29314900, 140784100, 2560965000], labels = categories_profit)
#print(movies_discretized["profit"].head())
#Vote_average-very_low: vote averages less than 6, low are between 6 to 6.5, high between 6.5 and 7 and very_high 7 and 8.5
movies_discretized["vote_average"] = pd.cut(movies_discretized["vote_average"], [0, 6, 6.5, 7, 8.5], labels = categories)
#print(movies_discretized["vote_average"].head())
#Vote_count
movies_discretized["vote_count"] = pd.cut(movies_discretized["vote_count"], [0, 440, 1151, 2522, 14000], labels = categories)
#print(movies_discretized["vote_count"].head())
#percent_profit
movies_discretized["percent_profit"] = pd.cut(movies_discretized["percent_profit"], [-100, 0, 108, 436, 6528], labels = categories_profit)
movies_discretized["percent_profit"]
#Categorizing days into weeks
#print(movies_discretized.day.describe())
categories_weeks = ["week_1", "week_2", "week_3", "week_4"]
movies_discretized["week"] = pd.cut(movies_discretized["day"], [0, 8, 15, 22, 32], labels = categories_weeks)
#print(movies_discretized["week"].head())
#day and release_date are no longer needed columns
movies_discretized.drop(columns=['day', 'release_date'], inplace = True)
#print(movies_discretized.head())
#Do major production companies have an impact the profit margin?
production_company = []
for movie in movies_discretized['production_companies']:
if "Universal" in movie:
production_company.append("Universal")
elif "Sony" in movie:
production_company.append("Sony")
elif "Fox" in movie:
production_company.append("Fox")
elif "DreamWorks" in movie:
production_company.append("DW")
elif "MGM" in movie:
production_company.append("MGM")
elif "Paramount" in movie:
production_company.append("Paramount")
elif "Disney" in movie:
production_company.append("Disney")
elif "Warner Bros" in movie:
production_company.append("WB")
else:
production_company.append("None")
movies_discretized["main_production"] = production_company
#print(movies_discretized["main_production"].head())
movies_discretized_count = movies_discretized.groupby(["main_production", "percent_profit"])["main_production"].count()
movies_discretized_count_df= pd.DataFrame(movies_discretized_count)
#print(movies_discretized_count_df)
#change the last column to count instead of main production
movies_discretized_count_df.columns = ["counts"]
#print(movies_discretized_count_df.head())
#total count for the number of percent_profit counts for each main production.
movies_discretized_count_df["production_company"]=movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df["percent_profit_category"] = movies_discretized_count_df.index.get_level_values(1)
#print(movies_discretized_count_df)
#drop the indexes to create another column with the sum of the counts of each production
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop = True)
#The sum of each production company category.
production_company_discretized_count_df = movies_discretized_count_df.groupby(["production_company"])["counts"].sum()
#print(production_company_discretized_count_df)
#column with the overall counts for each production, construct a new column called production company count that replicates the production company, and then use the replace function to replace the 1s and 2s with the total count
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["DW"], 82)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Disney"], 116)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Fox"], 298)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["MGM"], 87)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["None"], 1782)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Paramount"], 235)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Sony"], 42)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Universal"], 282)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["WB"], 269)
#print(movies_discretized_count_df)
#percentage
movies_discretized_count_df["percent"] = movies_discretized_count_df["counts"]/movies_discretized_count_df["production_company_count"] *100
#print(movies_discretized_count_df.head())
#dropping production_company_count and count column no longer needed
movies_discretized_count_df.drop(["counts", "production_company_count"], axis = 1, inplace = True )
#graphing question 1 using Matplot lib
#graph = movies_discretized_count_df.pivot("production_company", "percent_profit_category","percent").plot(kind="bar", color= ['blue', 'green', 'purple', 'red'], title='Profit Margin amongst Production Companies')
#change the x and y axis for graph
#plt.ylabel("Percent Profit")
#plt.xlabel("Production")
#plt.xticks(rotation = 0)
#position the legends underneath the graph; Now the graph looks beautiful
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit Category")
#plt.show()
#Question 2: Is it true that the month in which a film is released has an impact on its profit margin?
movies_discretized_count_week = movies_discretized.groupby(["week", "percent_profit"])["week"].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
#Checking the dataframe
#print(movies_discretized_count_df_week)
#changing column that is labeled week to count
movies_discretized_count_df_week.columns = ["counts"]
#total count for the number of % profit for each week
movies_discretized_count_df_week["week"]=movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week["percent_profit_category"] = movies_discretized_count_df_week.index.get_level_values(1)
#print(movies_discretized_count_df_week)
movies_discretized_count_df_week = movies_discretized_count_df_week.reset_index(drop = True) #drop the index
#what is the sum of each production
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby(["week"])["counts"].sum()
#print(sum_discretized_count_df_week) #the sums are centered around 700-800s
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_1"], 783)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_2"], 817)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_3"], 782)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_4"], 811)
#print(movies_discretized_count_df_week.head())
#received an error Object with dtype category cannot perform the numpy op true_divide
movies_discretized_count_df_week["week_count"]= movies_discretized_count_df_week["week_count"].astype(np.int64)
#convert into percentage; counts/week_count * 100
movies_discretized_count_df_week["percent"] = movies_discretized_count_df_week["counts"]/movies_discretized_count_df_week["week_count"] *100
#print(movies_discretized_count_df_week.head())
#dropping the week_count and count column since the percent column is there those columns are no longer needed
movies_discretized_count_df_week.drop(["counts", "week_count"], axis = 1, inplace = True )
#Time to create a visual
#graph_question_2 = movies_discretized_count_df_week.pivot("week", "percent_profit_category", "percent").plot(kind="bar", color = ["blue", "green", "purple", "red"], title = "Impact of Percent Profit by Week")
#plt.ylabel("Percent")
#plt.xlabel("Week")
#plt.xticks(rotation = 0)
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit")
#plt.show()
#IMDb Kaggle Data
movies_IMDb= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb= movies_IMDb.drop(columns=['imdb_title_id','original_title','description', 'reviews_from_users', 'reviews_from_critics'])
#print(clean_IMDb) #85,855 rows and 17 columns
#print(clean_IMDb.isnull().sum())
clean_IMDb.dropna(inplace = True) #drop all the NaNs
#print(clean_IMDb.isnull().sum()) #no more NaNs
#print(len(clean_IMDb)) #6635
#print(clean_IMDb.dtypes)
# QUESTION 3: How does budget impact vote average?
#plt.plot(clean_IMDb.budget, clean_IMDb.avg_vote, 'o')
#plt.title('How does Budget Impact Vote Average?')
#plt.xlabel('Budget')
#plt.ylabel('Vote Average')
#plt.show()
#print(clean_IMDb['budget'].head())
#print the top five
#print(clean_IMDb.head())
#Using the groupby_count function that takes the following arguments (df, groupby_column, count_column)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
#Sorting the df, so the bar graph will be in descending order
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace = True)
#Statista movie theatre revenue and prediction to 2025 post COVID saving to a pd dataframe
revenue_covid= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv')
#print(AMC_revenue)
#print(AMC_revenue.info())
print(AMC_revenue.head())
#During 2020, AMC Theatres reported annual revenues of 1.24 billion U.S. dollars, a dramatic decrease from previous years as a consequence of the COVID-19 pandemic.
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
#Global box office revenue coronavirus impact 2020-2025
#revenue_covid.plot(x="Year", y=["Originalforecast", "Marchrevision", "Julyrevision"], kind="bar")
#plt.show()
| new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop = True, inplace = True)
return(new_df) | identifier_body |
Marriah_Lewis_Cinema_Project.py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 18:52:17 2021
@author: lewis
"""
import csv
import pandas as pd
import re
import statistics
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
from urllib.request import urlopen
#Creating a function that groups by, counts, creates a new column from the index, drops the index and changes the column names
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop = True, inplace = True)
return(new_df)
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
#Create a function to process the string into an integer by using re.sub()
def process_num(num):
return float(re.sub(r'[^\w\s.]','',num))
#test function
num1 = float(re.sub(r'[^\w\s.]','','1,156.30'))
#print(num1)
#Create array to hold the data extracted
gross=[]
year=[]
film=[]
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
# put the data in the pandas dataframe
movie_df= pd.DataFrame({'Gross': gross,
'first': year,
'Franchise': film
})
#print(movie_df)
#print(movie_df.dtypes)
#movies_df_count = movie_df.groupby(["Franchise", "first"])["first"].count()
#print(movies_df_count)
#WIKI_df=movie_df.groupby(["first"])["first"].count()
#print(WIKI_df)
#WIKI_df.plot(kind='bar',x='first',y='count')
#plt.title("Most Movies Release count by Year(Top 68 on WIKI)",fontsize=20)
#TMDB Kaggle Data
movies_TMDB_kaggle= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv', encoding= 'ISO-8859-1')
#print(len(movies_TMDB_kaggle)) #result 4803 and 20 columns
#print(movies_TMDB_kaggle.isnull().sum()) #tagline and homepage has the most NaN, unnecessary columns
#Clean the dataframe, removed any unnecessary columns
clean_TMDB_movies= movies_TMDB_kaggle.drop(columns=['homepage', 'id', 'overview', 'status', 'tagline', 'original_title'])
#print(clean_TMDB_movies) #result 4803 rows and 14 columns
#print(clean_TMDB_movies.isnull().sum()) # NaNs in the release_date and runtime column
| clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
#Removing any movie with a revenue of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
#review the profit for each movie therefore a profit column was created
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies['budget']
#Creating a percent profit column in order to compare profits.
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit']/clean_TMDB_movies['budget']*100
#print the top five
#print(clean_TMDB_movies.head())
#checking the data types
#print(clean_TMDB_movies.dtypes)
#change release_date to the date/time and separate it by month, day, and year
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies['release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies['release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
#After new columns were added it is time to concat.
cat = list(range(1,13))
#Changing the month data type from int to ordered category
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'], ordered = True, categories = cat)
#confirmation
#print(clean_TMDB_movies.month.dtype)
#print(len(clean_TMDB_movies))
#print(clean_TMDB_movies.describe())
#print(clean_TMDB_movies.revenue.describe())
#print(clean_TMDB_movies.profit.describe())
#print(clean_TMDB_movies.vote_count.describe())
#print(clean_TMDB_movies.percent_profit.describe())
#discretize the budget column
categories = ["very_low", "low", "high", "very_high"]
#saving the clean_TMDB df as a discretized df
movies_discretized = clean_TMDB_movies
#creating a budget cutoff using pandas cut function
movies_discretized["budget"] = pd.cut(movies_discretized["budget"], [0, 13000000, 30000000, 62192550, 400000000], labels = categories)
#repeat the step for revenue
#print(movies_discretized.revenue.describe())
movies_discretized["revenue"] = pd.cut(movies_discretized["revenue"], [0, 21458200, 62954020, 187976900, 2887965000], labels = categories)
#profit
categories_profit = ["negative", "low", "high", "very_high"]
movies_discretized["profit"] = pd.cut(movies_discretized["profit"], [-165710100 , 0, 29314900, 140784100, 2560965000], labels = categories_profit)
#print(movies_discretized["profit"].head())
#Vote_average-very_low: vote averages less than 6, low are between 6 to 6.5, high between 6.5 and 7 and very_high 7 and 8.5
movies_discretized["vote_average"] = pd.cut(movies_discretized["vote_average"], [0, 6, 6.5, 7, 8.5], labels = categories)
#print(movies_discretized["vote_average"].head())
#Vote_count
movies_discretized["vote_count"] = pd.cut(movies_discretized["vote_count"], [0, 440, 1151, 2522, 14000], labels = categories)
#print(movies_discretized["vote_count"].head())
#percent_profit
movies_discretized["percent_profit"] = pd.cut(movies_discretized["percent_profit"], [-100, 0, 108, 436, 6528], labels = categories_profit)
movies_discretized["percent_profit"]
#Categorizing days into weeks
#print(movies_discretized.day.describe())
categories_weeks = ["week_1", "week_2", "week_3", "week_4"]
movies_discretized["week"] = pd.cut(movies_discretized["day"], [0, 8, 15, 22, 32], labels = categories_weeks)
#print(movies_discretized["week"].head())
#day and release_date are no longer needed columns
movies_discretized.drop(columns=['day', 'release_date'], inplace = True)
#print(movies_discretized.head())
#Do major production companies have an impact the profit margin?
production_company = []
for movie in movies_discretized['production_companies']:
if "Universal" in movie:
production_company.append("Universal")
elif "Sony" in movie:
production_company.append("Sony")
elif "Fox" in movie:
production_company.append("Fox")
elif "DreamWorks" in movie:
production_company.append("DW")
elif "MGM" in movie:
production_company.append("MGM")
elif "Paramount" in movie:
production_company.append("Paramount")
elif "Disney" in movie:
production_company.append("Disney")
elif "Warner Bros" in movie:
production_company.append("WB")
else:
production_company.append("None")
movies_discretized["main_production"] = production_company
#print(movies_discretized["main_production"].head())
movies_discretized_count = movies_discretized.groupby(["main_production", "percent_profit"])["main_production"].count()
movies_discretized_count_df= pd.DataFrame(movies_discretized_count)
#print(movies_discretized_count_df)
#change the last column to count instead of main production
movies_discretized_count_df.columns = ["counts"]
#print(movies_discretized_count_df.head())
#total count for the number of percent_profit counts for each main production.
movies_discretized_count_df["production_company"]=movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df["percent_profit_category"] = movies_discretized_count_df.index.get_level_values(1)
#print(movies_discretized_count_df)
#drop the indexes to create another column with the sum of the counts of each production
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop = True)
#The sum of each production company category.
production_company_discretized_count_df = movies_discretized_count_df.groupby(["production_company"])["counts"].sum()
#print(production_company_discretized_count_df)
#column with the overall counts for each production, construct a new column called production company count that replicates the production company, and then use the replace function to replace the 1s and 2s with the total count
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["DW"], 82)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Disney"], 116)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Fox"], 298)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["MGM"], 87)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["None"], 1782)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Paramount"], 235)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Sony"], 42)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Universal"], 282)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["WB"], 269)
#print(movies_discretized_count_df)
#percentage
movies_discretized_count_df["percent"] = movies_discretized_count_df["counts"]/movies_discretized_count_df["production_company_count"] *100
#print(movies_discretized_count_df.head())
#dropping production_company_count and count column no longer needed
movies_discretized_count_df.drop(["counts", "production_company_count"], axis = 1, inplace = True )
#graphing question 1 using Matplot lib
#graph = movies_discretized_count_df.pivot("production_company", "percent_profit_category","percent").plot(kind="bar", color= ['blue', 'green', 'purple', 'red'], title='Profit Margin amongst Production Companies')
#change the x and y axis for graph
#plt.ylabel("Percent Profit")
#plt.xlabel("Production")
#plt.xticks(rotation = 0)
#position the legends underneath the graph; Now the graph looks beautiful
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit Category")
#plt.show()
#Question 2: Is it true that the month in which a film is released has an impact on its profit margin?
movies_discretized_count_week = movies_discretized.groupby(["week", "percent_profit"])["week"].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
#Checking the dataframe
#print(movies_discretized_count_df_week)
#changing column that is labeled week to count
movies_discretized_count_df_week.columns = ["counts"]
#total count for the number of % profit for each week
movies_discretized_count_df_week["week"]=movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week["percent_profit_category"] = movies_discretized_count_df_week.index.get_level_values(1)
#print(movies_discretized_count_df_week)
movies_discretized_count_df_week = movies_discretized_count_df_week.reset_index(drop = True) #drop the index
#what is the sum of each production
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby(["week"])["counts"].sum()
#print(sum_discretized_count_df_week) #the sums are centered around 700-800s
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_1"], 783)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_2"], 817)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_3"], 782)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_4"], 811)
#print(movies_discretized_count_df_week.head())
#received an error Object with dtype category cannot perform the numpy op true_divide
movies_discretized_count_df_week["week_count"]= movies_discretized_count_df_week["week_count"].astype(np.int64)
#convert into percentage; counts/week_count * 100
movies_discretized_count_df_week["percent"] = movies_discretized_count_df_week["counts"]/movies_discretized_count_df_week["week_count"] *100
#print(movies_discretized_count_df_week.head())
#dropping the week_count and count column since the percent column is there those columns are no longer needed
movies_discretized_count_df_week.drop(["counts", "week_count"], axis = 1, inplace = True )
#Time to create a visual
#graph_question_2 = movies_discretized_count_df_week.pivot("week", "percent_profit_category", "percent").plot(kind="bar", color = ["blue", "green", "purple", "red"], title = "Impact of Percent Profit by Week")
#plt.ylabel("Percent")
#plt.xlabel("Week")
#plt.xticks(rotation = 0)
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit")
#plt.show()
#IMDb Kaggle Data
movies_IMDb= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb= movies_IMDb.drop(columns=['imdb_title_id','original_title','description', 'reviews_from_users', 'reviews_from_critics'])
#print(clean_IMDb) #85,855 rows and 17 columns
#print(clean_IMDb.isnull().sum())
clean_IMDb.dropna(inplace = True) #drop all the NaNs
#print(clean_IMDb.isnull().sum()) #no more NaNs
#print(len(clean_IMDb)) #6635
#print(clean_IMDb.dtypes)
# QUESTION 3: How does budget impact vote average?
#plt.plot(clean_IMDb.budget, clean_IMDb.avg_vote, 'o')
#plt.title('How does Budget Impact Vote Average?')
#plt.xlabel('Budget')
#plt.ylabel('Vote Average')
#plt.show()
#print(clean_IMDb['budget'].head())
#print the top five
#print(clean_IMDb.head())
#Using the groupby_count function that takes the following arguments (df, groupby_column, count_column)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
#Sorting the df, so the bar graph will be in descending order
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace = True)
#Statista movie theatre revenue and prediction to 2025 post COVID saving to a pd dataframe
revenue_covid= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv')
#print(AMC_revenue)
#print(AMC_revenue.info())
print(AMC_revenue.head())
#During 2020, AMC Theatres reported annual revenues of 1.24 billion U.S. dollars, a dramatic decrease from previous years as a consequence of the COVID-19 pandemic.
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
#Global box office revenue coronavirus impact 2020-2025
#revenue_covid.plot(x="Year", y=["Originalforecast", "Marchrevision", "Julyrevision"], kind="bar")
#plt.show() | clean_TMDB_movies.dropna(inplace= True)
#print(clean_TMDB_movies.isnull().sum())
#Removing any movie that has a budget of 0
| random_line_split |
Marriah_Lewis_Cinema_Project.py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 18:52:17 2021
@author: lewis
"""
import csv
import pandas as pd
import re
import statistics
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
from urllib.request import urlopen
#Creating a function that groups by, counts, creates a new column from the index, drops the index and changes the column names
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop = True, inplace = True)
return(new_df)
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
#Create a function to process the string into an integer by using re.sub()
def process_num(num):
return float(re.sub(r'[^\w\s.]','',num))
#test function
num1 = float(re.sub(r'[^\w\s.]','','1,156.30'))
#print(num1)
#Create array to hold the data extracted
gross=[]
year=[]
film=[]
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
# put the data in the pandas dataframe
movie_df= pd.DataFrame({'Gross': gross,
'first': year,
'Franchise': film
})
#print(movie_df)
#print(movie_df.dtypes)
#movies_df_count = movie_df.groupby(["Franchise", "first"])["first"].count()
#print(movies_df_count)
#WIKI_df=movie_df.groupby(["first"])["first"].count()
#print(WIKI_df)
#WIKI_df.plot(kind='bar',x='first',y='count')
#plt.title("Most Movies Release count by Year(Top 68 on WIKI)",fontsize=20)
#TMDB Kaggle Data
movies_TMDB_kaggle= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv', encoding= 'ISO-8859-1')
#print(len(movies_TMDB_kaggle)) #result 4803 and 20 columns
#print(movies_TMDB_kaggle.isnull().sum()) #tagline and homepage has the most NaN, unnecessary columns
#Clean the dataframe, removed any unnecessary columns
clean_TMDB_movies= movies_TMDB_kaggle.drop(columns=['homepage', 'id', 'overview', 'status', 'tagline', 'original_title'])
#print(clean_TMDB_movies) #result 4803 rows and 14 columns
#print(clean_TMDB_movies.isnull().sum()) # NaNs in the release_date and runtime column
clean_TMDB_movies.dropna(inplace= True)
#print(clean_TMDB_movies.isnull().sum())
#Removing any movie that has a budget of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
#Removing any movie with a revenue of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
#review the profit for each movie therefore a profit column was created
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies['budget']
#Creating a percent profit column in order to compare profits.
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit']/clean_TMDB_movies['budget']*100
#print the top five
#print(clean_TMDB_movies.head())
#checking the data types
#print(clean_TMDB_movies.dtypes)
#change release_date to the date/time and separate it by month, day, and year
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies['release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies['release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
#After new columns were added it is time to concat.
cat = list(range(1,13))
#Changing the month data type from int to ordered category
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'], ordered = True, categories = cat)
#confirmation
#print(clean_TMDB_movies.month.dtype)
#print(len(clean_TMDB_movies))
#print(clean_TMDB_movies.describe())
#print(clean_TMDB_movies.revenue.describe())
#print(clean_TMDB_movies.profit.describe())
#print(clean_TMDB_movies.vote_count.describe())
#print(clean_TMDB_movies.percent_profit.describe())
#discretize the budget column
categories = ["very_low", "low", "high", "very_high"]
#saving the clean_TMDB df as a discretized df
movies_discretized = clean_TMDB_movies
#creating a budget cutoff using pandas cut function
movies_discretized["budget"] = pd.cut(movies_discretized["budget"], [0, 13000000, 30000000, 62192550, 400000000], labels = categories)
#repeat the step for revenue
#print(movies_discretized.revenue.describe())
movies_discretized["revenue"] = pd.cut(movies_discretized["revenue"], [0, 21458200, 62954020, 187976900, 2887965000], labels = categories)
#profit
categories_profit = ["negative", "low", "high", "very_high"]
movies_discretized["profit"] = pd.cut(movies_discretized["profit"], [-165710100 , 0, 29314900, 140784100, 2560965000], labels = categories_profit)
#print(movies_discretized["profit"].head())
#Vote_average-very_low: vote averages less than 6, low are between 6 to 6.5, high between 6.5 and 7 and very_high 7 and 8.5
movies_discretized["vote_average"] = pd.cut(movies_discretized["vote_average"], [0, 6, 6.5, 7, 8.5], labels = categories)
#print(movies_discretized["vote_average"].head())
#Vote_count
movies_discretized["vote_count"] = pd.cut(movies_discretized["vote_count"], [0, 440, 1151, 2522, 14000], labels = categories)
#print(movies_discretized["vote_count"].head())
#percent_profit
movies_discretized["percent_profit"] = pd.cut(movies_discretized["percent_profit"], [-100, 0, 108, 436, 6528], labels = categories_profit)
movies_discretized["percent_profit"]
#Categorizing days into weeks
#print(movies_discretized.day.describe())
categories_weeks = ["week_1", "week_2", "week_3", "week_4"]
movies_discretized["week"] = pd.cut(movies_discretized["day"], [0, 8, 15, 22, 32], labels = categories_weeks)
#print(movies_discretized["week"].head())
#day and release_date are no longer needed columns
movies_discretized.drop(columns=['day', 'release_date'], inplace = True)
#print(movies_discretized.head())
#Do major production companies have an impact the profit margin?
production_company = []
for movie in movies_discretized['production_companies']:
if "Universal" in movie:
production_company.append("Universal")
elif "Sony" in movie:
production_company.append("Sony")
elif "Fox" in movie:
production_company.append("Fox")
elif "DreamWorks" in movie:
production_company.append("DW")
elif "MGM" in movie:
production_company.append("MGM")
elif "Paramount" in movie:
production_company.append("Paramount")
elif "Disney" in movie:
production_company.append("Disney")
elif "Warner Bros" in movie:
production_company.append("WB")
else:
|
movies_discretized["main_production"] = production_company
#print(movies_discretized["main_production"].head())
movies_discretized_count = movies_discretized.groupby(["main_production", "percent_profit"])["main_production"].count()
movies_discretized_count_df= pd.DataFrame(movies_discretized_count)
#print(movies_discretized_count_df)
#change the last column to count instead of main production
movies_discretized_count_df.columns = ["counts"]
#print(movies_discretized_count_df.head())
#total count for the number of percent_profit counts for each main production.
movies_discretized_count_df["production_company"]=movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df["percent_profit_category"] = movies_discretized_count_df.index.get_level_values(1)
#print(movies_discretized_count_df)
#drop the indexes to create another column with the sum of the counts of each production
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop = True)
#The sum of each production company category.
production_company_discretized_count_df = movies_discretized_count_df.groupby(["production_company"])["counts"].sum()
#print(production_company_discretized_count_df)
#column with the overall counts for each production, construct a new column called production company count that replicates the production company, and then use the replace function to replace the 1s and 2s with the total count
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["DW"], 82)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Disney"], 116)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Fox"], 298)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["MGM"], 87)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["None"], 1782)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Paramount"], 235)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Sony"], 42)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Universal"], 282)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["WB"], 269)
#print(movies_discretized_count_df)
#percentage
movies_discretized_count_df["percent"] = movies_discretized_count_df["counts"]/movies_discretized_count_df["production_company_count"] *100
#print(movies_discretized_count_df.head())
#dropping production_company_count and count column no longer needed
movies_discretized_count_df.drop(["counts", "production_company_count"], axis = 1, inplace = True )
#graphing question 1 using Matplot lib
#graph = movies_discretized_count_df.pivot("production_company", "percent_profit_category","percent").plot(kind="bar", color= ['blue', 'green', 'purple', 'red'], title='Profit Margin amongst Production Companies')
#change the x and y axis for graph
#plt.ylabel("Percent Profit")
#plt.xlabel("Production")
#plt.xticks(rotation = 0)
#position the legends underneath the graph; Now the graph looks beautiful
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit Category")
#plt.show()
#Question 2: Is it true that the month in which a film is released has an impact on its profit margin?
movies_discretized_count_week = movies_discretized.groupby(["week", "percent_profit"])["week"].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
#Checking the dataframe
#print(movies_discretized_count_df_week)
#changing column that is labeled week to count
movies_discretized_count_df_week.columns = ["counts"]
#total count for the number of % profit for each week
movies_discretized_count_df_week["week"]=movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week["percent_profit_category"] = movies_discretized_count_df_week.index.get_level_values(1)
#print(movies_discretized_count_df_week)
movies_discretized_count_df_week = movies_discretized_count_df_week.reset_index(drop = True) #drop the index
#what is the sum of each production
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby(["week"])["counts"].sum()
#print(sum_discretized_count_df_week) #the sums are centered around 700-800s
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_1"], 783)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_2"], 817)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_3"], 782)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_4"], 811)
#print(movies_discretized_count_df_week.head())
#received an error Object with dtype category cannot perform the numpy op true_divide
movies_discretized_count_df_week["week_count"]= movies_discretized_count_df_week["week_count"].astype(np.int64)
#convert into percentage; counts/week_count * 100
movies_discretized_count_df_week["percent"] = movies_discretized_count_df_week["counts"]/movies_discretized_count_df_week["week_count"] *100
#print(movies_discretized_count_df_week.head())
#dropping the week_count and count column since the percent column is there those columns are no longer needed
movies_discretized_count_df_week.drop(["counts", "week_count"], axis = 1, inplace = True )
#Time to create a visual
#graph_question_2 = movies_discretized_count_df_week.pivot("week", "percent_profit_category", "percent").plot(kind="bar", color = ["blue", "green", "purple", "red"], title = "Impact of Percent Profit by Week")
#plt.ylabel("Percent")
#plt.xlabel("Week")
#plt.xticks(rotation = 0)
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit")
#plt.show()
#IMDb Kaggle Data
movies_IMDb= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb= movies_IMDb.drop(columns=['imdb_title_id','original_title','description', 'reviews_from_users', 'reviews_from_critics'])
#print(clean_IMDb) #85,855 rows and 17 columns
#print(clean_IMDb.isnull().sum())
clean_IMDb.dropna(inplace = True) #drop all the NaNs
#print(clean_IMDb.isnull().sum()) #no more NaNs
#print(len(clean_IMDb)) #6635
#print(clean_IMDb.dtypes)
# QUESTION 3: How does budget impact vote average?
#plt.plot(clean_IMDb.budget, clean_IMDb.avg_vote, 'o')
#plt.title('How does Budget Impact Vote Average?')
#plt.xlabel('Budget')
#plt.ylabel('Vote Average')
#plt.show()
#print(clean_IMDb['budget'].head())
#print the top five
#print(clean_IMDb.head())
#Using the groupby_count function that takes the following arguments (df, groupby_column, count_column)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
#Sorting the df, so the bar graph will be in descending order
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace = True)
#Statista movie theatre revenue and prediction to 2025 post COVID saving to a pd dataframe
revenue_covid= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv')
#print(AMC_revenue)
#print(AMC_revenue.info())
print(AMC_revenue.head())
#During 2020, AMC Theatres reported annual revenues of 1.24 billion U.S. dollars, a dramatic decrease from previous years as a consequence of the COVID-19 pandemic.
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
#Global box office revenue coronavirus impact 2020-2025
#revenue_covid.plot(x="Year", y=["Originalforecast", "Marchrevision", "Julyrevision"], kind="bar")
#plt.show()
| production_company.append("None") | conditional_block |
Marriah_Lewis_Cinema_Project.py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 18:52:17 2021
@author: lewis
"""
import csv
import pandas as pd
import re
import statistics
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
from urllib.request import urlopen
#Creating a function that groups by, counts, creates a new column from the index, drops the index and changes the column names
def | (df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop = True, inplace = True)
return(new_df)
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
#Create a function to process the string into an integer by using re.sub()
def process_num(num):
return float(re.sub(r'[^\w\s.]','',num))
#test function
num1 = float(re.sub(r'[^\w\s.]','','1,156.30'))
#print(num1)
#Create array to hold the data extracted
gross=[]
year=[]
film=[]
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
# put the data in the pandas dataframe
movie_df= pd.DataFrame({'Gross': gross,
'first': year,
'Franchise': film
})
#print(movie_df)
#print(movie_df.dtypes)
#movies_df_count = movie_df.groupby(["Franchise", "first"])["first"].count()
#print(movies_df_count)
#WIKI_df=movie_df.groupby(["first"])["first"].count()
#print(WIKI_df)
#WIKI_df.plot(kind='bar',x='first',y='count')
#plt.title("Most Movies Release count by Year(Top 68 on WIKI)",fontsize=20)
#TMDB Kaggle Data
movies_TMDB_kaggle= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv', encoding= 'ISO-8859-1')
#print(len(movies_TMDB_kaggle)) #result 4803 and 20 columns
#print(movies_TMDB_kaggle.isnull().sum()) #tagline and homepage has the most NaN, unnecessary columns
#Clean the dataframe, removed any unnecessary columns
clean_TMDB_movies= movies_TMDB_kaggle.drop(columns=['homepage', 'id', 'overview', 'status', 'tagline', 'original_title'])
#print(clean_TMDB_movies) #result 4803 rows and 14 columns
#print(clean_TMDB_movies.isnull().sum()) # NaNs in the release_date and runtime column
clean_TMDB_movies.dropna(inplace= True)
#print(clean_TMDB_movies.isnull().sum())
#Removing any movie that has a budget of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
#Removing any movie with a revenue of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
#review the profit for each movie therefore a profit column was created
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies['budget']
#Creating a percent profit column in order to compare profits.
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit']/clean_TMDB_movies['budget']*100
#print the top five
#print(clean_TMDB_movies.head())
#checking the data types
#print(clean_TMDB_movies.dtypes)
#change release_date to the date/time and separate it by month, day, and year
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies['release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies['release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
#After new columns were added it is time to concat.
cat = list(range(1,13))
#Changing the month data type from int to ordered category
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'], ordered = True, categories = cat)
#confirmation
#print(clean_TMDB_movies.month.dtype)
#print(len(clean_TMDB_movies))
#print(clean_TMDB_movies.describe())
#print(clean_TMDB_movies.revenue.describe())
#print(clean_TMDB_movies.profit.describe())
#print(clean_TMDB_movies.vote_count.describe())
#print(clean_TMDB_movies.percent_profit.describe())
#discretize the budget column
categories = ["very_low", "low", "high", "very_high"]
#saving the clean_TMDB df as a discretized df
movies_discretized = clean_TMDB_movies
#creating a budget cutoff using pandas cut function
movies_discretized["budget"] = pd.cut(movies_discretized["budget"], [0, 13000000, 30000000, 62192550, 400000000], labels = categories)
#repeat the step for revenue
#print(movies_discretized.revenue.describe())
movies_discretized["revenue"] = pd.cut(movies_discretized["revenue"], [0, 21458200, 62954020, 187976900, 2887965000], labels = categories)
#profit
categories_profit = ["negative", "low", "high", "very_high"]
movies_discretized["profit"] = pd.cut(movies_discretized["profit"], [-165710100 , 0, 29314900, 140784100, 2560965000], labels = categories_profit)
#print(movies_discretized["profit"].head())
#Vote_average-very_low: vote averages less than 6, low are between 6 to 6.5, high between 6.5 and 7 and very_high 7 and 8.5
movies_discretized["vote_average"] = pd.cut(movies_discretized["vote_average"], [0, 6, 6.5, 7, 8.5], labels = categories)
#print(movies_discretized["vote_average"].head())
#Vote_count
movies_discretized["vote_count"] = pd.cut(movies_discretized["vote_count"], [0, 440, 1151, 2522, 14000], labels = categories)
#print(movies_discretized["vote_count"].head())
#percent_profit
movies_discretized["percent_profit"] = pd.cut(movies_discretized["percent_profit"], [-100, 0, 108, 436, 6528], labels = categories_profit)
movies_discretized["percent_profit"]
#Categorizing days into weeks
#print(movies_discretized.day.describe())
categories_weeks = ["week_1", "week_2", "week_3", "week_4"]
movies_discretized["week"] = pd.cut(movies_discretized["day"], [0, 8, 15, 22, 32], labels = categories_weeks)
#print(movies_discretized["week"].head())
#day and release_date are no longer needed columns
movies_discretized.drop(columns=['day', 'release_date'], inplace = True)
#print(movies_discretized.head())
#Do major production companies have an impact the profit margin?
production_company = []
for movie in movies_discretized['production_companies']:
if "Universal" in movie:
production_company.append("Universal")
elif "Sony" in movie:
production_company.append("Sony")
elif "Fox" in movie:
production_company.append("Fox")
elif "DreamWorks" in movie:
production_company.append("DW")
elif "MGM" in movie:
production_company.append("MGM")
elif "Paramount" in movie:
production_company.append("Paramount")
elif "Disney" in movie:
production_company.append("Disney")
elif "Warner Bros" in movie:
production_company.append("WB")
else:
production_company.append("None")
movies_discretized["main_production"] = production_company
#print(movies_discretized["main_production"].head())
movies_discretized_count = movies_discretized.groupby(["main_production", "percent_profit"])["main_production"].count()
movies_discretized_count_df= pd.DataFrame(movies_discretized_count)
#print(movies_discretized_count_df)
#change the last column to count instead of main production
movies_discretized_count_df.columns = ["counts"]
#print(movies_discretized_count_df.head())
#total count for the number of percent_profit counts for each main production.
movies_discretized_count_df["production_company"]=movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df["percent_profit_category"] = movies_discretized_count_df.index.get_level_values(1)
#print(movies_discretized_count_df)
#drop the indexes to create another column with the sum of the counts of each production
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop = True)
#The sum of each production company category.
production_company_discretized_count_df = movies_discretized_count_df.groupby(["production_company"])["counts"].sum()
#print(production_company_discretized_count_df)
#column with the overall counts for each production, construct a new column called production company count that replicates the production company, and then use the replace function to replace the 1s and 2s with the total count
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["DW"], 82)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Disney"], 116)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Fox"], 298)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["MGM"], 87)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["None"], 1782)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Paramount"], 235)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Sony"], 42)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Universal"], 282)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["WB"], 269)
#print(movies_discretized_count_df)
#percentage
movies_discretized_count_df["percent"] = movies_discretized_count_df["counts"]/movies_discretized_count_df["production_company_count"] *100
#print(movies_discretized_count_df.head())
#dropping production_company_count and count column no longer needed
movies_discretized_count_df.drop(["counts", "production_company_count"], axis = 1, inplace = True )
#graphing question 1 using Matplot lib
#graph = movies_discretized_count_df.pivot("production_company", "percent_profit_category","percent").plot(kind="bar", color= ['blue', 'green', 'purple', 'red'], title='Profit Margin amongst Production Companies')
#change the x and y axis for graph
#plt.ylabel("Percent Profit")
#plt.xlabel("Production")
#plt.xticks(rotation = 0)
#position the legends underneath the graph; Now the graph looks beautiful
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit Category")
#plt.show()
#Question 2: Is it true that the month in which a film is released has an impact on its profit margin?
movies_discretized_count_week = movies_discretized.groupby(["week", "percent_profit"])["week"].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
#Checking the dataframe
#print(movies_discretized_count_df_week)
#changing column that is labeled week to count
movies_discretized_count_df_week.columns = ["counts"]
#total count for the number of % profit for each week
movies_discretized_count_df_week["week"]=movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week["percent_profit_category"] = movies_discretized_count_df_week.index.get_level_values(1)
#print(movies_discretized_count_df_week)
movies_discretized_count_df_week = movies_discretized_count_df_week.reset_index(drop = True) #drop the index
#what is the sum of each production
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby(["week"])["counts"].sum()
#print(sum_discretized_count_df_week) #the sums are centered around 700-800s
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week"]
#Now replacing the income level with the total count for each income level
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_1"], 783)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_2"], 817)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_3"], 782)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_4"], 811)
#print(movies_discretized_count_df_week.head())
#received an error Object with dtype category cannot perform the numpy op true_divide
movies_discretized_count_df_week["week_count"]= movies_discretized_count_df_week["week_count"].astype(np.int64)
#convert into percentage; counts/week_count * 100
movies_discretized_count_df_week["percent"] = movies_discretized_count_df_week["counts"]/movies_discretized_count_df_week["week_count"] *100
#print(movies_discretized_count_df_week.head())
#dropping the week_count and count column since the percent column is there those columns are no longer needed
movies_discretized_count_df_week.drop(["counts", "week_count"], axis = 1, inplace = True )
#Time to create a visual
#graph_question_2 = movies_discretized_count_df_week.pivot("week", "percent_profit_category", "percent").plot(kind="bar", color = ["blue", "green", "purple", "red"], title = "Impact of Percent Profit by Week")
#plt.ylabel("Percent")
#plt.xlabel("Week")
#plt.xticks(rotation = 0)
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit")
#plt.show()
#IMDb Kaggle Data
movies_IMDb= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb= movies_IMDb.drop(columns=['imdb_title_id','original_title','description', 'reviews_from_users', 'reviews_from_critics'])
#print(clean_IMDb) #85,855 rows and 17 columns
#print(clean_IMDb.isnull().sum())
clean_IMDb.dropna(inplace = True) #drop all the NaNs
#print(clean_IMDb.isnull().sum()) #no more NaNs
#print(len(clean_IMDb)) #6635
#print(clean_IMDb.dtypes)
# QUESTION 3: How does budget impact vote average?
#plt.plot(clean_IMDb.budget, clean_IMDb.avg_vote, 'o')
#plt.title('How does Budget Impact Vote Average?')
#plt.xlabel('Budget')
#plt.ylabel('Vote Average')
#plt.show()
#print(clean_IMDb['budget'].head())
#print the top five
#print(clean_IMDb.head())
#Using the groupby_count function that takes the following arguments (df, groupby_column, count_column)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
#Sorting the df, so the bar graph will be in descending order
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace = True)
#Statista movie theatre revenue and prediction to 2025 post COVID saving to a pd dataframe
revenue_covid= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv')
#print(AMC_revenue)
#print(AMC_revenue.info())
print(AMC_revenue.head())
#During 2020, AMC Theatres reported annual revenues of 1.24 billion U.S. dollars, a dramatic decrease from previous years as a consequence of the COVID-19 pandemic.
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
#Global box office revenue coronavirus impact 2020-2025
#revenue_covid.plot(x="Year", y=["Originalforecast", "Marchrevision", "Julyrevision"], kind="bar")
#plt.show()
| groupby_count | identifier_name |
trace_processing.py | import os, pickle
import data
DATA_DIR = os.path.join("traces_05_withmobs")
MODEL_DIR = os.path.join("models")
SCHEMA_LIB_FILE = 'schema_lib_3_gather_mobs.pkl'
##############################################################
# Load traces from disk
##############################################################
def load_traces(dir, limit=None):
"""Read in game traces, return as a list."""
games = []
for i, fn in enumerate(os.listdir(dir)):
if limit is not None and i == limit: break
f = open(os.path.join(dir, fn), 'rb')
seed, trace = pickle.load(f)
f.close()
games.append(trace)
return games
##############################################################
# Create hierarchical interactions in traces
##############################################################
def convert_traces(traces):
return [convert_to_interactions(trace) for trace in traces]
def convert_to_interactions(trace):
used = set()
return [create_interaction(trace, i, used) for i in range(len(trace.decisions)) if i not in used]
def create_interaction(trace, offset, used):
start = trace.decisions[offset]
# construct the complete set of behaviors in this interaction, in order
interaction = [start]
agent = start.agent(trace)
if agent is None:
# non-behavioral event, done here
return interaction
# keep track of what each agent in the interaction is doing
doing = {agent.eid : start}
# consider each next (unused) behavior for inclusion
for i in range(offset+1,len(trace.decisions)):
if i in used: continue
dn = trace.decisions[i]
# time moves on, retire active behaviors that have ended at the start of this behavior
retire = set()
for eid,active in doing.items():
if active.end_clock < dn.start_clock:
# == or > are still active (to support meets)
# otherwise, this agent isn't in the interaction anymore (off doing something that didn't replace)
retire.add(eid)
for eid in retire: del doing[eid]
# now consider this one
next_agent = dn.agent(trace)
if next_agent is None:
# non-behavioral event, done here
continue
next_target = dn.behavior_target(trace)
if current_involvement(doing, next_agent.eid):
# agent already in interaction
if next_target is not None and current_involvement(doing, next_target.eid):
# target also in, extend the interaction
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# since target is new, agent must continue to be involved to extend
elif continued_involvement(doing, next_agent.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
else:
# agent off to something else, no longer in this interaction
if next_agent.eid in doing:
del doing[next_agent.eid]
else:
# agent not in interaction, if target is continuing then extend
if next_target is not None and continued_involvement(doing, next_target.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# otherwise nothing to do
if len(doing.keys()) == 0:
#print("Interaction ended at {}".format(dn.decision_state.clock))
break
return interaction
def active_target_of(doing, eid):
"""Returns all the dn behaviors in doing where eid is the target."""
if eid is None: return
for dn in doing.values():
tgt_eid = dn.behavior_target_id()
if tgt_eid == eid:
yield dn
def current_involvement(doing, eid):
"""True if acting or targeted in current doing set."""
if eid in doing: return True
for dn in active_target_of(doing, eid):
return True
return False
def continued_involvement(doing, eid, clock, step=0.01):
"""True if acting or targeted beyond the next frame."""
if eid in doing:
if doing[eid].end_clock > (clock+step): return True
for dn in active_target_of(doing, eid):
if dn.end_clock > (clock+step): return True
return False
##############################################################
# Working with htrace (should probably convert to class)
##############################################################
def interaction_context(interaction, i):
"""Return the dn representing what the target of decision[i] was doing at the time."""
if i == 0: return None
tgt_eid = interaction[i].behavior_target_id()
if tgt_eid is None: |
for j in range(i-1,-1,-1):
print("interaction: {} {}".format(interaction[j].agent_eid, tgt_eid))
if interaction[j].agent_eid == tgt_eid:
return interaction[j]
return None
def interaction_responses(interaction, i):
"""Return the dns that act on the agent of i during that behavior."""
agent_eid = interaction[i].agent_eid
# no agent, no response (REM: that's not right, but is given how we're creating interactions right now)
if agent_eid is None: return []
end = interaction[i].end_clock
for j in range(i+1, len(interaction)):
next = interaction[j]
if next.start_clock <= end and next.behavior_target_id() == agent_eid:
yield (interaction[j])
##############################################################
# Markov event chain
##############################################################
class EntityMapping:
def __init__(self):
self.counts = {'A':0,'C':0,'G':0}
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
ct = self.counts[prefix]
self.counts[prefix] += 1
tok = "{}{}".format(prefix, ct)
self.lookup[ent.eid] = tok
return tok
class EntityMapping2:
def __init__(self):
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
self.lookup[ent.eid] = prefix
return prefix
class MarkovChain:
def __init__(self, htraces, mapper=EntityMapping2):
self.nodes = {'Start' : MCNode()}
self.reverse_edges = {}
for htrace in htraces:
# treat each agent as a separate run
agents = self.extract_agents(htrace)
for agent_eid in agents:
entity_mapping = mapper()
current_tag = 'Start'
current_grp = None
for grp in htrace:
# only things relevant to this agent!
head_dn = grp[0]
tgt = head_dn.behavior_target()
if head_dn.agent_eid == agent_eid or (tgt is not None and tgt.eid == agent_eid):
current_tag = self.add_transition(current_tag, grp, current_grp, entity_mapping)
current_grp = grp
self.add_transition(current_tag, None, current_grp, entity_mapping)
def add_transition(self, current_tag, to_grp, from_grp, entity_mapping):
"""Each node is a dict of tag : [transition_ct,case list]."""
# add destination node if necessary
if to_grp == None:
to_tag = 'End'
else:
to_tag = self.make_tag(to_grp, entity_mapping)
if to_tag not in self.nodes:
self.nodes[to_tag] = MCNode()
# add transition out of current node
node = self.nodes[current_tag]
node.add_transition(to_tag, from_grp, to_grp)
# add reverse edge index
if to_tag in self.reverse_edges: self.reverse_edges[to_tag].add(current_tag)
else: self.reverse_edges[to_tag] = set((current_tag,))
# move current
return to_tag
def make_tag(self, grp, entity_mapping):
head_dn = grp[0]
tgt = head_dn.behavior_target()
# if len(grp) > 1: pre = 'GRP'
# else: pre = ''
pre = ''
if tgt is None:
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig)
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig.replace(str(tgt.eid), entity_mapping.token(tgt)))
def extract_agents(self, htrace, trace):
A = set()
for grp in htrace:
for dn in grp:
agent = dn.agent(trace)
if agent.tid == data.AGENT_TYPE_ID:
A.add(agent.eid)
return A
def all_case_groups(self, node_tag):
"""All the cases for a given node are in the transitions *to* that node."""
for tag in self.reverse_edges[node_tag]:
for from_grp,to_grp in self.nodes[tag].transitions[node_tag]:
yield to_grp
def make_sub_chain(self, node_tag, mapper=EntityMapping2):
if node_tag != 'End':
self.nodes[node_tag].chain = MarkovChain(([(dn,) for dn in grp] for grp in self.all_case_groups(node_tag)), mapper)
def __str__(self):
return "\n".join(("{} {} => {}".format(node.out_ct, tag, str(node)) for tag,node in sorted(self.nodes.items(), key=lambda e: e[0]) if tag != 'End'))
class MCNode:
""""""
def __init__(self):
self.out_ct = 0
self.transitions = {}
self.chain = None
def add_transition(self, to_tag, from_grp, to_grp):
"""Transitions is a dict of tag : [cases]."""
self.out_ct += 1
if to_tag in self.transitions:
self.transitions[to_tag].append((from_grp, to_grp))
else:
self.transitions[to_tag] = [(from_grp, to_grp)]
def __str__(self):
return ", ".join(("{:.2f}: {}".format(len(cases)/self.out_ct, tag) for tag,cases in self.transitions.items()))
##############################################################
if __name__ == '__main__':
traces = load_traces(DATA_DIR)
# convert to hierarchical events
htraces = convert_traces(traces)
for t,ht in zip(traces,htraces):
print("=========================")
print(t)
print("=========================")
for intr in ht:
print("-----------")
for dn in intr:
print(dn)
# # make hierarchical Markov Chain
# print("==================== Top level chain ===========================")
# chain = MarkovChain(htraces)
# print(chain, flush=True)
#
# for tag in chain.nodes.keys():
# if tag not in ('Start', 'End'):
# print(" ========== Sub chain for {}".format(tag))
# chain.make_sub_chain(tag)
# print(chain.nodes[tag].chain)
| return None | conditional_block |
trace_processing.py | import os, pickle
import data
DATA_DIR = os.path.join("traces_05_withmobs")
MODEL_DIR = os.path.join("models")
SCHEMA_LIB_FILE = 'schema_lib_3_gather_mobs.pkl'
##############################################################
# Load traces from disk
##############################################################
def load_traces(dir, limit=None):
"""Read in game traces, return as a list."""
games = []
for i, fn in enumerate(os.listdir(dir)):
if limit is not None and i == limit: break
f = open(os.path.join(dir, fn), 'rb')
seed, trace = pickle.load(f)
f.close()
games.append(trace)
return games
##############################################################
# Create hierarchical interactions in traces
##############################################################
def convert_traces(traces):
return [convert_to_interactions(trace) for trace in traces]
def convert_to_interactions(trace):
used = set()
return [create_interaction(trace, i, used) for i in range(len(trace.decisions)) if i not in used]
def create_interaction(trace, offset, used):
start = trace.decisions[offset]
# construct the complete set of behaviors in this interaction, in order
interaction = [start]
agent = start.agent(trace)
if agent is None:
# non-behavioral event, done here
return interaction
# keep track of what each agent in the interaction is doing
doing = {agent.eid : start}
# consider each next (unused) behavior for inclusion
for i in range(offset+1,len(trace.decisions)):
if i in used: continue
dn = trace.decisions[i]
# time moves on, retire active behaviors that have ended at the start of this behavior
retire = set()
for eid,active in doing.items():
if active.end_clock < dn.start_clock:
# == or > are still active (to support meets)
# otherwise, this agent isn't in the interaction anymore (off doing something that didn't replace)
retire.add(eid)
for eid in retire: del doing[eid]
# now consider this one
next_agent = dn.agent(trace)
if next_agent is None:
# non-behavioral event, done here
continue
next_target = dn.behavior_target(trace)
if current_involvement(doing, next_agent.eid):
# agent already in interaction
if next_target is not None and current_involvement(doing, next_target.eid):
# target also in, extend the interaction
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# since target is new, agent must continue to be involved to extend
elif continued_involvement(doing, next_agent.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
else:
# agent off to something else, no longer in this interaction
if next_agent.eid in doing:
del doing[next_agent.eid]
else:
# agent not in interaction, if target is continuing then extend
if next_target is not None and continued_involvement(doing, next_target.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# otherwise nothing to do
if len(doing.keys()) == 0:
#print("Interaction ended at {}".format(dn.decision_state.clock))
break
return interaction
def active_target_of(doing, eid):
"""Returns all the dn behaviors in doing where eid is the target."""
if eid is None: return
for dn in doing.values():
tgt_eid = dn.behavior_target_id()
if tgt_eid == eid:
yield dn
def current_involvement(doing, eid):
|
def continued_involvement(doing, eid, clock, step=0.01):
"""True if acting or targeted beyond the next frame."""
if eid in doing:
if doing[eid].end_clock > (clock+step): return True
for dn in active_target_of(doing, eid):
if dn.end_clock > (clock+step): return True
return False
##############################################################
# Working with htrace (should probably convert to class)
##############################################################
def interaction_context(interaction, i):
"""Return the dn representing what the target of decision[i] was doing at the time."""
if i == 0: return None
tgt_eid = interaction[i].behavior_target_id()
if tgt_eid is None: return None
for j in range(i-1,-1,-1):
print("interaction: {} {}".format(interaction[j].agent_eid, tgt_eid))
if interaction[j].agent_eid == tgt_eid:
return interaction[j]
return None
def interaction_responses(interaction, i):
"""Return the dns that act on the agent of i during that behavior."""
agent_eid = interaction[i].agent_eid
# no agent, no response (REM: that's not right, but is given how we're creating interactions right now)
if agent_eid is None: return []
end = interaction[i].end_clock
for j in range(i+1, len(interaction)):
next = interaction[j]
if next.start_clock <= end and next.behavior_target_id() == agent_eid:
yield (interaction[j])
##############################################################
# Markov event chain
##############################################################
class EntityMapping:
def __init__(self):
self.counts = {'A':0,'C':0,'G':0}
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
ct = self.counts[prefix]
self.counts[prefix] += 1
tok = "{}{}".format(prefix, ct)
self.lookup[ent.eid] = tok
return tok
class EntityMapping2:
def __init__(self):
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
self.lookup[ent.eid] = prefix
return prefix
class MarkovChain:
def __init__(self, htraces, mapper=EntityMapping2):
self.nodes = {'Start' : MCNode()}
self.reverse_edges = {}
for htrace in htraces:
# treat each agent as a separate run
agents = self.extract_agents(htrace)
for agent_eid in agents:
entity_mapping = mapper()
current_tag = 'Start'
current_grp = None
for grp in htrace:
# only things relevant to this agent!
head_dn = grp[0]
tgt = head_dn.behavior_target()
if head_dn.agent_eid == agent_eid or (tgt is not None and tgt.eid == agent_eid):
current_tag = self.add_transition(current_tag, grp, current_grp, entity_mapping)
current_grp = grp
self.add_transition(current_tag, None, current_grp, entity_mapping)
def add_transition(self, current_tag, to_grp, from_grp, entity_mapping):
"""Each node is a dict of tag : [transition_ct,case list]."""
# add destination node if necessary
if to_grp == None:
to_tag = 'End'
else:
to_tag = self.make_tag(to_grp, entity_mapping)
if to_tag not in self.nodes:
self.nodes[to_tag] = MCNode()
# add transition out of current node
node = self.nodes[current_tag]
node.add_transition(to_tag, from_grp, to_grp)
# add reverse edge index
if to_tag in self.reverse_edges: self.reverse_edges[to_tag].add(current_tag)
else: self.reverse_edges[to_tag] = set((current_tag,))
# move current
return to_tag
def make_tag(self, grp, entity_mapping):
head_dn = grp[0]
tgt = head_dn.behavior_target()
# if len(grp) > 1: pre = 'GRP'
# else: pre = ''
pre = ''
if tgt is None:
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig)
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig.replace(str(tgt.eid), entity_mapping.token(tgt)))
def extract_agents(self, htrace, trace):
A = set()
for grp in htrace:
for dn in grp:
agent = dn.agent(trace)
if agent.tid == data.AGENT_TYPE_ID:
A.add(agent.eid)
return A
def all_case_groups(self, node_tag):
"""All the cases for a given node are in the transitions *to* that node."""
for tag in self.reverse_edges[node_tag]:
for from_grp,to_grp in self.nodes[tag].transitions[node_tag]:
yield to_grp
def make_sub_chain(self, node_tag, mapper=EntityMapping2):
if node_tag != 'End':
self.nodes[node_tag].chain = MarkovChain(([(dn,) for dn in grp] for grp in self.all_case_groups(node_tag)), mapper)
def __str__(self):
return "\n".join(("{} {} => {}".format(node.out_ct, tag, str(node)) for tag,node in sorted(self.nodes.items(), key=lambda e: e[0]) if tag != 'End'))
class MCNode:
""""""
def __init__(self):
self.out_ct = 0
self.transitions = {}
self.chain = None
def add_transition(self, to_tag, from_grp, to_grp):
"""Transitions is a dict of tag : [cases]."""
self.out_ct += 1
if to_tag in self.transitions:
self.transitions[to_tag].append((from_grp, to_grp))
else:
self.transitions[to_tag] = [(from_grp, to_grp)]
def __str__(self):
return ", ".join(("{:.2f}: {}".format(len(cases)/self.out_ct, tag) for tag,cases in self.transitions.items()))
##############################################################
if __name__ == '__main__':
traces = load_traces(DATA_DIR)
# convert to hierarchical events
htraces = convert_traces(traces)
for t,ht in zip(traces,htraces):
print("=========================")
print(t)
print("=========================")
for intr in ht:
print("-----------")
for dn in intr:
print(dn)
# # make hierarchical Markov Chain
# print("==================== Top level chain ===========================")
# chain = MarkovChain(htraces)
# print(chain, flush=True)
#
# for tag in chain.nodes.keys():
# if tag not in ('Start', 'End'):
# print(" ========== Sub chain for {}".format(tag))
# chain.make_sub_chain(tag)
# print(chain.nodes[tag].chain)
| """True if acting or targeted in current doing set."""
if eid in doing: return True
for dn in active_target_of(doing, eid):
return True
return False | identifier_body |
trace_processing.py | import os, pickle
import data
DATA_DIR = os.path.join("traces_05_withmobs")
MODEL_DIR = os.path.join("models")
SCHEMA_LIB_FILE = 'schema_lib_3_gather_mobs.pkl'
##############################################################
# Load traces from disk
##############################################################
def load_traces(dir, limit=None):
"""Read in game traces, return as a list."""
games = []
for i, fn in enumerate(os.listdir(dir)):
if limit is not None and i == limit: break
f = open(os.path.join(dir, fn), 'rb')
seed, trace = pickle.load(f)
f.close()
games.append(trace)
return games
##############################################################
# Create hierarchical interactions in traces
##############################################################
def convert_traces(traces):
return [convert_to_interactions(trace) for trace in traces]
def convert_to_interactions(trace):
used = set()
return [create_interaction(trace, i, used) for i in range(len(trace.decisions)) if i not in used]
def create_interaction(trace, offset, used):
start = trace.decisions[offset]
# construct the complete set of behaviors in this interaction, in order
interaction = [start]
agent = start.agent(trace)
if agent is None:
# non-behavioral event, done here
return interaction
# keep track of what each agent in the interaction is doing
doing = {agent.eid : start}
# consider each next (unused) behavior for inclusion
for i in range(offset+1,len(trace.decisions)):
if i in used: continue
dn = trace.decisions[i]
# time moves on, retire active behaviors that have ended at the start of this behavior
retire = set()
for eid,active in doing.items():
if active.end_clock < dn.start_clock:
# == or > are still active (to support meets)
# otherwise, this agent isn't in the interaction anymore (off doing something that didn't replace)
retire.add(eid)
for eid in retire: del doing[eid]
# now consider this one
next_agent = dn.agent(trace)
if next_agent is None:
# non-behavioral event, done here
continue
next_target = dn.behavior_target(trace)
if current_involvement(doing, next_agent.eid):
# agent already in interaction
if next_target is not None and current_involvement(doing, next_target.eid):
# target also in, extend the interaction
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# since target is new, agent must continue to be involved to extend
elif continued_involvement(doing, next_agent.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
else:
# agent off to something else, no longer in this interaction
if next_agent.eid in doing:
del doing[next_agent.eid]
else:
# agent not in interaction, if target is continuing then extend
if next_target is not None and continued_involvement(doing, next_target.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# otherwise nothing to do
if len(doing.keys()) == 0:
#print("Interaction ended at {}".format(dn.decision_state.clock))
break
return interaction
def active_target_of(doing, eid):
"""Returns all the dn behaviors in doing where eid is the target."""
if eid is None: return
for dn in doing.values():
tgt_eid = dn.behavior_target_id()
if tgt_eid == eid:
yield dn
def current_involvement(doing, eid):
"""True if acting or targeted in current doing set."""
if eid in doing: return True
for dn in active_target_of(doing, eid):
return True
return False
def continued_involvement(doing, eid, clock, step=0.01):
"""True if acting or targeted beyond the next frame."""
if eid in doing:
if doing[eid].end_clock > (clock+step): return True
for dn in active_target_of(doing, eid):
if dn.end_clock > (clock+step): return True
return False
##############################################################
# Working with htrace (should probably convert to class)
##############################################################
def interaction_context(interaction, i):
"""Return the dn representing what the target of decision[i] was doing at the time."""
if i == 0: return None
tgt_eid = interaction[i].behavior_target_id()
if tgt_eid is None: return None
| return interaction[j]
return None
def interaction_responses(interaction, i):
"""Return the dns that act on the agent of i during that behavior."""
agent_eid = interaction[i].agent_eid
# no agent, no response (REM: that's not right, but is given how we're creating interactions right now)
if agent_eid is None: return []
end = interaction[i].end_clock
for j in range(i+1, len(interaction)):
next = interaction[j]
if next.start_clock <= end and next.behavior_target_id() == agent_eid:
yield (interaction[j])
##############################################################
# Markov event chain
##############################################################
class EntityMapping:
def __init__(self):
self.counts = {'A':0,'C':0,'G':0}
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
ct = self.counts[prefix]
self.counts[prefix] += 1
tok = "{}{}".format(prefix, ct)
self.lookup[ent.eid] = tok
return tok
class EntityMapping2:
def __init__(self):
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
self.lookup[ent.eid] = prefix
return prefix
class MarkovChain:
def __init__(self, htraces, mapper=EntityMapping2):
self.nodes = {'Start' : MCNode()}
self.reverse_edges = {}
for htrace in htraces:
# treat each agent as a separate run
agents = self.extract_agents(htrace)
for agent_eid in agents:
entity_mapping = mapper()
current_tag = 'Start'
current_grp = None
for grp in htrace:
# only things relevant to this agent!
head_dn = grp[0]
tgt = head_dn.behavior_target()
if head_dn.agent_eid == agent_eid or (tgt is not None and tgt.eid == agent_eid):
current_tag = self.add_transition(current_tag, grp, current_grp, entity_mapping)
current_grp = grp
self.add_transition(current_tag, None, current_grp, entity_mapping)
def add_transition(self, current_tag, to_grp, from_grp, entity_mapping):
"""Each node is a dict of tag : [transition_ct,case list]."""
# add destination node if necessary
if to_grp == None:
to_tag = 'End'
else:
to_tag = self.make_tag(to_grp, entity_mapping)
if to_tag not in self.nodes:
self.nodes[to_tag] = MCNode()
# add transition out of current node
node = self.nodes[current_tag]
node.add_transition(to_tag, from_grp, to_grp)
# add reverse edge index
if to_tag in self.reverse_edges: self.reverse_edges[to_tag].add(current_tag)
else: self.reverse_edges[to_tag] = set((current_tag,))
# move current
return to_tag
def make_tag(self, grp, entity_mapping):
head_dn = grp[0]
tgt = head_dn.behavior_target()
# if len(grp) > 1: pre = 'GRP'
# else: pre = ''
pre = ''
if tgt is None:
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig)
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig.replace(str(tgt.eid), entity_mapping.token(tgt)))
def extract_agents(self, htrace, trace):
A = set()
for grp in htrace:
for dn in grp:
agent = dn.agent(trace)
if agent.tid == data.AGENT_TYPE_ID:
A.add(agent.eid)
return A
def all_case_groups(self, node_tag):
"""All the cases for a given node are in the transitions *to* that node."""
for tag in self.reverse_edges[node_tag]:
for from_grp,to_grp in self.nodes[tag].transitions[node_tag]:
yield to_grp
def make_sub_chain(self, node_tag, mapper=EntityMapping2):
if node_tag != 'End':
self.nodes[node_tag].chain = MarkovChain(([(dn,) for dn in grp] for grp in self.all_case_groups(node_tag)), mapper)
def __str__(self):
return "\n".join(("{} {} => {}".format(node.out_ct, tag, str(node)) for tag,node in sorted(self.nodes.items(), key=lambda e: e[0]) if tag != 'End'))
class MCNode:
""""""
def __init__(self):
self.out_ct = 0
self.transitions = {}
self.chain = None
def add_transition(self, to_tag, from_grp, to_grp):
"""Transitions is a dict of tag : [cases]."""
self.out_ct += 1
if to_tag in self.transitions:
self.transitions[to_tag].append((from_grp, to_grp))
else:
self.transitions[to_tag] = [(from_grp, to_grp)]
def __str__(self):
return ", ".join(("{:.2f}: {}".format(len(cases)/self.out_ct, tag) for tag,cases in self.transitions.items()))
##############################################################
if __name__ == '__main__':
traces = load_traces(DATA_DIR)
# convert to hierarchical events
htraces = convert_traces(traces)
for t,ht in zip(traces,htraces):
print("=========================")
print(t)
print("=========================")
for intr in ht:
print("-----------")
for dn in intr:
print(dn)
# # make hierarchical Markov Chain
# print("==================== Top level chain ===========================")
# chain = MarkovChain(htraces)
# print(chain, flush=True)
#
# for tag in chain.nodes.keys():
# if tag not in ('Start', 'End'):
# print(" ========== Sub chain for {}".format(tag))
# chain.make_sub_chain(tag)
# print(chain.nodes[tag].chain) | for j in range(i-1,-1,-1):
print("interaction: {} {}".format(interaction[j].agent_eid, tgt_eid))
if interaction[j].agent_eid == tgt_eid: | random_line_split |
trace_processing.py | import os, pickle
import data
DATA_DIR = os.path.join("traces_05_withmobs")
MODEL_DIR = os.path.join("models")
SCHEMA_LIB_FILE = 'schema_lib_3_gather_mobs.pkl'
##############################################################
# Load traces from disk
##############################################################
def load_traces(dir, limit=None):
    """Read in game traces, return as a list.

    Each file in `dir` is a pickled (seed, trace) tuple; seeds are discarded.

    Parameters:
        dir: directory containing pickled trace files.
        limit: optional maximum number of files to load (None = all).

    Returns:
        list of trace objects, in os.listdir order.
    """
    games = []
    for i, fn in enumerate(os.listdir(dir)):
        if limit is not None and i == limit:
            break
        # Context manager guarantees the handle is closed even if
        # pickle.load raises (the original leaked the file on error).
        with open(os.path.join(dir, fn), 'rb') as f:
            seed, trace = pickle.load(f)
        games.append(trace)
    return games
##############################################################
# Create hierarchical interactions in traces
##############################################################
def convert_traces(traces):
    """Convert every raw trace into its hierarchical-interaction form."""
    converted = []
    for trace in traces:
        converted.append(convert_to_interactions(trace))
    return converted
def convert_to_interactions(trace):
    """Group a trace's decisions into interactions.

    Decision indices consumed by an earlier interaction are tracked in
    `used` so each decision belongs to at most one interaction.
    """
    used = set()
    interactions = []
    for i in range(len(trace.decisions)):
        if i in used:
            continue
        interactions.append(create_interaction(trace, i, used))
    return interactions
def create_interaction(trace, offset, used):
"""Build one interaction: the decision at `offset` plus every later unused
decision whose agent or target is already participating.

Consumed decision indices are added to `used` so callers skip them.
NOTE(review): assumes trace.decisions is ordered by start_clock — confirm.
"""
start = trace.decisions[offset]
# construct the complete set of behaviors in this interaction, in order
interaction = [start]
agent = start.agent(trace)
if agent is None:
# non-behavioral event, done here
return interaction
# keep track of what each agent in the interaction is doing
doing = {agent.eid : start}
# consider each next (unused) behavior for inclusion
for i in range(offset+1,len(trace.decisions)):
if i in used: continue
dn = trace.decisions[i]
# time moves on, retire active behaviors that have ended at the start of this behavior
retire = set()
for eid,active in doing.items():
if active.end_clock < dn.start_clock:
# == or > are still active (to support meets)
# otherwise, this agent isn't in the interaction anymore (off doing something that didn't replace)
retire.add(eid)
for eid in retire: del doing[eid]
# now consider this one
next_agent = dn.agent(trace)
if next_agent is None:
# non-behavioral event, done here
continue
next_target = dn.behavior_target(trace)
if current_involvement(doing, next_agent.eid):
# agent already in interaction
if next_target is not None and current_involvement(doing, next_target.eid):
# target also in, extend the interaction
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# since target is new, agent must continue to be involved to extend
elif continued_involvement(doing, next_agent.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
else:
# agent off to something else, no longer in this interaction
if next_agent.eid in doing:
del doing[next_agent.eid]
else:
# agent not in interaction, if target is continuing then extend
if next_target is not None and continued_involvement(doing, next_target.eid, dn.start_clock):
doing[next_agent.eid] = dn
interaction.append(dn)
used.add(i)
# otherwise nothing to do
# no one left acting: the interaction is over, stop scanning
if len(doing.keys()) == 0:
#print("Interaction ended at {}".format(dn.decision_state.clock))
break
return interaction
def active_target_of(doing, eid):
    """Returns all the dn behaviors in doing where eid is the target."""
    if eid is None:
        return
    yield from (dn for dn in doing.values() if dn.behavior_target_id() == eid)
# NOTE(review): the function name is elided here ("def | (...)"): this is a
# fill-in-the-middle hole in the dataset row; the expected identifier is
# recorded later in the row as `current_involvement`.
def | (doing, eid):
"""True if acting or targeted in current doing set."""
# Involved = currently acting (a key of `doing`) or the target of any
# active behavior in `doing`.
if eid in doing: return True
for dn in active_target_of(doing, eid):
return True
return False
def continued_involvement(doing, eid, clock, step=0.01):
    """True if acting or targeted beyond the next frame.

    `step` is the look-ahead window: a behavior counts only if it is still
    running after clock + step.
    """
    horizon = clock + step
    if eid in doing and doing[eid].end_clock > horizon:
        return True
    return any(dn.end_clock > horizon for dn in active_target_of(doing, eid))
##############################################################
# Working with htrace (should probably convert to class)
##############################################################
def interaction_context(interaction, i):
    """Return the dn representing what the target of decision[i] was doing at the time.

    Returns None when i is the first decision, when decision[i] has no
    target, or when the target never acted earlier in the interaction.
    """
    if i == 0:
        return None
    tgt_eid = interaction[i].behavior_target_id()
    if tgt_eid is None:
        return None
    # Scan backwards for the most recent decision made by the target itself.
    # (Removed a leftover debug print that fired on every scanned entry.)
    for j in range(i - 1, -1, -1):
        if interaction[j].agent_eid == tgt_eid:
            return interaction[j]
    return None
def interaction_responses(interaction, i):
    """Return the dns that act on the agent of i during that behavior.

    Yields every later decision whose target is decision[i]'s agent and
    which starts no later than decision[i] ends.
    """
    agent_eid = interaction[i].agent_eid
    # no agent, no response (REM: that's not right, but is given how we're creating interactions right now)
    if agent_eid is None:
        return []
    end = interaction[i].end_clock
    for j in range(i + 1, len(interaction)):
        # Renamed from `next`, which shadowed the builtin next().
        candidate = interaction[j]
        if candidate.start_clock <= end and candidate.behavior_target_id() == agent_eid:
            yield candidate
##############################################################
# Markov event chain
##############################################################
class EntityMapping:
# Maps concrete entity ids onto generic, numbered role tokens ('A0', 'C1',
# 'G2', ...) so tags built from different runs are comparable.
def __init__(self):
# per-role counters used to number tokens
self.counts = {'A':0,'C':0,'G':0}
# eid -> assigned token (stable for the mapping's lifetime)
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
# NOTE(review): if ent.tid matches none of the branches above, `prefix`
# is unbound and this raises NameError — confirm all tids are covered.
ct = self.counts[prefix]
self.counts[prefix] += 1
tok = "{}{}".format(prefix, ct)
self.lookup[ent.eid] = tok
return tok
class EntityMapping2:
# Coarser variant of EntityMapping: every entity maps to its bare role
# letter ('A'/'C'/'G') with no per-entity numbering.
def __init__(self):
# eid -> role letter
self.lookup = {}
def token(self, ent):
"""Lookup or add generic token for entity."""
if ent.eid in self.lookup: return self.lookup[ent.eid]
if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'
elif ent.tid in data.combatants: prefix = 'C'
elif ent.tid in data.gatherable: prefix = 'G'
# NOTE(review): `prefix` is unbound (NameError) if no branch matches —
# confirm every tid falls into one of the three categories.
self.lookup[ent.eid] = prefix
return prefix
class MarkovChain:
# A tag-labelled transition graph over hierarchical traces. Each agent in
# each htrace contributes one Start -> ... -> End run.
def __init__(self, htraces, mapper=EntityMapping2):
self.nodes = {'Start' : MCNode()}
# to_tag -> set of predecessor tags, used by all_case_groups
self.reverse_edges = {}
for htrace in htraces:
# treat each agent as a separate run
# NOTE(review): extract_agents is declared with (htrace, trace) but is
# invoked here with only htrace — this call raises TypeError as
# written; confirm which signature is intended.
agents = self.extract_agents(htrace)
for agent_eid in agents:
entity_mapping = mapper()
current_tag = 'Start'
current_grp = None
for grp in htrace:
# only things relevant to this agent!
head_dn = grp[0]
tgt = head_dn.behavior_target()
if head_dn.agent_eid == agent_eid or (tgt is not None and tgt.eid == agent_eid):
current_tag = self.add_transition(current_tag, grp, current_grp, entity_mapping)
current_grp = grp
# terminal transition into the synthetic 'End' node
self.add_transition(current_tag, None, current_grp, entity_mapping)
def add_transition(self, current_tag, to_grp, from_grp, entity_mapping):
"""Each node is a dict of tag : [transition_ct,case list]."""
# add destination node if necessary
if to_grp == None:
to_tag = 'End'
else:
to_tag = self.make_tag(to_grp, entity_mapping)
if to_tag not in self.nodes:
self.nodes[to_tag] = MCNode()
# add transition out of current node
node = self.nodes[current_tag]
node.add_transition(to_tag, from_grp, to_grp)
# add reverse edge index
if to_tag in self.reverse_edges: self.reverse_edges[to_tag].add(current_tag)
else: self.reverse_edges[to_tag] = set((current_tag,))
# move current
return to_tag
def make_tag(self, grp, entity_mapping):
# Tag = generic agent token + behavior signature, with the target's eid
# (if any) replaced by its generic token.
# NOTE(review): agent() and behavior_target() are called here without a
# trace argument, while other code passes one (dn.agent(trace)) —
# confirm both call forms are supported.
head_dn = grp[0]
tgt = head_dn.behavior_target()
# if len(grp) > 1: pre = 'GRP'
# else: pre = ''
pre = ''
if tgt is None:
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig)
return "{}{}{}".format(pre, entity_mapping.token(head_dn.agent()), head_dn.behavior_sig.replace(str(tgt.eid), entity_mapping.token(tgt)))
def extract_agents(self, htrace, trace):
# Collect the eids of all player-agent entities appearing in the htrace.
A = set()
for grp in htrace:
for dn in grp:
agent = dn.agent(trace)
if agent.tid == data.AGENT_TYPE_ID:
A.add(agent.eid)
return A
def all_case_groups(self, node_tag):
"""All the cases for a given node are in the transitions *to* that node."""
for tag in self.reverse_edges[node_tag]:
for from_grp,to_grp in self.nodes[tag].transitions[node_tag]:
yield to_grp
def make_sub_chain(self, node_tag, mapper=EntityMapping2):
# Build a nested chain from this node's case groups; 'End' is terminal.
if node_tag != 'End':
self.nodes[node_tag].chain = MarkovChain(([(dn,) for dn in grp] for grp in self.all_case_groups(node_tag)), mapper)
def __str__(self):
# One line per non-'End' node, sorted by tag.
return "\n".join(("{} {} => {}".format(node.out_ct, tag, str(node)) for tag,node in sorted(self.nodes.items(), key=lambda e: e[0]) if tag != 'End'))
class MCNode:
    """A single Markov-chain node: outgoing-transition counts and cases."""

    def __init__(self):
        self.out_ct = 0        # total number of outgoing transitions observed
        self.transitions = {}  # to_tag -> list of (from_grp, to_grp) cases
        self.chain = None      # optional nested MarkovChain for this node

    def add_transition(self, to_tag, from_grp, to_grp):
        """Record one observed transition to `to_tag` with its case pair."""
        self.out_ct += 1
        self.transitions.setdefault(to_tag, []).append((from_grp, to_grp))

    def __str__(self):
        # "<probability>: <tag>" per destination, probability = case share.
        parts = ("{:.2f}: {}".format(len(cases) / self.out_ct, tag)
                 for tag, cases in self.transitions.items())
        return ", ".join(parts)
##############################################################
# Demo/debug driver: load every pickled trace, convert to hierarchical
# interactions, and print both for manual inspection.
if __name__ == '__main__':
traces = load_traces(DATA_DIR)
# convert to hierarchical events
htraces = convert_traces(traces)
for t,ht in zip(traces,htraces):
print("=========================")
print(t)
print("=========================")
for intr in ht:
print("-----------")
for dn in intr:
print(dn)
# # make hierarchical Markov Chain
# print("==================== Top level chain ===========================")
# chain = MarkovChain(htraces)
# print(chain, flush=True)
#
# for tag in chain.nodes.keys():
# if tag not in ('Start', 'End'):
# print(" ========== Sub chain for {}".format(tag))
# chain.make_sub_chain(tag)
# print(chain.nodes[tag].chain)
| current_involvement | identifier_name |
label.go | package label
import (
"fmt"
"regexp"
"strings"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
)
const PluginName = "ti-community-label"
var (
// Command templates: "/<prefix> args" and "/remove-<prefix> args"; the
// %s is filled with the configured prefixes joined by "|" in handle.
labelRegexp = `(?m)^/(%s)\s*(.*)$`
removeLabelRegexp = `(?m)^/remove-(%s)\s*(.*)$`
// Bare "/label" and "/remove-label" commands for additional labels.
customLabelRegex = regexp.MustCompile(`(?m)^/label\s*(.*)$`)
customRemoveLabelRegex = regexp.MustCompile(`(?m)^/remove-label\s*(.*)$`)
// User-facing feedback messages posted as issue comments.
nonExistentAdditionalLabels = "The label(s) `%s` cannot be applied. These labels are supported: `%s`."
nonExistentLabelInRepo = "The label(s) `%s` cannot be applied, because the repository doesn't have them."
nonExistentLabelOnIssue = "These labels are not set on the issue: `%v`."
)
// githubClient is the narrow slice of the GitHub API this plugin needs,
// declared as an interface so tests can substitute a fake.
type githubClient interface {
CreateComment(owner, repo string, number int, comment string) error
AddLabel(owner, repo string, number int, label string) error
RemoveLabel(owner, repo string, number int, label string) error
GetRepoLabels(owner, repo string) ([]github.Label, error)
GetIssueLabels(org, repo string, number int) ([]github.Label, error)
}
// HelpProvider constructs the PluginHelp for this plugin that takes into account enabled repositories.
// The returned closure is invoked by the external-plugin help framework and
// renders the effective per-repo configuration plus command usage.
func HelpProvider(epa *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
labelConfig := map[string]string{}
cfg := epa.Config()
// Summarize each enabled repo's prefix / additional / exclude options.
for _, repo := range enabledRepos {
opts := cfg.LabelFor(repo.Org, repo.Repo)
var prefixConfigMsg, additionalLabelsConfigMsg, excludeLabelsConfigMsg string
if opts.Prefixes != nil {
prefixConfigMsg = fmt.Sprintf("The label plugin includes commands based on %v prefixes.\n", opts.Prefixes)
}
if opts.AdditionalLabels != nil {
additionalLabelsConfigMsg = fmt.Sprintf("%v labels can be used with the `/[remove-]label` command.\n",
opts.AdditionalLabels)
}
if opts.ExcludeLabels != nil {
excludeLabelsConfigMsg = fmt.Sprintf("%v labels cannot be added by command.\n",
opts.ExcludeLabels)
}
labelConfig[repo.String()] = prefixConfigMsg + additionalLabelsConfigMsg + excludeLabelsConfigMsg
}
// Example configuration snippet shown in the generated help page.
yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
TiCommunityLabel: []tiexternalplugins.TiCommunityLabel{
{
Repos: []string{"ti-community-infra/test-dev"},
AdditionalLabels: []string{"needs-cherry-pick-1.1", "needs-cherry-pick-1.0"},
Prefixes: []string{"type", "status"},
ExcludeLabels: []string{"stats/can-merge"},
},
},
})
// Snippet generation failure is non-fatal: log and continue with help.
if err != nil {
logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
}
pluginHelp := &pluginhelp.PluginHelp{
Description: "The label plugin provides commands that add or remove certain types of labels. " +
"For example, the labels like 'status/*', 'sig/*' and bare labels can be " +
"managed by using `/status`, `/sig` and `/label`.",
Config: labelConfig,
Snippet: yamlSnippet,
Events: []string{tiexternalplugins.IssueCommentEvent},
}
pluginHelp.AddCommand(pluginhelp.Command{
Usage: "/[remove-](status|sig|type|label|component) <target>",
Description: "Add or remove a label of the given type.",
Featured: false,
WhoCanUse: "Everyone can trigger this command.",
Examples: []string{"/type bug", "/remove-sig engine", "/sig engine"},
})
return pluginHelp, nil
}
}
// HandleIssueCommentEvent processes a GitHub issue-comment event for the
// label plugin: it resolves the per-repo options and delegates to handle.
func HandleIssueCommentEvent(gc githubClient, ice *github.IssueCommentEvent,
	cfg *tiexternalplugins.Configuration, log *logrus.Entry) error {
	opts := cfg.LabelFor(ice.Repo.Owner.Login, ice.Repo.Name)

	// Assigning from a nil slice yields a nil slice, so the option fields
	// can be passed straight through; handle treats nil and empty alike.
	additionalLabels := opts.AdditionalLabels
	prefixes := opts.Prefixes
	excludeLabels := opts.ExcludeLabels

	return handle(gc, log, additionalLabels, prefixes, excludeLabels, ice)
}
// Get labels from RegExp matches.
func getLabelsFromREMatches(matches [][]string) (labels []string) {
for _, match := range matches {
parts := strings.Split(strings.TrimSpace(match[0]), " ")
for _, label := range parts[1:] {
// Filter out invisible characters that may be matched.
if len(strings.TrimSpace(label)) == 0 {
continue
}
label = strings.ToLower(match[1] + "/" + strings.TrimSpace(label))
labels = append(labels, label)
}
}
return
}
// getLabelsFromGenericMatches returns the "/label" and "/remove-label"
// targets that appear in additionalLabels (lowercased); targets not in the
// configured set are appended to invalidLabels. Returns nil when no
// additional labels are configured.
func getLabelsFromGenericMatches(matches [][]string, additionalLabels []string, invalidLabels *[]string) []string {
	if len(additionalLabels) == 0 {
		return nil
	}

	allowed := sets.String{}
	for _, l := range additionalLabels {
		allowed.Insert(strings.ToLower(l))
	}

	var labels []string
	for _, match := range matches {
		// Trim first so a trailing \r from CRLF comment bodies is dropped.
		fields := strings.Split(strings.TrimSpace(match[0]), " ")
		isLabelCmd := fields[0] == "/label" || fields[0] == "/remove-label"
		// Only single-target commands are accepted.
		if !isLabelCmd || len(fields) != 2 {
			continue
		}
		candidate := strings.ToLower(fields[1])
		if allowed.Has(candidate) {
			labels = append(labels, candidate)
		} else {
			*invalidLabels = append(*invalidLabels, candidate)
		}
	}
	return labels
}
// handle applies every add/remove label command found in the comment body
// and posts a feedback comment for the first category of failures found.
// NOTE(review): the "| {" on the signature line below is a dataset
// fill-in-the-middle separator, not Go source; the real code reads "error {".
func handle(gc githubClient, log *logrus.Entry, additionalLabels,
prefixes, excludeLabels []string, e *github.IssueCommentEvent) error | {
// Arrange prefixes in the format "sig|kind|priority|...",
// so that they can be used to create labelRegex and removeLabelRegex.
labelPrefixes := strings.Join(prefixes, "|")
labelRegex, err := regexp.Compile(fmt.Sprintf(labelRegexp, labelPrefixes))
if err != nil {
return err
}
removeLabelRegex, err := regexp.Compile(fmt.Sprintf(removeLabelRegexp, labelPrefixes))
if err != nil {
return err
}
labelMatches := labelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
removeLabelMatches := removeLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customLabelMatches := customLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customRemoveLabelMatches := customRemoveLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
// No label command in the comment: nothing to do.
if len(labelMatches) == 0 && len(removeLabelMatches) == 0 &&
len(customLabelMatches) == 0 && len(customRemoveLabelMatches) == 0 {
return nil
}
org := e.Repo.Owner.Login
repo := e.Repo.Name
repoLabels, err := gc.GetRepoLabels(org, repo)
if err != nil {
return err
}
issueLabels, err := gc.GetIssueLabels(org, repo, e.Issue.Number)
if err != nil {
return err
}
// Index existing repo labels by lowercased name so commands are
// case-insensitive while the original casing is kept for the API.
repoExistingLabels := map[string]string{}
for _, l := range repoLabels {
repoExistingLabels[strings.ToLower(l.Name)] = l.Name
}
excludeLabelsSet := sets.NewString()
for _, l := range excludeLabels {
excludeLabelsSet.Insert(strings.ToLower(l))
}
var (
nonexistent []string
noSuchLabelsInRepo []string
noSuchLabelsOnIssue []string
labelsToAdd []string
labelsToRemove []string
)
// Get labels to add and labels to remove from the RegExp matches.
// Notice: The returned label is lowercase.
labelsToAdd = append(getLabelsFromREMatches(labelMatches),
getLabelsFromGenericMatches(customLabelMatches, additionalLabels, &nonexistent)...)
labelsToRemove = append(getLabelsFromREMatches(removeLabelMatches),
getLabelsFromGenericMatches(customRemoveLabelMatches, additionalLabels, &nonexistent)...)
// Add labels.
for _, labelToAdd := range labelsToAdd {
if github.HasLabel(labelToAdd, issueLabels) {
continue
}
if _, ok := repoExistingLabels[labelToAdd]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToAdd)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToAdd) {
log.Infof("Ignore add exclude label: %s.", labelToAdd)
continue
}
// Per-label API failures are logged but do not abort the batch.
if err := gc.AddLabel(org, repo, e.Issue.Number, repoExistingLabels[labelToAdd]); err != nil {
log.WithError(err).Errorf("Github failed to add the following label: %s", labelToAdd)
}
}
// Remove labels.
for _, labelToRemove := range labelsToRemove {
if !github.HasLabel(labelToRemove, issueLabels) {
noSuchLabelsOnIssue = append(noSuchLabelsOnIssue, labelToRemove)
continue
}
if _, ok := repoExistingLabels[labelToRemove]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToRemove)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToRemove) {
log.Infof("Ignore remove exclude label: %s", labelToRemove)
continue
}
// NOTE(review): AddLabel maps back to the repo's original casing via
// repoExistingLabels, but RemoveLabel passes the lowercased name —
// confirm the GitHub API accepts case-insensitive label removal.
if err := gc.RemoveLabel(org, repo, e.Issue.Number, labelToRemove); err != nil {
log.WithError(err).Errorf("Github failed to remove the following label: %s", labelToRemove)
}
}
// Only the first failure category below results in a feedback comment;
// the remaining categories are silently dropped for this event.
// Tried to add/remove labels that were not in the configuration.
if len(nonexistent) > 0 {
log.Infof("Nonexistent labels: %v", nonexistent)
msg := fmt.Sprintf(nonExistentAdditionalLabels, strings.Join(nonexistent, ", "),
strings.Join(additionalLabels, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to add labels that were not present in the repository.
if len(noSuchLabelsInRepo) > 0 {
log.Infof("Labels missing in repo: %v", noSuchLabelsInRepo)
msg := fmt.Sprintf(nonExistentLabelInRepo, strings.Join(noSuchLabelsInRepo, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to remove labels that were not present on the issue.
if len(noSuchLabelsOnIssue) > 0 {
msg := fmt.Sprintf(nonExistentLabelOnIssue, strings.Join(noSuchLabelsOnIssue, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
return nil
}
label.go | package label
import (
"fmt"
"regexp"
"strings"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
)
const PluginName = "ti-community-label"
var (
labelRegexp = `(?m)^/(%s)\s*(.*)$`
removeLabelRegexp = `(?m)^/remove-(%s)\s*(.*)$`
customLabelRegex = regexp.MustCompile(`(?m)^/label\s*(.*)$`)
customRemoveLabelRegex = regexp.MustCompile(`(?m)^/remove-label\s*(.*)$`)
nonExistentAdditionalLabels = "The label(s) `%s` cannot be applied. These labels are supported: `%s`."
nonExistentLabelInRepo = "The label(s) `%s` cannot be applied, because the repository doesn't have them."
nonExistentLabelOnIssue = "These labels are not set on the issue: `%v`."
)
// githubClient is the narrow slice of the GitHub API this plugin needs,
// declared as an interface so tests can substitute a fake.
type githubClient interface {
CreateComment(owner, repo string, number int, comment string) error
AddLabel(owner, repo string, number int, label string) error
RemoveLabel(owner, repo string, number int, label string) error
GetRepoLabels(owner, repo string) ([]github.Label, error)
GetIssueLabels(org, repo string, number int) ([]github.Label, error)
}
// HelpProvider constructs the PluginHelp for this plugin that takes into account enabled repositories.
// The returned closure is invoked by the external-plugin help framework and
// renders the effective per-repo configuration plus command usage.
func HelpProvider(epa *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
labelConfig := map[string]string{}
cfg := epa.Config()
// Summarize each enabled repo's prefix / additional / exclude options.
for _, repo := range enabledRepos {
opts := cfg.LabelFor(repo.Org, repo.Repo)
var prefixConfigMsg, additionalLabelsConfigMsg, excludeLabelsConfigMsg string
if opts.Prefixes != nil {
prefixConfigMsg = fmt.Sprintf("The label plugin includes commands based on %v prefixes.\n", opts.Prefixes)
}
if opts.AdditionalLabels != nil {
additionalLabelsConfigMsg = fmt.Sprintf("%v labels can be used with the `/[remove-]label` command.\n",
opts.AdditionalLabels)
}
if opts.ExcludeLabels != nil {
excludeLabelsConfigMsg = fmt.Sprintf("%v labels cannot be added by command.\n",
opts.ExcludeLabels)
}
labelConfig[repo.String()] = prefixConfigMsg + additionalLabelsConfigMsg + excludeLabelsConfigMsg
}
// Example configuration snippet shown in the generated help page.
yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
TiCommunityLabel: []tiexternalplugins.TiCommunityLabel{
{
Repos: []string{"ti-community-infra/test-dev"},
AdditionalLabels: []string{"needs-cherry-pick-1.1", "needs-cherry-pick-1.0"},
Prefixes: []string{"type", "status"},
ExcludeLabels: []string{"stats/can-merge"},
},
},
})
// Snippet generation failure is non-fatal: log and continue with help.
if err != nil {
logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
}
pluginHelp := &pluginhelp.PluginHelp{
Description: "The label plugin provides commands that add or remove certain types of labels. " +
"For example, the labels like 'status/*', 'sig/*' and bare labels can be " +
"managed by using `/status`, `/sig` and `/label`.",
Config: labelConfig,
Snippet: yamlSnippet,
Events: []string{tiexternalplugins.IssueCommentEvent},
}
pluginHelp.AddCommand(pluginhelp.Command{
Usage: "/[remove-](status|sig|type|label|component) <target>",
Description: "Add or remove a label of the given type.",
Featured: false,
WhoCanUse: "Everyone can trigger this command.",
Examples: []string{"/type bug", "/remove-sig engine", "/sig engine"},
})
return pluginHelp, nil
}
}
// HandleIssueCommentEvent processes a GitHub issue-comment event for the
// label plugin: it resolves the per-repo options and delegates to handle.
func HandleIssueCommentEvent(gc githubClient, ice *github.IssueCommentEvent,
	cfg *tiexternalplugins.Configuration, log *logrus.Entry) error {
	opts := cfg.LabelFor(ice.Repo.Owner.Login, ice.Repo.Name)

	// Assigning from a nil slice yields a nil slice, so the option fields
	// can be passed straight through; handle treats nil and empty alike.
	additionalLabels := opts.AdditionalLabels
	prefixes := opts.Prefixes
	excludeLabels := opts.ExcludeLabels

	return handle(gc, log, additionalLabels, prefixes, excludeLabels, ice)
}
// Get labels from RegExp matches.
func getLabelsFromREMatches(matches [][]string) (labels []string) {
for _, match := range matches {
parts := strings.Split(strings.TrimSpace(match[0]), " ")
for _, label := range parts[1:] {
// Filter out invisible characters that may be matched.
if len(strings.TrimSpace(label)) == 0 {
continue
}
label = strings.ToLower(match[1] + "/" + strings.TrimSpace(label))
labels = append(labels, label)
}
}
return
}
// getLabelsFromGenericMatches returns the "/label" and "/remove-label"
// targets that appear in additionalLabels (lowercased); targets not in the
// configured set are appended to invalidLabels. Returns nil when no
// additional labels are configured.
func getLabelsFromGenericMatches(matches [][]string, additionalLabels []string, invalidLabels *[]string) []string {
	if len(additionalLabels) == 0 {
		return nil
	}

	allowed := sets.String{}
	for _, l := range additionalLabels {
		allowed.Insert(strings.ToLower(l))
	}

	var labels []string
	for _, match := range matches {
		// Trim first so a trailing \r from CRLF comment bodies is dropped.
		fields := strings.Split(strings.TrimSpace(match[0]), " ")
		isLabelCmd := fields[0] == "/label" || fields[0] == "/remove-label"
		// Only single-target commands are accepted.
		if !isLabelCmd || len(fields) != 2 {
			continue
		}
		candidate := strings.ToLower(fields[1])
		if allowed.Has(candidate) {
			labels = append(labels, candidate)
		} else {
			*invalidLabels = append(*invalidLabels, candidate)
		}
	}
	return labels
}
func handle(gc githubClient, log *logrus.Entry, additionalLabels,
prefixes, excludeLabels []string, e *github.IssueCommentEvent) error {
// Arrange prefixes in the format "sig|kind|priority|...",
// so that they can be used to create labelRegex and removeLabelRegex.
labelPrefixes := strings.Join(prefixes, "|")
labelRegex, err := regexp.Compile(fmt.Sprintf(labelRegexp, labelPrefixes))
if err != nil {
return err
}
removeLabelRegex, err := regexp.Compile(fmt.Sprintf(removeLabelRegexp, labelPrefixes))
if err != nil {
return err
}
labelMatches := labelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
removeLabelMatches := removeLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customLabelMatches := customLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customRemoveLabelMatches := customRemoveLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
if len(labelMatches) == 0 && len(removeLabelMatches) == 0 &&
len(customLabelMatches) == 0 && len(customRemoveLabelMatches) == 0 {
return nil
}
org := e.Repo.Owner.Login
repo := e.Repo.Name
repoLabels, err := gc.GetRepoLabels(org, repo)
if err != nil {
return err
}
issueLabels, err := gc.GetIssueLabels(org, repo, e.Issue.Number)
if err != nil {
return err
}
repoExistingLabels := map[string]string{}
for _, l := range repoLabels {
repoExistingLabels[strings.ToLower(l.Name)] = l.Name
}
excludeLabelsSet := sets.NewString()
for _, l := range excludeLabels {
excludeLabelsSet.Insert(strings.ToLower(l))
} | noSuchLabelsOnIssue []string
labelsToAdd []string
labelsToRemove []string
)
// Get labels to add and labels to remove from the RegExp matches.
// Notice: The returned label is lowercase.
labelsToAdd = append(getLabelsFromREMatches(labelMatches),
getLabelsFromGenericMatches(customLabelMatches, additionalLabels, &nonexistent)...)
labelsToRemove = append(getLabelsFromREMatches(removeLabelMatches),
getLabelsFromGenericMatches(customRemoveLabelMatches, additionalLabels, &nonexistent)...)
// Add labels.
for _, labelToAdd := range labelsToAdd {
if github.HasLabel(labelToAdd, issueLabels) {
continue
}
if _, ok := repoExistingLabels[labelToAdd]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToAdd)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToAdd) {
log.Infof("Ignore add exclude label: %s.", labelToAdd)
continue
}
if err := gc.AddLabel(org, repo, e.Issue.Number, repoExistingLabels[labelToAdd]); err != nil {
log.WithError(err).Errorf("Github failed to add the following label: %s", labelToAdd)
}
}
// Remove labels.
for _, labelToRemove := range labelsToRemove {
if !github.HasLabel(labelToRemove, issueLabels) {
noSuchLabelsOnIssue = append(noSuchLabelsOnIssue, labelToRemove)
continue
}
if _, ok := repoExistingLabels[labelToRemove]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToRemove)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToRemove) {
log.Infof("Ignore remove exclude label: %s", labelToRemove)
continue
}
if err := gc.RemoveLabel(org, repo, e.Issue.Number, labelToRemove); err != nil {
log.WithError(err).Errorf("Github failed to remove the following label: %s", labelToRemove)
}
}
// Tried to add/remove labels that were not in the configuration.
if len(nonexistent) > 0 {
log.Infof("Nonexistent labels: %v", nonexistent)
msg := fmt.Sprintf(nonExistentAdditionalLabels, strings.Join(nonexistent, ", "),
strings.Join(additionalLabels, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to add labels that were not present in the repository.
if len(noSuchLabelsInRepo) > 0 {
log.Infof("Labels missing in repo: %v", noSuchLabelsInRepo)
msg := fmt.Sprintf(nonExistentLabelInRepo, strings.Join(noSuchLabelsInRepo, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to remove labels that were not present on the issue.
if len(noSuchLabelsOnIssue) > 0 {
msg := fmt.Sprintf(nonExistentLabelOnIssue, strings.Join(noSuchLabelsOnIssue, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
return nil
} |
var (
nonexistent []string
noSuchLabelsInRepo []string | random_line_split |
label.go | package label
import (
"fmt"
"regexp"
"strings"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
)
const PluginName = "ti-community-label"
var (
labelRegexp = `(?m)^/(%s)\s*(.*)$`
removeLabelRegexp = `(?m)^/remove-(%s)\s*(.*)$`
customLabelRegex = regexp.MustCompile(`(?m)^/label\s*(.*)$`)
customRemoveLabelRegex = regexp.MustCompile(`(?m)^/remove-label\s*(.*)$`)
nonExistentAdditionalLabels = "The label(s) `%s` cannot be applied. These labels are supported: `%s`."
nonExistentLabelInRepo = "The label(s) `%s` cannot be applied, because the repository doesn't have them."
nonExistentLabelOnIssue = "These labels are not set on the issue: `%v`."
)
// githubClient is the narrow slice of the GitHub API this plugin needs,
// declared as an interface so tests can substitute a fake.
type githubClient interface {
CreateComment(owner, repo string, number int, comment string) error
AddLabel(owner, repo string, number int, label string) error
RemoveLabel(owner, repo string, number int, label string) error
GetRepoLabels(owner, repo string) ([]github.Label, error)
GetIssueLabels(org, repo string, number int) ([]github.Label, error)
}
// HelpProvider constructs the PluginHelp for this plugin that takes into account enabled repositories.
// The returned closure is invoked by the external-plugin help framework and
// renders the effective per-repo configuration plus command usage.
func HelpProvider(epa *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
labelConfig := map[string]string{}
cfg := epa.Config()
// Summarize each enabled repo's prefix / additional / exclude options.
for _, repo := range enabledRepos {
opts := cfg.LabelFor(repo.Org, repo.Repo)
var prefixConfigMsg, additionalLabelsConfigMsg, excludeLabelsConfigMsg string
if opts.Prefixes != nil {
prefixConfigMsg = fmt.Sprintf("The label plugin includes commands based on %v prefixes.\n", opts.Prefixes)
}
if opts.AdditionalLabels != nil {
additionalLabelsConfigMsg = fmt.Sprintf("%v labels can be used with the `/[remove-]label` command.\n",
opts.AdditionalLabels)
}
if opts.ExcludeLabels != nil {
excludeLabelsConfigMsg = fmt.Sprintf("%v labels cannot be added by command.\n",
opts.ExcludeLabels)
}
labelConfig[repo.String()] = prefixConfigMsg + additionalLabelsConfigMsg + excludeLabelsConfigMsg
}
// Example configuration snippet shown in the generated help page.
yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
TiCommunityLabel: []tiexternalplugins.TiCommunityLabel{
{
Repos: []string{"ti-community-infra/test-dev"},
AdditionalLabels: []string{"needs-cherry-pick-1.1", "needs-cherry-pick-1.0"},
Prefixes: []string{"type", "status"},
ExcludeLabels: []string{"stats/can-merge"},
},
},
})
// Snippet generation failure is non-fatal: log and continue with help.
if err != nil {
logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
}
pluginHelp := &pluginhelp.PluginHelp{
Description: "The label plugin provides commands that add or remove certain types of labels. " +
"For example, the labels like 'status/*', 'sig/*' and bare labels can be " +
"managed by using `/status`, `/sig` and `/label`.",
Config: labelConfig,
Snippet: yamlSnippet,
Events: []string{tiexternalplugins.IssueCommentEvent},
}
pluginHelp.AddCommand(pluginhelp.Command{
Usage: "/[remove-](status|sig|type|label|component) <target>",
Description: "Add or remove a label of the given type.",
Featured: false,
WhoCanUse: "Everyone can trigger this command.",
Examples: []string{"/type bug", "/remove-sig engine", "/sig engine"},
})
return pluginHelp, nil
}
}
func HandleIssueCommentEvent(gc githubClient, ice *github.IssueCommentEvent,
cfg *tiexternalplugins.Configuration, log *logrus.Entry) error {
opts := cfg.LabelFor(ice.Repo.Owner.Login, ice.Repo.Name)
var additionalLabels []string
var prefixes []string
var excludeLabels []string
if opts.AdditionalLabels != nil {
additionalLabels = opts.AdditionalLabels
}
if opts.Prefixes != nil {
prefixes = opts.Prefixes
}
if opts.ExcludeLabels != nil {
excludeLabels = opts.ExcludeLabels
}
return handle(gc, log, additionalLabels, prefixes, excludeLabels, ice)
}
// Get labels from RegExp matches.
func getLabelsFromREMatches(matches [][]string) (labels []string) {
for _, match := range matches {
parts := strings.Split(strings.TrimSpace(match[0]), " ")
for _, label := range parts[1:] {
// Filter out invisible characters that may be matched.
if len(strings.TrimSpace(label)) == 0 {
continue
}
label = strings.ToLower(match[1] + "/" + strings.TrimSpace(label))
labels = append(labels, label)
}
}
return
}
// getLabelsFromGenericMatches returns label matches with extra labels if those
// have been configured in the plugin config.
func getLabelsFromGenericMatches(matches [][]string, additionalLabels []string, invalidLabels *[]string) []string {
if len(additionalLabels) == 0 {
return nil
}
var labels []string
labelFilter := sets.String{}
for _, l := range additionalLabels {
labelFilter.Insert(strings.ToLower(l))
}
for _, match := range matches {
// Use trim to filter out \r characters that may be matched.
parts := strings.Split(strings.TrimSpace(match[0]), " ")
if ((parts[0] != "/label") && (parts[0] != "/remove-label")) || len(parts) != 2 {
continue
}
label := strings.ToLower(parts[1])
if labelFilter.Has(label) {
labels = append(labels, label)
} else {
*invalidLabels = append(*invalidLabels, label)
}
}
return labels
}
func | (gc githubClient, log *logrus.Entry, additionalLabels,
prefixes, excludeLabels []string, e *github.IssueCommentEvent) error {
// Arrange prefixes in the format "sig|kind|priority|...",
// so that they can be used to create labelRegex and removeLabelRegex.
labelPrefixes := strings.Join(prefixes, "|")
labelRegex, err := regexp.Compile(fmt.Sprintf(labelRegexp, labelPrefixes))
if err != nil {
return err
}
removeLabelRegex, err := regexp.Compile(fmt.Sprintf(removeLabelRegexp, labelPrefixes))
if err != nil {
return err
}
labelMatches := labelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
removeLabelMatches := removeLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customLabelMatches := customLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customRemoveLabelMatches := customRemoveLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
if len(labelMatches) == 0 && len(removeLabelMatches) == 0 &&
len(customLabelMatches) == 0 && len(customRemoveLabelMatches) == 0 {
return nil
}
org := e.Repo.Owner.Login
repo := e.Repo.Name
repoLabels, err := gc.GetRepoLabels(org, repo)
if err != nil {
return err
}
issueLabels, err := gc.GetIssueLabels(org, repo, e.Issue.Number)
if err != nil {
return err
}
repoExistingLabels := map[string]string{}
for _, l := range repoLabels {
repoExistingLabels[strings.ToLower(l.Name)] = l.Name
}
excludeLabelsSet := sets.NewString()
for _, l := range excludeLabels {
excludeLabelsSet.Insert(strings.ToLower(l))
}
var (
nonexistent []string
noSuchLabelsInRepo []string
noSuchLabelsOnIssue []string
labelsToAdd []string
labelsToRemove []string
)
// Get labels to add and labels to remove from the RegExp matches.
// Notice: The returned label is lowercase.
labelsToAdd = append(getLabelsFromREMatches(labelMatches),
getLabelsFromGenericMatches(customLabelMatches, additionalLabels, &nonexistent)...)
labelsToRemove = append(getLabelsFromREMatches(removeLabelMatches),
getLabelsFromGenericMatches(customRemoveLabelMatches, additionalLabels, &nonexistent)...)
// Add labels.
for _, labelToAdd := range labelsToAdd {
if github.HasLabel(labelToAdd, issueLabels) {
continue
}
if _, ok := repoExistingLabels[labelToAdd]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToAdd)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToAdd) {
log.Infof("Ignore add exclude label: %s.", labelToAdd)
continue
}
if err := gc.AddLabel(org, repo, e.Issue.Number, repoExistingLabels[labelToAdd]); err != nil {
log.WithError(err).Errorf("Github failed to add the following label: %s", labelToAdd)
}
}
// Remove labels.
for _, labelToRemove := range labelsToRemove {
if !github.HasLabel(labelToRemove, issueLabels) {
noSuchLabelsOnIssue = append(noSuchLabelsOnIssue, labelToRemove)
continue
}
if _, ok := repoExistingLabels[labelToRemove]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToRemove)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToRemove) {
log.Infof("Ignore remove exclude label: %s", labelToRemove)
continue
}
if err := gc.RemoveLabel(org, repo, e.Issue.Number, labelToRemove); err != nil {
log.WithError(err).Errorf("Github failed to remove the following label: %s", labelToRemove)
}
}
// Tried to add/remove labels that were not in the configuration.
if len(nonexistent) > 0 {
log.Infof("Nonexistent labels: %v", nonexistent)
msg := fmt.Sprintf(nonExistentAdditionalLabels, strings.Join(nonexistent, ", "),
strings.Join(additionalLabels, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to add labels that were not present in the repository.
if len(noSuchLabelsInRepo) > 0 {
log.Infof("Labels missing in repo: %v", noSuchLabelsInRepo)
msg := fmt.Sprintf(nonExistentLabelInRepo, strings.Join(noSuchLabelsInRepo, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to remove labels that were not present on the issue.
if len(noSuchLabelsOnIssue) > 0 {
msg := fmt.Sprintf(nonExistentLabelOnIssue, strings.Join(noSuchLabelsOnIssue, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
return nil
}
| handle | identifier_name |
label.go | package label
import (
"fmt"
"regexp"
"strings"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
)
const PluginName = "ti-community-label"
var (
labelRegexp = `(?m)^/(%s)\s*(.*)$`
removeLabelRegexp = `(?m)^/remove-(%s)\s*(.*)$`
customLabelRegex = regexp.MustCompile(`(?m)^/label\s*(.*)$`)
customRemoveLabelRegex = regexp.MustCompile(`(?m)^/remove-label\s*(.*)$`)
nonExistentAdditionalLabels = "The label(s) `%s` cannot be applied. These labels are supported: `%s`."
nonExistentLabelInRepo = "The label(s) `%s` cannot be applied, because the repository doesn't have them."
nonExistentLabelOnIssue = "These labels are not set on the issue: `%v`."
)
type githubClient interface {
CreateComment(owner, repo string, number int, comment string) error
AddLabel(owner, repo string, number int, label string) error
RemoveLabel(owner, repo string, number int, label string) error
GetRepoLabels(owner, repo string) ([]github.Label, error)
GetIssueLabels(org, repo string, number int) ([]github.Label, error)
}
// HelpProvider constructs the PluginHelp for this plugin that takes into account enabled repositories.
// HelpProvider defines the type for function that construct the PluginHelp for plugins.
func HelpProvider(epa *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
labelConfig := map[string]string{}
cfg := epa.Config()
for _, repo := range enabledRepos {
opts := cfg.LabelFor(repo.Org, repo.Repo)
var prefixConfigMsg, additionalLabelsConfigMsg, excludeLabelsConfigMsg string
if opts.Prefixes != nil {
prefixConfigMsg = fmt.Sprintf("The label plugin includes commands based on %v prefixes.\n", opts.Prefixes)
}
if opts.AdditionalLabels != nil |
if opts.ExcludeLabels != nil {
excludeLabelsConfigMsg = fmt.Sprintf("%v labels cannot be added by command.\n",
opts.ExcludeLabels)
}
labelConfig[repo.String()] = prefixConfigMsg + additionalLabelsConfigMsg + excludeLabelsConfigMsg
}
yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
TiCommunityLabel: []tiexternalplugins.TiCommunityLabel{
{
Repos: []string{"ti-community-infra/test-dev"},
AdditionalLabels: []string{"needs-cherry-pick-1.1", "needs-cherry-pick-1.0"},
Prefixes: []string{"type", "status"},
ExcludeLabels: []string{"stats/can-merge"},
},
},
})
if err != nil {
logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
}
pluginHelp := &pluginhelp.PluginHelp{
Description: "The label plugin provides commands that add or remove certain types of labels. " +
"For example, the labels like 'status/*', 'sig/*' and bare labels can be " +
"managed by using `/status`, `/sig` and `/label`.",
Config: labelConfig,
Snippet: yamlSnippet,
Events: []string{tiexternalplugins.IssueCommentEvent},
}
pluginHelp.AddCommand(pluginhelp.Command{
Usage: "/[remove-](status|sig|type|label|component) <target>",
Description: "Add or remove a label of the given type.",
Featured: false,
WhoCanUse: "Everyone can trigger this command.",
Examples: []string{"/type bug", "/remove-sig engine", "/sig engine"},
})
return pluginHelp, nil
}
}
func HandleIssueCommentEvent(gc githubClient, ice *github.IssueCommentEvent,
cfg *tiexternalplugins.Configuration, log *logrus.Entry) error {
opts := cfg.LabelFor(ice.Repo.Owner.Login, ice.Repo.Name)
var additionalLabels []string
var prefixes []string
var excludeLabels []string
if opts.AdditionalLabels != nil {
additionalLabels = opts.AdditionalLabels
}
if opts.Prefixes != nil {
prefixes = opts.Prefixes
}
if opts.ExcludeLabels != nil {
excludeLabels = opts.ExcludeLabels
}
return handle(gc, log, additionalLabels, prefixes, excludeLabels, ice)
}
// Get labels from RegExp matches.
func getLabelsFromREMatches(matches [][]string) (labels []string) {
for _, match := range matches {
parts := strings.Split(strings.TrimSpace(match[0]), " ")
for _, label := range parts[1:] {
// Filter out invisible characters that may be matched.
if len(strings.TrimSpace(label)) == 0 {
continue
}
label = strings.ToLower(match[1] + "/" + strings.TrimSpace(label))
labels = append(labels, label)
}
}
return
}
// getLabelsFromGenericMatches returns label matches with extra labels if those
// have been configured in the plugin config.
func getLabelsFromGenericMatches(matches [][]string, additionalLabels []string, invalidLabels *[]string) []string {
if len(additionalLabels) == 0 {
return nil
}
var labels []string
labelFilter := sets.String{}
for _, l := range additionalLabels {
labelFilter.Insert(strings.ToLower(l))
}
for _, match := range matches {
// Use trim to filter out \r characters that may be matched.
parts := strings.Split(strings.TrimSpace(match[0]), " ")
if ((parts[0] != "/label") && (parts[0] != "/remove-label")) || len(parts) != 2 {
continue
}
label := strings.ToLower(parts[1])
if labelFilter.Has(label) {
labels = append(labels, label)
} else {
*invalidLabels = append(*invalidLabels, label)
}
}
return labels
}
func handle(gc githubClient, log *logrus.Entry, additionalLabels,
prefixes, excludeLabels []string, e *github.IssueCommentEvent) error {
// Arrange prefixes in the format "sig|kind|priority|...",
// so that they can be used to create labelRegex and removeLabelRegex.
labelPrefixes := strings.Join(prefixes, "|")
labelRegex, err := regexp.Compile(fmt.Sprintf(labelRegexp, labelPrefixes))
if err != nil {
return err
}
removeLabelRegex, err := regexp.Compile(fmt.Sprintf(removeLabelRegexp, labelPrefixes))
if err != nil {
return err
}
labelMatches := labelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
removeLabelMatches := removeLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customLabelMatches := customLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
customRemoveLabelMatches := customRemoveLabelRegex.FindAllStringSubmatch(e.Comment.Body, -1)
if len(labelMatches) == 0 && len(removeLabelMatches) == 0 &&
len(customLabelMatches) == 0 && len(customRemoveLabelMatches) == 0 {
return nil
}
org := e.Repo.Owner.Login
repo := e.Repo.Name
repoLabels, err := gc.GetRepoLabels(org, repo)
if err != nil {
return err
}
issueLabels, err := gc.GetIssueLabels(org, repo, e.Issue.Number)
if err != nil {
return err
}
repoExistingLabels := map[string]string{}
for _, l := range repoLabels {
repoExistingLabels[strings.ToLower(l.Name)] = l.Name
}
excludeLabelsSet := sets.NewString()
for _, l := range excludeLabels {
excludeLabelsSet.Insert(strings.ToLower(l))
}
var (
nonexistent []string
noSuchLabelsInRepo []string
noSuchLabelsOnIssue []string
labelsToAdd []string
labelsToRemove []string
)
// Get labels to add and labels to remove from the RegExp matches.
// Notice: The returned label is lowercase.
labelsToAdd = append(getLabelsFromREMatches(labelMatches),
getLabelsFromGenericMatches(customLabelMatches, additionalLabels, &nonexistent)...)
labelsToRemove = append(getLabelsFromREMatches(removeLabelMatches),
getLabelsFromGenericMatches(customRemoveLabelMatches, additionalLabels, &nonexistent)...)
// Add labels.
for _, labelToAdd := range labelsToAdd {
if github.HasLabel(labelToAdd, issueLabels) {
continue
}
if _, ok := repoExistingLabels[labelToAdd]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToAdd)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToAdd) {
log.Infof("Ignore add exclude label: %s.", labelToAdd)
continue
}
if err := gc.AddLabel(org, repo, e.Issue.Number, repoExistingLabels[labelToAdd]); err != nil {
log.WithError(err).Errorf("Github failed to add the following label: %s", labelToAdd)
}
}
// Remove labels.
for _, labelToRemove := range labelsToRemove {
if !github.HasLabel(labelToRemove, issueLabels) {
noSuchLabelsOnIssue = append(noSuchLabelsOnIssue, labelToRemove)
continue
}
if _, ok := repoExistingLabels[labelToRemove]; !ok {
noSuchLabelsInRepo = append(noSuchLabelsInRepo, labelToRemove)
continue
}
// Ignore the exclude label.
if excludeLabelsSet.Has(labelToRemove) {
log.Infof("Ignore remove exclude label: %s", labelToRemove)
continue
}
if err := gc.RemoveLabel(org, repo, e.Issue.Number, labelToRemove); err != nil {
log.WithError(err).Errorf("Github failed to remove the following label: %s", labelToRemove)
}
}
// Tried to add/remove labels that were not in the configuration.
if len(nonexistent) > 0 {
log.Infof("Nonexistent labels: %v", nonexistent)
msg := fmt.Sprintf(nonExistentAdditionalLabels, strings.Join(nonexistent, ", "),
strings.Join(additionalLabels, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to add labels that were not present in the repository.
if len(noSuchLabelsInRepo) > 0 {
log.Infof("Labels missing in repo: %v", noSuchLabelsInRepo)
msg := fmt.Sprintf(nonExistentLabelInRepo, strings.Join(noSuchLabelsInRepo, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
// Tried to remove labels that were not present on the issue.
if len(noSuchLabelsOnIssue) > 0 {
msg := fmt.Sprintf(nonExistentLabelOnIssue, strings.Join(noSuchLabelsOnIssue, ", "))
msg = tiexternalplugins.FormatResponseRaw(e.Comment.Body, e.Comment.HTMLURL, e.Comment.User.Login, msg)
return gc.CreateComment(org, repo, e.Issue.Number, msg)
}
return nil
}
| {
additionalLabelsConfigMsg = fmt.Sprintf("%v labels can be used with the `/[remove-]label` command.\n",
opts.AdditionalLabels)
} | conditional_block |
iphelper.go | package iphelper
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
const (
HEADER_LENGTH = 4
BODYLINE_LENGTH = 20
)
const (
AREA_COUNTRY = "country"
AREA_PROVINCE = "province"
AREA_CITY = "city"
AREA_ZONE = "zone"
AREA_LOCATION = "location"
AREA_OPERATOR = "operator"
)
// 获取ip段信息
type IpRow struct {
Start uint32
End uint32
Country uint16
Province uint16
City uint16
Zone uint16
Location uint16
Operator uint16
}
type IpStore struct {
bodyLength uint32
metaLength uint32
headerBuffer []byte
bodyBuffer []byte
metaBuffer []byte
IpTable []IpRow // ip信息表 按范围自增
metaTable map[string][]string
}
func NewIpStore(filename string) *IpStore {
store := IpStore{headerBuffer: make([]byte, HEADER_LENGTH), metaTable: make(map[string][]string)}
store.parseStore(filename)
return &store
}
// 获取ip的位置信息
func (this *IpStore) GetGeoByIp(ipSearch string) (location map[string]string, err error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return location, err
}
location, err = this.parseIpGeo(row)
return location, err
}
// 获取ip的区域编码
func (this *IpStore) GetGeocodeByIp(ipSearch string) (uint64, error) {
ro | .searchIpRow(ipSearch)
if err != nil {
return 0, err
}
areacode := this.getGeocodeByRow(row)
codeUint64, err := strconv.ParseUint(areacode, 10, 64)
if err != nil {
return 0, err
}
return codeUint64, nil
}
func (this *IpStore) GetGeoByGeocode(areacode uint64) map[string]string {
result := map[string]string{}
result[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][areacode%100]
areacode /= 100
result[AREA_LOCATION] = this.metaTable[AREA_LOCATION][areacode%100]
areacode /= 100
result[AREA_ZONE] = this.metaTable[AREA_ZONE][areacode%10000]
areacode /= 10000
result[AREA_CITY] = this.metaTable[AREA_CITY][areacode%10000]
areacode /= 10000
result[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][areacode%10000]
areacode /= 10000
result[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][areacode%10000]
return result
}
// 获取ip的区域信息列表
func (this *IpStore) GetMetaTable() map[string][]string {
return this.metaTable
}
// 获取ip所在ip段的信息
func (this *IpStore) searchIpRow(ipSearch string) (row IpRow, err error) {
search := uint32(IP2Num(ipSearch))
// fmt.Println(search)
var start uint32 = 0
var end uint32 = uint32(len(this.IpTable) - 1)
var offset uint32 = 0
for start <= end {
mid := uint32(math.Floor(float64((end - start) / 2)))
offset = start + mid
IpRow := this.IpTable[offset]
// fmt.Println(IpRow)
if search >= IpRow.Start {
if search <= IpRow.End {
return IpRow, nil
} else {
start = offset + 1
continue
}
} else {
end = offset - 1
continue
}
}
return row, errors.New("fail to find")
}
func (this *IpStore) parseStore(filename string) {
file, err := os.Open(filename)
if err != nil {
panic("error opening file: %v\n" + err.Error())
}
defer file.Close()
fmt.Println("open file: ", filename)
var buf [HEADER_LENGTH]byte
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.bodyLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("body length is: ", this.bodyLength)
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.metaLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("meta length is: ", this.metaLength)
if err := this.paseBody(file); err != nil {
panic("parse body failed:" + err.Error())
}
if err := this.parseMeta(file); err != nil {
panic("pase meta failed" + err.Error())
}
}
func (this *IpStore) paseBody(file *os.File) error {
this.bodyBuffer = make([]byte, this.bodyLength)
if _, err := file.ReadAt(this.bodyBuffer, HEADER_LENGTH+HEADER_LENGTH); err != nil {
panic("read body error")
}
buf := bytes.NewBuffer(this.bodyBuffer)
var offset uint32 = 0
for offset < this.bodyLength {
line := buf.Next(BODYLINE_LENGTH)
row, err := this.parseBodyLine(line)
if err != nil {
return err
}
this.IpTable = append(this.IpTable, row)
offset += BODYLINE_LENGTH
}
return nil
}
func (this *IpStore) parseMeta(file *os.File) (err error) {
this.metaBuffer = make([]byte, this.metaLength)
if _, err := file.ReadAt(this.metaBuffer, int64(HEADER_LENGTH+HEADER_LENGTH+this.bodyLength)); err != nil {
panic("read meta error")
}
return json.Unmarshal(this.metaBuffer, &this.metaTable)
}
func (this *IpStore) parseIpGeo(row IpRow) (map[string]string, error) {
geo := make(map[string]string)
geo[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][row.Country]
geo[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][row.Province]
geo[AREA_CITY] = this.metaTable[AREA_CITY][row.City]
geo[AREA_ZONE] = this.metaTable[AREA_ZONE][row.Zone]
geo[AREA_LOCATION] = this.metaTable[AREA_LOCATION][row.Location]
geo[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][row.Operator]
geo["areacode"] = this.getGeocodeByRow(row)
return geo, nil
}
func (this *IpStore) getGeocodeByRow(row IpRow) string {
countryCode := strconv.Itoa(int(row.Country))
provinceCode := fmt.Sprintf("%04d", row.Province)
cityCode := fmt.Sprintf("%04d", row.City)
zoneCode := fmt.Sprintf("%04d", row.Zone)
provoderCode := fmt.Sprintf("%02d", row.Location)
OperatorCode := fmt.Sprintf("%02d", row.Operator)
return countryCode + provinceCode + cityCode + zoneCode + provoderCode + OperatorCode
}
// @TODO Parse by Reflect IpRow
func (this *IpStore) parseBodyLine(buffer []byte) (row IpRow, err error) {
buf := bytes.NewBuffer(buffer)
if err = binary.Read(buf, binary.BigEndian, &row.Start); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.End); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Country); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Province); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.City); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Zone); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Location); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Operator); err != nil {
goto fail
}
fail:
return row, err
}
func IP2Num(requestip string) uint64 {
//获取客户端地址的long
nowip := strings.Split(requestip, ".")
if len(nowip) != 4 {
return 0
}
a, _ := strconv.ParseUint(nowip[0], 10, 64)
b, _ := strconv.ParseUint(nowip[1], 10, 64)
c, _ := strconv.ParseUint(nowip[2], 10, 64)
d, _ := strconv.ParseUint(nowip[3], 10, 64)
ipNum := a<<24 | b<<16 | c<<8 | d
return ipNum
}
func Num2IP(ipnum uint64) string {
byte1 := ipnum & 0xff
byte2 := (ipnum & 0xff00)
byte2 >>= 8
byte3 := (ipnum & 0xff0000)
byte3 >>= 16
byte4 := (ipnum & 0xff000000)
byte4 >>= 24
result := strconv.FormatUint(byte4, 10) + "." +
strconv.FormatUint(byte3, 10) + "." +
strconv.FormatUint(byte2, 10) + "." +
strconv.FormatUint(byte1, 10)
return result
}
type datFile struct {
err error
*bytes.Buffer
headerLength int
bodyLength int
geoMap map[string]map[string]uint16
geoSlice map[string][]string
operator map[string]int
writer io.Writer
}
func NewDatFile(w io.Writer) *datFile {
m := map[string]map[string]uint16{
AREA_COUNTRY: make(map[string]uint16),
AREA_PROVINCE: make(map[string]uint16),
AREA_CITY: make(map[string]uint16),
AREA_ZONE: make(map[string]uint16),
AREA_LOCATION: make(map[string]uint16),
AREA_OPERATOR: make(map[string]uint16),
}
return &datFile{
Buffer: bytes.NewBuffer(nil),
geoMap: m,
geoSlice: make(map[string][]string),
writer: bufio.NewWriter(w),
}
}
// get area code by typ
func (d *datFile) getCode(typ string, area string) uint16 {
var code uint16
code, ok := d.geoMap[typ][area]
if !ok {
code = uint16(len(d.geoMap[typ]))
d.geoMap[typ][area] = code
d.geoSlice[typ] = append(d.geoSlice[typ], area)
}
return code
}
// @TODO parse fields by reflect the ip row
func (d *datFile) writeBody(fields []string) error {
if d.err != nil {
return d.err
}
start, _ := strconv.ParseUint(fields[0], 10, 32)
end, _ := strconv.ParseUint(fields[1], 10, 32)
binary.Write(d, binary.BigEndian, uint32(start))
binary.Write(d, binary.BigEndian, uint32(end))
binary.Write(d, binary.BigEndian, d.getCode(AREA_COUNTRY, fields[2]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_PROVINCE, fields[3]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_CITY, fields[4]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_ZONE, fields[5]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_LOCATION, fields[6]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_OPERATOR, fields[7]))
return d.err
}
// bodylength|body|metalength|meta
func (d *datFile) writeFile() error {
if d.err != nil {
return d.err
}
bodyLength := d.Buffer.Len()
meta, err := json.Marshal(d.geoSlice)
if err != nil {
d.err = err
return d.err
}
metaLength := len(meta)
binary.Write(d.writer, binary.BigEndian, uint32(bodyLength))
binary.Write(d.writer, binary.BigEndian, uint32(metaLength))
d.writer.Write(d.Buffer.Bytes())
d.writer.Write(meta)
fmt.Println("meta length is: ", metaLength)
fmt.Println("body length is: ", bodyLength)
return err
}
func MakeDat(infile, outfile string) error {
in, err := os.Open(infile)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(outfile, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 755)
if err != nil {
return err
}
defer out.Close()
output := NewDatFile(out)
r := bufio.NewReader(in)
count := 0
for {
count++
line, err := r.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if len(line) != 0 {
fields := strings.Fields(line)
if len(fields) != 8 {
return errors.New("invalid input file invalid line string")
}
if err := output.writeBody(fields); err != nil {
return err
}
}
if err == io.EOF {
break
}
}
if err := output.writeFile(); err != nil {
return err
}
fmt.Println("amount ip range from ip source: ", count)
return nil
}
| w, err := this | identifier_name |
iphelper.go | package iphelper
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
const (
HEADER_LENGTH = 4
BODYLINE_LENGTH = 20
)
const (
AREA_COUNTRY = "country"
AREA_PROVINCE = "province"
AREA_CITY = "city"
AREA_ZONE = "zone"
AREA_LOCATION = "location"
AREA_OPERATOR = "operator"
)
// 获取ip段信息
type IpRow struct {
Start uint32
End uint32
Country uint16
Province uint16
City uint16
Zone uint16
Location uint16
Operator uint16
}
type IpStore struct {
bodyLength uint32
metaLength uint32
headerBuffer []byte
bodyBuffer []byte
metaBuffer []byte
IpTable []IpRow // ip信息表 按范围自增
metaTable map[string][]string
}
func NewIpStore(filename string) *IpStore {
store := IpStore{headerBuffer: make([]byte, HEADER_LENGTH), metaTable: make(map[string][]string)}
store.parseStore(filename)
return &store
}
// 获取ip的位置信息
func (this *IpStore) GetGeoByIp(ipSearch string) (location map[string]string, err error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return location, err
}
location, err = this.parseIpGeo(row)
return location, err
}
// 获取ip的区域编码
func (this *IpStore) GetGeocodeByIp(ipSearch string) (uint64, error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return 0, err
}
areacode := this.getGeocodeByRow(row)
codeUint64, err := strconv.ParseUint(areacode, 10, 64)
if err != nil {
return 0, err
}
return codeUint64, nil
}
func (this *IpStore) GetGeoByGeocode(areacode uint64) map[string]string {
result := map[string]string{}
result[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][areacode%100]
areacode /= 100
result[AREA_LOCATION] = this.metaTable[AREA_LOCATION][areacode%100]
areacode /= 100
result[AREA_ZONE] = this.metaTable[AREA_ZONE][areacode%10000]
areacode /= 10000
result[AREA_CITY] = this.metaTable[AREA_CITY][areacode%10000]
areacode /= 10000
result[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][areacode%10000]
areacode /= 10000
result[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][areacode%10000]
return result
}
// 获取ip的区域信息列表
func (this *IpStore) GetMetaTable() map[string][]string {
return this.metaTable
}
// 获取ip所在ip段的信息
func (this *IpStore) searchIpRow(ipSearch string) (row IpRow, err error) {
search := uint32(IP2Num(ipSearch))
// fmt.Println(search)
var start uint32 = 0
var end uint32 = uint32(len(this.IpTable) - 1)
var offset uint32 = 0
for start <= end {
mid := uint32(math.Floor(float64((end - start) / 2)))
offset = start + mid
IpRow := this.IpTable[offset]
// fmt.Println(IpRow)
if search >= IpRow.Start {
if search <= IpRow.End {
return IpRow, nil
} else {
start = offset + 1
continue
}
} else {
end = offset - 1
continue
}
}
return row, errors.New("fail to find")
}
func (this *IpStore) parseStore(filename string) {
file, err := os.Open(filename)
if err != nil {
panic("error opening file: %v\n" + err.Error())
}
defer file.Close()
fmt.Println("open file: ", filename)
var buf [HEADER_LENGTH]byte
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.bodyLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("body length is: ", this.bodyLength)
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.metaLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("meta length is: ", this.metaLength)
if err := this.paseBody(file); err != nil {
panic("parse body failed:" + err.Error())
}
if err := this.parseMeta(file); err != nil {
panic("pase meta failed" + err.Error())
}
}
func (this *IpStore) paseBody(file *os.File) error {
this.bodyBuffer = make([]byte, this.bodyLength)
if _, err := file.ReadAt(this.bodyBuffer, HEADER_LENGTH+HEADER_LENGTH); err != nil {
panic("read body error")
}
buf := bytes.NewBuffer(this.bodyBuffer)
var offset uint32 = 0
for offset < this.bodyLength {
line := buf.Next(BODYLINE_LENGTH)
row, err := this.parseBodyLine(line)
if err != nil {
return err
}
this.IpTable = append(this.IpTable, row)
offset += BODYLINE_LENGTH
}
return nil
}
func (this *IpStore) parseMeta(file *os.File) (err error) {
this.metaBuffer = make([]byte, this.metaLength)
if _, err := file.ReadAt(this.metaBuffer, int64(HEADER_LENGTH+HEADER_LENGTH+this.bodyLength)); err != nil {
panic("read meta error")
}
return json.Unmarshal(this.metaBuffer, &this.metaTable)
}
func (this *IpStore) parseIpGeo(row IpRow) (map[string]string, error) {
geo := make(map[string]string)
geo[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][row.Country]
geo[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][row.Province]
geo[AREA_CITY] = this.metaTable[AREA_CITY][row.City]
geo[AREA_ZONE] = this.metaTable[AREA_ZONE][row.Zone]
geo[AREA_LOCATION] = this.metaTable[AREA_LOCATION][row.Location]
geo[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][row.Operator]
geo["areacode"] = this.getGeocodeByRow(row)
return geo, nil
}
func (this *IpStore) getGeocodeByRow(row IpRow) string {
countryCode := strconv.Itoa(int(row.Country))
provinceCode := fmt.Sprintf("%04d", row.Province)
cityCode := fmt.Sprintf("%04d", row.City)
zoneCode := fmt.Sprintf("%04d", row.Zone)
provoderCode := fmt.Sprintf("%02d", row.Location)
OperatorCode := fmt.Sprintf("%02d", row.Operator)
return countryCode + provinceCode + cityCode + zoneCode + provoderCode + OperatorCode
}
// @TODO Parse by Reflect IpRow
func (this *IpStore) parseBodyLine(buffer []byte) (row IpRow, err error) {
buf := bytes.NewBuffer(buffer)
if err = binary.Read(buf, binary.BigEndian, &row.St | uestip, ".")
if len(nowip) != 4 {
return 0
}
a, _ := strconv.ParseUint(nowip[0], 10, 64)
b, _ := strconv.ParseUint(nowip[1], 10, 64)
c, _ := strconv.ParseUint(nowip[2], 10, 64)
d, _ := strconv.ParseUint(nowip[3], 10, 64)
ipNum := a<<24 | b<<16 | c<<8 | d
return ipNum
}
func Num2IP(ipnum uint64) string {
byte1 := ipnum & 0xff
byte2 := (ipnum & 0xff00)
byte2 >>= 8
byte3 := (ipnum & 0xff0000)
byte3 >>= 16
byte4 := (ipnum & 0xff000000)
byte4 >>= 24
result := strconv.FormatUint(byte4, 10) + "." +
strconv.FormatUint(byte3, 10) + "." +
strconv.FormatUint(byte2, 10) + "." +
strconv.FormatUint(byte1, 10)
return result
}
type datFile struct {
err error
*bytes.Buffer
headerLength int
bodyLength int
geoMap map[string]map[string]uint16
geoSlice map[string][]string
operator map[string]int
writer io.Writer
}
func NewDatFile(w io.Writer) *datFile {
m := map[string]map[string]uint16{
AREA_COUNTRY: make(map[string]uint16),
AREA_PROVINCE: make(map[string]uint16),
AREA_CITY: make(map[string]uint16),
AREA_ZONE: make(map[string]uint16),
AREA_LOCATION: make(map[string]uint16),
AREA_OPERATOR: make(map[string]uint16),
}
return &datFile{
Buffer: bytes.NewBuffer(nil),
geoMap: m,
geoSlice: make(map[string][]string),
writer: bufio.NewWriter(w),
}
}
// get area code by typ
func (d *datFile) getCode(typ string, area string) uint16 {
var code uint16
code, ok := d.geoMap[typ][area]
if !ok {
code = uint16(len(d.geoMap[typ]))
d.geoMap[typ][area] = code
d.geoSlice[typ] = append(d.geoSlice[typ], area)
}
return code
}
// @TODO parse fields by reflect the ip row
func (d *datFile) writeBody(fields []string) error {
if d.err != nil {
return d.err
}
start, _ := strconv.ParseUint(fields[0], 10, 32)
end, _ := strconv.ParseUint(fields[1], 10, 32)
binary.Write(d, binary.BigEndian, uint32(start))
binary.Write(d, binary.BigEndian, uint32(end))
binary.Write(d, binary.BigEndian, d.getCode(AREA_COUNTRY, fields[2]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_PROVINCE, fields[3]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_CITY, fields[4]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_ZONE, fields[5]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_LOCATION, fields[6]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_OPERATOR, fields[7]))
return d.err
}
// bodylength|body|metalength|meta
func (d *datFile) writeFile() error {
if d.err != nil {
return d.err
}
bodyLength := d.Buffer.Len()
meta, err := json.Marshal(d.geoSlice)
if err != nil {
d.err = err
return d.err
}
metaLength := len(meta)
binary.Write(d.writer, binary.BigEndian, uint32(bodyLength))
binary.Write(d.writer, binary.BigEndian, uint32(metaLength))
d.writer.Write(d.Buffer.Bytes())
d.writer.Write(meta)
fmt.Println("meta length is: ", metaLength)
fmt.Println("body length is: ", bodyLength)
return err
}
func MakeDat(infile, outfile string) error {
in, err := os.Open(infile)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(outfile, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 755)
if err != nil {
return err
}
defer out.Close()
output := NewDatFile(out)
r := bufio.NewReader(in)
count := 0
for {
count++
line, err := r.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if len(line) != 0 {
fields := strings.Fields(line)
if len(fields) != 8 {
return errors.New("invalid input file invalid line string")
}
if err := output.writeBody(fields); err != nil {
return err
}
}
if err == io.EOF {
break
}
}
if err := output.writeFile(); err != nil {
return err
}
fmt.Println("amount ip range from ip source: ", count)
return nil
}
| art); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.End); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Country); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Province); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.City); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Zone); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Location); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Operator); err != nil {
goto fail
}
fail:
return row, err
}
func IP2Num(requestip string) uint64 {
//获取客户端地址的long
nowip := strings.Split(req | identifier_body |
iphelper.go | package iphelper
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
const (
HEADER_LENGTH = 4
BODYLINE_LENGTH = 20
)
const (
AREA_COUNTRY = "country"
AREA_PROVINCE = "province"
AREA_CITY = "city"
AREA_ZONE = "zone"
AREA_LOCATION = "location"
AREA_OPERATOR = "operator"
)
// 获取ip段信息
type IpRow struct {
Start uint32
End uint32
Country uint16
Province uint16
City uint16
Zone uint16
Location uint16
Operator uint16
}
type IpStore struct {
bodyLength uint32
metaLength uint32
headerBuffer []byte
bodyBuffer []byte
metaBuffer []byte
IpTable []IpRow // ip信息表 按范围自增
metaTable map[string][]string
}
func NewIpStore(filename string) *IpStore {
store := IpStore{headerBuffer: make([]byte, HEADER_LENGTH), metaTable: make(map[string][]string)}
store.parseStore(filename)
return &store
}
// 获取ip的位置信息
func (this *IpStore) GetGeoByIp(ipSearch string) (location map[string]string, err error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return location, err
}
location, err = this.parseIpGeo(row)
return location, err
}
// 获取ip的区域编码
func (this *IpStore) GetGeocodeByIp(ipSearch string) (uint64, error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return 0, err
}
areacode := this.getGeocodeByRow(row)
codeUint64, err := strconv.ParseUint(areacode, 10, 64)
if err != nil {
return 0, err
}
return codeUint64, nil
}
func (this *IpStore) GetGeoByGeocode(areacode uint64) map[string]string {
result := map[string]string{}
result[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][areacode%100]
areacode /= 100
result[AREA_LOCATION] = this.metaTable[AREA_LOCATION][areacode%100]
areacode /= 100
result[AREA_ZONE] = this.metaTable[AREA_ZONE][areacode%10000]
areacode /= 10000
result[AREA_CITY] = this.metaTable[AREA_CITY][areacode%10000]
areacode /= 10000
result[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][areacode%10000]
areacode /= 10000
result[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][areacode%10000]
return result
}
// 获取ip的区域信息列表
func (this *IpStore) GetMetaTable() map[string][]string {
return this.metaTable
}
// 获取ip所在ip段的信息
func (this *IpStore) searchIpRow(ipSearch string) (row IpRow, err error) {
search := uint32(IP2Num(ipSearch))
// fmt.Println(search)
var start uint32 = 0
var end uint32 = uint32(len(this.IpTable) - 1)
var offset uint32 = 0
for start <= end {
mid := uint32(math.Floor(float64((end - start) / 2)))
offset = start + mid
IpRow := this.IpTable[offset]
// fmt.Println(IpRow)
if search >= IpRow.Start {
if search <= IpRow.End {
return IpRow, nil
} else {
start = offset + 1
continue
}
} else {
end = offset - 1
continue
}
}
return row, errors.New("fail to find")
}
func (this *IpStore) parseStore(filename string) {
file, err := os.Open(filename)
if err != nil {
panic("error opening file: %v\n" + err.Error())
}
defer file.Close()
fmt.Println("open file: ", filename)
var buf [HEADER_LENGTH]byte
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.bodyLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("body length is: ", this.bodyLength)
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.metaLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("meta length is: ", this.metaLength)
if err := this.paseBody(file); err != nil {
panic("parse body failed:" + err.Error())
}
if err := this.parseMeta(file); err != nil {
panic("pase meta failed" + err.Error())
}
}
func (this *IpStore) paseBody(file *os.File) error {
this.bodyBuffer = make([]byte, this.bodyLength)
if _, err := file.ReadAt(this.bodyBuffer, HEADER_LENGTH+HEADER_LENGTH); err != nil {
panic("read body error")
}
buf := bytes.NewBuffer(this.bodyBuffer)
var offset uint32 = 0
for offset < this.bodyLength {
line := buf.Next(BODYLINE_LENGTH)
row, err := this.parseBodyLine(line)
if err != nil {
return err
}
this.IpTable = append(this.IpTable, row)
offset += BODYLINE_LENGTH
}
return nil
}
func (this *IpStore) parseMeta(file *os.File) (err error) {
this.metaBuffer = make([]byte, this.metaLength)
if _, err := file.ReadAt(this.metaBuffer, int64(HEADER_LENGTH+HEADER_LENGTH+this.bodyLength)); err != nil {
panic("read meta error")
}
return json.Unmarshal(this.metaBuffer, &this.metaTable)
}
func (this *IpStore) parseIpGeo(row IpRow) (map[string]string, error) {
geo := make(map[string]string)
geo[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][row.Country]
geo[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][row.Province]
geo[AREA_CITY] = this.metaTable[AREA_CITY][row.City]
geo[AREA_ZONE] = this.metaTable[AREA_ZONE][row.Zone]
geo[AREA_LOCATION] = this.metaTable[AREA_LOCATION][row.Location]
geo[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][row.Operator]
geo["areacode"] = this.getGeocodeByRow(row)
return geo, nil
}
func (this *IpStore) getGeocodeByRow(row IpRow) string {
countryCode := strconv.Itoa(int(row.Country))
provinceCode := fmt.Sprintf("%04d", row.Province)
cityCode := fmt.Sprintf("%04d", row.City)
zoneCode := fmt.Sprintf("%04d", row.Zone)
provoderCode := fmt.Sprintf("%02d", row.Location)
OperatorCode := fmt.Sprintf("%02d", row.Operator)
return countryCode + provinceCode + cityCode + zoneCode + provoderCode + OperatorCode
}
// @TODO Parse by Reflect IpRow
func (this *IpStore) parseBodyLine(buffer []byte) (row IpRow, err error) {
buf := bytes.NewBuffer(buffer)
if err = binary.Read(buf, binary.BigEndian, &row.Start); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.End); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Country); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Province); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.City); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Zone); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Location); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Operator); err != nil {
goto fail
}
fail:
return row, err
}
func IP2Num(requestip string) uint64 {
//获取客户端地址的long
nowip := strings.Split(requestip, ".")
if len(nowip) != 4 {
return 0
}
a, _ := strconv.ParseUint(nowip[0], 10, 64)
b, _ := strconv.ParseUint(nowip[1], 10, 64)
c, _ := strconv.ParseUint(nowip[2], 10, 64)
d, _ := strconv.ParseUint(nowip[3], 10, 64)
ipNum := a<<24 | b<<16 | c<<8 | d
return ipNum
}
func Num2IP(ipnum uint64) string {
byte1 := ipnum & 0xff
byte2 := (ipnum & 0xff00)
byte2 >>= 8
byte3 := (ipnum & 0xff0000)
byte3 >>= 16
byte4 := (ipnum & 0xff000000)
byte4 >>= 24
result := strconv.FormatUint(byte4, 10) + "." +
strconv.FormatUint(byte3, 10) + "." +
strconv.FormatUint(byte2, 10) + "." +
strconv.FormatUint(byte1, 10)
return result
}
type datFile struct {
err error
*bytes.Buffer
headerLength int
bodyLength int
geoMap map[string]map[string]uint16
geoSlice map[string][]string
operator map[string]int
writer io.Writer
}
func NewDatFile(w io.Writer) *datFile {
m := map[string]map[string]uint16{
AREA_COUNTRY: make(map[string]uint16),
AREA_PROVINCE: make(map[string]uint16),
AREA_CITY: make(map[string]uint16),
AREA_ZONE: make(map[string]uint16),
AREA_LOCATION: make(map[string]uint16),
AREA_OPERATOR: make(map[string]uint16),
}
return &datFile{
Buffer: bytes.NewBuffer(nil),
geoMap: m,
geoSlice: make(map[string][]string),
writer: bufio.NewWriter(w),
}
}
// get area code by typ
func (d *datFile) getCode(typ string, area string) uint16 {
var code uint16
code, ok := d.geoMap[typ][area]
if !ok {
code = uint16(len(d.geoMap[typ]))
d.geoMap[typ][area] = code
d.geoSlice[typ] = append(d.geoSlice[typ], area)
}
return code
}
// @TODO parse fields by reflect the ip row
func (d *datFile) writeBody(fields []string) error {
if d.err != nil {
return d.err
}
start, _ := strconv.ParseUint(fields[0], 10, 32)
end, _ := strconv.ParseUint(fields[1], 10, 32)
binary.Write(d, binary.BigEndian, uint32(start))
binary.Write(d, binary.BigEndian, uint32(end))
binary.Write(d, binary.BigEndian, d.getCode(AREA_COUNTRY, fields[2]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_PROVINCE, fields[3]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_CITY, fields[4]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_ZONE, fields[5]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_LOCATION, fields[6]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_OPERATOR, fields[7]))
return d.err
}
// bodylength|body|metalength|meta
func (d *datFile) writeFile() error {
if d.err != nil {
return d.err
}
bodyLength := d.Buffer.Len()
meta, err := json.Marshal(d.geoSlice)
if err != nil {
d.err = err
return d.err
}
metaLength := len(meta)
binary.Write(d.writer, binary.BigEndian, uint32(bodyLength))
binary.Write(d.writer, binary.BigEndian, uint32(metaLength))
d.writer.Write(d.Buffer.Bytes())
d.writer.Write(meta)
fmt.Println("meta length is: ", metaLength)
fmt.Println("body length is: ", bodyLength)
return err
}
func MakeDat(infile, outfile string) error {
in, err := os.Open(infile)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(outfile, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 755)
if err != nil {
return err
}
defer out.Close()
output := NewDatFile(out)
r := bufio.NewReader(in)
count := 0
for {
count++
line, err := r.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if len(line) != 0 {
fields := strings.Fields(line)
if len(fields) != 8 {
return errors.New("invalid input file invalid line string")
}
if err := output.writeBody(fields); err != nil {
return err
}
}
if err == io.EOF {
| return err
}
fmt.Println("amount ip range from ip source: ", count)
return nil
} | break
}
}
if err := output.writeFile(); err != nil {
| random_line_split |
iphelper.go | package iphelper
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
const (
HEADER_LENGTH = 4
BODYLINE_LENGTH = 20
)
const (
AREA_COUNTRY = "country"
AREA_PROVINCE = "province"
AREA_CITY = "city"
AREA_ZONE = "zone"
AREA_LOCATION = "location"
AREA_OPERATOR = "operator"
)
// 获取ip段信息
type IpRow struct {
Start uint32
End uint32
Country uint16
Province uint16
City uint16
Zone uint16
Location uint16
Operator uint16
}
type IpStore struct {
bodyLength uint32
metaLength uint32
headerBuffer []byte
bodyBuffer []byte
metaBuffer []byte
IpTable []IpRow // ip信息表 按范围自增
metaTable map[string][]string
}
func NewIpStore(filename string) *IpStore {
store := IpStore{headerBuffer: make([]byte, HEADER_LENGTH), metaTable: make(map[string][]string)}
store.parseStore(filename)
return &store
}
// 获取ip的位置信息
func (this *IpStore) GetGeoByIp(ipSearch string) (location map[string]string, err error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return location, err
}
location, err = this.parseIpGeo(row)
return location, err
}
// 获取ip的区域编码
func (this *IpStore) GetGeocodeByIp(ipSearch string) (uint64, error) {
row, err := this.searchIpRow(ipSearch)
if err != nil {
return 0, err
}
areacode := this.getGeocodeByRow(row)
codeUint64, err := strconv.ParseUint(areacode, 10, 64)
if err != nil {
return 0, err
}
return codeUint64, nil
}
func (this *IpStore) GetGeoByGeocode(areacode uint64) map[string]string {
result := map[string]string{}
result[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][areacode%100]
areacode /= 100
result[AREA_LOCATION] = this.metaTable[AREA_LOCATION][areacode%100]
areacode /= 100
result[AREA_ZONE] = this.metaTable[AREA_ZONE][areacode%10000]
areacode /= 10000
result[AREA_CITY] = this.metaTable[AREA_CITY][areacode%10000]
areacode /= 10000
result[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][areacode%10000]
areacode /= 10000
result[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][areacode%10000]
return result
}
// 获取ip的区域信息列表
func (this *IpStore) GetMetaTable() map[string][]string {
return this.metaTable
}
// 获取ip所在ip段的信息
func (this *IpStore) searchIpRow(ipSearch string) (row IpRow, err error) {
search := uint32(IP2Num(ipSearch))
// fmt.Println(search)
var start uint32 = 0
var end uint32 = uint32(len(this.IpTable) - 1)
var offset uint32 = 0
for start <= end {
mid := uint32(math.Floor(float64((end - start) / 2)))
offset = start + mid
IpRow := this.IpTable[offset]
// fmt.Println(IpRow)
if search >= IpRow.Start {
if search <= IpRow.End {
return IpRow, nil
} else {
start = offset + 1
continue
}
} else {
end = offset - 1
continue
}
}
return row, errors.New("fail to find")
}
func (this *IpStore) parseStore(filename string) {
file, err := os.Open(filename)
if err != nil {
panic("error opening file: %v\n" + err.Error())
}
defer file.Close()
fmt.Println("open file: ", filename)
var buf [HEADER_LENGTH]byte
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.bodyLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("body length is: ", this.bodyLength)
if _, err := file.Read(buf[0:4]); err != nil {
panic("error read header" + err.Error())
}
this.metaLength = binary.BigEndian.Uint32(buf[0:4])
fmt.Println("meta length is: ", this.metaLength)
if err := this.paseBody(file); err != nil {
panic("parse body failed:" + err.Error())
}
if err := this.parseMeta(file); err != nil {
panic("pase meta failed" + err.Error())
}
}
func (this *IpStore) paseBody(file *os.File) error {
this.bodyBuffer = make([]byte, this.bodyLength)
if _, err := file.ReadAt(this.bodyBuffer, HEADER_LENGTH+HEADER_LENGTH); err != nil {
panic("read body error")
}
buf := bytes.NewBuffer(this.bodyBuffer)
var offset uint32 = 0
for offset < this.bodyLength {
line := buf.Next(BODYLINE_LENGTH)
row, err := this.parseBodyLine(line)
if err != nil {
return err
}
this.IpTable = append(this.IpTable, row)
offset += BODYLINE_ | il
}
func (this *IpStore) parseMeta(file *os.File) (err error) {
this.metaBuffer = make([]byte, this.metaLength)
if _, err := file.ReadAt(this.metaBuffer, int64(HEADER_LENGTH+HEADER_LENGTH+this.bodyLength)); err != nil {
panic("read meta error")
}
return json.Unmarshal(this.metaBuffer, &this.metaTable)
}
func (this *IpStore) parseIpGeo(row IpRow) (map[string]string, error) {
geo := make(map[string]string)
geo[AREA_COUNTRY] = this.metaTable[AREA_COUNTRY][row.Country]
geo[AREA_PROVINCE] = this.metaTable[AREA_PROVINCE][row.Province]
geo[AREA_CITY] = this.metaTable[AREA_CITY][row.City]
geo[AREA_ZONE] = this.metaTable[AREA_ZONE][row.Zone]
geo[AREA_LOCATION] = this.metaTable[AREA_LOCATION][row.Location]
geo[AREA_OPERATOR] = this.metaTable[AREA_OPERATOR][row.Operator]
geo["areacode"] = this.getGeocodeByRow(row)
return geo, nil
}
func (this *IpStore) getGeocodeByRow(row IpRow) string {
countryCode := strconv.Itoa(int(row.Country))
provinceCode := fmt.Sprintf("%04d", row.Province)
cityCode := fmt.Sprintf("%04d", row.City)
zoneCode := fmt.Sprintf("%04d", row.Zone)
provoderCode := fmt.Sprintf("%02d", row.Location)
OperatorCode := fmt.Sprintf("%02d", row.Operator)
return countryCode + provinceCode + cityCode + zoneCode + provoderCode + OperatorCode
}
// @TODO Parse by Reflect IpRow
func (this *IpStore) parseBodyLine(buffer []byte) (row IpRow, err error) {
buf := bytes.NewBuffer(buffer)
if err = binary.Read(buf, binary.BigEndian, &row.Start); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.End); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Country); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Province); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.City); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Zone); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Location); err != nil {
goto fail
}
if err = binary.Read(buf, binary.BigEndian, &row.Operator); err != nil {
goto fail
}
fail:
return row, err
}
func IP2Num(requestip string) uint64 {
//获取客户端地址的long
nowip := strings.Split(requestip, ".")
if len(nowip) != 4 {
return 0
}
a, _ := strconv.ParseUint(nowip[0], 10, 64)
b, _ := strconv.ParseUint(nowip[1], 10, 64)
c, _ := strconv.ParseUint(nowip[2], 10, 64)
d, _ := strconv.ParseUint(nowip[3], 10, 64)
ipNum := a<<24 | b<<16 | c<<8 | d
return ipNum
}
func Num2IP(ipnum uint64) string {
byte1 := ipnum & 0xff
byte2 := (ipnum & 0xff00)
byte2 >>= 8
byte3 := (ipnum & 0xff0000)
byte3 >>= 16
byte4 := (ipnum & 0xff000000)
byte4 >>= 24
result := strconv.FormatUint(byte4, 10) + "." +
strconv.FormatUint(byte3, 10) + "." +
strconv.FormatUint(byte2, 10) + "." +
strconv.FormatUint(byte1, 10)
return result
}
type datFile struct {
err error
*bytes.Buffer
headerLength int
bodyLength int
geoMap map[string]map[string]uint16
geoSlice map[string][]string
operator map[string]int
writer io.Writer
}
func NewDatFile(w io.Writer) *datFile {
m := map[string]map[string]uint16{
AREA_COUNTRY: make(map[string]uint16),
AREA_PROVINCE: make(map[string]uint16),
AREA_CITY: make(map[string]uint16),
AREA_ZONE: make(map[string]uint16),
AREA_LOCATION: make(map[string]uint16),
AREA_OPERATOR: make(map[string]uint16),
}
return &datFile{
Buffer: bytes.NewBuffer(nil),
geoMap: m,
geoSlice: make(map[string][]string),
writer: bufio.NewWriter(w),
}
}
// get area code by typ
func (d *datFile) getCode(typ string, area string) uint16 {
var code uint16
code, ok := d.geoMap[typ][area]
if !ok {
code = uint16(len(d.geoMap[typ]))
d.geoMap[typ][area] = code
d.geoSlice[typ] = append(d.geoSlice[typ], area)
}
return code
}
// @TODO parse fields by reflect the ip row
func (d *datFile) writeBody(fields []string) error {
if d.err != nil {
return d.err
}
start, _ := strconv.ParseUint(fields[0], 10, 32)
end, _ := strconv.ParseUint(fields[1], 10, 32)
binary.Write(d, binary.BigEndian, uint32(start))
binary.Write(d, binary.BigEndian, uint32(end))
binary.Write(d, binary.BigEndian, d.getCode(AREA_COUNTRY, fields[2]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_PROVINCE, fields[3]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_CITY, fields[4]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_ZONE, fields[5]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_LOCATION, fields[6]))
binary.Write(d, binary.BigEndian, d.getCode(AREA_OPERATOR, fields[7]))
return d.err
}
// bodylength|body|metalength|meta
func (d *datFile) writeFile() error {
if d.err != nil {
return d.err
}
bodyLength := d.Buffer.Len()
meta, err := json.Marshal(d.geoSlice)
if err != nil {
d.err = err
return d.err
}
metaLength := len(meta)
binary.Write(d.writer, binary.BigEndian, uint32(bodyLength))
binary.Write(d.writer, binary.BigEndian, uint32(metaLength))
d.writer.Write(d.Buffer.Bytes())
d.writer.Write(meta)
fmt.Println("meta length is: ", metaLength)
fmt.Println("body length is: ", bodyLength)
return err
}
func MakeDat(infile, outfile string) error {
in, err := os.Open(infile)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(outfile, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 755)
if err != nil {
return err
}
defer out.Close()
output := NewDatFile(out)
r := bufio.NewReader(in)
count := 0
for {
count++
line, err := r.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if len(line) != 0 {
fields := strings.Fields(line)
if len(fields) != 8 {
return errors.New("invalid input file invalid line string")
}
if err := output.writeBody(fields); err != nil {
return err
}
}
if err == io.EOF {
break
}
}
if err := output.writeFile(); err != nil {
return err
}
fmt.Println("amount ip range from ip source: ", count)
return nil
}
| LENGTH
}
return n | conditional_block |
deeqlearning_atari.py | import copy
import gym
import os
import sys
import random
import numpy as np
from matplotlib import pyplot as plt
from gym import wrappers
from datetime import datetime
import tensorflow as tf
from tensorflow.keras import Model, layers, Sequential
from tensorflow.keras.activations import softmax, tanh
tf.keras.backend.set_floatx('float64')
MAX_EXPERIENCES = 500000
MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
#Transfrom raw images for input into neural network
class ImageTransformer:
'''Converts the image to grayscale and crops it to IM_SIZE, IM_SIZE'''
def __init__(self, offset_height=34, offset_width=0, target_height=160, target_width=160):
self.offset_height = offset_height
self.offset_width = offset_width
self.target_height = target_height
self.target_width = target_width
def transfrom(self, state):
output = tf.image.rgb_to_grayscale(state)
output = tf.image.crop_to_bounding_box(output, self.offset_height, self.offset_width, self.target_height, self.target_width)
output = tf.image.resize(output, [IM_SIZE, IM_SIZE], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return tf.squeeze(output)
def update_state(current_state, next_frame):
'''Takes in the current state, next frame and returns the next state'''
return np.append(current_state[:,:,1:], np.expand_dims(next_frame, 2), axis=2)
class ReplayMemory:
def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE, agent_history_length=4, batch_size=32):
'''
Basic Idea here is we are going to pre allocate all of the frames, we plan on storing and then we can sample states from
the individual states later on.
Args
size: (buffer_size): Integer, Number of Stored transitions
frame_height: Height of frame of an Atari Game
frame_width: Width of frame of an Atari Game
agent_history_length: Integer, Number of frames stacked together to create a state.
batch_size: Integer, Number of transactions returned in a minibatch
'''
self.size = size
self.frame_height = frame_height
self.frame_width = frame_width
self.agent_history_length = agent_history_length
self.batch_size = batch_size
#both count and current keeps track of the insertion point in replay buffer.
self.count = 0
self.current = 0
#Pre-allocate memory
self.actions = np.empty(self.size, dtype=np.int32)
self.rewards = np.empty(self.size, dtype=np.float32)
self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)
self.terminal_flags = np.empty(self.size, dtype=np.bool)
#Pre-allocate memory for the states and new states in a minibatch.
self.states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.new_states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.indices = np.empty(self.batch_size, dtype=np.int32)
def add_experience(self, action, frame, reward, terminal):
'''
Args:
action: An integer encoded action
frame: One grayscale image of the frame
reward: reward that the agent recieved for performing the action
terminal: A bool stating whether the episode terminated
'''
if frame.shape != (self.frame_height, self.frame_width):
raise ValueError('Dimension of the frame is wrong!')
self.actions[self.current] = action
self.frames[self.current] = frame
self.rewards[self.current] = reward
self.terminal_flags[self.current] = terminal
self.count = max(self.count, self.current+1)
self.current = (self.current + 1) % self.size
def _get_state(self, index):
if self.count is 0:
raise ValueError("The replay memory is empty")
if index < self.agent_history_length - 1:
raise ValueError("Index must be minimum 3")
return self.frames[index-self.agent_history_length+1 : index+1]
def _get_valid_indices(self):
for i in range(self.batch_size):
while True:
index = random.randint(self.agent_history_length, self.count - 1)
if index < self.agent_history_length:
continue
if index >= self.current and index - self.agent_history_length <= self.current_state:
#checks for frames that is between old an new
continue
if self.terminal_flags[index - self.agent_history_length : index].any():
#checks for done flag
continue
break
self.indices[i] = index
def get_minibatch(self):
'''Returns a minibatch of self.batch_size transitions'''
if self.count < self.agent_history_length:
raise ValueError("Not enough memories to get a mini batch")
self._get_valid_indices()
for i, idx in enumerate(self.indices):
self.states[i] = self._get_state(idx-1)
self.new_states[i] = self._get_state(idx)
return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], \
self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices ]
def cost(G, Y_hat, actions_taken_list, num_of_actions):
selected_action_values = tf.reduce_sum(Y_hat * tf.one_hot(actions_taken_list, num_of_actions, dtype='float64'), axis=1)
#return tf.reduce_sum(tf.math.square(G - selected_action_values))
return tf.reduce_sum(tf.compat.v1.losses.huber_loss (G, selected_action_values))
class DQN(Model):
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes):
super(DQN, self).__init__()
self.K = K
self.conv_seq = Sequential()
for num_output_filters, filter_size, pool_size in conv_layer_sizes:
self.conv_seq.add(layers.Conv2D(num_output_filters, filter_size, strides=pool_size, activation='relu', input_shape=(IM_SIZE, IM_SIZE, 4)))
self.flatten = layers.Flatten()
self.linear_sequence = Sequential()
for M in hidden_layer_sizes:
layer = layers.Dense(M)
self.linear_sequence.add(layer)
layer = layers.Dense(K)
self.linear_sequence.add(layer)
self.cost = cost
self.optimizer = tf.keras.optimizers.Adam(10e-4)
def call(self, X):
Z = X/255
Z = self.conv_seq(Z)
Z = self.flatten(Z)
Z = self.linear_sequence(Z)
return Z
def update(self, X, G, actions): | self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss_value
def copy_from(self, other):
weights = other.get_weights()
self.set_weights(weights)
def predict(self, X):
X = np.atleast_2d(X)
return self(X)
def sample_action(self, X, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([X])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
#sample experiences
states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch()
#calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
#update model
loss = model.update(states, targets, actions)
return loss
def play_one(env, total_t, experience_replay_buffer, model, target_model,
image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min):
'''
playing episodes
Arguments explained
total_t - total number of steps played so far
experience_replay_buffer - ReplayMemory object
'''
t0 = datetime.now()
#Reset the environment
obs = env.reset()
obs_small = image_transformer.transfrom(obs)
state = np.stack([obs_small] * 4, axis=2)
loss=None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done=False
while not done:
#check if it is time to update target network and then update it.
if (total_t % TARGET_UPDATE_PERIOD == 0 and total_t!=0) or total_t == 1:
print("copying")
target_model.copy_from(model)
print ("parameters copied to target network. total steps: ", total_t, "target update period: ", TARGET_UPDATE_PERIOD)
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
next_state = update_state(state, obs_small)
episode_reward += reward
#save the latest experience
experience_replay_buffer.add_experience(action, obs_small, reward, done)
#Train the Model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now()- t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
def smooth(x):
#last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i-99)
y[i] = float(x[start:(i+1)].sum())/(i - start + 1)
return y
def main():
#hyperparmeters and initializations
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_size = 32
num_episodes = 3500
total_t = 0
experience_replay_buffer = ReplayMemory()
episode_rewards = np.zeros(num_episodes)
epsilon_min = 0.1
epsilon = 1.0
epsilon_change = (epsilon - epsilon_min) / 500000
#Create environment
env = gym.envs.make("Breakout-v0")
#Create Models
model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
target_model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
image_transformer = ImageTransformer()
print("Populating Experience Replay Buffer")
obs = env.reset()
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
experience_replay_buffer.add_experience(action, obs_small, reward, done)
if i % 5000 == 0:
print(i, "random actions taken")
if done:
obs = env.reset()
#Play a number of episodes and learn.
print("start playing")
t0 = datetime.now()
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(env, total_t, experience_replay_buffer, model, target_model, image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min)
episode_rewards[i] = episode_reward
last_100_avg = episode_rewards[max(0, i-100) : i + 1].mean()
print("Episode: ", i, "Duration: ", duration, "Num_steps: ", num_steps_in_episode, "Reward: ", episode_reward, "Training time per step: ", time_per_step, "Average_reward_last_100: ", last_100_avg, "Epsilon: ", epsilon)
print("Total duration", datetime.now() - t0)
model.save_weights('weights.h5')
#plot the smoothed returns
y = smooth(episode_rewards)
plt.plot(episode_rewards, label='org')
plt.plot(y, label='smoothed')
plt.legend()
plt.show()
if __name__ == "__main__":
main() | with tf.GradientTape() as tape:
Y_hat = self(X)
# Loss value for this minibatch
loss_value = cost(G, Y_hat, actions, self.K)
grads = tape.gradient(loss_value, self.trainable_weights) | random_line_split |
deeqlearning_atari.py | import copy
import gym
import os
import sys
import random
import numpy as np
from matplotlib import pyplot as plt
from gym import wrappers
from datetime import datetime
import tensorflow as tf
from tensorflow.keras import Model, layers, Sequential
from tensorflow.keras.activations import softmax, tanh
tf.keras.backend.set_floatx('float64')
MAX_EXPERIENCES = 500000
MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
#Transfrom raw images for input into neural network
class ImageTransformer:
'''Converts the image to grayscale and crops it to IM_SIZE, IM_SIZE'''
def __init__(self, offset_height=34, offset_width=0, target_height=160, target_width=160):
self.offset_height = offset_height
self.offset_width = offset_width
self.target_height = target_height
self.target_width = target_width
def transfrom(self, state):
output = tf.image.rgb_to_grayscale(state)
output = tf.image.crop_to_bounding_box(output, self.offset_height, self.offset_width, self.target_height, self.target_width)
output = tf.image.resize(output, [IM_SIZE, IM_SIZE], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return tf.squeeze(output)
def update_state(current_state, next_frame):
'''Takes in the current state, next frame and returns the next state'''
return np.append(current_state[:,:,1:], np.expand_dims(next_frame, 2), axis=2)
class ReplayMemory:
def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE, agent_history_length=4, batch_size=32):
'''
Basic Idea here is we are going to pre allocate all of the frames, we plan on storing and then we can sample states from
the individual states later on.
Args
size: (buffer_size): Integer, Number of Stored transitions
frame_height: Height of frame of an Atari Game
frame_width: Width of frame of an Atari Game
agent_history_length: Integer, Number of frames stacked together to create a state.
batch_size: Integer, Number of transactions returned in a minibatch
'''
self.size = size
self.frame_height = frame_height
self.frame_width = frame_width
self.agent_history_length = agent_history_length
self.batch_size = batch_size
#both count and current keeps track of the insertion point in replay buffer.
self.count = 0
self.current = 0
#Pre-allocate memory
self.actions = np.empty(self.size, dtype=np.int32)
self.rewards = np.empty(self.size, dtype=np.float32)
self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)
self.terminal_flags = np.empty(self.size, dtype=np.bool)
#Pre-allocate memory for the states and new states in a minibatch.
self.states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.new_states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.indices = np.empty(self.batch_size, dtype=np.int32)
def add_experience(self, action, frame, reward, terminal):
'''
Args:
action: An integer encoded action
frame: One grayscale image of the frame
reward: reward that the agent recieved for performing the action
terminal: A bool stating whether the episode terminated
'''
if frame.shape != (self.frame_height, self.frame_width):
raise ValueError('Dimension of the frame is wrong!')
self.actions[self.current] = action
self.frames[self.current] = frame
self.rewards[self.current] = reward
self.terminal_flags[self.current] = terminal
self.count = max(self.count, self.current+1)
self.current = (self.current + 1) % self.size
def _get_state(self, index):
if self.count is 0:
raise ValueError("The replay memory is empty")
if index < self.agent_history_length - 1:
raise ValueError("Index must be minimum 3")
return self.frames[index-self.agent_history_length+1 : index+1]
def _get_valid_indices(self):
for i in range(self.batch_size):
while True:
index = random.randint(self.agent_history_length, self.count - 1)
if index < self.agent_history_length:
continue
if index >= self.current and index - self.agent_history_length <= self.current_state:
#checks for frames that is between old an new
continue
if self.terminal_flags[index - self.agent_history_length : index].any():
#checks for done flag
continue
break
self.indices[i] = index
def get_minibatch(self):
'''Returns a minibatch of self.batch_size transitions'''
if self.count < self.agent_history_length:
raise ValueError("Not enough memories to get a mini batch")
self._get_valid_indices()
for i, idx in enumerate(self.indices):
self.states[i] = self._get_state(idx-1)
self.new_states[i] = self._get_state(idx)
return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], \
self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices ]
def cost(G, Y_hat, actions_taken_list, num_of_actions):
selected_action_values = tf.reduce_sum(Y_hat * tf.one_hot(actions_taken_list, num_of_actions, dtype='float64'), axis=1)
#return tf.reduce_sum(tf.math.square(G - selected_action_values))
return tf.reduce_sum(tf.compat.v1.losses.huber_loss (G, selected_action_values))
class DQN(Model):
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes):
super(DQN, self).__init__()
self.K = K
self.conv_seq = Sequential()
for num_output_filters, filter_size, pool_size in conv_layer_sizes:
self.conv_seq.add(layers.Conv2D(num_output_filters, filter_size, strides=pool_size, activation='relu', input_shape=(IM_SIZE, IM_SIZE, 4)))
self.flatten = layers.Flatten()
self.linear_sequence = Sequential()
for M in hidden_layer_sizes:
layer = layers.Dense(M)
self.linear_sequence.add(layer)
layer = layers.Dense(K)
self.linear_sequence.add(layer)
self.cost = cost
self.optimizer = tf.keras.optimizers.Adam(10e-4)
def call(self, X):
Z = X/255
Z = self.conv_seq(Z)
Z = self.flatten(Z)
Z = self.linear_sequence(Z)
return Z
def update(self, X, G, actions):
with tf.GradientTape() as tape:
Y_hat = self(X)
# Loss value for this minibatch
loss_value = cost(G, Y_hat, actions, self.K)
grads = tape.gradient(loss_value, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss_value
def copy_from(self, other):
weights = other.get_weights()
self.set_weights(weights)
def predict(self, X):
X = np.atleast_2d(X)
return self(X)
def sample_action(self, X, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([X])[0])
def | (model, target_model, experience_replay_buffer, gamma, batch_size):
#sample experiences
states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch()
#calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
#update model
loss = model.update(states, targets, actions)
return loss
def play_one(env, total_t, experience_replay_buffer, model, target_model,
image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min):
'''
playing episodes
Arguments explained
total_t - total number of steps played so far
experience_replay_buffer - ReplayMemory object
'''
t0 = datetime.now()
#Reset the environment
obs = env.reset()
obs_small = image_transformer.transfrom(obs)
state = np.stack([obs_small] * 4, axis=2)
loss=None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done=False
while not done:
#check if it is time to update target network and then update it.
if (total_t % TARGET_UPDATE_PERIOD == 0 and total_t!=0) or total_t == 1:
print("copying")
target_model.copy_from(model)
print ("parameters copied to target network. total steps: ", total_t, "target update period: ", TARGET_UPDATE_PERIOD)
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
next_state = update_state(state, obs_small)
episode_reward += reward
#save the latest experience
experience_replay_buffer.add_experience(action, obs_small, reward, done)
#Train the Model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now()- t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
def smooth(x):
#last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i-99)
y[i] = float(x[start:(i+1)].sum())/(i - start + 1)
return y
def main():
#hyperparmeters and initializations
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_size = 32
num_episodes = 3500
total_t = 0
experience_replay_buffer = ReplayMemory()
episode_rewards = np.zeros(num_episodes)
epsilon_min = 0.1
epsilon = 1.0
epsilon_change = (epsilon - epsilon_min) / 500000
#Create environment
env = gym.envs.make("Breakout-v0")
#Create Models
model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
target_model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
image_transformer = ImageTransformer()
print("Populating Experience Replay Buffer")
obs = env.reset()
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
experience_replay_buffer.add_experience(action, obs_small, reward, done)
if i % 5000 == 0:
print(i, "random actions taken")
if done:
obs = env.reset()
#Play a number of episodes and learn.
print("start playing")
t0 = datetime.now()
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(env, total_t, experience_replay_buffer, model, target_model, image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min)
episode_rewards[i] = episode_reward
last_100_avg = episode_rewards[max(0, i-100) : i + 1].mean()
print("Episode: ", i, "Duration: ", duration, "Num_steps: ", num_steps_in_episode, "Reward: ", episode_reward, "Training time per step: ", time_per_step, "Average_reward_last_100: ", last_100_avg, "Epsilon: ", epsilon)
print("Total duration", datetime.now() - t0)
model.save_weights('weights.h5')
#plot the smoothed returns
y = smooth(episode_rewards)
plt.plot(episode_rewards, label='org')
plt.plot(y, label='smoothed')
plt.legend()
plt.show()
if __name__ == "__main__":
main() | learn | identifier_name |
deeqlearning_atari.py | import copy
import gym
import os
import sys
import random
import numpy as np
from matplotlib import pyplot as plt
from gym import wrappers
from datetime import datetime
import tensorflow as tf
from tensorflow.keras import Model, layers, Sequential
from tensorflow.keras.activations import softmax, tanh
tf.keras.backend.set_floatx('float64')
MAX_EXPERIENCES = 500000
MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
#Transfrom raw images for input into neural network
class ImageTransformer:
'''Converts the image to grayscale and crops it to IM_SIZE, IM_SIZE'''
def __init__(self, offset_height=34, offset_width=0, target_height=160, target_width=160):
self.offset_height = offset_height
self.offset_width = offset_width
self.target_height = target_height
self.target_width = target_width
def transfrom(self, state):
output = tf.image.rgb_to_grayscale(state)
output = tf.image.crop_to_bounding_box(output, self.offset_height, self.offset_width, self.target_height, self.target_width)
output = tf.image.resize(output, [IM_SIZE, IM_SIZE], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return tf.squeeze(output)
def update_state(current_state, next_frame):
'''Takes in the current state, next frame and returns the next state'''
return np.append(current_state[:,:,1:], np.expand_dims(next_frame, 2), axis=2)
class ReplayMemory:
def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE, agent_history_length=4, batch_size=32):
'''
Basic Idea here is we are going to pre allocate all of the frames, we plan on storing and then we can sample states from
the individual states later on.
Args
size: (buffer_size): Integer, Number of Stored transitions
frame_height: Height of frame of an Atari Game
frame_width: Width of frame of an Atari Game
agent_history_length: Integer, Number of frames stacked together to create a state.
batch_size: Integer, Number of transactions returned in a minibatch
'''
self.size = size
self.frame_height = frame_height
self.frame_width = frame_width
self.agent_history_length = agent_history_length
self.batch_size = batch_size
#both count and current keeps track of the insertion point in replay buffer.
self.count = 0
self.current = 0
#Pre-allocate memory
self.actions = np.empty(self.size, dtype=np.int32)
self.rewards = np.empty(self.size, dtype=np.float32)
self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)
self.terminal_flags = np.empty(self.size, dtype=np.bool)
#Pre-allocate memory for the states and new states in a minibatch.
self.states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.new_states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.indices = np.empty(self.batch_size, dtype=np.int32)
def add_experience(self, action, frame, reward, terminal):
'''
Args:
action: An integer encoded action
frame: One grayscale image of the frame
reward: reward that the agent recieved for performing the action
terminal: A bool stating whether the episode terminated
'''
if frame.shape != (self.frame_height, self.frame_width):
raise ValueError('Dimension of the frame is wrong!')
self.actions[self.current] = action
self.frames[self.current] = frame
self.rewards[self.current] = reward
self.terminal_flags[self.current] = terminal
self.count = max(self.count, self.current+1)
self.current = (self.current + 1) % self.size
def _get_state(self, index):
if self.count is 0:
raise ValueError("The replay memory is empty")
if index < self.agent_history_length - 1:
raise ValueError("Index must be minimum 3")
return self.frames[index-self.agent_history_length+1 : index+1]
def _get_valid_indices(self):
for i in range(self.batch_size):
while True:
index = random.randint(self.agent_history_length, self.count - 1)
if index < self.agent_history_length:
continue
if index >= self.current and index - self.agent_history_length <= self.current_state:
#checks for frames that is between old an new
continue
if self.terminal_flags[index - self.agent_history_length : index].any():
#checks for done flag
continue
break
self.indices[i] = index
def get_minibatch(self):
'''Returns a minibatch of self.batch_size transitions'''
if self.count < self.agent_history_length:
raise ValueError("Not enough memories to get a mini batch")
self._get_valid_indices()
for i, idx in enumerate(self.indices):
self.states[i] = self._get_state(idx-1)
self.new_states[i] = self._get_state(idx)
return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], \
self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices ]
def cost(G, Y_hat, actions_taken_list, num_of_actions):
selected_action_values = tf.reduce_sum(Y_hat * tf.one_hot(actions_taken_list, num_of_actions, dtype='float64'), axis=1)
#return tf.reduce_sum(tf.math.square(G - selected_action_values))
return tf.reduce_sum(tf.compat.v1.losses.huber_loss (G, selected_action_values))
class DQN(Model):
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes):
super(DQN, self).__init__()
self.K = K
self.conv_seq = Sequential()
for num_output_filters, filter_size, pool_size in conv_layer_sizes:
self.conv_seq.add(layers.Conv2D(num_output_filters, filter_size, strides=pool_size, activation='relu', input_shape=(IM_SIZE, IM_SIZE, 4)))
self.flatten = layers.Flatten()
self.linear_sequence = Sequential()
for M in hidden_layer_sizes:
layer = layers.Dense(M)
self.linear_sequence.add(layer)
layer = layers.Dense(K)
self.linear_sequence.add(layer)
self.cost = cost
self.optimizer = tf.keras.optimizers.Adam(10e-4)
def call(self, X):
Z = X/255
Z = self.conv_seq(Z)
Z = self.flatten(Z)
Z = self.linear_sequence(Z)
return Z
def update(self, X, G, actions):
with tf.GradientTape() as tape:
Y_hat = self(X)
# Loss value for this minibatch
loss_value = cost(G, Y_hat, actions, self.K)
grads = tape.gradient(loss_value, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss_value
def copy_from(self, other):
weights = other.get_weights()
self.set_weights(weights)
def predict(self, X):
|
def sample_action(self, X, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([X])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
#sample experiences
states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch()
#calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
#update model
loss = model.update(states, targets, actions)
return loss
def play_one(env, total_t, experience_replay_buffer, model, target_model,
image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min):
'''
playing episodes
Arguments explained
total_t - total number of steps played so far
experience_replay_buffer - ReplayMemory object
'''
t0 = datetime.now()
#Reset the environment
obs = env.reset()
obs_small = image_transformer.transfrom(obs)
state = np.stack([obs_small] * 4, axis=2)
loss=None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done=False
while not done:
#check if it is time to update target network and then update it.
if (total_t % TARGET_UPDATE_PERIOD == 0 and total_t!=0) or total_t == 1:
print("copying")
target_model.copy_from(model)
print ("parameters copied to target network. total steps: ", total_t, "target update period: ", TARGET_UPDATE_PERIOD)
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
next_state = update_state(state, obs_small)
episode_reward += reward
#save the latest experience
experience_replay_buffer.add_experience(action, obs_small, reward, done)
#Train the Model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now()- t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
def smooth(x):
#last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i-99)
y[i] = float(x[start:(i+1)].sum())/(i - start + 1)
return y
def main():
#hyperparmeters and initializations
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_size = 32
num_episodes = 3500
total_t = 0
experience_replay_buffer = ReplayMemory()
episode_rewards = np.zeros(num_episodes)
epsilon_min = 0.1
epsilon = 1.0
epsilon_change = (epsilon - epsilon_min) / 500000
#Create environment
env = gym.envs.make("Breakout-v0")
#Create Models
model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
target_model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
image_transformer = ImageTransformer()
print("Populating Experience Replay Buffer")
obs = env.reset()
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
experience_replay_buffer.add_experience(action, obs_small, reward, done)
if i % 5000 == 0:
print(i, "random actions taken")
if done:
obs = env.reset()
#Play a number of episodes and learn.
print("start playing")
t0 = datetime.now()
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(env, total_t, experience_replay_buffer, model, target_model, image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min)
episode_rewards[i] = episode_reward
last_100_avg = episode_rewards[max(0, i-100) : i + 1].mean()
print("Episode: ", i, "Duration: ", duration, "Num_steps: ", num_steps_in_episode, "Reward: ", episode_reward, "Training time per step: ", time_per_step, "Average_reward_last_100: ", last_100_avg, "Epsilon: ", epsilon)
print("Total duration", datetime.now() - t0)
model.save_weights('weights.h5')
#plot the smoothed returns
y = smooth(episode_rewards)
plt.plot(episode_rewards, label='org')
plt.plot(y, label='smoothed')
plt.legend()
plt.show()
if __name__ == "__main__":
main() | X = np.atleast_2d(X)
return self(X) | identifier_body |
deeqlearning_atari.py | import copy
import gym
import os
import sys
import random
import numpy as np
from matplotlib import pyplot as plt
from gym import wrappers
from datetime import datetime
import tensorflow as tf
from tensorflow.keras import Model, layers, Sequential
from tensorflow.keras.activations import softmax, tanh
tf.keras.backend.set_floatx('float64')
MAX_EXPERIENCES = 500000
MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
#Transfrom raw images for input into neural network
class ImageTransformer:
'''Converts the image to grayscale and crops it to IM_SIZE, IM_SIZE'''
def __init__(self, offset_height=34, offset_width=0, target_height=160, target_width=160):
self.offset_height = offset_height
self.offset_width = offset_width
self.target_height = target_height
self.target_width = target_width
def transfrom(self, state):
output = tf.image.rgb_to_grayscale(state)
output = tf.image.crop_to_bounding_box(output, self.offset_height, self.offset_width, self.target_height, self.target_width)
output = tf.image.resize(output, [IM_SIZE, IM_SIZE], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return tf.squeeze(output)
def update_state(current_state, next_frame):
'''Takes in the current state, next frame and returns the next state'''
return np.append(current_state[:,:,1:], np.expand_dims(next_frame, 2), axis=2)
class ReplayMemory:
def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE, agent_history_length=4, batch_size=32):
'''
Basic Idea here is we are going to pre allocate all of the frames, we plan on storing and then we can sample states from
the individual states later on.
Args
size: (buffer_size): Integer, Number of Stored transitions
frame_height: Height of frame of an Atari Game
frame_width: Width of frame of an Atari Game
agent_history_length: Integer, Number of frames stacked together to create a state.
batch_size: Integer, Number of transactions returned in a minibatch
'''
self.size = size
self.frame_height = frame_height
self.frame_width = frame_width
self.agent_history_length = agent_history_length
self.batch_size = batch_size
#both count and current keeps track of the insertion point in replay buffer.
self.count = 0
self.current = 0
#Pre-allocate memory
self.actions = np.empty(self.size, dtype=np.int32)
self.rewards = np.empty(self.size, dtype=np.float32)
self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)
self.terminal_flags = np.empty(self.size, dtype=np.bool)
#Pre-allocate memory for the states and new states in a minibatch.
self.states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.new_states = np.empty((self.batch_size, self.agent_history_length, self.frame_height, self.frame_width), dtype=np.uint8)
self.indices = np.empty(self.batch_size, dtype=np.int32)
def add_experience(self, action, frame, reward, terminal):
'''
Args:
action: An integer encoded action
frame: One grayscale image of the frame
reward: reward that the agent recieved for performing the action
terminal: A bool stating whether the episode terminated
'''
if frame.shape != (self.frame_height, self.frame_width):
raise ValueError('Dimension of the frame is wrong!')
self.actions[self.current] = action
self.frames[self.current] = frame
self.rewards[self.current] = reward
self.terminal_flags[self.current] = terminal
self.count = max(self.count, self.current+1)
self.current = (self.current + 1) % self.size
def _get_state(self, index):
if self.count is 0:
raise ValueError("The replay memory is empty")
if index < self.agent_history_length - 1:
raise ValueError("Index must be minimum 3")
return self.frames[index-self.agent_history_length+1 : index+1]
def _get_valid_indices(self):
for i in range(self.batch_size):
while True:
index = random.randint(self.agent_history_length, self.count - 1)
if index < self.agent_history_length:
continue
if index >= self.current and index - self.agent_history_length <= self.current_state:
#checks for frames that is between old an new
|
if self.terminal_flags[index - self.agent_history_length : index].any():
#checks for done flag
continue
break
self.indices[i] = index
def get_minibatch(self):
'''Returns a minibatch of self.batch_size transitions'''
if self.count < self.agent_history_length:
raise ValueError("Not enough memories to get a mini batch")
self._get_valid_indices()
for i, idx in enumerate(self.indices):
self.states[i] = self._get_state(idx-1)
self.new_states[i] = self._get_state(idx)
return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], \
self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices ]
def cost(G, Y_hat, actions_taken_list, num_of_actions):
selected_action_values = tf.reduce_sum(Y_hat * tf.one_hot(actions_taken_list, num_of_actions, dtype='float64'), axis=1)
#return tf.reduce_sum(tf.math.square(G - selected_action_values))
return tf.reduce_sum(tf.compat.v1.losses.huber_loss (G, selected_action_values))
class DQN(Model):
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes):
super(DQN, self).__init__()
self.K = K
self.conv_seq = Sequential()
for num_output_filters, filter_size, pool_size in conv_layer_sizes:
self.conv_seq.add(layers.Conv2D(num_output_filters, filter_size, strides=pool_size, activation='relu', input_shape=(IM_SIZE, IM_SIZE, 4)))
self.flatten = layers.Flatten()
self.linear_sequence = Sequential()
for M in hidden_layer_sizes:
layer = layers.Dense(M)
self.linear_sequence.add(layer)
layer = layers.Dense(K)
self.linear_sequence.add(layer)
self.cost = cost
self.optimizer = tf.keras.optimizers.Adam(10e-4)
def call(self, X):
Z = X/255
Z = self.conv_seq(Z)
Z = self.flatten(Z)
Z = self.linear_sequence(Z)
return Z
def update(self, X, G, actions):
with tf.GradientTape() as tape:
Y_hat = self(X)
# Loss value for this minibatch
loss_value = cost(G, Y_hat, actions, self.K)
grads = tape.gradient(loss_value, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss_value
def copy_from(self, other):
weights = other.get_weights()
self.set_weights(weights)
def predict(self, X):
X = np.atleast_2d(X)
return self(X)
def sample_action(self, X, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([X])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
#sample experiences
states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch()
#calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
#update model
loss = model.update(states, targets, actions)
return loss
def play_one(env, total_t, experience_replay_buffer, model, target_model,
image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min):
'''
playing episodes
Arguments explained
total_t - total number of steps played so far
experience_replay_buffer - ReplayMemory object
'''
t0 = datetime.now()
#Reset the environment
obs = env.reset()
obs_small = image_transformer.transfrom(obs)
state = np.stack([obs_small] * 4, axis=2)
loss=None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done=False
while not done:
#check if it is time to update target network and then update it.
if (total_t % TARGET_UPDATE_PERIOD == 0 and total_t!=0) or total_t == 1:
print("copying")
target_model.copy_from(model)
print ("parameters copied to target network. total steps: ", total_t, "target update period: ", TARGET_UPDATE_PERIOD)
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
next_state = update_state(state, obs_small)
episode_reward += reward
#save the latest experience
experience_replay_buffer.add_experience(action, obs_small, reward, done)
#Train the Model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now()- t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
def smooth(x):
#last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i-99)
y[i] = float(x[start:(i+1)].sum())/(i - start + 1)
return y
def main():
#hyperparmeters and initializations
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_size = 32
num_episodes = 3500
total_t = 0
experience_replay_buffer = ReplayMemory()
episode_rewards = np.zeros(num_episodes)
epsilon_min = 0.1
epsilon = 1.0
epsilon_change = (epsilon - epsilon_min) / 500000
#Create environment
env = gym.envs.make("Breakout-v0")
#Create Models
model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
target_model = DQN(K, conv_layer_sizes, hidden_layer_sizes)
image_transformer = ImageTransformer()
print("Populating Experience Replay Buffer")
obs = env.reset()
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
obs_small = image_transformer.transfrom(obs)
experience_replay_buffer.add_experience(action, obs_small, reward, done)
if i % 5000 == 0:
print(i, "random actions taken")
if done:
obs = env.reset()
#Play a number of episodes and learn.
print("start playing")
t0 = datetime.now()
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(env, total_t, experience_replay_buffer, model, target_model, image_transformer, gamma, batch_size, epsilon, epsilon_change, epsilon_min)
episode_rewards[i] = episode_reward
last_100_avg = episode_rewards[max(0, i-100) : i + 1].mean()
print("Episode: ", i, "Duration: ", duration, "Num_steps: ", num_steps_in_episode, "Reward: ", episode_reward, "Training time per step: ", time_per_step, "Average_reward_last_100: ", last_100_avg, "Epsilon: ", epsilon)
print("Total duration", datetime.now() - t0)
model.save_weights('weights.h5')
#plot the smoothed returns
y = smooth(episode_rewards)
plt.plot(episode_rewards, label='org')
plt.plot(y, label='smoothed')
plt.legend()
plt.show()
if __name__ == "__main__":
main() | continue | conditional_block |
seasonal_cycle_utils.py | import glob
import sys
import cdms2 as cdms
import numpy as np
import MV2 as MV
import difflib
import scipy.stats as stats
global crunchy
import socket
if socket.gethostname().find("crunchy")>=0:
crunchy = True
else:
crunchy = False
#import peakfinder as pf
import cdtime,cdutil,genutil
from eofs.cdms import Eof
from eofs.multivariate.cdms import MultivariateEof
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import CMIP5_tools as cmip5
### Set classic Netcdf (ver 3)
cdms.setNetcdfShuffleFlag(0)
cdms.setNetcdfDeflateFlag(0)
cdms.setNetcdfDeflateLevelFlag(0)
from scipy import signal
def FourierPlot(tas):
"""Plot the Fourier power spectrum as a function of Fourier period (1/frequency)"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
plt.plot(1/freqs,mag)
def annual_cycle_dominant(tas):
"""Check to see whether the annual cycle is dominant"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
the_period = 1./np.abs(freqs[np.argmax(mag)])
return the_period
def get_dominant_cycle(tas):
"""For a 2D variable, calculate the period of the dominant Fourier mode everywhere """
nt,nlat,nlon = tas.shape
to_mask = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
to_mask[i,j]=annual_cycle_dominant(tas[:,i,j])
to_mask.setAxisList(tas.getAxisList()[1:])
return to_mask
def mask_cycle_subdominant(tas,period = 12):
#find the closest Fourier frequency to 1/12 months
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
i = np.argmin(closest)
cutoff = 1/freqs[i]
to_mask = get_dominant_cycle(tas)
return to_mask != cutoff
def get_cycle(tas,period=12,return_complex=False):
"""Return the Fourier magnitude and phase for a given period (default 12 months)"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
if return_complex:
return R[i],Im[i]
else:
mag = 2*np.sqrt(R**2+Im**2)
phase = np.arctan2(Im,R)
return mag[i],phase[i]
def get_tan_phase(tas,period=12):
|
def get_semiannual_cycle(tas):
"""Helper function: get the magnitude and phase for the mode with period 6 """
return get_cycle(tas,period=6)
def get_cycle_map(tas,period = 12):
ntime,nlat,nlon = tas.shape
AMP = MV.zeros((nlat,nlon))
PHI = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
mag,phase = get_cycle(tas[:,i,j],period=period)
AMP[i,j] = mag
# PHI[i,j]= phase_to_day(phase)
PHI[i,j] = phase
AMP.setAxis(0,tas.getLatitude())
AMP.setAxis(1,tas.getLongitude())
PHI.setAxis(0,tas.getLatitude())
PHI.setAxis(1,tas.getLongitude())
return AMP,PHI
def get_zonal_cycle(tas,period=12.):
if "longitude" in tas.getAxisIds():
tas = cdutil.averager(tas,axis='x')
AMP = MV.zeros(tas.shape[1])
PHI = MV.zeros(tas.shape[1])
for j in range(tas.shape[1]):
mag,phase = get_cycle(tas[:,j],period=period)
AMP[j] = mag
PHI[j] = phase
AMP.setAxis(0,tas.getLatitude())
PHI.setAxis(0,tas.getLatitude())
return AMP,PHI
def get_annual_cycle_trends(X):
"""Given a variable X, get amplitude gain and phase lag time series """
nt,nlat,nlon=X.shape
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax,X.getLatitude(),X.getLongitude()]
nyears = nt/12
yrs = X.reshape((nyears,12,nlat,nlon))
for i in [2,3]:
yrs.setAxis(i,X.getAxis(i-1))
AMP = MV.zeros((nyears,nlat,nlon))
PHI = MV.zeros((nyears,nlat,nlon))
for y in range(nyears):
amp,phi = get_cycle_map(yrs[y])
AMP[y] = amp
PHI[y] = phi
amp_solar,phase_solar = get_insolation()
AMP = AMP/amp_solar
AMP.setAxisList(axlist)
PHI.setAxisList(axlist)
PHI = correct_phase(PHI)
return AMP,PHI
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def subtract_months(P, reference):
""" Write P in terms of days relative to solar insolation phase. Return "forward" phases (leads solar insolation) and "backward" (lags solar insolation)"""
ref = phase_to_day(reference)
phase = phase_to_day(P)
if ref < phase:
backward = phase - ref #Ref lags phase
forward = -ref - (365 - phase) #Move into the next year
else:
forward = phase + 365 - ref
backward = phase - ref
return forward, backward
import sys
sys
from Helper import cdms_clone,get_plottable_time,get_orientation,get_slopes
def get_extremum(P,func = np.argmin):
return np.unravel_index(func(P),P.shape)
def merge(forward,backward):
X = np.ma.zeros(forward.shape)
mask = np.abs(forward) > np.abs(backward)
I = np.where(mask.flatten())[0]
J = np.where(~mask.flatten())[0]
Xf = X.flatten()
Xf[I] = np.array(backward).flatten()[I]
Xf[J] = np.array(forward).flatten()[J]
Xt = Xf.reshape(X.shape)
if 1:
return Xt
Diff = np.diff(Xf.reshape(X.shape),axis=0)
bad = np.where(np.abs(Diff)>365./2)
for ibad in range(len(bad[0])):
i= bad[0][::2][ibad]+1
j = bad[1][::2][ibad]
k = bad[2][::2][ibad]
if Xt[i,j,k] == forward[i,j,k]:
Xt[i,j,k] = backward[i,j,k]
else:
Xt[i,j,k] = forward[i,j,k]
return Xt
def check_for_badness(t):
Diff = np.diff(t)
if len(np.where(Diff>365/2.)[0]) >0:
return True
else:
return False
def where_bad(P):
nt,nlat,nlon = P.shape
X = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
X[i,j] = check_for_badness(P[:,i,j])
return X
def fix_bad(t,debug = False,discont=365./2.):
bad = t.copy()
too_big = []
too_small = []
smallest = 0
for i in range(len(t)):
if (bad[i] - bad[smallest]) >= discont:
if np.abs(365 - bad[i]) <= 365: #Is it an allowed value?
if np.abs( (bad[i]-365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = bad[i] - 365.
too_big += [i]
elif (bad[i] - bad[smallest]) <= -discont:
if np.abs(bad[i]+ 365) <= 365: #Is it an allowed value?
if np.abs( (bad[i]+365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = 365 + bad[i]
too_small += [i]
#Need to ensure that no points in the time series are more than discont away from each other
if debug:
return bad ,np.array(too_big),np.array(too_small)
else:
return bad
def fix_all_bad(P):
Fix = P.copy()
nt,nlat,nlon = P.shape
for j in range(nlat):
for k in range(nlon):
time_series = Fix[:,j,k]
if check_for_badness(time_series):
Fix[:,j,k] = fix_bad(time_series)
return Fix
def correct_phase(P,reference = None):
if reference is None:
amp_solar, phase_solar = get_insolation()
if phase_solar.shape != P.shape:
grid = P.getGrid()
phase_solar = phase_solar.regrid(grid,regridTool='regrid2')
reference = phase_solar
#print "got insolation"
Convert2Day = np.vectorize(subtract_months)
#print "vectorized"
forward,backward = Convert2Day(P,reference)
#print "converted to days"
Merged = merge(forward,backward)
#print "Merged"
Pnew = fix_all_bad(Merged)
#print "fixed discontinuities"
Pnew = MV.array(Pnew)
Pnew.setAxisList(P.getAxisList())
return Pnew
def phase_angle_form(obs,period=12,anom=False):
mag,phase = get_cycle(obs,period = period)
themean = np.ma.average(obs)
if anom:
themean = 0
###TEST
# mag=2*mag
return mag*np.cos(2*np.pi/period*np.arange(len(obs))+phase)+themean
def var_expl_by_annual_cycle(obs,period = 12,detrend = True):
# mag,phase = get_cycle(obs)
if detrend:
obs = signal.detrend(obs)
recon = phase_angle_form(obs,period=period,anom=True)
return np.corrcoef(recon,obs)[0,1]**2
def variance_map(X,period = 12,detrend = True):
if len(X.shape )==3:
nt,nlat,nlon = X.shape
V = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
if X.mask.shape != ():
if len(X[:,i,j]) == len(np.where(X.mask[:,i,j])[0]):
V[i,j] = 1.e20
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
elif len(X.shape)==2:
nt,nlat = X.shape
V = MV.zeros((nlat))
for i in range(nlat):
V[i]=var_expl_by_annual_cycle(X[:,i],period = period,detrend=detrend)
V = MV.masked_where(V>1.e10,V)
V.setAxisList(X.getAxisList()[1:])
return V
##### Vectorizing stuff #####
def broadcast(fvec):
def inner(vec, *args, **kwargs):
if len(vec.shape) > 1:
return np.array([inner(row, *args, **kwargs) for row in vec])
else:
return fvec(vec, *args, **kwargs)
return inner
def fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
elif len(X.shape)==2:
nt,nlat = X.shape
has_models=False
elif len(X.shape)==1:
nt, = X.shape
has_models = False
if zonal_average:
if 'lon' in X.getAxisIds():
X = cdutil.averager(X,axis='x')
nyears = nt/12
newshape = (nyears, 12) + X.shape[1:]
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
return R,P
def decade_fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False,return_Pdays=True):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
if zonal_average:
X = cdutil.averager(X,axis='x')
nyears = nt/60
newshape = (nyears, 60) + X.shape[1:]
print newshape
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::60]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
print "got new time"
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
if return_Pdays is False:
return R,P
#Pnew = P.copy()
#Pold = P.copy()
Pnew = MV.zeros(P.shape)
if has_models:
for mod_i in range(nmod):
chunk = P[:,:,:,mod_i]
Pnew[:,:,:,mod_i] = correct_phase(chunk)
else:
chunk = P[:,:,:]
Pnew = correct_phase(chunk)
Pnew = MV.array(Pnew)
Pnew.setAxisList(axlist)
return R,P,Pnew
def mask_data(data,basicmask):
if type(basicmask) != type(np.array([])):
basicmask = basicmask.asma()
dim = len(data.shape)
if dim == 2:
mask= basicmask
elif dim ==3:
nt = data.shape[0]
mask= np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)
elif dim ==4:
nmod,nt,nx,ny = data.shape
mask= np.repeat(np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)[np.newaxis],nmod,axis=0)
return MV.masked_where(mask,data)
def get_variance_maps_models(variable="pr",models=None,cmip_dir = None,period=12):
""" find latitudes in each model where the annual cycle is not dominant"""
if models is None:
f = cdms.open("/work/marvel1/SEASONAL/MMA/cmip5.ZONALMMA.historical-rcp85.rip.mo.atm.Amon.pr.ver-1.AmpPhase.nc")
phase = f("phase")
models = eval(phase.getAxis(0).models)
f.close()
if cmip_dir is None:
cmip_dir = "/work/cmip5/historical-rcp85/atm/mo/"+variable+"/"
fobs = cdms.open("/work/marvel1/SEASONAL/OBS/GPCP.precip.mon.mean.nc")
the_grid = fobs("precip").getGrid()
nlat,nlon=the_grid.shape
fobs.close()
VarianceMaps = np.zeros((len(models),nlat))+1.e20
counter=0
for model in models:
print model
try:
fname = sorted(glob.glob(cmip_dir+"*."+model+".*"))[0]
fp = cdms.open(fname)
prtest = fp(variable,time=("1979-1-1","2014-12-31")).regrid(the_grid,regridTool='regrid2')
przonal = cdutil.averager(prtest,axis='x')
dom = variance_map(przonal,period=period)
VarianceMaps[counter]=dom
fp.close()
counter+=1
except:
continue
modax = cdms.createAxis(range(len(models)))
modax.id = "model"
modax.models = str(models)
VarianceMaps = MV.array(VarianceMaps)
VarianceMaps.setAxis(0,modax)
VarianceMaps.setAxis(1,the_grid.getLatitude())
return MV.masked_where(VarianceMaps>1.e10,VarianceMaps)
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def phase_anomaly(phase,reference):
lead = phase_to_day(phase-reference)
lag = -1*phase_to_day(reference - phase)
LL = np.array([lead,lag])
i = np.argmin(np.abs(LL))
return LL[i]
def get_phase_anomalies(P,historical=True):
if historical:
reference = stats.circmean(P(time=('1996-1-1','2009-12-31')),axis=0)
else:
reference = stats.circmean(P,axis=0)
pa = np.vectorize(phase_anomaly)
PANOM = MV.zeros(P.shape)
if len(P.shape)==3:
nt,nlat,nlon = P.shape
for i in range(nlat):
for j in range(nlon):
PANOM[:,i,j] = pa(P[:,i,j],reference[i,j])
else:
nt,nlat = P.shape
for i in range(nlat):
PANOM[:,i] = pa(P[:,i],reference[i])
PANOM.setAxisList(P.getAxisList())
return MV.masked_where(np.isnan(PANOM),PANOM)
def phase_to_month(P):
## O is jan 1, 6 is july 1, 11 is dec 1
return cmip5.cdms_clone((12*(1-P/(2*np.pi))) %12,P)
def phase_climatology(P):
return cmip5.cdms_clone(stats.circmean(P,low=-np.pi,high=np.pi,axis=0),P[0])
| """Return the tangent of the phase associated with Fourier mode"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
phase = Im/R
return phase[i] | identifier_body |
seasonal_cycle_utils.py | import glob
import sys
import cdms2 as cdms
import numpy as np
import MV2 as MV
import difflib
import scipy.stats as stats
global crunchy
import socket
if socket.gethostname().find("crunchy")>=0:
crunchy = True
else:
crunchy = False
#import peakfinder as pf
import cdtime,cdutil,genutil
from eofs.cdms import Eof
from eofs.multivariate.cdms import MultivariateEof
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import CMIP5_tools as cmip5
### Set classic Netcdf (ver 3)
cdms.setNetcdfShuffleFlag(0)
cdms.setNetcdfDeflateFlag(0)
cdms.setNetcdfDeflateLevelFlag(0)
from scipy import signal
def FourierPlot(tas):
"""Plot the Fourier power spectrum as a function of Fourier period (1/frequency)"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
plt.plot(1/freqs,mag)
def annual_cycle_dominant(tas):
"""Check to see whether the annual cycle is dominant"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
the_period = 1./np.abs(freqs[np.argmax(mag)])
return the_period
def get_dominant_cycle(tas):
"""For a 2D variable, calculate the period of the dominant Fourier mode everywhere """
nt,nlat,nlon = tas.shape
to_mask = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
to_mask[i,j]=annual_cycle_dominant(tas[:,i,j])
to_mask.setAxisList(tas.getAxisList()[1:])
return to_mask
def mask_cycle_subdominant(tas,period = 12):
#find the closest Fourier frequency to 1/12 months
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
i = np.argmin(closest)
cutoff = 1/freqs[i]
to_mask = get_dominant_cycle(tas)
return to_mask != cutoff
def get_cycle(tas,period=12,return_complex=False):
"""Return the Fourier magnitude and phase for a given period (default 12 months)"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
if return_complex:
return R[i],Im[i]
else:
mag = 2*np.sqrt(R**2+Im**2)
phase = np.arctan2(Im,R)
return mag[i],phase[i]
def get_tan_phase(tas,period=12):
"""Return the tangent of the phase associated with Fourier mode"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
phase = Im/R
return phase[i]
def get_semiannual_cycle(tas):
"""Helper function: get the magnitude and phase for the mode with period 6 """
return get_cycle(tas,period=6)
def get_cycle_map(tas,period = 12):
ntime,nlat,nlon = tas.shape
AMP = MV.zeros((nlat,nlon))
PHI = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
mag,phase = get_cycle(tas[:,i,j],period=period)
AMP[i,j] = mag
# PHI[i,j]= phase_to_day(phase)
PHI[i,j] = phase
AMP.setAxis(0,tas.getLatitude())
AMP.setAxis(1,tas.getLongitude())
PHI.setAxis(0,tas.getLatitude())
PHI.setAxis(1,tas.getLongitude())
return AMP,PHI
def get_zonal_cycle(tas,period=12.):
if "longitude" in tas.getAxisIds():
tas = cdutil.averager(tas,axis='x')
AMP = MV.zeros(tas.shape[1])
PHI = MV.zeros(tas.shape[1])
for j in range(tas.shape[1]):
mag,phase = get_cycle(tas[:,j],period=period)
AMP[j] = mag
PHI[j] = phase
AMP.setAxis(0,tas.getLatitude())
PHI.setAxis(0,tas.getLatitude())
return AMP,PHI
def get_annual_cycle_trends(X):
"""Given a variable X, get amplitude gain and phase lag time series """
nt,nlat,nlon=X.shape
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax,X.getLatitude(),X.getLongitude()]
nyears = nt/12
yrs = X.reshape((nyears,12,nlat,nlon))
for i in [2,3]:
yrs.setAxis(i,X.getAxis(i-1))
AMP = MV.zeros((nyears,nlat,nlon))
PHI = MV.zeros((nyears,nlat,nlon))
for y in range(nyears):
amp,phi = get_cycle_map(yrs[y])
AMP[y] = amp
PHI[y] = phi
amp_solar,phase_solar = get_insolation()
AMP = AMP/amp_solar
AMP.setAxisList(axlist)
PHI.setAxisList(axlist)
PHI = correct_phase(PHI)
return AMP,PHI
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def subtract_months(P, reference):
""" Write P in terms of days relative to solar insolation phase. Return "forward" phases (leads solar insolation) and "backward" (lags solar insolation)"""
ref = phase_to_day(reference)
phase = phase_to_day(P)
if ref < phase:
backward = phase - ref #Ref lags phase
forward = -ref - (365 - phase) #Move into the next year
else:
forward = phase + 365 - ref
backward = phase - ref
return forward, backward
import sys
sys
from Helper import cdms_clone,get_plottable_time,get_orientation,get_slopes
def get_extremum(P,func = np.argmin):
return np.unravel_index(func(P),P.shape)
def merge(forward,backward):
X = np.ma.zeros(forward.shape)
mask = np.abs(forward) > np.abs(backward)
I = np.where(mask.flatten())[0]
J = np.where(~mask.flatten())[0]
Xf = X.flatten()
Xf[I] = np.array(backward).flatten()[I]
Xf[J] = np.array(forward).flatten()[J]
Xt = Xf.reshape(X.shape)
if 1:
return Xt
Diff = np.diff(Xf.reshape(X.shape),axis=0)
bad = np.where(np.abs(Diff)>365./2)
for ibad in range(len(bad[0])):
i= bad[0][::2][ibad]+1
j = bad[1][::2][ibad]
k = bad[2][::2][ibad]
if Xt[i,j,k] == forward[i,j,k]:
Xt[i,j,k] = backward[i,j,k]
else:
Xt[i,j,k] = forward[i,j,k]
return Xt
def check_for_badness(t):
Diff = np.diff(t)
if len(np.where(Diff>365/2.)[0]) >0:
return True
else:
return False
def where_bad(P):
nt,nlat,nlon = P.shape
X = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
X[i,j] = check_for_badness(P[:,i,j])
return X
def fix_bad(t,debug = False,discont=365./2.):
bad = t.copy()
too_big = []
too_small = []
smallest = 0
for i in range(len(t)):
if (bad[i] - bad[smallest]) >= discont:
if np.abs(365 - bad[i]) <= 365: #Is it an allowed value?
if np.abs( (bad[i]-365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = bad[i] - 365.
too_big += [i]
elif (bad[i] - bad[smallest]) <= -discont:
if np.abs(bad[i]+ 365) <= 365: #Is it an allowed value?
if np.abs( (bad[i]+365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = 365 + bad[i]
too_small += [i]
#Need to ensure that no points in the time series are more than discont away from each other
if debug:
return bad ,np.array(too_big),np.array(too_small)
else:
return bad
def fix_all_bad(P):
Fix = P.copy()
nt,nlat,nlon = P.shape
for j in range(nlat):
for k in range(nlon):
time_series = Fix[:,j,k]
if check_for_badness(time_series):
Fix[:,j,k] = fix_bad(time_series)
return Fix
def correct_phase(P,reference = None):
if reference is None:
amp_solar, phase_solar = get_insolation()
if phase_solar.shape != P.shape:
grid = P.getGrid()
phase_solar = phase_solar.regrid(grid,regridTool='regrid2')
reference = phase_solar
#print "got insolation"
Convert2Day = np.vectorize(subtract_months)
#print "vectorized"
forward,backward = Convert2Day(P,reference)
#print "converted to days"
Merged = merge(forward,backward)
#print "Merged"
Pnew = fix_all_bad(Merged)
#print "fixed discontinuities"
Pnew = MV.array(Pnew)
Pnew.setAxisList(P.getAxisList())
return Pnew
def phase_angle_form(obs,period=12,anom=False):
mag,phase = get_cycle(obs,period = period)
themean = np.ma.average(obs)
if anom:
themean = 0
###TEST
# mag=2*mag
return mag*np.cos(2*np.pi/period*np.arange(len(obs))+phase)+themean
def var_expl_by_annual_cycle(obs,period = 12,detrend = True):
# mag,phase = get_cycle(obs)
if detrend:
obs = signal.detrend(obs)
recon = phase_angle_form(obs,period=period,anom=True)
return np.corrcoef(recon,obs)[0,1]**2
def variance_map(X,period = 12,detrend = True):
if len(X.shape )==3:
nt,nlat,nlon = X.shape
V = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
if X.mask.shape != ():
if len(X[:,i,j]) == len(np.where(X.mask[:,i,j])[0]):
V[i,j] = 1.e20
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
elif len(X.shape)==2:
nt,nlat = X.shape
V = MV.zeros((nlat))
for i in range(nlat):
V[i]=var_expl_by_annual_cycle(X[:,i],period = period,detrend=detrend)
V = MV.masked_where(V>1.e10,V)
V.setAxisList(X.getAxisList()[1:])
return V
##### Vectorizing stuff #####
def broadcast(fvec):
def inner(vec, *args, **kwargs):
if len(vec.shape) > 1:
return np.array([inner(row, *args, **kwargs) for row in vec])
else:
return fvec(vec, *args, **kwargs)
return inner
def fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
elif len(X.shape)==2:
nt,nlat = X.shape
has_models=False
elif len(X.shape)==1:
|
if zonal_average:
if 'lon' in X.getAxisIds():
X = cdutil.averager(X,axis='x')
nyears = nt/12
newshape = (nyears, 12) + X.shape[1:]
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
return R,P
def decade_fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False,return_Pdays=True):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
if zonal_average:
X = cdutil.averager(X,axis='x')
nyears = nt/60
newshape = (nyears, 60) + X.shape[1:]
print newshape
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::60]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
print "got new time"
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
if return_Pdays is False:
return R,P
#Pnew = P.copy()
#Pold = P.copy()
Pnew = MV.zeros(P.shape)
if has_models:
for mod_i in range(nmod):
chunk = P[:,:,:,mod_i]
Pnew[:,:,:,mod_i] = correct_phase(chunk)
else:
chunk = P[:,:,:]
Pnew = correct_phase(chunk)
Pnew = MV.array(Pnew)
Pnew.setAxisList(axlist)
return R,P,Pnew
def mask_data(data,basicmask):
if type(basicmask) != type(np.array([])):
basicmask = basicmask.asma()
dim = len(data.shape)
if dim == 2:
mask= basicmask
elif dim ==3:
nt = data.shape[0]
mask= np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)
elif dim ==4:
nmod,nt,nx,ny = data.shape
mask= np.repeat(np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)[np.newaxis],nmod,axis=0)
return MV.masked_where(mask,data)
def get_variance_maps_models(variable="pr",models=None,cmip_dir = None,period=12):
""" find latitudes in each model where the annual cycle is not dominant"""
if models is None:
f = cdms.open("/work/marvel1/SEASONAL/MMA/cmip5.ZONALMMA.historical-rcp85.rip.mo.atm.Amon.pr.ver-1.AmpPhase.nc")
phase = f("phase")
models = eval(phase.getAxis(0).models)
f.close()
if cmip_dir is None:
cmip_dir = "/work/cmip5/historical-rcp85/atm/mo/"+variable+"/"
fobs = cdms.open("/work/marvel1/SEASONAL/OBS/GPCP.precip.mon.mean.nc")
the_grid = fobs("precip").getGrid()
nlat,nlon=the_grid.shape
fobs.close()
VarianceMaps = np.zeros((len(models),nlat))+1.e20
counter=0
for model in models:
print model
try:
fname = sorted(glob.glob(cmip_dir+"*."+model+".*"))[0]
fp = cdms.open(fname)
prtest = fp(variable,time=("1979-1-1","2014-12-31")).regrid(the_grid,regridTool='regrid2')
przonal = cdutil.averager(prtest,axis='x')
dom = variance_map(przonal,period=period)
VarianceMaps[counter]=dom
fp.close()
counter+=1
except:
continue
modax = cdms.createAxis(range(len(models)))
modax.id = "model"
modax.models = str(models)
VarianceMaps = MV.array(VarianceMaps)
VarianceMaps.setAxis(0,modax)
VarianceMaps.setAxis(1,the_grid.getLatitude())
return MV.masked_where(VarianceMaps>1.e10,VarianceMaps)
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def phase_anomaly(phase,reference):
lead = phase_to_day(phase-reference)
lag = -1*phase_to_day(reference - phase)
LL = np.array([lead,lag])
i = np.argmin(np.abs(LL))
return LL[i]
def get_phase_anomalies(P,historical=True):
if historical:
reference = stats.circmean(P(time=('1996-1-1','2009-12-31')),axis=0)
else:
reference = stats.circmean(P,axis=0)
pa = np.vectorize(phase_anomaly)
PANOM = MV.zeros(P.shape)
if len(P.shape)==3:
nt,nlat,nlon = P.shape
for i in range(nlat):
for j in range(nlon):
PANOM[:,i,j] = pa(P[:,i,j],reference[i,j])
else:
nt,nlat = P.shape
for i in range(nlat):
PANOM[:,i] = pa(P[:,i],reference[i])
PANOM.setAxisList(P.getAxisList())
return MV.masked_where(np.isnan(PANOM),PANOM)
def phase_to_month(P):
## O is jan 1, 6 is july 1, 11 is dec 1
return cmip5.cdms_clone((12*(1-P/(2*np.pi))) %12,P)
def phase_climatology(P):
return cmip5.cdms_clone(stats.circmean(P,low=-np.pi,high=np.pi,axis=0),P[0])
| nt, = X.shape
has_models = False | conditional_block |
seasonal_cycle_utils.py | import glob
import sys
import cdms2 as cdms
import numpy as np
import MV2 as MV
import difflib
import scipy.stats as stats
global crunchy
import socket
if socket.gethostname().find("crunchy")>=0:
crunchy = True
else:
crunchy = False
#import peakfinder as pf
import cdtime,cdutil,genutil
from eofs.cdms import Eof
from eofs.multivariate.cdms import MultivariateEof
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import CMIP5_tools as cmip5
### Set classic Netcdf (ver 3)
cdms.setNetcdfShuffleFlag(0)
cdms.setNetcdfDeflateFlag(0)
cdms.setNetcdfDeflateLevelFlag(0)
from scipy import signal
def FourierPlot(tas):
"""Plot the Fourier power spectrum as a function of Fourier period (1/frequency)"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
plt.plot(1/freqs,mag)
def annual_cycle_dominant(tas):
"""Check to see whether the annual cycle is dominant"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
the_period = 1./np.abs(freqs[np.argmax(mag)])
return the_period
def get_dominant_cycle(tas):
"""For a 2D variable, calculate the period of the dominant Fourier mode everywhere """
nt,nlat,nlon = tas.shape
to_mask = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
to_mask[i,j]=annual_cycle_dominant(tas[:,i,j])
to_mask.setAxisList(tas.getAxisList()[1:])
return to_mask
def mask_cycle_subdominant(tas,period = 12):
#find the closest Fourier frequency to 1/12 months
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
i = np.argmin(closest)
cutoff = 1/freqs[i]
to_mask = get_dominant_cycle(tas)
return to_mask != cutoff
def get_cycle(tas,period=12,return_complex=False):
"""Return the Fourier magnitude and phase for a given period (default 12 months)"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
if return_complex:
return R[i],Im[i]
else:
mag = 2*np.sqrt(R**2+Im**2)
phase = np.arctan2(Im,R)
return mag[i],phase[i]
def get_tan_phase(tas,period=12):
"""Return the tangent of the phase associated with Fourier mode"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
phase = Im/R
return phase[i]
def get_semiannual_cycle(tas):
"""Helper function: get the magnitude and phase for the mode with period 6 """
return get_cycle(tas,period=6)
def get_cycle_map(tas,period = 12):
ntime,nlat,nlon = tas.shape
AMP = MV.zeros((nlat,nlon))
PHI = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
mag,phase = get_cycle(tas[:,i,j],period=period)
AMP[i,j] = mag
# PHI[i,j]= phase_to_day(phase)
PHI[i,j] = phase
AMP.setAxis(0,tas.getLatitude())
AMP.setAxis(1,tas.getLongitude())
PHI.setAxis(0,tas.getLatitude())
PHI.setAxis(1,tas.getLongitude())
return AMP,PHI
def get_zonal_cycle(tas,period=12.):
if "longitude" in tas.getAxisIds():
tas = cdutil.averager(tas,axis='x')
AMP = MV.zeros(tas.shape[1])
PHI = MV.zeros(tas.shape[1])
for j in range(tas.shape[1]):
mag,phase = get_cycle(tas[:,j],period=period)
AMP[j] = mag
PHI[j] = phase
AMP.setAxis(0,tas.getLatitude())
PHI.setAxis(0,tas.getLatitude())
return AMP,PHI
def get_annual_cycle_trends(X):
"""Given a variable X, get amplitude gain and phase lag time series """
nt,nlat,nlon=X.shape
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax,X.getLatitude(),X.getLongitude()]
nyears = nt/12
yrs = X.reshape((nyears,12,nlat,nlon))
for i in [2,3]:
yrs.setAxis(i,X.getAxis(i-1))
AMP = MV.zeros((nyears,nlat,nlon))
PHI = MV.zeros((nyears,nlat,nlon))
for y in range(nyears):
amp,phi = get_cycle_map(yrs[y])
AMP[y] = amp
PHI[y] = phi
amp_solar,phase_solar = get_insolation()
AMP = AMP/amp_solar
AMP.setAxisList(axlist)
PHI.setAxisList(axlist)
PHI = correct_phase(PHI)
return AMP,PHI
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def subtract_months(P, reference):
""" Write P in terms of days relative to solar insolation phase. Return "forward" phases (leads solar insolation) and "backward" (lags solar insolation)"""
ref = phase_to_day(reference)
phase = phase_to_day(P)
if ref < phase:
backward = phase - ref #Ref lags phase
forward = -ref - (365 - phase) #Move into the next year
else:
forward = phase + 365 - ref
backward = phase - ref
return forward, backward
import sys
sys
from Helper import cdms_clone,get_plottable_time,get_orientation,get_slopes
def get_extremum(P,func = np.argmin):
return np.unravel_index(func(P),P.shape)
def merge(forward,backward):
X = np.ma.zeros(forward.shape)
mask = np.abs(forward) > np.abs(backward)
I = np.where(mask.flatten())[0]
J = np.where(~mask.flatten())[0]
Xf = X.flatten()
Xf[I] = np.array(backward).flatten()[I]
Xf[J] = np.array(forward).flatten()[J]
Xt = Xf.reshape(X.shape)
if 1:
return Xt
Diff = np.diff(Xf.reshape(X.shape),axis=0)
bad = np.where(np.abs(Diff)>365./2)
for ibad in range(len(bad[0])):
i= bad[0][::2][ibad]+1
j = bad[1][::2][ibad]
k = bad[2][::2][ibad]
if Xt[i,j,k] == forward[i,j,k]:
Xt[i,j,k] = backward[i,j,k]
else:
Xt[i,j,k] = forward[i,j,k]
return Xt
def check_for_badness(t):
Diff = np.diff(t)
if len(np.where(Diff>365/2.)[0]) >0:
return True
else:
return False
def where_bad(P):
nt,nlat,nlon = P.shape
X = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
X[i,j] = check_for_badness(P[:,i,j])
return X
def fix_bad(t,debug = False,discont=365./2.):
bad = t.copy()
too_big = []
too_small = []
smallest = 0
for i in range(len(t)):
if (bad[i] - bad[smallest]) >= discont:
if np.abs(365 - bad[i]) <= 365: #Is it an allowed value?
if np.abs( (bad[i]-365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = bad[i] - 365.
too_big += [i]
elif (bad[i] - bad[smallest]) <= -discont:
if np.abs(bad[i]+ 365) <= 365: #Is it an allowed value?
if np.abs( (bad[i]+365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = 365 + bad[i]
too_small += [i]
#Need to ensure that no points in the time series are more than discont away from each other
if debug:
return bad ,np.array(too_big),np.array(too_small)
else:
return bad
def fix_all_bad(P):
Fix = P.copy()
nt,nlat,nlon = P.shape
for j in range(nlat):
for k in range(nlon):
time_series = Fix[:,j,k]
if check_for_badness(time_series):
Fix[:,j,k] = fix_bad(time_series)
return Fix
def correct_phase(P,reference = None):
if reference is None:
amp_solar, phase_solar = get_insolation()
if phase_solar.shape != P.shape:
grid = P.getGrid()
phase_solar = phase_solar.regrid(grid,regridTool='regrid2')
reference = phase_solar
#print "got insolation"
Convert2Day = np.vectorize(subtract_months)
#print "vectorized"
forward,backward = Convert2Day(P,reference)
#print "converted to days"
Merged = merge(forward,backward)
#print "Merged"
Pnew = fix_all_bad(Merged)
#print "fixed discontinuities"
Pnew = MV.array(Pnew)
Pnew.setAxisList(P.getAxisList())
return Pnew
def phase_angle_form(obs,period=12,anom=False):
mag,phase = get_cycle(obs,period = period)
themean = np.ma.average(obs)
if anom:
themean = 0
###TEST
# mag=2*mag
return mag*np.cos(2*np.pi/period*np.arange(len(obs))+phase)+themean
def var_expl_by_annual_cycle(obs,period = 12,detrend = True):
# mag,phase = get_cycle(obs)
if detrend:
obs = signal.detrend(obs)
recon = phase_angle_form(obs,period=period,anom=True)
return np.corrcoef(recon,obs)[0,1]**2
def variance_map(X,period = 12,detrend = True):
if len(X.shape )==3:
nt,nlat,nlon = X.shape
V = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
if X.mask.shape != ():
if len(X[:,i,j]) == len(np.where(X.mask[:,i,j])[0]):
V[i,j] = 1.e20
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
elif len(X.shape)==2:
nt,nlat = X.shape
V = MV.zeros((nlat))
for i in range(nlat):
V[i]=var_expl_by_annual_cycle(X[:,i],period = period,detrend=detrend)
V = MV.masked_where(V>1.e10,V)
V.setAxisList(X.getAxisList()[1:])
return V
##### Vectorizing stuff #####
def broadcast(fvec):
def inner(vec, *args, **kwargs):
if len(vec.shape) > 1:
return np.array([inner(row, *args, **kwargs) for row in vec])
else:
return fvec(vec, *args, **kwargs)
return inner
def | (X,debug=False,semiann=False,zonal_average=False):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
elif len(X.shape)==2:
nt,nlat = X.shape
has_models=False
elif len(X.shape)==1:
nt, = X.shape
has_models = False
if zonal_average:
if 'lon' in X.getAxisIds():
X = cdutil.averager(X,axis='x')
nyears = nt/12
newshape = (nyears, 12) + X.shape[1:]
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
return R,P
def decade_fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False,return_Pdays=True):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
if zonal_average:
X = cdutil.averager(X,axis='x')
nyears = nt/60
newshape = (nyears, 60) + X.shape[1:]
print newshape
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::60]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
print "got new time"
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
if return_Pdays is False:
return R,P
#Pnew = P.copy()
#Pold = P.copy()
Pnew = MV.zeros(P.shape)
if has_models:
for mod_i in range(nmod):
chunk = P[:,:,:,mod_i]
Pnew[:,:,:,mod_i] = correct_phase(chunk)
else:
chunk = P[:,:,:]
Pnew = correct_phase(chunk)
Pnew = MV.array(Pnew)
Pnew.setAxisList(axlist)
return R,P,Pnew
def mask_data(data,basicmask):
if type(basicmask) != type(np.array([])):
basicmask = basicmask.asma()
dim = len(data.shape)
if dim == 2:
mask= basicmask
elif dim ==3:
nt = data.shape[0]
mask= np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)
elif dim ==4:
nmod,nt,nx,ny = data.shape
mask= np.repeat(np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)[np.newaxis],nmod,axis=0)
return MV.masked_where(mask,data)
def get_variance_maps_models(variable="pr",models=None,cmip_dir = None,period=12):
""" find latitudes in each model where the annual cycle is not dominant"""
if models is None:
f = cdms.open("/work/marvel1/SEASONAL/MMA/cmip5.ZONALMMA.historical-rcp85.rip.mo.atm.Amon.pr.ver-1.AmpPhase.nc")
phase = f("phase")
models = eval(phase.getAxis(0).models)
f.close()
if cmip_dir is None:
cmip_dir = "/work/cmip5/historical-rcp85/atm/mo/"+variable+"/"
fobs = cdms.open("/work/marvel1/SEASONAL/OBS/GPCP.precip.mon.mean.nc")
the_grid = fobs("precip").getGrid()
nlat,nlon=the_grid.shape
fobs.close()
VarianceMaps = np.zeros((len(models),nlat))+1.e20
counter=0
for model in models:
print model
try:
fname = sorted(glob.glob(cmip_dir+"*."+model+".*"))[0]
fp = cdms.open(fname)
prtest = fp(variable,time=("1979-1-1","2014-12-31")).regrid(the_grid,regridTool='regrid2')
przonal = cdutil.averager(prtest,axis='x')
dom = variance_map(przonal,period=period)
VarianceMaps[counter]=dom
fp.close()
counter+=1
except:
continue
modax = cdms.createAxis(range(len(models)))
modax.id = "model"
modax.models = str(models)
VarianceMaps = MV.array(VarianceMaps)
VarianceMaps.setAxis(0,modax)
VarianceMaps.setAxis(1,the_grid.getLatitude())
return MV.masked_where(VarianceMaps>1.e10,VarianceMaps)
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def phase_anomaly(phase,reference):
lead = phase_to_day(phase-reference)
lag = -1*phase_to_day(reference - phase)
LL = np.array([lead,lag])
i = np.argmin(np.abs(LL))
return LL[i]
def get_phase_anomalies(P,historical=True):
if historical:
reference = stats.circmean(P(time=('1996-1-1','2009-12-31')),axis=0)
else:
reference = stats.circmean(P,axis=0)
pa = np.vectorize(phase_anomaly)
PANOM = MV.zeros(P.shape)
if len(P.shape)==3:
nt,nlat,nlon = P.shape
for i in range(nlat):
for j in range(nlon):
PANOM[:,i,j] = pa(P[:,i,j],reference[i,j])
else:
nt,nlat = P.shape
for i in range(nlat):
PANOM[:,i] = pa(P[:,i],reference[i])
PANOM.setAxisList(P.getAxisList())
return MV.masked_where(np.isnan(PANOM),PANOM)
def phase_to_month(P):
## O is jan 1, 6 is july 1, 11 is dec 1
return cmip5.cdms_clone((12*(1-P/(2*np.pi))) %12,P)
def phase_climatology(P):
return cmip5.cdms_clone(stats.circmean(P,low=-np.pi,high=np.pi,axis=0),P[0])
| fast_annual_cycle | identifier_name |
seasonal_cycle_utils.py | import glob
import sys
import cdms2 as cdms
import numpy as np
import MV2 as MV
import difflib
import scipy.stats as stats
global crunchy
import socket
if socket.gethostname().find("crunchy")>=0:
crunchy = True
else:
crunchy = False
#import peakfinder as pf
import cdtime,cdutil,genutil
from eofs.cdms import Eof
from eofs.multivariate.cdms import MultivariateEof
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import CMIP5_tools as cmip5
### Set classic Netcdf (ver 3)
cdms.setNetcdfShuffleFlag(0)
cdms.setNetcdfDeflateFlag(0)
cdms.setNetcdfDeflateLevelFlag(0)
from scipy import signal
def FourierPlot(tas):
"""Plot the Fourier power spectrum as a function of Fourier period (1/frequency)"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
plt.plot(1/freqs,mag)
def annual_cycle_dominant(tas):
"""Check to see whether the annual cycle is dominant"""
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
the_period = 1./np.abs(freqs[np.argmax(mag)])
return the_period
def get_dominant_cycle(tas):
"""For a 2D variable, calculate the period of the dominant Fourier mode everywhere """
nt,nlat,nlon = tas.shape
to_mask = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
to_mask[i,j]=annual_cycle_dominant(tas[:,i,j])
to_mask.setAxisList(tas.getAxisList()[1:])
return to_mask
def mask_cycle_subdominant(tas,period = 12):
#find the closest Fourier frequency to 1/12 months
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
i = np.argmin(closest)
cutoff = 1/freqs[i]
to_mask = get_dominant_cycle(tas)
return to_mask != cutoff
def get_cycle(tas,period=12,return_complex=False):
"""Return the Fourier magnitude and phase for a given period (default 12 months)"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
if return_complex:
return R[i],Im[i]
else:
mag = 2*np.sqrt(R**2+Im**2)
phase = np.arctan2(Im,R)
return mag[i],phase[i]
def get_tan_phase(tas,period=12):
"""Return the tangent of the phase associated with Fourier mode"""
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
phase = Im/R
return phase[i]
def get_semiannual_cycle(tas):
"""Helper function: get the magnitude and phase for the mode with period 6 """
return get_cycle(tas,period=6)
def get_cycle_map(tas,period = 12):
ntime,nlat,nlon = tas.shape
AMP = MV.zeros((nlat,nlon))
PHI = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
mag,phase = get_cycle(tas[:,i,j],period=period)
AMP[i,j] = mag
# PHI[i,j]= phase_to_day(phase)
PHI[i,j] = phase
AMP.setAxis(0,tas.getLatitude())
AMP.setAxis(1,tas.getLongitude())
PHI.setAxis(0,tas.getLatitude())
PHI.setAxis(1,tas.getLongitude())
return AMP,PHI
def get_zonal_cycle(tas,period=12.):
if "longitude" in tas.getAxisIds():
tas = cdutil.averager(tas,axis='x')
AMP = MV.zeros(tas.shape[1])
PHI = MV.zeros(tas.shape[1])
for j in range(tas.shape[1]):
mag,phase = get_cycle(tas[:,j],period=period)
AMP[j] = mag
PHI[j] = phase
AMP.setAxis(0,tas.getLatitude())
PHI.setAxis(0,tas.getLatitude())
return AMP,PHI
def get_annual_cycle_trends(X):
"""Given a variable X, get amplitude gain and phase lag time series """
nt,nlat,nlon=X.shape
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax,X.getLatitude(),X.getLongitude()]
nyears = nt/12
yrs = X.reshape((nyears,12,nlat,nlon))
for i in [2,3]:
yrs.setAxis(i,X.getAxis(i-1))
AMP = MV.zeros((nyears,nlat,nlon))
PHI = MV.zeros((nyears,nlat,nlon))
for y in range(nyears):
amp,phi = get_cycle_map(yrs[y])
AMP[y] = amp
PHI[y] = phi
amp_solar,phase_solar = get_insolation()
AMP = AMP/amp_solar
AMP.setAxisList(axlist)
PHI.setAxisList(axlist)
PHI = correct_phase(PHI)
return AMP,PHI
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def subtract_months(P, reference):
""" Write P in terms of days relative to solar insolation phase. Return "forward" phases (leads solar insolation) and "backward" (lags solar insolation)"""
ref = phase_to_day(reference)
phase = phase_to_day(P)
if ref < phase:
backward = phase - ref #Ref lags phase
forward = -ref - (365 - phase) #Move into the next year
else:
forward = phase + 365 - ref
backward = phase - ref
return forward, backward
import sys
sys
from Helper import cdms_clone,get_plottable_time,get_orientation,get_slopes
def get_extremum(P,func = np.argmin):
return np.unravel_index(func(P),P.shape)
def merge(forward,backward):
X = np.ma.zeros(forward.shape)
mask = np.abs(forward) > np.abs(backward)
I = np.where(mask.flatten())[0]
J = np.where(~mask.flatten())[0]
Xf = X.flatten()
Xf[I] = np.array(backward).flatten()[I]
Xf[J] = np.array(forward).flatten()[J]
Xt = Xf.reshape(X.shape)
if 1:
return Xt
Diff = np.diff(Xf.reshape(X.shape),axis=0)
bad = np.where(np.abs(Diff)>365./2)
for ibad in range(len(bad[0])):
i= bad[0][::2][ibad]+1
j = bad[1][::2][ibad]
k = bad[2][::2][ibad]
if Xt[i,j,k] == forward[i,j,k]:
Xt[i,j,k] = backward[i,j,k]
else:
Xt[i,j,k] = forward[i,j,k]
return Xt
def check_for_badness(t):
Diff = np.diff(t)
if len(np.where(Diff>365/2.)[0]) >0:
return True
else:
return False
def where_bad(P):
nt,nlat,nlon = P.shape
X = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
X[i,j] = check_for_badness(P[:,i,j])
return X
def fix_bad(t,debug = False,discont=365./2.):
bad = t.copy()
too_big = []
too_small = []
smallest = 0
for i in range(len(t)):
if (bad[i] - bad[smallest]) >= discont:
if np.abs(365 - bad[i]) <= 365: #Is it an allowed value?
if np.abs( (bad[i]-365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = bad[i] - 365.
too_big += [i]
elif (bad[i] - bad[smallest]) <= -discont:
if np.abs(bad[i]+ 365) <= 365: #Is it an allowed value?
if np.abs( (bad[i]+365.) - bad[smallest]) < np.abs(bad[i]-bad[smallest]): #Does it make things better?
bad[i] = 365 + bad[i]
too_small += [i]
#Need to ensure that no points in the time series are more than discont away from each other
if debug:
return bad ,np.array(too_big),np.array(too_small)
else:
return bad
def fix_all_bad(P):
Fix = P.copy()
nt,nlat,nlon = P.shape
for j in range(nlat):
for k in range(nlon):
time_series = Fix[:,j,k]
if check_for_badness(time_series):
Fix[:,j,k] = fix_bad(time_series)
return Fix
def correct_phase(P,reference = None):
if reference is None:
amp_solar, phase_solar = get_insolation()
if phase_solar.shape != P.shape:
grid = P.getGrid()
phase_solar = phase_solar.regrid(grid,regridTool='regrid2')
reference = phase_solar
#print "got insolation"
Convert2Day = np.vectorize(subtract_months)
#print "vectorized"
forward,backward = Convert2Day(P,reference)
#print "converted to days"
Merged = merge(forward,backward)
#print "Merged"
Pnew = fix_all_bad(Merged)
#print "fixed discontinuities"
Pnew = MV.array(Pnew)
Pnew.setAxisList(P.getAxisList())
return Pnew
def phase_angle_form(obs,period=12,anom=False):
mag,phase = get_cycle(obs,period = period)
themean = np.ma.average(obs)
if anom:
themean = 0
###TEST
# mag=2*mag
return mag*np.cos(2*np.pi/period*np.arange(len(obs))+phase)+themean
def var_expl_by_annual_cycle(obs,period = 12,detrend = True):
# mag,phase = get_cycle(obs)
if detrend:
obs = signal.detrend(obs)
recon = phase_angle_form(obs,period=period,anom=True)
return np.corrcoef(recon,obs)[0,1]**2
def variance_map(X,period = 12,detrend = True):
if len(X.shape )==3:
nt,nlat,nlon = X.shape
V = MV.zeros((nlat,nlon))
for i in range(nlat):
for j in range(nlon):
if X.mask.shape != ():
if len(X[:,i,j]) == len(np.where(X.mask[:,i,j])[0]):
V[i,j] = 1.e20
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
else:
V[i,j]=var_expl_by_annual_cycle(X[:,i,j],period = period,detrend=detrend)
elif len(X.shape)==2:
nt,nlat = X.shape
V = MV.zeros((nlat))
for i in range(nlat):
V[i]=var_expl_by_annual_cycle(X[:,i],period = period,detrend=detrend)
V = MV.masked_where(V>1.e10,V)
V.setAxisList(X.getAxisList()[1:])
return V
##### Vectorizing stuff #####
def broadcast(fvec):
def inner(vec, *args, **kwargs):
if len(vec.shape) > 1:
return np.array([inner(row, *args, **kwargs) for row in vec])
else:
return fvec(vec, *args, **kwargs)
return inner
def fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
elif len(X.shape)==2:
nt,nlat = X.shape
has_models=False
elif len(X.shape)==1:
nt, = X.shape
has_models = False
if zonal_average:
if 'lon' in X.getAxisIds():
X = cdutil.averager(X,axis='x')
nyears = nt/12
newshape = (nyears, 12) + X.shape[1:]
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::12]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
return R,P
def decade_fast_annual_cycle(X,debug=False,semiann=False,zonal_average=False,return_Pdays=True):
if len(X.shape)==4:
nt,nlat,nlon,nmod = X.shape
has_models=True
elif len(X.shape)==3:
nt,nlat,nlon = X.shape
has_models = False
if zonal_average:
X = cdutil.averager(X,axis='x')
nyears = nt/60
newshape = (nyears, 60) + X.shape[1:]
print newshape
yrs = X.reshape(newshape)
if semiann:
vec_cycle=broadcast(get_semiannual_cycle)
else:
vec_cycle = broadcast(get_cycle)
# print "vectorized"
apply_everywhere = np.apply_along_axis(vec_cycle,1,yrs)
R = MV.array(apply_everywhere[:,0])
P = MV.array(apply_everywhere[:,1])
#print "got R and P"
if debug:
return R,P
time = X.getTime()[::60]
tax = cdms.createAxis(time)
tax.designateTime()
tax.id = "time"
atts = X.getTime().attributes
for k in atts:
setattr(tax,k,atts[k])
print "got new time"
axlist = [tax]+X.getAxisList()[1:]
R.setAxisList(axlist)
P.setAxisList(axlist)
if return_Pdays is False:
return R,P
#Pnew = P.copy()
#Pold = P.copy()
Pnew = MV.zeros(P.shape)
if has_models:
for mod_i in range(nmod):
chunk = P[:,:,:,mod_i]
Pnew[:,:,:,mod_i] = correct_phase(chunk)
else:
chunk = P[:,:,:]
Pnew = correct_phase(chunk)
Pnew = MV.array(Pnew)
Pnew.setAxisList(axlist)
return R,P,Pnew
def mask_data(data,basicmask):
if type(basicmask) != type(np.array([])):
basicmask = basicmask.asma()
dim = len(data.shape)
if dim == 2:
mask= basicmask
elif dim ==3:
nt = data.shape[0]
mask= np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)
elif dim ==4:
nmod,nt,nx,ny = data.shape
mask= np.repeat(np.repeat(basicmask[np.newaxis,:,:],nt,axis=0)[np.newaxis],nmod,axis=0)
return MV.masked_where(mask,data)
def get_variance_maps_models(variable="pr",models=None,cmip_dir = None,period=12):
""" find latitudes in each model where the annual cycle is not dominant"""
if models is None:
f = cdms.open("/work/marvel1/SEASONAL/MMA/cmip5.ZONALMMA.historical-rcp85.rip.mo.atm.Amon.pr.ver-1.AmpPhase.nc")
phase = f("phase")
models = eval(phase.getAxis(0).models)
f.close()
if cmip_dir is None:
cmip_dir = "/work/cmip5/historical-rcp85/atm/mo/"+variable+"/"
fobs = cdms.open("/work/marvel1/SEASONAL/OBS/GPCP.precip.mon.mean.nc")
the_grid = fobs("precip").getGrid()
nlat,nlon=the_grid.shape
fobs.close()
VarianceMaps = np.zeros((len(models),nlat))+1.e20
counter=0
for model in models:
print model
try:
fname = sorted(glob.glob(cmip_dir+"*."+model+".*"))[0]
fp = cdms.open(fname)
prtest = fp(variable,time=("1979-1-1","2014-12-31")).regrid(the_grid,regridTool='regrid2')
przonal = cdutil.averager(prtest,axis='x')
dom = variance_map(przonal,period=period)
VarianceMaps[counter]=dom
fp.close()
counter+=1
except:
continue
modax = cdms.createAxis(range(len(models)))
modax.id = "model"
modax.models = str(models)
VarianceMaps = MV.array(VarianceMaps)
VarianceMaps.setAxis(0,modax)
VarianceMaps.setAxis(1,the_grid.getLatitude())
return MV.masked_where(VarianceMaps>1.e10,VarianceMaps)
def phase_to_day(phase):
"""Convert phase to day of the year """
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi))
def phase_anomaly(phase,reference):
lead = phase_to_day(phase-reference)
lag = -1*phase_to_day(reference - phase)
LL = np.array([lead,lag])
i = np.argmin(np.abs(LL))
return LL[i]
def get_phase_anomalies(P,historical=True):
if historical:
reference = stats.circmean(P(time=('1996-1-1','2009-12-31')),axis=0)
else:
reference = stats.circmean(P,axis=0)
pa = np.vectorize(phase_anomaly)
PANOM = MV.zeros(P.shape)
if len(P.shape)==3: | PANOM[:,i,j] = pa(P[:,i,j],reference[i,j])
else:
nt,nlat = P.shape
for i in range(nlat):
PANOM[:,i] = pa(P[:,i],reference[i])
PANOM.setAxisList(P.getAxisList())
return MV.masked_where(np.isnan(PANOM),PANOM)
def phase_to_month(P):
## O is jan 1, 6 is july 1, 11 is dec 1
return cmip5.cdms_clone((12*(1-P/(2*np.pi))) %12,P)
def phase_climatology(P):
return cmip5.cdms_clone(stats.circmean(P,low=-np.pi,high=np.pi,axis=0),P[0]) | nt,nlat,nlon = P.shape
for i in range(nlat):
for j in range(nlon): | random_line_split |
web.rs | use std::path::PathBuf;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::prelude::*;
use std::ffi::OsStr;
use std::sync::Arc;
use std::time::{Duration as SDuration, Instant};
use crate::utils::{print_error_and_causes, FutureExt as _, ResultExt as ResultExt2};
use crate::HeaterControlMode;
use failure::{Error, ResultExt, bail}; |
use crate::Shared;
use crate::DataLogEntry;
use crate::TSDataLogEntry;
use file_db::{create_intervall_filtermap, TimestampedMethods};
use hyper::StatusCode;
use hyper::server::{Http, NewService, Request, Response, Server, Service};
use hyper::header;
use futures::future::{FutureExt as _, TryFutureExt}; // for conversion
use futures01::future::{self, Future};
use futures01;
use futures01::Stream;
use handlebars::Handlebars;
use tokio_inotify::AsyncINotify;
use rmp_serde::{Deserializer, Serializer};
use serde::{Deserialize, Serialize};
use serde_derive::{Deserialize, Serialize};
use chrono::NaiveDate;
use chrono::NaiveDateTime;
use chrono::{Duration, Timelike};
pub fn make_web_server(shared: &Shared) -> Result<Server<HelloWorldSpawner, ::hyper::Body>, Error> {
let assets_folder: PathBuf = ::std::fs::canonicalize(std::env::var("WEBASSETS_FOLDER")
.context("Environment variable WEBASSETS_FOLDER must be set.")?)?;
if !assets_folder.is_dir() {
bail!(
"WEBASSETS_FOLDER not found ({})",
assets_folder.to_string_lossy()
);
}
let index_html = assets_folder.join("index.html");
if !index_html.is_file() {
bail!("Missing index.html in WEBASSETS_FOLDER.");
}
let template_registry = Rc::new(RefCell::new(Handlebars::new()));
let addr = "0.0.0.0:12345".parse().unwrap();
let server = Http::new()
.bind(
&addr,
HelloWorldSpawner {
shared: shared.clone(),
template_registry: template_registry.clone(),
assets_folder: Rc::new(assets_folder.clone()),
},
)
.unwrap();
// handlebars template
template_registry
.borrow_mut()
.register_template_file("index.html", &index_html)
.with_context(|_e| {
format!("Cannot compile {}", &index_html.to_string_lossy())
})?;
// todo find all other .html files in the folder
// React live on asset changes
let path_notify = AsyncINotify::init(&server.handle())?;
const IN_CLOSE_WRITE: u32 = 8;
path_notify
.add_watch(&assets_folder, IN_CLOSE_WRITE)
.context("Web server can not watch the webassets folder for changes.")?;
let template_registry1 = Rc::clone(&template_registry);
let webassets_updater = path_notify.for_each(move |_event| {
if _event.name.extension().unwrap_or(OsStr::new("")) == "html" {
template_registry1
.try_borrow_mut()
.map(|mut registry| {
registry
.register_template_file(
&_event.name.to_string_lossy(),
assets_folder.join(&_event.name),
)
.with_context(|_e| {
format!("Cannot compile {}", &_event.name.to_string_lossy())
})
.print_error_and_causes();
})
.print_error_and_causes();
}
future::ok(())
});
server
.handle()
.spawn(webassets_updater.map_err(|e| print_error_and_causes(e) ));
Ok(server)
}
pub struct HelloWorldSpawner {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
impl NewService for HelloWorldSpawner {
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
type Instance = HelloWorld;
fn new_service(&self) -> Result<Self::Instance, ::std::io::Error> {
Ok(HelloWorld {
shared: async_std::sync::Arc::clone(&self.shared),
template_registry: Rc::clone(&self.template_registry),
assets_folder: Rc::clone(&self.assets_folder),
})
}
}
pub struct HelloWorld {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
type HandlerResult = Box<dyn Future<Item = Response, Error = ::hyper::Error>>;
impl Service for HelloWorld {
// boilerplate hooking up hyper's server types
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
// The future representing the eventual Response your call will
// resolve to. This can change to whatever Future you need.
type Future = HandlerResult;
fn call(&self, _req: Request) -> Self::Future {
let mut path_segments = _req.path().split("/").skip(1);
let response_body = match path_segments.next() {
Some("") | Some("index.html") => self.indexhtml(),
Some("assets") => self.serve_asset(path_segments),
Some("history") => self.serve_history(path_segments.next()),
Some("dates") => self.serve_available_dates(),
Some("current") => self.serve_current_temperatures(),
Some("set_heater_control_strategy") if _req.query().is_some() => {
self.set_heater_control_strategy(_req.query().unwrap())
}
_ => make404(),
};
response_body
}
}
impl HelloWorld {
fn indexhtml(&self) -> HandlerResult {
let template_registry = Rc::clone(&self.template_registry);
box_and_convert_error(future::lazy(move || {
let data: BTreeMap<String, String> = BTreeMap::new();
let resp = template_registry
.borrow()
.render("index.html", &data)
.map_err(|err| ::failure::Context::new(format!("{}", err)))?;
Ok(resp).map(str_to_response)
}))
}
fn serve_asset<'a, I: Iterator<Item = &'a str>>(&self, mut path_segments: I) -> HandlerResult {
match path_segments.next() {
Some(filename) => {
let path = self.assets_folder.join(filename);
box_and_convert_error(future::lazy(move || {
if path.is_file() {
let mut f = File::open(path).unwrap();
let mut buffer = String::new();
f.read_to_string(&mut buffer).unwrap();
Ok(buffer).map(str_to_response)
} else {
Err(::failure::err_msg("Unknown asset"))
}
}))
}
None => make404(),
}
}
fn serve_history<'a>(&self, date: Option<&'a str>) -> HandlerResult {
match NaiveDate::parse_from_str(date.unwrap_or("nodate"), "%Y-%m-%d") {
Ok(date) => {
let shared = self.shared.clone();
let every_3_minutes = create_intervall_filtermap(
Duration::minutes(3),
|data: &TSDataLogEntry| JsData::from(data),
0.25,
);
use file_db::Key;
struct CachedAndFilteredMarker;
impl Key for CachedAndFilteredMarker {
type Value = Vec<u8>;
}
let fut = async move {
let serialized = shared
.db
.custom_cached_by_chunk_key_async::<CachedAndFilteredMarker>(
date.into(),
Box::new(move |data: &[::file_db::Timestamped<DataLogEntry>]| {
let as_vec: Vec<_> = every_3_minutes(data);
let mut buf = Vec::with_capacity(0);
as_vec
.serialize(&mut Serializer::new(&mut buf))
.print_error_and_causes();
buf
}),
).await?;
let resp = Response::new()
.with_header(header::ContentLength(serialized.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_MSGPACK))
// TODO: Performance by using stream and without copy
.with_body((*serialized).clone());
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
Err(_err) => make404(),
}
}
fn serve_available_dates(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let datesvec = shared.db.get_non_empty_chunk_keys_async().await?;
let json_str = serde_json::to_string(&datesvec)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn serve_current_temperatures(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let data = DataLogEntry::new_from_current(&shared).await;
#[derive(Serialize)]
struct Current {
block : JsData,
control_strategy : String,
}
let data = Current {
block : (&data).into(), // do better
control_strategy : format!("{:?}", shared.control_strategy.load()),
};
let json_str = serde_json::to_string(&data)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn set_heater_control_strategy(&self, query: &str) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let mut action = None;
for k_v in query.split('&') {
if !k_v.contains("=") {
continue;
}
let mut k_v = k_v.split("=");
if k_v.next() == Some("action") {
action = k_v.next();
}
}
let answer = match action {
Some("on") => {
shared.control_strategy.store(HeaterControlMode::ForceOn{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: on"
}
Some("off") => {
shared.control_strategy.store(HeaterControlMode::ForceOff{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: off"
}
Some("auto") => {
shared.control_strategy.store(HeaterControlMode::Auto);
"set: auto"
}
_ => {
"do nothing"
}
};
box_and_convert_error(future::lazy(move || Ok(answer.to_string()).map(str_to_response)))
}
}
fn str_to_response(body: String) -> Response {
Response::new()
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
fn box_and_convert_error<F>(result: F) -> HandlerResult
where
F: Future<Item = Response, Error = Error> + Sized + 'static,
{
Box::new(result.then(|result| {
let f = match result {
Ok(response) => response,
Err(err) => {
use std::fmt::Write;
let mut buf = String::with_capacity(1000);
for (i, cause) in err.iter_chain().enumerate() {
if i == 0 {
write!(buf, "<p>{}</p>", cause).unwrap();
} else {
write!(buf, "<p> > caused by: {} </p>", cause).unwrap();
}
}
write!(buf, "<pre>{}</pre>", err.backtrace()).unwrap();
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>505 Internal Server Error</title>
</head>
<body>
<h1>505 Internal Server Error</h1>
{}
</body></html>"#,
buf
);
print_error_and_causes(err);
Response::new()
.with_status(StatusCode::InternalServerError)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
};
Ok(f)
}))
}
fn make404() -> HandlerResult {
Box::new(future::lazy(|| {
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>404 Not Found</title>
</head>
<body>
<h1>404 Not Found</h1>
</body></html>"#
);
Ok(
Response::new()
.with_status(StatusCode::NotFound)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body),
)
}))
}
#[derive(Serialize, Deserialize, Clone)]
pub struct JsData {
pub time: String,
pub high: f64,
pub highmid: f64,
pub mid: f64,
pub midlow: f64,
pub low: f64,
pub outside: f64,
pub heater_state: u8,
pub reference: f64,
}
impl<'a> From<&'a TSDataLogEntry> for JsData {
fn from(d: &TSDataLogEntry) -> JsData {
JsData {
time: d.time().format("%Y-%m-%dT%H:%M:%S+0000").to_string(),
high: d.celsius[0] as f64 / 100.0,
highmid: d.celsius[1] as f64 / 100.0,
mid: d.celsius[2] as f64 / 100.0,
midlow: d.celsius[3] as f64 / 100.0,
low: d.celsius[4] as f64 / 100.0,
outside: d.celsius[5] as f64 / 100.0,
heater_state: if d.heater_state { 1 } else { 0 },
reference: d.reference_celsius.unwrap_or(0) as f64 / 100.0,
}
}
} | random_line_split | |
web.rs |
use std::path::PathBuf;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::prelude::*;
use std::ffi::OsStr;
use std::sync::Arc;
use std::time::{Duration as SDuration, Instant};
use crate::utils::{print_error_and_causes, FutureExt as _, ResultExt as ResultExt2};
use crate::HeaterControlMode;
use failure::{Error, ResultExt, bail};
use crate::Shared;
use crate::DataLogEntry;
use crate::TSDataLogEntry;
use file_db::{create_intervall_filtermap, TimestampedMethods};
use hyper::StatusCode;
use hyper::server::{Http, NewService, Request, Response, Server, Service};
use hyper::header;
use futures::future::{FutureExt as _, TryFutureExt}; // for conversion
use futures01::future::{self, Future};
use futures01;
use futures01::Stream;
use handlebars::Handlebars;
use tokio_inotify::AsyncINotify;
use rmp_serde::{Deserializer, Serializer};
use serde::{Deserialize, Serialize};
use serde_derive::{Deserialize, Serialize};
use chrono::NaiveDate;
use chrono::NaiveDateTime;
use chrono::{Duration, Timelike};
pub fn make_web_server(shared: &Shared) -> Result<Server<HelloWorldSpawner, ::hyper::Body>, Error> {
let assets_folder: PathBuf = ::std::fs::canonicalize(std::env::var("WEBASSETS_FOLDER")
.context("Environment variable WEBASSETS_FOLDER must be set.")?)?;
if !assets_folder.is_dir() {
bail!(
"WEBASSETS_FOLDER not found ({})",
assets_folder.to_string_lossy()
);
}
let index_html = assets_folder.join("index.html");
if !index_html.is_file() {
bail!("Missing index.html in WEBASSETS_FOLDER.");
}
let template_registry = Rc::new(RefCell::new(Handlebars::new()));
let addr = "0.0.0.0:12345".parse().unwrap();
let server = Http::new()
.bind(
&addr,
HelloWorldSpawner {
shared: shared.clone(),
template_registry: template_registry.clone(),
assets_folder: Rc::new(assets_folder.clone()),
},
)
.unwrap();
// handlebars template
template_registry
.borrow_mut()
.register_template_file("index.html", &index_html)
.with_context(|_e| {
format!("Cannot compile {}", &index_html.to_string_lossy())
})?;
// todo find all other .html files in the folder
// React live on asset changes
let path_notify = AsyncINotify::init(&server.handle())?;
const IN_CLOSE_WRITE: u32 = 8;
path_notify
.add_watch(&assets_folder, IN_CLOSE_WRITE)
.context("Web server can not watch the webassets folder for changes.")?;
let template_registry1 = Rc::clone(&template_registry);
let webassets_updater = path_notify.for_each(move |_event| {
if _event.name.extension().unwrap_or(OsStr::new("")) == "html" {
template_registry1
.try_borrow_mut()
.map(|mut registry| {
registry
.register_template_file(
&_event.name.to_string_lossy(),
assets_folder.join(&_event.name),
)
.with_context(|_e| {
format!("Cannot compile {}", &_event.name.to_string_lossy())
})
.print_error_and_causes();
})
.print_error_and_causes();
}
future::ok(())
});
server
.handle()
.spawn(webassets_updater.map_err(|e| print_error_and_causes(e) ));
Ok(server)
}
pub struct HelloWorldSpawner {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
impl NewService for HelloWorldSpawner {
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
type Instance = HelloWorld;
fn new_service(&self) -> Result<Self::Instance, ::std::io::Error> {
Ok(HelloWorld {
shared: async_std::sync::Arc::clone(&self.shared),
template_registry: Rc::clone(&self.template_registry),
assets_folder: Rc::clone(&self.assets_folder),
})
}
}
pub struct HelloWorld {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
type HandlerResult = Box<dyn Future<Item = Response, Error = ::hyper::Error>>;
impl Service for HelloWorld {
// boilerplate hooking up hyper's server types
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
// The future representing the eventual Response your call will
// resolve to. This can change to whatever Future you need.
type Future = HandlerResult;
fn call(&self, _req: Request) -> Self::Future {
let mut path_segments = _req.path().split("/").skip(1);
let response_body = match path_segments.next() {
Some("") | Some("index.html") => self.indexhtml(),
Some("assets") => self.serve_asset(path_segments),
Some("history") => self.serve_history(path_segments.next()),
Some("dates") => self.serve_available_dates(),
Some("current") => self.serve_current_temperatures(),
Some("set_heater_control_strategy") if _req.query().is_some() => {
self.set_heater_control_strategy(_req.query().unwrap())
}
_ => make404(),
};
response_body
}
}
impl HelloWorld {
fn indexhtml(&self) -> HandlerResult {
let template_registry = Rc::clone(&self.template_registry);
box_and_convert_error(future::lazy(move || {
let data: BTreeMap<String, String> = BTreeMap::new();
let resp = template_registry
.borrow()
.render("index.html", &data)
.map_err(|err| ::failure::Context::new(format!("{}", err)))?;
Ok(resp).map(str_to_response)
}))
}
fn serve_asset<'a, I: Iterator<Item = &'a str>>(&self, mut path_segments: I) -> HandlerResult {
match path_segments.next() {
Some(filename) => {
let path = self.assets_folder.join(filename);
box_and_convert_error(future::lazy(move || {
if path.is_file() {
let mut f = File::open(path).unwrap();
let mut buffer = String::new();
f.read_to_string(&mut buffer).unwrap();
Ok(buffer).map(str_to_response)
} else {
Err(::failure::err_msg("Unknown asset"))
}
}))
}
None => make404(),
}
}
fn serve_history<'a>(&self, date: Option<&'a str>) -> HandlerResult {
match NaiveDate::parse_from_str(date.unwrap_or("nodate"), "%Y-%m-%d") {
Ok(date) => {
let shared = self.shared.clone();
let every_3_minutes = create_intervall_filtermap(
Duration::minutes(3),
|data: &TSDataLogEntry| JsData::from(data),
0.25,
);
use file_db::Key;
struct CachedAndFilteredMarker;
impl Key for CachedAndFilteredMarker {
type Value = Vec<u8>;
}
let fut = async move {
let serialized = shared
.db
.custom_cached_by_chunk_key_async::<CachedAndFilteredMarker>(
date.into(),
Box::new(move |data: &[::file_db::Timestamped<DataLogEntry>]| {
let as_vec: Vec<_> = every_3_minutes(data);
let mut buf = Vec::with_capacity(0);
as_vec
.serialize(&mut Serializer::new(&mut buf))
.print_error_and_causes();
buf
}),
).await?;
let resp = Response::new()
.with_header(header::ContentLength(serialized.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_MSGPACK))
// TODO: Performance by using stream and without copy
.with_body((*serialized).clone());
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
Err(_err) => make404(),
}
}
fn serve_available_dates(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let datesvec = shared.db.get_non_empty_chunk_keys_async().await?;
let json_str = serde_json::to_string(&datesvec)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn serve_current_temperatures(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let data = DataLogEntry::new_from_current(&shared).await;
#[derive(Serialize)]
struct Current {
block : JsData,
control_strategy : String,
}
let data = Current {
block : (&data).into(), // do better
control_strategy : format!("{:?}", shared.control_strategy.load()),
};
let json_str = serde_json::to_string(&data)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn set_heater_control_strategy(&self, query: &str) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let mut action = None;
for k_v in query.split('&') {
if !k_v.contains("=") {
continue;
}
let mut k_v = k_v.split("=");
if k_v.next() == Some("action") {
action = k_v.next();
}
}
let answer = match action {
Some("on") => {
shared.control_strategy.store(HeaterControlMode::ForceOn{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: on"
}
Some("off") => {
shared.control_strategy.store(HeaterControlMode::ForceOff{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: off"
}
Some("auto") => {
shared.control_strategy.store(HeaterControlMode::Auto);
"set: auto"
}
_ => {
"do nothing"
}
};
box_and_convert_error(future::lazy(move || Ok(answer.to_string()).map(str_to_response)))
}
}
fn str_to_response(body: String) -> Response {
Response::new()
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
fn | <F>(result: F) -> HandlerResult
where
F: Future<Item = Response, Error = Error> + Sized + 'static,
{
Box::new(result.then(|result| {
let f = match result {
Ok(response) => response,
Err(err) => {
use std::fmt::Write;
let mut buf = String::with_capacity(1000);
for (i, cause) in err.iter_chain().enumerate() {
if i == 0 {
write!(buf, "<p>{}</p>", cause).unwrap();
} else {
write!(buf, "<p> > caused by: {} </p>", cause).unwrap();
}
}
write!(buf, "<pre>{}</pre>", err.backtrace()).unwrap();
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>505 Internal Server Error</title>
</head>
<body>
<h1>505 Internal Server Error</h1>
{}
</body></html>"#,
buf
);
print_error_and_causes(err);
Response::new()
.with_status(StatusCode::InternalServerError)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
};
Ok(f)
}))
}
fn make404() -> HandlerResult {
Box::new(future::lazy(|| {
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>404 Not Found</title>
</head>
<body>
<h1>404 Not Found</h1>
</body></html>"#
);
Ok(
Response::new()
.with_status(StatusCode::NotFound)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body),
)
}))
}
#[derive(Serialize, Deserialize, Clone)]
pub struct JsData {
pub time: String,
pub high: f64,
pub highmid: f64,
pub mid: f64,
pub midlow: f64,
pub low: f64,
pub outside: f64,
pub heater_state: u8,
pub reference: f64,
}
impl<'a> From<&'a TSDataLogEntry> for JsData {
fn from(d: &TSDataLogEntry) -> JsData {
JsData {
time: d.time().format("%Y-%m-%dT%H:%M:%S+0000").to_string(),
high: d.celsius[0] as f64 / 100.0,
highmid: d.celsius[1] as f64 / 100.0,
mid: d.celsius[2] as f64 / 100.0,
midlow: d.celsius[3] as f64 / 100.0,
low: d.celsius[4] as f64 / 100.0,
outside: d.celsius[5] as f64 / 100.0,
heater_state: if d.heater_state { 1 } else { 0 },
reference: d.reference_celsius.unwrap_or(0) as f64 / 100.0,
}
}
}
| box_and_convert_error | identifier_name |
web.rs |
use std::path::PathBuf;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::prelude::*;
use std::ffi::OsStr;
use std::sync::Arc;
use std::time::{Duration as SDuration, Instant};
use crate::utils::{print_error_and_causes, FutureExt as _, ResultExt as ResultExt2};
use crate::HeaterControlMode;
use failure::{Error, ResultExt, bail};
use crate::Shared;
use crate::DataLogEntry;
use crate::TSDataLogEntry;
use file_db::{create_intervall_filtermap, TimestampedMethods};
use hyper::StatusCode;
use hyper::server::{Http, NewService, Request, Response, Server, Service};
use hyper::header;
use futures::future::{FutureExt as _, TryFutureExt}; // for conversion
use futures01::future::{self, Future};
use futures01;
use futures01::Stream;
use handlebars::Handlebars;
use tokio_inotify::AsyncINotify;
use rmp_serde::{Deserializer, Serializer};
use serde::{Deserialize, Serialize};
use serde_derive::{Deserialize, Serialize};
use chrono::NaiveDate;
use chrono::NaiveDateTime;
use chrono::{Duration, Timelike};
pub fn make_web_server(shared: &Shared) -> Result<Server<HelloWorldSpawner, ::hyper::Body>, Error> {
let assets_folder: PathBuf = ::std::fs::canonicalize(std::env::var("WEBASSETS_FOLDER")
.context("Environment variable WEBASSETS_FOLDER must be set.")?)?;
if !assets_folder.is_dir() {
bail!(
"WEBASSETS_FOLDER not found ({})",
assets_folder.to_string_lossy()
);
}
let index_html = assets_folder.join("index.html");
if !index_html.is_file() {
bail!("Missing index.html in WEBASSETS_FOLDER.");
}
let template_registry = Rc::new(RefCell::new(Handlebars::new()));
let addr = "0.0.0.0:12345".parse().unwrap();
let server = Http::new()
.bind(
&addr,
HelloWorldSpawner {
shared: shared.clone(),
template_registry: template_registry.clone(),
assets_folder: Rc::new(assets_folder.clone()),
},
)
.unwrap();
// handlebars template
template_registry
.borrow_mut()
.register_template_file("index.html", &index_html)
.with_context(|_e| {
format!("Cannot compile {}", &index_html.to_string_lossy())
})?;
// todo find all other .html files in the folder
// React live on asset changes
let path_notify = AsyncINotify::init(&server.handle())?;
const IN_CLOSE_WRITE: u32 = 8;
path_notify
.add_watch(&assets_folder, IN_CLOSE_WRITE)
.context("Web server can not watch the webassets folder for changes.")?;
let template_registry1 = Rc::clone(&template_registry);
let webassets_updater = path_notify.for_each(move |_event| {
if _event.name.extension().unwrap_or(OsStr::new("")) == "html" {
template_registry1
.try_borrow_mut()
.map(|mut registry| {
registry
.register_template_file(
&_event.name.to_string_lossy(),
assets_folder.join(&_event.name),
)
.with_context(|_e| {
format!("Cannot compile {}", &_event.name.to_string_lossy())
})
.print_error_and_causes();
})
.print_error_and_causes();
}
future::ok(())
});
server
.handle()
.spawn(webassets_updater.map_err(|e| print_error_and_causes(e) ));
Ok(server)
}
pub struct HelloWorldSpawner {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
impl NewService for HelloWorldSpawner {
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
type Instance = HelloWorld;
fn new_service(&self) -> Result<Self::Instance, ::std::io::Error> {
Ok(HelloWorld {
shared: async_std::sync::Arc::clone(&self.shared),
template_registry: Rc::clone(&self.template_registry),
assets_folder: Rc::clone(&self.assets_folder),
})
}
}
pub struct HelloWorld {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
type HandlerResult = Box<dyn Future<Item = Response, Error = ::hyper::Error>>;
impl Service for HelloWorld {
// boilerplate hooking up hyper's server types
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
// The future representing the eventual Response your call will
// resolve to. This can change to whatever Future you need.
type Future = HandlerResult;
fn call(&self, _req: Request) -> Self::Future {
let mut path_segments = _req.path().split("/").skip(1);
let response_body = match path_segments.next() {
Some("") | Some("index.html") => self.indexhtml(),
Some("assets") => self.serve_asset(path_segments),
Some("history") => self.serve_history(path_segments.next()),
Some("dates") => self.serve_available_dates(),
Some("current") => self.serve_current_temperatures(),
Some("set_heater_control_strategy") if _req.query().is_some() => {
self.set_heater_control_strategy(_req.query().unwrap())
}
_ => make404(),
};
response_body
}
}
impl HelloWorld {
fn indexhtml(&self) -> HandlerResult {
let template_registry = Rc::clone(&self.template_registry);
box_and_convert_error(future::lazy(move || {
let data: BTreeMap<String, String> = BTreeMap::new();
let resp = template_registry
.borrow()
.render("index.html", &data)
.map_err(|err| ::failure::Context::new(format!("{}", err)))?;
Ok(resp).map(str_to_response)
}))
}
fn serve_asset<'a, I: Iterator<Item = &'a str>>(&self, mut path_segments: I) -> HandlerResult {
match path_segments.next() {
Some(filename) => {
let path = self.assets_folder.join(filename);
box_and_convert_error(future::lazy(move || {
if path.is_file() | else {
Err(::failure::err_msg("Unknown asset"))
}
}))
}
None => make404(),
}
}
fn serve_history<'a>(&self, date: Option<&'a str>) -> HandlerResult {
match NaiveDate::parse_from_str(date.unwrap_or("nodate"), "%Y-%m-%d") {
Ok(date) => {
let shared = self.shared.clone();
let every_3_minutes = create_intervall_filtermap(
Duration::minutes(3),
|data: &TSDataLogEntry| JsData::from(data),
0.25,
);
use file_db::Key;
struct CachedAndFilteredMarker;
impl Key for CachedAndFilteredMarker {
type Value = Vec<u8>;
}
let fut = async move {
let serialized = shared
.db
.custom_cached_by_chunk_key_async::<CachedAndFilteredMarker>(
date.into(),
Box::new(move |data: &[::file_db::Timestamped<DataLogEntry>]| {
let as_vec: Vec<_> = every_3_minutes(data);
let mut buf = Vec::with_capacity(0);
as_vec
.serialize(&mut Serializer::new(&mut buf))
.print_error_and_causes();
buf
}),
).await?;
let resp = Response::new()
.with_header(header::ContentLength(serialized.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_MSGPACK))
// TODO: Performance by using stream and without copy
.with_body((*serialized).clone());
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
Err(_err) => make404(),
}
}
fn serve_available_dates(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let datesvec = shared.db.get_non_empty_chunk_keys_async().await?;
let json_str = serde_json::to_string(&datesvec)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn serve_current_temperatures(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let data = DataLogEntry::new_from_current(&shared).await;
#[derive(Serialize)]
struct Current {
block : JsData,
control_strategy : String,
}
let data = Current {
block : (&data).into(), // do better
control_strategy : format!("{:?}", shared.control_strategy.load()),
};
let json_str = serde_json::to_string(&data)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn set_heater_control_strategy(&self, query: &str) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let mut action = None;
for k_v in query.split('&') {
if !k_v.contains("=") {
continue;
}
let mut k_v = k_v.split("=");
if k_v.next() == Some("action") {
action = k_v.next();
}
}
let answer = match action {
Some("on") => {
shared.control_strategy.store(HeaterControlMode::ForceOn{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: on"
}
Some("off") => {
shared.control_strategy.store(HeaterControlMode::ForceOff{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: off"
}
Some("auto") => {
shared.control_strategy.store(HeaterControlMode::Auto);
"set: auto"
}
_ => {
"do nothing"
}
};
box_and_convert_error(future::lazy(move || Ok(answer.to_string()).map(str_to_response)))
}
}
fn str_to_response(body: String) -> Response {
Response::new()
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
fn box_and_convert_error<F>(result: F) -> HandlerResult
where
F: Future<Item = Response, Error = Error> + Sized + 'static,
{
Box::new(result.then(|result| {
let f = match result {
Ok(response) => response,
Err(err) => {
use std::fmt::Write;
let mut buf = String::with_capacity(1000);
for (i, cause) in err.iter_chain().enumerate() {
if i == 0 {
write!(buf, "<p>{}</p>", cause).unwrap();
} else {
write!(buf, "<p> > caused by: {} </p>", cause).unwrap();
}
}
write!(buf, "<pre>{}</pre>", err.backtrace()).unwrap();
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>505 Internal Server Error</title>
</head>
<body>
<h1>505 Internal Server Error</h1>
{}
</body></html>"#,
buf
);
print_error_and_causes(err);
Response::new()
.with_status(StatusCode::InternalServerError)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
};
Ok(f)
}))
}
fn make404() -> HandlerResult {
Box::new(future::lazy(|| {
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>404 Not Found</title>
</head>
<body>
<h1>404 Not Found</h1>
</body></html>"#
);
Ok(
Response::new()
.with_status(StatusCode::NotFound)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body),
)
}))
}
#[derive(Serialize, Deserialize, Clone)]
pub struct JsData {
pub time: String,
pub high: f64,
pub highmid: f64,
pub mid: f64,
pub midlow: f64,
pub low: f64,
pub outside: f64,
pub heater_state: u8,
pub reference: f64,
}
impl<'a> From<&'a TSDataLogEntry> for JsData {
fn from(d: &TSDataLogEntry) -> JsData {
JsData {
time: d.time().format("%Y-%m-%dT%H:%M:%S+0000").to_string(),
high: d.celsius[0] as f64 / 100.0,
highmid: d.celsius[1] as f64 / 100.0,
mid: d.celsius[2] as f64 / 100.0,
midlow: d.celsius[3] as f64 / 100.0,
low: d.celsius[4] as f64 / 100.0,
outside: d.celsius[5] as f64 / 100.0,
heater_state: if d.heater_state { 1 } else { 0 },
reference: d.reference_celsius.unwrap_or(0) as f64 / 100.0,
}
}
}
| {
let mut f = File::open(path).unwrap();
let mut buffer = String::new();
f.read_to_string(&mut buffer).unwrap();
Ok(buffer).map(str_to_response)
} | conditional_block |
web.rs |
use std::path::PathBuf;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::prelude::*;
use std::ffi::OsStr;
use std::sync::Arc;
use std::time::{Duration as SDuration, Instant};
use crate::utils::{print_error_and_causes, FutureExt as _, ResultExt as ResultExt2};
use crate::HeaterControlMode;
use failure::{Error, ResultExt, bail};
use crate::Shared;
use crate::DataLogEntry;
use crate::TSDataLogEntry;
use file_db::{create_intervall_filtermap, TimestampedMethods};
use hyper::StatusCode;
use hyper::server::{Http, NewService, Request, Response, Server, Service};
use hyper::header;
use futures::future::{FutureExt as _, TryFutureExt}; // for conversion
use futures01::future::{self, Future};
use futures01;
use futures01::Stream;
use handlebars::Handlebars;
use tokio_inotify::AsyncINotify;
use rmp_serde::{Deserializer, Serializer};
use serde::{Deserialize, Serialize};
use serde_derive::{Deserialize, Serialize};
use chrono::NaiveDate;
use chrono::NaiveDateTime;
use chrono::{Duration, Timelike};
pub fn make_web_server(shared: &Shared) -> Result<Server<HelloWorldSpawner, ::hyper::Body>, Error> {
let assets_folder: PathBuf = ::std::fs::canonicalize(std::env::var("WEBASSETS_FOLDER")
.context("Environment variable WEBASSETS_FOLDER must be set.")?)?;
if !assets_folder.is_dir() {
bail!(
"WEBASSETS_FOLDER not found ({})",
assets_folder.to_string_lossy()
);
}
let index_html = assets_folder.join("index.html");
if !index_html.is_file() {
bail!("Missing index.html in WEBASSETS_FOLDER.");
}
let template_registry = Rc::new(RefCell::new(Handlebars::new()));
let addr = "0.0.0.0:12345".parse().unwrap();
let server = Http::new()
.bind(
&addr,
HelloWorldSpawner {
shared: shared.clone(),
template_registry: template_registry.clone(),
assets_folder: Rc::new(assets_folder.clone()),
},
)
.unwrap();
// handlebars template
template_registry
.borrow_mut()
.register_template_file("index.html", &index_html)
.with_context(|_e| {
format!("Cannot compile {}", &index_html.to_string_lossy())
})?;
// todo find all other .html files in the folder
// React live on asset changes
let path_notify = AsyncINotify::init(&server.handle())?;
const IN_CLOSE_WRITE: u32 = 8;
path_notify
.add_watch(&assets_folder, IN_CLOSE_WRITE)
.context("Web server can not watch the webassets folder for changes.")?;
let template_registry1 = Rc::clone(&template_registry);
let webassets_updater = path_notify.for_each(move |_event| {
if _event.name.extension().unwrap_or(OsStr::new("")) == "html" {
template_registry1
.try_borrow_mut()
.map(|mut registry| {
registry
.register_template_file(
&_event.name.to_string_lossy(),
assets_folder.join(&_event.name),
)
.with_context(|_e| {
format!("Cannot compile {}", &_event.name.to_string_lossy())
})
.print_error_and_causes();
})
.print_error_and_causes();
}
future::ok(())
});
server
.handle()
.spawn(webassets_updater.map_err(|e| print_error_and_causes(e) ));
Ok(server)
}
pub struct HelloWorldSpawner {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
impl NewService for HelloWorldSpawner {
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
type Instance = HelloWorld;
fn new_service(&self) -> Result<Self::Instance, ::std::io::Error> {
Ok(HelloWorld {
shared: async_std::sync::Arc::clone(&self.shared),
template_registry: Rc::clone(&self.template_registry),
assets_folder: Rc::clone(&self.assets_folder),
})
}
}
pub struct HelloWorld {
shared: Shared,
template_registry: Rc<RefCell<Handlebars>>,
assets_folder: Rc<PathBuf>,
}
type HandlerResult = Box<dyn Future<Item = Response, Error = ::hyper::Error>>;
impl Service for HelloWorld {
// boilerplate hooking up hyper's server types
type Request = Request;
type Response = Response;
type Error = ::hyper::Error;
// The future representing the eventual Response your call will
// resolve to. This can change to whatever Future you need.
type Future = HandlerResult;
fn call(&self, _req: Request) -> Self::Future {
let mut path_segments = _req.path().split("/").skip(1);
let response_body = match path_segments.next() {
Some("") | Some("index.html") => self.indexhtml(),
Some("assets") => self.serve_asset(path_segments),
Some("history") => self.serve_history(path_segments.next()),
Some("dates") => self.serve_available_dates(),
Some("current") => self.serve_current_temperatures(),
Some("set_heater_control_strategy") if _req.query().is_some() => {
self.set_heater_control_strategy(_req.query().unwrap())
}
_ => make404(),
};
response_body
}
}
impl HelloWorld {
fn indexhtml(&self) -> HandlerResult {
let template_registry = Rc::clone(&self.template_registry);
box_and_convert_error(future::lazy(move || {
let data: BTreeMap<String, String> = BTreeMap::new();
let resp = template_registry
.borrow()
.render("index.html", &data)
.map_err(|err| ::failure::Context::new(format!("{}", err)))?;
Ok(resp).map(str_to_response)
}))
}
fn serve_asset<'a, I: Iterator<Item = &'a str>>(&self, mut path_segments: I) -> HandlerResult {
match path_segments.next() {
Some(filename) => {
let path = self.assets_folder.join(filename);
box_and_convert_error(future::lazy(move || {
if path.is_file() {
let mut f = File::open(path).unwrap();
let mut buffer = String::new();
f.read_to_string(&mut buffer).unwrap();
Ok(buffer).map(str_to_response)
} else {
Err(::failure::err_msg("Unknown asset"))
}
}))
}
None => make404(),
}
}
fn serve_history<'a>(&self, date: Option<&'a str>) -> HandlerResult {
match NaiveDate::parse_from_str(date.unwrap_or("nodate"), "%Y-%m-%d") {
Ok(date) => {
let shared = self.shared.clone();
let every_3_minutes = create_intervall_filtermap(
Duration::minutes(3),
|data: &TSDataLogEntry| JsData::from(data),
0.25,
);
use file_db::Key;
struct CachedAndFilteredMarker;
impl Key for CachedAndFilteredMarker {
type Value = Vec<u8>;
}
let fut = async move {
let serialized = shared
.db
.custom_cached_by_chunk_key_async::<CachedAndFilteredMarker>(
date.into(),
Box::new(move |data: &[::file_db::Timestamped<DataLogEntry>]| {
let as_vec: Vec<_> = every_3_minutes(data);
let mut buf = Vec::with_capacity(0);
as_vec
.serialize(&mut Serializer::new(&mut buf))
.print_error_and_causes();
buf
}),
).await?;
let resp = Response::new()
.with_header(header::ContentLength(serialized.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_MSGPACK))
// TODO: Performance by using stream and without copy
.with_body((*serialized).clone());
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
Err(_err) => make404(),
}
}
fn serve_available_dates(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let datesvec = shared.db.get_non_empty_chunk_keys_async().await?;
let json_str = serde_json::to_string(&datesvec)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn serve_current_temperatures(&self) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let fut = async move {
let data = DataLogEntry::new_from_current(&shared).await;
#[derive(Serialize)]
struct Current {
block : JsData,
control_strategy : String,
}
let data = Current {
block : (&data).into(), // do better
control_strategy : format!("{:?}", shared.control_strategy.load()),
};
let json_str = serde_json::to_string(&data)?;
let resp = Response::new()
.with_header(header::ContentLength(json_str.len() as u64))
.with_header(header::ContentType(::hyper::mime::APPLICATION_JSON))
.with_body(json_str);
Ok(resp)
};
box_and_convert_error(fut.boxed().compat())
}
fn set_heater_control_strategy(&self, query: &str) -> HandlerResult {
let shared = async_std::sync::Arc::clone(&self.shared);
let mut action = None;
for k_v in query.split('&') {
if !k_v.contains("=") {
continue;
}
let mut k_v = k_v.split("=");
if k_v.next() == Some("action") {
action = k_v.next();
}
}
let answer = match action {
Some("on") => {
shared.control_strategy.store(HeaterControlMode::ForceOn{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: on"
}
Some("off") => {
shared.control_strategy.store(HeaterControlMode::ForceOff{ until: Instant::now() + SDuration::from_secs(3600 * 12) });
"set: off"
}
Some("auto") => {
shared.control_strategy.store(HeaterControlMode::Auto);
"set: auto"
}
_ => {
"do nothing"
}
};
box_and_convert_error(future::lazy(move || Ok(answer.to_string()).map(str_to_response)))
}
}
fn str_to_response(body: String) -> Response |
fn box_and_convert_error<F>(result: F) -> HandlerResult
where
F: Future<Item = Response, Error = Error> + Sized + 'static,
{
Box::new(result.then(|result| {
let f = match result {
Ok(response) => response,
Err(err) => {
use std::fmt::Write;
let mut buf = String::with_capacity(1000);
for (i, cause) in err.iter_chain().enumerate() {
if i == 0 {
write!(buf, "<p>{}</p>", cause).unwrap();
} else {
write!(buf, "<p> > caused by: {} </p>", cause).unwrap();
}
}
write!(buf, "<pre>{}</pre>", err.backtrace()).unwrap();
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>505 Internal Server Error</title>
</head>
<body>
<h1>505 Internal Server Error</h1>
{}
</body></html>"#,
buf
);
print_error_and_causes(err);
Response::new()
.with_status(StatusCode::InternalServerError)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
}
};
Ok(f)
}))
}
fn make404() -> HandlerResult {
Box::new(future::lazy(|| {
let body = format!(
r#"<!doctype html>
<html lang="en"><head>
<meta charset="utf-8">
<title>404 Not Found</title>
</head>
<body>
<h1>404 Not Found</h1>
</body></html>"#
);
Ok(
Response::new()
.with_status(StatusCode::NotFound)
.with_header(header::ContentLength(body.len() as u64))
.with_body(body),
)
}))
}
#[derive(Serialize, Deserialize, Clone)]
pub struct JsData {
pub time: String,
pub high: f64,
pub highmid: f64,
pub mid: f64,
pub midlow: f64,
pub low: f64,
pub outside: f64,
pub heater_state: u8,
pub reference: f64,
}
impl<'a> From<&'a TSDataLogEntry> for JsData {
fn from(d: &TSDataLogEntry) -> JsData {
JsData {
time: d.time().format("%Y-%m-%dT%H:%M:%S+0000").to_string(),
high: d.celsius[0] as f64 / 100.0,
highmid: d.celsius[1] as f64 / 100.0,
mid: d.celsius[2] as f64 / 100.0,
midlow: d.celsius[3] as f64 / 100.0,
low: d.celsius[4] as f64 / 100.0,
outside: d.celsius[5] as f64 / 100.0,
heater_state: if d.heater_state { 1 } else { 0 },
reference: d.reference_celsius.unwrap_or(0) as f64 / 100.0,
}
}
}
| {
Response::new()
.with_header(header::ContentLength(body.len() as u64))
.with_body(body)
} | identifier_body |
cmds.py | import discord
from collections import Counter
from db import readDB, writeDB
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
for i in range(0, len(searchList)):
if name in searchList[i]:
return i
return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - (num % multiple))
return num
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
# read the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
# check if the winner is actually in the database
if getIndex(winner, rows) < 0:
print('[ERROR] Winner \"%s\" not found in database' % winner)
return (ERROR_PLAYER_NOT_FOUND % winner)
# check if losers are in database
for loser in losers:
# get loser index
loserIndex = getIndex(loser, rows)
# check against winner to see if the name was duplicated
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return (ERROR_WIN_IN_LOSE % loser)
# check if loser was not found in database
if loserIndex < 0:
print('[ERROR] Loser \"%s\" not found in database' % loser)
return (ERROR_PLAYER_NOT_FOUND % loser)
# check for duplicate losers
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return (ERROR_DUP_LOSER % dupList)
# update stats if we found the winner and all losers | winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
# same as winner for each loser
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
# open up the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
# check if player is already in database
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] \"%s\" already in database' % player)
print('[INFO] Database not updated')
return (ERROR_IN_DB % player)
else:
# add player to list and resort
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] %s\'s data changed' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
# delete player from list
del(rows[playerIndex])
# write the new data to the database
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
# read database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
# sort data by win rate
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
except ZeroDivisionError:
print('[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
# sort by number of wins and reverse so max is first
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
# sort by number of losses and reverse so max is first
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
# database is stored sorted by name so dont do anything
pass
else:
print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
returnMsg = ERROR_INVALID_SORT
if player == 'ALL':
# get max player length
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
# construct a string with all the player info
playerString = ''
# adjust start spacing if player length is odd or even to align with pipe
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
# calculate win rate
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
# truncate win rate and create string with player info
winRate = winRate[0:4].rjust(9)
playerString += playerName + winCount + loseCount + winRate + ' %\n'
# calculate padding for name field and create header final strings
namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
divider = ('-' * len(header)) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
# return the constructed string
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString | # get index, get win count, increment and update
winnerIndex = getIndex(winner, rows) | random_line_split |
cmds.py | import discord
from collections import Counter
from db import readDB, writeDB
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
for i in range(0, len(searchList)):
if name in searchList[i]:
return i
return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - (num % multiple))
return num
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
# read the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
# check if the winner is actually in the database
if getIndex(winner, rows) < 0:
print('[ERROR] Winner \"%s\" not found in database' % winner)
return (ERROR_PLAYER_NOT_FOUND % winner)
# check if losers are in database
for loser in losers:
# get loser index
loserIndex = getIndex(loser, rows)
# check against winner to see if the name was duplicated
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return (ERROR_WIN_IN_LOSE % loser)
# check if loser was not found in database
if loserIndex < 0:
print('[ERROR] Loser \"%s\" not found in database' % loser)
return (ERROR_PLAYER_NOT_FOUND % loser)
# check for duplicate losers
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return (ERROR_DUP_LOSER % dupList)
# update stats if we found the winner and all losers
# get index, get win count, increment and update
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
# same as winner for each loser
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
# open up the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
# check if player is already in database
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] \"%s\" already in database' % player)
print('[INFO] Database not updated')
return (ERROR_IN_DB % player)
else:
# add player to list and resort
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] %s\'s data changed' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
# delete player from list
del(rows[playerIndex])
# write the new data to the database
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def | (msgChannel, statsFile, sortType='WINRATE', player='ALL'):
# read database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
# sort data by win rate
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
except ZeroDivisionError:
print('[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
# sort by number of wins and reverse so max is first
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
# sort by number of losses and reverse so max is first
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
# database is stored sorted by name so dont do anything
pass
else:
print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
returnMsg = ERROR_INVALID_SORT
if player == 'ALL':
# get max player length
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
# construct a string with all the player info
playerString = ''
# adjust start spacing if player length is odd or even to align with pipe
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
# calculate win rate
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
# truncate win rate and create string with player info
winRate = winRate[0:4].rjust(9)
playerString += playerName + winCount + loseCount + winRate + ' %\n'
# calculate padding for name field and create header final strings
namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
divider = ('-' * len(header)) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
# return the constructed string
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString
| dumpStats | identifier_name |
cmds.py | import discord
from collections import Counter
from db import readDB, writeDB
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
for i in range(0, len(searchList)):
if name in searchList[i]:
return i
return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - (num % multiple))
return num
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
# read the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
# check if the winner is actually in the database
if getIndex(winner, rows) < 0:
print('[ERROR] Winner \"%s\" not found in database' % winner)
return (ERROR_PLAYER_NOT_FOUND % winner)
# check if losers are in database
for loser in losers:
# get loser index
loserIndex = getIndex(loser, rows)
# check against winner to see if the name was duplicated
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return (ERROR_WIN_IN_LOSE % loser)
# check if loser was not found in database
if loserIndex < 0:
print('[ERROR] Loser \"%s\" not found in database' % loser)
return (ERROR_PLAYER_NOT_FOUND % loser)
# check for duplicate losers
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return (ERROR_DUP_LOSER % dupList)
# update stats if we found the winner and all losers
# get index, get win count, increment and update
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
# same as winner for each loser
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
# open up the database
|
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
# read database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
# sort data by win rate
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
except ZeroDivisionError:
print('[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
# sort by number of wins and reverse so max is first
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
# sort by number of losses and reverse so max is first
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
# database is stored sorted by name so dont do anything
pass
else:
print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
returnMsg = ERROR_INVALID_SORT
if player == 'ALL':
# get max player length
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
# construct a string with all the player info
playerString = ''
# adjust start spacing if player length is odd or even to align with pipe
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
# calculate win rate
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
# truncate win rate and create string with player info
winRate = winRate[0:4].rjust(9)
playerString += playerName + winCount + loseCount + winRate + ' %\n'
# calculate padding for name field and create header final strings
namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
divider = ('-' * len(header)) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
# return the constructed string
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString
| data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
# check if player is already in database
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] \"%s\" already in database' % player)
print('[INFO] Database not updated')
return (ERROR_IN_DB % player)
else:
# add player to list and resort
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] %s\'s data changed' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
# delete player from list
del(rows[playerIndex])
# write the new data to the database
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR | identifier_body |
cmds.py | import discord
from collections import Counter
from db import readDB, writeDB
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
for i in range(0, len(searchList)):
if name in searchList[i]:
return i
return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - (num % multiple))
return num
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
# read the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
# check if the winner is actually in the database
if getIndex(winner, rows) < 0:
print('[ERROR] Winner \"%s\" not found in database' % winner)
return (ERROR_PLAYER_NOT_FOUND % winner)
# check if losers are in database
for loser in losers:
# get loser index
loserIndex = getIndex(loser, rows)
# check against winner to see if the name was duplicated
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return (ERROR_WIN_IN_LOSE % loser)
# check if loser was not found in database
if loserIndex < 0:
print('[ERROR] Loser \"%s\" not found in database' % loser)
return (ERROR_PLAYER_NOT_FOUND % loser)
# check for duplicate losers
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return (ERROR_DUP_LOSER % dupList)
# update stats if we found the winner and all losers
# get index, get win count, increment and update
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
# same as winner for each loser
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
# open up the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
# check if player is already in database
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] \"%s\" already in database' % player)
print('[INFO] Database not updated')
return (ERROR_IN_DB % player)
else:
# add player to list and resort
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
|
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] %s\'s data changed' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
# delete player from list
del(rows[playerIndex])
# write the new data to the database
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
# read database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
# sort data by win rate
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
except ZeroDivisionError:
print('[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
# sort by number of wins and reverse so max is first
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
# sort by number of losses and reverse so max is first
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
# database is stored sorted by name so dont do anything
pass
else:
print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
returnMsg = ERROR_INVALID_SORT
if player == 'ALL':
# get max player length
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
# construct a string with all the player info
playerString = ''
# adjust start spacing if player length is odd or even to align with pipe
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
# calculate win rate
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
# truncate win rate and create string with player info
winRate = winRate[0:4].rjust(9)
playerString += playerName + winCount + loseCount + winRate + ' %\n'
# calculate padding for name field and create header final strings
namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
divider = ('-' * len(header)) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
# return the constructed string
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString
| print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player) | conditional_block |
jherax.js | //******************************
// Utils for validations
// Author: David Rivera
// Created: 26/06/2013
//******************************
// jherax.github.io
// github.com/jherax/js-utils
//******************************
;
// Essential JavaScript Namespacing Patterns
// http://addyosmani.com/blog/essential-js-namespacing/
// Create a custom exception notifier
var CustomException = function(message) {
this.name = "js-utils exception";
this.message = message || "An error has occurred";
this.toString = function() {
return this.name + ": " + this.message;
};
};
// We need to do a check before we create the namespace
var js = window.js || { author: 'jherax' };
if (js.author != 'jherax') {
throw new CustomException("A variable with namespace [js] is already in use");
}
// Create a general purpose namespace method
js.createNS = js.createNS || function (namespace) {
var nsparts = namespace.toString().split(".");
var parent = js;
// we want to be able to include or exclude the root namespace so we strip it if it's in the namespace
if (nsparts[0] === "js") nsparts = nsparts.slice(1);
// loop through the parts and create a nested namespace if necessary
for (var i = 0; i < nsparts.length; i++) {
var subns = nsparts[i];
// check if the namespace is a valid variable name
if (!(/\w+/).test(subns)) throw new CustomException("Invalid namespace");
// check if the current parent already has the namespace declared
// if it isn't, then create it
if (typeof parent[subns] === "undefined") {
parent[subns] = {};
}
parent = parent[subns];
}
// the parent is now constructed with empty namespaces and can be used.
// we return the outermost namespace
return parent;
};
// We expose a property to specify where the tooltip element will be appended
js.wrapper = "body"; //#main-section
//-----------------------------------
// Immediately-invoked Function Expressions (IIFE)
// We pass the namespace as an argument to a self-invoking function.
// jherax is the namespace context, and $ is the jQuery Object
(function (jherax, $) {
//-----------------------------------
/* PRIVATE MEMBERS */
//-----------------------------------
// Adds support for browser detect.
// jquery 1.9+ deprecates $.browser
var getBrowser = (function() {
var ua = navigator.userAgent.toLowerCase();
var match =
/(msie) ([\w.]+)/.exec(ua) ||
/(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
var b = {}, o = {
browser: match[1] || "unknown",
version: match[2] || "0"
};
b[o.browser] = true;
b.version = o.version;
return b;
})();
//-----------------------------------
// Determines if a object is DOM element
var isDOM = function(obj) {
return (!!obj && typeof obj === "object" && !!obj.nodeType);
};
//-----------------------------------
// Determines if the entry parameter is a normalized Event Object
var isEvent = function(obj) {
return (!!obj && typeof obj === "object" && obj.which !== undefined && !!obj.target);
};
//-----------------------------------
// Determines if the entry parameter is a function
var isFunction = function(obj) {
return (!!obj && Object.prototype.toString.call(obj) == '[object Function]');
};
//-----------------------------------
// Determines whether the entry parameter is a text input or checkable input
// www.quackit.com/html_5/tags/html_input_tag.cfm
var input = {
isText: function(_dom) {
if(!isDOM(_dom)) return false;
if ((/textarea/i).test(_dom.nodeName)) return true;
var regx = /text|password|file|number|search|tel|url|email|datetime|datetime-local|date|time|month|week/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
},
isCheck: function(_dom) {
if(!isDOM(_dom)) return false;
var regx = /checkbox|radio/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
}
};
//-----------------------------------
// This is a facade of JSON.stringify and provides support in old browsers
var fnStringify = typeof JSON != "undefined" ? JSON.stringify : function (json) {
var arr = [];
$.each(json, function (key, val) {
var prop = "\"" + key + "\":";
prop += ($.isPlainObject(val) ? fnStringify(val) :
(typeof val === "string" ? "\"" + val + "\"" : val));
arr.push(prop);
});
return "{" + arr.join(",") + "}";
};
//-----------------------------------
// Escaping user input to be treated as a literal string within a regular expression
function fnEscapeRegExp(txt){
if (typeof txt !== "string") return null;
return txt.replace(/([.*+?=!:${}()|\^\[\]\/\\])/g, "\\$1");
}
//-----------------------------------
// Gets the text of current date in es-CO culture. dd/MM/yyyy HH:mm:ss
function fnGetDate() {
var f = new Date();
var fillZero = function(n) { return ("0" + n.toString()).slice(-2); };
var fnDate = function() { return (fillZero(f.getDate()) +"/"+ fillZero(f.getMonth() + 1) +"/"+ f.getFullYear()); };
var fnTime = function() { return (fillZero(f.getHours()) +":"+ fillZero(f.getMinutes()) +":"+ fillZero(f.getSeconds())); };
var fnDateTime = function() { return fnDate() + " " + fnTime(); };
return {
date: fnDate(),
time: fnTime(),
dateTime: fnDateTime()
};
}
//-----------------------------------
// Gets the text as html encoded
// This is a delegate for $.val()
function fnGetHtmlText(i, value) {
var html = $("<div/>").text(value).html();
return $.trim(html);
}
//-----------------------------------
// Gets selected text in the document
function fnGetSelectedText() {
var _dom = document.activeElement;
var _sel = { text: "", slice: "", start: -1, end: -1 };
if (window.getSelection) {
// Get selected text from input fields
if (input.isText(_dom)) {
_sel.start = _dom.selectionStart;
_sel.end = _dom.selectionEnd;
if (_sel.end > _sel.start) {
_sel.text = _dom.value.substring(_sel.start, _sel.end);
_sel.slice = _dom.value.slice(0, _sel.start) + _dom.value.slice(_sel.end);
}
}
// Get selected text from document
else _sel.text = window.getSelection().toString();
} else if (document.selection.createRange)
_sel.text = document.selection.createRange().text;
if (_sel.text !== "") _sel.text = $.trim(_sel.text);
return _sel;
}
//-----------------------------------
// Gets the cursor position in the text
function fnGetCaretPosition(_dom) {
if ('selectionStart' in _dom) {
return (_dom.selectionStart);
} else { // IE below version 9
var _sel = document.selection.createRange();
_sel.moveStart('character', -_dom.value.length);
return (_sel.text.length);
}
}
//-----------------------------------
// Sets the cursor position in the text
function fnSetCaretPosition(_dom, pos) {
if ('selectionStart' in _dom) {
_dom.setSelectionRange(pos, pos);
} else { // IE below version 9
var range = _dom.createTextRange();
range.collapse(true);
range.moveEnd('character', pos);
range.moveStart('character', pos);
range.select();
}
}
//-----------------------------------
// Transforms the text to capital letter.
// Also removes all consecutive spaces
function fnCapitalize(obj, _type) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
if (!_text || _text.length === 0) return "";
if ((/textarea/i).test(obj.nodeName)) {
_text = _text.replace(/\r|\n/g, "¶").replace(/\s{2,}/g, " ");
while ((/^[¶\s]|[¶\s]$/g).test(_text))
_text = _text.replace(/^[¶\s]+|[¶\s]+$/g, "");
_text = _text.replace(/\s*¶+\s*/g, "\n");
}
else _text = $.trim(_text.replace(/\s{2,}/g, " "));
if (parseFloat(_text) === 0) _text = "0";
if (_type) {
if (_type == "upper") _text = _text.toUpperCase();
if (_type == "lower" || _type == "word") _text = _text.toLowerCase();
if (_type == "title" || _type == "word") {
_text = _text.replace(/(?:^|-|:|;|\s|\.|\(|\/)[a-záéíóúüñ]/g, function (m) { return m.toUpperCase(); });
_text = _text.replace(/\s(?:Y|O|De[l]?|Por|A[l]?|L[ao]s?|[SC]on|En|Se|Que|Un[a]?)\b/g, function (m) { return m.toLowerCase(); });
}
}
else _text = _text.replace(/^\w/, _text.charAt(0).toUpperCase());
if (_isDOM) obj.value = _text;
return _text;
}
//-----------------------------------
// Sets the numeric format in es-CO culture.
// Places decimal "." and thousand "," separator
function fnNumericFormat(obj) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
var x = _text.replace(/\./g, "").split(",") || [""];
var num = x[0].replace(/\B(?=(\d{3})+(?!\d))/g, ".");
var dec = x.length > 1 ? "," + x[1] : "";
if (_isDOM) obj.value = num + dec;
return (num + dec);
}
//-----------------------------------
// Validates the format of text,
// depending on the type supplied.
// Date validations are performed according to es-CO culture
function fnIsValidFormat(obj, _type) {
var _pattern = null,
_text = input.isText(obj) ? obj.value : obj.toString();
switch (_type) {
case "d": //Validates Date format: dd/MM/yyyy
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])$/;
break;
case "t": //Validates Time format: HH:mm:ss
_pattern = /^([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "dt": //Validates DateTime format: dd/MM/yyyy HH:mm:ss
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])\s([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "email": //Validates an email address
_pattern = /^([0-9a-zA-Zñ](?:[\-.\w]*[0-9a-zA-Zñ])*@(?:[0-9a-zA-Zñ][\-\wñ]*[0-9a-zA-Zñ]\.)+[a-zA-Z]{2,9})$/i;
break;
case "pass": //Validates the password strength (must have 8-20 characters, at least one number, at least one uppercase)
_pattern = /^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,20}$/;
break;
case "lat": //Validates the latitude
_pattern = /^-?([1-8]?[1-9]|[1-9]0|0)\,{1}\d{1,6}$/;
break;
case "lon": //Validates the longitude
_pattern = /^-?([1]?[1-7][1-9]|[1]?[1-8][0]|[1-9]?[0-9])\,{1}\d{1,6}$/;
break;
}
return !!_pattern && _pattern.test(_text);
}
//-----------------------------------
// Evaluates whether the value of text is a date or not.
// The validation outcome will be shown in a tooltip
var fnIsValidDate = function(_dom, o) {
if (!input.isText(_dom)) return false;
var error = false;
o = $.extend({
isFuture: false,
compareTo: new Date(),
warning: 'La fecha no puede ser {0} a hoy'}, o);
var _type = _dom.value.length > 10 ? "dt" : "d";
var _date = _dom.value.substr(0, 10);
var parser = function (date) {
if (date instanceof Date) return date;
if (typeof date !== "string") return new Date();
if (!fnIsValidFormat(date, _type)) { error = true; return new Date(); }
return new Date(date.replace(/^(\d{2})\/(\d{2})\/(\d{4})$/, '$3/$2/$1'));
};
var dif = (parser(_date) - parser(o.compareTo)) / 1000 / 3600 / 24;
if (error) return fnShowTooltip(_dom, fnIsValidDate.formatError);
if ( o.isFuture && dif < 0) return fnShowTooltip(_dom, o.warning.replace("{0}","menor"));
if (!o.isFuture && dif > 0) return fnShowTooltip(_dom, o.warning.replace("{0}","mayor"));
return true;
};
// We expose a property to set default message for the format error
fnIsValidDate.formatError = 'El formato de fecha es incorrecto';
//-----------------------------------
// Shows a custom warning message
function fnShowTooltip(_dom, _msg) {
if (isDO | ------------------------
// Shows the loading overlay screen
function fnLoading(o) {
var d = $.extend({
show: true,
hide: false,
delay: 2600
}, o);
$("#loadingWrapper").remove();
if (d.hide) return true;
var blockG = [];
for (var i = 1; i < 9; i++) blockG.push('<div class="blockG"></div>');
var loading = $('<div id="floatingBarsG" />').append(blockG.join(""));
var overlay = $('<div class="bg-fixed bg-opacity" />');
$('<div id="loadingWrapper" />').append(overlay, loading).appendTo(js.wrapper).hide().fadeIn(d.delay);
loading.fnCenter();
return true;
}
//-----------------------------------
// Sets the focus on input elements
function fnSetFocus() {
$($('input[type="text"], textarea').filter(':not(input:disabled)').get().reverse()).each(function () {
if (!$(this).hasClass("no-auto-focus")) $(this).focus();
});
}
//-----------------------------------
/* jQUERY EXTENSIONS */
//-----------------------------------
// Sets the jquery objects in the center of screen
$.fn.fnCenter = function() {
this.css({
'position': 'fixed',
'left': '50%',
'top': '50%'
});
this.css({
'margin-left': -this.outerWidth() / 2 + 'px',
'margin-top': -this.outerHeight() / 2 + 'px'
});
return this;
};
//-----------------------------------
// Limits the max length in the input:text
$.fn.fnMaxLength = function(length) {
return this.each(function (i, dom) {
var count = "Max: " + length;
var vld = '#max' + dom.id;
if (!input.isText(dom)) return true; //continue
$(dom).on("blur", function() { $(vld).remove(); });
$(dom).on("keypress input paste", function (e) {
var len = dom.value.length;
var max = len >= length ? 1 : 0;
if (getBrowser.mozilla) max = !e.keyCode && max;
if (max) {
len = length;
dom.value = dom.value.substr(0, len);
e.preventDefault();
}
count = "Max: " + len + "/" + length;
if(!$(vld).text(count).length) {
$('<span class="vld-tooltip" id="max' + dom.id + '" />')
.text(count).appendTo(js.wrapper).position({
of: dom,
at: "right top",
my: "left+6 top",
collision: "flipfit"
}).hide().fadeIn(400);
}
});
});
};
//-----------------------------------
// Apply the capitalized format to text when blur event occurs
$.fn.fnCapitalize = function(type) {
return this.each(function (i, dom) {
$(dom).on("blur", function() {
fnCapitalize(this, type);
});
});
};
//-----------------------------------
// Sets numeric format with decimal/thousand separators
$.fn.fnNumericFormat = function() {
return this.each(function (i, dom) {
$(dom).on("keyup blur", function() {
fnNumericFormat(this);
});
});
};
//-----------------------------------
// Allows only numeric characters
$.fn.fnNumericInput = function () {
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _digits = _value.match(/\d/g);
_value = !_digits ? "" : _digits.join("").substr(0, e.data.max);
if (e.type == "blur" && parseFloat(_value) === 0) _value = "0";
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keydown", function (e) {
var _key = e.which || e.keyCode;
var _ctrl = !!(e.ctrlKey || e.metaKey);
// Allow: (numbers), (keypad numbers),
// Allow: (backspace, tab, delete), (home, end, arrows)
// Allow: (Ctrl+A), (Ctrl+C)
// Allow: (Ctrl+V), (Ctrl+X)
return ((_key >= 48 && _key <= 57) || (_key >= 96 && _key <= 105) ||
(_key == 8 || _key == 9 || _key == 46) || (_key >= 35 && _key <= 40) ||
(_ctrl && _key == 65) || (_ctrl && _key == 67) ||
(_ctrl && _key == 86) || (_ctrl && _key == 88));
});
});
};
//-----------------------------------
// Sets a mask for the allowed characters
$.fn.fnCustomInput = function (mask) {
mask = mask instanceof RegExp ? mask : fnEscapeRegExp(mask);
if (!mask) throw new CustomException("Mask must be RegExp or string");
if (typeof mask === "string") mask = "[" + mask + "]";
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _pattern = new RegExp(mask.source || mask, "gi");
var _matched = _value.match(_pattern);
_value = !_matched ? "" : _matched.join("").substr(0, e.data.max);
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keypress", function (e) {
var _pattern = new RegExp(mask.source || mask, "i");
var _key = e.which || e.keyCode;
var _vk = (_key == 8 || _key == 9 || _key == 46 || (_key >= 35 && _key <= 40));
return _pattern.test(String.fromCharCode(_key)) || _vk;
});
});
};
//-----------------------------------
// Disables the specified keyboard keys.
// To allow a set of keys, better use $.fnCustomInput
$.fn.fnDisableKey = function (key) {
if (!key) return this;
var keys = key.toString().split("");
keys = keys.filter(function(n){ return (n && n.length); });
return this.each(function() {
$(this).on("keypress", function (e) {
var _key = e.which || e.keyCode;
_key = String.fromCharCode(_key);
return $.inArray(_key, keys) == -1;
});
});
};
//-----------------------------------
// Validates the required form fields
$.fn.fnEasyValidate = function (fnValidator) {
return this.each(function () {
var btn = this;
if (!window.jQuery.ui) {
throw new CustomException("jQuery.UI is required");
}
if (!btn.type || btn.type.toLowerCase() != "submit") {
fnShowTooltip(btn, "this method can be performed only on submit buttons");
return true; //continue with next element
}
if (!$(btn).closest("form").length) {
fnShowTooltip(btn, "The button must be inside a form");
return true;
}
// Prevents send the form if any field is not valid
$(btn).on("click", { handler: "easyValidate" }, function (event) {
btn.blur(); $(".vld-tooltip").remove();
var _submit = true; fnSetFocus();
// Validates each [input, select] element
$(".vld-required").each(function (i, _dom) {
var _tag = _dom.nodeName.toLowerCase();
// Gets the html5 validation data storage, modern browsers admit: _dom.dataset.validation
if (btn.getAttribute('data-validation') !== _dom.getAttribute('data-validation')) return true; //continue
// If the element is [select], the first option and the option with a value="0" will be invalid
if ((_tag == "select" && (_dom.selectedIndex === 0 || _dom.value === "0")) || _tag == "span" ||
(input.isText(_dom) && !_dom.value.length) || (input.isCheck(_dom) && !_dom.checked)) {
var dom = _dom;
// Asp radiobutton or checkbox
if (_tag == "span" || input.isCheck(_dom)) {
if (_tag == "input") dom = $(_dom);
else dom = $(_dom).find("input:first-child");
if (dom.is(":checked") || $('[name="' + dom.attr("name") + '"]').filter(":checked").length) return true; //continue
if (_tag == "span") dom.addClass("vld-required");
dom = dom.get(0);
}
// Shows the tooltip for required field
var vld = $('<span class="vld-tooltip" />').data("target-id", dom.id);
vld.appendTo(js.wrapper).html("Este campo es requerido").position({
of: dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
event.preventDefault();
dom.focus();
return (_submit = false); //break
} //end if
}); //end $.each
// Removes the validation message
$(".vld-required").on("blur", function (e) {
var dom = e.target;
if (dom.selectedIndex !== 0 || dom.checked || dom.value.length) {
$(".vld-tooltip").each(function (i, _vld) {
if (dom.id == $(_vld).data("target-id"))
{ $(_vld).remove(); return false; }
});
}
});
// Calls the function to validate the form if it was provided
if (_submit && isFunction(fnValidator) && !fnValidator(btn)) {
event.preventDefault();
_submit = false;
}
return _submit;
}); //end $.click
}); //return jquery
};
//-----------------------------------
/* PUBLIC API */
//-----------------------------------
jherax.browser = getBrowser;
jherax.isDOM = isDOM;
jherax.isEvent = isEvent;
jherax.isFunction = isFunction;
//jherax.input = input;
jherax.fnStringify = fnStringify;
jherax.fnEscapeRegExp = fnEscapeRegExp;
jherax.fnGetDate = fnGetDate;
jherax.fnGetHtmlText = fnGetHtmlText;
jherax.fnGetSelectedText = fnGetSelectedText;
jherax.fnGetCaretPosition = fnGetCaretPosition;
jherax.fnSetCaretPosition = fnSetCaretPosition;
jherax.fnCapitalize = fnCapitalize;
jherax.fnNumericFormat = fnNumericFormat;
jherax.fnIsValidFormat = fnIsValidFormat;
jherax.fnIsValidDate = fnIsValidDate;
jherax.fnShowTooltip = fnShowTooltip;
jherax.fnLoading = fnLoading;
jherax.fnSetFocus = fnSetFocus;
})(js.createNS("js.utils"), jQuery);
// Create the namespace for utils
| M(_dom)) _dom = $(_dom);
_dom.on("blur", function () { $(".vld-tooltip").remove(); });
var vld = $('<span class="vld-tooltip">' + _msg + '</span>');
vld.appendTo(js.wrapper).position({
of: _dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
_dom.focus();
return false;
}
//----------- | identifier_body |
jherax.js | //******************************
// Utils for validations
// Author: David Rivera
// Created: 26/06/2013
//******************************
// jherax.github.io
// github.com/jherax/js-utils
//******************************
;
// Essential JavaScript Namespacing Patterns
// http://addyosmani.com/blog/essential-js-namespacing/
// Create a custom exception notifier
var CustomException = function(message) {
this.name = "js-utils exception";
this.message = message || "An error has occurred";
this.toString = function() {
return this.name + ": " + this.message;
};
};
// We need to do a check before we create the namespace
var js = window.js || { author: 'jherax' };
if (js.author != 'jherax') {
throw new CustomException("A variable with namespace [js] is already in use");
}
// Create a general purpose namespace method
js.createNS = js.createNS || function (namespace) {
var nsparts = namespace.toString().split(".");
var parent = js;
// we want to be able to include or exclude the root namespace so we strip it if it's in the namespace
if (nsparts[0] === "js") nsparts = nsparts.slice(1);
// loop through the parts and create a nested namespace if necessary
for (var i = 0; i < nsparts.length; i++) {
var subns = nsparts[i];
// check if the namespace is a valid variable name
if (!(/\w+/).test(subns)) throw new CustomException("Invalid namespace");
// check if the current parent already has the namespace declared
// if it isn't, then create it
if (typeof parent[subns] === "undefined") {
parent[subns] = {};
}
parent = parent[subns];
}
// the parent is now constructed with empty namespaces and can be used.
// we return the outermost namespace
return parent;
};
// We expose a property to specify where the tooltip element will be appended
js.wrapper = "body"; //#main-section
//-----------------------------------
// Immediately-invoked Function Expressions (IIFE)
// We pass the namespace as an argument to a self-invoking function.
// jherax is the namespace context, and $ is the jQuery Object
(function (jherax, $) {
//-----------------------------------
/* PRIVATE MEMBERS */
//-----------------------------------
// Adds support for browser detect.
// jquery 1.9+ deprecates $.browser
var getBrowser = (function() {
var ua = navigator.userAgent.toLowerCase();
var match =
/(msie) ([\w.]+)/.exec(ua) ||
/(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
var b = {}, o = {
browser: match[1] || "unknown",
version: match[2] || "0"
};
b[o.browser] = true;
b.version = o.version;
return b;
})();
//-----------------------------------
// Determines if a object is DOM element
var isDOM = function(obj) {
return (!!obj && typeof obj === "object" && !!obj.nodeType);
};
//-----------------------------------
// Determines if the entry parameter is a normalized Event Object
var isEvent = function(obj) {
return (!!obj && typeof obj === "object" && obj.which !== undefined && !!obj.target);
};
//-----------------------------------
// Determines if the entry parameter is a function
var isFunction = function(obj) {
return (!!obj && Object.prototype.toString.call(obj) == '[object Function]');
};
//-----------------------------------
// Determines whether the entry parameter is a text input or checkable input
// www.quackit.com/html_5/tags/html_input_tag.cfm
var input = {
isText: function(_dom) {
if(!isDOM(_dom)) return false;
if ((/textarea/i).test(_dom.nodeName)) return true;
var regx = /text|password|file|number|search|tel|url|email|datetime|datetime-local|date|time|month|week/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
},
isCheck: function(_dom) {
if(!isDOM(_dom)) return false;
var regx = /checkbox|radio/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
}
};
//-----------------------------------
// This is a facade of JSON.stringify and provides support in old browsers
var fnStringify = typeof JSON != "undefined" ? JSON.stringify : function (json) {
var arr = [];
$.each(json, function (key, val) {
var prop = "\"" + key + "\":";
prop += ($.isPlainObject(val) ? fnStringify(val) :
(typeof val === "string" ? "\"" + val + "\"" : val));
arr.push(prop);
});
return "{" + arr.join(",") + "}";
};
//-----------------------------------
// Escaping user input to be treated as a literal string within a regular expression
function fnEscapeRegExp(txt){
if (typeof txt !== "string") return null;
return txt.replace(/([.*+?=!:${}()|\^\[\]\/\\])/g, "\\$1");
}
//-----------------------------------
// Gets the text of current date in es-CO culture. dd/MM/yyyy HH:mm:ss
function fnGetDate() {
var f = new Date();
var fillZero = function(n) { return ("0" + n.toString()).slice(-2); };
var fnDate = function() { return (fillZero(f.getDate()) +"/"+ fillZero(f.getMonth() + 1) +"/"+ f.getFullYear()); };
var fnTime = function() { return (fillZero(f.getHours()) +":"+ fillZero(f.getMinutes()) +":"+ fillZero(f.getSeconds())); };
var fnDateTime = function() { return fnDate() + " " + fnTime(); };
return {
date: fnDate(),
time: fnTime(),
dateTime: fnDateTime()
};
}
//-----------------------------------
// Gets the text as html encoded
// This is a delegate for $.val()
function | (i, value) {
var html = $("<div/>").text(value).html();
return $.trim(html);
}
//-----------------------------------
// Gets selected text in the document
function fnGetSelectedText() {
var _dom = document.activeElement;
var _sel = { text: "", slice: "", start: -1, end: -1 };
if (window.getSelection) {
// Get selected text from input fields
if (input.isText(_dom)) {
_sel.start = _dom.selectionStart;
_sel.end = _dom.selectionEnd;
if (_sel.end > _sel.start) {
_sel.text = _dom.value.substring(_sel.start, _sel.end);
_sel.slice = _dom.value.slice(0, _sel.start) + _dom.value.slice(_sel.end);
}
}
// Get selected text from document
else _sel.text = window.getSelection().toString();
} else if (document.selection.createRange)
_sel.text = document.selection.createRange().text;
if (_sel.text !== "") _sel.text = $.trim(_sel.text);
return _sel;
}
//-----------------------------------
// Gets the cursor position in the text
function fnGetCaretPosition(_dom) {
if ('selectionStart' in _dom) {
return (_dom.selectionStart);
} else { // IE below version 9
var _sel = document.selection.createRange();
_sel.moveStart('character', -_dom.value.length);
return (_sel.text.length);
}
}
//-----------------------------------
// Sets the cursor position in the text
function fnSetCaretPosition(_dom, pos) {
if ('selectionStart' in _dom) {
_dom.setSelectionRange(pos, pos);
} else { // IE below version 9
var range = _dom.createTextRange();
range.collapse(true);
range.moveEnd('character', pos);
range.moveStart('character', pos);
range.select();
}
}
//-----------------------------------
// Transforms the text to capital letter.
// Also removes all consecutive spaces
function fnCapitalize(obj, _type) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
if (!_text || _text.length === 0) return "";
if ((/textarea/i).test(obj.nodeName)) {
_text = _text.replace(/\r|\n/g, "¶").replace(/\s{2,}/g, " ");
while ((/^[¶\s]|[¶\s]$/g).test(_text))
_text = _text.replace(/^[¶\s]+|[¶\s]+$/g, "");
_text = _text.replace(/\s*¶+\s*/g, "\n");
}
else _text = $.trim(_text.replace(/\s{2,}/g, " "));
if (parseFloat(_text) === 0) _text = "0";
if (_type) {
if (_type == "upper") _text = _text.toUpperCase();
if (_type == "lower" || _type == "word") _text = _text.toLowerCase();
if (_type == "title" || _type == "word") {
_text = _text.replace(/(?:^|-|:|;|\s|\.|\(|\/)[a-záéíóúüñ]/g, function (m) { return m.toUpperCase(); });
_text = _text.replace(/\s(?:Y|O|De[l]?|Por|A[l]?|L[ao]s?|[SC]on|En|Se|Que|Un[a]?)\b/g, function (m) { return m.toLowerCase(); });
}
}
else _text = _text.replace(/^\w/, _text.charAt(0).toUpperCase());
if (_isDOM) obj.value = _text;
return _text;
}
//-----------------------------------
// Sets the numeric format in es-CO culture.
// Places decimal "." and thousand "," separator
function fnNumericFormat(obj) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
var x = _text.replace(/\./g, "").split(",") || [""];
var num = x[0].replace(/\B(?=(\d{3})+(?!\d))/g, ".");
var dec = x.length > 1 ? "," + x[1] : "";
if (_isDOM) obj.value = num + dec;
return (num + dec);
}
//-----------------------------------
// Validates the format of text,
// depending on the type supplied.
// Date validations are performed according to es-CO culture
function fnIsValidFormat(obj, _type) {
var _pattern = null,
_text = input.isText(obj) ? obj.value : obj.toString();
switch (_type) {
case "d": //Validates Date format: dd/MM/yyyy
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])$/;
break;
case "t": //Validates Time format: HH:mm:ss
_pattern = /^([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "dt": //Validates DateTime format: dd/MM/yyyy HH:mm:ss
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])\s([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "email": //Validates an email address
_pattern = /^([0-9a-zA-Zñ](?:[\-.\w]*[0-9a-zA-Zñ])*@(?:[0-9a-zA-Zñ][\-\wñ]*[0-9a-zA-Zñ]\.)+[a-zA-Z]{2,9})$/i;
break;
case "pass": //Validates the password strength (must have 8-20 characters, at least one number, at least one uppercase)
_pattern = /^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,20}$/;
break;
case "lat": //Validates the latitude
_pattern = /^-?([1-8]?[1-9]|[1-9]0|0)\,{1}\d{1,6}$/;
break;
case "lon": //Validates the longitude
_pattern = /^-?([1]?[1-7][1-9]|[1]?[1-8][0]|[1-9]?[0-9])\,{1}\d{1,6}$/;
break;
}
return !!_pattern && _pattern.test(_text);
}
//-----------------------------------
// Evaluates whether the value of text is a date or not.
// The validation outcome will be shown in a tooltip
var fnIsValidDate = function(_dom, o) {
if (!input.isText(_dom)) return false;
var error = false;
o = $.extend({
isFuture: false,
compareTo: new Date(),
warning: 'La fecha no puede ser {0} a hoy'}, o);
var _type = _dom.value.length > 10 ? "dt" : "d";
var _date = _dom.value.substr(0, 10);
var parser = function (date) {
if (date instanceof Date) return date;
if (typeof date !== "string") return new Date();
if (!fnIsValidFormat(date, _type)) { error = true; return new Date(); }
return new Date(date.replace(/^(\d{2})\/(\d{2})\/(\d{4})$/, '$3/$2/$1'));
};
var dif = (parser(_date) - parser(o.compareTo)) / 1000 / 3600 / 24;
if (error) return fnShowTooltip(_dom, fnIsValidDate.formatError);
if ( o.isFuture && dif < 0) return fnShowTooltip(_dom, o.warning.replace("{0}","menor"));
if (!o.isFuture && dif > 0) return fnShowTooltip(_dom, o.warning.replace("{0}","mayor"));
return true;
};
// We expose a property to set default message for the format error
fnIsValidDate.formatError = 'El formato de fecha es incorrecto';
//-----------------------------------
// Shows a custom warning message
function fnShowTooltip(_dom, _msg) {
if (isDOM(_dom)) _dom = $(_dom);
_dom.on("blur", function () { $(".vld-tooltip").remove(); });
var vld = $('<span class="vld-tooltip">' + _msg + '</span>');
vld.appendTo(js.wrapper).position({
of: _dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
_dom.focus();
return false;
}
//-----------------------------------
// Shows the loading overlay screen
function fnLoading(o) {
var d = $.extend({
show: true,
hide: false,
delay: 2600
}, o);
$("#loadingWrapper").remove();
if (d.hide) return true;
var blockG = [];
for (var i = 1; i < 9; i++) blockG.push('<div class="blockG"></div>');
var loading = $('<div id="floatingBarsG" />').append(blockG.join(""));
var overlay = $('<div class="bg-fixed bg-opacity" />');
$('<div id="loadingWrapper" />').append(overlay, loading).appendTo(js.wrapper).hide().fadeIn(d.delay);
loading.fnCenter();
return true;
}
//-----------------------------------
// Sets the focus on input elements
function fnSetFocus() {
$($('input[type="text"], textarea').filter(':not(input:disabled)').get().reverse()).each(function () {
if (!$(this).hasClass("no-auto-focus")) $(this).focus();
});
}
//-----------------------------------
/* jQUERY EXTENSIONS */
//-----------------------------------
// Sets the jquery objects in the center of screen
$.fn.fnCenter = function() {
this.css({
'position': 'fixed',
'left': '50%',
'top': '50%'
});
this.css({
'margin-left': -this.outerWidth() / 2 + 'px',
'margin-top': -this.outerHeight() / 2 + 'px'
});
return this;
};
//-----------------------------------
// Limits the max length in the input:text
$.fn.fnMaxLength = function(length) {
return this.each(function (i, dom) {
var count = "Max: " + length;
var vld = '#max' + dom.id;
if (!input.isText(dom)) return true; //continue
$(dom).on("blur", function() { $(vld).remove(); });
$(dom).on("keypress input paste", function (e) {
var len = dom.value.length;
var max = len >= length ? 1 : 0;
if (getBrowser.mozilla) max = !e.keyCode && max;
if (max) {
len = length;
dom.value = dom.value.substr(0, len);
e.preventDefault();
}
count = "Max: " + len + "/" + length;
if(!$(vld).text(count).length) {
$('<span class="vld-tooltip" id="max' + dom.id + '" />')
.text(count).appendTo(js.wrapper).position({
of: dom,
at: "right top",
my: "left+6 top",
collision: "flipfit"
}).hide().fadeIn(400);
}
});
});
};
//-----------------------------------
// Apply the capitalized format to text when blur event occurs
$.fn.fnCapitalize = function(type) {
return this.each(function (i, dom) {
$(dom).on("blur", function() {
fnCapitalize(this, type);
});
});
};
//-----------------------------------
// Sets numeric format with decimal/thousand separators
$.fn.fnNumericFormat = function() {
return this.each(function (i, dom) {
$(dom).on("keyup blur", function() {
fnNumericFormat(this);
});
});
};
//-----------------------------------
// Allows only numeric characters
$.fn.fnNumericInput = function () {
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _digits = _value.match(/\d/g);
_value = !_digits ? "" : _digits.join("").substr(0, e.data.max);
if (e.type == "blur" && parseFloat(_value) === 0) _value = "0";
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keydown", function (e) {
var _key = e.which || e.keyCode;
var _ctrl = !!(e.ctrlKey || e.metaKey);
// Allow: (numbers), (keypad numbers),
// Allow: (backspace, tab, delete), (home, end, arrows)
// Allow: (Ctrl+A), (Ctrl+C)
// Allow: (Ctrl+V), (Ctrl+X)
return ((_key >= 48 && _key <= 57) || (_key >= 96 && _key <= 105) ||
(_key == 8 || _key == 9 || _key == 46) || (_key >= 35 && _key <= 40) ||
(_ctrl && _key == 65) || (_ctrl && _key == 67) ||
(_ctrl && _key == 86) || (_ctrl && _key == 88));
});
});
};
//-----------------------------------
// Sets a mask for the allowed characters
$.fn.fnCustomInput = function (mask) {
mask = mask instanceof RegExp ? mask : fnEscapeRegExp(mask);
if (!mask) throw new CustomException("Mask must be RegExp or string");
if (typeof mask === "string") mask = "[" + mask + "]";
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _pattern = new RegExp(mask.source || mask, "gi");
var _matched = _value.match(_pattern);
_value = !_matched ? "" : _matched.join("").substr(0, e.data.max);
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keypress", function (e) {
var _pattern = new RegExp(mask.source || mask, "i");
var _key = e.which || e.keyCode;
var _vk = (_key == 8 || _key == 9 || _key == 46 || (_key >= 35 && _key <= 40));
return _pattern.test(String.fromCharCode(_key)) || _vk;
});
});
};
//-----------------------------------
// Disables the specified keyboard keys.
// To allow a set of keys, better use $.fnCustomInput
$.fn.fnDisableKey = function (key) {
if (!key) return this;
var keys = key.toString().split("");
keys = keys.filter(function(n){ return (n && n.length); });
return this.each(function() {
$(this).on("keypress", function (e) {
var _key = e.which || e.keyCode;
_key = String.fromCharCode(_key);
return $.inArray(_key, keys) == -1;
});
});
};
//-----------------------------------
// Validates the required form fields
$.fn.fnEasyValidate = function (fnValidator) {
return this.each(function () {
var btn = this;
if (!window.jQuery.ui) {
throw new CustomException("jQuery.UI is required");
}
if (!btn.type || btn.type.toLowerCase() != "submit") {
fnShowTooltip(btn, "this method can be performed only on submit buttons");
return true; //continue with next element
}
if (!$(btn).closest("form").length) {
fnShowTooltip(btn, "The button must be inside a form");
return true;
}
// Prevents send the form if any field is not valid
$(btn).on("click", { handler: "easyValidate" }, function (event) {
btn.blur(); $(".vld-tooltip").remove();
var _submit = true; fnSetFocus();
// Validates each [input, select] element
$(".vld-required").each(function (i, _dom) {
var _tag = _dom.nodeName.toLowerCase();
// Gets the html5 validation data storage, modern browsers admit: _dom.dataset.validation
if (btn.getAttribute('data-validation') !== _dom.getAttribute('data-validation')) return true; //continue
// If the element is [select], the first option and the option with a value="0" will be invalid
if ((_tag == "select" && (_dom.selectedIndex === 0 || _dom.value === "0")) || _tag == "span" ||
(input.isText(_dom) && !_dom.value.length) || (input.isCheck(_dom) && !_dom.checked)) {
var dom = _dom;
// Asp radiobutton or checkbox
if (_tag == "span" || input.isCheck(_dom)) {
if (_tag == "input") dom = $(_dom);
else dom = $(_dom).find("input:first-child");
if (dom.is(":checked") || $('[name="' + dom.attr("name") + '"]').filter(":checked").length) return true; //continue
if (_tag == "span") dom.addClass("vld-required");
dom = dom.get(0);
}
// Shows the tooltip for required field
var vld = $('<span class="vld-tooltip" />').data("target-id", dom.id);
vld.appendTo(js.wrapper).html("Este campo es requerido").position({
of: dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
event.preventDefault();
dom.focus();
return (_submit = false); //break
} //end if
}); //end $.each
// Removes the validation message
$(".vld-required").on("blur", function (e) {
var dom = e.target;
if (dom.selectedIndex !== 0 || dom.checked || dom.value.length) {
$(".vld-tooltip").each(function (i, _vld) {
if (dom.id == $(_vld).data("target-id"))
{ $(_vld).remove(); return false; }
});
}
});
// Calls the function to validate the form if it was provided
if (_submit && isFunction(fnValidator) && !fnValidator(btn)) {
event.preventDefault();
_submit = false;
}
return _submit;
}); //end $.click
}); //return jquery
};
//-----------------------------------
/* PUBLIC API */
//-----------------------------------
jherax.browser = getBrowser;
jherax.isDOM = isDOM;
jherax.isEvent = isEvent;
jherax.isFunction = isFunction;
//jherax.input = input;
jherax.fnStringify = fnStringify;
jherax.fnEscapeRegExp = fnEscapeRegExp;
jherax.fnGetDate = fnGetDate;
jherax.fnGetHtmlText = fnGetHtmlText;
jherax.fnGetSelectedText = fnGetSelectedText;
jherax.fnGetCaretPosition = fnGetCaretPosition;
jherax.fnSetCaretPosition = fnSetCaretPosition;
jherax.fnCapitalize = fnCapitalize;
jherax.fnNumericFormat = fnNumericFormat;
jherax.fnIsValidFormat = fnIsValidFormat;
jherax.fnIsValidDate = fnIsValidDate;
jherax.fnShowTooltip = fnShowTooltip;
jherax.fnLoading = fnLoading;
jherax.fnSetFocus = fnSetFocus;
})(js.createNS("js.utils"), jQuery);
// Create the namespace for utils
| fnGetHtmlText | identifier_name |
jherax.js | //******************************
// Utils for validations
// Author: David Rivera
// Created: 26/06/2013
//******************************
// jherax.github.io
// github.com/jherax/js-utils
//******************************
;
// Essential JavaScript Namespacing Patterns
// http://addyosmani.com/blog/essential-js-namespacing/
// Create a custom exception notifier
var CustomException = function(message) {
this.name = "js-utils exception";
this.message = message || "An error has occurred";
this.toString = function() {
return this.name + ": " + this.message;
};
};
// We need to do a check before we create the namespace
var js = window.js || { author: 'jherax' };
if (js.author != 'jherax') {
throw new CustomException("A variable with namespace [js] is already in use");
}
// Create a general purpose namespace method
js.createNS = js.createNS || function (namespace) {
var nsparts = namespace.toString().split(".");
var parent = js;
// we want to be able to include or exclude the root namespace so we strip it if it's in the namespace
if (nsparts[0] === "js") nsparts = nsparts.slice(1);
// loop through the parts and create a nested namespace if necessary
for (var i = 0; i < nsparts.length; i++) {
var subns = nsparts[i];
// check if the namespace is a valid variable name
if (!(/\w+/).test(subns)) throw new CustomException("Invalid namespace");
// check if the current parent already has the namespace declared
// if it isn't, then create it
if (typeof parent[subns] === "undefined") {
parent[subns] = {};
}
parent = parent[subns];
}
// the parent is now constructed with empty namespaces and can be used.
// we return the outermost namespace
return parent;
};
// We expose a property to specify where the tooltip element will be appended
js.wrapper = "body"; //#main-section
//-----------------------------------
// Immediately-invoked Function Expressions (IIFE)
// We pass the namespace as an argument to a self-invoking function.
// jherax is the namespace context, and $ is the jQuery Object
(function (jherax, $) {
//-----------------------------------
/* PRIVATE MEMBERS */
//-----------------------------------
// Adds support for browser detect.
// jquery 1.9+ deprecates $.browser
var getBrowser = (function() {
var ua = navigator.userAgent.toLowerCase();
var match =
/(msie) ([\w.]+)/.exec(ua) ||
/(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
var b = {}, o = {
browser: match[1] || "unknown",
version: match[2] || "0"
};
b[o.browser] = true;
b.version = o.version;
return b;
})();
//-----------------------------------
// Determines if a object is DOM element
var isDOM = function(obj) {
return (!!obj && typeof obj === "object" && !!obj.nodeType);
};
//-----------------------------------
// Determines if the entry parameter is a normalized Event Object
var isEvent = function(obj) {
return (!!obj && typeof obj === "object" && obj.which !== undefined && !!obj.target);
};
//-----------------------------------
// Determines if the entry parameter is a function
var isFunction = function(obj) {
return (!!obj && Object.prototype.toString.call(obj) == '[object Function]');
};
//-----------------------------------
// Determines whether the entry parameter is a text input or checkable input
// www.quackit.com/html_5/tags/html_input_tag.cfm
var input = {
isText: function(_dom) {
if(!isDOM(_dom)) return false;
if ((/textarea/i).test(_dom.nodeName)) return true;
var regx = /text|password|file|number|search|tel|url|email|datetime|datetime-local|date|time|month|week/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
},
isCheck: function(_dom) {
if(!isDOM(_dom)) return false;
var regx = /checkbox|radio/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
}
};
//-----------------------------------
// This is a facade of JSON.stringify and provides support in old browsers
var fnStringify = typeof JSON != "undefined" ? JSON.stringify : function (json) {
var arr = [];
$.each(json, function (key, val) {
var prop = "\"" + key + "\":";
prop += ($.isPlainObject(val) ? fnStringify(val) :
(typeof val === "string" ? "\"" + val + "\"" : val));
arr.push(prop);
});
return "{" + arr.join(",") + "}";
};
//-----------------------------------
// Escaping user input to be treated as a literal string within a regular expression
function fnEscapeRegExp(txt){
if (typeof txt !== "string") return null;
return txt.replace(/([.*+?=!:${}()|\^\[\]\/\\])/g, "\\$1");
}
//-----------------------------------
// Gets the text of current date in es-CO culture. dd/MM/yyyy HH:mm:ss
function fnGetDate() {
var f = new Date();
var fillZero = function(n) { return ("0" + n.toString()).slice(-2); };
var fnDate = function() { return (fillZero(f.getDate()) +"/"+ fillZero(f.getMonth() + 1) +"/"+ f.getFullYear()); };
var fnTime = function() { return (fillZero(f.getHours()) +":"+ fillZero(f.getMinutes()) +":"+ fillZero(f.getSeconds())); };
var fnDateTime = function() { return fnDate() + " " + fnTime(); };
return {
date: fnDate(),
time: fnTime(),
dateTime: fnDateTime()
};
}
//-----------------------------------
// Gets the text as html encoded
// This is a delegate for $.val()
function fnGetHtmlText(i, value) {
var html = $("<div/>").text(value).html();
return $.trim(html);
}
//-----------------------------------
// Gets selected text in the document
function fnGetSelectedText() {
var _dom = document.activeElement;
var _sel = { text: "", slice: "", start: -1, end: -1 };
if (window.getSelection) {
// Get selected text from input fields
if (input.isText(_dom)) {
_sel.start = _dom.selectionStart;
_sel.end = _dom.selectionEnd;
if (_sel.end > _sel.start) {
_sel.text = _dom.value.substring(_sel.start, _sel.end);
_sel.slice = _dom.value.slice(0, _sel.start) + _dom.value.slice(_sel.end);
}
}
// Get selected text from document
else _sel.text = window.getSelection().toString();
} else if (document.selection.createRange)
_sel.text = document.selection.createRange().text;
if (_sel.text !== "") _sel.text = $.trim(_sel.text);
return _sel;
}
//-----------------------------------
// Gets the cursor position in the text
function fnGetCaretPosition(_dom) {
if ('selectionStart' in _dom) {
return (_dom.selectionStart);
} else { // IE below version 9
var _sel = document.selection.createRange();
_sel.moveStart('character', -_dom.value.length);
return (_sel.text.length);
}
}
//-----------------------------------
// Sets the cursor position in the text
function fnSetCaretPosition(_dom, pos) {
if ('selectionStart' in _dom) {
_dom.setSelectionRange(pos, pos);
} else { // IE below version 9
var range = _dom.createTextRange();
range.collapse(true);
range.moveEnd('character', pos);
range.moveStart('character', pos);
range.select();
}
}
//-----------------------------------
// Transforms the text to capital letter.
// Also removes all consecutive spaces
function fnCapitalize(obj, _type) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
if (!_text || _text.length === 0) return "";
if ((/textarea/i).test(obj.nodeName)) {
_text = _text.replace(/\r|\n/g, "¶").replace(/\s{2,}/g, " ");
while ((/^[¶\s]|[¶\s]$/g).test(_text))
_text = _text.replace(/^[¶\s]+|[¶\s]+$/g, "");
_text = _text.replace(/\s*¶+\s*/g, "\n");
}
else _text = $.trim(_text.replace(/\s{2,}/g, " "));
if (parseFloat(_text) === 0) _text = "0";
if (_type) {
| _text = _text.replace(/^\w/, _text.charAt(0).toUpperCase());
if (_isDOM) obj.value = _text;
return _text;
}
//-----------------------------------
// Sets the numeric format in es-CO culture.
// Places decimal "." and thousand "," separator
function fnNumericFormat(obj) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
var x = _text.replace(/\./g, "").split(",") || [""];
var num = x[0].replace(/\B(?=(\d{3})+(?!\d))/g, ".");
var dec = x.length > 1 ? "," + x[1] : "";
if (_isDOM) obj.value = num + dec;
return (num + dec);
}
//-----------------------------------
// Validates the format of text,
// depending on the type supplied.
// Date validations are performed according to es-CO culture
function fnIsValidFormat(obj, _type) {
var _pattern = null,
_text = input.isText(obj) ? obj.value : obj.toString();
switch (_type) {
case "d": //Validates Date format: dd/MM/yyyy
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])$/;
break;
case "t": //Validates Time format: HH:mm:ss
_pattern = /^([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "dt": //Validates DateTime format: dd/MM/yyyy HH:mm:ss
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])\s([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "email": //Validates an email address
_pattern = /^([0-9a-zA-Zñ](?:[\-.\w]*[0-9a-zA-Zñ])*@(?:[0-9a-zA-Zñ][\-\wñ]*[0-9a-zA-Zñ]\.)+[a-zA-Z]{2,9})$/i;
break;
case "pass": //Validates the password strength (must have 8-20 characters, at least one number, at least one uppercase)
_pattern = /^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,20}$/;
break;
case "lat": //Validates the latitude
_pattern = /^-?([1-8]?[1-9]|[1-9]0|0)\,{1}\d{1,6}$/;
break;
case "lon": //Validates the longitude
_pattern = /^-?([1]?[1-7][1-9]|[1]?[1-8][0]|[1-9]?[0-9])\,{1}\d{1,6}$/;
break;
}
return !!_pattern && _pattern.test(_text);
}
//-----------------------------------
// Evaluates whether the value of text is a date or not.
// The validation outcome will be shown in a tooltip
var fnIsValidDate = function(_dom, o) {
if (!input.isText(_dom)) return false;
var error = false;
o = $.extend({
isFuture: false,
compareTo: new Date(),
warning: 'La fecha no puede ser {0} a hoy'}, o);
var _type = _dom.value.length > 10 ? "dt" : "d";
var _date = _dom.value.substr(0, 10);
var parser = function (date) {
if (date instanceof Date) return date;
if (typeof date !== "string") return new Date();
if (!fnIsValidFormat(date, _type)) { error = true; return new Date(); }
return new Date(date.replace(/^(\d{2})\/(\d{2})\/(\d{4})$/, '$3/$2/$1'));
};
var dif = (parser(_date) - parser(o.compareTo)) / 1000 / 3600 / 24;
if (error) return fnShowTooltip(_dom, fnIsValidDate.formatError);
if ( o.isFuture && dif < 0) return fnShowTooltip(_dom, o.warning.replace("{0}","menor"));
if (!o.isFuture && dif > 0) return fnShowTooltip(_dom, o.warning.replace("{0}","mayor"));
return true;
};
// We expose a property to set default message for the format error
fnIsValidDate.formatError = 'El formato de fecha es incorrecto';
//-----------------------------------
// Shows a custom warning message
function fnShowTooltip(_dom, _msg) {
if (isDOM(_dom)) _dom = $(_dom);
_dom.on("blur", function () { $(".vld-tooltip").remove(); });
var vld = $('<span class="vld-tooltip">' + _msg + '</span>');
vld.appendTo(js.wrapper).position({
of: _dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
_dom.focus();
return false;
}
//-----------------------------------
// Shows the loading overlay screen
function fnLoading(o) {
var d = $.extend({
show: true,
hide: false,
delay: 2600
}, o);
$("#loadingWrapper").remove();
if (d.hide) return true;
var blockG = [];
for (var i = 1; i < 9; i++) blockG.push('<div class="blockG"></div>');
var loading = $('<div id="floatingBarsG" />').append(blockG.join(""));
var overlay = $('<div class="bg-fixed bg-opacity" />');
$('<div id="loadingWrapper" />').append(overlay, loading).appendTo(js.wrapper).hide().fadeIn(d.delay);
loading.fnCenter();
return true;
}
//-----------------------------------
// Sets the focus on input elements
function fnSetFocus() {
$($('input[type="text"], textarea').filter(':not(input:disabled)').get().reverse()).each(function () {
if (!$(this).hasClass("no-auto-focus")) $(this).focus();
});
}
//-----------------------------------
/* jQUERY EXTENSIONS */
//-----------------------------------
// Sets the jquery objects in the center of screen
$.fn.fnCenter = function() {
this.css({
'position': 'fixed',
'left': '50%',
'top': '50%'
});
this.css({
'margin-left': -this.outerWidth() / 2 + 'px',
'margin-top': -this.outerHeight() / 2 + 'px'
});
return this;
};
//-----------------------------------
// Limits the max length in the input:text
$.fn.fnMaxLength = function(length) {
return this.each(function (i, dom) {
var count = "Max: " + length;
var vld = '#max' + dom.id;
if (!input.isText(dom)) return true; //continue
$(dom).on("blur", function() { $(vld).remove(); });
$(dom).on("keypress input paste", function (e) {
var len = dom.value.length;
var max = len >= length ? 1 : 0;
if (getBrowser.mozilla) max = !e.keyCode && max;
if (max) {
len = length;
dom.value = dom.value.substr(0, len);
e.preventDefault();
}
count = "Max: " + len + "/" + length;
if(!$(vld).text(count).length) {
$('<span class="vld-tooltip" id="max' + dom.id + '" />')
.text(count).appendTo(js.wrapper).position({
of: dom,
at: "right top",
my: "left+6 top",
collision: "flipfit"
}).hide().fadeIn(400);
}
});
});
};
//-----------------------------------
// Apply the capitalized format to text when blur event occurs
$.fn.fnCapitalize = function(type) {
return this.each(function (i, dom) {
$(dom).on("blur", function() {
fnCapitalize(this, type);
});
});
};
//-----------------------------------
// Sets numeric format with decimal/thousand separators
$.fn.fnNumericFormat = function() {
return this.each(function (i, dom) {
$(dom).on("keyup blur", function() {
fnNumericFormat(this);
});
});
};
//-----------------------------------
// Allows only numeric characters
$.fn.fnNumericInput = function () {
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _digits = _value.match(/\d/g);
_value = !_digits ? "" : _digits.join("").substr(0, e.data.max);
if (e.type == "blur" && parseFloat(_value) === 0) _value = "0";
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keydown", function (e) {
var _key = e.which || e.keyCode;
var _ctrl = !!(e.ctrlKey || e.metaKey);
// Allow: (numbers), (keypad numbers),
// Allow: (backspace, tab, delete), (home, end, arrows)
// Allow: (Ctrl+A), (Ctrl+C)
// Allow: (Ctrl+V), (Ctrl+X)
return ((_key >= 48 && _key <= 57) || (_key >= 96 && _key <= 105) ||
(_key == 8 || _key == 9 || _key == 46) || (_key >= 35 && _key <= 40) ||
(_ctrl && _key == 65) || (_ctrl && _key == 67) ||
(_ctrl && _key == 86) || (_ctrl && _key == 88));
});
});
};
//-----------------------------------
// Sets a mask for the allowed characters
$.fn.fnCustomInput = function (mask) {
mask = mask instanceof RegExp ? mask : fnEscapeRegExp(mask);
if (!mask) throw new CustomException("Mask must be RegExp or string");
if (typeof mask === "string") mask = "[" + mask + "]";
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _pattern = new RegExp(mask.source || mask, "gi");
var _matched = _value.match(_pattern);
_value = !_matched ? "" : _matched.join("").substr(0, e.data.max);
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keypress", function (e) {
var _pattern = new RegExp(mask.source || mask, "i");
var _key = e.which || e.keyCode;
var _vk = (_key == 8 || _key == 9 || _key == 46 || (_key >= 35 && _key <= 40));
return _pattern.test(String.fromCharCode(_key)) || _vk;
});
});
};
//-----------------------------------
// Disables the specified keyboard keys.
// To allow a set of keys, better use $.fnCustomInput
$.fn.fnDisableKey = function (key) {
if (!key) return this;
var keys = key.toString().split("");
keys = keys.filter(function(n){ return (n && n.length); });
return this.each(function() {
$(this).on("keypress", function (e) {
var _key = e.which || e.keyCode;
_key = String.fromCharCode(_key);
return $.inArray(_key, keys) == -1;
});
});
};
//-----------------------------------
// Validates the required form fields
$.fn.fnEasyValidate = function (fnValidator) {
return this.each(function () {
var btn = this;
if (!window.jQuery.ui) {
throw new CustomException("jQuery.UI is required");
}
if (!btn.type || btn.type.toLowerCase() != "submit") {
fnShowTooltip(btn, "this method can be performed only on submit buttons");
return true; //continue with next element
}
if (!$(btn).closest("form").length) {
fnShowTooltip(btn, "The button must be inside a form");
return true;
}
// Prevents send the form if any field is not valid
$(btn).on("click", { handler: "easyValidate" }, function (event) {
btn.blur(); $(".vld-tooltip").remove();
var _submit = true; fnSetFocus();
// Validates each [input, select] element
$(".vld-required").each(function (i, _dom) {
var _tag = _dom.nodeName.toLowerCase();
// Gets the html5 validation data storage, modern browsers admit: _dom.dataset.validation
if (btn.getAttribute('data-validation') !== _dom.getAttribute('data-validation')) return true; //continue
// If the element is [select], the first option and the option with a value="0" will be invalid
if ((_tag == "select" && (_dom.selectedIndex === 0 || _dom.value === "0")) || _tag == "span" ||
(input.isText(_dom) && !_dom.value.length) || (input.isCheck(_dom) && !_dom.checked)) {
var dom = _dom;
// Asp radiobutton or checkbox
if (_tag == "span" || input.isCheck(_dom)) {
if (_tag == "input") dom = $(_dom);
else dom = $(_dom).find("input:first-child");
if (dom.is(":checked") || $('[name="' + dom.attr("name") + '"]').filter(":checked").length) return true; //continue
if (_tag == "span") dom.addClass("vld-required");
dom = dom.get(0);
}
// Shows the tooltip for required field
var vld = $('<span class="vld-tooltip" />').data("target-id", dom.id);
vld.appendTo(js.wrapper).html("Este campo es requerido").position({
of: dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
event.preventDefault();
dom.focus();
return (_submit = false); //break
} //end if
}); //end $.each
// Removes the validation message
$(".vld-required").on("blur", function (e) {
var dom = e.target;
if (dom.selectedIndex !== 0 || dom.checked || dom.value.length) {
$(".vld-tooltip").each(function (i, _vld) {
if (dom.id == $(_vld).data("target-id"))
{ $(_vld).remove(); return false; }
});
}
});
// Calls the function to validate the form if it was provided
if (_submit && isFunction(fnValidator) && !fnValidator(btn)) {
event.preventDefault();
_submit = false;
}
return _submit;
}); //end $.click
}); //return jquery
};
//-----------------------------------
/* PUBLIC API */
//-----------------------------------
jherax.browser = getBrowser;
jherax.isDOM = isDOM;
jherax.isEvent = isEvent;
jherax.isFunction = isFunction;
//jherax.input = input;
jherax.fnStringify = fnStringify;
jherax.fnEscapeRegExp = fnEscapeRegExp;
jherax.fnGetDate = fnGetDate;
jherax.fnGetHtmlText = fnGetHtmlText;
jherax.fnGetSelectedText = fnGetSelectedText;
jherax.fnGetCaretPosition = fnGetCaretPosition;
jherax.fnSetCaretPosition = fnSetCaretPosition;
jherax.fnCapitalize = fnCapitalize;
jherax.fnNumericFormat = fnNumericFormat;
jherax.fnIsValidFormat = fnIsValidFormat;
jherax.fnIsValidDate = fnIsValidDate;
jherax.fnShowTooltip = fnShowTooltip;
jherax.fnLoading = fnLoading;
jherax.fnSetFocus = fnSetFocus;
})(js.createNS("js.utils"), jQuery);
// Create the namespace for utils
| if (_type == "upper") _text = _text.toUpperCase();
if (_type == "lower" || _type == "word") _text = _text.toLowerCase();
if (_type == "title" || _type == "word") {
_text = _text.replace(/(?:^|-|:|;|\s|\.|\(|\/)[a-záéíóúüñ]/g, function (m) { return m.toUpperCase(); });
_text = _text.replace(/\s(?:Y|O|De[l]?|Por|A[l]?|L[ao]s?|[SC]on|En|Se|Que|Un[a]?)\b/g, function (m) { return m.toLowerCase(); });
}
}
else | conditional_block |
jherax.js | //******************************
// Utils for validations
// Author: David Rivera
// Created: 26/06/2013
//******************************
// jherax.github.io
// github.com/jherax/js-utils
//******************************
;
// Essential JavaScript Namespacing Patterns
// http://addyosmani.com/blog/essential-js-namespacing/
// Create a custom exception notifier
var CustomException = function(message) {
this.name = "js-utils exception";
this.message = message || "An error has occurred";
this.toString = function() {
return this.name + ": " + this.message;
};
};
// We need to do a check before we create the namespace
var js = window.js || { author: 'jherax' };
if (js.author != 'jherax') {
throw new CustomException("A variable with namespace [js] is already in use");
}
// Create a general purpose namespace method
js.createNS = js.createNS || function (namespace) {
var nsparts = namespace.toString().split(".");
var parent = js;
// we want to be able to include or exclude the root namespace so we strip it if it's in the namespace
if (nsparts[0] === "js") nsparts = nsparts.slice(1);
// loop through the parts and create a nested namespace if necessary
for (var i = 0; i < nsparts.length; i++) {
var subns = nsparts[i];
// check if the namespace is a valid variable name
if (!(/\w+/).test(subns)) throw new CustomException("Invalid namespace");
// check if the current parent already has the namespace declared
// if it isn't, then create it
if (typeof parent[subns] === "undefined") {
parent[subns] = {};
}
parent = parent[subns];
}
// the parent is now constructed with empty namespaces and can be used.
// we return the outermost namespace
return parent;
};
// We expose a property to specify where the tooltip element will be appended
js.wrapper = "body"; //#main-section
//-----------------------------------
// Immediately-invoked Function Expressions (IIFE)
// We pass the namespace as an argument to a self-invoking function.
// jherax is the namespace context, and $ is the jQuery Object
(function (jherax, $) {
//-----------------------------------
/* PRIVATE MEMBERS */
//-----------------------------------
// Adds support for browser detect.
// jquery 1.9+ deprecates $.browser
var getBrowser = (function() {
var ua = navigator.userAgent.toLowerCase();
var match =
/(msie) ([\w.]+)/.exec(ua) ||
/(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
var b = {}, o = {
browser: match[1] || "unknown",
version: match[2] || "0"
};
b[o.browser] = true;
b.version = o.version;
return b;
})();
//-----------------------------------
// Determines if a object is DOM element
var isDOM = function(obj) {
return (!!obj && typeof obj === "object" && !!obj.nodeType);
};
//-----------------------------------
// Determines if the entry parameter is a normalized Event Object
var isEvent = function(obj) {
return (!!obj && typeof obj === "object" && obj.which !== undefined && !!obj.target);
};
//-----------------------------------
// Determines if the entry parameter is a function
var isFunction = function(obj) {
return (!!obj && Object.prototype.toString.call(obj) == '[object Function]');
};
//-----------------------------------
// Determines whether the entry parameter is a text input or checkable input
// www.quackit.com/html_5/tags/html_input_tag.cfm
var input = {
isText: function(_dom) {
if(!isDOM(_dom)) return false;
if ((/textarea/i).test(_dom.nodeName)) return true;
var regx = /text|password|file|number|search|tel|url|email|datetime|datetime-local|date|time|month|week/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
},
isCheck: function(_dom) {
if(!isDOM(_dom)) return false;
var regx = /checkbox|radio/i;
return regx.test(_dom.type) && (/input/i).test(_dom.nodeName);
}
};
//-----------------------------------
// This is a facade of JSON.stringify and provides support in old browsers
var fnStringify = typeof JSON != "undefined" ? JSON.stringify : function (json) {
var arr = [];
$.each(json, function (key, val) {
var prop = "\"" + key + "\":";
prop += ($.isPlainObject(val) ? fnStringify(val) : | return "{" + arr.join(",") + "}";
};
//-----------------------------------
// Escaping user input to be treated as a literal string within a regular expression
function fnEscapeRegExp(txt){
if (typeof txt !== "string") return null;
return txt.replace(/([.*+?=!:${}()|\^\[\]\/\\])/g, "\\$1");
}
//-----------------------------------
// Gets the text of current date in es-CO culture. dd/MM/yyyy HH:mm:ss
function fnGetDate() {
var f = new Date();
var fillZero = function(n) { return ("0" + n.toString()).slice(-2); };
var fnDate = function() { return (fillZero(f.getDate()) +"/"+ fillZero(f.getMonth() + 1) +"/"+ f.getFullYear()); };
var fnTime = function() { return (fillZero(f.getHours()) +":"+ fillZero(f.getMinutes()) +":"+ fillZero(f.getSeconds())); };
var fnDateTime = function() { return fnDate() + " " + fnTime(); };
return {
date: fnDate(),
time: fnTime(),
dateTime: fnDateTime()
};
}
//-----------------------------------
// Gets the text as html encoded
// This is a delegate for $.val()
function fnGetHtmlText(i, value) {
var html = $("<div/>").text(value).html();
return $.trim(html);
}
//-----------------------------------
// Gets selected text in the document
function fnGetSelectedText() {
var _dom = document.activeElement;
var _sel = { text: "", slice: "", start: -1, end: -1 };
if (window.getSelection) {
// Get selected text from input fields
if (input.isText(_dom)) {
_sel.start = _dom.selectionStart;
_sel.end = _dom.selectionEnd;
if (_sel.end > _sel.start) {
_sel.text = _dom.value.substring(_sel.start, _sel.end);
_sel.slice = _dom.value.slice(0, _sel.start) + _dom.value.slice(_sel.end);
}
}
// Get selected text from document
else _sel.text = window.getSelection().toString();
} else if (document.selection.createRange)
_sel.text = document.selection.createRange().text;
if (_sel.text !== "") _sel.text = $.trim(_sel.text);
return _sel;
}
//-----------------------------------
// Gets the cursor position in the text
function fnGetCaretPosition(_dom) {
if ('selectionStart' in _dom) {
return (_dom.selectionStart);
} else { // IE below version 9
var _sel = document.selection.createRange();
_sel.moveStart('character', -_dom.value.length);
return (_sel.text.length);
}
}
//-----------------------------------
// Sets the cursor position in the text
function fnSetCaretPosition(_dom, pos) {
if ('selectionStart' in _dom) {
_dom.setSelectionRange(pos, pos);
} else { // IE below version 9
var range = _dom.createTextRange();
range.collapse(true);
range.moveEnd('character', pos);
range.moveStart('character', pos);
range.select();
}
}
//-----------------------------------
// Transforms the text to capital letter.
// Also removes all consecutive spaces
function fnCapitalize(obj, _type) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
if (!_text || _text.length === 0) return "";
if ((/textarea/i).test(obj.nodeName)) {
_text = _text.replace(/\r|\n/g, "¶").replace(/\s{2,}/g, " ");
while ((/^[¶\s]|[¶\s]$/g).test(_text))
_text = _text.replace(/^[¶\s]+|[¶\s]+$/g, "");
_text = _text.replace(/\s*¶+\s*/g, "\n");
}
else _text = $.trim(_text.replace(/\s{2,}/g, " "));
if (parseFloat(_text) === 0) _text = "0";
if (_type) {
if (_type == "upper") _text = _text.toUpperCase();
if (_type == "lower" || _type == "word") _text = _text.toLowerCase();
if (_type == "title" || _type == "word") {
_text = _text.replace(/(?:^|-|:|;|\s|\.|\(|\/)[a-záéíóúüñ]/g, function (m) { return m.toUpperCase(); });
_text = _text.replace(/\s(?:Y|O|De[l]?|Por|A[l]?|L[ao]s?|[SC]on|En|Se|Que|Un[a]?)\b/g, function (m) { return m.toLowerCase(); });
}
}
else _text = _text.replace(/^\w/, _text.charAt(0).toUpperCase());
if (_isDOM) obj.value = _text;
return _text;
}
//-----------------------------------
// Sets the numeric format in es-CO culture.
// Places decimal "." and thousand "," separator
function fnNumericFormat(obj) {
var _isDOM = input.isText(obj),
_text = _isDOM ? obj.value : obj.toString();
var x = _text.replace(/\./g, "").split(",") || [""];
var num = x[0].replace(/\B(?=(\d{3})+(?!\d))/g, ".");
var dec = x.length > 1 ? "," + x[1] : "";
if (_isDOM) obj.value = num + dec;
return (num + dec);
}
//-----------------------------------
// Validates the format of text,
// depending on the type supplied.
// Date validations are performed according to es-CO culture
function fnIsValidFormat(obj, _type) {
var _pattern = null,
_text = input.isText(obj) ? obj.value : obj.toString();
switch (_type) {
case "d": //Validates Date format: dd/MM/yyyy
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])$/;
break;
case "t": //Validates Time format: HH:mm:ss
_pattern = /^([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "dt": //Validates DateTime format: dd/MM/yyyy HH:mm:ss
_pattern = /^((0[1-9])|([1-2][0-9])|(3[0-1]))\/((0[1-9])|(1[0-2]))\/([1-2][0,9][0-9][0-9])\s([0-1][0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$/;
break;
case "email": //Validates an email address
_pattern = /^([0-9a-zA-Zñ](?:[\-.\w]*[0-9a-zA-Zñ])*@(?:[0-9a-zA-Zñ][\-\wñ]*[0-9a-zA-Zñ]\.)+[a-zA-Z]{2,9})$/i;
break;
case "pass": //Validates the password strength (must have 8-20 characters, at least one number, at least one uppercase)
_pattern = /^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,20}$/;
break;
case "lat": //Validates the latitude
_pattern = /^-?([1-8]?[1-9]|[1-9]0|0)\,{1}\d{1,6}$/;
break;
case "lon": //Validates the longitude
_pattern = /^-?([1]?[1-7][1-9]|[1]?[1-8][0]|[1-9]?[0-9])\,{1}\d{1,6}$/;
break;
}
return !!_pattern && _pattern.test(_text);
}
//-----------------------------------
// Evaluates whether the value of text is a date or not.
// The validation outcome will be shown in a tooltip
var fnIsValidDate = function(_dom, o) {
if (!input.isText(_dom)) return false;
var error = false;
o = $.extend({
isFuture: false,
compareTo: new Date(),
warning: 'La fecha no puede ser {0} a hoy'}, o);
var _type = _dom.value.length > 10 ? "dt" : "d";
var _date = _dom.value.substr(0, 10);
var parser = function (date) {
if (date instanceof Date) return date;
if (typeof date !== "string") return new Date();
if (!fnIsValidFormat(date, _type)) { error = true; return new Date(); }
return new Date(date.replace(/^(\d{2})\/(\d{2})\/(\d{4})$/, '$3/$2/$1'));
};
var dif = (parser(_date) - parser(o.compareTo)) / 1000 / 3600 / 24;
if (error) return fnShowTooltip(_dom, fnIsValidDate.formatError);
if ( o.isFuture && dif < 0) return fnShowTooltip(_dom, o.warning.replace("{0}","menor"));
if (!o.isFuture && dif > 0) return fnShowTooltip(_dom, o.warning.replace("{0}","mayor"));
return true;
};
// We expose a property to set default message for the format error
fnIsValidDate.formatError = 'El formato de fecha es incorrecto';
//-----------------------------------
// Shows a custom warning message
function fnShowTooltip(_dom, _msg) {
if (isDOM(_dom)) _dom = $(_dom);
_dom.on("blur", function () { $(".vld-tooltip").remove(); });
var vld = $('<span class="vld-tooltip">' + _msg + '</span>');
vld.appendTo(js.wrapper).position({
of: _dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
_dom.focus();
return false;
}
//-----------------------------------
// Shows the loading overlay screen
function fnLoading(o) {
var d = $.extend({
show: true,
hide: false,
delay: 2600
}, o);
$("#loadingWrapper").remove();
if (d.hide) return true;
var blockG = [];
for (var i = 1; i < 9; i++) blockG.push('<div class="blockG"></div>');
var loading = $('<div id="floatingBarsG" />').append(blockG.join(""));
var overlay = $('<div class="bg-fixed bg-opacity" />');
$('<div id="loadingWrapper" />').append(overlay, loading).appendTo(js.wrapper).hide().fadeIn(d.delay);
loading.fnCenter();
return true;
}
//-----------------------------------
// Sets the focus on input elements
function fnSetFocus() {
$($('input[type="text"], textarea').filter(':not(input:disabled)').get().reverse()).each(function () {
if (!$(this).hasClass("no-auto-focus")) $(this).focus();
});
}
//-----------------------------------
/* jQUERY EXTENSIONS */
//-----------------------------------
// Sets the jquery objects in the center of screen
$.fn.fnCenter = function() {
this.css({
'position': 'fixed',
'left': '50%',
'top': '50%'
});
this.css({
'margin-left': -this.outerWidth() / 2 + 'px',
'margin-top': -this.outerHeight() / 2 + 'px'
});
return this;
};
//-----------------------------------
// Limits the max length in the input:text
$.fn.fnMaxLength = function(length) {
return this.each(function (i, dom) {
var count = "Max: " + length;
var vld = '#max' + dom.id;
if (!input.isText(dom)) return true; //continue
$(dom).on("blur", function() { $(vld).remove(); });
$(dom).on("keypress input paste", function (e) {
var len = dom.value.length;
var max = len >= length ? 1 : 0;
if (getBrowser.mozilla) max = !e.keyCode && max;
if (max) {
len = length;
dom.value = dom.value.substr(0, len);
e.preventDefault();
}
count = "Max: " + len + "/" + length;
if(!$(vld).text(count).length) {
$('<span class="vld-tooltip" id="max' + dom.id + '" />')
.text(count).appendTo(js.wrapper).position({
of: dom,
at: "right top",
my: "left+6 top",
collision: "flipfit"
}).hide().fadeIn(400);
}
});
});
};
//-----------------------------------
// Apply the capitalized format to text when blur event occurs
$.fn.fnCapitalize = function(type) {
return this.each(function (i, dom) {
$(dom).on("blur", function() {
fnCapitalize(this, type);
});
});
};
//-----------------------------------
// Sets numeric format with decimal/thousand separators
$.fn.fnNumericFormat = function() {
return this.each(function (i, dom) {
$(dom).on("keyup blur", function() {
fnNumericFormat(this);
});
});
};
//-----------------------------------
// Allows only numeric characters
$.fn.fnNumericInput = function () {
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _digits = _value.match(/\d/g);
_value = !_digits ? "" : _digits.join("").substr(0, e.data.max);
if (e.type == "blur" && parseFloat(_value) === 0) _value = "0";
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keydown", function (e) {
var _key = e.which || e.keyCode;
var _ctrl = !!(e.ctrlKey || e.metaKey);
// Allow: (numbers), (keypad numbers),
// Allow: (backspace, tab, delete), (home, end, arrows)
// Allow: (Ctrl+A), (Ctrl+C)
// Allow: (Ctrl+V), (Ctrl+X)
return ((_key >= 48 && _key <= 57) || (_key >= 96 && _key <= 105) ||
(_key == 8 || _key == 9 || _key == 46) || (_key >= 35 && _key <= 40) ||
(_ctrl && _key == 65) || (_ctrl && _key == 67) ||
(_ctrl && _key == 86) || (_ctrl && _key == 88));
});
});
};
//-----------------------------------
// Sets a mask for the allowed characters
$.fn.fnCustomInput = function (mask) {
mask = mask instanceof RegExp ? mask : fnEscapeRegExp(mask);
if (!mask) throw new CustomException("Mask must be RegExp or string");
if (typeof mask === "string") mask = "[" + mask + "]";
return this.each(function (i, dom) {
var len = dom.maxLength;
dom.maxLength = 524000;
if (len < 1) len = 524000;
$(dom).on("focus blur input paste", { max: len }, function (e) {
var _pos = e.type != "blur" ? fnGetCaretPosition(e.target) : 0;
var _value = e.target.value;
if (e.type == "paste") {
var _sel = fnGetSelectedText();
if (_sel.text !== "") _value = _sel.slice;
}
var _pattern = new RegExp(mask.source || mask, "gi");
var _matched = _value.match(_pattern);
_value = !_matched ? "" : _matched.join("").substr(0, e.data.max);
_pos = Math.max(_pos - (e.target.value.length - _value.length), 0);
e.target.value = _value;
e.target.maxLength = e.data.max;
if (e.type != "blur") fnSetCaretPosition(e.target, _pos);
});
$(dom).on("keypress", function (e) {
var _pattern = new RegExp(mask.source || mask, "i");
var _key = e.which || e.keyCode;
var _vk = (_key == 8 || _key == 9 || _key == 46 || (_key >= 35 && _key <= 40));
return _pattern.test(String.fromCharCode(_key)) || _vk;
});
});
};
//-----------------------------------
// Disables the specified keyboard keys.
// To allow a set of keys, better use $.fnCustomInput
$.fn.fnDisableKey = function (key) {
if (!key) return this;
var keys = key.toString().split("");
keys = keys.filter(function(n){ return (n && n.length); });
return this.each(function() {
$(this).on("keypress", function (e) {
var _key = e.which || e.keyCode;
_key = String.fromCharCode(_key);
return $.inArray(_key, keys) == -1;
});
});
};
//-----------------------------------
// Validates the required form fields
$.fn.fnEasyValidate = function (fnValidator) {
return this.each(function () {
var btn = this;
if (!window.jQuery.ui) {
throw new CustomException("jQuery.UI is required");
}
if (!btn.type || btn.type.toLowerCase() != "submit") {
fnShowTooltip(btn, "this method can be performed only on submit buttons");
return true; //continue with next element
}
if (!$(btn).closest("form").length) {
fnShowTooltip(btn, "The button must be inside a form");
return true;
}
// Prevents send the form if any field is not valid
$(btn).on("click", { handler: "easyValidate" }, function (event) {
btn.blur(); $(".vld-tooltip").remove();
var _submit = true; fnSetFocus();
// Validates each [input, select] element
$(".vld-required").each(function (i, _dom) {
var _tag = _dom.nodeName.toLowerCase();
// Gets the html5 validation data storage, modern browsers admit: _dom.dataset.validation
if (btn.getAttribute('data-validation') !== _dom.getAttribute('data-validation')) return true; //continue
// If the element is [select], the first option and the option with a value="0" will be invalid
if ((_tag == "select" && (_dom.selectedIndex === 0 || _dom.value === "0")) || _tag == "span" ||
(input.isText(_dom) && !_dom.value.length) || (input.isCheck(_dom) && !_dom.checked)) {
var dom = _dom;
// Asp radiobutton or checkbox
if (_tag == "span" || input.isCheck(_dom)) {
if (_tag == "input") dom = $(_dom);
else dom = $(_dom).find("input:first-child");
if (dom.is(":checked") || $('[name="' + dom.attr("name") + '"]').filter(":checked").length) return true; //continue
if (_tag == "span") dom.addClass("vld-required");
dom = dom.get(0);
}
// Shows the tooltip for required field
var vld = $('<span class="vld-tooltip" />').data("target-id", dom.id);
vld.appendTo(js.wrapper).html("Este campo es requerido").position({
of: dom,
at: "right center",
my: "left+6 center",
collision: "flipfit"
}).hide().fadeIn(400);
event.preventDefault();
dom.focus();
return (_submit = false); //break
} //end if
}); //end $.each
// Removes the validation message
$(".vld-required").on("blur", function (e) {
var dom = e.target;
if (dom.selectedIndex !== 0 || dom.checked || dom.value.length) {
$(".vld-tooltip").each(function (i, _vld) {
if (dom.id == $(_vld).data("target-id"))
{ $(_vld).remove(); return false; }
});
}
});
// Calls the function to validate the form if it was provided
if (_submit && isFunction(fnValidator) && !fnValidator(btn)) {
event.preventDefault();
_submit = false;
}
return _submit;
}); //end $.click
}); //return jquery
};
//-----------------------------------
/* PUBLIC API */
//-----------------------------------
jherax.browser = getBrowser;
jherax.isDOM = isDOM;
jherax.isEvent = isEvent;
jherax.isFunction = isFunction;
//jherax.input = input;
jherax.fnStringify = fnStringify;
jherax.fnEscapeRegExp = fnEscapeRegExp;
jherax.fnGetDate = fnGetDate;
jherax.fnGetHtmlText = fnGetHtmlText;
jherax.fnGetSelectedText = fnGetSelectedText;
jherax.fnGetCaretPosition = fnGetCaretPosition;
jherax.fnSetCaretPosition = fnSetCaretPosition;
jherax.fnCapitalize = fnCapitalize;
jherax.fnNumericFormat = fnNumericFormat;
jherax.fnIsValidFormat = fnIsValidFormat;
jherax.fnIsValidDate = fnIsValidDate;
jherax.fnShowTooltip = fnShowTooltip;
jherax.fnLoading = fnLoading;
jherax.fnSetFocus = fnSetFocus;
})(js.createNS("js.utils"), jQuery);
// Create the namespace for utils | (typeof val === "string" ? "\"" + val + "\"" : val));
arr.push(prop);
}); | random_line_split |
lib.rs | #[macro_use]
extern crate serde_derive;
extern crate argon2;
extern crate libc;
extern crate liner;
#[macro_use]
extern crate failure;
extern crate pkgutils;
extern crate rand;
extern crate redoxfs;
extern crate syscall;
extern crate termion;
mod config;
mod disk_wrapper;
pub use config::Config;
pub use config::file::FileConfig;
pub use config::package::PackageConfig;
use disk_wrapper::DiskWrapper;
use failure::{Error, err_msg};
use rand::{RngCore, rngs::OsRng};
use redoxfs::{unmount_path, Disk, DiskIo, FileSystem};
use termion::input::TermRead;
use pkgutils::{Repo, Package};
use std::{
collections::BTreeMap,
env,
fs,
io::{self, Seek, SeekFrom, Write},
path::Path,
sync::mpsc::channel,
time::{SystemTime, UNIX_EPOCH},
thread,
};
pub(crate) type Result<T> = std::result::Result<T, Error>;
const REMOTE: &'static str = "https://static.redox-os.org/pkg";
fn get_target() -> String {
env::var("TARGET").unwrap_or(
option_env!("TARGET").map_or(
"x86_64-unknown-redox".to_string(),
|x| x.to_string()
)
)
}
/// Converts a password to a serialized argon2rs hash, understandable
/// by redox_users. If the password is blank, the hash is blank.
fn hash_password(password: &str) -> Result<String> {
if password != "" {
let salt = format!("{:X}", OsRng.next_u64());
let config = argon2::Config::default();
let hash = argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &config)?;
Ok(hash)
} else {
Ok("".to_string())
}
}
fn syscall_error(err: syscall::Error) -> io::Error {
io::Error::from_raw_os_error(err.errno)
}
/// Returns a password collected from the user (plaintext)
fn prompt_password(prompt: &str, confirm_prompt: &str) -> Result<String> {
let stdin = io::stdin();
let mut stdin = stdin.lock();
let stdout = io::stdout();
let mut stdout = stdout.lock();
print!("{}", prompt);
let password = stdin.read_passwd(&mut stdout)?;
print!("\n{}", confirm_prompt);
let confirm_password = stdin.read_passwd(&mut stdout)?;
// Note: Actually comparing two Option<String> values
if confirm_password == password {
Ok(password.unwrap_or("".to_string()))
} else {
Err(err_msg("passwords do not match"))
}
}
//TODO: error handling
fn install_packages<S: AsRef<str>>(config: &Config, dest: &str, cookbook: Option<S>) {
let target = &get_target();
let mut repo = Repo::new(target);
repo.add_remote(REMOTE);
if let Some(cookbook) = cookbook {
let dest_pkg = format!("{}/pkg", dest);
if ! Path::new(&dest_pkg).exists() {
fs::create_dir(&dest_pkg).unwrap();
}
for (packagename, package) in &config.packages {
let pkgar_path = format!("{}/{}/repo/{}/{}.pkgar",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref(), target, packagename);
let from_remote = match (config.general.cooking, package) {
(Some(true), PackageConfig::Empty) => true,
(Some(true), PackageConfig::Spec { version: None, git: None, path: None }) => true,
_ => false
};
if from_remote {
println!("Installing package from remote: {}", packagename);
repo.fetch(&packagename).unwrap().install(dest).unwrap();
} else if Path::new(&pkgar_path).exists() {
println!("Installing package from local repo: {}", packagename);
let public_path = format!("{}/{}/build/id_ed25519.pub.toml",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref());
pkgar::extract(&public_path, &pkgar_path, dest).unwrap();
let head_path = format!("{}/{}.pkgar_head", dest_pkg, packagename);
pkgar::split(&public_path, &pkgar_path, &head_path, Option::<&str>::None).unwrap();
} else {
println!("Installing package tar.gz from local repo: {}", packagename);
let path = format!("{}/{}/repo/{}/{}.tar.gz",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref(), target, packagename);
Package::from_path(&path).unwrap().install(dest).unwrap();
}
}
} else {
for (packagename, _package) in &config.packages {
println!("Installing package from remote: {}", packagename);
repo.fetch(&packagename).unwrap().install(dest).unwrap();
}
}
}
pub fn install_dir<P: AsRef<Path>, S: AsRef<str>>(config: Config, output_dir: P, cookbook: Option<S>) -> Result<()> {
//let mut context = liner::Context::new();
macro_rules! prompt {
($dst:expr, $def:expr, $($arg:tt)*) => (if config.general.prompt {
Err(io::Error::new(
io::ErrorKind::Other,
"prompt not currently supported"
))
// match unwrap_or_prompt($dst, &mut context, &format!($($arg)*)) {
// Ok(res) => if res.is_empty() {
// Ok($def)
// } else {
// Ok(res)
// },
// Err(err) => Err(err)
// }
} else {
Ok($dst.unwrap_or($def))
})
}
let output_dir = output_dir.as_ref();
let output_dir = output_dir.to_owned();
install_packages(&config, output_dir.to_str().unwrap(), cookbook);
for file in config.files {
file.create(&output_dir)?;
}
let mut passwd = String::new();
let mut shadow = String::new();
let mut next_uid = 1000;
for (username, user) in config.users {
// plaintext
let password = if let Some(password) = user.password {
password
} else if config.general.prompt {
prompt_password(
&format!("{}: enter password: ", username),
&format!("{}: confirm password: ", username))?
} else {
String::new()
};
let uid = user.uid.unwrap_or(next_uid);
if uid >= next_uid {
next_uid = uid + 1;
}
let gid = user.gid.unwrap_or(uid);
let name = prompt!(user.name, username.clone(), "{}: name (GECOS) [{}]: ", username, username)?;
let home = prompt!(user.home, format!("/home/{}", username), "{}: home [/home/{}]: ", username, username)?;
let shell = prompt!(user.shell, "/bin/ion".to_string(), "{}: shell [/bin/ion]: ", username)?;
println!("Adding user {}:", username);
println!("\tPassword: {}", password);
println!("\tUID: {}", uid);
println!("\tGID: {}", gid);
println!("\tName: {}", name);
println!("\tHome: {}", home);
println!("\tShell: {}", shell);
FileConfig {
path: home.clone(),
data: String::new(),
symlink: false,
directory: true,
mode: Some(0o0700),
uid: Some(uid),
gid: Some(gid),
recursive_chown: true,
}.create(&output_dir)?;
let password = hash_password(&password)?;
passwd.push_str(&format!("{};{};{};{};file:{};file:{}\n", username, uid, gid, name, home, shell));
shadow.push_str(&format!("{};{}\n", username, password));
}
if !passwd.is_empty() {
FileConfig {
path: "/etc/passwd".to_string(),
data: passwd,
symlink: false,
directory: false,
// Take defaults
mode: None,
uid: None,
gid: None,
recursive_chown: false,
}.create(&output_dir)?;
}
if !shadow.is_empty() {
FileConfig {
path: "/etc/shadow".to_string(),
data: shadow,
symlink: false,
directory: false,
mode: Some(0o0600),
uid: Some(0),
gid: Some(0),
recursive_chown: false,
}.create(&output_dir)?;
}
Ok(())
}
pub fn with_redoxfs<D, T, F>(disk: D, password_opt: Option<&[u8]>, callback: F)
-> Result<T> where
D: Disk + Send + 'static,
F: FnOnce(&Path) -> Result<T>
{
let mount_path = if cfg!(target_os = "redox") {
"file/redox_installer"
} else {
"/tmp/redox_installer"
};
if cfg!(not(target_os = "redox")) {
if ! Path::new(mount_path).exists() {
fs::create_dir(mount_path)?;
}
}
let ctime = SystemTime::now().duration_since(UNIX_EPOCH)?;
let fs = FileSystem::create(
disk,
password_opt,
ctime.as_secs(),
ctime.subsec_nanos()
).map_err(syscall_error)?;
let (tx, rx) = channel();
let join_handle = thread::spawn(move || {
let res = redoxfs::mount(
fs,
mount_path,
|real_path| {
tx.send(Ok(real_path.to_owned())).unwrap();
}
);
match res {
Ok(()) => (),
Err(err) => {
tx.send(Err(err)).unwrap();
},
};
});
let res = match rx.recv() {
Ok(ok) => match ok {
Ok(real_path) => callback(&real_path),
Err(err) => return Err(err.into()),
},
Err(_) => return Err(io::Error::new(
io::ErrorKind::NotConnected,
"redoxfs thread did not send a result"
).into()),
};
unmount_path(mount_path)?;
join_handle.join().unwrap();
res
}
pub fn fetch_bootloaders<S: AsRef<str>>(config: &Config, cookbook: Option<S>, live: bool) -> Result<(Vec<u8>, Vec<u8>)> {
//TODO: make it safe to run this concurrently
let bootloader_dir = "/tmp/redox_installer_bootloader";
if Path::new(bootloader_dir).exists() {
fs::remove_dir_all(&bootloader_dir)?;
}
fs::create_dir(bootloader_dir)?;
let mut bootloader_config = Config::default();
bootloader_config.general = config.general.clone();
bootloader_config.packages.insert("bootloader".to_string(), PackageConfig::default());
install_packages(&bootloader_config, bootloader_dir, cookbook.as_ref());
let boot_dir = Path::new(bootloader_dir).join("boot");
let bios_path = boot_dir.join(if live {
"bootloader-live.bios"
} else {
"bootloader.bios"
});
let efi_path = boot_dir.join(if live {
"bootloader-live.efi"
} else {
"bootloader.efi"
});
Ok((
if bios_path.exists() {
fs::read(bios_path)?
} else {
Vec::new()
},
if efi_path.exists() {
fs::read(efi_path)?
} else {
Vec::new()
},
))
}
//TODO: make bootloaders use Option, dynamically create BIOS and EFI partitions
pub fn with_whole_disk<P, F, T>(disk_path: P, bootloader_bios: &[u8], bootloader_efi: &[u8], password_opt: Option<&[u8]>, callback: F)
-> Result<T> where
P: AsRef<Path>,
F: FnOnce(&Path) -> Result<T>
{
let target = get_target();
let bootloader_efi_name = match target.as_str() {
"aarch64-unknown-redox" => "BOOTAA64.EFI",
"i686-unknown-redox" => "BOOTIA32.EFI",
"x86_64-unknown-redox" => "BOOTX64.EFI",
_ => {
return Err(format_err!("target '{}' not supported", target));
}
};
// Open disk and read metadata
eprintln!("Opening disk {}", disk_path.as_ref().display());
let mut disk_file = DiskWrapper::open(disk_path.as_ref())?;
let disk_size = disk_file.size();
let block_size = disk_file.block_size() as u64;
let gpt_block_size = match block_size {
512 => gpt::disk::LogicalBlockSize::Lb512,
_ => {
// TODO: support (and test) other block sizes
return Err(format_err!("block size {} not supported", block_size));
}
};
// Calculate partition offsets
let gpt_reserved = 34 * 512; // GPT always reserves 34 512-byte sectors
let mibi = 1024 * 1024;
// First megabyte of the disk is reserved for BIOS partition, wich includes GPT tables
let bios_start = gpt_reserved / block_size;
let bios_end = (mibi / block_size) - 1;
// Second megabyte of the disk is reserved for EFI partition
let efi_start = bios_end + 1;
let efi_end = efi_start + (mibi / block_size) - 1;
// The rest of the disk is RedoxFS, reserving the GPT table mirror at the end of disk
let redoxfs_start = efi_end + 1;
let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1;
// Format and install BIOS partition
{
// Write BIOS bootloader to disk
eprintln!("Write bootloader with size {:#x}", bootloader_bios.len());
disk_file.seek(SeekFrom::Start(0))?;
disk_file.write_all(&bootloader_bios)?;
// Replace MBR tables with protective MBR
let mbr_blocks = ((disk_size + block_size - 1) / block_size) - 1;
eprintln!("Writing protective MBR with disk blocks {:#x}", mbr_blocks);
gpt::mbr::ProtectiveMBR::with_lb_size(mbr_blocks as u32)
.update_conservative(&mut disk_file)?;
// Open disk, mark it as not initialized
let mut gpt_disk = gpt::GptConfig::new()
.initialized(false)
.writable(true)
.logical_block_size(gpt_block_size)
.create_from_device(Box::new(&mut disk_file), None)?;
// Add BIOS boot partition
let mut partitions = BTreeMap::new();
let mut partition_id = 1;
partitions.insert(partition_id, gpt::partition::Partition {
part_type_guid: gpt::partition_types::BIOS,
part_guid: uuid::Uuid::new_v4(),
first_lba: bios_start,
last_lba: bios_end,
flags: 0, // TODO
name: "BIOS".to_string(),
});
partition_id += 1;
// Add EFI boot partition
partitions.insert(partition_id, gpt::partition::Partition {
part_type_guid: gpt::partition_types::EFI,
part_guid: uuid::Uuid::new_v4(),
first_lba: efi_start,
last_lba: efi_end,
flags: 0, // TODO
name: "EFI".to_string(),
});
partition_id += 1;
// Add RedoxFS partition
partitions.insert(partition_id, gpt::partition::Partition {
//TODO: Use REDOX_REDOXFS type (needs GPT crate changes)
part_type_guid: gpt::partition_types::LINUX_FS,
part_guid: uuid::Uuid::new_v4(),
first_lba: redoxfs_start,
last_lba: redoxfs_end,
flags: 0,
name: "REDOX".to_string(),
});
eprintln!("Writing GPT tables: {:#?}", partitions);
// Initialize GPT table
gpt_disk.update_partitions(partitions)?;
// Write partition layout, returning disk file
gpt_disk.write()?;
}
// Format and install EFI partition
{
let disk_efi_start = efi_start * block_size;
let disk_efi_end = (efi_end + 1) * block_size;
let mut disk_efi = fscommon::StreamSlice::new(
&mut disk_file,
disk_efi_start,
disk_efi_end,
)?;
eprintln!("Formatting EFI partition with size {:#x}", disk_efi_end - disk_efi_start);
fatfs::format_volume(&mut disk_efi, fatfs::FormatVolumeOptions::new())?;
eprintln!("Opening EFI partition");
let fs = fatfs::FileSystem::new(&mut disk_efi, fatfs::FsOptions::new())?;
eprintln!("Creating EFI directory");
let root_dir = fs.root_dir();
root_dir.create_dir("EFI")?;
eprintln!("Creating EFI/BOOT directory");
let efi_dir = root_dir.open_dir("EFI")?;
efi_dir.create_dir("BOOT")?;
eprintln!("Writing EFI/BOOT/{} file with size {:#x}", bootloader_efi_name, bootloader_efi.len());
let boot_dir = efi_dir.open_dir("BOOT")?;
let mut file = boot_dir.create_file(bootloader_efi_name)?;
file.truncate()?;
file.write_all(&bootloader_efi)?;
}
// Format and install RedoxFS partition
eprintln!("Installing to RedoxFS partition with size {:#x}", (redoxfs_end - redoxfs_start) * block_size);
let disk_redoxfs = DiskIo(fscommon::StreamSlice::new(
disk_file,
redoxfs_start * block_size,
(redoxfs_end + 1) * block_size
)?); | )
}
pub fn install<P, S>(config: Config, output: P, cookbook: Option<S>, live: bool)
-> Result<()> where
P: AsRef<Path>,
S: AsRef<str>,
{
println!("Install {:#?} to {}", config, output.as_ref().display());
if output.as_ref().is_dir() {
install_dir(config, output, cookbook)
} else {
let (bootloader_bios, bootloader_efi) = fetch_bootloaders(&config, cookbook.as_ref(), live)?;
with_whole_disk(output, &bootloader_bios, &bootloader_efi, None,
move |mount_path| {
install_dir(config, mount_path, cookbook)
}
)
}
} | with_redoxfs(
disk_redoxfs,
password_opt,
callback | random_line_split |
lib.rs | #[macro_use]
extern crate serde_derive;
extern crate argon2;
extern crate libc;
extern crate liner;
#[macro_use]
extern crate failure;
extern crate pkgutils;
extern crate rand;
extern crate redoxfs;
extern crate syscall;
extern crate termion;
mod config;
mod disk_wrapper;
pub use config::Config;
pub use config::file::FileConfig;
pub use config::package::PackageConfig;
use disk_wrapper::DiskWrapper;
use failure::{Error, err_msg};
use rand::{RngCore, rngs::OsRng};
use redoxfs::{unmount_path, Disk, DiskIo, FileSystem};
use termion::input::TermRead;
use pkgutils::{Repo, Package};
use std::{
collections::BTreeMap,
env,
fs,
io::{self, Seek, SeekFrom, Write},
path::Path,
sync::mpsc::channel,
time::{SystemTime, UNIX_EPOCH},
thread,
};
pub(crate) type Result<T> = std::result::Result<T, Error>;
const REMOTE: &'static str = "https://static.redox-os.org/pkg";
fn get_target() -> String {
env::var("TARGET").unwrap_or(
option_env!("TARGET").map_or(
"x86_64-unknown-redox".to_string(),
|x| x.to_string()
)
)
}
/// Converts a password to a serialized argon2rs hash, understandable
/// by redox_users. If the password is blank, the hash is blank.
fn hash_password(password: &str) -> Result<String> {
if password != "" {
let salt = format!("{:X}", OsRng.next_u64());
let config = argon2::Config::default();
let hash = argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &config)?;
Ok(hash)
} else {
Ok("".to_string())
}
}
fn syscall_error(err: syscall::Error) -> io::Error {
io::Error::from_raw_os_error(err.errno)
}
/// Returns a password collected from the user (plaintext)
fn prompt_password(prompt: &str, confirm_prompt: &str) -> Result<String> {
let stdin = io::stdin();
let mut stdin = stdin.lock();
let stdout = io::stdout();
let mut stdout = stdout.lock();
print!("{}", prompt);
let password = stdin.read_passwd(&mut stdout)?;
print!("\n{}", confirm_prompt);
let confirm_password = stdin.read_passwd(&mut stdout)?;
// Note: Actually comparing two Option<String> values
if confirm_password == password {
Ok(password.unwrap_or("".to_string()))
} else {
Err(err_msg("passwords do not match"))
}
}
//TODO: error handling
fn install_packages<S: AsRef<str>>(config: &Config, dest: &str, cookbook: Option<S>) {
let target = &get_target();
let mut repo = Repo::new(target);
repo.add_remote(REMOTE);
if let Some(cookbook) = cookbook {
let dest_pkg = format!("{}/pkg", dest);
if ! Path::new(&dest_pkg).exists() {
fs::create_dir(&dest_pkg).unwrap();
}
for (packagename, package) in &config.packages {
let pkgar_path = format!("{}/{}/repo/{}/{}.pkgar",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref(), target, packagename);
let from_remote = match (config.general.cooking, package) {
(Some(true), PackageConfig::Empty) => true,
(Some(true), PackageConfig::Spec { version: None, git: None, path: None }) => true,
_ => false
};
if from_remote {
println!("Installing package from remote: {}", packagename);
repo.fetch(&packagename).unwrap().install(dest).unwrap();
} else if Path::new(&pkgar_path).exists() {
println!("Installing package from local repo: {}", packagename);
let public_path = format!("{}/{}/build/id_ed25519.pub.toml",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref());
pkgar::extract(&public_path, &pkgar_path, dest).unwrap();
let head_path = format!("{}/{}.pkgar_head", dest_pkg, packagename);
pkgar::split(&public_path, &pkgar_path, &head_path, Option::<&str>::None).unwrap();
} else {
println!("Installing package tar.gz from local repo: {}", packagename);
let path = format!("{}/{}/repo/{}/{}.tar.gz",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref(), target, packagename);
Package::from_path(&path).unwrap().install(dest).unwrap();
}
}
} else {
for (packagename, _package) in &config.packages {
println!("Installing package from remote: {}", packagename);
repo.fetch(&packagename).unwrap().install(dest).unwrap();
}
}
}
pub fn install_dir<P: AsRef<Path>, S: AsRef<str>>(config: Config, output_dir: P, cookbook: Option<S>) -> Result<()> |
pub fn with_redoxfs<D, T, F>(disk: D, password_opt: Option<&[u8]>, callback: F)
-> Result<T> where
D: Disk + Send + 'static,
F: FnOnce(&Path) -> Result<T>
{
let mount_path = if cfg!(target_os = "redox") {
"file/redox_installer"
} else {
"/tmp/redox_installer"
};
if cfg!(not(target_os = "redox")) {
if ! Path::new(mount_path).exists() {
fs::create_dir(mount_path)?;
}
}
let ctime = SystemTime::now().duration_since(UNIX_EPOCH)?;
let fs = FileSystem::create(
disk,
password_opt,
ctime.as_secs(),
ctime.subsec_nanos()
).map_err(syscall_error)?;
let (tx, rx) = channel();
let join_handle = thread::spawn(move || {
let res = redoxfs::mount(
fs,
mount_path,
|real_path| {
tx.send(Ok(real_path.to_owned())).unwrap();
}
);
match res {
Ok(()) => (),
Err(err) => {
tx.send(Err(err)).unwrap();
},
};
});
let res = match rx.recv() {
Ok(ok) => match ok {
Ok(real_path) => callback(&real_path),
Err(err) => return Err(err.into()),
},
Err(_) => return Err(io::Error::new(
io::ErrorKind::NotConnected,
"redoxfs thread did not send a result"
).into()),
};
unmount_path(mount_path)?;
join_handle.join().unwrap();
res
}
pub fn fetch_bootloaders<S: AsRef<str>>(config: &Config, cookbook: Option<S>, live: bool) -> Result<(Vec<u8>, Vec<u8>)> {
//TODO: make it safe to run this concurrently
let bootloader_dir = "/tmp/redox_installer_bootloader";
if Path::new(bootloader_dir).exists() {
fs::remove_dir_all(&bootloader_dir)?;
}
fs::create_dir(bootloader_dir)?;
let mut bootloader_config = Config::default();
bootloader_config.general = config.general.clone();
bootloader_config.packages.insert("bootloader".to_string(), PackageConfig::default());
install_packages(&bootloader_config, bootloader_dir, cookbook.as_ref());
let boot_dir = Path::new(bootloader_dir).join("boot");
let bios_path = boot_dir.join(if live {
"bootloader-live.bios"
} else {
"bootloader.bios"
});
let efi_path = boot_dir.join(if live {
"bootloader-live.efi"
} else {
"bootloader.efi"
});
Ok((
if bios_path.exists() {
fs::read(bios_path)?
} else {
Vec::new()
},
if efi_path.exists() {
fs::read(efi_path)?
} else {
Vec::new()
},
))
}
//TODO: make bootloaders use Option, dynamically create BIOS and EFI partitions
pub fn with_whole_disk<P, F, T>(disk_path: P, bootloader_bios: &[u8], bootloader_efi: &[u8], password_opt: Option<&[u8]>, callback: F)
-> Result<T> where
P: AsRef<Path>,
F: FnOnce(&Path) -> Result<T>
{
let target = get_target();
let bootloader_efi_name = match target.as_str() {
"aarch64-unknown-redox" => "BOOTAA64.EFI",
"i686-unknown-redox" => "BOOTIA32.EFI",
"x86_64-unknown-redox" => "BOOTX64.EFI",
_ => {
return Err(format_err!("target '{}' not supported", target));
}
};
// Open disk and read metadata
eprintln!("Opening disk {}", disk_path.as_ref().display());
let mut disk_file = DiskWrapper::open(disk_path.as_ref())?;
let disk_size = disk_file.size();
let block_size = disk_file.block_size() as u64;
let gpt_block_size = match block_size {
512 => gpt::disk::LogicalBlockSize::Lb512,
_ => {
// TODO: support (and test) other block sizes
return Err(format_err!("block size {} not supported", block_size));
}
};
// Calculate partition offsets
let gpt_reserved = 34 * 512; // GPT always reserves 34 512-byte sectors
let mibi = 1024 * 1024;
// First megabyte of the disk is reserved for BIOS partition, wich includes GPT tables
let bios_start = gpt_reserved / block_size;
let bios_end = (mibi / block_size) - 1;
// Second megabyte of the disk is reserved for EFI partition
let efi_start = bios_end + 1;
let efi_end = efi_start + (mibi / block_size) - 1;
// The rest of the disk is RedoxFS, reserving the GPT table mirror at the end of disk
let redoxfs_start = efi_end + 1;
let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1;
// Format and install BIOS partition
{
// Write BIOS bootloader to disk
eprintln!("Write bootloader with size {:#x}", bootloader_bios.len());
disk_file.seek(SeekFrom::Start(0))?;
disk_file.write_all(&bootloader_bios)?;
// Replace MBR tables with protective MBR
let mbr_blocks = ((disk_size + block_size - 1) / block_size) - 1;
eprintln!("Writing protective MBR with disk blocks {:#x}", mbr_blocks);
gpt::mbr::ProtectiveMBR::with_lb_size(mbr_blocks as u32)
.update_conservative(&mut disk_file)?;
// Open disk, mark it as not initialized
let mut gpt_disk = gpt::GptConfig::new()
.initialized(false)
.writable(true)
.logical_block_size(gpt_block_size)
.create_from_device(Box::new(&mut disk_file), None)?;
// Add BIOS boot partition
let mut partitions = BTreeMap::new();
let mut partition_id = 1;
partitions.insert(partition_id, gpt::partition::Partition {
part_type_guid: gpt::partition_types::BIOS,
part_guid: uuid::Uuid::new_v4(),
first_lba: bios_start,
last_lba: bios_end,
flags: 0, // TODO
name: "BIOS".to_string(),
});
partition_id += 1;
// Add EFI boot partition
partitions.insert(partition_id, gpt::partition::Partition {
part_type_guid: gpt::partition_types::EFI,
part_guid: uuid::Uuid::new_v4(),
first_lba: efi_start,
last_lba: efi_end,
flags: 0, // TODO
name: "EFI".to_string(),
});
partition_id += 1;
// Add RedoxFS partition
partitions.insert(partition_id, gpt::partition::Partition {
//TODO: Use REDOX_REDOXFS type (needs GPT crate changes)
part_type_guid: gpt::partition_types::LINUX_FS,
part_guid: uuid::Uuid::new_v4(),
first_lba: redoxfs_start,
last_lba: redoxfs_end,
flags: 0,
name: "REDOX".to_string(),
});
eprintln!("Writing GPT tables: {:#?}", partitions);
// Initialize GPT table
gpt_disk.update_partitions(partitions)?;
// Write partition layout, returning disk file
gpt_disk.write()?;
}
// Format and install EFI partition
{
let disk_efi_start = efi_start * block_size;
let disk_efi_end = (efi_end + 1) * block_size;
let mut disk_efi = fscommon::StreamSlice::new(
&mut disk_file,
disk_efi_start,
disk_efi_end,
)?;
eprintln!("Formatting EFI partition with size {:#x}", disk_efi_end - disk_efi_start);
fatfs::format_volume(&mut disk_efi, fatfs::FormatVolumeOptions::new())?;
eprintln!("Opening EFI partition");
let fs = fatfs::FileSystem::new(&mut disk_efi, fatfs::FsOptions::new())?;
eprintln!("Creating EFI directory");
let root_dir = fs.root_dir();
root_dir.create_dir("EFI")?;
eprintln!("Creating EFI/BOOT directory");
let efi_dir = root_dir.open_dir("EFI")?;
efi_dir.create_dir("BOOT")?;
eprintln!("Writing EFI/BOOT/{} file with size {:#x}", bootloader_efi_name, bootloader_efi.len());
let boot_dir = efi_dir.open_dir("BOOT")?;
let mut file = boot_dir.create_file(bootloader_efi_name)?;
file.truncate()?;
file.write_all(&bootloader_efi)?;
}
// Format and install RedoxFS partition
eprintln!("Installing to RedoxFS partition with size {:#x}", (redoxfs_end - redoxfs_start) * block_size);
let disk_redoxfs = DiskIo(fscommon::StreamSlice::new(
disk_file,
redoxfs_start * block_size,
(redoxfs_end + 1) * block_size
)?);
with_redoxfs(
disk_redoxfs,
password_opt,
callback
)
}
pub fn install<P, S>(config: Config, output: P, cookbook: Option<S>, live: bool)
-> Result<()> where
P: AsRef<Path>,
S: AsRef<str>,
{
println!("Install {:#?} to {}", config, output.as_ref().display());
if output.as_ref().is_dir() {
install_dir(config, output, cookbook)
} else {
let (bootloader_bios, bootloader_efi) = fetch_bootloaders(&config, cookbook.as_ref(), live)?;
with_whole_disk(output, &bootloader_bios, &bootloader_efi, None,
move |mount_path| {
install_dir(config, mount_path, cookbook)
}
)
}
}
| {
//let mut context = liner::Context::new();
macro_rules! prompt {
($dst:expr, $def:expr, $($arg:tt)*) => (if config.general.prompt {
Err(io::Error::new(
io::ErrorKind::Other,
"prompt not currently supported"
))
// match unwrap_or_prompt($dst, &mut context, &format!($($arg)*)) {
// Ok(res) => if res.is_empty() {
// Ok($def)
// } else {
// Ok(res)
// },
// Err(err) => Err(err)
// }
} else {
Ok($dst.unwrap_or($def))
})
}
let output_dir = output_dir.as_ref();
let output_dir = output_dir.to_owned();
install_packages(&config, output_dir.to_str().unwrap(), cookbook);
for file in config.files {
file.create(&output_dir)?;
}
let mut passwd = String::new();
let mut shadow = String::new();
let mut next_uid = 1000;
for (username, user) in config.users {
// plaintext
let password = if let Some(password) = user.password {
password
} else if config.general.prompt {
prompt_password(
&format!("{}: enter password: ", username),
&format!("{}: confirm password: ", username))?
} else {
String::new()
};
let uid = user.uid.unwrap_or(next_uid);
if uid >= next_uid {
next_uid = uid + 1;
}
let gid = user.gid.unwrap_or(uid);
let name = prompt!(user.name, username.clone(), "{}: name (GECOS) [{}]: ", username, username)?;
let home = prompt!(user.home, format!("/home/{}", username), "{}: home [/home/{}]: ", username, username)?;
let shell = prompt!(user.shell, "/bin/ion".to_string(), "{}: shell [/bin/ion]: ", username)?;
println!("Adding user {}:", username);
println!("\tPassword: {}", password);
println!("\tUID: {}", uid);
println!("\tGID: {}", gid);
println!("\tName: {}", name);
println!("\tHome: {}", home);
println!("\tShell: {}", shell);
FileConfig {
path: home.clone(),
data: String::new(),
symlink: false,
directory: true,
mode: Some(0o0700),
uid: Some(uid),
gid: Some(gid),
recursive_chown: true,
}.create(&output_dir)?;
let password = hash_password(&password)?;
passwd.push_str(&format!("{};{};{};{};file:{};file:{}\n", username, uid, gid, name, home, shell));
shadow.push_str(&format!("{};{}\n", username, password));
}
if !passwd.is_empty() {
FileConfig {
path: "/etc/passwd".to_string(),
data: passwd,
symlink: false,
directory: false,
// Take defaults
mode: None,
uid: None,
gid: None,
recursive_chown: false,
}.create(&output_dir)?;
}
if !shadow.is_empty() {
FileConfig {
path: "/etc/shadow".to_string(),
data: shadow,
symlink: false,
directory: false,
mode: Some(0o0600),
uid: Some(0),
gid: Some(0),
recursive_chown: false,
}.create(&output_dir)?;
}
Ok(())
} | identifier_body |
lib.rs | #[macro_use]
extern crate serde_derive;
extern crate argon2;
extern crate libc;
extern crate liner;
#[macro_use]
extern crate failure;
extern crate pkgutils;
extern crate rand;
extern crate redoxfs;
extern crate syscall;
extern crate termion;
mod config;
mod disk_wrapper;
pub use config::Config;
pub use config::file::FileConfig;
pub use config::package::PackageConfig;
use disk_wrapper::DiskWrapper;
use failure::{Error, err_msg};
use rand::{RngCore, rngs::OsRng};
use redoxfs::{unmount_path, Disk, DiskIo, FileSystem};
use termion::input::TermRead;
use pkgutils::{Repo, Package};
use std::{
collections::BTreeMap,
env,
fs,
io::{self, Seek, SeekFrom, Write},
path::Path,
sync::mpsc::channel,
time::{SystemTime, UNIX_EPOCH},
thread,
};
pub(crate) type Result<T> = std::result::Result<T, Error>;
const REMOTE: &'static str = "https://static.redox-os.org/pkg";
fn get_target() -> String {
env::var("TARGET").unwrap_or(
option_env!("TARGET").map_or(
"x86_64-unknown-redox".to_string(),
|x| x.to_string()
)
)
}
/// Converts a password to a serialized argon2rs hash, understandable
/// by redox_users. If the password is blank, the hash is blank.
fn hash_password(password: &str) -> Result<String> {
if password != "" {
let salt = format!("{:X}", OsRng.next_u64());
let config = argon2::Config::default();
let hash = argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &config)?;
Ok(hash)
} else {
Ok("".to_string())
}
}
fn syscall_error(err: syscall::Error) -> io::Error {
io::Error::from_raw_os_error(err.errno)
}
/// Returns a password collected from the user (plaintext)
fn prompt_password(prompt: &str, confirm_prompt: &str) -> Result<String> {
let stdin = io::stdin();
let mut stdin = stdin.lock();
let stdout = io::stdout();
let mut stdout = stdout.lock();
print!("{}", prompt);
let password = stdin.read_passwd(&mut stdout)?;
print!("\n{}", confirm_prompt);
let confirm_password = stdin.read_passwd(&mut stdout)?;
// Note: Actually comparing two Option<String> values
if confirm_password == password {
Ok(password.unwrap_or("".to_string()))
} else {
Err(err_msg("passwords do not match"))
}
}
//TODO: error handling
fn install_packages<S: AsRef<str>>(config: &Config, dest: &str, cookbook: Option<S>) {
let target = &get_target();
let mut repo = Repo::new(target);
repo.add_remote(REMOTE);
if let Some(cookbook) = cookbook {
let dest_pkg = format!("{}/pkg", dest);
if ! Path::new(&dest_pkg).exists() {
fs::create_dir(&dest_pkg).unwrap();
}
for (packagename, package) in &config.packages {
let pkgar_path = format!("{}/{}/repo/{}/{}.pkgar",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref(), target, packagename);
let from_remote = match (config.general.cooking, package) {
(Some(true), PackageConfig::Empty) => true,
(Some(true), PackageConfig::Spec { version: None, git: None, path: None }) => true,
_ => false
};
if from_remote {
println!("Installing package from remote: {}", packagename);
repo.fetch(&packagename).unwrap().install(dest).unwrap();
} else if Path::new(&pkgar_path).exists() {
println!("Installing package from local repo: {}", packagename);
let public_path = format!("{}/{}/build/id_ed25519.pub.toml",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref());
pkgar::extract(&public_path, &pkgar_path, dest).unwrap();
let head_path = format!("{}/{}.pkgar_head", dest_pkg, packagename);
pkgar::split(&public_path, &pkgar_path, &head_path, Option::<&str>::None).unwrap();
} else {
println!("Installing package tar.gz from local repo: {}", packagename);
let path = format!("{}/{}/repo/{}/{}.tar.gz",
env::current_dir().unwrap().to_string_lossy(),
cookbook.as_ref(), target, packagename);
Package::from_path(&path).unwrap().install(dest).unwrap();
}
}
} else {
for (packagename, _package) in &config.packages {
println!("Installing package from remote: {}", packagename);
repo.fetch(&packagename).unwrap().install(dest).unwrap();
}
}
}
pub fn install_dir<P: AsRef<Path>, S: AsRef<str>>(config: Config, output_dir: P, cookbook: Option<S>) -> Result<()> {
//let mut context = liner::Context::new();
macro_rules! prompt {
($dst:expr, $def:expr, $($arg:tt)*) => (if config.general.prompt {
Err(io::Error::new(
io::ErrorKind::Other,
"prompt not currently supported"
))
// match unwrap_or_prompt($dst, &mut context, &format!($($arg)*)) {
// Ok(res) => if res.is_empty() {
// Ok($def)
// } else {
// Ok(res)
// },
// Err(err) => Err(err)
// }
} else {
Ok($dst.unwrap_or($def))
})
}
let output_dir = output_dir.as_ref();
let output_dir = output_dir.to_owned();
install_packages(&config, output_dir.to_str().unwrap(), cookbook);
for file in config.files {
file.create(&output_dir)?;
}
let mut passwd = String::new();
let mut shadow = String::new();
let mut next_uid = 1000;
for (username, user) in config.users {
// plaintext
let password = if let Some(password) = user.password {
password
} else if config.general.prompt {
prompt_password(
&format!("{}: enter password: ", username),
&format!("{}: confirm password: ", username))?
} else {
String::new()
};
let uid = user.uid.unwrap_or(next_uid);
if uid >= next_uid {
next_uid = uid + 1;
}
let gid = user.gid.unwrap_or(uid);
let name = prompt!(user.name, username.clone(), "{}: name (GECOS) [{}]: ", username, username)?;
let home = prompt!(user.home, format!("/home/{}", username), "{}: home [/home/{}]: ", username, username)?;
let shell = prompt!(user.shell, "/bin/ion".to_string(), "{}: shell [/bin/ion]: ", username)?;
println!("Adding user {}:", username);
println!("\tPassword: {}", password);
println!("\tUID: {}", uid);
println!("\tGID: {}", gid);
println!("\tName: {}", name);
println!("\tHome: {}", home);
println!("\tShell: {}", shell);
FileConfig {
path: home.clone(),
data: String::new(),
symlink: false,
directory: true,
mode: Some(0o0700),
uid: Some(uid),
gid: Some(gid),
recursive_chown: true,
}.create(&output_dir)?;
let password = hash_password(&password)?;
passwd.push_str(&format!("{};{};{};{};file:{};file:{}\n", username, uid, gid, name, home, shell));
shadow.push_str(&format!("{};{}\n", username, password));
}
if !passwd.is_empty() {
FileConfig {
path: "/etc/passwd".to_string(),
data: passwd,
symlink: false,
directory: false,
// Take defaults
mode: None,
uid: None,
gid: None,
recursive_chown: false,
}.create(&output_dir)?;
}
if !shadow.is_empty() {
FileConfig {
path: "/etc/shadow".to_string(),
data: shadow,
symlink: false,
directory: false,
mode: Some(0o0600),
uid: Some(0),
gid: Some(0),
recursive_chown: false,
}.create(&output_dir)?;
}
Ok(())
}
pub fn with_redoxfs<D, T, F>(disk: D, password_opt: Option<&[u8]>, callback: F)
-> Result<T> where
D: Disk + Send + 'static,
F: FnOnce(&Path) -> Result<T>
{
let mount_path = if cfg!(target_os = "redox") {
"file/redox_installer"
} else {
"/tmp/redox_installer"
};
if cfg!(not(target_os = "redox")) {
if ! Path::new(mount_path).exists() {
fs::create_dir(mount_path)?;
}
}
let ctime = SystemTime::now().duration_since(UNIX_EPOCH)?;
let fs = FileSystem::create(
disk,
password_opt,
ctime.as_secs(),
ctime.subsec_nanos()
).map_err(syscall_error)?;
let (tx, rx) = channel();
let join_handle = thread::spawn(move || {
let res = redoxfs::mount(
fs,
mount_path,
|real_path| {
tx.send(Ok(real_path.to_owned())).unwrap();
}
);
match res {
Ok(()) => (),
Err(err) => {
tx.send(Err(err)).unwrap();
},
};
});
let res = match rx.recv() {
Ok(ok) => match ok {
Ok(real_path) => callback(&real_path),
Err(err) => return Err(err.into()),
},
Err(_) => return Err(io::Error::new(
io::ErrorKind::NotConnected,
"redoxfs thread did not send a result"
).into()),
};
unmount_path(mount_path)?;
join_handle.join().unwrap();
res
}
pub fn fetch_bootloaders<S: AsRef<str>>(config: &Config, cookbook: Option<S>, live: bool) -> Result<(Vec<u8>, Vec<u8>)> {
//TODO: make it safe to run this concurrently
let bootloader_dir = "/tmp/redox_installer_bootloader";
if Path::new(bootloader_dir).exists() {
fs::remove_dir_all(&bootloader_dir)?;
}
fs::create_dir(bootloader_dir)?;
let mut bootloader_config = Config::default();
bootloader_config.general = config.general.clone();
bootloader_config.packages.insert("bootloader".to_string(), PackageConfig::default());
install_packages(&bootloader_config, bootloader_dir, cookbook.as_ref());
let boot_dir = Path::new(bootloader_dir).join("boot");
let bios_path = boot_dir.join(if live {
"bootloader-live.bios"
} else {
"bootloader.bios"
});
let efi_path = boot_dir.join(if live {
"bootloader-live.efi"
} else {
"bootloader.efi"
});
Ok((
if bios_path.exists() {
fs::read(bios_path)?
} else {
Vec::new()
},
if efi_path.exists() {
fs::read(efi_path)?
} else {
Vec::new()
},
))
}
//TODO: make bootloaders use Option, dynamically create BIOS and EFI partitions
pub fn | <P, F, T>(disk_path: P, bootloader_bios: &[u8], bootloader_efi: &[u8], password_opt: Option<&[u8]>, callback: F)
-> Result<T> where
P: AsRef<Path>,
F: FnOnce(&Path) -> Result<T>
{
let target = get_target();
let bootloader_efi_name = match target.as_str() {
"aarch64-unknown-redox" => "BOOTAA64.EFI",
"i686-unknown-redox" => "BOOTIA32.EFI",
"x86_64-unknown-redox" => "BOOTX64.EFI",
_ => {
return Err(format_err!("target '{}' not supported", target));
}
};
// Open disk and read metadata
eprintln!("Opening disk {}", disk_path.as_ref().display());
let mut disk_file = DiskWrapper::open(disk_path.as_ref())?;
let disk_size = disk_file.size();
let block_size = disk_file.block_size() as u64;
let gpt_block_size = match block_size {
512 => gpt::disk::LogicalBlockSize::Lb512,
_ => {
// TODO: support (and test) other block sizes
return Err(format_err!("block size {} not supported", block_size));
}
};
// Calculate partition offsets
let gpt_reserved = 34 * 512; // GPT always reserves 34 512-byte sectors
let mibi = 1024 * 1024;
// First megabyte of the disk is reserved for BIOS partition, wich includes GPT tables
let bios_start = gpt_reserved / block_size;
let bios_end = (mibi / block_size) - 1;
// Second megabyte of the disk is reserved for EFI partition
let efi_start = bios_end + 1;
let efi_end = efi_start + (mibi / block_size) - 1;
// The rest of the disk is RedoxFS, reserving the GPT table mirror at the end of disk
let redoxfs_start = efi_end + 1;
let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1;
// Format and install BIOS partition
{
// Write BIOS bootloader to disk
eprintln!("Write bootloader with size {:#x}", bootloader_bios.len());
disk_file.seek(SeekFrom::Start(0))?;
disk_file.write_all(&bootloader_bios)?;
// Replace MBR tables with protective MBR
let mbr_blocks = ((disk_size + block_size - 1) / block_size) - 1;
eprintln!("Writing protective MBR with disk blocks {:#x}", mbr_blocks);
gpt::mbr::ProtectiveMBR::with_lb_size(mbr_blocks as u32)
.update_conservative(&mut disk_file)?;
// Open disk, mark it as not initialized
let mut gpt_disk = gpt::GptConfig::new()
.initialized(false)
.writable(true)
.logical_block_size(gpt_block_size)
.create_from_device(Box::new(&mut disk_file), None)?;
// Add BIOS boot partition
let mut partitions = BTreeMap::new();
let mut partition_id = 1;
partitions.insert(partition_id, gpt::partition::Partition {
part_type_guid: gpt::partition_types::BIOS,
part_guid: uuid::Uuid::new_v4(),
first_lba: bios_start,
last_lba: bios_end,
flags: 0, // TODO
name: "BIOS".to_string(),
});
partition_id += 1;
// Add EFI boot partition
partitions.insert(partition_id, gpt::partition::Partition {
part_type_guid: gpt::partition_types::EFI,
part_guid: uuid::Uuid::new_v4(),
first_lba: efi_start,
last_lba: efi_end,
flags: 0, // TODO
name: "EFI".to_string(),
});
partition_id += 1;
// Add RedoxFS partition
partitions.insert(partition_id, gpt::partition::Partition {
//TODO: Use REDOX_REDOXFS type (needs GPT crate changes)
part_type_guid: gpt::partition_types::LINUX_FS,
part_guid: uuid::Uuid::new_v4(),
first_lba: redoxfs_start,
last_lba: redoxfs_end,
flags: 0,
name: "REDOX".to_string(),
});
eprintln!("Writing GPT tables: {:#?}", partitions);
// Initialize GPT table
gpt_disk.update_partitions(partitions)?;
// Write partition layout, returning disk file
gpt_disk.write()?;
}
// Format and install EFI partition
{
let disk_efi_start = efi_start * block_size;
let disk_efi_end = (efi_end + 1) * block_size;
let mut disk_efi = fscommon::StreamSlice::new(
&mut disk_file,
disk_efi_start,
disk_efi_end,
)?;
eprintln!("Formatting EFI partition with size {:#x}", disk_efi_end - disk_efi_start);
fatfs::format_volume(&mut disk_efi, fatfs::FormatVolumeOptions::new())?;
eprintln!("Opening EFI partition");
let fs = fatfs::FileSystem::new(&mut disk_efi, fatfs::FsOptions::new())?;
eprintln!("Creating EFI directory");
let root_dir = fs.root_dir();
root_dir.create_dir("EFI")?;
eprintln!("Creating EFI/BOOT directory");
let efi_dir = root_dir.open_dir("EFI")?;
efi_dir.create_dir("BOOT")?;
eprintln!("Writing EFI/BOOT/{} file with size {:#x}", bootloader_efi_name, bootloader_efi.len());
let boot_dir = efi_dir.open_dir("BOOT")?;
let mut file = boot_dir.create_file(bootloader_efi_name)?;
file.truncate()?;
file.write_all(&bootloader_efi)?;
}
// Format and install RedoxFS partition
eprintln!("Installing to RedoxFS partition with size {:#x}", (redoxfs_end - redoxfs_start) * block_size);
let disk_redoxfs = DiskIo(fscommon::StreamSlice::new(
disk_file,
redoxfs_start * block_size,
(redoxfs_end + 1) * block_size
)?);
with_redoxfs(
disk_redoxfs,
password_opt,
callback
)
}
/// Install Redox, described by `config`, to `output`.
///
/// `output` may be either an existing directory (the filesystem tree is
/// installed straight into it) or a disk image / device path (bootloaders are
/// fetched, the disk is partitioned/formatted, and the tree is installed into
/// the mounted RedoxFS root).
///
/// * `config` - what to install; moved into the callback in the disk case
/// * `output` - target directory or disk path
/// * `cookbook` - optional cookbook location, forwarded to `install_dir` and
///   `fetch_bootloaders`
/// * `live` - forwarded to `fetch_bootloaders`; presumably selects a
///   live-boot bootloader variant - TODO confirm
pub fn install<P, S>(config: Config, output: P, cookbook: Option<S>, live: bool)
-> Result<()> where
P: AsRef<Path>,
S: AsRef<str>,
{
println!("Install {:#?} to {}", config, output.as_ref().display());
if output.as_ref().is_dir() {
// Directory target: no partitioning or bootloaders needed.
install_dir(config, output, cookbook)
} else {
// Disk target: fetch bootloaders, then partition, format and mount the
// disk, installing into the mounted RedoxFS root.
let (bootloader_bios, bootloader_efi) = fetch_bootloaders(&config, cookbook.as_ref(), live)?;
with_whole_disk(output, &bootloader_bios, &bootloader_efi, None, // password_opt: None (presumably unencrypted - confirm)
move |mount_path| {
install_dir(config, mount_path, cookbook)
}
)
}
}
| with_whole_disk | identifier_name |
verify.rs | //! The proof verifier itself.
//!
//! This is structured as an analysis pass, however it does not have any outputs
//! beyond the error indications. In particular, it does not generate parsed
//! proofs as a side effect; the proof parser will need to be a separate module.
//!
//! The majority of time spent verifying proofs is spent checking steps, which
//! can be regarded as a kind of interpreter. While checking each step, there
//! is a stack of known results; each step is an operation which pops zero or
//! more results off the stack, does local checks, and pushes a new result.
//! This module has been written such that it does not allocate memory during
//! nominal operation. Memory is reused from one proof to the next, and
//! intermediate results are handled as slices in a long-lived buffer.
//!
//! Results are densely represented as byte strings, using the high bit to mark
//! the end of each token. Since most math tokens are shorter than 4 bytes,
//! this saves memory operations over an atom-based approach; but measurements
//! of the actual speed of the atom approach would not be unwelcome.
//!
//! More speculatively, strings could be represented as their universal hash
//! values, using a concatenable universal hash such as polynomial evaluation
//! mod 2^61-1 (a very convenient Mersenne prime). This would eliminate all
//! branches, and all branch mispredicts, in the memcpy and memcmp parts of this
//! code, at the expense of making scopeck even more useless to other consumers
//! than it is now.
use bit_set::Bitset;
use diag::Diagnostic;
use nameck::Atom;
use nameck::Nameset;
use parser;
use parser::Comparer;
use parser::copy_token;
use parser::NO_STATEMENT;
use parser::Segment;
use parser::SegmentId;
use parser::SegmentOrder;
use parser::SegmentRef;
use parser::StatementAddress;
use parser::StatementRef;
use parser::StatementType;
use parser::TokenPtr;
use scopeck;
use scopeck::ExprFragment;
use scopeck::Frame;
use scopeck::Hyp::*;
use scopeck::ScopeReader;
use scopeck::ScopeResult;
use scopeck::ScopeUsage;
use scopeck::VerifyExpr;
use segment_set::SegmentSet;
use std::cmp::Ordering;
use std::mem;
use std::ops::Range;
use std::result;
use std::sync::Arc;
use std::u32;
use std::usize;
use util::copy_portion;
use util::fast_clear;
use util::fast_extend;
use util::HashMap;
use util::new_map;
use util::ptr_eq;
// Proofs are very fragile and there are very few situations where errors are
// recoverable, so we bail out using Result on any error.
macro_rules! try_assert {
// `try_assert!(cond, diag)` bails out of the enclosing function with
// `Err(diag)` (via `try!`) when `cond` is false.
( $cond:expr , $($arg:tt)+ ) => {
if !$cond {
try!(Err($($arg)+))
}
}
}
/// Preparing a step means that it can be referenced using a varint in a
/// compressed proof. Compressed steps are either saved prior
/// results/hypotheses, which are copied directly onto the stack, or previously
/// proved assertions which require substitution before use.
enum PreparedStep<'a, D> {
// A hypothesis or saved step: its variable bitset, typecode atom, the
// range of its math string within the shared stack buffer, and the
// builder's data node for it. Executing it is a plain push.
Hyp(Bitset, Atom, Range<usize>, D),
// A previously proved assertion; executing it pops its hypotheses and
// performs a substitution.
Assert(&'a Frame),
}
use self::PreparedStep::*;
/// An entry on the stack is notionally just a string of math symbols, but DV
/// checking is faster if we track disjoint variables as a bit vector, and the
/// typecode is not realignable so it can be profitably separated.
///
/// This type would be Copy except for the fact that the bitset can require
/// overflow storage :(.
#[derive(Clone)]
pub struct StackSlot {
// set of variable indexes occurring in `expr`, tracked for fast $d checks
vars: Bitset,
// the typecode atom, stored separately from the math string
code: Atom,
// location of the math string within the shared `stack_buffer`
expr: Range<usize>,
}
/// A constructor trait for plugging in to the verifier, to collect extra data during the
/// verification pass
pub trait ProofBuilder {
/// The data type being generated
type Item: Clone;
/// The hyp gathering type
type Accum: Default;
/// Add a new hyp to the accumulation type
fn push(&mut self, hyps: &mut Self::Accum, hyp: Self::Item);
/// Create a proof data node from a statement, the data for the hypotheses,
/// and the compressed constant string
fn build(&mut self,
addr: StatementAddress,
hyps: Self::Accum,
pool: &[u8],
expr: Range<usize>)
-> Self::Item;
}
/// The "null" proof builder, which creates no extra data. This
/// is used for one-shot verification, where no extra data beyond the stack
/// information is needed.
impl ProofBuilder for () {
    /// No proof data is generated.
    type Item = ();
    /// Nothing to accumulate for hypotheses either.
    type Accum = ();

    /// Accumulating a hypothesis is a no-op.
    fn push(&mut self, _: &mut (), _: ()) {}

    /// Building a node is a no-op; the explicit `-> ()` return type was
    /// removed as redundant (clippy `unused_unit`).
    fn build(&mut self, _: StatementAddress, _: (), _: &[u8], _: Range<usize>) {}
}
/// Working memory used by the verifier on a segment. This expands for the
/// first few proofs and the rest can be handled without allocation.
struct VerifyState<'a, P: 'a + ProofBuilder> {
/// Segment we are working on
this_seg: SegmentRef<'a>,
/// Segment order oracle
order: &'a SegmentOrder,
/// Atom name oracle, used for hypotheses
nameset: &'a Nameset,
/// Used to access previously proved assertions
scoper: ScopeReader<'a>,
/// Used to produce proof trees as a side effect of verification
builder: &'a mut P,
/// The extended frame we are working on
cur_frame: &'a Frame,
/// Steps which can be invoked in the current proof, grows on every Z
prepared: Vec<PreparedStep<'a, P::Item>>,
/// Stack of active subtrees
stack: Vec<(P::Item, StackSlot)>,
/// Buffer for math strings of subtrees and hypotheses; shared to reduce
/// actual copying when a hypothesis or saved step is recalled
stack_buffer: Vec<u8>,
/// Scratch space used only when checking the final step
temp_buffer: Vec<u8>,
/// Scratch space used for a substitution mapping while invoking a prior
/// assertion
subst_info: Vec<(Range<usize>, Bitset)>,
/// Tracks mandatory and optional variables in use in the current proof
var2bit: HashMap<Atom, usize>,
/// Disjoint variable conditions in the current extended frame
dv_map: &'a [Bitset],
}
type Result<T> = result::Result<T, Diagnostic>;
/// Variables are added lazily to the extended frame. All variables which are
/// associated with hypotheses or $d constraints are numbered by scopeck, but if
/// a dummy variable is used in a proof without a $d constraint it won't be
/// discovered until we get here, and a number needs to be assigned to it.
/// Unfortunately this does mean that it'll be outside the valid range of dv_map
/// and dv_map checks need to guard against that.
fn map_var<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, token: Atom) -> usize {
    // The next free bit index, used only if `token` has not been seen before;
    // it must be read before `entry` takes the mutable borrow of the map.
    let next_free = state.var2bit.len();
    // NOTE: a single scopeck-allocated index shared by all non-$d-ed
    // variables might also work, since such variables are not observably
    // disjoint.
    let slot = state.var2bit.entry(token).or_insert(next_free);
    *slot
}
// the initial hypotheses are accessed directly from the initial extended frame
// to avoid having to look up their pseudo-frames by name; also, $e statements
// no longer have pseudo-frames, so this is the only way to prepare an $e
fn prepare_hypothesis<'a, P: ProofBuilder>(state: &mut VerifyState<P>, hyp: &'a scopeck::Hyp) {
// Renders the hypothesis as a math string at the end of stack_buffer and
// records it (with its variable bitset and typecode) in state.prepared.
let mut vars = Bitset::new();
let tos = state.stack_buffer.len();
match hyp {
&Floating(_addr, var_index, _typecode) => {
// $f: the math string is just the variable's name
fast_extend(&mut state.stack_buffer,
state.nameset.atom_name(state.cur_frame.var_list[var_index]));
// high bit marks the end of a token in the dense representation
*state.stack_buffer.last_mut().unwrap() |= 0x80;
vars.set_bit(var_index); // and we have prior knowledge it's identity mapped
}
&Essential(_addr, ref expr) => {
// this is the first of many subtle variations on the "interpret an
// ExprFragment" theme in this module.
for part in &*expr.tail {
// constant prefix, then the variable's name (high bit = token end)
fast_extend(&mut state.stack_buffer,
&state.cur_frame.const_pool[part.prefix.clone()]);
fast_extend(&mut state.stack_buffer,
state.nameset.atom_name(state.cur_frame.var_list[part.var]));
*state.stack_buffer.last_mut().unwrap() |= 0x80;
vars.set_bit(part.var); // and we have prior knowledge it's identity mapped
}
// trailing constants after the last variable
fast_extend(&mut state.stack_buffer,
&state.cur_frame.const_pool[expr.rump.clone()]);
}
};
let ntos = state.stack_buffer.len();
// record the rendered expression tos..ntos as a recallable step
state.prepared
.push(Hyp(vars,
hyp.typecode(),
tos..ntos,
state.builder.build(hyp.address(),
Default::default(),
&state.stack_buffer,
tos..ntos)));
}
/// Adds a named $e hypothesis to the prepared array. These are not kept in the
/// frame array due to infrequent use, so other measures are needed. This is
/// not normally used by compressed proofs.
///
/// This is used as a fallback when looking up a $e in the assertion hashtable
/// fails.
fn prepare_named_hyp<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> {
    // Scan the extended frame's hypothesis list for an $e with this label.
    for hyp in &*state.cur_frame.hypotheses {
        if let &Essential(addr, _) = hyp {
            // $e statements are not valid across segments, so this can be
            // done as a local lookup in this_seg. Since we always invalidate
            // the VerifySegment if the current segment has changed in any
            // way, we don't even need to track dependencies here.
            assert!(addr.segment_id == state.this_seg.id);
            if state.this_seg.statement(addr.index).label() == label {
                prepare_hypothesis(state, hyp);
                return Ok(());
            }
        }
    }
    // whoops, not in the assertion table _or_ the extended frame.
    // (Tail expression instead of the original redundant `return ...;` -
    // clippy `needless_return`.)
    Err(Diagnostic::StepMissing(copy_token(label)))
}
/// Used for named step references. For NORMAL proofs this is immediately
/// before execute_step, but for COMPRESSED proofs all used steps are prepared
/// ahead of time, and assigned sequential numbers for later use.
fn | <P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> {
// it's either an assertion or a hypothesis. $f hyps have pseudo-frames
// which this function can use, $e don't and need to be looked up in the
// local hyp list after the frame lookup fails
let frame = match state.scoper.get(label) {
Some(fp) => fp,
None => return prepare_named_hyp(state, label),
};
// disallow circular reasoning
let valid = frame.valid;
let pos = state.cur_frame.valid.start;
try_assert!(state.order.cmp(&pos, &valid.start) == Ordering::Greater,
Diagnostic::StepUsedBeforeDefinition(copy_token(label)));
try_assert!(valid.end == NO_STATEMENT ||
pos.segment_id == valid.start.segment_id && pos.index < valid.end,
Diagnostic::StepUsedAfterScope(copy_token(label)));
if frame.stype == StatementType::Axiom || frame.stype == StatementType::Provable {
state.prepared.push(Assert(frame));
} else {
let mut vars = Bitset::new();
for &var in &*frame.var_list {
vars.set_bit(map_var(state, var));
}
let tos = state.stack_buffer.len();
fast_extend(&mut state.stack_buffer, &frame.stub_expr);
let ntos = state.stack_buffer.len();
state.prepared
.push(Hyp(vars,
frame.target.typecode,
tos..ntos,
state.builder.build(valid.start,
Default::default(),
&state.stack_buffer,
tos..ntos)));
}
Ok(())
}
// perform a substitution after it has been built in `vars`, appending to
// `target`
#[inline(always)]
fn do_substitute(target: &mut Vec<u8>,
                 frame: &Frame,
                 expr: &VerifyExpr,
                 vars: &[(Range<usize>, Bitset)]) {
    // Alternate each fragment's constant material with the replacement text
    // of its variable.
    for fragment in &*expr.tail {
        fast_extend(target, &frame.const_pool[fragment.prefix.clone()]);
        // the replacement was built earlier in this same buffer, so it is
        // copied from one portion of `target` onto its end
        copy_portion(target, vars[fragment.var].0.clone());
    }
    // constant material after the last variable
    fast_extend(target, &frame.const_pool[expr.rump.clone()]);
}
// like a substitution and equality check, but in one pass
#[inline(always)]
fn do_substitute_eq(mut compare: &[u8],
                    frame: &Frame,
                    expr: &VerifyExpr,
                    vars: &[(Range<usize>, Bitset)],
                    var_buffer: &[u8])
                    -> bool {
    // Consume `slice` from the front of `compare`. Returns true on MISMATCH
    // (candidate too short, or bytes differ), which short-circuits the
    // caller. The two original early-exit `if`s are merged with `||`, which
    // preserves the short-circuit that guards the slice indexing.
    fn step(compare: &mut &[u8], slice: &[u8]) -> bool {
        let len = slice.len();
        if (*compare).len() < len || slice != &(*compare)[0..len] {
            return true;
        }
        *compare = &(*compare)[len..];
        false
    }
    // Walk the expression exactly as do_substitute would emit it, comparing
    // instead of appending.
    for part in &*expr.tail {
        if step(&mut compare, &frame.const_pool[part.prefix.clone()]) {
            return false;
        }
        if step(&mut compare, &var_buffer[vars[part.var].0.clone()]) {
            return false;
        }
    }
    if step(&mut compare, &frame.const_pool[expr.rump.clone()]) {
        return false;
    }
    // Equal only if the substitution consumed the entire candidate.
    // (Tail expressions replace the original redundant `return x;` forms -
    // clippy `needless_return`.)
    compare.is_empty()
}
// substitute with the _names_ of variables, for the final "did we prove what we
// claimed we would" check
fn do_substitute_raw(target: &mut Vec<u8>, frame: &Frame, nameset: &Nameset) {
    // Walk the frame's target expression, emitting constant runs interleaved
    // with the textual names of the variables; the high bit flags the end of
    // each emitted name token.
    for fragment in &*frame.target.tail {
        fast_extend(target, &frame.const_pool[fragment.prefix.clone()]);
        fast_extend(target, nameset.atom_name(frame.var_list[fragment.var]));
        *target.last_mut().unwrap() |= 0x80;
    }
    // constant material after the last variable
    fast_extend(target, &frame.const_pool[frame.target.rump.clone()]);
}
// generate a bitmask for a substituted expression
#[inline(always)]
fn do_substitute_vars(expr: &[ExprFragment], vars: &[(Range<usize>, Bitset)]) -> Bitset {
    // Union of the variable bitmasks of every fragment in the expression,
    // accumulated with a fold instead of an explicit mutable loop.
    expr.iter().fold(Bitset::new(), |mut mask, fragment| {
        mask |= &vars[fragment.var].1;
        mask
    })
}
/// This is the main "VM" function, and responsible for ~30% of CPU time during
/// a one-shot verify operation.
fn execute_step<P: ProofBuilder>(state: &mut VerifyState<P>, index: usize) -> Result<()> {
// `index` names a previously prepared step (see prepare_step)
try_assert!(index < state.prepared.len(), Diagnostic::StepOutOfRange);
let fref = match state.prepared[index] {
Hyp(ref vars, code, ref expr, ref data) => {
// hypotheses/saved steps are the easy case. unfortunately, this is
// also a very unpredictable branch
state.stack.push((data.clone(),
StackSlot {
vars: vars.clone(),
code: code,
expr: expr.clone(),
}));
return Ok(());
}
Assert(fref) => fref,
};
// the assertion's hypotheses must occupy the top of the stack
let sbase = try!(state.stack
.len()
.checked_sub(fref.hypotheses.len())
.ok_or(Diagnostic::ProofUnderflow));
// grow the substitution table to cover every mandatory variable
while state.subst_info.len() < fref.mandatory_count {
// this is mildly unhygenic, since slots corresponding to $e hyps won't get cleared, but
// scopeck shouldn't generate references to them
state.subst_info.push((0..0, Bitset::new()));
}
let mut datavec = Default::default();
// process the hypotheses of the assertion we're about to apply. $f hyps
// allow the caller to define a replacement for a variable; $e hyps are
// logical hypotheses that must have been proved; the result is then
// substituted and pushed.
//
// since a variable must be $f-declared before it can appear in an $e (or
// else we'll ignore the $e), and that logical file order is reflected in
// the stack order of the hypotheses, we can do this in one pass
for (ix, hyp) in fref.hypotheses.iter().enumerate() {
let (ref data, ref slot) = state.stack[sbase + ix];
state.builder.push(&mut datavec, data.clone());
match hyp {
&Floating(_addr, var_index, typecode) => {
// $f: the stack entry becomes the substitution for this variable
try_assert!(slot.code == typecode, Diagnostic::StepFloatWrongType);
state.subst_info[var_index] = (slot.expr.clone(), slot.vars.clone());
}
&Essential(_addr, ref expr) => {
// $e: the stack entry must equal the substituted hypothesis
try_assert!(slot.code == expr.typecode, Diagnostic::StepEssenWrongType);
try_assert!(do_substitute_eq(&state.stack_buffer[slot.expr.clone()],
fref,
&expr,
&state.subst_info,
&state.stack_buffer),
Diagnostic::StepEssenWrong);
}
}
}
// replace the hypotheses on the stack with the substituted target
// expression. does not physically remove the hypotheses from the stack
// pool, because they might have been saved steps or hypotheses, and
// deciding whether we need to move anything would swamp any savings, anyway
// - remember that this function is largely a branch predictor benchmark
let tos = state.stack_buffer.len();
do_substitute(&mut state.stack_buffer,
fref,
&fref.target,
&state.subst_info);
let ntos = state.stack_buffer.len();
state.stack.truncate(sbase);
state.stack
.push((state.builder.build(fref.valid.start, datavec, &state.stack_buffer, tos..ntos),
StackSlot {
code: fref.target.typecode,
vars: do_substitute_vars(&fref.target.tail, &state.subst_info),
expr: tos..ntos,
}));
// check $d constraints on the used assertion now that the dust has settled.
// Remember that we might have variable indexes allocated during the proof
// that are out of range for dv_map
for &(ix1, ix2) in &*fref.mandatory_dv {
for var1 in &state.subst_info[ix1].1 {
for var2 in &state.subst_info[ix2].1 {
try_assert!(var1 < state.dv_map.len() && state.dv_map[var1].has_bit(var2),
Diagnostic::ProofDvViolation);
}
}
}
Ok(())
}
fn finalize_step<P: ProofBuilder>(state: &mut VerifyState<P>) -> Result<P::Item> {
// if we get here, it's a valid proof, but was it the _right_ valid proof?
// exactly one result may remain on the stack...
try_assert!(state.stack.len() <= 1, Diagnostic::ProofExcessEnd);
let &(ref data, ref tos) = try!(state.stack.last().ok_or(Diagnostic::ProofNoSteps));
// ...and it must carry the typecode and math string the statement claims
try_assert!(tos.code == state.cur_frame.target.typecode,
Diagnostic::ProofWrongTypeEnd);
fast_clear(&mut state.temp_buffer);
// render the claimed statement into temp_buffer for a byte comparison
do_substitute_raw(&mut state.temp_buffer, &state.cur_frame, state.nameset);
try_assert!(state.stack_buffer[tos.expr.clone()] == state.temp_buffer[..],
Diagnostic::ProofWrongExprEnd);
Ok(data.clone())
}
// Record the current top of stack as a prepared (recallable) step; used for
// the Z command in compressed proofs.
fn save_step<P: ProofBuilder>(state: &mut VerifyState<P>) {
    let newest = state.stack.last().expect("can_save should prevent getting here");
    let (ref data, ref slot) = *newest;
    state.prepared.push(Hyp(slot.vars.clone(), slot.code, slot.expr.clone(), data.clone()));
}
// proofs are not self-synchronizing, so it's not likely to get >1 usable error
fn verify_proof<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>,
stmt: StatementRef<'a>)
-> Result<P::Item> {
// clear, but do not free memory
state.stack.clear();
fast_clear(&mut state.stack_buffer);
state.prepared.clear();
state.var2bit.clear();
state.dv_map = &state.cur_frame.optional_dv;
// temp_buffer is cleared before use; subst_info should be overwritten
// before use if scopeck is working correctly
// use scopeck-assigned numbers for mandatory variables and optional
// variables with active $d constraints. optional variables without active
// $d constraints are numbered on demand by map_var
for (index, &tokr) in state.cur_frame.var_list.iter().enumerate() {
state.var2bit.insert(tokr, index);
}
if stmt.proof_len() > 0 && stmt.proof_slice_at(0) == b"(" {
// this is a compressed proof
let mut i = 1;
// compressed proofs preload the hypotheses so they don't need to (but
// are not forbidden to) reference them by name
for hyp in &*state.cur_frame.hypotheses {
prepare_hypothesis(state, hyp);
}
// parse and prepare the label list before the )
loop {
try_assert!(i < stmt.proof_len(), Diagnostic::ProofUnterminatedRoster);
let chunk = stmt.proof_slice_at(i);
i += 1;
if chunk == b")" {
break;
}
try!(prepare_step(state, chunk));
}
// after ) is a packed list of varints. decode them and execute the
// corresponding steps. the varint decoder is surprisingly CPU-heavy,
// presumably due to branch overhead
let mut k = 0usize;
let mut can_save = false;
while i < stmt.proof_len() {
let chunk = stmt.proof_slice_at(i);
for &ch in chunk {
if ch >= b'A' && ch <= b'T' {
// A-T is the final base-20 digit; the varint is complete, so
// run the step it names
k = k * 20 + (ch - b'A') as usize;
try!(execute_step(state, k));
k = 0;
can_save = true;
} else if ch >= b'U' && ch <= b'Y' {
// U-Y is a leading base-5 digit, biased by 1
k = k * 5 + 1 + (ch - b'U') as usize;
try_assert!(k < (u32::max_value() as usize / 20) - 1,
Diagnostic::ProofMalformedVarint);
can_save = false;
} else if ch == b'Z' {
// Z saves the result of the immediately preceding step
try_assert!(can_save, Diagnostic::ProofInvalidSave);
save_step(state);
can_save = false;
} else if ch == b'?' {
try_assert!(k == 0, Diagnostic::ProofMalformedVarint);
return Err(Diagnostic::ProofIncomplete);
}
}
i += 1;
}
// a trailing partial varint is malformed
try_assert!(k == 0, Diagnostic::ProofMalformedVarint);
} else {
let mut count = 0;
// NORMAL mode proofs are just a list of steps, with no saving provision
for i in 0..stmt.proof_len() {
let chunk = stmt.proof_slice_at(i);
try_assert!(chunk != b"?", Diagnostic::ProofIncomplete);
try!(prepare_step(state, chunk));
try!(execute_step(state, count));
count += 1;
}
}
// the stack must now hold exactly the claimed result
finalize_step(state)
}
/// Stored result of running the verifier on a segment.
struct VerifySegment {
// the segment text this result was computed from; compared by pointer
// identity to decide whether the result can be reused
source: Arc<Segment>,
// record of the names/scopes consumed, used to test whether the result is
// still valid on recalculation
scope_usage: ScopeUsage,
// proof errors found, keyed by the failing statement's address
diagnostics: HashMap<StatementAddress, Diagnostic>,
}
/// Analysis pass result for the verifier.
#[derive(Default,Clone)]
pub struct VerifyResult {
segments: HashMap<SegmentId, Arc<VerifySegment>>,
}
impl VerifyResult {
/// Report errors found during database verification.
pub fn diagnostics(&self) -> Vec<(StatementAddress, Diagnostic)> {
let mut out = Vec::new();
for vsr in self.segments.values() {
for (&sa, &ref diag) in &vsr.diagnostics {
out.push((sa, diag.clone()));
}
}
out
}
}
/// Driver which verifies each statement in a segment.
fn verify_segment(sset: &SegmentSet,
nset: &Nameset,
scopes: &ScopeResult,
sid: SegmentId)
-> VerifySegment {
let mut diagnostics = new_map();
// placeholder frame, replaced per-statement before any proof is checked
let dummy_frame = Frame::default();
let sref = sset.segment(sid);
let mut state = VerifyState {
this_seg: sref,
scoper: ScopeReader::new(scopes),
nameset: nset,
// the () builder collects no extra proof data for one-shot verification
builder: &mut (),
order: &sset.order,
cur_frame: &dummy_frame,
stack: Vec::new(),
stack_buffer: Vec::new(),
prepared: Vec::new(),
temp_buffer: Vec::new(),
subst_info: Vec::new(),
var2bit: new_map(),
dv_map: &dummy_frame.optional_dv,
};
// use the _same_ VerifyState so that memory can be reused
for stmt in sref {
// only intend to check $p statements
if stmt.statement_type() == StatementType::Provable {
// no valid frame -> no use checking
// may wish to record a secondary error?
if let Some(frame) = state.scoper.get(stmt.label()) {
state.cur_frame = frame;
if let Err(diag) = verify_proof(&mut state, stmt) {
diagnostics.insert(stmt.address(), diag);
}
}
}
}
// bundle the diagnostics with the inputs needed for reuse checking
VerifySegment {
source: (*sref).clone(),
diagnostics: diagnostics,
scope_usage: state.scoper.into_usage(),
}
}
/// Calculates or updates the verification result for a database.
pub fn verify(result: &mut VerifyResult,
segments: &Arc<SegmentSet>,
nset: &Arc<Nameset>,
scope: &Arc<ScopeResult>) {
// take the previous results so unchanged segments can be reused
let old = mem::replace(&mut result.segments, new_map());
let mut ssrq = Vec::new();
// queue one verification job per segment on the segment set's executor
for sref in segments.segments() {
let segments2 = segments.clone();
let nset = nset.clone();
let scope = scope.clone();
let id = sref.id;
let old_res_o = old.get(&id).cloned();
ssrq.push(segments.exec.exec(sref.bytes(), move || {
let sref = segments2.segment(id);
// reuse the previous result when the segment text is unchanged
// (pointer equality) and everything it read from nameck/scopeck
// is still valid
if let Some(old_res) = old_res_o {
if old_res.scope_usage.valid(&nset, &scope) &&
ptr_eq::<Segment>(&old_res.source, &sref) {
return (id, old_res.clone());
}
}
if segments2.options.trace_recalc {
println!("verify({:?})", parser::guess_buffer_name(&sref.buffer));
}
(id, Arc::new(verify_segment(&segments2, &nset, &scope, id)))
}))
}
result.segments.clear();
// collect the finished (or reused) per-segment results
for promise in ssrq {
let (id, arc) = promise.wait();
result.segments.insert(id, arc);
}
}
/// Parse a single $p statement, returning the result of the given
/// proof builder, or an error if the proof is faulty
pub fn verify_one<P: ProofBuilder>(sset: &SegmentSet,
nset: &Nameset,
scopes: &ScopeResult,
builder: &mut P,
stmt: StatementRef)
-> result::Result<P::Item, Diagnostic> {
// placeholder frame; replaced with the statement's real frame below
let dummy_frame = Frame::default();
let mut state = VerifyState {
this_seg: stmt.segment(),
scoper: ScopeReader::new(scopes),
nameset: nset,
// caller-supplied builder collects proof data during verification
builder: builder,
order: &sset.order,
cur_frame: &dummy_frame,
stack: Vec::new(),
stack_buffer: Vec::new(),
prepared: Vec::new(),
temp_buffer: Vec::new(),
subst_info: Vec::new(),
var2bit: new_map(),
dv_map: &dummy_frame.optional_dv,
};
// only $p statements carry proofs
assert!(stmt.statement_type() == StatementType::Provable);
let frame = state.scoper.get(stmt.label()).unwrap();
state.cur_frame = frame;
verify_proof(&mut state, stmt)
}
| prepare_step | identifier_name |
verify.rs | //! The proof verifier itself.
//!
//! This is structured as an analysis pass, however it does not have any outputs
//! beyond the error indications. In particular, it does not generate parsed
//! proofs as a side effect; the proof parser will need to be a separate module.
//!
//! The majority of time spent verifying proofs is spent checking steps, which
//! can be regarded as a kind of interpreter. While checking each step, there
//! is a stack of known results; each step is an operation which pops zero or
//! more results off the stack, does local checks, and pushes a new result.
//! This module has been written such that it does not allocate memory during
//! nominal operation. Memory is reused from one proof to the next, and
//! intermediate results are handled as slices in a long-lived buffer.
//!
//! Results are densely represented as byte strings, using the high bit to mark
//! the end of each token. Since most math tokens are shorter than 4 bytes,
//! this saves memory operations over an atom-based approach; but measurements
//! of the actual speed of the atom approach would not be unwelcome.
//!
//! More speculatively, strings could be represented as their universal hash
//! values, using a concatenable universal hash such as polynomial evaluation
//! mod 2^61-1 (a very convenient Mersenne prime). This would eliminate all
//! branches, and all branch mispredicts, in the memcpy and memcmp parts of this
//! code, at the expense of making scopeck even more useless to other consumers
//! than it is now.
use bit_set::Bitset;
use diag::Diagnostic;
use nameck::Atom;
use nameck::Nameset;
use parser;
use parser::Comparer;
use parser::copy_token;
use parser::NO_STATEMENT;
use parser::Segment;
use parser::SegmentId;
use parser::SegmentOrder;
use parser::SegmentRef;
use parser::StatementAddress;
use parser::StatementRef;
use parser::StatementType;
use parser::TokenPtr;
use scopeck;
use scopeck::ExprFragment;
use scopeck::Frame;
use scopeck::Hyp::*;
use scopeck::ScopeReader;
use scopeck::ScopeResult;
use scopeck::ScopeUsage;
use scopeck::VerifyExpr;
use segment_set::SegmentSet;
use std::cmp::Ordering;
use std::mem;
use std::ops::Range;
use std::result;
use std::sync::Arc;
use std::u32;
use std::usize;
use util::copy_portion;
use util::fast_clear;
use util::fast_extend;
use util::HashMap;
use util::new_map;
use util::ptr_eq;
// Proofs are very fragile and there are very few situations where errors are
// recoverable, so we bail out using Result on any error.
macro_rules! try_assert {
( $cond:expr , $($arg:tt)+ ) => {
if !$cond {
try!(Err($($arg)+))
}
}
}
/// Preparing a step means that it can be referenced using a varint in a
/// compressed proof. Compressed steps are either saved prior
/// results/hypotheses, which are copied directly onto the stack, or previously
/// proved assertions which require substitution before use.
enum PreparedStep<'a, D> {
Hyp(Bitset, Atom, Range<usize>, D),
Assert(&'a Frame),
}
use self::PreparedStep::*;
/// An entry on the stack is notionally just a string of math symbols, but DV
/// checking is faster if we track disjoint variables as a bit vector, and the
/// typecode is not realignable so it can be profitably separated.
///
/// This type would be Copy except for the fact that the bitset can require
/// overflow storage :(.
#[derive(Clone)]
pub struct StackSlot {
vars: Bitset,
code: Atom,
expr: Range<usize>,
}
/// A constructor trait for plugging in to the verifier, to collect extra data during the
/// verification pass
pub trait ProofBuilder {
/// The data type being generated
type Item: Clone;
/// The hyp gathering type
type Accum: Default;
/// Add a new hyp to the accumulation type
fn push(&mut self, hyps: &mut Self::Accum, hyp: Self::Item);
/// Create a proof data node from a statement, the data for the hypotheses,
/// and the compressed constant string
fn build(&mut self,
addr: StatementAddress,
hyps: Self::Accum,
pool: &[u8],
expr: Range<usize>)
-> Self::Item;
}
/// The "null" proof builder, which creates no extra data. This
/// is used for one-shot verification, where no extra data beyond the stack
/// information is needed.
impl ProofBuilder for () {
type Item = ();
type Accum = ();
fn push(&mut self, _: &mut (), _: ()) {}
fn build(&mut self, _: StatementAddress, _: (), _: &[u8], _: Range<usize>) -> () {}
}
/// Working memory used by the verifier on a segment. This expands for the
/// first few proofs and the rest can be handled without allocation.
struct VerifyState<'a, P: 'a + ProofBuilder> {
/// Segment we are working on
this_seg: SegmentRef<'a>,
/// Segment order oracle
order: &'a SegmentOrder,
/// Atom name oracle, used for hypotheses
nameset: &'a Nameset,
/// Used to access previously proved assertions
scoper: ScopeReader<'a>,
/// Used to produce proof trees as a side effect of verification
builder: &'a mut P,
/// The extended frame we are working on
cur_frame: &'a Frame,
/// Steps which can be invoked in the current proof, grows on every Z
prepared: Vec<PreparedStep<'a, P::Item>>,
/// Stack of active subtrees
stack: Vec<(P::Item, StackSlot)>,
/// Buffer for math strings of subtrees and hypotheses; shared to reduce
/// actual copying when a hypothesis or saved step is recalled
stack_buffer: Vec<u8>,
/// Scratch space used only when checking the final step
temp_buffer: Vec<u8>,
/// Scratch space used for a substitution mapping while invoking a prior
/// assertion
subst_info: Vec<(Range<usize>, Bitset)>,
/// Tracks mandatory and optional variables in use in the current proof
var2bit: HashMap<Atom, usize>,
/// Disjoint variable conditions in the current extended frame
dv_map: &'a [Bitset],
}
type Result<T> = result::Result<T, Diagnostic>;
/// Variables are added lazily to the extended frame. All variables which are
/// associated with hypotheses or $d constraints are numbered by scopeck, but if
/// a dummy variable is used in a proof without a $d constraint it won't be
/// discovered until we get here, and a number needs to be assigned to it.
/// Unfortunately this does mean that it'll be outside the valid range of dv_map
/// and dv_map checks need to guard against that.
fn map_var<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, token: Atom) -> usize {
// next free index, used only if `token` is new; must be read before the
// mutable borrow taken by `entry`
let nbit = state.var2bit.len();
// actually, it _might not_ break anything to have a single variable index
// allocated by scopeck for all non-$d-ed variables. after all, they aren't
// observably disjoint.
*state.var2bit.entry(token).or_insert(nbit)
}
// the initial hypotheses are accessed directly from the initial extended frame
// to avoid having to look up their pseudo-frames by name; also, $e statements
// no longer have pseudo-frames, so this is the only way to prepare an $e
fn prepare_hypothesis<'a, P: ProofBuilder>(state: &mut VerifyState<P>, hyp: &'a scopeck::Hyp) {
let mut vars = Bitset::new();
let tos = state.stack_buffer.len();
match hyp {
&Floating(_addr, var_index, _typecode) => {
fast_extend(&mut state.stack_buffer,
state.nameset.atom_name(state.cur_frame.var_list[var_index]));
*state.stack_buffer.last_mut().unwrap() |= 0x80;
vars.set_bit(var_index); // and we have prior knowledge it's identity mapped
}
&Essential(_addr, ref expr) => {
// this is the first of many subtle variations on the "interpret an
// ExprFragment" theme in this module.
for part in &*expr.tail {
fast_extend(&mut state.stack_buffer,
&state.cur_frame.const_pool[part.prefix.clone()]);
fast_extend(&mut state.stack_buffer,
state.nameset.atom_name(state.cur_frame.var_list[part.var]));
*state.stack_buffer.last_mut().unwrap() |= 0x80;
vars.set_bit(part.var); // and we have prior knowledge it's identity mapped
}
fast_extend(&mut state.stack_buffer,
&state.cur_frame.const_pool[expr.rump.clone()]);
}
};
let ntos = state.stack_buffer.len();
state.prepared
.push(Hyp(vars,
hyp.typecode(), | state.builder.build(hyp.address(),
Default::default(),
&state.stack_buffer,
tos..ntos)));
}
/// Adds a named $e hypothesis to the prepared array. These are not kept in the
/// frame array due to infrequent use, so other measures are needed. This is
/// not normally used by compressed proofs.
///
/// This is used as a fallback when looking up a $e in the assertion hashtable
/// fails.
fn prepare_named_hyp<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> {
// scan the extended frame's hypothesis list for an $e with this label
for hyp in &*state.cur_frame.hypotheses {
if let &Essential(addr, _) = hyp {
assert!(addr.segment_id == state.this_seg.id);
// we don't allow $e statements to be valid across segments, so this
// can be done as a local lookup in this_seg. Since we always
// invalidate the VerifySegment if the current segment has changed
// in any way, we don't even need to track dependencies here.
if state.this_seg.statement(addr.index).label() == label {
prepare_hypothesis(state, hyp);
return Ok(());
}
}
}
// whoops, not in the assertion table _or_ the extended frame
return Err(Diagnostic::StepMissing(copy_token(label)));
}
/// Used for named step references. For NORMAL proofs this is immediately
/// before execute_step, but for COMPRESSED proofs all used steps are prepared
/// ahead of time, and assigned sequential numbers for later use.
fn prepare_step<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> {
// it's either an assertion or a hypothesis. $f hyps have pseudo-frames
// which this function can use, $e don't and need to be looked up in the
// local hyp list after the frame lookup fails
let frame = match state.scoper.get(label) {
Some(fp) => fp,
None => return prepare_named_hyp(state, label),
};
// disallow circular reasoning
let valid = frame.valid;
let pos = state.cur_frame.valid.start;
// the referenced statement must strictly precede the one being proved
try_assert!(state.order.cmp(&pos, &valid.start) == Ordering::Greater,
Diagnostic::StepUsedBeforeDefinition(copy_token(label)));
// and, if scoped, this statement must fall within that scope
try_assert!(valid.end == NO_STATEMENT ||
pos.segment_id == valid.start.segment_id && pos.index < valid.end,
Diagnostic::StepUsedAfterScope(copy_token(label)));
if frame.stype == StatementType::Axiom || frame.stype == StatementType::Provable {
// assertions are substituted at execution time; just keep the frame
state.prepared.push(Assert(frame));
} else {
// $f pseudo-frame: render its expression now, like a hypothesis
let mut vars = Bitset::new();
for &var in &*frame.var_list {
vars.set_bit(map_var(state, var));
}
let tos = state.stack_buffer.len();
fast_extend(&mut state.stack_buffer, &frame.stub_expr);
let ntos = state.stack_buffer.len();
state.prepared
.push(Hyp(vars,
frame.target.typecode,
tos..ntos,
state.builder.build(valid.start,
Default::default(),
&state.stack_buffer,
tos..ntos)));
}
Ok(())
}
// perform a substitution after it has been built in `vars`, appending to
// `target`
#[inline(always)]
fn do_substitute(target: &mut Vec<u8>,
frame: &Frame,
expr: &VerifyExpr,
vars: &[(Range<usize>, Bitset)]) {
for part in &*expr.tail {
fast_extend(target, &frame.const_pool[part.prefix.clone()]);
copy_portion(target, vars[part.var].0.clone());
}
fast_extend(target, &frame.const_pool[expr.rump.clone()]);
}
// like a substitution and equality check, but in one pass
#[inline(always)]
fn do_substitute_eq(mut compare: &[u8],
frame: &Frame,
expr: &VerifyExpr,
vars: &[(Range<usize>, Bitset)],
var_buffer: &[u8])
-> bool {
fn step(compare: &mut &[u8], slice: &[u8]) -> bool {
let len = slice.len();
if (*compare).len() < len {
return true;
}
if slice != &(*compare)[0..len] {
return true;
}
*compare = &(*compare)[len..];
return false;
}
for part in &*expr.tail {
if step(&mut compare, &frame.const_pool[part.prefix.clone()]) {
return false;
}
if step(&mut compare, &var_buffer[vars[part.var].0.clone()]) {
return false;
}
}
if step(&mut compare, &frame.const_pool[expr.rump.clone()]) {
return false;
}
return compare.is_empty();
}
// substitute with the _names_ of variables, for the final "did we prove what we
// claimed we would" check
fn do_substitute_raw(target: &mut Vec<u8>, frame: &Frame, nameset: &Nameset) {
for part in &*frame.target.tail {
fast_extend(target, &frame.const_pool[part.prefix.clone()]);
fast_extend(target, nameset.atom_name(frame.var_list[part.var]));
*target.last_mut().unwrap() |= 0x80;
}
fast_extend(target, &frame.const_pool[frame.target.rump.clone()]);
}
// generate a bitmask for a substituted expression
#[inline(always)]
fn do_substitute_vars(expr: &[ExprFragment], vars: &[(Range<usize>, Bitset)]) -> Bitset {
let mut out = Bitset::new();
for part in expr {
out |= &vars[part.var].1;
}
out
}
/// This is the main "VM" function, and responsible for ~30% of CPU time during
/// a one-shot verify operation.
fn execute_step<P: ProofBuilder>(state: &mut VerifyState<P>, index: usize) -> Result<()> {
try_assert!(index < state.prepared.len(), Diagnostic::StepOutOfRange);
let fref = match state.prepared[index] {
Hyp(ref vars, code, ref expr, ref data) => {
// hypotheses/saved steps are the easy case. unfortunately, this is
// also a very unpredictable branch
state.stack.push((data.clone(),
StackSlot {
vars: vars.clone(),
code: code,
expr: expr.clone(),
}));
return Ok(());
}
Assert(fref) => fref,
};
let sbase = try!(state.stack
.len()
.checked_sub(fref.hypotheses.len())
.ok_or(Diagnostic::ProofUnderflow));
while state.subst_info.len() < fref.mandatory_count {
// this is mildly unhygenic, since slots corresponding to $e hyps won't get cleared, but
// scopeck shouldn't generate references to them
state.subst_info.push((0..0, Bitset::new()));
}
let mut datavec = Default::default();
// process the hypotheses of the assertion we're about to apply. $f hyps
// allow the caller to define a replacement for a variable; $e hyps are
// logical hypotheses that must have been proved; the result is then
// substituted and pushed.
//
// since a variable must be $f-declared before it can appear in an $e (or
// else we'll ignore the $e), and that logical file order is reflected in
// the stack order of the hypotheses, we can do this in one pass
for (ix, hyp) in fref.hypotheses.iter().enumerate() {
let (ref data, ref slot) = state.stack[sbase + ix];
state.builder.push(&mut datavec, data.clone());
match hyp {
&Floating(_addr, var_index, typecode) => {
try_assert!(slot.code == typecode, Diagnostic::StepFloatWrongType);
state.subst_info[var_index] = (slot.expr.clone(), slot.vars.clone());
}
&Essential(_addr, ref expr) => {
try_assert!(slot.code == expr.typecode, Diagnostic::StepEssenWrongType);
try_assert!(do_substitute_eq(&state.stack_buffer[slot.expr.clone()],
fref,
&expr,
&state.subst_info,
&state.stack_buffer),
Diagnostic::StepEssenWrong);
}
}
}
// replace the hypotheses on the stack with the substituted target
// expression. does not physically remove the hypotheses from the stack
// pool, because they might have been saved steps or hypotheses, and
// deciding whether we need to move anything would swamp any savings, anyway
// - remember that this function is largely a branch predictor benchmark
let tos = state.stack_buffer.len();
do_substitute(&mut state.stack_buffer,
fref,
&fref.target,
&state.subst_info);
let ntos = state.stack_buffer.len();
state.stack.truncate(sbase);
state.stack
.push((state.builder.build(fref.valid.start, datavec, &state.stack_buffer, tos..ntos),
StackSlot {
code: fref.target.typecode,
vars: do_substitute_vars(&fref.target.tail, &state.subst_info),
expr: tos..ntos,
}));
// check $d constraints on the used assertion now that the dust has settled.
// Remember that we might have variable indexes allocated during the proof
// that are out of range for dv_map
for &(ix1, ix2) in &*fref.mandatory_dv {
for var1 in &state.subst_info[ix1].1 {
for var2 in &state.subst_info[ix2].1 {
try_assert!(var1 < state.dv_map.len() && state.dv_map[var1].has_bit(var2),
Diagnostic::ProofDvViolation);
}
}
}
Ok(())
}
fn finalize_step<P: ProofBuilder>(state: &mut VerifyState<P>) -> Result<P::Item> {
// if we get here, it's a valid proof, but was it the _right_ valid proof?
try_assert!(state.stack.len() <= 1, Diagnostic::ProofExcessEnd);
let &(ref data, ref tos) = try!(state.stack.last().ok_or(Diagnostic::ProofNoSteps));
try_assert!(tos.code == state.cur_frame.target.typecode,
Diagnostic::ProofWrongTypeEnd);
fast_clear(&mut state.temp_buffer);
do_substitute_raw(&mut state.temp_buffer, &state.cur_frame, state.nameset);
try_assert!(state.stack_buffer[tos.expr.clone()] == state.temp_buffer[..],
Diagnostic::ProofWrongExprEnd);
Ok(data.clone())
}
fn save_step<P: ProofBuilder>(state: &mut VerifyState<P>) {
let &(ref data, ref top) = state.stack.last().expect("can_save should prevent getting here");
state.prepared.push(Hyp(top.vars.clone(), top.code, top.expr.clone(), data.clone()));
}
// proofs are not self-synchronizing, so it's not likely to get >1 usable error
fn verify_proof<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>,
stmt: StatementRef<'a>)
-> Result<P::Item> {
// clear, but do not free memory
state.stack.clear();
fast_clear(&mut state.stack_buffer);
state.prepared.clear();
state.var2bit.clear();
state.dv_map = &state.cur_frame.optional_dv;
// temp_buffer is cleared before use; subst_info should be overwritten
// before use if scopeck is working correctly
// use scopeck-assigned numbers for mandatory variables and optional
// variables with active $d constraints. optional variables without active
// $d constraints are numbered on demand by map_var
for (index, &tokr) in state.cur_frame.var_list.iter().enumerate() {
state.var2bit.insert(tokr, index);
}
if stmt.proof_len() > 0 && stmt.proof_slice_at(0) == b"(" {
// this is a compressed proof
let mut i = 1;
// compressed proofs preload the hypotheses so they don't need to (but
// are not forbidden to) reference them by name
for hyp in &*state.cur_frame.hypotheses {
prepare_hypothesis(state, hyp);
}
// parse and prepare the label list before the )
loop {
try_assert!(i < stmt.proof_len(), Diagnostic::ProofUnterminatedRoster);
let chunk = stmt.proof_slice_at(i);
i += 1;
if chunk == b")" {
break;
}
try!(prepare_step(state, chunk));
}
// after ) is a packed list of varints. decode them and execute the
// corresponding steps. the varint decoder is surprisingly CPU-heavy,
// presumably due to branch overhead
let mut k = 0usize;
let mut can_save = false;
while i < stmt.proof_len() {
let chunk = stmt.proof_slice_at(i);
for &ch in chunk {
if ch >= b'A' && ch <= b'T' {
k = k * 20 + (ch - b'A') as usize;
try!(execute_step(state, k));
k = 0;
can_save = true;
} else if ch >= b'U' && ch <= b'Y' {
k = k * 5 + 1 + (ch - b'U') as usize;
try_assert!(k < (u32::max_value() as usize / 20) - 1,
Diagnostic::ProofMalformedVarint);
can_save = false;
} else if ch == b'Z' {
try_assert!(can_save, Diagnostic::ProofInvalidSave);
save_step(state);
can_save = false;
} else if ch == b'?' {
try_assert!(k == 0, Diagnostic::ProofMalformedVarint);
return Err(Diagnostic::ProofIncomplete);
}
}
i += 1;
}
try_assert!(k == 0, Diagnostic::ProofMalformedVarint);
} else {
let mut count = 0;
// NORMAL mode proofs are just a list of steps, with no saving provision
for i in 0..stmt.proof_len() {
let chunk = stmt.proof_slice_at(i);
try_assert!(chunk != b"?", Diagnostic::ProofIncomplete);
try!(prepare_step(state, chunk));
try!(execute_step(state, count));
count += 1;
}
}
finalize_step(state)
}
/// Stored result of running the verifier on a segment.
struct VerifySegment {
source: Arc<Segment>,
scope_usage: ScopeUsage,
diagnostics: HashMap<StatementAddress, Diagnostic>,
}
/// Analysis pass result for the verifier.
#[derive(Default,Clone)]
pub struct VerifyResult {
segments: HashMap<SegmentId, Arc<VerifySegment>>,
}
impl VerifyResult {
/// Report errors found during database verification.
pub fn diagnostics(&self) -> Vec<(StatementAddress, Diagnostic)> {
let mut out = Vec::new();
for vsr in self.segments.values() {
for (&sa, &ref diag) in &vsr.diagnostics {
out.push((sa, diag.clone()));
}
}
out
}
}
/// Driver which verifies each statement in a segment.
fn verify_segment(sset: &SegmentSet,
nset: &Nameset,
scopes: &ScopeResult,
sid: SegmentId)
-> VerifySegment {
let mut diagnostics = new_map();
let dummy_frame = Frame::default();
let sref = sset.segment(sid);
let mut state = VerifyState {
this_seg: sref,
scoper: ScopeReader::new(scopes),
nameset: nset,
builder: &mut (),
order: &sset.order,
cur_frame: &dummy_frame,
stack: Vec::new(),
stack_buffer: Vec::new(),
prepared: Vec::new(),
temp_buffer: Vec::new(),
subst_info: Vec::new(),
var2bit: new_map(),
dv_map: &dummy_frame.optional_dv,
};
// use the _same_ VerifyState so that memory can be reused
for stmt in sref {
// only intend to check $p statements
if stmt.statement_type() == StatementType::Provable {
// no valid frame -> no use checking
// may wish to record a secondary error?
if let Some(frame) = state.scoper.get(stmt.label()) {
state.cur_frame = frame;
if let Err(diag) = verify_proof(&mut state, stmt) {
diagnostics.insert(stmt.address(), diag);
}
}
}
}
VerifySegment {
source: (*sref).clone(),
diagnostics: diagnostics,
scope_usage: state.scoper.into_usage(),
}
}
/// Calculates or updates the verification result for a database.
pub fn verify(result: &mut VerifyResult,
segments: &Arc<SegmentSet>,
nset: &Arc<Nameset>,
scope: &Arc<ScopeResult>) {
let old = mem::replace(&mut result.segments, new_map());
let mut ssrq = Vec::new();
for sref in segments.segments() {
let segments2 = segments.clone();
let nset = nset.clone();
let scope = scope.clone();
let id = sref.id;
let old_res_o = old.get(&id).cloned();
ssrq.push(segments.exec.exec(sref.bytes(), move || {
let sref = segments2.segment(id);
if let Some(old_res) = old_res_o {
if old_res.scope_usage.valid(&nset, &scope) &&
ptr_eq::<Segment>(&old_res.source, &sref) {
return (id, old_res.clone());
}
}
if segments2.options.trace_recalc {
println!("verify({:?})", parser::guess_buffer_name(&sref.buffer));
}
(id, Arc::new(verify_segment(&segments2, &nset, &scope, id)))
}))
}
result.segments.clear();
for promise in ssrq {
let (id, arc) = promise.wait();
result.segments.insert(id, arc);
}
}
/// Parse a single $p statement, returning the result of the given
/// proof builder, or an error if the proof is faulty
pub fn verify_one<P: ProofBuilder>(sset: &SegmentSet,
nset: &Nameset,
scopes: &ScopeResult,
builder: &mut P,
stmt: StatementRef)
-> result::Result<P::Item, Diagnostic> {
let dummy_frame = Frame::default();
let mut state = VerifyState {
this_seg: stmt.segment(),
scoper: ScopeReader::new(scopes),
nameset: nset,
builder: builder,
order: &sset.order,
cur_frame: &dummy_frame,
stack: Vec::new(),
stack_buffer: Vec::new(),
prepared: Vec::new(),
temp_buffer: Vec::new(),
subst_info: Vec::new(),
var2bit: new_map(),
dv_map: &dummy_frame.optional_dv,
};
assert!(stmt.statement_type() == StatementType::Provable);
let frame = state.scoper.get(stmt.label()).unwrap();
state.cur_frame = frame;
verify_proof(&mut state, stmt)
} | tos..ntos, | random_line_split |
verify.rs | //! The proof verifier itself.
//!
//! This is structured as an analysis pass, however it does not have any outputs
//! beyond the error indications. In particular, it does not generate parsed
//! proofs as a side effect; the proof parser will need to be a separate module.
//!
//! The majority of time spent verifying proofs is spent checking steps, which
//! can be regarded as a kind of interpreter. While checking each step, there
//! is a stack of known results; each step is an operation which pops zero or
//! more results off the stack, does local checks, and pushes a new result.
//! This module has been written such that it does not allocate memory during
//! nominal operation. Memory is reused from one proof to the next, and
//! intermediate results are handled as slices in a long-lived buffer.
//!
//! Results are densely represented as byte strings, using the high bit to mark
//! the end of each token. Since most math tokens are shorter than 4 bytes,
//! this saves memory operations over an atom-based approach; but measurements
//! of the actual speed of the atom approach would not be unwelcome.
//!
//! More speculatively, strings could be represented as their universal hash
//! values, using a concatenable universal hash such as polynomial evaluation
//! mod 2^61-1 (a very convenient Mersenne prime). This would eliminate all
//! branches, and all branch mispredicts, in the memcpy and memcmp parts of this
//! code, at the expense of making scopeck even more useless to other consumers
//! than it is now.
use bit_set::Bitset;
use diag::Diagnostic;
use nameck::Atom;
use nameck::Nameset;
use parser;
use parser::Comparer;
use parser::copy_token;
use parser::NO_STATEMENT;
use parser::Segment;
use parser::SegmentId;
use parser::SegmentOrder;
use parser::SegmentRef;
use parser::StatementAddress;
use parser::StatementRef;
use parser::StatementType;
use parser::TokenPtr;
use scopeck;
use scopeck::ExprFragment;
use scopeck::Frame;
use scopeck::Hyp::*;
use scopeck::ScopeReader;
use scopeck::ScopeResult;
use scopeck::ScopeUsage;
use scopeck::VerifyExpr;
use segment_set::SegmentSet;
use std::cmp::Ordering;
use std::mem;
use std::ops::Range;
use std::result;
use std::sync::Arc;
use std::u32;
use std::usize;
use util::copy_portion;
use util::fast_clear;
use util::fast_extend;
use util::HashMap;
use util::new_map;
use util::ptr_eq;
// Proofs are very fragile and there are very few situations where errors are
// recoverable, so we bail out using Result on any error.
macro_rules! try_assert {
( $cond:expr , $($arg:tt)+ ) => {
if !$cond {
try!(Err($($arg)+))
}
}
}
/// Preparing a step means that it can be referenced using a varint in a
/// compressed proof. Compressed steps are either saved prior
/// results/hypotheses, which are copied directly onto the stack, or previously
/// proved assertions which require substitution before use.
enum PreparedStep<'a, D> {
Hyp(Bitset, Atom, Range<usize>, D),
Assert(&'a Frame),
}
use self::PreparedStep::*;
/// An entry on the stack is notionally just a string of math symbols, but DV
/// checking is faster if we track disjoint variables as a bit vector, and the
/// typecode is not realignable so it can be profitably separated.
///
/// This type would be Copy except for the fact that the bitset can require
/// overflow storage :(.
#[derive(Clone)]
pub struct StackSlot {
vars: Bitset,
code: Atom,
expr: Range<usize>,
}
/// A constructor trait for plugging in to the verifier, to collect extra data during the
/// verification pass
pub trait ProofBuilder {
/// The data type being generated
type Item: Clone;
/// The hyp gathering type
type Accum: Default;
/// Add a new hyp to the accumulation type
fn push(&mut self, hyps: &mut Self::Accum, hyp: Self::Item);
/// Create a proof data node from a statement, the data for the hypotheses,
/// and the compressed constant string
fn build(&mut self,
addr: StatementAddress,
hyps: Self::Accum,
pool: &[u8],
expr: Range<usize>)
-> Self::Item;
}
/// The "null" proof builder, which creates no extra data. This
/// is used for one-shot verification, where no extra data beyond the stack
/// information is needed.
impl ProofBuilder for () {
type Item = ();
type Accum = ();
fn push(&mut self, _: &mut (), _: ()) {}
fn build(&mut self, _: StatementAddress, _: (), _: &[u8], _: Range<usize>) -> () {}
}
/// Working memory used by the verifier on a segment. This expands for the
/// first few proofs and the rest can be handled without allocation.
struct VerifyState<'a, P: 'a + ProofBuilder> {
/// Segment we are working on
this_seg: SegmentRef<'a>,
/// Segment order oracle
order: &'a SegmentOrder,
/// Atom name oracle, used for hypotheses
nameset: &'a Nameset,
/// Used to access previously proved assertions
scoper: ScopeReader<'a>,
/// Used to produce proof trees as a side effect of verification
builder: &'a mut P,
/// The extended frame we are working on
cur_frame: &'a Frame,
/// Steps which can be invoked in the current proof, grows on every Z
prepared: Vec<PreparedStep<'a, P::Item>>,
/// Stack of active subtrees
stack: Vec<(P::Item, StackSlot)>,
/// Buffer for math strings of subtrees and hypotheses; shared to reduce
/// actual copying when a hypothesis or saved step is recalled
stack_buffer: Vec<u8>,
/// Scratch space used only when checking the final step
temp_buffer: Vec<u8>,
/// Scratch space used for a substitution mapping while invoking a prior
/// assertion
subst_info: Vec<(Range<usize>, Bitset)>,
/// Tracks mandatory and optional variables in use in the current proof
var2bit: HashMap<Atom, usize>,
/// Disjoint variable conditions in the current extended frame
dv_map: &'a [Bitset],
}
type Result<T> = result::Result<T, Diagnostic>;
/// Variables are added lazily to the extended frame. All variables which are
/// associated with hypotheses or $d constraints are numbered by scopeck, but if
/// a dummy variable is used in a proof without a $d constraint it won't be
/// discovered until we get here, and a number needs to be assigned to it.
/// Unfortunately this does mean that it'll be outside the valid range of dv_map
/// and dv_map checks need to guard against that.
fn map_var<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, token: Atom) -> usize {
let nbit = state.var2bit.len();
// actually, it _might not_ break anything to have a single variable index
// allocated by scopeck for all non-$d-ed variables. after all, they aren't
// observably disjoint.
*state.var2bit.entry(token).or_insert(nbit)
}
// the initial hypotheses are accessed directly from the initial extended frame
// to avoid having to look up their pseudo-frames by name; also, $e statements
// no longer have pseudo-frames, so this is the only way to prepare an $e
fn prepare_hypothesis<'a, P: ProofBuilder>(state: &mut VerifyState<P>, hyp: &'a scopeck::Hyp) {
let mut vars = Bitset::new();
let tos = state.stack_buffer.len();
match hyp {
&Floating(_addr, var_index, _typecode) => {
fast_extend(&mut state.stack_buffer,
state.nameset.atom_name(state.cur_frame.var_list[var_index]));
*state.stack_buffer.last_mut().unwrap() |= 0x80;
vars.set_bit(var_index); // and we have prior knowledge it's identity mapped
}
&Essential(_addr, ref expr) => {
// this is the first of many subtle variations on the "interpret an
// ExprFragment" theme in this module.
for part in &*expr.tail {
fast_extend(&mut state.stack_buffer,
&state.cur_frame.const_pool[part.prefix.clone()]);
fast_extend(&mut state.stack_buffer,
state.nameset.atom_name(state.cur_frame.var_list[part.var]));
*state.stack_buffer.last_mut().unwrap() |= 0x80;
vars.set_bit(part.var); // and we have prior knowledge it's identity mapped
}
fast_extend(&mut state.stack_buffer,
&state.cur_frame.const_pool[expr.rump.clone()]);
}
};
let ntos = state.stack_buffer.len();
state.prepared
.push(Hyp(vars,
hyp.typecode(),
tos..ntos,
state.builder.build(hyp.address(),
Default::default(),
&state.stack_buffer,
tos..ntos)));
}
/// Adds a named $e hypothesis to the prepared array. These are not kept in the
/// frame array due to infrequent use, so other measures are needed. This is
/// not normally used by compressed proofs.
///
/// This is used as a fallback when looking up a $e in the assertion hashtable
/// fails.
fn prepare_named_hyp<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> {
for hyp in &*state.cur_frame.hypotheses {
if let &Essential(addr, _) = hyp {
assert!(addr.segment_id == state.this_seg.id);
// we don't allow $e statements to be valid across segments, so this
// can be done as a local lookup in this_seg. Since we always
// invalidate the VerifySegment if the current segment has changed
// in any way, we don't even need to track dependencies here.
if state.this_seg.statement(addr.index).label() == label {
prepare_hypothesis(state, hyp);
return Ok(());
}
}
}
// whoops, not in the assertion table _or_ the extended frame
return Err(Diagnostic::StepMissing(copy_token(label)));
}
/// Used for named step references. For NORMAL proofs this is immediately
/// before execute_step, but for COMPRESSED proofs all used steps are prepared
/// ahead of time, and assigned sequential numbers for later use.
fn prepare_step<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> {
// it's either an assertion or a hypothesis. $f hyps have pseudo-frames
// which this function can use, $e don't and need to be looked up in the
// local hyp list after the frame lookup fails
let frame = match state.scoper.get(label) {
Some(fp) => fp,
None => return prepare_named_hyp(state, label),
};
// disallow circular reasoning
let valid = frame.valid;
let pos = state.cur_frame.valid.start;
try_assert!(state.order.cmp(&pos, &valid.start) == Ordering::Greater,
Diagnostic::StepUsedBeforeDefinition(copy_token(label)));
try_assert!(valid.end == NO_STATEMENT ||
pos.segment_id == valid.start.segment_id && pos.index < valid.end,
Diagnostic::StepUsedAfterScope(copy_token(label)));
if frame.stype == StatementType::Axiom || frame.stype == StatementType::Provable {
state.prepared.push(Assert(frame));
} else {
let mut vars = Bitset::new();
for &var in &*frame.var_list {
vars.set_bit(map_var(state, var));
}
let tos = state.stack_buffer.len();
fast_extend(&mut state.stack_buffer, &frame.stub_expr);
let ntos = state.stack_buffer.len();
state.prepared
.push(Hyp(vars,
frame.target.typecode,
tos..ntos,
state.builder.build(valid.start,
Default::default(),
&state.stack_buffer,
tos..ntos)));
}
Ok(())
}
// perform a substitution after it has been built in `vars`, appending to
// `target`
#[inline(always)]
fn do_substitute(target: &mut Vec<u8>,
frame: &Frame,
expr: &VerifyExpr,
vars: &[(Range<usize>, Bitset)]) {
for part in &*expr.tail {
fast_extend(target, &frame.const_pool[part.prefix.clone()]);
copy_portion(target, vars[part.var].0.clone());
}
fast_extend(target, &frame.const_pool[expr.rump.clone()]);
}
// like a substitution and equality check, but in one pass
#[inline(always)]
fn do_substitute_eq(mut compare: &[u8],
frame: &Frame,
expr: &VerifyExpr,
vars: &[(Range<usize>, Bitset)],
var_buffer: &[u8])
-> bool {
fn step(compare: &mut &[u8], slice: &[u8]) -> bool {
let len = slice.len();
if (*compare).len() < len {
return true;
}
if slice != &(*compare)[0..len] {
return true;
}
*compare = &(*compare)[len..];
return false;
}
for part in &*expr.tail {
if step(&mut compare, &frame.const_pool[part.prefix.clone()]) {
return false;
}
if step(&mut compare, &var_buffer[vars[part.var].0.clone()]) {
return false;
}
}
if step(&mut compare, &frame.const_pool[expr.rump.clone()]) {
return false;
}
return compare.is_empty();
}
// substitute with the _names_ of variables, for the final "did we prove what we
// claimed we would" check
fn do_substitute_raw(target: &mut Vec<u8>, frame: &Frame, nameset: &Nameset) {
for part in &*frame.target.tail {
fast_extend(target, &frame.const_pool[part.prefix.clone()]);
fast_extend(target, nameset.atom_name(frame.var_list[part.var]));
*target.last_mut().unwrap() |= 0x80;
}
fast_extend(target, &frame.const_pool[frame.target.rump.clone()]);
}
// generate a bitmask for a substituted expression
#[inline(always)]
fn do_substitute_vars(expr: &[ExprFragment], vars: &[(Range<usize>, Bitset)]) -> Bitset {
let mut out = Bitset::new();
for part in expr {
out |= &vars[part.var].1;
}
out
}
/// This is the main "VM" function, and responsible for ~30% of CPU time during
/// a one-shot verify operation.
fn execute_step<P: ProofBuilder>(state: &mut VerifyState<P>, index: usize) -> Result<()> {
try_assert!(index < state.prepared.len(), Diagnostic::StepOutOfRange);
let fref = match state.prepared[index] {
Hyp(ref vars, code, ref expr, ref data) => {
// hypotheses/saved steps are the easy case. unfortunately, this is
// also a very unpredictable branch
state.stack.push((data.clone(),
StackSlot {
vars: vars.clone(),
code: code,
expr: expr.clone(),
}));
return Ok(());
}
Assert(fref) => fref,
};
let sbase = try!(state.stack
.len()
.checked_sub(fref.hypotheses.len())
.ok_or(Diagnostic::ProofUnderflow));
while state.subst_info.len() < fref.mandatory_count {
// this is mildly unhygenic, since slots corresponding to $e hyps won't get cleared, but
// scopeck shouldn't generate references to them
state.subst_info.push((0..0, Bitset::new()));
}
let mut datavec = Default::default();
// process the hypotheses of the assertion we're about to apply. $f hyps
// allow the caller to define a replacement for a variable; $e hyps are
// logical hypotheses that must have been proved; the result is then
// substituted and pushed.
//
// since a variable must be $f-declared before it can appear in an $e (or
// else we'll ignore the $e), and that logical file order is reflected in
// the stack order of the hypotheses, we can do this in one pass
for (ix, hyp) in fref.hypotheses.iter().enumerate() {
let (ref data, ref slot) = state.stack[sbase + ix];
state.builder.push(&mut datavec, data.clone());
match hyp {
&Floating(_addr, var_index, typecode) => {
try_assert!(slot.code == typecode, Diagnostic::StepFloatWrongType);
state.subst_info[var_index] = (slot.expr.clone(), slot.vars.clone());
}
&Essential(_addr, ref expr) => {
try_assert!(slot.code == expr.typecode, Diagnostic::StepEssenWrongType);
try_assert!(do_substitute_eq(&state.stack_buffer[slot.expr.clone()],
fref,
&expr,
&state.subst_info,
&state.stack_buffer),
Diagnostic::StepEssenWrong);
}
}
}
// replace the hypotheses on the stack with the substituted target
// expression. does not physically remove the hypotheses from the stack
// pool, because they might have been saved steps or hypotheses, and
// deciding whether we need to move anything would swamp any savings, anyway
// - remember that this function is largely a branch predictor benchmark
let tos = state.stack_buffer.len();
do_substitute(&mut state.stack_buffer,
fref,
&fref.target,
&state.subst_info);
let ntos = state.stack_buffer.len();
state.stack.truncate(sbase);
state.stack
.push((state.builder.build(fref.valid.start, datavec, &state.stack_buffer, tos..ntos),
StackSlot {
code: fref.target.typecode,
vars: do_substitute_vars(&fref.target.tail, &state.subst_info),
expr: tos..ntos,
}));
// check $d constraints on the used assertion now that the dust has settled.
// Remember that we might have variable indexes allocated during the proof
// that are out of range for dv_map
for &(ix1, ix2) in &*fref.mandatory_dv {
for var1 in &state.subst_info[ix1].1 {
for var2 in &state.subst_info[ix2].1 {
try_assert!(var1 < state.dv_map.len() && state.dv_map[var1].has_bit(var2),
Diagnostic::ProofDvViolation);
}
}
}
Ok(())
}
fn finalize_step<P: ProofBuilder>(state: &mut VerifyState<P>) -> Result<P::Item> {
// if we get here, it's a valid proof, but was it the _right_ valid proof?
try_assert!(state.stack.len() <= 1, Diagnostic::ProofExcessEnd);
let &(ref data, ref tos) = try!(state.stack.last().ok_or(Diagnostic::ProofNoSteps));
try_assert!(tos.code == state.cur_frame.target.typecode,
Diagnostic::ProofWrongTypeEnd);
fast_clear(&mut state.temp_buffer);
do_substitute_raw(&mut state.temp_buffer, &state.cur_frame, state.nameset);
try_assert!(state.stack_buffer[tos.expr.clone()] == state.temp_buffer[..],
Diagnostic::ProofWrongExprEnd);
Ok(data.clone())
}
fn save_step<P: ProofBuilder>(state: &mut VerifyState<P>) {
let &(ref data, ref top) = state.stack.last().expect("can_save should prevent getting here");
state.prepared.push(Hyp(top.vars.clone(), top.code, top.expr.clone(), data.clone()));
}
// proofs are not self-synchronizing, so it's not likely to get >1 usable error
fn verify_proof<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>,
stmt: StatementRef<'a>)
-> Result<P::Item> {
// clear, but do not free memory
state.stack.clear();
fast_clear(&mut state.stack_buffer);
state.prepared.clear();
state.var2bit.clear();
state.dv_map = &state.cur_frame.optional_dv;
// temp_buffer is cleared before use; subst_info should be overwritten
// before use if scopeck is working correctly
// use scopeck-assigned numbers for mandatory variables and optional
// variables with active $d constraints. optional variables without active
// $d constraints are numbered on demand by map_var
for (index, &tokr) in state.cur_frame.var_list.iter().enumerate() {
state.var2bit.insert(tokr, index);
}
if stmt.proof_len() > 0 && stmt.proof_slice_at(0) == b"(" {
// this is a compressed proof
let mut i = 1;
// compressed proofs preload the hypotheses so they don't need to (but
// are not forbidden to) reference them by name
for hyp in &*state.cur_frame.hypotheses {
prepare_hypothesis(state, hyp);
}
// parse and prepare the label list before the )
loop {
try_assert!(i < stmt.proof_len(), Diagnostic::ProofUnterminatedRoster);
let chunk = stmt.proof_slice_at(i);
i += 1;
if chunk == b")" {
break;
}
try!(prepare_step(state, chunk));
}
// after ) is a packed list of varints. decode them and execute the
// corresponding steps. the varint decoder is surprisingly CPU-heavy,
// presumably due to branch overhead
let mut k = 0usize;
let mut can_save = false;
while i < stmt.proof_len() {
let chunk = stmt.proof_slice_at(i);
for &ch in chunk {
if ch >= b'A' && ch <= b'T' {
k = k * 20 + (ch - b'A') as usize;
try!(execute_step(state, k));
k = 0;
can_save = true;
} else if ch >= b'U' && ch <= b'Y' {
k = k * 5 + 1 + (ch - b'U') as usize;
try_assert!(k < (u32::max_value() as usize / 20) - 1,
Diagnostic::ProofMalformedVarint);
can_save = false;
} else if ch == b'Z' {
try_assert!(can_save, Diagnostic::ProofInvalidSave);
save_step(state);
can_save = false;
} else if ch == b'?' {
try_assert!(k == 0, Diagnostic::ProofMalformedVarint);
return Err(Diagnostic::ProofIncomplete);
}
}
i += 1;
}
try_assert!(k == 0, Diagnostic::ProofMalformedVarint);
} else {
let mut count = 0;
// NORMAL mode proofs are just a list of steps, with no saving provision
for i in 0..stmt.proof_len() {
let chunk = stmt.proof_slice_at(i);
try_assert!(chunk != b"?", Diagnostic::ProofIncomplete);
try!(prepare_step(state, chunk));
try!(execute_step(state, count));
count += 1;
}
}
finalize_step(state)
}
/// Stored result of running the verifier on a segment.
struct VerifySegment {
source: Arc<Segment>,
scope_usage: ScopeUsage,
diagnostics: HashMap<StatementAddress, Diagnostic>,
}
/// Analysis pass result for the verifier.
#[derive(Default,Clone)]
pub struct VerifyResult {
segments: HashMap<SegmentId, Arc<VerifySegment>>,
}
impl VerifyResult {
/// Report errors found during database verification.
pub fn diagnostics(&self) -> Vec<(StatementAddress, Diagnostic)> {
let mut out = Vec::new();
for vsr in self.segments.values() {
for (&sa, &ref diag) in &vsr.diagnostics {
out.push((sa, diag.clone()));
}
}
out
}
}
/// Driver which verifies each statement in a segment.
fn verify_segment(sset: &SegmentSet,
nset: &Nameset,
scopes: &ScopeResult,
sid: SegmentId)
-> VerifySegment {
let mut diagnostics = new_map();
let dummy_frame = Frame::default();
let sref = sset.segment(sid);
let mut state = VerifyState {
this_seg: sref,
scoper: ScopeReader::new(scopes),
nameset: nset,
builder: &mut (),
order: &sset.order,
cur_frame: &dummy_frame,
stack: Vec::new(),
stack_buffer: Vec::new(),
prepared: Vec::new(),
temp_buffer: Vec::new(),
subst_info: Vec::new(),
var2bit: new_map(),
dv_map: &dummy_frame.optional_dv,
};
// use the _same_ VerifyState so that memory can be reused
for stmt in sref {
// only intend to check $p statements
if stmt.statement_type() == StatementType::Provable {
// no valid frame -> no use checking
// may wish to record a secondary error?
if let Some(frame) = state.scoper.get(stmt.label()) |
}
}
VerifySegment {
source: (*sref).clone(),
diagnostics: diagnostics,
scope_usage: state.scoper.into_usage(),
}
}
/// Calculates or updates the verification result for a database.
pub fn verify(result: &mut VerifyResult,
segments: &Arc<SegmentSet>,
nset: &Arc<Nameset>,
scope: &Arc<ScopeResult>) {
let old = mem::replace(&mut result.segments, new_map());
let mut ssrq = Vec::new();
for sref in segments.segments() {
let segments2 = segments.clone();
let nset = nset.clone();
let scope = scope.clone();
let id = sref.id;
let old_res_o = old.get(&id).cloned();
ssrq.push(segments.exec.exec(sref.bytes(), move || {
let sref = segments2.segment(id);
if let Some(old_res) = old_res_o {
if old_res.scope_usage.valid(&nset, &scope) &&
ptr_eq::<Segment>(&old_res.source, &sref) {
return (id, old_res.clone());
}
}
if segments2.options.trace_recalc {
println!("verify({:?})", parser::guess_buffer_name(&sref.buffer));
}
(id, Arc::new(verify_segment(&segments2, &nset, &scope, id)))
}))
}
result.segments.clear();
for promise in ssrq {
let (id, arc) = promise.wait();
result.segments.insert(id, arc);
}
}
/// Parse a single $p statement, returning the result of the given
/// proof builder, or an error if the proof is faulty
pub fn verify_one<P: ProofBuilder>(sset: &SegmentSet,
nset: &Nameset,
scopes: &ScopeResult,
builder: &mut P,
stmt: StatementRef)
-> result::Result<P::Item, Diagnostic> {
let dummy_frame = Frame::default();
let mut state = VerifyState {
this_seg: stmt.segment(),
scoper: ScopeReader::new(scopes),
nameset: nset,
builder: builder,
order: &sset.order,
cur_frame: &dummy_frame,
stack: Vec::new(),
stack_buffer: Vec::new(),
prepared: Vec::new(),
temp_buffer: Vec::new(),
subst_info: Vec::new(),
var2bit: new_map(),
dv_map: &dummy_frame.optional_dv,
};
assert!(stmt.statement_type() == StatementType::Provable);
let frame = state.scoper.get(stmt.label()).unwrap();
state.cur_frame = frame;
verify_proof(&mut state, stmt)
}
| {
state.cur_frame = frame;
if let Err(diag) = verify_proof(&mut state, stmt) {
diagnostics.insert(stmt.address(), diag);
}
} | conditional_block |
pat_fitting.py | """ Functionality to fit PAT models
For more details see: https://arxiv.org/abs/1803.10352
@author: diepencjv / eendebakpt
"""
import matplotlib.pyplot as plt
# %% Load packages
import numpy as np
import scipy.constants
import scipy.ndimage
import scipy.signal
from qtt.pgeometry import robustCost
# %%
ueV2Hz = scipy.constants.e / scipy.constants.h * 1e-6
def one_ele_pat_model(x_data, pp):
r""" Model for one electron pat
This is :math:`\phi=\sqrt{ { ( leverarm * (x-x_0) ) }^2 + 4 t^2 } \mathrm{ueV2Hz}`
Args:
x_data (array): detuning (mV)
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
For more details see: https://arxiv.org/abs/1803.10352
"""
if len(pp) == 1:
pp = pp[0]
xoffset = pp[0]
leverarm = pp[1]
t = pp[2]
y = np.sqrt(np.power((x_data - xoffset) * leverarm, 2) + 4 * t**2) * ueV2Hz
return y
def two_ele_pat_model(x_data, pp):
r""" Model for two electron pat
This is \phi = \pm \frac{leverarm}{2} (x - x0) +
\frac{1}{2} \sqrt{( leverarm (x - x0) )^2 + 8 t^2 }
Args:
x_data (array): detuning (mV)
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
"""
if len(pp) == 1:
pp = pp[0]
xoffset = pp[0]
leverarm = pp[1]
t = pp[2]
yl = (- leverarm * (x_data - xoffset) / 2 + 1 / 2 * np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2)) * ueV2Hz
ym = np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2) * ueV2Hz
yr = (leverarm * (x_data - xoffset) / 2 + 1 / 2 * np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2)) * ueV2Hz
return yl, ym, yr
# %%
class pat_score():
def __init__(self, even_branches=[True, True, True], branch_reduction=None):
""" Class to calculate scores for PAT fitting """
self.even_branches = even_branches
self.branch_reduction = branch_reduction
def pat_one_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
""" Calculate score for pat one electron model
Args:
xd (array): x coordinates of peaks in sensor signal
yd (array): y coordinates of peaks in sensor signal
pp (array): model parameters
"""
ydatax = one_ele_pat_model(xd, pp)
charge_change = np.abs(np.abs(pp[1]) * (xd - pp[0]) / np.sqrt((pp[1] * (xd - pp[0]))**2 + 4 * pp[2]**2))
sc = np.abs(ydatax - yd) * charge_change
scalefac = thr
sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
if weights is not None:
sc = sc * weights
sc = np.linalg.norm(sc, ord=4) / sc.size
if pp[1] < 10:
sc *= 10
if pp[2] > 150:
sc *= 10
return sc
def pat_two_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
""" Calculate score for pat two electron model
Args:
xd (array): x coordinates of peaks in sensor signal
yd (array): y coordinates of peaks in sensor signal
pp (array): model parameters
"""
ymodel = two_ele_pat_model(xd, pp)
denom = np.sqrt((pp[1] * (xd - pp[0]))**2 + 8 * pp[2]**2)
charge_changes = []
charge_changes.append(1 / 2 * (1 + pp[1] * (xd - pp[0]) / denom))
charge_changes.append(np.abs(pp[1] * (xd - pp[0]) / denom))
charge_changes.append(1 / 2 * (1 - pp[1] * (xd - pp[0]) / denom))
linesize = ymodel[0].shape[0]
if self.branch_reduction is None or self.branch_reduction == 'minimum':
sc = np.inf * np.ones(linesize)
for idval, val in enumerate(self.even_branches):
if val:
sc = np.minimum(sc, np.abs(ymodel[idval] - yd))
elif self.branch_reduction == 'mean':
sc = []
for idval, val in enumerate(self.even_branches):
if val:
sc.append(np.abs(ymodel[idval] - yd))
sc = np.mean(np.array(sc), axis=1)
else:
raise NotImplementedError('branch_reduction %s not implemented' % self.branch_reduction)
scalefac = thr
sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
if weights is not None:
sc *= weights
sc = np.linalg.norm(sc, ord=4) / sc.size
if pp[1] < 10:
sc *= 10000
return sc
# %%
def pre_process_pat(x_data, y_data, background, z_data, fig=None):
""" Pre-process a pair of background and sensor signal from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequency (Hz)
background (array): e.g. sensor signal of POL scan
z_data (array): sensor signal of PAT scan
fig (None or int)
Returns:
imx (array)
imq (array)
backgr_sm (array)
"""
backgr_sm = scipy.ndimage.gaussian_filter(background, sigma=5)
imq = z_data - backgr_sm
imq = imq - np.mean(imq, axis=1).reshape((-1, 1))
ks = 5
w = np.ones((1, ks)) / ks
imx = scipy.ndimage.convolve(imq, w, mode='nearest')
qq = np.percentile(imx, [5, 50, 95])
imx = imx - qq[1]
qq = np.percentile(imx, [2, 50, 98])
scale = np.mean([-qq[0], qq[2]])
imx = imx / scale
if fig is not None:
# y_data = np.arange(imq.shape[0])
plt.figure(fig)
plt.clf()
plt.subplot(2, 2, 1)
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('Input data')
plt.subplot(2, 2, 2)
plt.pcolormesh(x_data, y_data, imq, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imq')
plt.subplot(2, 2, 3)
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imx')
plt.tight_layout()
return imx, imq, backgr_sm
# %%
def detect_peaks(x_data, y_data, imx, sigmamv=.25, fig=400, period=1e-3, model='one_ele'):
""" Detect peaks in sensor signal, e.g. from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
imx (array): sensor signal of PAT scan, background is usually already subtracted
Returns:
detected_peaks (array): coordinates of detected peaks
results (dict): additional fitting data
"""
thr = .4
thr2 = .6
# chop off part of the data, because T1 is relatively long
mvedge = .1 * (np.max(x_data) - np.min(x_data))
if model == 'two_ele':
mvthr = (np.max(x_data) - np.min(x_data)) * .25e-3 / period # T1 \approx .1 ms [Ref]
horz_vals = x_data[(x_data > (np.min(x_data) + np.maximum(mvthr, mvedge)))
& (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + np.maximum(mvthr, mvedge))) & (x_data < (np.max(x_data) - mvedge))]
elif model == 'one_ele':
horz_vals = x_data[(x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
else:
raise Exception('no such model')
scalefac = (np.max(horz_vals) - np.min(horz_vals)) / (z_data.shape[1] - 1) # mV/pixel
# smooth input image
kern = scipy.signal.gaussian(71, std=sigmamv / scalefac)
kern = kern / kern.sum()
imx2 = scipy.ndimage.convolve(z_data, kern.reshape((1, -1)), mode='nearest')
# get maximum value for each row
mm1 = np.argmax(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm1]
idx1 = np.where(np.abs(val) > thr)[0] # only select indices above scaled threshold
xx1 = np.vstack((horz_vals[mm1[idx1]], y_data[idx1])) # position of selected points
# get minimum value for each row
mm2 = np.argmin(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm2]
# remove points below threshold
idx2 = np.where(np.abs(val) > thr)[0]
xx2 = np.vstack((horz_vals[mm2[idx2]], y_data[idx2]))
# join the two sets
detected_peaks = np.hstack((xx1, xx2))
# determine weights for the points
qq = np.intersect1d(idx1, idx2)
q1 = np.searchsorted(idx1, qq)
q2 = np.searchsorted(idx2, qq)
w1 = .5 * np.ones(len(idx1))
w1[q1] = 1
w2 = .5 * np.ones(len(idx2))
w2[q2] = 1
wfac = .1
w1[np.abs(val[idx1]) < thr2] = wfac
w2[np.abs(val[idx2]) < thr2] = wfac
weights = np.hstack((w1, w2))
if fig is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.plot(horz_vals[mm1[idx1]], y_data[idx1], '.b', markersize=14, label='idx1')
plt.plot(horz_vals[mm2[idx2]], y_data[idx2], '.r', markersize=14, label='idx2')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
return detected_peaks, {'weights': weights, 'detected_peaks': detected_peaks}
# %%
def fit_pat_to_peaks(pp, xd, yd, trans='one_ele', even_branches=[True, True, True], weights=None, xoffset=None, verbose=1, branch_reduction=None):
""" Core fitting function for PAT measurements, based on detected resonance
peaks (see detect_peaks).
Args:
pp (array): initial guess of fit parameters
xd (array): x coordinates of peaks in sensor signal (mV)
yd (array): y coordinates of peaks in sensor signal (Hz)
trans (string): 'one_ele' or 'two_ele'
xoffset (float): the offset from zero detuning in voltage. If this has been determined before, then fixing this
parameter reduces the fitting time.
"""
ppx = pp.copy()
pat_score_class = pat_score(even_branches=even_branches, branch_reduction=branch_reduction)
if trans == 'one_ele':
pat_model_score = pat_score_class.pat_one_ele_score
elif trans == 'two_ele':
pat_model_score = pat_score_class.pat_two_ele_score
else:
raise Exception('This model %s is not implemented.' % trans)
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, [x, pp[1], ppx[2]], weights=weights)
r = scipy.optimize.brute(ff, ranges=[(pp[0] - 2, pp[0] + 2)], Ns=20, disp=False)
ppx[0] = r
sc0 = pat_model_score(xd, yd, pp, weights=weights)
sc = pat_model_score(xd, yd, ppx, weights=weights)
if verbose >= 2:
print('fit_pat_model: %s: %.4f -> %.4f' % (['%.2f' % x for x in ppx], sc0 / 1e6, sc / 1e6))
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, x, weights=weights)
r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
ppx = r['x']
else:
def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
method='Powell', options=dict({'disp': verbose >= 1}))
ppx = np.insert(r['x'], 0, xoffset)
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, x, weights=weights, thr=.5e9)
r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
ppx = r['x']
else:
def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
method='Powell', options=dict({'disp': verbose >= 1}))
ppx = np.insert(r['x'], 0, xoffset)
sc0 = pat_model_score(xd, yd, pp, weights=weights)
sc = pat_model_score(xd, yd, ppx, weights=weights)
if verbose:
print('fit_pat_model: %.4f -> %.4f' % (sc0 / 1e6, sc / 1e6))
return ppx
# %%
def fit_pat(x_data, y_data, z_data, background, trans='one_ele', period=1e-3,
even_branches=[True, True, True], par_guess=None, xoffset=None, verbose=1):
""" Wrapper for fitting the energy transitions in a PAT scan.
For more details see: https://arxiv.org/abs/1803.10352
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
z_data (array): sensor signal of PAT scan
background (array): sensor signal of POL scan
trans (str): can be 'one_ele' or 'two_ele'
even_branches (list of booleans): indicated which branches of the model to use for fitting
Returns:
pp (array): fitted xoffset (mV), leverarm (ueV/mV) and t (ueV)
results (dict): contains keys par_guess (array), imq (array) re-scaled and re-centered sensor signal, imextent (array), xd, yd, ydf
"""
imx, imq, _ = pre_process_pat(x_data, y_data, background, z_data)
xx, dpresults = detect_peaks(x_data, y_data, imx, model=trans, period=period, sigmamv=.05, fig=None)
xd = xx[0, :]
yd = xx[1, :]
if par_guess is None:
par_guess = np.array([np.nanmean(x_data), 65, 10])
pp = fit_pat_to_peaks(par_guess, xd, yd, trans=trans, even_branches=even_branches,
xoffset=xoffset, verbose=0)
if trans == 'one_ele':
model = one_ele_pat_model
elif trans == 'two_ele':
model = two_ele_pat_model
ydf = model(xd, pp)
return pp, {'imq': imq, 'xd': xd, 'yd': yd, 'ydf': ydf, 'par_guess': par_guess}
# %%
def plot_pat_fit(x_data, y_data, z_data, pp, trans='one_ele', fig=400, title='Fitted model', label='model'):
""" Plot the fitted model of the PAT transition(s)
Args:
x_data (array): detuning in millivolts
y_data (array): frequencies
z_data (array): sensor signal of PAT scan
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
model (function): model describing the PAT transitions
"""
if z_data is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.title(title)
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
if trans == 'one_ele':
|
elif trans == 'two_ele':
model = two_ele_pat_model
ylfit, ymfit, yrfit = model(x_data, pp)
plt.plot(x_data, ylfit, '-g', label='S-T')
plt.plot(x_data, ymfit, '-r', label='S-S')
plt.plot(x_data, yrfit, '-b', label='T-S')
plt.ylim([np.min(y_data), np.max(y_data)])
# %%
def show_traces(x_data, z_data, fig=100, direction='h', title=None):
""" Show traces of an image
Args:
x_data (array): detuning in millivolts
z_data (array): input image. rows are taken as the traces
fig (int): number for figure window to use
direction (str): can be 'h' or 'v'
"""
plt.figure(fig)
plt.clf()
if direction == 'v' or direction == 'vertical':
for ii, l in enumerate(z_data.T):
c = []
c = plt.cm.jet(float(ii) / z_data.shape[1])
plt.plot(x_data, l, '', color=c)
if title is None:
title = 'Blue: left vertical lines, red: right lines'
plt.title(title)
else:
for ii, l in enumerate(z_data):
c = []
c = plt.cm.jet(float(ii) / z_data.shape[0])
plt.plot(x_data, l, '', color=c)
if title is None:
title = 'Blue: top lines, red: bottom lines'
plt.title(title)
plt.xlabel('Detuning (mV)')
plt.ylabel('Signal (a.u.)')
| model = one_ele_pat_model
yfit = model(x_data, pp)
plt.plot(x_data, yfit, '-g', label=label)
yfit_t0 = model(x_data, np.array([pp[0], pp[1], 0]))
plt.plot(x_data, yfit_t0, '--g') | conditional_block |
pat_fitting.py | """ Functionality to fit PAT models
For more details see: https://arxiv.org/abs/1803.10352
@author: diepencjv / eendebakpt
"""
import matplotlib.pyplot as plt
# %% Load packages
import numpy as np
import scipy.constants
import scipy.ndimage
import scipy.signal
from qtt.pgeometry import robustCost
# %%
ueV2Hz = scipy.constants.e / scipy.constants.h * 1e-6
def one_ele_pat_model(x_data, pp):
r""" Model for one electron pat
This is :math:`\phi=\sqrt{ { ( leverarm * (x-x_0) ) }^2 + 4 t^2 } \mathrm{ueV2Hz}`
Args:
x_data (array): detuning (mV)
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
For more details see: https://arxiv.org/abs/1803.10352
"""
if len(pp) == 1:
pp = pp[0]
xoffset = pp[0]
leverarm = pp[1]
t = pp[2]
y = np.sqrt(np.power((x_data - xoffset) * leverarm, 2) + 4 * t**2) * ueV2Hz
return y
def two_ele_pat_model(x_data, pp):
r""" Model for two electron pat
This is \phi = \pm \frac{leverarm}{2} (x - x0) +
\frac{1}{2} \sqrt{( leverarm (x - x0) )^2 + 8 t^2 }
Args:
x_data (array): detuning (mV)
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
"""
if len(pp) == 1:
pp = pp[0]
xoffset = pp[0]
leverarm = pp[1]
t = pp[2]
yl = (- leverarm * (x_data - xoffset) / 2 + 1 / 2 * np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2)) * ueV2Hz
ym = np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2) * ueV2Hz
yr = (leverarm * (x_data - xoffset) / 2 + 1 / 2 * np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2)) * ueV2Hz
return yl, ym, yr
# %%
class pat_score():
def __init__(self, even_branches=[True, True, True], branch_reduction=None):
""" Class to calculate scores for PAT fitting """
self.even_branches = even_branches
self.branch_reduction = branch_reduction
def pat_one_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
""" Calculate score for pat one electron model
Args:
xd (array): x coordinates of peaks in sensor signal
yd (array): y coordinates of peaks in sensor signal
pp (array): model parameters
"""
ydatax = one_ele_pat_model(xd, pp)
charge_change = np.abs(np.abs(pp[1]) * (xd - pp[0]) / np.sqrt((pp[1] * (xd - pp[0]))**2 + 4 * pp[2]**2))
sc = np.abs(ydatax - yd) * charge_change
scalefac = thr
sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
if weights is not None:
sc = sc * weights
sc = np.linalg.norm(sc, ord=4) / sc.size
if pp[1] < 10:
sc *= 10
if pp[2] > 150:
sc *= 10
return sc
def pat_two_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
""" Calculate score for pat two electron model
Args:
xd (array): x coordinates of peaks in sensor signal
yd (array): y coordinates of peaks in sensor signal
pp (array): model parameters
"""
ymodel = two_ele_pat_model(xd, pp)
denom = np.sqrt((pp[1] * (xd - pp[0]))**2 + 8 * pp[2]**2)
charge_changes = []
charge_changes.append(1 / 2 * (1 + pp[1] * (xd - pp[0]) / denom))
charge_changes.append(np.abs(pp[1] * (xd - pp[0]) / denom))
charge_changes.append(1 / 2 * (1 - pp[1] * (xd - pp[0]) / denom))
linesize = ymodel[0].shape[0]
if self.branch_reduction is None or self.branch_reduction == 'minimum':
sc = np.inf * np.ones(linesize)
for idval, val in enumerate(self.even_branches):
if val:
sc = np.minimum(sc, np.abs(ymodel[idval] - yd))
elif self.branch_reduction == 'mean':
sc = []
for idval, val in enumerate(self.even_branches):
if val:
sc.append(np.abs(ymodel[idval] - yd))
sc = np.mean(np.array(sc), axis=1)
else:
raise NotImplementedError('branch_reduction %s not implemented' % self.branch_reduction)
scalefac = thr
sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
if weights is not None:
sc *= weights
sc = np.linalg.norm(sc, ord=4) / sc.size
if pp[1] < 10:
sc *= 10000
return sc
# %%
def pre_process_pat(x_data, y_data, background, z_data, fig=None):
""" Pre-process a pair of background and sensor signal from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequency (Hz)
background (array): e.g. sensor signal of POL scan
z_data (array): sensor signal of PAT scan
fig (None or int)
Returns:
imx (array)
imq (array)
backgr_sm (array)
"""
backgr_sm = scipy.ndimage.gaussian_filter(background, sigma=5)
imq = z_data - backgr_sm
imq = imq - np.mean(imq, axis=1).reshape((-1, 1))
ks = 5
w = np.ones((1, ks)) / ks
imx = scipy.ndimage.convolve(imq, w, mode='nearest')
qq = np.percentile(imx, [5, 50, 95])
imx = imx - qq[1]
qq = np.percentile(imx, [2, 50, 98])
scale = np.mean([-qq[0], qq[2]])
imx = imx / scale
if fig is not None:
# y_data = np.arange(imq.shape[0])
plt.figure(fig)
plt.clf()
plt.subplot(2, 2, 1)
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('Input data')
plt.subplot(2, 2, 2)
plt.pcolormesh(x_data, y_data, imq, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imq')
plt.subplot(2, 2, 3)
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imx')
plt.tight_layout()
return imx, imq, backgr_sm
# %%
def detect_peaks(x_data, y_data, imx, sigmamv=.25, fig=400, period=1e-3, model='one_ele'):
""" Detect peaks in sensor signal, e.g. from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
imx (array): sensor signal of PAT scan, background is usually already subtracted
Returns:
detected_peaks (array): coordinates of detected peaks
results (dict): additional fitting data
"""
thr = .4
thr2 = .6
# chop off part of the data, because T1 is relatively long
mvedge = .1 * (np.max(x_data) - np.min(x_data))
if model == 'two_ele':
mvthr = (np.max(x_data) - np.min(x_data)) * .25e-3 / period # T1 \approx .1 ms [Ref]
horz_vals = x_data[(x_data > (np.min(x_data) + np.maximum(mvthr, mvedge)))
& (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + np.maximum(mvthr, mvedge))) & (x_data < (np.max(x_data) - mvedge))]
elif model == 'one_ele':
horz_vals = x_data[(x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
else:
raise Exception('no such model')
scalefac = (np.max(horz_vals) - np.min(horz_vals)) / (z_data.shape[1] - 1) # mV/pixel
# smooth input image
kern = scipy.signal.gaussian(71, std=sigmamv / scalefac)
kern = kern / kern.sum()
imx2 = scipy.ndimage.convolve(z_data, kern.reshape((1, -1)), mode='nearest')
# get maximum value for each row
mm1 = np.argmax(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm1]
idx1 = np.where(np.abs(val) > thr)[0] # only select indices above scaled threshold
xx1 = np.vstack((horz_vals[mm1[idx1]], y_data[idx1])) # position of selected points
# get minimum value for each row
mm2 = np.argmin(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm2]
# remove points below threshold
idx2 = np.where(np.abs(val) > thr)[0]
xx2 = np.vstack((horz_vals[mm2[idx2]], y_data[idx2]))
# join the two sets
detected_peaks = np.hstack((xx1, xx2))
# determine weights for the points
qq = np.intersect1d(idx1, idx2)
q1 = np.searchsorted(idx1, qq)
q2 = np.searchsorted(idx2, qq)
w1 = .5 * np.ones(len(idx1))
w1[q1] = 1
w2 = .5 * np.ones(len(idx2))
w2[q2] = 1
wfac = .1
w1[np.abs(val[idx1]) < thr2] = wfac
w2[np.abs(val[idx2]) < thr2] = wfac
weights = np.hstack((w1, w2))
if fig is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.plot(horz_vals[mm1[idx1]], y_data[idx1], '.b', markersize=14, label='idx1')
plt.plot(horz_vals[mm2[idx2]], y_data[idx2], '.r', markersize=14, label='idx2')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
|
def fit_pat_to_peaks(pp, xd, yd, trans='one_ele', even_branches=[True, True, True], weights=None, xoffset=None, verbose=1, branch_reduction=None):
""" Core fitting function for PAT measurements, based on detected resonance
peaks (see detect_peaks).
Args:
pp (array): initial guess of fit parameters
xd (array): x coordinates of peaks in sensor signal (mV)
yd (array): y coordinates of peaks in sensor signal (Hz)
trans (string): 'one_ele' or 'two_ele'
xoffset (float): the offset from zero detuning in voltage. If this has been determined before, then fixing this
parameter reduces the fitting time.
"""
ppx = pp.copy()
pat_score_class = pat_score(even_branches=even_branches, branch_reduction=branch_reduction)
if trans == 'one_ele':
pat_model_score = pat_score_class.pat_one_ele_score
elif trans == 'two_ele':
pat_model_score = pat_score_class.pat_two_ele_score
else:
raise Exception('This model %s is not implemented.' % trans)
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, [x, pp[1], ppx[2]], weights=weights)
r = scipy.optimize.brute(ff, ranges=[(pp[0] - 2, pp[0] + 2)], Ns=20, disp=False)
ppx[0] = r
sc0 = pat_model_score(xd, yd, pp, weights=weights)
sc = pat_model_score(xd, yd, ppx, weights=weights)
if verbose >= 2:
print('fit_pat_model: %s: %.4f -> %.4f' % (['%.2f' % x for x in ppx], sc0 / 1e6, sc / 1e6))
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, x, weights=weights)
r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
ppx = r['x']
else:
def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
method='Powell', options=dict({'disp': verbose >= 1}))
ppx = np.insert(r['x'], 0, xoffset)
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, x, weights=weights, thr=.5e9)
r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
ppx = r['x']
else:
def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
method='Powell', options=dict({'disp': verbose >= 1}))
ppx = np.insert(r['x'], 0, xoffset)
sc0 = pat_model_score(xd, yd, pp, weights=weights)
sc = pat_model_score(xd, yd, ppx, weights=weights)
if verbose:
print('fit_pat_model: %.4f -> %.4f' % (sc0 / 1e6, sc / 1e6))
return ppx
# %%
def fit_pat(x_data, y_data, z_data, background, trans='one_ele', period=1e-3,
even_branches=[True, True, True], par_guess=None, xoffset=None, verbose=1):
""" Wrapper for fitting the energy transitions in a PAT scan.
For more details see: https://arxiv.org/abs/1803.10352
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
z_data (array): sensor signal of PAT scan
background (array): sensor signal of POL scan
trans (str): can be 'one_ele' or 'two_ele'
even_branches (list of booleans): indicated which branches of the model to use for fitting
Returns:
pp (array): fitted xoffset (mV), leverarm (ueV/mV) and t (ueV)
results (dict): contains keys par_guess (array), imq (array) re-scaled and re-centered sensor signal, imextent (array), xd, yd, ydf
"""
imx, imq, _ = pre_process_pat(x_data, y_data, background, z_data)
xx, dpresults = detect_peaks(x_data, y_data, imx, model=trans, period=period, sigmamv=.05, fig=None)
xd = xx[0, :]
yd = xx[1, :]
if par_guess is None:
par_guess = np.array([np.nanmean(x_data), 65, 10])
pp = fit_pat_to_peaks(par_guess, xd, yd, trans=trans, even_branches=even_branches,
xoffset=xoffset, verbose=0)
if trans == 'one_ele':
model = one_ele_pat_model
elif trans == 'two_ele':
model = two_ele_pat_model
ydf = model(xd, pp)
return pp, {'imq': imq, 'xd': xd, 'yd': yd, 'ydf': ydf, 'par_guess': par_guess}
# %%
def plot_pat_fit(x_data, y_data, z_data, pp, trans='one_ele', fig=400, title='Fitted model', label='model'):
""" Plot the fitted model of the PAT transition(s)
Args:
x_data (array): detuning in millivolts
y_data (array): frequencies
z_data (array): sensor signal of PAT scan
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
model (function): model describing the PAT transitions
"""
if z_data is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.title(title)
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
if trans == 'one_ele':
model = one_ele_pat_model
yfit = model(x_data, pp)
plt.plot(x_data, yfit, '-g', label=label)
yfit_t0 = model(x_data, np.array([pp[0], pp[1], 0]))
plt.plot(x_data, yfit_t0, '--g')
elif trans == 'two_ele':
model = two_ele_pat_model
ylfit, ymfit, yrfit = model(x_data, pp)
plt.plot(x_data, ylfit, '-g', label='S-T')
plt.plot(x_data, ymfit, '-r', label='S-S')
plt.plot(x_data, yrfit, '-b', label='T-S')
plt.ylim([np.min(y_data), np.max(y_data)])
# %%
def show_traces(x_data, z_data, fig=100, direction='h', title=None):
""" Show traces of an image
Args:
x_data (array): detuning in millivolts
z_data (array): input image. rows are taken as the traces
fig (int): number for figure window to use
direction (str): can be 'h' or 'v'
"""
plt.figure(fig)
plt.clf()
if direction == 'v' or direction == 'vertical':
for ii, l in enumerate(z_data.T):
c = []
c = plt.cm.jet(float(ii) / z_data.shape[1])
plt.plot(x_data, l, '', color=c)
if title is None:
title = 'Blue: left vertical lines, red: right lines'
plt.title(title)
else:
for ii, l in enumerate(z_data):
c = []
c = plt.cm.jet(float(ii) / z_data.shape[0])
plt.plot(x_data, l, '', color=c)
if title is None:
title = 'Blue: top lines, red: bottom lines'
plt.title(title)
plt.xlabel('Detuning (mV)')
plt.ylabel('Signal (a.u.)') | return detected_peaks, {'weights': weights, 'detected_peaks': detected_peaks}
# %%
| random_line_split |
pat_fitting.py | """ Functionality to fit PAT models
For more details see: https://arxiv.org/abs/1803.10352
@author: diepencjv / eendebakpt
"""
import matplotlib.pyplot as plt
# %% Load packages
import numpy as np
import scipy.constants
import scipy.ndimage
import scipy.signal
from qtt.pgeometry import robustCost
# %%
ueV2Hz = scipy.constants.e / scipy.constants.h * 1e-6
def one_ele_pat_model(x_data, pp):
r""" Model for one electron pat
This is :math:`\phi=\sqrt{ { ( leverarm * (x-x_0) ) }^2 + 4 t^2 } \mathrm{ueV2Hz}`
Args:
x_data (array): detuning (mV)
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
For more details see: https://arxiv.org/abs/1803.10352
"""
if len(pp) == 1:
pp = pp[0]
xoffset = pp[0]
leverarm = pp[1]
t = pp[2]
y = np.sqrt(np.power((x_data - xoffset) * leverarm, 2) + 4 * t**2) * ueV2Hz
return y
def two_ele_pat_model(x_data, pp):
|
# %%
class pat_score():
def __init__(self, even_branches=[True, True, True], branch_reduction=None):
""" Class to calculate scores for PAT fitting """
self.even_branches = even_branches
self.branch_reduction = branch_reduction
def pat_one_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
""" Calculate score for pat one electron model
Args:
xd (array): x coordinates of peaks in sensor signal
yd (array): y coordinates of peaks in sensor signal
pp (array): model parameters
"""
ydatax = one_ele_pat_model(xd, pp)
charge_change = np.abs(np.abs(pp[1]) * (xd - pp[0]) / np.sqrt((pp[1] * (xd - pp[0]))**2 + 4 * pp[2]**2))
sc = np.abs(ydatax - yd) * charge_change
scalefac = thr
sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
if weights is not None:
sc = sc * weights
sc = np.linalg.norm(sc, ord=4) / sc.size
if pp[1] < 10:
sc *= 10
if pp[2] > 150:
sc *= 10
return sc
def pat_two_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
""" Calculate score for pat two electron model
Args:
xd (array): x coordinates of peaks in sensor signal
yd (array): y coordinates of peaks in sensor signal
pp (array): model parameters
"""
ymodel = two_ele_pat_model(xd, pp)
denom = np.sqrt((pp[1] * (xd - pp[0]))**2 + 8 * pp[2]**2)
charge_changes = []
charge_changes.append(1 / 2 * (1 + pp[1] * (xd - pp[0]) / denom))
charge_changes.append(np.abs(pp[1] * (xd - pp[0]) / denom))
charge_changes.append(1 / 2 * (1 - pp[1] * (xd - pp[0]) / denom))
linesize = ymodel[0].shape[0]
if self.branch_reduction is None or self.branch_reduction == 'minimum':
sc = np.inf * np.ones(linesize)
for idval, val in enumerate(self.even_branches):
if val:
sc = np.minimum(sc, np.abs(ymodel[idval] - yd))
elif self.branch_reduction == 'mean':
sc = []
for idval, val in enumerate(self.even_branches):
if val:
sc.append(np.abs(ymodel[idval] - yd))
sc = np.mean(np.array(sc), axis=1)
else:
raise NotImplementedError('branch_reduction %s not implemented' % self.branch_reduction)
scalefac = thr
sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
if weights is not None:
sc *= weights
sc = np.linalg.norm(sc, ord=4) / sc.size
if pp[1] < 10:
sc *= 10000
return sc
# %%
def pre_process_pat(x_data, y_data, background, z_data, fig=None):
""" Pre-process a pair of background and sensor signal from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequency (Hz)
background (array): e.g. sensor signal of POL scan
z_data (array): sensor signal of PAT scan
fig (None or int)
Returns:
imx (array)
imq (array)
backgr_sm (array)
"""
backgr_sm = scipy.ndimage.gaussian_filter(background, sigma=5)
imq = z_data - backgr_sm
imq = imq - np.mean(imq, axis=1).reshape((-1, 1))
ks = 5
w = np.ones((1, ks)) / ks
imx = scipy.ndimage.convolve(imq, w, mode='nearest')
qq = np.percentile(imx, [5, 50, 95])
imx = imx - qq[1]
qq = np.percentile(imx, [2, 50, 98])
scale = np.mean([-qq[0], qq[2]])
imx = imx / scale
if fig is not None:
# y_data = np.arange(imq.shape[0])
plt.figure(fig)
plt.clf()
plt.subplot(2, 2, 1)
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('Input data')
plt.subplot(2, 2, 2)
plt.pcolormesh(x_data, y_data, imq, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imq')
plt.subplot(2, 2, 3)
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imx')
plt.tight_layout()
return imx, imq, backgr_sm
# %%
def detect_peaks(x_data, y_data, imx, sigmamv=.25, fig=400, period=1e-3, model='one_ele'):
""" Detect peaks in sensor signal, e.g. from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
imx (array): sensor signal of PAT scan, background is usually already subtracted
Returns:
detected_peaks (array): coordinates of detected peaks
results (dict): additional fitting data
"""
thr = .4
thr2 = .6
# chop off part of the data, because T1 is relatively long
mvedge = .1 * (np.max(x_data) - np.min(x_data))
if model == 'two_ele':
mvthr = (np.max(x_data) - np.min(x_data)) * .25e-3 / period # T1 \approx .1 ms [Ref]
horz_vals = x_data[(x_data > (np.min(x_data) + np.maximum(mvthr, mvedge)))
& (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + np.maximum(mvthr, mvedge))) & (x_data < (np.max(x_data) - mvedge))]
elif model == 'one_ele':
horz_vals = x_data[(x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
else:
raise Exception('no such model')
scalefac = (np.max(horz_vals) - np.min(horz_vals)) / (z_data.shape[1] - 1) # mV/pixel
# smooth input image
kern = scipy.signal.gaussian(71, std=sigmamv / scalefac)
kern = kern / kern.sum()
imx2 = scipy.ndimage.convolve(z_data, kern.reshape((1, -1)), mode='nearest')
# get maximum value for each row
mm1 = np.argmax(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm1]
idx1 = np.where(np.abs(val) > thr)[0] # only select indices above scaled threshold
xx1 = np.vstack((horz_vals[mm1[idx1]], y_data[idx1])) # position of selected points
# get minimum value for each row
mm2 = np.argmin(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm2]
# remove points below threshold
idx2 = np.where(np.abs(val) > thr)[0]
xx2 = np.vstack((horz_vals[mm2[idx2]], y_data[idx2]))
# join the two sets
detected_peaks = np.hstack((xx1, xx2))
# determine weights for the points
qq = np.intersect1d(idx1, idx2)
q1 = np.searchsorted(idx1, qq)
q2 = np.searchsorted(idx2, qq)
w1 = .5 * np.ones(len(idx1))
w1[q1] = 1
w2 = .5 * np.ones(len(idx2))
w2[q2] = 1
wfac = .1
w1[np.abs(val[idx1]) < thr2] = wfac
w2[np.abs(val[idx2]) < thr2] = wfac
weights = np.hstack((w1, w2))
if fig is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.plot(horz_vals[mm1[idx1]], y_data[idx1], '.b', markersize=14, label='idx1')
plt.plot(horz_vals[mm2[idx2]], y_data[idx2], '.r', markersize=14, label='idx2')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
return detected_peaks, {'weights': weights, 'detected_peaks': detected_peaks}
# %%
def fit_pat_to_peaks(pp, xd, yd, trans='one_ele', even_branches=[True, True, True], weights=None, xoffset=None, verbose=1, branch_reduction=None):
    """ Core fitting function for PAT measurements, based on detected resonance
    peaks (see detect_peaks).

    The fit runs in stages: a coarse brute-force search over the detuning
    offset (when it is free), followed by two rounds of Powell minimization,
    the second one with a tighter robust-cost threshold.

    Args:
        pp (array): initial guess of fit parameters (xoffset, leverarm, t)
        xd (array): x coordinates of peaks in sensor signal (mV)
        yd (array): y coordinates of peaks in sensor signal (Hz)
        trans (string): 'one_ele' or 'two_ele'
        even_branches (list of bool): which two-electron branches to use
        weights (None or array): optional per-point weights
        xoffset (float): the offset from zero detuning in voltage. If this has been determined before, then fixing this
            parameter reduces the fitting time.
        verbose (int): verbosity level
        branch_reduction (None or str): passed through to pat_score

    Returns:
        array: fitted parameters (xoffset, leverarm, t)
    """
    # Bug fix: the module imports scipy.constants/ndimage/signal but never
    # scipy.optimize; make the dependency explicit instead of relying on
    # implicit submodule loading.
    import scipy.optimize

    ppx = pp.copy()
    pat_score_class = pat_score(even_branches=even_branches, branch_reduction=branch_reduction)
    if trans == 'one_ele':
        pat_model_score = pat_score_class.pat_one_ele_score
    elif trans == 'two_ele':
        pat_model_score = pat_score_class.pat_two_ele_score
    else:
        raise Exception('This model %s is not implemented.' % trans)
    if xoffset is None:
        # Coarse brute-force search over the detuning offset only.
        def ff(x): return pat_model_score(xd, yd, [x, pp[1], ppx[2]], weights=weights)
        r = scipy.optimize.brute(ff, ranges=[(pp[0] - 2, pp[0] + 2)], Ns=20, disp=False)
        ppx[0] = r
    sc0 = pat_model_score(xd, yd, pp, weights=weights)
    sc = pat_model_score(xd, yd, ppx, weights=weights)
    if verbose >= 2:
        print('fit_pat_model: %s: %.4f -> %.4f' % (['%.2f' % x for x in ppx], sc0 / 1e6, sc / 1e6))
    # First Powell pass over the free parameters.
    if xoffset is None:
        def ff(x): return pat_model_score(xd, yd, x, weights=weights)
        r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
        ppx = r['x']
    else:
        def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
        r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
                                    method='Powell', options=dict({'disp': verbose >= 1}))
        ppx = np.insert(r['x'], 0, xoffset)
    # Second pass; with a free offset a tighter robust-cost threshold is used.
    if xoffset is None:
        def ff(x): return pat_model_score(xd, yd, x, weights=weights, thr=.5e9)
        r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
        ppx = r['x']
    else:
        def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
        r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
                                    method='Powell', options=dict({'disp': verbose >= 1}))
        ppx = np.insert(r['x'], 0, xoffset)
    sc0 = pat_model_score(xd, yd, pp, weights=weights)
    sc = pat_model_score(xd, yd, ppx, weights=weights)
    if verbose:
        print('fit_pat_model: %.4f -> %.4f' % (sc0 / 1e6, sc / 1e6))
    return ppx
# %%
def fit_pat(x_data, y_data, z_data, background, trans='one_ele', period=1e-3,
even_branches=[True, True, True], par_guess=None, xoffset=None, verbose=1):
""" Wrapper for fitting the energy transitions in a PAT scan.
For more details see: https://arxiv.org/abs/1803.10352
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
z_data (array): sensor signal of PAT scan
background (array): sensor signal of POL scan
trans (str): can be 'one_ele' or 'two_ele'
even_branches (list of booleans): indicated which branches of the model to use for fitting
Returns:
pp (array): fitted xoffset (mV), leverarm (ueV/mV) and t (ueV)
results (dict): contains keys par_guess (array), imq (array) re-scaled and re-centered sensor signal, imextent (array), xd, yd, ydf
"""
imx, imq, _ = pre_process_pat(x_data, y_data, background, z_data)
xx, dpresults = detect_peaks(x_data, y_data, imx, model=trans, period=period, sigmamv=.05, fig=None)
xd = xx[0, :]
yd = xx[1, :]
if par_guess is None:
par_guess = np.array([np.nanmean(x_data), 65, 10])
pp = fit_pat_to_peaks(par_guess, xd, yd, trans=trans, even_branches=even_branches,
xoffset=xoffset, verbose=0)
if trans == 'one_ele':
model = one_ele_pat_model
elif trans == 'two_ele':
model = two_ele_pat_model
ydf = model(xd, pp)
return pp, {'imq': imq, 'xd': xd, 'yd': yd, 'ydf': ydf, 'par_guess': par_guess}
# %%
def plot_pat_fit(x_data, y_data, z_data, pp, trans='one_ele', fig=400, title='Fitted model', label='model'):
""" Plot the fitted model of the PAT transition(s)
Args:
x_data (array): detuning in millivolts
y_data (array): frequencies
z_data (array): sensor signal of PAT scan
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
model (function): model describing the PAT transitions
"""
if z_data is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.title(title)
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
if trans == 'one_ele':
model = one_ele_pat_model
yfit = model(x_data, pp)
plt.plot(x_data, yfit, '-g', label=label)
yfit_t0 = model(x_data, np.array([pp[0], pp[1], 0]))
plt.plot(x_data, yfit_t0, '--g')
elif trans == 'two_ele':
model = two_ele_pat_model
ylfit, ymfit, yrfit = model(x_data, pp)
plt.plot(x_data, ylfit, '-g', label='S-T')
plt.plot(x_data, ymfit, '-r', label='S-S')
plt.plot(x_data, yrfit, '-b', label='T-S')
plt.ylim([np.min(y_data), np.max(y_data)])
# %%
def show_traces(x_data, z_data, fig=100, direction='h', title=None):
""" Show traces of an image
Args:
x_data (array): detuning in millivolts
z_data (array): input image. rows are taken as the traces
fig (int): number for figure window to use
direction (str): can be 'h' or 'v'
"""
plt.figure(fig)
plt.clf()
if direction == 'v' or direction == 'vertical':
for ii, l in enumerate(z_data.T):
c = []
c = plt.cm.jet(float(ii) / z_data.shape[1])
plt.plot(x_data, l, '', color=c)
if title is None:
title = 'Blue: left vertical lines, red: right lines'
plt.title(title)
else:
for ii, l in enumerate(z_data):
c = []
c = plt.cm.jet(float(ii) / z_data.shape[0])
plt.plot(x_data, l, '', color=c)
if title is None:
title = 'Blue: top lines, red: bottom lines'
plt.title(title)
plt.xlabel('Detuning (mV)')
plt.ylabel('Signal (a.u.)')
| r""" Model for two electron pat
This is \phi = \pm \frac{leverarm}{2} (x - x0) +
\frac{1}{2} \sqrt{( leverarm (x - x0) )^2 + 8 t^2 }
Args:
x_data (array): detuning (mV)
pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
"""
if len(pp) == 1:
pp = pp[0]
xoffset = pp[0]
leverarm = pp[1]
t = pp[2]
yl = (- leverarm * (x_data - xoffset) / 2 + 1 / 2 * np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2)) * ueV2Hz
ym = np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2) * ueV2Hz
yr = (leverarm * (x_data - xoffset) / 2 + 1 / 2 * np.sqrt((leverarm * (x_data - xoffset))**2 + 8 * t**2)) * ueV2Hz
return yl, ym, yr | identifier_body |
pat_fitting.py | """ Functionality to fit PAT models
For more details see: https://arxiv.org/abs/1803.10352
@author: diepencjv / eendebakpt
"""
import matplotlib.pyplot as plt
# %% Load packages
import numpy as np
import scipy.constants
import scipy.ndimage
import scipy.signal
from qtt.pgeometry import robustCost
# %%
ueV2Hz = scipy.constants.e / scipy.constants.h * 1e-6


def one_ele_pat_model(x_data, pp):
    r""" Model for one electron pat

    This is :math:`\phi=\sqrt{ { ( leverarm * (x-x_0) ) }^2 + 4 t^2 } \mathrm{ueV2Hz}`

    Args:
        x_data (array): detuning (mV)
        pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)

    For more details see: https://arxiv.org/abs/1803.10352
    """
    # Accept parameters wrapped in an extra sequence, e.g. [[x0, leverarm, t]].
    if len(pp) == 1:
        pp = pp[0]
    xoffset, leverarm, tunnel_coupling = pp[0], pp[1], pp[2]
    detuning_energy = (x_data - xoffset) * leverarm  # in ueV
    return np.sqrt(detuning_energy ** 2 + 4 * tunnel_coupling ** 2) * ueV2Hz
def two_ele_pat_model(x_data, pp):
    r""" Model for two electron pat

    This is \phi = \pm \frac{leverarm}{2} (x - x0) +
    \frac{1}{2} \sqrt{( leverarm (x - x0) )^2 + 8 t^2 }

    Args:
        x_data (array): detuning (mV)
        pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)

    Returns:
        tuple of arrays: lower, middle and upper transition branches (Hz)
    """
    # Accept parameters wrapped in an extra sequence.
    if len(pp) == 1:
        pp = pp[0]
    xoffset, leverarm, t = pp[0], pp[1], pp[2]
    eps = leverarm * (x_data - xoffset)          # detuning energy (ueV)
    gap = np.sqrt(eps ** 2 + 8 * t ** 2)         # exchange-split gap (ueV)
    yl = (-eps / 2 + gap / 2) * ueV2Hz
    ym = gap * ueV2Hz
    yr = (eps / 2 + gap / 2) * ueV2Hz
    return yl, ym, yr
# %%
class pat_score():
    """ Score functions for fitting PAT models to detected peaks.

    The score is a robust, optionally weighted norm of the distance between
    the detected peaks and the model transition curves.
    """

    def __init__(self, even_branches=(True, True, True), branch_reduction=None):
        """ Class to calculate scores for PAT fitting

        Args:
            even_branches (sequence of bool): selects which of the three
                two-electron branches contribute to the score
            branch_reduction (None or str): reduction over branches, either
                'minimum' (also the None default) or 'mean'
        """
        # Tuple default avoids the shared mutable default argument pitfall.
        self.even_branches = even_branches
        self.branch_reduction = branch_reduction

    def pat_one_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
        """ Calculate score for pat one electron model

        Args:
            xd (array): x coordinates of peaks in sensor signal
            yd (array): y coordinates of peaks in sensor signal
            pp (array): model parameters (xoffset, leverarm, t)
            weights (None or array): optional per-point weights
            thr (float): threshold (Hz) for the robust cost function

        Returns:
            float: score value (lower is better)
        """
        ydatax = one_ele_pat_model(xd, pp)
        # Sensitivity of the ground-state charge to detuning; down-weights
        # points near the anti-crossing where the sensor response vanishes.
        charge_change = np.abs(np.abs(pp[1]) * (xd - pp[0]) / np.sqrt((pp[1] * (xd - pp[0]))**2 + 4 * pp[2]**2))
        sc = np.abs(ydatax - yd) * charge_change
        scalefac = thr
        sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
        if weights is not None:
            sc = sc * weights
        sc = np.linalg.norm(sc, ord=4) / sc.size
        # Penalize non-physical parameter values.
        if pp[1] < 10:
            sc *= 10
        if pp[2] > 150:
            sc *= 10
        return sc

    def pat_two_ele_score(self, xd, yd, pp, weights=None, thr=2e9):
        """ Calculate score for pat two electron model

        Args:
            xd (array): x coordinates of peaks in sensor signal
            yd (array): y coordinates of peaks in sensor signal
            pp (array): model parameters (xoffset, leverarm, t)
            weights (None or array): optional per-point weights
            thr (float): threshold (Hz) for the robust cost function

        Returns:
            float: score value (lower is better)
        """
        ymodel = two_ele_pat_model(xd, pp)
        denom = np.sqrt((pp[1] * (xd - pp[0]))**2 + 8 * pp[2]**2)
        # NOTE(review): charge_changes is computed but currently unused in the
        # score; kept for reference / possible future per-branch weighting.
        charge_changes = []
        charge_changes.append(1 / 2 * (1 + pp[1] * (xd - pp[0]) / denom))
        charge_changes.append(np.abs(pp[1] * (xd - pp[0]) / denom))
        charge_changes.append(1 / 2 * (1 - pp[1] * (xd - pp[0]) / denom))
        linesize = ymodel[0].shape[0]
        if self.branch_reduction is None or self.branch_reduction == 'minimum':
            # Distance of each point to the closest selected branch.
            sc = np.inf * np.ones(linesize)
            for idval, val in enumerate(self.even_branches):
                if val:
                    sc = np.minimum(sc, np.abs(ymodel[idval] - yd))
        elif self.branch_reduction == 'mean':
            sc = []
            for idval, val in enumerate(self.even_branches):
                if val:
                    sc.append(np.abs(ymodel[idval] - yd))
            # Bug fix: average over the selected branches (axis=0). The
            # previous axis=1 reduced over the data points, producing one
            # value per branch and breaking the per-point weighting below.
            sc = np.mean(np.array(sc), axis=0)
        else:
            raise NotImplementedError('branch_reduction %s not implemented' % self.branch_reduction)
        scalefac = thr
        sc = np.sqrt(robustCost(sc / scalefac, thr / scalefac, 'BZ0')) * scalefac
        if weights is not None:
            sc *= weights
        sc = np.linalg.norm(sc, ord=4) / sc.size
        if pp[1] < 10:
            sc *= 10000
        return sc
# %%
def pre_process_pat(x_data, y_data, background, z_data, fig=None):
""" Pre-process a pair of background and sensor signal from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequency (Hz)
background (array): e.g. sensor signal of POL scan
z_data (array): sensor signal of PAT scan
fig (None or int)
Returns:
imx (array)
imq (array)
backgr_sm (array)
"""
backgr_sm = scipy.ndimage.gaussian_filter(background, sigma=5)
imq = z_data - backgr_sm
imq = imq - np.mean(imq, axis=1).reshape((-1, 1))
ks = 5
w = np.ones((1, ks)) / ks
imx = scipy.ndimage.convolve(imq, w, mode='nearest')
qq = np.percentile(imx, [5, 50, 95])
imx = imx - qq[1]
qq = np.percentile(imx, [2, 50, 98])
scale = np.mean([-qq[0], qq[2]])
imx = imx / scale
if fig is not None:
# y_data = np.arange(imq.shape[0])
plt.figure(fig)
plt.clf()
plt.subplot(2, 2, 1)
plt.pcolormesh(x_data, y_data, z_data, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('Input data')
plt.subplot(2, 2, 2)
plt.pcolormesh(x_data, y_data, imq, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imq')
plt.subplot(2, 2, 3)
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
plt.title('imx')
plt.tight_layout()
return imx, imq, backgr_sm
# %%
def detect_peaks(x_data, y_data, imx, sigmamv=.25, fig=400, period=1e-3, model='one_ele'):
""" Detect peaks in sensor signal, e.g. from a pat scan.
Args:
x_data (array): detuning (mV)
y_data (array): frequencies (Hz)
imx (array): sensor signal of PAT scan, background is usually already subtracted
Returns:
detected_peaks (array): coordinates of detected peaks
results (dict): additional fitting data
"""
thr = .4
thr2 = .6
# chop off part of the data, because T1 is relatively long
mvedge = .1 * (np.max(x_data) - np.min(x_data))
if model == 'two_ele':
mvthr = (np.max(x_data) - np.min(x_data)) * .25e-3 / period # T1 \approx .1 ms [Ref]
horz_vals = x_data[(x_data > (np.min(x_data) + np.maximum(mvthr, mvedge)))
& (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + np.maximum(mvthr, mvedge))) & (x_data < (np.max(x_data) - mvedge))]
elif model == 'one_ele':
horz_vals = x_data[(x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
z_data = imx[:, (x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]
else:
raise Exception('no such model')
scalefac = (np.max(horz_vals) - np.min(horz_vals)) / (z_data.shape[1] - 1) # mV/pixel
# smooth input image
kern = scipy.signal.gaussian(71, std=sigmamv / scalefac)
kern = kern / kern.sum()
imx2 = scipy.ndimage.convolve(z_data, kern.reshape((1, -1)), mode='nearest')
# get maximum value for each row
mm1 = np.argmax(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm1]
idx1 = np.where(np.abs(val) > thr)[0] # only select indices above scaled threshold
xx1 = np.vstack((horz_vals[mm1[idx1]], y_data[idx1])) # position of selected points
# get minimum value for each row
mm2 = np.argmin(imx2, axis=1)
val = imx2[np.arange(0, imx2.shape[0]), mm2]
# remove points below threshold
idx2 = np.where(np.abs(val) > thr)[0]
xx2 = np.vstack((horz_vals[mm2[idx2]], y_data[idx2]))
# join the two sets
detected_peaks = np.hstack((xx1, xx2))
# determine weights for the points
qq = np.intersect1d(idx1, idx2)
q1 = np.searchsorted(idx1, qq)
q2 = np.searchsorted(idx2, qq)
w1 = .5 * np.ones(len(idx1))
w1[q1] = 1
w2 = .5 * np.ones(len(idx2))
w2[q2] = 1
wfac = .1
w1[np.abs(val[idx1]) < thr2] = wfac
w2[np.abs(val[idx2]) < thr2] = wfac
weights = np.hstack((w1, w2))
if fig is not None:
plt.figure(fig)
plt.clf()
plt.pcolormesh(x_data, y_data, imx, shading='auto')
plt.plot(horz_vals[mm1[idx1]], y_data[idx1], '.b', markersize=14, label='idx1')
plt.plot(horz_vals[mm2[idx2]], y_data[idx2], '.r', markersize=14, label='idx2')
plt.xlabel('Detuning (mV)')
plt.ylabel('Frequency (Hz)')
return detected_peaks, {'weights': weights, 'detected_peaks': detected_peaks}
# %%
def | (pp, xd, yd, trans='one_ele', even_branches=[True, True, True], weights=None, xoffset=None, verbose=1, branch_reduction=None):
""" Core fitting function for PAT measurements, based on detected resonance
peaks (see detect_peaks).
Args:
pp (array): initial guess of fit parameters
xd (array): x coordinates of peaks in sensor signal (mV)
yd (array): y coordinates of peaks in sensor signal (Hz)
trans (string): 'one_ele' or 'two_ele'
xoffset (float): the offset from zero detuning in voltage. If this has been determined before, then fixing this
parameter reduces the fitting time.
"""
ppx = pp.copy()
pat_score_class = pat_score(even_branches=even_branches, branch_reduction=branch_reduction)
if trans == 'one_ele':
pat_model_score = pat_score_class.pat_one_ele_score
elif trans == 'two_ele':
pat_model_score = pat_score_class.pat_two_ele_score
else:
raise Exception('This model %s is not implemented.' % trans)
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, [x, pp[1], ppx[2]], weights=weights)
r = scipy.optimize.brute(ff, ranges=[(pp[0] - 2, pp[0] + 2)], Ns=20, disp=False)
ppx[0] = r
sc0 = pat_model_score(xd, yd, pp, weights=weights)
sc = pat_model_score(xd, yd, ppx, weights=weights)
if verbose >= 2:
print('fit_pat_model: %s: %.4f -> %.4f' % (['%.2f' % x for x in ppx], sc0 / 1e6, sc / 1e6))
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, x, weights=weights)
r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
ppx = r['x']
else:
def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
method='Powell', options=dict({'disp': verbose >= 1}))
ppx = np.insert(r['x'], 0, xoffset)
if xoffset is None:
def ff(x): return pat_model_score(xd, yd, x, weights=weights, thr=.5e9)
r = scipy.optimize.minimize(ff, ppx, method='Powell', options=dict({'disp': verbose >= 1}))
ppx = r['x']
else:
def ff(x): return pat_model_score(xd, yd, np.array([xoffset, x[0], x[1]]), weights=weights)
r = scipy.optimize.minimize(ff, np.array([ppx[1], ppx[2]]),
method='Powell', options=dict({'disp': verbose >= 1}))
ppx = np.insert(r['x'], 0, xoffset)
sc0 = pat_model_score(xd, yd, pp, weights=weights)
sc = pat_model_score(xd, yd, ppx, weights=weights)
if verbose:
print('fit_pat_model: %.4f -> %.4f' % (sc0 / 1e6, sc / 1e6))
return ppx
# %%
def fit_pat(x_data, y_data, z_data, background, trans='one_ele', period=1e-3,
            even_branches=[True, True, True], par_guess=None, xoffset=None, verbose=1):
    """ Wrapper for fitting the energy transitions in a PAT scan.

    For more details see: https://arxiv.org/abs/1803.10352

    Args:
        x_data (array): detuning (mV)
        y_data (array): frequencies (Hz)
        z_data (array): sensor signal of PAT scan
        background (array): sensor signal of POL scan
        trans (str): can be 'one_ele' or 'two_ele'
        period (float): measurement period (s), passed to detect_peaks
        even_branches (list of booleans): indicated which branches of the model to use for fitting
        par_guess (None or array): optional initial guess (xoffset, leverarm, t)
        xoffset (None or float): optional fixed detuning offset (mV)
        verbose (int): verbosity level

    Returns:
        pp (array): fitted xoffset (mV), leverarm (ueV/mV) and t (ueV)
        results (dict): contains keys par_guess (array), imq (array) re-scaled and re-centered sensor signal, xd, yd, ydf

    Raises:
        ValueError: if trans is not a known transition type
    """
    imx, imq, _ = pre_process_pat(x_data, y_data, background, z_data)
    xx, dpresults = detect_peaks(x_data, y_data, imx, model=trans, period=period, sigmamv=.05, fig=None)
    xd = xx[0, :]
    yd = xx[1, :]
    if par_guess is None:
        # rough defaults: leverarm 65 ueV/mV, tunnel coupling 10 ueV
        par_guess = np.array([np.nanmean(x_data), 65, 10])
    pp = fit_pat_to_peaks(par_guess, xd, yd, trans=trans, even_branches=even_branches,
                          xoffset=xoffset, verbose=0)
    if trans == 'one_ele':
        model = one_ele_pat_model
    elif trans == 'two_ele':
        model = two_ele_pat_model
    else:
        # Bug fix: an unknown trans previously fell through to a NameError
        # on `model`; fail with a clear message instead.
        raise ValueError('unknown transition type %s' % trans)
    ydf = model(xd, pp)
    return pp, {'imq': imq, 'xd': xd, 'yd': yd, 'ydf': ydf, 'par_guess': par_guess}
# %%
def plot_pat_fit(x_data, y_data, z_data, pp, trans='one_ele', fig=400, title='Fitted model', label='model'):
    """ Plot the fitted model of the PAT transition(s)

    Args:
        x_data (array): detuning in millivolts
        y_data (array): frequencies
        z_data (array or None): sensor signal of PAT scan; drawn as background when given
        pp (array): xoffset (mV), leverarm (ueV/mV) and t (ueV)
        trans (str): 'one_ele' or 'two_ele'
        fig (int): figure number to use
        title (str): title for the plot
        label (str): legend label for the one-electron fit curve
    """
    if z_data is not None:
        plt.figure(fig)
        plt.clf()
        plt.pcolormesh(x_data, y_data, z_data, shading='auto')
        plt.title(title)
        plt.xlabel('Detuning (mV)')
        plt.ylabel('Frequency (Hz)')
    if trans == 'one_ele':
        yfit = one_ele_pat_model(x_data, pp)
        plt.plot(x_data, yfit, '-g', label=label)
        # Dashed reference curve: same parameters with zero tunnel coupling.
        yfit_t0 = one_ele_pat_model(x_data, np.array([pp[0], pp[1], 0]))
        plt.plot(x_data, yfit_t0, '--g')
    elif trans == 'two_ele':
        branches = two_ele_pat_model(x_data, pp)
        for curve, (style, branch_name) in zip(branches, [('-g', 'S-T'), ('-r', 'S-S'), ('-b', 'T-S')]):
            plt.plot(x_data, curve, style, label=branch_name)
    plt.ylim([np.min(y_data), np.max(y_data)])
# %%
def show_traces(x_data, z_data, fig=100, direction='h', title=None):
    """ Show traces of an image

    Each row (or column for the vertical direction) of the image is drawn as
    a separate line, colored from blue (first trace) to red (last trace).

    Args:
        x_data (array): detuning in millivolts
        z_data (array): input image. rows are taken as the traces
        fig (int): number for figure window to use
        direction (str): 'v'/'vertical' plots columns; anything else plots rows
        title (str or None): plot title; a descriptive default is used when None
    """
    plt.figure(fig)
    plt.clf()
    if direction == 'v' or direction == 'vertical':
        traces = z_data.T
        number_of_traces = z_data.shape[1]
        default_title = 'Blue: left vertical lines, red: right lines'
    else:
        traces = z_data
        number_of_traces = z_data.shape[0]
        default_title = 'Blue: top lines, red: bottom lines'
    for ii, trace in enumerate(traces):
        # Color graded along the colormap; the dead `c = []` assignments
        # from the original were removed.
        c = plt.cm.jet(float(ii) / number_of_traces)
        plt.plot(x_data, trace, '', color=c)
    if title is None:
        title = default_title
    plt.title(title)
    plt.xlabel('Detuning (mV)')
    plt.ylabel('Signal (a.u.)')
| fit_pat_to_peaks | identifier_name |
gpkg.go | // +build cgo
package gpkg
import (
"database/sql"
"fmt"
"os"
"strings"
"github.com/go-spatial/geom"
_ "github.com/mattn/go-sqlite3"
)
const (
// SQLITE3 is the database driver name
SQLITE3 = "sqlite3"
// ApplicationID is the required application id for the file
ApplicationID = 0x47504B47 // "GPKG"
// UserVersion is the version of the GPKG file format. We support
// 1.2.1, so the the decimal representation is 10201 (1 digit for the major
// two digit for the minor and bug-fix).
UserVersion = 0x000027D9 // 10201
// TableSpatialRefSysSQL is the normative sql for the required spatial ref
// table. http://www.geopackage.org/spec/#gpkg_spatial_ref_sys_sql
TableSpatialRefSysSQL = `
CREATE TABLE IF NOT EXISTS gpkg_spatial_ref_sys (
srs_name TEXT NOT NULL,
srs_id INTEGER NOT NULL PRIMARY KEY,
organization TEXT NOT NULL,
organization_coordsys_id INTEGER NOT NULL,
definition TEXT NOT NULL,
description TEXT
);
`
// TableContentsSQL is the normative sql for the required contents table.
// http://www.geopackage.org/spec/#gpkg_contents_sql
TableContentsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_contents (
table_name TEXT NOT NULL PRIMARY KEY,
data_type TEXT NOT NULL,
identifier TEXT UNIQUE,
description TEXT DEFAULT '',
last_change DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')),
min_x DOUBLE,
min_y DOUBLE,
max_x DOUBLE,
max_y DOUBLE,
srs_id INTEGER,
CONSTRAINT fk_gc_r_srs_id FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys(srs_id)
);
`
// TableGeometryColumnsSQL is the normative sql for the geometry columns table that is
// required if the contents table has at least one table with a data_type of features
// http://www.geopackage.org/spec/#gpkg_geometry_columns_sql
TableGeometryColumnsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_geometry_columns (
table_name TEXT NOT NULL,
column_name TEXT NOT NULL,
geometry_type_name TEXT NOT NULL,
srs_id INTEGER NOT NULL,
z TINYINT NOT NULL, -- 0: z values prohibited; 1: z values mandatory; 2: z values optional
m TINYINT NOT NULL, -- 0: m values prohibited; 1: m values mandatory; 2: m values optional
CONSTRAINT pk_geom_cols PRIMARY KEY (table_name, column_name),
CONSTRAINT uk_gc_table_name UNIQUE (table_name),
CONSTRAINT fk_gc_tn FOREIGN KEY (table_name) REFERENCES gpkg_contents(table_name),
CONSTRAINT fk_gc_srs FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys (srs_id)
);
`
)
// Organization names
const (
// ORNone is for basic SRS
ORNone = "none"
OREPSG = "epsg"
)
var (
initialSQL = fmt.Sprintf(
`
PRAGMA application_id = %d;
PRAGMA user_version = %d ;
PRAGMA foreign_keys = ON ;
`,
ApplicationID,
UserVersion,
)
)
const (
DataTypeFeatures = "features"
DataTypeAttributes = "attributes"
DataTypeTitles = "titles"
)
// SpatialReferenceSystem describes the SRS
type SpatialReferenceSystem struct {
	Name                   string // human readable name of the SRS
	ID                     int    // unique id, used as gpkg_spatial_ref_sys.srs_id
	Organization           string // defining organization, e.g. "epsg" or "none"
	OrganizationCoordsysID int    // numeric id assigned by the organization
	Definition             string // WKT definition; may be empty for the "any" SRS
	Description            string // optional human readable description
}
var KnownSRS = map[int32]SpatialReferenceSystem{
-1: {
Name: "any",
ID: -1,
Organization: ORNone,
OrganizationCoordsysID: -1,
Definition: "",
Description: "any",
},
0: {
Name: "any",
ID: 0,
Organization: ORNone,
OrganizationCoordsysID: 0,
Definition: "",
Description: "any",
},
4326: {
Name: "WGS 84",
ID: 4326,
Organization: OREPSG,
OrganizationCoordsysID: 4326,
Definition: `
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
`,
Description: "World Geodetic System: WGS 84",
},
3857: {
Name: "WebMercator",
ID: 3857,
Organization: OREPSG,
OrganizationCoordsysID: 3857,
Definition: `
PROJCS["WGS 84 / Pseudo-Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["X",EAST],
AXIS["Y",NORTH],
EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],
AUTHORITY["EPSG","3857"]]
`,
Description: "WGS83 / Web Mercator",
},
}
// nonZeroFileExists checks if a file exists, and has a size greater then Zero
// and is not a directory before we try using it to prevent further errors.
func nonZeroFileExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		// Bug fix: previously only os.IsNotExist(err) was handled, so any
		// other Stat error (e.g. a permission error) left info nil and the
		// IsDir call below panicked. Any error means the file is unusable.
		return false
	}
	if info.IsDir() {
		return false
	}
	return info.Size() > 0
}
// Open will open or create the sqlite file, and return a new db handle to it.
func Open(filename string) (*Handle, error) {
	var h = new(Handle)
	db, err := sql.Open(SQLITE3, filename)
	if err != nil {
		return nil, err
	}
	h.DB = db
	if err = initHandle(h); err != nil {
		// Bug fix: don't leak the connection pool when initialization fails.
		db.Close()
		return nil, err
	}
	return h, nil
}
// New will create a new gpkg file and return a new db handle
func New(filename string) (*Handle, error) {
	// Refuse to clobber existing data: if the target is already present and
	// non-empty we error out instead of overwriting it.
	if nonZeroFileExists(filename) {
		return nil, os.ErrExist
	}
	// Otherwise opening creates the file and initializes the metadata tables.
	return Open(filename)
}
// initHandle will setup up all the required tables and metadata for
// a new gpkg file.
func initHandle(h *Handle) error {
// Set the pragma's that we need to set for this file
_, err := h.Exec(initialSQL)
if err != nil {
return err
}
// Make sure the required metadata tables are available
for _, sql := range []string{TableSpatialRefSysSQL, TableContentsSQL, TableGeometryColumnsSQL} |
srss := make([]SpatialReferenceSystem, 0, len(KnownSRS))
// Now need to add SRS that we know about
for _, srs := range KnownSRS {
srss = append(srss, srs)
}
return h.UpdateSRS(srss...)
}
// AddGeometryTable will add the given features table to the metadata tables
// This should be called after creating the table.
func (h *Handle) AddGeometryTable(table TableDescription) error {
	const (
		// Counts entries for the given srs_id; used to verify the SRS is registered.
		validateSRSSQL = `
SELECT Count(*)
FROM gpkg_spatial_ref_sys
WHERE
srs_id=?
`
		// Probes that the table and its geometry column exist. The %v
		// placeholders are filled with internal (trusted) identifiers,
		// not user input.
		validateTableFieldSQL = `
SELECT "%v"
FROM "%v"
LIMIT 1
`
		// Registers the table in gpkg_contents; an existing row is left untouched.
		updateContentsTableSQL = `
INSERT INTO gpkg_contents(
table_name,
data_type,
identifier,
description,
srs_id
)
VALUES (?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
		// Registers the geometry column in gpkg_geometry_columns.
		updateGeometryColumnsTableSQL = `
INSERT INTO gpkg_geometry_columns(
table_name,
column_name,
geometry_type_name,
srs_id,
z,
m
)
VALUES(?,?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
	)
	var (
		count int
	)
	// Validate that the value already exists in the data base.
	err := h.QueryRow(validateSRSSQL, table.SRS).Scan(&count)
	if err != nil {
		return err
	}
	if count == 0 {
		// let's check known srs's to see if we have it and can add it.
		srsdef, ok := KnownSRS[table.SRS]
		if !ok {
			return fmt.Errorf("unknown srs: %v", table.SRS)
		}
		if err = h.UpdateSRS(srsdef); err != nil {
			return err
		}
	}
	// A failing query here means the table or the geometry column is missing.
	rows, err := h.Query(fmt.Sprintf(validateTableFieldSQL, table.GeometryField, table.Name))
	if err != nil {
		return fmt.Errorf("unknown table %v or field %v : %v", table.Name, table.GeometryField, err)
	}
	rows.Close()
	_, err = h.Exec(updateContentsTableSQL, table.Name, DataTypeFeatures, table.ShortName, table.Description, table.SRS)
	if err != nil {
		return err
	}
	_, err = h.Exec(updateGeometryColumnsTableSQL, table.Name, table.GeometryField, table.GeometryType.String(), table.SRS, table.Z, table.M)
	return err
}
// UpdateSRS will insert or update the srs table with the given srs
func (h *Handle) UpdateSRS(srss ...SpatialReferenceSystem) error {
const (
UpdateSQL = `
INSERT INTO gpkg_spatial_ref_sys(
srs_name,
srs_id,
organization,
organization_coordsys_id,
definition,
description
)
VALUES %v
ON CONFLICT(srs_id) DO NOTHING;
`
placeHolders = `(?,?,?,?,?,?) `
)
if len(srss) == 0 {
return nil
}
valuePlaceHolder := strings.Join(
strings.SplitN(
strings.Repeat(placeHolders, len(srss)),
" ",
len(srss),
),
",",
)
updateSQL := fmt.Sprintf(UpdateSQL, valuePlaceHolder)
values := make([]interface{}, 0, len(srss)*6)
for _, srs := range srss {
values = append(
values,
srs.Name,
srs.ID,
srs.Organization,
srs.OrganizationCoordsysID,
srs.Definition,
srs.Description,
)
}
_, err := h.Exec(updateSQL, values...)
return err
}
// UpdateGeometryExtent will modify the extent for the given table by adding the passed
// in extent to the extent of the table. Growing the extent as necessary.
func (h *Handle) UpdateGeometryExtent(tablename string, extent *geom.Extent) error {
if extent == nil {
return nil
}
var (
minx,
miny,
maxx,
maxy *float64
ext *geom.Extent
)
const (
selectSQL = `
SELECT
min_x,
min_y,
max_x,
max_y
FROM
gpkg_contents
WHERE
table_name = ?
`
updateSQL = `
UPDATE gpkg_contents
SET
min_x = ?,
min_y = ?,
max_x = ?,
max_y = ?
WHERE
table_name = ?
`
)
err := h.QueryRow(selectSQL, tablename).Scan(&minx, &miny, &maxx, &maxy)
if err != nil {
return err
}
if minx == nil || miny == nil || maxx == nil || maxy == nil {
ext = extent
} else {
ext = geom.NewExtent([2]float64{*minx, *miny}, [2]float64{*maxx, *maxy})
ext.Add(extent)
}
_, err = h.Exec(updateSQL, ext.MinX(), ext.MinY(), ext.MaxX(), ext.MaxY(), tablename)
return err
}
// CalculateGeometryExtent will grab all the geometries from the given table, use it
// to calculate the extent of all geometries in that table.
func (h *Handle) CalculateGeometryExtent(tablename string) (*geom.Extent, error) {
const (
selectGeomColSQL = `
SELECT
column_name
FROM
gpkg_geometry_columns
WHERE
table_name = ?
`
selectAllSQLFormat = ` SELECT "%v" FROM "%v"`
)
var (
columnName string
ext *geom.Extent
err error
rows *sql.Rows
sb StandardBinary
)
// First get the geometry column for table.
if err = h.QueryRow(selectGeomColSQL, tablename).Scan(&columnName); err != nil {
return nil, err
}
if rows, err = h.Query(fmt.Sprintf(selectAllSQLFormat, columnName, tablename)); err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
rows.Scan(&sb)
if geom.IsEmpty(sb.Geometry) {
continue
}
if ext == nil {
ext, err = geom.NewExtentFromGeometry(sb.Geometry)
if err != nil {
ext = nil
}
continue
}
ext.AddGeometry(sb.Geometry)
}
return ext, nil
}
| {
_, err := h.Exec(sql)
if err != nil {
return err
}
} | conditional_block |
gpkg.go | // +build cgo
package gpkg
import (
"database/sql"
"fmt"
"os"
"strings"
"github.com/go-spatial/geom"
_ "github.com/mattn/go-sqlite3"
)
const (
// SQLITE3 is the database driver name
SQLITE3 = "sqlite3"
// ApplicationID is the required application id for the file
ApplicationID = 0x47504B47 // "GPKG"
// UserVersion is the version of the GPKG file format. We support
// 1.2.1, so the the decimal representation is 10201 (1 digit for the major
// two digit for the minor and bug-fix).
UserVersion = 0x000027D9 // 10201
// TableSpatialRefSysSQL is the normative sql for the required spatial ref
// table. http://www.geopackage.org/spec/#gpkg_spatial_ref_sys_sql
TableSpatialRefSysSQL = `
CREATE TABLE IF NOT EXISTS gpkg_spatial_ref_sys (
srs_name TEXT NOT NULL,
srs_id INTEGER NOT NULL PRIMARY KEY,
organization TEXT NOT NULL,
organization_coordsys_id INTEGER NOT NULL,
definition TEXT NOT NULL,
description TEXT
);
`
// TableContentsSQL is the normative sql for the required contents table.
// http://www.geopackage.org/spec/#gpkg_contents_sql
TableContentsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_contents (
table_name TEXT NOT NULL PRIMARY KEY,
data_type TEXT NOT NULL,
identifier TEXT UNIQUE,
description TEXT DEFAULT '',
last_change DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')),
min_x DOUBLE,
min_y DOUBLE,
max_x DOUBLE,
max_y DOUBLE,
srs_id INTEGER,
CONSTRAINT fk_gc_r_srs_id FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys(srs_id)
);
`
// TableGeometryColumnsSQL is the normative sql for the geometry columns table that is
// required if the contents table has at least one table with a data_type of features
// http://www.geopackage.org/spec/#gpkg_geometry_columns_sql
TableGeometryColumnsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_geometry_columns (
table_name TEXT NOT NULL,
column_name TEXT NOT NULL,
geometry_type_name TEXT NOT NULL,
srs_id INTEGER NOT NULL,
z TINYINT NOT NULL, -- 0: z values prohibited; 1: z values mandatory; 2: z values optional
m TINYINT NOT NULL, -- 0: m values prohibited; 1: m values mandatory; 2: m values optional
CONSTRAINT pk_geom_cols PRIMARY KEY (table_name, column_name),
CONSTRAINT uk_gc_table_name UNIQUE (table_name),
CONSTRAINT fk_gc_tn FOREIGN KEY (table_name) REFERENCES gpkg_contents(table_name),
CONSTRAINT fk_gc_srs FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys (srs_id)
);
`
)
// Organization names
const (
// ORNone is for basic SRS
ORNone = "none"
OREPSG = "epsg"
)
var (
initialSQL = fmt.Sprintf(
`
PRAGMA application_id = %d;
PRAGMA user_version = %d ;
PRAGMA foreign_keys = ON ;
`,
ApplicationID,
UserVersion,
)
)
const (
DataTypeFeatures = "features"
DataTypeAttributes = "attributes"
DataTypeTitles = "titles"
)
// SpatialReferenceSystem describes the SRS
type SpatialReferenceSystem struct {
Name string
ID int
Organization string
OrganizationCoordsysID int
Definition string
Description string
}
var KnownSRS = map[int32]SpatialReferenceSystem{
-1: {
Name: "any",
ID: -1,
Organization: ORNone,
OrganizationCoordsysID: -1,
Definition: "",
Description: "any",
},
0: {
Name: "any",
ID: 0,
Organization: ORNone,
OrganizationCoordsysID: 0,
Definition: "",
Description: "any",
},
4326: {
Name: "WGS 84",
ID: 4326,
Organization: OREPSG,
OrganizationCoordsysID: 4326,
Definition: `
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
`,
Description: "World Geodetic System: WGS 84",
},
3857: {
Name: "WebMercator",
ID: 3857,
Organization: OREPSG,
OrganizationCoordsysID: 3857,
Definition: `
PROJCS["WGS 84 / Pseudo-Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["X",EAST],
AXIS["Y",NORTH],
EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],
AUTHORITY["EPSG","3857"]]
`,
Description: "WGS83 / Web Mercator",
},
}
// nonZeroFileExists checks if a file exists, and has a size greater then Zero
// and is not a directory before we try using it to prevent further errors.
func nonZeroFileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
if info.IsDir() {
return false
}
return info.Size() > 0
}
// Open will open or create the sqlite file, and return a new db handle to it.
func Open(filename string) (*Handle, error) {
var h = new(Handle)
db, err := sql.Open(SQLITE3, filename)
if err != nil {
return nil, err
}
h.DB = db
if err = initHandle(h); err != nil {
return nil, err
}
return h, nil
}
// New will create a new gpkg file and return a new db handle
func New(filename string) (*Handle, error) {
// First let's check to see if the file exists
// if it does error. We will not overwrite an files
if nonZeroFileExists(filename) {
return nil, os.ErrExist
}
return Open(filename)
}
// initHandle will setup up all the required tables and metadata for
// a new gpkg file.
func initHandle(h *Handle) error {
// Set the pragma's that we need to set for this file
_, err := h.Exec(initialSQL)
if err != nil {
return err
}
// Make sure the required metadata tables are available
for _, sql := range []string{TableSpatialRefSysSQL, TableContentsSQL, TableGeometryColumnsSQL} {
_, err := h.Exec(sql)
if err != nil {
return err
}
}
srss := make([]SpatialReferenceSystem, 0, len(KnownSRS))
// Now need to add SRS that we know about
for _, srs := range KnownSRS {
srss = append(srss, srs)
}
return h.UpdateSRS(srss...)
}
// AddGeometryTable will add the given features table to the metadata tables
// This should be called after creating the table.
func (h *Handle) AddGeometryTable(table TableDescription) error {
const (
validateSRSSQL = `
SELECT Count(*)
FROM gpkg_spatial_ref_sys
WHERE
srs_id=?
`
validateTableFieldSQL = `
SELECT "%v"
FROM "%v"
LIMIT 1
`
updateContentsTableSQL = `
INSERT INTO gpkg_contents(
table_name,
data_type,
identifier,
description,
srs_id
)
VALUES (?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
updateGeometryColumnsTableSQL = `
INSERT INTO gpkg_geometry_columns(
table_name,
column_name,
geometry_type_name,
srs_id,
z,
m
)
VALUES(?,?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
)
var ( |
// Validate that the value already exists in the data base.
err := h.QueryRow(validateSRSSQL, table.SRS).Scan(&count)
if err != nil {
return err
}
if count == 0 {
// let's check known srs's to see if we have it and can add it.
srsdef, ok := KnownSRS[table.SRS]
if !ok {
return fmt.Errorf("unknown srs: %v", table.SRS)
}
if err = h.UpdateSRS(srsdef); err != nil {
return err
}
}
rows, err := h.Query(fmt.Sprintf(validateTableFieldSQL, table.GeometryField, table.Name))
if err != nil {
return fmt.Errorf("unknown table %v or field %v : %v", table.Name, table.GeometryField, err)
}
rows.Close()
_, err = h.Exec(updateContentsTableSQL, table.Name, DataTypeFeatures, table.ShortName, table.Description, table.SRS)
if err != nil {
return err
}
_, err = h.Exec(updateGeometryColumnsTableSQL, table.Name, table.GeometryField, table.GeometryType.String(), table.SRS, table.Z, table.M)
return err
}
// UpdateSRS will insert or update the srs table with the given srs
func (h *Handle) UpdateSRS(srss ...SpatialReferenceSystem) error {
const (
UpdateSQL = `
INSERT INTO gpkg_spatial_ref_sys(
srs_name,
srs_id,
organization,
organization_coordsys_id,
definition,
description
)
VALUES %v
ON CONFLICT(srs_id) DO NOTHING;
`
placeHolders = `(?,?,?,?,?,?) `
)
if len(srss) == 0 {
return nil
}
valuePlaceHolder := strings.Join(
strings.SplitN(
strings.Repeat(placeHolders, len(srss)),
" ",
len(srss),
),
",",
)
updateSQL := fmt.Sprintf(UpdateSQL, valuePlaceHolder)
values := make([]interface{}, 0, len(srss)*6)
for _, srs := range srss {
values = append(
values,
srs.Name,
srs.ID,
srs.Organization,
srs.OrganizationCoordsysID,
srs.Definition,
srs.Description,
)
}
_, err := h.Exec(updateSQL, values...)
return err
}
// UpdateGeometryExtent will modify the extent for the given table by adding the passed
// in extent to the extent of the table. Growing the extent as necessary.
func (h *Handle) UpdateGeometryExtent(tablename string, extent *geom.Extent) error {
if extent == nil {
return nil
}
var (
minx,
miny,
maxx,
maxy *float64
ext *geom.Extent
)
const (
selectSQL = `
SELECT
min_x,
min_y,
max_x,
max_y
FROM
gpkg_contents
WHERE
table_name = ?
`
updateSQL = `
UPDATE gpkg_contents
SET
min_x = ?,
min_y = ?,
max_x = ?,
max_y = ?
WHERE
table_name = ?
`
)
err := h.QueryRow(selectSQL, tablename).Scan(&minx, &miny, &maxx, &maxy)
if err != nil {
return err
}
if minx == nil || miny == nil || maxx == nil || maxy == nil {
ext = extent
} else {
ext = geom.NewExtent([2]float64{*minx, *miny}, [2]float64{*maxx, *maxy})
ext.Add(extent)
}
_, err = h.Exec(updateSQL, ext.MinX(), ext.MinY(), ext.MaxX(), ext.MaxY(), tablename)
return err
}
// CalculateGeometryExtent will grab all the geometries from the given table, use it
// to calculate the extent of all geometries in that table.
func (h *Handle) CalculateGeometryExtent(tablename string) (*geom.Extent, error) {
const (
selectGeomColSQL = `
SELECT
column_name
FROM
gpkg_geometry_columns
WHERE
table_name = ?
`
selectAllSQLFormat = ` SELECT "%v" FROM "%v"`
)
var (
columnName string
ext *geom.Extent
err error
rows *sql.Rows
sb StandardBinary
)
// First get the geometry column for table.
if err = h.QueryRow(selectGeomColSQL, tablename).Scan(&columnName); err != nil {
return nil, err
}
if rows, err = h.Query(fmt.Sprintf(selectAllSQLFormat, columnName, tablename)); err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
rows.Scan(&sb)
if geom.IsEmpty(sb.Geometry) {
continue
}
if ext == nil {
ext, err = geom.NewExtentFromGeometry(sb.Geometry)
if err != nil {
ext = nil
}
continue
}
ext.AddGeometry(sb.Geometry)
}
return ext, nil
} | count int
) | random_line_split |
gpkg.go | // +build cgo
package gpkg
import (
"database/sql"
"fmt"
"os"
"strings"
"github.com/go-spatial/geom"
_ "github.com/mattn/go-sqlite3"
)
const (
// SQLITE3 is the database driver name
SQLITE3 = "sqlite3"
// ApplicationID is the required application id for the file
ApplicationID = 0x47504B47 // "GPKG"
// UserVersion is the version of the GPKG file format. We support
// 1.2.1, so the the decimal representation is 10201 (1 digit for the major
// two digit for the minor and bug-fix).
UserVersion = 0x000027D9 // 10201
// TableSpatialRefSysSQL is the normative sql for the required spatial ref
// table. http://www.geopackage.org/spec/#gpkg_spatial_ref_sys_sql
TableSpatialRefSysSQL = `
CREATE TABLE IF NOT EXISTS gpkg_spatial_ref_sys (
srs_name TEXT NOT NULL,
srs_id INTEGER NOT NULL PRIMARY KEY,
organization TEXT NOT NULL,
organization_coordsys_id INTEGER NOT NULL,
definition TEXT NOT NULL,
description TEXT
);
`
// TableContentsSQL is the normative sql for the required contents table.
// http://www.geopackage.org/spec/#gpkg_contents_sql
TableContentsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_contents (
table_name TEXT NOT NULL PRIMARY KEY,
data_type TEXT NOT NULL,
identifier TEXT UNIQUE,
description TEXT DEFAULT '',
last_change DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')),
min_x DOUBLE,
min_y DOUBLE,
max_x DOUBLE,
max_y DOUBLE,
srs_id INTEGER,
CONSTRAINT fk_gc_r_srs_id FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys(srs_id)
);
`
// TableGeometryColumnsSQL is the normative sql for the geometry columns table that is
// required if the contents table has at least one table with a data_type of features
// http://www.geopackage.org/spec/#gpkg_geometry_columns_sql
TableGeometryColumnsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_geometry_columns (
table_name TEXT NOT NULL,
column_name TEXT NOT NULL,
geometry_type_name TEXT NOT NULL,
srs_id INTEGER NOT NULL,
z TINYINT NOT NULL, -- 0: z values prohibited; 1: z values mandatory; 2: z values optional
m TINYINT NOT NULL, -- 0: m values prohibited; 1: m values mandatory; 2: m values optional
CONSTRAINT pk_geom_cols PRIMARY KEY (table_name, column_name),
CONSTRAINT uk_gc_table_name UNIQUE (table_name),
CONSTRAINT fk_gc_tn FOREIGN KEY (table_name) REFERENCES gpkg_contents(table_name),
CONSTRAINT fk_gc_srs FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys (srs_id)
);
`
)
// Organization names
const (
// ORNone is for basic SRS
ORNone = "none"
OREPSG = "epsg"
)
var (
initialSQL = fmt.Sprintf(
`
PRAGMA application_id = %d;
PRAGMA user_version = %d ;
PRAGMA foreign_keys = ON ;
`,
ApplicationID,
UserVersion,
)
)
const (
DataTypeFeatures = "features"
DataTypeAttributes = "attributes"
DataTypeTitles = "titles"
)
// SpatialReferenceSystem describes the SRS
type SpatialReferenceSystem struct {
Name string
ID int
Organization string
OrganizationCoordsysID int
Definition string
Description string
}
var KnownSRS = map[int32]SpatialReferenceSystem{
-1: {
Name: "any",
ID: -1,
Organization: ORNone,
OrganizationCoordsysID: -1,
Definition: "",
Description: "any",
},
0: {
Name: "any",
ID: 0,
Organization: ORNone,
OrganizationCoordsysID: 0,
Definition: "",
Description: "any",
},
4326: {
Name: "WGS 84",
ID: 4326,
Organization: OREPSG,
OrganizationCoordsysID: 4326,
Definition: `
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
`,
Description: "World Geodetic System: WGS 84",
},
3857: {
Name: "WebMercator",
ID: 3857,
Organization: OREPSG,
OrganizationCoordsysID: 3857,
Definition: `
PROJCS["WGS 84 / Pseudo-Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["X",EAST],
AXIS["Y",NORTH],
EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],
AUTHORITY["EPSG","3857"]]
`,
Description: "WGS83 / Web Mercator",
},
}
// nonZeroFileExists checks if a file exists, and has a size greater then Zero
// and is not a directory before we try using it to prevent further errors.
func nonZeroFileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
if info.IsDir() {
return false
}
return info.Size() > 0
}
// Open will open or create the sqlite file, and return a new db handle to it.
func Open(filename string) (*Handle, error) {
var h = new(Handle)
db, err := sql.Open(SQLITE3, filename)
if err != nil {
return nil, err
}
h.DB = db
if err = initHandle(h); err != nil {
return nil, err
}
return h, nil
}
// New will create a new gpkg file and return a new db handle
func New(filename string) (*Handle, error) {
// First let's check to see if the file exists
// if it does error. We will not overwrite an files
if nonZeroFileExists(filename) {
return nil, os.ErrExist
}
return Open(filename)
}
// initHandle will setup up all the required tables and metadata for
// a new gpkg file.
func | (h *Handle) error {
// Set the pragma's that we need to set for this file
_, err := h.Exec(initialSQL)
if err != nil {
return err
}
// Make sure the required metadata tables are available
for _, sql := range []string{TableSpatialRefSysSQL, TableContentsSQL, TableGeometryColumnsSQL} {
_, err := h.Exec(sql)
if err != nil {
return err
}
}
srss := make([]SpatialReferenceSystem, 0, len(KnownSRS))
// Now need to add SRS that we know about
for _, srs := range KnownSRS {
srss = append(srss, srs)
}
return h.UpdateSRS(srss...)
}
// AddGeometryTable will add the given features table to the metadata tables
// This should be called after creating the table.
func (h *Handle) AddGeometryTable(table TableDescription) error {
const (
validateSRSSQL = `
SELECT Count(*)
FROM gpkg_spatial_ref_sys
WHERE
srs_id=?
`
validateTableFieldSQL = `
SELECT "%v"
FROM "%v"
LIMIT 1
`
updateContentsTableSQL = `
INSERT INTO gpkg_contents(
table_name,
data_type,
identifier,
description,
srs_id
)
VALUES (?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
updateGeometryColumnsTableSQL = `
INSERT INTO gpkg_geometry_columns(
table_name,
column_name,
geometry_type_name,
srs_id,
z,
m
)
VALUES(?,?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
)
var (
count int
)
// Validate that the value already exists in the data base.
err := h.QueryRow(validateSRSSQL, table.SRS).Scan(&count)
if err != nil {
return err
}
if count == 0 {
// let's check known srs's to see if we have it and can add it.
srsdef, ok := KnownSRS[table.SRS]
if !ok {
return fmt.Errorf("unknown srs: %v", table.SRS)
}
if err = h.UpdateSRS(srsdef); err != nil {
return err
}
}
rows, err := h.Query(fmt.Sprintf(validateTableFieldSQL, table.GeometryField, table.Name))
if err != nil {
return fmt.Errorf("unknown table %v or field %v : %v", table.Name, table.GeometryField, err)
}
rows.Close()
_, err = h.Exec(updateContentsTableSQL, table.Name, DataTypeFeatures, table.ShortName, table.Description, table.SRS)
if err != nil {
return err
}
_, err = h.Exec(updateGeometryColumnsTableSQL, table.Name, table.GeometryField, table.GeometryType.String(), table.SRS, table.Z, table.M)
return err
}
// UpdateSRS will insert or update the srs table with the given srs
func (h *Handle) UpdateSRS(srss ...SpatialReferenceSystem) error {
const (
UpdateSQL = `
INSERT INTO gpkg_spatial_ref_sys(
srs_name,
srs_id,
organization,
organization_coordsys_id,
definition,
description
)
VALUES %v
ON CONFLICT(srs_id) DO NOTHING;
`
placeHolders = `(?,?,?,?,?,?) `
)
if len(srss) == 0 {
return nil
}
valuePlaceHolder := strings.Join(
strings.SplitN(
strings.Repeat(placeHolders, len(srss)),
" ",
len(srss),
),
",",
)
updateSQL := fmt.Sprintf(UpdateSQL, valuePlaceHolder)
values := make([]interface{}, 0, len(srss)*6)
for _, srs := range srss {
values = append(
values,
srs.Name,
srs.ID,
srs.Organization,
srs.OrganizationCoordsysID,
srs.Definition,
srs.Description,
)
}
_, err := h.Exec(updateSQL, values...)
return err
}
// UpdateGeometryExtent will modify the extent for the given table by adding the passed
// in extent to the extent of the table. Growing the extent as necessary.
func (h *Handle) UpdateGeometryExtent(tablename string, extent *geom.Extent) error {
if extent == nil {
return nil
}
var (
minx,
miny,
maxx,
maxy *float64
ext *geom.Extent
)
const (
selectSQL = `
SELECT
min_x,
min_y,
max_x,
max_y
FROM
gpkg_contents
WHERE
table_name = ?
`
updateSQL = `
UPDATE gpkg_contents
SET
min_x = ?,
min_y = ?,
max_x = ?,
max_y = ?
WHERE
table_name = ?
`
)
err := h.QueryRow(selectSQL, tablename).Scan(&minx, &miny, &maxx, &maxy)
if err != nil {
return err
}
if minx == nil || miny == nil || maxx == nil || maxy == nil {
ext = extent
} else {
ext = geom.NewExtent([2]float64{*minx, *miny}, [2]float64{*maxx, *maxy})
ext.Add(extent)
}
_, err = h.Exec(updateSQL, ext.MinX(), ext.MinY(), ext.MaxX(), ext.MaxY(), tablename)
return err
}
// CalculateGeometryExtent will grab all the geometries from the given table, use it
// to calculate the extent of all geometries in that table.
func (h *Handle) CalculateGeometryExtent(tablename string) (*geom.Extent, error) {
const (
selectGeomColSQL = `
SELECT
column_name
FROM
gpkg_geometry_columns
WHERE
table_name = ?
`
selectAllSQLFormat = ` SELECT "%v" FROM "%v"`
)
var (
columnName string
ext *geom.Extent
err error
rows *sql.Rows
sb StandardBinary
)
// First get the geometry column for table.
if err = h.QueryRow(selectGeomColSQL, tablename).Scan(&columnName); err != nil {
return nil, err
}
if rows, err = h.Query(fmt.Sprintf(selectAllSQLFormat, columnName, tablename)); err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
rows.Scan(&sb)
if geom.IsEmpty(sb.Geometry) {
continue
}
if ext == nil {
ext, err = geom.NewExtentFromGeometry(sb.Geometry)
if err != nil {
ext = nil
}
continue
}
ext.AddGeometry(sb.Geometry)
}
return ext, nil
}
| initHandle | identifier_name |
gpkg.go | // +build cgo
package gpkg
import (
"database/sql"
"fmt"
"os"
"strings"
"github.com/go-spatial/geom"
_ "github.com/mattn/go-sqlite3"
)
const (
// SQLITE3 is the database driver name
SQLITE3 = "sqlite3"
// ApplicationID is the required application id for the file
ApplicationID = 0x47504B47 // "GPKG"
// UserVersion is the version of the GPKG file format. We support
// 1.2.1, so the the decimal representation is 10201 (1 digit for the major
// two digit for the minor and bug-fix).
UserVersion = 0x000027D9 // 10201
// TableSpatialRefSysSQL is the normative sql for the required spatial ref
// table. http://www.geopackage.org/spec/#gpkg_spatial_ref_sys_sql
TableSpatialRefSysSQL = `
CREATE TABLE IF NOT EXISTS gpkg_spatial_ref_sys (
srs_name TEXT NOT NULL,
srs_id INTEGER NOT NULL PRIMARY KEY,
organization TEXT NOT NULL,
organization_coordsys_id INTEGER NOT NULL,
definition TEXT NOT NULL,
description TEXT
);
`
// TableContentsSQL is the normative sql for the required contents table.
// http://www.geopackage.org/spec/#gpkg_contents_sql
TableContentsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_contents (
table_name TEXT NOT NULL PRIMARY KEY,
data_type TEXT NOT NULL,
identifier TEXT UNIQUE,
description TEXT DEFAULT '',
last_change DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')),
min_x DOUBLE,
min_y DOUBLE,
max_x DOUBLE,
max_y DOUBLE,
srs_id INTEGER,
CONSTRAINT fk_gc_r_srs_id FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys(srs_id)
);
`
// TableGeometryColumnsSQL is the normative sql for the geometry columns table that is
// required if the contents table has at least one table with a data_type of features
// http://www.geopackage.org/spec/#gpkg_geometry_columns_sql
TableGeometryColumnsSQL = `
CREATE TABLE IF NOT EXISTS gpkg_geometry_columns (
table_name TEXT NOT NULL,
column_name TEXT NOT NULL,
geometry_type_name TEXT NOT NULL,
srs_id INTEGER NOT NULL,
z TINYINT NOT NULL, -- 0: z values prohibited; 1: z values mandatory; 2: z values optional
m TINYINT NOT NULL, -- 0: m values prohibited; 1: m values mandatory; 2: m values optional
CONSTRAINT pk_geom_cols PRIMARY KEY (table_name, column_name),
CONSTRAINT uk_gc_table_name UNIQUE (table_name),
CONSTRAINT fk_gc_tn FOREIGN KEY (table_name) REFERENCES gpkg_contents(table_name),
CONSTRAINT fk_gc_srs FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys (srs_id)
);
`
)
// Organization names
const (
// ORNone is for basic SRS
ORNone = "none"
OREPSG = "epsg"
)
var (
initialSQL = fmt.Sprintf(
`
PRAGMA application_id = %d;
PRAGMA user_version = %d ;
PRAGMA foreign_keys = ON ;
`,
ApplicationID,
UserVersion,
)
)
const (
DataTypeFeatures = "features"
DataTypeAttributes = "attributes"
DataTypeTitles = "titles"
)
// SpatialReferenceSystem describes the SRS
type SpatialReferenceSystem struct {
Name string
ID int
Organization string
OrganizationCoordsysID int
Definition string
Description string
}
var KnownSRS = map[int32]SpatialReferenceSystem{
-1: {
Name: "any",
ID: -1,
Organization: ORNone,
OrganizationCoordsysID: -1,
Definition: "",
Description: "any",
},
0: {
Name: "any",
ID: 0,
Organization: ORNone,
OrganizationCoordsysID: 0,
Definition: "",
Description: "any",
},
4326: {
Name: "WGS 84",
ID: 4326,
Organization: OREPSG,
OrganizationCoordsysID: 4326,
Definition: `
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
`,
Description: "World Geodetic System: WGS 84",
},
3857: {
Name: "WebMercator",
ID: 3857,
Organization: OREPSG,
OrganizationCoordsysID: 3857,
Definition: `
PROJCS["WGS 84 / Pseudo-Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["X",EAST],
AXIS["Y",NORTH],
EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],
AUTHORITY["EPSG","3857"]]
`,
Description: "WGS83 / Web Mercator",
},
}
// nonZeroFileExists checks if a file exists, and has a size greater then Zero
// and is not a directory before we try using it to prevent further errors.
func nonZeroFileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
if info.IsDir() {
return false
}
return info.Size() > 0
}
// Open will open or create the sqlite file, and return a new db handle to it.
func Open(filename string) (*Handle, error) {
var h = new(Handle)
db, err := sql.Open(SQLITE3, filename)
if err != nil {
return nil, err
}
h.DB = db
if err = initHandle(h); err != nil {
return nil, err
}
return h, nil
}
// New will create a new gpkg file and return a new db handle
func New(filename string) (*Handle, error) {
// First let's check to see if the file exists
// if it does error. We will not overwrite an files
if nonZeroFileExists(filename) {
return nil, os.ErrExist
}
return Open(filename)
}
// initHandle will setup up all the required tables and metadata for
// a new gpkg file.
func initHandle(h *Handle) error |
// AddGeometryTable will add the given features table to the metadata tables
// This should be called after creating the table.
func (h *Handle) AddGeometryTable(table TableDescription) error {
const (
validateSRSSQL = `
SELECT Count(*)
FROM gpkg_spatial_ref_sys
WHERE
srs_id=?
`
validateTableFieldSQL = `
SELECT "%v"
FROM "%v"
LIMIT 1
`
updateContentsTableSQL = `
INSERT INTO gpkg_contents(
table_name,
data_type,
identifier,
description,
srs_id
)
VALUES (?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
updateGeometryColumnsTableSQL = `
INSERT INTO gpkg_geometry_columns(
table_name,
column_name,
geometry_type_name,
srs_id,
z,
m
)
VALUES(?,?,?,?,?,?)
ON CONFLICT(table_name) DO NOTHING;
`
)
var (
count int
)
// Validate that the value already exists in the data base.
err := h.QueryRow(validateSRSSQL, table.SRS).Scan(&count)
if err != nil {
return err
}
if count == 0 {
// let's check known srs's to see if we have it and can add it.
srsdef, ok := KnownSRS[table.SRS]
if !ok {
return fmt.Errorf("unknown srs: %v", table.SRS)
}
if err = h.UpdateSRS(srsdef); err != nil {
return err
}
}
rows, err := h.Query(fmt.Sprintf(validateTableFieldSQL, table.GeometryField, table.Name))
if err != nil {
return fmt.Errorf("unknown table %v or field %v : %v", table.Name, table.GeometryField, err)
}
rows.Close()
_, err = h.Exec(updateContentsTableSQL, table.Name, DataTypeFeatures, table.ShortName, table.Description, table.SRS)
if err != nil {
return err
}
_, err = h.Exec(updateGeometryColumnsTableSQL, table.Name, table.GeometryField, table.GeometryType.String(), table.SRS, table.Z, table.M)
return err
}
// UpdateSRS will insert or update the srs table with the given srs
func (h *Handle) UpdateSRS(srss ...SpatialReferenceSystem) error {
const (
UpdateSQL = `
INSERT INTO gpkg_spatial_ref_sys(
srs_name,
srs_id,
organization,
organization_coordsys_id,
definition,
description
)
VALUES %v
ON CONFLICT(srs_id) DO NOTHING;
`
placeHolders = `(?,?,?,?,?,?) `
)
if len(srss) == 0 {
return nil
}
valuePlaceHolder := strings.Join(
strings.SplitN(
strings.Repeat(placeHolders, len(srss)),
" ",
len(srss),
),
",",
)
updateSQL := fmt.Sprintf(UpdateSQL, valuePlaceHolder)
values := make([]interface{}, 0, len(srss)*6)
for _, srs := range srss {
values = append(
values,
srs.Name,
srs.ID,
srs.Organization,
srs.OrganizationCoordsysID,
srs.Definition,
srs.Description,
)
}
_, err := h.Exec(updateSQL, values...)
return err
}
// UpdateGeometryExtent will modify the extent for the given table by adding the passed
// in extent to the extent of the table. Growing the extent as necessary.
func (h *Handle) UpdateGeometryExtent(tablename string, extent *geom.Extent) error {
if extent == nil {
return nil
}
var (
minx,
miny,
maxx,
maxy *float64
ext *geom.Extent
)
const (
selectSQL = `
SELECT
min_x,
min_y,
max_x,
max_y
FROM
gpkg_contents
WHERE
table_name = ?
`
updateSQL = `
UPDATE gpkg_contents
SET
min_x = ?,
min_y = ?,
max_x = ?,
max_y = ?
WHERE
table_name = ?
`
)
err := h.QueryRow(selectSQL, tablename).Scan(&minx, &miny, &maxx, &maxy)
if err != nil {
return err
}
if minx == nil || miny == nil || maxx == nil || maxy == nil {
ext = extent
} else {
ext = geom.NewExtent([2]float64{*minx, *miny}, [2]float64{*maxx, *maxy})
ext.Add(extent)
}
_, err = h.Exec(updateSQL, ext.MinX(), ext.MinY(), ext.MaxX(), ext.MaxY(), tablename)
return err
}
// CalculateGeometryExtent will grab all the geometries from the given table, use it
// to calculate the extent of all geometries in that table.
func (h *Handle) CalculateGeometryExtent(tablename string) (*geom.Extent, error) {
const (
selectGeomColSQL = `
SELECT
column_name
FROM
gpkg_geometry_columns
WHERE
table_name = ?
`
selectAllSQLFormat = ` SELECT "%v" FROM "%v"`
)
var (
columnName string
ext *geom.Extent
err error
rows *sql.Rows
sb StandardBinary
)
// First get the geometry column for table.
if err = h.QueryRow(selectGeomColSQL, tablename).Scan(&columnName); err != nil {
return nil, err
}
if rows, err = h.Query(fmt.Sprintf(selectAllSQLFormat, columnName, tablename)); err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
rows.Scan(&sb)
if geom.IsEmpty(sb.Geometry) {
continue
}
if ext == nil {
ext, err = geom.NewExtentFromGeometry(sb.Geometry)
if err != nil {
ext = nil
}
continue
}
ext.AddGeometry(sb.Geometry)
}
return ext, nil
}
| {
// Set the pragma's that we need to set for this file
_, err := h.Exec(initialSQL)
if err != nil {
return err
}
// Make sure the required metadata tables are available
for _, sql := range []string{TableSpatialRefSysSQL, TableContentsSQL, TableGeometryColumnsSQL} {
_, err := h.Exec(sql)
if err != nil {
return err
}
}
srss := make([]SpatialReferenceSystem, 0, len(KnownSRS))
// Now need to add SRS that we know about
for _, srs := range KnownSRS {
srss = append(srss, srs)
}
return h.UpdateSRS(srss...)
} | identifier_body |
test_wikipedia.py | import sys
import time
import datetime
import unittest
import re
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import pages
class WikipediaCommon(unittest.TestCase):
def setUp(self):
if browser == 'firefox':
|
elif browser == 'ie':
self.driver = webdriver.Ie()
elif browser == 'chrome':
self.driver = webdriver.Chrome(executable_path='/selenium_browser_drivers/chromedriver')
elif browser == 'safari':
self.driver = webdriver.Safari()
self.driver.set_window_position(20,20)
self.driver.set_window_size(1200,800)
else:
print('Browser parameter not recognized')
# The implicit wait is not normally necessary
#self.driver.implicitly_wait(5)
def tearDown(self):
self.driver.quit()
class TestHomePage(WikipediaCommon):
#@unittest.skip('')
def test_homepage_title(self):
self.open_home_page()
self.verify_home_page_title()
#@unittest.skip('')
# Safari doesn't display a tab unless multiple tabs are open.
def test_homepage_article_search(self):
search_term = "Buster Keaton"
self.open_home_page()
self.submit_search(search_term)
self.verify_article_page(search_term)
#@unittest.skip('')
def test_homepage_autosuggest(self):
self.open_home_page()
self.type_search("bust")
self.verify_suggestions_start_with("bust")
self.type_search("er")
self.verify_suggestions_start_with("buster")
#@unittest.skip('')
def test_homepage_english_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('English')
self.verify_main_page_text(
title_text="Wikipedia, the free encyclopedia",
body_text="the free encyclopedia that anyone can edit")
#@unittest.skip('')
def test_homepage_french_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Français')
self.verify_main_page_text(
title_text="Wikipédia, l'encyclopédie libre",
body_text="L'encyclopédie libre que chacun peut améliorer")
#@unittest.skip('')
def test_homepage_german_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Deutsch')
self.verify_main_page_text(
title_text="Wikipedia – Die freie Enzyklopädie",
body_text="Wikipedia ist ein Projekt zum Aufbau einer Enzyklopädie aus freien Inhalten")
#@unittest.skip('')
def test_homepage_spanish_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Español')
self.verify_main_page_text(
title_text="Wikipedia, la enciclopedia libre",
body_text="la enciclopedia de contenido libreque todos pueden editar")
####################
# Helper functions
####################
# create the home page object and open the page
def open_home_page(self):
self.home = pages.HomePage(self.driver)
self.home.open_home_page()
def verify_home_page_title(self):
self.assertEqual(self.home.get_page_title(), "Wikipedia")
def submit_search(self, search_term):
self.open_home_page()
self.home.enter_search_term(search_term)
self.home.submit_search()
def click_language_link(self, lang):
self.home.click_language_link(lang)
def verify_article_page(self, search_term):
# check the resulting page has the correct header & title
title_regex = "^{0}.*".format(search_term)
encoded_search_term = search_term.replace(" ", "_")
url_regex = ".*{0}$".format(encoded_search_term)
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search(title_regex, s),
"Page title '{}' does not start with search term '{}'".format(s, search_term))
s = article.get_current_url()
self.assertTrue(re.search(url_regex, s),
"URL '{}' does not end with search term '{}'".format(s, encoded_search_term))
self.assertEqual(article.get_article_header(), search_term)
# type text into search term, but not submit
def type_search(self, search_term):
self.home.enter_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.home.get_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' not found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertNotEqual(matching, [], error_msg.format(search_str, titles))
def verify_suggestions_do_not_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertEqual(matching, [], error_msg.format(search_str, titles))
'''
# Verify text on the main page
# Parameters
# title_text - expected text in the title (browser tab)
# body_text - expected text somewhere in body
#
# declare a main-page object
# asserts the title/tab is the expected text
# asserts the page body contains the expected text
def verify_main_page_text(self, title_text, body_text):
self.main = pages.MainPage(self.driver)
self.assertEqual(title_text, self.main.get_page_title())
self.assertIn(body_text, self.main.get_body_text().replace("\n", ''))
class TestMainPage(WikipediaCommon):
#@unittest.skip('')
def test_mainpage_article_search(self):
self.open_main_page()
self.search_for_article("Disneyland")
# check the resulting page has the correct header & title
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search("^Disneyland.*", s),
"Page title '{}' is unexpected".format(s))
s = article.get_current_url()
self.assertTrue(re.search(".*Disneyland$", s),
"URL '{}' is unexpected".format(s))
self.assertEqual(article.get_article_header(), "Disneyland")
#@unittest.skip('')
def test_mainpage_autosuggest(self):
if browser == "safari":
self.skipTest('main page search does not return autosuggest on Safari')
self.open_main_page()
self.type_search("dou")
self.verify_suggestions_start_with("dou")
self.type_search("glas") # extend search term
self.verify_suggestions_start_with("douglas")
####################
# Helper functions
####################
def open_main_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
def search_for_article(self, search_term):
self.main.open_article_by_search(search_term)
# Type a search term without submitting search
def type_search(self, search_term):
self.main.enter_header_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.main.get_header_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, expected_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertIn(expected_suggestion, titles)
def verify_suggestions_do_not_contain(self, omitted_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertNotIn(omitted_suggestion, titles)
'''
class TestArticlePage(WikipediaCommon):
#@unittest.skip('')
def test_infobox_for_country(self):
expected_values = (('Currency', "Sol"), ('Capital', "Lima"))
self.infobox_test("Peru", expected_values)
#@unittest.skip('')
def test_infobox_for_chemistry(self):
expected_values = (('atomic weight', "15.999"), ('Phase at STP', "gas"))
self.infobox_test("Oxygen", expected_values)
#@unittest.skip('')
def test_infobox_for_person(self):
expected_values = (('Born', '1889'), ('Relatives', 'Chaplin'))
self.infobox_test("Charlie Chaplin", expected_values)
#@unittest.skip('')
def test_infobox_for_movie(self):
expected_values = (('Directed', 'Alfred Hitchcock'), ('Starring', 'Cary Grant'))
self.infobox_test("north by northwest", expected_values)
#@unittest.skip('')
def test_infobox_for_holiday(self):
expected_values = (('Significance', 'pranks'), ('Frequency', 'Annual'))
self.infobox_test("april fool's day", expected_values)
#@unittest.skip('')
def test_infobox_for_song(self):
expected_values = (('Recorded', '1968'), ('Songwriter(s)', 'Lennon'))
self.infobox_test("rocky raccoon", expected_values)
#@unittest.skip('')
def test_compare_toc_and_headlines(self):
self.open_article_by_search("Douglas Adams")
self.verify_article_toc_and_headers()
####################
# Helper functions
####################
def open_article_by_search(self, search_term):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.open_article_by_search(search_term)
# Template for testing info box contents
# Parameters:
# search_term: search text to open an article.
# assumes search does not open a disambiguration page
# expected_value: list of (label, value) tuples where
# label is a string contained in the left side of a row in info box
# value is a string contained in value on the right side
def infobox_test(self, search_term, expected_values):
self.open_article_by_search(search_term)
article = pages.ArticlePage(self.driver)
infobox = article.get_infobox_contents()
# check expected values are in info box
for (label, expected_value) in expected_values:
found_value = article.get_value_from_infobox_contents(infobox, label)
self.assertIn(expected_value, found_value)
def verify_article_toc_and_headers(self):
article = pages.ArticlePage(self.driver)
toc = article.get_toc_items_text()
self.assertTrue(len(toc) > 0, "TOC is empty")
headlines = article.get_headlines_text()
self.assertTrue(len(headlines) > 0, "No headlines found")
self.assertEqual(toc, headlines)
class TestCurrentEventsPage(WikipediaCommon):
#@unittest.skip('')
def test_main_current_events_page(self):
self.navigate_to_current_events_page()
now = datetime.datetime.now()
self.verify_date_headers(
now.strftime('%B'), now.strftime('%Y'), days_ascending=False)
#@unittest.skip('')
def test_main_archived_current_events_page(self):
if browser == "safari":
self.skipTest('Safari does not locate month_year link')
self.navigate_to_current_events_page()
month, year = self.select_random_month_year()
self.verify_date_headers(month, year, days_ascending=True)
####################
# Helper functions
####################
def navigate_to_current_events_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.click_left_panel_link("Current events")
# Randomly select a month from the archives at the bottom of the page
# Earliest expected archive is July 1994
# Latest expected archive is the current month
# Returns the tuple (month, year) which was selected
def select_random_month_year(self):
current_year = datetime.datetime.now().year
current_month = datetime.datetime.now().month
year = str(random.randint(1994, current_year))
first_month = 1
last_month = 12
# adjust month range, if necessary
if year == 1994:
first_month = 7
elif year == current_year:
last_month = current_month
month = self.main.month_name(random.randint(first_month, last_month))
print("Verifying {} {}".format(month, year))
ce = pages.CurrentEventsPage(self.driver)
ce.click_link_archived_month(month, year)
return (month, year)
# Verify the headers for dates
# verify headers are the expected format (ex: Janurary 1, 1999 (Monday))
# verify dates are in sequence
# Parameters
# month - string, full spelling of a month
# year - string, YYYY format
# days_ascending - boolean, dates should be in ascending order if True
# in descending order if False
def verify_date_headers(self, month, year, days_ascending=True):
ce = pages.CurrentEventsPage(self.driver)
dates = ce.get_date_headers()
days = []
for date in dates:
self.assertRegex(date, ce.long_date_regex) # header is expected format
date_parsed = ce.parse_date_header(date)
self.assertEqual(month, date_parsed[0]) # expected month
self.assertEqual(year, date_parsed[2]) # expected year
days.append( (int(date_parsed[2]),
ce.month_index(date_parsed[0]),
int(date_parsed[1])) )
# days are in expected sequence
self.assertEqual(days, sorted(days, reverse=(not days_ascending)))
if __name__ == '__main__':
supported_browsers = ['firefox', 'ie', 'chrome', 'safari']
if (len(sys.argv) == 2) and (sys.argv[1] in supported_browsers):
global browser
browser = sys.argv[1]
del sys.argv[1] # remove so that unittest doesn't attempt to process argument
# Gather one test suite
#tests = unittest.TestLoader().loadTestsFromTestCase(TestCurrentEventsPage)
# Gather set of test suites
suite_list = [
TestHomePage,
TestMainPage,
TestArticlePage,
TestCurrentEventsPage,
]
suites = map(unittest.TestLoader().loadTestsFromTestCase, suite_list)
tests = unittest.TestSuite(suites)
# Run gathered tests
unittest.TextTestRunner(verbosity=2).run(tests)
else:
print("Argument missing or invalid. Expected one of",str(supported_browsers)[1:-1])
| self.driver = webdriver.Firefox(executable_path='/selenium_browser_drivers/geckodriver') | conditional_block |
test_wikipedia.py | import sys
import time
import datetime
import unittest
import re
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import pages
class WikipediaCommon(unittest.TestCase):
def setUp(self):
if browser == 'firefox':
self.driver = webdriver.Firefox(executable_path='/selenium_browser_drivers/geckodriver')
elif browser == 'ie':
self.driver = webdriver.Ie()
elif browser == 'chrome':
self.driver = webdriver.Chrome(executable_path='/selenium_browser_drivers/chromedriver')
elif browser == 'safari':
self.driver = webdriver.Safari()
self.driver.set_window_position(20,20)
self.driver.set_window_size(1200,800)
else:
print('Browser parameter not recognized')
# The implicit wait is not normally necessary
#self.driver.implicitly_wait(5)
def tearDown(self):
self.driver.quit()
class TestHomePage(WikipediaCommon):
#@unittest.skip('')
def test_homepage_title(self):
self.open_home_page()
self.verify_home_page_title()
#@unittest.skip('')
# Safari doesn't display a tab unless multiple tabs are open.
def test_homepage_article_search(self):
search_term = "Buster Keaton"
self.open_home_page()
self.submit_search(search_term)
self.verify_article_page(search_term)
#@unittest.skip('')
def test_homepage_autosuggest(self):
self.open_home_page()
self.type_search("bust")
self.verify_suggestions_start_with("bust")
self.type_search("er")
self.verify_suggestions_start_with("buster")
#@unittest.skip('')
def test_homepage_english_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('English')
self.verify_main_page_text(
title_text="Wikipedia, the free encyclopedia",
body_text="the free encyclopedia that anyone can edit")
#@unittest.skip('')
def test_homepage_french_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Français')
self.verify_main_page_text(
title_text="Wikipédia, l'encyclopédie libre",
body_text="L'encyclopédie libre que chacun peut améliorer")
#@unittest.skip('')
def test_homepage_german_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Deutsch')
self.verify_main_page_text(
title_text="Wikipedia – Die freie Enzyklopädie",
body_text="Wikipedia ist ein Projekt zum Aufbau einer Enzyklopädie aus freien Inhalten")
#@unittest.skip('')
def test_homepage_spanish_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Español')
self.verify_main_page_text(
title_text="Wikipedia, la enciclopedia libre",
body_text="la enciclopedia de contenido libreque todos pueden editar")
####################
# Helper functions
####################
# create the home page object and open the page
def open_home_page(self):
self.home = pages.HomePage(self.driver)
self.home.open_home_page()
def verify_home_page_title(self):
self.assertEqual(self.home.get_page_title(), "Wikipedia")
def submit_search(self, search_term):
self.open_home_page()
self.home.enter_search_term(search_term)
self.home.submit_search()
def click_language_link(self, lang):
self.home.click_language_link(lang)
def verify_article_page(self, search_term):
# check the resulting page has the correct header & title
title_rege | text into search term, but not submit
def type_search(self, search_term):
self.home.enter_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.home.get_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' not found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertNotEqual(matching, [], error_msg.format(search_str, titles))
def verify_suggestions_do_not_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertEqual(matching, [], error_msg.format(search_str, titles))
'''
# Verify text on the main page
# Parameters
# title_text - expected text in the title (browser tab)
# body_text - expected text somewhere in body
#
# declare a main-page object
# asserts the title/tab is the expected text
# asserts the page body contains the expected text
def verify_main_page_text(self, title_text, body_text):
self.main = pages.MainPage(self.driver)
self.assertEqual(title_text, self.main.get_page_title())
self.assertIn(body_text, self.main.get_body_text().replace("\n", ''))
class TestMainPage(WikipediaCommon):
#@unittest.skip('')
def test_mainpage_article_search(self):
self.open_main_page()
self.search_for_article("Disneyland")
# check the resulting page has the correct header & title
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search("^Disneyland.*", s),
"Page title '{}' is unexpected".format(s))
s = article.get_current_url()
self.assertTrue(re.search(".*Disneyland$", s),
"URL '{}' is unexpected".format(s))
self.assertEqual(article.get_article_header(), "Disneyland")
#@unittest.skip('')
def test_mainpage_autosuggest(self):
if browser == "safari":
self.skipTest('main page search does not return autosuggest on Safari')
self.open_main_page()
self.type_search("dou")
self.verify_suggestions_start_with("dou")
self.type_search("glas") # extend search term
self.verify_suggestions_start_with("douglas")
####################
# Helper functions
####################
def open_main_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
def search_for_article(self, search_term):
self.main.open_article_by_search(search_term)
# Type a search term without submitting search
def type_search(self, search_term):
self.main.enter_header_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.main.get_header_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, expected_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertIn(expected_suggestion, titles)
def verify_suggestions_do_not_contain(self, omitted_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertNotIn(omitted_suggestion, titles)
'''
class TestArticlePage(WikipediaCommon):
#@unittest.skip('')
def test_infobox_for_country(self):
expected_values = (('Currency', "Sol"), ('Capital', "Lima"))
self.infobox_test("Peru", expected_values)
#@unittest.skip('')
def test_infobox_for_chemistry(self):
expected_values = (('atomic weight', "15.999"), ('Phase at STP', "gas"))
self.infobox_test("Oxygen", expected_values)
#@unittest.skip('')
def test_infobox_for_person(self):
expected_values = (('Born', '1889'), ('Relatives', 'Chaplin'))
self.infobox_test("Charlie Chaplin", expected_values)
#@unittest.skip('')
def test_infobox_for_movie(self):
expected_values = (('Directed', 'Alfred Hitchcock'), ('Starring', 'Cary Grant'))
self.infobox_test("north by northwest", expected_values)
#@unittest.skip('')
def test_infobox_for_holiday(self):
expected_values = (('Significance', 'pranks'), ('Frequency', 'Annual'))
self.infobox_test("april fool's day", expected_values)
#@unittest.skip('')
def test_infobox_for_song(self):
expected_values = (('Recorded', '1968'), ('Songwriter(s)', 'Lennon'))
self.infobox_test("rocky raccoon", expected_values)
#@unittest.skip('')
def test_compare_toc_and_headlines(self):
self.open_article_by_search("Douglas Adams")
self.verify_article_toc_and_headers()
####################
# Helper functions
####################
def open_article_by_search(self, search_term):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.open_article_by_search(search_term)
# Template for testing info box contents
# Parameters:
# search_term: search text to open an article.
# assumes search does not open a disambiguration page
# expected_value: list of (label, value) tuples where
# label is a string contained in the left side of a row in info box
# value is a string contained in value on the right side
def infobox_test(self, search_term, expected_values):
self.open_article_by_search(search_term)
article = pages.ArticlePage(self.driver)
infobox = article.get_infobox_contents()
# check expected values are in info box
for (label, expected_value) in expected_values:
found_value = article.get_value_from_infobox_contents(infobox, label)
self.assertIn(expected_value, found_value)
def verify_article_toc_and_headers(self):
article = pages.ArticlePage(self.driver)
toc = article.get_toc_items_text()
self.assertTrue(len(toc) > 0, "TOC is empty")
headlines = article.get_headlines_text()
self.assertTrue(len(headlines) > 0, "No headlines found")
self.assertEqual(toc, headlines)
class TestCurrentEventsPage(WikipediaCommon):
#@unittest.skip('')
def test_main_current_events_page(self):
self.navigate_to_current_events_page()
now = datetime.datetime.now()
self.verify_date_headers(
now.strftime('%B'), now.strftime('%Y'), days_ascending=False)
#@unittest.skip('')
def test_main_archived_current_events_page(self):
if browser == "safari":
self.skipTest('Safari does not locate month_year link')
self.navigate_to_current_events_page()
month, year = self.select_random_month_year()
self.verify_date_headers(month, year, days_ascending=True)
####################
# Helper functions
####################
def navigate_to_current_events_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.click_left_panel_link("Current events")
# Randomly select a month from the archives at the bottom of the page
# Earliest expected archive is July 1994
# Latest expected archive is the current month
# Returns the tuple (month, year) which was selected
def select_random_month_year(self):
current_year = datetime.datetime.now().year
current_month = datetime.datetime.now().month
year = str(random.randint(1994, current_year))
first_month = 1
last_month = 12
# adjust month range, if necessary
if year == 1994:
first_month = 7
elif year == current_year:
last_month = current_month
month = self.main.month_name(random.randint(first_month, last_month))
print("Verifying {} {}".format(month, year))
ce = pages.CurrentEventsPage(self.driver)
ce.click_link_archived_month(month, year)
return (month, year)
# Verify the headers for dates
# verify headers are the expected format (ex: Janurary 1, 1999 (Monday))
# verify dates are in sequence
# Parameters
# month - string, full spelling of a month
# year - string, YYYY format
# days_ascending - boolean, dates should be in ascending order if True
# in descending order if False
def verify_date_headers(self, month, year, days_ascending=True):
ce = pages.CurrentEventsPage(self.driver)
dates = ce.get_date_headers()
days = []
for date in dates:
self.assertRegex(date, ce.long_date_regex) # header is expected format
date_parsed = ce.parse_date_header(date)
self.assertEqual(month, date_parsed[0]) # expected month
self.assertEqual(year, date_parsed[2]) # expected year
days.append( (int(date_parsed[2]),
ce.month_index(date_parsed[0]),
int(date_parsed[1])) )
# days are in expected sequence
self.assertEqual(days, sorted(days, reverse=(not days_ascending)))
if __name__ == '__main__':
supported_browsers = ['firefox', 'ie', 'chrome', 'safari']
if (len(sys.argv) == 2) and (sys.argv[1] in supported_browsers):
global browser
browser = sys.argv[1]
del sys.argv[1] # remove so that unittest doesn't attempt to process argument
# Gather one test suite
#tests = unittest.TestLoader().loadTestsFromTestCase(TestCurrentEventsPage)
# Gather set of test suites
suite_list = [
TestHomePage,
TestMainPage,
TestArticlePage,
TestCurrentEventsPage,
]
suites = map(unittest.TestLoader().loadTestsFromTestCase, suite_list)
tests = unittest.TestSuite(suites)
# Run gathered tests
unittest.TextTestRunner(verbosity=2).run(tests)
else:
print("Argument missing or invalid. Expected one of",str(supported_browsers)[1:-1])
| x = "^{0}.*".format(search_term)
encoded_search_term = search_term.replace(" ", "_")
url_regex = ".*{0}$".format(encoded_search_term)
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search(title_regex, s),
"Page title '{}' does not start with search term '{}'".format(s, search_term))
s = article.get_current_url()
self.assertTrue(re.search(url_regex, s),
"URL '{}' does not end with search term '{}'".format(s, encoded_search_term))
self.assertEqual(article.get_article_header(), search_term)
# type | identifier_body |
test_wikipedia.py | import sys
import time
import datetime
import unittest
import re
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import pages
class WikipediaCommon(unittest.TestCase):
def setUp(self):
if browser == 'firefox':
self.driver = webdriver.Firefox(executable_path='/selenium_browser_drivers/geckodriver')
elif browser == 'ie':
self.driver = webdriver.Ie()
elif browser == 'chrome':
self.driver = webdriver.Chrome(executable_path='/selenium_browser_drivers/chromedriver')
elif browser == 'safari':
self.driver = webdriver.Safari()
self.driver.set_window_position(20,20)
self.driver.set_window_size(1200,800)
else:
print('Browser parameter not recognized')
# The implicit wait is not normally necessary
#self.driver.implicitly_wait(5)
def tearDown(self):
self.driver.quit()
class TestHomePage(WikipediaCommon):
#@unittest.skip('')
def test_homepage_title(self):
self.open_home_page()
self.verify_home_page_title()
#@unittest.skip('')
# Safari doesn't display a tab unless multiple tabs are open.
def test_homepage_article_search(self):
search_term = "Buster Keaton"
self.open_home_page()
self.submit_search(search_term)
self.verify_article_page(search_term)
#@unittest.skip('')
def test_homepage_autosuggest(self):
self.open_home_page()
self.type_search("bust")
self.verify_suggestions_start_with("bust")
self.type_search("er")
self.verify_suggestions_start_with("buster")
#@unittest.skip('')
def test_homepage_english_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('English')
self.verify_main_page_text(
title_text="Wikipedia, the free encyclopedia",
body_text="the free encyclopedia that anyone can edit")
#@unittest.skip('')
def test_homepage_french_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Français')
self.verify_main_page_text(
title_text="Wikipédia, l'encyclopédie libre",
body_text="L'encyclopédie libre que chacun peut améliorer")
#@unittest.skip('')
def test_homepage_german_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Deutsch')
self.verify_main_page_text(
title_text="Wikipedia – Die freie Enzyklopädie",
body_text="Wikipedia ist ein Projekt zum Aufbau einer Enzyklopädie aus freien Inhalten")
#@unittest.skip('')
def test_homepage_spanish_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Español')
self.verify_main_page_text(
title_text="Wikipedia, la enciclopedia libre",
body_text="la enciclopedia de contenido libreque todos pueden editar")
####################
# Helper functions
####################
# create the home page object and open the page
def open_home_page(self):
self.home = pages.HomePage(self.driver)
self.home.open_home_page()
def verify_home_page_title(self):
self.assertEqual(self.home.get_page_title(), "Wikipedia")
def submit_search(self, search_term):
self.open_home_page()
self.home.enter_search_term(search_term)
self.home.submit_search()
def click_language_link(self, lang):
self.home.click_language_link(lang)
def verify_article_page(self, search_term):
# check the resulting page has the correct header & title
title_regex = "^{0}.*".format(search_term)
encoded_search_term = search_term.replace(" ", "_")
url_regex = ".*{0}$".format(encoded_search_term)
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search(title_regex, s),
"Page title '{}' does not start with search term '{}'".format(s, search_term))
s = article.get_current_url()
self.assertTrue(re.search(url_regex, s),
"URL '{}' does not end with search term '{}'".format(s, encoded_search_term))
self.assertEqual(article.get_article_header(), search_term)
# type text into search term, but not submit
def type_search(self, search_term):
self.home.enter_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.home.get_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' not found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertNotEqual(matching, [], error_msg.format(search_str, titles))
def verify_suggestions_do_not_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertEqual(matching, [], error_msg.format(search_str, titles))
'''
# Verify text on the main page
# Parameters
# title_text - expected text in the title (browser tab)
# body_text - expected text somewhere in body
#
# declare a main-page object
# asserts the title/tab is the expected text
# asserts the page body contains the expected text
def verify_main_page_text(self, title_text, body_text):
self.main = pages.MainPage(self.driver)
self.assertEqual(title_text, self.main.get_page_title())
self.assertIn(body_text, self.main.get_body_text().replace("\n", ''))
class TestMainPage(WikipediaCommon):
#@unittest.skip('')
def test_mainpage_article_search(self):
self.open_main_page()
self.search_for_article("Disneyland")
# check the resulting page has the correct header & title
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search("^Disneyland.*", s),
"Page title '{}' is unexpected".format(s))
s = article.get_current_url()
self.assertTrue(re.search(".*Disneyland$", s),
"URL '{}' is unexpected".format(s))
self.assertEqual(article.get_article_header(), "Disneyland")
#@unittest.skip('')
def test_mainpage_autosuggest(self):
if browser == "safari":
self.skipTest('main page search does not return autosuggest on Safari')
self.open_main_page()
self.type_search("dou")
self.verify_suggestions_start_with("dou")
self.type_search("glas") # extend search term
self.verify_suggestions_start_with("douglas")
####################
# Helper functions
####################
def open_main_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
def search_for_article(self, search_term):
self.main.open_article_by_search(search_term)
# Type a search term without submitting search
def type_search(self, search_term):
self.main.enter_header_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.main.get_header_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, expected_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertIn(expected_suggestion, titles)
def verify_suggestions_do_not_contain(self, omitted_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertNotIn(omitted_suggestion, titles)
'''
class TestArticlePage(WikipediaCommon):
#@unittest.skip('')
def test_infobox_for_country(self):
expected_values = (('Currency', "Sol"), ('Capital', "Lima"))
self.infobox_test("Peru", expected_values)
#@unittest.skip('')
def test_infobox_for_chemistry(self):
expected_values = (('atomic weight', "15.999"), ('Phase at STP', "gas"))
self.infobox_test("Oxygen", expected_values)
#@unittest.skip('')
def test_infobox_for_person(self):
expected_values = (('Born', '1889'), ('Relatives', 'Chaplin'))
self.infobox_test("Charlie Chaplin", expected_values)
#@unittest.skip('')
def test_infobox_for_movie(self):
expected_values = (('Directed', 'Alfred Hitchcock'), ('Starring', 'Cary Grant'))
self.infobox_test("north by northwest", expected_values)
#@unittest.skip('')
def test_infobox_for_holiday(self):
expected_values = (('Significance', 'pranks'), ('Frequency', 'Annual'))
self.infobox_test("april fool's day", expected_values)
#@unittest.skip('')
def test_infobox_for_song(self):
expected_values = (('Recorded', '1968'), ('Songwriter(s)', 'Lennon'))
self.infobox_test("rocky raccoon", expected_values)
#@unittest.skip('')
def test_compare_toc_and_headlines(self):
self.open_article_by_search("Douglas Adams")
self.verify_article_toc_and_headers()
####################
# Helper functions
####################
def open_article_by_search(self, search_term):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.open_article_by_search(search_term)
# Template for testing info box contents
# Parameters:
# search_term: search text to open an article.
# assumes search does not open a disambiguration page
# expected_value: list of (label, value) tuples where
# label is a string contained in the left side of a row in info box
# value is a string contained in value on the right side
def infobox_te | rch_term, expected_values):
self.open_article_by_search(search_term)
article = pages.ArticlePage(self.driver)
infobox = article.get_infobox_contents()
# check expected values are in info box
for (label, expected_value) in expected_values:
found_value = article.get_value_from_infobox_contents(infobox, label)
self.assertIn(expected_value, found_value)
def verify_article_toc_and_headers(self):
article = pages.ArticlePage(self.driver)
toc = article.get_toc_items_text()
self.assertTrue(len(toc) > 0, "TOC is empty")
headlines = article.get_headlines_text()
self.assertTrue(len(headlines) > 0, "No headlines found")
self.assertEqual(toc, headlines)
class TestCurrentEventsPage(WikipediaCommon):
#@unittest.skip('')
def test_main_current_events_page(self):
self.navigate_to_current_events_page()
now = datetime.datetime.now()
self.verify_date_headers(
now.strftime('%B'), now.strftime('%Y'), days_ascending=False)
#@unittest.skip('')
def test_main_archived_current_events_page(self):
if browser == "safari":
self.skipTest('Safari does not locate month_year link')
self.navigate_to_current_events_page()
month, year = self.select_random_month_year()
self.verify_date_headers(month, year, days_ascending=True)
####################
# Helper functions
####################
def navigate_to_current_events_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.click_left_panel_link("Current events")
# Randomly select a month from the archives at the bottom of the page
# Earliest expected archive is July 1994
# Latest expected archive is the current month
# Returns the tuple (month, year) which was selected
def select_random_month_year(self):
current_year = datetime.datetime.now().year
current_month = datetime.datetime.now().month
year = str(random.randint(1994, current_year))
first_month = 1
last_month = 12
# adjust month range, if necessary
if year == 1994:
first_month = 7
elif year == current_year:
last_month = current_month
month = self.main.month_name(random.randint(first_month, last_month))
print("Verifying {} {}".format(month, year))
ce = pages.CurrentEventsPage(self.driver)
ce.click_link_archived_month(month, year)
return (month, year)
# Verify the headers for dates
# verify headers are the expected format (ex: Janurary 1, 1999 (Monday))
# verify dates are in sequence
# Parameters
# month - string, full spelling of a month
# year - string, YYYY format
# days_ascending - boolean, dates should be in ascending order if True
# in descending order if False
def verify_date_headers(self, month, year, days_ascending=True):
ce = pages.CurrentEventsPage(self.driver)
dates = ce.get_date_headers()
days = []
for date in dates:
self.assertRegex(date, ce.long_date_regex) # header is expected format
date_parsed = ce.parse_date_header(date)
self.assertEqual(month, date_parsed[0]) # expected month
self.assertEqual(year, date_parsed[2]) # expected year
days.append( (int(date_parsed[2]),
ce.month_index(date_parsed[0]),
int(date_parsed[1])) )
# days are in expected sequence
self.assertEqual(days, sorted(days, reverse=(not days_ascending)))
if __name__ == '__main__':
supported_browsers = ['firefox', 'ie', 'chrome', 'safari']
if (len(sys.argv) == 2) and (sys.argv[1] in supported_browsers):
global browser
browser = sys.argv[1]
del sys.argv[1] # remove so that unittest doesn't attempt to process argument
# Gather one test suite
#tests = unittest.TestLoader().loadTestsFromTestCase(TestCurrentEventsPage)
# Gather set of test suites
suite_list = [
TestHomePage,
TestMainPage,
TestArticlePage,
TestCurrentEventsPage,
]
suites = map(unittest.TestLoader().loadTestsFromTestCase, suite_list)
tests = unittest.TestSuite(suites)
# Run gathered tests
unittest.TextTestRunner(verbosity=2).run(tests)
else:
print("Argument missing or invalid. Expected one of",str(supported_browsers)[1:-1])
| st(self, sea | identifier_name |
test_wikipedia.py | import sys
import time
import datetime
import unittest
import re
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import pages
class WikipediaCommon(unittest.TestCase):
def setUp(self):
if browser == 'firefox':
self.driver = webdriver.Firefox(executable_path='/selenium_browser_drivers/geckodriver')
elif browser == 'ie':
self.driver = webdriver.Ie()
elif browser == 'chrome':
self.driver = webdriver.Chrome(executable_path='/selenium_browser_drivers/chromedriver')
elif browser == 'safari':
self.driver = webdriver.Safari()
self.driver.set_window_position(20,20)
self.driver.set_window_size(1200,800)
else:
print('Browser parameter not recognized')
# The implicit wait is not normally necessary
#self.driver.implicitly_wait(5)
def tearDown(self):
self.driver.quit()
class TestHomePage(WikipediaCommon):
#@unittest.skip('')
def test_homepage_title(self):
self.open_home_page()
self.verify_home_page_title()
#@unittest.skip('')
# Safari doesn't display a tab unless multiple tabs are open.
def test_homepage_article_search(self):
search_term = "Buster Keaton"
self.open_home_page()
self.submit_search(search_term)
self.verify_article_page(search_term)
#@unittest.skip('')
def test_homepage_autosuggest(self):
self.open_home_page()
self.type_search("bust")
self.verify_suggestions_start_with("bust")
self.type_search("er")
self.verify_suggestions_start_with("buster")
#@unittest.skip('')
def test_homepage_english_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('English')
self.verify_main_page_text(
title_text="Wikipedia, the free encyclopedia",
body_text="the free encyclopedia that anyone can edit")
#@unittest.skip('')
def test_homepage_french_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Français')
self.verify_main_page_text(
title_text="Wikipédia, l'encyclopédie libre",
body_text="L'encyclopédie libre que chacun peut améliorer")
#@unittest.skip('')
def test_homepage_german_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Deutsch')
self.verify_main_page_text(
title_text="Wikipedia – Die freie Enzyklopädie",
body_text="Wikipedia ist ein Projekt zum Aufbau einer Enzyklopädie aus freien Inhalten")
#@unittest.skip('')
def test_homepage_spanish_link(self):
if browser == "safari":
self.skipTest('Safari does not click on home page language link as expected')
self.open_home_page()
self.click_language_link('Español')
self.verify_main_page_text(
title_text="Wikipedia, la enciclopedia libre",
body_text="la enciclopedia de contenido libreque todos pueden editar")
####################
# Helper functions
####################
# create the home page object and open the page
def open_home_page(self):
self.home = pages.HomePage(self.driver)
self.home.open_home_page()
def verify_home_page_title(self):
self.assertEqual(self.home.get_page_title(), "Wikipedia")
def submit_search(self, search_term):
self.open_home_page()
self.home.enter_search_term(search_term)
self.home.submit_search()
def click_language_link(self, lang):
self.home.click_language_link(lang)
def verify_article_page(self, search_term):
# check the resulting page has the correct header & title
title_regex = "^{0}.*".format(search_term)
encoded_search_term = search_term.replace(" ", "_")
url_regex = ".*{0}$".format(encoded_search_term)
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search(title_regex, s),
"Page title '{}' does not start with search term '{}'".format(s, search_term))
s = article.get_current_url()
self.assertTrue(re.search(url_regex, s),
"URL '{}' does not end with search term '{}'".format(s, encoded_search_term))
self.assertEqual(article.get_article_header(), search_term)
# type text into search term, but not submit
def type_search(self, search_term):
self.home.enter_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.home.get_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' not found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertNotEqual(matching, [], error_msg.format(search_str, titles))
def verify_suggestions_do_not_contain(self, search_str):
suggestions = self.home.get_search_suggestions()
titles = [suggestion['title'] for suggestion in suggestions]
error_msg = "'{}' found in titles {}"
matching = [title for title in titles if title.startswith(search_str)]
self.assertEqual(matching, [], error_msg.format(search_str, titles))
'''
# Verify text on the main page
# Parameters
# title_text - expected text in the title (browser tab)
# body_text - expected text somewhere in body
#
# declare a main-page object
# asserts the title/tab is the expected text
# asserts the page body contains the expected text
def verify_main_page_text(self, title_text, body_text):
self.main = pages.MainPage(self.driver)
self.assertEqual(title_text, self.main.get_page_title())
self.assertIn(body_text, self.main.get_body_text().replace("\n", ''))
class TestMainPage(WikipediaCommon):
#@unittest.skip('')
def test_mainpage_article_search(self):
self.open_main_page()
self.search_for_article("Disneyland")
# check the resulting page has the correct header & title
article = pages.ArticlePage(self.driver)
s = article.get_page_title()
self.assertTrue(re.search("^Disneyland.*", s),
"Page title '{}' is unexpected".format(s))
s = article.get_current_url()
self.assertTrue(re.search(".*Disneyland$", s),
"URL '{}' is unexpected".format(s))
self.assertEqual(article.get_article_header(), "Disneyland")
#@unittest.skip('')
def test_mainpage_autosuggest(self):
if browser == "safari":
self.skipTest('main page search does not return autosuggest on Safari')
self.open_main_page()
self.type_search("dou")
self.verify_suggestions_start_with("dou")
self.type_search("glas") # extend search term
self.verify_suggestions_start_with("douglas")
####################
# Helper functions
####################
def open_main_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
def search_for_article(self, search_term):
self.main.open_article_by_search(search_term)
# Type a search term without submitting search
def type_search(self, search_term):
self.main.enter_header_search_term(search_term)
def verify_suggestions_start_with(self, search_term):
prefix = search_term.lower()
for suggestion in self.main.get_header_search_suggestions():
title = suggestion['title'].lower()
self.assertTrue(title.startswith(prefix),
"Suggestion '{}' expected to start with '{}'".format(title, prefix))
'''
NO LONGER USED. REMOVED IN THE FUTURE
def verify_suggestions_contain(self, expected_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertIn(expected_suggestion, titles)
def verify_suggestions_do_not_contain(self, omitted_suggestion):
titles = []
for suggestion in self.main.get_header_search_suggestions():
titles.append(suggestion['title'])
self.assertNotIn(omitted_suggestion, titles)
'''
class TestArticlePage(WikipediaCommon):
#@unittest.skip('')
def test_infobox_for_country(self):
expected_values = (('Currency', "Sol"), ('Capital', "Lima"))
self.infobox_test("Peru", expected_values)
#@unittest.skip('')
def test_infobox_for_chemistry(self):
expected_values = (('atomic weight', "15.999"), ('Phase at STP', "gas"))
self.infobox_test("Oxygen", expected_values)
#@unittest.skip('')
def test_infobox_for_person(self):
expected_values = (('Born', '1889'), ('Relatives', 'Chaplin'))
self.infobox_test("Charlie Chaplin", expected_values)
#@unittest.skip('')
def test_infobox_for_movie(self):
expected_values = (('Directed', 'Alfred Hitchcock'), ('Starring', 'Cary Grant'))
self.infobox_test("north by northwest", expected_values)
#@unittest.skip('')
def test_infobox_for_holiday(self):
expected_values = (('Significance', 'pranks'), ('Frequency', 'Annual'))
self.infobox_test("april fool's day", expected_values)
#@unittest.skip('')
def test_infobox_for_song(self):
expected_values = (('Recorded', '1968'), ('Songwriter(s)', 'Lennon'))
self.infobox_test("rocky raccoon", expected_values)
#@unittest.skip('')
def test_compare_toc_and_headlines(self):
self.open_article_by_search("Douglas Adams")
self.verify_article_toc_and_headers()
####################
# Helper functions
####################
def open_article_by_search(self, search_term):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.open_article_by_search(search_term)
# Template for testing info box contents
# Parameters:
# search_term: search text to open an article.
# assumes search does not open a disambiguration page
# expected_value: list of (label, value) tuples where
# label is a string contained in the left side of a row in info box
# value is a string contained in value on the right side
def infobox_test(self, search_term, expected_values):
self.open_article_by_search(search_term)
article = pages.ArticlePage(self.driver)
infobox = article.get_infobox_contents()
# check expected values are in info box
for (label, expected_value) in expected_values:
found_value = article.get_value_from_infobox_contents(infobox, label)
self.assertIn(expected_value, found_value)
def verify_article_toc_and_headers(self):
article = pages.ArticlePage(self.driver)
toc = article.get_toc_items_text()
self.assertTrue(len(toc) > 0, "TOC is empty")
headlines = article.get_headlines_text()
self.assertTrue(len(headlines) > 0, "No headlines found")
self.assertEqual(toc, headlines)
| def test_main_current_events_page(self):
self.navigate_to_current_events_page()
now = datetime.datetime.now()
self.verify_date_headers(
now.strftime('%B'), now.strftime('%Y'), days_ascending=False)
#@unittest.skip('')
def test_main_archived_current_events_page(self):
if browser == "safari":
self.skipTest('Safari does not locate month_year link')
self.navigate_to_current_events_page()
month, year = self.select_random_month_year()
self.verify_date_headers(month, year, days_ascending=True)
####################
# Helper functions
####################
def navigate_to_current_events_page(self):
self.main = pages.MainPage(self.driver)
self.main.open_main_page()
self.main.click_left_panel_link("Current events")
# Randomly select a month from the archives at the bottom of the page
# Earliest expected archive is July 1994
# Latest expected archive is the current month
# Returns the tuple (month, year) which was selected
def select_random_month_year(self):
current_year = datetime.datetime.now().year
current_month = datetime.datetime.now().month
year = str(random.randint(1994, current_year))
first_month = 1
last_month = 12
# adjust month range, if necessary
if year == 1994:
first_month = 7
elif year == current_year:
last_month = current_month
month = self.main.month_name(random.randint(first_month, last_month))
print("Verifying {} {}".format(month, year))
ce = pages.CurrentEventsPage(self.driver)
ce.click_link_archived_month(month, year)
return (month, year)
# Verify the headers for dates
# verify headers are the expected format (ex: Janurary 1, 1999 (Monday))
# verify dates are in sequence
# Parameters
# month - string, full spelling of a month
# year - string, YYYY format
# days_ascending - boolean, dates should be in ascending order if True
# in descending order if False
def verify_date_headers(self, month, year, days_ascending=True):
ce = pages.CurrentEventsPage(self.driver)
dates = ce.get_date_headers()
days = []
for date in dates:
self.assertRegex(date, ce.long_date_regex) # header is expected format
date_parsed = ce.parse_date_header(date)
self.assertEqual(month, date_parsed[0]) # expected month
self.assertEqual(year, date_parsed[2]) # expected year
days.append( (int(date_parsed[2]),
ce.month_index(date_parsed[0]),
int(date_parsed[1])) )
# days are in expected sequence
self.assertEqual(days, sorted(days, reverse=(not days_ascending)))
if __name__ == '__main__':
supported_browsers = ['firefox', 'ie', 'chrome', 'safari']
if (len(sys.argv) == 2) and (sys.argv[1] in supported_browsers):
global browser
browser = sys.argv[1]
del sys.argv[1] # remove so that unittest doesn't attempt to process argument
# Gather one test suite
#tests = unittest.TestLoader().loadTestsFromTestCase(TestCurrentEventsPage)
# Gather set of test suites
suite_list = [
TestHomePage,
TestMainPage,
TestArticlePage,
TestCurrentEventsPage,
]
suites = map(unittest.TestLoader().loadTestsFromTestCase, suite_list)
tests = unittest.TestSuite(suites)
# Run gathered tests
unittest.TextTestRunner(verbosity=2).run(tests)
else:
print("Argument missing or invalid. Expected one of",str(supported_browsers)[1:-1]) |
class TestCurrentEventsPage(WikipediaCommon):
#@unittest.skip('') | random_line_split |
instruction.rs | use std::fmt;
use std::hash;
enum_from_primitive! {
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Opcode {
// Two-operand opcodes (2OP)
OP2_1 = 1, OP2_2 = 2, OP2_3 = 3, OP2_4 = 4, OP2_5 = 5, OP2_6 = 6,
OP2_7 = 7, OP2_8 = 8, OP2_9 = 9, OP2_10 = 10, OP2_11 = 11, OP2_12 = 12,
OP2_13 = 13, OP2_14 = 14, OP2_15 = 15, OP2_16 = 16, OP2_17 = 17, OP2_18 = 18,
OP2_19 = 19, OP2_20 = 20, OP2_21 = 21, OP2_22 = 22, OP2_23 = 23, OP2_24 = 24,
OP2_25 = 25, OP2_26 = 26, OP2_27 = 27, OP2_28 = 28,
// One-operand opcodes (1OP)
OP1_128 = 128, OP1_129 = 129, OP1_130 = 130, OP1_131 = 131, OP1_132 = 132,
OP1_133 = 133, OP1_134 = 134, OP1_135 = 135, OP1_136 = 136, OP1_137 = 137,
OP1_138 = 138, OP1_139 = 139, OP1_140 = 140, OP1_141 = 141, OP1_142 = 142,
OP1_143 = 143,
// Zero-operand opcodes (0OP)
OP0_176 = 176, OP0_177 = 177, OP0_178 = 178, OP0_179 = 179, OP0_180 = 180,
OP0_181 = 181, OP0_182 = 182, OP0_183 = 183, OP0_184 = 184, OP0_185 = 185,
OP0_186 = 186, OP0_187 = 187, OP0_188 = 188, OP0_189 = 189, OP0_191 = 191,
// Variable-operand opcodes (VAR)
VAR_224 = 224, VAR_225 = 225, VAR_226 = 226, VAR_227 = 227, VAR_228 = 228,
VAR_229 = 229, VAR_230 = 230, VAR_231 = 231, VAR_232 = 232, VAR_233 = 233,
VAR_234 = 234, VAR_235 = 235, VAR_236 = 236, VAR_237 = 237, VAR_238 = 238,
VAR_239 = 239, VAR_240 = 240, VAR_241 = 241, VAR_242 = 242, VAR_243 = 243,
VAR_244 = 244, VAR_245 = 245, VAR_246 = 246, VAR_247 = 247, VAR_248 = 248,
VAR_249 = 249, VAR_250 = 250, VAR_251 = 251, VAR_252 = 252, VAR_253 = 253,
VAR_254 = 254, VAR_255 = 255,
// Extended opcodes (EXT)
EXT_1000 = 1000, EXT_1001 = 1001, EXT_1002 = 1002, EXT_1003 = 1003,
EXT_1004 = 1004, EXT_1005 = 1005, EXT_1006 = 1006, EXT_1007 = 1007,
EXT_1008 = 1008, EXT_1009 = 1009, EXT_1010 = 1010, EXT_1011 = 1011,
EXT_1012 = 1012, EXT_1013 = 1013, EXT_1016 = 1016, EXT_1017 = 1017,
EXT_1018 = 1018, EXT_1019 = 1019, EXT_1020 = 1020, EXT_1021 = 1021,
EXT_1022 = 1022, EXT_1023 = 1023, EXT_1024 = 1024, EXT_1025 = 1025,
EXT_1026 = 1026, EXT_1027 = 1027, EXT_1028 = 1028, EXT_1029 = 1029,
}
}
#[derive(Debug, PartialEq)]
pub enum OperandType {
Small,
Large,
Variable,
Omitted,
}
impl OperandType {
pub fn from(bytes: &[u8]) -> Vec<OperandType> {
bytes
.iter()
.fold(Vec::new(), |mut acc, n| {
acc.push((n & 0b1100_0000) >> 6);
acc.push((n & 0b0011_0000) >> 4);
acc.push((n & 0b0000_1100) >> 2);
acc.push(n & 0b0000_0011);
acc
})
.into_iter()
.map(|b| match b {
0b00 => OperandType::Large,
0b01 => OperandType::Small,
0b10 => OperandType::Variable,
0b11 => OperandType::Omitted,
_ => unreachable!("Can't get operand type of: {:08b}", b),
})
.take_while(|t| *t != OperandType::Omitted)
.collect()
}
}
#[derive(Debug)]
pub enum Operand {
Small(u8),
Large(u16),
Variable(u8),
}
impl fmt::Display for Operand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Operand::Small(x) => write!(f, "#{:02x}", x),
Operand::Large(x) => write!(f, "{:04x}", x),
Operand::Variable(x) => match x {
0 => write!(f, "sp"),
1...15 => write!(f, "local{}", x - 1),
_ => write!(f, "g{}", x - 16),
},
}
}
}
#[derive(Debug)]
pub struct Branch {
pub condition: u16,
pub address: Option<usize>,
pub returns: Option<u16>,
}
#[derive(Debug)]
pub struct Instruction {
pub addr: usize,
pub opcode: Opcode,
pub name: String,
pub operands: Vec<Operand>,
pub store: Option<u8>,
pub branch: Option<Branch>,
pub text: Option<String>,
pub next: usize,
}
impl Instruction {
pub fn does_store(opcode: Opcode, version: u8) -> bool {
use self::Opcode::*;
match opcode {
// does a store in any version
OP2_8 | OP2_9 | OP2_15 | OP2_16 | OP2_17 | OP2_18 | OP2_19 | OP2_20 | OP2_21
| OP2_22 | OP2_23 | OP2_24 | OP2_25 | OP1_129 | OP1_130 | OP1_131 | OP1_132
| OP1_136 | OP1_142 | VAR_224 | VAR_231 | VAR_236 | VAR_246 | VAR_247 | VAR_248
| EXT_1000 | EXT_1001 | EXT_1002 | EXT_1003 | EXT_1004 | EXT_1009 | EXT_1010
| EXT_1019 | EXT_1029 => true,
// only stores in certain versions
OP1_143 => version < 5,
OP0_181 => version == 4, // missing * in spec?
OP0_182 => version == 4, // missing * in spec?
OP0_185 => version >= 5,
VAR_228 => version >= 5,
VAR_233 => version == 6,
_ => false,
}
}
pub fn does_branch(opcode: Opcode, version: u8) -> bool {
use self::Opcode::*;
match opcode {
// does a branch in any version
OP2_1 | OP2_2 | OP2_3 | OP2_4 | OP2_5 | OP2_6 | OP2_7 | OP2_10 | OP1_128 | OP1_129
| OP1_130 | OP0_189 | OP0_191 | VAR_247 | VAR_255 | EXT_1006 | EXT_1024 | EXT_1027 => {
true
}
// only branches in certain versions
OP0_181 => version < 4,
OP0_182 => version < 4,
_ => false,
}
}
pub fn does_text(opcode: Opcode) -> bool {
use self::Opcode::*;
match opcode {
OP0_178 | OP0_179 => true,
_ => false,
}
}
pub fn name(opcode: Opcode, version: u8) -> String {
use self::Opcode::*;
match opcode {
OP2_1 => "je",
OP2_2 => "jl",
OP2_3 => "jg",
OP2_4 => "dec_chk",
OP2_5 => "inc_chk",
OP2_6 => "jin",
OP2_7 => "test",
OP2_8 => "or",
OP2_9 => "and",
OP2_10 => "test_attr",
OP2_11 => "set_attr",
OP2_12 => "clear_attr",
OP2_13 => "store",
OP2_14 => "insert_obj",
OP2_15 => "loadw",
OP2_16 => "loadb",
OP2_17 => "get_prop",
OP2_18 => "get_prop_addr",
OP2_19 => "get_next_prop",
OP2_20 => "add",
OP2_21 => "sub",
OP2_22 => "mul",
OP2_23 => "div",
OP2_24 => "mod",
OP2_25 => "call_2s",
OP2_26 => "call_2n",
OP2_27 => "set_colour",
OP2_28 => "throw",
OP1_128 => "jz",
OP1_129 => "get_sibling",
OP1_130 => "get_child",
OP1_131 => "get_parent",
OP1_132 => "get_prop_len",
OP1_133 => "inc",
OP1_134 => "dec",
OP1_135 => "print_addr",
OP1_136 => "call_1s",
OP1_137 => "remove_obj",
OP1_138 => "print_obj",
OP1_139 => "ret",
OP1_140 => "jump",
OP1_141 => "print_paddr",
OP1_142 => "load",
// actually 2 different operations:
OP1_143 => if version < 4 {
"not"
} else {
"call_1n"
},
OP0_176 => "rtrue",
OP0_177 => "rfalse",
OP0_178 => "print",
OP0_179 => "print_ret",
OP0_180 => "nop",
OP0_181 => "save",
OP0_182 => "restore",
OP0_183 => "restart",
OP0_184 => "ret_popped",
// actually 2 different operations:
OP0_185 => if version < 4 {
"pop"
} else {
"catch"
},
OP0_186 => "quit",
OP0_187 => "new_line",
OP0_188 => "show_status",
OP0_189 => "verify",
OP0_191 => "piracy",
// "call" is the same as "call_vs" (name changed to remove ambiguity)
VAR_224 => if version < 4 {
"call"
} else {
"call_vs"
},
VAR_225 => "storew",
VAR_226 => "storeb",
VAR_227 => "put_prop",
// "sread", "aread", plain "read" are really all the same thing:
VAR_228 => if version < 4 {
"sread"
} else {
"aread"
},
VAR_229 => "print_char",
VAR_230 => "print_num",
VAR_231 => "random",
VAR_232 => "push",
VAR_233 => "pull",
VAR_234 => "split_window",
VAR_235 => "set_window",
VAR_236 => "call_vs2",
VAR_237 => "erase_window",
VAR_238 => "erase_line",
VAR_239 => "set_cursor",
VAR_240 => "get_cursor",
VAR_241 => "set_text_style",
VAR_242 => "buffer_mode",
VAR_243 => "output_stream",
VAR_244 => "input_stream",
VAR_245 => "sound_effect",
VAR_246 => "read_char",
VAR_247 => "scan_table",
VAR_248 => "not",
VAR_249 => "call_vn",
VAR_250 => "call_vn2",
VAR_251 => "tokenise",
VAR_252 => "encode_text",
VAR_253 => "copy_table",
VAR_254 => "print_table",
VAR_255 => "check_arg_count",
EXT_1000 => "save",
EXT_1001 => "restore",
EXT_1002 => "log_shift",
EXT_1003 => "art_shift",
EXT_1004 => "set_font",
EXT_1005 => "draw_picture",
EXT_1006 => "picture_data",
EXT_1007 => "erase_picture",
EXT_1008 => "set_margins",
EXT_1009 => "save_undo",
EXT_1010 => "restore_undo",
EXT_1011 => "print_unicode",
EXT_1012 => "check_unicode",
EXT_1013 => "set_true_colour",
EXT_1016 => "move_window",
EXT_1017 => "window_size",
EXT_1018 => "window_style",
EXT_1019 => "get_wind_prop",
EXT_1020 => "scroll_window",
EXT_1021 => "pop_stack",
EXT_1022 => "read_mouse",
EXT_1023 => "mouse_window",
EXT_1024 => "push_stack",
EXT_1025 => "put_wind_prop",
EXT_1026 => "print_form",
EXT_1027 => "make_menu",
EXT_1028 => "picture_table",
EXT_1029 => "buffer_screen",
}.to_string()
}
}
impl Instruction {
pub fn advances(&self) -> bool {
use self::Opcode::*;
// Some instructions never advance to the next instruction:
// throw, ret, jump, rtrue, rfalse, print_ret, restart, and ret_popped
match self.opcode {
OP2_28 | OP1_139 | OP1_140 | OP0_176 | OP0_177 | OP0_179 | OP0_183 | OP0_184
| OP0_186 => false,
_ => true,
}
}
pub fn does_call(&self, version: u8) -> bool |
pub fn should_advance(&self, version: u8) -> bool {
!self.does_call(version) && self.opcode != Opcode::OP0_181 && self.opcode != Opcode::OP0_182
}
}
impl hash::Hash for Instruction {
fn hash<H>(&self, state: &mut H)
where
H: hash::Hasher,
{
state.write_usize(self.addr);
state.finish();
}
}
impl PartialEq for Instruction {
fn eq(&self, other: &Instruction) -> bool {
self.addr == other.addr
}
}
impl Eq for Instruction {}
impl fmt::Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:5x}: {:16}", self.addr, self.name)?;
for op in &self.operands {
write!(f, " {}", op)?;
}
if let Some(x) = self.store {
match x {
0 => write!(f, " -> sp"),
1...15 => write!(f, " -> local{}", x - 1),
_ => write!(f, " -> g{}", x - 16),
}?;
};
if let Some(Branch {
address,
returns,
condition,
}) = self.branch
{
match (address, returns, condition) {
(Some(addr), _, 1) => write!(f, " ?{:04x}", addr),
(Some(addr), _, 0) => write!(f, " ?~{:04x}", addr),
(None, Some(1), 1) => write!(f, " ?rtrue"),
(None, Some(1), 0) => write!(f, " ?~rtrue"),
(None, Some(0), 1) => write!(f, " ?rfalse"),
(None, Some(0), 0) => write!(f, " ?~rfalse"),
_ => write!(f, ""),
}?;
};
if let Some(ref text) = self.text {
write!(f, " {}", text)?;
};
write!(f, "")
}
}
| {
use self::Opcode::*;
match self.opcode {
OP2_25 | OP2_26 | OP1_136 | VAR_224 | VAR_236 | VAR_249 | VAR_250 => true,
OP1_143 => version >= 4,
_ => false,
}
} | identifier_body |
instruction.rs | use std::fmt;
use std::hash;
enum_from_primitive! {
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Opcode {
// Two-operand opcodes (2OP)
OP2_1 = 1, OP2_2 = 2, OP2_3 = 3, OP2_4 = 4, OP2_5 = 5, OP2_6 = 6,
OP2_7 = 7, OP2_8 = 8, OP2_9 = 9, OP2_10 = 10, OP2_11 = 11, OP2_12 = 12,
OP2_13 = 13, OP2_14 = 14, OP2_15 = 15, OP2_16 = 16, OP2_17 = 17, OP2_18 = 18,
OP2_19 = 19, OP2_20 = 20, OP2_21 = 21, OP2_22 = 22, OP2_23 = 23, OP2_24 = 24,
OP2_25 = 25, OP2_26 = 26, OP2_27 = 27, OP2_28 = 28,
// One-operand opcodes (1OP)
OP1_128 = 128, OP1_129 = 129, OP1_130 = 130, OP1_131 = 131, OP1_132 = 132,
OP1_133 = 133, OP1_134 = 134, OP1_135 = 135, OP1_136 = 136, OP1_137 = 137,
OP1_138 = 138, OP1_139 = 139, OP1_140 = 140, OP1_141 = 141, OP1_142 = 142,
OP1_143 = 143,
// Zero-operand opcodes (0OP)
OP0_176 = 176, OP0_177 = 177, OP0_178 = 178, OP0_179 = 179, OP0_180 = 180,
OP0_181 = 181, OP0_182 = 182, OP0_183 = 183, OP0_184 = 184, OP0_185 = 185,
OP0_186 = 186, OP0_187 = 187, OP0_188 = 188, OP0_189 = 189, OP0_191 = 191,
// Variable-operand opcodes (VAR)
VAR_224 = 224, VAR_225 = 225, VAR_226 = 226, VAR_227 = 227, VAR_228 = 228,
VAR_229 = 229, VAR_230 = 230, VAR_231 = 231, VAR_232 = 232, VAR_233 = 233,
VAR_234 = 234, VAR_235 = 235, VAR_236 = 236, VAR_237 = 237, VAR_238 = 238,
VAR_239 = 239, VAR_240 = 240, VAR_241 = 241, VAR_242 = 242, VAR_243 = 243,
VAR_244 = 244, VAR_245 = 245, VAR_246 = 246, VAR_247 = 247, VAR_248 = 248,
VAR_249 = 249, VAR_250 = 250, VAR_251 = 251, VAR_252 = 252, VAR_253 = 253,
VAR_254 = 254, VAR_255 = 255,
// Extended opcodes (EXT)
EXT_1000 = 1000, EXT_1001 = 1001, EXT_1002 = 1002, EXT_1003 = 1003,
EXT_1004 = 1004, EXT_1005 = 1005, EXT_1006 = 1006, EXT_1007 = 1007,
EXT_1008 = 1008, EXT_1009 = 1009, EXT_1010 = 1010, EXT_1011 = 1011,
EXT_1012 = 1012, EXT_1013 = 1013, EXT_1016 = 1016, EXT_1017 = 1017,
EXT_1018 = 1018, EXT_1019 = 1019, EXT_1020 = 1020, EXT_1021 = 1021,
EXT_1022 = 1022, EXT_1023 = 1023, EXT_1024 = 1024, EXT_1025 = 1025,
EXT_1026 = 1026, EXT_1027 = 1027, EXT_1028 = 1028, EXT_1029 = 1029,
}
}
#[derive(Debug, PartialEq)]
pub enum OperandType {
Small,
Large,
Variable,
Omitted,
}
impl OperandType {
pub fn | (bytes: &[u8]) -> Vec<OperandType> {
bytes
.iter()
.fold(Vec::new(), |mut acc, n| {
acc.push((n & 0b1100_0000) >> 6);
acc.push((n & 0b0011_0000) >> 4);
acc.push((n & 0b0000_1100) >> 2);
acc.push(n & 0b0000_0011);
acc
})
.into_iter()
.map(|b| match b {
0b00 => OperandType::Large,
0b01 => OperandType::Small,
0b10 => OperandType::Variable,
0b11 => OperandType::Omitted,
_ => unreachable!("Can't get operand type of: {:08b}", b),
})
.take_while(|t| *t != OperandType::Omitted)
.collect()
}
}
#[derive(Debug)]
pub enum Operand {
Small(u8),
Large(u16),
Variable(u8),
}
impl fmt::Display for Operand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Operand::Small(x) => write!(f, "#{:02x}", x),
Operand::Large(x) => write!(f, "{:04x}", x),
Operand::Variable(x) => match x {
0 => write!(f, "sp"),
1...15 => write!(f, "local{}", x - 1),
_ => write!(f, "g{}", x - 16),
},
}
}
}
#[derive(Debug)]
pub struct Branch {
pub condition: u16,
pub address: Option<usize>,
pub returns: Option<u16>,
}
#[derive(Debug)]
pub struct Instruction {
pub addr: usize,
pub opcode: Opcode,
pub name: String,
pub operands: Vec<Operand>,
pub store: Option<u8>,
pub branch: Option<Branch>,
pub text: Option<String>,
pub next: usize,
}
impl Instruction {
pub fn does_store(opcode: Opcode, version: u8) -> bool {
use self::Opcode::*;
match opcode {
// does a store in any version
OP2_8 | OP2_9 | OP2_15 | OP2_16 | OP2_17 | OP2_18 | OP2_19 | OP2_20 | OP2_21
| OP2_22 | OP2_23 | OP2_24 | OP2_25 | OP1_129 | OP1_130 | OP1_131 | OP1_132
| OP1_136 | OP1_142 | VAR_224 | VAR_231 | VAR_236 | VAR_246 | VAR_247 | VAR_248
| EXT_1000 | EXT_1001 | EXT_1002 | EXT_1003 | EXT_1004 | EXT_1009 | EXT_1010
| EXT_1019 | EXT_1029 => true,
// only stores in certain versions
OP1_143 => version < 5,
OP0_181 => version == 4, // missing * in spec?
OP0_182 => version == 4, // missing * in spec?
OP0_185 => version >= 5,
VAR_228 => version >= 5,
VAR_233 => version == 6,
_ => false,
}
}
pub fn does_branch(opcode: Opcode, version: u8) -> bool {
use self::Opcode::*;
match opcode {
// does a branch in any version
OP2_1 | OP2_2 | OP2_3 | OP2_4 | OP2_5 | OP2_6 | OP2_7 | OP2_10 | OP1_128 | OP1_129
| OP1_130 | OP0_189 | OP0_191 | VAR_247 | VAR_255 | EXT_1006 | EXT_1024 | EXT_1027 => {
true
}
// only branches in certain versions
OP0_181 => version < 4,
OP0_182 => version < 4,
_ => false,
}
}
pub fn does_text(opcode: Opcode) -> bool {
use self::Opcode::*;
match opcode {
OP0_178 | OP0_179 => true,
_ => false,
}
}
pub fn name(opcode: Opcode, version: u8) -> String {
use self::Opcode::*;
match opcode {
OP2_1 => "je",
OP2_2 => "jl",
OP2_3 => "jg",
OP2_4 => "dec_chk",
OP2_5 => "inc_chk",
OP2_6 => "jin",
OP2_7 => "test",
OP2_8 => "or",
OP2_9 => "and",
OP2_10 => "test_attr",
OP2_11 => "set_attr",
OP2_12 => "clear_attr",
OP2_13 => "store",
OP2_14 => "insert_obj",
OP2_15 => "loadw",
OP2_16 => "loadb",
OP2_17 => "get_prop",
OP2_18 => "get_prop_addr",
OP2_19 => "get_next_prop",
OP2_20 => "add",
OP2_21 => "sub",
OP2_22 => "mul",
OP2_23 => "div",
OP2_24 => "mod",
OP2_25 => "call_2s",
OP2_26 => "call_2n",
OP2_27 => "set_colour",
OP2_28 => "throw",
OP1_128 => "jz",
OP1_129 => "get_sibling",
OP1_130 => "get_child",
OP1_131 => "get_parent",
OP1_132 => "get_prop_len",
OP1_133 => "inc",
OP1_134 => "dec",
OP1_135 => "print_addr",
OP1_136 => "call_1s",
OP1_137 => "remove_obj",
OP1_138 => "print_obj",
OP1_139 => "ret",
OP1_140 => "jump",
OP1_141 => "print_paddr",
OP1_142 => "load",
// actually 2 different operations:
OP1_143 => if version < 4 {
"not"
} else {
"call_1n"
},
OP0_176 => "rtrue",
OP0_177 => "rfalse",
OP0_178 => "print",
OP0_179 => "print_ret",
OP0_180 => "nop",
OP0_181 => "save",
OP0_182 => "restore",
OP0_183 => "restart",
OP0_184 => "ret_popped",
// actually 2 different operations:
OP0_185 => if version < 4 {
"pop"
} else {
"catch"
},
OP0_186 => "quit",
OP0_187 => "new_line",
OP0_188 => "show_status",
OP0_189 => "verify",
OP0_191 => "piracy",
// "call" is the same as "call_vs" (name changed to remove ambiguity)
VAR_224 => if version < 4 {
"call"
} else {
"call_vs"
},
VAR_225 => "storew",
VAR_226 => "storeb",
VAR_227 => "put_prop",
// "sread", "aread", plain "read" are really all the same thing:
VAR_228 => if version < 4 {
"sread"
} else {
"aread"
},
VAR_229 => "print_char",
VAR_230 => "print_num",
VAR_231 => "random",
VAR_232 => "push",
VAR_233 => "pull",
VAR_234 => "split_window",
VAR_235 => "set_window",
VAR_236 => "call_vs2",
VAR_237 => "erase_window",
VAR_238 => "erase_line",
VAR_239 => "set_cursor",
VAR_240 => "get_cursor",
VAR_241 => "set_text_style",
VAR_242 => "buffer_mode",
VAR_243 => "output_stream",
VAR_244 => "input_stream",
VAR_245 => "sound_effect",
VAR_246 => "read_char",
VAR_247 => "scan_table",
VAR_248 => "not",
VAR_249 => "call_vn",
VAR_250 => "call_vn2",
VAR_251 => "tokenise",
VAR_252 => "encode_text",
VAR_253 => "copy_table",
VAR_254 => "print_table",
VAR_255 => "check_arg_count",
EXT_1000 => "save",
EXT_1001 => "restore",
EXT_1002 => "log_shift",
EXT_1003 => "art_shift",
EXT_1004 => "set_font",
EXT_1005 => "draw_picture",
EXT_1006 => "picture_data",
EXT_1007 => "erase_picture",
EXT_1008 => "set_margins",
EXT_1009 => "save_undo",
EXT_1010 => "restore_undo",
EXT_1011 => "print_unicode",
EXT_1012 => "check_unicode",
EXT_1013 => "set_true_colour",
EXT_1016 => "move_window",
EXT_1017 => "window_size",
EXT_1018 => "window_style",
EXT_1019 => "get_wind_prop",
EXT_1020 => "scroll_window",
EXT_1021 => "pop_stack",
EXT_1022 => "read_mouse",
EXT_1023 => "mouse_window",
EXT_1024 => "push_stack",
EXT_1025 => "put_wind_prop",
EXT_1026 => "print_form",
EXT_1027 => "make_menu",
EXT_1028 => "picture_table",
EXT_1029 => "buffer_screen",
}.to_string()
}
}
impl Instruction {
pub fn advances(&self) -> bool {
use self::Opcode::*;
// Some instructions never advance to the next instruction:
// throw, ret, jump, rtrue, rfalse, print_ret, restart, and ret_popped
match self.opcode {
OP2_28 | OP1_139 | OP1_140 | OP0_176 | OP0_177 | OP0_179 | OP0_183 | OP0_184
| OP0_186 => false,
_ => true,
}
}
pub fn does_call(&self, version: u8) -> bool {
use self::Opcode::*;
match self.opcode {
OP2_25 | OP2_26 | OP1_136 | VAR_224 | VAR_236 | VAR_249 | VAR_250 => true,
OP1_143 => version >= 4,
_ => false,
}
}
pub fn should_advance(&self, version: u8) -> bool {
!self.does_call(version) && self.opcode != Opcode::OP0_181 && self.opcode != Opcode::OP0_182
}
}
impl hash::Hash for Instruction {
fn hash<H>(&self, state: &mut H)
where
H: hash::Hasher,
{
state.write_usize(self.addr);
state.finish();
}
}
impl PartialEq for Instruction {
fn eq(&self, other: &Instruction) -> bool {
self.addr == other.addr
}
}
impl Eq for Instruction {}
impl fmt::Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:5x}: {:16}", self.addr, self.name)?;
for op in &self.operands {
write!(f, " {}", op)?;
}
if let Some(x) = self.store {
match x {
0 => write!(f, " -> sp"),
1...15 => write!(f, " -> local{}", x - 1),
_ => write!(f, " -> g{}", x - 16),
}?;
};
if let Some(Branch {
address,
returns,
condition,
}) = self.branch
{
match (address, returns, condition) {
(Some(addr), _, 1) => write!(f, " ?{:04x}", addr),
(Some(addr), _, 0) => write!(f, " ?~{:04x}", addr),
(None, Some(1), 1) => write!(f, " ?rtrue"),
(None, Some(1), 0) => write!(f, " ?~rtrue"),
(None, Some(0), 1) => write!(f, " ?rfalse"),
(None, Some(0), 0) => write!(f, " ?~rfalse"),
_ => write!(f, ""),
}?;
};
if let Some(ref text) = self.text {
write!(f, " {}", text)?;
};
write!(f, "")
}
}
| from | identifier_name |
instruction.rs | use std::fmt;
use std::hash;
enum_from_primitive! {
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Opcode {
// Two-operand opcodes (2OP)
OP2_1 = 1, OP2_2 = 2, OP2_3 = 3, OP2_4 = 4, OP2_5 = 5, OP2_6 = 6,
OP2_7 = 7, OP2_8 = 8, OP2_9 = 9, OP2_10 = 10, OP2_11 = 11, OP2_12 = 12,
OP2_13 = 13, OP2_14 = 14, OP2_15 = 15, OP2_16 = 16, OP2_17 = 17, OP2_18 = 18,
OP2_19 = 19, OP2_20 = 20, OP2_21 = 21, OP2_22 = 22, OP2_23 = 23, OP2_24 = 24,
OP2_25 = 25, OP2_26 = 26, OP2_27 = 27, OP2_28 = 28,
// One-operand opcodes (1OP)
OP1_128 = 128, OP1_129 = 129, OP1_130 = 130, OP1_131 = 131, OP1_132 = 132,
OP1_133 = 133, OP1_134 = 134, OP1_135 = 135, OP1_136 = 136, OP1_137 = 137,
OP1_138 = 138, OP1_139 = 139, OP1_140 = 140, OP1_141 = 141, OP1_142 = 142,
OP1_143 = 143,
// Zero-operand opcodes (0OP)
OP0_176 = 176, OP0_177 = 177, OP0_178 = 178, OP0_179 = 179, OP0_180 = 180,
OP0_181 = 181, OP0_182 = 182, OP0_183 = 183, OP0_184 = 184, OP0_185 = 185,
OP0_186 = 186, OP0_187 = 187, OP0_188 = 188, OP0_189 = 189, OP0_191 = 191,
// Variable-operand opcodes (VAR)
VAR_224 = 224, VAR_225 = 225, VAR_226 = 226, VAR_227 = 227, VAR_228 = 228,
VAR_229 = 229, VAR_230 = 230, VAR_231 = 231, VAR_232 = 232, VAR_233 = 233,
VAR_234 = 234, VAR_235 = 235, VAR_236 = 236, VAR_237 = 237, VAR_238 = 238,
VAR_239 = 239, VAR_240 = 240, VAR_241 = 241, VAR_242 = 242, VAR_243 = 243,
VAR_244 = 244, VAR_245 = 245, VAR_246 = 246, VAR_247 = 247, VAR_248 = 248,
VAR_249 = 249, VAR_250 = 250, VAR_251 = 251, VAR_252 = 252, VAR_253 = 253,
VAR_254 = 254, VAR_255 = 255,
// Extended opcodes (EXT)
EXT_1000 = 1000, EXT_1001 = 1001, EXT_1002 = 1002, EXT_1003 = 1003,
EXT_1004 = 1004, EXT_1005 = 1005, EXT_1006 = 1006, EXT_1007 = 1007,
EXT_1008 = 1008, EXT_1009 = 1009, EXT_1010 = 1010, EXT_1011 = 1011,
EXT_1012 = 1012, EXT_1013 = 1013, EXT_1016 = 1016, EXT_1017 = 1017,
EXT_1018 = 1018, EXT_1019 = 1019, EXT_1020 = 1020, EXT_1021 = 1021,
EXT_1022 = 1022, EXT_1023 = 1023, EXT_1024 = 1024, EXT_1025 = 1025,
EXT_1026 = 1026, EXT_1027 = 1027, EXT_1028 = 1028, EXT_1029 = 1029,
}
}
#[derive(Debug, PartialEq)]
pub enum OperandType {
Small,
Large,
Variable,
Omitted,
}
impl OperandType {
pub fn from(bytes: &[u8]) -> Vec<OperandType> {
bytes
.iter()
.fold(Vec::new(), |mut acc, n| {
acc.push((n & 0b1100_0000) >> 6);
acc.push((n & 0b0011_0000) >> 4);
acc.push((n & 0b0000_1100) >> 2);
acc.push(n & 0b0000_0011);
acc
})
.into_iter()
.map(|b| match b {
0b00 => OperandType::Large,
0b01 => OperandType::Small,
0b10 => OperandType::Variable,
0b11 => OperandType::Omitted,
_ => unreachable!("Can't get operand type of: {:08b}", b),
})
.take_while(|t| *t != OperandType::Omitted)
.collect()
}
}
#[derive(Debug)]
pub enum Operand {
Small(u8),
Large(u16),
Variable(u8),
}
impl fmt::Display for Operand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Operand::Small(x) => write!(f, "#{:02x}", x),
Operand::Large(x) => write!(f, "{:04x}", x),
Operand::Variable(x) => match x {
0 => write!(f, "sp"),
1...15 => write!(f, "local{}", x - 1),
_ => write!(f, "g{}", x - 16),
},
}
}
}
#[derive(Debug)]
pub struct Branch {
pub condition: u16,
pub address: Option<usize>,
pub returns: Option<u16>,
}
| pub addr: usize,
pub opcode: Opcode,
pub name: String,
pub operands: Vec<Operand>,
pub store: Option<u8>,
pub branch: Option<Branch>,
pub text: Option<String>,
pub next: usize,
}
impl Instruction {
pub fn does_store(opcode: Opcode, version: u8) -> bool {
use self::Opcode::*;
match opcode {
// does a store in any version
OP2_8 | OP2_9 | OP2_15 | OP2_16 | OP2_17 | OP2_18 | OP2_19 | OP2_20 | OP2_21
| OP2_22 | OP2_23 | OP2_24 | OP2_25 | OP1_129 | OP1_130 | OP1_131 | OP1_132
| OP1_136 | OP1_142 | VAR_224 | VAR_231 | VAR_236 | VAR_246 | VAR_247 | VAR_248
| EXT_1000 | EXT_1001 | EXT_1002 | EXT_1003 | EXT_1004 | EXT_1009 | EXT_1010
| EXT_1019 | EXT_1029 => true,
// only stores in certain versions
OP1_143 => version < 5,
OP0_181 => version == 4, // missing * in spec?
OP0_182 => version == 4, // missing * in spec?
OP0_185 => version >= 5,
VAR_228 => version >= 5,
VAR_233 => version == 6,
_ => false,
}
}
pub fn does_branch(opcode: Opcode, version: u8) -> bool {
use self::Opcode::*;
match opcode {
// does a branch in any version
OP2_1 | OP2_2 | OP2_3 | OP2_4 | OP2_5 | OP2_6 | OP2_7 | OP2_10 | OP1_128 | OP1_129
| OP1_130 | OP0_189 | OP0_191 | VAR_247 | VAR_255 | EXT_1006 | EXT_1024 | EXT_1027 => {
true
}
// only branches in certain versions
OP0_181 => version < 4,
OP0_182 => version < 4,
_ => false,
}
}
pub fn does_text(opcode: Opcode) -> bool {
use self::Opcode::*;
match opcode {
OP0_178 | OP0_179 => true,
_ => false,
}
}
pub fn name(opcode: Opcode, version: u8) -> String {
use self::Opcode::*;
match opcode {
OP2_1 => "je",
OP2_2 => "jl",
OP2_3 => "jg",
OP2_4 => "dec_chk",
OP2_5 => "inc_chk",
OP2_6 => "jin",
OP2_7 => "test",
OP2_8 => "or",
OP2_9 => "and",
OP2_10 => "test_attr",
OP2_11 => "set_attr",
OP2_12 => "clear_attr",
OP2_13 => "store",
OP2_14 => "insert_obj",
OP2_15 => "loadw",
OP2_16 => "loadb",
OP2_17 => "get_prop",
OP2_18 => "get_prop_addr",
OP2_19 => "get_next_prop",
OP2_20 => "add",
OP2_21 => "sub",
OP2_22 => "mul",
OP2_23 => "div",
OP2_24 => "mod",
OP2_25 => "call_2s",
OP2_26 => "call_2n",
OP2_27 => "set_colour",
OP2_28 => "throw",
OP1_128 => "jz",
OP1_129 => "get_sibling",
OP1_130 => "get_child",
OP1_131 => "get_parent",
OP1_132 => "get_prop_len",
OP1_133 => "inc",
OP1_134 => "dec",
OP1_135 => "print_addr",
OP1_136 => "call_1s",
OP1_137 => "remove_obj",
OP1_138 => "print_obj",
OP1_139 => "ret",
OP1_140 => "jump",
OP1_141 => "print_paddr",
OP1_142 => "load",
// actually 2 different operations:
OP1_143 => if version < 4 {
"not"
} else {
"call_1n"
},
OP0_176 => "rtrue",
OP0_177 => "rfalse",
OP0_178 => "print",
OP0_179 => "print_ret",
OP0_180 => "nop",
OP0_181 => "save",
OP0_182 => "restore",
OP0_183 => "restart",
OP0_184 => "ret_popped",
// actually 2 different operations:
OP0_185 => if version < 4 {
"pop"
} else {
"catch"
},
OP0_186 => "quit",
OP0_187 => "new_line",
OP0_188 => "show_status",
OP0_189 => "verify",
OP0_191 => "piracy",
// "call" is the same as "call_vs" (name changed to remove ambiguity)
VAR_224 => if version < 4 {
"call"
} else {
"call_vs"
},
VAR_225 => "storew",
VAR_226 => "storeb",
VAR_227 => "put_prop",
// "sread", "aread", plain "read" are really all the same thing:
VAR_228 => if version < 4 {
"sread"
} else {
"aread"
},
VAR_229 => "print_char",
VAR_230 => "print_num",
VAR_231 => "random",
VAR_232 => "push",
VAR_233 => "pull",
VAR_234 => "split_window",
VAR_235 => "set_window",
VAR_236 => "call_vs2",
VAR_237 => "erase_window",
VAR_238 => "erase_line",
VAR_239 => "set_cursor",
VAR_240 => "get_cursor",
VAR_241 => "set_text_style",
VAR_242 => "buffer_mode",
VAR_243 => "output_stream",
VAR_244 => "input_stream",
VAR_245 => "sound_effect",
VAR_246 => "read_char",
VAR_247 => "scan_table",
VAR_248 => "not",
VAR_249 => "call_vn",
VAR_250 => "call_vn2",
VAR_251 => "tokenise",
VAR_252 => "encode_text",
VAR_253 => "copy_table",
VAR_254 => "print_table",
VAR_255 => "check_arg_count",
EXT_1000 => "save",
EXT_1001 => "restore",
EXT_1002 => "log_shift",
EXT_1003 => "art_shift",
EXT_1004 => "set_font",
EXT_1005 => "draw_picture",
EXT_1006 => "picture_data",
EXT_1007 => "erase_picture",
EXT_1008 => "set_margins",
EXT_1009 => "save_undo",
EXT_1010 => "restore_undo",
EXT_1011 => "print_unicode",
EXT_1012 => "check_unicode",
EXT_1013 => "set_true_colour",
EXT_1016 => "move_window",
EXT_1017 => "window_size",
EXT_1018 => "window_style",
EXT_1019 => "get_wind_prop",
EXT_1020 => "scroll_window",
EXT_1021 => "pop_stack",
EXT_1022 => "read_mouse",
EXT_1023 => "mouse_window",
EXT_1024 => "push_stack",
EXT_1025 => "put_wind_prop",
EXT_1026 => "print_form",
EXT_1027 => "make_menu",
EXT_1028 => "picture_table",
EXT_1029 => "buffer_screen",
}.to_string()
}
}
impl Instruction {
pub fn advances(&self) -> bool {
use self::Opcode::*;
// Some instructions never advance to the next instruction:
// throw, ret, jump, rtrue, rfalse, print_ret, restart, and ret_popped
match self.opcode {
OP2_28 | OP1_139 | OP1_140 | OP0_176 | OP0_177 | OP0_179 | OP0_183 | OP0_184
| OP0_186 => false,
_ => true,
}
}
pub fn does_call(&self, version: u8) -> bool {
use self::Opcode::*;
match self.opcode {
OP2_25 | OP2_26 | OP1_136 | VAR_224 | VAR_236 | VAR_249 | VAR_250 => true,
OP1_143 => version >= 4,
_ => false,
}
}
pub fn should_advance(&self, version: u8) -> bool {
!self.does_call(version) && self.opcode != Opcode::OP0_181 && self.opcode != Opcode::OP0_182
}
}
impl hash::Hash for Instruction {
fn hash<H>(&self, state: &mut H)
where
H: hash::Hasher,
{
state.write_usize(self.addr);
state.finish();
}
}
impl PartialEq for Instruction {
fn eq(&self, other: &Instruction) -> bool {
self.addr == other.addr
}
}
impl Eq for Instruction {}
impl fmt::Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:5x}: {:16}", self.addr, self.name)?;
for op in &self.operands {
write!(f, " {}", op)?;
}
if let Some(x) = self.store {
match x {
0 => write!(f, " -> sp"),
1...15 => write!(f, " -> local{}", x - 1),
_ => write!(f, " -> g{}", x - 16),
}?;
};
if let Some(Branch {
address,
returns,
condition,
}) = self.branch
{
match (address, returns, condition) {
(Some(addr), _, 1) => write!(f, " ?{:04x}", addr),
(Some(addr), _, 0) => write!(f, " ?~{:04x}", addr),
(None, Some(1), 1) => write!(f, " ?rtrue"),
(None, Some(1), 0) => write!(f, " ?~rtrue"),
(None, Some(0), 1) => write!(f, " ?rfalse"),
(None, Some(0), 0) => write!(f, " ?~rfalse"),
_ => write!(f, ""),
}?;
};
if let Some(ref text) = self.text {
write!(f, " {}", text)?;
};
write!(f, "")
}
} |
#[derive(Debug)]
pub struct Instruction {
| random_line_split |
call_addr_alter.py | #coding: utf-8
import json
import re
# import sys
import time
import pandas as pd
from pandas import DataFrame, Series
# from datetime import datetime
# import pymysql
from call_history_addr_alter import mysql_connection as my
'''
1.利用mysql过滤:
call_addr为“未知”,“-”,“������������”及null
2.原数据形式:
“郑州”、“抚顺;沈阳;铁岭”(可能不在同一个省)、“海南省海口市”、“海南省.海口市”、“湖南永州(冷水滩)”、“上海市.上海市”、“西安.咸阳”、“美国/加拿大”、"其他”
3.统一格式:
“河南省,郑州市”、“北京市”、“广西壮族自治区,钦州市”、
生成新的表
一级行政区,33个
二级行政区,334个(地级市293个,自治州30个,地区8个,盟3个)
三级行政区,2856个(市辖区958个,县1367个,县级市360个,自治县117个,特区1个,林区1个,旗49个,自治旗3个)
问题:
县级市需要考虑么
同一个"mobile"对应多个"call_addr"
最终提供一个csv的本地数据文件
'''
#读取省市规则的json文件,并转换成Series格式
def read_addr_json():
print('read_addr_json()函数开始执行,请稍候...')
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
with open(path + file_name,'r') as json_file:
data = json_file.read()
#data应该为字典格式
print(type(data))
print(data)
data_s = Series(data)
print(data_s)
return data_s
#使用mysql语句将数据库中文件生成本地csv文件,方便后续操作,无返回值。
def get_local_data():
#计算时间
start_1 = time.time()
#路径
path = 'D:\\work\\database\\province-city\\'
file_name = 'call_addr_old.csv'
file_name_test = 'call_addr_alter_test.csv'
print('get_local_data()函数正在执行,请稍候...')
select_string = '''
select call_addr from call_history
group by call_addr
'''
columns = ['call_addr']
data = my.mysql_connection(select_string, columns)
print('正在生成本地文件,请稍候...')
data.to_csv(path + file_name, index=False, sep='\t', encoding='utf-8')
#生成excel
writer = pd.ExcelWriter(path + file_name[:-4] + '.xlsx')
data.to_excel(writer, 'sheet1')
writer.save()
print('本地文件已生成,获取的数据一共%d行\n'%len(data))
print('函数已经结束,共花费时间%d'%(time.time()-start_1))
#读取本地的通话记录文件,并增加新列
def read_local_data():
#不含中文的数据,或者含有国外名称的数据
#既有一级行政区又有二级行政区的数据
#只有二级行政区的数据
#只有三级行政区的数据
#其他
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
file_name_2 = 'call_addr_old.csv'
file = open(path + file_name, encoding='utf-8')
#读取json文件
data = json.load(file)
data = Series(data)
index_new = []
for index in data.index:
index_new.append(int(index))
#原地址规则文件
data = DataFrame(data, columns=['addr'])
data['id'] = index_new
#得到data表,含有addr,id字段————————————————————————————-----------------————
# print(data)
province = data[data['id']%10000==0]
province['addr_new'] = 'NULL'
#第一部分:原数据中含有省名和市名。
print('找出原数据中的省名和市名,请稍候.............................................................................')
index_item = province[(province['addr'].str.contains('省'))| (province['addr'].str.contains('市')) ].index
for item_1 in index_item:
province.ix[item_1, 'addr_new'] = province.ix[item_1,'addr'][:-1]
province.ix['150000', 'addr_new'] = '内蒙'
province.ix['450000', 'addr_new'] = '广西'
province.ix['540000', 'addr_new'] = '西藏'
province.ix['640000', 'addr_new'] = '宁夏'
province.ix['650000', 'addr_new'] = '新疆'
province.ix['810000', 'addr_new'] = '香港'
province.ix['820000', 'addr_new'] = '澳门'
# city = data[(data['id']%100==0) & (data['id']%10000!=0)]
# 建立一个数据表province,只存放省名,含有addr,id ,addr_new----------------------------------------------------
#data_2为原地址表,是处理的数据
data_old = pd.read_table(path + file_name_2, encoding='utf-8', sep='\t')
data_old['call_addr_new'] = "NULL"
#flag判断地址字符中是否含有多个省名,如果只有一个,就确定它属于哪个省,如果有多个省名,则给出提示。
data_old['flag_province'] = 0
data_old['flag_city'] = 0
data_old['flag_county'] = 0
#找出既有省名又有市名的数据------------------------------------------------------------------------------------------------------------
#遍历每一个省名
for index_province in province.index:
province_word = province.ix[index_province, 'addr_new']
if province_word not in ['北京', '天津', '上海', '重庆', '香港', '澳门']:
# print(province_word, '***********************************************************')
#找到含有该省名的地址,比如 海南
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# print(data_old)
#遍历该省下的每一个市名,考虑直辖县,第三位为9的是直辖县,如429004,仙桃市。"469024":"临高县"。市的编码需要大于本省的编码,小于下一省的编码。普通城市编码小于
#9000,且被100整除,或者大于9000的直辖县。
for city_index in data[(data['id'] > int(index_province)) & (((data['id'] < int(index_province) + 9000) & (data['id'] % 100 == 0)
|(data['id']>int(index_province)+8999)&(data['id']<int(index_province)+10000)))].index:
city_word = data.ix[city_index, 'addr'][:-1]
# print(city_word, '_______________')
#找到地址字符中含有该城市数据索引
#该省的data_old数据记录中,寻找对应市的记录
data_old_province = data_old.ix[data_province_index]
if city_word != '吉林':
#data_city_index为data_old中含有特定省名和市名的数据索引,
data_city_index = data_old_province[data_old_province['call_addr'].str.contains(city_word)].index
data_old.ix[data_city_index, 'flag_city'] = data_old.ix[data_city_index, 'flag_city'] + 1
# print(data_old.ix[data_city_index, 'call_addr'], '____________________________________')
#遍历每一个包含该城市名的原数据
for data_item in data_city_index:
if data_old.ix[data_item, 'flag_city'] == 1:
#让标准文件中的城市名作为新的地址
data_old.ix[data_item, 'call_addr_new'] = province.ix[index_province, 'addr'] +','+ data.ix[city_index, 'addr']
elif data_old.ix[data_item, 'flag_city'] > 1:
data_old.ix[data_item, 'call_addr_new'] = "多个城市"
else:
pass
else:
#如果是吉林市,先不考虑
pass
elif province_word in ['北京', '天津', '上海', '重庆']:
#如果是这四个直辖市
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '市,' + province_word + '市'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
#香港、澳门
else:
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '特别行政区'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# 第二部分:原数据中是否含有中文和"未知"。
print('正在判断原数据是否含有非中文和“未知”,请稍候.............................................................................')
# print(data_old)
#-------------------------------------------------------------------------------------------------------------------------------
for item_2 in data_old.index:
addr_old = data_old.ix[item_2, 'call_addr']
#先看看地址中是否含有中文,不含有中文,新地址为“未知”;如果含有国外信息,新地址也为未知。
if re.search(r'[\u4e00-\u9fa5]', addr_old):
if re.search(r'[未知]', addr_old):
data_old.ix[item_2, 'call_addr_new'] = '未知'
else:
data_old.ix[item_2, 'call_addr_new'] = '未知'
#
# # print(data_old)
# #增加新列"call_addr_new"
# data_old['call_addr_new'] = "NULL"
#原数据中只有二级市名的情况--------------------------------------------------------------------------------------------- | -----------------------------
#目前中国的二级行政区由市和盟,自治州,地区组成,分别用data_2, data_3, data_4表示。另外还要考虑省辖县。
data_2 = data[(data['addr'].str.contains('市')|(data['addr'].str.contains('盟'))|(data['id']%10000//1000==9))]
for data_2_item in data_2.index:
data_2.ix[data_2_item, 'addr'] = data_2.ix[data_2_item, 'addr'][:-1]
# print(data_2)
#自治州名字较长,取前两位,如临夏回族自治州
data_3 = data[data['addr'].str.contains('自治州')]
for data_3_item in data_3.index:
data_3.ix[data_3_item, 'addr'] = data_3.ix[data_3_item, 'addr'][:2]
data_4 = data[data['addr'].str.contains('地区')]
for data_4_item in data_4.index:
data_4.ix[data_4_item, 'addr'] = data_4.ix[data_4_item, 'addr'][:-2]
#data_old是要处理的数据,data_2,data_3,data_4是要遍历的二级行政区,吉林省含有吉林市单独考虑
data_other = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
# print(data_other,'__________________________________________________')
for city_2 in data_2.index:
city_2_word = data_2.ix[city_2, 'addr']
# print(city_2_word, '*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-')
city_2_index = data_other[data_other['call_addr'].str.contains(city_2_word)].index
# print(data_other[data_other['call_addr'].str.contains(city_2_word)], '------------------------------------------')
#该市必须被100整除或者第三位为9(省辖县)
if int(city_2)%100==0 or int(city_2)%10000//1000==9:
province_word_2 = data.ix[str(data.ix[city_2, 'id']//10000*10000), 'addr']
# print(province_word_2, '*********************************************************')
data_old.ix[city_2_index, ['call_addr_new']] = province_word_2 + ',' + data.ix[city_2, 'addr']
data_old.ix[city_2_index, 'flag_city'] = data_old.ix[city_2_index, 'flag_city'] + 1
for city_3 in data_3.index:
city_3_word = data_3.ix[city_3, 'addr']
city_3_index = data_other[data_other['call_addr'].str.contains(city_3_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_3) % 100 == 0 or int(city_3)%10000//1000 == 9:
province_word_3= data.ix[str(data.ix[city_3, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_3_index, ['call_addr_new']] = province_word_3 + ',' + data.ix[city_3, 'addr']
data_old.ix[city_3_index, 'flag_city'] = data_old.ix[city_3_index, 'flag_city'] + 1
for city_4 in data_4.index:
city_4_word = data_4.ix[city_4, 'addr']
city_4_index = data_other[data_other['call_addr'].str.contains(city_4_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_4) % 100 == 0 or int(city_4) %10000//1000 == 9:
province_word_4 = data.ix[str(data.ix[city_4, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_4_index, ['call_addr_new']] = province_word_4 + ',' + data.ix[city_4, 'addr']
data_old.ix[city_4_index, 'flag_city'] = data_old.ix[city_4_index, 'flag_city'] + 1
#现在还有一部分数据只有三级行政区的名字, 如海拉尔,但不考虑直辖县,用data_county表示----------------------------------------------------------------
#直辖市--------------------------------------------------
# 第三部分:是否只含有三级行政区的名字。
print('判断是否只含有三级行政区的名字,请稍候.............................................................................')
data_bj_tj = data["110000": "120119"]
data_sh = data['310000':"310151"]
data_cq = data['500000':'500243']
data_am_xg = data['810000':"820109"]
#得到包括四个直辖市和两个特别行政区的数据,行索引不变
data_unique = pd.concat([data_bj_tj, data_sh, data_cq])
data_old_unique = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_unique in data_unique.index:
unique_name = data_unique.ix[index_unique, 'addr']
if len(unique_name)> 5:
#地址太长,只取前两个
unique_name = unique_name[:2]
elif len(unique_name)<=2:
#地址太长,只取前两个
pass
else:
unique_name = unique_name[:-1]
unique_old_index = data_old_unique[data_old_unique['call_addr'].str.contains(unique_name)].index
data_old.ix[unique_old_index, 'call_addr_new'] = data.ix[str(int(index_unique)//10000*10000), 'addr'] + ',' + data.ix[str(int(index_unique)//10000*10000), 'addr']
data_old.ix[unique_old_index, 'flag_county'] += 1
#香港和澳门命名不一样
data_old_am_xg = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_am_xg in data_am_xg.index:
am_xg_name = data_am_xg.ix[index_am_xg, 'addr']
if len(am_xg_name) > 5:
# 地址太长,只取前两个
am_xg_name = am_xg_name[:3]
elif len(am_xg_name)<=2:
am_xg_name = am_xg_name[:2]
else:
am_xg_name = am_xg_name[:-1]
am_xg_old_index = data_old_am_xg[data_old_am_xg['call_addr'].str.contains(am_xg_name)].index
data_old.ix[am_xg_old_index, 'call_addr_new'] = data.ix[str(int(index_am_xg) // 10000 * 10000), 'addr']
data_old.ix[am_xg_old_index, 'flag_county'] += 1
data_county = data[(data['id']%100 != 0)&(data['id']%10000//1000!=9)]
list_county_drop = list(data_unique['id'])
#extend没有返回值和append一样,所以直接修改
list_county_drop.extend(list(data_am_xg['id']))
#data_county不能包括直辖市和香港澳门,因为它们的二级行政区命名不一样。
data_county = data_county[~data_county['id'].isin(list_county_drop)]
data_county_1 = data_county[(data_county['addr'].str.contains('县'))|(data_county['addr'].str.contains('区'))|(data_county['addr'].str.contains('市'))]
data_county_2 = data_county[data_county['addr'].str.contains('自治县')]
#将data_old中处理的数据选进来,还有的县是属于直辖市和特别行政区,得去掉。前两位是11,31,50,12,81,82
data_old_other_2 = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
for county_index_1 in data_county_1.index:
county_word_1 = data_county_1.ix[county_index_1, 'addr']
if len(county_word_1) > 2:
county_word_1 = county_word_1[:-1]
county_old_index = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_1)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 10000 * 10000), 'addr']
city_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index, ['call_addr_new']] = province_word_5 + ',' + city_word_5
data_old.ix[county_old_index,'flag_county'] += 1
for county_index_2 in data_county_2.index:
#对于自治县,只取前两个字符
county_word_2 = data_county_2.ix[county_index_2, 'addr'][:2]
county_old_index_2 = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_2)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 10000 * 10000), 'addr']
city_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index_2, ['call_addr_new']] = province_word_6 + ',' + city_word_6
data_old.ix[county_old_index_2, 'flag_county'] += 1
#生成excel-----------------------------------------------------------------------------------------------------------------------------------------
writer = pd.ExcelWriter(path + file_name_2[:-4] + '_analysis.xlsx' )
data_old.to_excel(writer, 'sheet1')
writer.save()
read_local_data()
| ---- | conditional_block |
call_addr_alter.py | #coding: utf-8
import json
import re
# import sys
import time
import pandas as pd
from pandas import DataFrame, Series
# from datetime import datetime
# import pymysql
from call_history_addr_alter import mysql_connection as my
'''
1.利用mysql过滤:
call_addr为“未知”,“-”,“������������”及null
2.原数据形式:
“郑州”、“抚顺;沈阳;铁岭”(可能不在同一个省)、“海南省海口市”、“海南省.海口市”、“湖南永州(冷水滩)”、“上海市.上海市”、“西安.咸阳”、“美国/加拿大”、"其他”
3.统一格式:
“河南省,郑州市”、“北京市”、“广西壮族自治区,钦州市”、
生成新的表
一级行政区,33个
二级行政区,334个(地级市293个,自治州30个,地区8个,盟3个)
三级行政区,2856个(市辖区958个,县1367个,县级市360个,自治县117个,特区1个,林区1个,旗49个,自治旗3个)
问题:
县级市需要考虑么
同一个"mobile"对应多个"call_addr"
最终提供一个csv的本地数据文件
'''
#读取省市规则的json文件,并转换成Series格式
def read_addr_json():
print('read_addr_json()函数开始执行,请稍候...')
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
with open(path + file_name,'r') as json_file:
data = json_file.read()
#data应该为字典格式
print(type(data))
print(data)
data_s = Series(data)
print(data_s)
return data_s
#使用mysql语句将数据库中文件生成本地csv文件,方便后续操作,无返回值。
def get_local_data():
#计算时间
start_1 = time.time()
#路径
path = 'D:\\work\\database\\province-city\\'
file_name = 'call_addr_old.csv'
file_name_test = 'call_addr_alter_test.csv'
print('get_local_data()函数正在执行,请稍候...')
select_string = '''
select call_addr from call_history
group by call_addr
'''
columns = ['call_addr']
data = my.mysql_connection(select_string, columns)
print('正在生成本地文件,请稍候...')
data.to_csv(path + file_name, index=False, sep='\t', encoding='utf-8')
#生成excel
writer = pd.ExcelWriter(path + file_name[:-4] + '.xlsx')
data.to_excel(writer, 'sheet1')
writer.save()
print('本地文件已生成,获取的数据一共%d行\n'%len(data))
print('函数已经结束,共花费时间%d'%(time.time()-start_1))
#读取本地的通话记录文件,并增加新列
def read_local_data():
#不含中文的数据,或者含有国外名称的数据
#既有一级行政区又有二级行政区的数据
#只有二级行政区的数据
#只有三级行政区的数据
#其他
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
file_name_2 = 'call_addr_old.csv'
file = open(path + file_name, encoding='utf-8')
#读取json文件
data = json.load(file)
data = Series(data)
index_new = []
for index in data.index:
index_new.append(int(index))
#原地址规则文件
data = DataFrame(data, columns=['addr'])
data['id'] = index_new
#得到data表,含有addr,id字段————————————————————————————-----------------————
# print(data)
province = data[data['id']%10000==0]
province['addr_new'] = 'NULL'
#第一部分:原数据中含有省名和市名。
print('找出原数据中的省名和市名,请稍候.............................................................................')
index_item = province[(province['addr'].str.contains('省'))| (province['addr'].str.contains('市')) ].index
for item_1 in index_item:
province.ix[item_1, 'addr_new'] = province.ix[item_1,'addr'][:-1]
province.ix['150000', 'addr_new'] = '内蒙'
province.ix['450000', 'addr_new'] = '广西'
province.ix['540000', 'addr_new'] = '西藏'
province.ix['640000', 'addr_new'] = '宁夏'
province.ix['650000', 'addr_new'] = '新疆'
province.ix['810000', 'addr_new'] = '香港'
province.ix['820000', 'addr_new'] = '澳门'
# city = data[(data['id']%100==0) & (data['id']%10000!=0)]
# 建立一个数据表province,只存放省名,含有addr,id ,addr_new----------------------------------------------------
#data_2为原地址表,是处理的数据
data_old = pd.read_table(path + file_name_2, encoding='utf-8', sep='\t')
data_old['call_addr_new'] = "NULL"
#flag判断地址字符中是否含有多个省名,如果只有一个,就确定它属于哪个省,如果有多个省名,则给出提示。
data_old['flag_province'] = 0
data_old['flag_city'] = 0
data_old['flag_county'] = 0
| if province_word not in ['北京', '天津', '上海', '重庆', '香港', '澳门']:
# print(province_word, '***********************************************************')
#找到含有该省名的地址,比如 海南
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# print(data_old)
#遍历该省下的每一个市名,考虑直辖县,第三位为9的是直辖县,如429004,仙桃市。"469024":"临高县"。市的编码需要大于本省的编码,小于下一省的编码。普通城市编码小于
#9000,且被100整除,或者大于9000的直辖县。
for city_index in data[(data['id'] > int(index_province)) & (((data['id'] < int(index_province) + 9000) & (data['id'] % 100 == 0)
|(data['id']>int(index_province)+8999)&(data['id']<int(index_province)+10000)))].index:
city_word = data.ix[city_index, 'addr'][:-1]
# print(city_word, '_______________')
#找到地址字符中含有该城市数据索引
#该省的data_old数据记录中,寻找对应市的记录
data_old_province = data_old.ix[data_province_index]
if city_word != '吉林':
#data_city_index为data_old中含有特定省名和市名的数据索引,
data_city_index = data_old_province[data_old_province['call_addr'].str.contains(city_word)].index
data_old.ix[data_city_index, 'flag_city'] = data_old.ix[data_city_index, 'flag_city'] + 1
# print(data_old.ix[data_city_index, 'call_addr'], '____________________________________')
#遍历每一个包含该城市名的原数据
for data_item in data_city_index:
if data_old.ix[data_item, 'flag_city'] == 1:
#让标准文件中的城市名作为新的地址
data_old.ix[data_item, 'call_addr_new'] = province.ix[index_province, 'addr'] +','+ data.ix[city_index, 'addr']
elif data_old.ix[data_item, 'flag_city'] > 1:
data_old.ix[data_item, 'call_addr_new'] = "多个城市"
else:
pass
else:
#如果是吉林市,先不考虑
pass
elif province_word in ['北京', '天津', '上海', '重庆']:
#如果是这四个直辖市
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '市,' + province_word + '市'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
#香港、澳门
else:
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '特别行政区'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# 第二部分:原数据中是否含有中文和"未知"。
print('正在判断原数据是否含有非中文和“未知”,请稍候.............................................................................')
# print(data_old)
#-------------------------------------------------------------------------------------------------------------------------------
for item_2 in data_old.index:
addr_old = data_old.ix[item_2, 'call_addr']
#先看看地址中是否含有中文,不含有中文,新地址为“未知”;如果含有国外信息,新地址也为未知。
if re.search(r'[\u4e00-\u9fa5]', addr_old):
if re.search(r'[未知]', addr_old):
data_old.ix[item_2, 'call_addr_new'] = '未知'
else:
data_old.ix[item_2, 'call_addr_new'] = '未知'
#
# # print(data_old)
# #增加新列"call_addr_new"
# data_old['call_addr_new'] = "NULL"
#原数据中只有二级市名的情况------------------------------------------------------------------------------------------------------------------------------
#目前中国的二级行政区由市和盟,自治州,地区组成,分别用data_2, data_3, data_4表示。另外还要考虑省辖县。
data_2 = data[(data['addr'].str.contains('市')|(data['addr'].str.contains('盟'))|(data['id']%10000//1000==9))]
for data_2_item in data_2.index:
data_2.ix[data_2_item, 'addr'] = data_2.ix[data_2_item, 'addr'][:-1]
# print(data_2)
#自治州名字较长,取前两位,如临夏回族自治州
data_3 = data[data['addr'].str.contains('自治州')]
for data_3_item in data_3.index:
data_3.ix[data_3_item, 'addr'] = data_3.ix[data_3_item, 'addr'][:2]
data_4 = data[data['addr'].str.contains('地区')]
for data_4_item in data_4.index:
data_4.ix[data_4_item, 'addr'] = data_4.ix[data_4_item, 'addr'][:-2]
#data_old是要处理的数据,data_2,data_3,data_4是要遍历的二级行政区,吉林省含有吉林市单独考虑
data_other = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
# print(data_other,'__________________________________________________')
for city_2 in data_2.index:
city_2_word = data_2.ix[city_2, 'addr']
# print(city_2_word, '*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-')
city_2_index = data_other[data_other['call_addr'].str.contains(city_2_word)].index
# print(data_other[data_other['call_addr'].str.contains(city_2_word)], '------------------------------------------')
#该市必须被100整除或者第三位为9(省辖县)
if int(city_2)%100==0 or int(city_2)%10000//1000==9:
province_word_2 = data.ix[str(data.ix[city_2, 'id']//10000*10000), 'addr']
# print(province_word_2, '*********************************************************')
data_old.ix[city_2_index, ['call_addr_new']] = province_word_2 + ',' + data.ix[city_2, 'addr']
data_old.ix[city_2_index, 'flag_city'] = data_old.ix[city_2_index, 'flag_city'] + 1
for city_3 in data_3.index:
city_3_word = data_3.ix[city_3, 'addr']
city_3_index = data_other[data_other['call_addr'].str.contains(city_3_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_3) % 100 == 0 or int(city_3)%10000//1000 == 9:
province_word_3= data.ix[str(data.ix[city_3, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_3_index, ['call_addr_new']] = province_word_3 + ',' + data.ix[city_3, 'addr']
data_old.ix[city_3_index, 'flag_city'] = data_old.ix[city_3_index, 'flag_city'] + 1
for city_4 in data_4.index:
city_4_word = data_4.ix[city_4, 'addr']
city_4_index = data_other[data_other['call_addr'].str.contains(city_4_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_4) % 100 == 0 or int(city_4) %10000//1000 == 9:
province_word_4 = data.ix[str(data.ix[city_4, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_4_index, ['call_addr_new']] = province_word_4 + ',' + data.ix[city_4, 'addr']
data_old.ix[city_4_index, 'flag_city'] = data_old.ix[city_4_index, 'flag_city'] + 1
#现在还有一部分数据只有三级行政区的名字, 如海拉尔,但不考虑直辖县,用data_county表示----------------------------------------------------------------
#直辖市--------------------------------------------------
# 第三部分:是否只含有三级行政区的名字。
print('判断是否只含有三级行政区的名字,请稍候.............................................................................')
data_bj_tj = data["110000": "120119"]
data_sh = data['310000':"310151"]
data_cq = data['500000':'500243']
data_am_xg = data['810000':"820109"]
#得到包括四个直辖市和两个特别行政区的数据,行索引不变
data_unique = pd.concat([data_bj_tj, data_sh, data_cq])
data_old_unique = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_unique in data_unique.index:
unique_name = data_unique.ix[index_unique, 'addr']
if len(unique_name)> 5:
#地址太长,只取前两个
unique_name = unique_name[:2]
elif len(unique_name)<=2:
#地址太长,只取前两个
pass
else:
unique_name = unique_name[:-1]
unique_old_index = data_old_unique[data_old_unique['call_addr'].str.contains(unique_name)].index
data_old.ix[unique_old_index, 'call_addr_new'] = data.ix[str(int(index_unique)//10000*10000), 'addr'] + ',' + data.ix[str(int(index_unique)//10000*10000), 'addr']
data_old.ix[unique_old_index, 'flag_county'] += 1
#香港和澳门命名不一样
data_old_am_xg = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_am_xg in data_am_xg.index:
am_xg_name = data_am_xg.ix[index_am_xg, 'addr']
if len(am_xg_name) > 5:
# 地址太长,只取前两个
am_xg_name = am_xg_name[:3]
elif len(am_xg_name)<=2:
am_xg_name = am_xg_name[:2]
else:
am_xg_name = am_xg_name[:-1]
am_xg_old_index = data_old_am_xg[data_old_am_xg['call_addr'].str.contains(am_xg_name)].index
data_old.ix[am_xg_old_index, 'call_addr_new'] = data.ix[str(int(index_am_xg) // 10000 * 10000), 'addr']
data_old.ix[am_xg_old_index, 'flag_county'] += 1
data_county = data[(data['id']%100 != 0)&(data['id']%10000//1000!=9)]
list_county_drop = list(data_unique['id'])
#extend没有返回值和append一样,所以直接修改
list_county_drop.extend(list(data_am_xg['id']))
#data_county不能包括直辖市和香港澳门,因为它们的二级行政区命名不一样。
data_county = data_county[~data_county['id'].isin(list_county_drop)]
data_county_1 = data_county[(data_county['addr'].str.contains('县'))|(data_county['addr'].str.contains('区'))|(data_county['addr'].str.contains('市'))]
data_county_2 = data_county[data_county['addr'].str.contains('自治县')]
#将data_old中处理的数据选进来,还有的县是属于直辖市和特别行政区,得去掉。前两位是11,31,50,12,81,82
data_old_other_2 = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
for county_index_1 in data_county_1.index:
county_word_1 = data_county_1.ix[county_index_1, 'addr']
if len(county_word_1) > 2:
county_word_1 = county_word_1[:-1]
county_old_index = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_1)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 10000 * 10000), 'addr']
city_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index, ['call_addr_new']] = province_word_5 + ',' + city_word_5
data_old.ix[county_old_index,'flag_county'] += 1
for county_index_2 in data_county_2.index:
#对于自治县,只取前两个字符
county_word_2 = data_county_2.ix[county_index_2, 'addr'][:2]
county_old_index_2 = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_2)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 10000 * 10000), 'addr']
city_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index_2, ['call_addr_new']] = province_word_6 + ',' + city_word_6
data_old.ix[county_old_index_2, 'flag_county'] += 1
#生成excel-----------------------------------------------------------------------------------------------------------------------------------------
writer = pd.ExcelWriter(path + file_name_2[:-4] + '_analysis.xlsx' )
data_old.to_excel(writer, 'sheet1')
writer.save()
read_local_data() | #找出既有省名又有市名的数据------------------------------------------------------------------------------------------------------------
#遍历每一个省名
for index_province in province.index:
province_word = province.ix[index_province, 'addr_new'] | random_line_split |
call_addr_alter.py | #coding: utf-8
import json
import re
# import sys
import time
import pandas as pd
from pandas import DataFrame, Series
# from datetime import datetime
# import pymysql
from call_history_addr_alter import mysql_connection as my
'''
1.利用mysql过滤:
call_addr为“未知”,“-”,“������������”及null
2.原数据形式:
“郑州”、“抚顺;沈阳;铁岭”(可能不在同一个省)、“海南省海口市”、“海南省.海口市”、“湖南永州(冷水滩)”、“上海市.上海市”、“西安.咸阳”、“美国/加拿大”、"其他”
3.统一格式:
“河南省,郑州市”、“北京市”、“广西壮族自治区,钦州市”、
生成新的表
一级行政区,33个
二级行政区,334个(地级市293个,自治州30个,地区8个,盟3个)
三级行政区,2856个(市辖区958个,县1367个,县级市360个,自治县117个,特区1个,林区1个,旗49个,自治旗3个)
问题:
县级市需要考虑么
同一个"mobile"对应多个"call_addr"
最终提供一个csv的本地数据文件
'''
#读取省市规则的json文件,并转换成Series格式
def read_addr_json():
print('read_addr_json()函数开始执行,请稍候...')
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
with open(path + file_name,'r') as json_file:
data = json_file.read()
#data应该为字典格式
print(type(data))
print(data)
data_s = Series(data)
print(data_s)
return data_s
#使用mysql语句将数据库中文件生成本地csv文件,方便后续操作,无返回值。
def get_local_data():
#计算时间
start_1 = time.time()
#路径
path = 'D:\\work\\database\\province-city\\'
file_name = 'call_addr_old.csv'
file_name_test = 'call_ | = pd.ExcelWriter(path + file_name[:-4] + '.xlsx')
data.to_excel(writer, 'sheet1')
writer.save()
print('本地文件已生成,获取的数据一共%d行\n'%len(data))
print('函数已经结束,共花费时间%d'%(time.time()-start_1))
#读取本地的通话记录文件,并增加新列
def read_local_data():
#不含中文的数据,或者含有国外名称的数据
#既有一级行政区又有二级行政区的数据
#只有二级行政区的数据
#只有三级行政区的数据
#其他
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
file_name_2 = 'call_addr_old.csv'
file = open(path + file_name, encoding='utf-8')
#读取json文件
data = json.load(file)
data = Series(data)
index_new = []
for index in data.index:
index_new.append(int(index))
#原地址规则文件
data = DataFrame(data, columns=['addr'])
data['id'] = index_new
#得到data表,含有addr,id字段————————————————————————————-----------------————
# print(data)
province = data[data['id']%10000==0]
province['addr_new'] = 'NULL'
#第一部分:原数据中含有省名和市名。
print('找出原数据中的省名和市名,请稍候.............................................................................')
index_item = province[(province['addr'].str.contains('省'))| (province['addr'].str.contains('市')) ].index
for item_1 in index_item:
province.ix[item_1, 'addr_new'] = province.ix[item_1,'addr'][:-1]
province.ix['150000', 'addr_new'] = '内蒙'
province.ix['450000', 'addr_new'] = '广西'
province.ix['540000', 'addr_new'] = '西藏'
province.ix['640000', 'addr_new'] = '宁夏'
province.ix['650000', 'addr_new'] = '新疆'
province.ix['810000', 'addr_new'] = '香港'
province.ix['820000', 'addr_new'] = '澳门'
# city = data[(data['id']%100==0) & (data['id']%10000!=0)]
# 建立一个数据表province,只存放省名,含有addr,id ,addr_new----------------------------------------------------
#data_2为原地址表,是处理的数据
data_old = pd.read_table(path + file_name_2, encoding='utf-8', sep='\t')
data_old['call_addr_new'] = "NULL"
#flag判断地址字符中是否含有多个省名,如果只有一个,就确定它属于哪个省,如果有多个省名,则给出提示。
data_old['flag_province'] = 0
data_old['flag_city'] = 0
data_old['flag_county'] = 0
#找出既有省名又有市名的数据------------------------------------------------------------------------------------------------------------
#遍历每一个省名
for index_province in province.index:
province_word = province.ix[index_province, 'addr_new']
if province_word not in ['北京', '天津', '上海', '重庆', '香港', '澳门']:
# print(province_word, '***********************************************************')
#找到含有该省名的地址,比如 海南
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# print(data_old)
#遍历该省下的每一个市名,考虑直辖县,第三位为9的是直辖县,如429004,仙桃市。"469024":"临高县"。市的编码需要大于本省的编码,小于下一省的编码。普通城市编码小于
#9000,且被100整除,或者大于9000的直辖县。
for city_index in data[(data['id'] > int(index_province)) & (((data['id'] < int(index_province) + 9000) & (data['id'] % 100 == 0)
|(data['id']>int(index_province)+8999)&(data['id']<int(index_province)+10000)))].index:
city_word = data.ix[city_index, 'addr'][:-1]
# print(city_word, '_______________')
#找到地址字符中含有该城市数据索引
#该省的data_old数据记录中,寻找对应市的记录
data_old_province = data_old.ix[data_province_index]
if city_word != '吉林':
#data_city_index为data_old中含有特定省名和市名的数据索引,
data_city_index = data_old_province[data_old_province['call_addr'].str.contains(city_word)].index
data_old.ix[data_city_index, 'flag_city'] = data_old.ix[data_city_index, 'flag_city'] + 1
# print(data_old.ix[data_city_index, 'call_addr'], '____________________________________')
#遍历每一个包含该城市名的原数据
for data_item in data_city_index:
if data_old.ix[data_item, 'flag_city'] == 1:
#让标准文件中的城市名作为新的地址
data_old.ix[data_item, 'call_addr_new'] = province.ix[index_province, 'addr'] +','+ data.ix[city_index, 'addr']
elif data_old.ix[data_item, 'flag_city'] > 1:
data_old.ix[data_item, 'call_addr_new'] = "多个城市"
else:
pass
else:
#如果是吉林市,先不考虑
pass
elif province_word in ['北京', '天津', '上海', '重庆']:
#如果是这四个直辖市
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '市,' + province_word + '市'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
#香港、澳门
else:
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '特别行政区'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# 第二部分:原数据中是否含有中文和"未知"。
print('正在判断原数据是否含有非中文和“未知”,请稍候.............................................................................')
# print(data_old)
#-------------------------------------------------------------------------------------------------------------------------------
for item_2 in data_old.index:
addr_old = data_old.ix[item_2, 'call_addr']
#先看看地址中是否含有中文,不含有中文,新地址为“未知”;如果含有国外信息,新地址也为未知。
if re.search(r'[\u4e00-\u9fa5]', addr_old):
if re.search(r'[未知]', addr_old):
data_old.ix[item_2, 'call_addr_new'] = '未知'
else:
data_old.ix[item_2, 'call_addr_new'] = '未知'
#
# # print(data_old)
# #增加新列"call_addr_new"
# data_old['call_addr_new'] = "NULL"
#原数据中只有二级市名的情况------------------------------------------------------------------------------------------------------------------------------
#目前中国的二级行政区由市和盟,自治州,地区组成,分别用data_2, data_3, data_4表示。另外还要考虑省辖县。
data_2 = data[(data['addr'].str.contains('市')|(data['addr'].str.contains('盟'))|(data['id']%10000//1000==9))]
for data_2_item in data_2.index:
data_2.ix[data_2_item, 'addr'] = data_2.ix[data_2_item, 'addr'][:-1]
# print(data_2)
#自治州名字较长,取前两位,如临夏回族自治州
data_3 = data[data['addr'].str.contains('自治州')]
for data_3_item in data_3.index:
data_3.ix[data_3_item, 'addr'] = data_3.ix[data_3_item, 'addr'][:2]
data_4 = data[data['addr'].str.contains('地区')]
for data_4_item in data_4.index:
data_4.ix[data_4_item, 'addr'] = data_4.ix[data_4_item, 'addr'][:-2]
#data_old是要处理的数据,data_2,data_3,data_4是要遍历的二级行政区,吉林省含有吉林市单独考虑
data_other = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
# print(data_other,'__________________________________________________')
for city_2 in data_2.index:
city_2_word = data_2.ix[city_2, 'addr']
# print(city_2_word, '*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-')
city_2_index = data_other[data_other['call_addr'].str.contains(city_2_word)].index
# print(data_other[data_other['call_addr'].str.contains(city_2_word)], '------------------------------------------')
#该市必须被100整除或者第三位为9(省辖县)
if int(city_2)%100==0 or int(city_2)%10000//1000==9:
province_word_2 = data.ix[str(data.ix[city_2, 'id']//10000*10000), 'addr']
# print(province_word_2, '*********************************************************')
data_old.ix[city_2_index, ['call_addr_new']] = province_word_2 + ',' + data.ix[city_2, 'addr']
data_old.ix[city_2_index, 'flag_city'] = data_old.ix[city_2_index, 'flag_city'] + 1
for city_3 in data_3.index:
city_3_word = data_3.ix[city_3, 'addr']
city_3_index = data_other[data_other['call_addr'].str.contains(city_3_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_3) % 100 == 0 or int(city_3)%10000//1000 == 9:
province_word_3= data.ix[str(data.ix[city_3, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_3_index, ['call_addr_new']] = province_word_3 + ',' + data.ix[city_3, 'addr']
data_old.ix[city_3_index, 'flag_city'] = data_old.ix[city_3_index, 'flag_city'] + 1
for city_4 in data_4.index:
city_4_word = data_4.ix[city_4, 'addr']
city_4_index = data_other[data_other['call_addr'].str.contains(city_4_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_4) % 100 == 0 or int(city_4) %10000//1000 == 9:
province_word_4 = data.ix[str(data.ix[city_4, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_4_index, ['call_addr_new']] = province_word_4 + ',' + data.ix[city_4, 'addr']
data_old.ix[city_4_index, 'flag_city'] = data_old.ix[city_4_index, 'flag_city'] + 1
#现在还有一部分数据只有三级行政区的名字, 如海拉尔,但不考虑直辖县,用data_county表示----------------------------------------------------------------
#直辖市--------------------------------------------------
# 第三部分:是否只含有三级行政区的名字。
print('判断是否只含有三级行政区的名字,请稍候.............................................................................')
data_bj_tj = data["110000": "120119"]
data_sh = data['310000':"310151"]
data_cq = data['500000':'500243']
data_am_xg = data['810000':"820109"]
#得到包括四个直辖市和两个特别行政区的数据,行索引不变
data_unique = pd.concat([data_bj_tj, data_sh, data_cq])
data_old_unique = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_unique in data_unique.index:
unique_name = data_unique.ix[index_unique, 'addr']
if len(unique_name)> 5:
#地址太长,只取前两个
unique_name = unique_name[:2]
elif len(unique_name)<=2:
#地址太长,只取前两个
pass
else:
unique_name = unique_name[:-1]
unique_old_index = data_old_unique[data_old_unique['call_addr'].str.contains(unique_name)].index
data_old.ix[unique_old_index, 'call_addr_new'] = data.ix[str(int(index_unique)//10000*10000), 'addr'] + ',' + data.ix[str(int(index_unique)//10000*10000), 'addr']
data_old.ix[unique_old_index, 'flag_county'] += 1
#香港和澳门命名不一样
data_old_am_xg = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_am_xg in data_am_xg.index:
am_xg_name = data_am_xg.ix[index_am_xg, 'addr']
if len(am_xg_name) > 5:
# 地址太长,只取前两个
am_xg_name = am_xg_name[:3]
elif len(am_xg_name)<=2:
am_xg_name = am_xg_name[:2]
else:
am_xg_name = am_xg_name[:-1]
am_xg_old_index = data_old_am_xg[data_old_am_xg['call_addr'].str.contains(am_xg_name)].index
data_old.ix[am_xg_old_index, 'call_addr_new'] = data.ix[str(int(index_am_xg) // 10000 * 10000), 'addr']
data_old.ix[am_xg_old_index, 'flag_county'] += 1
data_county = data[(data['id']%100 != 0)&(data['id']%10000//1000!=9)]
list_county_drop = list(data_unique['id'])
#extend没有返回值和append一样,所以直接修改
list_county_drop.extend(list(data_am_xg['id']))
#data_county不能包括直辖市和香港澳门,因为它们的二级行政区命名不一样。
data_county = data_county[~data_county['id'].isin(list_county_drop)]
data_county_1 = data_county[(data_county['addr'].str.contains('县'))|(data_county['addr'].str.contains('区'))|(data_county['addr'].str.contains('市'))]
data_county_2 = data_county[data_county['addr'].str.contains('自治县')]
#将data_old中处理的数据选进来,还有的县是属于直辖市和特别行政区,得去掉。前两位是11,31,50,12,81,82
data_old_other_2 = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
for county_index_1 in data_county_1.index:
county_word_1 = data_county_1.ix[county_index_1, 'addr']
if len(county_word_1) > 2:
county_word_1 = county_word_1[:-1]
county_old_index = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_1)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 10000 * 10000), 'addr']
city_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index, ['call_addr_new']] = province_word_5 + ',' + city_word_5
data_old.ix[county_old_index,'flag_county'] += 1
for county_index_2 in data_county_2.index:
#对于自治县,只取前两个字符
county_word_2 = data_county_2.ix[county_index_2, 'addr'][:2]
county_old_index_2 = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_2)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 10000 * 10000), 'addr']
city_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index_2, ['call_addr_new']] = province_word_6 + ',' + city_word_6
data_old.ix[county_old_index_2, 'flag_county'] += 1
#生成excel-----------------------------------------------------------------------------------------------------------------------------------------
writer = pd.ExcelWriter(path + file_name_2[:-4] + '_analysis.xlsx' )
data_old.to_excel(writer, 'sheet1')
writer.save()
read_local_data()
| addr_alter_test.csv'
print('get_local_data()函数正在执行,请稍候...')
select_string = '''
select call_addr from call_history
group by call_addr
'''
columns = ['call_addr']
data = my.mysql_connection(select_string, columns)
print('正在生成本地文件,请稍候...')
data.to_csv(path + file_name, index=False, sep='\t', encoding='utf-8')
#生成excel
writer | identifier_body |
call_addr_alter.py | #coding: utf-8
import json
import re
# import sys
import time
import pandas as pd
from pandas import DataFrame, Series
# from datetime import datetime
# import pymysql
from call_history_addr_alter import mysql_connection as my
'''
1.利用mysql过滤:
call_addr为“未知”,“-”,“������������”及null
2.原数据形式:
“郑州”、“抚顺;沈阳;铁岭”(可能不在同一个省)、“海南省海口市”、“海南省.海口市”、“湖南永州(冷水滩)”、“上海市.上海市”、“西安.咸阳”、“美国/加拿大”、"其他”
3.统一格式:
“河南省,郑州市”、“北京市”、“广西壮族自治区,钦州市”、
生成新的表
一级行政区,33个
二级行政区,334个(地级市293个,自治州30个,地区8个,盟3个)
三级行政区,2856个(市辖区958个,县1367个,县级市360个,自治县117个,特区1个,林区1个,旗49个,自治旗3个)
问题:
县级市需要考虑么
同一个"mobile"对应多个"call_addr"
最终提供一个csv的本地数据文件
'''
#读取省市规则的json文件,并转换成Series格式
def read_addr_json():
print('read_addr_json()函数开始执行,请稍候...')
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
with open(path + file_name,'r') as json_file:
data = json_file.read()
#data应该为字典格式
print(type(data))
print(data)
data_s = Series(data)
print(data_s)
return data_s
#使用mysql语句将数据库中文件生成本地csv文件,方便后续操作,无返回值。
def get_local_data():
#计算时间
start_1 = time.time()
#路径
path = 'D:\\work\\database\\province-city\\'
file_name = 'call_addr_old.csv'
f | = 'call_addr_alter_test.csv'
print('get_local_data()函数正在执行,请稍候...')
select_string = '''
select call_addr from call_history
group by call_addr
'''
columns = ['call_addr']
data = my.mysql_connection(select_string, columns)
print('正在生成本地文件,请稍候...')
data.to_csv(path + file_name, index=False, sep='\t', encoding='utf-8')
#生成excel
writer = pd.ExcelWriter(path + file_name[:-4] + '.xlsx')
data.to_excel(writer, 'sheet1')
writer.save()
print('本地文件已生成,获取的数据一共%d行\n'%len(data))
print('函数已经结束,共花费时间%d'%(time.time()-start_1))
#读取本地的通话记录文件,并增加新列
def read_local_data():
#不含中文的数据,或者含有国外名称的数据
#既有一级行政区又有二级行政区的数据
#只有二级行政区的数据
#只有三级行政区的数据
#其他
path = "D:\\work\\database\\province-city\\"
file_name = "province_city_rule.json"
file_name_2 = 'call_addr_old.csv'
file = open(path + file_name, encoding='utf-8')
#读取json文件
data = json.load(file)
data = Series(data)
index_new = []
for index in data.index:
index_new.append(int(index))
#原地址规则文件
data = DataFrame(data, columns=['addr'])
data['id'] = index_new
#得到data表,含有addr,id字段————————————————————————————-----------------————
# print(data)
province = data[data['id']%10000==0]
province['addr_new'] = 'NULL'
#第一部分:原数据中含有省名和市名。
print('找出原数据中的省名和市名,请稍候.............................................................................')
index_item = province[(province['addr'].str.contains('省'))| (province['addr'].str.contains('市')) ].index
for item_1 in index_item:
province.ix[item_1, 'addr_new'] = province.ix[item_1,'addr'][:-1]
province.ix['150000', 'addr_new'] = '内蒙'
province.ix['450000', 'addr_new'] = '广西'
province.ix['540000', 'addr_new'] = '西藏'
province.ix['640000', 'addr_new'] = '宁夏'
province.ix['650000', 'addr_new'] = '新疆'
province.ix['810000', 'addr_new'] = '香港'
province.ix['820000', 'addr_new'] = '澳门'
# city = data[(data['id']%100==0) & (data['id']%10000!=0)]
# 建立一个数据表province,只存放省名,含有addr,id ,addr_new----------------------------------------------------
#data_2为原地址表,是处理的数据
data_old = pd.read_table(path + file_name_2, encoding='utf-8', sep='\t')
data_old['call_addr_new'] = "NULL"
#flag判断地址字符中是否含有多个省名,如果只有一个,就确定它属于哪个省,如果有多个省名,则给出提示。
data_old['flag_province'] = 0
data_old['flag_city'] = 0
data_old['flag_county'] = 0
#找出既有省名又有市名的数据------------------------------------------------------------------------------------------------------------
#遍历每一个省名
for index_province in province.index:
province_word = province.ix[index_province, 'addr_new']
if province_word not in ['北京', '天津', '上海', '重庆', '香港', '澳门']:
# print(province_word, '***********************************************************')
#找到含有该省名的地址,比如 海南
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# print(data_old)
#遍历该省下的每一个市名,考虑直辖县,第三位为9的是直辖县,如429004,仙桃市。"469024":"临高县"。市的编码需要大于本省的编码,小于下一省的编码。普通城市编码小于
#9000,且被100整除,或者大于9000的直辖县。
for city_index in data[(data['id'] > int(index_province)) & (((data['id'] < int(index_province) + 9000) & (data['id'] % 100 == 0)
|(data['id']>int(index_province)+8999)&(data['id']<int(index_province)+10000)))].index:
city_word = data.ix[city_index, 'addr'][:-1]
# print(city_word, '_______________')
#找到地址字符中含有该城市数据索引
#该省的data_old数据记录中,寻找对应市的记录
data_old_province = data_old.ix[data_province_index]
if city_word != '吉林':
#data_city_index为data_old中含有特定省名和市名的数据索引,
data_city_index = data_old_province[data_old_province['call_addr'].str.contains(city_word)].index
data_old.ix[data_city_index, 'flag_city'] = data_old.ix[data_city_index, 'flag_city'] + 1
# print(data_old.ix[data_city_index, 'call_addr'], '____________________________________')
#遍历每一个包含该城市名的原数据
for data_item in data_city_index:
if data_old.ix[data_item, 'flag_city'] == 1:
#让标准文件中的城市名作为新的地址
data_old.ix[data_item, 'call_addr_new'] = province.ix[index_province, 'addr'] +','+ data.ix[city_index, 'addr']
elif data_old.ix[data_item, 'flag_city'] > 1:
data_old.ix[data_item, 'call_addr_new'] = "多个城市"
else:
pass
else:
#如果是吉林市,先不考虑
pass
elif province_word in ['北京', '天津', '上海', '重庆']:
#如果是这四个直辖市
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '市,' + province_word + '市'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
#香港、澳门
else:
data_province_index = data_old[data_old["call_addr"].str.contains(province_word)].index
data_old.ix[data_province_index, 'call_addr_new'] = province_word + '特别行政区'
data_old.ix[data_province_index, 'flag_province'] = data_old.ix[data_province_index, 'flag_province'] + 1
# 第二部分:原数据中是否含有中文和"未知"。
print('正在判断原数据是否含有非中文和“未知”,请稍候.............................................................................')
# print(data_old)
#-------------------------------------------------------------------------------------------------------------------------------
for item_2 in data_old.index:
addr_old = data_old.ix[item_2, 'call_addr']
#先看看地址中是否含有中文,不含有中文,新地址为“未知”;如果含有国外信息,新地址也为未知。
if re.search(r'[\u4e00-\u9fa5]', addr_old):
if re.search(r'[未知]', addr_old):
data_old.ix[item_2, 'call_addr_new'] = '未知'
else:
data_old.ix[item_2, 'call_addr_new'] = '未知'
#
# # print(data_old)
# #增加新列"call_addr_new"
# data_old['call_addr_new'] = "NULL"
#原数据中只有二级市名的情况------------------------------------------------------------------------------------------------------------------------------
#目前中国的二级行政区由市和盟,自治州,地区组成,分别用data_2, data_3, data_4表示。另外还要考虑省辖县。
data_2 = data[(data['addr'].str.contains('市')|(data['addr'].str.contains('盟'))|(data['id']%10000//1000==9))]
for data_2_item in data_2.index:
data_2.ix[data_2_item, 'addr'] = data_2.ix[data_2_item, 'addr'][:-1]
# print(data_2)
#自治州名字较长,取前两位,如临夏回族自治州
data_3 = data[data['addr'].str.contains('自治州')]
for data_3_item in data_3.index:
data_3.ix[data_3_item, 'addr'] = data_3.ix[data_3_item, 'addr'][:2]
data_4 = data[data['addr'].str.contains('地区')]
for data_4_item in data_4.index:
data_4.ix[data_4_item, 'addr'] = data_4.ix[data_4_item, 'addr'][:-2]
#data_old是要处理的数据,data_2,data_3,data_4是要遍历的二级行政区,吉林省含有吉林市单独考虑
data_other = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
# print(data_other,'__________________________________________________')
for city_2 in data_2.index:
city_2_word = data_2.ix[city_2, 'addr']
# print(city_2_word, '*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-')
city_2_index = data_other[data_other['call_addr'].str.contains(city_2_word)].index
# print(data_other[data_other['call_addr'].str.contains(city_2_word)], '------------------------------------------')
#该市必须被100整除或者第三位为9(省辖县)
if int(city_2)%100==0 or int(city_2)%10000//1000==9:
province_word_2 = data.ix[str(data.ix[city_2, 'id']//10000*10000), 'addr']
# print(province_word_2, '*********************************************************')
data_old.ix[city_2_index, ['call_addr_new']] = province_word_2 + ',' + data.ix[city_2, 'addr']
data_old.ix[city_2_index, 'flag_city'] = data_old.ix[city_2_index, 'flag_city'] + 1
for city_3 in data_3.index:
city_3_word = data_3.ix[city_3, 'addr']
city_3_index = data_other[data_other['call_addr'].str.contains(city_3_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_3) % 100 == 0 or int(city_3)%10000//1000 == 9:
province_word_3= data.ix[str(data.ix[city_3, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_3_index, ['call_addr_new']] = province_word_3 + ',' + data.ix[city_3, 'addr']
data_old.ix[city_3_index, 'flag_city'] = data_old.ix[city_3_index, 'flag_city'] + 1
for city_4 in data_4.index:
city_4_word = data_4.ix[city_4, 'addr']
city_4_index = data_other[data_other['call_addr'].str.contains(city_4_word)].index
# 该市必须被100整除或者第三位为9(省辖县)
if int(city_4) % 100 == 0 or int(city_4) %10000//1000 == 9:
province_word_4 = data.ix[str(data.ix[city_4, 'id'] // 10000 * 10000), 'addr']
# print(province_word_3, '*********************************************************')
data_old.ix[city_4_index, ['call_addr_new']] = province_word_4 + ',' + data.ix[city_4, 'addr']
data_old.ix[city_4_index, 'flag_city'] = data_old.ix[city_4_index, 'flag_city'] + 1
#现在还有一部分数据只有三级行政区的名字, 如海拉尔,但不考虑直辖县,用data_county表示----------------------------------------------------------------
#直辖市--------------------------------------------------
# 第三部分:是否只含有三级行政区的名字。
print('判断是否只含有三级行政区的名字,请稍候.............................................................................')
data_bj_tj = data["110000": "120119"]
data_sh = data['310000':"310151"]
data_cq = data['500000':'500243']
data_am_xg = data['810000':"820109"]
#得到包括四个直辖市和两个特别行政区的数据,行索引不变
data_unique = pd.concat([data_bj_tj, data_sh, data_cq])
data_old_unique = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_unique in data_unique.index:
unique_name = data_unique.ix[index_unique, 'addr']
if len(unique_name)> 5:
#地址太长,只取前两个
unique_name = unique_name[:2]
elif len(unique_name)<=2:
#地址太长,只取前两个
pass
else:
unique_name = unique_name[:-1]
unique_old_index = data_old_unique[data_old_unique['call_addr'].str.contains(unique_name)].index
data_old.ix[unique_old_index, 'call_addr_new'] = data.ix[str(int(index_unique)//10000*10000), 'addr'] + ',' + data.ix[str(int(index_unique)//10000*10000), 'addr']
data_old.ix[unique_old_index, 'flag_county'] += 1
#香港和澳门命名不一样
data_old_am_xg = data_old[(data_old['call_addr_new'] == 'NULL') & (~(data_old['call_addr'].str.contains('吉林')))]
for index_am_xg in data_am_xg.index:
am_xg_name = data_am_xg.ix[index_am_xg, 'addr']
if len(am_xg_name) > 5:
# 地址太长,只取前两个
am_xg_name = am_xg_name[:3]
elif len(am_xg_name)<=2:
am_xg_name = am_xg_name[:2]
else:
am_xg_name = am_xg_name[:-1]
am_xg_old_index = data_old_am_xg[data_old_am_xg['call_addr'].str.contains(am_xg_name)].index
data_old.ix[am_xg_old_index, 'call_addr_new'] = data.ix[str(int(index_am_xg) // 10000 * 10000), 'addr']
data_old.ix[am_xg_old_index, 'flag_county'] += 1
data_county = data[(data['id']%100 != 0)&(data['id']%10000//1000!=9)]
list_county_drop = list(data_unique['id'])
#extend没有返回值和append一样,所以直接修改
list_county_drop.extend(list(data_am_xg['id']))
#data_county不能包括直辖市和香港澳门,因为它们的二级行政区命名不一样。
data_county = data_county[~data_county['id'].isin(list_county_drop)]
data_county_1 = data_county[(data_county['addr'].str.contains('县'))|(data_county['addr'].str.contains('区'))|(data_county['addr'].str.contains('市'))]
data_county_2 = data_county[data_county['addr'].str.contains('自治县')]
#将data_old中处理的数据选进来,还有的县是属于直辖市和特别行政区,得去掉。前两位是11,31,50,12,81,82
data_old_other_2 = data_old[(data_old['call_addr_new'] == 'NULL')&(~(data_old['call_addr'].str.contains('吉林')))]
for county_index_1 in data_county_1.index:
county_word_1 = data_county_1.ix[county_index_1, 'addr']
if len(county_word_1) > 2:
county_word_1 = county_word_1[:-1]
county_old_index = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_1)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 10000 * 10000), 'addr']
city_word_5 = data.ix[str(data.ix[county_index_1, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index, ['call_addr_new']] = province_word_5 + ',' + city_word_5
data_old.ix[county_old_index,'flag_county'] += 1
for county_index_2 in data_county_2.index:
#对于自治县,只取前两个字符
county_word_2 = data_county_2.ix[county_index_2, 'addr'][:2]
county_old_index_2 = data_old_other_2[data_old_other_2['call_addr'].str.contains(county_word_2)].index
#找到data中找到省名和市名,并在data_old中修改“call_addr_new”
province_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 10000 * 10000), 'addr']
city_word_6 = data.ix[str(data.ix[county_index_2, 'id'] // 100 * 100), 'addr']
data_old.ix[county_old_index_2, ['call_addr_new']] = province_word_6 + ',' + city_word_6
data_old.ix[county_old_index_2, 'flag_county'] += 1
#生成excel-----------------------------------------------------------------------------------------------------------------------------------------
writer = pd.ExcelWriter(path + file_name_2[:-4] + '_analysis.xlsx' )
data_old.to_excel(writer, 'sheet1')
writer.save()
read_local_data()
| ile_name_test | identifier_name |
find.py | # Copyright 2014 Jetperch LLC - See LICENSE file.
"""
Find images using a variety of techniques.
This software is heavily based upon and copies some code from the following
opencv samples:
* https://github.com/Itseez/opencv/blob/master/samples/python2/find_obj.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/feature_homography.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/plane_tracker.py
"""
import cv2
import numpy as np
from collections import namedtuple
class FindError(Exception):
"""Error indicating that find failed."""
def __init__(self, metric, location, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.metric = metric
self.location = location
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
FLANN_PARAMS_NORM = {
'algorithm': FLANN_INDEX_KDTREE,
'trees': 5
}
FLANN_PARAMS_HAMMING = {
'algorithm': FLANN_INDEX_LSH,
'table_number': 6,
'key_size': 12,
'multi_probe_level': 1
}
MIN_MATCH_COUNT_DEFAULT = 10
Target = namedtuple('Target', 'image, quad, keypoints, descriptors, data')
"""The Target for image searches.
image - image to track
quad - Target boundary quad in the original image
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
"""
class TrackedTarget(object):
"""A Target that was found during an image search.
:var target: reference to :class:`Target`
:var image: The image used for the search.
:var inliers: The list of matching points given as [((x0, y0), (x1, y1)), ...]
:var outliers: The list of unmatched points.
:var H: homography matrix from (x0, y0) to (x1, y1)
:var quad: target bounary quad in input frame
"""
def __init__(self, target, image, inliers, outliers, H, quad):
self.target = target
self.image = image
self.inliers = inliers
self.outliers = outliers
self.H = H
self.quad = quad
def inTargetCoordinates(self, p):
p = np.float32(p)
H = np.linalg.inv(self.H)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), H).reshape(-1, 2)
return p
def inImageCoordinates(self, p):
p = np.float32(p)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), self.H).reshape(-1, 2)
return p
def draw(self, image, color, width, quad=None):
"""Draw the tracked target on the image.
:param image: The image used to draw the tracked target. If None,
then use the same image from the search.
:param color: The color tuple.
:param width: The line width in pixels.
:param quad: The quad to draw. When None (default), uses the quad from
this instance.
:returns: The image, which is also modified in place.
"""
if image is None:
image = self.image.copy()
if quad is None:
quad = self.quad
color = tuple(np.int32(color).tolist())
cv2.polylines(image, [np.int32(quad)], True, color, int(width))
return image
def visualize(self):
"""Visualize the search results.
:returns: The resulting visualization image.
"""
colors = {'outline': (220, 220, 220),
'inlier': (0, 255, 0),
'outlier': (0, 0, 255),
'lines': (128, 220, 128)}
# Create output image for visualization
gap = 5
h1, w1 = self.target.image.shape[:2]
h2, w2 = self.image.shape[:2]
vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)
vis[:h1, :w1, :] = self.target.image
w1 += gap
vis[:h2, w1:w1+w2, :] = self.image
# Draw the located object
quad = np.float32(self.quad) + np.float32([w1, 0])
self.draw(vis, colors['outline'], 2, quad)
# draw point details
inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]
outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]
if colors['outlier'] is not None: # draw x on each point
r = 2 # radius
thickness = 2
for x0, y0, x1, y1 in outliers:
cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)
if colors['lines'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)
if colors['inlier'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)
cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)
return vis
class Features(object):
"""Find an image using feature-based object recognition.
This class holds initialized search objects to accelerate subsequent
searchs which is especially useful for processing video streams.
"""
def __init__(self, search_spec=None):
"""Initialize the feature detection framework.
:param search_spec: The specification string which consists of
"[detector]-[matcher]". Valid detector values include
sift, surf (Default), and orb. Valid matcher values include flann and
bf (brute_force) (Default).
"""
if search_spec is None or not search_spec:
search_spec = 'orb'
chunks = search_spec.split('-')
if len(chunks) == 1:
chunks.append('bf')
elif len(chunks) != 2:
raise ValueError('Invalid search specification: %s' % search_spec)
detector_name, matcher_name = [x.lower() for x in chunks]
self._detector, norm = self._init_detector(detector_name)
self._matcher = self._init_matcher(matcher_name, norm)
self._targets = []
self.min_match_count = MIN_MATCH_COUNT_DEFAULT
def _init_detector(self, name):
norm = cv2.NORM_L2
if name == 'fast':
detector = cv2.FastFeatureDetector()
elif name == 'brisk':
detector = cv2.BRISK()
elif name == 'sift':
detector = cv2.SIFT()
elif name == 'surf':
detector = cv2.SURF(800)
elif name == 'orb':
detector = cv2.ORB(1200)
norm = cv2.NORM_HAMMING
else:
raise ValueError('Unsupported detector: %s' % name)
return detector, norm
def _init_matcher(self, name, norm):
if name == 'flann':
if norm == cv2.NORM_L2:
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
else:
flann_params = dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
elif name in ['bf', 'brute_force']:
matcher = cv2.BFMatcher(norm)
else:
raise ValueError('Unsupported matcher: %s' % name)
return matcher
def | (self, image, data=None):
"""Add a new target to the serach list.
:param image: The image containing just the target.
:param data: Additional user data. Defaults to None.
:raises ValueError: If the image is insufficient.
"""
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
raise ValueError('Target image has insufficient keypoints')
y, x = image.shape[:2]
quad = np.float32([[0, 0], [x, 0], [x, y], [0, y]])
self._targets.append(Target(image, quad, keypoints, descriptors, data))
self._matcher.add([descriptors])
#print('Target has %d features' % len(keypoints))
def clear(self):
"""Remove all targets"""
self._targets = []
self._matcher.clear()
def draw_keypoints(self, image, color=None):
if color is None:
color = [0, 255, 0]
keypoints, _ = self._detector.detectAndCompute(image, None)
image = image.copy()
return cv2.drawKeypoints(image, keypoints, color=color)
def find(self, image, k=None, ratio=None):
"""Find the targets in the provided image.
:param image: The image to search for targets.
:param k: The number of knnMatches to use. None (Default) uses 2.
:param ratio: The distance ration to use. None (Default) uses 0.75.
:returns: A list containing :class:`TrackedTarget` instances for
each target found.
"""
if not self._targets:
return []
k = 2 if k is None else k
ratio = 0.75 if ratio is None else ratio
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
return []
matches = self._matcher.knnMatch(descriptors, k=int(k))
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]
if len(matches) < self.min_match_count:
return []
matches_by_id = [[] for _ in xrange(len(self._targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < self.min_match_count:
continue
target = self._targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [keypoints[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < self.min_match_count:
continue
p0, p1 = np.int32((p0, p1))
inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]
outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]
quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.inliers), reverse=True)
return tracked
def onDraw(self, image, tracked=None):
if tracked is None:
tracked = self.search(image)
for tr in tracked:
tr.draw(image, (255, 255, 255), 2)
return image
def find(image_to_find, image, search_spec=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
Additional arguments are passed to the search.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param search_spec: The search specification. A value of 'template' invokes
:func:`search_template`. All other values are defined in
:class:`Features`.
:returns: A :class:`TrackedTarget` instance if found or None if not found.
"""
if search_spec is not None and search_spec.startswith('template'):
return _find_using_template(image_to_find, image, **kwargs)
features = Features(search_spec)
features.add_target(image_to_find)
tracked = features.find(image, kwargs.get('k'), kwargs.get('ratio'))
if not len(tracked):
raise FindError(1.0, None)
return tracked[0]
def _find_using_template(image_to_find, image, threshold=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param threshold: The metric threshold. 1e-6 (Default) finds nearly
exact images undistorted by noise or error.
:returns: A :class:`TrackedTarget` instance if found.
:raises FindError: If the image_to_find could not be found in image.
"""
threshold = 1e-6 if threshold is None else threshold
result = cv2.matchTemplate(image, image_to_find, cv2.TM_SQDIFF_NORMED)
idx = np.argmin(result)
metric = np.ravel(result)[idx]
x0, y0 = np.unravel_index(idx, result.shape)[-1::-1]
if metric > threshold:
raise FindError(metric, (x0, y0))
x, y = image_to_find.shape[1::-1]
target = Target(image_to_find, [[0, 0], [x, 0], [x, y], [0, y]], None, None, None)
x1 = x0 + image_to_find.shape[1]
y1 = y0 + image_to_find.shape[0]
quad = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
H = np.array([[0., 0., x0], [0., 0., y0], [0., 0., 1.0]])
return TrackedTarget(target, image, [(0, 0)], [(x0, y0)], H, quad)
| add_target | identifier_name |
find.py | # Copyright 2014 Jetperch LLC - See LICENSE file.
"""
Find images using a variety of techniques.
This software is heavily based upon and copies some code from the following
opencv samples:
* https://github.com/Itseez/opencv/blob/master/samples/python2/find_obj.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/feature_homography.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/plane_tracker.py
"""
import cv2
import numpy as np
from collections import namedtuple
class FindError(Exception):
"""Error indicating that find failed."""
def __init__(self, metric, location, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.metric = metric
self.location = location
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
FLANN_PARAMS_NORM = {
'algorithm': FLANN_INDEX_KDTREE,
'trees': 5
}
FLANN_PARAMS_HAMMING = {
'algorithm': FLANN_INDEX_LSH,
'table_number': 6,
'key_size': 12,
'multi_probe_level': 1
}
MIN_MATCH_COUNT_DEFAULT = 10
Target = namedtuple('Target', 'image, quad, keypoints, descriptors, data')
"""The Target for image searches.
image - image to track
quad - Target boundary quad in the original image
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
"""
class TrackedTarget(object):
"""A Target that was found during an image search.
:var target: reference to :class:`Target`
:var image: The image used for the search.
:var inliers: The list of matching points given as [((x0, y0), (x1, y1)), ...]
:var outliers: The list of unmatched points.
:var H: homography matrix from (x0, y0) to (x1, y1)
:var quad: target bounary quad in input frame
"""
def __init__(self, target, image, inliers, outliers, H, quad):
self.target = target
self.image = image
self.inliers = inliers
self.outliers = outliers
self.H = H
self.quad = quad
def inTargetCoordinates(self, p):
p = np.float32(p)
H = np.linalg.inv(self.H)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), H).reshape(-1, 2)
return p
def inImageCoordinates(self, p):
p = np.float32(p)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), self.H).reshape(-1, 2)
return p
def draw(self, image, color, width, quad=None):
"""Draw the tracked target on the image.
:param image: The image used to draw the tracked target. If None,
then use the same image from the search.
:param color: The color tuple.
:param width: The line width in pixels.
:param quad: The quad to draw. When None (default), uses the quad from
this instance.
:returns: The image, which is also modified in place.
"""
if image is None:
image = self.image.copy()
if quad is None:
quad = self.quad
color = tuple(np.int32(color).tolist())
cv2.polylines(image, [np.int32(quad)], True, color, int(width))
return image
def visualize(self):
"""Visualize the search results.
:returns: The resulting visualization image.
"""
colors = {'outline': (220, 220, 220),
'inlier': (0, 255, 0),
'outlier': (0, 0, 255),
'lines': (128, 220, 128)}
# Create output image for visualization
gap = 5
h1, w1 = self.target.image.shape[:2]
h2, w2 = self.image.shape[:2]
vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)
vis[:h1, :w1, :] = self.target.image
w1 += gap
vis[:h2, w1:w1+w2, :] = self.image
# Draw the located object
quad = np.float32(self.quad) + np.float32([w1, 0])
self.draw(vis, colors['outline'], 2, quad)
# draw point details
inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]
outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]
if colors['outlier'] is not None: # draw x on each point
r = 2 # radius
thickness = 2
for x0, y0, x1, y1 in outliers:
cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)
if colors['lines'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)
if colors['inlier'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)
cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)
return vis
class Features(object):
"""Find an image using feature-based object recognition.
This class holds initialized search objects to accelerate subsequent
searchs which is especially useful for processing video streams.
"""
def __init__(self, search_spec=None):
"""Initialize the feature detection framework.
:param search_spec: The specification string which consists of
"[detector]-[matcher]". Valid detector values include
sift, surf (Default), and orb. Valid matcher values include flann and
bf (brute_force) (Default).
"""
if search_spec is None or not search_spec:
search_spec = 'orb'
chunks = search_spec.split('-')
if len(chunks) == 1:
chunks.append('bf')
elif len(chunks) != 2:
raise ValueError('Invalid search specification: %s' % search_spec)
detector_name, matcher_name = [x.lower() for x in chunks]
self._detector, norm = self._init_detector(detector_name)
self._matcher = self._init_matcher(matcher_name, norm)
self._targets = []
self.min_match_count = MIN_MATCH_COUNT_DEFAULT
def _init_detector(self, name):
norm = cv2.NORM_L2
if name == 'fast':
detector = cv2.FastFeatureDetector()
elif name == 'brisk':
detector = cv2.BRISK()
elif name == 'sift':
detector = cv2.SIFT()
elif name == 'surf':
detector = cv2.SURF(800)
elif name == 'orb':
detector = cv2.ORB(1200)
norm = cv2.NORM_HAMMING
else:
raise ValueError('Unsupported detector: %s' % name)
return detector, norm
def _init_matcher(self, name, norm):
if name == 'flann':
if norm == cv2.NORM_L2:
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
else:
flann_params = dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
elif name in ['bf', 'brute_force']:
matcher = cv2.BFMatcher(norm)
else:
raise ValueError('Unsupported matcher: %s' % name)
return matcher
def add_target(self, image, data=None):
"""Add a new target to the serach list.
:param image: The image containing just the target.
:param data: Additional user data. Defaults to None.
:raises ValueError: If the image is insufficient.
"""
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
raise ValueError('Target image has insufficient keypoints')
y, x = image.shape[:2]
quad = np.float32([[0, 0], [x, 0], [x, y], [0, y]])
self._targets.append(Target(image, quad, keypoints, descriptors, data))
self._matcher.add([descriptors])
#print('Target has %d features' % len(keypoints))
def clear(self):
"""Remove all targets"""
self._targets = []
self._matcher.clear()
def draw_keypoints(self, image, color=None):
if color is None:
color = [0, 255, 0]
keypoints, _ = self._detector.detectAndCompute(image, None)
image = image.copy()
return cv2.drawKeypoints(image, keypoints, color=color)
def find(self, image, k=None, ratio=None):
"""Find the targets in the provided image.
:param image: The image to search for targets.
:param k: The number of knnMatches to use. None (Default) uses 2.
:param ratio: The distance ration to use. None (Default) uses 0.75.
:returns: A list containing :class:`TrackedTarget` instances for
each target found.
"""
if not self._targets:
return []
k = 2 if k is None else k
ratio = 0.75 if ratio is None else ratio
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
return []
matches = self._matcher.knnMatch(descriptors, k=int(k))
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]
if len(matches) < self.min_match_count:
return []
matches_by_id = [[] for _ in xrange(len(self._targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < self.min_match_count:
continue
target = self._targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [keypoints[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < self.min_match_count:
continue
p0, p1 = np.int32((p0, p1))
inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]
outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]
quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.inliers), reverse=True)
return tracked
def onDraw(self, image, tracked=None):
if tracked is None:
tracked = self.search(image)
for tr in tracked:
tr.draw(image, (255, 255, 255), 2)
return image
def find(image_to_find, image, search_spec=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
Additional arguments are passed to the search.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param search_spec: The search specification. A value of 'template' invokes
:func:`search_template`. All other values are defined in
:class:`Features`.
:returns: A :class:`TrackedTarget` instance if found or None if not found.
"""
if search_spec is not None and search_spec.startswith('template'):
return _find_using_template(image_to_find, image, **kwargs)
features = Features(search_spec)
features.add_target(image_to_find)
tracked = features.find(image, kwargs.get('k'), kwargs.get('ratio'))
if not len(tracked):
raise FindError(1.0, None)
return tracked[0]
def _find_using_template(image_to_find, image, threshold=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param threshold: The metric threshold. 1e-6 (Default) finds nearly
exact images undistorted by noise or error.
:returns: A :class:`TrackedTarget` instance if found.
:raises FindError: If the image_to_find could not be found in image.
"""
threshold = 1e-6 if threshold is None else threshold
result = cv2.matchTemplate(image, image_to_find, cv2.TM_SQDIFF_NORMED)
idx = np.argmin(result)
metric = np.ravel(result)[idx]
x0, y0 = np.unravel_index(idx, result.shape)[-1::-1]
if metric > threshold:
raise FindError(metric, (x0, y0))
x, y = image_to_find.shape[1::-1]
target = Target(image_to_find, [[0, 0], [x, 0], [x, y], [0, y]], None, None, None)
x1 = x0 + image_to_find.shape[1]
y1 = y0 + image_to_find.shape[0]
quad = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]] | H = np.array([[0., 0., x0], [0., 0., y0], [0., 0., 1.0]])
return TrackedTarget(target, image, [(0, 0)], [(x0, y0)], H, quad) | random_line_split | |
find.py | # Copyright 2014 Jetperch LLC - See LICENSE file.
"""
Find images using a variety of techniques.
This software is heavily based upon and copies some code from the following
opencv samples:
* https://github.com/Itseez/opencv/blob/master/samples/python2/find_obj.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/feature_homography.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/plane_tracker.py
"""
import cv2
import numpy as np
from collections import namedtuple
class FindError(Exception):
"""Error indicating that find failed."""
def __init__(self, metric, location, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.metric = metric
self.location = location
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
FLANN_PARAMS_NORM = {
'algorithm': FLANN_INDEX_KDTREE,
'trees': 5
}
FLANN_PARAMS_HAMMING = {
'algorithm': FLANN_INDEX_LSH,
'table_number': 6,
'key_size': 12,
'multi_probe_level': 1
}
MIN_MATCH_COUNT_DEFAULT = 10
Target = namedtuple('Target', 'image, quad, keypoints, descriptors, data')
"""The Target for image searches.
image - image to track
quad - Target boundary quad in the original image
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
"""
class TrackedTarget(object):
"""A Target that was found during an image search.
:var target: reference to :class:`Target`
:var image: The image used for the search.
:var inliers: The list of matching points given as [((x0, y0), (x1, y1)), ...]
:var outliers: The list of unmatched points.
:var H: homography matrix from (x0, y0) to (x1, y1)
:var quad: target bounary quad in input frame
"""
def __init__(self, target, image, inliers, outliers, H, quad):
self.target = target
self.image = image
self.inliers = inliers
self.outliers = outliers
self.H = H
self.quad = quad
def inTargetCoordinates(self, p):
p = np.float32(p)
H = np.linalg.inv(self.H)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), H).reshape(-1, 2)
return p
def inImageCoordinates(self, p):
p = np.float32(p)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), self.H).reshape(-1, 2)
return p
def draw(self, image, color, width, quad=None):
"""Draw the tracked target on the image.
:param image: The image used to draw the tracked target. If None,
then use the same image from the search.
:param color: The color tuple.
:param width: The line width in pixels.
:param quad: The quad to draw. When None (default), uses the quad from
this instance.
:returns: The image, which is also modified in place.
"""
if image is None:
image = self.image.copy()
if quad is None:
quad = self.quad
color = tuple(np.int32(color).tolist())
cv2.polylines(image, [np.int32(quad)], True, color, int(width))
return image
def visualize(self):
"""Visualize the search results.
:returns: The resulting visualization image.
"""
colors = {'outline': (220, 220, 220),
'inlier': (0, 255, 0),
'outlier': (0, 0, 255),
'lines': (128, 220, 128)}
# Create output image for visualization
gap = 5
h1, w1 = self.target.image.shape[:2]
h2, w2 = self.image.shape[:2]
vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)
vis[:h1, :w1, :] = self.target.image
w1 += gap
vis[:h2, w1:w1+w2, :] = self.image
# Draw the located object
quad = np.float32(self.quad) + np.float32([w1, 0])
self.draw(vis, colors['outline'], 2, quad)
# draw point details
inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]
outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]
if colors['outlier'] is not None: # draw x on each point
r = 2 # radius
thickness = 2
for x0, y0, x1, y1 in outliers:
cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)
if colors['lines'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)
if colors['inlier'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)
cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)
return vis
class Features(object):
"""Find an image using feature-based object recognition.
This class holds initialized search objects to accelerate subsequent
searchs which is especially useful for processing video streams.
"""
def __init__(self, search_spec=None):
"""Initialize the feature detection framework.
:param search_spec: The specification string which consists of
"[detector]-[matcher]". Valid detector values include
sift, surf (Default), and orb. Valid matcher values include flann and
bf (brute_force) (Default).
"""
if search_spec is None or not search_spec:
search_spec = 'orb'
chunks = search_spec.split('-')
if len(chunks) == 1:
chunks.append('bf')
elif len(chunks) != 2:
raise ValueError('Invalid search specification: %s' % search_spec)
detector_name, matcher_name = [x.lower() for x in chunks]
self._detector, norm = self._init_detector(detector_name)
self._matcher = self._init_matcher(matcher_name, norm)
self._targets = []
self.min_match_count = MIN_MATCH_COUNT_DEFAULT
def _init_detector(self, name):
norm = cv2.NORM_L2
if name == 'fast':
detector = cv2.FastFeatureDetector()
elif name == 'brisk':
detector = cv2.BRISK()
elif name == 'sift':
detector = cv2.SIFT()
elif name == 'surf':
detector = cv2.SURF(800)
elif name == 'orb':
detector = cv2.ORB(1200)
norm = cv2.NORM_HAMMING
else:
raise ValueError('Unsupported detector: %s' % name)
return detector, norm
def _init_matcher(self, name, norm):
if name == 'flann':
if norm == cv2.NORM_L2:
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
else:
flann_params = dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
elif name in ['bf', 'brute_force']:
matcher = cv2.BFMatcher(norm)
else:
raise ValueError('Unsupported matcher: %s' % name)
return matcher
def add_target(self, image, data=None):
"""Add a new target to the serach list.
:param image: The image containing just the target.
:param data: Additional user data. Defaults to None.
:raises ValueError: If the image is insufficient.
"""
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
raise ValueError('Target image has insufficient keypoints')
y, x = image.shape[:2]
quad = np.float32([[0, 0], [x, 0], [x, y], [0, y]])
self._targets.append(Target(image, quad, keypoints, descriptors, data))
self._matcher.add([descriptors])
#print('Target has %d features' % len(keypoints))
def clear(self):
"""Remove all targets"""
self._targets = []
self._matcher.clear()
def draw_keypoints(self, image, color=None):
if color is None:
color = [0, 255, 0]
keypoints, _ = self._detector.detectAndCompute(image, None)
image = image.copy()
return cv2.drawKeypoints(image, keypoints, color=color)
def find(self, image, k=None, ratio=None):
"""Find the targets in the provided image.
:param image: The image to search for targets.
:param k: The number of knnMatches to use. None (Default) uses 2.
:param ratio: The distance ration to use. None (Default) uses 0.75.
:returns: A list containing :class:`TrackedTarget` instances for
each target found.
"""
if not self._targets:
return []
k = 2 if k is None else k
ratio = 0.75 if ratio is None else ratio
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
return []
matches = self._matcher.knnMatch(descriptors, k=int(k))
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]
if len(matches) < self.min_match_count:
return []
matches_by_id = [[] for _ in xrange(len(self._targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < self.min_match_count:
continue
target = self._targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [keypoints[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < self.min_match_count:
continue
p0, p1 = np.int32((p0, p1))
inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]
outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]
quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.inliers), reverse=True)
return tracked
def onDraw(self, image, tracked=None):
if tracked is None:
tracked = self.search(image)
for tr in tracked:
tr.draw(image, (255, 255, 255), 2)
return image
def find(image_to_find, image, search_spec=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
Additional arguments are passed to the search.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param search_spec: The search specification. A value of 'template' invokes
:func:`search_template`. All other values are defined in
:class:`Features`.
:returns: A :class:`TrackedTarget` instance if found or None if not found.
"""
if search_spec is not None and search_spec.startswith('template'):
|
features = Features(search_spec)
features.add_target(image_to_find)
tracked = features.find(image, kwargs.get('k'), kwargs.get('ratio'))
if not len(tracked):
raise FindError(1.0, None)
return tracked[0]
def _find_using_template(image_to_find, image, threshold=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param threshold: The metric threshold. 1e-6 (Default) finds nearly
exact images undistorted by noise or error.
:returns: A :class:`TrackedTarget` instance if found.
:raises FindError: If the image_to_find could not be found in image.
"""
threshold = 1e-6 if threshold is None else threshold
result = cv2.matchTemplate(image, image_to_find, cv2.TM_SQDIFF_NORMED)
idx = np.argmin(result)
metric = np.ravel(result)[idx]
x0, y0 = np.unravel_index(idx, result.shape)[-1::-1]
if metric > threshold:
raise FindError(metric, (x0, y0))
x, y = image_to_find.shape[1::-1]
target = Target(image_to_find, [[0, 0], [x, 0], [x, y], [0, y]], None, None, None)
x1 = x0 + image_to_find.shape[1]
y1 = y0 + image_to_find.shape[0]
quad = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
H = np.array([[0., 0., x0], [0., 0., y0], [0., 0., 1.0]])
return TrackedTarget(target, image, [(0, 0)], [(x0, y0)], H, quad)
| return _find_using_template(image_to_find, image, **kwargs) | conditional_block |
find.py | # Copyright 2014 Jetperch LLC - See LICENSE file.
"""
Find images using a variety of techniques.
This software is heavily based upon and copies some code from the following
opencv samples:
* https://github.com/Itseez/opencv/blob/master/samples/python2/find_obj.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/feature_homography.py
* https://github.com/Itseez/opencv/blob/master/samples/python2/plane_tracker.py
"""
import cv2
import numpy as np
from collections import namedtuple
class FindError(Exception):
"""Error indicating that find failed."""
def __init__(self, metric, location, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.metric = metric
self.location = location
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
FLANN_PARAMS_NORM = {
'algorithm': FLANN_INDEX_KDTREE,
'trees': 5
}
FLANN_PARAMS_HAMMING = {
'algorithm': FLANN_INDEX_LSH,
'table_number': 6,
'key_size': 12,
'multi_probe_level': 1
}
MIN_MATCH_COUNT_DEFAULT = 10
Target = namedtuple('Target', 'image, quad, keypoints, descriptors, data')
"""The Target for image searches.
image - image to track
quad - Target boundary quad in the original image
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
"""
class TrackedTarget(object):
"""A Target that was found during an image search.
:var target: reference to :class:`Target`
:var image: The image used for the search.
:var inliers: The list of matching points given as [((x0, y0), (x1, y1)), ...]
:var outliers: The list of unmatched points.
:var H: homography matrix from (x0, y0) to (x1, y1)
:var quad: target bounary quad in input frame
"""
def __init__(self, target, image, inliers, outliers, H, quad):
self.target = target
self.image = image
self.inliers = inliers
self.outliers = outliers
self.H = H
self.quad = quad
def inTargetCoordinates(self, p):
p = np.float32(p)
H = np.linalg.inv(self.H)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), H).reshape(-1, 2)
return p
def inImageCoordinates(self, p):
p = np.float32(p)
p = cv2.perspectiveTransform(p.reshape(1, -1, 2), self.H).reshape(-1, 2)
return p
def draw(self, image, color, width, quad=None):
"""Draw the tracked target on the image.
:param image: The image used to draw the tracked target. If None,
then use the same image from the search.
:param color: The color tuple.
:param width: The line width in pixels.
:param quad: The quad to draw. When None (default), uses the quad from
this instance.
:returns: The image, which is also modified in place.
"""
if image is None:
image = self.image.copy()
if quad is None:
quad = self.quad
color = tuple(np.int32(color).tolist())
cv2.polylines(image, [np.int32(quad)], True, color, int(width))
return image
def visualize(self):
"""Visualize the search results.
:returns: The resulting visualization image.
"""
colors = {'outline': (220, 220, 220),
'inlier': (0, 255, 0),
'outlier': (0, 0, 255),
'lines': (128, 220, 128)}
# Create output image for visualization
gap = 5
h1, w1 = self.target.image.shape[:2]
h2, w2 = self.image.shape[:2]
vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)
vis[:h1, :w1, :] = self.target.image
w1 += gap
vis[:h2, w1:w1+w2, :] = self.image
# Draw the located object
quad = np.float32(self.quad) + np.float32([w1, 0])
self.draw(vis, colors['outline'], 2, quad)
# draw point details
inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]
outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]
if colors['outlier'] is not None: # draw x on each point
r = 2 # radius
thickness = 2
for x0, y0, x1, y1 in outliers:
cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)
cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)
if colors['lines'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)
if colors['inlier'] is not None:
for x0, y0, x1, y1 in inliers:
cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)
cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)
return vis
class Features(object):
"""Find an image using feature-based object recognition.
This class holds initialized search objects to accelerate subsequent
searchs which is especially useful for processing video streams.
"""
def __init__(self, search_spec=None):
"""Initialize the feature detection framework.
:param search_spec: The specification string which consists of
"[detector]-[matcher]". Valid detector values include
sift, surf (Default), and orb. Valid matcher values include flann and
bf (brute_force) (Default).
"""
if search_spec is None or not search_spec:
search_spec = 'orb'
chunks = search_spec.split('-')
if len(chunks) == 1:
chunks.append('bf')
elif len(chunks) != 2:
raise ValueError('Invalid search specification: %s' % search_spec)
detector_name, matcher_name = [x.lower() for x in chunks]
self._detector, norm = self._init_detector(detector_name)
self._matcher = self._init_matcher(matcher_name, norm)
self._targets = []
self.min_match_count = MIN_MATCH_COUNT_DEFAULT
def _init_detector(self, name):
norm = cv2.NORM_L2
if name == 'fast':
detector = cv2.FastFeatureDetector()
elif name == 'brisk':
detector = cv2.BRISK()
elif name == 'sift':
detector = cv2.SIFT()
elif name == 'surf':
detector = cv2.SURF(800)
elif name == 'orb':
detector = cv2.ORB(1200)
norm = cv2.NORM_HAMMING
else:
raise ValueError('Unsupported detector: %s' % name)
return detector, norm
def _init_matcher(self, name, norm):
if name == 'flann':
if norm == cv2.NORM_L2:
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
else:
flann_params = dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
elif name in ['bf', 'brute_force']:
matcher = cv2.BFMatcher(norm)
else:
raise ValueError('Unsupported matcher: %s' % name)
return matcher
def add_target(self, image, data=None):
"""Add a new target to the serach list.
:param image: The image containing just the target.
:param data: Additional user data. Defaults to None.
:raises ValueError: If the image is insufficient.
"""
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
raise ValueError('Target image has insufficient keypoints')
y, x = image.shape[:2]
quad = np.float32([[0, 0], [x, 0], [x, y], [0, y]])
self._targets.append(Target(image, quad, keypoints, descriptors, data))
self._matcher.add([descriptors])
#print('Target has %d features' % len(keypoints))
def clear(self):
"""Remove all targets"""
self._targets = []
self._matcher.clear()
def draw_keypoints(self, image, color=None):
|
def find(self, image, k=None, ratio=None):
"""Find the targets in the provided image.
:param image: The image to search for targets.
:param k: The number of knnMatches to use. None (Default) uses 2.
:param ratio: The distance ration to use. None (Default) uses 0.75.
:returns: A list containing :class:`TrackedTarget` instances for
each target found.
"""
if not self._targets:
return []
k = 2 if k is None else k
ratio = 0.75 if ratio is None else ratio
keypoints, descriptors = self._detector.detectAndCompute(image, None)
if len(keypoints) < self.min_match_count:
return []
matches = self._matcher.knnMatch(descriptors, k=int(k))
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]
if len(matches) < self.min_match_count:
return []
matches_by_id = [[] for _ in xrange(len(self._targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < self.min_match_count:
continue
target = self._targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [keypoints[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < self.min_match_count:
continue
p0, p1 = np.int32((p0, p1))
inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]
outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]
quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.inliers), reverse=True)
return tracked
def onDraw(self, image, tracked=None):
if tracked is None:
tracked = self.search(image)
for tr in tracked:
tr.draw(image, (255, 255, 255), 2)
return image
def find(image_to_find, image, search_spec=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
Additional arguments are passed to the search.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param search_spec: The search specification. A value of 'template' invokes
:func:`search_template`. All other values are defined in
:class:`Features`.
:returns: A :class:`TrackedTarget` instance if found or None if not found.
"""
if search_spec is not None and search_spec.startswith('template'):
return _find_using_template(image_to_find, image, **kwargs)
features = Features(search_spec)
features.add_target(image_to_find)
tracked = features.find(image, kwargs.get('k'), kwargs.get('ratio'))
if not len(tracked):
raise FindError(1.0, None)
return tracked[0]
def _find_using_template(image_to_find, image, threshold=None, **kwargs):
"""Search an image to see if it contains the provided reference image.
:param image_to_find: The reference image to find in image.
:param image: The image to search.
:param threshold: The metric threshold. 1e-6 (Default) finds nearly
exact images undistorted by noise or error.
:returns: A :class:`TrackedTarget` instance if found.
:raises FindError: If the image_to_find could not be found in image.
"""
threshold = 1e-6 if threshold is None else threshold
result = cv2.matchTemplate(image, image_to_find, cv2.TM_SQDIFF_NORMED)
idx = np.argmin(result)
metric = np.ravel(result)[idx]
x0, y0 = np.unravel_index(idx, result.shape)[-1::-1]
if metric > threshold:
raise FindError(metric, (x0, y0))
x, y = image_to_find.shape[1::-1]
target = Target(image_to_find, [[0, 0], [x, 0], [x, y], [0, y]], None, None, None)
x1 = x0 + image_to_find.shape[1]
y1 = y0 + image_to_find.shape[0]
quad = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
H = np.array([[0., 0., x0], [0., 0., y0], [0., 0., 1.0]])
return TrackedTarget(target, image, [(0, 0)], [(x0, y0)], H, quad)
| if color is None:
color = [0, 255, 0]
keypoints, _ = self._detector.detectAndCompute(image, None)
image = image.copy()
return cv2.drawKeypoints(image, keypoints, color=color) | identifier_body |
analyzeAnimatedFrames.py | #!/usr/bin/env python3
"""
analyzes performance of animation rendering, based on files
"""
# standard library modules
import argparse
import collections
import contextlib
from concurrent import futures
import datetime
import getpass
import json
import logging
import math
import os
import socket
import shutil
import signal
import subprocess
import sys
import threading
import time
import uuid
# third-party module(s)
import pandas as pd
import requests
# neocortix modules
try:
import ncs
import devicePerformance
except ImportError:
# set system and python paths for default places, since path seems to be not set properly
ncscliPath = os.path.expanduser('~/ncscli/ncscli')
sys.path.append( ncscliPath )
os.environ["PATH"] += os.pathsep + ncscliPath
import ncs
logger = logging.getLogger(__name__)
# possible place for globals is this class's attributes
class g_:
signaled = False
def boolArg( v ):
'''use with ArgumentParser add_argument for (case-insensitive) boolean arg'''
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def demuxResults( inFilePath ):
'''deinterleave jlog items into separate lists for each instance'''
byInstance = {}
badOnes = set()
topLevelKeys = collections.Counter()
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
for key in decoded:
topLevelKeys[ key ] += 1
iid = decoded.get( 'instanceId', '<unknown>')
have = byInstance.get( iid, [] )
have.append( decoded )
byInstance[iid] = have
if 'returncode' in decoded:
rc = decoded['returncode']
if rc:
logger.info( 'returncode %d for %s', rc, iid )
badOnes.add( iid )
if 'exception' in decoded:
logger.info( 'exception %s for %s', decoded['exception'], iid )
badOnes.add( iid )
if 'timeout' in decoded:
#logger.info( 'timeout %s for %s', decoded['timeout'], iid )
badOnes.add( iid )
return byInstance, badOnes
def readJLog( inFilePath ):
'''read JLog file, return list of decoded objects'''
recs = []
topLevelKeys = collections.Counter() # for debugging
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
if isinstance( decoded, dict ):
for key in decoded:
|
recs.append( decoded )
logger.info( 'topLevelKeys: %s', topLevelKeys )
return recs
def scriptDirPath():
'''returns the absolute path to the directory containing this script'''
return os.path.dirname(os.path.realpath(__file__))
def instanceDpr( inst ):
#logger.info( 'NCSC Inst details %s', inst )
# cpuarch: string like "aarch64" or "armv7l"
# cpunumcores: int
# cpuspeeds: list of floats of length cpunumcores, each representing a clock frequency in GHz
# cpufamily: list of strings of length cpunumcores
cpuarch = inst['cpu']['arch']
cpunumcores = len( inst['cpu']['cores'])
cpuspeeds = []
cpufamily = []
for core in inst['cpu']['cores']:
cpuspeeds.append( core['freq'] / 1e9)
cpufamily.append( core['family'] )
dpr = devicePerformance.devicePerformanceRating( cpuarch, cpunumcores, cpuspeeds, cpufamily )
#print( 'device', inst['device-id'], 'dpr', dpr )
if dpr < 37:
logger.info( 'unhappy dpr for dev %d with cpu %s', inst['device-id'], inst['cpu'] )
return dpr
g_instanceBads = collections.Counter()
g_badBadCount = 0
def checkFrame( frameNum ):
global g_badBadCount
info = { 'frameNum': frameNum, 'state': 'unstarted' }
frameDirPath = os.path.join( dataDirPath, 'frame_%06d' % frameNum )
frameFileName = frameFilePattern.replace( '######', '%06d' % frameNum )
launchedJsonFilePath = os.path.join(frameDirPath, 'data', 'launched.json' )
installerFilePath = os.path.join(frameDirPath, 'data', 'runDistributedBlender.py.jlog' )
frameFilePath = os.path.join(dataDirPath, frameFileName )
if not os.path.isdir( frameDirPath ):
logger.error( '%06d, %s not found', frameNum, frameDirPath )
#info['state'] = 'noData'
return info
if os.path.isfile( launchedJsonFilePath ):
info['state'] = 'launched'
launchedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( launchedJsonFilePath ) )
info[ 'launchedDateTime' ] = launchedDateTime
#logger.info( '%06d, launched %s', frameNum, launchedDateTime.strftime( '%Y-%m-%d_%H%M%S' ) )
# get instances from the launched json file
launchedInstances = []
with open( launchedJsonFilePath, 'r') as jsonInFile:
try:
launchedInstances = json.load(jsonInFile) # an array
except Exception as exc:
logger.warning( 'could not load json (%s) %s', type(exc), exc )
instanceMap = {}
for inst in launchedInstances:
instanceMap[ inst['instanceId'] ] = inst
deviceIds = [inst['device-id'] for inst in launchedInstances]
info['deviceIds'] = deviceIds[0] if len(deviceIds)==1 else deviceIds #scalarize if only one
dpr = [instanceDpr( inst ) for inst in launchedInstances ]
info['dpr'] = dpr[0] if len(dpr)==1 else dpr #scalarize if only one
if os.path.isfile( installerFilePath ):
info['state'] = 'installing'
installedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( installerFilePath ) )
info[ 'installedDateTime' ] = installedDateTime
(byInstance, badOnes) = demuxResults( installerFilePath )
if badOnes:
if len(badOnes) >= 3:
logger.error( 'BAD BAD BAD' )
g_badBadCount += 1
logger.warning( '%d badOnes for frame %d', len(badOnes), frameNum )
for badIid in badOnes:
g_instanceBads[ badIid ] += 1
iidSet = set( byInstance.keys() )
iidSet.discard( '<master>' )
info['iids'] = list( iidSet )
for iid, events in byInstance.items():
for event in events:
if 'timeout' in event:
logger.warning( 'TIMEOUT for instance %s', event['instanceId'])
if os.path.isfile( frameFilePath ):
info['state'] = 'finished'
info[ 'finishedDateTime' ] = datetime.datetime.fromtimestamp( os.path.getmtime( frameFilePath ) )
info['durSeconds'] = (info[ 'finishedDateTime' ] - info[ 'installedDateTime' ]).total_seconds()
else:
#info['state'] = 'unfinished'
logger.warning( '%06d, %s not found', frameNum, frameFilePath )
return info
return info
if __name__ == "__main__":
# configure logger formatting
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
ncs.logger.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
#tellInstances.logger.setLevel(logging.INFO)
logger.debug('the logger is configured')
ap = argparse.ArgumentParser( description=__doc__,
fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )
#ap.add_argument( 'blendFilePath', help='the .blend file to render' )
ap.add_argument( '--dataDir', help='where to read and write data', default='aniData' )
ap.add_argument( '--filter', help='json to filter instances for launch' )
ap.add_argument( '--instTimeLimit', type=int, default=900, help='amount of time (in seconds) installer is allowed to take on instances' )
ap.add_argument( '--jobId', help='to identify this job' )
ap.add_argument( '--launch', type=boolArg, default=True, help='to launch and terminate instances' )
ap.add_argument( '--nWorkers', type=int, default=1, help='the # of worker instances to launch (or zero for all available)' )
ap.add_argument( '--sshAgent', type=boolArg, default=False, help='whether or not to use ssh agent' )
ap.add_argument( '--sshClientKeyName', help='the name of the uploaded ssh client key to use (default is random)' )
ap.add_argument( '--timeLimit', type=int, help='time limit (in seconds) for the whole job',
default=24*60*60 )
ap.add_argument( '--useCompositor', type=boolArg, default=True, help='whether or not to use blender compositor' )
# dtr-specific args
ap.add_argument( '--width', type=int, help='the width (in pixels) of the output',
default=960 )
ap.add_argument( '--height', type=int, help='the height (in pixels) of the output',
default=540 )
ap.add_argument( '--blocks_user', type=int, help='the number of blocks to partition the image (or zero for "auto"',
default=0 )
ap.add_argument( '--fileType', choices=['PNG', 'OPEN_EXR'], help='the type of output file',
default='PNG' )
ap.add_argument( '--startFrame', type=int, help='the first frame number to render',
default=1 )
ap.add_argument( '--endFrame', type=int, help='the last frame number to render',
default=1 )
ap.add_argument( '--frameStep', type=int, help='the frame number increment',
default=1 )
ap.add_argument( '--seed', type=int, help='the blender cycles noise seed',
default=0 )
args = ap.parse_args()
#logger.debug('args: %s', args)
startTime = time.time()
extensions = {'PNG': 'png', 'OPEN_EXR': 'exr'}
frameFilePattern = 'rendered_frame_######_seed_%d.%s'%(args.seed,extensions[args.fileType])
dataDirPath = args.dataDir # './aniData'
frameInfos = []
for frameNum in range( args.startFrame, args.endFrame+1, args.frameStep ):
frameInfo = checkFrame( frameNum )
#if frameInfo['state'] != 'finished':
# logger.info( '%s', frameInfo )
frameInfos.append( frameInfo )
framesTable = pd.DataFrame( frameInfos )
framesTable.to_csv( dataDirPath+'/frameSummaries.csv', index=False)
# analyze the "outer" jlog file
animatorFilePath = os.path.join(dataDirPath, 'animateWholeFrames_results.jlog' )
events = readJLog( animatorFilePath )
recTable = pd.DataFrame( events )
recTable['dateTime'] = pd.to_datetime( recTable.dateTime )
#print( recTable.info() )
recTable.to_csv( dataDirPath+'/parsedLog.csv', index=False)
nTerminations = 0
nTerminated = 0
for event in events:
if 'renderFrame would terminate bad instances' in event:
nTerminations += 1
nTerminated += len( event['renderFrame would terminate bad instances'] )
print( nTerminated, 'instances terminated in', nTerminations, 'terminations' )
print( 'g_instanceBads: count', len(g_instanceBads), 'max', g_instanceBads.most_common(1) )
print( 'g_badBadCount', g_badBadCount )
if not len(framesTable):
sys.exit( 'no frames in framesTable')
if not 'finishedDateTime' in framesTable:
sys.exit( 'no frames finished')
#logger.info( '%s', framesTable.info() )
framesTable['launchedDateTime'] = pd.to_datetime( framesTable.launchedDateTime, utc=True )
framesTable['finishedDateTime'] = pd.to_datetime( framesTable.finishedDateTime, utc=True )
finished = framesTable[ ~pd.isnull(framesTable.finishedDateTime) ]
justStarted = framesTable[ pd.isnull(framesTable.finishedDateTime) & ~pd.isnull(framesTable.launchedDateTime) ]
print( len(finished), 'frames finished' )
print( len(justStarted), 'frames launched but not finished' )
print( 'earliest event', recTable.dateTime.min() )
print( 'earliest start', finished.launchedDateTime.min() )
print( 'latest finish', finished.finishedDateTime.max() )
overallDur = finished.finishedDateTime.max() - recTable.dateTime.min()
print( 'overall duration %.2f minutes (%.2f hours)' % (overallDur.total_seconds() / 60, overallDur.total_seconds() / 3600) )
timePerFrame = (finished.finishedDateTime.max() - finished.launchedDateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
timePerFrame = (finished.finishedDateTime.max() - recTable.dateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'real time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
#elapsed = time.time() - startTime
#logger.info( 'finished; elapsed time %.1f seconds (%.1f minutes)', elapsed, elapsed/60 )
| topLevelKeys[ key ] += 1 | conditional_block |
analyzeAnimatedFrames.py | #!/usr/bin/env python3
"""
analyzes performance of animation rendering, based on files
"""
# standard library modules
import argparse
import collections
import contextlib
from concurrent import futures
import datetime
import getpass
import json
import logging
import math
import os
import socket
import shutil
import signal
import subprocess
import sys
import threading
import time
import uuid
# third-party module(s)
import pandas as pd
import requests
# neocortix modules
try:
import ncs
import devicePerformance
except ImportError:
# set system and python paths for default places, since path seems to be not set properly
ncscliPath = os.path.expanduser('~/ncscli/ncscli')
sys.path.append( ncscliPath )
os.environ["PATH"] += os.pathsep + ncscliPath
import ncs
logger = logging.getLogger(__name__)
# possible place for globals is this class's attributes
class g_:
signaled = False
def boolArg( v ):
'''use with ArgumentParser add_argument for (case-insensitive) boolean arg'''
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def demuxResults( inFilePath ):
'''deinterleave jlog items into separate lists for each instance'''
byInstance = {}
badOnes = set()
topLevelKeys = collections.Counter()
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
for key in decoded:
topLevelKeys[ key ] += 1
iid = decoded.get( 'instanceId', '<unknown>')
have = byInstance.get( iid, [] )
have.append( decoded )
byInstance[iid] = have
if 'returncode' in decoded:
rc = decoded['returncode']
if rc:
logger.info( 'returncode %d for %s', rc, iid )
badOnes.add( iid )
if 'exception' in decoded:
logger.info( 'exception %s for %s', decoded['exception'], iid )
badOnes.add( iid )
if 'timeout' in decoded:
#logger.info( 'timeout %s for %s', decoded['timeout'], iid )
badOnes.add( iid )
return byInstance, badOnes
def readJLog( inFilePath ):
'''read JLog file, return list of decoded objects'''
recs = []
topLevelKeys = collections.Counter() # for debugging
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
if isinstance( decoded, dict ):
for key in decoded:
topLevelKeys[ key ] += 1
recs.append( decoded )
logger.info( 'topLevelKeys: %s', topLevelKeys )
return recs
def | ():
'''returns the absolute path to the directory containing this script'''
return os.path.dirname(os.path.realpath(__file__))
def instanceDpr( inst ):
#logger.info( 'NCSC Inst details %s', inst )
# cpuarch: string like "aarch64" or "armv7l"
# cpunumcores: int
# cpuspeeds: list of floats of length cpunumcores, each representing a clock frequency in GHz
# cpufamily: list of strings of length cpunumcores
cpuarch = inst['cpu']['arch']
cpunumcores = len( inst['cpu']['cores'])
cpuspeeds = []
cpufamily = []
for core in inst['cpu']['cores']:
cpuspeeds.append( core['freq'] / 1e9)
cpufamily.append( core['family'] )
dpr = devicePerformance.devicePerformanceRating( cpuarch, cpunumcores, cpuspeeds, cpufamily )
#print( 'device', inst['device-id'], 'dpr', dpr )
if dpr < 37:
logger.info( 'unhappy dpr for dev %d with cpu %s', inst['device-id'], inst['cpu'] )
return dpr
g_instanceBads = collections.Counter()
g_badBadCount = 0
def checkFrame( frameNum ):
global g_badBadCount
info = { 'frameNum': frameNum, 'state': 'unstarted' }
frameDirPath = os.path.join( dataDirPath, 'frame_%06d' % frameNum )
frameFileName = frameFilePattern.replace( '######', '%06d' % frameNum )
launchedJsonFilePath = os.path.join(frameDirPath, 'data', 'launched.json' )
installerFilePath = os.path.join(frameDirPath, 'data', 'runDistributedBlender.py.jlog' )
frameFilePath = os.path.join(dataDirPath, frameFileName )
if not os.path.isdir( frameDirPath ):
logger.error( '%06d, %s not found', frameNum, frameDirPath )
#info['state'] = 'noData'
return info
if os.path.isfile( launchedJsonFilePath ):
info['state'] = 'launched'
launchedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( launchedJsonFilePath ) )
info[ 'launchedDateTime' ] = launchedDateTime
#logger.info( '%06d, launched %s', frameNum, launchedDateTime.strftime( '%Y-%m-%d_%H%M%S' ) )
# get instances from the launched json file
launchedInstances = []
with open( launchedJsonFilePath, 'r') as jsonInFile:
try:
launchedInstances = json.load(jsonInFile) # an array
except Exception as exc:
logger.warning( 'could not load json (%s) %s', type(exc), exc )
instanceMap = {}
for inst in launchedInstances:
instanceMap[ inst['instanceId'] ] = inst
deviceIds = [inst['device-id'] for inst in launchedInstances]
info['deviceIds'] = deviceIds[0] if len(deviceIds)==1 else deviceIds #scalarize if only one
dpr = [instanceDpr( inst ) for inst in launchedInstances ]
info['dpr'] = dpr[0] if len(dpr)==1 else dpr #scalarize if only one
if os.path.isfile( installerFilePath ):
info['state'] = 'installing'
installedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( installerFilePath ) )
info[ 'installedDateTime' ] = installedDateTime
(byInstance, badOnes) = demuxResults( installerFilePath )
if badOnes:
if len(badOnes) >= 3:
logger.error( 'BAD BAD BAD' )
g_badBadCount += 1
logger.warning( '%d badOnes for frame %d', len(badOnes), frameNum )
for badIid in badOnes:
g_instanceBads[ badIid ] += 1
iidSet = set( byInstance.keys() )
iidSet.discard( '<master>' )
info['iids'] = list( iidSet )
for iid, events in byInstance.items():
for event in events:
if 'timeout' in event:
logger.warning( 'TIMEOUT for instance %s', event['instanceId'])
if os.path.isfile( frameFilePath ):
info['state'] = 'finished'
info[ 'finishedDateTime' ] = datetime.datetime.fromtimestamp( os.path.getmtime( frameFilePath ) )
info['durSeconds'] = (info[ 'finishedDateTime' ] - info[ 'installedDateTime' ]).total_seconds()
else:
#info['state'] = 'unfinished'
logger.warning( '%06d, %s not found', frameNum, frameFilePath )
return info
return info
if __name__ == "__main__":
# configure logger formatting
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
ncs.logger.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
#tellInstances.logger.setLevel(logging.INFO)
logger.debug('the logger is configured')
ap = argparse.ArgumentParser( description=__doc__,
fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )
#ap.add_argument( 'blendFilePath', help='the .blend file to render' )
ap.add_argument( '--dataDir', help='where to read and write data', default='aniData' )
ap.add_argument( '--filter', help='json to filter instances for launch' )
ap.add_argument( '--instTimeLimit', type=int, default=900, help='amount of time (in seconds) installer is allowed to take on instances' )
ap.add_argument( '--jobId', help='to identify this job' )
ap.add_argument( '--launch', type=boolArg, default=True, help='to launch and terminate instances' )
ap.add_argument( '--nWorkers', type=int, default=1, help='the # of worker instances to launch (or zero for all available)' )
ap.add_argument( '--sshAgent', type=boolArg, default=False, help='whether or not to use ssh agent' )
ap.add_argument( '--sshClientKeyName', help='the name of the uploaded ssh client key to use (default is random)' )
ap.add_argument( '--timeLimit', type=int, help='time limit (in seconds) for the whole job',
default=24*60*60 )
ap.add_argument( '--useCompositor', type=boolArg, default=True, help='whether or not to use blender compositor' )
# dtr-specific args
ap.add_argument( '--width', type=int, help='the width (in pixels) of the output',
default=960 )
ap.add_argument( '--height', type=int, help='the height (in pixels) of the output',
default=540 )
ap.add_argument( '--blocks_user', type=int, help='the number of blocks to partition the image (or zero for "auto"',
default=0 )
ap.add_argument( '--fileType', choices=['PNG', 'OPEN_EXR'], help='the type of output file',
default='PNG' )
ap.add_argument( '--startFrame', type=int, help='the first frame number to render',
default=1 )
ap.add_argument( '--endFrame', type=int, help='the last frame number to render',
default=1 )
ap.add_argument( '--frameStep', type=int, help='the frame number increment',
default=1 )
ap.add_argument( '--seed', type=int, help='the blender cycles noise seed',
default=0 )
args = ap.parse_args()
#logger.debug('args: %s', args)
startTime = time.time()
extensions = {'PNG': 'png', 'OPEN_EXR': 'exr'}
frameFilePattern = 'rendered_frame_######_seed_%d.%s'%(args.seed,extensions[args.fileType])
dataDirPath = args.dataDir # './aniData'
frameInfos = []
for frameNum in range( args.startFrame, args.endFrame+1, args.frameStep ):
frameInfo = checkFrame( frameNum )
#if frameInfo['state'] != 'finished':
# logger.info( '%s', frameInfo )
frameInfos.append( frameInfo )
framesTable = pd.DataFrame( frameInfos )
framesTable.to_csv( dataDirPath+'/frameSummaries.csv', index=False)
# analyze the "outer" jlog file
animatorFilePath = os.path.join(dataDirPath, 'animateWholeFrames_results.jlog' )
events = readJLog( animatorFilePath )
recTable = pd.DataFrame( events )
recTable['dateTime'] = pd.to_datetime( recTable.dateTime )
#print( recTable.info() )
recTable.to_csv( dataDirPath+'/parsedLog.csv', index=False)
nTerminations = 0
nTerminated = 0
for event in events:
if 'renderFrame would terminate bad instances' in event:
nTerminations += 1
nTerminated += len( event['renderFrame would terminate bad instances'] )
print( nTerminated, 'instances terminated in', nTerminations, 'terminations' )
print( 'g_instanceBads: count', len(g_instanceBads), 'max', g_instanceBads.most_common(1) )
print( 'g_badBadCount', g_badBadCount )
if not len(framesTable):
sys.exit( 'no frames in framesTable')
if not 'finishedDateTime' in framesTable:
sys.exit( 'no frames finished')
#logger.info( '%s', framesTable.info() )
framesTable['launchedDateTime'] = pd.to_datetime( framesTable.launchedDateTime, utc=True )
framesTable['finishedDateTime'] = pd.to_datetime( framesTable.finishedDateTime, utc=True )
finished = framesTable[ ~pd.isnull(framesTable.finishedDateTime) ]
justStarted = framesTable[ pd.isnull(framesTable.finishedDateTime) & ~pd.isnull(framesTable.launchedDateTime) ]
print( len(finished), 'frames finished' )
print( len(justStarted), 'frames launched but not finished' )
print( 'earliest event', recTable.dateTime.min() )
print( 'earliest start', finished.launchedDateTime.min() )
print( 'latest finish', finished.finishedDateTime.max() )
overallDur = finished.finishedDateTime.max() - recTable.dateTime.min()
print( 'overall duration %.2f minutes (%.2f hours)' % (overallDur.total_seconds() / 60, overallDur.total_seconds() / 3600) )
timePerFrame = (finished.finishedDateTime.max() - finished.launchedDateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
timePerFrame = (finished.finishedDateTime.max() - recTable.dateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'real time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
#elapsed = time.time() - startTime
#logger.info( 'finished; elapsed time %.1f seconds (%.1f minutes)', elapsed, elapsed/60 )
| scriptDirPath | identifier_name |
analyzeAnimatedFrames.py | #!/usr/bin/env python3
"""
analyzes performance of animation rendering, based on files
"""
# standard library modules
import argparse
import collections
import contextlib
from concurrent import futures
import datetime
import getpass
import json
import logging
import math
import os
import socket
import shutil
import signal
import subprocess
import sys
import threading
import time
import uuid
# third-party module(s)
import pandas as pd
import requests
# neocortix modules
try:
import ncs
import devicePerformance
except ImportError:
# set system and python paths for default places, since path seems to be not set properly
ncscliPath = os.path.expanduser('~/ncscli/ncscli')
sys.path.append( ncscliPath )
os.environ["PATH"] += os.pathsep + ncscliPath
import ncs
logger = logging.getLogger(__name__)
# possible place for globals is this class's attributes
class g_:
signaled = False
def boolArg( v ):
'''use with ArgumentParser add_argument for (case-insensitive) boolean arg'''
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def demuxResults( inFilePath ):
'''deinterleave jlog items into separate lists for each instance'''
byInstance = {}
badOnes = set()
topLevelKeys = collections.Counter()
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
for key in decoded:
topLevelKeys[ key ] += 1
iid = decoded.get( 'instanceId', '<unknown>')
have = byInstance.get( iid, [] )
have.append( decoded )
byInstance[iid] = have
if 'returncode' in decoded:
rc = decoded['returncode']
if rc:
logger.info( 'returncode %d for %s', rc, iid ) | if 'exception' in decoded:
logger.info( 'exception %s for %s', decoded['exception'], iid )
badOnes.add( iid )
if 'timeout' in decoded:
#logger.info( 'timeout %s for %s', decoded['timeout'], iid )
badOnes.add( iid )
return byInstance, badOnes
def readJLog( inFilePath ):
'''read JLog file, return list of decoded objects'''
recs = []
topLevelKeys = collections.Counter() # for debugging
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
if isinstance( decoded, dict ):
for key in decoded:
topLevelKeys[ key ] += 1
recs.append( decoded )
logger.info( 'topLevelKeys: %s', topLevelKeys )
return recs
def scriptDirPath():
'''returns the absolute path to the directory containing this script'''
return os.path.dirname(os.path.realpath(__file__))
def instanceDpr( inst ):
#logger.info( 'NCSC Inst details %s', inst )
# cpuarch: string like "aarch64" or "armv7l"
# cpunumcores: int
# cpuspeeds: list of floats of length cpunumcores, each representing a clock frequency in GHz
# cpufamily: list of strings of length cpunumcores
cpuarch = inst['cpu']['arch']
cpunumcores = len( inst['cpu']['cores'])
cpuspeeds = []
cpufamily = []
for core in inst['cpu']['cores']:
cpuspeeds.append( core['freq'] / 1e9)
cpufamily.append( core['family'] )
dpr = devicePerformance.devicePerformanceRating( cpuarch, cpunumcores, cpuspeeds, cpufamily )
#print( 'device', inst['device-id'], 'dpr', dpr )
if dpr < 37:
logger.info( 'unhappy dpr for dev %d with cpu %s', inst['device-id'], inst['cpu'] )
return dpr
g_instanceBads = collections.Counter()
g_badBadCount = 0
def checkFrame( frameNum ):
global g_badBadCount
info = { 'frameNum': frameNum, 'state': 'unstarted' }
frameDirPath = os.path.join( dataDirPath, 'frame_%06d' % frameNum )
frameFileName = frameFilePattern.replace( '######', '%06d' % frameNum )
launchedJsonFilePath = os.path.join(frameDirPath, 'data', 'launched.json' )
installerFilePath = os.path.join(frameDirPath, 'data', 'runDistributedBlender.py.jlog' )
frameFilePath = os.path.join(dataDirPath, frameFileName )
if not os.path.isdir( frameDirPath ):
logger.error( '%06d, %s not found', frameNum, frameDirPath )
#info['state'] = 'noData'
return info
if os.path.isfile( launchedJsonFilePath ):
info['state'] = 'launched'
launchedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( launchedJsonFilePath ) )
info[ 'launchedDateTime' ] = launchedDateTime
#logger.info( '%06d, launched %s', frameNum, launchedDateTime.strftime( '%Y-%m-%d_%H%M%S' ) )
# get instances from the launched json file
launchedInstances = []
with open( launchedJsonFilePath, 'r') as jsonInFile:
try:
launchedInstances = json.load(jsonInFile) # an array
except Exception as exc:
logger.warning( 'could not load json (%s) %s', type(exc), exc )
instanceMap = {}
for inst in launchedInstances:
instanceMap[ inst['instanceId'] ] = inst
deviceIds = [inst['device-id'] for inst in launchedInstances]
info['deviceIds'] = deviceIds[0] if len(deviceIds)==1 else deviceIds #scalarize if only one
dpr = [instanceDpr( inst ) for inst in launchedInstances ]
info['dpr'] = dpr[0] if len(dpr)==1 else dpr #scalarize if only one
if os.path.isfile( installerFilePath ):
info['state'] = 'installing'
installedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( installerFilePath ) )
info[ 'installedDateTime' ] = installedDateTime
(byInstance, badOnes) = demuxResults( installerFilePath )
if badOnes:
if len(badOnes) >= 3:
logger.error( 'BAD BAD BAD' )
g_badBadCount += 1
logger.warning( '%d badOnes for frame %d', len(badOnes), frameNum )
for badIid in badOnes:
g_instanceBads[ badIid ] += 1
iidSet = set( byInstance.keys() )
iidSet.discard( '<master>' )
info['iids'] = list( iidSet )
for iid, events in byInstance.items():
for event in events:
if 'timeout' in event:
logger.warning( 'TIMEOUT for instance %s', event['instanceId'])
if os.path.isfile( frameFilePath ):
info['state'] = 'finished'
info[ 'finishedDateTime' ] = datetime.datetime.fromtimestamp( os.path.getmtime( frameFilePath ) )
info['durSeconds'] = (info[ 'finishedDateTime' ] - info[ 'installedDateTime' ]).total_seconds()
else:
#info['state'] = 'unfinished'
logger.warning( '%06d, %s not found', frameNum, frameFilePath )
return info
return info
if __name__ == "__main__":
# configure logger formatting
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
ncs.logger.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
#tellInstances.logger.setLevel(logging.INFO)
logger.debug('the logger is configured')
ap = argparse.ArgumentParser( description=__doc__,
fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )
#ap.add_argument( 'blendFilePath', help='the .blend file to render' )
ap.add_argument( '--dataDir', help='where to read and write data', default='aniData' )
ap.add_argument( '--filter', help='json to filter instances for launch' )
ap.add_argument( '--instTimeLimit', type=int, default=900, help='amount of time (in seconds) installer is allowed to take on instances' )
ap.add_argument( '--jobId', help='to identify this job' )
ap.add_argument( '--launch', type=boolArg, default=True, help='to launch and terminate instances' )
ap.add_argument( '--nWorkers', type=int, default=1, help='the # of worker instances to launch (or zero for all available)' )
ap.add_argument( '--sshAgent', type=boolArg, default=False, help='whether or not to use ssh agent' )
ap.add_argument( '--sshClientKeyName', help='the name of the uploaded ssh client key to use (default is random)' )
ap.add_argument( '--timeLimit', type=int, help='time limit (in seconds) for the whole job',
default=24*60*60 )
ap.add_argument( '--useCompositor', type=boolArg, default=True, help='whether or not to use blender compositor' )
# dtr-specific args
ap.add_argument( '--width', type=int, help='the width (in pixels) of the output',
default=960 )
ap.add_argument( '--height', type=int, help='the height (in pixels) of the output',
default=540 )
ap.add_argument( '--blocks_user', type=int, help='the number of blocks to partition the image (or zero for "auto"',
default=0 )
ap.add_argument( '--fileType', choices=['PNG', 'OPEN_EXR'], help='the type of output file',
default='PNG' )
ap.add_argument( '--startFrame', type=int, help='the first frame number to render',
default=1 )
ap.add_argument( '--endFrame', type=int, help='the last frame number to render',
default=1 )
ap.add_argument( '--frameStep', type=int, help='the frame number increment',
default=1 )
ap.add_argument( '--seed', type=int, help='the blender cycles noise seed',
default=0 )
args = ap.parse_args()
#logger.debug('args: %s', args)
startTime = time.time()
extensions = {'PNG': 'png', 'OPEN_EXR': 'exr'}
frameFilePattern = 'rendered_frame_######_seed_%d.%s'%(args.seed,extensions[args.fileType])
dataDirPath = args.dataDir # './aniData'
frameInfos = []
for frameNum in range( args.startFrame, args.endFrame+1, args.frameStep ):
frameInfo = checkFrame( frameNum )
#if frameInfo['state'] != 'finished':
# logger.info( '%s', frameInfo )
frameInfos.append( frameInfo )
framesTable = pd.DataFrame( frameInfos )
framesTable.to_csv( dataDirPath+'/frameSummaries.csv', index=False)
# analyze the "outer" jlog file
animatorFilePath = os.path.join(dataDirPath, 'animateWholeFrames_results.jlog' )
events = readJLog( animatorFilePath )
recTable = pd.DataFrame( events )
recTable['dateTime'] = pd.to_datetime( recTable.dateTime )
#print( recTable.info() )
recTable.to_csv( dataDirPath+'/parsedLog.csv', index=False)
nTerminations = 0
nTerminated = 0
for event in events:
if 'renderFrame would terminate bad instances' in event:
nTerminations += 1
nTerminated += len( event['renderFrame would terminate bad instances'] )
print( nTerminated, 'instances terminated in', nTerminations, 'terminations' )
print( 'g_instanceBads: count', len(g_instanceBads), 'max', g_instanceBads.most_common(1) )
print( 'g_badBadCount', g_badBadCount )
if not len(framesTable):
sys.exit( 'no frames in framesTable')
if not 'finishedDateTime' in framesTable:
sys.exit( 'no frames finished')
#logger.info( '%s', framesTable.info() )
framesTable['launchedDateTime'] = pd.to_datetime( framesTable.launchedDateTime, utc=True )
framesTable['finishedDateTime'] = pd.to_datetime( framesTable.finishedDateTime, utc=True )
finished = framesTable[ ~pd.isnull(framesTable.finishedDateTime) ]
justStarted = framesTable[ pd.isnull(framesTable.finishedDateTime) & ~pd.isnull(framesTable.launchedDateTime) ]
print( len(finished), 'frames finished' )
print( len(justStarted), 'frames launched but not finished' )
print( 'earliest event', recTable.dateTime.min() )
print( 'earliest start', finished.launchedDateTime.min() )
print( 'latest finish', finished.finishedDateTime.max() )
overallDur = finished.finishedDateTime.max() - recTable.dateTime.min()
print( 'overall duration %.2f minutes (%.2f hours)' % (overallDur.total_seconds() / 60, overallDur.total_seconds() / 3600) )
timePerFrame = (finished.finishedDateTime.max() - finished.launchedDateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
timePerFrame = (finished.finishedDateTime.max() - recTable.dateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'real time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
#elapsed = time.time() - startTime
#logger.info( 'finished; elapsed time %.1f seconds (%.1f minutes)', elapsed, elapsed/60 ) | badOnes.add( iid ) | random_line_split |
analyzeAnimatedFrames.py | #!/usr/bin/env python3
"""
analyzes performance of animation rendering, based on files
"""
# standard library modules
import argparse
import collections
import contextlib
from concurrent import futures
import datetime
import getpass
import json
import logging
import math
import os
import socket
import shutil
import signal
import subprocess
import sys
import threading
import time
import uuid
# third-party module(s)
import pandas as pd
import requests
# neocortix modules
try:
import ncs
import devicePerformance
except ImportError:
# set system and python paths for default places, since path seems to be not set properly
ncscliPath = os.path.expanduser('~/ncscli/ncscli')
sys.path.append( ncscliPath )
os.environ["PATH"] += os.pathsep + ncscliPath
import ncs
logger = logging.getLogger(__name__)
# possible place for globals is this class's attributes
class g_:
signaled = False
def boolArg( v ):
'''use with ArgumentParser add_argument for (case-insensitive) boolean arg'''
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def demuxResults( inFilePath ):
'''deinterleave jlog items into separate lists for each instance'''
byInstance = {}
badOnes = set()
topLevelKeys = collections.Counter()
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
for key in decoded:
topLevelKeys[ key ] += 1
iid = decoded.get( 'instanceId', '<unknown>')
have = byInstance.get( iid, [] )
have.append( decoded )
byInstance[iid] = have
if 'returncode' in decoded:
rc = decoded['returncode']
if rc:
logger.info( 'returncode %d for %s', rc, iid )
badOnes.add( iid )
if 'exception' in decoded:
logger.info( 'exception %s for %s', decoded['exception'], iid )
badOnes.add( iid )
if 'timeout' in decoded:
#logger.info( 'timeout %s for %s', decoded['timeout'], iid )
badOnes.add( iid )
return byInstance, badOnes
def readJLog( inFilePath ):
|
def scriptDirPath():
'''returns the absolute path to the directory containing this script'''
return os.path.dirname(os.path.realpath(__file__))
def instanceDpr( inst ):
#logger.info( 'NCSC Inst details %s', inst )
# cpuarch: string like "aarch64" or "armv7l"
# cpunumcores: int
# cpuspeeds: list of floats of length cpunumcores, each representing a clock frequency in GHz
# cpufamily: list of strings of length cpunumcores
cpuarch = inst['cpu']['arch']
cpunumcores = len( inst['cpu']['cores'])
cpuspeeds = []
cpufamily = []
for core in inst['cpu']['cores']:
cpuspeeds.append( core['freq'] / 1e9)
cpufamily.append( core['family'] )
dpr = devicePerformance.devicePerformanceRating( cpuarch, cpunumcores, cpuspeeds, cpufamily )
#print( 'device', inst['device-id'], 'dpr', dpr )
if dpr < 37:
logger.info( 'unhappy dpr for dev %d with cpu %s', inst['device-id'], inst['cpu'] )
return dpr
g_instanceBads = collections.Counter()
g_badBadCount = 0
def checkFrame( frameNum ):
global g_badBadCount
info = { 'frameNum': frameNum, 'state': 'unstarted' }
frameDirPath = os.path.join( dataDirPath, 'frame_%06d' % frameNum )
frameFileName = frameFilePattern.replace( '######', '%06d' % frameNum )
launchedJsonFilePath = os.path.join(frameDirPath, 'data', 'launched.json' )
installerFilePath = os.path.join(frameDirPath, 'data', 'runDistributedBlender.py.jlog' )
frameFilePath = os.path.join(dataDirPath, frameFileName )
if not os.path.isdir( frameDirPath ):
logger.error( '%06d, %s not found', frameNum, frameDirPath )
#info['state'] = 'noData'
return info
if os.path.isfile( launchedJsonFilePath ):
info['state'] = 'launched'
launchedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( launchedJsonFilePath ) )
info[ 'launchedDateTime' ] = launchedDateTime
#logger.info( '%06d, launched %s', frameNum, launchedDateTime.strftime( '%Y-%m-%d_%H%M%S' ) )
# get instances from the launched json file
launchedInstances = []
with open( launchedJsonFilePath, 'r') as jsonInFile:
try:
launchedInstances = json.load(jsonInFile) # an array
except Exception as exc:
logger.warning( 'could not load json (%s) %s', type(exc), exc )
instanceMap = {}
for inst in launchedInstances:
instanceMap[ inst['instanceId'] ] = inst
deviceIds = [inst['device-id'] for inst in launchedInstances]
info['deviceIds'] = deviceIds[0] if len(deviceIds)==1 else deviceIds #scalarize if only one
dpr = [instanceDpr( inst ) for inst in launchedInstances ]
info['dpr'] = dpr[0] if len(dpr)==1 else dpr #scalarize if only one
if os.path.isfile( installerFilePath ):
info['state'] = 'installing'
installedDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( installerFilePath ) )
info[ 'installedDateTime' ] = installedDateTime
(byInstance, badOnes) = demuxResults( installerFilePath )
if badOnes:
if len(badOnes) >= 3:
logger.error( 'BAD BAD BAD' )
g_badBadCount += 1
logger.warning( '%d badOnes for frame %d', len(badOnes), frameNum )
for badIid in badOnes:
g_instanceBads[ badIid ] += 1
iidSet = set( byInstance.keys() )
iidSet.discard( '<master>' )
info['iids'] = list( iidSet )
for iid, events in byInstance.items():
for event in events:
if 'timeout' in event:
logger.warning( 'TIMEOUT for instance %s', event['instanceId'])
if os.path.isfile( frameFilePath ):
info['state'] = 'finished'
info[ 'finishedDateTime' ] = datetime.datetime.fromtimestamp( os.path.getmtime( frameFilePath ) )
info['durSeconds'] = (info[ 'finishedDateTime' ] - info[ 'installedDateTime' ]).total_seconds()
else:
#info['state'] = 'unfinished'
logger.warning( '%06d, %s not found', frameNum, frameFilePath )
return info
return info
if __name__ == "__main__":
# configure logger formatting
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
ncs.logger.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
#tellInstances.logger.setLevel(logging.INFO)
logger.debug('the logger is configured')
ap = argparse.ArgumentParser( description=__doc__,
fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )
#ap.add_argument( 'blendFilePath', help='the .blend file to render' )
ap.add_argument( '--dataDir', help='where to read and write data', default='aniData' )
ap.add_argument( '--filter', help='json to filter instances for launch' )
ap.add_argument( '--instTimeLimit', type=int, default=900, help='amount of time (in seconds) installer is allowed to take on instances' )
ap.add_argument( '--jobId', help='to identify this job' )
ap.add_argument( '--launch', type=boolArg, default=True, help='to launch and terminate instances' )
ap.add_argument( '--nWorkers', type=int, default=1, help='the # of worker instances to launch (or zero for all available)' )
ap.add_argument( '--sshAgent', type=boolArg, default=False, help='whether or not to use ssh agent' )
ap.add_argument( '--sshClientKeyName', help='the name of the uploaded ssh client key to use (default is random)' )
ap.add_argument( '--timeLimit', type=int, help='time limit (in seconds) for the whole job',
default=24*60*60 )
ap.add_argument( '--useCompositor', type=boolArg, default=True, help='whether or not to use blender compositor' )
# dtr-specific args
ap.add_argument( '--width', type=int, help='the width (in pixels) of the output',
default=960 )
ap.add_argument( '--height', type=int, help='the height (in pixels) of the output',
default=540 )
ap.add_argument( '--blocks_user', type=int, help='the number of blocks to partition the image (or zero for "auto"',
default=0 )
ap.add_argument( '--fileType', choices=['PNG', 'OPEN_EXR'], help='the type of output file',
default='PNG' )
ap.add_argument( '--startFrame', type=int, help='the first frame number to render',
default=1 )
ap.add_argument( '--endFrame', type=int, help='the last frame number to render',
default=1 )
ap.add_argument( '--frameStep', type=int, help='the frame number increment',
default=1 )
ap.add_argument( '--seed', type=int, help='the blender cycles noise seed',
default=0 )
args = ap.parse_args()
#logger.debug('args: %s', args)
startTime = time.time()
extensions = {'PNG': 'png', 'OPEN_EXR': 'exr'}
frameFilePattern = 'rendered_frame_######_seed_%d.%s'%(args.seed,extensions[args.fileType])
dataDirPath = args.dataDir # './aniData'
frameInfos = []
for frameNum in range( args.startFrame, args.endFrame+1, args.frameStep ):
frameInfo = checkFrame( frameNum )
#if frameInfo['state'] != 'finished':
# logger.info( '%s', frameInfo )
frameInfos.append( frameInfo )
framesTable = pd.DataFrame( frameInfos )
framesTable.to_csv( dataDirPath+'/frameSummaries.csv', index=False)
# analyze the "outer" jlog file
animatorFilePath = os.path.join(dataDirPath, 'animateWholeFrames_results.jlog' )
events = readJLog( animatorFilePath )
recTable = pd.DataFrame( events )
recTable['dateTime'] = pd.to_datetime( recTable.dateTime )
#print( recTable.info() )
recTable.to_csv( dataDirPath+'/parsedLog.csv', index=False)
nTerminations = 0
nTerminated = 0
for event in events:
if 'renderFrame would terminate bad instances' in event:
nTerminations += 1
nTerminated += len( event['renderFrame would terminate bad instances'] )
print( nTerminated, 'instances terminated in', nTerminations, 'terminations' )
print( 'g_instanceBads: count', len(g_instanceBads), 'max', g_instanceBads.most_common(1) )
print( 'g_badBadCount', g_badBadCount )
if not len(framesTable):
sys.exit( 'no frames in framesTable')
if not 'finishedDateTime' in framesTable:
sys.exit( 'no frames finished')
#logger.info( '%s', framesTable.info() )
framesTable['launchedDateTime'] = pd.to_datetime( framesTable.launchedDateTime, utc=True )
framesTable['finishedDateTime'] = pd.to_datetime( framesTable.finishedDateTime, utc=True )
finished = framesTable[ ~pd.isnull(framesTable.finishedDateTime) ]
justStarted = framesTable[ pd.isnull(framesTable.finishedDateTime) & ~pd.isnull(framesTable.launchedDateTime) ]
print( len(finished), 'frames finished' )
print( len(justStarted), 'frames launched but not finished' )
print( 'earliest event', recTable.dateTime.min() )
print( 'earliest start', finished.launchedDateTime.min() )
print( 'latest finish', finished.finishedDateTime.max() )
overallDur = finished.finishedDateTime.max() - recTable.dateTime.min()
print( 'overall duration %.2f minutes (%.2f hours)' % (overallDur.total_seconds() / 60, overallDur.total_seconds() / 3600) )
timePerFrame = (finished.finishedDateTime.max() - finished.launchedDateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
timePerFrame = (finished.finishedDateTime.max() - recTable.dateTime.min()) / len(finished)
#print( 'time per frame', timePerFrame, type(timePerFrame) )
print( 'real time per frame %d seconds (%.2f minutes)' % \
(timePerFrame.total_seconds(), timePerFrame.total_seconds()/60 ) )
#elapsed = time.time() - startTime
#logger.info( 'finished; elapsed time %.1f seconds (%.1f minutes)', elapsed, elapsed/60 )
| '''read JLog file, return list of decoded objects'''
recs = []
topLevelKeys = collections.Counter() # for debugging
# demux by instance
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
if isinstance( decoded, dict ):
for key in decoded:
topLevelKeys[ key ] += 1
recs.append( decoded )
logger.info( 'topLevelKeys: %s', topLevelKeys )
return recs | identifier_body |
shell.py | # -*- coding: utf-8 -*-
# import os
# os.chdir('D:/Denis/python/freemind-tools')
import sys
# sys.path.append('D:/Denis/python/freemind-tools')
import os.path
import re
from datetime import date, datetime
from pprint import pprint
import freemind
BTN_OK = 'button_ok'
BTN_STOP = 'stop-sign'
BTN_CANCEL = 'button_cancel'
# def print_as_tree(node, level):
# for key in node.keys():
# if key == '@TEXT':
# print(level * ' ' + node[key])
def display_nodes(nodes:list, level=0):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + nodes.get_title())
if nodes.has_content():
content = nodes.get_content()
l = level + 1
print(l * ' ' + '---')
delim = l * ' '
print(delim + ('\n' + delim).join(content.split("\n")))
print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
display_nodes(node, level + 1)
def display_command(path=None):
result = query_nodes(path)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
display_nodes(result)
def check_path(path=None):
if path is None or not os.path.isfile(path):
print("Please define valid path")
exit()
def dict_set(props, name, value):
if name in props:
props[name].append(value)
else:
props.setdefault(name, [value])
def node_is_expired(node):
return node.has_attr('Due') and node.get_attr('Due') < datetime.now()
def format_icon(icon):
if icon == BTN_OK:
return 'DONE'
elif icon == BTN_STOP:
return 'STOP'
elif icon == BTN_CANCEL:
return 'CANCEL'
else:
return icon
def format_icons(icons):
if icons is None:
return []
return [format_icon(i) for i in icons]
def full_node_path(fullNodePath, node):
return (full_node_path(fullNodePath, node.get_parent()) if
node.get_parent() is not None and node.get_parent().get_title() != 'root' else fullNodePath) + '/' + node.get_title()
def format_node(node: freemind.FreeMindNode, format="flag parent / title {attrs} icon") -> str:
"""
Format: parent, flag, title, icon, attrs
:param node: some node
:param format: use format
:return: formatted string
"""
attrs = []
for i in ["Due", "Start"]:
if node.has_attr(i):
attrs.append("%s: %s" % (i, date.strftime(node.get_attr(i), "%d.%m.%Y")))
if node.has_attr("Assigned"):
attrs.append(node.get_attr("Assigned"))
flag = ''
if node_is_expired(node):
flag += '[EXPIRED] '
parent = ''
grandparent = ''
fullnodepath = full_node_path('', node)
if node.get_parent() is not None:
parent = node.get_parent().get_title()
if node.get_parent().get_parent() is not None:
grandparent = node.get_parent().get_parent().get_title()
icon = ", ".join(format_icons(node.get_attr('@ICON')))
if node.has_content():
content = "--\n%s\n--\n" % node.get_content()
else:
content = ''
# return "%s%s / %s {%s} %s" % (flag, parent, node.get_title(), ", ".join(attrs), node.get_attr('@ICON'))
# print(format.replace('icon', node.get_attr('@ICON')))
attr_pattrn = re.compile("{@([\w\-]+)}")
return attr_pattrn.sub(lambda m: node.get_attr(m.group(1)), format) \
.replace('{parent}', parent) \
.replace('{grandparent}', grandparent) \
.replace('{fullnodepath}', fullnodepath) \
.replace('{flag}', flag) \
.replace('{title}', node.get_title()) \
.replace('{attrs}', ", ".join(attrs)) \
.replace('{icon}', icon)\
.replace('{content}', content)
def match_condition(node, filter_rules):
# if len(filter_rules) == 1 and filter_rules[0] == '':
# return True
conditions = []
for i in filter_rules:
if i == '': # if empty condition passed
conditions.append(False)
elif i == '*':
conditions.append(True)
elif i[:5] == 'title' and node.get_title() == i[6:]:
conditions.append(True)
elif i == 'root' and node.get_parent() is None:
conditions.append(True)
elif i[:3] == 'id:' and node.has_attr("@ID") and i[3:] == node.get_attr("@ID"):
conditions.append(True)
elif i[:4] == 'icon' and node.has_attr("@ICON") and i[5:] in node.get_attr("@ICON"):
conditions.append(True)
elif i[:9] == 'has-attr:' and node.has_attr(i[9:]):
conditions.append(True)
elif i[:11] == 'hasnt-attr:' and not node.has_attr(i[11:]):
conditions.append(True)
elif i[:5] == '!icon' and node.has_attr("@ICON") and i[6:] not in node.get_attr("@ICON"):
conditions.append(True)
elif i == 'expired' and node_is_expired(node):
conditions.append(True)
elif i == 'not-expired' and not node_is_expired(node):
conditions.append(True)
elif i.lower() == 'not-assigned' and not node.has_attr("Assigned"):
conditions.append(True)
elif i.lower()[:8] == 'assigned' and node.has_attr("Assigned"):
if len(i) == 8:
conditions.append(True) # just assigned
elif node.get_attr("Assigned") == i[9:]:
conditions.append(True) # assigned and has passed assignee
else:
conditions.append(False)
# TODO : simplify it later
# print(node)
# print(filter_rules, conditions)
return len([i for i in conditions if i]) > 0
def nodes_select(doc, rules: str):
if rules == '':
return doc
conditions = rules.split(',')
return [i for i in freemind.traverse(doc, lambda n: match_condition(n, conditions))]
def nodes_filter(nodes: list, rules: str):
if rules == '':
return nodes
conditions = rules.split(',')
return [i for i in nodes if not match_condition(i, conditions)]
def | (path=None, select='', filter=''):
check_path(path)
doc = freemind.freemind_load(path)
result = nodes_select(doc, select)
result = nodes_filter(result, filter)
return result
def todo_command(path=None, select='', filter='', group='', format='flag title {attrs} icon'):
result = query_nodes(path, select=select, filter=filter)
# for i in result:
# print(i)
# exit()
if group == '':
for i in result:
print(format_node(i, format=format))
else:
groups = {}
for i in result:
if i.has_attr(group):
dict_set(groups, i.get_attr(group), i)
else:
dict_set(groups, 'None', i)
for i in sorted(groups.keys()):
# sys.stdout.buffer.write(i)
# print(i)
print(i)
print("\n".join(["\t%s" % format_node(i, format=format) for i in sorted(groups[i], key=lambda x: x.get_title())]))
def process_goals(nodes:list, filter:list, level=0, format='flag title {attrs} icon'):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + format_node(nodes, format=format))
# if nodes.has_content():
# content = nodes.get_content()
# l = level + 1
# if description == 'yes':
# print(l * ' ' + '---')
# delim = l * ' '
# print(delim + ('\n' + delim).join(content.split("\n")))
# print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
if not match_condition(node, filter):
process_goals(node, filter, level + 1, format=format)
def goals_command(path=None, select='', filter='', format='flag title {attrs} icon'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter.split(','), format=format)
def stat_command(path=None):
check_path(path)
print("Goals count: %s" % len(query_nodes(path, '*', '')))
print("Goals done: %s" % len(query_nodes(path, 'icon-%s' % BTN_OK, '')))
print("Goals canceled: %s" % len(query_nodes(path, 'icon-%s' % BTN_CANCEL, '')))
print("Goals stoped: %s" % len(query_nodes(path, 'icon-%s' % BTN_STOP, '')))
node = freemind.freemind_load(path)
print("Goals in progress: %s" % len(freemind.select_bottom(node)))
def traverse_command(path=None, select='', filter='', format='title'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter=filter.split(','), format=format)
def questions_command(path=None):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'counter'):
fn_list.counter = 1
if n.has_attr('@ICON') and 'help' in n.get_attr('@ICON'):
print("%s. %s\n %s\n" % (fn_list.counter, n.get_title(), n.get_content()))
fn_list.counter += 1
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
def estimate_command(path=None, format=' - {grandparent}/{parent}/{title}, {@estimate}h'):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'result'):
fn_list.result = []
fn_list.total = 0
if n.has_attr('estimate'):
if n.has_attr('@ICON') and BTN_STOP in n.get_attr('@ICON'):
return
fn_list.result.append(format_node(n, format))
fn_list.total += float(n.get_attr('estimate'))
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
[print(i) for i in sorted(fn_list.result)]
print("\nTotal: %sh" % fn_list.total)
def competences_command(path=None):
check_path(path)
result = query_nodes(path, select='title:Activities')
if len(result) != 1:
print('Activities node missed')
return
import json
output = []
for competence in result[0]:
tech = False
if competence.has_attr('tech') and competence.get_attr('tech').lower() == 'yes':
tech = True
project = False
if competence.has_attr('project') and competence.get_attr('project').lower() == 'yes':
project = True
roles = '-'
if competence.has_attr('roles'):
roles = competence.get_attr('roles')
confirm = ''
if competence.has_attr('confirm'):
confirm = competence.get_attr('confirm')
employee = False
if competence.has_attr('employee') and competence.get_attr('employee').lower() == 'yes':
employee = True
# print("%s [tech: %s, project: %s, roles: %s]" % (competence.get_title(), tech, project, roles))
competences = []
for competence_level in competence:
if competence_level.get_title() == 'attribute_layout':
continue
level = 0
if competence_level.has_attr('@ICON'):
if 'full-1' in competence_level.get_attr('@ICON'):
level = '1'
elif 'full-2' in competence_level.get_attr('@ICON'):
level = '2'
elif 'full-3' in competence_level.get_attr('@ICON'):
level = '3'
elif 'full-4' in competence_level.get_attr('@ICON'):
level = '4'
# print(" %s. %s" % (level, competence_level.get_title()))
competences.append({'level': level, 'desc': competence_level.get_title()})
output.append({
'title': competence.get_title(),
'tech': tech,
'project': project,
'roles': roles.split(','),
'confirm': confirm.split(','),
'competences': competences,
'employee': employee
})
print(json.dumps(output))
def __revert_spaces(s):
return s.replace('§', ' ')
def spec_command(path=None, parts=None, out=None):
check_path(path)
doc = freemind.freemind_load(path)
if out:
output = open(out, "w", encoding="utf-8")
def process_node(n, level):
if n.has_attr('@ICON') and 'button_cancel' in n.get_attr('@ICON'):
return
result = level * '#' + ' ' + n.get_title() + "\n\n"
if n.has_content():
result += n.get_content() + "\n\n"
outStr = __revert_spaces(result)
if out:
output.write(outStr)
else:
print(outStr)
def fn_list(nodes, level):
if not nodes:
return
for n in nodes:
process_node(n, level)
fn_list(n, level+1)
if parts is None:
pass
else:
for part in parts.split(';'):
nodes = nodes_select(doc, "title:%s" % part)
fn_list(nodes, 1)
# print(nodes)
if out:
output.close()
def tex_command(path):
check_path(path)
doc = open(path).read()
begin_marker = '\\begin{document}'
end_marker = '\\end{document}'
begin_pos = doc.find(begin_marker)
end_pos = doc.find(end_marker)
if begin_pos < 0 or end_pos < 0:
print('Markers not found')
return
print(doc[begin_pos+len(begin_marker):end_pos])
if __name__ == '__main__':
# shell.py search --path=Goals.mm --icon=stop-sign // nodes on hold
# shell.py search --path=Goals.mm --icon=yes // important nodes
# shell.py search --path=Goals.mm // all nodes
# shell.py group --path=Goals.mm --by=resource // actual tasks by resource
# shell.py group --path=Goals.mm --by=expired // expired tasks
# mm todo --path=Goals.mm --filter=not-assigned --group=Assigned
# mm todo --path=Goals.mm --filter=expired
# mm traverse --path=tests\Test.mm --select="title:New Mindmap" --format="title @Assigned"
# node = freemind.FreeMindNode(None)
# node.set_title('test')
# node.add_attr('@ICON', 'stop-sign')
# print(should_filter(node, ['icon-stop-sign']))
# goals_command('Goals.mm', select='title:Ilya Levoshko (QA)', filter='icon-button_ok,icon-stop-sign', description='no')
# todo_command('Goals.mm', select='expired', filter='icon-button_ok')
# todo_command('Goals.mm', select='assigned:@Sheremetov', filter='icon-button_ok')
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='assigned', filter='icon-button_ok')
# exit()
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='id:363b6bf92c5df3e2dc30043f1212d103')
# stat_command(PATH)
# nodes = query_nodes(PATH, 'icon-button_ok', '')
# print(len(nodes))
# nodes = freemind.freemind_load('Goals.mm')
# todo_command('tests/Test.mm')
# result = freemind.freemind_load('D:/temp/presale/impesa/test.mm')
# freemind.traverse_with_level(result, lambda n, l: print((l-1) * ' ' + format_node(n, '{title}')))
# traverse_command('tests/Test.mm', select='title:New Mindmap', filter='', format='title {@Assigned}')
# traverse_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm", select='title:Screens', filter='icon-stop-sign', format='title,{@estimate},{@estimate-res}')
# print([i.get_title() for i in query_nodes("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")])
# freemind.traverse(result, lambda n: print(format_node(n, 'title')))
# estimate_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")
# estimate_command("tests/Test.mm")
# spec_command('D:/temp/presale/impesa/Impesa.mm', 'Web application functionality')
# display_command('D:/temp/presale/impesa/impesa.mm')
# tex_command('D:/temp/presale/impesa/impesa.tex')
# exit()
# PATH = '/home/denis/Dropbox/Onix/skills-matrix-v2.mm'
# competences_command(PATH)
# exit()
# python3 shell.py competences --path=/home/denis/Dropbox/Onix/skills-matrix-v2.mm > /var/www/hrm/web/code/webroot/competences.json
# result = query_nodes('tests/TestFP.mm', select='id:ID_1232863674')
# pprint(result[0][0])
# for i in result[0]:
# pprint(i)
# print('--')
# exit()
from commandliner import commandliner
commandliner(locals())
| query_nodes | identifier_name |
shell.py | # -*- coding: utf-8 -*-
# import os
# os.chdir('D:/Denis/python/freemind-tools')
import sys
# sys.path.append('D:/Denis/python/freemind-tools')
import os.path
import re
from datetime import date, datetime
from pprint import pprint
import freemind
BTN_OK = 'button_ok'
BTN_STOP = 'stop-sign'
BTN_CANCEL = 'button_cancel'
# def print_as_tree(node, level):
# for key in node.keys():
# if key == '@TEXT':
# print(level * ' ' + node[key])
def display_nodes(nodes:list, level=0):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + nodes.get_title())
if nodes.has_content():
content = nodes.get_content()
l = level + 1
print(l * ' ' + '---')
delim = l * ' '
print(delim + ('\n' + delim).join(content.split("\n")))
print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
display_nodes(node, level + 1)
def display_command(path=None):
result = query_nodes(path)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
display_nodes(result)
def check_path(path=None):
if path is None or not os.path.isfile(path):
print("Please define valid path")
exit()
def dict_set(props, name, value):
if name in props:
props[name].append(value)
else:
props.setdefault(name, [value])
def node_is_expired(node):
return node.has_attr('Due') and node.get_attr('Due') < datetime.now()
def format_icon(icon):
if icon == BTN_OK:
return 'DONE'
elif icon == BTN_STOP:
return 'STOP'
elif icon == BTN_CANCEL:
return 'CANCEL'
else:
return icon
def format_icons(icons):
if icons is None:
return []
return [format_icon(i) for i in icons]
def full_node_path(fullNodePath, node):
return (full_node_path(fullNodePath, node.get_parent()) if
node.get_parent() is not None and node.get_parent().get_title() != 'root' else fullNodePath) + '/' + node.get_title()
def format_node(node: freemind.FreeMindNode, format="flag parent / title {attrs} icon") -> str:
"""
Format: parent, flag, title, icon, attrs
:param node: some node
:param format: use format
:return: formatted string
"""
attrs = []
for i in ["Due", "Start"]:
if node.has_attr(i):
attrs.append("%s: %s" % (i, date.strftime(node.get_attr(i), "%d.%m.%Y")))
if node.has_attr("Assigned"):
attrs.append(node.get_attr("Assigned"))
flag = ''
if node_is_expired(node):
flag += '[EXPIRED] '
parent = ''
grandparent = ''
fullnodepath = full_node_path('', node)
if node.get_parent() is not None:
parent = node.get_parent().get_title()
if node.get_parent().get_parent() is not None:
grandparent = node.get_parent().get_parent().get_title()
icon = ", ".join(format_icons(node.get_attr('@ICON')))
if node.has_content():
content = "--\n%s\n--\n" % node.get_content()
else:
content = ''
# return "%s%s / %s {%s} %s" % (flag, parent, node.get_title(), ", ".join(attrs), node.get_attr('@ICON'))
# print(format.replace('icon', node.get_attr('@ICON')))
attr_pattrn = re.compile("{@([\w\-]+)}")
return attr_pattrn.sub(lambda m: node.get_attr(m.group(1)), format) \
.replace('{parent}', parent) \
.replace('{grandparent}', grandparent) \
.replace('{fullnodepath}', fullnodepath) \
.replace('{flag}', flag) \
.replace('{title}', node.get_title()) \
.replace('{attrs}', ", ".join(attrs)) \
.replace('{icon}', icon)\
.replace('{content}', content)
def match_condition(node, filter_rules):
# if len(filter_rules) == 1 and filter_rules[0] == '':
# return True
conditions = []
for i in filter_rules:
if i == '': # if empty condition passed
conditions.append(False)
elif i == '*':
conditions.append(True)
elif i[:5] == 'title' and node.get_title() == i[6:]:
conditions.append(True)
elif i == 'root' and node.get_parent() is None:
conditions.append(True)
elif i[:3] == 'id:' and node.has_attr("@ID") and i[3:] == node.get_attr("@ID"):
conditions.append(True)
elif i[:4] == 'icon' and node.has_attr("@ICON") and i[5:] in node.get_attr("@ICON"):
conditions.append(True)
elif i[:9] == 'has-attr:' and node.has_attr(i[9:]):
conditions.append(True)
elif i[:11] == 'hasnt-attr:' and not node.has_attr(i[11:]):
conditions.append(True)
elif i[:5] == '!icon' and node.has_attr("@ICON") and i[6:] not in node.get_attr("@ICON"):
conditions.append(True)
elif i == 'expired' and node_is_expired(node):
conditions.append(True)
elif i == 'not-expired' and not node_is_expired(node):
conditions.append(True)
elif i.lower() == 'not-assigned' and not node.has_attr("Assigned"):
conditions.append(True)
elif i.lower()[:8] == 'assigned' and node.has_attr("Assigned"):
if len(i) == 8:
conditions.append(True) # just assigned
elif node.get_attr("Assigned") == i[9:]:
conditions.append(True) # assigned and has passed assignee
else:
conditions.append(False)
# TODO : simplify it later
# print(node)
# print(filter_rules, conditions)
return len([i for i in conditions if i]) > 0
def nodes_select(doc, rules: str):
if rules == '':
return doc
conditions = rules.split(',')
return [i for i in freemind.traverse(doc, lambda n: match_condition(n, conditions))]
def nodes_filter(nodes: list, rules: str):
if rules == '':
return nodes
conditions = rules.split(',')
return [i for i in nodes if not match_condition(i, conditions)]
def query_nodes(path=None, select='', filter=''):
check_path(path)
doc = freemind.freemind_load(path)
result = nodes_select(doc, select)
result = nodes_filter(result, filter)
return result
def todo_command(path=None, select='', filter='', group='', format='flag title {attrs} icon'):
result = query_nodes(path, select=select, filter=filter)
# for i in result:
# print(i)
# exit()
if group == '':
for i in result:
print(format_node(i, format=format))
else:
groups = {}
for i in result:
if i.has_attr(group):
dict_set(groups, i.get_attr(group), i)
else:
dict_set(groups, 'None', i)
for i in sorted(groups.keys()):
# sys.stdout.buffer.write(i)
# print(i)
print(i)
print("\n".join(["\t%s" % format_node(i, format=format) for i in sorted(groups[i], key=lambda x: x.get_title())]))
def process_goals(nodes:list, filter:list, level=0, format='flag title {attrs} icon'):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + format_node(nodes, format=format))
# if nodes.has_content():
# content = nodes.get_content()
# l = level + 1
# if description == 'yes':
# print(l * ' ' + '---')
# delim = l * ' '
# print(delim + ('\n' + delim).join(content.split("\n")))
# print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
if not match_condition(node, filter):
process_goals(node, filter, level + 1, format=format)
def goals_command(path=None, select='', filter='', format='flag title {attrs} icon'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter.split(','), format=format)
def stat_command(path=None):
check_path(path)
print("Goals count: %s" % len(query_nodes(path, '*', '')))
print("Goals done: %s" % len(query_nodes(path, 'icon-%s' % BTN_OK, '')))
print("Goals canceled: %s" % len(query_nodes(path, 'icon-%s' % BTN_CANCEL, '')))
print("Goals stoped: %s" % len(query_nodes(path, 'icon-%s' % BTN_STOP, '')))
node = freemind.freemind_load(path)
print("Goals in progress: %s" % len(freemind.select_bottom(node)))
def traverse_command(path=None, select='', filter='', format='title'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter=filter.split(','), format=format)
def questions_command(path=None):
|
def estimate_command(path=None, format=' - {grandparent}/{parent}/{title}, {@estimate}h'):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'result'):
fn_list.result = []
fn_list.total = 0
if n.has_attr('estimate'):
if n.has_attr('@ICON') and BTN_STOP in n.get_attr('@ICON'):
return
fn_list.result.append(format_node(n, format))
fn_list.total += float(n.get_attr('estimate'))
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
[print(i) for i in sorted(fn_list.result)]
print("\nTotal: %sh" % fn_list.total)
def competences_command(path=None):
check_path(path)
result = query_nodes(path, select='title:Activities')
if len(result) != 1:
print('Activities node missed')
return
import json
output = []
for competence in result[0]:
tech = False
if competence.has_attr('tech') and competence.get_attr('tech').lower() == 'yes':
tech = True
project = False
if competence.has_attr('project') and competence.get_attr('project').lower() == 'yes':
project = True
roles = '-'
if competence.has_attr('roles'):
roles = competence.get_attr('roles')
confirm = ''
if competence.has_attr('confirm'):
confirm = competence.get_attr('confirm')
employee = False
if competence.has_attr('employee') and competence.get_attr('employee').lower() == 'yes':
employee = True
# print("%s [tech: %s, project: %s, roles: %s]" % (competence.get_title(), tech, project, roles))
competences = []
for competence_level in competence:
if competence_level.get_title() == 'attribute_layout':
continue
level = 0
if competence_level.has_attr('@ICON'):
if 'full-1' in competence_level.get_attr('@ICON'):
level = '1'
elif 'full-2' in competence_level.get_attr('@ICON'):
level = '2'
elif 'full-3' in competence_level.get_attr('@ICON'):
level = '3'
elif 'full-4' in competence_level.get_attr('@ICON'):
level = '4'
# print(" %s. %s" % (level, competence_level.get_title()))
competences.append({'level': level, 'desc': competence_level.get_title()})
output.append({
'title': competence.get_title(),
'tech': tech,
'project': project,
'roles': roles.split(','),
'confirm': confirm.split(','),
'competences': competences,
'employee': employee
})
print(json.dumps(output))
def __revert_spaces(s):
return s.replace('§', ' ')
def spec_command(path=None, parts=None, out=None):
check_path(path)
doc = freemind.freemind_load(path)
if out:
output = open(out, "w", encoding="utf-8")
def process_node(n, level):
if n.has_attr('@ICON') and 'button_cancel' in n.get_attr('@ICON'):
return
result = level * '#' + ' ' + n.get_title() + "\n\n"
if n.has_content():
result += n.get_content() + "\n\n"
outStr = __revert_spaces(result)
if out:
output.write(outStr)
else:
print(outStr)
def fn_list(nodes, level):
if not nodes:
return
for n in nodes:
process_node(n, level)
fn_list(n, level+1)
if parts is None:
pass
else:
for part in parts.split(';'):
nodes = nodes_select(doc, "title:%s" % part)
fn_list(nodes, 1)
# print(nodes)
if out:
output.close()
def tex_command(path):
check_path(path)
doc = open(path).read()
begin_marker = '\\begin{document}'
end_marker = '\\end{document}'
begin_pos = doc.find(begin_marker)
end_pos = doc.find(end_marker)
if begin_pos < 0 or end_pos < 0:
print('Markers not found')
return
print(doc[begin_pos+len(begin_marker):end_pos])
if __name__ == '__main__':
# shell.py search --path=Goals.mm --icon=stop-sign // nodes on hold
# shell.py search --path=Goals.mm --icon=yes // important nodes
# shell.py search --path=Goals.mm // all nodes
# shell.py group --path=Goals.mm --by=resource // actual tasks by resource
# shell.py group --path=Goals.mm --by=expired // expired tasks
# mm todo --path=Goals.mm --filter=not-assigned --group=Assigned
# mm todo --path=Goals.mm --filter=expired
# mm traverse --path=tests\Test.mm --select="title:New Mindmap" --format="title @Assigned"
# node = freemind.FreeMindNode(None)
# node.set_title('test')
# node.add_attr('@ICON', 'stop-sign')
# print(should_filter(node, ['icon-stop-sign']))
# goals_command('Goals.mm', select='title:Ilya Levoshko (QA)', filter='icon-button_ok,icon-stop-sign', description='no')
# todo_command('Goals.mm', select='expired', filter='icon-button_ok')
# todo_command('Goals.mm', select='assigned:@Sheremetov', filter='icon-button_ok')
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='assigned', filter='icon-button_ok')
# exit()
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='id:363b6bf92c5df3e2dc30043f1212d103')
# stat_command(PATH)
# nodes = query_nodes(PATH, 'icon-button_ok', '')
# print(len(nodes))
# nodes = freemind.freemind_load('Goals.mm')
# todo_command('tests/Test.mm')
# result = freemind.freemind_load('D:/temp/presale/impesa/test.mm')
# freemind.traverse_with_level(result, lambda n, l: print((l-1) * ' ' + format_node(n, '{title}')))
# traverse_command('tests/Test.mm', select='title:New Mindmap', filter='', format='title {@Assigned}')
# traverse_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm", select='title:Screens', filter='icon-stop-sign', format='title,{@estimate},{@estimate-res}')
# print([i.get_title() for i in query_nodes("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")])
# freemind.traverse(result, lambda n: print(format_node(n, 'title')))
# estimate_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")
# estimate_command("tests/Test.mm")
# spec_command('D:/temp/presale/impesa/Impesa.mm', 'Web application functionality')
# display_command('D:/temp/presale/impesa/impesa.mm')
# tex_command('D:/temp/presale/impesa/impesa.tex')
# exit()
# PATH = '/home/denis/Dropbox/Onix/skills-matrix-v2.mm'
# competences_command(PATH)
# exit()
# python3 shell.py competences --path=/home/denis/Dropbox/Onix/skills-matrix-v2.mm > /var/www/hrm/web/code/webroot/competences.json
# result = query_nodes('tests/TestFP.mm', select='id:ID_1232863674')
# pprint(result[0][0])
# for i in result[0]:
# pprint(i)
# print('--')
# exit()
from commandliner import commandliner
commandliner(locals())
| check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'counter'):
fn_list.counter = 1
if n.has_attr('@ICON') and 'help' in n.get_attr('@ICON'):
print("%s. %s\n %s\n" % (fn_list.counter, n.get_title(), n.get_content()))
fn_list.counter += 1
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list) | identifier_body |
shell.py | # -*- coding: utf-8 -*-
# import os
# os.chdir('D:/Denis/python/freemind-tools')
import sys
# sys.path.append('D:/Denis/python/freemind-tools')
import os.path
import re
from datetime import date, datetime
from pprint import pprint
import freemind
BTN_OK = 'button_ok'
BTN_STOP = 'stop-sign'
BTN_CANCEL = 'button_cancel'
# def print_as_tree(node, level):
# for key in node.keys():
# if key == '@TEXT':
# print(level * ' ' + node[key])
def display_nodes(nodes:list, level=0):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + nodes.get_title())
if nodes.has_content():
content = nodes.get_content()
l = level + 1
print(l * ' ' + '---')
delim = l * ' '
print(delim + ('\n' + delim).join(content.split("\n")))
print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
display_nodes(node, level + 1)
def display_command(path=None):
result = query_nodes(path)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
display_nodes(result)
def check_path(path=None):
if path is None or not os.path.isfile(path):
print("Please define valid path")
exit()
def dict_set(props, name, value):
if name in props:
props[name].append(value)
else:
props.setdefault(name, [value])
def node_is_expired(node):
return node.has_attr('Due') and node.get_attr('Due') < datetime.now()
def format_icon(icon):
if icon == BTN_OK:
return 'DONE'
elif icon == BTN_STOP:
return 'STOP'
elif icon == BTN_CANCEL:
return 'CANCEL'
else:
return icon
def format_icons(icons):
if icons is None:
return []
return [format_icon(i) for i in icons]
def full_node_path(fullNodePath, node):
return (full_node_path(fullNodePath, node.get_parent()) if
node.get_parent() is not None and node.get_parent().get_title() != 'root' else fullNodePath) + '/' + node.get_title()
def format_node(node: freemind.FreeMindNode, format="flag parent / title {attrs} icon") -> str:
"""
Format: parent, flag, title, icon, attrs
:param node: some node
:param format: use format
:return: formatted string
"""
attrs = []
for i in ["Due", "Start"]:
if node.has_attr(i):
attrs.append("%s: %s" % (i, date.strftime(node.get_attr(i), "%d.%m.%Y")))
if node.has_attr("Assigned"):
attrs.append(node.get_attr("Assigned"))
flag = ''
if node_is_expired(node):
flag += '[EXPIRED] '
parent = ''
grandparent = ''
fullnodepath = full_node_path('', node)
if node.get_parent() is not None:
parent = node.get_parent().get_title()
if node.get_parent().get_parent() is not None:
|
icon = ", ".join(format_icons(node.get_attr('@ICON')))
if node.has_content():
content = "--\n%s\n--\n" % node.get_content()
else:
content = ''
# return "%s%s / %s {%s} %s" % (flag, parent, node.get_title(), ", ".join(attrs), node.get_attr('@ICON'))
# print(format.replace('icon', node.get_attr('@ICON')))
attr_pattrn = re.compile("{@([\w\-]+)}")
return attr_pattrn.sub(lambda m: node.get_attr(m.group(1)), format) \
.replace('{parent}', parent) \
.replace('{grandparent}', grandparent) \
.replace('{fullnodepath}', fullnodepath) \
.replace('{flag}', flag) \
.replace('{title}', node.get_title()) \
.replace('{attrs}', ", ".join(attrs)) \
.replace('{icon}', icon)\
.replace('{content}', content)
def match_condition(node, filter_rules):
# if len(filter_rules) == 1 and filter_rules[0] == '':
# return True
conditions = []
for i in filter_rules:
if i == '': # if empty condition passed
conditions.append(False)
elif i == '*':
conditions.append(True)
elif i[:5] == 'title' and node.get_title() == i[6:]:
conditions.append(True)
elif i == 'root' and node.get_parent() is None:
conditions.append(True)
elif i[:3] == 'id:' and node.has_attr("@ID") and i[3:] == node.get_attr("@ID"):
conditions.append(True)
elif i[:4] == 'icon' and node.has_attr("@ICON") and i[5:] in node.get_attr("@ICON"):
conditions.append(True)
elif i[:9] == 'has-attr:' and node.has_attr(i[9:]):
conditions.append(True)
elif i[:11] == 'hasnt-attr:' and not node.has_attr(i[11:]):
conditions.append(True)
elif i[:5] == '!icon' and node.has_attr("@ICON") and i[6:] not in node.get_attr("@ICON"):
conditions.append(True)
elif i == 'expired' and node_is_expired(node):
conditions.append(True)
elif i == 'not-expired' and not node_is_expired(node):
conditions.append(True)
elif i.lower() == 'not-assigned' and not node.has_attr("Assigned"):
conditions.append(True)
elif i.lower()[:8] == 'assigned' and node.has_attr("Assigned"):
if len(i) == 8:
conditions.append(True) # just assigned
elif node.get_attr("Assigned") == i[9:]:
conditions.append(True) # assigned and has passed assignee
else:
conditions.append(False)
# TODO : simplify it later
# print(node)
# print(filter_rules, conditions)
return len([i for i in conditions if i]) > 0
def nodes_select(doc, rules: str):
if rules == '':
return doc
conditions = rules.split(',')
return [i for i in freemind.traverse(doc, lambda n: match_condition(n, conditions))]
def nodes_filter(nodes: list, rules: str):
if rules == '':
return nodes
conditions = rules.split(',')
return [i for i in nodes if not match_condition(i, conditions)]
def query_nodes(path=None, select='', filter=''):
check_path(path)
doc = freemind.freemind_load(path)
result = nodes_select(doc, select)
result = nodes_filter(result, filter)
return result
def todo_command(path=None, select='', filter='', group='', format='flag title {attrs} icon'):
result = query_nodes(path, select=select, filter=filter)
# for i in result:
# print(i)
# exit()
if group == '':
for i in result:
print(format_node(i, format=format))
else:
groups = {}
for i in result:
if i.has_attr(group):
dict_set(groups, i.get_attr(group), i)
else:
dict_set(groups, 'None', i)
for i in sorted(groups.keys()):
# sys.stdout.buffer.write(i)
# print(i)
print(i)
print("\n".join(["\t%s" % format_node(i, format=format) for i in sorted(groups[i], key=lambda x: x.get_title())]))
def process_goals(nodes:list, filter:list, level=0, format='flag title {attrs} icon'):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + format_node(nodes, format=format))
# if nodes.has_content():
# content = nodes.get_content()
# l = level + 1
# if description == 'yes':
# print(l * ' ' + '---')
# delim = l * ' '
# print(delim + ('\n' + delim).join(content.split("\n")))
# print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
if not match_condition(node, filter):
process_goals(node, filter, level + 1, format=format)
def goals_command(path=None, select='', filter='', format='flag title {attrs} icon'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter.split(','), format=format)
def stat_command(path=None):
check_path(path)
print("Goals count: %s" % len(query_nodes(path, '*', '')))
print("Goals done: %s" % len(query_nodes(path, 'icon-%s' % BTN_OK, '')))
print("Goals canceled: %s" % len(query_nodes(path, 'icon-%s' % BTN_CANCEL, '')))
print("Goals stoped: %s" % len(query_nodes(path, 'icon-%s' % BTN_STOP, '')))
node = freemind.freemind_load(path)
print("Goals in progress: %s" % len(freemind.select_bottom(node)))
def traverse_command(path=None, select='', filter='', format='title'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter=filter.split(','), format=format)
def questions_command(path=None):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'counter'):
fn_list.counter = 1
if n.has_attr('@ICON') and 'help' in n.get_attr('@ICON'):
print("%s. %s\n %s\n" % (fn_list.counter, n.get_title(), n.get_content()))
fn_list.counter += 1
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
def estimate_command(path=None, format=' - {grandparent}/{parent}/{title}, {@estimate}h'):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'result'):
fn_list.result = []
fn_list.total = 0
if n.has_attr('estimate'):
if n.has_attr('@ICON') and BTN_STOP in n.get_attr('@ICON'):
return
fn_list.result.append(format_node(n, format))
fn_list.total += float(n.get_attr('estimate'))
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
[print(i) for i in sorted(fn_list.result)]
print("\nTotal: %sh" % fn_list.total)
def competences_command(path=None):
check_path(path)
result = query_nodes(path, select='title:Activities')
if len(result) != 1:
print('Activities node missed')
return
import json
output = []
for competence in result[0]:
tech = False
if competence.has_attr('tech') and competence.get_attr('tech').lower() == 'yes':
tech = True
project = False
if competence.has_attr('project') and competence.get_attr('project').lower() == 'yes':
project = True
roles = '-'
if competence.has_attr('roles'):
roles = competence.get_attr('roles')
confirm = ''
if competence.has_attr('confirm'):
confirm = competence.get_attr('confirm')
employee = False
if competence.has_attr('employee') and competence.get_attr('employee').lower() == 'yes':
employee = True
# print("%s [tech: %s, project: %s, roles: %s]" % (competence.get_title(), tech, project, roles))
competences = []
for competence_level in competence:
if competence_level.get_title() == 'attribute_layout':
continue
level = 0
if competence_level.has_attr('@ICON'):
if 'full-1' in competence_level.get_attr('@ICON'):
level = '1'
elif 'full-2' in competence_level.get_attr('@ICON'):
level = '2'
elif 'full-3' in competence_level.get_attr('@ICON'):
level = '3'
elif 'full-4' in competence_level.get_attr('@ICON'):
level = '4'
# print(" %s. %s" % (level, competence_level.get_title()))
competences.append({'level': level, 'desc': competence_level.get_title()})
output.append({
'title': competence.get_title(),
'tech': tech,
'project': project,
'roles': roles.split(','),
'confirm': confirm.split(','),
'competences': competences,
'employee': employee
})
print(json.dumps(output))
def __revert_spaces(s):
return s.replace('§', ' ')
def spec_command(path=None, parts=None, out=None):
check_path(path)
doc = freemind.freemind_load(path)
if out:
output = open(out, "w", encoding="utf-8")
def process_node(n, level):
if n.has_attr('@ICON') and 'button_cancel' in n.get_attr('@ICON'):
return
result = level * '#' + ' ' + n.get_title() + "\n\n"
if n.has_content():
result += n.get_content() + "\n\n"
outStr = __revert_spaces(result)
if out:
output.write(outStr)
else:
print(outStr)
def fn_list(nodes, level):
if not nodes:
return
for n in nodes:
process_node(n, level)
fn_list(n, level+1)
if parts is None:
pass
else:
for part in parts.split(';'):
nodes = nodes_select(doc, "title:%s" % part)
fn_list(nodes, 1)
# print(nodes)
if out:
output.close()
def tex_command(path):
check_path(path)
doc = open(path).read()
begin_marker = '\\begin{document}'
end_marker = '\\end{document}'
begin_pos = doc.find(begin_marker)
end_pos = doc.find(end_marker)
if begin_pos < 0 or end_pos < 0:
print('Markers not found')
return
print(doc[begin_pos+len(begin_marker):end_pos])
if __name__ == '__main__':
# shell.py search --path=Goals.mm --icon=stop-sign // nodes on hold
# shell.py search --path=Goals.mm --icon=yes // important nodes
# shell.py search --path=Goals.mm // all nodes
# shell.py group --path=Goals.mm --by=resource // actual tasks by resource
# shell.py group --path=Goals.mm --by=expired // expired tasks
# mm todo --path=Goals.mm --filter=not-assigned --group=Assigned
# mm todo --path=Goals.mm --filter=expired
# mm traverse --path=tests\Test.mm --select="title:New Mindmap" --format="title @Assigned"
# node = freemind.FreeMindNode(None)
# node.set_title('test')
# node.add_attr('@ICON', 'stop-sign')
# print(should_filter(node, ['icon-stop-sign']))
# goals_command('Goals.mm', select='title:Ilya Levoshko (QA)', filter='icon-button_ok,icon-stop-sign', description='no')
# todo_command('Goals.mm', select='expired', filter='icon-button_ok')
# todo_command('Goals.mm', select='assigned:@Sheremetov', filter='icon-button_ok')
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='assigned', filter='icon-button_ok')
# exit()
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='id:363b6bf92c5df3e2dc30043f1212d103')
# stat_command(PATH)
# nodes = query_nodes(PATH, 'icon-button_ok', '')
# print(len(nodes))
# nodes = freemind.freemind_load('Goals.mm')
# todo_command('tests/Test.mm')
# result = freemind.freemind_load('D:/temp/presale/impesa/test.mm')
# freemind.traverse_with_level(result, lambda n, l: print((l-1) * ' ' + format_node(n, '{title}')))
# traverse_command('tests/Test.mm', select='title:New Mindmap', filter='', format='title {@Assigned}')
# traverse_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm", select='title:Screens', filter='icon-stop-sign', format='title,{@estimate},{@estimate-res}')
# print([i.get_title() for i in query_nodes("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")])
# freemind.traverse(result, lambda n: print(format_node(n, 'title')))
# estimate_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")
# estimate_command("tests/Test.mm")
# spec_command('D:/temp/presale/impesa/Impesa.mm', 'Web application functionality')
# display_command('D:/temp/presale/impesa/impesa.mm')
# tex_command('D:/temp/presale/impesa/impesa.tex')
# exit()
# PATH = '/home/denis/Dropbox/Onix/skills-matrix-v2.mm'
# competences_command(PATH)
# exit()
# python3 shell.py competences --path=/home/denis/Dropbox/Onix/skills-matrix-v2.mm > /var/www/hrm/web/code/webroot/competences.json
# result = query_nodes('tests/TestFP.mm', select='id:ID_1232863674')
# pprint(result[0][0])
# for i in result[0]:
# pprint(i)
# print('--')
# exit()
from commandliner import commandliner
commandliner(locals())
| grandparent = node.get_parent().get_parent().get_title() | conditional_block |
shell.py | # -*- coding: utf-8 -*-
# import os
# os.chdir('D:/Denis/python/freemind-tools')
import sys
# sys.path.append('D:/Denis/python/freemind-tools')
import os.path
import re
from datetime import date, datetime
from pprint import pprint
import freemind
BTN_OK = 'button_ok'
BTN_STOP = 'stop-sign'
BTN_CANCEL = 'button_cancel'
# def print_as_tree(node, level):
# for key in node.keys():
# if key == '@TEXT':
# print(level * ' ' + node[key])
| print(level * ' ' + nodes.get_title())
if nodes.has_content():
content = nodes.get_content()
l = level + 1
print(l * ' ' + '---')
delim = l * ' '
print(delim + ('\n' + delim).join(content.split("\n")))
print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
display_nodes(node, level + 1)
def display_command(path=None):
result = query_nodes(path)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
display_nodes(result)
def check_path(path=None):
if path is None or not os.path.isfile(path):
print("Please define valid path")
exit()
def dict_set(props, name, value):
if name in props:
props[name].append(value)
else:
props.setdefault(name, [value])
def node_is_expired(node):
return node.has_attr('Due') and node.get_attr('Due') < datetime.now()
def format_icon(icon):
if icon == BTN_OK:
return 'DONE'
elif icon == BTN_STOP:
return 'STOP'
elif icon == BTN_CANCEL:
return 'CANCEL'
else:
return icon
def format_icons(icons):
if icons is None:
return []
return [format_icon(i) for i in icons]
def full_node_path(fullNodePath, node):
return (full_node_path(fullNodePath, node.get_parent()) if
node.get_parent() is not None and node.get_parent().get_title() != 'root' else fullNodePath) + '/' + node.get_title()
def format_node(node: freemind.FreeMindNode, format="flag parent / title {attrs} icon") -> str:
"""
Format: parent, flag, title, icon, attrs
:param node: some node
:param format: use format
:return: formatted string
"""
attrs = []
for i in ["Due", "Start"]:
if node.has_attr(i):
attrs.append("%s: %s" % (i, date.strftime(node.get_attr(i), "%d.%m.%Y")))
if node.has_attr("Assigned"):
attrs.append(node.get_attr("Assigned"))
flag = ''
if node_is_expired(node):
flag += '[EXPIRED] '
parent = ''
grandparent = ''
fullnodepath = full_node_path('', node)
if node.get_parent() is not None:
parent = node.get_parent().get_title()
if node.get_parent().get_parent() is not None:
grandparent = node.get_parent().get_parent().get_title()
icon = ", ".join(format_icons(node.get_attr('@ICON')))
if node.has_content():
content = "--\n%s\n--\n" % node.get_content()
else:
content = ''
# return "%s%s / %s {%s} %s" % (flag, parent, node.get_title(), ", ".join(attrs), node.get_attr('@ICON'))
# print(format.replace('icon', node.get_attr('@ICON')))
attr_pattrn = re.compile("{@([\w\-]+)}")
return attr_pattrn.sub(lambda m: node.get_attr(m.group(1)), format) \
.replace('{parent}', parent) \
.replace('{grandparent}', grandparent) \
.replace('{fullnodepath}', fullnodepath) \
.replace('{flag}', flag) \
.replace('{title}', node.get_title()) \
.replace('{attrs}', ", ".join(attrs)) \
.replace('{icon}', icon)\
.replace('{content}', content)
def match_condition(node, filter_rules):
# if len(filter_rules) == 1 and filter_rules[0] == '':
# return True
conditions = []
for i in filter_rules:
if i == '': # if empty condition passed
conditions.append(False)
elif i == '*':
conditions.append(True)
elif i[:5] == 'title' and node.get_title() == i[6:]:
conditions.append(True)
elif i == 'root' and node.get_parent() is None:
conditions.append(True)
elif i[:3] == 'id:' and node.has_attr("@ID") and i[3:] == node.get_attr("@ID"):
conditions.append(True)
elif i[:4] == 'icon' and node.has_attr("@ICON") and i[5:] in node.get_attr("@ICON"):
conditions.append(True)
elif i[:9] == 'has-attr:' and node.has_attr(i[9:]):
conditions.append(True)
elif i[:11] == 'hasnt-attr:' and not node.has_attr(i[11:]):
conditions.append(True)
elif i[:5] == '!icon' and node.has_attr("@ICON") and i[6:] not in node.get_attr("@ICON"):
conditions.append(True)
elif i == 'expired' and node_is_expired(node):
conditions.append(True)
elif i == 'not-expired' and not node_is_expired(node):
conditions.append(True)
elif i.lower() == 'not-assigned' and not node.has_attr("Assigned"):
conditions.append(True)
elif i.lower()[:8] == 'assigned' and node.has_attr("Assigned"):
if len(i) == 8:
conditions.append(True) # just assigned
elif node.get_attr("Assigned") == i[9:]:
conditions.append(True) # assigned and has passed assignee
else:
conditions.append(False)
# TODO : simplify it later
# print(node)
# print(filter_rules, conditions)
return len([i for i in conditions if i]) > 0
def nodes_select(doc, rules: str):
if rules == '':
return doc
conditions = rules.split(',')
return [i for i in freemind.traverse(doc, lambda n: match_condition(n, conditions))]
def nodes_filter(nodes: list, rules: str):
if rules == '':
return nodes
conditions = rules.split(',')
return [i for i in nodes if not match_condition(i, conditions)]
def query_nodes(path=None, select='', filter=''):
check_path(path)
doc = freemind.freemind_load(path)
result = nodes_select(doc, select)
result = nodes_filter(result, filter)
return result
def todo_command(path=None, select='', filter='', group='', format='flag title {attrs} icon'):
result = query_nodes(path, select=select, filter=filter)
# for i in result:
# print(i)
# exit()
if group == '':
for i in result:
print(format_node(i, format=format))
else:
groups = {}
for i in result:
if i.has_attr(group):
dict_set(groups, i.get_attr(group), i)
else:
dict_set(groups, 'None', i)
for i in sorted(groups.keys()):
# sys.stdout.buffer.write(i)
# print(i)
print(i)
print("\n".join(["\t%s" % format_node(i, format=format) for i in sorted(groups[i], key=lambda x: x.get_title())]))
def process_goals(nodes:list, filter:list, level=0, format='flag title {attrs} icon'):
if type(nodes) is freemind.FreeMindNode:
print(level * ' ' + format_node(nodes, format=format))
# if nodes.has_content():
# content = nodes.get_content()
# l = level + 1
# if description == 'yes':
# print(l * ' ' + '---')
# delim = l * ' '
# print(delim + ('\n' + delim).join(content.split("\n")))
# print(l * ' ' + '---')
if len(nodes) > 0:
for node in nodes:
if not match_condition(node, filter):
process_goals(node, filter, level + 1, format=format)
def goals_command(path=None, select='', filter='', format='flag title {attrs} icon'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter.split(','), format=format)
def stat_command(path=None):
check_path(path)
print("Goals count: %s" % len(query_nodes(path, '*', '')))
print("Goals done: %s" % len(query_nodes(path, 'icon-%s' % BTN_OK, '')))
print("Goals canceled: %s" % len(query_nodes(path, 'icon-%s' % BTN_CANCEL, '')))
print("Goals stoped: %s" % len(query_nodes(path, 'icon-%s' % BTN_STOP, '')))
node = freemind.freemind_load(path)
print("Goals in progress: %s" % len(freemind.select_bottom(node)))
def traverse_command(path=None, select='', filter='', format='title'):
result = query_nodes(path, select, filter)
if not result:
print("%s not found" % select)
else:
# then process only first node cause we don't expect find more
process_goals(result, filter=filter.split(','), format=format)
def questions_command(path=None):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'counter'):
fn_list.counter = 1
if n.has_attr('@ICON') and 'help' in n.get_attr('@ICON'):
print("%s. %s\n %s\n" % (fn_list.counter, n.get_title(), n.get_content()))
fn_list.counter += 1
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
def estimate_command(path=None, format=' - {grandparent}/{parent}/{title}, {@estimate}h'):
check_path(path)
def fn_list(n):
if not hasattr(fn_list, 'result'):
fn_list.result = []
fn_list.total = 0
if n.has_attr('estimate'):
if n.has_attr('@ICON') and BTN_STOP in n.get_attr('@ICON'):
return
fn_list.result.append(format_node(n, format))
fn_list.total += float(n.get_attr('estimate'))
result = freemind.freemind_load(path)
freemind.traverse(result, fn_list)
[print(i) for i in sorted(fn_list.result)]
print("\nTotal: %sh" % fn_list.total)
def competences_command(path=None):
check_path(path)
result = query_nodes(path, select='title:Activities')
if len(result) != 1:
print('Activities node missed')
return
import json
output = []
for competence in result[0]:
tech = False
if competence.has_attr('tech') and competence.get_attr('tech').lower() == 'yes':
tech = True
project = False
if competence.has_attr('project') and competence.get_attr('project').lower() == 'yes':
project = True
roles = '-'
if competence.has_attr('roles'):
roles = competence.get_attr('roles')
confirm = ''
if competence.has_attr('confirm'):
confirm = competence.get_attr('confirm')
employee = False
if competence.has_attr('employee') and competence.get_attr('employee').lower() == 'yes':
employee = True
# print("%s [tech: %s, project: %s, roles: %s]" % (competence.get_title(), tech, project, roles))
competences = []
for competence_level in competence:
if competence_level.get_title() == 'attribute_layout':
continue
level = 0
if competence_level.has_attr('@ICON'):
if 'full-1' in competence_level.get_attr('@ICON'):
level = '1'
elif 'full-2' in competence_level.get_attr('@ICON'):
level = '2'
elif 'full-3' in competence_level.get_attr('@ICON'):
level = '3'
elif 'full-4' in competence_level.get_attr('@ICON'):
level = '4'
# print(" %s. %s" % (level, competence_level.get_title()))
competences.append({'level': level, 'desc': competence_level.get_title()})
output.append({
'title': competence.get_title(),
'tech': tech,
'project': project,
'roles': roles.split(','),
'confirm': confirm.split(','),
'competences': competences,
'employee': employee
})
print(json.dumps(output))
def __revert_spaces(s):
return s.replace('§', ' ')
def spec_command(path=None, parts=None, out=None):
check_path(path)
doc = freemind.freemind_load(path)
if out:
output = open(out, "w", encoding="utf-8")
def process_node(n, level):
if n.has_attr('@ICON') and 'button_cancel' in n.get_attr('@ICON'):
return
result = level * '#' + ' ' + n.get_title() + "\n\n"
if n.has_content():
result += n.get_content() + "\n\n"
outStr = __revert_spaces(result)
if out:
output.write(outStr)
else:
print(outStr)
def fn_list(nodes, level):
if not nodes:
return
for n in nodes:
process_node(n, level)
fn_list(n, level+1)
if parts is None:
pass
else:
for part in parts.split(';'):
nodes = nodes_select(doc, "title:%s" % part)
fn_list(nodes, 1)
# print(nodes)
if out:
output.close()
def tex_command(path):
check_path(path)
doc = open(path).read()
begin_marker = '\\begin{document}'
end_marker = '\\end{document}'
begin_pos = doc.find(begin_marker)
end_pos = doc.find(end_marker)
if begin_pos < 0 or end_pos < 0:
print('Markers not found')
return
print(doc[begin_pos+len(begin_marker):end_pos])
if __name__ == '__main__':
# shell.py search --path=Goals.mm --icon=stop-sign // nodes on hold
# shell.py search --path=Goals.mm --icon=yes // important nodes
# shell.py search --path=Goals.mm // all nodes
# shell.py group --path=Goals.mm --by=resource // actual tasks by resource
# shell.py group --path=Goals.mm --by=expired // expired tasks
# mm todo --path=Goals.mm --filter=not-assigned --group=Assigned
# mm todo --path=Goals.mm --filter=expired
# mm traverse --path=tests\Test.mm --select="title:New Mindmap" --format="title @Assigned"
# node = freemind.FreeMindNode(None)
# node.set_title('test')
# node.add_attr('@ICON', 'stop-sign')
# print(should_filter(node, ['icon-stop-sign']))
# goals_command('Goals.mm', select='title:Ilya Levoshko (QA)', filter='icon-button_ok,icon-stop-sign', description='no')
# todo_command('Goals.mm', select='expired', filter='icon-button_ok')
# todo_command('Goals.mm', select='assigned:@Sheremetov', filter='icon-button_ok')
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='assigned', filter='icon-button_ok')
# exit()
# todo_command('D:\Denis\python\onixteam\Goals.mm', select='id:363b6bf92c5df3e2dc30043f1212d103')
# stat_command(PATH)
# nodes = query_nodes(PATH, 'icon-button_ok', '')
# print(len(nodes))
# nodes = freemind.freemind_load('Goals.mm')
# todo_command('tests/Test.mm')
# result = freemind.freemind_load('D:/temp/presale/impesa/test.mm')
# freemind.traverse_with_level(result, lambda n, l: print((l-1) * ' ' + format_node(n, '{title}')))
# traverse_command('tests/Test.mm', select='title:New Mindmap', filter='', format='title {@Assigned}')
# traverse_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm", select='title:Screens', filter='icon-stop-sign', format='title,{@estimate},{@estimate-res}')
# print([i.get_title() for i in query_nodes("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")])
# freemind.traverse(result, lambda n: print(format_node(n, 'title')))
# estimate_command("D:/Dropbox/onix/clients/UpMost/UpMostLanding.mm")
# estimate_command("tests/Test.mm")
# spec_command('D:/temp/presale/impesa/Impesa.mm', 'Web application functionality')
# display_command('D:/temp/presale/impesa/impesa.mm')
# tex_command('D:/temp/presale/impesa/impesa.tex')
# exit()
# PATH = '/home/denis/Dropbox/Onix/skills-matrix-v2.mm'
# competences_command(PATH)
# exit()
# python3 shell.py competences --path=/home/denis/Dropbox/Onix/skills-matrix-v2.mm > /var/www/hrm/web/code/webroot/competences.json
# result = query_nodes('tests/TestFP.mm', select='id:ID_1232863674')
# pprint(result[0][0])
# for i in result[0]:
# pprint(i)
# print('--')
# exit()
from commandliner import commandliner
commandliner(locals()) | def display_nodes(nodes:list, level=0):
if type(nodes) is freemind.FreeMindNode: | random_line_split |
script2.js | $(document).ready(function(){
// Fonction d'ajustement
function ajustResp(){
// Ajustement pour le responsive
var largeur_fen = $(window).width()-200; // Largeur de la fenêtre
var hauteur_fen = $("#size").height();
var largeur_el = $(".note").width(); // Largeur d'une note
var hauteur_el = $(".note").height(); // Hauteur d'une note
var marge_left = 42; // Marge intérieur fixe
var marge_top = 59; // Menu supérieur + marge fixe
var nb_col = Math.round((largeur_fen+marge_left)/largeur_el); // Nombre de colonnes affichable
var nb_noteTotale = Object.keys(tableJson).length; // Nombre de notes totales
var nb_note_par_ligne = Math.ceil(nb_noteTotale / nb_col)+1;
reposElement(nb_col,nb_note_par_ligne);
function reposElement(nbCol,nb_note_par_ligne){
// Repositionnement des éléments
var nb = 0;
var sel = ".note."+nb; // Initialise
var numCol = 1;
var nb_ligneEncours = 0;
var coordLeft = marge_left+"px";
var marge_top = 276;
var coordTop = marge_top+"px";
// Variables maj tableau
var indice = 0;
while ($(sel).length > 0) { // Tant que l'on a une note
if(nb >= Object.keys(tableJson).length)
numCol = "End";
if(numCol == 1){
$(sel).css('left', coordLeft); // Première colonne
$(sel).css('top', coordTop); // Ligne suivante
// Maj tableau
indice = $(sel)[0].classList[1];
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[indice]={"posL":posL,"posT":posT};
marge_top = marge_top+219;
coordTop = marge_top+"px";
nb_ligneEncours++;
if(nbCol == 1){
if(nb_ligneEncours == nb_note_par_ligne){
numCol=2;
nb_ligneEncours = nb_note_par_ligne; // Réinitialise le nombre de ligne
}
}else{
if(nb_ligneEncours == nb_note_par_ligne-1){
numCol=2;
nb_ligneEncours = nb_note_par_ligne; // Réinitialise le nombre de ligne
}
}
}else if(numCol == "End"){
break;
}else{
if(nb_ligneEncours == nb_note_par_ligne){
marge_left = marge_left+304; // Incrémentation
coordLeft = marge_left+"px";
marge_top = 57; // Réinitialisation
coordTop = marge_top+"px";
nb_ligneEncours=0;
}else{
marge_top = marge_top+219;
coordTop = marge_top+"px";
}
$(sel).css('left', coordLeft); // Colonne suivante
$(sel).css('top', coordTop); // Ligne suivante
// Maj tableau
indice = $(sel)[0].classList[1];
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[indice]={"posL":posL,"posT":posT};
nb_ligneEncours++;
}
nb++;
sel = ".note."+nb; // On maj
}
}
}
// ajustResp();
// On calcule le nombre de note affichable
$(window).resize(function(event) {
ajustResp();
});
$(".note").draggable();
// // Fonction qui récupére les positions // //
function majPos(redirect){
var nb = 0;
var posL = 0;
var posT = 0;
var tableJson = {};
var sel = ".note."+nb; // Initialise
var redirect = redirect+".php";
while ($(sel).length > 0) { // Tant que l'on a une note
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[nb]={"posL":posL,"posT":posT};
nb++; // On icrémente
sel = ".note."+nb; // On maj
}
$.ajax({
url: 'traitement/saveJson.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson}
})
.done(function() {
window.location = redirect;
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction qui sauvegarde la position d'une note
function savePos(element){
var indice = element[0].classList[1];
$.ajax({
url: 'traitement/saveOne.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson,
indice: indice}
})
.done(function() {
console.log("done");
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction qui sauvegarde la position d'une autre note
function savePos2(indice){
$.ajax | .click(function(event) { // deconnexion
majPos("traitement/deco");
});
$('.note').mousedown(function(event) { // Lorsqu'on presse l'élément
$(this).css('background-color', '#bfb172');
$(this).css('z-index', 1000);
});
// Gestion du positionnement auto
$('.note').mouseup(function(event) { // Lorsqu'on relâche l'élément
// Initialisation sauvegarde
var boolSave = true;
$(this).css('background-color', '#b5a768');
// Capture de la position initiale d'un élément
var str = $(this).attr('class');
var reg = new RegExp(" ", "g");
var tableau = str.split(reg);
var indice = tableau[1];
iniLeft = tableJson[indice].posL;
iniTop = tableJson[indice].posT;
// Position de l'élément relâché
var leftE = $(this)[0].offsetLeft;
var topE = $(this)[0].offsetTop;
if((iniLeft != leftE || iniTop != topE)) // Si les positions sont différentes de celles initiales
boolSave = true;
else
boolSave = false;
// Ligne et colonne les plus proches
var posX = Math.round(parseFloat(leftE) / 304);
var posY = Math.round(parseFloat(topE) / 219);
if (posX == 0){
var xE = 42;
}else{
var xE = (304*posX)+42;
}
if(posY == 0){
var yE = 57;
}else {
var yE = (219*posY)+57;
}
// vérifie la présence d'un élément
var ok = true;
for (var i = 0; i < Object.keys(tableJson).length; i++) {
// Si un élément est déjà la
if(tableJson[i].posL == xE && tableJson[i].posT == yE){
ok = false;
break;
}
}
if(ok){ // Si ok on positionne l'élément et on met à jour ces coordonnées
tableJson[indice].posL = xE;
tableJson[indice].posT = yE;
$(this).css({
left: xE,
top: yE
});
}else{ // Si pas ok on inverse la position des éléments et on met à jour leurs coordonnées
boolSave=false;
tableJson[indice].posL = xE;
tableJson[indice].posT = yE;
tableJson[i].posL = iniLeft;
tableJson[i].posT = iniTop;
$(this).css({
left: xE,
top: yE
});
savePos($(this));
$('.note.'+i).css({
left: iniLeft,
top: iniTop
});
savePos2(i);
}
$(this).css('z-index', '');
// Enregistrement du positionnement
if(boolSave){
savePos($(this));
}
});
// Gestion de la bulle contenu
$('.plus').click(function(event) {
// On récupère le note number
var str = $(this).parents()[1][2].parentElement.parentNode.className;
str = str.replace("grbt ", "");
$('#bulle form input[type="hidden"]').val(str);
// On efface les éléments parents
$(this).siblings().css('display', 'none');
// On affiche la bulle
var leftE = $(this).parents()[2].offsetLeft;
var topE = $(this).parents()[2].offsetTop;
// On la positionne
$('#bulle').css({
left: leftE,
top: topE,
display: 'block'
});
});
// Fermeture de la bulle (Lecture note)
$('#img2').click(function(event) {
// On réaffiche les éléments parent
$("h5").css('display', 'block');
$(".point").css('display', 'block');
// On la positionne
$('#bulle').css({
left: '',
top: '',
display: 'none'
});
});
// Gestion du code pin
$("form").submit(function(e){ // Dès que l'on soumet un formulaire
e.preventDefault(); // On stoppe le comportement par défaut
if(e.target.id == "formB"){
$.post("traitement/validation.php",$(this).serialize(),function(data){
var texte = JSON.parse(data).response;
if(texte == "correct"){
$("#error").css('display', 'none');
var description = JSON.parse(data).desc; // On récupère la description
var login = JSON.parse(data).login; // On récupère le login
var pass = JSON.parse(data).pass; // Le mot de passe
var idNote = JSON.parse(data).note; // L'id de la note
$('#bulle').css({
left: '',
top: '',
display: 'none'
});
$('.note .'+idNote)[0].parentElement[0].value = login;
$('.note .'+idNote)[0].parentElement[0].disabled = "";
$('.note .'+idNote)[0].parentElement[1].value = pass;
$('.note .'+idNote)[0].parentElement[1].disabled = "";
$('.note .'+idNote)[0].parentElement[2].parentElement.parentElement.style.display = "block";
// On réaffiche les éléments parent
$("h5").css('display', 'block');
$(".point").css('display', 'block');
$('.note .'+idNote)["0"].parentElement.parentElement.childNodes[1].firstElementChild.childNodes[3].style.display = "none";
$('.'+idNote+' h5').replaceWith("<div class=\"form-group\"><div class=\"col-sm-9 col-sm-offset-1\"> <input id=\"descMod\" onchange=\"this.value = this.value.charAt(0).toUpperCase() + this.value.substr(1);\" class=\"form-control\" type=\"text\" name=\"descMod\" value=\""+description+"\" /></div></div></div>");
}else if (texte == "incorrectNomb") {
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else if (texte == "empty") {
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}else if (texte == "incorrectCode") {
$("#error").css('display', 'block');
$("#error").html("Le code que vous avez saisi n'est pas valide !");
}
});
}
});
// Création d'une nouvelle note
var nbNote = 0;
$('#newF').click(function(event) {
if(nbNote == 0){
var num = 0; // On intitialise
var trouve = false;
// On recherche le numéro a généré
for (var i = 0; i < Object.keys(tableJson).length; i++) {
if (tableJson[i] == undefined) {
num = i;
trouve = true;
}
}
if(!trouve) // Dernier numéro
num = i;
// On récupère le bloc du proto
var container = $('#blocnote');
// On prépare le prototype
var prototype = $(container.attr('dataprototype').replace('numM', num).replace('grbt numM', 'grbt '+num));
// On le positionne
$('body').prepend(prototype);
// On active le button
$('.grbt.'+num).css('display', 'block');
// On met à jour le tableau
var leftNE = $('.note .'+num)[0].offsetParent.offsetLeft;
var topNE = $('.note .'+num)[0].offsetParent.offsetTop;
// On désactive le bouton new
$("#newF").prop('disabled', 'true');
tableJson[num]={"posL":leftNE,"posT":topNE};
$('#numCache').attr('value', num);
nbNote++;
}
// Enregistrement de la nouvelle note
$('form').submit(function(event) {
event.preventDefault(); // On stoppe le comportement habituel
if(event.target.id == "formN"){
$.post("traitement/enregistrement.php",$(this).serialize(),function(data){
var texte = data;
if(texte == "ok"){
$("#error2").css('display', 'none');
majPos("main");
window.location = "main.php";
ajustResp();
}else if (texte == "emptyDes") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir une description !");
}else if (texte == "emptyLog") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir un login !");
}else if (texte == "emptyPass") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir un mot de passe !");
}else {
$("#error2").css('display', 'block');
$("#error2").html("Les champs sont obligatoires !");
}
});
}
});
// Annulation de la nouvelle note
$("#reset").click(function(event) {
// On réactive le bouton new
$("#newF").removeProp('disabled');
$("#formN").parent().remove();
nbNote=0;
});
});
// Fonction de mise à jour des numéros de notes
function saveNewIndice(redirect){
// Lien traitement de redirection
var redirect = redirect+".php";
$.ajax({
url: 'traitement/saveNumNote.php',
type: 'POST',
dataType: 'text',
})
.done(function(text) {
window.location = redirect;
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction de réinitialisation du tableau json
function reiniTable(numNote){
var newtableJson = {};
// On parcourt le tableau
$.each(tableJson, function(numCours,objet) {
// Si le num est différent de celui demandé
if(numCours != numNote)
newtableJson[numCours]={"posL":objet.posL,"posT":objet.posT};
});
// On reconstruit le tableau à partir du nouveau exempt du num à supprimer
var newNum = 0;
tableJson = {};
$.each(newtableJson, function(ind,objet){
tableJson[newNum]={"posL":objet.posL,"posT":objet.posT};
newNum++;
});
}
// Suppression d'une note
$(".sup").click(function() {
var id = $(this).parent().children()[1].value; // On récupére l'id de la note à supprimer
var numNote = $(this).parent().parent()[0].className;
numNote = numNote.replace("grbt ", ""); // Le numéro de la note
if(confirm("Confirmer-vous la suppression de cette note ?")){
$.post("traitement/suppression.php",{IdNote :id}, function(data) {
if(data == "ok"){
// On réinitialise le tableau json
reiniTable(numNote);
// On sauvegarde puis on recharge la page
saveNewIndice("traitement/reload");
}else{
alert("Erreur lors de la suppression");
}
});
}
});
// Mise à jour d'une note
$(".modif").click(function() {
$('form').submit(function(event) {
event.preventDefault(); // On stoppe le comportement habituel
$.post("traitement/modification.php",$(this).serialize(), function(data) {
if(data == "ok"){
window.location = "main.php";
}else{
alert(data);
}
});
});
});
// Réinitialisation de la bulle
function reiniBulle(){
$('#libCode').html("Validation du code");
$('.prov').removeAttr('id');
$('.prov').attr('id', 'img2');
$('#img2').removeClass('prov');
$("#formMod").removeAttr('id'); // Supprime l'id actuelle
$("#bulle form").attr('id','formB'); // On l'a redéfini
$("#chCode").removeAttr('placeholder');
$("#chCode").val("");
$("#chCode").attr('placeholder','Code pin...');
$("#valideCode").attr('value','Ok');
$("#bulle").hide();
$("#error").css('display', 'none');
}
// Mise à jour du code
$("#modC").click(function() {
// Affichage de la bulle de vérification
$("#bulle").show('slow/400/fast', function() {
$(this).css({
top: '57px',
left: '42px'
}); // Correction du positionnement
$("#formB").removeAttr('id'); // Supprime l'id précédente
$("#bulle form").attr('id','formNote'); // On l'a redéfini
});
// Fermeture de la bulle (Modification code)
$('#img2Bis').click(function() {
reiniBulle();
});
$("form").submit(function(e){ // Dès que l'on soumet le formulaire
e.preventDefault(); // On stoppe le comportement habituel
if(e.target.id == "formNote"){ // Validation du code d'accès
$.post("traitement/verification.php",$(this).serialize()+"&valideC=Ok",function(data){
if(data == "ok"){
$("#error").css('display', 'none');
// Traitement de la bulle
$('#libCode').html("Modification du code");
$('#img2').attr('class', 'prov');
$('#img2').removeAttr('id');
$('.prov').attr('id', 'img2Bis');
$("#formNote").removeAttr('id'); // Supprime l'id actuelle
$("#bulle form").attr('id','formMod'); // On l'a redéfini
$("#valideCode").attr('value','Modifier');
$("#chCode").removeAttr('placeholder');
$("#chCode").attr('placeholder','Nouveau code pin...');
$("#chCode").val("");
}else if(data == "pas ok"){
$("#error").css('display', 'block');
$("#error").html("Le code que vous avez saisi n'est pas valide !");
}else if(data == "code tpg"){
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else{
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}
});
}else if(e.target.id == "formMod"){ // Enregistrement de la modification
$.post("traitement/verification.php",$(this).serialize()+"&valideC=Modifier",function(data){
$("#error").html("");
$("#error").css('display', 'none');
if(data == "ok"){
$("#error").css('display', 'block');
$("#error").html("Modification effectuée avec succès");
setTimeout(function(){
reiniBulle(); // On réinitialise la bulle
}, 1000);
}else if(data == "pas ok"){
$("#error").css('display', 'block');
$("#error").html("Erreur lors de la mise à jour");
}else if(data == "code tpg"){
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else{
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}
});
}
});
});
}); | ({
url: 'traitement/saveOne.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson,
indice: indice}
})
.done(function() {
console.log("done");
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
$("#deco") | identifier_body |
script2.js | $(document).ready(function(){
// Fonction d'ajustement
function ajustResp(){
// Ajustement pour le responsive
var largeur_fen = $(window).width()-200; // Largeur de la fenêtre
var hauteur_fen = $("#size").height();
var largeur_el = $(".note").width(); // Largeur d'une note
var hauteur_el = $(".note").height(); // Hauteur d'une note
var marge_left = 42; // Marge intérieur fixe
var marge_top = 59; // Menu supérieur + marge fixe
var nb_col = Math.round((largeur_fen+marge_left)/largeur_el); // Nombre de colonnes affichable
var nb_noteTotale = Object.keys(tableJson).length; // Nombre de notes totales
var nb_note_par_ligne = Math.ceil(nb_noteTotale / nb_col)+1;
reposElement(nb_col,nb_note_par_ligne);
function reposElement(nbCol,nb_note_par_ligne){
// Repositionnement des éléments
var nb = 0;
var sel = ".note."+nb; // Initialise
var numCol = 1;
var nb_ligneEncours = 0;
var coordLeft = marge_left+"px";
var marge_top = 276;
var coordTop = marge_top+"px";
// Variables maj tableau
var indice = 0;
while ($(sel).length > 0) { // Tant que l'on a une note
if(nb >= Object.keys(tableJson).length)
numCol = "End";
if(numCol == 1){
$(sel).css('left', coordLeft); // Première colonne
$(sel).css('top', coordTop); // Ligne suivante
// Maj tableau
indice = $(sel)[0].classList[1];
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[indice]={"posL":posL,"posT":posT};
marge_top = marge_top+219;
coordTop = marge_top+"px";
nb_ligneEncours++;
if(nbCol == 1){
if(nb_ligneEncours == nb_note_par_ligne){
numCol=2;
nb_ligneEncours = nb_note_par_ligne; // Réinitialise le nombre de ligne
}
}else{
if(nb_ligneEncours == nb_note_par_ligne-1){
numCol=2;
nb_ligneEncours = nb_note_par_ligne; // Réinitialise le nombre de ligne
}
}
}else if(numCol == "End"){
break;
}else{
if(nb_ligneEncours == nb_note_par_ligne){
marge_left = marge_left+304; // Incrémentation
coordLeft = marge_left+"px";
marge_top = 57; // Réinitialisation
coordTop = marge_top+"px";
nb_ligneEncours=0;
}else{
marge_top = marge_top+219;
coordTop = marge_top+"px";
}
$(sel).css('left', coordLeft); // Colonne suivante
$(sel).css('top', coordTop); // Ligne suivante
// Maj tableau
indice = $(sel)[0].classList[1];
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[indice]={"posL":posL,"posT":posT};
nb_ligneEncours++;
}
nb++;
sel = ".note."+nb; // On maj
}
}
}
// ajustResp();
// On calcule le nombre de note affichable
$(window).resize(function(event) {
ajustResp();
});
$(".note").draggable();
// // Fonction qui récupére les positions // //
function majPos(redirect){
var nb = 0;
var posL = 0;
var posT = 0;
var tableJson = {};
var sel = ".note."+nb; // Initialise
var redirect = redirect+".php";
while ($(sel).length > 0) { // Tant que l'on a une note
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[nb]={"posL":posL,"posT":posT};
nb++; // On icrémente
sel = ".note."+nb; // On maj
}
$.ajax({
url: 'traitement/saveJson.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson}
})
.done(function() {
window.location = redirect;
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction qui sauvegarde la position d'une note
function savePos(element){
var indice = element[0].classList[1];
$.ajax({
url: 'traitement/saveOne.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson,
indice: indice}
})
.done(function() {
console.log("done");
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction qui sauvegarde la position d'une autre note
function savePos2(indice){
$.ajax({
url: 'traitement/saveOne.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson,
indice: indice}
})
.done(function() {
console.log("done");
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
$("#deco").click(function(event) { // deconnexion
majPos("traitement/deco");
});
$('.note').mousedown(function(event) { // Lorsqu'on presse l'élément
$(this).css('background-color', '#bfb172');
$(this).css('z-index', 1000);
});
// Gestion du positionnement auto
$('.note').mouseup(function(event) { // Lorsqu'on relâche l'élément
// Initialisation sauvegarde
var boolSave = true;
$(this).css('background-color', '#b5a768');
// Capture de la position initiale d'un élément
var str = $(this).attr('class');
var reg = new RegExp(" ", "g");
var tableau = str.split(reg);
var indice = tableau[1];
iniLeft = tableJson[indice].posL;
iniTop = tableJson[indice].posT;
// Position de l'élément relâché
var leftE = $(this)[0].offsetLeft;
var topE = $(this)[0].offsetTop;
if((iniLeft != leftE || iniTop != topE)) // Si les positions sont différentes de celles initiales
boolSave = true;
else
boolSave = false;
// Ligne et colonne les plus proches
var posX = Math.round(parseFloat(leftE) / 304);
var posY = Math.round(parseFloat(topE) / 219);
if (posX == 0){
var xE = 42;
}else{
var xE = (304*posX)+42;
}
if(posY == 0){
var yE = 57;
}else {
var yE = (219*posY)+57;
}
// vérifie la présence d'un élément
var ok = true;
for (var i = 0; i < Object.keys(tableJson).length; i++) {
// Si un élément est déjà la
if(tableJson[i].posL == xE && tableJson[i].posT == yE){
ok = false;
break;
}
}
if(ok){ // Si ok on positionne l'élément et on met à jour ces coordonnées
tableJson[indice].posL = xE;
tableJson[indice].posT = yE;
$(this).css({
left: xE,
top: yE
});
}else{ // Si pas ok on inverse la position des éléments et on met à jour leurs coordonnées
boolSave=false;
tableJson[indice].posL = xE;
tableJson[indice].posT = yE;
tableJson[i].posL = iniLeft;
tableJson[i].posT = iniTop;
$(this).css({
left: xE,
top: yE
});
savePos($(this));
$('.note.'+i).css({
left: iniLeft,
top: iniTop
});
savePos2(i);
}
$(this).css('z-index', '');
// Enregistrement du positionnement
if(boolSave){
savePos($(this));
}
});
// Gestion de la bulle contenu
$('.plus').click(function(event) {
// On récupère le note number
var str = $(this).parents()[1][2].parentElement.parentNode.className;
str = str.replace("grbt ", "");
$('#bulle form input[type="hidden"]').val(str);
// On efface les éléments parents
$(this).siblings().css('display', 'none');
// On affiche la bulle
var leftE = $(this).parents()[2].offsetLeft;
var topE = $(this).parents()[2].offsetTop;
// On la positionne
$('#bulle').css({
left: leftE,
top: topE,
display: 'block'
});
});
// Fermeture de la bulle (Lecture note)
$('#img2').click(function(event) {
// On réaffiche les éléments parent
$("h5").css('display', 'block');
$(".point").css('display', 'block');
// On la positionne
$('#bulle').css({
left: '',
top: '',
display: 'none'
});
});
// Gestion du code pin
$("form").submit(function(e){ // Dès que l'on soumet un formulaire
e.preventDefault(); // On stoppe le comportement par défaut
if(e.target.id == "formB"){
$.post("traitement/validation.php",$(this).serialize(),function(data){
var texte = JSON.parse(data).response;
if(texte == "correct"){
$("#error").css('display', 'none');
var description = JSON.parse(data).desc; // On récupère la description
var login = JSON.parse(data).login; // On récupère le login
var pass = JSON.parse(data).pass; // Le mot de passe
var idNote = JSON.parse(data).note; // L'id de la note
$('#bulle').css({
left: '',
top: '',
display: 'none'
});
$('.note .'+idNote)[0].parentElement[0].value = login;
$('.note .'+idNote)[0].parentElement[0].disabled = "";
$('.note .'+idNote)[0].parentElement[1].value = pass;
$('.note .'+idNote)[0].parentElement[1].disabled = "";
$('.note .'+idNote)[0].parentElement[2].parentElement.parentElement.style.display = "block";
// On réaffiche les éléments parent
$("h5").css('display', 'block');
$(".point").css('display', 'block');
$('.note .'+idNote)["0"].parentElement.parentElement.childNodes[1].firstElementChild.childNodes[3].style.display = "none";
$('.'+idNote+' h5').replaceWith("<div class=\"form-group\"><div class=\"col-sm-9 col-sm-offset-1\"> <input id=\"descMod\" onchange=\"this.value = this.value.charAt(0).toUpperCase() + this.value.substr(1);\" class=\"form-control\" type=\"text\" name=\"descMod\" value=\""+description+"\" /></div></div></div>");
}else if (texte == "incorrectNomb") {
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else if (texte == "empty") {
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}else if (texte == "incorrectCode") {
$("#error").css('display', 'block');
$("#error").html("Le code que vous avez saisi n'est pas valide !");
}
});
}
});
// Création d'une nouvelle note
var nbNote = 0;
$('#newF').click(function(event) {
if(nbNote == 0){
var num = 0; // On intitialise
var trouve = false;
// On recherche le numéro a généré
for (var i = 0; i < Object.keys(tableJson).length; i++) {
if (tableJson[i] == undefined) {
num = i;
trouve = true;
}
}
if(!trouve) // Dernier numéro
num = i;
// On récupère le bloc du proto
var container = $('#blocnote');
// On prépare le prototype
var prototype = $(container.attr('dataprototype').replace('numM', num).replace('grbt numM', 'grbt '+num));
// On le positionne
$('body').prepend(prototype);
// On active le button
$('.grbt.'+num).css('display', 'block');
// On met à jour le tableau
var leftNE = $('.note .'+num)[0].offsetParent.offsetLeft;
var topNE = $('.note .'+num)[0].offsetParent.offsetTop;
// On désactive le bouton new
$("#newF").prop('disabled', 'true');
tableJson[num]={"posL":leftNE,"posT":topNE};
$('#numCache').attr('value', num);
nbNote++;
}
// Enregistrement de la nouvelle note
$('form').submit(function(event) {
event.preventDefault(); // On stoppe le comportement habituel
if(event.target.id == "formN"){
$.post("traitement/enregistrement.php",$(this).serialize(),function(data){
var texte = data;
if(texte == "ok"){
$("#error2").css('display', 'none');
majPos("main");
window.location = "main.php";
ajustResp();
}else if (texte == "emptyDes") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir une description !");
}else if (texte == "emptyLog") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir un login !");
}else if (texte == "emptyPass") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir un mot de passe !");
}else {
$("#error2").css('display', 'block');
$("#error2").html("Les champs sont obligatoires !");
}
});
}
});
// Annulation de la nouvelle note
$("#reset").click(function(event) {
// On réactive le bouton new
$("#newF").removeProp('disabled');
$("#formN").parent().remove();
nbNote=0;
});
});
// Fonction de mise à jour des numéros de notes
function saveNewIndice(redirect){
// Lien traitement de redirection
var redirect = redirect+".php";
$.ajax({
url: 'traitement/saveNumNote.php',
type: 'POST',
dataType: 'text',
})
.done(function(text) {
window.location = redirect;
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction de réinitialisation du tableau json
function reiniTable(numNote){
var newtableJson = {};
// On parcourt le table | ch(tableJson, function(numCours,objet) {
// Si le num est différent de celui demandé
if(numCours != numNote)
newtableJson[numCours]={"posL":objet.posL,"posT":objet.posT};
});
// On reconstruit le tableau à partir du nouveau exempt du num à supprimer
var newNum = 0;
tableJson = {};
$.each(newtableJson, function(ind,objet){
tableJson[newNum]={"posL":objet.posL,"posT":objet.posT};
newNum++;
});
}
// Suppression d'une note
$(".sup").click(function() {
var id = $(this).parent().children()[1].value; // On récupére l'id de la note à supprimer
var numNote = $(this).parent().parent()[0].className;
numNote = numNote.replace("grbt ", ""); // Le numéro de la note
if(confirm("Confirmer-vous la suppression de cette note ?")){
$.post("traitement/suppression.php",{IdNote :id}, function(data) {
if(data == "ok"){
// On réinitialise le tableau json
reiniTable(numNote);
// On sauvegarde puis on recharge la page
saveNewIndice("traitement/reload");
}else{
alert("Erreur lors de la suppression");
}
});
}
});
// Mise à jour d'une note
$(".modif").click(function() {
$('form').submit(function(event) {
event.preventDefault(); // On stoppe le comportement habituel
$.post("traitement/modification.php",$(this).serialize(), function(data) {
if(data == "ok"){
window.location = "main.php";
}else{
alert(data);
}
});
});
});
// Réinitialisation de la bulle
function reiniBulle(){
$('#libCode').html("Validation du code");
$('.prov').removeAttr('id');
$('.prov').attr('id', 'img2');
$('#img2').removeClass('prov');
$("#formMod").removeAttr('id'); // Supprime l'id actuelle
$("#bulle form").attr('id','formB'); // On l'a redéfini
$("#chCode").removeAttr('placeholder');
$("#chCode").val("");
$("#chCode").attr('placeholder','Code pin...');
$("#valideCode").attr('value','Ok');
$("#bulle").hide();
$("#error").css('display', 'none');
}
// Mise à jour du code
$("#modC").click(function() {
// Affichage de la bulle de vérification
$("#bulle").show('slow/400/fast', function() {
$(this).css({
top: '57px',
left: '42px'
}); // Correction du positionnement
$("#formB").removeAttr('id'); // Supprime l'id précédente
$("#bulle form").attr('id','formNote'); // On l'a redéfini
});
// Fermeture de la bulle (Modification code)
$('#img2Bis').click(function() {
reiniBulle();
});
$("form").submit(function(e){ // Dès que l'on soumet le formulaire
e.preventDefault(); // On stoppe le comportement habituel
if(e.target.id == "formNote"){ // Validation du code d'accès
$.post("traitement/verification.php",$(this).serialize()+"&valideC=Ok",function(data){
if(data == "ok"){
$("#error").css('display', 'none');
// Traitement de la bulle
$('#libCode').html("Modification du code");
$('#img2').attr('class', 'prov');
$('#img2').removeAttr('id');
$('.prov').attr('id', 'img2Bis');
$("#formNote").removeAttr('id'); // Supprime l'id actuelle
$("#bulle form").attr('id','formMod'); // On l'a redéfini
$("#valideCode").attr('value','Modifier');
$("#chCode").removeAttr('placeholder');
$("#chCode").attr('placeholder','Nouveau code pin...');
$("#chCode").val("");
}else if(data == "pas ok"){
$("#error").css('display', 'block');
$("#error").html("Le code que vous avez saisi n'est pas valide !");
}else if(data == "code tpg"){
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else{
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}
});
}else if(e.target.id == "formMod"){ // Enregistrement de la modification
$.post("traitement/verification.php",$(this).serialize()+"&valideC=Modifier",function(data){
$("#error").html("");
$("#error").css('display', 'none');
if(data == "ok"){
$("#error").css('display', 'block');
$("#error").html("Modification effectuée avec succès");
setTimeout(function(){
reiniBulle(); // On réinitialise la bulle
}, 1000);
}else if(data == "pas ok"){
$("#error").css('display', 'block');
$("#error").html("Erreur lors de la mise à jour");
}else if(data == "code tpg"){
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else{
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}
});
}
});
});
}); | au
$.ea | identifier_name |
script2.js | $(document).ready(function(){
// Fonction d'ajustement
function ajustResp(){
// Ajustement pour le responsive
var largeur_fen = $(window).width()-200; // Largeur de la fenêtre
var hauteur_fen = $("#size").height();
var largeur_el = $(".note").width(); // Largeur d'une note
var hauteur_el = $(".note").height(); // Hauteur d'une note
var marge_left = 42; // Marge intérieur fixe
var marge_top = 59; // Menu supérieur + marge fixe
var nb_col = Math.round((largeur_fen+marge_left)/largeur_el); // Nombre de colonnes affichable
var nb_noteTotale = Object.keys(tableJson).length; // Nombre de notes totales
var nb_note_par_ligne = Math.ceil(nb_noteTotale / nb_col)+1;
reposElement(nb_col,nb_note_par_ligne);
function reposElement(nbCol,nb_note_par_ligne){
// Repositionnement des éléments
var nb = 0;
var sel = ".note."+nb; // Initialise
var numCol = 1;
var nb_ligneEncours = 0;
var coordLeft = marge_left+"px";
var marge_top = 276;
var coordTop = marge_top+"px";
// Variables maj tableau
var indice = 0;
while ($(sel).length > 0) { // Tant que l'on a une note
if(nb >= Object.keys(tableJson).length)
numCol = "End";
if(numCol == 1){
$(sel).css('left', coordLeft); // Première colonne
$(sel).css('top', coordTop); // Ligne suivante
// Maj tableau
indice = $(sel)[0].classList[1];
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[indice]={"posL":posL,"posT":posT};
marge_top = marge_top+219;
coordTop = marge_top+"px";
nb_ligneEncours++;
if(nbCol == 1){
if(nb_ligneEncours == nb_note_par_ligne){
numCol=2;
nb_ligneEncours = nb_note_par_ligne; // Réinitialise le nombre de ligne
}
}else{
if(nb_ligneEncours == nb_note_par_ligne-1){
numCol=2;
nb_ligneEncours = nb_note_par_ligne; // Réinitialise le nombre de ligne
}
}
}else if(numCol == "End"){
break;
}else{
if(nb_ligneEncours == nb_note_par_ligne){
marge_left = marge_left+304; // Incrémentation
coordLeft = marge_left+"px";
marge_top = 57; // Réinitialisation
coordTop = marge_top+"px";
nb_ligneEncours=0;
}else{
marge_top = marge_top+219;
coordTop = marge_top+"px";
}
$(sel).css('left', coordLeft); // Colonne suivante
$(sel).css('top', coordTop); // Ligne suivante
// Maj tableau
indice = $(sel)[0].classList[1];
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[indice]={"posL":posL,"posT":posT};
nb_ligneEncours++;
}
nb++;
sel = ".note."+nb; // On maj
}
}
}
// ajustResp();
// On calcule le nombre de note affichable
$(window).resize(function(event) {
ajustResp();
});
$(".note").draggable();
// // Fonction qui récupére les positions // //
function majPos(redirect){
var nb = 0;
var posL = 0;
var posT = 0;
var tableJson = {};
var sel = ".note."+nb; // Initialise
var redirect = redirect+".php";
while ($(sel).length > 0) { // Tant que l'on a une note
posL = $(sel).position().left; // Position Left
posT = $(sel).position().top; // Position Top
tableJson[nb]={"posL":posL,"posT":posT};
nb++; // On icrémente
sel = ".note."+nb; // On maj
}
$.ajax({
url: 'traitement/saveJson.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson}
})
.done(function() {
window.location = redirect;
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction qui sauvegarde la position d'une note
function savePos(element){
var indice = element[0].classList[1];
$.ajax({
url: 'traitement/saveOne.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson,
indice: indice}
})
.done(function() {
console.log("done");
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction qui sauvegarde la position d'une autre note
function savePos2(indice){
$.ajax({
url: 'traitement/saveOne.php',
type: 'POST',
dataType: 'text',
data: {tableJson: tableJson,
indice: indice}
})
.done(function() {
console.log("done");
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
$("#deco").click(function(event) { // deconnexion
majPos("traitement/deco");
});
$('.note').mousedown(function(event) { // Lorsqu'on presse l'élément
$(this).css('background-color', '#bfb172');
$(this).css('z-index', 1000);
});
// Gestion du positionnement auto
$('.note').mouseup(function(event) { // Lorsqu'on relâche l'élément
// Initialisation sauvegarde
var boolSave = true;
$(this).css('background-color', '#b5a768');
// Capture de la position initiale d'un élément
var str = $(this).attr('class');
var reg = new RegExp(" ", "g");
var tableau = str.split(reg);
var indice = tableau[1];
iniLeft = tableJson[indice].posL;
iniTop = tableJson[indice].posT;
// Position de l'élément relâché
var leftE = $(this)[0].offsetLeft;
var topE = $(this)[0].offsetTop;
if((iniLeft != leftE || iniTop != topE)) // Si les positions sont différentes de celles initiales
boolSave = true;
else
boolSave = false;
// Ligne et colonne les plus proches
var posX = Math.round(parseFloat(leftE) / 304);
var posY = Math.round(parseFloat(topE) / 219);
if (posX == 0){
var xE = 42;
}else{
var xE = (304*posX)+42;
}
if(posY == 0){
var yE = 57;
}else {
var yE = (219*posY)+57;
}
// vérifie la présence d'un élément
var ok = true;
for (var i = 0; i < Object.keys(tableJson).length; i++) {
// Si un élément est déjà la
if(tableJson[i].posL == xE && tableJson[i].posT == yE){
ok = false;
break;
}
}
if(ok){ // Si ok on positionne l'élément et on met à jour ces coordonnées
tableJson[indice].posL = xE;
tableJson[indice].posT = yE;
$(this).css({
left: xE,
top: yE
});
}else{ // Si pas ok on inverse la position des éléments et on met à jour leurs coordonnées
boolSave=false;
tableJson[indice].posL = xE;
tableJson[indice].posT = yE;
tableJson[i].posL = iniLeft;
tableJson[i].posT = iniTop;
$(this).css({
left: xE,
top: yE
});
savePos($(this));
$('.note.'+i).css({
left: iniLeft,
top: iniTop
});
savePos2(i);
}
$(this).css('z-index', '');
// Enregistrement du positionnement
if(boolSave){
savePos($(this));
}
});
// Gestion de la bulle contenu
$('.plus').click(function(event) {
// On récupère le note number
var str = $(this).parents()[1][2].parentElement.parentNode.className;
str = str.replace("grbt ", "");
$('#bulle form input[type="hidden"]').val(str);
// On efface les éléments parents
$(this).siblings().css('display', 'none');
// On affiche la bulle
var leftE = $(this).parents()[2].offsetLeft;
var topE = $(this).parents()[2].offsetTop;
// On la positionne
$('#bulle').css({
left: leftE,
top: topE,
display: 'block'
});
});
// Fermeture de la bulle (Lecture note)
$('#img2').click(function(event) {
// On réaffiche les éléments parent
$("h5").css('display', 'block');
$(".point").css('display', 'block');
// On la positionne
$('#bulle').css({
left: '',
top: '',
display: 'none'
});
});
// Gestion du code pin
$("form").submit(function(e){ // Dès que l'on soumet un formulaire
e.preventDefault(); // On stoppe le comportement par défaut
if(e.target.id == "formB"){
$.post("traitement/validation.php",$(this).serialize(),function(data){
var texte = JSON.parse(data).response;
if(texte == "correct"){
$("#error").css('display', 'none');
var description = JSON.parse(data).desc; // On récupère la description
var login = JSON.parse(data).login; // On récupère le login
var pass = JSON.parse(data).pass; // Le mot de passe
var idNote = JSON.parse(data).note; // L'id de la note
$('#bulle').css({
left: '',
top: '',
display: 'none'
});
$('.note .'+idNote)[0].parentElement[0].value = login;
$('.note .'+idNote)[0].parentElement[0].disabled = "";
$('.note .'+idNote)[0].parentElement[1].value = pass;
$('.note .'+idNote)[0].parentElement[1].disabled = "";
$('.note .'+idNote)[0].parentElement[2].parentElement.parentElement.style.display = "block";
// On réaffiche les éléments parent
$("h5").css('display', 'block');
$(".point").css('display', 'block');
$('.note .'+idNote)["0"].parentElement.parentElement.childNodes[1].firstElementChild.childNodes[3].style.display = "none";
$('.'+idNote+' h5').replaceWith("<div class=\"form-group\"><div class=\"col-sm-9 col-sm-offset-1\"> <input id=\"descMod\" onchange=\"this.value = this.value.charAt(0).toUpperCase() + this.value.substr(1);\" class=\"form-control\" type=\"text\" name=\"descMod\" value=\""+description+"\" /></div></div></div>");
}else if (texte == "incorrectNomb") {
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else if (texte == "empty") {
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}else if (texte == "incorrectCode") {
$("#error").css('display', 'block');
$("#error").html("Le code que vous avez saisi n'est pas valide !");
}
});
}
});
// Création d'une nouvelle note
var nbNote = 0;
$('#newF').click(function(event) {
if(nbNote == 0){
var num = 0; // On intitialise
var trouve = false;
// On recherche le numéro a généré
for (var i = 0; i < Object.keys(tableJson).length; i++) {
if (tableJson[i] == undefined) {
num = i;
trouve = true;
}
}
if(!trouve) // Dernier numéro
num = i;
// On récupère le bloc du proto
var container = $('#blocnote');
// On prépare le prototype
var prototype = $(container.attr('dataprototype').replace('numM', num).replace('grbt numM', 'grbt '+num));
// On le positionne
$('body').prepend(prototype);
// On active le button
$('.grbt.'+num).css('display', 'block');
// On met à jour le tableau
var leftNE = $('.note .'+num)[0].offsetParent.offsetLeft;
var topNE = $('.note .'+num)[0].offsetParent.offsetTop;
// On désactive le bouton new
$("#newF").prop('disabled', 'true');
tableJson[num]={"posL":leftNE,"posT":topNE};
$('#numCache').attr('value', num);
nbNote++;
}
// Enregistrement de la nouvelle note
$('form').submit(function(event) {
event.preventDefault(); // On stoppe le comportement habituel
if(event.target.id == "formN"){
$.post("traitement/enregistrement.php",$(this).serialize(),function(data){
var texte = data;
if(texte == "ok"){
$("#error2").css('display', 'none');
majPos("main");
window.location = "main.php";
ajustResp();
}else if (texte == "emptyDes") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir une description !");
}else if (texte == "emptyLog") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir un login !");
}else if (texte == "emptyPass") {
$("#error2").css('display', 'block');
$("#error2").html("Vous devez saisir un mot de passe !");
}else {
$("#error2").css('display', 'block');
$("#error2").html("Les champs sont obligatoires !");
}
});
}
});
// Annulation de la nouvelle note
$("#reset").click(function(event) {
// On réactive le bouton new
$("#newF").removeProp('disabled');
$("#formN").parent().remove();
nbNote=0;
});
});
// Fonction de mise à jour des numéros de notes
function saveNewIndice(redirect){
// Lien traitement de redirection
var redirect = redirect+".php";
$.ajax({
url: 'traitement/saveNumNote.php',
type: 'POST',
dataType: 'text',
})
.done(function(text) {
window.location = redirect;
})
.fail(function() {
console.log("error");
})
.always(function() {
console.log("complete");
});
}
// Fonction de réinitialisation du tableau json
function reiniTable(numNote){
var newtableJson = {};
// On parcourt le tableau
$.each(tableJson, function(numCours,objet) {
// Si le num est différent de celui demandé
if(numCours != numNote)
newtableJson[numCours]={"posL":objet.posL,"posT":objet.posT};
});
// On reconstruit le tableau à partir du nouveau exempt du num à supprimer
var newNum = 0;
tableJson = {};
$.each(newtableJson, function(ind,objet){
tableJson[newNum]={"posL":objet.posL,"posT":objet.posT};
newNum++;
});
}
// Suppression d'une note
$(".sup").click(function() {
var id = $(this).parent().children()[1].value; // On récupére l'id de la note à supprimer
| if(confirm("Confirmer-vous la suppression de cette note ?")){
$.post("traitement/suppression.php",{IdNote :id}, function(data) {
if(data == "ok"){
// On réinitialise le tableau json
reiniTable(numNote);
// On sauvegarde puis on recharge la page
saveNewIndice("traitement/reload");
}else{
alert("Erreur lors de la suppression");
}
});
}
});
// Mise à jour d'une note
$(".modif").click(function() {
$('form').submit(function(event) {
event.preventDefault(); // On stoppe le comportement habituel
$.post("traitement/modification.php",$(this).serialize(), function(data) {
if(data == "ok"){
window.location = "main.php";
}else{
alert(data);
}
});
});
});
// Réinitialisation de la bulle
function reiniBulle(){
$('#libCode').html("Validation du code");
$('.prov').removeAttr('id');
$('.prov').attr('id', 'img2');
$('#img2').removeClass('prov');
$("#formMod").removeAttr('id'); // Supprime l'id actuelle
$("#bulle form").attr('id','formB'); // On l'a redéfini
$("#chCode").removeAttr('placeholder');
$("#chCode").val("");
$("#chCode").attr('placeholder','Code pin...');
$("#valideCode").attr('value','Ok');
$("#bulle").hide();
$("#error").css('display', 'none');
}
// Mise à jour du code
$("#modC").click(function() {
// Affichage de la bulle de vérification
$("#bulle").show('slow/400/fast', function() {
$(this).css({
top: '57px',
left: '42px'
}); // Correction du positionnement
$("#formB").removeAttr('id'); // Supprime l'id précédente
$("#bulle form").attr('id','formNote'); // On l'a redéfini
});
// Fermeture de la bulle (Modification code)
$('#img2Bis').click(function() {
reiniBulle();
});
$("form").submit(function(e){ // Dès que l'on soumet le formulaire
e.preventDefault(); // On stoppe le comportement habituel
if(e.target.id == "formNote"){ // Validation du code d'accès
$.post("traitement/verification.php",$(this).serialize()+"&valideC=Ok",function(data){
if(data == "ok"){
$("#error").css('display', 'none');
// Traitement de la bulle
$('#libCode').html("Modification du code");
$('#img2').attr('class', 'prov');
$('#img2').removeAttr('id');
$('.prov').attr('id', 'img2Bis');
$("#formNote").removeAttr('id'); // Supprime l'id actuelle
$("#bulle form").attr('id','formMod'); // On l'a redéfini
$("#valideCode").attr('value','Modifier');
$("#chCode").removeAttr('placeholder');
$("#chCode").attr('placeholder','Nouveau code pin...');
$("#chCode").val("");
}else if(data == "pas ok"){
$("#error").css('display', 'block');
$("#error").html("Le code que vous avez saisi n'est pas valide !");
}else if(data == "code tpg"){
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else{
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}
});
}else if(e.target.id == "formMod"){ // Enregistrement de la modification
$.post("traitement/verification.php",$(this).serialize()+"&valideC=Modifier",function(data){
$("#error").html("");
$("#error").css('display', 'none');
if(data == "ok"){
$("#error").css('display', 'block');
$("#error").html("Modification effectuée avec succès");
setTimeout(function(){
reiniBulle(); // On réinitialise la bulle
}, 1000);
}else if(data == "pas ok"){
$("#error").css('display', 'block');
$("#error").html("Erreur lors de la mise à jour");
}else if(data == "code tpg"){
$("#error").css('display', 'block');
$("#error").html("Le code pin doit être composé de 4 chiffres !");
}else{
$("#error").css('display', 'block');
$("#error").html("Vous devez saisir un code pin !");
}
});
}
});
});
}); | var numNote = $(this).parent().parent()[0].className;
numNote = numNote.replace("grbt ", ""); // Le numéro de la note
| random_line_split |
peer.rs | use config;
use dt::{Set};
use proto::{Request, Response, Transport};
use tokio_core::channel::{channel, Sender, Receiver};
use tokio_core::reactor::Handle;
use tokio_core::net::TcpStream;
use tokio_service::Service;
use tokio_proto::easy::{EasyClient, multiplex};
use tokio_timer::{Timer, Sleep};
use futures::{Future, Poll, Async};
use futures::stream::Stream;
use std::{io, mem};
use std::time::Duration;
// Handle to the peer task.
//
// Sending a join message to a peer dispatches a message on `tx` to the task
// managing the peer connection and will be processed there.
//
// See `Task` for details on the peer task.
pub struct Peer {
tx: Sender<Set<String>>,
}
// State required for managing a peer connection.
//
// Connections to MiniDB peers are managed on reactor tasks. When the server
// initializes, it spawns one task for each peer in the cluster. The peer task
// is responsible for maintaining an open connection to the peer and to send a
// `Join` message every time the state is sent to the task.
//
// If the connection fails, the task will attempt a reconnect after a short
// period of time.
struct Task {
// Receives `Set` values that need to be sent to the peer.
rx: Receiver<Set<String>>,
// Route information
route: config::Route,
// Tokio reactor handle. Used to establish tcp connections
reactor_handle: Handle,
// Handle to the timer. The timer is used to set a re-connect timeout when
// the peer tcp connection fails.
timer: Timer,
// Current tcp connection state, see below
state: State,
// Pending `Join` message to send. This also tracks in-flight joins. If a
// join request to a peer fails, the connection will be re-established.
// Once it is re-established, the join request should be sent again.
//
// However, if while the task is waiting to re-establish a connection, a
// new state is replicated, then drop the join request that failed to send
// in favor of the newer one. Doing so is safe thanks to CRDTs!
pending_message: PendingMessage,
// Pending response future. A join was issued to the peer and the task is
// currently waiting for the response.
pending_response: Option<Box<Future<Item = Response, Error = io::Error>>>,
}
// Peer connection state. The actual connection to the peer node can be in one
// of the following states:
enum State {
// Waiting to connect, this state is reached after hitting a connect error
Waiting(Sleep),
// Connecting to the remote. A TCP connect has been issued and the task is
// waiting on the connect to complete
Connecting(Box<Future<Item = TcpStream, Error = io::Error>>),
// A connection is open to the peer.
Connected(EasyClient<Request, Response>),
}
// Tracks the state of replication requests
enum PendingMessage {
// A replication request is waiting to be sent.
Pending(Set<String>),
// A replication request is currently in-flight. The value of the message
// is saved in case the request fails and must be re-issued later.
InFlight(Set<String>),
// There are no pending replication requests.
None,
}
impl Peer {
/// Establish a connection to a peer node.
pub fn connect(route: config::Route, handle: &Handle, timer: &Timer) -> Peer {
// Create a channel. The channel will be used to send replication
// requests from the server task to the peer task.
let (tx, rx) = channel(handle).unwrap();
// Initialize the task state
let task = Task { | reactor_handle: handle.clone(),
timer: timer.clone(),
// Initialize in the "waiting to connect" state but with a 0 length
// sleep. This will effectively initiate the connect immediately
state: State::Waiting(timer.sleep(Duration::from_millis(0))),
// There are no pending messages
pending_message: PendingMessage::None,
// There are no pending responses
pending_response: None,
};
// Spawn the task
handle.spawn(task);
// Return the send half as the peer handle
Peer { tx: tx }
}
// Send a replication request to the task managing the peer connection
pub fn send(&self, set: Set<String>) {
self.tx.send(set).unwrap();
}
}
// Implement `Future` for `Task`. All tasks spawned on the I/O reactor must
// implement future w/ Item = () and Error = ();
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
// First, process any in-bound replication requests
self.process_rx();
// Perform pending work.
try!(self.tick());
Ok(Async::NotReady)
}
}
impl Task {
fn process_rx(&mut self) {
// Read any pending replication request and set `pending_message`. It
// is expected that some messages will be dropped. The most important
// thing is that the **last** replication request ends up getting
// processed.
while let Async::Ready(Some(set)) = self.rx.poll().unwrap() {
self.pending_message = PendingMessage::Pending(set);
}
}
fn tick(&mut self) -> Poll<(), ()> {
trace!("Peer::tick; actor-id={:?}", self.route.destination());
loop {
match self.state {
State::Waiting(..) => {
// Currently waiting a short period of time before
// establishing the TCP connection with the peer.
try_ready!(self.process_waiting());
}
State::Connecting(..) => {
// Waiting for the TCP connection finish connecting
try_ready!(self.process_connecting());
}
State::Connected(..) => {
if self.pending_response.is_some() {
// A request has been sent, waiting for the response.
try_ready!(self.process_response());
} else if self.pending_message.is_some() {
// A join request is pending, dispatch it
try_ready!(self.process_connected());
} else {
// Nothing to do, return ok
return Ok(Async::Ready(()));
}
}
}
}
}
fn process_waiting(&mut self) -> Poll<(), ()> {
trace!(" --> waiting");
match self.state {
// Try polling the sleep future. If `NotReady` `process_waiting`
// will return.
State::Waiting(ref mut sleep) => try_ready!(sleep.poll()),
_ => unreachable!(),
}
// We are done waiting and can now attempt to establish the connection
trace!(" --> sleep complete -- attempting tcp connect");
// Start a tcp connect
let socket = TcpStream::connect(&self.route.remote_addr(), &self.reactor_handle);
// Set a connect timeout of 5 seconds
let socket = self.timer.timeout(socket, Duration::from_secs(5));
// Transition the state to "connecting"
self.state = State::Connecting(Box::new(socket));
Ok(Async::Ready(()))
}
fn process_connecting(&mut self) -> Poll<(), ()> {
trace!(" --> connecting");
// Check if the `connecting` future is complete, aka the connection has
// been established
let socket = match self.state {
State::Connecting(ref mut connecting) => {
match connecting.poll() {
// The connection is not yet established
Ok(Async::NotReady) => return Ok(Async::NotReady),
// The connection is established
Ok(Async::Ready(socket)) => Some(socket),
// An error was hit while connecting. A timeout for a short
// period of time will be set after which, the connect will
// be stablished again.
Err(err) => {
info!("failed to connect to {}; attempting again in 5 seconds; err={:?}", self.route.remote_addr(), err);
None
}
}
}
_ => unreachable!(),
};
if let Some(socket) = socket {
trace!(" --> connect success");
info!("established peer connection to {:?}", self.route.remote_addr());
// The connection was successfully established. Now we have a Tcp
// socket. Using that, we will build up the MiniDB transport.
//
// The socket will be wrapped by the length delimited framer,
// followed by junkify, and last `Transport`.
let transport = Transport::junkified(socket, &self.route, &self.timer);
// Using the transport, spawn a task that manages this connection
// (vs. the general peer replication task).
//
// This is done with `tokio-proto`, which takes the transport and
// returns a `Service`. Requests can be dispatched directly to the
// service.
let service = multiplex::connect(transport, &self.reactor_handle);
// Update the state
self.state = State::Connected(service);
} else {
trace!(" --> connect failed");
// The connection failed, transition the state to "waiting to
// reconnect". We will wait a short bit of time before attempting a
// reconnect.
self.transition_to_waiting();
}
Ok(Async::Ready(()))
}
fn process_connected(&mut self) -> Poll<(), ()> {
trace!(" --> process peer connection");
let service = match self.state {
State::Connected(ref mut service) => service,
_ => unreachable!(),
};
// The connection is currently in the connected state. If there are any
// pending replication requests, then they should be dispatched to the
// client.
// First ensure that the service handle is ready to accept requests, if
// not, return `NotReady` and try again later.
if !service.poll_ready().is_ready() {
trace!(" --> peer socket not ready");
return Ok(Async::NotReady);
}
// Build the join / replication request
let set = self.pending_message.message_to_send().unwrap();
let msg = Request::Join(set);
trace!(" --> sending Join message");
// Dispatch the replication request and get back a future repesenting
// the response from the peer node.
let resp = service.call(msg);
// Timeout the response after 5 seconds. If the peer does not
// respond to the join within this time, the connection will be
// reestablished and the join sent again
let resp = self.timer.timeout(resp, Duration::from_secs(5));
// Track the response future
self.pending_response = Some(Box::new(resp));
Ok(Async::Ready(()))
}
fn process_response(&mut self) -> Poll<(), ()> {
trace!(" --> process peer response");
// Check the response future. If it is complete, see if it is a
// successful response or if the connection needs to be re-established
let response = match self.pending_response {
Some(ref mut pending_response) => {
match pending_response.poll() {
Ok(Async::Ready(v)) => Ok(v),
Err(e) => Err(e),
Ok(Async::NotReady) => return Ok(Async::NotReady),
}
}
_ => unreachable!(),
};
// Clear the pending response future
self.pending_response = None;
// The response has completed, check to see if it was successful
match response {
Ok(_) => {
// The join / replication successfully applied
self.pending_message.in_flight_succeeded();
trace!(" --> received response: OK");
}
Err(e) => {
// The replication failed. Transition the state to waiting to
// connect. Also, setup the join request to get redisptached
// once the connection is established again.
warn!("message send failed to remote {:?} -- attempting reconnect in 5 seconds; err={:?}", self.route.remote_addr(), e);
self.pending_message.in_flight_failed();
self.transition_to_waiting();
}
}
Ok(Async::Ready(()))
}
fn transition_to_waiting(&mut self) {
trace!("waiting for 5 seconds before reconnecting; actor-id={:?}", self.route.destination());
// Set a timeout for 5 seconds
let sleep = self.timer.sleep(Duration::from_secs(5));
// Update the state to reflect waiting
self.state = State::Waiting(sleep);
}
}
impl PendingMessage {
fn is_none(&self) -> bool {
match *self {
PendingMessage::None => true,
_ => false,
}
}
fn is_some(&self) -> bool {
!self.is_none()
}
fn message_to_send(&mut self) -> Option<Set<String>> {
match mem::replace(self, PendingMessage::None) {
PendingMessage::Pending(set) => {
*self = PendingMessage::InFlight(set.clone());
Some(set)
}
_ => None,
}
}
fn in_flight_succeeded(&mut self) {
match *self {
PendingMessage::Pending(..) => return,
_ => *self = PendingMessage::None,
}
}
fn in_flight_failed(&mut self) {
match mem::replace(self, PendingMessage::None) {
PendingMessage::InFlight(set) => {
*self = PendingMessage::Pending(set);
}
v => *self = v,
}
}
} | rx: rx,
route: route, | random_line_split |
peer.rs | use config;
use dt::{Set};
use proto::{Request, Response, Transport};
use tokio_core::channel::{channel, Sender, Receiver};
use tokio_core::reactor::Handle;
use tokio_core::net::TcpStream;
use tokio_service::Service;
use tokio_proto::easy::{EasyClient, multiplex};
use tokio_timer::{Timer, Sleep};
use futures::{Future, Poll, Async};
use futures::stream::Stream;
use std::{io, mem};
use std::time::Duration;
// Handle to the peer task.
//
// Sending a join message to a peer dispatches a message on `tx` to the task
// managing the peer connection and will be processed there.
//
// See `Task` for details on the peer task.
pub struct Peer {
tx: Sender<Set<String>>,
}
// State required for managing a peer connection.
//
// Connections to MiniDB peers are managed on reactor tasks. When the server
// initializes, it spawns one task for each peer in the cluster. The peer task
// is responsible for maintaining an open connection to the peer and to send a
// `Join` message every time the state is sent to the task.
//
// If the connection fails, the task will attempt a reconnect after a short
// period of time.
struct Task {
// Receives `Set` values that need to be sent to the peer.
rx: Receiver<Set<String>>,
// Route information
route: config::Route,
// Tokio reactor handle. Used to establish tcp connections
reactor_handle: Handle,
// Handle to the timer. The timer is used to set a re-connect timeout when
// the peer tcp connection fails.
timer: Timer,
// Current tcp connection state, see below
state: State,
// Pending `Join` message to send. This also tracks in-flight joins. If a
// join request to a peer fails, the connection will be re-established.
// Once it is re-established, the join request should be sent again.
//
// However, if while the task is waiting to re-establish a connection, a
// new state is replicated, then drop the join request that failed to send
// in favor of the newer one. Doing so is safe thanks to CRDTs!
pending_message: PendingMessage,
// Pending response future. A join was issued to the peer and the task is
// currently waiting for the response.
pending_response: Option<Box<Future<Item = Response, Error = io::Error>>>,
}
// Peer connection state. The actual connection to the peer node can be in one
// of the following states:
enum State {
// Waiting to connect, this state is reached after hitting a connect error
Waiting(Sleep),
// Connecting to the remote. A TCP connect has been issued and the task is
// waiting on the connect to complete
Connecting(Box<Future<Item = TcpStream, Error = io::Error>>),
// A connection is open to the peer.
Connected(EasyClient<Request, Response>),
}
// Tracks the state of replication requests
enum PendingMessage {
// A replication request is waiting to be sent.
Pending(Set<String>),
// A replication request is currently in-flight. The value of the message
// is saved in case the request fails and must be re-issued later.
InFlight(Set<String>),
// There are no pending replication requests.
None,
}
impl Peer {
/// Establish a connection to a peer node.
pub fn connect(route: config::Route, handle: &Handle, timer: &Timer) -> Peer {
// Create a channel. The channel will be used to send replication
// requests from the server task to the peer task.
let (tx, rx) = channel(handle).unwrap();
// Initialize the task state
let task = Task {
rx: rx,
route: route,
reactor_handle: handle.clone(),
timer: timer.clone(),
// Initialize in the "waiting to connect" state but with a 0 length
// sleep. This will effectively initiate the connect immediately
state: State::Waiting(timer.sleep(Duration::from_millis(0))),
// There are no pending messages
pending_message: PendingMessage::None,
// There are no pending responses
pending_response: None,
};
// Spawn the task
handle.spawn(task);
// Return the send half as the peer handle
Peer { tx: tx }
}
// Send a replication request to the task managing the peer connection
pub fn send(&self, set: Set<String>) {
self.tx.send(set).unwrap();
}
}
// Implement `Future` for `Task`. All tasks spawned on the I/O reactor must
// implement future w/ Item = () and Error = ();
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
// First, process any in-bound replication requests
self.process_rx();
// Perform pending work.
try!(self.tick());
Ok(Async::NotReady)
}
}
impl Task {
fn process_rx(&mut self) {
// Read any pending replication request and set `pending_message`. It
// is expected that some messages will be dropped. The most important
// thing is that the **last** replication request ends up getting
// processed.
while let Async::Ready(Some(set)) = self.rx.poll().unwrap() {
self.pending_message = PendingMessage::Pending(set);
}
}
fn tick(&mut self) -> Poll<(), ()> {
trace!("Peer::tick; actor-id={:?}", self.route.destination());
loop {
match self.state {
State::Waiting(..) => {
// Currently waiting a short period of time before
// establishing the TCP connection with the peer.
try_ready!(self.process_waiting());
}
State::Connecting(..) => {
// Waiting for the TCP connection finish connecting
try_ready!(self.process_connecting());
}
State::Connected(..) => {
if self.pending_response.is_some() {
// A request has been sent, waiting for the response.
try_ready!(self.process_response());
} else if self.pending_message.is_some() {
// A join request is pending, dispatch it
try_ready!(self.process_connected());
} else {
// Nothing to do, return ok
return Ok(Async::Ready(()));
}
}
}
}
}
fn process_waiting(&mut self) -> Poll<(), ()> {
trace!(" --> waiting");
match self.state {
// Try polling the sleep future. If `NotReady` `process_waiting`
// will return.
State::Waiting(ref mut sleep) => try_ready!(sleep.poll()),
_ => unreachable!(),
}
// We are done waiting and can now attempt to establish the connection
trace!(" --> sleep complete -- attempting tcp connect");
// Start a tcp connect
let socket = TcpStream::connect(&self.route.remote_addr(), &self.reactor_handle);
// Set a connect timeout of 5 seconds
let socket = self.timer.timeout(socket, Duration::from_secs(5));
// Transition the state to "connecting"
self.state = State::Connecting(Box::new(socket));
Ok(Async::Ready(()))
}
fn process_connecting(&mut self) -> Poll<(), ()> {
trace!(" --> connecting");
// Check if the `connecting` future is complete, aka the connection has
// been established
let socket = match self.state {
State::Connecting(ref mut connecting) => {
match connecting.poll() {
// The connection is not yet established
Ok(Async::NotReady) => return Ok(Async::NotReady),
// The connection is established
Ok(Async::Ready(socket)) => Some(socket),
// An error was hit while connecting. A timeout for a short
// period of time will be set after which, the connect will
// be stablished again.
Err(err) => {
info!("failed to connect to {}; attempting again in 5 seconds; err={:?}", self.route.remote_addr(), err);
None
}
}
}
_ => unreachable!(),
};
if let Some(socket) = socket {
trace!(" --> connect success");
info!("established peer connection to {:?}", self.route.remote_addr());
// The connection was successfully established. Now we have a Tcp
// socket. Using that, we will build up the MiniDB transport.
//
// The socket will be wrapped by the length delimited framer,
// followed by junkify, and last `Transport`.
let transport = Transport::junkified(socket, &self.route, &self.timer);
// Using the transport, spawn a task that manages this connection
// (vs. the general peer replication task).
//
// This is done with `tokio-proto`, which takes the transport and
// returns a `Service`. Requests can be dispatched directly to the
// service.
let service = multiplex::connect(transport, &self.reactor_handle);
// Update the state
self.state = State::Connected(service);
} else {
trace!(" --> connect failed");
// The connection failed, transition the state to "waiting to
// reconnect". We will wait a short bit of time before attempting a
// reconnect.
self.transition_to_waiting();
}
Ok(Async::Ready(()))
}
fn | (&mut self) -> Poll<(), ()> {
trace!(" --> process peer connection");
let service = match self.state {
State::Connected(ref mut service) => service,
_ => unreachable!(),
};
// The connection is currently in the connected state. If there are any
// pending replication requests, then they should be dispatched to the
// client.
// First ensure that the service handle is ready to accept requests, if
// not, return `NotReady` and try again later.
if !service.poll_ready().is_ready() {
trace!(" --> peer socket not ready");
return Ok(Async::NotReady);
}
// Build the join / replication request
let set = self.pending_message.message_to_send().unwrap();
let msg = Request::Join(set);
trace!(" --> sending Join message");
// Dispatch the replication request and get back a future repesenting
// the response from the peer node.
let resp = service.call(msg);
// Timeout the response after 5 seconds. If the peer does not
// respond to the join within this time, the connection will be
// reestablished and the join sent again
let resp = self.timer.timeout(resp, Duration::from_secs(5));
// Track the response future
self.pending_response = Some(Box::new(resp));
Ok(Async::Ready(()))
}
fn process_response(&mut self) -> Poll<(), ()> {
trace!(" --> process peer response");
// Check the response future. If it is complete, see if it is a
// successful response or if the connection needs to be re-established
let response = match self.pending_response {
Some(ref mut pending_response) => {
match pending_response.poll() {
Ok(Async::Ready(v)) => Ok(v),
Err(e) => Err(e),
Ok(Async::NotReady) => return Ok(Async::NotReady),
}
}
_ => unreachable!(),
};
// Clear the pending response future
self.pending_response = None;
// The response has completed, check to see if it was successful
match response {
Ok(_) => {
// The join / replication successfully applied
self.pending_message.in_flight_succeeded();
trace!(" --> received response: OK");
}
Err(e) => {
// The replication failed. Transition the state to waiting to
// connect. Also, setup the join request to get redisptached
// once the connection is established again.
warn!("message send failed to remote {:?} -- attempting reconnect in 5 seconds; err={:?}", self.route.remote_addr(), e);
self.pending_message.in_flight_failed();
self.transition_to_waiting();
}
}
Ok(Async::Ready(()))
}
fn transition_to_waiting(&mut self) {
trace!("waiting for 5 seconds before reconnecting; actor-id={:?}", self.route.destination());
// Set a timeout for 5 seconds
let sleep = self.timer.sleep(Duration::from_secs(5));
// Update the state to reflect waiting
self.state = State::Waiting(sleep);
}
}
impl PendingMessage {
fn is_none(&self) -> bool {
match *self {
PendingMessage::None => true,
_ => false,
}
}
fn is_some(&self) -> bool {
!self.is_none()
}
fn message_to_send(&mut self) -> Option<Set<String>> {
match mem::replace(self, PendingMessage::None) {
PendingMessage::Pending(set) => {
*self = PendingMessage::InFlight(set.clone());
Some(set)
}
_ => None,
}
}
fn in_flight_succeeded(&mut self) {
match *self {
PendingMessage::Pending(..) => return,
_ => *self = PendingMessage::None,
}
}
fn in_flight_failed(&mut self) {
match mem::replace(self, PendingMessage::None) {
PendingMessage::InFlight(set) => {
*self = PendingMessage::Pending(set);
}
v => *self = v,
}
}
}
| process_connected | identifier_name |
peer.rs | use config;
use dt::{Set};
use proto::{Request, Response, Transport};
use tokio_core::channel::{channel, Sender, Receiver};
use tokio_core::reactor::Handle;
use tokio_core::net::TcpStream;
use tokio_service::Service;
use tokio_proto::easy::{EasyClient, multiplex};
use tokio_timer::{Timer, Sleep};
use futures::{Future, Poll, Async};
use futures::stream::Stream;
use std::{io, mem};
use std::time::Duration;
// Handle to the peer task.
//
// Sending a join message to a peer dispatches a message on `tx` to the task
// managing the peer connection and will be processed there.
//
// See `Task` for details on the peer task.
pub struct Peer {
tx: Sender<Set<String>>,
}
// State required for managing a peer connection.
//
// Connections to MiniDB peers are managed on reactor tasks. When the server
// initializes, it spawns one task for each peer in the cluster. The peer task
// is responsible for maintaining an open connection to the peer and to send a
// `Join` message every time the state is sent to the task.
//
// If the connection fails, the task will attempt a reconnect after a short
// period of time.
struct Task {
// Receives `Set` values that need to be sent to the peer.
rx: Receiver<Set<String>>,
// Route information
route: config::Route,
// Tokio reactor handle. Used to establish tcp connections
reactor_handle: Handle,
// Handle to the timer. The timer is used to set a re-connect timeout when
// the peer tcp connection fails.
timer: Timer,
// Current tcp connection state, see below
state: State,
// Pending `Join` message to send. This also tracks in-flight joins. If a
// join request to a peer fails, the connection will be re-established.
// Once it is re-established, the join request should be sent again.
//
// However, if while the task is waiting to re-establish a connection, a
// new state is replicated, then drop the join request that failed to send
// in favor of the newer one. Doing so is safe thanks to CRDTs!
pending_message: PendingMessage,
// Pending response future. A join was issued to the peer and the task is
// currently waiting for the response.
pending_response: Option<Box<Future<Item = Response, Error = io::Error>>>,
}
// Peer connection state. The actual connection to the peer node can be in one
// of the following states:
enum State {
// Waiting to connect, this state is reached after hitting a connect error
Waiting(Sleep),
// Connecting to the remote. A TCP connect has been issued and the task is
// waiting on the connect to complete
Connecting(Box<Future<Item = TcpStream, Error = io::Error>>),
// A connection is open to the peer.
Connected(EasyClient<Request, Response>),
}
// Tracks the state of replication requests
enum PendingMessage {
// A replication request is waiting to be sent.
Pending(Set<String>),
// A replication request is currently in-flight. The value of the message
// is saved in case the request fails and must be re-issued later.
InFlight(Set<String>),
// There are no pending replication requests.
None,
}
impl Peer {
/// Establish a connection to a peer node.
pub fn connect(route: config::Route, handle: &Handle, timer: &Timer) -> Peer {
// Create a channel. The channel will be used to send replication
// requests from the server task to the peer task.
let (tx, rx) = channel(handle).unwrap();
// Initialize the task state
let task = Task {
rx: rx,
route: route,
reactor_handle: handle.clone(),
timer: timer.clone(),
// Initialize in the "waiting to connect" state but with a 0 length
// sleep. This will effectively initiate the connect immediately
state: State::Waiting(timer.sleep(Duration::from_millis(0))),
// There are no pending messages
pending_message: PendingMessage::None,
// There are no pending responses
pending_response: None,
};
// Spawn the task
handle.spawn(task);
// Return the send half as the peer handle
Peer { tx: tx }
}
// Send a replication request to the task managing the peer connection
pub fn send(&self, set: Set<String>) {
self.tx.send(set).unwrap();
}
}
// Implement `Future` for `Task`. All tasks spawned on the I/O reactor must
// implement future w/ Item = () and Error = ();
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
// First, process any in-bound replication requests
self.process_rx();
// Perform pending work.
try!(self.tick());
Ok(Async::NotReady)
}
}
impl Task {
fn process_rx(&mut self) {
// Read any pending replication request and set `pending_message`. It
// is expected that some messages will be dropped. The most important
// thing is that the **last** replication request ends up getting
// processed.
while let Async::Ready(Some(set)) = self.rx.poll().unwrap() {
self.pending_message = PendingMessage::Pending(set);
}
}
fn tick(&mut self) -> Poll<(), ()> {
trace!("Peer::tick; actor-id={:?}", self.route.destination());
loop {
match self.state {
State::Waiting(..) => {
// Currently waiting a short period of time before
// establishing the TCP connection with the peer.
try_ready!(self.process_waiting());
}
State::Connecting(..) => {
// Waiting for the TCP connection finish connecting
try_ready!(self.process_connecting());
}
State::Connected(..) => {
if self.pending_response.is_some() {
// A request has been sent, waiting for the response.
try_ready!(self.process_response());
} else if self.pending_message.is_some() {
// A join request is pending, dispatch it
try_ready!(self.process_connected());
} else {
// Nothing to do, return ok
return Ok(Async::Ready(()));
}
}
}
}
}
fn process_waiting(&mut self) -> Poll<(), ()> {
trace!(" --> waiting");
match self.state {
// Try polling the sleep future. If `NotReady` `process_waiting`
// will return.
State::Waiting(ref mut sleep) => try_ready!(sleep.poll()),
_ => unreachable!(),
}
// We are done waiting and can now attempt to establish the connection
trace!(" --> sleep complete -- attempting tcp connect");
// Start a tcp connect
let socket = TcpStream::connect(&self.route.remote_addr(), &self.reactor_handle);
// Set a connect timeout of 5 seconds
let socket = self.timer.timeout(socket, Duration::from_secs(5));
// Transition the state to "connecting"
self.state = State::Connecting(Box::new(socket));
Ok(Async::Ready(()))
}
fn process_connecting(&mut self) -> Poll<(), ()> {
trace!(" --> connecting");
// Check if the `connecting` future is complete, aka the connection has
// been established
let socket = match self.state {
State::Connecting(ref mut connecting) => {
match connecting.poll() {
// The connection is not yet established
Ok(Async::NotReady) => return Ok(Async::NotReady),
// The connection is established
Ok(Async::Ready(socket)) => Some(socket),
// An error was hit while connecting. A timeout for a short
// period of time will be set after which, the connect will
// be stablished again.
Err(err) => {
info!("failed to connect to {}; attempting again in 5 seconds; err={:?}", self.route.remote_addr(), err);
None
}
}
}
_ => unreachable!(),
};
if let Some(socket) = socket {
trace!(" --> connect success");
info!("established peer connection to {:?}", self.route.remote_addr());
// The connection was successfully established. Now we have a Tcp
// socket. Using that, we will build up the MiniDB transport.
//
// The socket will be wrapped by the length delimited framer,
// followed by junkify, and last `Transport`.
let transport = Transport::junkified(socket, &self.route, &self.timer);
// Using the transport, spawn a task that manages this connection
// (vs. the general peer replication task).
//
// This is done with `tokio-proto`, which takes the transport and
// returns a `Service`. Requests can be dispatched directly to the
// service.
let service = multiplex::connect(transport, &self.reactor_handle);
// Update the state
self.state = State::Connected(service);
} else {
trace!(" --> connect failed");
// The connection failed, transition the state to "waiting to
// reconnect". We will wait a short bit of time before attempting a
// reconnect.
self.transition_to_waiting();
}
Ok(Async::Ready(()))
}
fn process_connected(&mut self) -> Poll<(), ()> |
fn process_response(&mut self) -> Poll<(), ()> {
trace!(" --> process peer response");
// Check the response future. If it is complete, see if it is a
// successful response or if the connection needs to be re-established
let response = match self.pending_response {
Some(ref mut pending_response) => {
match pending_response.poll() {
Ok(Async::Ready(v)) => Ok(v),
Err(e) => Err(e),
Ok(Async::NotReady) => return Ok(Async::NotReady),
}
}
_ => unreachable!(),
};
// Clear the pending response future
self.pending_response = None;
// The response has completed, check to see if it was successful
match response {
Ok(_) => {
// The join / replication successfully applied
self.pending_message.in_flight_succeeded();
trace!(" --> received response: OK");
}
Err(e) => {
// The replication failed. Transition the state to waiting to
// connect. Also, setup the join request to get redisptached
// once the connection is established again.
warn!("message send failed to remote {:?} -- attempting reconnect in 5 seconds; err={:?}", self.route.remote_addr(), e);
self.pending_message.in_flight_failed();
self.transition_to_waiting();
}
}
Ok(Async::Ready(()))
}
fn transition_to_waiting(&mut self) {
trace!("waiting for 5 seconds before reconnecting; actor-id={:?}", self.route.destination());
// Set a timeout for 5 seconds
let sleep = self.timer.sleep(Duration::from_secs(5));
// Update the state to reflect waiting
self.state = State::Waiting(sleep);
}
}
impl PendingMessage {
fn is_none(&self) -> bool {
match *self {
PendingMessage::None => true,
_ => false,
}
}
fn is_some(&self) -> bool {
!self.is_none()
}
fn message_to_send(&mut self) -> Option<Set<String>> {
match mem::replace(self, PendingMessage::None) {
PendingMessage::Pending(set) => {
*self = PendingMessage::InFlight(set.clone());
Some(set)
}
_ => None,
}
}
fn in_flight_succeeded(&mut self) {
match *self {
PendingMessage::Pending(..) => return,
_ => *self = PendingMessage::None,
}
}
fn in_flight_failed(&mut self) {
match mem::replace(self, PendingMessage::None) {
PendingMessage::InFlight(set) => {
*self = PendingMessage::Pending(set);
}
v => *self = v,
}
}
}
| {
trace!(" --> process peer connection");
let service = match self.state {
State::Connected(ref mut service) => service,
_ => unreachable!(),
};
// The connection is currently in the connected state. If there are any
// pending replication requests, then they should be dispatched to the
// client.
// First ensure that the service handle is ready to accept requests, if
// not, return `NotReady` and try again later.
if !service.poll_ready().is_ready() {
trace!(" --> peer socket not ready");
return Ok(Async::NotReady);
}
// Build the join / replication request
let set = self.pending_message.message_to_send().unwrap();
let msg = Request::Join(set);
trace!(" --> sending Join message");
// Dispatch the replication request and get back a future repesenting
// the response from the peer node.
let resp = service.call(msg);
// Timeout the response after 5 seconds. If the peer does not
// respond to the join within this time, the connection will be
// reestablished and the join sent again
let resp = self.timer.timeout(resp, Duration::from_secs(5));
// Track the response future
self.pending_response = Some(Box::new(resp));
Ok(Async::Ready(()))
} | identifier_body |
test4.py |
import arcade
import random
import math
import os
#from arcade.experimental.camera import Camera2D
from arcade import Point, Vector
from arcade.utils import _Vec2
import time
import pyglet
from typing import cast
import pprint
import pyglet.input.base
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 700
SCREEN_TITLE = "test joystick"
def dump_obj(obj):
for key in sorted(vars(obj)):
val = getattr(obj, key)
print("{:30} = {} ({})".format(key, val, type(val).__name__))
def dump_joystick(joy):
print("========== {}".format(joy))
print("x {}".format(joy.x))
print("y {}".format(joy.y))
print("z {}".format(joy.z))
print("rx {}".format(joy.rx))
print("ry {}".format(joy.ry))
print("rz {}".format(joy.rz))
print("hat_x {}".format(joy.hat_x))
print("hat_y {}".format(joy.hat_y))
print("buttons {}".format(joy.buttons))
print("========== Extra joy")
dump_obj(joy)
print("========== Extra joy.device")
dump_obj(joy.device)
print("========== pprint joy")
pprint.pprint(joy)
print("========== pprint joy.device")
pprint.pprint(joy.device)
def dump_joystick_state(ticks, joy):
# print("{:5.2f} {:5.2f} {:>20} {:5}_".format(1.234567, -8.2757272903, "hello", str(True)))
fmt_str = "{:6d} "
num_fmts = ["{:5.2f}"] * 6
fmt_str += " ".join(num_fmts)
fmt_str += " {:2d} {:2d} {}"
buttons = " ".join(["{:5}".format(str(b)) for b in joy.buttons])
print(fmt_str.format(ticks,
joy.x,
joy.y,
joy.z,
joy.rx,
joy.ry,
joy.rz,
joy.hat_x,
joy.hat_y,
buttons))
def get_joy_position(x, y):
"""Given position of joystick axes, return (x, y, angle_in_degrees).
If movement is not outside of deadzone, return (None, None, None)"""
if x > JOY_DEADZONE or x < -JOY_DEADZONE or y > JOY_DEADZONE or y < -JOY_DEADZONE:
y = -y
rad = math.atan2(y, x)
angle = math.degrees(rad)
return x, y, angle
return None, None, None
class MyGame(arcade.View):
def __init__(self):
super().__init__()
def on_joybutton_press(self, _joystick, button):
""" Handle button-down event for the joystick """
print("Button {} down".format(button))
if button == JUMPBTN:
iced_ground_contact_list = arcade.check_for_collision_with_list(self.player_list[0], self.lowfric_list)
if iced_ground_contact_list == []:
if self.physics_engine.is_on_ground(self.player_sprite) and not self.player_sprite.is_on_ladder:
# She is! Go ahead and jump
impulse = (0, PLAYER_JUMP_IMPULSE)
self.physics_engine.apply_impulse(self.player_sprite, impulse)
def on_show(self):
arcade.set_background_color(arcade.color.DARK_MIDNIGHT_BLUE)
joys = self.window.joys
for joy in joys:
dump_joystick(joy)
if joys:
self.joy = joys[0]
print("Using joystick controls: {}".format(self.joy.device))
arcade.window_commands.schedule(self.debug_joy_state, 0.1)
if not self.joy:
print("No joystick present, using keyboard controls")
arcade.window_commands.schedule(self.spawn_enemy, ENEMY_SPAWN_INTERVAL)
def debug_joy_state(self, _delta_time):
dump_joystick_state(self.tick, self.joy)
def on_update(self, delta_time):
self.tick += 1
if self.game_over:
return
self.bullet_cooldown += 1
for enemy in self.enemy_list:
cast(Enemy, enemy).follow_sprite(self.player)
if self.joy:
# Joystick input - movement
move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
else:
# Keyboard input - shooting
if self.player.shoot_right_pressed and self.player.shoot_up_pressed:
self.spawn_bullet(0+45)
elif self.player.shoot_up_pressed and self.player.shoot_left_pressed:
self.spawn_bullet(90+45)
elif self.player.shoot_left_pressed and self.player.shoot_down_pressed:
self.spawn_bullet(180+45)
elif self.player.shoot_down_pressed and self.player.shoot_right_pressed:
self.spawn_bullet(270+45)
elif self.player.shoot_right_pressed:
self.spawn_bullet(0)
elif self.player.shoot_up_pressed:
self.spawn_bullet(90)
elif self.player.shoot_left_pressed:
self.spawn_bullet(180)
elif self.player.shoot_down_pressed:
self.spawn_bullet(270)
class JoyConfigView(arcade.View):
"""A View that allows a user to interactively configure their joystick"""
REGISTRATION_PAUSE = 1.5
NO_JOYSTICK_PAUSE = 2.0
JOY_ATTRS = ("x", "y", "z", "rx", "ry", "rz")
def __init__(self, joy_method_names, joysticks, next_view, width, height):
super().__init__()
self.next_view = next_view
self.width = width
self.height = height
self.msg = ""
self.script = self.joy_config_script()
self.joys = joysticks
arcade.set_background_color(arcade.color.WHITE)
if len(joysticks) > 0:
self.joy = joysticks[0]
self.joy_method_names = joy_method_names
self.axis_ranges = {}
def config_axis(self, joy_axis_label, method_name):
self.msg = joy_axis_label
self.axis_ranges = {a: 0.0 for a in self.JOY_ATTRS}
while max([v for k, v in self.axis_ranges.items()]) < 0.85:
for attr, farthest_val in self.axis_ranges.items():
cur_val = getattr(self.joy, attr)
if abs(cur_val) > abs(farthest_val):
self.axis_ranges[attr] = abs(cur_val)
yield
max_val = 0.0
max_attr = None
for attr, farthest_val in self.axis_ranges.items():
if farthest_val > max_val:
max_attr = attr
max_val = farthest_val
self.msg = f"Registered!"
setattr(pyglet.input.base.Joystick, method_name, property(lambda that: getattr(that, max_attr), None))
# pause briefly after registering an axis
yield from self._pause(self.REGISTRATION_PAUSE)
def joy_config_script(self):
if len(self.joys) == 0:
self.msg = "No joysticks found! Use keyboard controls."
yield from self._pause(self.NO_JOYSTICK_PAUSE)
return
for joy_axis_label, method_name in self.joy_method_names:
yield from self.config_axis(joy_axis_label, method_name)
def on_update(self, delta_time):
try:
next(self.script)
except StopIteration:
self.window.show_view(self.next_view)
def on_draw(self):
arcade.start_render()
arcade.draw_text("Configure your joystick", self.width/2, self.height/2+100,
arcade.color.BLACK, font_size=32, anchor_x="center")
arcade.draw_text(self.msg, self.width/2, self.height/2,
arcade.color.BLACK, font_size=24, anchor_x="center")
def _pause(self, delay):
"""Block a generator from advancing for the given delay. Call with 'yield from self._pause(1.0)"""
start = time.time()
end = start + delay
while time.time() < end:
yield
class InstructionView(arcade.View):
def __init__(self):
super().__init__()
pass
def on_show(self):
|
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("Instructions Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.WHITE, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.WHITE, font_size=20, anchor_x="center")
def on_mouse_press(self, _x, _y, _button, _modifiers):
""" If the user presses the mouse button, start the game. """
game_view = GameView()
game_view.setup(level=1)
arcade.set_background_color(arcade.csscolor.BLACK)
try:
pass
except ValueError:
print("music already finished") # ValueError: list.remove(x): x not in list media.Source._players.remove(player)
self.window.show_view(game_view)
def on_update(self, delta_time):
""" Movement and game logic """
#pressed = self.window.joys[0].on_joybutton_press
#print(pressed) # <bound method Joystick.on_joybutton_press of <pyglet.input.base.Joystick object at 0x7f5169264d90>>
#print(type(pressed)) # <class 'method'>
joy_dico = self.window.joys[0]
btns = joy_dico.buttons
print(btns)
#print(type(btns)) # list
print(">>>>")
print(joy_dico.button_controls) # [Button(raw_name=BTN_A), Button(raw_name=BTN_B), Button(raw_name=BTN_X), Button(raw_name=BTN_Y), Button(raw_name=BTN_TL), Button(raw_name=BTN_TR), Button(raw_name=BTN_SELECT), Button(raw_name=BTN_START), Button(raw_name=BTN_MODE), Button(raw_name=BTN_THUMBL), Button(raw_name=BTN_THUMBR)]
print(joy_dico.button_controls[0].__dict__)
print("_______*******")
#print(joy_dico.button_controls.BTN_A)
joy_dico = self.window.joys[0]
BTN_A = joy_dico.button_controls[0]
BTN_B = joy_dico.button_controls[1]
BTN_X = joy_dico.button_controls[2]
BTN_Y = joy_dico.button_controls[3]
BTN_TL = joy_dico.button_controls[4]
BTN_TR = joy_dico.button_controls[5]
BTN_SELECT = joy_dico.button_controls[6]
BTN_START = joy_dico.button_controls[7]
BTN_MODE = joy_dico.button_controls[8]
BTN_THUMBL = joy_dico.button_controls[9]
BTN_THUMBR = joy_dico.button_controls[10]
print(f"\n BTN_A ----> {BTN_A}")
BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]
BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y]
for BTN in BTN_list:
if BTN._value == 1:
print(f"=====> >=====> ====> {BTN.raw_name}")
idx = BTN_list.index(BTN)
BTN_fn_list[idx]()
def joy_A(self):
print("\n... A ... \n")
def joy_B(self):
print("\n... B ... \n")
def joy_X(self):
print("\n... X ... \n")
def joy_Y(self):
print("\n... Y ... \n")
class GameView(arcade.View):
def __init__(self):
super().__init__()
pass
def setup(self, level):
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("GameView Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.GREEN, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.RED, font_size=20, anchor_x="center")
def on_update(self, delta_time):
#if self.joy
if self.window.joys:
joy = self.window.joys[0]
print(joy.__dict__)
print(joy.buttons)
# Joystick input - movement
#move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
move_x, move_y, move_angle = get_joy_position(joy.move_stick_x, joy.move_stick_y)
#move_x, move_y, move_angle = get_joy_position(self.window.joys.move_stick_x, self.window.joys.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
def main():
#window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE, fullscreen=False)
#start_view = GameView()
#window.show_view(start_view)
#start_view.setup()
#start_view.setup(level=1)
start_view = InstructionView()
#start_view = GameView()
window.show_view(start_view)
window.joys = arcade.get_joysticks()
for j in window.joys:
j.open()
#joy_config_method_names = (
# ("Move the movement stick left or right", "move_stick_x"),
# ("Move the movement stick up or down", "move_stick_y"),
# ("Move the shooting stick left or right", "shoot_stick_x"),
# ("Move the shooting stick up or down", "shoot_stick_y"),
#)
game = InstructionView()
print(dir(window.joys[0]))
print("---")
print(window.joys[0].__dict__)
print("iii---")
print("\n\n\n\n\n")
#window.show_view(JoyConfigView(joy_config_method_names, window.joys, game, SCREEN_WIDTH, SCREEN_HEIGHT))
arcade.run()
if __name__ == "__main__":
main() | """ This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1) | identifier_body |
test4.py |
import arcade
import random
import math
import os
#from arcade.experimental.camera import Camera2D
from arcade import Point, Vector
from arcade.utils import _Vec2
import time
import pyglet
from typing import cast
import pprint
import pyglet.input.base
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 700
SCREEN_TITLE = "test joystick"
def dump_obj(obj):
for key in sorted(vars(obj)):
val = getattr(obj, key)
print("{:30} = {} ({})".format(key, val, type(val).__name__))
def dump_joystick(joy):
print("========== {}".format(joy))
print("x {}".format(joy.x))
print("y {}".format(joy.y))
print("z {}".format(joy.z))
print("rx {}".format(joy.rx))
print("ry {}".format(joy.ry))
print("rz {}".format(joy.rz))
print("hat_x {}".format(joy.hat_x))
print("hat_y {}".format(joy.hat_y))
print("buttons {}".format(joy.buttons))
print("========== Extra joy")
dump_obj(joy)
print("========== Extra joy.device")
dump_obj(joy.device)
print("========== pprint joy")
pprint.pprint(joy)
print("========== pprint joy.device")
pprint.pprint(joy.device)
def dump_joystick_state(ticks, joy):
# print("{:5.2f} {:5.2f} {:>20} {:5}_".format(1.234567, -8.2757272903, "hello", str(True)))
fmt_str = "{:6d} "
num_fmts = ["{:5.2f}"] * 6
fmt_str += " ".join(num_fmts)
fmt_str += " {:2d} {:2d} {}"
buttons = " ".join(["{:5}".format(str(b)) for b in joy.buttons])
print(fmt_str.format(ticks,
joy.x,
joy.y,
joy.z,
joy.rx,
joy.ry,
joy.rz,
joy.hat_x,
joy.hat_y,
buttons))
def get_joy_position(x, y):
"""Given position of joystick axes, return (x, y, angle_in_degrees).
If movement is not outside of deadzone, return (None, None, None)"""
if x > JOY_DEADZONE or x < -JOY_DEADZONE or y > JOY_DEADZONE or y < -JOY_DEADZONE:
y = -y
rad = math.atan2(y, x)
angle = math.degrees(rad)
return x, y, angle
return None, None, None
class MyGame(arcade.View):
def __init__(self):
super().__init__()
def | (self, _joystick, button):
""" Handle button-down event for the joystick """
print("Button {} down".format(button))
if button == JUMPBTN:
iced_ground_contact_list = arcade.check_for_collision_with_list(self.player_list[0], self.lowfric_list)
if iced_ground_contact_list == []:
if self.physics_engine.is_on_ground(self.player_sprite) and not self.player_sprite.is_on_ladder:
# She is! Go ahead and jump
impulse = (0, PLAYER_JUMP_IMPULSE)
self.physics_engine.apply_impulse(self.player_sprite, impulse)
def on_show(self):
arcade.set_background_color(arcade.color.DARK_MIDNIGHT_BLUE)
joys = self.window.joys
for joy in joys:
dump_joystick(joy)
if joys:
self.joy = joys[0]
print("Using joystick controls: {}".format(self.joy.device))
arcade.window_commands.schedule(self.debug_joy_state, 0.1)
if not self.joy:
print("No joystick present, using keyboard controls")
arcade.window_commands.schedule(self.spawn_enemy, ENEMY_SPAWN_INTERVAL)
def debug_joy_state(self, _delta_time):
dump_joystick_state(self.tick, self.joy)
def on_update(self, delta_time):
self.tick += 1
if self.game_over:
return
self.bullet_cooldown += 1
for enemy in self.enemy_list:
cast(Enemy, enemy).follow_sprite(self.player)
if self.joy:
# Joystick input - movement
move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
else:
# Keyboard input - shooting
if self.player.shoot_right_pressed and self.player.shoot_up_pressed:
self.spawn_bullet(0+45)
elif self.player.shoot_up_pressed and self.player.shoot_left_pressed:
self.spawn_bullet(90+45)
elif self.player.shoot_left_pressed and self.player.shoot_down_pressed:
self.spawn_bullet(180+45)
elif self.player.shoot_down_pressed and self.player.shoot_right_pressed:
self.spawn_bullet(270+45)
elif self.player.shoot_right_pressed:
self.spawn_bullet(0)
elif self.player.shoot_up_pressed:
self.spawn_bullet(90)
elif self.player.shoot_left_pressed:
self.spawn_bullet(180)
elif self.player.shoot_down_pressed:
self.spawn_bullet(270)
class JoyConfigView(arcade.View):
"""A View that allows a user to interactively configure their joystick"""
REGISTRATION_PAUSE = 1.5
NO_JOYSTICK_PAUSE = 2.0
JOY_ATTRS = ("x", "y", "z", "rx", "ry", "rz")
def __init__(self, joy_method_names, joysticks, next_view, width, height):
super().__init__()
self.next_view = next_view
self.width = width
self.height = height
self.msg = ""
self.script = self.joy_config_script()
self.joys = joysticks
arcade.set_background_color(arcade.color.WHITE)
if len(joysticks) > 0:
self.joy = joysticks[0]
self.joy_method_names = joy_method_names
self.axis_ranges = {}
def config_axis(self, joy_axis_label, method_name):
self.msg = joy_axis_label
self.axis_ranges = {a: 0.0 for a in self.JOY_ATTRS}
while max([v for k, v in self.axis_ranges.items()]) < 0.85:
for attr, farthest_val in self.axis_ranges.items():
cur_val = getattr(self.joy, attr)
if abs(cur_val) > abs(farthest_val):
self.axis_ranges[attr] = abs(cur_val)
yield
max_val = 0.0
max_attr = None
for attr, farthest_val in self.axis_ranges.items():
if farthest_val > max_val:
max_attr = attr
max_val = farthest_val
self.msg = f"Registered!"
setattr(pyglet.input.base.Joystick, method_name, property(lambda that: getattr(that, max_attr), None))
# pause briefly after registering an axis
yield from self._pause(self.REGISTRATION_PAUSE)
def joy_config_script(self):
if len(self.joys) == 0:
self.msg = "No joysticks found! Use keyboard controls."
yield from self._pause(self.NO_JOYSTICK_PAUSE)
return
for joy_axis_label, method_name in self.joy_method_names:
yield from self.config_axis(joy_axis_label, method_name)
def on_update(self, delta_time):
try:
next(self.script)
except StopIteration:
self.window.show_view(self.next_view)
def on_draw(self):
arcade.start_render()
arcade.draw_text("Configure your joystick", self.width/2, self.height/2+100,
arcade.color.BLACK, font_size=32, anchor_x="center")
arcade.draw_text(self.msg, self.width/2, self.height/2,
arcade.color.BLACK, font_size=24, anchor_x="center")
def _pause(self, delay):
"""Block a generator from advancing for the given delay. Call with 'yield from self._pause(1.0)"""
start = time.time()
end = start + delay
while time.time() < end:
yield
class InstructionView(arcade.View):
def __init__(self):
super().__init__()
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("Instructions Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.WHITE, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.WHITE, font_size=20, anchor_x="center")
def on_mouse_press(self, _x, _y, _button, _modifiers):
""" If the user presses the mouse button, start the game. """
game_view = GameView()
game_view.setup(level=1)
arcade.set_background_color(arcade.csscolor.BLACK)
try:
pass
except ValueError:
print("music already finished") # ValueError: list.remove(x): x not in list media.Source._players.remove(player)
self.window.show_view(game_view)
def on_update(self, delta_time):
""" Movement and game logic """
#pressed = self.window.joys[0].on_joybutton_press
#print(pressed) # <bound method Joystick.on_joybutton_press of <pyglet.input.base.Joystick object at 0x7f5169264d90>>
#print(type(pressed)) # <class 'method'>
joy_dico = self.window.joys[0]
btns = joy_dico.buttons
print(btns)
#print(type(btns)) # list
print(">>>>")
print(joy_dico.button_controls) # [Button(raw_name=BTN_A), Button(raw_name=BTN_B), Button(raw_name=BTN_X), Button(raw_name=BTN_Y), Button(raw_name=BTN_TL), Button(raw_name=BTN_TR), Button(raw_name=BTN_SELECT), Button(raw_name=BTN_START), Button(raw_name=BTN_MODE), Button(raw_name=BTN_THUMBL), Button(raw_name=BTN_THUMBR)]
print(joy_dico.button_controls[0].__dict__)
print("_______*******")
#print(joy_dico.button_controls.BTN_A)
joy_dico = self.window.joys[0]
BTN_A = joy_dico.button_controls[0]
BTN_B = joy_dico.button_controls[1]
BTN_X = joy_dico.button_controls[2]
BTN_Y = joy_dico.button_controls[3]
BTN_TL = joy_dico.button_controls[4]
BTN_TR = joy_dico.button_controls[5]
BTN_SELECT = joy_dico.button_controls[6]
BTN_START = joy_dico.button_controls[7]
BTN_MODE = joy_dico.button_controls[8]
BTN_THUMBL = joy_dico.button_controls[9]
BTN_THUMBR = joy_dico.button_controls[10]
print(f"\n BTN_A ----> {BTN_A}")
BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]
BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y]
for BTN in BTN_list:
if BTN._value == 1:
print(f"=====> >=====> ====> {BTN.raw_name}")
idx = BTN_list.index(BTN)
BTN_fn_list[idx]()
def joy_A(self):
print("\n... A ... \n")
def joy_B(self):
print("\n... B ... \n")
def joy_X(self):
print("\n... X ... \n")
def joy_Y(self):
print("\n... Y ... \n")
class GameView(arcade.View):
def __init__(self):
super().__init__()
pass
def setup(self, level):
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("GameView Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.GREEN, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.RED, font_size=20, anchor_x="center")
def on_update(self, delta_time):
#if self.joy
if self.window.joys:
joy = self.window.joys[0]
print(joy.__dict__)
print(joy.buttons)
# Joystick input - movement
#move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
move_x, move_y, move_angle = get_joy_position(joy.move_stick_x, joy.move_stick_y)
#move_x, move_y, move_angle = get_joy_position(self.window.joys.move_stick_x, self.window.joys.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
def main():
#window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE, fullscreen=False)
#start_view = GameView()
#window.show_view(start_view)
#start_view.setup()
#start_view.setup(level=1)
start_view = InstructionView()
#start_view = GameView()
window.show_view(start_view)
window.joys = arcade.get_joysticks()
for j in window.joys:
j.open()
#joy_config_method_names = (
# ("Move the movement stick left or right", "move_stick_x"),
# ("Move the movement stick up or down", "move_stick_y"),
# ("Move the shooting stick left or right", "shoot_stick_x"),
# ("Move the shooting stick up or down", "shoot_stick_y"),
#)
game = InstructionView()
print(dir(window.joys[0]))
print("---")
print(window.joys[0].__dict__)
print("iii---")
print("\n\n\n\n\n")
#window.show_view(JoyConfigView(joy_config_method_names, window.joys, game, SCREEN_WIDTH, SCREEN_HEIGHT))
arcade.run()
if __name__ == "__main__":
main() | on_joybutton_press | identifier_name |
test4.py |
import arcade
import random
import math
import os
#from arcade.experimental.camera import Camera2D
from arcade import Point, Vector
from arcade.utils import _Vec2
import time
import pyglet
from typing import cast
import pprint
import pyglet.input.base
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 700
SCREEN_TITLE = "test joystick"
def dump_obj(obj):
for key in sorted(vars(obj)):
val = getattr(obj, key)
print("{:30} = {} ({})".format(key, val, type(val).__name__))
def dump_joystick(joy):
print("========== {}".format(joy))
print("x {}".format(joy.x))
print("y {}".format(joy.y))
print("z {}".format(joy.z))
print("rx {}".format(joy.rx))
print("ry {}".format(joy.ry))
print("rz {}".format(joy.rz))
print("hat_x {}".format(joy.hat_x))
print("hat_y {}".format(joy.hat_y))
print("buttons {}".format(joy.buttons))
print("========== Extra joy")
dump_obj(joy)
print("========== Extra joy.device")
dump_obj(joy.device)
print("========== pprint joy")
pprint.pprint(joy)
print("========== pprint joy.device")
pprint.pprint(joy.device)
def dump_joystick_state(ticks, joy):
# print("{:5.2f} {:5.2f} {:>20} {:5}_".format(1.234567, -8.2757272903, "hello", str(True)))
fmt_str = "{:6d} "
num_fmts = ["{:5.2f}"] * 6
fmt_str += " ".join(num_fmts)
fmt_str += " {:2d} {:2d} {}"
buttons = " ".join(["{:5}".format(str(b)) for b in joy.buttons])
print(fmt_str.format(ticks,
joy.x,
joy.y,
joy.z,
joy.rx,
joy.ry,
joy.rz,
joy.hat_x,
joy.hat_y,
buttons))
def get_joy_position(x, y):
"""Given position of joystick axes, return (x, y, angle_in_degrees).
If movement is not outside of deadzone, return (None, None, None)"""
if x > JOY_DEADZONE or x < -JOY_DEADZONE or y > JOY_DEADZONE or y < -JOY_DEADZONE:
y = -y
rad = math.atan2(y, x)
angle = math.degrees(rad)
return x, y, angle
return None, None, None
class MyGame(arcade.View):
def __init__(self):
super().__init__()
def on_joybutton_press(self, _joystick, button):
""" Handle button-down event for the joystick """
print("Button {} down".format(button))
if button == JUMPBTN:
iced_ground_contact_list = arcade.check_for_collision_with_list(self.player_list[0], self.lowfric_list)
if iced_ground_contact_list == []:
if self.physics_engine.is_on_ground(self.player_sprite) and not self.player_sprite.is_on_ladder:
# She is! Go ahead and jump
impulse = (0, PLAYER_JUMP_IMPULSE)
self.physics_engine.apply_impulse(self.player_sprite, impulse)
def on_show(self):
arcade.set_background_color(arcade.color.DARK_MIDNIGHT_BLUE)
joys = self.window.joys
for joy in joys:
dump_joystick(joy)
if joys:
self.joy = joys[0]
print("Using joystick controls: {}".format(self.joy.device))
arcade.window_commands.schedule(self.debug_joy_state, 0.1)
if not self.joy:
print("No joystick present, using keyboard controls")
arcade.window_commands.schedule(self.spawn_enemy, ENEMY_SPAWN_INTERVAL)
def debug_joy_state(self, _delta_time):
dump_joystick_state(self.tick, self.joy)
def on_update(self, delta_time):
self.tick += 1
if self.game_over:
return
self.bullet_cooldown += 1
for enemy in self.enemy_list:
cast(Enemy, enemy).follow_sprite(self.player)
if self.joy:
# Joystick input - movement
move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
else:
# Keyboard input - shooting
if self.player.shoot_right_pressed and self.player.shoot_up_pressed:
self.spawn_bullet(0+45)
elif self.player.shoot_up_pressed and self.player.shoot_left_pressed:
self.spawn_bullet(90+45)
elif self.player.shoot_left_pressed and self.player.shoot_down_pressed:
self.spawn_bullet(180+45)
elif self.player.shoot_down_pressed and self.player.shoot_right_pressed:
|
elif self.player.shoot_right_pressed:
self.spawn_bullet(0)
elif self.player.shoot_up_pressed:
self.spawn_bullet(90)
elif self.player.shoot_left_pressed:
self.spawn_bullet(180)
elif self.player.shoot_down_pressed:
self.spawn_bullet(270)
class JoyConfigView(arcade.View):
"""A View that allows a user to interactively configure their joystick"""
REGISTRATION_PAUSE = 1.5
NO_JOYSTICK_PAUSE = 2.0
JOY_ATTRS = ("x", "y", "z", "rx", "ry", "rz")
def __init__(self, joy_method_names, joysticks, next_view, width, height):
super().__init__()
self.next_view = next_view
self.width = width
self.height = height
self.msg = ""
self.script = self.joy_config_script()
self.joys = joysticks
arcade.set_background_color(arcade.color.WHITE)
if len(joysticks) > 0:
self.joy = joysticks[0]
self.joy_method_names = joy_method_names
self.axis_ranges = {}
def config_axis(self, joy_axis_label, method_name):
self.msg = joy_axis_label
self.axis_ranges = {a: 0.0 for a in self.JOY_ATTRS}
while max([v for k, v in self.axis_ranges.items()]) < 0.85:
for attr, farthest_val in self.axis_ranges.items():
cur_val = getattr(self.joy, attr)
if abs(cur_val) > abs(farthest_val):
self.axis_ranges[attr] = abs(cur_val)
yield
max_val = 0.0
max_attr = None
for attr, farthest_val in self.axis_ranges.items():
if farthest_val > max_val:
max_attr = attr
max_val = farthest_val
self.msg = f"Registered!"
setattr(pyglet.input.base.Joystick, method_name, property(lambda that: getattr(that, max_attr), None))
# pause briefly after registering an axis
yield from self._pause(self.REGISTRATION_PAUSE)
def joy_config_script(self):
if len(self.joys) == 0:
self.msg = "No joysticks found! Use keyboard controls."
yield from self._pause(self.NO_JOYSTICK_PAUSE)
return
for joy_axis_label, method_name in self.joy_method_names:
yield from self.config_axis(joy_axis_label, method_name)
def on_update(self, delta_time):
try:
next(self.script)
except StopIteration:
self.window.show_view(self.next_view)
def on_draw(self):
arcade.start_render()
arcade.draw_text("Configure your joystick", self.width/2, self.height/2+100,
arcade.color.BLACK, font_size=32, anchor_x="center")
arcade.draw_text(self.msg, self.width/2, self.height/2,
arcade.color.BLACK, font_size=24, anchor_x="center")
def _pause(self, delay):
"""Block a generator from advancing for the given delay. Call with 'yield from self._pause(1.0)"""
start = time.time()
end = start + delay
while time.time() < end:
yield
class InstructionView(arcade.View):
def __init__(self):
super().__init__()
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("Instructions Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.WHITE, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.WHITE, font_size=20, anchor_x="center")
def on_mouse_press(self, _x, _y, _button, _modifiers):
""" If the user presses the mouse button, start the game. """
game_view = GameView()
game_view.setup(level=1)
arcade.set_background_color(arcade.csscolor.BLACK)
try:
pass
except ValueError:
print("music already finished") # ValueError: list.remove(x): x not in list media.Source._players.remove(player)
self.window.show_view(game_view)
def on_update(self, delta_time):
""" Movement and game logic """
#pressed = self.window.joys[0].on_joybutton_press
#print(pressed) # <bound method Joystick.on_joybutton_press of <pyglet.input.base.Joystick object at 0x7f5169264d90>>
#print(type(pressed)) # <class 'method'>
joy_dico = self.window.joys[0]
btns = joy_dico.buttons
print(btns)
#print(type(btns)) # list
print(">>>>")
print(joy_dico.button_controls) # [Button(raw_name=BTN_A), Button(raw_name=BTN_B), Button(raw_name=BTN_X), Button(raw_name=BTN_Y), Button(raw_name=BTN_TL), Button(raw_name=BTN_TR), Button(raw_name=BTN_SELECT), Button(raw_name=BTN_START), Button(raw_name=BTN_MODE), Button(raw_name=BTN_THUMBL), Button(raw_name=BTN_THUMBR)]
print(joy_dico.button_controls[0].__dict__)
print("_______*******")
#print(joy_dico.button_controls.BTN_A)
joy_dico = self.window.joys[0]
BTN_A = joy_dico.button_controls[0]
BTN_B = joy_dico.button_controls[1]
BTN_X = joy_dico.button_controls[2]
BTN_Y = joy_dico.button_controls[3]
BTN_TL = joy_dico.button_controls[4]
BTN_TR = joy_dico.button_controls[5]
BTN_SELECT = joy_dico.button_controls[6]
BTN_START = joy_dico.button_controls[7]
BTN_MODE = joy_dico.button_controls[8]
BTN_THUMBL = joy_dico.button_controls[9]
BTN_THUMBR = joy_dico.button_controls[10]
print(f"\n BTN_A ----> {BTN_A}")
BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]
BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y]
for BTN in BTN_list:
if BTN._value == 1:
print(f"=====> >=====> ====> {BTN.raw_name}")
idx = BTN_list.index(BTN)
BTN_fn_list[idx]()
def joy_A(self):
print("\n... A ... \n")
def joy_B(self):
print("\n... B ... \n")
def joy_X(self):
print("\n... X ... \n")
def joy_Y(self):
print("\n... Y ... \n")
class GameView(arcade.View):
def __init__(self):
super().__init__()
pass
def setup(self, level):
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("GameView Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.GREEN, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.RED, font_size=20, anchor_x="center")
def on_update(self, delta_time):
#if self.joy
if self.window.joys:
joy = self.window.joys[0]
print(joy.__dict__)
print(joy.buttons)
# Joystick input - movement
#move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
move_x, move_y, move_angle = get_joy_position(joy.move_stick_x, joy.move_stick_y)
#move_x, move_y, move_angle = get_joy_position(self.window.joys.move_stick_x, self.window.joys.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
def main():
#window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE, fullscreen=False)
#start_view = GameView()
#window.show_view(start_view)
#start_view.setup()
#start_view.setup(level=1)
start_view = InstructionView()
#start_view = GameView()
window.show_view(start_view)
window.joys = arcade.get_joysticks()
for j in window.joys:
j.open()
#joy_config_method_names = (
# ("Move the movement stick left or right", "move_stick_x"),
# ("Move the movement stick up or down", "move_stick_y"),
# ("Move the shooting stick left or right", "shoot_stick_x"),
# ("Move the shooting stick up or down", "shoot_stick_y"),
#)
game = InstructionView()
print(dir(window.joys[0]))
print("---")
print(window.joys[0].__dict__)
print("iii---")
print("\n\n\n\n\n")
#window.show_view(JoyConfigView(joy_config_method_names, window.joys, game, SCREEN_WIDTH, SCREEN_HEIGHT))
arcade.run()
if __name__ == "__main__":
main() | self.spawn_bullet(270+45) | conditional_block |
test4.py | import arcade
import random
import math
import os
#from arcade.experimental.camera import Camera2D
from arcade import Point, Vector
from arcade.utils import _Vec2
import time
import pyglet
from typing import cast
import pprint
import pyglet.input.base
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 700
SCREEN_TITLE = "test joystick"
def dump_obj(obj):
for key in sorted(vars(obj)):
val = getattr(obj, key)
print("{:30} = {} ({})".format(key, val, type(val).__name__))
def dump_joystick(joy):
print("========== {}".format(joy))
print("x {}".format(joy.x)) | print("z {}".format(joy.z))
print("rx {}".format(joy.rx))
print("ry {}".format(joy.ry))
print("rz {}".format(joy.rz))
print("hat_x {}".format(joy.hat_x))
print("hat_y {}".format(joy.hat_y))
print("buttons {}".format(joy.buttons))
print("========== Extra joy")
dump_obj(joy)
print("========== Extra joy.device")
dump_obj(joy.device)
print("========== pprint joy")
pprint.pprint(joy)
print("========== pprint joy.device")
pprint.pprint(joy.device)
def dump_joystick_state(ticks, joy):
# print("{:5.2f} {:5.2f} {:>20} {:5}_".format(1.234567, -8.2757272903, "hello", str(True)))
fmt_str = "{:6d} "
num_fmts = ["{:5.2f}"] * 6
fmt_str += " ".join(num_fmts)
fmt_str += " {:2d} {:2d} {}"
buttons = " ".join(["{:5}".format(str(b)) for b in joy.buttons])
print(fmt_str.format(ticks,
joy.x,
joy.y,
joy.z,
joy.rx,
joy.ry,
joy.rz,
joy.hat_x,
joy.hat_y,
buttons))
def get_joy_position(x, y):
"""Given position of joystick axes, return (x, y, angle_in_degrees).
If movement is not outside of deadzone, return (None, None, None)"""
if x > JOY_DEADZONE or x < -JOY_DEADZONE or y > JOY_DEADZONE or y < -JOY_DEADZONE:
y = -y
rad = math.atan2(y, x)
angle = math.degrees(rad)
return x, y, angle
return None, None, None
class MyGame(arcade.View):
def __init__(self):
super().__init__()
def on_joybutton_press(self, _joystick, button):
""" Handle button-down event for the joystick """
print("Button {} down".format(button))
if button == JUMPBTN:
iced_ground_contact_list = arcade.check_for_collision_with_list(self.player_list[0], self.lowfric_list)
if iced_ground_contact_list == []:
if self.physics_engine.is_on_ground(self.player_sprite) and not self.player_sprite.is_on_ladder:
# She is! Go ahead and jump
impulse = (0, PLAYER_JUMP_IMPULSE)
self.physics_engine.apply_impulse(self.player_sprite, impulse)
def on_show(self):
arcade.set_background_color(arcade.color.DARK_MIDNIGHT_BLUE)
joys = self.window.joys
for joy in joys:
dump_joystick(joy)
if joys:
self.joy = joys[0]
print("Using joystick controls: {}".format(self.joy.device))
arcade.window_commands.schedule(self.debug_joy_state, 0.1)
if not self.joy:
print("No joystick present, using keyboard controls")
arcade.window_commands.schedule(self.spawn_enemy, ENEMY_SPAWN_INTERVAL)
def debug_joy_state(self, _delta_time):
dump_joystick_state(self.tick, self.joy)
def on_update(self, delta_time):
self.tick += 1
if self.game_over:
return
self.bullet_cooldown += 1
for enemy in self.enemy_list:
cast(Enemy, enemy).follow_sprite(self.player)
if self.joy:
# Joystick input - movement
move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
else:
# Keyboard input - shooting
if self.player.shoot_right_pressed and self.player.shoot_up_pressed:
self.spawn_bullet(0+45)
elif self.player.shoot_up_pressed and self.player.shoot_left_pressed:
self.spawn_bullet(90+45)
elif self.player.shoot_left_pressed and self.player.shoot_down_pressed:
self.spawn_bullet(180+45)
elif self.player.shoot_down_pressed and self.player.shoot_right_pressed:
self.spawn_bullet(270+45)
elif self.player.shoot_right_pressed:
self.spawn_bullet(0)
elif self.player.shoot_up_pressed:
self.spawn_bullet(90)
elif self.player.shoot_left_pressed:
self.spawn_bullet(180)
elif self.player.shoot_down_pressed:
self.spawn_bullet(270)
class JoyConfigView(arcade.View):
"""A View that allows a user to interactively configure their joystick"""
REGISTRATION_PAUSE = 1.5
NO_JOYSTICK_PAUSE = 2.0
JOY_ATTRS = ("x", "y", "z", "rx", "ry", "rz")
def __init__(self, joy_method_names, joysticks, next_view, width, height):
super().__init__()
self.next_view = next_view
self.width = width
self.height = height
self.msg = ""
self.script = self.joy_config_script()
self.joys = joysticks
arcade.set_background_color(arcade.color.WHITE)
if len(joysticks) > 0:
self.joy = joysticks[0]
self.joy_method_names = joy_method_names
self.axis_ranges = {}
def config_axis(self, joy_axis_label, method_name):
self.msg = joy_axis_label
self.axis_ranges = {a: 0.0 for a in self.JOY_ATTRS}
while max([v for k, v in self.axis_ranges.items()]) < 0.85:
for attr, farthest_val in self.axis_ranges.items():
cur_val = getattr(self.joy, attr)
if abs(cur_val) > abs(farthest_val):
self.axis_ranges[attr] = abs(cur_val)
yield
max_val = 0.0
max_attr = None
for attr, farthest_val in self.axis_ranges.items():
if farthest_val > max_val:
max_attr = attr
max_val = farthest_val
self.msg = f"Registered!"
setattr(pyglet.input.base.Joystick, method_name, property(lambda that: getattr(that, max_attr), None))
# pause briefly after registering an axis
yield from self._pause(self.REGISTRATION_PAUSE)
def joy_config_script(self):
if len(self.joys) == 0:
self.msg = "No joysticks found! Use keyboard controls."
yield from self._pause(self.NO_JOYSTICK_PAUSE)
return
for joy_axis_label, method_name in self.joy_method_names:
yield from self.config_axis(joy_axis_label, method_name)
def on_update(self, delta_time):
try:
next(self.script)
except StopIteration:
self.window.show_view(self.next_view)
def on_draw(self):
arcade.start_render()
arcade.draw_text("Configure your joystick", self.width/2, self.height/2+100,
arcade.color.BLACK, font_size=32, anchor_x="center")
arcade.draw_text(self.msg, self.width/2, self.height/2,
arcade.color.BLACK, font_size=24, anchor_x="center")
def _pause(self, delay):
"""Block a generator from advancing for the given delay. Call with 'yield from self._pause(1.0)"""
start = time.time()
end = start + delay
while time.time() < end:
yield
class InstructionView(arcade.View):
def __init__(self):
super().__init__()
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("Instructions Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.WHITE, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.WHITE, font_size=20, anchor_x="center")
def on_mouse_press(self, _x, _y, _button, _modifiers):
""" If the user presses the mouse button, start the game. """
game_view = GameView()
game_view.setup(level=1)
arcade.set_background_color(arcade.csscolor.BLACK)
try:
pass
except ValueError:
print("music already finished") # ValueError: list.remove(x): x not in list media.Source._players.remove(player)
self.window.show_view(game_view)
def on_update(self, delta_time):
""" Movement and game logic """
#pressed = self.window.joys[0].on_joybutton_press
#print(pressed) # <bound method Joystick.on_joybutton_press of <pyglet.input.base.Joystick object at 0x7f5169264d90>>
#print(type(pressed)) # <class 'method'>
joy_dico = self.window.joys[0]
btns = joy_dico.buttons
print(btns)
#print(type(btns)) # list
print(">>>>")
print(joy_dico.button_controls) # [Button(raw_name=BTN_A), Button(raw_name=BTN_B), Button(raw_name=BTN_X), Button(raw_name=BTN_Y), Button(raw_name=BTN_TL), Button(raw_name=BTN_TR), Button(raw_name=BTN_SELECT), Button(raw_name=BTN_START), Button(raw_name=BTN_MODE), Button(raw_name=BTN_THUMBL), Button(raw_name=BTN_THUMBR)]
print(joy_dico.button_controls[0].__dict__)
print("_______*******")
#print(joy_dico.button_controls.BTN_A)
joy_dico = self.window.joys[0]
BTN_A = joy_dico.button_controls[0]
BTN_B = joy_dico.button_controls[1]
BTN_X = joy_dico.button_controls[2]
BTN_Y = joy_dico.button_controls[3]
BTN_TL = joy_dico.button_controls[4]
BTN_TR = joy_dico.button_controls[5]
BTN_SELECT = joy_dico.button_controls[6]
BTN_START = joy_dico.button_controls[7]
BTN_MODE = joy_dico.button_controls[8]
BTN_THUMBL = joy_dico.button_controls[9]
BTN_THUMBR = joy_dico.button_controls[10]
print(f"\n BTN_A ----> {BTN_A}")
BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]
BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y]
for BTN in BTN_list:
if BTN._value == 1:
print(f"=====> >=====> ====> {BTN.raw_name}")
idx = BTN_list.index(BTN)
BTN_fn_list[idx]()
def joy_A(self):
print("\n... A ... \n")
def joy_B(self):
print("\n... B ... \n")
def joy_X(self):
print("\n... X ... \n")
def joy_Y(self):
print("\n... Y ... \n")
class GameView(arcade.View):
def __init__(self):
super().__init__()
pass
def setup(self, level):
pass
def on_show(self):
""" This is run once when we switch to this view """
arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)
arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)
def on_draw(self):
""" Draw this view """
arcade.start_render()
arcade.draw_text("GameView Screen", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2,
arcade.color.GREEN, font_size=50, anchor_x="center")
arcade.draw_text("Click to advance", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2-75,
arcade.color.RED, font_size=20, anchor_x="center")
def on_update(self, delta_time):
#if self.joy
if self.window.joys:
joy = self.window.joys[0]
print(joy.__dict__)
print(joy.buttons)
# Joystick input - movement
#move_x, move_y, move_angle = get_joy_position(self.joy.move_stick_x, self.joy.move_stick_y)
move_x, move_y, move_angle = get_joy_position(joy.move_stick_x, joy.move_stick_y)
#move_x, move_y, move_angle = get_joy_position(self.window.joys.move_stick_x, self.window.joys.move_stick_y)
if move_angle:
self.player.change_x = move_x * MOVEMENT_SPEED
self.player.change_y = move_y * MOVEMENT_SPEED
self.player.angle = move_angle + ROTATE_OFFSET
else:
self.player.change_x = 0
self.player.change_y = 0
# Joystick input - shooting
shoot_x, shoot_y, shoot_angle = get_joy_position(self.joy.shoot_stick_x, self.joy.shoot_stick_y)
if shoot_angle:
self.spawn_bullet(shoot_angle)
def main():
#window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE, fullscreen=False)
#start_view = GameView()
#window.show_view(start_view)
#start_view.setup()
#start_view.setup(level=1)
start_view = InstructionView()
#start_view = GameView()
window.show_view(start_view)
window.joys = arcade.get_joysticks()
for j in window.joys:
j.open()
#joy_config_method_names = (
# ("Move the movement stick left or right", "move_stick_x"),
# ("Move the movement stick up or down", "move_stick_y"),
# ("Move the shooting stick left or right", "shoot_stick_x"),
# ("Move the shooting stick up or down", "shoot_stick_y"),
#)
game = InstructionView()
print(dir(window.joys[0]))
print("---")
print(window.joys[0].__dict__)
print("iii---")
print("\n\n\n\n\n")
#window.show_view(JoyConfigView(joy_config_method_names, window.joys, game, SCREEN_WIDTH, SCREEN_HEIGHT))
arcade.run()
if __name__ == "__main__":
main() | print("y {}".format(joy.y)) | random_line_split |
ip.py | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import re
import netaddr
from akanda.router import models
from akanda.router.drivers import base
from akanda.router import utils
LOG = logging.getLogger(__name__)
GENERIC_IFNAME = 'ge'
PHYSICAL_INTERFACES = ['lo', 'eth', 'em', 're', 'en', 'vio', 'vtnet']
ULA_PREFIX = 'fdca:3ba5:a17a:acda::/64'
class IPManager(base.Manager):
"""
A class that provides a pythonic interface to unix system network
configuration information.
"""
EXECUTABLE = '/sbin/ip'
def __init__(self, root_helper='sudo'):
"""Initializes resources for the IPManager class"""
super(IPManager, self).__init__(root_helper)
self.next_generic_index = 0
self.host_mapping = {}
self.generic_mapping = {}
def ensure_mapping(self):
"""
Creates a mapping of generic interface names (e.g., ge0, ge1) to
physical interface names (eth1, eth2) if one does not already exist.
"""
if not self.host_mapping:
self.get_interfaces()
def get_interfaces(self):
"""
Returns a list of the available network interfaces. This information
is obtained through the `ip addr show` system command.
"""
interfaces = _parse_interfaces(self.do('addr', 'show'),
filters=PHYSICAL_INTERFACES)
interfaces.sort(key=lambda x: x.ifname)
for i in interfaces:
if i.ifname not in self.host_mapping:
generic_name = 'ge%d' % self.next_generic_index
self.host_mapping[i.ifname] = generic_name
self.next_generic_index += 1
# change ifname to generic version
i.ifname = self.host_mapping[i.ifname]
self.generic_mapping = dict((v, k) for k, v in
self.host_mapping.iteritems())
return interfaces
def get_interface(self, ifname):
"""
Returns network configuration information for the requested network
interface. This information is obtained through the system command `ip
addr show <ifname>`.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
:rtype: akanda.router.model.Interface
"""
real_ifname = self.generic_to_host(ifname)
retval = _parse_interface(self.do('addr', 'show', real_ifname))
retval.ifname = ifname
return retval
def is_valid(self, ifname):
"""
Validates if the supplied interface is a valid system network
interface. Returns `True` if <ifname> is a valid interface. Returns
`False` if <ifname> is not a valid interface.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
"""
self.ensure_mapping()
return ifname in self.generic_mapping
def generic_to_host(self, generic_name):
"""
Translates a generic interface name into the physical network interface
name.
:param ifname: the generic name to translate, e.g., `ge0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.generic_mapping.get(generic_name)
def host_to_generic(self, real_name):
"""
Translates a physical interface name into the generic network interface
name.
:param ifname: the physical name to translate, e.g., `eth0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.host_mapping.get(real_name)
def update_interfaces(self, interfaces):
"""
Wrapper function that accepts a list of interfaces and iterates over
them, calling update_interface(<interface>) in order to update
their configuration.
"""
for i in interfaces:
self.update_interface(i)
def up(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "up".
:param interface: the interface to mark up
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'up')
return self.get_interface(interface.ifname)
def down(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "down".
:param interface: the interface to mark down
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'down')
def update_interface(self, interface, ignore_link_local=True):
"""
Updates a network interface, particularly its addresses
:param interface: the interface to update
:type interface: akanda.router.models.Interface
:param ignore_link_local: When True, link local addresses will not be
added/removed
:type ignore_link_local: bool
"""
real_ifname = self.generic_to_host(interface.ifname)
old_interface = self.get_interface(interface.ifname)
if ignore_link_local:
interface.addresses = [a for a in interface.addresses
if not a.is_link_local()]
old_interface.addresses = [a for a in old_interface.addresses
if not a.is_link_local()]
# Must update primary before aliases otherwise will lose address
# in case where primary and alias are swapped.
self._update_addresses(real_ifname, interface, old_interface)
def _update_addresses(self, real_ifname, interface, old_interface):
"""
Compare the state of an interface, and add/remove address that have
changed.
:param real_ifname: the name of the interface to modify
:param real_ifname: str
:param interface: the new interface reference
:type interface: akanda.router.models.Interface
:param old_interface: the reference to the current network interface
:type old_interface: akanda.router.models.Interface
"""
def _gen_cmd(cmd, address):
"""
Generates an `ip addr (add|del) <cidr> dev <ifname>` command.
"""
family = {4: 'inet', 6: 'inet6'}[address[0].version]
args = ['addr', cmd, '%s/%s' % (address[0], address[1])]
if family == 'inet' and cmd == 'add':
args += ['brd', '+']
args += ['dev', real_ifname]
if family == 'inet6':
args = ['-6'] + args
return args
add = functools.partial(_gen_cmd, 'add')
delete = functools.partial(_gen_cmd, 'del')
mutator = lambda a: (a.ip, a.prefixlen)
self._update_set(real_ifname, interface, old_interface,
'all_addresses', add, delete, mutator)
def _update_set(self, real_ifname, interface, old_interface, attribute,
fmt_args_add, fmt_args_delete, mutator=lambda x: x):
"""
Compare the set of addresses (the current set and the desired set)
for an interface and generate a series of `ip addr add` and `ip addr
del` commands.
"""
next_set = set(mutator(i) for i in getattr(interface, attribute))
prev_set = set(mutator(i) for i in getattr(old_interface, attribute))
if next_set == prev_set:
return
for item in (next_set - prev_set):
self.sudo(*fmt_args_add(item))
self.up(interface)
for item in (prev_set - next_set):
self.sudo(*fmt_args_delete(item))
ip, prefix = item
if ip.version == 4:
self._delete_conntrack_state(ip)
def get_management_address(self, ensure_configuration=False):
"""
Get the network interface address that will be used for management
traffic.
:param ensure_configuration: when `True`, this method will ensure that
the management address if configured on
`ge0`.
:rtype: str
"""
primary = self.get_interface(GENERIC_IFNAME + '0')
prefix, prefix_len = ULA_PREFIX.split('/', 1)
eui = netaddr.EUI(primary.lladdr)
ip_str = str(eui.ipv6_link_local()).replace('fe80::', prefix[:-1])
if not primary.is_up:
self.up(primary)
ip = netaddr.IPNetwork('%s/%s' % (ip_str, prefix_len))
if ensure_configuration and ip not in primary.addresses:
primary.addresses.append(ip)
self.update_interface(primary)
return ip_str
def update_default_gateway(self, config):
"""
Sets the default gateway for v4 and v6 via the use of `ip route add`.
:type config: akanda.router.models.Configuration
"""
# Track whether we have set the default gateways, by IP
# version.
gw_set = {
4: False,
6: False,
}
ifname = None
for net in config.networks:
if not net.is_external_network:
continue
ifname = net.interface.ifname
# The default v4 gateway is pulled out as a special case
# because we only want one but we might have multiple v4
# subnets on the external network. However, sometimes the RUG
# can't figure out what that value is, because it thinks we
# don't have any external IP addresses, yet. In that case, it
# doesn't give us a default.
if config.default_v4_gateway:
self._set_default_gateway(config.default_v4_gateway, ifname)
gw_set[4] = True
# Look through our networks and make sure we have a default
# gateway set for each IP version, if we have an IP for that
# version on the external net. If we haven't already set the
# v4 gateway, this picks the gateway for the first subnet we
# find, which might be wrong.
for net in config.networks:
if not net.is_external_network:
continue
for subnet in net.subnets:
if subnet.gateway_ip and not gw_set[subnet.gateway_ip.version]:
self._set_default_gateway(
subnet.gateway_ip,
net.interface.ifname
)
gw_set[subnet.gateway_ip.version] = True
def update_host_routes(self, config, cache):
"""
Update the network routes. This is primarily used to support static
routes that users provide to neutron.
:type config: akanda.router.models.Configuration
:param cache: a dbm cache for storing the "last applied routes".
Because Linux does not differentiate user-provided routes
from, for example, the default gateway, this is necessary
so that subsequent calls to this method can determine
"what changed" for the user-provided routes.
:type cache: dogpile.cache.region.CacheRegion
"""
db = cache.get_or_create('host_routes', lambda: {})
for net in config.networks:
# For each subnet...
for subnet in net.subnets:
cidr = str(subnet.cidr)
# determine the set of previously written routes for this cidr
if cidr not in db:
db[cidr] = set()
current = db[cidr]
# build a set of new routes for this cidr
latest = set()
for r in subnet.host_routes:
latest.add((r.destination, r.next_hop))
# If the set of previously written routes contains routes that
# aren't defined in the new config, run commands to delete them
for x in current - latest:
if self._alter_route(net.interface.ifname, 'del', *x):
current.remove(x)
# If the new config contains routes that aren't defined in the
# set of previously written routes, run commands to add them
for x in latest - current:
if self._alter_route(net.interface.ifname, 'add', *x):
current.add(x)
if not current:
del db[cidr]
cache.set('host_routes', db)
def _get_default_gateway(self, version):
"""
Gets the default gateway.
:param version: the IP version, 4 or 6
:type version: int
:rtype: str
"""
try:
cmd_out = self.sudo('-%s' % version, 'route', 'show')
except:
# assume the route is missing and use defaults
pass
else:
for l in cmd_out.splitlines():
l = l.strip()
if l.startswith('default'):
match = re.search('via (?P<gateway>[^ ]+)', l)
if match:
return match.group('gateway')
def _set_default_gateway(self, gateway_ip, ifname):
"""
Sets the default gateway.
:param gateway_ip: the IP address to set as the default gateway_ip
:type gateway_ip: netaddr.IPAddress
:param ifname: the interface name (in our case, of the external
network)
:type ifname: str
"""
version = 4
if gateway_ip.version == 6:
version = 6
current = self._get_default_gateway(version)
desired = str(gateway_ip)
ifname = self.generic_to_host(ifname)
if current and current != desired:
# Remove the current gateway and add the desired one
self.sudo(
'-%s' % version, 'route', 'del', 'default', 'via', current,
'dev', ifname
)
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
if not current:
# Add the desired gateway
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
def _alter_route(self, ifname, action, destination, next_hop):
"""
Apply/remove a custom (generally, user-supplied) route using the `ip
route add/delete` command.
:param ifname: The name of the interface on which to alter the route
:type ifname: str
:param action: The action, 'add' or 'del'
:type action: str
:param destination: The destination CIDR
:type destination: netaddr.IPNetwork
:param next_hop: The next hop IP addressj
:type next_hop: netaddr.IPAddress
"""
version = destination.version
ifname = self.generic_to_host(ifname)
try:
LOG.debug(self.sudo(
'-%s' % version, 'route', action, str(destination), 'via',
str(next_hop), 'dev', ifname
))
return True
except RuntimeError as e:
# Since these are user-supplied custom routes, it's very possible
# that adding/removing them will fail. A failure to apply one of
# these custom rules, however, should *not* cause an overall router
# failure.
LOG.warn('Route could not be %sed: %s' % (action, unicode(e)))
return False
def disable_duplicate_address_detection(self, network):
"""
Disabled duplicate address detection for a specific interface.
:type network: akanda.models.Network
"""
# For non-external networks, duplicate address detection isn't
# necessary (and it sometimes results in race conditions for services
# that attempt to bind to addresses before they're ready).
if network.network_type != network.TYPE_EXTERNAL:
real_ifname = self.generic_to_host(network.interface.ifname)
try:
utils.execute([
'sysctl', '-w', 'net.ipv6.conf.%s.accept_dad=0'
% real_ifname
], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed to disable v6 dad on %s' % real_ifname
)
def _delete_conntrack_state(self, ip):
"""
Explicitly remove an IP from in-kernel connection tracking.
:param ip: The IP address to remove
:type ip: netaddr.IPAddress
"""
# If no flow entries are deleted, `conntrack -D` will return 1
try:
utils.execute(['conntrack', '-D', '-d', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting ingress connection state of %s' % ip
)
try:
utils.execute(['conntrack', '-D', '-q', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting egress connection state of %s' % ip
)
def get_rug_address():
""" Return the RUG address """
net = netaddr.IPNetwork(ULA_PREFIX)
return str(netaddr.IPAddress(net.first + 1))
def _parse_interfaces(data, filters=None):
"""
Parse the output of `ip addr show`.
:param data: the output of `ip addr show`
:type data: str
:param filter: a list of valid interface names to match on
:type data: list of str
:rtype: list of akanda.router.models.Interface
"""
retval = []
for iface_data in re.split('(^|\n)(?=[0-9]: \w+\d{0,3}:)', data, re.M):
if not iface_data.strip():
continue
number, interface = iface_data.split(': ', 1)
# FIXME (mark): the logic works, but should be more readable
for f in filters or ['']:
if f == '':
break
elif interface.startswith(f) and interface[len(f)].isdigit():
break
else:
continue
retval.append(_parse_interface(iface_data))
return retval
def _parse_interface(data):
"""
Parse details for an interface, given its data from `ip addr show <ifname>`
:rtype: akanda.router.models.Interface
"""
retval = dict(addresses=[])
for line in data.split('\n'):
if line.startswith(' '):
line = line.strip()
if line.startswith('inet'):
retval['addresses'].append(_parse_inet(line))
elif 'link/ether' in line:
retval['lladdr'] = _parse_lladdr(line)
else:
retval.update(_parse_head(line))
return models.Interface.from_dict(retval)
def | (line):
"""
Parse the line of `ip addr show` that contains the interface name, MTU, and
flags.
"""
retval = {}
m = re.match(
'[0-9]+: (?P<if>\w+\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',
line
)
if m:
retval['ifname'] = m.group('if')
retval['mtu'] = int(m.group('mtu'))
retval['flags'] = m.group('flags').split(',')
return retval
def _parse_inet(line):
"""
Parse a line of `ip addr show` that contains an address.
"""
tokens = line.split()
return netaddr.IPNetwork(tokens[1])
def _parse_lladdr(line):
"""
Parse the line of `ip addr show` that contains the hardware address.
"""
tokens = line.split()
return tokens[1]
| _parse_head | identifier_name |
ip.py | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import re
import netaddr
from akanda.router import models
from akanda.router.drivers import base
from akanda.router import utils
LOG = logging.getLogger(__name__)
GENERIC_IFNAME = 'ge'
PHYSICAL_INTERFACES = ['lo', 'eth', 'em', 're', 'en', 'vio', 'vtnet']
ULA_PREFIX = 'fdca:3ba5:a17a:acda::/64'
class IPManager(base.Manager):
"""
A class that provides a pythonic interface to unix system network
configuration information.
"""
EXECUTABLE = '/sbin/ip'
def __init__(self, root_helper='sudo'):
"""Initializes resources for the IPManager class"""
super(IPManager, self).__init__(root_helper)
self.next_generic_index = 0
self.host_mapping = {}
self.generic_mapping = {}
def ensure_mapping(self):
"""
Creates a mapping of generic interface names (e.g., ge0, ge1) to
physical interface names (eth1, eth2) if one does not already exist.
"""
if not self.host_mapping:
self.get_interfaces()
def get_interfaces(self):
"""
Returns a list of the available network interfaces. This information
is obtained through the `ip addr show` system command.
"""
interfaces = _parse_interfaces(self.do('addr', 'show'),
filters=PHYSICAL_INTERFACES)
interfaces.sort(key=lambda x: x.ifname)
for i in interfaces:
if i.ifname not in self.host_mapping:
generic_name = 'ge%d' % self.next_generic_index
self.host_mapping[i.ifname] = generic_name
self.next_generic_index += 1
# change ifname to generic version
i.ifname = self.host_mapping[i.ifname]
self.generic_mapping = dict((v, k) for k, v in
self.host_mapping.iteritems())
return interfaces
def get_interface(self, ifname):
"""
Returns network configuration information for the requested network
interface. This information is obtained through the system command `ip
addr show <ifname>`.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
:rtype: akanda.router.model.Interface
"""
real_ifname = self.generic_to_host(ifname)
retval = _parse_interface(self.do('addr', 'show', real_ifname))
retval.ifname = ifname
return retval
def is_valid(self, ifname):
"""
Validates if the supplied interface is a valid system network
interface. Returns `True` if <ifname> is a valid interface. Returns
`False` if <ifname> is not a valid interface.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
"""
self.ensure_mapping()
return ifname in self.generic_mapping
def generic_to_host(self, generic_name):
"""
Translates a generic interface name into the physical network interface
name.
:param ifname: the generic name to translate, e.g., `ge0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.generic_mapping.get(generic_name)
def host_to_generic(self, real_name):
"""
Translates a physical interface name into the generic network interface
name.
:param ifname: the physical name to translate, e.g., `eth0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.host_mapping.get(real_name)
def update_interfaces(self, interfaces):
"""
Wrapper function that accepts a list of interfaces and iterates over
them, calling update_interface(<interface>) in order to update
their configuration.
"""
for i in interfaces:
self.update_interface(i)
def up(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "up".
:param interface: the interface to mark up
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'up')
return self.get_interface(interface.ifname)
def down(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "down".
:param interface: the interface to mark down
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'down')
def update_interface(self, interface, ignore_link_local=True):
"""
Updates a network interface, particularly its addresses
:param interface: the interface to update
:type interface: akanda.router.models.Interface
:param ignore_link_local: When True, link local addresses will not be
added/removed
:type ignore_link_local: bool
"""
real_ifname = self.generic_to_host(interface.ifname)
old_interface = self.get_interface(interface.ifname)
if ignore_link_local:
interface.addresses = [a for a in interface.addresses
if not a.is_link_local()]
old_interface.addresses = [a for a in old_interface.addresses
if not a.is_link_local()]
# Must update primary before aliases otherwise will lose address
# in case where primary and alias are swapped.
self._update_addresses(real_ifname, interface, old_interface)
def _update_addresses(self, real_ifname, interface, old_interface):
"""
Compare the state of an interface, and add/remove address that have
changed.
:param real_ifname: the name of the interface to modify
:param real_ifname: str
:param interface: the new interface reference
:type interface: akanda.router.models.Interface
:param old_interface: the reference to the current network interface
:type old_interface: akanda.router.models.Interface
"""
def _gen_cmd(cmd, address):
"""
Generates an `ip addr (add|del) <cidr> dev <ifname>` command.
"""
family = {4: 'inet', 6: 'inet6'}[address[0].version]
args = ['addr', cmd, '%s/%s' % (address[0], address[1])]
if family == 'inet' and cmd == 'add':
args += ['brd', '+']
args += ['dev', real_ifname]
if family == 'inet6':
args = ['-6'] + args
return args
add = functools.partial(_gen_cmd, 'add')
delete = functools.partial(_gen_cmd, 'del')
mutator = lambda a: (a.ip, a.prefixlen)
self._update_set(real_ifname, interface, old_interface,
'all_addresses', add, delete, mutator)
def _update_set(self, real_ifname, interface, old_interface, attribute,
fmt_args_add, fmt_args_delete, mutator=lambda x: x):
"""
Compare the set of addresses (the current set and the desired set)
for an interface and generate a series of `ip addr add` and `ip addr
del` commands.
"""
next_set = set(mutator(i) for i in getattr(interface, attribute))
prev_set = set(mutator(i) for i in getattr(old_interface, attribute))
if next_set == prev_set:
return
for item in (next_set - prev_set):
self.sudo(*fmt_args_add(item))
self.up(interface)
for item in (prev_set - next_set):
self.sudo(*fmt_args_delete(item))
ip, prefix = item
if ip.version == 4:
self._delete_conntrack_state(ip)
def get_management_address(self, ensure_configuration=False):
"""
Get the network interface address that will be used for management
traffic.
:param ensure_configuration: when `True`, this method will ensure that
the management address if configured on
`ge0`.
:rtype: str
"""
primary = self.get_interface(GENERIC_IFNAME + '0')
prefix, prefix_len = ULA_PREFIX.split('/', 1)
eui = netaddr.EUI(primary.lladdr)
ip_str = str(eui.ipv6_link_local()).replace('fe80::', prefix[:-1])
if not primary.is_up:
self.up(primary)
ip = netaddr.IPNetwork('%s/%s' % (ip_str, prefix_len))
if ensure_configuration and ip not in primary.addresses:
primary.addresses.append(ip)
self.update_interface(primary)
return ip_str
def update_default_gateway(self, config):
"""
Sets the default gateway for v4 and v6 via the use of `ip route add`.
:type config: akanda.router.models.Configuration
"""
# Track whether we have set the default gateways, by IP
# version.
gw_set = {
4: False,
6: False,
}
ifname = None
for net in config.networks:
if not net.is_external_network:
continue
ifname = net.interface.ifname
# The default v4 gateway is pulled out as a special case
# because we only want one but we might have multiple v4
# subnets on the external network. However, sometimes the RUG
# can't figure out what that value is, because it thinks we
# don't have any external IP addresses, yet. In that case, it
# doesn't give us a default.
if config.default_v4_gateway:
self._set_default_gateway(config.default_v4_gateway, ifname)
gw_set[4] = True
# Look through our networks and make sure we have a default
# gateway set for each IP version, if we have an IP for that
# version on the external net. If we haven't already set the
# v4 gateway, this picks the gateway for the first subnet we
# find, which might be wrong.
for net in config.networks:
if not net.is_external_network:
continue
for subnet in net.subnets:
if subnet.gateway_ip and not gw_set[subnet.gateway_ip.version]:
self._set_default_gateway(
subnet.gateway_ip,
net.interface.ifname
)
gw_set[subnet.gateway_ip.version] = True
def update_host_routes(self, config, cache):
"""
Update the network routes. This is primarily used to support static
routes that users provide to neutron.
:type config: akanda.router.models.Configuration
:param cache: a dbm cache for storing the "last applied routes".
Because Linux does not differentiate user-provided routes
from, for example, the default gateway, this is necessary
so that subsequent calls to this method can determine
"what changed" for the user-provided routes.
:type cache: dogpile.cache.region.CacheRegion
"""
db = cache.get_or_create('host_routes', lambda: {})
for net in config.networks:
# For each subnet...
for subnet in net.subnets:
cidr = str(subnet.cidr)
# determine the set of previously written routes for this cidr
if cidr not in db:
db[cidr] = set()
current = db[cidr]
# build a set of new routes for this cidr
latest = set()
for r in subnet.host_routes:
latest.add((r.destination, r.next_hop))
# If the set of previously written routes contains routes that
# aren't defined in the new config, run commands to delete them
for x in current - latest:
if self._alter_route(net.interface.ifname, 'del', *x):
current.remove(x)
# If the new config contains routes that aren't defined in the
# set of previously written routes, run commands to add them
for x in latest - current:
if self._alter_route(net.interface.ifname, 'add', *x):
current.add(x)
if not current:
del db[cidr]
cache.set('host_routes', db)
def _get_default_gateway(self, version):
"""
Gets the default gateway.
:param version: the IP version, 4 or 6
:type version: int
:rtype: str
"""
try:
cmd_out = self.sudo('-%s' % version, 'route', 'show')
except:
# assume the route is missing and use defaults
pass
else:
for l in cmd_out.splitlines():
l = l.strip()
if l.startswith('default'):
match = re.search('via (?P<gateway>[^ ]+)', l)
if match:
return match.group('gateway')
def _set_default_gateway(self, gateway_ip, ifname):
"""
Sets the default gateway.
:param gateway_ip: the IP address to set as the default gateway_ip
:type gateway_ip: netaddr.IPAddress
:param ifname: the interface name (in our case, of the external
network)
:type ifname: str
"""
version = 4
if gateway_ip.version == 6:
version = 6
current = self._get_default_gateway(version)
desired = str(gateway_ip)
ifname = self.generic_to_host(ifname)
if current and current != desired:
# Remove the current gateway and add the desired one
self.sudo(
'-%s' % version, 'route', 'del', 'default', 'via', current,
'dev', ifname
)
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
if not current:
# Add the desired gateway
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
def _alter_route(self, ifname, action, destination, next_hop):
"""
Apply/remove a custom (generally, user-supplied) route using the `ip
route add/delete` command.
:param ifname: The name of the interface on which to alter the route
:type ifname: str
:param action: The action, 'add' or 'del'
:type action: str
:param destination: The destination CIDR
:type destination: netaddr.IPNetwork
:param next_hop: The next hop IP addressj
:type next_hop: netaddr.IPAddress
"""
version = destination.version
ifname = self.generic_to_host(ifname)
try:
LOG.debug(self.sudo(
'-%s' % version, 'route', action, str(destination), 'via',
str(next_hop), 'dev', ifname
))
return True
except RuntimeError as e:
# Since these are user-supplied custom routes, it's very possible
# that adding/removing them will fail. A failure to apply one of
# these custom rules, however, should *not* cause an overall router
# failure.
LOG.warn('Route could not be %sed: %s' % (action, unicode(e)))
return False
def disable_duplicate_address_detection(self, network):
"""
Disabled duplicate address detection for a specific interface.
:type network: akanda.models.Network
"""
# For non-external networks, duplicate address detection isn't
# necessary (and it sometimes results in race conditions for services
# that attempt to bind to addresses before they're ready).
if network.network_type != network.TYPE_EXTERNAL:
real_ifname = self.generic_to_host(network.interface.ifname)
try:
utils.execute([
'sysctl', '-w', 'net.ipv6.conf.%s.accept_dad=0'
% real_ifname
], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed to disable v6 dad on %s' % real_ifname
)
def _delete_conntrack_state(self, ip):
"""
Explicitly remove an IP from in-kernel connection tracking.
:param ip: The IP address to remove
:type ip: netaddr.IPAddress
"""
# If no flow entries are deleted, `conntrack -D` will return 1
try:
utils.execute(['conntrack', '-D', '-d', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting ingress connection state of %s' % ip
)
try:
utils.execute(['conntrack', '-D', '-q', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting egress connection state of %s' % ip
)
def get_rug_address():
""" Return the RUG address """
net = netaddr.IPNetwork(ULA_PREFIX)
return str(netaddr.IPAddress(net.first + 1))
def _parse_interfaces(data, filters=None):
"""
Parse the output of `ip addr show`.
:param data: the output of `ip addr show`
:type data: str
:param filter: a list of valid interface names to match on
:type data: list of str
:rtype: list of akanda.router.models.Interface
"""
retval = []
for iface_data in re.split('(^|\n)(?=[0-9]: \w+\d{0,3}:)', data, re.M):
if not iface_data.strip():
continue
number, interface = iface_data.split(': ', 1)
# FIXME (mark): the logic works, but should be more readable
for f in filters or ['']:
if f == '':
break
elif interface.startswith(f) and interface[len(f)].isdigit():
break
else:
continue
retval.append(_parse_interface(iface_data))
return retval
def _parse_interface(data):
|
def _parse_head(line):
"""
Parse the line of `ip addr show` that contains the interface name, MTU, and
flags.
"""
retval = {}
m = re.match(
'[0-9]+: (?P<if>\w+\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',
line
)
if m:
retval['ifname'] = m.group('if')
retval['mtu'] = int(m.group('mtu'))
retval['flags'] = m.group('flags').split(',')
return retval
def _parse_inet(line):
"""
Parse a line of `ip addr show` that contains an address.
"""
tokens = line.split()
return netaddr.IPNetwork(tokens[1])
def _parse_lladdr(line):
"""
Parse the line of `ip addr show` that contains the hardware address.
"""
tokens = line.split()
return tokens[1]
| """
Parse details for an interface, given its data from `ip addr show <ifname>`
:rtype: akanda.router.models.Interface
"""
retval = dict(addresses=[])
for line in data.split('\n'):
if line.startswith(' '):
line = line.strip()
if line.startswith('inet'):
retval['addresses'].append(_parse_inet(line))
elif 'link/ether' in line:
retval['lladdr'] = _parse_lladdr(line)
else:
retval.update(_parse_head(line))
return models.Interface.from_dict(retval) | identifier_body |
ip.py | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import re
import netaddr
from akanda.router import models
from akanda.router.drivers import base
from akanda.router import utils
LOG = logging.getLogger(__name__)
GENERIC_IFNAME = 'ge'
PHYSICAL_INTERFACES = ['lo', 'eth', 'em', 're', 'en', 'vio', 'vtnet']
ULA_PREFIX = 'fdca:3ba5:a17a:acda::/64'
class IPManager(base.Manager):
"""
A class that provides a pythonic interface to unix system network
configuration information.
"""
EXECUTABLE = '/sbin/ip'
def __init__(self, root_helper='sudo'):
"""Initializes resources for the IPManager class"""
super(IPManager, self).__init__(root_helper)
self.next_generic_index = 0
self.host_mapping = {}
self.generic_mapping = {}
def ensure_mapping(self):
"""
Creates a mapping of generic interface names (e.g., ge0, ge1) to
physical interface names (eth1, eth2) if one does not already exist.
"""
if not self.host_mapping:
self.get_interfaces()
def get_interfaces(self):
"""
Returns a list of the available network interfaces. This information
is obtained through the `ip addr show` system command.
"""
interfaces = _parse_interfaces(self.do('addr', 'show'),
filters=PHYSICAL_INTERFACES)
interfaces.sort(key=lambda x: x.ifname)
for i in interfaces:
if i.ifname not in self.host_mapping:
generic_name = 'ge%d' % self.next_generic_index
self.host_mapping[i.ifname] = generic_name
self.next_generic_index += 1
# change ifname to generic version
i.ifname = self.host_mapping[i.ifname]
self.generic_mapping = dict((v, k) for k, v in
self.host_mapping.iteritems())
return interfaces
def get_interface(self, ifname):
"""
Returns network configuration information for the requested network
interface. This information is obtained through the system command `ip
addr show <ifname>`.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
:rtype: akanda.router.model.Interface
"""
real_ifname = self.generic_to_host(ifname)
retval = _parse_interface(self.do('addr', 'show', real_ifname))
retval.ifname = ifname
return retval
def is_valid(self, ifname):
"""
Validates if the supplied interface is a valid system network
interface. Returns `True` if <ifname> is a valid interface. Returns
`False` if <ifname> is not a valid interface.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
"""
self.ensure_mapping()
return ifname in self.generic_mapping
def generic_to_host(self, generic_name):
"""
Translates a generic interface name into the physical network interface
name.
:param ifname: the generic name to translate, e.g., `ge0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.generic_mapping.get(generic_name)
def host_to_generic(self, real_name):
"""
Translates a physical interface name into the generic network interface
name.
:param ifname: the physical name to translate, e.g., `eth0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.host_mapping.get(real_name)
def update_interfaces(self, interfaces):
"""
Wrapper function that accepts a list of interfaces and iterates over
them, calling update_interface(<interface>) in order to update
their configuration.
"""
for i in interfaces:
self.update_interface(i)
def up(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "up".
:param interface: the interface to mark up
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'up')
return self.get_interface(interface.ifname)
def down(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "down".
:param interface: the interface to mark down
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'down')
def update_interface(self, interface, ignore_link_local=True):
"""
Updates a network interface, particularly its addresses
:param interface: the interface to update
:type interface: akanda.router.models.Interface
:param ignore_link_local: When True, link local addresses will not be
added/removed
:type ignore_link_local: bool
"""
real_ifname = self.generic_to_host(interface.ifname)
old_interface = self.get_interface(interface.ifname)
if ignore_link_local:
interface.addresses = [a for a in interface.addresses
if not a.is_link_local()]
old_interface.addresses = [a for a in old_interface.addresses
if not a.is_link_local()]
# Must update primary before aliases otherwise will lose address
# in case where primary and alias are swapped.
self._update_addresses(real_ifname, interface, old_interface)
def _update_addresses(self, real_ifname, interface, old_interface):
"""
Compare the state of an interface, and add/remove address that have
changed.
:param real_ifname: the name of the interface to modify
:param real_ifname: str
:param interface: the new interface reference
:type interface: akanda.router.models.Interface
:param old_interface: the reference to the current network interface
:type old_interface: akanda.router.models.Interface
"""
def _gen_cmd(cmd, address):
"""
Generates an `ip addr (add|del) <cidr> dev <ifname>` command.
"""
family = {4: 'inet', 6: 'inet6'}[address[0].version]
args = ['addr', cmd, '%s/%s' % (address[0], address[1])]
if family == 'inet' and cmd == 'add':
args += ['brd', '+']
args += ['dev', real_ifname]
if family == 'inet6':
args = ['-6'] + args
return args
add = functools.partial(_gen_cmd, 'add')
delete = functools.partial(_gen_cmd, 'del')
mutator = lambda a: (a.ip, a.prefixlen)
self._update_set(real_ifname, interface, old_interface,
'all_addresses', add, delete, mutator)
def _update_set(self, real_ifname, interface, old_interface, attribute,
fmt_args_add, fmt_args_delete, mutator=lambda x: x):
"""
Compare the set of addresses (the current set and the desired set)
for an interface and generate a series of `ip addr add` and `ip addr
del` commands.
"""
next_set = set(mutator(i) for i in getattr(interface, attribute))
prev_set = set(mutator(i) for i in getattr(old_interface, attribute))
if next_set == prev_set:
return
for item in (next_set - prev_set):
self.sudo(*fmt_args_add(item))
self.up(interface)
for item in (prev_set - next_set):
self.sudo(*fmt_args_delete(item))
ip, prefix = item
if ip.version == 4:
self._delete_conntrack_state(ip)
def get_management_address(self, ensure_configuration=False):
"""
Get the network interface address that will be used for management
traffic.
:param ensure_configuration: when `True`, this method will ensure that
the management address if configured on
`ge0`.
:rtype: str
"""
primary = self.get_interface(GENERIC_IFNAME + '0')
prefix, prefix_len = ULA_PREFIX.split('/', 1)
eui = netaddr.EUI(primary.lladdr)
ip_str = str(eui.ipv6_link_local()).replace('fe80::', prefix[:-1])
if not primary.is_up:
self.up(primary)
ip = netaddr.IPNetwork('%s/%s' % (ip_str, prefix_len))
if ensure_configuration and ip not in primary.addresses:
primary.addresses.append(ip)
self.update_interface(primary)
return ip_str
def update_default_gateway(self, config):
"""
Sets the default gateway for v4 and v6 via the use of `ip route add`.
:type config: akanda.router.models.Configuration
"""
# Track whether we have set the default gateways, by IP
# version.
gw_set = {
4: False,
6: False,
}
ifname = None
for net in config.networks:
if not net.is_external_network:
continue
ifname = net.interface.ifname
# The default v4 gateway is pulled out as a special case
# because we only want one but we might have multiple v4
# subnets on the external network. However, sometimes the RUG
# can't figure out what that value is, because it thinks we
# don't have any external IP addresses, yet. In that case, it
# doesn't give us a default.
if config.default_v4_gateway:
self._set_default_gateway(config.default_v4_gateway, ifname)
gw_set[4] = True
# Look through our networks and make sure we have a default
# gateway set for each IP version, if we have an IP for that
# version on the external net. If we haven't already set the
# v4 gateway, this picks the gateway for the first subnet we
# find, which might be wrong.
for net in config.networks:
if not net.is_external_network:
continue
for subnet in net.subnets:
if subnet.gateway_ip and not gw_set[subnet.gateway_ip.version]:
self._set_default_gateway(
subnet.gateway_ip,
net.interface.ifname
)
gw_set[subnet.gateway_ip.version] = True
def update_host_routes(self, config, cache):
"""
Update the network routes. This is primarily used to support static
routes that users provide to neutron.
:type config: akanda.router.models.Configuration
:param cache: a dbm cache for storing the "last applied routes".
Because Linux does not differentiate user-provided routes
from, for example, the default gateway, this is necessary
so that subsequent calls to this method can determine
"what changed" for the user-provided routes.
:type cache: dogpile.cache.region.CacheRegion
"""
db = cache.get_or_create('host_routes', lambda: {})
for net in config.networks:
# For each subnet...
for subnet in net.subnets:
cidr = str(subnet.cidr)
# determine the set of previously written routes for this cidr
if cidr not in db:
db[cidr] = set()
current = db[cidr]
# build a set of new routes for this cidr
latest = set()
for r in subnet.host_routes:
latest.add((r.destination, r.next_hop))
# If the set of previously written routes contains routes that
# aren't defined in the new config, run commands to delete them
for x in current - latest:
if self._alter_route(net.interface.ifname, 'del', *x):
current.remove(x)
# If the new config contains routes that aren't defined in the
# set of previously written routes, run commands to add them
for x in latest - current:
if self._alter_route(net.interface.ifname, 'add', *x):
current.add(x)
if not current:
del db[cidr]
cache.set('host_routes', db)
def _get_default_gateway(self, version):
"""
Gets the default gateway.
:param version: the IP version, 4 or 6
:type version: int
:rtype: str
"""
try:
cmd_out = self.sudo('-%s' % version, 'route', 'show') | l = l.strip()
if l.startswith('default'):
match = re.search('via (?P<gateway>[^ ]+)', l)
if match:
return match.group('gateway')
def _set_default_gateway(self, gateway_ip, ifname):
"""
Sets the default gateway.
:param gateway_ip: the IP address to set as the default gateway_ip
:type gateway_ip: netaddr.IPAddress
:param ifname: the interface name (in our case, of the external
network)
:type ifname: str
"""
version = 4
if gateway_ip.version == 6:
version = 6
current = self._get_default_gateway(version)
desired = str(gateway_ip)
ifname = self.generic_to_host(ifname)
if current and current != desired:
# Remove the current gateway and add the desired one
self.sudo(
'-%s' % version, 'route', 'del', 'default', 'via', current,
'dev', ifname
)
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
if not current:
# Add the desired gateway
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
def _alter_route(self, ifname, action, destination, next_hop):
"""
Apply/remove a custom (generally, user-supplied) route using the `ip
route add/delete` command.
:param ifname: The name of the interface on which to alter the route
:type ifname: str
:param action: The action, 'add' or 'del'
:type action: str
:param destination: The destination CIDR
:type destination: netaddr.IPNetwork
:param next_hop: The next hop IP addressj
:type next_hop: netaddr.IPAddress
"""
version = destination.version
ifname = self.generic_to_host(ifname)
try:
LOG.debug(self.sudo(
'-%s' % version, 'route', action, str(destination), 'via',
str(next_hop), 'dev', ifname
))
return True
except RuntimeError as e:
# Since these are user-supplied custom routes, it's very possible
# that adding/removing them will fail. A failure to apply one of
# these custom rules, however, should *not* cause an overall router
# failure.
LOG.warn('Route could not be %sed: %s' % (action, unicode(e)))
return False
def disable_duplicate_address_detection(self, network):
"""
Disabled duplicate address detection for a specific interface.
:type network: akanda.models.Network
"""
# For non-external networks, duplicate address detection isn't
# necessary (and it sometimes results in race conditions for services
# that attempt to bind to addresses before they're ready).
if network.network_type != network.TYPE_EXTERNAL:
real_ifname = self.generic_to_host(network.interface.ifname)
try:
utils.execute([
'sysctl', '-w', 'net.ipv6.conf.%s.accept_dad=0'
% real_ifname
], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed to disable v6 dad on %s' % real_ifname
)
def _delete_conntrack_state(self, ip):
"""
Explicitly remove an IP from in-kernel connection tracking.
:param ip: The IP address to remove
:type ip: netaddr.IPAddress
"""
# If no flow entries are deleted, `conntrack -D` will return 1
try:
utils.execute(['conntrack', '-D', '-d', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting ingress connection state of %s' % ip
)
try:
utils.execute(['conntrack', '-D', '-q', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting egress connection state of %s' % ip
)
def get_rug_address():
""" Return the RUG address """
net = netaddr.IPNetwork(ULA_PREFIX)
return str(netaddr.IPAddress(net.first + 1))
def _parse_interfaces(data, filters=None):
"""
Parse the output of `ip addr show`.
:param data: the output of `ip addr show`
:type data: str
:param filter: a list of valid interface names to match on
:type data: list of str
:rtype: list of akanda.router.models.Interface
"""
retval = []
for iface_data in re.split('(^|\n)(?=[0-9]: \w+\d{0,3}:)', data, re.M):
if not iface_data.strip():
continue
number, interface = iface_data.split(': ', 1)
# FIXME (mark): the logic works, but should be more readable
for f in filters or ['']:
if f == '':
break
elif interface.startswith(f) and interface[len(f)].isdigit():
break
else:
continue
retval.append(_parse_interface(iface_data))
return retval
def _parse_interface(data):
"""
Parse details for an interface, given its data from `ip addr show <ifname>`
:rtype: akanda.router.models.Interface
"""
retval = dict(addresses=[])
for line in data.split('\n'):
if line.startswith(' '):
line = line.strip()
if line.startswith('inet'):
retval['addresses'].append(_parse_inet(line))
elif 'link/ether' in line:
retval['lladdr'] = _parse_lladdr(line)
else:
retval.update(_parse_head(line))
return models.Interface.from_dict(retval)
def _parse_head(line):
"""
Parse the line of `ip addr show` that contains the interface name, MTU, and
flags.
"""
retval = {}
m = re.match(
'[0-9]+: (?P<if>\w+\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',
line
)
if m:
retval['ifname'] = m.group('if')
retval['mtu'] = int(m.group('mtu'))
retval['flags'] = m.group('flags').split(',')
return retval
def _parse_inet(line):
"""
Parse a line of `ip addr show` that contains an address.
"""
tokens = line.split()
return netaddr.IPNetwork(tokens[1])
def _parse_lladdr(line):
"""
Parse the line of `ip addr show` that contains the hardware address.
"""
tokens = line.split()
return tokens[1] | except:
# assume the route is missing and use defaults
pass
else:
for l in cmd_out.splitlines(): | random_line_split |
ip.py | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import re
import netaddr
from akanda.router import models
from akanda.router.drivers import base
from akanda.router import utils
LOG = logging.getLogger(__name__)
GENERIC_IFNAME = 'ge'
PHYSICAL_INTERFACES = ['lo', 'eth', 'em', 're', 'en', 'vio', 'vtnet']
ULA_PREFIX = 'fdca:3ba5:a17a:acda::/64'
class IPManager(base.Manager):
"""
A class that provides a pythonic interface to unix system network
configuration information.
"""
EXECUTABLE = '/sbin/ip'
def __init__(self, root_helper='sudo'):
"""Initializes resources for the IPManager class"""
super(IPManager, self).__init__(root_helper)
self.next_generic_index = 0
self.host_mapping = {}
self.generic_mapping = {}
def ensure_mapping(self):
"""
Creates a mapping of generic interface names (e.g., ge0, ge1) to
physical interface names (eth1, eth2) if one does not already exist.
"""
if not self.host_mapping:
self.get_interfaces()
def get_interfaces(self):
"""
Returns a list of the available network interfaces. This information
is obtained through the `ip addr show` system command.
"""
interfaces = _parse_interfaces(self.do('addr', 'show'),
filters=PHYSICAL_INTERFACES)
interfaces.sort(key=lambda x: x.ifname)
for i in interfaces:
if i.ifname not in self.host_mapping:
generic_name = 'ge%d' % self.next_generic_index
self.host_mapping[i.ifname] = generic_name
self.next_generic_index += 1
# change ifname to generic version
i.ifname = self.host_mapping[i.ifname]
self.generic_mapping = dict((v, k) for k, v in
self.host_mapping.iteritems())
return interfaces
def get_interface(self, ifname):
"""
Returns network configuration information for the requested network
interface. This information is obtained through the system command `ip
addr show <ifname>`.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
:rtype: akanda.router.model.Interface
"""
real_ifname = self.generic_to_host(ifname)
retval = _parse_interface(self.do('addr', 'show', real_ifname))
retval.ifname = ifname
return retval
def is_valid(self, ifname):
"""
Validates if the supplied interface is a valid system network
interface. Returns `True` if <ifname> is a valid interface. Returns
`False` if <ifname> is not a valid interface.
:param ifname: the name of the interface to retrieve, e.g., `eth1`
:type ifname: str
"""
self.ensure_mapping()
return ifname in self.generic_mapping
def generic_to_host(self, generic_name):
"""
Translates a generic interface name into the physical network interface
name.
:param ifname: the generic name to translate, e.g., `ge0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.generic_mapping.get(generic_name)
def host_to_generic(self, real_name):
"""
Translates a physical interface name into the generic network interface
name.
:param ifname: the physical name to translate, e.g., `eth0`
:type ifname: str
:rtype: str
"""
self.ensure_mapping()
return self.host_mapping.get(real_name)
def update_interfaces(self, interfaces):
"""
Wrapper function that accepts a list of interfaces and iterates over
them, calling update_interface(<interface>) in order to update
their configuration.
"""
for i in interfaces:
self.update_interface(i)
def up(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "up".
:param interface: the interface to mark up
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'up')
return self.get_interface(interface.ifname)
def down(self, interface):
"""
Sets the administrative mode for the network link on interface
<interface> to "down".
:param interface: the interface to mark down
:type interface: akanda.router.models.Interface
"""
real_ifname = self.generic_to_host(interface.ifname)
self.sudo('link', 'set', real_ifname, 'down')
def update_interface(self, interface, ignore_link_local=True):
"""
Updates a network interface, particularly its addresses
:param interface: the interface to update
:type interface: akanda.router.models.Interface
:param ignore_link_local: When True, link local addresses will not be
added/removed
:type ignore_link_local: bool
"""
real_ifname = self.generic_to_host(interface.ifname)
old_interface = self.get_interface(interface.ifname)
if ignore_link_local:
interface.addresses = [a for a in interface.addresses
if not a.is_link_local()]
old_interface.addresses = [a for a in old_interface.addresses
if not a.is_link_local()]
# Must update primary before aliases otherwise will lose address
# in case where primary and alias are swapped.
self._update_addresses(real_ifname, interface, old_interface)
def _update_addresses(self, real_ifname, interface, old_interface):
"""
Compare the state of an interface, and add/remove address that have
changed.
:param real_ifname: the name of the interface to modify
:param real_ifname: str
:param interface: the new interface reference
:type interface: akanda.router.models.Interface
:param old_interface: the reference to the current network interface
:type old_interface: akanda.router.models.Interface
"""
def _gen_cmd(cmd, address):
"""
Generates an `ip addr (add|del) <cidr> dev <ifname>` command.
"""
family = {4: 'inet', 6: 'inet6'}[address[0].version]
args = ['addr', cmd, '%s/%s' % (address[0], address[1])]
if family == 'inet' and cmd == 'add':
args += ['brd', '+']
args += ['dev', real_ifname]
if family == 'inet6':
args = ['-6'] + args
return args
add = functools.partial(_gen_cmd, 'add')
delete = functools.partial(_gen_cmd, 'del')
mutator = lambda a: (a.ip, a.prefixlen)
self._update_set(real_ifname, interface, old_interface,
'all_addresses', add, delete, mutator)
def _update_set(self, real_ifname, interface, old_interface, attribute,
fmt_args_add, fmt_args_delete, mutator=lambda x: x):
"""
Compare the set of addresses (the current set and the desired set)
for an interface and generate a series of `ip addr add` and `ip addr
del` commands.
"""
next_set = set(mutator(i) for i in getattr(interface, attribute))
prev_set = set(mutator(i) for i in getattr(old_interface, attribute))
if next_set == prev_set:
return
for item in (next_set - prev_set):
self.sudo(*fmt_args_add(item))
self.up(interface)
for item in (prev_set - next_set):
self.sudo(*fmt_args_delete(item))
ip, prefix = item
if ip.version == 4:
self._delete_conntrack_state(ip)
def get_management_address(self, ensure_configuration=False):
"""
Get the network interface address that will be used for management
traffic.
:param ensure_configuration: when `True`, this method will ensure that
the management address if configured on
`ge0`.
:rtype: str
"""
primary = self.get_interface(GENERIC_IFNAME + '0')
prefix, prefix_len = ULA_PREFIX.split('/', 1)
eui = netaddr.EUI(primary.lladdr)
ip_str = str(eui.ipv6_link_local()).replace('fe80::', prefix[:-1])
if not primary.is_up:
self.up(primary)
ip = netaddr.IPNetwork('%s/%s' % (ip_str, prefix_len))
if ensure_configuration and ip not in primary.addresses:
primary.addresses.append(ip)
self.update_interface(primary)
return ip_str
def update_default_gateway(self, config):
"""
Sets the default gateway for v4 and v6 via the use of `ip route add`.
:type config: akanda.router.models.Configuration
"""
# Track whether we have set the default gateways, by IP
# version.
gw_set = {
4: False,
6: False,
}
ifname = None
for net in config.networks:
if not net.is_external_network:
continue
ifname = net.interface.ifname
# The default v4 gateway is pulled out as a special case
# because we only want one but we might have multiple v4
# subnets on the external network. However, sometimes the RUG
# can't figure out what that value is, because it thinks we
# don't have any external IP addresses, yet. In that case, it
# doesn't give us a default.
if config.default_v4_gateway:
self._set_default_gateway(config.default_v4_gateway, ifname)
gw_set[4] = True
# Look through our networks and make sure we have a default
# gateway set for each IP version, if we have an IP for that
# version on the external net. If we haven't already set the
# v4 gateway, this picks the gateway for the first subnet we
# find, which might be wrong.
for net in config.networks:
if not net.is_external_network:
continue
for subnet in net.subnets:
if subnet.gateway_ip and not gw_set[subnet.gateway_ip.version]:
self._set_default_gateway(
subnet.gateway_ip,
net.interface.ifname
)
gw_set[subnet.gateway_ip.version] = True
def update_host_routes(self, config, cache):
"""
Update the network routes. This is primarily used to support static
routes that users provide to neutron.
:type config: akanda.router.models.Configuration
:param cache: a dbm cache for storing the "last applied routes".
Because Linux does not differentiate user-provided routes
from, for example, the default gateway, this is necessary
so that subsequent calls to this method can determine
"what changed" for the user-provided routes.
:type cache: dogpile.cache.region.CacheRegion
"""
db = cache.get_or_create('host_routes', lambda: {})
for net in config.networks:
# For each subnet...
for subnet in net.subnets:
cidr = str(subnet.cidr)
# determine the set of previously written routes for this cidr
if cidr not in db:
db[cidr] = set()
current = db[cidr]
# build a set of new routes for this cidr
latest = set()
for r in subnet.host_routes:
latest.add((r.destination, r.next_hop))
# If the set of previously written routes contains routes that
# aren't defined in the new config, run commands to delete them
for x in current - latest:
if self._alter_route(net.interface.ifname, 'del', *x):
current.remove(x)
# If the new config contains routes that aren't defined in the
# set of previously written routes, run commands to add them
for x in latest - current:
if self._alter_route(net.interface.ifname, 'add', *x):
current.add(x)
if not current:
del db[cidr]
cache.set('host_routes', db)
def _get_default_gateway(self, version):
"""
Gets the default gateway.
:param version: the IP version, 4 or 6
:type version: int
:rtype: str
"""
try:
cmd_out = self.sudo('-%s' % version, 'route', 'show')
except:
# assume the route is missing and use defaults
pass
else:
for l in cmd_out.splitlines():
|
def _set_default_gateway(self, gateway_ip, ifname):
"""
Sets the default gateway.
:param gateway_ip: the IP address to set as the default gateway_ip
:type gateway_ip: netaddr.IPAddress
:param ifname: the interface name (in our case, of the external
network)
:type ifname: str
"""
version = 4
if gateway_ip.version == 6:
version = 6
current = self._get_default_gateway(version)
desired = str(gateway_ip)
ifname = self.generic_to_host(ifname)
if current and current != desired:
# Remove the current gateway and add the desired one
self.sudo(
'-%s' % version, 'route', 'del', 'default', 'via', current,
'dev', ifname
)
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
if not current:
# Add the desired gateway
return self.sudo(
'-%s' % version, 'route', 'add', 'default', 'via', desired,
'dev', ifname
)
def _alter_route(self, ifname, action, destination, next_hop):
"""
Apply/remove a custom (generally, user-supplied) route using the `ip
route add/delete` command.
:param ifname: The name of the interface on which to alter the route
:type ifname: str
:param action: The action, 'add' or 'del'
:type action: str
:param destination: The destination CIDR
:type destination: netaddr.IPNetwork
:param next_hop: The next hop IP addressj
:type next_hop: netaddr.IPAddress
"""
version = destination.version
ifname = self.generic_to_host(ifname)
try:
LOG.debug(self.sudo(
'-%s' % version, 'route', action, str(destination), 'via',
str(next_hop), 'dev', ifname
))
return True
except RuntimeError as e:
# Since these are user-supplied custom routes, it's very possible
# that adding/removing them will fail. A failure to apply one of
# these custom rules, however, should *not* cause an overall router
# failure.
LOG.warn('Route could not be %sed: %s' % (action, unicode(e)))
return False
def disable_duplicate_address_detection(self, network):
"""
Disabled duplicate address detection for a specific interface.
:type network: akanda.models.Network
"""
# For non-external networks, duplicate address detection isn't
# necessary (and it sometimes results in race conditions for services
# that attempt to bind to addresses before they're ready).
if network.network_type != network.TYPE_EXTERNAL:
real_ifname = self.generic_to_host(network.interface.ifname)
try:
utils.execute([
'sysctl', '-w', 'net.ipv6.conf.%s.accept_dad=0'
% real_ifname
], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed to disable v6 dad on %s' % real_ifname
)
def _delete_conntrack_state(self, ip):
"""
Explicitly remove an IP from in-kernel connection tracking.
:param ip: The IP address to remove
:type ip: netaddr.IPAddress
"""
# If no flow entries are deleted, `conntrack -D` will return 1
try:
utils.execute(['conntrack', '-D', '-d', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting ingress connection state of %s' % ip
)
try:
utils.execute(['conntrack', '-D', '-q', str(ip)], self.root_helper)
except RuntimeError:
LOG.debug(
'Failed deleting egress connection state of %s' % ip
)
def get_rug_address():
""" Return the RUG address """
net = netaddr.IPNetwork(ULA_PREFIX)
return str(netaddr.IPAddress(net.first + 1))
def _parse_interfaces(data, filters=None):
"""
Parse the output of `ip addr show`.
:param data: the output of `ip addr show`
:type data: str
:param filter: a list of valid interface names to match on
:type data: list of str
:rtype: list of akanda.router.models.Interface
"""
retval = []
for iface_data in re.split('(^|\n)(?=[0-9]: \w+\d{0,3}:)', data, re.M):
if not iface_data.strip():
continue
number, interface = iface_data.split(': ', 1)
# FIXME (mark): the logic works, but should be more readable
for f in filters or ['']:
if f == '':
break
elif interface.startswith(f) and interface[len(f)].isdigit():
break
else:
continue
retval.append(_parse_interface(iface_data))
return retval
def _parse_interface(data):
"""
Parse details for an interface, given its data from `ip addr show <ifname>`
:rtype: akanda.router.models.Interface
"""
retval = dict(addresses=[])
for line in data.split('\n'):
if line.startswith(' '):
line = line.strip()
if line.startswith('inet'):
retval['addresses'].append(_parse_inet(line))
elif 'link/ether' in line:
retval['lladdr'] = _parse_lladdr(line)
else:
retval.update(_parse_head(line))
return models.Interface.from_dict(retval)
def _parse_head(line):
"""
Parse the line of `ip addr show` that contains the interface name, MTU, and
flags.
"""
retval = {}
m = re.match(
'[0-9]+: (?P<if>\w+\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',
line
)
if m:
retval['ifname'] = m.group('if')
retval['mtu'] = int(m.group('mtu'))
retval['flags'] = m.group('flags').split(',')
return retval
def _parse_inet(line):
"""
Parse a line of `ip addr show` that contains an address.
"""
tokens = line.split()
return netaddr.IPNetwork(tokens[1])
def _parse_lladdr(line):
"""
Parse the line of `ip addr show` that contains the hardware address.
"""
tokens = line.split()
return tokens[1]
| l = l.strip()
if l.startswith('default'):
match = re.search('via (?P<gateway>[^ ]+)', l)
if match:
return match.group('gateway') | conditional_block |
lib.rs | #![cfg_attr(docsrs, doc = include_str!("../README.md"))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg, doc_cfg_hide))]
#![cfg_attr(docsrs, deny(missing_docs))]
#![cfg_attr(not(any(feature = "std", test)), no_std)]
#![allow(unused_unsafe)]
//!
//! ## data structures
//!
//! `cordyceps` provides implementations of the following data structures:
//!
//! - **[`List`]: a mutable, doubly-linked list.**
//!
//! A [`List`] provides *O*(1) insertion and removal at both the head and
//! tail of the list. In addition, parts of a [`List`] may be split off to
//! form new [`List`]s, and two [`List`]s may be spliced together to form a
//! single [`List`], all in *O*(1) time. The [`list`] module also provides
//! [`list::Cursor`] and [`list::CursorMut`] types, which allow traversal and
//! modification of elements in a list. Finally, elements can remove themselves
//! from arbitrary positions in a [`List`], provided that they have mutable
//! access to the [`List`] itself. This makes the [`List`] type suitable for
//! use in cases where elements must be able to drop themselves while linked
//! into a list.
//!
//! The [`List`] type is **not** a lock-free data structure, and can only be
//! modified through `&mut` references.
//!
//! - **[`MpscQueue`]: a multi-producer, single-consumer (MPSC) lock-free
//! last-in, first-out (LIFO) queue.**
//!
//! A [`MpscQueue`] is a *lock-free* concurrent data structure that allows
//! multiple producers to concurrently push elements onto the queue, and a
//! single consumer to dequeue elements in the order that they were pushed.
//!
//! [`MpscQueue`]s can be used to efficiently share data from multiple
//! concurrent producers with a consumer.
//!
//! - **[`Stack`]: a mutable, singly-linked first-in, first-out (FIFO)
//! stack.**
//!
//! This is a simple, singly-linked stack with *O*(1) push and pop
//! operations. The pop operation returns the *last* element pushed to the
//! stack. A [`Stack`] also implements the [`Iterator`] trait; iterating over
//! a stack pops elements from the end of the list.
//!
//! The [`Stack`] type is **not** a lock-free data structure, and can only be
//! modified through `&mut` references.
//!
//! - **[`TransferStack`]: a lock-free, multi-producer FIFO stack, where
//! all elements currently in the stack are popped in a single atomic operation.**
//!
//! A [`TransferStack`] is a lock-free data structure where multiple producers
//! can [concurrently push elements](stack::TransferStack::push) to the end of
//! the stack through immutable `&` references. A consumer can [pop all
//! elements currently in the `TransferStack`](stack::TransferStack::take_all)
//! in a single atomic operation, returning a new [`Stack`]. Pushing an
//! element, and taking all elements in the [`TransferStack`] are both *O*(1)
//! operations.
//!
//! A [`TransferStack`] can be used to efficiently transfer ownership of
//! resources from multiple producers to a consumer, such as for reuse or
//! cleanup.
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(test)]
extern crate std;
#[macro_use]
pub(crate) mod util;
pub mod list;
pub mod mpsc_queue;
pub mod stack;
#[doc(inline)]
pub use list::List;
#[doc(inline)]
pub use mpsc_queue::MpscQueue;
#[doc(inline)]
pub use stack::{Stack, TransferStack};
pub(crate) mod loom;
use core::ptr::NonNull;
/// Trait implemented by types which can be members of an [intrusive collection].
///
/// In order to be part of an intrusive collection, a type must contain a
/// `Links` type that stores the pointers to other nodes in that collection. For
/// example, to be part of a [doubly-linked list], a type must contain the
/// [`list::Links`] struct, or to be part of a [MPSC queue], a type must contain
/// the [`mpsc_queue::Links`] struct.
///
/// # Safety
///
/// This is unsafe to implement because it's the implementation's responsibility
/// to ensure that types implementing this trait are valid intrusive collection
/// nodes. In particular:
///
/// - Implementations **must** ensure that implementors are pinned in memory while they
/// are in an intrusive collection. While a given `Linked` type is in an intrusive
/// data structure, it may not be deallocated or moved to a different memory
/// location.
/// - The type implementing this trait **must not** implement [`Unpin`].
/// - Additional safety requirements for individual methods on this trait are
/// documented on those methods.
///
/// Failure to uphold these invariants will result in corruption of the
/// intrusive data structure, including dangling pointers.
///
/// # Implementing `Linked::links`
///
/// The [`Linked::links`] method provides access to a `Linked` type's `Links`
/// field through a [`NonNull`] pointer. This is necessary for a type to
/// participate in an intrusive structure, as it tells the intrusive structure
/// how to access the links to other parts of that data structure. However, this
/// method is somewhat difficult to implement correctly. | ///
/// Suppose we have an entry type like this:
/// ```rust
/// use cordyceps::list;
///
/// struct Entry {
/// links: list::Links<Self>,
/// data: usize,
/// }
/// ```
///
/// The naive implementation of [`links`](Linked::links) for this `Entry` type
/// might look like this:
///
/// ```
/// use cordyceps::Linked;
/// use core::ptr::NonNull;
///
/// # use cordyceps::list;
/// # struct Entry {
/// # links: list::Links<Self>,
/// # }
///
/// unsafe impl Linked<list::Links<Self>> for Entry {
/// # type Handle = NonNull<Self>;
/// # fn into_ptr(r: Self::Handle) -> NonNull<Self> { r }
/// # unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr }
/// // ...
///
/// unsafe fn links(mut target: NonNull<Self>) -> NonNull<list::Links<Self>> {
/// // Borrow the target's `links` field.
/// let links = &mut target.as_mut().links;
/// // Convert that reference into a pointer.
/// NonNull::from(links)
/// }
/// }
/// ```
///
/// However, this implementation **is not sound** under [Stacked Borrows]! It
/// creates a temporary reference from the original raw pointer, and then
/// creates a new raw pointer from that temporary reference. Stacked Borrows
/// will reject this reborrow as unsound.[^1]
///
/// There are two ways we can implement [`Linked::links`] without creating a
/// temporary reference in this manner. The recommended one is to use the
/// [`core::ptr::addr_of_mut!`] macro, as follows:
///
/// ```
/// use core::ptr::{self, NonNull};
/// # use cordyceps::{Linked, list};
/// # struct Entry {
/// # links: list::Links<Self>,
/// # }
///
/// unsafe impl Linked<list::Links<Self>> for Entry {
/// # type Handle = NonNull<Self>;
/// # fn into_ptr(r: Self::Handle) -> NonNull<Self> { r }
/// # unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr }
/// // ...
///
/// unsafe fn links(target: NonNull<Self>) -> NonNull<list::Links<Self>> {
/// let target = target.as_ptr();
///
/// // Using the `ptr::addr_of_mut!` macro, we can offset a raw pointer to a
/// // raw pointer to a field *without* creating a temporary reference.
/// let links = ptr::addr_of_mut!((*target).links);
///
/// // `NonNull::new_unchecked` is safe to use here, because the pointer that
/// // we offset was not null, implying that the pointer produced by offsetting
/// // it will also not be null.
/// NonNull::new_unchecked(links)
/// }
/// }
/// ```
///
/// It is also possible to ensure that the struct implementing `Linked` is laid
/// out so that the `Links` field is the first member of the struct, and then
/// cast the pointer to a `Links`. Since [Rust's native type representation][repr]
/// does not guarantee the layout of struct members, it is **necessary** to ensure
/// that any struct that implements the `Linked::links` method in this manner has a
/// [`#[repr(C)]` attribute][repr-c], ensuring that its fields are laid out in the
/// order that they are defined.
///
/// For example:
///
/// ```
/// use core::ptr::NonNull;
/// use cordyceps::{Linked, list};
///
/// // This `repr(C)` attribute is *mandatory* here, as it ensures that the
/// // `links` field will *always* be the first field in the struct's in-memory
/// // representation.
/// #[repr(C)]
/// struct Entry {
/// links: list::Links<Self>,
/// data: usize,
/// }
///
/// unsafe impl Linked<list::Links<Self>> for Entry {
/// # type Handle = NonNull<Self>;
/// # fn into_ptr(r: Self::Handle) -> NonNull<Self> { r }
/// # unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr }
/// // ...
///
/// unsafe fn links(target: NonNull<Self>) -> NonNull<list::Links<Self>> {
/// // Safety: this performs a layout-dependent cast! it is only sound
/// // if the `Entry` type has a `#[repr(C)]` attribute!
/// target.cast::<list::Links<Self>>()
/// }
/// }
/// ```
///
/// In general, this approach is not recommended, and using
/// [`core::ptr::addr_of_mut!`] should be preferred in almost all cases. In
/// particular, the layout-dependent cast is more error-prone, as it requires a
/// `#[repr(C)]` attribute to avoid soundness issues. Additionally, the
/// layout-based cast does not permit a single struct to contain `Links` fields
/// for multiple intrusive data structures, as the `Links` type *must* be the
/// struct's first field.[^2] Therefore, [`Linked::links`] should generally be
/// implemented using [`addr_of_mut!`](core::ptr::addr_of_mut).
///
/// [^1]: Note that code like this is not *currently* known to result in
/// miscompiles, but it is rejected by tools like Miri as being unsound.
/// Like all undefined behavior, there is no guarantee that future Rust
/// compilers will not miscompile code like this, with disastrous results.
///
/// [^2]: And two different fields cannot both be the first field at the same
/// time...by definition.
///
/// [intrusive collection]: crate#intrusive-data-structures
/// [`Unpin`]: core::marker::Unpin
/// [doubly-linked list]: crate::list
/// [MSPC queue]: crate::mpsc_queue
/// [Stacked Borrows]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md
/// [repr]: https://doc.rust-lang.org/nomicon/repr-rust.html
/// [repr-c]: https://doc.rust-lang.org/nomicon/other-reprs.html#reprc
pub unsafe trait Linked<L> {
    /// The handle owning nodes in the linked list.
    ///
    /// This type must have ownership over a `Self`-typed value. When a `Handle`
    /// is dropped, it should drop the corresponding `Linked` type.
    ///
    /// A quintessential example of a `Handle` is [`Box`].
    ///
    /// [`Box`]: alloc::boxed::Box
    type Handle;

    /// Convert a [`Self::Handle`] to a raw pointer to `Self`, taking ownership
    /// of it in the process.
    fn into_ptr(r: Self::Handle) -> NonNull<Self>;

    /// Convert a raw pointer to `Self` into an owning [`Self::Handle`].
    ///
    /// # Safety
    ///
    /// This function is safe to call when:
    /// - It is valid to construct a [`Self::Handle`] from a raw pointer.
    /// - The pointer points to a valid instance of `Self` (e.g. it does not
    ///   dangle).
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle;

    /// Return the links of the node pointed to by `ptr`.
    ///
    /// # Safety
    ///
    /// This function is safe to call when:
    /// - It is valid to construct a [`Self::Handle`] from a raw pointer.
    /// - The pointer points to a valid instance of `Self` (e.g. it does not
    ///   dangle).
    ///
    /// See [the trait-level documentation](#implementing-linkedlinks) for
    /// details on how to correctly implement this method.
    unsafe fn links(ptr: NonNull<Self>) -> NonNull<L>;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.