text
stringlengths 3
1.05M
|
|---|
// Copyright (c) 2011-2013 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef ROICOIN_QT_MACNOTIFICATIONHANDLER_H
#define ROICOIN_QT_MACNOTIFICATIONHANDLER_H
#include <QObject>
/** Macintosh-specific notification handler (supports UserNotificationCenter and Growl).
*/
class MacNotificationHandler : public QObject
{
    Q_OBJECT
public:
    /** Show a notification in the macOS 10.8+ UserNotificationCenter.
     *  @param title notification title line
     *  @param text  notification body text
     */
    void showNotification(const QString &title, const QString &text);

    /** Execute the given AppleScript source string. */
    void sendAppleScript(const QString &script);

    /** Check whether the running OS can handle UserNotifications
     *  (i.e. the UserNotificationCenter API is available).
     *  NOTE: C++ idiom is an empty parameter list rather than the
     *  C-style "(void)"; the signature is identical, so the
     *  out-of-file definition still matches.
     */
    bool hasUserNotificationCenterSupport();

    /** Return the shared handler instance (presumably a process-wide
     *  singleton — confirm against the .mm implementation). */
    static MacNotificationHandler *instance();
};
#endif // ROICOIN_QT_MACNOTIFICATIONHANDLER_H
|
/* Audio Library for Teensy
* Copyright (c) 2021, Nic Newdigate
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef TEENSY_AUDIO_SAMPLER_SAMPLEPLAYMIDICONTROLLERENUMS_H
#define TEENSY_AUDIO_SAMPLER_SAMPLEPLAYMIDICONTROLLERENUMS_H
// State machine for the sample-play MIDI controller. Values are explicit
// because they may be persisted or compared externally — do not renumber.
enum playcontrollerstate {
playcontrollerstate_initialising = 0, // need to ascertain which midi notes and channels correspond to which control functions
playcontrollerstate_performing = 1, // normal operation (name-derived; verify against controller implementation)
playcontrollerstate_selecting_target = 2, // choosing which control target to edit — TODO confirm semantics in controller code
playcontrollerstate_editing_target = 3, // editing the previously selected target — TODO confirm semantics in controller code
};
// Control functions that a trigger (note/CC) can be mapped to. Enumerator
// names suggest each function's purpose; semantics are implemented in the
// controller source, not visible here — confirm there before relying on them.
enum triggerctrlfunction {
triggerctrlfunction_none = 0, // no function assigned
triggerctrlfunction_changetriggertype = 1, // presumably switches the trigger's type — verify against caller
triggerctrlfunction_changedirection = 2, // presumably playback direction — verify against caller
triggerctrlfunction_changelooptype = 3, // presumably loop mode — verify against caller
triggerctrlfunction_changesample = 4, // presumably selects a different sample — verify against caller
triggerctrlfunction_selector_cc = 5, // CC-based selector function — TODO confirm exact behavior
};
#endif // TEENSY_AUDIO_SAMPLER_SAMPLEPLAYMIDICONTROLLERENUMS_H
|
!function(a,b){b.site=a,function c(a,b,d){function e(g,h){if(!b[g]){if(!a[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);var j=new Error("Cannot find module '"+g+"'");throw j.code="MODULE_NOT_FOUND",j}var k=b[g]={exports:{}};a[g][0].call(k.exports,function(b){var c=a[g][1][b];return e(c?c:b)},k,k.exports,c,a,b,d)}return b[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b){!function(a,c){"use strict";function d(){e.READY||(t.determineEventTypes(),s.each(e.gestures,function(a){v.register(a)}),t.onTouch(e.DOCUMENT,o,v.detect),t.onTouch(e.DOCUMENT,p,v.detect),e.READY=!0)}var e=function w(a,b){return new w.Instance(a,b||{})};e.VERSION="1.1.3",e.defaults={behavior:{userSelect:"none",touchAction:"pan-y",touchCallout:"none",contentZooming:"none",userDrag:"none",tapHighlightColor:"rgba(0,0,0,0)"}},e.DOCUMENT=document,e.HAS_POINTEREVENTS=navigator.pointerEnabled||navigator.msPointerEnabled,e.HAS_TOUCHEVENTS="ontouchstart"in a,e.IS_MOBILE=/mobile|tablet|ip(ad|hone|od)|android|silk/i.test(navigator.userAgent),e.NO_MOUSEEVENTS=e.HAS_TOUCHEVENTS&&e.IS_MOBILE||e.HAS_POINTEREVENTS,e.CALCULATE_INTERVAL=25;var f={},g=e.DIRECTION_DOWN="down",h=e.DIRECTION_LEFT="left",i=e.DIRECTION_UP="up",j=e.DIRECTION_RIGHT="right",k=e.POINTER_MOUSE="mouse",l=e.POINTER_TOUCH="touch",m=e.POINTER_PEN="pen",n=e.EVENT_START="start",o=e.EVENT_MOVE="move",p=e.EVENT_END="end",q=e.EVENT_RELEASE="release",r=e.EVENT_TOUCH="touch";e.READY=!1,e.plugins=e.plugins||{},e.gestures=e.gestures||{};var s=e.utils={extend:function(a,b,d){for(var e in b)!b.hasOwnProperty(e)||a[e]!==c&&d||(a[e]=b[e]);return a},on:function(a,b,c){a.addEventListener(b,c,!1)},off:function(a,b,c){a.removeEventListener(b,c,!1)},each:function(a,b,d){var e,f;if("forEach"in a)a.forEach(b,d);else if(a.length!==c){for(e=0,f=a.length;f>e;e++)if(b.call(d,a[e],e,a)===!1)return}else for(e in 
a)if(a.hasOwnProperty(e)&&b.call(d,a[e],e,a)===!1)return},inStr:function(a,b){return a.indexOf(b)>-1},inArray:function(a,b){if(a.indexOf){var c=a.indexOf(b);return-1===c?!1:c}for(var d=0,e=a.length;e>d;d++)if(a[d]===b)return d;return!1},toArray:function(a){return Array.prototype.slice.call(a,0)},hasParent:function(a,b){for(;a;){if(a==b)return!0;a=a.parentNode}return!1},getCenter:function(a){var b=[],c=[],d=[],e=[],f=Math.min,g=Math.max;return 1===a.length?{pageX:a[0].pageX,pageY:a[0].pageY,clientX:a[0].clientX,clientY:a[0].clientY}:(s.each(a,function(a){b.push(a.pageX),c.push(a.pageY),d.push(a.clientX),e.push(a.clientY)}),{pageX:(f.apply(Math,b)+g.apply(Math,b))/2,pageY:(f.apply(Math,c)+g.apply(Math,c))/2,clientX:(f.apply(Math,d)+g.apply(Math,d))/2,clientY:(f.apply(Math,e)+g.apply(Math,e))/2})},getVelocity:function(a,b,c){return{x:Math.abs(b/a)||0,y:Math.abs(c/a)||0}},getAngle:function(a,b){var c=b.clientX-a.clientX,d=b.clientY-a.clientY;return 180*Math.atan2(d,c)/Math.PI},getDirection:function(a,b){var c=Math.abs(a.clientX-b.clientX),d=Math.abs(a.clientY-b.clientY);return c>=d?a.clientX-b.clientX>0?h:j:a.clientY-b.clientY>0?i:g},getDistance:function(a,b){var c=b.clientX-a.clientX,d=b.clientY-a.clientY;return Math.sqrt(c*c+d*d)},getScale:function(a,b){return a.length>=2&&b.length>=2?this.getDistance(b[0],b[1])/this.getDistance(a[0],a[1]):1},getRotation:function(a,b){return a.length>=2&&b.length>=2?this.getAngle(b[1],b[0])-this.getAngle(a[1],a[0]):0},isVertical:function(a){return a==i||a==g},setPrefixedCss:function(a,b,c,d){var e=["","Webkit","Moz","O","ms"];b=s.toCamelCase(b);for(var f=0;f<e.length;f++){var g=b;if(e[f]&&(g=e[f]+g.slice(0,1).toUpperCase()+g.slice(1)),g in a.style){a.style[g]=(null==d||d)&&c||"";break}}},toggleBehavior:function(a,b,c){if(b&&a&&a.style){s.each(b,function(b,d){s.setPrefixedCss(a,d,b,c)});var d=c&&function(){return!1};"none"==b.userSelect&&(a.onselectstart=d),"none"==b.userDrag&&(a.ondragstart=d)}},toCamelCase:function(a){return 
a.replace(/[_-]([a-z])/g,function(a){return a[1].toUpperCase()})}},t=e.event={preventMouseEvents:!1,started:!1,shouldDetect:!1,on:function(a,b,c,d){var e=b.split(" ");s.each(e,function(b){s.on(a,b,c),d&&d(b)})},off:function(a,b,c,d){var e=b.split(" ");s.each(e,function(b){s.off(a,b,c),d&&d(b)})},onTouch:function(a,b,c){var d=this,g=function(f){var g,h=f.type.toLowerCase(),i=e.HAS_POINTEREVENTS,j=s.inStr(h,"mouse");j&&d.preventMouseEvents||(j&&b==n&&0===f.button?(d.preventMouseEvents=!1,d.shouldDetect=!0):i&&b==n?d.shouldDetect=1===f.buttons||u.matchType(l,f):j||b!=n||(d.preventMouseEvents=!0,d.shouldDetect=!0),i&&b!=p&&u.updatePointer(b,f),d.shouldDetect&&(g=d.doDetect.call(d,f,b,a,c)),g==p&&(d.preventMouseEvents=!1,d.shouldDetect=!1,u.reset()),i&&b==p&&u.updatePointer(b,f))};return this.on(a,f[b],g),g},doDetect:function(a,b,c,d){var e=this.getTouchList(a,b),f=e.length,g=b,h=e.trigger,i=f;b==n?h=r:b==p&&(h=q,i=e.length-(a.changedTouches?a.changedTouches.length:1)),i>0&&this.started&&(g=o),this.started=!0;var j=this.collectEventData(c,g,e,a);return b!=p&&d.call(v,j),h&&(j.changedLength=i,j.eventType=h,d.call(v,j),j.eventType=g,delete j.changedLength),g==p&&(d.call(v,j),this.started=!1),g},determineEventTypes:function(){var b;return b=e.HAS_POINTEREVENTS?a.PointerEvent?["pointerdown","pointermove","pointerup pointercancel lostpointercapture"]:["MSPointerDown","MSPointerMove","MSPointerUp MSPointerCancel MSLostPointerCapture"]:e.NO_MOUSEEVENTS?["touchstart","touchmove","touchend touchcancel"]:["touchstart mousedown","touchmove mousemove","touchend touchcancel mouseup"],f[n]=b[0],f[o]=b[1],f[p]=b[2],f},getTouchList:function(a,b){if(e.HAS_POINTEREVENTS)return u.getTouchList();if(a.touches){if(b==o)return a.touches;var c=[],d=[].concat(s.toArray(a.touches),s.toArray(a.changedTouches)),f=[];return s.each(d,function(a){s.inArray(c,a.identifier)===!1&&f.push(a),c.push(a.identifier)}),f}return a.identifier=1,[a]},collectEventData:function(a,b,c,d){var e=l;return 
s.inStr(d.type,"mouse")||u.matchType(k,d)?e=k:u.matchType(m,d)&&(e=m),{center:s.getCenter(c),timeStamp:Date.now(),target:d.target,touches:c,eventType:b,pointerType:e,srcEvent:d,preventDefault:function(){var a=this.srcEvent;a.preventManipulation&&a.preventManipulation(),a.preventDefault&&a.preventDefault()},stopPropagation:function(){this.srcEvent.stopPropagation()},stopDetect:function(){return v.stopDetect()}}}},u=e.PointerEvent={pointers:{},getTouchList:function(){var a=[];return s.each(this.pointers,function(b){a.push(b)}),a},updatePointer:function(a,b){a==p||a!=p&&1!==b.buttons?delete this.pointers[b.pointerId]:(b.identifier=b.pointerId,this.pointers[b.pointerId]=b)},matchType:function(a,b){if(!b.pointerType)return!1;var c=b.pointerType,d={};return d[k]=c===(b.MSPOINTER_TYPE_MOUSE||k),d[l]=c===(b.MSPOINTER_TYPE_TOUCH||l),d[m]=c===(b.MSPOINTER_TYPE_PEN||m),d[a]},reset:function(){this.pointers={}}},v=e.detection={gestures:[],current:null,previous:null,stopped:!1,startDetect:function(a,b){this.current||(this.stopped=!1,this.current={inst:a,startEvent:s.extend({},b),lastEvent:!1,lastCalcEvent:!1,futureCalcEvent:!1,lastCalcData:{},name:""},this.detect(b))},detect:function(a){if(this.current&&!this.stopped){a=this.extendEventData(a);var b=this.current.inst,c=b.options;return s.each(this.gestures,function(d){!this.stopped&&b.enabled&&c[d.name]&&d.handler.call(d,a,b)},this),this.current&&(this.current.lastEvent=a),a.eventType==p&&this.stopDetect(),a}},stopDetect:function(){this.previous=s.extend({},this.current),this.current=null,this.stopped=!0},getCalculatedData:function(a,b,c,d,f){var 
g=this.current,h=!1,i=g.lastCalcEvent,j=g.lastCalcData;i&&a.timeStamp-i.timeStamp>e.CALCULATE_INTERVAL&&(b=i.center,c=a.timeStamp-i.timeStamp,d=a.center.clientX-i.center.clientX,f=a.center.clientY-i.center.clientY,h=!0),(a.eventType==r||a.eventType==q)&&(g.futureCalcEvent=a),(!g.lastCalcEvent||h)&&(j.velocity=s.getVelocity(c,d,f),j.angle=s.getAngle(b,a.center),j.direction=s.getDirection(b,a.center),g.lastCalcEvent=g.futureCalcEvent||a,g.futureCalcEvent=a),a.velocityX=j.velocity.x,a.velocityY=j.velocity.y,a.interimAngle=j.angle,a.interimDirection=j.direction},extendEventData:function(a){var b=this.current,c=b.startEvent,d=b.lastEvent||c;(a.eventType==r||a.eventType==q)&&(c.touches=[],s.each(a.touches,function(a){c.touches.push({clientX:a.clientX,clientY:a.clientY})}));var e=a.timeStamp-c.timeStamp,f=a.center.clientX-c.center.clientX,g=a.center.clientY-c.center.clientY;return this.getCalculatedData(a,d.center,e,f,g),s.extend(a,{startEvent:c,deltaTime:e,deltaX:f,deltaY:g,distance:s.getDistance(c.center,a.center),angle:s.getAngle(c.center,a.center),direction:s.getDirection(c.center,a.center),scale:s.getScale(c.touches,a.touches),rotation:s.getRotation(c.touches,a.touches)}),a},register:function(a){var b=a.defaults||{};return b[a.name]===c&&(b[a.name]=!0),s.extend(e.defaults,b,!0),a.index=a.index||1e3,this.gestures.push(a),this.gestures.sort(function(a,b){return a.index<b.index?-1:a.index>b.index?1:0}),this.gestures}};e.Instance=function(a,b){var c=this;d(),this.element=a,this.enabled=!0,s.each(b,function(a,c){delete b[c],b[s.toCamelCase(c)]=a}),this.options=s.extend(s.extend({},e.defaults),b||{}),this.options.behavior&&s.toggleBehavior(this.element,this.options.behavior,!0),this.eventStartHandler=t.onTouch(a,n,function(a){c.enabled&&a.eventType==n?v.startDetect(c,a):a.eventType==r&&v.detect(a)}),this.eventHandlers=[]},e.Instance.prototype={on:function(a,b){var c=this;return 
t.on(c.element,a,b,function(a){c.eventHandlers.push({gesture:a,handler:b})}),c},off:function(a,b){var c=this;return t.off(c.element,a,b,function(a){var d=s.inArray({gesture:a,handler:b});d!==!1&&c.eventHandlers.splice(d,1)}),c},trigger:function(a,b){b||(b={});var c=e.DOCUMENT.createEvent("Event");c.initEvent(a,!0,!0),c.gesture=b;var d=this.element;return s.hasParent(b.target,d)&&(d=b.target),d.dispatchEvent(c),this},enable:function(a){return this.enabled=a,this},dispose:function(){var a,b;for(s.toggleBehavior(this.element,this.options.behavior,!1),a=-1;b=this.eventHandlers[++a];)s.off(this.element,b.gesture,b.handler);return this.eventHandlers=[],t.off(this.element,f[n],this.eventStartHandler),null}},function(a){function b(b,d){var e=v.current;if(!(d.options.dragMaxTouches>0&&b.touches.length>d.options.dragMaxTouches))switch(b.eventType){case n:c=!1;break;case o:if(b.distance<d.options.dragMinDistance&&e.name!=a)return;var f=e.startEvent.center;if(e.name!=a&&(e.name=a,d.options.dragDistanceCorrection&&b.distance>0)){var k=Math.abs(d.options.dragMinDistance/b.distance);f.pageX+=b.deltaX*k,f.pageY+=b.deltaY*k,f.clientX+=b.deltaX*k,f.clientY+=b.deltaY*k,b=v.extendEventData(b)}(e.lastEvent.dragLockToAxis||d.options.dragLockToAxis&&d.options.dragLockMinDistance<=b.distance)&&(b.dragLockToAxis=!0);var l=e.lastEvent.direction;b.dragLockToAxis&&l!==b.direction&&(b.direction=s.isVertical(l)?b.deltaY<0?i:g:b.deltaX<0?h:j),c||(d.trigger(a+"start",b),c=!0),d.trigger(a,b),d.trigger(a+b.direction,b);var m=s.isVertical(b.direction);(d.options.dragBlockVertical&&m||d.options.dragBlockHorizontal&&!m)&&b.preventDefault();break;case q:c&&b.changedLength<=d.options.dragMaxTouches&&(d.trigger(a+"end",b),c=!1);break;case p:c=!1}}var 
c=!1;e.gestures.Drag={name:a,index:50,handler:b,defaults:{dragMinDistance:10,dragDistanceCorrection:!0,dragMaxTouches:1,dragBlockHorizontal:!1,dragBlockVertical:!1,dragLockToAxis:!1,dragLockMinDistance:25}}}("drag"),e.gestures.Gesture={name:"gesture",index:1337,handler:function(a,b){b.trigger(this.name,a)}},function(a){function b(b,d){var e=d.options,f=v.current;switch(b.eventType){case n:clearTimeout(c),f.name=a,c=setTimeout(function(){f&&f.name==a&&d.trigger(a,b)},e.holdTimeout);break;case o:b.distance>e.holdThreshold&&clearTimeout(c);break;case q:clearTimeout(c)}}var c;e.gestures.Hold={name:a,index:10,defaults:{holdTimeout:500,holdThreshold:2},handler:b}}("hold"),e.gestures.Release={name:"release",index:1/0,handler:function(a,b){a.eventType==q&&b.trigger(this.name,a)}},e.gestures.Swipe={name:"swipe",index:40,defaults:{swipeMinTouches:1,swipeMaxTouches:1,swipeVelocityX:.6,swipeVelocityY:.6},handler:function(a,b){if(a.eventType==q){var c=a.touches.length,d=b.options;if(c<d.swipeMinTouches||c>d.swipeMaxTouches)return;(a.velocityX>d.swipeVelocityX||a.velocityY>d.swipeVelocityY)&&(b.trigger(this.name,a),b.trigger(this.name+a.direction,a))}}},function(a){function b(b,d){var e,f,g=d.options,h=v.current,i=v.previous;switch(b.eventType){case n:c=!1;break;case o:c=c||b.distance>g.tapMaxDistance;break;case p:!s.inStr(b.srcEvent.type,"cancel")&&b.deltaTime<g.tapMaxTime&&!c&&(e=i&&i.lastEvent&&b.timeStamp-i.lastEvent.timeStamp,f=!1,i&&i.name==a&&e&&e<g.doubleTapInterval&&b.distance<g.doubleTapDistance&&(d.trigger("doubletap",b),f=!0),(!f||g.tapAlways)&&(h.name=a,d.trigger(h.name,b)))}}var c=!1;e.gestures.Tap={name:a,index:100,handler:b,defaults:{tapMaxTime:250,tapMaxDistance:10,tapAlways:!0,doubleTapDistance:20,doubleTapInterval:300}}}("tap"),e.gestures.Touch={name:"touch",index:-1/0,defaults:{preventDefault:!1,preventMouse:!1},handler:function(a,b){return b.options.preventMouse&&a.pointerType==k?void 
a.stopDetect():(b.options.preventDefault&&a.preventDefault(),void(a.eventType==r&&b.trigger("touch",a)))}},function(a){function b(b,d){switch(b.eventType){case n:c=!1;break;case o:if(b.touches.length<2)return;var e=Math.abs(1-b.scale),f=Math.abs(b.rotation);if(e<d.options.transformMinScale&&f<d.options.transformMinRotation)return;v.current.name=a,c||(d.trigger(a+"start",b),c=!0),d.trigger(a,b),f>d.options.transformMinRotation&&d.trigger("rotate",b),e>d.options.transformMinScale&&(d.trigger("pinch",b),d.trigger("pinch"+(b.scale<1?"in":"out"),b));break;case q:c&&b.changedLength<2&&(d.trigger(a+"end",b),c=!1)}}var c=!1;e.gestures.Transform={name:a,index:45,defaults:{transformMinScale:.01,transformMinRotation:1},handler:b}}("transform"),"function"==typeof define&&define.amd?define(function(){return e}):"undefined"!=typeof b&&b.exports?b.exports=e:a.Hammer=e}(window)},{}],2:[function(a,c,d){(function(a){(function(){function b(a,b,c){for(var d=(c||0)-1,e=a?a.length:0;++d<e;)if(a[d]===b)return d;return-1}function e(a,c){var d=typeof c;if(a=a.cache,"boolean"==d||null==c)return a[c]?0:-1;"number"!=d&&"string"!=d&&(d="object");var e="number"==d?c:u+c;return a=(a=a[d])&&a[e],"object"==d?a&&b(a,c)>-1?0:-1:a?0:-1}function f(a){var b=this.cache,c=typeof a;if("boolean"==c||null==a)b[a]=!0;else{"number"!=c&&"string"!=c&&(c="object");var d="number"==c?a:u+a,e=b[c]||(b[c]={});"object"==c?(e[d]||(e[d]=[])).push(a):e[d]=!0}}function g(a){return a.charCodeAt(0)}function h(a,b){for(var c=a.criteria,d=b.criteria,e=-1,f=c.length;++e<f;){var g=c[e],h=d[e];if(g!==h){if(g>h||"undefined"==typeof g)return 1;if(h>g||"undefined"==typeof h)return-1}}return a.index-b.index}function i(a){var b=-1,c=a.length,d=a[0],e=a[c/2|0],g=a[c-1];if(d&&"object"==typeof d&&e&&"object"==typeof e&&g&&"object"==typeof g)return!1;var h=l();h["false"]=h["null"]=h["true"]=h.undefined=!1;var i=l();for(i.array=a,i.cache=h,i.push=f;++b<c;)i.push(a[b]);return i}function j(a){return"\\"+Y[a]}function k(){return 
r.pop()||[]}function l(){return s.pop()||{array:null,cache:null,criteria:null,"false":!1,index:0,"null":!1,number:null,object:null,push:null,string:null,"true":!1,undefined:!1,value:null}}function m(a){a.length=0,r.length<w&&r.push(a)}function n(a){var b=a.cache;b&&n(b),a.array=a.cache=a.criteria=a.object=a.number=a.string=a.value=null,s.length<w&&s.push(a)}function o(a,b,c){b||(b=0),"undefined"==typeof c&&(c=a?a.length:0);for(var d=-1,e=c-b||0,f=Array(0>e?0:e);++d<e;)f[d]=a[b+d];return f}function p(a){function c(a){return a&&"object"==typeof a&&!Zd(a)&&Hd.call(a,"__wrapped__")?a:new d(a)}function d(a,b){this.__chain__=!!b,this.__wrapped__=a}function f(a){function b(){if(d){var a=o(d);Id.apply(a,arguments)}if(this instanceof b){var f=s(c.prototype),g=c.apply(f,a||arguments);return Eb(g)?g:f}return c.apply(e,a||arguments)}var c=a[0],d=a[2],e=a[4];return Yd(b,a),b}function r(a,b,c,d,e){if(c){var f=c(a);if("undefined"!=typeof f)return f}var g=Eb(a);if(!g)return a;var h=Ad.call(a);if(!U[h])return a;var i=Wd[h];switch(h){case N:case O:return new i(+a);case Q:case T:return new i(a);case S:return f=i(a.source,C.exec(a)),f.lastIndex=a.lastIndex,f}var j=Zd(a);if(b){var l=!d;d||(d=k()),e||(e=k());for(var n=d.length;n--;)if(d[n]==a)return e[n];f=j?i(a.length):{}}else f=j?o(a):ee({},a);return j&&(Hd.call(a,"index")&&(f.index=a.index),Hd.call(a,"input")&&(f.input=a.input)),b?(d.push(a),e.push(f),(j?Yb:he)(a,function(a,g){f[g]=r(a,b,c,d,e)}),l&&(m(d),m(e)),f):f}function s(a){return Eb(a)?Nd(a):{}}function w(a,b,c){if("function"!=typeof a)return Zc;if("undefined"==typeof b||!("prototype"in a))return a;var d=a.__bindData__;if("undefined"==typeof d&&(Xd.funcNames&&(d=!a.name),d=d||!Xd.funcDecomp,!d)){var e=Fd.call(a);Xd.funcNames||(d=!D.test(e)),d||(d=H.test(e),Yd(a,d))}if(d===!1||d!==!0&&1&d[1])return a;switch(c){case 1:return function(c){return a.call(b,c)};case 2:return function(c,d){return a.call(b,c,d)};case 3:return function(c,d,e){return a.call(b,c,d,e)};case 4:return 
function(c,d,e,f){return a.call(b,c,d,e,f)}}return Ic(a,b)}function Y(a){function b(){var a=i?g:this;if(e){var n=o(e);Id.apply(n,arguments)}if((f||k)&&(n||(n=o(arguments)),f&&Id.apply(n,f),k&&n.length<h))return d|=16,Y([c,l?d:-4&d,n,null,g,h]);if(n||(n=arguments),j&&(c=a[m]),this instanceof b){a=s(c.prototype);var p=c.apply(a,n);return Eb(p)?p:a}return c.apply(a,n)}var c=a[0],d=a[1],e=a[2],f=a[3],g=a[4],h=a[5],i=1&d,j=2&d,k=4&d,l=8&d,m=c;return Yd(b,a),b}function $(a,c){var d=-1,f=ib(),g=a?a.length:0,h=g>=v&&f===b,j=[];if(h){var k=i(c);k?(f=e,c=k):h=!1}for(;++d<g;){var l=a[d];f(c,l)<0&&j.push(l)}return h&&n(c),j}function _(a,b,c,d){for(var e=(d||0)-1,f=a?a.length:0,g=[];++e<f;){var h=a[e];if(h&&"object"==typeof h&&"number"==typeof h.length&&(Zd(h)||mb(h))){b||(h=_(h,b,c));var i=-1,j=h.length,k=g.length;for(g.length+=j;++i<j;)g[k++]=h[i]}else c||g.push(h)}return g}function ab(a,b,c,d,e,f){if(c){var g=c(a,b);if("undefined"!=typeof g)return!!g}if(a===b)return 0!==a||1/a==1/b;var h=typeof a,i=typeof b;if(!(a!==a||a&&X[h]||b&&X[i]))return!1;if(null==a||null==b)return a===b;var j=Ad.call(a),l=Ad.call(b);if(j==L&&(j=R),l==L&&(l=R),j!=l)return!1;switch(j){case N:case O:return+a==+b;case Q:return a!=+a?b!=+b:0==a?1/a==1/b:a==+b;case S:case T:return a==vd(b)}var n=j==M;if(!n){var o=Hd.call(a,"__wrapped__"),p=Hd.call(b,"__wrapped__");if(o||p)return ab(o?a.__wrapped__:a,p?b.__wrapped__:b,c,d,e,f);if(j!=R)return!1;var q=a.constructor,r=b.constructor;if(q!=r&&!(Db(q)&&q instanceof q&&Db(r)&&r instanceof r)&&"constructor"in a&&"constructor"in b)return!1}var s=!e;e||(e=k()),f||(f=k());for(var t=e.length;t--;)if(e[t]==a)return f[t]==b;var u=0;if(g=!0,e.push(a),f.push(b),n){if(t=a.length,u=b.length,g=u==t,g||d)for(;u--;){var v=t,w=b[u];if(d)for(;v--&&!(g=ab(a[v],w,c,d,e,f)););else if(!(g=ab(a[u],w,c,d,e,f)))break}}else ge(b,function(b,h,i){return Hd.call(i,h)?(u++,g=Hd.call(a,h)&&ab(a[h],b,c,d,e,f)):void 0}),g&&!d&&ge(a,function(a,b,c){return Hd.call(c,b)?g=--u>-1:void 0});return 
e.pop(),f.pop(),s&&(m(e),m(f)),g}function bb(a,b,c,d,e){(Zd(b)?Yb:he)(b,function(b,f){var g,h,i=b,j=a[f];if(b&&((h=Zd(b))||ie(b))){for(var k=d.length;k--;)if(g=d[k]==b){j=e[k];break}if(!g){var l;c&&(i=c(j,b),(l="undefined"!=typeof i)&&(j=i)),l||(j=h?Zd(j)?j:[]:ie(j)?j:{}),d.push(b),e.push(j),l||bb(j,b,c,d,e)}}else c&&(i=c(j,b),"undefined"==typeof i&&(i=b)),"undefined"!=typeof i&&(j=i);a[f]=j})}function db(a,b){return a+Ed(Vd()*(b-a+1))}function eb(a,c,d){var f=-1,g=ib(),h=a?a.length:0,j=[],l=!c&&h>=v&&g===b,o=d||l?k():j;if(l){var p=i(o);g=e,o=p}for(;++f<h;){var q=a[f],r=d?d(q,f,a):q;(c?!f||o[o.length-1]!==r:g(o,r)<0)&&((d||l)&&o.push(r),j.push(q))}return l?(m(o.array),n(o)):d&&m(o),j}function fb(a){return function(b,d,e){var f={};d=c.createCallback(d,e,3);var g=-1,h=b?b.length:0;if("number"==typeof h)for(;++g<h;){var i=b[g];a(f,i,d(i,g,b),b)}else he(b,function(b,c,e){a(f,b,d(b,c,e),e)});return f}}function gb(a,b,c,d,e,g){var h=1&b,i=2&b,j=4&b,k=16&b,l=32&b;if(!i&&!Db(a))throw new wd;k&&!c.length&&(b&=-17,k=c=!1),l&&!d.length&&(b&=-33,l=d=!1);var m=a&&a.__bindData__;if(m&&m!==!0)return m=o(m),m[2]&&(m[2]=o(m[2])),m[3]&&(m[3]=o(m[3])),!h||1&m[1]||(m[4]=e),!h&&1&m[1]&&(b|=8),!j||4&m[1]||(m[5]=g),k&&Id.apply(m[2]||(m[2]=[]),c),l&&Ld.apply(m[3]||(m[3]=[]),d),m[1]|=b,gb.apply(null,m);var n=1==b||17===b?f:Y;return n([a,b,c,d,e,g])}function hb(a){return ae[a]}function ib(){var a=(a=c.indexOf)===rc?b:a;return a}function jb(a){return"function"==typeof a&&Bd.test(a)}function kb(a){var b,c;return a&&Ad.call(a)==R&&(b=a.constructor,!Db(b)||b instanceof b)?(ge(a,function(a,b){c=b}),"undefined"==typeof c||Hd.call(a,c)):!1}function lb(a){return be[a]}function mb(a){return a&&"object"==typeof a&&"number"==typeof a.length&&Ad.call(a)==L||!1}function nb(a,b,c,d){return"boolean"!=typeof b&&null!=b&&(d=c,c=b,b=!1),r(a,b,"function"==typeof c&&w(c,d,1))}function ob(a,b,c){return r(a,!0,"function"==typeof b&&w(b,c,1))}function pb(a,b){var c=s(a);return b?ee(c,b):c}function qb(a,b,d){var 
e;return b=c.createCallback(b,d,3),he(a,function(a,c,d){return b(a,c,d)?(e=c,!1):void 0}),e}function rb(a,b,d){var e;return b=c.createCallback(b,d,3),tb(a,function(a,c,d){return b(a,c,d)?(e=c,!1):void 0}),e}function sb(a,b,c){var d=[];ge(a,function(a,b){d.push(b,a)});var e=d.length;for(b=w(b,c,3);e--&&b(d[e--],d[e],a)!==!1;);return a}function tb(a,b,c){var d=_d(a),e=d.length;for(b=w(b,c,3);e--;){var f=d[e];if(b(a[f],f,a)===!1)break}return a}function ub(a){var b=[];return ge(a,function(a,c){Db(a)&&b.push(c)}),b.sort()}function vb(a,b){return a?Hd.call(a,b):!1}function wb(a){for(var b=-1,c=_d(a),d=c.length,e={};++b<d;){var f=c[b];e[a[f]]=f}return e}function xb(a){return a===!0||a===!1||a&&"object"==typeof a&&Ad.call(a)==N||!1}function yb(a){return a&&"object"==typeof a&&Ad.call(a)==O||!1}function zb(a){return a&&1===a.nodeType||!1}function Ab(a){var b=!0;if(!a)return b;var c=Ad.call(a),d=a.length;return c==M||c==T||c==L||c==R&&"number"==typeof d&&Db(a.splice)?!d:(he(a,function(){return b=!1}),b)}function Bb(a,b,c,d){return ab(a,b,"function"==typeof c&&w(c,d,2))}function Cb(a){return Pd(a)&&!Qd(parseFloat(a))}function Db(a){return"function"==typeof a}function Eb(a){return!(!a||!X[typeof a])}function Fb(a){return Hb(a)&&a!=+a}function Gb(a){return null===a}function Hb(a){return"number"==typeof a||a&&"object"==typeof a&&Ad.call(a)==Q||!1}function Ib(a){return a&&"object"==typeof a&&Ad.call(a)==S||!1}function Jb(a){return"string"==typeof a||a&&"object"==typeof a&&Ad.call(a)==T||!1}function Kb(a){return"undefined"==typeof a}function Lb(a,b,d){var e={};return b=c.createCallback(b,d,3),he(a,function(a,c,d){e[c]=b(a,c,d)}),e}function Mb(a){var b=arguments,c=2;if(!Eb(a))return a;if("number"!=typeof b[2]&&(c=b.length),c>3&&"function"==typeof b[c-2])var d=w(b[--c-1],b[c--],2);else c>2&&"function"==typeof b[c-1]&&(d=b[--c]);for(var e=o(arguments,1,c),f=-1,g=k(),h=k();++f<c;)bb(a,e[f],d,g,h);return m(g),m(h),a}function Nb(a,b,d){var e={};if("function"!=typeof b){var 
f=[];ge(a,function(a,b){f.push(b)}),f=$(f,_(arguments,!0,!1,1));for(var g=-1,h=f.length;++g<h;){var i=f[g];e[i]=a[i]}}else b=c.createCallback(b,d,3),ge(a,function(a,c,d){b(a,c,d)||(e[c]=a)});return e}function Ob(a){for(var b=-1,c=_d(a),d=c.length,e=nd(d);++b<d;){var f=c[b];e[b]=[f,a[f]]}return e}function Pb(a,b,d){var e={};if("function"!=typeof b)for(var f=-1,g=_(arguments,!0,!1,1),h=Eb(a)?g.length:0;++f<h;){var i=g[f];i in a&&(e[i]=a[i])}else b=c.createCallback(b,d,3),ge(a,function(a,c,d){b(a,c,d)&&(e[c]=a)});return e}function Qb(a,b,d,e){var f=Zd(a);if(null==d)if(f)d=[];else{var g=a&&a.constructor,h=g&&g.prototype;d=s(h)}return b&&(b=c.createCallback(b,e,4),(f?Yb:he)(a,function(a,c,e){return b(d,a,c,e)})),d}function Rb(a){for(var b=-1,c=_d(a),d=c.length,e=nd(d);++b<d;)e[b]=a[c[b]];return e}function Sb(a){for(var b=arguments,c=-1,d=_(b,!0,!1,1),e=b[2]&&b[2][b[1]]===a?1:d.length,f=nd(e);++c<e;)f[c]=a[d[c]];return f}function Tb(a,b,c){var d=-1,e=ib(),f=a?a.length:0,g=!1;return c=(0>c?Sd(0,f+c):c)||0,Zd(a)?g=e(a,b,c)>-1:"number"==typeof f?g=(Jb(a)?a.indexOf(b,c):e(a,b,c))>-1:he(a,function(a){return++d>=c?!(g=a===b):void 0}),g}function Ub(a,b,d){var e=!0;b=c.createCallback(b,d,3);var f=-1,g=a?a.length:0;if("number"==typeof g)for(;++f<g&&(e=!!b(a[f],f,a)););else he(a,function(a,c,d){return e=!!b(a,c,d)});return e}function Vb(a,b,d){var e=[];b=c.createCallback(b,d,3);var f=-1,g=a?a.length:0;if("number"==typeof g)for(;++f<g;){var h=a[f];b(h,f,a)&&e.push(h)}else he(a,function(a,c,d){b(a,c,d)&&e.push(a)});return e}function Wb(a,b,d){b=c.createCallback(b,d,3);var e=-1,f=a?a.length:0;if("number"!=typeof f){var g;return he(a,function(a,c,d){return b(a,c,d)?(g=a,!1):void 0}),g}for(;++e<f;){var h=a[e];if(b(h,e,a))return h}}function Xb(a,b,d){var e;return b=c.createCallback(b,d,3),Zb(a,function(a,c,d){return b(a,c,d)?(e=a,!1):void 0}),e}function Yb(a,b,c){var d=-1,e=a?a.length:0;if(b=b&&"undefined"==typeof c?b:w(b,c,3),"number"==typeof e)for(;++d<e&&b(a[d],d,a)!==!1;);else 
he(a,b);return a}function Zb(a,b,c){var d=a?a.length:0;if(b=b&&"undefined"==typeof c?b:w(b,c,3),"number"==typeof d)for(;d--&&b(a[d],d,a)!==!1;);else{var e=_d(a);d=e.length,he(a,function(a,c,f){return c=e?e[--d]:--d,b(f[c],c,f)})}return a}function $b(a,b){var c=o(arguments,2),d=-1,e="function"==typeof b,f=a?a.length:0,g=nd("number"==typeof f?f:0);return Yb(a,function(a){g[++d]=(e?b:a[b]).apply(a,c)}),g}function _b(a,b,d){var e=-1,f=a?a.length:0;if(b=c.createCallback(b,d,3),"number"==typeof f)for(var g=nd(f);++e<f;)g[e]=b(a[e],e,a);else g=[],he(a,function(a,c,d){g[++e]=b(a,c,d)});return g}function ac(a,b,d){var e=-1/0,f=e;if("function"!=typeof b&&d&&d[b]===a&&(b=null),null==b&&Zd(a))for(var h=-1,i=a.length;++h<i;){var j=a[h];j>f&&(f=j)}else b=null==b&&Jb(a)?g:c.createCallback(b,d,3),Yb(a,function(a,c,d){var g=b(a,c,d);g>e&&(e=g,f=a)});return f}function bc(a,b,d){var e=1/0,f=e;if("function"!=typeof b&&d&&d[b]===a&&(b=null),null==b&&Zd(a))for(var h=-1,i=a.length;++h<i;){var j=a[h];f>j&&(f=j)}else b=null==b&&Jb(a)?g:c.createCallback(b,d,3),Yb(a,function(a,c,d){var g=b(a,c,d);e>g&&(e=g,f=a)});return f}function cc(a,b,d,e){if(!a)return d;var f=arguments.length<3;b=c.createCallback(b,e,4);var g=-1,h=a.length;if("number"==typeof h)for(f&&(d=a[++g]);++g<h;)d=b(d,a[g],g,a);else he(a,function(a,c,e){d=f?(f=!1,a):b(d,a,c,e)});return d}function dc(a,b,d,e){var f=arguments.length<3;return b=c.createCallback(b,e,4),Zb(a,function(a,c,e){d=f?(f=!1,a):b(d,a,c,e)}),d}function ec(a,b,d){return b=c.createCallback(b,d,3),Vb(a,function(a,c,d){return!b(a,c,d)})}function fc(a,b,c){if(a&&"number"!=typeof a.length&&(a=Rb(a)),null==b||c)return a?a[db(0,a.length-1)]:q;var d=gc(a);return d.length=Td(Sd(0,b),d.length),d}function gc(a){var b=-1,c=a?a.length:0,d=nd("number"==typeof c?c:0);return Yb(a,function(a){var c=db(0,++b);d[b]=d[c],d[c]=a}),d}function hc(a){var b=a?a.length:0;return"number"==typeof b?b:_d(a).length}function ic(a,b,d){var e;b=c.createCallback(b,d,3);var 
f=-1,g=a?a.length:0;if("number"==typeof g)for(;++f<g&&!(e=b(a[f],f,a)););else he(a,function(a,c,d){return!(e=b(a,c,d))});return!!e}function jc(a,b,d){var e=-1,f=Zd(b),g=a?a.length:0,i=nd("number"==typeof g?g:0);for(f||(b=c.createCallback(b,d,3)),Yb(a,function(a,c,d){var g=i[++e]=l();f?g.criteria=_b(b,function(b){return a[b]}):(g.criteria=k())[0]=b(a,c,d),g.index=e,g.value=a}),g=i.length,i.sort(h);g--;){var j=i[g];i[g]=j.value,f||m(j.criteria),n(j)}return i}function kc(a){return a&&"number"==typeof a.length?o(a):Rb(a)}function lc(a){for(var b=-1,c=a?a.length:0,d=[];++b<c;){var e=a[b];e&&d.push(e)}return d}function mc(a){return $(a,_(arguments,!0,!0,1))}function nc(a,b,d){var e=-1,f=a?a.length:0;for(b=c.createCallback(b,d,3);++e<f;)if(b(a[e],e,a))return e;return-1}function oc(a,b,d){var e=a?a.length:0;for(b=c.createCallback(b,d,3);e--;)if(b(a[e],e,a))return e;return-1}function pc(a,b,d){var e=0,f=a?a.length:0;if("number"!=typeof b&&null!=b){var g=-1;for(b=c.createCallback(b,d,3);++g<f&&b(a[g],g,a);)e++}else if(e=b,null==e||d)return a?a[0]:q;return o(a,0,Td(Sd(0,e),f))}function qc(a,b,c,d){return"boolean"!=typeof b&&null!=b&&(d=c,c="function"!=typeof b&&d&&d[b]===a?null:b,b=!1),null!=c&&(a=_b(a,c,d)),_(a,b)}function rc(a,c,d){if("number"==typeof d){var e=a?a.length:0;d=0>d?Sd(0,e+d):d||0}else if(d){var f=Ac(a,c);return a[f]===c?f:-1}return b(a,c,d)}function sc(a,b,d){var e=0,f=a?a.length:0;if("number"!=typeof b&&null!=b){var g=f;for(b=c.createCallback(b,d,3);g--&&b(a[g],g,a);)e++}else e=null==b||d?1:b||e;return o(a,0,Td(Sd(0,f-e),f))}function tc(){for(var a=[],c=-1,d=arguments.length,f=k(),g=ib(),h=g===b,j=k();++c<d;){var l=arguments[c];(Zd(l)||mb(l))&&(a.push(l),f.push(h&&l.length>=v&&i(c?a[c]:j)))}var o=a[0],p=-1,q=o?o.length:0,r=[];a:for(;++p<q;){var s=f[0];if(l=o[p],(s?e(s,l):g(j,l))<0){for(c=d,(s||j).push(l);--c;)if(s=f[c],(s?e(s,l):g(a[c],l))<0)continue a;r.push(l)}}for(;d--;)s=f[d],s&&n(s);return m(f),m(j),r}function uc(a,b,d){var 
e=0,f=a?a.length:0;if("number"!=typeof b&&null!=b){var g=f;for(b=c.createCallback(b,d,3);g--&&b(a[g],g,a);)e++}else if(e=b,null==e||d)return a?a[f-1]:q;return o(a,Sd(0,f-e))}function vc(a,b,c){var d=a?a.length:0;for("number"==typeof c&&(d=(0>c?Sd(0,d+c):Td(c,d-1))+1);d--;)if(a[d]===b)return d;return-1}function wc(a){for(var b=arguments,c=0,d=b.length,e=a?a.length:0;++c<d;)for(var f=-1,g=b[c];++f<e;)a[f]===g&&(Kd.call(a,f--,1),e--);return a}function xc(a,b,c){a=+a||0,c="number"==typeof c?c:+c||1,null==b&&(b=a,a=0);for(var d=-1,e=Sd(0,Cd((b-a)/(c||1))),f=nd(e);++d<e;)f[d]=a,a+=c;return f}function yc(a,b,d){var e=-1,f=a?a.length:0,g=[];for(b=c.createCallback(b,d,3);++e<f;){var h=a[e];b(h,e,a)&&(g.push(h),Kd.call(a,e--,1),f--)}return g}function zc(a,b,d){if("number"!=typeof b&&null!=b){var e=0,f=-1,g=a?a.length:0;for(b=c.createCallback(b,d,3);++f<g&&b(a[f],f,a);)e++}else e=null==b||d?1:Sd(0,b);return o(a,e)}function Ac(a,b,d,e){var f=0,g=a?a.length:f;for(d=d?c.createCallback(d,e,1):Zc,b=d(b);g>f;){var h=f+g>>>1;d(a[h])<b?f=h+1:g=h}return f}function Bc(){return eb(_(arguments,!0,!0))}function Cc(a,b,d,e){return"boolean"!=typeof b&&null!=b&&(e=d,d="function"!=typeof b&&e&&e[b]===a?null:b,b=!1),null!=d&&(d=c.createCallback(d,e,3)),eb(a,b,d)}function Dc(a){return $(a,o(arguments,1))}function Ec(){for(var a=-1,b=arguments.length;++a<b;){var c=arguments[a];if(Zd(c)||mb(c))var d=d?eb($(d,c).concat($(c,d))):c}return d||[]}function Fc(){for(var a=arguments.length>1?arguments:arguments[0],b=-1,c=a?ac(me(a,"length")):0,d=nd(0>c?0:c);++b<c;)d[b]=me(a,b);return d}function Gc(a,b){var c=-1,d=a?a.length:0,e={};for(b||!d||Zd(a[0])||(b=[]);++c<d;){var f=a[c];b?e[f]=b[c]:f&&(e[f[0]]=f[1])}return e}function Hc(a,b){if(!Db(b))throw new wd;return function(){return--a<1?b.apply(this,arguments):void 0}}function Ic(a,b){return arguments.length>2?gb(a,17,o(arguments,2),null,b):gb(a,1,null,null,b)}function Jc(a){for(var b=arguments.length>1?_(arguments,!0,!1,1):ub(a),c=-1,d=b.length;++c<d;){var 
e=b[c];a[e]=gb(a[e],1,null,null,a)}return a}function Kc(a,b){return arguments.length>2?gb(b,19,o(arguments,2),null,a):gb(b,3,null,null,a)}function Lc(){for(var a=arguments,b=a.length;b--;)if(!Db(a[b]))throw new wd;return function(){for(var b=arguments,c=a.length;c--;)b=[a[c].apply(this,b)];return b[0]}}function Mc(a,b){return b="number"==typeof b?b:+b||a.length,gb(a,4,null,null,null,b)}function Nc(a,b,c){var d,e,f,g,h,i,j,k=0,l=!1,m=!0;if(!Db(a))throw new wd;if(b=Sd(0,b)||0,c===!0){var n=!0;m=!1}else Eb(c)&&(n=c.leading,l="maxWait"in c&&(Sd(b,c.maxWait)||0),m="trailing"in c?c.trailing:m);var o=function(){var c=b-(oe()-g);if(0>=c){e&&Dd(e);var l=j;e=i=j=q,l&&(k=oe(),f=a.apply(h,d),i||e||(d=h=null))}else i=Jd(o,c)},p=function(){i&&Dd(i),e=i=j=q,(m||l!==b)&&(k=oe(),f=a.apply(h,d),i||e||(d=h=null))};return function(){if(d=arguments,g=oe(),h=this,j=m&&(i||!n),l===!1)var c=n&&!i;else{e||n||(k=g);var q=l-(g-k),r=0>=q;r?(e&&(e=Dd(e)),k=g,f=a.apply(h,d)):e||(e=Jd(p,q))}return r&&i?i=Dd(i):i||b===l||(i=Jd(o,b)),c&&(r=!0,f=a.apply(h,d)),!r||i||e||(d=h=null),f
}}function Oc(a){if(!Db(a))throw new wd;var b=o(arguments,1);return Jd(function(){a.apply(q,b)},1)}function Pc(a,b){if(!Db(a))throw new wd;var c=o(arguments,2);return Jd(function(){a.apply(q,c)},b)}function Qc(a,b){if(!Db(a))throw new wd;var c=function(){var d=c.cache,e=b?b.apply(this,arguments):u+arguments[0];return Hd.call(d,e)?d[e]:d[e]=a.apply(this,arguments)};return c.cache={},c}function Rc(a){var b,c;if(!Db(a))throw new wd;return function(){return b?c:(b=!0,c=a.apply(this,arguments),a=null,c)}}function Sc(a){return gb(a,16,o(arguments,1))}function Tc(a){return gb(a,32,null,o(arguments,1))}function Uc(a,b,c){var d=!0,e=!0;if(!Db(a))throw new wd;return c===!1?d=!1:Eb(c)&&(d="leading"in c?c.leading:d,e="trailing"in c?c.trailing:e),V.leading=d,V.maxWait=b,V.trailing=e,Nc(a,b,V)}function Vc(a,b){return gb(b,16,[a])}function Wc(a){return function(){return a}}function Xc(a,b,c){var d=typeof a;if(null==a||"function"==d)return w(a,b,c);if("object"!=d)return bd(a);var e=_d(a),f=e[0],g=a[f];return 1!=e.length||g!==g||Eb(g)?function(b){for(var c=e.length,d=!1;c--&&(d=ab(b[e[c]],a[e[c]],null,!0)););return d}:function(a){var b=a[f];return g===b&&(0!==g||1/g==1/b)}}function Yc(a){return null==a?"":vd(a).replace(de,hb)}function Zc(a){return a}function $c(a,b,e){var f=!0,g=b&&ub(b);b&&(e||g.length)||(null==e&&(e=b),h=d,b=a,a=c,g=ub(b)),e===!1?f=!1:Eb(e)&&"chain"in e&&(f=e.chain);var h=a,i=Db(h);Yb(g,function(c){var d=a[c]=b[c];i&&(h.prototype[c]=function(){var b=this.__chain__,c=this.__wrapped__,e=[c];Id.apply(e,arguments);var g=d.apply(a,e);if(f||b){if(c===g&&Eb(g))return this;g=new h(g),g.__chain__=b}return g})})}function _c(){return a._=zd,this}function ad(){}function bd(a){return function(b){return b[a]}}function cd(a,b,c){var d=null==a,e=null==b;if(null==c&&("boolean"==typeof a&&e?(c=a,a=1):e||"boolean"!=typeof b||(c=b,e=!0)),d&&e&&(b=1),a=+a||0,e?(b=a,a=0):b=+b||0,c||a%1||b%1){var f=Vd();return Td(a+f*(b-a+parseFloat("1e-"+((f+"").length-1))),b)}return db(a,b)}function 
dd(a,b){if(a){var c=a[b];return Db(c)?a[b]():c}}function ed(a,b,d){var e=c.templateSettings;a=vd(a||""),d=fe({},d,e);var f,g=fe({},d.imports,e.imports),h=_d(g),i=Rb(g),k=0,l=d.interpolate||G,m="__p += '",n=ud((d.escape||G).source+"|"+l.source+"|"+(l===E?B:G).source+"|"+(d.evaluate||G).source+"|$","g");a.replace(n,function(b,c,d,e,g,h){return d||(d=e),m+=a.slice(k,h).replace(I,j),c&&(m+="' +\n__e("+c+") +\n'"),g&&(f=!0,m+="';\n"+g+";\n__p += '"),d&&(m+="' +\n((__t = ("+d+")) == null ? '' : __t) +\n'"),k=h+b.length,b}),m+="';\n";var o=d.variable,p=o;p||(o="obj",m="with ("+o+") {\n"+m+"\n}\n"),m=(f?m.replace(y,""):m).replace(z,"$1").replace(A,"$1;"),m="function("+o+") {\n"+(p?"":o+" || ("+o+" = {});\n")+"var __t, __p = '', __e = _.escape"+(f?", __j = Array.prototype.join;\nfunction print() { __p += __j.call(arguments, '') }\n":";\n")+m+"return __p\n}";var r="\n/*\n//# sourceURL="+(d.sourceURL||"/lodash/template/source["+K++ +"]")+"\n*/";try{var s=qd(h,"return "+m+r).apply(q,i)}catch(t){throw t.source=m,t}return b?s(b):(s.source=m,s)}function fd(a,b,c){a=(a=+a)>-1?a:0;var d=-1,e=nd(a);for(b=w(b,c,1);++d<a;)e[d]=b(d);return e}function gd(a){return null==a?"":vd(a).replace(ce,lb)}function hd(a){var b=++t;return vd(null==a?"":a)+b}function id(a){return a=new d(a),a.__chain__=!0,a}function jd(a,b){return b(a),a}function kd(){return this.__chain__=!0,this}function ld(){return vd(this.__wrapped__)}function md(){return this.__wrapped__}a=a?cb.defaults(Z.Object(),a,cb.pick(Z,J)):Z;var nd=a.Array,od=a.Boolean,pd=a.Date,qd=a.Function,rd=a.Math,sd=a.Number,td=a.Object,ud=a.RegExp,vd=a.String,wd=a.TypeError,xd=[],yd=td.prototype,zd=a._,Ad=yd.toString,Bd=ud("^"+vd(Ad).replace(/[.*+?^${}()|[\]\\]/g,"\\$&").replace(/toString| for [^\]]+/g,".*?")+"$"),Cd=rd.ceil,Dd=a.clearTimeout,Ed=rd.floor,Fd=qd.prototype.toString,Gd=jb(Gd=td.getPrototypeOf)&&Gd,Hd=yd.hasOwnProperty,Id=xd.push,Jd=a.setTimeout,Kd=xd.splice,Ld=xd.unshift,Md=function(){try{var 
a={},b=jb(b=td.defineProperty)&&b,c=b(a,a,a)&&b}catch(d){}return c}(),Nd=jb(Nd=td.create)&&Nd,Od=jb(Od=nd.isArray)&&Od,Pd=a.isFinite,Qd=a.isNaN,Rd=jb(Rd=td.keys)&&Rd,Sd=rd.max,Td=rd.min,Ud=a.parseInt,Vd=rd.random,Wd={};Wd[M]=nd,Wd[N]=od,Wd[O]=pd,Wd[P]=qd,Wd[R]=td,Wd[Q]=sd,Wd[S]=ud,Wd[T]=vd,d.prototype=c.prototype;var Xd=c.support={};Xd.funcDecomp=!jb(a.WinRTError)&&H.test(p),Xd.funcNames="string"==typeof qd.name,c.templateSettings={escape:/<%-([\s\S]+?)%>/g,evaluate:/<%([\s\S]+?)%>/g,interpolate:E,variable:"",imports:{_:c}},Nd||(s=function(){function b(){}return function(c){if(Eb(c)){b.prototype=c;var d=new b;b.prototype=null}return d||a.Object()}}());var Yd=Md?function(a,b){W.value=b,Md(a,"__bindData__",W)}:ad,Zd=Od||function(a){return a&&"object"==typeof a&&"number"==typeof a.length&&Ad.call(a)==M||!1},$d=function(a){var b,c=a,d=[];if(!c)return d;if(!X[typeof a])return d;for(b in c)Hd.call(c,b)&&d.push(b);return d},_d=Rd?function(a){return Eb(a)?Rd(a):[]}:$d,ae={"&":"&","<":"<",">":">",'"':""","'":"'"},be=wb(ae),ce=ud("("+_d(be).join("|")+")","g"),de=ud("["+_d(ae).join("")+"]","g"),ee=function(a,b,c){var d,e=a,f=e;if(!e)return f;var g=arguments,h=0,i="number"==typeof c?2:g.length;if(i>3&&"function"==typeof g[i-2])var j=w(g[--i-1],g[i--],2);else i>2&&"function"==typeof g[i-1]&&(j=g[--i]);for(;++h<i;)if(e=g[h],e&&X[typeof e])for(var k=-1,l=X[typeof e]&&_d(e),m=l?l.length:0;++k<m;)d=l[k],f[d]=j?j(f[d],e[d]):e[d];return f},fe=function(a,b,c){var d,e=a,f=e;if(!e)return f;for(var g=arguments,h=0,i="number"==typeof c?2:g.length;++h<i;)if(e=g[h],e&&X[typeof e])for(var j=-1,k=X[typeof e]&&_d(e),l=k?k.length:0;++j<l;)d=k[j],"undefined"==typeof f[d]&&(f[d]=e[d]);return f},ge=function(a,b,c){var d,e=a,f=e;if(!e)return f;if(!X[typeof e])return f;b=b&&"undefined"==typeof c?b:w(b,c,3);for(d in e)if(b(e[d],d,a)===!1)return f;return f},he=function(a,b,c){var d,e=a,f=e;if(!e)return f;if(!X[typeof e])return f;b=b&&"undefined"==typeof c?b:w(b,c,3);for(var g=-1,h=X[typeof 
e]&&_d(e),i=h?h.length:0;++g<i;)if(d=h[g],b(e[d],d,a)===!1)return f;return f},ie=Gd?function(a){if(!a||Ad.call(a)!=R)return!1;var b=a.valueOf,c=jb(b)&&(c=Gd(b))&&Gd(c);return c?a==c||Gd(a)==c:kb(a)}:kb,je=fb(function(a,b,c){Hd.call(a,c)?a[c]++:a[c]=1}),ke=fb(function(a,b,c){(Hd.call(a,c)?a[c]:a[c]=[]).push(b)}),le=fb(function(a,b,c){a[c]=b}),me=_b,ne=Vb,oe=jb(oe=pd.now)&&oe||function(){return(new pd).getTime()},pe=8==Ud(x+"08")?Ud:function(a,b){return Ud(Jb(a)?a.replace(F,""):a,b||0)};return c.after=Hc,c.assign=ee,c.at=Sb,c.bind=Ic,c.bindAll=Jc,c.bindKey=Kc,c.chain=id,c.compact=lc,c.compose=Lc,c.constant=Wc,c.countBy=je,c.create=pb,c.createCallback=Xc,c.curry=Mc,c.debounce=Nc,c.defaults=fe,c.defer=Oc,c.delay=Pc,c.difference=mc,c.filter=Vb,c.flatten=qc,c.forEach=Yb,c.forEachRight=Zb,c.forIn=ge,c.forInRight=sb,c.forOwn=he,c.forOwnRight=tb,c.functions=ub,c.groupBy=ke,c.indexBy=le,c.initial=sc,c.intersection=tc,c.invert=wb,c.invoke=$b,c.keys=_d,c.map=_b,c.mapValues=Lb,c.max=ac,c.memoize=Qc,c.merge=Mb,c.min=bc,c.omit=Nb,c.once=Rc,c.pairs=Ob,c.partial=Sc,c.partialRight=Tc,c.pick=Pb,c.pluck=me,c.property=bd,c.pull=wc,c.range=xc,c.reject=ec,c.remove=yc,c.rest=zc,c.shuffle=gc,c.sortBy=jc,c.tap=jd,c.throttle=Uc,c.times=fd,c.toArray=kc,c.transform=Qb,c.union=Bc,c.uniq=Cc,c.values=Rb,c.where=ne,c.without=Dc,c.wrap=Vc,c.xor=Ec,c.zip=Fc,c.zipObject=Gc,c.collect=_b,c.drop=zc,c.each=Yb,c.eachRight=Zb,c.extend=ee,c.methods=ub,c.object=Gc,c.select=Vb,c.tail=zc,c.unique=Cc,c.unzip=Fc,$c(c),c.clone=nb,c.cloneDeep=ob,c.contains=Tb,c.escape=Yc,c.every=Ub,c.find=Wb,c.findIndex=nc,c.findKey=qb,c.findLast=Xb,c.findLastIndex=oc,c.findLastKey=rb,c.has=vb,c.identity=Zc,c.indexOf=rc,c.isArguments=mb,c.isArray=Zd,c.isBoolean=xb,c.isDate=yb,c.isElement=zb,c.isEmpty=Ab,c.isEqual=Bb,c.isFinite=Cb,c.isFunction=Db,c.isNaN=Fb,c.isNull=Gb,c.isNumber=Hb,c.isObject=Eb,c.isPlainObject=ie,c.isRegExp=Ib,c.isString=Jb,c.isUndefined=Kb,c.lastIndexOf=vc,c.mixin=$c,c.noConflict=_c,c.noop=ad,c.now=oe,c.parseInt=
pe,c.random=cd,c.reduce=cc,c.reduceRight=dc,c.result=dd,c.runInContext=p,c.size=hc,c.some=ic,c.sortedIndex=Ac,c.template=ed,c.unescape=gd,c.uniqueId=hd,c.all=Ub,c.any=ic,c.detect=Wb,c.findWhere=Wb,c.foldl=cc,c.foldr=dc,c.include=Tb,c.inject=cc,$c(function(){var a={};return he(c,function(b,d){c.prototype[d]||(a[d]=b)}),a}(),!1),c.first=pc,c.last=uc,c.sample=fc,c.take=pc,c.head=pc,he(c,function(a,b){var e="sample"!==b;c.prototype[b]||(c.prototype[b]=function(b,c){var f=this.__chain__,g=a(this.__wrapped__,b,c);return f||null!=b&&(!c||e&&"function"==typeof b)?new d(g,f):g})}),c.VERSION="2.4.1",c.prototype.chain=kd,c.prototype.toString=ld,c.prototype.value=md,c.prototype.valueOf=md,Yb(["join","pop","shift"],function(a){var b=xd[a];c.prototype[a]=function(){var a=this.__chain__,c=b.apply(this.__wrapped__,arguments);return a?new d(c,a):c}}),Yb(["push","reverse","sort","unshift"],function(a){var b=xd[a];c.prototype[a]=function(){return b.apply(this.__wrapped__,arguments),this}}),Yb(["concat","slice","splice"],function(a){var b=xd[a];c.prototype[a]=function(){return new d(b.apply(this.__wrapped__,arguments),this.__chain__)}}),c}var q,r=[],s=[],t=0,u=+new Date+"",v=75,w=40,x=" \f \n\r\u2028\u2029 ",y=/\b__p \+= '';/g,z=/\b(__p \+=) '' \+/g,A=/(__e\(.*?\)|\b__t\)) \+\n'';/g,B=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g,C=/\w*$/,D=/^\s*function[ \n\r\t]+\w/,E=/<%=([\s\S]+?)%>/g,F=RegExp("^["+x+"]*0+(?=.$)"),G=/($^)/,H=/\bthis\b/,I=/['\n\r\t\u2028\u2029\\]/g,J=["Array","Boolean","Date","Function","Math","Number","Object","RegExp","String","_","attachEvent","clearTimeout","isFinite","isNaN","parseInt","setTimeout"],K=0,L="[object Arguments]",M="[object Array]",N="[object Boolean]",O="[object Date]",P="[object Function]",Q="[object Number]",R="[object Object]",S="[object RegExp]",T="[object String]",U={};U[P]=!1,U[L]=U[M]=U[N]=U[O]=U[Q]=U[R]=U[S]=U[T]=!0;var 
V={leading:!1,maxWait:0,trailing:!1},W={configurable:!1,enumerable:!1,value:null,writable:!1},X={"boolean":!1,"function":!0,object:!0,number:!1,string:!1,undefined:!1},Y={"\\":"\\","'":"'","\n":"n","\r":"r"," ":"t","\u2028":"u2028","\u2029":"u2029"},Z=X[typeof window]&&window||this,$=X[typeof d]&&d&&!d.nodeType&&d,_=X[typeof c]&&c&&!c.nodeType&&c,ab=_&&_.exports===$&&$,bb=X[typeof a]&&a;!bb||bb.global!==bb&&bb.window!==bb||(Z=bb);var cb=p();"function"==typeof define&&"object"==typeof define.amd&&define.amd?(Z._=cb,define(function(){return cb})):$&&_?ab?(_.exports=cb)._=cb:$._=cb:Z._=cb}).call(this)}).call(this,"undefined"!=typeof b?b:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],3:[function(a,b,c){!function(a,b){"use strict";"function"==typeof define&&define.amd?define(["exports"],b):b("object"==typeof c?c:a.PubSub={})}("object"==typeof window&&window||this,function(a){"use strict";function b(a){var b;for(b in a)if(a.hasOwnProperty(b))return!0;return!1}function c(a){return function(){throw a}}function d(a,b,d){try{a(b,d)}catch(e){setTimeout(c(e),0)}}function e(a,b,c){a(b,c)}function f(a,b,c,f){var g,h=j[b],i=f?e:d;if(j.hasOwnProperty(b))for(g in h)h.hasOwnProperty(g)&&i(h[g],a,c)}function g(a,b,c){return function(){var d=String(a),e=d.lastIndexOf(".");for(f(a,a,b,c);-1!==e;)d=d.substr(0,e),e=d.lastIndexOf("."),f(a,d,b)}}function h(a){for(var c=String(a),d=Boolean(j.hasOwnProperty(c)&&b(j[c])),e=c.lastIndexOf(".");!d&&-1!==e;)c=c.substr(0,e),e=c.lastIndexOf("."),d=Boolean(j.hasOwnProperty(c)&&b(j[c]));return d}function i(a,b,c,d){var e=g(a,b,d),f=h(a);return f?(c===!0?e():setTimeout(e,0),!0):!1}var j={},k=-1;a.publish=function(b,c){return i(b,c,!1,a.immediateExceptions)},a.publishSync=function(b,c){return i(b,c,!0,a.immediateExceptions)},a.subscribe=function(a,b){if("function"!=typeof b)return!1;j.hasOwnProperty(a)||(j[a]={});var c="uid_"+String(++k);return j[a][c]=b,c},a.clearAllSubscriptions=function(){j={}},a.unsubscribe=function(a){var 
b,c,d,e="string"==typeof a&&j.hasOwnProperty(a),f=!e&&"string"==typeof a,g="function"==typeof a,h=!1;if(e)return void delete j[a];for(b in j)if(j.hasOwnProperty(b)){if(c=j[b],f&&c[a]){delete c[a],h=a;break}if(g)for(d in c)c.hasOwnProperty(d)&&c[d]===a&&(delete c[d],h=!0)}return h}})},{}],4:[function(a,b){var c=a("hammerjs"),d=a("lodash"),e=function(a,b,c){this.index=a,this.dataModel=new function(a,b){this.data=a,this.index=b,this.getText=function(){return this.data[this.index]}}(c.data,b),this.init=function(){this.angle=c.theta*a,this.elem=document.createElement("li"),this.elem.classList.add("a"+100*this.angle),this.elem.style[c.transformProp]=c.rotateFn+"("+-this.angle+"deg) translateZ("+c.radius+"px)",this.setText()},this.setText=function(){this.elem.innerHTML=this.dataModel.getText()},this.update=function(a){this.dataModel.index!==a&&(this.dataModel.index=a,this.setText())}},f=function(a){var b=a,f={panelCount:a.length,rotateFn:"rotateX",interactive:!0,dail_w:20,dail_h:5,dail_stroke_color:"#999999",dail_stroke_width:1};f.transformProp=Modernizr.prefixed("transform"),f.rotation=0,f.distance=0,f.last_angle=0,f.theta=90/f.panelCount,f.initselect=b.selectedIndex;var g;if(f.transformProp){f.data=[];for(var h=0;h<b.children.length;h++)f.data.push(b.children[h].innerHTML);g=document.createElement("div"),g.classList.add("select-wrapper"),f.id?g.id=f.id:b.id?g.id="select_"+b.id:b.name&&(g.id="select_"+b.name),b.parentNode.appendChild(g);var i=document.createElement("div");i.classList.add("inner"),g.appendChild(i);var j=document.createElement("div");j.classList.add("container"),i.appendChild(j);var k=document.querySelector("#city .dots"),l=document.createElement("ul");l.classList.add("select"),l.addEventListener("transitionend",function(){l.classList.remove("transit")}),j.appendChild(l),j.appendChild(k),j.appendChild(k.cloneNode(!0)),f.radius=Math.round(l.clientHeight/2/Math.tan(Math.PI/5/f.panelCount)),f.mapping=[];var 
m=0;for(h=0;h<f.panelCount&&f.data.length!==h;h++){var n=m;m>=f.panelCount/2&&(n=f.data.length-(f.panelCount-m)),m++;var o=new e(h,n,f);o.init(),f.mapping.push(o),l.appendChild(o.elem)}var p=function(a){a=a||f.rotation;var b=f.theta/2,c=180,d=((a+b)%c+c)%c;d-=d%f.theta;var e=(f.data.length-1)*f.theta;return d>e?a>0?e:0:d},q=function(){var a=p();for(var b in f.mapping)if(f.mapping[b].angle===a)return f.mapping[b]},r=function(a){for(var b,c=[],d=f.panelCount,e=f.panelCount/4,g=f.data.length,h=a.index,i=a.dataModel.index,j=i-e;i+e-1>=j;j++)b=j,0>j&&(b=g+j),j>g-1&&(b=j-g),c.push(b);var k=c.slice(e-h);for(c=k.concat(c.slice(0,d-k.length)),h=0;h<f.mapping.length;h++)f.mapping[h].update(c[h])},s=function(a){l.style[f.transformProp]="translateZ(-"+f.radius+"px) "+f.rotateFn+"("+f.rotation+"deg)";var c=q();if(c){var d=c.dataModel,e=b.selectedIndex;b.selectedIndex=d.index,a&&e!==d.index&&f.onChange&&f.onChange(b),c.elem.classList.add("on"),l.selected&&l.selected!==c.elem&&l.selected.classList.remove("on"),l.selected=c.elem,c.angle!==f.last_angle&&[0,90,120,270].indexOf(c.angle)>=0&&(f.last_angle=c.angle,r(c))}};this.setIndex=function(a){var b=Math.floor(a/f.panelCount),c=a-b*f.panelCount;f.rotation=c*f.theta,s(!1)}}else this.setIndex=function(a){b.selectedIndex=a};this.setIndex(f.initselect),this.getIndex=function(){return b.selectedIndex},f.transformProp&&"undefined"!=typeof c&&(f.touch=new c(g,{prevent_default:!0,no_mouseevents:!0}),f.touch.on("dragstart",function(){f.distance=0}),f.touch.on("drag",function(a){var b=["up","down"];b.indexOf(a.gesture.direction)>=0&&(f.rotation+=Math.round(a.gesture.deltaY-f.distance)/6*-1,s(!0),f.distance=a.gesture.deltaY)}),f.touch.on("dragend",function(){f.rotation=p(),l.classList.add("transit"),window.requestAnimationFrame(function(){s(!0)})}),f.touch.on("tap",function(a){var b,c=a.gesture.target;b=d.find(f.mapping,function(a){return 
a.elem===c}),b&&l.selected!==b.elem&&(f.rotation=b.angle,l.classList.add("transit"),window.requestAnimationFrame(function(){s(!0)}))}))};b.exports=f},{hammerjs:1,lodash:2}],5:[function(a,b){var c=function(){if(navigator.userAgent.match(/Android/i)||navigator.userAgent.match(/webOS/i)||navigator.userAgent.match(/iPhone/i)||navigator.userAgent.match(/iPad/i)||navigator.userAgent.match(/iPod/i)||navigator.userAgent.match(/BlackBerry/i)||navigator.userAgent.match(/Windows Phone/i)||navigator.userAgent.match(/Mobile.*Firefox/i)){if(window.WebGLRenderingContext){for(var a=document.createElement("canvas"),b=["webgl","experimental-webgl","moz-webgl","webkit-3d"],c=!1,d=0;4>d;d++)try{if(c=a.getContext(b[d]),c&&"function"==typeof c.getParameter)return!0}catch(e){}return"mobile"}return"mobile"}return"desktop"};b.exports=c},{}],6:[function(a){var b=a("pubsub-js"),c=a("./helpers/webgl-detect"),d=a("./modules/experience"),e=a("./modules/tutorial"),f=a("./modules/city"),g=a("./modules/screens"),h=a("./modules/share"),i=function(){var a=window.location.hash.replace("#","");this.userCity=document.body.dataset.city;var i,j={FR:"paris",GB:"london",JP:"tokyo",NL:"amsterdam",BR:"sao paulo",US:"washington"};window.geoip_country_code&&(i=window.geoip_country_code(),this.geoCity=j[i]),this.userCity||(this.userCity=this.geoCity?this.geoCity:this.geoCity="generic"),this.geoCity||(this.geoCity=this.userCity);var k=c();if(!0===k)if(ga("send","pageview",{page:"/webGLversion",title:"webGLversion"}),this.userMessage="Happy holidays",this.xp=new d(this),this.tutorial=new e(this),this.city=new f(this),this.share=new h(this),this.pages=new g(this),a){var l=new XMLHttpRequest;l.open("GET","http://ec2-54-65-84-140.ap-northeast-1.compute.amazonaws.com:3000/"+a,!1),l.onload=function(){if(l.status>=200&&l.status<400){var 
a=JSON.parse(l.responseText);a.message.trim().length&&(this.userMessage=a.message)}b.publish("app.start")}.bind(this),l.onerror=function(){b.publish("app.start")};try{l.send()}catch(m){b.publish("app.start")}}else b.publish("app.start");else if(document.getElementById("loader").style.display="none","mobile"===k)document.getElementById("fallback-mobile").style.display="block",ga("send","pageview",{page:"/fallbackMobile",title:"fallbackMobile"});else{document.getElementById("fallback-desktop").style.display="block";var n=document.getElementById("overlay"),o=document.getElementById("player"),p=document.createElement("iframe"),q=document.getElementById("textarea"),r=document.getElementById("closeOverlay");p.src="//player.vimeo.com/video/114695314",r.addEventListener("click",function(a){a.preventDefault(),n.classList.remove("visible"),setTimeout(function(){o.removeChild(p),n.style.display="none"},500)},!1),window.console.log("checkIfDesktopDeeplink"),a?(q.innerHTML="Someone left you a greeting in Winterlands.<br>Visit this page on your mobile to see it.<br><small>for iOS8 or Android 4.3.3</small>",ga("send","pageview",{page:"/desktopUncompatible",title:"desktopUncompatible"})):ga("send","pageview",{page:"/desktopStandard",title:"desktopStandard"})}};new i},{"./helpers/webgl-detect":5,"./modules/city":8,"./modules/experience":9,"./modules/screens":10,"./modules/share":11,"./modules/tutorial":12,"pubsub-js":3}],7:[function(a,b){b.exports=function(a,b){return this.size=b?b:222,this.init=function(){this.el=document.createElement("canvas"),this.el.setAttribute("width",this.size),this.el.setAttribute("height",this.size),this.index=0},this.pause=function(){window.clearTimeout(this.delay),window.clearInterval(this.interval)},this.play=function(b){b=b||0;var 
c=this.el.getContext("2d"),d=function(){c.clearRect(0,0,this.size,this.size),c.drawImage(a[this.index++],0,0,this.size,this.size,0,0,this.size,this.size),a[this.index]||(this.index=0)}.bind(this);this.delay=window.setTimeout(function(){this.interval=window.setInterval(function(){window.requestAnimationFrame(d)},50)}.bind(this),b)},this.init(),{play:this.play.bind(this),pause:this.pause.bind(this),el:this.el}}},{}],8:[function(a,b){var c=a("pubsub-js"),d=a("hammerjs"),e=a("./animation"),f=a("../helpers/select");b.exports=function(a){this.init=function(){this.process(),this.bind()},this.initAnnimation=function(){this.datas.animationLoader=new e(document.querySelectorAll(".swap img.await"),76),this.els.animLoader.appendChild(this.datas.animationLoader.el),window.console.log("initAnnimation done")},this.bind=function(){this.datas.select=new f(this.els.select),this.el.addEventListener("transitionend",this.handleTransition.bind(this)),this.els.form.addEventListener("submit",function(a){a.preventDefault()}),this.datas.tapSubmit=new d(this.els.next),this.datas.tapSubmit.on("tap",this.handleSubmit.bind(this)),this.els.textarea.addEventListener("input",this.handleInput.bind(this)),this.datas.tapFieldset=new d(this.els.fieldset),this.datas.tapFieldset.on("tap",function(a){this.datas.isLoadingCity||a.gesture.target===this.els.fieldset&&(c.publish("webgl.zoom",!1),this.el.classList.add("hide"),this.el.classList.remove("show"))}.bind(this)),this.datas.tapBackZoom=new d(this.els.cityBack),this.datas.tapBackZoom.on("tap",function(){c.publish("webgl.zoom",!1),this.el.classList.add("hide"),this.el.classList.remove("show")}.bind(this)),this.datas.tapLegend=new d(this.els.legend),this.datas.tapLegend.on("tap",function(){this.el.classList.add("select")}.bind(this)),this.datas.tapCity=new d(this.els.cityLabel),this.datas.tapCity.on("tap",function(){this.el.classList.add("select")}.bind(this)),c.subscribe("cityLoaded",function(){window.console.log("we remove wait to 
"+this.els.next),this.els.next.classList.remove("wait"),this.datas.animationLoader.pause(),this.datas.isLoadingCity=!1}.bind(this)),c.subscribe("webgl.city",function(){window.console.log("we add wait to "+this.els.next),this.els.next.classList.add("wait"),this.datas.animationLoader.play(),this.datas.isLoadingCity=!0}.bind(this)),this.datas.tapClose=new d(this.els.cityClose),this.datas.tapClose.on("tap",function(){this.el.classList.remove("select")}.bind(this)),this.datas.tapBack=new d(this.els.messageBack),this.datas.tapBack.on("tap",this.handleBack.bind(this)),this.datas.tapCityOk=new d(this.els.cityOk),this.datas.currentCity=a.userCity,this.datas.tapCityOk.on("tap",function(a){this.handleSelectChange(a),window.setTimeout(function(){this.el.classList.remove("select")}.bind(this),10)}.bind(this)),this.initAnnimation()},this.handleBack=function(){var a=this.els.fieldset.previousElementSibling;"FIELDSET"===a.nodeName&&(this.els.fieldset.classList.add("fade"),this.els.fieldset.classList.remove("show"),a.classList.add("show"),this.els.fieldset=a,this.els.submit.innerHTML=this.els.submit.dataset.next,this.els.submit.parentNode.removeAttribute("disabled"))},this.handleInput=function(){var a=this.datas.max-this.els.textarea.value.length;this.els.count.classList.toggle("error",0>a),this.els.submit.parentNode[0>a?"setAttribute":"removeAttribute"]("disabled",!0),this.els.count.innerHTML=a,this.els.messageLabel.style.display=a!==this.datas.max?"none":"",this.els.submit.innerHTML=this.els.submit.dataset[a!==this.datas.max?"next":"skip"]},this.handleSelectChange=function(b){var d=this.els.options[this.els.select.selectedIndex];window.console.log("site city "+a.userCity),window.console.log("form city "+this.els.form.city.value),this.datas.currentCity!==this.els.form.city.value&&(window.console.log("city name diff "+this.datas.currentCity+" 
"+this.els.form.city.value),this.els.cityLabel.innerHTML=d.innerHTML,b&&(c.publish("city",d.value),c.publish("webgl.city",d.value)),this.datas.currentCity=this.els.form.city.value)},this.handleSubmit=function(a){if(!this.datas.isLoadingCity){a.preventDefault(),a.stopPropagation();var b=this.els.fieldset.nextElementSibling;if("FIELDSET"===b.nodeName)return this.els.fieldset.classList.add("fade"),this.els.fieldset.classList.remove("show"),b.classList.add("show"),this.els.fieldset=b,this.els.submit.innerHTML=this.els.submit.dataset[this.els.textarea.value.length>0?"next":"skip"],void this.els.submit.parentNode[this.els.textarea.value.length>this.datas.max?"setAttribute":"removeAttribute"]("disabled",!0);if(this.els.textarea.value.length>this.datas.max)return;var d={city:this.els.form.city.value,message:this.els.form.message.value};if(this.datas.information.city!==d.city||this.datas.information.message!==d.message){this.datas.information=d;var e=new XMLHttpRequest;e.open("POST","http://ec2-54-65-84-140.ap-northeast-1.compute.amazonaws.com:3000/new",!1),e.setRequestHeader("Content-Type","application/json;charset=UTF-8"),e.onload=function(){if(e.status>=200&&e.status<400){var a=JSON.parse(e.responseText);c.publish("share",a.hashtag),c.publish("page.share","show"),this.el.classList.add("hide"),this.el.classList.remove("show")}}.bind(this),e.onerror=function(){},e.send(JSON.stringify(this.datas.information))}else c.publish("page.share","show"),this.el.classList.add("hide"),this.el.classList.remove("show")}},this.handleTransition=function(a){this.handleSelectChange(a);var b=a.target;return b.classList.contains("fade")?void b.classList.remove("fade"):"opacity"===a.propertyName&&b.classList.contains("hide")?void b.classList.remove("hide"):b===this.el?(b.style.display="",b.style.visibility="",void("opacity"===a.propertyName&&b.classList.add("hide"))):void 0},this.process=function(){var 
b=a.geoCity;this.el=document.getElementById("city"),this.els={form:this.el.querySelector("form"),next:this.el.querySelector(".submit"),animLoader:this.el.querySelector(".submit .loader"),submit:this.el.querySelector(".submit span"),fieldset:this.el.querySelector("fieldset"),select:document.getElementById("cities"),options:[].slice.call(this.el.querySelectorAll("option")),cityLabel:this.el.querySelector(".city label span"),cityBack:this.el.querySelector(".city .bt-back"),cityClose:this.el.querySelector(".city .bt-close"),cityOk:this.el.querySelector(".city .bt-action.ok"),zoomin:this.el.querySelector(".city .bt-zoomin"),textarea:document.getElementById("message"),messageLabel:this.el.querySelector(".message label"),messageBack:this.el.querySelector(".message .bt-back"),count:this.el.querySelector(".count"),legend:this.el.querySelector(".legend")},this.els.options.forEach(function(a){a.value===b&&a.setAttribute("selected","selected")},this),this.els.fieldset.classList.add("show"),this.datas={max:+this.els.textarea.dataset.max,length:this.els.textarea.value.length,information:{}},this.handleSelectChange(),ga("send","pageview",{page:"/city?id="+this.els.form.city.value,title:"city"})},c.subscribe("app.start",this.init.bind(this))}},{"../helpers/select":4,"./animation":7,hammerjs:1,"pubsub-js":3}],9:[function(a,b){var c,d=a("pubsub-js"),e=a("lodash"),f=a("hammerjs"),g=a("./animation");"undefined"!=typeof document.hidden?c="visibilitychange":"undefined"!=typeof document.mozHidden?c="mozvisibilitychange":"undefined"!=typeof document.msHidden?c="msvisibilitychange":"undefined"!=typeof document.webkitHidden&&(c="webkitvisibilitychange"),document.hidden=document.hidden||document.mozHidden||document.msHidden||document.webkitHidden,b.exports=function(a){this.init=function(){this.preload(),this.bind()},this.bind=function(){d.subscribe("webgl",this.send.bind(this)),this.datas.tapCreate=new 
f(this.els.create),this.datas.tapCreate.on("tap",this.handleCreateAction.bind(this)),document.body.addEventListener("transitionend",this.handleTransition.bind(this)),document.addEventListener(c,this.handleVisibilityChange.bind(this),!1),d.subscribe("tutorial.done",function(){this.datas.tutorialDone=!0,this.datas.siteAligned&&this.handleCreateButton(!0)}.bind(this))},this.displayStart=function(){this.els.status.classList.remove("show"),this.els.start.classList.add("ready")},this.fallback=function(){},this.getAnimation=function(){var a=window.require;a.config({baseUrl:"./js/assets",waitSeconds:0}),a(["app"],this.start.bind(this))},this.handleError=function(){},this.handleStatus=function(a){switch("progress"!==a.code,a.code){case"sign-aligned":this.datas.siteAligned=a.data,this.datas.tutorialDone&&this.handleCreateButton(a.data);break;case"ready":break;case"city-ready":window.console&&window.console.info("city ready"),d.publish("cityLoaded");break;case"progress":this.datas.webglLoaded=a.data,this.updateLoader();break;case"citysight-complete":d.publish("page.city","show");break;case"intro-complete":d.publish("page.tutorial","show")}},this.handleCreateAction=function(){this.handleCreateButton(),this.send("webgl.zoom",!0)},this.handleCreateButton=function(a){a&&(this.els.create.style.zIndex=1),this.els.create.classList.toggle("show",a)},this.handleTransition=function(a){a.target===this.el&&(this.el.style.display="none",this.datas.canvas.pause())},this.handleVisibilityChange=function(){this.els.audio[document.hidden?"pause":"play"](),this.send("webgl.playback",document.hidden)},this.play=function(a){this.datas.canvas=new g(a),this.els.container.appendChild(this.datas.canvas.el),this.datas.canvas.play(),this.getAnimation()},this.prefetchAudio=function(){var a,b=new 
XMLHttpRequest;b.open("GET",this.els.audio.src,!0),b.addEventListener("progress",function(b){b.lengthComputable&&(a=Math.round(b.loaded/b.total*3)/10,this.datas.audioLoaded!==a&&(this.datas.audioLoaded=a,this.updateLoader()))}.bind(this)),b.send()},this.preload=function(){this.play([].slice.call(document.querySelectorAll(".swap img.aloader")))},this.process=function(b){this.el=document.getElementById("loader"),this.els={create:document.querySelector(".bt-create"),status:document.querySelector("#loader small"),container:this.el.querySelector(".snow"),audio:this.el.querySelector("audio")},this.datas={audioLoaded:0,webglLoaded:0,webgl:{container:"xp",assetsDir:"media/assets",city:a.userCity,message:{text:a.userMessage.toUpperCase(),lineHeight:40,font:"28pt ArcherBook",color:"#EBE4D4"}}},e.assign(this.datas.webgl,b||{}),this.init()},this.send=function(a,b){var c=a.split(".")[1];switch(c){case"city":this.app.setCity(b);break;case"message":this.app.setMessage({text:b});break;case"start":this.app.intro();break;case"zoom":this.app.citySight(b);break;case"playback":this.app&&this.app.playback&&this.app.playback(b)}},this.start=function(a){this.app=new a,this.app.initialize(this.datas.webgl).then(this.fallback.bind(this),this.handleError.bind(this),this.handleStatus.bind(this)),this.prefetchAudio()},this.updateLoader=function(){var a=Math.floor((this.datas.webglLoaded+this.datas.audioLoaded)/1.3*100);this.els.status.classList.contains("show")||this.els.status.classList.add("show"),this.els.status.innerHTML=[a,"%"].join(""),100===a&&(this.el.classList.add("fade"),d.publish("page.tutorial","show"))},d.subscribe("data",this.process.bind(this)),window.onload=this.process.bind(this)}},{"./animation":7,hammerjs:1,lodash:2,"pubsub-js":3}],10:[function(a,b){var 
c=a("pubsub-js"),d=a("lodash");b.exports=function(){this.init=function(){this.process(),this.bind()},this.bind=function(){c.subscribe("page",this.handlePage.bind(this)),document.body.addEventListener("transitionend",this.handleTransition.bind(this))},this.process=function(){this.els={pages:[].slice.call(document.querySelectorAll("header#intro, main > section[id]"))},this.datas={pages:{}},d.each(this.els.pages,function(a){this.datas.pages[a.id]=a},this)},this.handlePage=function(a,b){var c,d=a.split(".")[1],e=this.datas.pages[d];e&&(this.datas.currentPage=e,e.style.display="show"===b?"block":"",e.style.visibility="show"===b?"visible":"",window.requestAnimationFrame(function(){c=e.classList.toggle(b),"intro"===d&&c&&window.setTimeout(function(){this.handlePage("page.intro","show")}.bind(this),7e3)}.bind(this)))},this.handleTransition=function(a){var b=a.target;~~this.els.pages.indexOf(b)||b.classList.contains("show")||(b.style.display="",b.style.visibility="")},this.startXP=function(){this.datas.tapIntro.enable(!1),c.publish("webgl.start"),c.publish("page.intro","show")},c.subscribe("app.start",this.init.bind(this))}},{lodash:2,"pubsub-js":3}],11:[function(a,b){var c=a("pubsub-js"),d=a("lodash"),e=a("hammerjs");b.exports=function(a){this.init=function(){this.process(),this.bind()},this.bind=function(){this.el.addEventListener("transitionend",this.handleTransition.bind(this)),c.subscribe("city",function(a,b){this.datas.city=b}.bind(this)),this.els.sharers.forEach(function(a){a.hammer=new e(a),a.hammer.on("tap",this.handleShare.bind(this))
},this),this.els.back.hammer=new e(this.els.back),this.els.back.hammer.on("tap",function(){this.el.classList.add("hide"),this.el.classList.remove("show"),c.publish("page.city","show")}.bind(this)),c.subscribe("share",function(a,b){var c="";window.console.log("cityname = "+this.datas.city),"generic"!==this.datas.city&&(c="sanfransisco"===this.datas.city?"sanfrancisco.html":"shangai"===this.datas.city?"shanghai.html":this.datas.city+".html"),this.datas.shareLink=[this.datas.shareUrl,c+"#"+b].join("/"),this.els.shareLink.setAttribute("href",this.datas.shareLink),this.els.shareLink.innerHTML=this.datas.shareLink.replace("http://","")}.bind(this))},this.process=function(){this.el=document.getElementById("share"),this.els={sharers:[].slice.call(this.el.querySelectorAll(".social .bt-action")),shareLink:this.el.querySelector(".getLink"),back:this.el.querySelector(".bt-back")},this.els.shareLink.style.display="none",this.datas={city:a.userCity,shareUrl:"http://"+document.location.host,socialUrl:{facebook:"//www.facebook.com/sharer.php",twitter:"//twitter.com/intent/tweet",gplus:"https://plus.google.com/share",email:"mailto:"}},this.datas.shareLink=this.datas.shareUrl},this.handleShare=function(a){var b,c=d.find(this.els.sharers,function(b){return b.contains(a.target)}),e=c.className.replace("bt-action ",""),f=[this.datas.socialUrl[e]],g=document.querySelector('head meta[name="og:description"]');switch(g=g?g.getAttribute("content"):"",window.console.log("handleShare "+e),e){case"facebook":b=["u="+encodeURIComponent(this.datas.shareLink)];break;case"twitter":b=["text="+encodeURIComponent("Wrap up warm - I’ve left my holiday wishes for you in Winterlands "+this.datas.shareLink+" via @AKQA")];break;case"email":b=["Subject="+encodeURIComponent("Greetings from Winterlands"),"Body="+encodeURIComponent(["Wrap up warm - I’ve left my holiday wishes for you in Winterlands",this.datas.shareLink].join(" 
"))];break;default:b=["url="+encodeURIComponent(this.datas.shareLink)]}f.push(b.join("&")),"email"!==e?window.open(f.join("?")):window.location.href=f.join("?")},this.handleTransition=function(a){this.els.shareLink.style.display="block",setTimeout(function(){this.els.shareLink.parentNode.classList.add("show")}.bind(this),100);var b=a.target;b.classList.contains("hide")&&(b.style.display="",b.style.visibility="",b.classList.remove("hide")),ga("send","pageview",{page:"/share",title:"share"})},c.subscribe("app.start",this.init.bind(this))}},{hammerjs:1,lodash:2,"pubsub-js":3}],12:[function(a,b){var c=a("pubsub-js"),d=a("hammerjs"),e=a("./animation");b.exports=function(){this.init=function(){this.process(),this.bind()},this.bind=function(){this.datas.tapClose=new d(this.els.close),this.el.addEventListener("transitionend",this.handleTransition.bind(this)),this.datas.tapClose.on("tap",this.goNext.bind(this)),this.datas.tapStart=new d(this.els.start),this.datas.tapStart.on("tap",function(){this.goNext(),this.els.audio.play(),c.publish("webgl.start")}.bind(this)),c.subscribe("page.tutorial",function(){window.requestAnimationFrame(function(){this.el.classList.contains("hide")&&(this.el.classList.remove("hide"),this.el.classList.add("fadein"),this.el.classList.add("show"),this.el.style.opacity=1);var a=this.els.screens[this.datas.screenIndex];!a.anim&&this.els.animation[this.datas.screenIndex]&&(a.anim=new 
e(this.els.animation[this.datas.screenIndex]),a.querySelector(".info").appendChild(a.anim.el)),a.classList.add("show"),a.anim&&window.setTimeout(function(){a.anim.play(),this.datas.screenIndex>0&&(this.datas.timeout=window.setTimeout(function(){this.goNext()}.bind(this),this.datas.durations[this.datas.screenIndex]))}.bind(this),400)}.bind(this))}.bind(this))},this.process=function(){this.el=document.getElementById("tutorial"),this.els={close:this.el.querySelector(".bt-close"),start:this.el.querySelector(".bt-action"),audio:document.querySelector("audio"),screens:[].slice.call(this.el.querySelectorAll(".screen"))},this.datas={animations:["360","swipe","shake"],durations:[4800,2350,3900],screenIndex:0},this.els.animation=[document.querySelectorAll(".swap img.a360"),document.querySelectorAll(".swap img.aswipe"),document.querySelectorAll(".swap img.ashake")]},this.goNext=function(){var a=this.els.screens[this.datas.screenIndex],b=this.els.screens[++this.datas.screenIndex];if(window.clearTimeout(this.datas.timeout),a&&(a.anim&&a.anim.pause(),a.classList.add("fade"),a.classList.remove("show")),!b||b.classList.contains("wait")){if(this.el.classList.add("hide"),this.el.classList.remove("show"),!b)return void c.publish("tutorial.done");!b.anim&&this.els.animation[this.datas.screenIndex]&&(b.anim=new e(this.els.animation[this.datas.screenIndex]),b.querySelector(".info").appendChild(b.anim.el)),window.requestAnimationFrame(function(){c.publish("page.intro","show")}.bind(this))}else!b.anim&&this.els.animation[this.datas.screenIndex]&&(b.anim=new e(this.els.animation[this.datas.screenIndex]),b.querySelector(".info").appendChild(b.anim.el)),b.classList.add("show"),b.anim&&b.anim.play(600),this.datas.timeout=window.setTimeout(function(){this.goNext()}.bind(this),this.datas.durations[this.datas.screenIndex])},this.handleTransition=function(a){var b=a.target;return 
b.classList.contains("hide")?(b.style.display="",b.style.visibility="",this.els.close.style.display="block",void(this.els.start.style.display="none")):b.classList.contains("fadein")?(b.classList.remove("fadein"),void(b.style.opacity="")):void(b===this.els.start&&this.els.start.classList.contains("fade")&&(this.els.start.classList.remove("fade"),this.els.start.classList.add("hide")))},c.subscribe("app.start",this.init.bind(this))}},{"./animation":7,hammerjs:1,"pubsub-js":3}]},{},[6])}({},function(){return this}());
|
"""The Hello World package."""
from hashlib import blake2b
# Package metadata.
# NOTE(review): assigning to __name__ overrides the module's import name and can
# break `if __name__ == '__main__'` checks in this module — confirm intentional.
__name__ = 'helloworld'
__version__ = "0.0.3"
__author__ = 'Mikel Menta Garde'
__url__ = "https://github.com/mkmenta/python-package-example"
__license__ = "MIT License"
def say_hello(name=None):
    """Say hello to the world or someone.

    Args:
        name (str): who you want to greet. If None it will greet the world.

    Returns:
        A string with the greeting.
    """
    if name is None:
        return "Hello world!"
    # Easter egg: a specific (undisclosed) name gets a special greeting,
    # recognized by its BLAKE2b digest rather than by plain-text comparison.
    digest = blake2b(name.encode(), digest_size=20).hexdigest()
    if digest == 'df543254a1110b5d32d96028cf4b1df9ea96ebbb':
        return "I'm on the radioooooo!!!"
    return f"Hello {name}!"
|
from PyQt5.QtWidgets import QMessageBox, QMainWindow
from PyQt5.QtGui import QPixmap, QBrush, QResizeEvent, QPalette
from PyQt5.QtCore import Qt
from PyQt5 import uic
from datetime import date
import ctypes
class Main_Window(QMainWindow):
    """Main application window.

    Wires menu actions to the presenter, centers itself on screen, paints a
    scaled background image and shows a date-stamped window title.
    """

    def __init__(self, presentador):
        QMainWindow.__init__(self)
        uic.loadUi('vista/ui/main_window.ui', self)
        self.__presentador = presentador
        self.annadir_eventos()
        self.mover_al_medio()
        self.annadir_titulo()

    def annadir_eventos(self):
        """Connect every menu action to its presenter callback."""
        self.ac_close.triggered.connect(self.close)
        self.ac_viaje_local.triggered.connect(self.__presentador.CRUD_viaje_local)
        self.ac_viaje_ciudad.triggered.connect(self.__presentador.CRUD_viaje_fuera_ciudad)
        self.ac_op1.triggered.connect(self.__presentador.mostrar_operacion_1)
        self.ac_op2.triggered.connect(self.__presentador.mostrar_operacion_2)
        self.ac_op3.triggered.connect(self.__presentador.mostrar_operacion_3)
        self.ac_op4.triggered.connect(self.__presentador.mostrar_operacion_4)
        self.ac_op5.triggered.connect(self.__presentador.mostrar_operacion_5)

    def closeEvent(self, e):
        """Ask the user for confirmation before actually closing the window."""
        res = QMessageBox.question(self, 'Salir!!', 'Seguro que desea salir?', QMessageBox.Yes | QMessageBox.No)
        if res == QMessageBox.Yes:
            e.accept()
        else:
            e.ignore()

    def mover_al_medio(self):
        """Center the window on the primary screen.

        NOTE(review): relies on the Win32 API via ctypes, so this is
        Windows-only; QScreen/QDesktopWidget would be the portable way.
        """
        resolucion = ctypes.windll.user32
        resolucion_ancho = resolucion.GetSystemMetrics(0)
        resolucion_alto = resolucion.GetSystemMetrics(1)
        # Integer division: QWidget.move() takes int coordinates and newer
        # PyQt5 releases raise TypeError when given floats.
        left = (resolucion_ancho - self.frameSize().width()) // 2
        top = (resolucion_alto - self.frameSize().height()) // 2
        self.move(left, top)

    def resizeEvent(self, a0: QResizeEvent):
        """Rescale the background image to fill the window on every resize."""
        background = QPixmap('vista/img/fondo.jpg')
        background = background.scaled(self.size(), Qt.IgnoreAspectRatio)
        pal = self.palette()
        pal.setBrush(QPalette.Background, QBrush(background))
        self.setPalette(pal)

    def annadir_titulo(self):
        """Build the window title with the current date spelled out in Spanish."""
        dias = {0: 'Lunes', 1: 'Martes', 2: 'Miércoles', 3: 'Jueves', 4: 'Viernes', 5: 'Sábado', 6: 'Domingo'}
        meses = {1: 'Enero', 2: 'Febrero', 3: 'Marzo', 4: 'Abril', 5: 'Mayo', 6: 'Junio', 7: 'Julio', 8: 'Agosto',
                 9: 'Septiembre', 10: 'Octubre', 11: 'Noviembre', 12: 'Diciembre'}
        # Read the date once so all components stay consistent across midnight.
        hoy = date.today()
        dia_sem = dias[hoy.weekday()]
        dia = str(hoy.day)
        mes = meses[hoy.month]
        anno = str(hoy.year)
        msg = 'Sistema de bases de taxis. Fecha Actual: ({} {} de {} del {})'.format(dia_sem, dia, mes, anno)
        self.setWindowTitle(msg)
|
require('./bootstrap');

// 'Cancella' (delete) buttons and their matching 'Annulla' (cancel) buttons.
// Both collections have the same length, so pairs share an index.
const deleteBtn = document.getElementsByClassName('delete_btn');
const cancelBtn = document.getElementsByClassName('cancel_btn');

// Wire each delete/cancel pair to its confirmation alert box. The button's id
// attribute matches the row ID used in the alert's class name.
Array.from(deleteBtn).forEach(function (btn, i) {
    const elementId = btn.getAttribute("id");
    btn.addEventListener('click', function () {
        document.querySelector(`.alert_delete_${elementId}`).classList.remove('display_none');
    });
    cancelBtn[i].addEventListener('click', function () {
        document.querySelector(`.alert_delete_${elementId}`).classList.add('display_none');
    });
});
|
from src import annotations
# The script below can be used to tune parameters of the sampling and ranking functions.
# NOTE: the values in the code below (sample fraction .00, val_reweight=42) differ from
# the tuned setup described here: with .75 sampled and val_reweight set to 60,
# precision is around 81% at rank 10 and 77% at rank 20.
# Only on set T9 do we have more than 20 results, which gives around 78.9% precision.
# Tasks to evaluate; each has its own annotated result set.
task_list = ['T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'T8', 'T9', 'T10']
# One Counter-dict per task with correct/incorrect tallies for the top 10.
all_correct_incorrect_at_10 = []
from collections import Counter
for task in task_list:
    print('_________________________________________________________')
    print(task)
    # Load the annotated results for this task.
    result_obj = annotations.Results(task)
    results = result_obj.get_task_results()
    print('Total results available:')
    print(len(results))
    print('Total results sampled after removal of excess incorrects:')
    # NOTE(review): the sample fraction here is .00, not the .75 described in
    # the header comment — confirm which is the tuned value.
    sample = result_obj.get_sample_of_results(results, .00)
    print(len(sample))
    # Judgement accumulators at cutoffs 10 / 20 / all, across repeated runs.
    correct_incorrect_at_10 = []
    correct_incorrect_at_20 = []
    correct_incorrect_at_all = []
    # Per-rank accumulators (consumed only by the commented-out report below).
    rank_1_list = []
    rank_2_list = []
    rank_3_list = []
    rank_4_list = []
    rank_5_list = []
    rank_6_list = []
    rank_7_list = []
    rank_8_list = []
    rank_9_list = []
    rank_10_list = []
    # Re-rank many times to average out randomness in the ranking function.
    for i in range(1000):
        ranks = result_obj.get_ranked_results(sample, val_reweight=42, random_seed=None)
        correct_incorrect_10 = list(map(lambda result: result['JUDGEMENTS'], ranks[0:10]))
        correct_incorrect_at_10.extend(correct_incorrect_10)
        correct_incorrect_20 = list(map(lambda result: result['JUDGEMENTS'], ranks[0:20]))
        correct_incorrect_at_20.extend(correct_incorrect_20)
        correct_incorrect_all = list(map(lambda result: result['JUDGEMENTS'], ranks))
        correct_incorrect_at_all.extend(correct_incorrect_all)
        rank_1_list.append(ranks[0]['JUDGEMENTS'])
        rank_2_list.append(ranks[1]['JUDGEMENTS'])
        rank_3_list.append(ranks[2]['JUDGEMENTS'])
        rank_4_list.append(ranks[3]['JUDGEMENTS'])
        rank_5_list.append(ranks[4]['JUDGEMENTS'])
        rank_6_list.append(ranks[5]['JUDGEMENTS'])
        rank_7_list.append(ranks[6]['JUDGEMENTS'])
        rank_8_list.append(ranks[7]['JUDGEMENTS'])
        rank_9_list.append(ranks[8]['JUDGEMENTS'])
        rank_10_list.append(ranks[9]['JUDGEMENTS'])
    # NOTE(review): the messages below say "10000 runs" but the loop above runs
    # 1000 iterations — confirm which is intended.
    print ('Total correct and incorrect in top 10 after 10000 runs: ')
    print(Counter(correct_incorrect_at_10))
    all_correct_incorrect_at_10.append(dict(Counter(correct_incorrect_at_10)))
    print('Total correct and incorrect in top 20 after 10000 runs: ')
    print(Counter(correct_incorrect_at_20))
    print('Total correct and incorrect in for all after 10000 runs: ')
    print(Counter(correct_incorrect_at_all))
#
# print('--ranks--')
# print ('Total correct and incorrect at rank 1 after 10000 runs: ')
# print(Counter(rank_1_list))
#
# print('Total correct and incorrect at rank 2 after 10000 runs: ')
# print(Counter(rank_2_list))
#
# print('Total correct and incorrect at rank 3 after 10000 runs: ')
# print(Counter(rank_3_list))
#
# print('Total correct and incorrect at rank 4 after 10000 runs: ')
# print(Counter(rank_4_list))
#
# print('Total correct and incorrect at rank 5 after 10000 runs: ')
# print(Counter(rank_5_list))
#
# print('Total correct and incorrect at rank 6 after 10000 runs: ')
# print(Counter(rank_6_list))
#
# print('Total correct and incorrect at rank 7 after 10000 runs: ')
# print(Counter(rank_7_list))
#
# print('Total correct and incorrect at rank 8 after 10000 runs: ')
# print(Counter(rank_8_list))
#
# print('Total correct and incorrect at rank 9 after 10000 runs: ')
# print(Counter(rank_9_list))
#
# print('Total correct and incorrect at rank 10 after 10000 runs: ')
# print(Counter(rank_10_list))
# Aggregate the per-task top-10 counts collected above.
total_correct = 0
total_incorrect = 0
for item in all_correct_incorrect_at_10:
    # Use .get(): a task with no correct (or no incorrect) results in its
    # top 10 would otherwise raise KeyError, since Counter omits absent labels.
    total_correct += item.get('Correct', 0)
    total_incorrect += item.get('Incorrect', 0)
print(total_correct)
print(total_incorrect)
|
import * as alt from 'alt';
import * as native from 'natives';
import { distance } from '/client/utility/vector.js';
// Client-side cache of dropped items (synced from the server) and the
// physical props currently spawned to represent them.
let itemsOnGround = [];
let objects = [];
// Earliest time (ms epoch) drawItems is next allowed to rebuild props.
let lastDropUpdate = Date.now();
let drawItemsInterval;
// Single prop model used to represent any dropped item on the ground.
const unknownModel = native.getHashKey('sm_prop_smug_rsply_crate02a');
alt.loadModel(unknownModel);
native.requestModel(unknownModel);
// Server -> client inventory events.
alt.onServer('inventory:ItemDrops', itemDrops);
alt.onServer('inventory:ItemPickup', itemPickup);
alt.onServer('inventory:UseRepairKit', useRepairKit);
alt.onServer('inventory:UseGasCan', useGasCan);
// Replace the ground-item cache with the server's list and (re)arm the
// per-tick draw loop only when there is something to render.
function itemDrops(jsonDrops) {
    itemsOnGround = JSON.parse(jsonDrops);

    // Stop any running draw loop before deciding whether a new one is needed.
    if (drawItemsInterval) {
        alt.clearInterval(drawItemsInterval);
        drawItemsInterval = undefined;
    }

    if (itemsOnGround.length > 0) {
        drawItemsInterval = alt.setInterval(drawItems, 0);
    }
}
// Remove a picked-up item (by hash) from the local ground-item cache.
function itemPickup(hash) {
    // Ignore pickups while seated in a vehicle or when nothing is on the ground.
    if (alt.Player.local.vehicle || itemsOnGround.length <= 0) {
        return;
    }

    const index = itemsOnGround.findIndex(item => item.hash === hash);
    if (index === -1) {
        return;
    }
    itemsOnGround.splice(index, 1);
}
// Interval callback: keeps spawned props in sync with `itemsOnGround`.
// Runs every tick but only rebuilds the props at most once per second.
function drawItems() {
    if (itemsOnGround.length <= 0) {
        // Nothing left on the ground: tear down any props we still have.
        if (objects.length >= 1) {
            objects.forEach(object => {
                native.freezeEntityPosition(object.id, false);
                native.deleteEntity(object.id);
            });
            objects = [];
        }
        return;
    }
    // Throttle: rebuild at most once per second.
    if (Date.now() > lastDropUpdate) {
        lastDropUpdate = Date.now() + 1000;
        // Delete the previous batch of props before re-creating them.
        objects.forEach(object => {
            native.freezeEntityPosition(object.id, false);
            native.deleteEntity(object.id);
        });
        objects = [];
        itemsOnGround.forEach(itemData => {
            // Only spawn props near the local player.
            const dist = distance(alt.Player.local.pos, itemData.pos);
            if (dist > 10) return;
            const id = native.createObject(
                unknownModel,
                itemData.pos.x,
                itemData.pos.y,
                itemData.pos.z - 1.05, // sink the crate so it sits on the ground
                false,
                false,
                false
            );
            native.freezeEntityPosition(id, true);
            objects.push({ id, data: itemData });
        });
    }
}
// Forward local pickup requests to the server by item hash.
alt.on('item:Pickup', data => {
    alt.emitServer('inventory:Pickup', data.hash);
});
// Look up the spawned ground-item entry whose prop entity id matches `ent`.
// Returns the entry, or undefined when no prop with that id exists.
export function getItemByEntity(ent) {
    // `find` expects a boolean predicate; the original returned the object
    // itself, which only worked because objects are truthy.
    return objects.find(object => object.id === ent);
}
// Put the local player into "repairing" mode; the actual repair happens
// once a target vehicle is selected.
function useRepairKit() {
    alt.Player.local.isRepairing = true;
    const message = `{00FF00} Select the vehicle you want to repair with your cursor.`;
    alt.emit('chat:Send', message);
}
// Put the local player into "refueling" mode; the actual refuel happens
// once a target vehicle is selected.
function useGasCan() {
    alt.Player.local.isUsingGasCan = true;
    const message = `{00FF00} Select the vehicle you want to re-fuel with your cursor.`;
    alt.emit('chat:Send', message);
}
|
import numpy as np
import sqlalchemy
import datetime as dt
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Database Setup
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
# NOTE(review): a single module-level session is shared by every request;
# confirm this is acceptable for the dev server (SQLite connections are not
# thread-safe by default).
session = Session(engine)
# Flask Setup
app = Flask(__name__)
# Flask Route
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Query precipitation
year_ago = year_ago = dt.date(2017, 8, 23) - dt.timedelta(days =365)
result = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= year_ago).all()
result =dict(result)
return jsonify(result)
@app.route("/api/v1.0/stations")
def stations():
station_id = session.query(Station.station, Station.name).all()
station_id = list(np.ravel(station_id))
return jsonify(station_id)
@app.route("/api/v1.0/tobs")
def tobs():
year_ago = year_ago = dt.date(2017, 8, 23) - dt.timedelta(days =365)
station_tobs = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281' ).\
filter(Measurement.date >= year_ago).all()
station_tobs = list(np.ravel(station_tobs))
return jsonify(station_tobs)
@app.route("/api/v1.0/start")
def calc_temps():
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
start_date = "2017-04-01"
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).all()
return jsonify(results)
# Run the Flask development server when this module is executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
import pandas as pd
import psycopg2
import uuid
from modules.connector import Bancosql
from modules.connector_cassandra import Cassandra
import __main__
class Insert:
    """Loads CSV exports into the branch (PostgreSQL) and head-office
    (Cassandra) databases, and transfers data between them."""

    ## Step 1 - read and normalize the CSV file - SQL
    @staticmethod
    def popular_sql():
        """Read Sistema_A_SQL.csv and insert every row into the SQL table `vendas`."""
        try:
            data = pd.read_csv("Sistema_A_SQL.csv", sep=",")  # load the file into `data`
            data['vendedor'] = data['vendedor'].astype('string')  # normalize the vendedor column to string
        except Exception as e:
            # Without the CSV there is nothing to insert; bail out early
            # instead of hitting a NameError on `data` below.
            print(str(e))
            __main__.menu()
            return
        # Step 2 - populate the PostgreSQL database with the prepared rows.
        try:
            for index, row in data.iterrows():
                # NOTE(review): values are interpolated straight into the SQL
                # text. If the CSV were untrusted this is an injection risk;
                # prefer a parameterized query if Bancosql supports one.
                query = (f"INSERT INTO vendas (nota_fiscal, vendedor, total) VALUES ({int(row['nota_fiscal'])}, '{row['vendedor']}', {float(row['total'])})")
                Bancosql.executar(query)
            print("Oldtech Filial - SQL populado com sucesso!")
            __main__.menu()
        except Exception as e:
            print(str(e))
            __main__.menu()

    ## Step 3 - read the CSV file - NoSQL
    @staticmethod
    def popular_nosql():
        """Read Sistema_B_NoSQL.csv and insert every row into Cassandra `vendas`."""
        try:
            conectar = Cassandra('oldtech')
            data_nosql = pd.read_csv("Sistema_B_NoSQL.csv", sep=",")  # load the file into `data_nosql`
            data_nosql['vendedor'] = data_nosql['vendedor'].astype('string')  # normalize to string
            lista = []
            for index, row in data_nosql.iterrows():  # walk data_nosql row by row
                # Dict shape expected by Cassandra.inserir; repr() keeps the
                # quoting the insert statement needs.
                dados = {'id': str(uuid.uuid4()), 'nota_fiscal': f"{int(row['nota_fiscal'])}",
                         'vendedor': f"{repr(str(row['vendedor']))}",
                         'total': f"{float(row['total'])}"}
                lista.append(dados)
            # Step 4 - populate Cassandra.
            conectar.inserir(1, 'vendas', lista)
            print("Oldtech Matriz - NoSQL populado com sucesso!")
            __main__.menu()
        except Exception as e:
            print(str(e))

    ## Step 5 - fetch rows from PostgreSQL, normalize, insert into Cassandra
    @staticmethod
    def popular_nosql_sql():
        """Copy every row of the SQL `vendas` table into Cassandra."""
        try:
            conectar = Cassandra('oldtech')
            query = "SELECT * FROM vendas;"
            dados = Bancosql.buscar(query)
            dados = pd.DataFrame(dados)
            dados[0] = dados[0].astype('int')     # column 0: nota_fiscal -> int
            dados[1] = dados[1].astype('string')  # column 1: vendedor -> string
            dados[2] = dados[2].astype('float')   # column 2: total -> float
            lista_sql = []
            for index, row in dados.iterrows():
                dados = {'id': str(uuid.uuid4()), 'nota_fiscal': f'{row[0]}',
                         'vendedor': repr(str(row[1])), 'total': f'{row[2]}'}
                lista_sql.append(dados)
            conectar.inserir(1, 'vendas', lista_sql)
            print("Dados transferidos com populado com sucesso!")
            __main__.menu()
        except Exception as e:
            print(str(e))

    @staticmethod
    def mostrar_filial():
        """Print every row of the SQL `vendas` table (branch office)."""
        query = "SELECT * FROM vendas;"
        dados = Bancosql.buscar(query)
        dados = pd.DataFrame(dados)
        print(dados)

    @staticmethod
    def mostrar_matriz():
        """Print every row of the Cassandra `vendas` table (head office)."""
        conectar = Cassandra('oldtech')
        conectar.buscar('vendas')
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to sample many images from a model for evaluation.
"""
import argparse, json
import os
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from scipy.misc import imsave, imresize
from sg2im.data import imagenet_deprocess_batch
from sg2im.data.coco import CocoSceneGraphDataset, coco_collate_fn
from sg2im.data.vg import VgSceneGraphDataset, vg_collate_fn
from sg2im.data.utils import split_graph_batch
from sg2im.model import Sg2ImModel
from sg2im.utils import int_tuple, bool_flag
from sg2im.vis import draw_scene_graph
# CLI configuration for the sampling script.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='sg2im-models/vg64.pt')
parser.add_argument('--checkpoint_list', default=None)
parser.add_argument('--model_mode', default='eval', choices=['train', 'eval'])

# Shared dataset options
parser.add_argument('--dataset', default='vg', choices=['coco', 'vg'])
parser.add_argument('--image_size', default=(64, 64), type=int_tuple)
parser.add_argument('--batch_size', default=24, type=int)
parser.add_argument('--shuffle', default=False, type=bool_flag)
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--num_samples', default=10000, type=int)
parser.add_argument('--save_gt_imgs', default=False, type=bool_flag)
parser.add_argument('--save_graphs', default=False, type=bool_flag)
parser.add_argument('--use_gt_boxes', default=False, type=bool_flag)
parser.add_argument('--use_gt_masks', default=False, type=bool_flag)
parser.add_argument('--save_layout', default=True, type=bool_flag)
parser.add_argument('--output_dir', default='output')

# For VG
# NOTE(review): VG_DIR is a relative path while COCO_DIR lives under '~' —
# confirm both defaults are intended.
VG_DIR = os.path.expanduser('datasets/vg')
parser.add_argument('--vg_h5', default=os.path.join(VG_DIR, 'val.h5'))
parser.add_argument('--vg_image_dir',
        default=os.path.join(VG_DIR, 'images'))

# For COCO
COCO_DIR = os.path.expanduser('~/datasets/coco/2017')
parser.add_argument('--coco_image_dir',
        default=os.path.join(COCO_DIR, 'images/val2017'))
parser.add_argument('--instances_json',
        default=os.path.join(COCO_DIR, 'annotations/instances_val2017.json'))
parser.add_argument('--stuff_json',
        default=os.path.join(COCO_DIR, 'annotations/stuff_val2017.json'))
def build_coco_dset(args, checkpoint):
    """Build a CocoSceneGraphDataset configured from CLI args plus the
    dataset options that were stored in the checkpoint's training args."""
    checkpoint_args = checkpoint['args']
    print('include other: ', checkpoint_args.get('coco_include_other'))
    dset_kwargs = {
        'image_dir': args.coco_image_dir,
        'instances_json': args.instances_json,
        'stuff_json': args.stuff_json,
        'stuff_only': checkpoint_args['coco_stuff_only'],
        'image_size': args.image_size,
        'mask_size': checkpoint_args['mask_size'],
        'max_samples': args.num_samples,
        'min_object_size': checkpoint_args['min_object_size'],
        'min_objects_per_image': checkpoint_args['min_objects_per_image'],
        'instance_whitelist': checkpoint_args['instance_whitelist'],
        'stuff_whitelist': checkpoint_args['stuff_whitelist'],
        # .get with default: older checkpoints may predate this option.
        'include_other': checkpoint_args.get('coco_include_other', True),
    }
    dset = CocoSceneGraphDataset(**dset_kwargs)
    return dset
def build_vg_dset(args, checkpoint):
    """Build a VgSceneGraphDataset using the vocab and dataset options
    stored in the checkpoint, plus CLI args for paths and sizes."""
    vocab = checkpoint['model_kwargs']['vocab']
    dset_kwargs = {
        'vocab': vocab,
        'h5_path': args.vg_h5,
        'image_dir': args.vg_image_dir,
        'image_size': args.image_size,
        'max_samples': args.num_samples,
        'max_objects': checkpoint['args']['max_objects_per_image'],
        'use_orphaned_objects': checkpoint['args']['vg_use_orphaned_objects'],
    }
    dset = VgSceneGraphDataset(**dset_kwargs)
    return dset
def build_loader(args, checkpoint):
    """Build a DataLoader for the dataset named by args.dataset ('coco' or 'vg')."""
    if args.dataset == 'coco':
        dset = build_coco_dset(args, checkpoint)
        collate_fn = coco_collate_fn
    elif args.dataset == 'vg':
        dset = build_vg_dset(args, checkpoint)
        collate_fn = vg_collate_fn
    else:
        # argparse restricts choices on the CLI, but guard against programmatic
        # misuse: without this, dset/collate_fn would be unbound below.
        raise ValueError('Unrecognized dataset: %s' % args.dataset)
    loader_kwargs = {
        'batch_size': args.batch_size,
        'num_workers': args.loader_num_workers,
        'shuffle': args.shuffle,
        'collate_fn': collate_fn,
    }
    loader = DataLoader(dset, **loader_kwargs)
    return loader
def build_model(args, checkpoint):
    """Instantiate Sg2ImModel from a checkpoint, restore its weights and put
    it in the requested train/eval mode on the GPU."""
    # Removed the unused local `kwargs` (it duplicated checkpoint['model_kwargs']).
    model = Sg2ImModel(**checkpoint['model_kwargs'])
    model.load_state_dict(checkpoint['model_state'])
    if args.model_mode == 'eval':
        model.eval()
    elif args.model_mode == 'train':
        model.train()
    model.image_size = args.image_size
    model.cuda()
    return model
def makedir(base, name, flag=True):
    """Return base/name, creating the directory if needed.

    Returns None (and creates nothing) when flag is False — used to skip
    optional output directories.
    """
    if not flag:
        return None
    dir_name = os.path.join(base, name)
    if not os.path.isdir(dir_name):
        os.makedirs(dir_name)
    return dir_name
def run_model(args, checkpoint, output_dir, loader=None):
    """Sample images from a trained model for every batch in the loader and
    dump images, optional scene graphs / GT images, and tensors to output_dir."""
    vocab = checkpoint['model_kwargs']['vocab']
    model = build_model(args, checkpoint)
    if loader is None:
        loader = build_loader(args, checkpoint)

    # Output folders; graph/GT dirs are only created when requested.
    img_dir = makedir(output_dir, 'images')
    graph_dir = makedir(output_dir, 'graphs', args.save_graphs)
    gt_img_dir = makedir(output_dir, 'images_gt', args.save_gt_imgs)
    data_path = os.path.join(output_dir, 'data.pt')

    # Per-image tensors accumulated across batches and saved at the end.
    data = {
        'vocab': vocab,
        'objs': [],
        'masks_pred': [],
        'boxes_pred': [],
        'masks_gt': [],
        'boxes_gt': [],
        'filenames': [],
    }

    img_idx = 0
    for batch in loader:
        masks = None
        # 7-tensor batches carry masks; 6-tensor batches do not.
        if len(batch) == 6:
            imgs, objs, boxes, triples, obj_to_img, triple_to_img = [x.cuda() for x in batch]
        elif len(batch) == 7:
            imgs, objs, boxes, masks, triples, obj_to_img, triple_to_img = [x.cuda() for x in batch]
        imgs_gt = imagenet_deprocess_batch(imgs)
        boxes_gt = None
        masks_gt = None
        if args.use_gt_boxes:
            boxes_gt = boxes
        if args.use_gt_masks:
            masks_gt = masks

        # Run the model with predicted masks
        model_out = model(objs, triples, obj_to_img,
                          boxes_gt=boxes_gt, masks_gt=masks_gt)
        imgs_pred, boxes_pred, masks_pred, _ = model_out
        imgs_pred = imagenet_deprocess_batch(imgs_pred)

        # Split flat batch tensors back into per-image lists.
        obj_data = [objs, boxes_pred, masks_pred]
        _, obj_data = split_graph_batch(triples, obj_data, obj_to_img,
                                        triple_to_img)
        objs, boxes_pred, masks_pred = obj_data

        obj_data_gt = [boxes.data]
        if masks is not None:
            obj_data_gt.append(masks.data)
        triples, obj_data_gt = split_graph_batch(triples, obj_data_gt,
                                                 obj_to_img, triple_to_img)
        boxes_gt, masks_gt = obj_data_gt[0], None
        if masks is not None:
            masks_gt = obj_data_gt[1]

        # Save every image of the batch plus its per-image tensors.
        for i in range(imgs_pred.size(0)):
            img_filename = '%04d.png' % img_idx
            if args.save_gt_imgs:
                img_gt = imgs_gt[i].numpy().transpose(1, 2, 0)
                img_gt_path = os.path.join(gt_img_dir, img_filename)
                imsave(img_gt_path, img_gt)

            img_pred = imgs_pred[i]
            img_pred_np = imgs_pred[i].numpy().transpose(1, 2, 0)
            img_path = os.path.join(img_dir, img_filename)
            imsave(img_path, img_pred_np)

            data['objs'].append(objs[i].cpu().clone())
            data['masks_pred'].append(masks_pred[i].cpu().clone())
            data['boxes_pred'].append(boxes_pred[i].cpu().clone())
            data['boxes_gt'].append(boxes_gt[i].cpu().clone())
            data['filenames'].append(img_filename)

            cur_masks_gt = None
            if masks_gt is not None:
                cur_masks_gt = masks_gt[i].cpu().clone()
            data['masks_gt'].append(cur_masks_gt)

            if args.save_graphs:
                graph_img = draw_scene_graph(vocab, objs[i], triples[i])
                graph_path = os.path.join(graph_dir, img_filename)
                imsave(graph_path, graph_img)

            img_idx += 1

    torch.save(data, data_path)
    print('Saved %d images' % img_idx)
def main(args):
    """Entry point: sample from a single checkpoint, or from every checkpoint
    named in --checkpoint_list (files or directories of snapshots)."""
    got_checkpoint = args.checkpoint is not None
    got_checkpoint_list = args.checkpoint_list is not None
    if got_checkpoint == got_checkpoint_list:
        raise ValueError('Must specify exactly one of --checkpoint and --checkpoint_list')

    if got_checkpoint:
        checkpoint = torch.load(args.checkpoint)
        print('Loading model from ', args.checkpoint)
        run_model(args, checkpoint, args.output_dir)
    elif got_checkpoint_list:
        # For efficiency, use the same loader for all checkpoints
        loader = None
        with open(args.checkpoint_list, 'r') as f:
            checkpoint_list = [line.strip() for line in f]
        for i, path in enumerate(checkpoint_list):
            if os.path.isfile(path):
                print('Loading model from ', path)
                checkpoint = torch.load(path)
                if loader is None:
                    loader = build_loader(args, checkpoint)
                output_dir = os.path.join(args.output_dir, 'result%03d' % (i + 1))
                run_model(args, checkpoint, output_dir, loader)
            elif os.path.isdir(path):
                # Look for snapshots in this dir
                for fn in sorted(os.listdir(path)):
                    if 'snapshot' not in fn:
                        continue
                    checkpoint_path = os.path.join(path, fn)
                    print('Loading model from ', checkpoint_path)
                    checkpoint = torch.load(checkpoint_path)
                    if loader is None:
                        loader = build_loader(args, checkpoint)
                    # Snapshots have names like "snapshot_00100K.pt"; we want
                    # to extract the "00100K" part.
                    snapshot_name = os.path.splitext(fn)[0].split('_')[1]
                    # Use (i + 1) so numbering matches the file branch above,
                    # which was 1-based while this branch was 0-based.
                    output_dir = 'result%03d_%s' % (i + 1, snapshot_name)
                    output_dir = os.path.join(args.output_dir, output_dir)
                    run_model(args, checkpoint, output_dir, loader)
# Parse CLI flags and run sampling when executed as a script.
if __name__ == '__main__':
    args = parser.parse_args()
    main(args)
|
// FinderColor: stateless React component that renders the macOS Finder icon as
// an inline 32x32 SVG (two gradient-filled face halves over a rounded rect with
// a drop-shadow filter). Any props passed in are spread onto the root <svg>.
// Minified/generated output — left byte-identical below.
const React=require("react");function FinderColor(e){return React.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 32 32",width:32,height:32,strokeWidth:!0,stroke:!0},e),React.createElement("g",{filter:"url(#a)"},React.createElement("rect",{width:28,height:24,x:2,y:4,fill:"#283544",rx:1})),React.createElement("path",{fill:"url(#b)",fillRule:"evenodd",d:"M17.2151 4h11.9397c.4226 0 .7396.28916.8452.77108V27.2289c0 .3856-.4226.7711-.8452.7711H19.434c-.4226-1.3494-.634-2.6988-.8453-4.0482 2.5359-.4819 4.8604-1.6385 6.4453-3.4699.5283-.7711-.4226-1.253-.8453-.7711-1.1622 1.735-3.3811 2.7952-5.7056 3.1808-.317-3.8555 0-6.9398 0-6.9398h-4.1208s0-6.16867 2.8528-11.9518Zm5.2831 9.0602c.5283 0 .8452-.3855.8452-.771V10.747c0-.4819-.4226-.7711-.8452-.7711-.5283 0-.8453.3855-.8453.7711v1.5422c0 .4819.4226.771.8453.771Z",clipRule:"evenodd"}),React.createElement("path",{fill:"url(#c)",fillRule:"evenodd",d:"M15.9472 24.1446c-3.4868 0-6.97361-1.253-9.08682-3.6627-.5283-.7711.42264-1.253.84528-.7711 1.69057 2.2169 4.96604 3.3735 8.24154 3.3735.7559 0 1.6905-.0963 1.6905-.0963-.3169-2.6988-.2113-5.1085-.1056-6.2651H13.517v-.7711s0-6.16867 2.8528-11.9518H2.84528C2.31698 4 2 4.38554 2 4.77108V27.2289c0 .3856.42264.7711.84528.7711H18.5c-.0501-.2104-.1002-.4149-.1497-.6164-.258-1.0527-.4964-2.0251-.6069-3.3354 0 0-1.2278.0964-1.7962.0964ZM9.39623 9.9759c-.5283 0-.84529.3855-.84529.7711v1.5422c0 .3855.42265.6747.84529.6747.42264 0 
.84527-.2892.84527-.6747V10.747c0-.3856-.31697-.7711-.84527-.7711Z",clipRule:"evenodd"}),React.createElement("defs",null,React.createElement("linearGradient",{id:"b",x1:22.1812,x2:22.1812,y1:4,y2:28,gradientUnits:"userSpaceOnUse"},React.createElement("stop",{stopColor:"#F7F6F7"}),React.createElement("stop",{offset:1,stopColor:"#D9E4F0"})),React.createElement("linearGradient",{id:"c",x1:10.25,x2:10.25,y1:4,y2:28,gradientUnits:"userSpaceOnUse"},React.createElement("stop",{stopColor:"#2FD4FB"}),React.createElement("stop",{offset:1,stopColor:"#2777EE"})),React.createElement("filter",{id:"a",width:30,height:26,x:1,y:3.5,colorInterpolationFilters:"sRGB",filterUnits:"userSpaceOnUse"},React.createElement("feFlood",{floodOpacity:0,result:"BackgroundImageFix"}),React.createElement("feColorMatrix",{in:"SourceAlpha",result:"hardAlpha",values:"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"}),React.createElement("feOffset",{dy:.5}),React.createElement("feGaussianBlur",{stdDeviation:.5}),React.createElement("feColorMatrix",{values:"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.25 0"}),React.createElement("feBlend",{in2:"BackgroundImageFix",result:"effect1_dropShadow_87_7821"}),React.createElement("feBlend",{in:"SourceGraphic",in2:"effect1_dropShadow_87_7821",result:"shape"}))))}module.exports=FinderColor;
|
import { Text, View, Image, TextInput, ImageBackground, KeyboardAvoidingView } from 'react-native';
import React, { Component } from 'react';
import * as Styles from '@res/styles';
import { Button } from '@utils/CustomView';
import { isNaturalNumber } from '@utils/isNatural'
import { COLOR_BLACK} from '@res/color';
import configureStore from '@store/store';
import { verifyOtp } from '@actions/actions';
// Module-level Redux store used by this screen's dispatches and state reads.
const store = configureStore();
export class VerifyActivity extends Component {
constructor(props) {
super(props);
this.state = {
errortext: '',
buttontext: 'Verify OTP',
animating: false}
}
setOTP = (otp) => {
this.setState({ errortext: '' });
this.otp = otp;
}
verifyOTP = () => {
try {
this.setState(
{ animating: true });
if (this.otp != undefined) {
if (isNaturalNumber(parseInt(this.otp)) && this.otp.length == 8) {
store.dispatch({
type: verifyOtp.type,
payload: JSON.stringify({
otp: this.otp,
csrfToken: store.getState().csrfToken,
sessionId: store.getState().sessionId,
})
});
} else
this.setState({
animating: false,
errortext: 'Invalid OTP'
});
} else
this.setState({
animating: false,
errortext: 'Enter your OTP to continue'
}
);
} catch (error) {
console.log(error);
}
}
render() {
return (
<View style={Styles.center(1)}>
<KeyboardAvoidingView behavior="padding" style={Styles.center()}>
<Text style={[
Styles.setTextDesign(COLOR_BLACK, 'PrimaryBold'),
{fontSize:28},
Styles.setMargin(0, 0, 0, 30)]}>
{'Enter OTP'}
</Text>
<Text style={[
Styles.setMargin(0, 0, 0, 30),
Styles.setTextDesign()]}>
Enter your 8 digit OTP sent to you.
</Text>
<TextInput
style={[Styles.setInputDesign()]}
keyboardType='numeric'
maxLength={8}
onChangeText={otp => this.setOTP(otp)}
underlineColorAndroid="transparent"
placeholder="OTP"
autoCapitalize="none" />
<Button
margin={30}
width={80}
header={this.state.buttontext}
onClick={this.verifyOTP}
animating={this.state.animating}
/>
<Text style={[
Styles.setTextDesign('red'),
Styles.setMargin(0, 0, 0, 15)]}>
{this.state.errortext}
</Text>
</KeyboardAvoidingView>
</View>
);
}
}
|
// Auto-generated H5 culture definition for "fr-TD" (French — Chad).
// Registers number, currency, percent, date/time, and text-info formatting
// data with the runtime. Do not edit by hand.
H5.merge(new System.Globalization.CultureInfo("fr-TD", true), {
    englishName: "French (Chad)",
    nativeName: "français (Tchad)",

    numberFormat: H5.merge(new System.Globalization.NumberFormatInfo(), {
        nanSymbol: "NaN",
        negativeSign: "-",
        positiveSign: "+",
        negativeInfinitySymbol: "-∞",
        positiveInfinitySymbol: "∞",
        percentSymbol: "%",
        percentGroupSizes: [3],
        percentDecimalDigits: 2,
        percentDecimalSeparator: ",",
        percentGroupSeparator: " ",
        percentPositivePattern: 0,
        percentNegativePattern: 0,
        currencySymbol: "FCFA",
        currencyGroupSizes: [3],
        currencyDecimalDigits: 0,
        currencyDecimalSeparator: ",",
        currencyGroupSeparator: " ",
        currencyNegativePattern: 8,
        currencyPositivePattern: 3,
        numberGroupSizes: [3],
        numberDecimalDigits: 2,
        numberDecimalSeparator: ",",
        numberGroupSeparator: " ",
        numberNegativePattern: 1
    }),

    dateTimeFormat: H5.merge(new System.Globalization.DateTimeFormatInfo(), {
        abbreviatedDayNames: ["dim.","lun.","mar.","mer.","jeu.","ven.","sam."],
        abbreviatedMonthGenitiveNames: ["janv.","févr.","mars","avr.","mai","juin","juil.","août","sept.","oct.","nov.","déc.",""],
        abbreviatedMonthNames: ["janv.","févr.","mars","avr.","mai","juin","juil.","août","sept.","oct.","nov.","déc.",""],
        amDesignator: "AM",
        dateSeparator: "/",
        dayNames: ["dimanche","lundi","mardi","mercredi","jeudi","vendredi","samedi"],
        firstDayOfWeek: 1,
        fullDateTimePattern: "dddd d MMMM yyyy h:mm:ss tt",
        longDatePattern: "dddd d MMMM yyyy",
        longTimePattern: "h:mm:ss tt",
        monthDayPattern: "d MMMM",
        monthGenitiveNames: ["janvier","février","mars","avril","mai","juin","juillet","août","septembre","octobre","novembre","décembre",""],
        monthNames: ["janvier","février","mars","avril","mai","juin","juillet","août","septembre","octobre","novembre","décembre",""],
        pmDesignator: "PM",
        rfc1123: "ddd, dd MMM yyyy HH':'mm':'ss 'GMT'",
        shortDatePattern: "dd/MM/yyyy",
        shortestDayNames: ["di","lu","ma","me","je","ve","sa"],
        shortTimePattern: "h:mm tt",
        sortableDateTimePattern: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
        sortableDateTimePattern1: "yyyy'-'MM'-'dd",
        timeSeparator: ":",
        universalSortableDateTimePattern: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
        yearMonthPattern: "MMMM yyyy",
        roundtripFormat: "yyyy'-'MM'-'dd'T'HH':'mm':'ss.fffffffzzz"
    }),

    TextInfo: H5.merge(new System.Globalization.TextInfo(), {
        ANSICodePage: 1252,
        CultureName: "fr-TD",
        EBCDICCodePage: 20297,
        IsRightToLeft: false,
        LCID: 4096,
        listSeparator: ";",
        MacCodePage: 10000,
        OEMCodePage: 850,
        IsReadOnly: true
    })
});
|
/*
* Backpack - Skyscanner's Design System
*
* Copyright 2018 Skyscanner Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// If we encounter performance issues with the calendar we should switch these obj-C properties to C functions which the compiler can inline
/// Class-level constants describing the vertical layout of a calendar cell.
@interface BPKCalendarCellSpacing : NSObject

/**
 * The height of the circle surrounding the date label.
 */
@property(nonatomic, class, readonly) CGFloat cellCircleHeight;

/**
 * The vertical spacing between the cell content and the bottom edge of the cell.
 */
@property(nonatomic, class, readonly) CGFloat cellBottomSpacing;

/**
 * The height of the date label.
 */
@property(nonatomic, class, readonly) CGFloat cellTitleHeight;

/**
 * The total height of a normal cell.
 */
@property(nonatomic, class, readonly) CGFloat defaultCellHeight;

@end
NS_ASSUME_NONNULL_END
|
// --------------------------------------
// Variables, strings, numbers, floats
// --------------------------------------

// Exercise 1 - Console and variables
const firstName = "Anders";
const lastName = "Latif";

// EXERCISE
// show in the console
// My first name is Anders and my last name is Latif
console.log("My first name is", firstName, "and my last name is", lastName);

// --------------------------------------
// Exercise 2 - Numbers and Strings
const year = "2020121";
const number = 1;

// Add the year plus the number
// The result should be 2021
// You cannot touch line 1 or 2
// NOTE(review): with year = "2020121" these all print 2020122, not 2021 —
// the exercise text presumably assumed year = "2020"; verify the fixture.

// parseInt: parses as much of the leading digits as possible.
console.log(parseInt(year) + number);
// Number: yields NaN if the whole string is not numeric.
console.log(Number(year) + number);
// A leading plus converts the string to a number.
console.log(+year + number);
// --------------------------------------
|
import numpy as np

# 2x3 matrix of the integers 1..6.
a = np.array([[1, 2, 3], [4, 5, 6]])
# print(a.shape)
# Rebind to a 3x2 view of the same data (row-major element order preserved).
a = a.reshape(3, 2)
print(a)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from web import my_form
from zen import router
def index(_write_tmpl):
    """Render the home page, pointing its form at the ``my_form`` handler."""
    _write_tmpl('templates/home.html', {'form_url': router.to_path(my_form)})
|
"""Basic canvas for animations."""
from __future__ import annotations
__all__ = ["Scene"]
import copy
import datetime
import inspect
import platform
import random
import threading
import time
import types
from queue import Queue
import srt
from manim.scene.section import DefaultSectionType
try:
import dearpygui.dearpygui as dpg
dearpygui_imported = True
except ImportError:
dearpygui_imported = False
import numpy as np
from tqdm import tqdm
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from .. import config, logger
from ..animation.animation import Animation, Wait, prepare_animation
from ..camera.camera import Camera
from ..constants import *
from ..gui.gui import configure_pygui
from ..mobject.opengl_mobject import OpenGLPoint
from ..renderer.cairo_renderer import CairoRenderer
from ..renderer.opengl_renderer import OpenGLRenderer
from ..renderer.shader import Object3D
from ..utils import opengl, space_ops
from ..utils.exceptions import EndSceneEarlyException, RerunSceneException
from ..utils.family import extract_mobject_family_members
from ..utils.family_ops import restructure_list_to_exclude_certain_family_members
from ..utils.file_ops import open_media_file
from ..utils.iterables import list_difference_update, list_update
class RerunSceneHandler(FileSystemEventHandler):
    """A class to handle rerunning a Scene after the input file is modified."""

    def __init__(self, queue):
        # `queue` is shared with the scene's interactive loop; commands are
        # delivered to it as (command_name, args, kwargs) tuples.
        super().__init__()
        self.queue = queue

    def on_modified(self, event):
        # Any modification of the watched file requests a full rerun.
        self.queue.put(("rerun_file", [], {}))
class Scene:
"""A Scene is the canvas of your animation.
The primary role of :class:`Scene` is to provide the user with tools to manage
mobjects and animations. Generally speaking, a manim script consists of a class
that derives from :class:`Scene` whose :meth:`Scene.construct` method is overridden
by the user's code.
Mobjects are displayed on screen by calling :meth:`Scene.add` and removed from
screen by calling :meth:`Scene.remove`. All mobjects currently on screen are kept
in :attr:`Scene.mobjects`. Animations are played by calling :meth:`Scene.play`.
A :class:`Scene` is rendered internally by calling :meth:`Scene.render`. This in
turn calls :meth:`Scene.setup`, :meth:`Scene.construct`, and
:meth:`Scene.tear_down`, in that order.
It is not recommended to override the ``__init__`` method in user Scenes. For code
that should be ran before a Scene is rendered, use :meth:`Scene.setup` instead.
Examples
--------
Override the :meth:`Scene.construct` method with your code.
.. code-block:: python
class MyScene(Scene):
def construct(self):
self.play(Write(Text("Hello World!")))
"""
def __init__(
    self,
    renderer=None,
    camera_class=Camera,
    always_update_mobjects=False,
    random_seed=None,
    skip_animations=False,
):
    # Rendering configuration.
    self.camera_class = camera_class
    self.always_update_mobjects = always_update_mobjects
    self.random_seed = random_seed
    self.skip_animations = skip_animations
    # Per-play state, populated by compile_animation_data()/play_internal().
    self.animations = None
    self.stop_condition = None
    self.moving_mobjects = []
    self.static_mobjects = []
    self.time_progression = None
    self.duration = None
    self.last_t = None
    # Command queue used by the interactive-embed / file-watcher machinery.
    self.queue = Queue()
    self.skip_animation_preview = False
    # OpenGL-only scene content and interaction state.
    self.meshes = []
    self.camera_target = ORIGIN
    self.widgets = []
    self.dearpygui_imported = dearpygui_imported
    self.updaters = []
    self.point_lights = []
    self.ambient_light = None
    self.key_to_function_map = {}
    self.mouse_press_callbacks = []
    self.interactive_mode = False
    if config.renderer == "opengl":
        # Items associated with interaction
        self.mouse_point = OpenGLPoint()
        self.mouse_drag_point = OpenGLPoint()
        if renderer is None:
            renderer = OpenGLRenderer()
    if renderer is None:
        # Fall back to the Cairo renderer when none was supplied and the
        # OpenGL renderer was not selected via config.
        self.renderer = CairoRenderer(
            camera_class=self.camera_class,
            skip_animations=self.skip_animations,
        )
    else:
        self.renderer = renderer
    self.renderer.init_scene(self)
    self.mobjects = []
    # TODO, remove need for foreground mobjects
    self.foreground_mobjects = []
    # Seed both RNGs so renders are reproducible when a seed is given.
    if self.random_seed is not None:
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
@property
def camera(self):
    # The camera is owned by the renderer; exposed here for convenience.
    return self.renderer.camera
def __deepcopy__(self, clone_from_id):
    """Deep-copy the scene, excluding the renderer and time progression.

    Mobject updaters are rebuilt so that their closures reference the
    cloned mobjects instead of the originals.

    Parameters
    ----------
    clone_from_id : dict
        The memo dict used by :func:`copy.deepcopy`, mapping original
        object ids to their clones.
    """
    cls = self.__class__
    result = cls.__new__(cls)
    clone_from_id[id(self)] = result
    for k, v in self.__dict__.items():
        if k in ["renderer", "time_progression"]:
            continue
        if k == "camera_class":
            # The class object is shared, not copied; skip the deepcopy below.
            setattr(result, k, v)
            continue
        setattr(result, k, copy.deepcopy(v, clone_from_id))
    result.mobject_updater_lists = []

    # Update updaters
    for mobject in self.mobjects:
        cloned_updaters = []
        for updater in mobject.updaters:
            # Make the cloned updater use the cloned Mobjects as free variables
            # rather than the original ones. Analyzing function bytecode with the
            # dis module will help in understanding this.
            # https://docs.python.org/3/library/dis.html
            # TODO: Do the same for function calls recursively.
            free_variable_map = inspect.getclosurevars(updater).nonlocals
            cloned_co_freevars = []
            cloned_closure = []
            for free_variable_name in updater.__code__.co_freevars:
                free_variable_value = free_variable_map[free_variable_name]

                # If the referenced variable has not been cloned, raise.
                if id(free_variable_value) not in clone_from_id:
                    raise Exception(
                        f"{free_variable_name} is referenced from an updater "
                        "but is not an attribute of the Scene, which isn't "
                        "allowed.",
                    )

                # Add the cloned object's name to the free variable list.
                cloned_co_freevars.append(free_variable_name)

                # Add a cell containing the cloned object's reference to the
                # closure list.
                cloned_closure.append(
                    types.CellType(clone_from_id[id(free_variable_value)]),
                )

            cloned_updater = types.FunctionType(
                updater.__code__.replace(co_freevars=tuple(cloned_co_freevars)),
                updater.__globals__,
                updater.__name__,
                updater.__defaults__,
                tuple(cloned_closure),
            )
            cloned_updaters.append(cloned_updater)
        mobject_clone = clone_from_id[id(mobject)]
        mobject_clone.updaters = cloned_updaters
        if len(cloned_updaters) > 0:
            result.mobject_updater_lists.append((mobject_clone, cloned_updaters))
    return result
def render(self, preview=False):
    """
    Renders this Scene.

    Parameters
    ----------
    preview : bool
        If true, opens scene in a file viewer.
    """
    self.setup()
    try:
        self.construct()
    except EndSceneEarlyException:
        pass
    except RerunSceneException:
        # Interactive rerun requested: reset the scene and tell the caller.
        self.remove(*self.mobjects)
        self.renderer.clear_screen()
        self.renderer.num_plays = 0
        return True
    self.tear_down()
    # We have to reset these settings in case of multiple renders.
    self.renderer.scene_finished(self)

    # Show info only if animations are rendered or to get image
    if (
        self.renderer.num_plays
        or config["format"] == "png"
        or config["save_last_frame"]
    ):
        logger.info(
            f"Rendered {str(self)}\nPlayed {self.renderer.num_plays} animations",
        )

    # If preview open up the render after rendering.
    if preview:
        config["preview"] = True

    if config["preview"] or config["show_in_file_browser"]:
        open_media_file(self.renderer.file_writer)
def setup(self):
    """
    This is meant to be implemented by any scenes which
    are commonly subclassed, and have some common setup
    involved before the construct method is called.
    """
    pass
def tear_down(self):
    """
    This is meant to be implemented by any scenes which
    are commonly subclassed, and have some common method
    to be invoked before the scene ends.
    """
    pass
def construct(self):
    """Add content to the Scene.

    From within :meth:`Scene.construct`, display mobjects on screen by calling
    :meth:`Scene.add` and remove them from screen by calling :meth:`Scene.remove`.
    All mobjects currently on screen are kept in :attr:`Scene.mobjects`. Play
    animations by calling :meth:`Scene.play`.

    Notes
    -----
    Initialization code should go in :meth:`Scene.setup`. Termination code should
    go in :meth:`Scene.tear_down`.

    Examples
    --------
    A typical manim script includes a class derived from :class:`Scene` with an
    overridden :meth:`Scene.construct` method:

    .. code-block:: python

        class MyScene(Scene):
            def construct(self):
                self.play(Write(Text("Hello World!")))

    See Also
    --------
    :meth:`Scene.setup`
    :meth:`Scene.render`
    :meth:`Scene.tear_down`
    """
    pass  # To be implemented in subclasses
def next_section(
    self,
    name: str = "unnamed",
    type: str = DefaultSectionType.NORMAL,  # noqa: shadows builtin `type` (kept for API compatibility)
    skip_animations: bool = False,
) -> None:
    """Create separation here; the last section gets finished and a new one gets created.
    ``skip_animations`` skips the rendering of all animations in this section.
    Refer to :doc:`the documentation</tutorials/a_deeper_look>` on how to use sections.
    """
    self.renderer.file_writer.next_section(name, type, skip_animations)
def __str__(self):
    """A scene's display name is simply its class name."""
    return type(self).__name__
def get_attrs(self, *keys):
    """Look up several scene attributes by name.

    Parameters
    ----------
    *keys : str
        Name(s) of the attribute(s) to fetch.

    Returns
    -------
    list
        The attribute values, in the same order as ``keys``.
    """
    values = []
    for key in keys:
        values.append(getattr(self, key))
    return values
def update_mobjects(self, dt):
    """Advance the updaters of every mobject in the scene.

    Parameters
    ----------
    dt : int or float
        Time elapsed since the previous update (usually 1/frames_per_second).
    """
    for mob in self.mobjects:
        mob.update(dt)
def update_meshes(self, dt):
    # Advance updaters on every mesh, including all of its family members.
    for scene_mesh in self.meshes:
        for member in scene_mesh.get_family():
            member.update(dt)
def update_self(self, dt):
    # Run the scene-level updater functions registered via add_updater().
    for scene_updater in self.updaters:
        scene_updater(dt)
def should_update_mobjects(self):
    """
    Returns True if any mobject in Scene is being updated
    or if the scene has always_update_mobjects set to true.

    Returns
    -------
    bool
    """
    # Generator (not a list) lets any() short-circuit on the first match.
    return self.always_update_mobjects or any(
        mob.has_time_based_updater() for mob in self.get_mobject_family_members()
    )
def get_top_level_mobjects(self):
    """Return the mobjects that are not submobjects of any other mobject.

    Returns
    -------
    list
        List of top level mobjects.
    """
    # A top-level mobject appears in exactly one family: its own.
    families = [m.get_family() for m in self.mobjects]

    def membership_count(mobject):
        return sum((mobject in family) for family in families)

    return [m for m in self.mobjects if membership_count(m) == 1]
def get_mobject_family_members(self):
    """Return the family members of every mobject in the scene.

    If a Circle() and a VGroup(Rectangle(), Triangle()) were added, this
    returns not only the Circle(), Rectangle() and Triangle(), but also
    the VGroup() object itself.

    Returns
    -------
    list
        List of mobject family members.
    """
    if config.renderer == "opengl":
        # Flatten every mobject's family into a single list.
        return [
            member
            for mob in self.mobjects
            for member in mob.get_family()
        ]
    return extract_mobject_family_members(
        self.mobjects,
        use_z_index=self.renderer.camera.use_z_index,
    )
def add(self, *mobjects):
    """
    Mobjects will be displayed, from background to
    foreground in the order with which they are added.

    Parameters
    ----------
    *mobjects : Mobject
        Mobjects to add.

    Returns
    -------
    Scene
        The same scene after adding the Mobjects in.
    """
    if config.renderer == "opengl":
        # OpenGL tracks meshes (Object3D) separately from mobjects.
        new_mobjects = []
        new_meshes = []
        for mobject_or_mesh in mobjects:
            if isinstance(mobject_or_mesh, Object3D):
                new_meshes.append(mobject_or_mesh)
            else:
                new_mobjects.append(mobject_or_mesh)
        # Remove first so re-adding moves an object to the end (front).
        self.remove(*new_mobjects)
        self.mobjects += new_mobjects
        self.remove(*new_meshes)
        self.meshes += new_meshes
    else:
        # Foreground mobjects must stay on top, so re-append them last.
        mobjects = [*mobjects, *self.foreground_mobjects]
        self.restructure_mobjects(to_remove=mobjects)
        self.mobjects += mobjects
        if self.moving_mobjects is not None:
            self.restructure_mobjects(
                to_remove=mobjects,
                mobject_list_name="moving_mobjects",
            )
            self.moving_mobjects += mobjects
    return self
def add_mobjects_from_animations(self, animations):
    # Snapshot of everything already on screen (including submobjects).
    curr_mobjects = self.get_mobject_family_members()
    for animation in animations:
        # Introducer animations (e.g. Create) add their mobject themselves.
        if animation.is_introducer():
            continue
        # Anything animated that's not already in the
        # scene gets added to the scene
        mob = animation.mobject
        if mob is not None and mob not in curr_mobjects:
            self.add(mob)
            curr_mobjects += mob.get_family()
def remove(self, *mobjects):
    """
    Removes mobjects in the passed list of mobjects
    from the scene and the foreground, by removing them
    from "mobjects" and "foreground_mobjects"

    Parameters
    ----------
    *mobjects : Mobject
        The mobjects to remove.
    """
    if config.renderer == "opengl":
        # Meshes (Object3D) and mobjects live in separate scene lists.
        mobjects_to_remove = []
        meshes_to_remove = set()
        for mobject_or_mesh in mobjects:
            if isinstance(mobject_or_mesh, Object3D):
                meshes_to_remove.add(mobject_or_mesh)
            else:
                mobjects_to_remove.append(mobject_or_mesh)
        self.mobjects = restructure_list_to_exclude_certain_family_members(
            self.mobjects,
            mobjects_to_remove,
        )
        self.meshes = list(
            filter(lambda mesh: mesh not in set(meshes_to_remove), self.meshes),
        )
        return self
    else:
        for list_name in "mobjects", "foreground_mobjects":
            self.restructure_mobjects(mobjects, list_name, False)
        return self
def add_updater(self, func):
    # Register a scene-level updater; it is called each frame with dt.
    self.updaters.append(func)
def remove_updater(self, func):
    # Drop every registered updater that is this exact function object
    # (identity comparison, matching how add_updater stored it).
    self.updaters = list(filter(lambda f: f is not func, self.updaters))
def restructure_mobjects(
    self,
    to_remove,
    mobject_list_name="mobjects",
    extract_families=True,
):
    """
    tl;dr
        If your scene has a Group(), and you removed a mobject from the Group,
        this dissolves the group and puts the rest of the mobjects directly
        in self.mobjects or self.foreground_mobjects.

    In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one
    of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects
    will be edited to contain other submobjects, but not m1, e.g. it will now
    insert m2 and m3 to where the group once was.

    Parameters
    ----------
    to_remove : Mobject
        The Mobject to remove.
    mobject_list_name : str, optional
        The list of mobjects ("mobjects", "foreground_mobjects" etc) to remove from.
    extract_families : bool, optional
        Whether the mobject's families should be recursively extracted.

    Returns
    -------
    Scene
        The Scene mobject with restructured Mobjects.
    """
    if extract_families:
        to_remove = extract_mobject_family_members(
            to_remove,
            use_z_index=self.renderer.camera.use_z_index,
        )
    _list = getattr(self, mobject_list_name)
    new_list = self.get_restructured_mobject_list(_list, to_remove)
    setattr(self, mobject_list_name, new_list)
    return self
def get_restructured_mobject_list(self, mobjects, to_remove):
    """Filter removable mobjects out of a mobject list.

    A mobject whose family intersects the removal set — without being
    removed itself — is dissolved: its surviving submobjects take its
    place in the result.

    Parameters
    ----------
    mobjects : list
        The Mobjects to check.
    to_remove : list
        The list of mobjects to remove.

    Returns
    -------
    list
        The list of mobjects with the mobjects to remove removed.
    """
    kept = []

    def collect(candidates, removal_set):
        # Recursively keep candidates that neither are removed nor
        # contain a removed family member.
        for candidate in candidates:
            if candidate in removal_set:
                continue
            overlap = removal_set.intersection(candidate.get_family())
            if overlap:
                collect(candidate.submobjects, overlap)
            else:
                kept.append(candidate)

    collect(mobjects, set(to_remove))
    return kept
# TODO, remove this, and calls to this
def add_foreground_mobjects(self, *mobjects):
    """
    Adds mobjects to the foreground, and internally to the list
    foreground_mobjects, and mobjects.

    Parameters
    ----------
    *mobjects : Mobject
        The Mobjects to add to the foreground.

    Returns
    -------
    Scene
        The Scene, with the foreground mobjects added.
    """
    # Must update foreground_mobjects before add(); add() re-appends
    # the foreground list to keep it on top.
    self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)
    self.add(*mobjects)
    return self
def add_foreground_mobject(self, mobject):
    """
    Adds a single mobject to the foreground, and internally to the list
    foreground_mobjects, and mobjects.

    Parameters
    ----------
    mobject : Mobject
        The Mobject to add to the foreground.

    Returns
    -------
    Scene
        The Scene, with the foreground mobject added.
    """
    # Thin convenience wrapper over the variadic form.
    return self.add_foreground_mobjects(mobject)
def remove_foreground_mobjects(self, *to_remove):
    """
    Removes mobjects from the foreground, and internally from the list
    foreground_mobjects.

    Parameters
    ----------
    *to_remove : Mobject
        The mobject(s) to remove from the foreground.

    Returns
    -------
    Scene
        The Scene, with the foreground mobjects removed.
    """
    # Only the foreground list is restructured; the mobjects stay on screen.
    self.restructure_mobjects(to_remove, "foreground_mobjects")
    return self
def remove_foreground_mobject(self, mobject):
    """
    Removes a single mobject from the foreground, and internally from the list
    foreground_mobjects.

    Parameters
    ----------
    mobject : Mobject
        The mobject to remove from the foreground.

    Returns
    -------
    Scene
        The Scene, with the foreground mobject removed.
    """
    # Thin convenience wrapper over the variadic form.
    return self.remove_foreground_mobjects(mobject)
def bring_to_front(self, *mobjects):
    """
    Adds the passed mobjects to the scene again,
    pushing them to the front of the scene.

    Parameters
    ----------
    *mobjects : Mobject
        The mobject(s) to bring to the front of the scene.

    Returns
    -------
    Scene
        The Scene, with the mobjects brought to the front
        of the scene.
    """
    # add() removes then re-appends, which moves the mobjects to the end
    # of the draw order (the front).
    self.add(*mobjects)
    return self
def bring_to_back(self, *mobjects):
    """
    Removes the mobject from the scene and
    adds them to the back of the scene.

    Parameters
    ----------
    *mobjects : Mobject
        The mobject(s) to push to the back of the scene.

    Returns
    -------
    Scene
        The Scene, with the mobjects pushed to the back
        of the scene.
    """
    # Remove, then prepend: the start of the list draws first (the back).
    self.remove(*mobjects)
    self.mobjects = list(mobjects) + self.mobjects
    return self
def clear(self):
    """Remove every mobject — foreground included — from the scene.

    Returns
    -------
    Scene
        The scene itself, now empty, for call chaining.
    """
    self.mobjects, self.foreground_mobjects = [], []
    return self
def get_moving_mobjects(self, *animations):
    """Collect every mobject that could move during the given animations.

    Parameters
    ----------
    *animations : Animation
        The animations to check for moving mobjects.

    Returns
    -------
    list
        The list of mobjects that could be moving in the Animation(s).
    """
    # Scan the draw order front-to-back; as soon as one mobject needs
    # per-frame updating, everything drawn after it must be re-rendered
    # too, so return the tail of the list from that point on.
    animation_mobjects = [anim.mobject for anim in animations]
    family = self.get_mobject_family_members()
    for index, mob in enumerate(family):
        if (
            mob in animation_mobjects
            or len(mob.get_family_updaters()) > 0
            or mob in self.foreground_mobjects
        ):
            return family[index:]
    return []
def get_moving_and_static_mobjects(self, animations):
    # Partition everything on screen into mobjects that may change during
    # the given animations and those guaranteed static; the renderer can
    # paint the static ones once and cache them.
    all_mobjects = list_update(self.mobjects, self.foreground_mobjects)
    all_mobject_families = extract_mobject_family_members(
        all_mobjects,
        use_z_index=self.renderer.camera.use_z_index,
        only_those_with_points=True,
    )
    moving_mobjects = self.get_moving_mobjects(*animations)
    all_moving_mobject_families = extract_mobject_family_members(
        moving_mobjects,
        use_z_index=self.renderer.camera.use_z_index,
    )
    static_mobjects = list_difference_update(
        all_mobject_families,
        all_moving_mobject_families,
    )
    return all_moving_mobject_families, static_mobjects
def compile_animations(self, *args, **kwargs):
    """
    Creates _MethodAnimations from any _AnimationBuilders and updates animation
    kwargs with kwargs passed to play().

    Parameters
    ----------
    *args : Tuple[:class:`Animation`]
        Animations to be played.
    **kwargs
        Configuration for the call to play().

    Returns
    -------
    Tuple[:class:`Animation`]
        Animations to be played.

    Raises
    ------
    TypeError
        If an argument is not an animation (e.g. a bare Mobject method).
    """
    animations = []
    for arg in args:
        try:
            animations.append(prepare_animation(arg))
        except TypeError:
            if inspect.ismethod(arg):
                raise TypeError(
                    "Passing Mobject methods to Scene.play is no longer"
                    " supported. Use Mobject.animate instead.",
                )
            else:
                raise TypeError(
                    f"Unexpected argument {arg} passed to Scene.play().",
                )

    # Propagate play() kwargs (e.g. run_time) onto each animation.
    for animation in animations:
        for k, v in kwargs.items():
            setattr(animation, k, v)

    return animations
def _get_animation_time_progression(self, animations, duration):
    """
    You will hardly use this when making your own animations.
    This method is for Manim's internal use.

    Uses :func:`~.get_time_progression` to obtain a
    CommandLine ProgressBar whose ``fill_time`` is
    dependent on the qualities of the passed Animation.

    Parameters
    ----------
    animations : List[:class:`~.Animation`, ...]
        The list of animations to get
        the time progression for.
    duration : int or float
        duration of wait time

    Returns
    -------
    time_progression
        The CommandLine Progress Bar.
    """
    # A lone Wait gets a "Waiting..." label; anything else is labeled with
    # the play count and the first animation's name.
    if len(animations) == 1 and isinstance(animations[0], Wait):
        stop_condition = animations[0].stop_condition
        if stop_condition is not None:
            time_progression = self.get_time_progression(
                duration,
                f"Waiting for {stop_condition.__name__}",
                n_iterations=-1,  # So it doesn't show % progress
                override_skip_animations=True,
            )
        else:
            time_progression = self.get_time_progression(
                duration,
                f"Waiting {self.renderer.num_plays}",
            )
    else:
        time_progression = self.get_time_progression(
            duration,
            "".join(
                [
                    f"Animation {self.renderer.num_plays}: ",
                    str(animations[0]),
                    (", etc." if len(animations) > 1 else ""),
                ],
            ),
        )
    return time_progression
def get_time_progression(
    self,
    run_time,
    description,
    n_iterations=None,
    override_skip_animations=False,
):
    """
    You will hardly use this when making your own animations.
    This method is for Manim's internal use.

    Returns a CommandLine ProgressBar whose ``fill_time``
    is dependent on the ``run_time`` of an animation,
    the iterations to perform in that animation
    and a bool saying whether or not to consider
    the skipped animations.

    Parameters
    ----------
    run_time : float
        The ``run_time`` of the animation.
    description : str
        Label shown next to the progress bar.
    n_iterations : int, optional
        The number of iterations in the animation.
    override_skip_animations : bool, optional
        Whether or not to show skipped animations in the progress bar.

    Returns
    -------
    time_progression
        The CommandLine Progress Bar.
    """
    if self.renderer.skip_animations and not override_skip_animations:
        # Single step: the animation is skipped, so no per-frame iteration.
        times = [run_time]
    else:
        step = 1 / config["frame_rate"]
        times = np.arange(0, run_time, step)
    time_progression = tqdm(
        times,
        desc=description,
        total=n_iterations,
        leave=config["progress_bar"] == "leave",
        ascii=True if platform.system() == "Windows" else None,
        disable=config["progress_bar"] == "none",
    )
    return time_progression
def get_run_time(self, animations):
    """
    Gets the total run time for a list of animations.

    Parameters
    ----------
    animations : List[:class:`Animation`, ...]
        A list of the animations whose total
        ``run_time`` is to be calculated.

    Returns
    -------
    float
        The total ``run_time`` of all of the animations in the list.
    """
    if len(animations) == 1 and isinstance(animations[0], Wait):
        if animations[0].stop_condition is not None:
            # Condition-terminated waits have no fixed duration up front.
            return 0
        else:
            return animations[0].duration
    else:
        # Simultaneous animations run for as long as the longest one.
        return np.max([animation.run_time for animation in animations])
def play(
    self,
    *args,
    subcaption=None,
    subcaption_duration=None,
    subcaption_offset=0,
    **kwargs,
):
    r"""Plays an animation in this scene.

    Parameters
    ----------
    args
        Animations to be played.
    subcaption
        The content of the external subcaption that should
        be added during the animation.
    subcaption_duration
        The duration for which the specified subcaption is
        added. If ``None`` (the default), the run time of the
        animation is taken.
    subcaption_offset
        An offset (in seconds) for the start time of the
        added subcaption.
    kwargs
        All other keywords are passed to the renderer.
    """
    # Measure how much renderer time the animation actually consumed.
    start_time = self.renderer.time
    self.renderer.play(self, *args, **kwargs)
    run_time = self.renderer.time - start_time
    if subcaption:
        if subcaption_duration is None:
            subcaption_duration = run_time
        # The start of the subcaption needs to be offset by the
        # run_time of the animation because it is added after
        # the animation has already been played (and Scene.renderer.time
        # has already been updated).
        self.add_subcaption(
            content=subcaption,
            duration=subcaption_duration,
            offset=-run_time + subcaption_offset,
        )
def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):
    # Hold the scene still for `duration` seconds (or until
    # `stop_condition` returns True) by playing a Wait animation.
    self.play(Wait(run_time=duration, stop_condition=stop_condition))
def wait_until(self, stop_condition, max_time=60):
    """
    Like a wrapper for wait().
    You pass a function that determines whether to continue waiting,
    and a max wait time if that is never fulfilled.

    Parameters
    ----------
    stop_condition : function
        The function whose boolean return value determines whether to continue waiting
    max_time : int or float, optional
        The maximum wait time in seconds, if the stop_condition is never fulfilled.
    """
    self.wait(max_time, stop_condition=stop_condition)
def compile_animation_data(self, *animations: Animation, **play_kwargs):
    """Given a list of animations, compile the corresponding
    static and moving mobjects, and gather the animation durations.

    This also begins the animations.

    Parameters
    ----------
    *animations : Animation
        The animations to prepare for playback.
    **play_kwargs
        Configuration forwarded onto each animation (e.g. ``run_time``).

    Returns
    -------
    self, None
        None if there is nothing to play, or self otherwise.

    Raises
    ------
    ValueError
        If called with no animations.
    """
    # NOTE TODO : return statements of this method are wrong. It should return nothing, as it makes little sense to get any information from this method.
    # The returns are kept to keep the webgl renderer from breaking.
    if len(animations) == 0:
        raise ValueError("Called Scene.play with no animations")
    self.animations = self.compile_animations(*animations, **play_kwargs)
    self.add_mobjects_from_animations(self.animations)
    self.last_t = 0
    self.stop_condition = None
    self.moving_mobjects = []
    self.static_mobjects = []
    if config.renderer != "opengl":
        # A lone Wait is special-cased: it may render as a static image.
        if len(self.animations) == 1 and isinstance(self.animations[0], Wait):
            self.update_mobjects(dt=0)  # Any problems with this?
            if self.should_update_mobjects():
                self.stop_condition = self.animations[0].stop_condition
            else:
                self.duration = self.animations[0].duration
                # Static image logic when the wait is static is done by the renderer, not here.
                self.animations[0].is_static_wait = True
                return None
        else:
            # Paint all non-moving objects onto the screen, so they don't
            # have to be rendered every frame
            (
                self.moving_mobjects,
                self.static_mobjects,
            ) = self.get_moving_and_static_mobjects(self.animations)
    self.duration = self.get_run_time(self.animations)
    return self
def begin_animations(self) -> None:
    """Kick off every queued animation of the scene.

    Each animation first registers itself with the scene, then starts.
    """
    for queued_animation in self.animations:
        # The animation may add/replace mobjects before it begins.
        queued_animation._setup_scene(self)
        queued_animation.begin()
def is_current_animation_frozen_frame(self) -> bool:
    """Returns whether the current animation produces a static frame (generally a Wait)."""
    current = self.animations[0]
    # Same short-circuit order as the historical implementation:
    # type check first, then cardinality, then the static flag.
    return (
        isinstance(current, Wait)
        and len(self.animations) == 1
        and current.is_static_wait
    )
def play_internal(self, skip_rendering=False):
    """
    This method is used to prep the animations for rendering,
    apply the arguments and parameters required to them,
    render them, and write them to the video file.

    Parameters
    ----------
    args
        Animation or mobject with mobject method and params
    kwargs
        named parameters affecting what was passed in ``args``,
        e.g. ``run_time``, ``lag_ratio`` and so on.
    """
    self.duration = self.get_run_time(self.animations)
    # Time progression yields the absolute timestamps of each frame.
    self.time_progression = self._get_animation_time_progression(
        self.animations,
        self.duration,
    )
    for t in self.time_progression:
        self.update_to_time(t)
        if not skip_rendering and not self.skip_animation_preview:
            self.renderer.render(self, t, self.moving_mobjects)
        # An optional stop_condition (set for updater-driven Waits)
        # can end the play early.
        if self.stop_condition is not None and self.stop_condition():
            self.time_progression.close()
            break
    # Let each animation finalize and remove its temporary state.
    for animation in self.animations:
        animation.finish()
        animation.clean_up_from_scene(self)
    if not self.renderer.skip_animations:
        self.update_mobjects(0)
    # Drop the cached background so the next play re-partitions mobjects.
    self.renderer.static_image = None
    # Closing the progress bar at the end of the play.
    self.time_progression.close()
def check_interactive_embed_is_valid(self):
    """Return ``True`` when an interactive embed may be started.

    A forced window always allows interaction; otherwise any of the
    conditions below disables it and logs the reason.
    """
    if config["force_window"]:
        return True
    disable_reason = None
    if self.skip_animation_preview:
        disable_reason = (
            "Disabling interactive embed as 'skip_animation_preview' is enabled"
        )
    elif config["write_to_movie"]:
        disable_reason = "Disabling interactive embed as 'write_to_movie' is enabled"
    elif config["format"]:
        disable_reason = (
            "Disabling interactive embed as '--format' is set as " + config["format"]
        )
    elif not self.renderer.window:
        disable_reason = "Disabling interactive embed as no window was created"
    elif config.dry_run:
        disable_reason = "Disabling interactive embed as dry_run is enabled"
    if disable_reason is not None:
        logger.warning(disable_reason)
        return False
    return True
def interactive_embed(self):
    """
    Like embed(), but allows for screen interaction.

    Spawns an IPython shell on a background thread so the render loop
    can keep running on the main thread; scene commands issued in the
    shell are forwarded through ``self.queue``.
    """
    if not self.check_interactive_embed_is_valid():
        return
    self.interactive_mode = True

    def ipython(shell, namespace):
        # Runs on the keyboard thread: populate the shell namespace with
        # all manim names, then block inside the embedded shell.
        import manim
        import manim.opengl

        def load_module_into_namespace(module, namespace):
            for name in dir(module):
                namespace[name] = getattr(module, name)

        load_module_into_namespace(manim, namespace)
        load_module_into_namespace(manim.opengl, namespace)

        def embedded_rerun(*args, **kwargs):
            # Queue a rerun request and leave the shell.
            self.queue.put(("rerun_keyboard", args, kwargs))
            shell.exiter()

        namespace["rerun"] = embedded_rerun
        shell(local_ns=namespace)
        # Shell exited normally: tell the main loop to stop interacting.
        self.queue.put(("exit_keyboard", [], {}))

    def get_embedded_method(method_name):
        # Wrap a scene method so calling it only enqueues the call; the
        # main thread executes it in interact().
        return lambda *args, **kwargs: self.queue.put((method_name, args, kwargs))

    local_namespace = inspect.currentframe().f_back.f_locals
    for method in ("play", "wait", "add", "remove"):
        embedded_method = get_embedded_method(method)
        # Allow for calling scene methods without prepending 'self.'.
        local_namespace[method] = embedded_method

    from IPython.terminal.embed import InteractiveShellEmbed
    from traitlets.config import Config

    cfg = Config()
    cfg.TerminalInteractiveShell.confirm_exit = False
    shell = InteractiveShellEmbed(config=cfg)

    keyboard_thread = threading.Thread(
        target=ipython,
        args=(shell, local_namespace),
    )
    # run as daemon to kill thread when main thread exits
    if not shell.pt_app:
        keyboard_thread.daemon = True
    keyboard_thread.start()

    # Optionally bring up the dearpygui control panel.
    if self.dearpygui_imported and config["enable_gui"]:
        if not dpg.is_dearpygui_running():
            gui_thread = threading.Thread(
                target=configure_pygui,
                args=(self.renderer, self.widgets),
                kwargs={"update": False},
            )
            gui_thread.start()
        else:
            configure_pygui(self.renderer, self.widgets, update=True)

    self.camera.model_matrix = self.camera.default_model_matrix

    self.interact(shell, keyboard_thread)
def interact(self, shell, keyboard_thread):
    """Main interactive loop: render frames and service queued commands.

    Runs on the main thread. The loop drains ``self.queue`` (fed by the
    shell thread and the file watcher) and otherwise renders frames with
    live updaters until the window closes or interaction is quit.
    """
    # Watch the input file so edits trigger a scene rerun.
    event_handler = RerunSceneHandler(self.queue)
    file_observer = Observer()
    file_observer.schedule(event_handler, config["input_file"], recursive=True)
    file_observer.start()

    self.quit_interaction = False
    # Only prompt-toolkit shells need an explicit join on exit.
    keyboard_thread_needs_join = shell.pt_app is not None
    assert self.queue.qsize() == 0

    last_time = time.time()
    while not (self.renderer.window.is_closing or self.quit_interaction):
        if not self.queue.empty():
            tup = self.queue.get_nowait()
            if tup[0].startswith("rerun"):
                # Intentionally skip calling join() on the file thread to save time.
                if not tup[0].endswith("keyboard"):
                    # Rerun requested by the file watcher: kick the shell
                    # out with EOF and restart the scene immediately.
                    if shell.pt_app:
                        shell.pt_app.app.exit(exception=EOFError)
                    file_observer.unschedule_all()
                    raise RerunSceneException
                keyboard_thread.join()

                kwargs = tup[2]
                if "from_animation_number" in kwargs:
                    config["from_animation_number"] = kwargs[
                        "from_animation_number"
                    ]
                # # TODO: This option only makes sense if interactive_embed() is run at the
                # # end of a scene by default.
                # if "upto_animation_number" in kwargs:
                #     config["upto_animation_number"] = kwargs[
                #         "upto_animation_number"
                #     ]

                keyboard_thread.join()
                file_observer.unschedule_all()
                raise RerunSceneException
            elif tup[0].startswith("exit"):
                # Intentionally skip calling join() on the file thread to save time.
                if not tup[0].endswith("keyboard") and shell.pt_app:
                    shell.pt_app.app.exit(exception=EOFError)
                keyboard_thread.join()
                # Remove exit_keyboard from the queue if necessary.
                while self.queue.qsize() > 0:
                    self.queue.get()
                keyboard_thread_needs_join = False
                break
            else:
                # A plain scene command (play/wait/add/remove) queued by
                # the embedded shell — execute it here on the main thread.
                method, args, kwargs = tup
                getattr(self, method)(*args, **kwargs)
        else:
            # No pending command: render one frame with live updaters.
            self.renderer.animation_start_time = 0
            dt = time.time() - last_time
            last_time = time.time()
            self.renderer.render(self, dt, self.moving_mobjects)
            self.update_mobjects(dt)
            self.update_meshes(dt)
            self.update_self(dt)

    # Join the keyboard thread if necessary.
    if shell is not None and keyboard_thread_needs_join:
        shell.pt_app.app.exit(exception=EOFError)
        keyboard_thread.join()
        # Remove exit_keyboard from the queue if necessary.
        while self.queue.qsize() > 0:
            self.queue.get()

    file_observer.stop()
    file_observer.join()

    if self.dearpygui_imported and config["enable_gui"]:
        dpg.stop_dearpygui()

    if self.renderer.window.is_closing:
        self.renderer.window.destroy()
def embed(self):
    """Drop into an IPython shell with the scene's caller locals.

    Unlike :meth:`interactive_embed`, the shell runs on the current
    thread and only re-renders a frame after each executed cell; there
    is no mouse/keyboard window interaction.
    """
    if not config["preview"]:
        logger.warning("Called embed() while no preview window is available.")
        return
    if config["write_to_movie"]:
        logger.warning("embed() is skipped while writing to a file.")
        return

    self.renderer.animation_start_time = 0
    self.renderer.render(self, -1, self.moving_mobjects)

    # Configure IPython shell.
    from IPython.terminal.embed import InteractiveShellEmbed

    shell = InteractiveShellEmbed()

    # Have the frame update after each command
    shell.events.register(
        "post_run_cell",
        lambda *a, **kw: self.renderer.render(self, -1, self.moving_mobjects),
    )

    # Use the locals of the caller as the local namespace
    # once embedded, and add a few custom shortcuts.
    local_ns = inspect.currentframe().f_back.f_locals
    # local_ns["touch"] = self.interact
    for method in (
        "play",
        "wait",
        "add",
        "remove",
        "interact",
        # "clear",
        # "save_state",
        # "restore",
    ):
        local_ns[method] = getattr(self, method)
    shell(local_ns=local_ns, stack_depth=2)

    # End scene when exiting an embed.
    raise Exception("Exiting scene.")
def update_to_time(self, t):
    """Advance the scene and all of its animations to absolute time ``t``.

    The delta from the previous call is forwarded to every updater.
    """
    time_step = t - self.last_t
    self.last_t = t
    for animation in self.animations:
        animation.update_mobjects(time_step)
        # The interpolation parameter is the fraction of the animation elapsed.
        animation.interpolate(t / animation.run_time)
    self.update_mobjects(time_step)
    self.update_meshes(time_step)
    self.update_self(time_step)
def add_subcaption(
    self, content: str, duration: float = 1, offset: float = 0
) -> None:
    r"""Adds an entry in the corresponding subcaption file
    at the current time stamp.

    The start stamp is ``Scene.renderer.time + offset``; the entry is
    appended to the file writer's subcaption list.

    Parameters
    ----------
    content
        The subcaption content.
    duration
        The duration (in seconds) for which the subcaption is shown.
    offset
        This offset (in seconds) is added to the starting time stamp
        of the subcaption.
    """
    begin_stamp = self.renderer.time + offset
    entry = srt.Subtitle(
        index=len(self.renderer.file_writer.subcaptions),
        content=content,
        start=datetime.timedelta(seconds=begin_stamp),
        end=datetime.timedelta(seconds=begin_stamp + duration),
    )
    self.renderer.file_writer.subcaptions.append(entry)
def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):
    """
    Attach a sound to the animation timeline.

    Parameters
    ----------
    sound_file : str
        The path to the sound file.
    time_offset : int, float, optional
        Seconds past the current render time at which playback starts.
    gain : float
        Amplification of the sound.
    """
    if self.renderer.skip_animations:
        # Nothing is being rendered, so there is no timeline to attach to.
        return
    start_time = self.renderer.time + time_offset
    self.renderer.file_writer.add_sound(sound_file, start_time, gain, **kwargs)
def on_mouse_motion(self, point, d_point):
    """Track the mouse point; with Shift held, pan the camera."""
    self.mouse_point.move_to(point)
    if SHIFT_VALUE not in self.renderer.pressed_keys:
        return
    # Scale the screen-space delta to scene units, then undo the
    # camera rotation so the pan happens in camera-aligned axes.
    shift = -d_point
    shift[0] *= self.camera.get_width() / 2
    shift[1] *= self.camera.get_height() / 2
    transform = self.camera.inverse_rotation_matrix
    shift = np.dot(np.transpose(transform), shift)
    self.camera.shift(shift)
def on_mouse_scroll(self, point, offset):
    """Zoom the camera about its target, then delegate to orbit controls."""
    if not config.use_projection_stroke_shaders:
        zoom_factor = 1 + np.arctan(-2.1 * offset[1])
        self.camera.scale(zoom_factor, about_point=self.camera_target)
    self.mouse_scroll_orbit_controls(point, offset)
def on_key_press(self, symbol, modifiers):
    """Handle built-in key bindings ('r' reset, 'q' quit) and user ones."""
    try:
        char = chr(symbol)
    except OverflowError:
        logger.warning("The value of the pressed key is too large.")
        return
    if char == "r":
        # Reset the camera and re-center the orbit target.
        self.camera.to_default_state()
        self.camera_target = np.array([0, 0, 0], dtype=np.float32)
    elif char == "q":
        self.quit_interaction = True
    elif char in self.key_to_function_map:
        # User-registered binding via set_key_function().
        self.key_to_function_map[char]()
def on_key_release(self, symbol, modifiers):
    """Hook for key-release events; intentionally a no-op, may be overridden."""
    pass
def on_mouse_drag(self, point, d_point, buttons, modifiers):
    """Orbit (left drag) or pan (button 4 drag) the camera, then delegate."""
    self.mouse_drag_point.move_to(point)
    if buttons == 1:
        # Left drag: orbit by incrementing the spherical angles.
        self.camera.increment_theta(-d_point[0])
        self.camera.increment_phi(d_point[1])
    elif buttons == 4:
        # Pan in the camera plane: x along the camera's x axis,
        # y along the axis perpendicular to it and OUT.
        camera_x_axis = self.camera.model_matrix[:3, 0]
        pan = (-d_point[0]) * camera_x_axis + (-d_point[1]) * np.cross(
            OUT, camera_x_axis
        )
        self.camera.shift(1.1 * pan)
    self.mouse_drag_orbit_controls(point, d_point, buttons, modifiers)
def mouse_scroll_orbit_controls(self, point, offset):
    """Dolly the camera along the line to its target on scroll."""
    to_target = self.camera_target - self.camera.get_position()
    # The scroll direction decides whether we move toward or away.
    step = 0.01 * np.sign(offset[1]) * to_target
    self.camera.model_matrix = (
        opengl.translation_matrix(*step) @ self.camera.model_matrix
    )
def mouse_drag_orbit_controls(self, point, d_point, buttons, modifiers):
    """Orbit (left drag) or pan (button 4) the camera around its target,
    clamping the polar angle to the camera's configured limits.
    """
    # Left click drag.
    if buttons == 1:
        # Translate to target the origin and rotate around the z axis.
        self.camera.model_matrix = (
            opengl.rotation_matrix(z=-d_point[0])
            @ opengl.translation_matrix(*-self.camera_target)
            @ self.camera.model_matrix
        )
        # Rotation off of the z axis.
        camera_position = self.camera.get_position()
        camera_y_axis = self.camera.model_matrix[:3, 1]
        axis_of_rotation = space_ops.normalize(
            np.cross(camera_y_axis, camera_position),
        )
        rotation_matrix = space_ops.rotation_matrix(
            d_point[1],
            axis_of_rotation,
            homogeneous=True,
        )

        maximum_polar_angle = self.camera.maximum_polar_angle
        minimum_polar_angle = self.camera.minimum_polar_angle

        # Apply the tilt only if it keeps the camera within the allowed
        # polar-angle range; otherwise rotate just up to the limit.
        potential_camera_model_matrix = rotation_matrix @ self.camera.model_matrix
        potential_camera_location = potential_camera_model_matrix[:3, 3]
        potential_camera_y_axis = potential_camera_model_matrix[:3, 1]
        # Sign disambiguates angles above/below the pole.
        sign = (
            np.sign(potential_camera_y_axis[2])
            if potential_camera_y_axis[2] != 0
            else 1
        )
        potential_polar_angle = sign * np.arccos(
            potential_camera_location[2]
            / np.linalg.norm(potential_camera_location),
        )
        if minimum_polar_angle <= potential_polar_angle <= maximum_polar_angle:
            self.camera.model_matrix = potential_camera_model_matrix
        else:
            sign = np.sign(camera_y_axis[2]) if camera_y_axis[2] != 0 else 1
            current_polar_angle = sign * np.arccos(
                camera_position[2] / np.linalg.norm(camera_position),
            )
            if potential_polar_angle > maximum_polar_angle:
                polar_angle_delta = maximum_polar_angle - current_polar_angle
            else:
                polar_angle_delta = minimum_polar_angle - current_polar_angle
            rotation_matrix = space_ops.rotation_matrix(
                polar_angle_delta,
                axis_of_rotation,
                homogeneous=True,
            )
            self.camera.model_matrix = rotation_matrix @ self.camera.model_matrix
        # Translate to target the original target.
        self.camera.model_matrix = (
            opengl.translation_matrix(*self.camera_target)
            @ self.camera.model_matrix
        )
    # Right click drag.
    elif buttons == 4:
        # Pan the camera and its target in the camera plane.
        camera_x_axis = self.camera.model_matrix[:3, 0]
        horizontal_shift_vector = -d_point[0] * camera_x_axis
        vertical_shift_vector = -d_point[1] * np.cross(OUT, camera_x_axis)
        total_shift_vector = horizontal_shift_vector + vertical_shift_vector
        self.camera.model_matrix = (
            opengl.translation_matrix(*total_shift_vector)
            @ self.camera.model_matrix
        )
        self.camera_target += total_shift_vector
def set_key_function(self, char, func):
    """Register ``func`` to be invoked whenever key ``char`` is pressed."""
    self.key_to_function_map[char] = func
def on_mouse_press(self, point, button, modifiers):
    """Run every registered mouse-press callback, in registration order."""
    for callback in self.mouse_press_callbacks:
        callback()
|
// eslint-disable-next-line
import * as loginService from '@/api/login'
// eslint-disable-next-line
import { BasicLayout, BlankLayout, PageView, RouteView } from '@/layouts'
// Frontend route table: maps backend component names to Vue components.
const constantRouterComponents = {
  // Base layouts — the layout components must be imported statically
  BasicLayout: BasicLayout,
  BlankLayout: BlankLayout,
  RouteView: RouteView,
  PageView: PageView,
  '403': () => import(/* webpackChunkName: "error" */ '@/views/exception/403'),
  '404': () => import(/* webpackChunkName: "error" */ '@/views/exception/404'),
  '500': () => import(/* webpackChunkName: "error" */ '@/views/exception/500'),

  // Page components loaded on demand (dynamic imports)
  // Workplace: () => import('@/views/dashboard/Workplace'),
  // Analysis: () => import('@/views/dashboard/Analysis'),

  // form
  BasicForm: () => import('@/views/form/basicForm'),
  StepForm: () => import('@/views/form/stepForm/StepForm'),
  AdvanceForm: () => import('@/views/form/advancedForm/AdvancedForm'),

  // list
  TableList: () => import('@/views/list/TableList'),
  StandardList: () => import('@/views/list/BasicList'),
  CardList: () => import('@/views/list/CardList'),
  SearchLayout: () => import('@/views/list/search/SearchLayout'),
  SearchArticles: () => import('@/views/list/search/Article'),
  SearchProjects: () => import('@/views/list/search/Projects'),
  SearchApplications: () => import('@/views/list/search/Applications'),
  ProfileBasic: () => import('@/views/profile/basic'),
  ProfileAdvanced: () => import('@/views/profile/advanced/Advanced'),

  // result
  ResultSuccess: () => import(/* webpackChunkName: "result" */ '@/views/result/Success'),
  ResultFail: () => import(/* webpackChunkName: "result" */ '@/views/result/Error'),

  // exception
  Exception403: () => import(/* webpackChunkName: "fail" */ '@/views/exception/403'),
  Exception404: () => import(/* webpackChunkName: "fail" */ '@/views/exception/404'),
  Exception500: () => import(/* webpackChunkName: "fail" */ '@/views/exception/500'),

  // account
  AccountCenter: () => import('@/views/account/center'),
  AccountSettings: () => import('@/views/account/settings/Index'),
  BasicSetting: () => import('@/views/account/settings/BasicSetting'),
  SecuritySettings: () => import('@/views/account/settings/Security'),
  CustomSettings: () => import('@/views/account/settings/Custom'),
  BindingSettings: () => import('@/views/account/settings/Binding'),
  NotificationSettings: () => import('@/views/account/settings/Notification')
  // 'TestWork': () => import(/* webpackChunkName: "TestWork" */ '@/views/dashboard/TestWork')
}
// Frontend catch-all route (fixed — do not modify)
const notFoundRouter = {
  path: '*',
  redirect: '/404',
  hidden: true
}

// Root-level menu entry; its children are filled in from backend data.
const rootRouter = {
  key: '',
  name: 'index',
  path: '',
  component: 'BasicLayout',
  redirect: '/dashboard',
  meta: {
    // User-visible title ("Home"); intentionally left in Chinese.
    title: '首页'
  },
  children: []
}
/**
 * Dynamically generate the router table from the backend navigation data.
 * @param token auth token forwarded to the login service
 * @returns {Promise<Router>} resolves with the generated vue-router records
 */
export const generatorDynamicRouter = token => {
  // Return the chained promise directly instead of wrapping it in
  // `new Promise((resolve, reject) => ...)` (explicit-Promise-constructor
  // anti-pattern): rejections now propagate without manual plumbing.
  // Leftover console.log debug output has also been removed.
  return loginService.getCurrentUserNav(token).then(res => {
    const { result } = res
    const childrenNav = []
    // Backend data: flat list → tree, root-level parent id is 0.
    listToTree(result, childrenNav, 0)
    rootRouter.children = childrenNav
    const menuNav = [rootRouter]
    const routers = generator(menuNav)
    routers.push(notFoundRouter)
    return routers
  })
}
/**
 * Format the tree-structured data into a vue-router hierarchical route table.
 *
 * @param routerMap tree nodes produced by listToTree from backend data
 * @param parent parent route record (used to build nested default paths)
 * @returns {*} array of vue-router route records
 */
export const generator = (routerMap, parent) => {
  return routerMap.map(item => {
    const { title, show, hideChildren, hiddenHeaderContent, target, icon } = item.meta || {}
    const currentRouter = {
      // If the node defines a path use it; otherwise derive one from the
      // parent path and the node key, e.g. /dashboard/workplace
      path: item.path || `${(parent && parent.path) || ''}/${item.key}`,
      // Route name, should be unique
      name: item.name || item.key || '',
      // Component for this route: look it up in the static table, or fall
      // back to a dynamic import of @/views/<component>
      component: constantRouterComponents[item.component || item.key] || (() => import(`@/views/${item.component}`)),
      // meta: page title, menu icon, page permission (for the permission directive)
      meta: {
        title: title,
        icon: icon || undefined,
        hiddenHeaderContent: hiddenHeaderContent,
        target: target,
        permission: item.name
      }
    }
    // Hide the menu entry when the backend sets show === false
    if (show === false) {
      currentRouter.hidden = true
    }
    // Hide all children in the menu when requested
    if (hideChildren) {
      currentRouter.hideChildrenInMenu = true
    }
    // Guard against malformed backend paths: collapse EVERY run of duplicate
    // slashes. BUGFIX: String.replace with a string pattern only replaces
    // the first occurrence; a /g regex is required for a global cleanup.
    if (!currentRouter.path.startsWith('http')) {
      currentRouter.path = currentRouter.path.replace(/\/\/+/g, '/')
    }
    // Optional redirect
    item.redirect && (currentRouter.redirect = item.redirect)
    // Recurse into children when present
    if (item.children && item.children.length > 0) {
      currentRouter.children = generator(item.children, currentRouter)
    }
    return currentRouter
  })
}
/**
 * Convert a flat array into a tree.
 * @param list flat source array
 * @param tree output array that root nodes for this level are pushed into
 * @param parentId id of the parent whose children are being collected
 */
const listToTree = (list, tree, parentId) => {
  list
    .filter(item => item.parentId === parentId)
    .forEach(item => {
      const node = {
        ...item,
        key: item.key || item.name,
        children: []
      }
      // Recursively collect this node's children from the same list.
      listToTree(list, node.children, item.id)
      // Leaf nodes carry no children property at all.
      if (node.children.length <= 0) {
        delete node.children
      }
      tree.push(node)
    })
}
|
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __AGENT_H_
#define __AGENT_H_

#include <linux/err.h>
#include <rdma/ib_mad.h>

/*
 * Per-port MAD agent lifecycle. NOTE(review): return-value semantics are
 * not visible from this header — presumably 0 on success / negative errno
 * per kernel convention; confirm against the implementation in agent.c.
 */
extern int ib_agent_port_open(struct ib_device *device, int port_num);
extern int ib_agent_port_close(struct ib_device *device, int port_num);

/*
 * Send a response MAD for the given work completion on (device, port_num).
 * resp_mad_len is the response length in bytes; opa selects OPA-format
 * MADs. NOTE(review): inferred from the signature — verify in agent.c.
 */
extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
				const struct ib_wc *wc, const struct ib_device *device,
				int port_num, int qpn, size_t resp_mad_len, bool opa);

#endif	/* __AGENT_H_ */
|
import React from 'react';
const Navbar = () => {
return (
<div className='navigation'>
<p>Lab 1 - Audio Cues</p>
<p>Lab 2 - Use of Colors</p>
<p>lab 3 - Where to Click</p>
<p>Lab 4 - Autoplay Video</p>
<p>Lab 5 - Captions</p>
</div>
);
}
export default Navbar;
|
/*
@license
Copyright (c) 2016 The Polymer Project Authors. All rights reserved.
This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
Code distributed by Google as part of the polymer project is also
subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
*/
import Xen from '../xen/xen.js';
import './data-explorer.js';
const html = Xen.Template.html;
// Row template for one name/value pair: clickable name on the left; on the
// right either an expand toggle (+), a checkbox for booleans, plain text for
// strings/numbers, or a nested <data-explorer> for expanded objects. The
// hidden/notX bindings computed in _render() pick which one is visible.
const template = html`
<left title="{{name}}" on-click="_onExpandClick"><span>{{name}}</span>:</left>
<right>
  <div hidden="{{hideexpand}}" on-click="_onExpandClick">+</div>
  <div hidden="{{notbool}}" title="{{name}}"><input type="checkbox" checked="{{value}}" on-click="_onCheckInput"></div>
  <div hidden="{{notstring}}" title="{{title}}" style="white-space: pre;">{{value}}</div>
  <data-explorer hidden="{{notobject}}" object="{{object}}"></data-explorer>
</right>
`;
// <data-item> renders a single key/value row of a data-explorer tree and
// toggles between collapsed and expanded views of object values.
class DataItem extends Xen.Base {
  static get observedAttributes() {
    return ['name', 'value', 'expand'];
  }
  get template() {
    return template;
  }
  get host() {
    // NOTE(review): returning `this` appears to render into the element
    // itself rather than a shadow root — confirm against Xen.Base.
    return this;
  }
  _onCheckInput(e) {
    e.stopPropagation();
    // Forward checkbox edits to the owner as an 'item-change' event.
    this.dispatchEvent(new CustomEvent('item-change', {detail: e.target.checked}));
  }
  _willReceiveProps(props, state) {
    // The 'expand' prop sets the initial expanded state.
    state.expanded = Boolean(props.expand);
  }
  _render(props, state) {
    // Classify the value; null is displayed like a string ('(null)').
    const type = typeof props.value;
    const isnull = props.value === null;
    const isobject = (type === 'object' && !isnull);
    const isstring = (type === 'string' || type === 'number' || isnull);
    const isbool = (type==='boolean');
    // Numeric names are array indices: always auto-expand those entries.
    if (!isNaN(Number(props.name))) {
      state.expanded = true;
    }
    return {
      name: props.name,
      notstring: !isstring,
      notbool: !isbool,
      notobject: !isobject || !state.expanded,
      object: isobject && state.expanded ? props.value : null,
      hideexpand: state.expanded || !isobject,
      value: isnull || isobject ? '(null)' : isbool ? props.value : String(props.value),
      title: isstring ? props.value : props.name
    };
  }
  _onExpandClick(e) {
    e.stopPropagation();
    this._setState({expanded: !this._state.expanded});
  }
}
customElements.define('data-item', DataItem);
|
import os
import random
import string
import time
import ctypes

# Verify the third-party requirements are installed before doing anything else.
try:  # Check whether the requirements have been installed
    from discord_webhook import DiscordWebhook  # Try to import discord_webhook
except ImportError:  # If it could not be imported
    # Tell the user it is missing, show the install command, then bail out.
    input(f"Module discord_webhook not installed, to install run '{'py -3' if os.name == 'nt' else 'python3.8'} -m pip install discord_webhook'\nPress enter to exit")
    exit()

try:  # Set up a try statement to catch the error
    import requests  # Try to import requests
except ImportError:  # If it has not been installed
    input(f"Module requests not installed, to install run '{'py -3' if os.name == 'nt' else 'python3.8'} -m pip install requests'\nPress enter to exit")
    exit()
class NitroGen:
    """Generates random Discord gift-code URLs and checks them against the
    Discord entitlement API, optionally notifying a Discord webhook on hits.
    """

    def __init__(self):
        # File valid codes are written to.
        self.fileName = "Nitro Codes.txt"

    def main(self):
        """Interactive entry point: ask how many codes to generate/check,
        run the checks, and print a summary report.
        """
        os.system('cls' if os.name == 'nt' else 'clear')  # Clear the screen
        if os.name == "nt":  # Windows: set the console window title
            print("")
            ctypes.windll.kernel32.SetConsoleTitleW("Nitro Generator and Checker - Made by Drillenissen#4268")
        else:  # Unix: set the terminal title via an escape sequence
            print(f'\33]0;Nitro Generator and Checker - Made by Drillenissen#4268\a', end='', flush=True)
        print(""" █████╗ ███╗   ██╗ ██████╗ ███╗   ██╗██╗██╗  ██╗
██╔══██╗████╗  ██║██╔═══██╗████╗  ██║██║╚██╗██╔╝
███████║██╔██╗ ██║██║   ██║██╔██╗ ██║██║ ╚███╔╝
██╔══██║██║╚██╗██║██║   ██║██║╚██╗██║██║ ██╔██╗
██║  ██║██║ ╚████║╚██████╔╝██║ ╚████║██║██╔╝ ██╗
╚═╝  ╚═╝╚═╝  ╚═══╝ ╚═════╝ ╚═╝  ╚═══╝╚═╝╚═╝  ╚═╝
        """)  # Print the title card
        time.sleep(2)  # Wait a few seconds
        # BUGFIX: the next two prompts previously called print() with
        # slowType's (speed, newLine) arguments — print() rendered "0.02"
        # as output and raised TypeError on the unexpected 'newLine' kwarg.
        self.slowType("Made by: Drillenissen#4268 && Benz#4947", .02)
        time.sleep(1)
        self.slowType("\nInput How Many Codes to Generate and Check: ", .02, newLine = False)
        num = int(input(''))  # Ask the user for the amount of codes
        # Get the webhook url; an empty answer means no webhook is used.
        self.slowType("\nDo you wish to use a discord webhook? \nIf so type it here or press enter to ignore: ", .02, newLine = False)
        url = input('')
        webhook = url if url != "" else None
        valid = []    # Valid codes found
        invalid = 0   # Count of invalid codes
        for i in range(num):
            try:
                # Build a random 16-character gift id and its URL.
                code = "".join(random.choices(
                    string.ascii_uppercase + string.digits + string.ascii_lowercase,
                    k = 16
                ))
                url = f"https://discord.gift/{code}"
                if self.quickChecker(url, webhook):
                    valid.append(url)
                else:
                    invalid += 1
            except Exception:  # Network or API failure — report and continue
                print(f" Error | {url} ")
        if os.name == "nt":  # Update the title with the final tallies
            ctypes.windll.kernel32.SetConsoleTitleW(f"Nitro Generator and Checker - {len(valid)} Valid | {invalid} Invalid - Made by Drillenissen#4268")
            print("")
        else:
            print(f'\33]0;Nitro Generator and Checker - {len(valid)} Valid | {invalid} Invalid - Made by Drillenissen#4268\a', end='', flush=True)
        print(f"""
Results:
Valid: {len(valid)}
Invalid: {invalid}
Valid Codes: {', '.join(valid )}""")  # Report the results of the check
        input("\nThe end! Press Enter 5 times to close the program.")
        [input(i) for i in range(4,0,-1)]  # Wait for 4 more enter presses

    def slowType(self, text, speed, newLine = True):
        """Print ``text`` one character at a time, sleeping ``speed`` seconds
        between characters; append a newline unless ``newLine`` is False.
        """
        for ch in text:
            print(ch, end = "", flush = True)  # flush forces each char out
            time.sleep(speed)
        if newLine:
            print()

    def generator(self, amount):
        """Generate ``amount`` random gift URLs into ``self.fileName``."""
        with open(self.fileName, "w", encoding="utf-8") as file:
            print("Wait, Generating for you")
            start = time.time()  # Note the start time for the report
            for i in range(amount):
                code = "".join(random.choices(
                    string.ascii_uppercase + string.digits + string.ascii_lowercase,
                    k = 16
                ))
                file.write(f"https://discord.gift/{code}\n")
            # Report how long generation took.
            print(f"Genned {amount} codes | Time taken: {round(time.time() - start, 5)}s\n")

    def fileChecker(self, notify = None):
        """Check every code stored in ``self.fileName``.

        Returns a dict with the list of valid codes and the invalid count.
        Without a webhook, checking stops at the first valid code (original
        behavior, kept intentionally).
        """
        valid = []
        invalid = 0
        with open(self.fileName, "r", encoding="utf-8") as file:
            for line in file.readlines():
                nitro = line.strip("\n")  # Drop the trailing newline
                url = f"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true"
                response = requests.get(url)
                if response.status_code == 200:  # 200 means the gift exists
                    print(f" Valid | {nitro} ")
                    valid.append(nitro)
                    if notify is not None:
                        DiscordWebhook(  # Notify the configured Discord webhook
                            url = notify,
                            content = f"Valid Nito Code detected! @everyone \n{nitro}"
                        ).execute()
                    else:
                        break  # No webhook: stop at the first valid code
                else:
                    print(f" Invalid | {nitro} ")
                    invalid += 1
        return {"valid" : valid, "invalid" : invalid}

    def quickChecker(self, nitro, notify = None):
        """Check a single gift URL; return True when Discord reports it valid."""
        url = f"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true"
        response = requests.get(url)
        if response.status_code == 200:
            print(f" Valid | {nitro} ", flush=True, end="" if os.name == 'nt' else "\n")
            # BUGFIX: append with an explicit newline. Mode "w" overwrote
            # previously found codes on every hit, and write() does not add
            # a newline by itself (the old comment claimed it did).
            with open(self.fileName, "a") as file:
                file.write(nitro + "\n")
            if notify is not None:
                DiscordWebhook(  # Notify the configured Discord webhook
                    url = notify,
                    content = f"Valid Nito Code detected! @everyone \n{nitro}"
                ).execute()
            return True
        print(f" Invalid | {nitro} ", flush=True, end="" if os.name == 'nt' else "\n")
        return False
if __name__ == '__main__':
    # Script entry point: build the generator and hand control to its UI loop.
    Gen = NitroGen()  # Create the nitro generator object
    Gen.main()  # Run the main code
|
# coding: utf-8
"""
EXACT - API
API to interact with the EXACT Server # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ImageRegistrations(object):
    """Paged result-set wrapper for ImageRegistration models.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the spec.
    """
    swagger_types = {
        'count': 'int',
        'next': 'str',
        'previous': 'str',
        'results': 'list[ImageRegistration]'
    }
    attribute_map = {
        'count': 'count',
        'next': 'next',
        'previous': 'previous',
        'results': 'results'
    }

    def __init__(self, count=None, next=None, previous=None, results=None):  # noqa: E501
        """ImageRegistrations - a model defined in Swagger

        Every field is optional; omitted fields stay None.
        """
        self._count = None
        self._next = None
        self._previous = None
        self._results = None
        self.discriminator = None
        # Route non-None arguments through the property setters.
        if count is not None:
            self.count = count
        if next is not None:
            self.next = next
        if previous is not None:
            self.previous = previous
        if results is not None:
            self.results = results

    @property
    def count(self):
        """Count attribute of this ImageRegistrations (int)."""
        return self._count

    @count.setter
    def count(self, count):
        """Set the count of this ImageRegistrations (int)."""
        self._count = count

    @property
    def next(self):
        """Next attribute of this ImageRegistrations (str)."""
        return self._next

    @next.setter
    def next(self, next):
        """Set the next of this ImageRegistrations (str)."""
        self._next = next

    @property
    def previous(self):
        """Previous attribute of this ImageRegistrations (str)."""
        return self._previous

    @previous.setter
    def previous(self, previous):
        """Set the previous of this ImageRegistrations (str)."""
        self._previous = previous

    @property
    def results(self):
        """Results attribute of this ImageRegistrations (list[ImageRegistration])."""
        return self._results

    @results.setter
    def results(self, results):
        """Set the results of this ImageRegistrations (list[ImageRegistration])."""
        self._results = results

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _plain(value):
            # Recursively convert nested models and containers to plain data.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _plain(getattr(self, attr)) for attr in self.swagger_types}
        # Kept for parity with generated models that subclass dict.
        if issubclass(ImageRegistrations, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, ImageRegistrations)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
import React, { Component, PropTypes } from 'react';
import CSSModules from 'react-css-modules';
import Todo from './Todo';
import styles from './TodoList.css';
class TodoList extends Component {
  constructor (props) {
    super(props);
    // Bind once here instead of creating new bound functions on every render.
    this.createToDo = this.createToDo.bind(this);
    this.handleClick = this.handleClick.bind(this);
    this.handleDelete = this.handleDelete.bind(this);
  }
  // Build a todo from the text input and hand it to the parent via props.
  // Entries of 3 characters or fewer are ignored.
  createToDo (e) {
    e.preventDefault();
    const text = this.getInput.value;
    if (text.length > 3) {
      this.props.addToDo({
        id: Math.random().toString(34).slice(2), // quick pseudo-unique id
        isDone: false,
        text: text
      });
      this.addToDoForm.reset();
    }
  }
  // Toggle completion state of the clicked todo.
  handleClick (e) {
    this.props.toggleToDo(e.target.id);
  }
  // Remove the clicked todo.
  handleDelete (e) {
    this.props.deleteToDo(e.target.id);
  }
  render () {
    const todos = this.props.todosList.map(t => (
      <Todo
        key={t.id}
        isDone={t.isDone}
        text={t.text}
        handleClick={this.handleClick}
        handleDelete={this.handleDelete}
        id={t.id}
      />
    ));
    return (
      <div styleName='container'>
        <h1 styleName='title'>Today's To Do List</h1>
        <form styleName='form' ref={node => this.addToDoForm = node} onSubmit={this.createToDo}>
          <input
            ref={node => this.getInput = node}
            type='text'
            placeholder='add ToDo'
            styleName='input'
          />
          <button styleName='addToDo' type='submit'>+ Add</button>
        </form>
        <ul styleName='list'>
          {todos}
        </ul>
      </div>
    )
  }
}
// Runtime prop validation: the parent must supply the todo collection and
// the add/toggle/delete callbacks used by the component above.
TodoList.propTypes = {
  todosList: PropTypes.array,
  addToDo: PropTypes.func,
  toggleToDo: PropTypes.func,
  deleteToDo: PropTypes.func
}
// allowMultiple lets a styleName attribute hold several space-separated classes.
export default CSSModules(TodoList, styles, {allowMultiple: true});
|
# model settings
# Cascade R-CNN: three bbox heads refine detections at rising IoU quality.
model = dict(
    type='CascadeRCNN',
    num_stages=3,
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + first stage of the backbone
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    # Three cascade stages share the same architecture but use progressively
    # tighter regression target_stds. num_classes=7 — presumably 6 foreground
    # classes + background (mmdetection 1.x convention); TODO confirm vs dataset.
    bbox_head=[
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=7,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=7,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.05, 0.05, 0.1, 0.1],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=7,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.033, 0.033, 0.067, 0.067],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
    ])
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    # One assigner/sampler per cascade stage; the positive IoU threshold
    # rises 0.5 -> 0.6 -> 0.7 so later stages train on better proposals.
    rcnn=[
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.6,
                neg_iou_thr=0.6,
                min_pos_iou=0.6,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.7,
                min_pos_iou=0.7,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)
    ],
    # Loss contribution of each cascade stage.
    stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/tile'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Source tiles are large: each image is first cropped to its ROI and then
# sliced into overlapping windows before the usual resize/flip/normalize.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='CutROI'),
    dict(type='CutImage', window=(5332 // 2, 3200 // 2), step=(2666 // 2, 1600 // 2)),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CutROI', training=False),
    dict(type='CutImage', training=False, window=(5332 // 2, 3200 // 2), step=(2666 // 2, 1600 // 2),
         order_index=False),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img', 'roi_top_left', 'top_left']),
            # dict(type='Collect', keys=['img']),
        ])
]
# NOTE(review): no 'val' split is configured and 'test' reuses the training
# image directory — confirm this is intentional.
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + '/annotations/instance_train.json',
        img_prefix=data_root + '/tile_round1_train_20201231/train_imgs/',
        # category_ids for not load and not train, start from 1
        # ignore_ids=[1],
        pipeline=train_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + '/annotations/instance_test.json',
        img_prefix=data_root + '/tile_round1_train_20201231/train_imgs/',
        # category_ids for not coco_eval, start from 0
        # ignore_ids=[0],
        pipeline=test_pipeline))
# optimizer
# lr = 0.02/4 — presumably the standard 8-GPU base lr scaled down for fewer
# GPUs; TODO confirm against the launch script.
optimizer = dict(type='SGD', lr=0.02 / 4, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
dataset_name = 'tile'
first_model_cfg = None
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '../work_dirs/' + dataset_name + '/baseline_model_cut_ROI_cut_2666x1600'
resume_from = None
load_from = '../work_dirs/pretrained/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth'
workflow = [('train', 1)]
|
// Minified webpack runtime bundle (auto-generated — do not edit by hand).
// Module 320 sets a title and calls renderLife (module 0) with a fixed
// Game-of-Life seed grid.
// NOTE(review): the title string literal below appears to contain a raw line
// break, which is a syntax error inside a double-quoted JS string — this looks
// like extraction damage; confirm against the original built bundle.
!function(e){function r(r){for(var n,i,l=r[0],f=r[1],p=r[2],c=0,s=[];c<l.length;c++)i=l[c],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&s.push(o[i][0]),o[i]=0;for(n in f)Object.prototype.hasOwnProperty.call(f,n)&&(e[n]=f[n]);for(a&&a(r);s.length;)s.shift()();return u.push.apply(u,p||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,l=1;l<t.length;l++){var f=t[l];0!==o[f]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={321:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="";var l=window.webpackJsonp=window.webpackJsonp||[],f=l.push.bind(l);l.push=r,l=l.slice();for(var p=0;p<l.length;p++)r(l[p]);var a=f;u.push([320,0]),t()}({320:function(e,r,t){"use strict";r.__esModule=!0,r.title=void 0;var n=t(0);r.title="Long^9 
ship",n.renderLife([[0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,1,1,0,0,0,0,0,0,0,0,0,0,0],[0,1,0,1,0,0,0,0,0,0,0,0,0,0],[0,0,1,0,1,0,0,0,0,0,0,0,0,0],[0,0,0,1,0,1,0,0,0,0,0,0,0,0],[0,0,0,0,1,0,1,0,0,0,0,0,0,0],[0,0,0,0,0,1,0,1,0,0,0,0,0,0],[0,0,0,0,0,0,1,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,0,1,0,0,0,0],[0,0,0,0,0,0,0,0,1,0,1,0,0,0],[0,0,0,0,0,0,0,0,0,1,0,1,0,0],[0,0,0,0,0,0,0,0,0,0,1,0,1,0],[0,0,0,0,0,0,0,0,0,0,0,1,1,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0]])}});
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# --- Simulate a 1-D random walk: each step moves by -1, 0 or +1 ------------
steps=250
distance=0
x=0
distance_list=[]
steps_list=[]
while x<steps:
    distance+=np.random.randint(-1,2)
    distance_list.append(distance)
    x+=1
    steps_list.append(x)
plt.plot(steps_list,distance_list, color='green', label="Random Walk Data")
steps_list=np.asarray(steps_list)
distance_list=np.asarray(distance_list)
X=steps_list[:,np.newaxis]  # sklearn expects a 2-D (n_samples, n_features) array
#Polynomial fits
#Degree 2
poly_features=PolynomialFeatures(degree=2, include_bias=False)
X_poly=poly_features.fit_transform(X)
lin_reg=LinearRegression()
poly_fit=lin_reg.fit(X_poly,distance_list)
b=lin_reg.coef_
c=lin_reg.intercept_
print ("2nd degree coefficients:")
print ("zero power: ",c)
print ("first power: ", b[0])
print ("second power: ",b[1])
# Evaluate the fitted quadratic on a dense grid for a smooth curve.
# (The previously computed but unused `fit_mod` array has been removed.)
z = np.arange(0, steps, .01)
z_mod=b[1]*z**2+b[0]*z+c
plt.plot(z, z_mod, color='r', label="2nd Degree Fit")
plt.title("Polynomial Regression")
plt.xlabel("Steps")
plt.ylabel("Distance")
#Degree 10
poly_features10=PolynomialFeatures(degree=10, include_bias=False)
X_poly10=poly_features10.fit_transform(X)
poly_fit10=lin_reg.fit(X_poly10,distance_list)
y_plot=poly_fit10.predict(X_poly10)
plt.plot(X, y_plot, color='black', label="10th Degree Fit")
plt.legend()
plt.show()
#Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
regr_1=DecisionTreeRegressor(max_depth=2)
regr_2=DecisionTreeRegressor(max_depth=5)
regr_3=DecisionTreeRegressor(max_depth=7)
regr_1.fit(X, distance_list)
regr_2.fit(X, distance_list)
regr_3.fit(X, distance_list)
X_test = np.arange(0.0, steps, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3=regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, distance_list, s=2.5, c="black", label="data")
plt.plot(X_test, y_1, color="red",
         label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="green", label="max_depth=5", linewidth=2)
plt.plot(X_test, y_3, color="m", label="max_depth=7", linewidth=2)
plt.xlabel("Data")
# BUG FIX: the y-axis label previously read "Darget"; "Target" was intended.
plt.ylabel("Target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
# Disabled extrapolation experiment: it references an undefined `clf`, so it
# is deliberately kept inert as a string — fix the estimator name to enable.
"""new_dist=distance_list[-1]
step_max=2500
new_x=steps
new_dist_list=[]
new_steps_list=np.arange(steps,step_max)
while new_x>=steps and new_x<step_max:
    dist_prediction=clf.predict(new_x)
    new_dist_list.append(dist_prediction)
    new_x+=1
plt.plot(new_steps_list,new_dist_list, color='red')
plt.show()"""
|
'use strict';
// Pull the extension APIs we use off the global chrome object.
const { storage, tabs, runtime, alarms } = chrome;
import { getStoredBlackList } from './storage';
// Open the app UI in a new tab when the toolbar icon is clicked.
// (chrome.action is referenced directly because `action` was not destructured above.)
chrome.action.onClicked.addListener((tab) => {
  tabs.create({
    url: 'index.html',
  });
});
// One-time alarm setup for the focus-session timer.
const background = {
  active: false, // guards against double initialisation
  sessionTime: 0, // NOTE(review): never read or written below — confirm it is still needed
  init: async function () {
    try {
      if (!this.active) {
        // Drop any alarms left over from a previous service-worker lifetime.
        alarms.clearAll(() => {
          console.log('alarms are cleared');
        });
        this.listenForAlarm();
        this.active = true;
      }
    } catch (error) {
      console.log('issue with start up in background js', error);
    }
  },
  listenForAlarm: function () {
    // When the 'startTimer' alarm fires: notify the user, then reset the
    // persisted session state in local storage.
    return alarms.onAlarm.addListener(function (alarm) {
      if (alarm.name === 'startTimer') {
        chrome.notifications.create(
          undefined,
          {
            type: 'basic',
            title: 'Your focus session is complete!',
            message: 'Nice job! You deserve a break!',
            iconUrl: 'logo-pomo.png',
            requireInteraction: true,
            silent: false,
          },
          () => {
            // Reading lastError avoids "Unchecked runtime.lastError" noise.
            console.log('last error: ', runtime.lastError);
          }
        );
        storage.local.set({
          alarmCreated: false,
          currentSession: {},
          timerOn: false,
          sessionTime: 0,
          sessionComplete: true,
        });
      }
    });
  },
};
background.init();
// Redirect any tab that navigates to a black-listed URL.
// BUG FIX: the listener was declared `function async(tabId, ...)`, which
// defines a function *named* "async" rather than an async function. Nothing
// in the body awaits, so a plain callback is correct.
tabs.onUpdated.addListener(function (tabId, changeInfo) {
  if (changeInfo.url) {
    getStoredBlackList().then((blackListUrls) => {
      if (blackListUrls) {
        for (let i = 0; i < blackListUrls.length; i++) {
          // Skip empty entries; redirect on the first real match.
          if (
            changeInfo.url.includes(blackListUrls[i]) &&
            blackListUrls[i].length
          ) {
            tabs.update(tabId, {
              url: `${process.env.API_URL}/blocked`,
            });
            break; // one redirect is enough; stop scanning the list
          }
        }
      }
    });
  }
});
// setTimeout-based bridge: the UI asks us to schedule the end-of-session
// alarm (START_TIMER) and can query the scheduled end time (GET_TIME).
let timerID; // NOTE(review): never cleared — a second START_TIMER leaks the first timeout; confirm intended
let timerTime;
runtime.onMessage.addListener((request, sender, sendResponse) => {
  if (request.cmd === 'START_TIMER') {
    timerTime = new Date(request.when);
    // Fire the 'startTimer' alarm when the requested wall-clock time arrives.
    timerID = setTimeout(() => {
      chrome.alarms.create('startTimer', { when: Date.now() });
      timerTime = 0;
    }, timerTime.getTime() - Date.now());
  } else if (request.cmd === 'GET_TIME') {
    // Synchronous response; no need to return true from the listener.
    sendResponse({ time: timerTime });
  }
});
|
/**
* Copyright 2013-2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @emails react-core
*/
'use strict';
var React;
describe('ReactES6Class', function() {
var container;
var Inner;
var attachedListener = null;
var renderedName = null;
  // Fresh React + DOM container per test; Inner records its props so tests
  // can observe what was rendered and which click handler was attached.
  beforeEach(function() {
    React = require('React');
    container = document.createElement('div');
    attachedListener = null;
    renderedName = null;
    Inner = class extends React.Component {
      getName() {
        return this.props.name;
      }
      render() {
        attachedListener = this.props.onClick;
        renderedName = this.props.name;
        return <div className={this.props.name} />;
      }
    };
  });
  // Renders `element` into the shared container, asserts on the root DOM
  // node's tag and className, and returns the component instance.
  function test(element, expectedTag, expectedClassName) {
    var instance = React.render(element, container);
    expect(container.firstChild).not.toBeNull();
    expect(container.firstChild.tagName).toBe(expectedTag);
    expect(container.firstChild.className).toBe(expectedClassName);
    return instance;
  }
  it('preserves the name of the class for use in error messages', function() {
    class Foo extends React.Component { }
    expect(Foo.name).toBe('Foo');
  });
  it('throws if no render function is defined', function() {
    class Foo extends React.Component { }
    expect(() => React.render(<Foo />, container)).toThrow();
  });
  // NOTE: some tests below use plain classes that do NOT extend
  // React.Component, exercising React's support for "plain" ES6 classes.
  it('renders a simple stateless component with prop', function() {
    class Foo {
      render() {
        return <Inner name={this.props.bar} />;
      }
    }
    test(<Foo bar="foo" />, 'DIV', 'foo');
    test(<Foo bar="bar" />, 'DIV', 'bar');
  });
  it('renders based on state using initial values in this.props', function() {
    class Foo extends React.Component {
      constructor(props) {
        super(props);
        this.state = {bar: this.props.initialValue};
      }
      render() {
        return <span className={this.state.bar} />;
      }
    }
    test(<Foo initialValue="foo" />, 'SPAN', 'foo');
  });
  it('renders based on state using props in the constructor', function() {
    class Foo extends React.Component {
      constructor(props) {
        super(props);
        this.state = {bar: props.initialValue};
      }
      changeState() {
        this.setState({bar: 'bar'});
      }
      render() {
        if (this.state.bar === 'foo') {
          return <div className="foo" />;
        }
        return <span className={this.state.bar} />;
      }
    }
    var instance = test(<Foo initialValue="foo" />, 'DIV', 'foo');
    instance.changeState();
    test(<Foo />, 'SPAN', 'bar');
  });
  // Context and initial-state behavior.
  it('renders based on context in the constructor', function() {
    class Foo extends React.Component {
      constructor(props, context) {
        super(props, context);
        this.state = {tag: context.tag, className: this.context.className};
      }
      render() {
        var Tag = this.state.tag;
        return <Tag className={this.state.className} />;
      }
    }
    Foo.contextTypes = {
      tag: React.PropTypes.string,
      className: React.PropTypes.string
    };
    class Outer extends React.Component {
      getChildContext() {
        return {tag: 'span', className: 'foo'};
      }
      render() {
        return <Foo />;
      }
    }
    Outer.childContextTypes = {
      tag: React.PropTypes.string,
      className: React.PropTypes.string
    };
    test(<Outer />, 'SPAN', 'foo');
  });
  it('renders only once when setting state in componentWillMount', function() {
    var renderCount = 0;
    class Foo extends React.Component {
      constructor(props) {
        super(props);
        this.state = {bar: props.initialValue};
      }
      componentWillMount() {
        this.setState({bar: 'bar'});
      }
      render() {
        renderCount++;
        return <span className={this.state.bar} />;
      }
    }
    test(<Foo initialValue="foo" />, 'SPAN', 'bar');
    expect(renderCount).toBe(1);
  });
  it('should throw with non-object in the initial state property', function() {
    [['an array'], 'a string', 1234].forEach(function(state) {
      class Foo {
        constructor() {
          this.state = state;
        }
        render() {
          return <span />;
        }
      }
      expect(() => test(<Foo />, 'span', '')).toThrow(
        'Invariant Violation: Foo.state: ' +
        'must be set to an object or null'
      );
    });
  });
  it('should render with null in the initial state property', function() {
    class Foo extends React.Component {
      constructor() {
        super();
        this.state = null;
      }
      render() {
        return <span />;
      }
    }
    test(<Foo />, 'SPAN', '');
  });
  // Event-handler binding, forceUpdate, and lifecycle ordering.
  it('setState through an event handler', function() {
    class Foo extends React.Component {
      constructor(props) {
        super(props);
        this.state = {bar: props.initialValue};
      }
      handleClick() {
        this.setState({bar: 'bar'});
      }
      render() {
        return (
          <Inner
            name={this.state.bar}
            onClick={this.handleClick.bind(this)}
          />
        );
      }
    }
    test(<Foo initialValue="foo" />, 'DIV', 'foo');
    attachedListener();
    expect(renderedName).toBe('bar');
  });
  it('should not implicitly bind event handlers', function() {
    class Foo extends React.Component {
      constructor(props) {
        super(props);
        this.state = {bar: props.initialValue};
      }
      handleClick() {
        this.setState({bar: 'bar'});
      }
      render() {
        return (
          <Inner
            name={this.state.bar}
            onClick={this.handleClick}
          />
        );
      }
    }
    test(<Foo initialValue="foo" />, 'DIV', 'foo');
    expect(attachedListener).toThrow();
  });
  it('renders using forceUpdate even when there is no state', function() {
    class Foo extends React.Component {
      constructor(props) {
        super(props);
        this.mutativeValue = props.initialValue;
      }
      handleClick() {
        this.mutativeValue = 'bar';
        this.forceUpdate();
      }
      render() {
        return (
          <Inner
            name={this.mutativeValue}
            onClick={this.handleClick.bind(this)}
          />
        );
      }
    }
    test(<Foo initialValue="foo" />, 'DIV', 'foo');
    attachedListener();
    expect(renderedName).toBe('bar');
  });
  it('will call all the normal life cycle methods', function() {
    var lifeCycles = [];
    class Foo {
      constructor() {
        this.state = {};
      }
      componentWillMount() {
        lifeCycles.push('will-mount');
      }
      componentDidMount() {
        lifeCycles.push('did-mount');
      }
      componentWillReceiveProps(nextProps) {
        lifeCycles.push('receive-props', nextProps);
      }
      shouldComponentUpdate(nextProps, nextState) {
        lifeCycles.push('should-update', nextProps, nextState);
        return true;
      }
      componentWillUpdate(nextProps, nextState) {
        lifeCycles.push('will-update', nextProps, nextState);
      }
      componentDidUpdate(prevProps, prevState) {
        lifeCycles.push('did-update', prevProps, prevState);
      }
      componentWillUnmount() {
        lifeCycles.push('will-unmount');
      }
      render() {
        return <span className={this.props.value} />;
      }
    }
    test(<Foo value="foo" />, 'SPAN', 'foo');
    expect(lifeCycles).toEqual([
      'will-mount',
      'did-mount'
    ]);
    lifeCycles = []; // reset
    test(<Foo value="bar" />, 'SPAN', 'bar');
    expect(lifeCycles).toEqual([
      'receive-props', {value: 'bar'},
      'should-update', {value: 'bar'}, {},
      'will-update', {value: 'bar'}, {},
      'did-update', {value: 'foo'}, {}
    ]);
    lifeCycles = []; // reset
    React.unmountComponentAtNode(container);
    expect(lifeCycles).toEqual([
      'will-unmount'
    ]);
  });
  // Warnings for createClass-era (classic) APIs used on ES6 classes.
  it('warns when classic properties are defined on the instance, ' +
    'but does not invoke them.', function() {
    spyOn(console, 'error');
    var getDefaultPropsWasCalled = false;
    var getInitialStateWasCalled = false;
    class Foo extends React.Component {
      constructor() {
        super();
        this.contextTypes = {};
        this.propTypes = {};
      }
      getInitialState() {
        getInitialStateWasCalled = true;
        return {};
      }
      getDefaultProps() {
        getDefaultPropsWasCalled = true;
        return {};
      }
      render() {
        return <span className="foo" />;
      }
    }
    test(<Foo />, 'SPAN', 'foo');
    expect(getInitialStateWasCalled).toBe(false);
    expect(getDefaultPropsWasCalled).toBe(false);
    expect(console.error.calls.length).toBe(4);
    expect(console.error.calls[0].args[0]).toContain(
      'getInitialState was defined on Foo, a plain JavaScript class.'
    );
    expect(console.error.calls[1].args[0]).toContain(
      'getDefaultProps was defined on Foo, a plain JavaScript class.'
    );
    expect(console.error.calls[2].args[0]).toContain(
      'propTypes was defined as an instance property on Foo.'
    );
    expect(console.error.calls[3].args[0]).toContain(
      'contextTypes was defined as an instance property on Foo.'
    );
  });
  it('should warn when mispelling shouldComponentUpdate', function() {
    spyOn(console, 'error');
    class NamedComponent {
      componentShouldUpdate() {
        return false;
      }
      render() {
        return <span className="foo" />;
      }
    }
    test(<NamedComponent />, 'SPAN', 'foo');
    expect(console.error.calls.length).toBe(1);
    expect(console.error.calls[0].args[0]).toBe(
      'Warning: ' +
      'NamedComponent has a method called componentShouldUpdate(). Did you ' +
      'mean shouldComponentUpdate()? The name is phrased as a question ' +
      'because the function is expected to return a value.'
    );
  });
  it('should throw AND warn when trying to access classic APIs', function() {
    spyOn(console, 'error');
    var instance = test(<Inner name="foo" />, 'DIV', 'foo');
    expect(() => instance.getDOMNode()).toThrow();
    expect(() => instance.replaceState({})).toThrow();
    expect(() => instance.isMounted()).toThrow();
    expect(() => instance.setProps({name: 'bar'})).toThrow();
    expect(() => instance.replaceProps({name: 'bar'})).toThrow();
    expect(console.error.calls.length).toBe(5);
    expect(console.error.calls[0].args[0]).toContain(
      'getDOMNode(...) is deprecated in plain JavaScript React classes'
    );
    expect(console.error.calls[1].args[0]).toContain(
      'replaceState(...) is deprecated in plain JavaScript React classes'
    );
    expect(console.error.calls[2].args[0]).toContain(
      'isMounted(...) is deprecated in plain JavaScript React classes'
    );
    expect(console.error.calls[3].args[0]).toContain(
      'setProps(...) is deprecated in plain JavaScript React classes'
    );
    expect(console.error.calls[4].args[0]).toContain(
      'replaceProps(...) is deprecated in plain JavaScript React classes'
    );
  });
  it('supports this.context passed via getChildContext', function() {
    class Bar {
      render() {
        return <div className={this.context.bar} />;
      }
    }
    Bar.contextTypes = {bar: React.PropTypes.string};
    class Foo {
      getChildContext() {
        return {bar: 'bar-through-context'};
      }
      render() {
        return <Bar />;
      }
    }
    Foo.childContextTypes = {bar: React.PropTypes.string};
    test(<Foo />, 'DIV', 'bar-through-context');
  });
  it('supports classic refs', function() {
    class Foo {
      render() {
        return <Inner name="foo" ref="inner" />;
      }
    }
    var instance = test(<Foo />, 'DIV', 'foo');
    expect(instance.refs.inner.getName()).toBe('foo');
  });
  it('supports drilling through to the DOM using findDOMNode', function() {
    var instance = test(<Inner name="foo" />, 'DIV', 'foo');
    var node = React.findDOMNode(instance);
    expect(node).toBe(container.firstChild);
  });
});
|
import os, json
class Config:
    """Singleton that enumerates every simulation parameter combination
    and writes them, keyed by a zero-padded running id, to META.json.
    """

    __INSTANCE = None

    @staticmethod
    def get_instance():
        """Return the process-wide Config, creating it on first use."""
        if Config.__INSTANCE is None:
            Config.__INSTANCE = Config()
        return Config.__INSTANCE

    def __init__(self):
        self.index = 1  # running id, also used as the per-config file name
        self.base_dir = os.path.dirname(os.path.realpath(__file__))
        # Parameter grids swept by generate().
        self.transmit_range_list = list(range(5, 50, 5))
        self.routing_protocol_list = ['ProphetRouter', 'SprayAndWaitRouter', 'EpidemicRouter', 'WaveRouter']
        self.buffer_size_list = ['25M', '50M', '75M', '100M']
        self.amount_nodes_list = list(range(20, 140, 20))

    def __get_next_config(self):
        """Yield one config dict per parameter combination, in grid order."""
        for transmit_range in self.transmit_range_list:
            for routing_protocol in self.routing_protocol_list:
                for buffer_size in self.buffer_size_list:
                    for amount_nodes in self.amount_nodes_list:
                        pedestrians, cars, trams = self.__split_nodes(amount_nodes)
                        yield self.__build_config(transmit_range,
                                                  routing_protocol,
                                                  buffer_size,
                                                  amount_nodes,
                                                  pedestrians,
                                                  cars,
                                                  trams,
                                                  f'{self.index:03}')

    @staticmethod
    def __split_nodes(amount_nodes):
        """Split the node count into ~60% pedestrians, ~30% cars, rest trams."""
        num_pedestrians = int(0.6 * amount_nodes)
        num_cars = int(0.3 * amount_nodes)
        return num_pedestrians, num_cars, amount_nodes - num_cars - num_pedestrians

    def __build_config(self,
                       transmit_range,
                       routing_protocol,
                       buffer_size,
                       amount_nodes,
                       num_pedestrians,
                       num_cars,
                       num_trams,
                       file_name):
        """Assemble a single scenario description as a plain dict."""
        return dict(transmit_range=transmit_range,
                    routing_protocol=routing_protocol,
                    buffer_size=buffer_size,
                    amount_nodes=amount_nodes,
                    num_pedestrians=num_pedestrians,
                    num_cars=num_cars,
                    num_trams=num_trams,
                    file_name=file_name)

    def generate(self):
        """Materialise every combination into META.json next to this file."""
        configs = {}
        for config in self.__get_next_config():
            configs[f'{self.index:03}'] = config
            self.index += 1
        with open(f'{self.base_dir}/META.json', 'w') as fp:
            json.dump(configs, fp)
# Entry point: build the singleton and dump all combinations to META.json.
if __name__ == '__main__':
    config = Config.get_instance()
    config.generate()
|
// Codigo del cuadrado
console.group("Cuadrados");
const ladoCuadrado = 5;
console.log("Los lados del cuadrado miden: " + ladoCuadrado + "cm");
// Perimeter of a square: four equal sides.
function perimetroCuadrado (lado) {
  return lado * 4;
}
// BUG FIX: the function itself was being concatenated (printing its source);
// it must be CALLED with the side length.
console.log("El perimetro del cuadrado es: " + perimetroCuadrado(ladoCuadrado) + "cm");
// Area of a square: side squared.
function areaCuadrado (lado) {
  return lado * lado;
}
// BUG FIX: same as above — call the function instead of printing it.
console.log("El area del cuadrado es: " + areaCuadrado(ladoCuadrado) + "cm^2");
console.groupEnd();
// Codigo del triangulo
console.group("Triangulos");
const ladoTriangulo1 = 6;
const ladoTriangulo2 = 6;
const baseTriangulo = 4;
console.log(`Los lados del triangulo miden: ${ladoTriangulo1}cm, ${ladoTriangulo2}cm, ${baseTriangulo}cm`);
const alturaTriangulo = 5.5;
console.log(`La altura del triangulo es de: ${alturaTriangulo}cm`);
// Perimeter: sum of the three sides.
const perimetroTriangulo = ladoTriangulo1 + ladoTriangulo2 + baseTriangulo;
console.log(`El perimetro del triangulo es: ${perimetroTriangulo}cm`);
// Area: base times height over two.
const areaTriangulo = (baseTriangulo * alturaTriangulo) / 2;
console.log(`El area del triangulo es: ${areaTriangulo}cm^2 `);
console.groupEnd();
// Codigo del circulo
console.group("Circulos");
//Radio
const radioCirculo = 4;
console.log(`El radio del circulo es: ${radioCirculo}cm`);
//Diametro
const diametroCirculo = radioCirculo * 2;
console.log(`El diametro del circulo es: ${diametroCirculo}cm`);
//PI
const PI = Math.PI;
console.log(`PI es: ${PI}`);
//Circunferencia
const perimetroCirculo = diametroCirculo * PI;
console.log(`El perimetro del circulo es: ${perimetroCirculo}cm`);
//Area
const areaCirculo = (radioCirculo * radioCirculo) * PI;
console.log(`El area del circulo es: ${areaCirculo}cm`);
console.groupEnd();
// Aqui interactuamos con el html
// Reads the square input and shows its perimeter.
function calcularPerimetroCuadrado() {
  const lado = document.getElementById("InputCuadrado").value;
  alert(perimetroCuadrado(lado));
}
// Reads the square input and shows its area.
function calcularAreaCuadrado() {
  const lado = document.getElementById("InputCuadrado").value;
  alert(areaCuadrado(lado));
}
|
import React from "react";
// Controlled search form: the parent owns the input value and both handlers.
function Form({ search, handleInputChange, handleFormSubmit }) {
  return (
    <form>
      <div className="form-group">
        <label htmlFor="search">Search:</label>
        <input
          onChange={handleInputChange}
          value={search}
          name="search"
          type="text"
          className="form-control"
          placeholder="Search for an Employee"
          id="search"
        />
        <button onClick={handleFormSubmit} className="btn btn-primary mt-3">
          Search
        </button>
      </div>
    </form>
  );
}
export default Form;
|
/**********************************************************************************************************************
* DISCLAIMER
* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
* applicable laws, including copyright laws.
* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
* this software. By using this software, you agree to the additional terms and conditions found by accessing the
* following link:
* http://www.renesas.com/disclaimer
*
* Copyright (C) 2015(2020) Renesas Electronics Corporation. All rights reserved.
*********************************************************************************************************************/
/**********************************************************************************************************************
* File Name : r_usb_clibusbip.c
* Description : USB IP Host and Peripheral low level library
*********************************************************************************************************************/
/**********************************************************************************************************************
* History : DD.MM.YYYY Version Description
* : 08.01.2014 1.00 First Release
* : 26.12.2014 1.10 RX71M is added
* : 30.09.2015 1.11 RX63N/RX631 is added.
* : 30.09.2016 1.20 RX65N/RX651 is added.
* : 31.03.2018 1.23 Supporting Smart Configurator
* : 16.11.2018 1.24 Supporting RTOS Thread safe
* : 01.03.2020 1.30 RX72N/RX66N is added and uITRON is supported.
***********************************************************************************************************************/
/******************************************************************************
Includes <System Includes> , "Project Includes"
******************************************************************************/
#include "r_usb_basic_if.h"
#include "r_usb_typedef.h"
#include "r_usb_extern.h"
#include "r_usb_bitdefine.h"
#include "r_usb_reg_access.h"
#if (BSP_CFG_RTOS_USED != 0) /* Use RTOS */
#include "r_rtos_abstract.h"
#include "r_usb_cstd_rtos.h"
#endif /* (BSP_CFG_RTOS_USED != 0) */
#if defined(USB_CFG_HCDC_USE)
#include "r_usb_hcdc_if.h"
#endif  /* defined(USB_CFG_HCDC_USE) */
#if defined(USB_CFG_HHID_USE)
#include "r_usb_hhid_if.h"
#endif  /* defined(USB_CFG_HHID_USE) */
#if defined(USB_CFG_HMSC_USE)
#include "r_usb_hmsc_if.h"
#endif /* defined(USB_CFG_HMSC_USE) */
#if defined(USB_CFG_PCDC_USE)
#include "r_usb_pcdc_if.h"
#endif /* defined(USB_CFG_PCDC_USE) */
#if defined(USB_CFG_PMSC_USE)
#include "r_usb_pmsc_if.h"
#endif /* defined(USB_CFG_PMSC_USE) */
#if ((USB_CFG_DTC == USB_CFG_ENABLE) || (USB_CFG_DMA == USB_CFG_ENABLE))
#include "r_usb_dmac.h"
#endif /* ((USB_CFG_DTC == USB_CFG_ENABLE) || (USB_CFG_DMA == USB_CFG_ENABLE)) */
/******************************************************************************
Macro definitions
*****************************************************************************/
/******************************************************************************
Exported global variables (to be accessed by other files)
******************************************************************************/
/******************************************************************************
Private global variables and functions
*****************************************************************************/
#if (BSP_CFG_RTOS_USED != 0)        /* Use RTOS */
/* Per-pipe count of transfer-request messages currently parked in the
 * HCD/PCD "wait" sub-mailboxes, indexed by USB IP number and pipe number. */
static uint16_t g_rtos_msg_pipe[USB_NUM_USBIP][USB_MAXPIPE_NUM + 1];
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
/* Total number of messages queued in the PCD wait sub-mailbox. */
static uint16_t g_rtos_msg_count_pcd_sub = 0;
#endif /* (USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI */
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
/* Total number of messages queued in the HCD wait sub-mailbox. */
static uint16_t g_rtos_msg_count_hcd_sub = 0;
#endif /* (USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST */
#endif /* BSP_CFG_RTOS_USED != 0 */
/******************************************************************************
Renesas Abstracted Driver API functions
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_nrdy_enable
Description : Enable NRDY interrupt of the specified pipe.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : none
******************************************************************************/
void usb_cstd_nrdy_enable (usb_utr_t *ptr, uint16_t pipe)
{
    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return;
    }

    /* Turn on the NRDY interrupt for this pipe. */
    hw_usb_set_nrdyenb(ptr, pipe);
}
/******************************************************************************
End of function usb_cstd_nrdy_enable
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_get_pid
Description : Fetch specified pipe's PID.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : uint16_t PID-bit status
******************************************************************************/
uint16_t usb_cstd_get_pid (usb_utr_t *ptr, uint16_t pipe)
{
    uint16_t pipectr;

    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return USB_NULL;
    }

    /* Read the pipe control register and keep only the PID bits. */
    pipectr = hw_usb_read_pipectr(ptr, pipe);

    return (uint16_t)(pipectr & USB_PID);
}
/******************************************************************************
End of function usb_cstd_get_pid
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_get_maxpacket_size
Description : Fetch MaxPacketSize of the specified pipe.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : uint16_t MaxPacketSize
******************************************************************************/
uint16_t usb_cstd_get_maxpacket_size (usb_utr_t *ptr, uint16_t pipe)
{
    uint16_t maxp_reg;

    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return USB_NULL;
    }

    if (USB_PIPE0 != pipe)
    {
        /* Ordinary pipe: select it, then read its PIPEMAXP register. */
        hw_usb_write_pipesel(ptr, pipe);
        maxp_reg = hw_usb_read_pipemaxp(ptr);
    }
    else
    {
        /* Default control pipe: read DCPMAXP instead. */
        maxp_reg = hw_usb_read_dcpmaxp(ptr);
    }

    /* Keep only the max-packet-size field. */
    return (uint16_t)(maxp_reg & USB_MXPS);
}
/******************************************************************************
End of function usb_cstd_get_maxpacket_size
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_get_pipe_dir
Description : Get PIPE DIR
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : uint16_t pipe direction.
******************************************************************************/
uint16_t usb_cstd_get_pipe_dir (usb_utr_t *ptr, uint16_t pipe)
{
    uint16_t pipecfg;

    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return USB_NULL;
    }

    /* Select the pipe, then read its configuration register. */
    hw_usb_write_pipesel(ptr, pipe);
    pipecfg = hw_usb_read_pipecfg(ptr);

    /* Keep only the direction bits. */
    return (uint16_t)(pipecfg & USB_DIRFIELD);
}
/******************************************************************************
End of function usb_cstd_get_pipe_dir
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_get_pipe_type
Description : Fetch and return PIPE TYPE.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : uint16_t pipe type
******************************************************************************/
uint16_t usb_cstd_get_pipe_type (usb_utr_t *ptr, uint16_t pipe)
{
    uint16_t pipecfg;

    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return USB_NULL;
    }

    /* Select the pipe, then read its configuration register. */
    hw_usb_write_pipesel(ptr, pipe);
    pipecfg = hw_usb_read_pipecfg(ptr);

    /* Keep only the transfer-type bits. */
    return (uint16_t)(pipecfg & USB_TYPFIELD);
}
/******************************************************************************
End of function usb_cstd_get_pipe_type
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_do_aclrm
Description : Set the ACLRM-bit (Auto Buffer Clear Mode) of the specified
: pipe.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : none
******************************************************************************/
void usb_cstd_do_aclrm (usb_utr_t *ptr, uint16_t pipe)
{
    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return;
    }

    /* Pulse ACLRM: set then immediately clear the bit. */
    hw_usb_set_aclrm(ptr, pipe);
    hw_usb_clear_aclrm(ptr, pipe);
}
/******************************************************************************
End of function usb_cstd_do_aclrm
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_set_buf
Description : Set PID (packet ID) of the specified pipe to BUF.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : none
******************************************************************************/
void usb_cstd_set_buf (usb_utr_t *ptr, uint16_t pipe)
{
    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return;
    }

    /* Write PID = BUF into the pipe control register. */
    hw_usb_set_pid(ptr, pipe, USB_PID_BUF);
}
/******************************************************************************
End of function usb_cstd_set_buf
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_clr_stall
Description : Set up to NAK the specified pipe, and clear the STALL-bit set
: to the PID of the specified pipe.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe : Pipe number.
Return value : none
Note : PID is set to NAK.
******************************************************************************/
void usb_cstd_clr_stall (usb_utr_t *ptr, uint16_t pipe)
{
    /* Reject pipe numbers beyond the supported range. */
    if (pipe > USB_MAX_PIPE_NO)
    {
        return;
    }

    /* First force the pipe to NAK ... */
    usb_cstd_set_nak(ptr, pipe);

    /* ... then clear the STALL condition from the PID bits. */
    hw_usb_clear_pid(ptr, pipe, USB_PID_STALL);
}
/******************************************************************************
End of function usb_cstd_clr_stall
******************************************************************************/
/******************************************************************************
Function Name : usb_cstd_port_speed
Description : Get USB-speed of the specified port.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
:Return value : uint16_t : HSCONNECT, Hi-Speed
: : FSCONNECT : Full-Speed
: : LSCONNECT : Low-Speed
: : NOCONNECT : not connect
******************************************************************************/
uint16_t usb_cstd_port_speed (usb_utr_t *ptr)
{
    uint16_t rhst;
    uint16_t conn_inf;

    /* Extract the reset-handshake status field from DVSTCTR. */
    rhst = (uint16_t)(hw_usb_read_dvstctr(ptr) & USB_RHST);

    /* Map the handshake status to a connection-speed code. */
    switch (rhst)
    {
        case USB_HSMODE:
            conn_inf = USB_HSCONNECT;
            break;

        case USB_FSMODE:
            conn_inf = USB_FSCONNECT;
            break;

        case USB_LSMODE:
            conn_inf = USB_LSCONNECT;
            break;

        case USB_HSPROC: /* reset handshake still in progress */
        default:
            conn_inf = USB_NOCONNECT;
            break;
    }

    return (conn_inf);
}
/******************************************************************************
End of function usb_cstd_port_speed
******************************************************************************/
/******************************************************************************
Function Name : usb_set_event
Description : Set event.
Arguments : uint16_t event : event code.
: usb_ctrl_t *p_ctrl : control structure for USB API.
Return value : none
******************************************************************************/
void usb_set_event (usb_status_t event, usb_ctrl_t *p_ctrl)
{
#if (BSP_CFG_RTOS_USED == 0)    /* Non-OS */
    /* Non-OS build: store the event in the application event ring buffer. */
    g_usb_cstd_event.code[g_usb_cstd_event.write_pointer] = event;
    g_usb_cstd_event.ctrl[g_usb_cstd_event.write_pointer] = *p_ctrl;
    g_usb_cstd_event.write_pointer++;
    if (g_usb_cstd_event.write_pointer >= USB_EVENT_MAX)
    {
        /* Wrap the ring-buffer write index. */
        g_usb_cstd_event.write_pointer = 0;
    }
#else /* (BSP_CFG_RTOS_USED == 0) */
    /* RTOS build: deliver the event immediately via the registered
     * application callback. 'count' cycles through the event array —
     * presumably so each delivered pointer stays valid while newer events
     * are stored in other slots; confirm against the callback contract. */
    static uint16_t count = 0;

    p_ctrl->event = event;
    g_usb_cstd_event[count] = *p_ctrl;
    switch (event)
    {
        case USB_STS_DEFAULT :
            (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_OFF);
            break;

        case USB_STS_CONFIGURED :
            (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_OFF);
            break;

        case USB_STS_BC :
        case USB_STS_OVERCURRENT :
        case USB_STS_NOT_SUPPORT :
            (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_OFF);
            break;

        case USB_STS_DETACH :
            (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_OFF);
            break;

        case USB_STS_REQUEST :
            /* Setup requests pass USB_ON as the last callback argument. */
            (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_ON);
            break;

        case USB_STS_SUSPEND :
        case USB_STS_RESUME :
            /* Host side forwards p_ctrl->p_data (cast to a task id);
             * peripheral side passes no task. */
            if (USB_HOST == g_usb_usbmode)
            {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
                (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)p_ctrl->p_data, USB_OFF);
#endif /* (USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST */
            }
            else
            {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
                (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_OFF);
#endif /* (USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI */
            }
            break;

        case USB_STS_REQUEST_COMPLETE :
            if (USB_HOST == g_usb_usbmode)
            {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
                (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)p_ctrl->p_data, USB_OFF);
#endif /* (USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST */
            }
            else
            {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
                if (0 == p_ctrl->setup.length)
                {
                    /* Processing for USB request has the no data stage */
                    (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)USB_NULL, USB_OFF);
                }
                else
                {
                    /* Processing for USB request has the data state */
                    (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)p_ctrl->p_data, USB_OFF);
                }
#endif /* (USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI */
            }
            break;

        case USB_STS_READ_COMPLETE :
        case USB_STS_WRITE_COMPLETE :
#if defined(USB_CFG_HMSC_USE)
        case USB_STS_MSC_CMD_COMPLETE:
#endif /* defined(USB_CFG_HMSC_USE) */
            (*g_usb_apl_callback)(&g_usb_cstd_event[count], (rtos_task_id_t)p_ctrl->p_data, USB_OFF);
            break;

        default :
            /* Do Nothing */
            break;
    }
    /* Advance to the next event slot (wraps at USB_EVENT_MAX). */
    count = ((count + 1) % USB_EVENT_MAX);
#endif /*(BSP_CFG_RTOS_USED == 0)*/
} /* End of function usb_set_event() */
#if (BSP_CFG_RTOS_USED == 0) /* Non-OS */
/******************************************************************************
Function Name : usb_cstd_usb_task
Description : USB driver main loop processing.
Arguments : none
Return value : none
******************************************************************************/
void usb_cstd_usb_task (void)
{
    if ( USB_HOST == g_usb_usbmode)
    {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
#if defined(USB_CFG_HMSC_USE)
        /* For HMSC, keep running the host tasks until the pending drive
         * search (g_drive_search_lock) is released. */
        do
        {
#endif /* defined(USB_CFG_HMSC_USE) */
            usb_cstd_scheduler(); /* Scheduler */
            if (USB_FLGSET == usb_cstd_check_schedule()) /* Check for any task processing requests flags. */
            {
                /** Use only in non-OS. In RTOS, the kernel will schedule these tasks, no polling. **/
                usb_hstd_hcd_task((usb_vp_int_t) 0); /* HCD Task */
                usb_hstd_mgr_task((usb_vp_int_t) 0); /* MGR Task */
#if USB_CFG_HUB == USB_CFG_ENABLE
                usb_hstd_hub_task((usb_vp_int_t) 0); /* HUB Task */
#endif /* USB_CFG_HUB == USB_CFG_ENABLE */
#if defined(USB_CFG_HCDC_USE) || defined(USB_CFG_HHID_USE) || defined(USB_CFG_HMSC_USE) || defined(USB_CFG_HVND_USE)
                /* Run the enabled host device-class task(s). */
                usb_class_task();
#endif /* defined(USB_CFG_HCDC_USE)||defined(USB_CFG_HHID_USE)||defined(USB_CFG_HMSC_USE)||defined(USB_CFG_HVND_USE) */
            }
#if defined(USB_CFG_HMSC_USE)
        }
        /* WAIT_LOOP */
        while (USB_FALSE != g_drive_search_lock);
#endif /* defined(USB_CFG_HMSC_USE) */
#endif /*( (USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST )*/
    }
    else
    {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
        /* Peripheral side: run the PCD task, plus the PMSC task when the
         * PMSC class has been opened on the configured USB IP. */
        usb_pstd_pcd_task();
#if defined(USB_CFG_PMSC_USE)
        if (USB_NULL != (g_usb_open_class[USB_CFG_USE_USBIP] & (1 << USB_PMSC))) /* Check USB Open device class */
        {
            usb_pmsc_task();
        }
#endif /* defined(USB_CFG_PMSC_USE) */
#endif /*( (USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI )*/
    }
} /* End of function usb_cstd_usb_task() */
/******************************************************************************
Function Name : usb_class_task
Description : Each device class task processing
Arguments : none
Return value : none
******************************************************************************/
void usb_class_task (void)
{
#if defined(USB_CFG_HMSC_USE)
    usb_utr_t utr;
    uint16_t addr;

    usb_hmsc_task(); /* USB Host MSC driver task */
    usb_hmsc_strg_drive_task(); /* HSTRG Task */

    /* Start a storage drive search for the next queued device, but only
     * when no other search is currently in progress. */
    if (USB_FALSE == g_drive_search_lock)
    {
        if (g_drive_search_que_cnt > 0)
        {
            g_drive_search_lock = g_drive_search_que[0];

            /* The queued value encodes both the USB IP and the device
             * address; decode the IP first. */
            utr.ip = USB_IP0;
            if (USBA_ADDRESS_OFFSET == (g_drive_search_lock & USB_IP_MASK))
            {
                utr.ip = USB_IP1;
            }
            addr = g_drive_search_lock & USB_ADDRESS_MASK;
            utr.ipp = usb_hstd_get_usb_ip_adr(utr.ip); /* Get the USB IP base address. */

            /* Storage drive search. */
            usb_hmsc_strg_drive_search(&utr, addr, (usb_cb_t) usb_hmsc_drive_complete);
        }
    }
#endif /* defined(USB_CFG_HMSC_USE) */

#if defined(USB_CFG_HCDC_USE)
    usb_hcdc_task((usb_vp_int_t) 0); /* USB Host CDC driver task */
#endif /* defined(USB_CFG_HCDC_USE) */

#if defined(USB_CFG_HHID_USE)
    usb_hhid_task((usb_vp_int_t) 0); /* USB Host HID driver task */
#endif /* defined(USB_CFG_HHID_USE) */
} /* End of function usb_class_task */
#endif /*(BSP_CFG_RTOS_USED == 0)*/
#if (BSP_CFG_RTOS_USED != 0) /* Use RTOS */
/******************************************************************************
Function Name : usb_rtos_delete_msg_submbx
Description : Message clear for PIPE Transfer wait que.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t usb_mode : Mode (USB_HOST / peripheral).
Return : none
******************************************************************************/
void usb_rtos_delete_msg_submbx (usb_utr_t *p_ptr, uint16_t usb_mode)
{
    usb_utr_t *mess;
    uint16_t i;
    uint16_t ip;
    uint16_t pipe;

    /* Resolve which USB IP the pipe belongs to.
     * NOTE(review): if usb_mode selects a mode that is compiled out of
     * USB_CFG_MODE, 'ip' is left uninitialized here — confirm callers can
     * only pass a configured mode. */
    if (USB_HOST == usb_mode)
    {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
        ip = p_ptr->ip;
#endif /* ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST) */
    }
    else
    {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
        ip = USB_CFG_USE_USBIP;
#endif /* ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI) */
    }
    pipe = p_ptr->pipectr;

    /* No messages queued for this pipe: nothing to delete. */
    if (0 == g_rtos_msg_pipe[ip][pipe])
    {
        return;
    }

    if (USB_HOST == usb_mode)
    {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
        /* Pass over the HCD wait mailbox once: release messages matching
         * this IP/pipe back to the memory pool, requeue the rest. */
        /* WAIT_LOOP */
        for (i = 0; i != g_rtos_msg_count_hcd_sub; i++)
        {
            rtos_receive_mailbox(&g_rtos_usb_hcd_sub_mbx_id, (void **)&mess, RTOS_ZERO);
            if ((ip == mess->ip)&&(pipe == mess->keyword))
            {
                rtos_release_fixed_memory (&g_rtos_usb_mpf_id, (void *)mess);
            }
            else
            {
                rtos_send_mailbox (&g_rtos_usb_hcd_sub_mbx_id, (void *)mess);
            }
        }
        g_rtos_msg_count_hcd_sub -= g_rtos_msg_pipe[ip][pipe];
#endif /* ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST) */
    }
    else
    {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
        /* Same for the PCD wait mailbox (single IP, so match pipe only). */
        /* WAIT_LOOP */
        for (i = 0; i != g_rtos_msg_count_pcd_sub; i++)
        {
            rtos_receive_mailbox (&g_rtos_usb_pcd_sub_mbx_id, (void **)&mess, RTOS_ZERO);
            if (pipe == mess->keyword)
            {
                rtos_release_fixed_memory (&g_rtos_usb_mpf_id, (void *)mess);
            }
            else
            {
                rtos_send_mailbox (&g_rtos_usb_pcd_sub_mbx_id, (void *)mess);
            }
        }
        g_rtos_msg_count_pcd_sub -= g_rtos_msg_pipe[ip][pipe];
#endif /* ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI) */
    }
    /* All queued messages for this pipe are gone now. */
    g_rtos_msg_pipe[ip][pipe] = 0;
}
/******************************************************************************
End of function usb_rtos_delete_msg_submbx
******************************************************************************/
/******************************************************************************
Function Name : usb_rtos_resend_msg_to_submbx
Description : Get PIPE Transfer wait que and Message send to HCD/PCD
Argument : uint16_t pipe_no : Pipe no.
Return : none
******************************************************************************/
void usb_rtos_resend_msg_to_submbx (uint16_t ip, uint16_t pipe, uint16_t usb_mode)
{
    usb_utr_t *mess;

    /* Validate the pipe number. */
    if ((USB_MIN_PIPE_NO > pipe) || (USB_MAXPIPE_NUM < pipe))
    {
        return;
    }

    /* No transfer request waiting for this pipe. */
    if (0 == g_rtos_msg_pipe[ip][pipe])
    {
        return;
    }

    if (USB_HOST == usb_mode)
    {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
        /* Rotate through the wait mailbox until the first message for this
         * IP/pipe is found, then forward it to the main HCD mailbox;
         * non-matching messages are requeued at the tail. */
        /* WAIT_LOOP */
        while(1)
        {
            rtos_receive_mailbox (&g_rtos_usb_hcd_sub_mbx_id, (void **)&mess, RTOS_ZERO);
            if ((mess->ip == ip) && (mess->keyword == pipe))
            {
                g_rtos_msg_pipe[ip][pipe]--;
                g_rtos_msg_count_hcd_sub--;
                rtos_send_mailbox (&g_rtos_usb_hcd_mbx_id,(void *)mess);
                break;
            }
            else
            {
                rtos_send_mailbox (&g_rtos_usb_hcd_sub_mbx_id, (void *)mess);
            }
        }
#endif /* ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST) */
    }
    else
    {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
        /* Same for the peripheral (PCD) wait mailbox; single IP, so match
         * on the pipe number only. */
        /* WAIT_LOOP */
        while(1)
        {
            rtos_receive_mailbox (&g_rtos_usb_pcd_sub_mbx_id, (void **)&mess, RTOS_ZERO);
            if (mess->keyword == pipe)
            {
                g_rtos_msg_pipe[ip][pipe]--;
                g_rtos_msg_count_pcd_sub--;
                rtos_send_mailbox (&g_rtos_usb_pcd_mbx_id,(void *)mess);
                break;
            }
            else
            {
                rtos_send_mailbox (&g_rtos_usb_pcd_sub_mbx_id, (void *)mess);
            }
        }
#endif /* ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI) */
    }
}
/******************************************************************************
End of function usb_rtos_resend_msg_to_submbx
******************************************************************************/
/******************************************************************************
Function Name : usb_rtos_send_msg_to_submbx
Description     : Message forward to PIPE Transfer wait que.
Arguments : usb_utr_t *ptr : Pointer to usb_utr_t structure.
: uint16_t pipe_no : Pipe no.
Return : none
******************************************************************************/
void usb_rtos_send_msg_to_submbx (usb_utr_t *p_ptr, uint16_t pipe_no, uint16_t usb_mode)
{
    /* Validate the pipe number. */
    if ((USB_MIN_PIPE_NO > pipe_no) || (USB_MAXPIPE_NUM < pipe_no))
    {
        return;
    }

    if (USB_HOST == usb_mode)
    {
#if ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST)
        /* Park the request in the HCD wait mailbox and bump the counters. */
        g_rtos_msg_pipe[p_ptr->ip][pipe_no]++;
        g_rtos_msg_count_hcd_sub++;
        rtos_send_mailbox (&g_rtos_usb_hcd_sub_mbx_id, (void *)p_ptr);
#endif /* ((USB_CFG_MODE & USB_CFG_HOST) == USB_CFG_HOST) */
    }
    else
    {
#if ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI)
        /* Park the request in the PCD wait mailbox and bump the counters. */
        g_rtos_msg_pipe[USB_CFG_USE_USBIP][pipe_no]++;
        g_rtos_msg_count_pcd_sub++;
        rtos_send_mailbox (&g_rtos_usb_pcd_sub_mbx_id, (void *)p_ptr);
#endif /* ((USB_CFG_MODE & USB_CFG_PERI) == USB_CFG_PERI) */
    }
}
/******************************************************************************
End of function usb_rtos_send_msg_to_submbx
******************************************************************************/
#endif /* (BSP_CFG_RTOS_USED != 0) */
/******************************************************************************
End Of File
******************************************************************************/
|
/**
* Copyright (c) 2013-present, Facebook, Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
*/
'use strict';

const DEFAULT_MARKUP =
  '<!doctype html><html><meta charset=utf-8><title>test doc</title>';

/**
 * Rewrites the current document with the supplied markup (or a minimal
 * default document when none is given) and returns the global `document`.
 */
function getTestDocument(markup) {
  document.open();
  document.write(markup || DEFAULT_MARKUP);
  document.close();
  return document;
}

module.exports = getTestDocument;
|
define(["leaflet", "map.core", "map.events"], function(L, mapCore, mapEvents) {
    let button;
    let autoFocusEnabled = true;

    const eventEmitter = mapCore.getEventEmitter();

    // Re-fit the viewport, but only while auto-focus is switched on.
    function fitBoundsWhenAutoFocusing() {
        if(!autoFocusEnabled) {
            return;
        }
        eventEmitter.emit(mapEvents.viewport.staleBounds);
    }

    // Toggle auto-focus and mirror the new state on the control button.
    function handleClick() {
        autoFocusEnabled = !autoFocusEnabled;
        fitBoundsWhenAutoFocusing();
        if(autoFocusEnabled) {
            L.DomUtil.addClass(button, 'focusing');
        }
        else {
            L.DomUtil.removeClass(button, 'focusing');
        }
    }

    // Any of these events may change the set of markers to keep in view.
    const autoFocusingEvents = [
        mapEvents.viewport.boundedMarkerCreated,
        mapEvents.viewport.boundedMarkerRemoved,
        mapEvents.location.singleReceived,
        mapEvents.location.allReceived
    ];
    for (const event of autoFocusingEvents) {
        eventEmitter.addListener(event, fitBoundsWhenAutoFocusing);
    }

    // Leaflet control rendering the auto-focus toggle button.
    L.Control.AutoFocus = L.Control.extend({
        onAdd: function() {
            const container = L.DomUtil.create('div', 'leaflet-bar leaflet-control');

            button = L.DomUtil.create('a', 'auto-focus focusing', container);
            button.innerHTML = '<i class="icon-binoculars" />';
            button.role = "button";
            button.href = "#";

            L.DomEvent.disableClickPropagation(button);
            L.DomEvent.on(button, 'click', handleClick);

            return container;
        },
        onRemove: function() {
            L.DomEvent.off(button, 'click', handleClick);
        }
    });

    L.control.autofocus = function(options) {
        return new L.Control.AutoFocus(options);
    };

    // Install the control on the shared map instance right away.
    L.control.autofocus({ position: 'bottomleft' }).addTo(mapCore.getInstance());
});
|
/*
* ScopedLock.h
* ------------
* Purpose: A wrapper class for CSoundFile and CModDoc that ensures that access to those objects is done while a lock is held.
* Notes : (currently none)
* Authors: OpenMPT Devs
* The OpenMPT source code is released under the BSD license. Read LICENSE for more details.
*/
#pragma once
#include "../../../soundlib/AudioCriticalSection.h"
#include "../../Moddoc.h"
OPENMPT_NAMESPACE_BEGIN
// Holds a reference to an object together with a CriticalSection, so the
// object can only be reached while the lock is held. Implicit conversion
// operators let a ScopedLock be used wherever the wrapped object (by
// reference or pointer) or the CriticalSection itself is expected.
template<typename T>
struct ScopedLock
{
	T &m_object;
	CriticalSection m_cs;

	// Adopts an already-acquired CriticalSection.
	ScopedLock(T &object, CriticalSection &&cs) noexcept : m_object(object), m_cs(std::move(cs)) { }
	// Move-converts from a lock on the non-const object type (allows
	// constructing a const lock from a non-const one).
	ScopedLock(ScopedLock<typename std::remove_const<T>::type> &&other) noexcept : m_object(other.m_object), m_cs(std::move(other.m_cs)) { }
	// Re-targets to a different object while keeping the lock from another
	// ScopedLock (the source lock must not be used afterwards).
	template<typename Tother>
	ScopedLock(T &object, ScopedLock<Tother> &&lockFrom) noexcept : m_object(object), m_cs(std::move(lockFrom.m_cs)) { }

	operator const CriticalSection& () const noexcept { return m_cs; }
	operator T& () noexcept { return m_object; }
	operator const T& () const noexcept { return m_object; }
	operator T* () noexcept { return &m_object; }
	operator const T* () const noexcept { return &m_object; }
	T* operator-> () noexcept { return &m_object; }
	const T* operator-> () const noexcept { return &m_object; }
};
using CModDocLock = ScopedLock<CModDoc>;
using CModDocLockConst = ScopedLock<const CModDoc>;
// ScopedLock around the CSoundFile owned by a (locked) CModDoc.
// Constructing from a CModDocLock transfers the held CriticalSection.
template<typename SndFileType>
struct CSoundFileLockT : public ScopedLock<SndFileType>
{
	CSoundFileLockT(CModDocLock &&modDocLock) noexcept : ScopedLock<SndFileType>(modDocLock->GetSoundFile(), std::move(modDocLock.m_cs)) { }
	CSoundFileLockT(CModDocLockConst &&modDocLock) noexcept : ScopedLock<SndFileType>(modDocLock->GetSoundFile(), std::move(modDocLock.m_cs)) { }
	// Fixed: the inherited member is m_object, not m_sndFile — the previous
	// `other.m_sndFile` failed to compile as soon as this move constructor
	// was instantiated.
	CSoundFileLockT(CSoundFileLockT<SndFileType> &&other) noexcept : ScopedLock<SndFileType>(other.m_object, std::move(other.m_cs)) { }
};
using CSoundFileLock = CSoundFileLockT<CSoundFile>;
using CSoundFileLockConst = CSoundFileLockT<const CSoundFile>;
// Wraps `object` in a ScopedLock, adopting the already-acquired lock `cs`.
template<typename T>
ScopedLock<T> make_lock(T &object, CriticalSection &&cs) noexcept
{
	return {object, std::move(cs)};
}
// Wraps `object` in a ScopedLock, transferring the lock held by `lockFrom`
// (the source lock must not be used afterwards).
template<typename T, typename Tother>
ScopedLock<T> make_lock(T &object, ScopedLock<Tother> &&lockFrom) noexcept
{
	return {object, std::move(lockFrom.m_cs)};
}
OPENMPT_NAMESPACE_END
|
import React from "react";
import { GoogleLogout } from "react-google-login";
import { connect } from "react-redux";
import { signOut } from "../redux/auth/actions";
const clientId =
"1005866627235-pkltkjsfn593b70jaeqs8bo841dgtob3.apps.googleusercontent.com";
function Logout(props) {
const onSuccess = () => {
props.signOut();
alert("Logout made successfully ✌");
};
return (
<div>
<GoogleLogout
clientId={clientId}
buttonText="Logout"
onLogoutSuccess={onSuccess}
></GoogleLogout>
</div>
);
}
export default connect(null, { signOut })(Logout);
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef VERSIONAPI_H
#define VERSIONAPI_H
#include "updater.h"
// Updater implementation that performs a version check.
// NOTE(review): presumably fetches the latest released version from a
// remote endpoint and parses it in processData() — confirm in the .cpp.
class VersionApi final : public Updater {
  Q_DISABLE_COPY_MOVE(VersionApi)

 public:
  VersionApi(QObject* parent);
  ~VersionApi();

  // Begins the (asynchronous) update check.
  void start() override;

  // compare 2 version strings and return:
  // - -1 if the first one is lower than the second one or if the second one is
  //   empty.
  // - 0 if they are equal
  // - 1 if the first one is greater than the second one or if the first one is
  //   empty.
  static int compareVersions(const QString& a, const QString& b);

  // Strips the minor version
  // e.g 2.2.2 -> 2.2.0
  static QString stripMinor(const QString& a);

 private:
  // Handles the raw response payload; the boolean result reports whether
  // the data could be processed.
  [[nodiscard]] bool processData(const QByteArray& data);
};
#endif // VERSIONAPI_H
|
$(document).ready(function () {
    // Parallax backgrounds for both hero sections.
    ['#parallax', '#parallax2'].forEach(function (selector) {
        Global.initParallax({ parent: selector });
    });

    // Carousels.
    Global.fourSlider({ slider: '.four-slider' });
    Global.threeSlider({ slider: '.events' });

    // Keep slider items square (content height tracks item width).
    Global.square({
        widthEl: '.four-slider__item',
        heightEl: '.four-slider__content',
        offset: 25
    });

    // Burger button toggles the mobile navigation.
    $('.menu__btn').click(function () {
        $(this).toggleClass('is-active');
        $('.nav, .header__wrap').toggleClass('is-active');
    });

    // Scroll animations.
    AOS.init({
        initClassName: 'aos-init',
        animatedClassName: 'aos-animate',
    });
});
/*- end doc ready -*/
/*- resize doc -*/
$(window).on('resize', function () {
    // Recompute the square sizing whenever the viewport changes.
    var squareOptions = {
        widthEl: '.four-slider__item',
        heightEl: '.four-slider__content',
        offset: 25
    };
    Global.square(squareOptions);
});
|
//control table
// json_data      : last dataset fetched from the server (parsed JSON)
// dotprobe_table : DataTables instance built around #database
var json_data, dotprobe_table;

// Fetches rows from php/get.php and (re)builds the #database DataTable.
// If the server replies with a redirect object instead of row data,
// the browser is navigated to the given location.
function serverData() {
    $.ajax({
        url: "php/get.php", // this is the path to the above PHP script
        type: 'post',
        success: function (data) {
            json_data = JSON.parse(data);
            //if redirect message
            if (json_data.redirect !== undefined && json_data.redirect){
                // data.location contains the string URL to redirect to
                console.log('redirect true')
                window.location.href = json_data.location;
            } else {
                // NOTE(review): this condition is always true — JSON.parse
                // returns a fresh object on every call and != compares
                // object identity, never contents. If the intent was "only
                // rebuild when the data changed", compare the raw response
                // strings (or do a deep comparison) instead — confirm intent.
                if (json_data != JSON.parse(data)){
                    console.log('not equal')
                    dotprobe_table = $('#database').DataTable({
                        lengthMenu: [[25, 50, 100, -1], [25, 50, 100, "All"]],
                        lengthChange: false,
                        responsive: true,
                        order: [[ 0, "desc" ]],
                        // dom: B = export buttons, f = search box, t = table, p = paging
                        dom: '<"download"B><"search"f>tp',
                        buttons: ['excel', 'csv', {
                            // Custom button exporting the raw dataset as JSON.
                            text: 'JSON',
                            action: function (e, dt, button, config) {
                                var data = dt.buttons.exportData();
                                $.fn.dataTable.fileSave(new Blob([JSON.stringify(json_data)]),'dotprobe-js.json')
                            }
                        }],
                        processing: true,
                        data: json_data,
                        //serverSide: true,
                        columns: [
                            {"data": "id"},
                            {"data": "code"},
                            {"data": "group"}],
                    });
                    //dotprobe_table.buttons().container().prependTo($('#database_filter'));
                    // Float the export buttons left and the search box right.
                    $(".download").css({"float": "left", "margin-bottom":'5px'});
                    $(".search").css({"float": "right"});
                    //highlight table on hover
                    $('#database').on('mouseenter', 'td', function () {
                        var rowIdx = dotprobe_table.cell(this).index().row;
                        $(dotprobe_table.rows().nodes()).removeClass('highlight');
                        $(dotprobe_table.rows(rowIdx).nodes()).addClass('highlight');
                    });
                } //else {console.log('equal')};
            }
        }
    })
}
|
from setuptools import setup, Extension

# check if cython or pyrex is available.
pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'
for pyrex_impl in pyrex_impls:
    try:
        # from (pyrex_impl) import build_ext
        build_ext = __import__(pyrex_impl, fromlist=['build_ext']).build_ext
        break
    except ImportError:
        # This implementation is not installed; try the next one.
        # (Narrowed from a bare `except:` which would also have swallowed
        # unrelated errors such as KeyboardInterrupt.)
        pass
have_pyrex = 'build_ext' in globals()

if have_pyrex:
    cmdclass = {'build_ext': build_ext}
    PYREX_SOURCE = "src/_region_filter.pyx"
else:
    cmdclass = {}
    PYREX_SOURCE = "src/_region_filter.c"

import sys
import warnings

# If you don't want to build filtering module (which requires a C
# compiler), set it to False
WITH_FILTER = True

# Read __version__ out of lib/version.py without importing the package.
# Use a context manager so the file handle is closed deterministically.
with open('lib/version.py') as version_file:
    for line in version_file:
        if line.startswith('__version__'):
            exec(line.strip())
def main():
    """Assemble the setup() keyword arguments and run the build/install."""
    # Pick a pyparsing requirement compatible with the running interpreter.
    if sys.version_info[0] >= 3:
        install_requires = ['pyparsing>=2.0.0']
    elif sys.version_info[:2] >= (2, 6):
        # For Python 2.6 and 2.7, any version *except* 2.0.0 will work
        install_requires = ['pyparsing!=2.0.0']
    else:
        # For Python < 2.6, a version before 2.0.0 is required
        install_requires = ['pyparsing<2.0.0']

    # Base distribution metadata; extended conditionally below.
    ka = dict(name = "pyregion",
              version = __version__,
              description = "python parser for ds9 region files",
              author = "Jae-Joon Lee",
              author_email = "lee.j.joon@gmail.com",
              url="http://leejjoon.github.com/pyregion/",
              download_url="http://github.com/leejjoon/pyregion/downloads",
              license = "MIT",
              platforms = ["Linux","MacOS X"],
              packages = ['pyregion'],
              package_dir={'pyregion':'lib'},
              install_requires = install_requires,
              use_2to3 = False,
              )

    ka["classifiers"]=['Development Status :: 5 - Production/Stable',
                       'Intended Audience :: Science/Research',
                       'License :: OSI Approved :: MIT License',
                       'Operating System :: MacOS :: MacOS X',
                       'Operating System :: POSIX :: Linux',
                       'Programming Language :: Cython',
                       'Programming Language :: Python',
                       'Programming Language :: Python :: 3',
                       'Topic :: Scientific/Engineering :: Astronomy',
                       ]

    # Only build the C/Cython filtering extension when requested; it needs
    # numpy headers and a working C compiler.
    if WITH_FILTER:
        try:
            import numpy
        except ImportError:
            warnings.warn("numpy must be installed to build the filtering module.")
            sys.exit(1)

        try:
            numpy_include = numpy.get_include()
        except AttributeError:
            # fallback for very old numpy releases
            numpy_include = numpy.get_numpy_include()

        if cmdclass:
            ka["cmdclass"] = cmdclass

        # PYREX_SOURCE is the .pyx file when Cython/Pyrex is available,
        # otherwise the pre-generated C source.
        ka["ext_modules"] = [ Extension("pyregion._region_filter",
                                        [PYREX_SOURCE],
                                        include_dirs=['./src',
                                                      numpy_include,
                                                      ],
                                        libraries=[],
                                        )
                              ]

    setup(**ka)

if __name__ == "__main__":
    main()
|
import React from 'react';
import ReactDOM from 'react-dom';
import "bootstrap/dist/css/bootstrap.css";
import App from './app';

// Mount the root component into #root, sized to the viewport at load time.
// NOTE(review): the style prop only takes effect if App forwards it to a
// DOM element internally -- confirm against ./app.
ReactDOM.render(<App style = {{resize : 'both', width : window.innerWidth, height : window.innerHeight}}/>, document.getElementById("root"));
|
/**
 * Minimal XMLHttpRequest wrapper.
 *
 * @param {string} method  HTTP verb ('get', 'post', ...).
 * @param {string} url     Request URL.
 * @param {Object} options { data, success(response), fail(xhr, status) }.
 */
function core (method, url, options) {
  let xhr = new XMLHttpRequest()
  xhr.open(method, url)
  xhr.onload = () => {
    // `load` fires for every completed request, including HTTP error
    // statuses (4xx/5xx); the original invoked `success` unconditionally.
    // Route non-2xx responses to the fail callback instead.
    if (xhr.status >= 200 && xhr.status < 300) {
      options.success && options.success(xhr.response)
    } else {
      options.fail && options.fail(xhr, xhr.status)
    }
  }
  xhr.onerror = () => {
    // Network-level failure (no HTTP response at all).
    options.fail && options.fail(xhr, xhr.status)
  }
  xhr.send(options.data)
}
// Thin HTTP verb helpers built on core(). The original module implemented
// only `post` and left the other verbs as empty no-op stubs; all verbs now
// share the same (url, options) contract, options = { data, success, fail }.
export default {
  get (url, options) {
    return core('get', url, options)
  },
  post (url, options) {
    return core('post', url, options)
  },
  put (url, options) {
    return core('put', url, options)
  },
  delete (url, options) {
    return core('delete', url, options)
  },
  patch (url, options) {
    return core('patch', url, options)
  }
}
|
#!/usr/bin/env python
import os
import optparse
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.log import logger
from pip.util import get_installed_distributions, get_prog
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import commands, get_summaries, get_similar_commands
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions  # module-level alias used throughout this file

# The version as used in the setup.py and the docs conf.py
__version__ = "1.6.dev1"
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).

    Enable by sourcing one of the completion shell scripts (bash or zsh).

    Prints candidate completions to stdout and exits; never returns to the
    caller once completion mode is active.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # COMP_WORDS / COMP_CWORD are exported by the shell completion script:
    # the words typed so far and the index of the word being completed.
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None

    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands[subcommand_name]()
        # collect (option-string, nargs) pairs for the subcommand
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        if current.startswith('-') or current.startswith('--'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)
            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def create_main_parser():
    """Build and return pip's top-level option parser (general options only)."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()

    # Version banner: "pip <ver> from <pkg dir> (python <X.Y>)".
    pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, pip_pkg_dir, sys.version[:3])

    # add the general options
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser))

    parser.main = True  # so the help formatter knows

    # create command listing for description
    summary_lines = ['%-27s %s' % (name, summary)
                     for name, summary in get_summaries()]
    parser.description = '\n'.join([''] + summary_lines)

    return parser
def parseopts(args):
    """Split ``args`` into a subcommand name and the remaining arguments.

    :param args: full argument list (without the program name).
    :returns: ``(cmd_name, cmd_args)`` -- the lower-cased subcommand name and
        ``args`` with the subcommand token removed.
    :raises CommandError: if the subcommand is not a known command.

    May call ``sys.exit()`` directly for ``--version`` and bare ``pip`` /
    ``pip help`` invocations.
    """
    parser = create_main_parser()
    # Note: parser calls disable_interspersed_args(), so the result of this
    # call is to split the initial args into the general options before the
    # subcommand and everything else.
    # For example:
    #  args: ['--timeout=5', 'install', '--user', 'INITools']
    #  general_options: ['--timeout==5']
    #  args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)
    # --version
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()
    # pip || pip help -> print_help()
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()
    # the subcommand name
    cmd_name = args_else[0].lower()
    # all the args without the subcommand
    cmd_args = args[:]
    # BUG FIX: remove the token exactly as the user typed it. The original
    # removed the lower-cased form, which raised ValueError for mixed-case
    # input such as 'pip Install' (cmd_args contains 'Install', not 'install').
    cmd_args.remove(args_else[0])
    if cmd_name not in commands:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))
    return cmd_name, cmd_args
def main(initial_args=None):
    """pip entry point: parse the command line and dispatch to a subcommand.

    :param initial_args: argument list; defaults to ``sys.argv[1:]``.
    :returns: the subcommand's exit status.
    """
    args = sys.argv[1:] if initial_args is None else initial_args

    autocomplete()

    try:
        cmd_name, cmd_args = parseopts(args)
    except PipError as exc:
        sys.stderr.write("ERROR: %s" % exc)
        sys.stderr.write(os.linesep)
        sys.exit(1)

    return commands[cmd_name]().main(cmd_args)
def bootstrap():
    """
    Bootstrapping function to be called from install-pip.py script.
    """
    packages = ['pip']
    try:
        import setuptools  # noqa -- probing availability only
    except ImportError:
        # setuptools is missing; install it alongside pip.
        packages.append('setuptools')
    return main(['install', '--upgrade'] + packages + sys.argv[1:])
############################################################
## Writing freeze files
class FrozenRequirement(object):
    """A single requirement line for ``pip freeze`` output.

    Wraps a requirement string plus whether it should be emitted in editable
    (``-e``) form and any explanatory comment lines to print above it.
    """

    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments

    # Version suffixes produced by setuptools egg_info for svn checkouts:
    # '-r<rev>' (revision) or '-20YYMMDD'-style date snapshots.
    _rev_re = re.compile(r'-r(\d+)$')
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')

    @classmethod
    def from_dist(cls, dist, dependency_links, find_tags=False):
        """Build a FrozenRequirement from an installed distribution.

        :param dist: a pkg_resources distribution object.
        :param dependency_links: URLs to search when reconstructing an svn
            location for snapshot-versioned dists.
        :param find_tags: passed through to ``get_src_requirement``.
        """
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        # Imported here rather than at module level -- presumably to avoid a
        # circular import; TODO confirm.
        from pip.vcs import vcs, get_src_requirement
        if vcs.get_backend_name(location):
            # Installed from a VCS checkout: freeze as an editable URL.
            editable = True
            try:
                req = get_src_requirement(dist, location, find_tags)
            except InstallationError as exc:
                logger.warn(
                    "Error when trying to get requirement for VCS system %s, "
                    "falling back to uneditable format" % exc
                )
                req = None
            if req is None:
                # Fall back to a plain 'name==version' requirement.
                logger.warn(
                    'Could not determine repository location of %s' % location
                )
                comments.append(
                    '## !! Could not determine repository location'
                )
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            # as_requirement() pins exactly one '==' spec for installed dists.
            specs = req.specs
            assert len(specs) == 1 and specs[0][0] == '=='
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            if ver_match or date_match:
                # Version carries an svn revision/date suffix: try to
                # reconstruct an editable svn URL for it.
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend(
                    ).get_location(dist, dependency_links)
                if not svn_location:
                    logger.warn(
                        'Warning: cannot find svn location for %s' % req)
                    comments.append(
                        '## FIXME: could not find svn URL in dependency_links '
                        'for this package:'
                    )
                else:
                    comments.append(
                        '# Installing as editable to satisfy requirement %s:' %
                        req
                    )
                    if ver_match:
                        rev = ver_match.group(1)
                    else:
                        # svn date-revision syntax: '{YYYYMMDD}'
                        rev = '{%s}' % date_match.group(1)
                    editable = True
                    req = '%s@%s#egg=%s' % (
                        svn_location,
                        rev,
                        cls.egg_name(dist)
                    )
        return cls(dist.project_name, req, editable, comments)

    @staticmethod
    def egg_name(dist):
        """Return the egg name with any trailing '-pyX.Y' tag stripped."""
        name = dist.egg_name()
        match = re.search(r'-py\d\.\d$', name)
        if match:
            name = name[:match.start()]
        return name

    def __str__(self):
        """Render the freeze-file lines: comments, then the requirement."""
        req = self.req
        if self.editable:
            req = '-e %s' % req
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
    # Propagate the command's return value as the process exit status.
    # Renamed from `exit` to avoid shadowing the builtin of the same name.
    status = main()
    if status:
        sys.exit(status)
|
import JATS from '../JATS'
import XMLIterator from '../../util/XMLIterator'
// Child elements accepted inside a <graphic>: the JATS ACCESS,
// ADDRESS_LINK and DISPLAY_BACK_MATTER groups plus a few named elements.
let GRAPHIC_ELEMENTS = JATS.ACCESS
  .concat(JATS.ADDRESS_LINK)
  .concat(['caption', 'object-id', 'kwd-group', 'label'])
  .concat(JATS.DISPLAY_BACK_MATTER)

// Converter definition for the JATS <graphic> element.
export default {
  type: 'graphic',
  tagName: 'graphic',
  /*
    Attributes
    content-type Type of Content
    id Document Internal Identifier
    mime-subtype Mime Subtype
    mimetype Mime Type
    orientation Orientation
    position Position
    specific-use Specific Use
    xlink:actuate Actuating the Link
    xlink:href Href (Linking Mechanism)
    xlink:role Role of the Link
    xlink:show Showing the Link
    xlink:title Title of the Link
    xlink:type Type of Link
    xml:base Base
    xml:lang Language
    xmlns:xlink XLink Namespace Declaration
    Content
    (
      alt-text | long-desc | abstract | email | ext-link | uri | caption |
      object-id | kwd-group | label | attrib | permissions
    )*
  */
  // Convert each recognized child element and collect its node id; any
  // leftover (unrecognized) child is a schema violation.
  import: function(el, node, converter) {
    let iterator = new XMLIterator(el.getChildren())
    iterator.manyOf(GRAPHIC_ELEMENTS, function(child) {
      node.nodes.push(converter.convertElement(child).id)
    })
    if (iterator.hasNext()) throw new Error('Illegal JATS: ' + el.outerHTML)
  },
  // Serialize the collected child nodes back under the <graphic> element.
  export: function(node, el, converter) {
    el.append(converter.convertNodes(node.nodes))
  }
}
|
# Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
import time
import uuid
import zmq
from anna.client import AnnaTcpClient
from anna.zmq_util import SocketCache
import requests
from cloudburst.server.scheduler.call import call_dag, call_function
from cloudburst.server.scheduler.create import (
create_dag,
create_function,
delete_dag
)
from cloudburst.server.scheduler.policy.default_policy import (
DefaultCloudburstSchedulerPolicy
)
import cloudburst.server.scheduler.utils as sched_utils
import cloudburst.server.utils as sutils
from cloudburst.shared.proto.cloudburst_pb2 import (
Dag,
DagCall,
GenericResponse,
NO_SUCH_DAG # Cloudburst's error types
)
from cloudburst.shared.proto.internal_pb2 import (
ExecutorStatistics,
SchedulerStatus,
ThreadStatus
)
from cloudburst.shared.proto.shared_pb2 import StringSet
from cloudburst.shared.utils import (
CONNECT_PORT,
DAG_CALL_PORT,
DAG_CREATE_PORT,
DAG_DELETE_PORT,
FUNC_CALL_PORT,
FUNC_CREATE_PORT,
LIST_PORT
)
# Seconds between policy-metadata refreshes and statistics reports,
# respectively (compared against elapsed wall-clock time in the main loop).
METADATA_THRESHOLD = 5
REPORT_THRESHOLD = 5

logging.basicConfig(filename='log_scheduler.txt', level=logging.INFO,
                    format='%(asctime)s %(message)s')
def scheduler(ip, mgmt_ip, route_addr):
    """Run the Cloudburst scheduler's main event loop.

    :param ip: this scheduler's own IP address.
    :param mgmt_ip: management server IP; ``None`` means local mode.
    :param route_addr: Anna KVS routing service address.

    Never returns; polls a set of ZMQ sockets and dispatches each message
    kind to the corresponding handler.
    """
    # If the management IP is not set, we are running in local mode.
    local = (mgmt_ip is None)
    kvs = AnnaTcpClient(route_addr, ip, local=local)

    scheduler_id = str(uuid.uuid4())

    context = zmq.Context(1)

    # A mapping from a DAG's name to its protobuf representation.
    dags = {}

    # Tracks how often a request for each function is received.
    call_frequency = {}

    # Tracks the time interval between successive requests for a particular
    # DAG.
    interarrivals = {}

    # Tracks the most recent arrival for each DAG -- used to calculate
    # interarrival times.
    last_arrivals = {}

    # Maintains a list of all other schedulers in the system, so we can
    # propagate metadata to them.
    schedulers = []

    # REP sockets: one per client-facing request type.
    connect_socket = context.socket(zmq.REP)
    connect_socket.bind(sutils.BIND_ADDR_TEMPLATE % (CONNECT_PORT))

    func_create_socket = context.socket(zmq.REP)
    func_create_socket.bind(sutils.BIND_ADDR_TEMPLATE % (FUNC_CREATE_PORT))

    func_call_socket = context.socket(zmq.REP)
    func_call_socket.bind(sutils.BIND_ADDR_TEMPLATE % (FUNC_CALL_PORT))

    dag_create_socket = context.socket(zmq.REP)
    dag_create_socket.bind(sutils.BIND_ADDR_TEMPLATE % (DAG_CREATE_PORT))

    dag_call_socket = context.socket(zmq.REP)
    dag_call_socket.bind(sutils.BIND_ADDR_TEMPLATE % (DAG_CALL_PORT))

    dag_delete_socket = context.socket(zmq.REP)
    dag_delete_socket.bind(sutils.BIND_ADDR_TEMPLATE % (DAG_DELETE_PORT))

    list_socket = context.socket(zmq.REP)
    list_socket.bind(sutils.BIND_ADDR_TEMPLATE % (LIST_PORT))

    # PULL sockets: one-way status/metadata streams from executors and peers.
    exec_status_socket = context.socket(zmq.PULL)
    exec_status_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.STATUS_PORT))

    sched_update_socket = context.socket(zmq.PULL)
    sched_update_socket.bind(sutils.BIND_ADDR_TEMPLATE %
                             (sutils.SCHED_UPDATE_PORT))

    # Pin acknowledgements time out after 500ms so the policy engine is not
    # blocked indefinitely by an unresponsive executor.
    pin_accept_socket = context.socket(zmq.PULL)
    pin_accept_socket.setsockopt(zmq.RCVTIMEO, 500)
    pin_accept_socket.bind(sutils.BIND_ADDR_TEMPLATE %
                           (sutils.PIN_ACCEPT_PORT))

    requestor_cache = SocketCache(context, zmq.REQ)
    pusher_cache = SocketCache(context, zmq.PUSH)

    poller = zmq.Poller()
    poller.register(connect_socket, zmq.POLLIN)
    poller.register(func_create_socket, zmq.POLLIN)
    poller.register(func_call_socket, zmq.POLLIN)
    poller.register(dag_create_socket, zmq.POLLIN)
    poller.register(dag_call_socket, zmq.POLLIN)
    poller.register(dag_delete_socket, zmq.POLLIN)
    poller.register(list_socket, zmq.POLLIN)
    poller.register(exec_status_socket, zmq.POLLIN)
    poller.register(sched_update_socket, zmq.POLLIN)

    # Start the policy engine.
    policy = DefaultCloudburstSchedulerPolicy(pin_accept_socket, pusher_cache,
                                              kvs, ip, local=local)
    policy.update()

    start = time.time()

    while True:
        socks = dict(poller.poll(timeout=1000))

        # Client handshake: request payload is ignored; reply with the KVS
        # routing address.
        if connect_socket in socks and socks[connect_socket] == zmq.POLLIN:
            msg = connect_socket.recv_string()
            connect_socket.send_string(route_addr)

        if (func_create_socket in socks and
                socks[func_create_socket] == zmq.POLLIN):
            create_function(func_create_socket, kvs)

        if func_call_socket in socks and socks[func_call_socket] == zmq.POLLIN:
            call_function(func_call_socket, pusher_cache, policy)

        if (dag_create_socket in socks and socks[dag_create_socket]
                == zmq.POLLIN):
            create_dag(dag_create_socket, pusher_cache, kvs, dags, policy,
                       call_frequency)

        if dag_call_socket in socks and socks[dag_call_socket] == zmq.POLLIN:
            call = DagCall()
            call.ParseFromString(dag_call_socket.recv())

            name = call.name

            # Record the interarrival time since this DAG's previous call.
            t = time.time()
            if name in last_arrivals:
                if name not in interarrivals:
                    interarrivals[name] = []

                interarrivals[name].append(t - last_arrivals[name])

            last_arrivals[name] = t

            if name not in dags:
                resp = GenericResponse()
                resp.success = False
                resp.error = NO_SUCH_DAG

                dag_call_socket.send(resp.SerializeToString())
                continue

            dag = dags[name]
            for fname in dag[0].functions:
                call_frequency[fname] += 1

            response = call_dag(call, pusher_cache, dags, policy)
            dag_call_socket.send(response.SerializeToString())

        if (dag_delete_socket in socks and socks[dag_delete_socket] ==
                zmq.POLLIN):
            delete_dag(dag_delete_socket, dags, policy, call_frequency)

        if list_socket in socks and socks[list_socket] == zmq.POLLIN:
            # Return the set of function names matching the given prefix.
            msg = list_socket.recv_string()
            prefix = msg if msg else ''

            resp = StringSet()
            resp.keys.extend(sched_utils.get_func_list(kvs, prefix))

            list_socket.send(resp.SerializeToString())

        if exec_status_socket in socks and socks[exec_status_socket] == \
                zmq.POLLIN:
            status = ThreadStatus()
            status.ParseFromString(exec_status_socket.recv())

            policy.process_status(status)

        if sched_update_socket in socks and socks[sched_update_socket] == \
                zmq.POLLIN:
            status = SchedulerStatus()
            status.ParseFromString(sched_update_socket.recv())

            # Retrieve any DAGs that some other scheduler knows about that we
            # do not yet know about.
            for dname in status.dags:
                if dname not in dags:
                    # Retry until the KVS returns a real payload.
                    payload = kvs.get(dname)
                    while None in payload:
                        payload = kvs.get(dname)

                    dag = Dag()
                    dag.ParseFromString(payload[dname].reveal())
                    dags[dag.name] = (dag, sched_utils.find_dag_source(dag))

                    for fname in dag.functions:
                        if fname not in call_frequency:
                            call_frequency[fname] = 0

            policy.update_function_locations(status.function_locations)

        end = time.time()

        if end - start > METADATA_THRESHOLD:
            # Update the scheduler policy-related metadata.
            policy.update()

            # If the management IP is None, that means we are running in
            # local mode, so there is no need to deal with caches and other
            # schedulers.
            if mgmt_ip:
                schedulers = sched_utils.get_ip_set(
                    sched_utils.get_scheduler_list_address(mgmt_ip),
                    requestor_cache, False)

        if end - start > REPORT_THRESHOLD:
            num_unique_executors = policy.get_unique_executors()
            key = scheduler_id + ':' + str(time.time())
            data = {'key': key, 'count': num_unique_executors}

            # Broadcast our DAG and function-location knowledge to peers.
            status = SchedulerStatus()
            for name in dags.keys():
                status.dags.append(name)

            for fname in policy.function_locations:
                for loc in policy.function_locations[fname]:
                    floc = status.function_locations.add()
                    floc.name = fname
                    floc.ip = loc[0]
                    floc.tid = loc[1]

            msg = status.SerializeToString()

            for sched_ip in schedulers:
                if sched_ip != ip:
                    sckt = pusher_cache.get(
                        sched_utils.get_scheduler_update_address
                        (sched_ip))
                    sckt.send(msg)

            # Aggregate per-function call counts and per-DAG interarrival
            # times into an ExecutorStatistics report, then reset counters.
            stats = ExecutorStatistics()
            for fname in call_frequency:
                fstats = stats.functions.add()
                fstats.name = fname
                fstats.call_count = call_frequency[fname]
                logging.info('Reporting %d calls for function %s.' %
                             (call_frequency[fname], fname))

                call_frequency[fname] = 0

            for dname in interarrivals:
                dstats = stats.dags.add()
                dstats.name = dname
                dstats.call_count = len(interarrivals[dname]) + 1
                dstats.interarrival.extend(interarrivals[dname])

                interarrivals[dname].clear()

            # We only attempt to send the statistics if we are running in
            # cluster mode. If we are running in local mode, we write them to
            # the local log file.
            if mgmt_ip:
                sckt = pusher_cache.get(sutils.get_statistics_report_address
                                        (mgmt_ip))
                sckt.send(stats.SerializeToString())

            start = time.time()
if __name__ == '__main__':
    # The configuration file may be supplied as the first CLI argument;
    # otherwise fall back to the default path.
    if len(sys.argv) > 1:
        config_path = sys.argv[1]
    else:
        config_path = 'conf/cloudburst-config.yml'

    conf = sutils.load_conf(config_path)
    scheduler(conf['ip'], conf['mgmt_ip'],
              conf['scheduler']['routing_address'])
|
from kamodo import Kamodo, kamodofy
import numpy as np
import scipy
import datetime
from datetime import timezone
import urllib
import plotly.express as px
import plotly.graph_objects as go
def ror_get_extraction(server, runID, coord, satellite):
    '''Query for file contents from server.

    Fetches {server}/{runID}/{satellite}/{coord}_{satellite}.txt and returns
    the raw response bytes. The HTTP response is closed deterministically
    (the original left it to the garbage collector).
    '''
    query = '{}/{}/{}/{}_{}.txt'.format(server, runID, satellite, coord, satellite)
    with urllib.request.urlopen(query) as response:
        file = response.read()
    return file
class SATEXTRACT(Kamodo):
    """Kamodo model wrapper for a CCMC Run-on-Request satellite extraction.

    Downloads a satellite trajectory extraction file from the CCMC VMR
    server, parses it, and registers each extracted variable as a
    time-interpolated Kamodo function.

    :param runID: CCMC run identifier.
    :param coord: coordinate system name (must match the file's header).
    :param satellite: satellite name (must match the file's header).
    """
    def __init__(self, runID, coord, satellite, **kwargs):
        super(SATEXTRACT, self).__init__(**kwargs)
        self.verbose=False
        self.symbol_registry=dict()
        self.signatures=dict()
        self.RE=6.3781E3  # Earth radius in km, used for km <-> R_E conversion
        self.server = "https://ccmc.gsfc.nasa.gov/RoR_WWW/VMR/"
        self.runID = runID
        self.coordinates = coord
        self.satellite = satellite
        print(' -server: ',self.server)
        print(' -runID: ',runID)
        print(' -coordinate system: ',coord)
        print(' -satellite: ',satellite)
        self.variables=dict()
        # Fetch the extraction file and parse it into self.variables,
        # self.dtarray and self.tsarray.
        self.file = ror_get_extraction(self.server, runID, coord, satellite).decode('ascii')
        self.parse_file()
        # ISO-8601 UTC strings for the first and last timestamps.
        ts=self.tsarray[0]
        self.start = datetime.datetime.fromtimestamp(ts,tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        ts=self.tsarray[-1]
        self.stop = datetime.datetime.fromtimestamp(ts,tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        print(" ")
        print(" -date start: ",self.start)
        print("         end: ",self.stop)

        # Register every parsed variable (except "N") as a Kamodo function.
        for varname in self.variables:
            if varname == "N":
                continue
            units = self.variables[varname]['units']
            print('... registering ',varname,units)
            self.register_variable(varname, units)

        # classification of position into coordinates to assist visualizion
        self.possible_coords=('TOD','J2K','GEO','GM','GSM','GSE','SM')
        self.possible_directions=('x','y','z')
        self.coords=dict()
        for varname in self.variables:
            size = self.variables[varname]['size']
            if size == 1:
                # Look for position values
                direction = varname.lower()
                key = self.coordinates
                if key in self.possible_coords and direction in self.possible_directions:
                    if key not in self.coords:
                        self.coords[key] = dict(coord=key)
                    self.coords[key]['size'] = size
                    self.coords[key][direction] = varname

        # Change 'fill' values in data to NaNs
        self.fill2nan()

    def parse_file(self):
        """Parse the downloaded extraction text: header lines (starting with
        '#') give run name, fill value, coordinate system, satellite name,
        variable names and units; remaining lines are timestamped data rows.
        """
        import re
        vars=[]
        units=[]
        times=[]
        arrays = []
        print("===> Printing File Header ...")
        for line in self.file.splitlines(False):
            # Classify the header line kind by its prefix.
            A = re.match('^# ', line)
            B = re.match('# Run', line)
            C = re.match('# Coordinate', line)
            D = re.match('# Satellite', line)
            E = re.match('# Year', line)
            F = re.match('# \[year\]', line)
            if A or B or C or D or E or F:
                if A:
                    print("-> ",line)
                if B:
                    # Find runname and fill value
                    parts=re.sub(' +', ' ', line).split(' ')
                    self.runname = parts[3]
                    self.fillvalue = parts[6]
                if C:
                    # Check that coordinate system matches
                    parts=re.sub(' +', ' ', line).split(' ')
                    if self.coordinates != parts[3]:
                        print("ERROR: Coordinate system does not match.",self.coordinates,parts[3])
                if D:
                    # Check that satellite name matches
                    parts=re.sub(' +', ' ', line).split(' ')
                    if self.satellite != parts[3]:
                        print("ERROR: Satellite does not match.",self.satellite,parts[3])
                if E:
                    # Variable names, remove . and change N and B_1
                    parts=re.sub(' +', ' ', line).strip().split(' ')
                    for p in parts[7:]:
                        p=re.sub("\.","",p)
                        p=re.sub("B_1","B1",p)
                        p=re.sub("^N$","rho",p)
                        vars.append(p)
                if F:
                    # Variable units, remove [] and fix exponents
                    parts=re.sub(' +', ' ', line).strip().split(' ')
                    for p in parts[7:]:
                        p=re.sub("cm\^-3","1/cm^3",p)
                        p=re.sub("m2","m^2",p)
                        p=re.sub("m3","m^3",p)
                        p=re.sub("\[","",p)
                        p=re.sub("\]","",p)
                        units.append(p)
            else:
                # Data row: columns are year month day hour minute second
                # followed by one value per variable.
                parts=re.sub(' +', ' ', line).strip().split(' ')
                year=parts[0]
                month=parts[1]
                day=parts[2]
                hour=parts[3]
                minute=parts[4]
                second=parts[5]
                ms=0
                if '.' in second:
                    # Fractional seconds are stored as milliseconds.
                    (second,ms)=second.split('.')
                dd=datetime.datetime(int(year),int(month),int(day),
                                     hour=int(hour),minute=int(minute),second=int(second),
                                     microsecond=int(ms)*1000,tzinfo=datetime.timezone.utc)
                times.append(dd)
                for s in parts[6:]:
                    arrays.append(float(s))
        self.dtarray=np.array([dd for dd in times])
        self.tsarray = np.array([d.timestamp() for d in self.dtarray])
        # Reshape the flat value list into (n_times, n_variables).
        nvar=len(vars)
        nval=len(arrays)
        npos=int(nval/nvar)
        arrays=np.array(arrays)
        arrays=arrays.reshape((npos,nvar))
        i=0
        for var in vars:
            self.variables[var] = dict(units=units[i],
                                       data=arrays[:,i],
                                       size=1,
                                       fill=self.fillvalue)
            i+=1
        return

    def register_variable(self, varname, units):
        """register variables into Kamodo for this service, CCMC ROR satellite extractions"""
        # Closure interpolating this variable's time series at the given
        # UNIX timestamp(s).
        def interpolate(timestamp):
            data = self.variables[varname]['data']
            return np.interp(timestamp,self.tsarray,data)

        # store the interpolator
        self.variables[varname]['interpolator'] = interpolate

        # update docstring for this variable
        interpolate.__doc__ = "A function that returns {} in [{}].".format(varname,units)

        self[varname] = kamodofy(interpolate,
                                 units = units,
                                 citation = "De Zeeuw 2020",
                                 data = None)

    def fill2nan(self):
        '''
        Replaces fill value in data with NaN.
        Called automatically at the end of __init__; safe to call again.
        '''
        for varname in self.variables:
            data = self.variables[varname]['data']
            fill = self.variables[varname]['fill']
            if fill is not None:
                mask = data==float(fill)
                nbad = np.count_nonzero(mask)
                if nbad > 0:
                    print("Found",nbad,"fill values, replacing with NaN for variable",varname,
                          "of size",data.size)
                data[mask]=np.nan
                self.variables[varname]['data'] = data

    def get_plot(self, type="1Dpos", scale="R_E", var=""):
        '''
        Return a plotly figure object.
        type = 1Dvar => 1D plot of variable value vs Time
               1Dpos (default) => 1D location x,y,z vs Time
               3Dpos => 3D location colored by altitude
        scale = km, R_E (default)
        var = variable name for variable value plots
        '''
        coord=self.coordinates

        # Set plot title for plots
        txttop=self.satellite + " position extracted from run " + self.runname + "<br>"\
            + self.start + " to " + self.stop + "<br>" + coord

        if type == '1Dvar':
            if var == "":
                print("No plot variable passed in.")
                return
            fig=go.Figure()
            # Scalar variables get one trace; 3-component variables get
            # one trace per component.
            if self.variables[var]['size'] == 1:
                x=self.variables[var]['data']
                fig.add_trace(go.Scatter(x=self.dtarray, y=x, mode='lines+markers', name=var))
            elif self.variables[var]['size'] == 3:
                x=self.variables[var]['data'][:,0]
                y=self.variables[var]['data'][:,1]
                z=self.variables[var]['data'][:,2]
                fig.add_trace(go.Scatter(x=self.dtarray, y=x, mode='lines+markers', name=var))
                fig.add_trace(go.Scatter(x=self.dtarray, y=y, mode='lines+markers', name=var))
                fig.add_trace(go.Scatter(x=self.dtarray, y=z, mode='lines+markers', name=var))
            ytitle=var+" ["+self.variables[var]['units']+"]"
            fig.update_xaxes(title_text="Time")
            fig.update_yaxes(title_text=ytitle)
            fig.update_layout(hovermode="x")
            fig.update_layout(title_text=txttop)
            return fig

        if type == '1Dpos':
            fig=go.Figure()
            xvarname = self.coords[coord]['x']
            if self.coords[coord]['size'] == 1:
                x=self.variables[self.coords[coord]['x']]['data']
                y=self.variables[self.coords[coord]['y']]['data']
                z=self.variables[self.coords[coord]['z']]['data']
            elif self.coords[coord]['size'] == 3:
                x=self.variables[self.coords[coord]['x']]['data'][:,0]
                y=self.variables[self.coords[coord]['y']]['data'][:,1]
                z=self.variables[self.coords[coord]['z']]['data'][:,2]
            # Convert between km and Earth radii as requested, based on the
            # units the position variable was stored in.
            if scale == "km":
                if self.variables[xvarname]['units'] == "R_E":
                    x=x*self.RE
                    y=y*self.RE
                    z=z*self.RE
                ytitle="Position [km]"
            else:
                if self.variables[xvarname]['units'] == "km":
                    x=x/self.RE
                    y=y/self.RE
                    z=z/self.RE
                ytitle="Position [R_E]"
            fig.add_trace(go.Scatter(x=self.dtarray, y=x,
                                     mode='lines+markers', name=self.coords[coord]['x']))
            fig.add_trace(go.Scatter(x=self.dtarray, y=y,
                                     mode='lines+markers', name=self.coords[coord]['y']))
            fig.add_trace(go.Scatter(x=self.dtarray, y=z,
                                     mode='lines+markers', name=self.coords[coord]['z']))
            fig.update_xaxes(title_text="Time")
            fig.update_yaxes(title_text=ytitle)
            fig.update_layout(hovermode="x")
            fig.update_layout(title_text=txttop)
            return fig

        if type == "3Dpos":
            xvarname = self.coords[coord]['x']
            if self.coords[coord]['size'] == 1:
                x=self.variables[self.coords[coord]['x']]['data']
                y=self.variables[self.coords[coord]['y']]['data']
                z=self.variables[self.coords[coord]['z']]['data']
            elif self.coords[coord]['size'] == 3:
                x=self.variables[self.coords[coord]['x']]['data'][:,0]
                y=self.variables[self.coords[coord]['y']]['data'][:,1]
                z=self.variables[self.coords[coord]['z']]['data'][:,2]
            # r is altitude above the surface in the chosen scale, used to
            # color the 3D trajectory.
            if scale == "km":
                if self.variables[xvarname]['units'] == "R_E":
                    x=x*self.RE
                    y=y*self.RE
                    z=z*self.RE
                r=(np.sqrt(x**2 + y**2 + z**2))-self.RE
                ytitle="Position [km]"
            else:
                if self.variables[xvarname]['units'] == "km":
                    x=x/self.RE
                    y=y/self.RE
                    z=z/self.RE
                r=(np.sqrt(x**2 + y**2 + z**2))-1.
                ytitle="Position [R_E]"
            fig=px.scatter_3d(
                x=x,
                y=y,
                z=z,
                color=r)
            bartitle = "Altitude [" + scale + "]"
            fig.update_layout(coloraxis=dict(colorbar=dict(title=bartitle)))
            fig.update_layout(scene=dict(xaxis=dict(title=dict(text="X ["+scale+"]")),
                                         yaxis=dict(title=dict(text="Y ["+scale+"]")),
                                         zaxis=dict(title=dict(text="Z ["+scale+"]"))))
            fig.update_layout(title_text=txttop)
            return fig

        print('ERROR, reached end of get_plot without any action taken.')
        return
|
# -*- coding: utf-8 -*-
"""
stamps.services
~~~~~~~~~~~~~~~
Stamps.com services.
:copyright: 2014 by Jonathan Zempel.
:license: BSD, see LICENSE for more details.
"""
from decimal import Decimal
from logging import getLogger
from re import compile
from suds import WebFault
from suds.bindings.document import Document
from suds.client import Client
from suds.plugin import MessagePlugin
from suds.sax.element import Element
from suds.sudsobject import asdict
from suds.xsd.sxbase import XBuiltin
from suds.xsd.sxbuiltin import Factory
# Regular expression for a UUID-style transaction identifier
# (8-4-4-4-12 hexadecimal digit groups).
PATTERN_HEX = r"[0-9a-fA-F]"
PATTERN_ID = r"{hex}{{8}}-{hex}{{4}}-{hex}{{4}}-{hex}{{4}}-{hex}{{12}}".format(
    hex=PATTERN_HEX)
RE_TRANSACTION_ID = compile(PATTERN_ID)
class AuthenticatorPlugin(MessagePlugin):
    """Handle message authentication.

    Injects either the rolling Authenticator token or, when no token is held
    yet, the account Credentials into each outgoing SOAP request, and captures
    the new token from each reply.

    :param credentials: Stamps API credentials.
    :param wsdl: Configured service client.
    """

    def __init__(self, credentials, client):
        self.credentials = credentials
        self.client = client
        # Rolling token returned by the previous call; None forces the next
        # request to authenticate with full credentials.
        self.authenticator = None

    def marshalled(self, context):
        """Add an authenticator token to the document before it is sent.

        :param context: The current message context.
        """
        body = context.envelope.getChild("Body")
        operation = body[0]

        if operation.name in ("AuthenticateUser", "RegisterAccount"):
            # These operations carry their own credentials; inject nothing.
            pass
        elif self.authenticator:
            # Reuse the token captured from the previous reply.
            namespace = operation.namespace()
            element = Element("Authenticator", ns=namespace)
            element.setText(self.authenticator)
            operation.insert(element)
        else:
            # No token yet: marshal the Credentials object into the request
            # using the AuthenticateUser parameter definition.
            document = Document(self.client.wsdl)
            method = self.client.service.AuthenticateUser.method
            parameter = document.param_defs(method)[0]
            element = document.mkparam(method, parameter, self.credentials)
            operation.insert(element)

    def unmarshalled(self, context):
        """Store the authenticator token for the next call.

        :param context: The current message context.
        """
        if hasattr(context.reply, "Authenticator"):
            # Consume the token so it does not leak into caller-visible data.
            self.authenticator = context.reply.Authenticator
            del context.reply.Authenticator
        else:
            self.authenticator = None

        return context
class BaseService(object):
    """Base service.

    Owns the suds SOAP client, the credentials object, and the
    authentication plugin shared by all concrete services.

    :param configuration: API configuration (wsdl, integration_id, username,
        password, port).
    """

    def __init__(self, configuration):
        # Map the WSDL 'decimal' type to our XDecimal handler before the
        # client parses the schema.
        Factory.maptag("decimal", XDecimal)
        self.client = Client(configuration.wsdl)
        credentials = self.create("Credentials")
        credentials.IntegrationID = configuration.integration_id
        credentials.Username = configuration.username
        credentials.Password = configuration.password
        self.plugin = AuthenticatorPlugin(credentials, self.client)
        self.client.set_options(plugins=[self.plugin], port=configuration.port)
        self.logger = getLogger("stamps")

    def call(self, method, **kwargs):
        """Call the given web service method.

        On a WebFault the cached authenticator token is discarded and the
        call is retried once with fresh credentials; a second failure is
        re-raised to the caller.

        :param method: The name of the web service operation to call.
        :param kwargs: Method keyword-argument parameters.
        :raises WebFault: if the retried call fails as well.
        """
        self.logger.debug("%s(%s)", method, kwargs)
        instance = getattr(self.client.service, method)

        try:
            ret_val = instance(**kwargs)
        except WebFault:
            self.logger.warning("Retry %s", method, exc_info=True)
            self.plugin.authenticator = None

            try:  # retry with a re-authenticated user.
                ret_val = instance(**kwargs)
            except WebFault:
                self.logger.exception("%s retry failed", method)
                self.plugin.authenticator = None
                # Bare raise preserves the original exception and traceback
                # (the previous `raise error` rebinding was redundant).
                raise

        return ret_val

    def create(self, wsdl_type):
        """Create an object of the given WSDL type.

        :param wsdl_type: The WSDL type to create an object for.
        """
        return self.client.factory.create(wsdl_type)
class StampsService(BaseService):
    """Stamps.com service.

    Thin wrappers around the SOAP operations; every call goes through
    ``BaseService.call``, which handles authentication and one retry.
    """
    def add_postage(self, amount, transaction_id=None):
        """Add postage to the account.

        :param amount: The amount of postage to purchase.
        :param transaction_id: Default `None`. ID that may be used to retry the
            purchase of this postage.
        """
        account = self.get_account()
        # The API requires the current control total to guard against
        # concurrent purchases against the same balance.
        control = account.AccountInfo.PostageBalance.ControlTotal
        return self.call("PurchasePostage", PurchaseAmount=amount,
            ControlTotal=control, IntegratorTxID=transaction_id)
    def create_add_on(self):
        """Create a new add-on object."""
        return self.create("AddOnV7")
    def create_customs(self):
        """Create a new customs object."""
        return self.create("CustomsV3")
    def create_array_of_customs_lines(self):
        """Create a new array of customs objects."""
        return self.create("ArrayOfCustomsLine")
    def create_customs_lines(self):
        """Create new customs lines."""
        return self.create("CustomsLine")
    def create_address(self):
        """Create a new address object."""
        return self.create("Address")
    def create_purchase_status(self):
        """Create a new purchase status object."""
        return self.create("PurchaseStatus")
    def create_registration(self):
        """Create a new registration object.

        The registration is pre-filled with this client's integration
        credentials.
        """
        ret_val = self.create("RegisterAccount")
        ret_val.IntegrationID = self.plugin.credentials.IntegrationID
        ret_val.UserName = self.plugin.credentials.Username
        ret_val.Password = self.plugin.credentials.Password
        return ret_val
    def create_shipping(self):
        """Create a new shipping (rate) object."""
        return self.create("RateV18")
    def get_address(self, address):
        """Get a shipping address.

        :param address: Address instance to get a clean shipping address for.
        """
        return self.call("CleanseAddress", Address=address)
    def get_account(self):
        """Get account information."""
        return self.call("GetAccountInfo")
    def get_label(self, from_address, to_address, rate, transaction_id,
            customs=None, sample=False):
        """Get a shipping label.

        :param from_address: The shipping 'from' address.
        :param to_address: The shipping 'to' address.
        :param rate: A rate instance for the shipment.
        :param transaction_id: ID that may be used to retry/rollback the
            purchase of this label.
        :param customs: A customs instance for international shipments.
        :param sample: Default ``False``. Get a sample label without postage.
        """
        return self.call("CreateIndicium", IntegratorTxID=transaction_id,
            Rate=rate, From=from_address, To=to_address, Customs=customs,
            SampleOnly=sample)
    def get_postage_status(self, transaction_id):
        """Get postage purchase status.

        :param transaction_id: The transaction ID returned by
            :meth:`add_postage`.
        """
        return self.call("GetPurchaseStatus", TransactionID=transaction_id)
    def get_rates(self, shipping):
        """Get shipping rates.

        :param shipping: Shipping instance to get rates for.
        :return: A (possibly empty) list of rate objects.
        """
        rates = self.call("GetRates", Rate=shipping)
        if rates.Rates:
            ret_val = [rate for rate in rates.Rates.Rate]
        else:
            ret_val = []
        return ret_val
    def get_tracking(self, transaction_id):
        """Get tracking events for a shipment.

        :param transaction_id: The transaction ID (or tracking number) returned
            by :meth:`get_label`.
        """
        # UUID-shaped values are Stamps transaction IDs; anything else is
        # treated as a carrier tracking number.
        if RE_TRANSACTION_ID.match(transaction_id):
            arguments = dict(StampsTxID=transaction_id)
        else:
            arguments = dict(TrackingNumber=transaction_id)
        return self.call("TrackShipment", **arguments)
    def register_account(self, registration):
        """Register a new account.

        :param registration: Registration instance.
        """
        arguments = asdict(registration)
        return self.call("RegisterAccount", **arguments)
    def remove_label(self, transaction_id):
        """Cancel a shipping label.

        :param transaction_id: The transaction ID (or tracking number) returned
            by :meth:`get_label`.
        """
        if RE_TRANSACTION_ID.match(transaction_id):
            arguments = dict(StampsTxID=transaction_id)
        else:
            arguments = dict(TrackingNumber=transaction_id)
        return self.call("CancelIndicium", **arguments)
class XDecimal(XBuiltin):
    """Represents an XSD decimal type.

    Translates between the wire (string) representation and
    :class:`decimal.Decimal`.
    """
    def translate(self, value, topython=True):
        """Translate between string and decimal values.

        :param value: The value to translate.
        :param topython: Default `True`. Determine whether to translate the
            value for python.
        """
        if topython:
            # ``basestring`` only exists on Python 2 (where ``str is bytes``);
            # fall back to ``str`` so the check also works on Python 3.
            text_types = basestring if str is bytes else str
            if isinstance(value, text_types) and len(value):
                ret_val = Decimal(value)
            else:
                ret_val = None
        else:
            if isinstance(value, (int, float, Decimal)):
                ret_val = str(value)
            else:
                ret_val = value
        return ret_val
|
// Router for the developer landing page.
const express = require('express');

const router = express.Router();

// GET / — render the developer view with its page title.
router.get('/', (req, res) => {
  res.render('developer', { title: 'KITENGE DEVELOPERS' });
});

module.exports = router;
|
# One-off maintenance script: connect to the local MongoDB instance and
# drop the ``record`` collection from the ``test_mongo`` database.
import pymongo
# Connection string for a MongoDB server on the default local port.
MONGO_URL = f"mongodb://127.0.0.1:27017/"
con = pymongo.MongoClient(MONGO_URL)
db = con['test_mongo']
table = db['record']
# drop() is idempotent: it succeeds even if the collection does not exist.
table.drop()
|
from json_typer import TypeSerializable
class Bar(TypeSerializable):
    """Minimal ``TypeSerializable`` subclass that adds no behavior of its own."""
    def __init__(self, *args, **kwargs):
        # Forward all constructor arguments unchanged to the base class.
        super(Bar, self).__init__(*args, **kwargs)
|
// Mongoose model for expiring tokens tied to a user account.
const mongoose = require('mongoose');
const config = require('../config/database');

const { Schema } = mongoose;

// Field definitions: the owning user, the token string, and a creation
// timestamp. `expires: 43200` asks MongoDB to auto-delete documents
// 43200 seconds (12 hours) after `createdAt` via a TTL index.
const tokenFields = {
  _userId: {
    type: mongoose.Schema.Types.ObjectId,
    required: true,
    ref: 'User'
  },
  token: {
    type: String,
    required: true
  },
  createdAt: {
    type: Date,
    required: true,
    default: Date.now,
    expires: 43200
  }
};

const tokenSchema = new Schema(tokenFields);

const Token = module.exports = mongoose.model('Token', tokenSchema);
|
'''
From https://github.com/tonylins/pytorch-mobilenet-v2
'''
import torch.nn as nn
import math
def conv_bn(inp, oup, stride):
    """3x3 convolution (padding 1, no bias) followed by BatchNorm and ReLU6."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) convolution followed by BatchNorm and ReLU6."""
    conv = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)
    norm = nn.BatchNorm2d(oup)
    act = nn.ReLU6(inplace=True)
    return nn.Sequential(conv, norm, act)
def make_divisible(x, divisible_by=8):
    """Round ``x`` up to the nearest multiple of ``divisible_by``.

    Used to keep channel counts hardware-friendly when scaling by
    ``width_mult``.

    :param x: Value to round (int or float).
    :param divisible_by: The divisor; the result is a multiple of this.
    :return: Smallest multiple of ``divisible_by`` >= ``x``, as an ``int``.
    """
    # math.ceil gives the same result as the original np.ceil while avoiding
    # a per-call ``import numpy`` for a single scalar operation.
    return int(math.ceil(x * 1. / divisible_by) * divisible_by)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block (expand -> depthwise -> project).

    :param inp: Input channel count.
    :param oup: Output channel count.
    :param stride: Depthwise conv stride; must be 1 or 2.
    :param expand_ratio: Hidden-layer width multiplier (``t`` in the paper).
    """
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(inp * expand_ratio)
        # Residual connection only when the block keeps resolution and width.
        self.use_res_connect = self.stride == 1 and inp == oup
        if expand_ratio == 1:
            # No pointwise expansion needed when t == 1.
            self.conv = nn.Sequential(
                # dw: depthwise 3x3 (groups == channels)
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear: project without activation
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw: expand channels by expand_ratio
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # dw: depthwise 3x3 (groups == channels)
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear: project without activation
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
    def forward(self, x):
        # Skip connection around the block whenever shapes allow it.
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
class MobileNetV2(nn.Module):
    """MobileNetV2 classifier.

    :param n_class: Number of output classes.
    :param input_size: Input resolution; must be a multiple of 32.
    :param width_mult: Channel width multiplier.
    """
    def __init__(self, n_class=1000, input_size=224, width_mult=1.):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # Per-stage settings: expansion t, output channels c, repeats n, stride s.
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # building first layer
        assert input_size % 32 == 0
        # input_channel = make_divisible(input_channel * width_mult) # first channel is always 32!
        self.last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            output_channel = make_divisible(c * width_mult) if t > 1 else c
            for i in range(n):
                # Only the first block of a stage uses the stage stride.
                if i == 0:
                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
                else:
                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier
        self.classifier = nn.Linear(self.last_channel, n_class)
        self._initialize_weights()
    def forward(self, x):
        x = self.features(x)
        # Global average pool over the spatial dimensions (W then H).
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        # He-style init for convs, unit BN scale, small-normal linear weights.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenet_v2(num_classes = 1000, pretrained=True):
    """Build a width-1.0 MobileNetV2 classifier.

    Fixes the original bug where ``num_classes`` was accepted but ignored
    (the model was always built with the default 1000 classes).

    :param num_classes: Size of the final classification layer.
    :param pretrained: If True, download and load ImageNet weights. When
        ``num_classes != 1000`` the shape-incompatible classifier weights are
        skipped and only the feature extractor is initialized from them.
    :return: The constructed ``MobileNetV2`` model.
    """
    model = MobileNetV2(n_class=num_classes, width_mult=1)
    if pretrained:
        try:
            from torch.hub import load_state_dict_from_url
        except ImportError:
            from torch.utils.model_zoo import load_url as load_state_dict_from_url
        state_dict = load_state_dict_from_url(
            'https://www.dropbox.com/s/47tyzpofuuyyv1b/mobilenetv2_1.0-f2a8633.pth.tar?dl=1', progress=True)
        if num_classes == 1000:
            model.load_state_dict(state_dict)
        else:
            # The pretrained classifier is 1000-way; drop it and load the rest.
            features_only = {k: v for k, v in state_dict.items()
                             if not k.startswith('classifier')}
            model.load_state_dict(features_only, strict=False)
    return model
if __name__ == '__main__':
    # Pass by keyword: the first positional parameter of mobilenet_v2 is
    # num_classes, so the original mobilenet_v2(True) silently set
    # num_classes=True rather than pretrained=True.
    net = mobilenet_v2(pretrained=True)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListModelEvaluations
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_sync]
from google.cloud import aiplatform_v1
def sample_list_model_evaluations():
    """Snippet for list_model_evaluations.

    Lists every evaluation attached to a Vertex AI model; replace the
    ``{placeholders}`` in ``parent`` with real resource IDs before running.
    """
    # Create a client
    client = aiplatform_v1.ModelServiceClient()
    # Initialize request argument(s)
    request = aiplatform_v1.ListModelEvaluationsRequest(
        parent="projects/{project}/locations/{location}/models/{model}",
    )
    # Make the request and iterate the paged results.
    page_result = client.list_model_evaluations(request=request)
    for response in page_result:
        print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_sync]
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 - present Instructure, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
import React from 'react'
import ReactDOM from 'react-dom'
import lorem from 'lorem-ipsum'
import moment from 'moment'
import 'moment/min/locales'
import { mirrorHorizontalPlacement } from '@instructure/ui-layout'
// eslint-plugin-import doesn't like 'import * as Components' here
const Components = require('./components')
import '@instructure/ui-icons/lib/font/Solid/InstructureIcons-Solid.css'
import '@instructure/ui-icons/lib/font/Line/InstructureIcons-Line.css'
import { DateTime } from '@instructure/ui-i18n'
// eslint-disable-next-line import/no-webpack-loader-syntax, import/no-unresolved
import iconExample from '!svg-inline-loader!./heart_lg.svg'
import avatarSquare from './avatarSquare.jpg'
import avatarPortrait from './avatarPortrait.jpg'
import placeholderImage from './placeholder-image'
import placeholderLogo from './placeholder-logo'
import { theme } from '@instructure/canvas-theme'
// Activate the canvas theme before anything renders.
theme.use()

// Everything in this object is copied onto the global scope so that inline
// documentation examples can reference components and helpers without imports.
const globals = {
  ...Components,
  moment,
  locales: moment.locales(),
  avatarSquare,
  avatarPortrait,
  DateTime,
  iconExample,
  lorem: {
    sentence () {
      return lorem({ count: 1, units: 'sentences' })
    },
    paragraph () {
      return lorem({ count: 1, units: 'paragraphs' })
    },
    paragraphs (count) {
      // With no explicit count, generate a random number of paragraphs (0-9).
      return lorem({
        count: count || Math.floor(Math.random() * 10),
        units: 'paragraphs'
      })
    }
  },
  mirrorHorizontalPlacement,
  placeholderImage,
  placeholderLogo,
  React,
  ReactDOM
}

// Expose each entry on the global object.
for (const key of Object.keys(globals)) {
  global[key] = globals[key]
}

export default globals
|
import math
class GaussianPerceptron():
    """Kernel perceptron with a Gaussian (RBF) kernel.

    :param inputs: Training samples: ``n`` rows of ``d`` attributes.
    :param targets: Training labels in {0, 1}.
    :param n: Expected number of samples (validated against ``inputs``).
    :param d: Expected number of attributes (validated against ``inputs[0]``).
    :param sigma: Kernel bandwidth; a falsy value falls back to the
        ``kernel_gaussian`` default of 5.0.
    """
    def __init__(self, inputs, targets, n, d, sigma):
        # Fixed: the original called super(object, self).__init__(), which
        # names the wrong class; name this class (py2/py3-compatible form).
        super(GaussianPerceptron, self).__init__()
        assert n == len(inputs), 'number of inputs is not equal to n'
        assert d == len(inputs[0]), 'number of attributes is not equal to d'
        self.w = [0 for i in range(d)]
        self.inputs = inputs
        self.targets = targets
        self.sigma = sigma
        # Support vectors and their labels, filled in by train().
        self.final_w = []
        self.final_label = []
    def kernel_gaussian(self, x1, x2, sigma=5.0):
        """Gaussian kernel exp(-||x1 - x2||^2 / (2 * sigma^2))."""
        if self.sigma:
            sigma = self.sigma
        L2_norm = 0
        for d in range(len(x1)):
            L2_norm += (x1[d] - x2[d]) ** 2
        return math.exp(- L2_norm / (2 * (sigma ** 2)))
    def get_label(self, idx):
        """Map a {0, 1} target at ``idx`` to the perceptron convention {-1, +1}."""
        if self.targets[idx] != int(1):
            label = int(-1)
        else:
            label = self.targets[idx]
        return label
    def train(self):
        """Run the kernel perceptron, collecting violating points.

        Iterates until a full pass over the data produces no violation, or
        until the 70-iteration safety limit is hit. Results are stored in
        ``final_w`` / ``final_label``.
        """
        # Fixed: ``iteration`` was declared ``global`` for no reason; it is
        # only used inside this method and is now a plain local.
        iteration = True
        all_w = []
        labels = []
        all_w.append(self.inputs[0])  # the first point is bound to be preserved
        labels.append(self.get_label(0))
        iteration_num = 0
        while iteration:
            for idx, each in enumerate(self.inputs[1:]):
                label = self.get_label(idx + 1)
                total_m = 0
                for k in range(len(all_w)):
                    m = self.kernel_gaussian(all_w[k], each)
                    # For violation points with label=1, their mapped result is added.
                    total_m += m * labels[k]
                if total_m * label < 0:
                    # Violation: keep this point as a support vector and restart the pass.
                    all_w.append(self.inputs[idx + 1])
                    labels.append(label)
                    break
                if idx == len(self.inputs) - 2:  # a full pass with no violation
                    iteration = False
            if iteration_num > 70:  # safety stop: give up after 70 iterations
                iteration = False
            iteration_num += 1
            print('this is a iteration: ', iteration_num)
        print('Finish')
        self.final_w = all_w
        self.final_label = labels
    def predict(self, input_data):
        """Predict the {0, 1} class of a single sample via the kernel sum."""
        total_m = 0
        for k in range(len(self.final_w)):
            m = self.kernel_gaussian(self.final_w[k], input_data)
            total_m += m * self.final_label[k]
        return int(total_m > 0)
    def acc(self, inputs, targets):
        """Fraction of ``inputs`` whose prediction matches ``targets``."""
        correct = 0
        for idx, each in enumerate(inputs):
            correct += self.predict(each) == targets[idx]
        return correct / len(inputs)
|
/* MagicMirror² Test config default calendar with auth by default
*
* By Rodrigo Ramírez Norambuena https://rodrigoramirez.com
* MIT Licensed.
*/
// One calendar module pointed at a local test ICS feed served with HTTP
// basic auth — exercises the calendar module's `auth` option.
let config = {
	timeFormat: 12,
	modules: [
		{
			module: "calendar",
			position: "bottom_bar",
			config: {
				calendars: [
					{
						// Large window so every fixture event is in range.
						maximumNumberOfDays: 10000,
						url: "http://localhost:8010/tests/configs/data/calendar_test.ics",
						// Credentials for the auth-protected test feed.
						auth: {
							user: "MagicMirror",
							pass: "CallMeADog"
						}
					}
				]
			}
		}
	]
};
/*************** DO NOT EDIT THE LINE BELOW ***************/
if (typeof module !== "undefined") {
	module.exports = config;
}
|
from bs4 import BeautifulSoup
from string import ascii_lowercase
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import pypyodbc
import re
from PIL import Image
import urllib.request
import os
'''
TODO: Better organize how I am scraping this data.
For now I will try to separate the scraping of the data and how I insert the data into my MSSQL Table.
'''
class Scraper:
    """Scrape active NBA players from basketball-reference.com and forward
    each player's stats to ``Export_MSSQL``.

    Instantiating the class runs the whole scrape immediately (network I/O).
    """
    def __init__(self):
        index = ascii_lowercase #from string module get a str of 'a-z'
        '''Open a connection with the MSSQL Table'''
        #export = Export_MSSQL()
        # One player-index page per letter of the alphabet.
        for i in range(len(index)):
            #Use beautifulsoup to parse and webscrape
            page_index = requests.get(f'http://www.basketball-reference.com/players/{index[i]}').content
            self.scrape_player(page_index)
        '''Commit and close the connection to the MSSQL Table'''
        #export.close_connection()
    '''
    Called by _init_() of class Scraper
    @param BeautifulSoup object that contains a page index ranging from 'a-z'
    @method scrape and separate active players,
            then scrape attributes of every individual active player,
            then send the player information to class Export_MSSQL
    '''
    def scrape_player(self, page_index):
        """Extract every active player's stats from one letter-index page.

        :param page_index: Raw HTML bytes of an index page.
        """
        bs = BeautifulSoup(page_index, 'html5lib')
        # Active players are the ones wrapped in <strong> on the index page.
        player_directory = bs.select('strong a')
        active_players = [] #creates a list of active players, also clears the list for every new index
        for player in player_directory:
            active_players.append(player['href'])
        for i in range(len(active_players)):
            '''
            Create a new request and BeautifulSoup object, request every player\]]
            '''
            player_index = requests.get(f'http://www.basketball-reference.com{active_players[i]}').content
            bs2 = BeautifulSoup(player_index, 'html5lib')
            #horrible hardcode catch because apparently there are two Tony Mitchell's ugh
            if active_players[i] == "/players/m/mitchto02.html":
                continue
            href_key = active_players[i]
            '''gather all attributes, some need certain modifications to be fully scraped (due to parsing limitations on my part)
            I comment on the side of each attribute what index contains the actual information for the current scraped player'''
            name = bs2.find('h1', attrs={'itemprop': 'name'})
            pic = bs2.find('img', attrs={'itemscope': 'image'}) #pic['src']
            ppg = bs2.find('h4', string='PTS').find_next_siblings('p') #[0]
            rpg = bs2.find('h4', string='TRB').find_next_siblings('p') #[0]
            apg = bs2.find('h4', string='AST').find_next_siblings('p') #[0]
            team = bs2.find_all(href=re.compile('team')) #[1]
            per = bs2.find('h4', string='PER').find_next_siblings('p') #0
            # submit_to_sql is invoked unbound on the class (no instance made).
            Export_MSSQL.submit_to_sql( href_key, team[1].text.strip(), name.text.strip(), pic['src'], ppg[0].text.strip(), rpg[0].text.strip(), apg[0].text.strip(), per[0].text.strip() )
            print(f'Player {href_key} inserted!')
            '''send information to class MSSQL, where the players href will be the primary key
            TODO: Think of a better way then just sending 8 parameters to the other method,
            maybe a list of some sort.'''
"""
Send the output from class Scraper to this class, then send the data to the MSSQL Table.
"""
class Export_MSSQL:
    """Persist scraped player data into the MSSQL ``NBA_TEAM`` table."""
    @staticmethod
    def submit_to_sql(id, team, name, pic, ppg, rpg, apg, per):
        """Download the player's picture and update his row in NBA_TEAM.

        :param id: href key of the player on basketball-reference.
        :param team: Team name.
        :param name: Player name; the row's primary key is derived from it.
        :param pic: URL of the player's headshot image.
        :param ppg: Points per game.
        :param rpg: Rebounds per game.
        :param apg: Assists per game.
        :param per: Player efficiency rating.
        """
        #Connect to MSSQL Database using the ODBC driver.
        connection_string ='Driver={ODBC Driver 13 for SQL Server};Server=LAPTOP47;Database=TutorialDB;trusted_connection=yes;'
        connection = pypyodbc.connect(connection_string)
        cur = connection.cursor()
        # Build the primary key from the player's name: "First Last" becomes
        # "lastfirst" with punctuation stripped; single-word names are kept
        # as-is (punctuation and spaces stripped).
        primary_key = name.lower()
        if len(primary_key.split()) < 2:
            primary_key = re.sub("[.,\' -]", '', primary_key)
        else:
            primary_key = re.sub("[.,\'-]", '', primary_key.split()[1]) + re.sub("[.,\'-]", '', primary_key.split()[0])
        # Save the headshot locally, then store its absolute path in the table
        # until a cloud-hosted solution is in place.
        urllib.request.urlretrieve(pic, f".\\nba-trade-machine\\Frontend\\static\\images\\{primary_key}.jpg")
        pic = os.path.abspath(f"{primary_key}.jpg")
        # Parameterized query: the previous f-string-built UPDATE interpolated
        # scraped (untrusted) values straight into SQL — an injection risk.
        SQLCMD = "UPDATE NBA_TEAM SET Picture = ? WHERE ID = ?"
        print(f"this is what went in {pic} for user {primary_key}")
        cur.execute(SQLCMD, (pic, primary_key))
        cur.commit()
        cur.close()
# Entry point: instantiating Scraper immediately runs the full scrape
# (network requests plus MSSQL writes).
Scraper()
|
//
// NADInterstitial.h
// NendAd
//
// Created by ADN division on 2014/05/12.
// Copyright (c) 2014年 F@N Communications, Inc. All rights reserved.
//
#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
///-----------------------------------------------
/// @name Constants
///-----------------------------------------------
/**
NADInterstitialClickType
*/
typedef enum {
    DOWNLOAD, // the ad body was tapped
    CLOSE,    // the close control was tapped
} NADInterstitialClickType;
/**
 NADInterstitialStatusCode — result of loading the ad
 (delivered via didFinishLoadInterstitialAdWithStatus:).
 */
typedef enum {
    SUCCESS,
    INVALID_RESPONSE_TYPE, // the response type was not valid
    FAILED_AD_REQUEST,     // the ad request failed
    FAILED_AD_DOWNLOAD,    // the ad download failed
} NADInterstitialStatusCode;
/**
 NADInterstitialShowResult — result of attempting to present the ad.
 (The previous comment said "NADInterstitialShowAdResult", which does not
 match the typedef name.)
 */
typedef enum {
    AD_SHOW_SUCCESS,
    AD_LOAD_INCOMPLETE,     // loading has not finished yet
    AD_REQUEST_INCOMPLETE,  // the ad request has not completed
    AD_DOWNLOAD_INCOMPLETE, // the ad download has not completed
    AD_FREQUENCY_NOT_REACHABLE,
    AD_SHOW_ALREADY         // an interstitial is already being shown
} NADInterstitialShowResult;
/**
A delegate object for each event of Interstitial-AD.
*/
@protocol NADInterstitialDelegate <NSObject>
@optional
/**
 Notify the result of the ad load.
 @param status SUCCESS, or the code describing why the load failed.
 */
- (void) didFinishLoadInterstitialAdWithStatus:(NADInterstitialStatusCode)status;
/**
 Notify the event of the ad click.
 @param type whether the ad body (DOWNLOAD) or the close control (CLOSE) was tapped.
 */
- (void) didClickWithType:(NADInterstitialClickType)type;
@end
/**
The management class of Interstitial-AD.
*/
@interface NADInterstitial : NSObject
/**
 Set the delegate object.
 @warning Please set this to `nil` when the delegate object is deallocated.
 (The property is declared `assign`, not `weak`, so it does not auto-nil.)
 */
@property (nonatomic, assign, readwrite) id<NADInterstitialDelegate> delegate;
/**
 Log setting.
 */
@property (nonatomic, readwrite) BOOL isOutputLog;
/**
 Supported Orientations.
 NOTE(review): element type is not documented here — presumably NSNumber-wrapped
 orientation values; confirm against the SDK documentation.
 */
@property (nonatomic, assign) NSArray* supportedOrientations;
///-----------------------------------------------
/// @name Creating and Initializing Nend Instance
///-----------------------------------------------
/**
 Creates and returns the shared `NADInterstitial` object.
 @return NADInterstitial singleton instance
 */
+ (instancetype) sharedInstance;
///------------------------
/// @name Loading AD
///------------------------
/**
 Load the Interstitial-AD.
 @param apiKey An apiKey issued from the management screen.
 @param spotId A spotId issued from the management screen.
 @warning Please call this when the application starts.
 for example:
 `- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions`
 */
- (void) loadAdWithApiKey:(NSString *)apiKey spotId:(NSString *)spotId;
///----------------------------
/// @name Showing / Closing AD
///----------------------------
/**
 Show the Interstitial-AD on the UIWindow.
 @return NADInterstitialShowResult — AD_SHOW_SUCCESS, or why it could not show.
 */
- (NADInterstitialShowResult) showAd;
/**
 Dismiss the Interstitial-AD.
 @return `YES` AD will be closed, otherwise `NO`.
 */
- (BOOL) dismissAd;
@end
|
export const cisBorderStyle = ["512 512"," <path fill='currentColor' d='M435,16H77A61,61,0,0,0,16,77V435a61,61,0,0,0,61,61H435a61,61,0,0,0,61-61V77A61,61,0,0,0,435,16ZM160,440H128V408h32Zm56,0H184V408h32Zm56,0H240V408h32Zm56,0H296V408h32Zm56,0H352V408h32Zm56,0H408V408h32Zm0-56H408V352h32Zm0-56H408V296h32Zm0-56H408V240h32Zm0-56H408V184h32Zm0-56H408V128h32Zm0-72v16H104V440H72V88A16,16,0,0,1,88,72H440Z'/>"]
|
from __future__ import with_statement
from fabric.api import *
def production():
    # Deployment settings for the production host; every other task reads
    # its paths and commands from the ``env`` populated here.
    projectname = 'pumpwerk'
    basepath = '/srv/pumpwerk.org/%s'
    env.hosts = ['pumpwerk@pumpwerk.org']
    env.path = basepath % projectname
    # The virtualenv lives next to the project as "<project>env".
    env.virtualenv_path = basepath % (projectname + 'env')
    env.push_branch = 'master'
    env.push_remote = 'origin'
    env.reload_cmd = 'supervisorctl restart {0}'.format(projectname)
    env.after_deploy_url = 'http://pumpwerk.org'
def reload_webserver():
    # Restart the app via the supervisor command configured in production().
    run("%(reload_cmd)s" % env)
def migrate():
    # Apply pending Django migrations inside the project virtualenv.
    with prefix("source %(virtualenv_path)s/bin/activate" % env):
        run("%(path)s/manage.py migrate --settings=config.settings.production" % env)
def ping():
    # Smoke-test the deployed site by echoing its HTTP status code.
    run("echo %(after_deploy_url)s returned: \>\>\> $(curl --write-out %%{http_code} --silent --output /dev/null %(after_deploy_url)s)" % env)
def deploy():
    # Full deploy: pull, collect static files, migrate, restart, smoke-test.
    with cd(env.path):
        run("git pull %(push_remote)s %(push_branch)s" % env)
        with prefix("source %(virtualenv_path)s/bin/activate" % env):
            run("./manage.py collectstatic --noinput --settings=config.settings.production")
    migrate()
    reload_webserver()
    ping()
def pip():
    # Update the code and reinstall production requirements, then restart.
    with cd(env.path):
        run("git pull %(push_remote)s %(push_branch)s" % env)
        with prefix("source %(virtualenv_path)s/bin/activate" % env):
            run("pip install -Ur requirements/production.txt")
    reload_webserver()
def soft_deploy():
    # Pull the latest code and restart, skipping static files and migrations.
    with cd(env.path):
        run("git pull %(push_remote)s %(push_branch)s" % env)
    reload_webserver()
    ping()
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE122_Heap_Based_Buffer_Overflow__c_CWE193_char_cpy_67a.c
Label Definition File: CWE122_Heap_Based_Buffer_Overflow__c_CWE193.label.xml
Template File: sources-sink-67a.tmpl.c
*/
/*
* @description
* CWE: 122 Heap Based Buffer Overflow
* BadSource: Allocate memory for a string, but do not allocate space for NULL terminator
* GoodSource: Allocate enough memory for a string and the NULL terminator
* Sinks: cat
* BadSink : Copy string to data using strcpy()
* Flow Variant: 67 Data flow: data passed in a struct from one function to another in different source files
*
* */
#include "std_testcase.h"
#ifndef _WIN32
#include <wchar.h>
#endif
/* Payload passed between source files: a fixed 64-byte name buffer. */
struct data
{
    char name[64];
};
/* Holds a bare function pointer, exercised to keep heap data live. */
struct fp
{
    void (*fp)();
};
/* Target of the function pointer stored in struct fp; prints a fixed line. */
void test()
{
    printLine("That's OK!");
}
/* Wrapper struct used to pass the data pointer by value between files
 * (flow variant 67: data crosses a function boundary inside a struct). */
typedef struct _structType
{
    struct data * structFirst;
} structType;
#ifndef OMITBAD
/* bad function declaration */
/* bad function declaration */
/* BadSink: strcat() into the fixed 64-byte name buffer without a length
 * check — the heap-based buffer overflow this testcase exists to exhibit.
 * The gate also reads source[75], itself an out-of-bounds read for shorter
 * inputs. NOTE(review): d->name is never initialized before strcat, which
 * is undefined behavior even before the overflow — intentional here? */
void badSink(structType myStruct, char *source)
{
    struct data * d;
    d = myStruct.structFirst;
    if (source[0] == '7' && source[1] == '/' && source[2] == '4'
        && source[3] == '2' && source[4] == 'a' && source[5] == '8' && source[75] == 'a')
    {
        /* POTENTIAL FLAW: data may not have enough space to hold source */
        strcat(d->name, source);
    }
}
/* Allocates the struct, routes it through badSink(), then invokes the
 * stored function pointer and frees both heap blocks. */
void bad(char *source)
{
    struct data * d = NULL;
    struct fp * f = NULL;
    structType myStruct;
    d = (struct data *)malloc(sizeof(struct data));
    f = (struct fp *)malloc(sizeof(struct fp));
    if (d == NULL) {exit(-1);}
    if (f == NULL) {exit(-1);}
    f->fp = test;
    myStruct.structFirst = d;
    badSink(myStruct, source);
    f->fp();
    free(f);
    free(d);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B uses the GoodSource with the BadSink */
/* goodG2B uses the GoodSource with the BadSink */
/* Bounded copy: strncat limits the append to 63 chars, leaving room for
 * the terminator. NOTE(review): d->name is never initialized, so strncat
 * onto the garbage buffer is still undefined behavior — the CWE templates
 * normally zero or seed the destination first; confirm against the
 * original generated testcase. */
void goodG2BSink(structType myStruct, char *source)
{
    struct data * d;
    d = myStruct.structFirst;
    strncat(d->name, source, 63);
}
/* Same setup as bad(), but routed through the bounded goodG2BSink(). */
static void goodG2B(char *source)
{
    struct data * d = NULL;
    struct fp * f = NULL;
    structType myStruct;
    d = (struct data *)malloc(sizeof(struct data));
    f = (struct fp *)malloc(sizeof(struct fp));
    if (d == NULL) {exit(-1);}
    if (f == NULL) {exit(-1);}
    f->fp = test;
    myStruct.structFirst = d;
    goodG2BSink(myStruct, source);
    f->fp();
    free(f);
    free(d);
}
/* Public good() wrapper invoked by main(); dispatches to goodG2B(). */
void good(char *source)
{
    goodG2B(source);
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
    /* NOTE(review): argv[1] is used without checking argc — running the
     * binary with no argument passes NULL into good()/bad(). Confirm this
     * is acceptable for the test harness. */
    printLine("Calling good()...");
    good(argv[1]);
    printLine("Finished good()");
    printLine("Calling bad()...");
    bad(argv[1]);
    printLine("Finished bad()");
    return 0;
}
|
from keras.models import Sequential, Model
from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import SGD, Adam, RMSprop
from keras.layers.merge import concatenate
from keras.losses import categorical_crossentropy
from keras.losses import binary_crossentropy
import keras.backend as K
import tensorflow as tf
import imgaug as ia
from tqdm import tqdm
from imgaug import augmenters as iaa
import numpy as np
import pickle
import os, sys, cv2
import time
from generator import BatchGenerator
sys.path.append("..")
from models.yolo_models import get_yolo
# --- Training configuration for the wildebeest YOLO detector ---------------
# Non-zero: resume from the wildebeest checkpoint with a small batch size;
# zero: start from COCO weights with the backbone frozen.
FINE_TUNE=1
LABELS = ['wildebeest']
IMAGE_H, IMAGE_W = 864, 864
GRID_H, GRID_W = 27, 27
# each cell is going to be 32x32
BOX = 3
CLASS = len(LABELS)
CLASS_WEIGHTS = np.ones(CLASS, dtype='float32')
OBJ_THRESHOLD = 0.3#0.5 # this must be for showing the object - should be lower??
NMS_THRESHOLD = 0.3#0.45 # non max suppression - what does this do?
# this is the width/height of the anchor boxes - this will be 2,2 for all 5 - maybe - might be better to use pretrained
#ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
ANCHORS = [53.57159857, 42.28639429, 29.47927551, 51.27168234, 37.15496912, 26.17125211]
ignore_thresh=0.8
# scales - for training maybe?? no idea
# all seem to be in the custom loss function - some method to weight the loss
NO_OBJECT_SCALE = 1.0# upping this to 5 (from 1) to get rid of false positives
OBJECT_SCALE = 5.0
COORD_SCALE = 2.0
CLASS_SCALE = 1.0
# Fine-tuning trains all layers, so the batch must be smaller to fit memory.
if FINE_TUNE:
    BATCH_SIZE = 4
else:
    BATCH_SIZE = 16
WARM_UP_BATCHES = 0
TRUE_BOX_BUFFER = 50
print(len(LABELS))
#true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4))
train_image_folder = 'train_images/' #/home/ctorney/data/coco/train2014/'
train_annot_folder = 'train_images/'
valid_image_folder = train_image_folder#'/home/ctorney/data/coco/val2014/'
valid_annot_folder = train_annot_folder#'/home/ctorney/data/coco/val2014ann/'
# Build the network and load weights (reads .h5 files from ../weights/).
model = get_yolo(IMAGE_W,IMAGE_H)
if FINE_TUNE:
    model.load_weights('../weights/wb-yolo.h5')
else:
    # Start from COCO weights and freeze everything but the last 7 layers.
    model.load_weights('../weights/yolo-v3-coco.h5', by_name=True)
    for layer in model.layers[:-7]:
        layer.trainable = False
print(model.summary())
def yolo_loss(y_true, y_pred):
    """Custom YOLOv3-style loss for a single output scale.

    Both tensors arrive with the anchor axis flattened into the channel axis
    and are reshaped here to [batch, grid_h, grid_w, 3, 4+1+n_classes].
    The returned loss is the per-sample sum of squared errors over the xy
    offsets, wh log-scales, objectness (split into object / no-object terms)
    and per-class scores.  TF1-era API (tf.to_float, tf.Print, tf.assign_add).
    """
    #loss = tf.sqrt(tf.reduce_sum(y_pred))
    # adjust the shape of the y_predict [batch, grid_h, grid_w, 3, 4+1+nb_class]
    #loss = tf.Print(loss, [tf.shape(y_pred)], message='prereshape \t\t', summarize=1000)
    y_pred = tf.reshape(y_pred, tf.concat([tf.shape(y_pred)[:3], tf.constant([3, -1])], axis=0))
    y_true = tf.reshape(y_true, tf.concat([tf.shape(y_true)[:3], tf.constant([3, -1])], axis=0))
    #loss = tf.Print(loss, [tf.shape(y_pred)], message='shape \t\t', summarize=1000)
    #return loss
    # compute grid factor and net factor
    grid_h = tf.shape(y_true)[1]
    grid_w = tf.shape(y_true)[2]
    # the variable to keep track of number of batches processed
    batch_seen = tf.Variable(0.)
    grid_factor = tf.reshape(tf.cast([grid_w, grid_h], tf.float32), [1,1,1,1,2])
    net_h = IMAGE_H
    net_w = IMAGE_W
    net_factor = tf.reshape(tf.cast([net_w, net_h], tf.float32), [1,1,1,1,2])
    """
    Adjust prediction
    """
    # cell_grid[b, y, x, anchor, :] == (x, y): the top-left offset of each cell
    cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(grid_w), [grid_h]), (1, grid_h, grid_w, 1, 1)))
    cell_y = tf.transpose(cell_x, (0,2,1,3,4))
    cell_grid = tf.tile(tf.concat([cell_x,cell_y],-1), [BATCH_SIZE, 1, 1, 3, 1])
    pred_box_xy = (cell_grid[:,:grid_h,:grid_w,:,:] + tf.sigmoid(y_pred[..., :2]))  # sigma(t_xy) + c_xy
    pred_box_wh = y_pred[..., 2:4]  # t_wh
    pred_box_conf = tf.expand_dims(tf.sigmoid(y_pred[..., 4]), 4)  # adjust confidence
    pred_box_class = tf.sigmoid(y_pred[..., 5:])  # adjust class probabilities
    # initialize the masks (1 where a true object is assigned to the anchor)
    object_mask = tf.expand_dims(y_true[..., 4], 4)
    """
    Adjust ground truth
    """
    true_box_xy = y_true[..., 0:2]  # (sigma(t_xy) + c_xy)
    true_box_wh = y_true[..., 2:4]  # t_wh
    true_box_conf = tf.expand_dims(y_true[..., 4], 4)
    true_box_class = y_true[..., 5:]
    anc = tf.constant(ANCHORS, dtype='float', shape=[1,1,1,3,2])
    # decode to fractions of the image so pred and true boxes are comparable
    true_xy = tf.expand_dims(true_box_xy / grid_factor,4)
    true_wh = tf.expand_dims(tf.exp(true_box_wh) * anc / net_factor,4)
    """
    Compare each predicted box to all true boxes
    """
    # initially, drag all objectness of all boxes to 0
    conf_delta = pred_box_conf
    # then, ignore the boxes which have good overlap with some true box
    #true_xy = true_boxes[..., 0:2] / grid_factor
    #true_wh = true_boxes[..., 2:4] / net_factor
    #for b in range(BOX):
    #true_xy = y_true[..., 0:2]
    #true_wh = y_true[..., 2:4]
    # ya = y_true[...,0,:]
    #ya = y_true[y_true[...,0,4],0,1]
    #ya = y_true[y_true[...,0,4]==1,0,1]
    # ya = y_true[...,4]==1
    # ya=tf.where(y_true[...,4],true_wh)
    true_wh_half = true_wh / 2.
    true_mins = true_xy - true_wh_half
    true_maxes = true_xy + true_wh_half
    pred_xy = tf.expand_dims(pred_box_xy / grid_factor, 4)
    # pred_xy = pred_box_xy / grid_factor
    # pred_wh = tf.exp(pred_box_wh) * anc / net_factor
    pred_wh = tf.expand_dims(tf.exp(pred_box_wh) * anc / net_factor, 4)
    pred_wh_half = pred_wh / 2.
    pred_mins = pred_xy - pred_wh_half
    pred_maxes = pred_xy + pred_wh_half
    # loss = tf.Print(loss, [tf.shape(pred_maxes)], message='shape \t\t', summarize=1000)
    # loss = tf.Print(loss, [tf.shape(true_maxes)], message='shape \t\t', summarize=1000)
    # standard IoU: intersection area over union area
    intersect_mins = tf.maximum(pred_mins, true_mins)
    intersect_maxes = tf.minimum(pred_maxes, true_maxes)
    intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
    true_areas = true_wh[..., 0] * true_wh[..., 1]
    pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = tf.truediv(intersect_areas, union_areas)
    best_ious = tf.reduce_max(iou_scores, axis=-1)
    # conf_delta = tf.where(best_ious<ignore_thresh,conf_delta[...,0], tf.zeros_like(conf_delta[...,0]))
    # conf_delta = tf.expand_dims(conf_delta, 4)
    # d_delta = tf.expand_dims(tf.to_float(best_ious < ignore_thresh), 4)
    #d_delta =tf.to_float(best_ious < ignore_thresh)
    # d_delta = tf.expand_dims(tf.to_float(true_wh < ignore_thresh), 4)
    #d_delta = tf.to_float(best_ious < ignore_thresh)
    # loss = tf.Print(loss, [tf.shape(best_ious<ignore_thresh)], message='shape \t\t', summarize=1000)
    # loss = tf.Print(loss, [tf.shape(conf_delta)], message='shape \t\t', summarize=1000)
    # return loss
    # conf_delta = d_delta
    # predictions overlapping a true box well enough are not punished as background
    conf_delta *= tf.expand_dims(tf.to_float(best_ious < ignore_thresh), 4)
    # return loss
    """
    Compute some online statistics
    """
    # true_wh_half = true_wh / 2.
    # true_mins = true_xy - true_wh_half
    # true_maxes = true_xy + true_wh_half
    #
    # pred_xy = pred_box_xy / grid_factor
    # pred_wh = tf.exp(pred_box_wh) * anc / net_factor
    #
    # pred_wh_half = pred_wh / 2.
    # pred_mins = pred_xy - pred_wh_half
    # pred_maxes = pred_xy + pred_wh_half #
    #
    # intersect_mins = tf.maximum(pred_mins, true_mins)
    # intersect_maxes = tf.minimum(pred_maxes, true_maxes)
    # intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
    # intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
    #
    # true_areas = true_wh[..., 0] * true_wh[..., 1]
    # pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    #
    # union_areas = pred_areas + true_areas - intersect_areas
    # iou_scores = tf.truediv(intersect_areas, union_areas)
    # iou_scores = object_mask * tf.expand_dims(iou_scores, -1)
    #
    # count = tf.reduce_sum(object_mask)
    # count_noobj = tf.reduce_sum(1-object_mask)
    # detect_mask = tf.to_float(pred_box_conf >= 0.5)
    # class_mask = tf.expand_dims(tf.to_float(tf.equal(tf.argmax(pred_box_class, -1), tf.argmax(true_box_class, -1))), 4)
    # recall50 = tf.to_float(iou_scores >= 0.5 ) * detect_mask
    # recall75 = tf.to_float(iou_scores >= 0.75) * detect_mask
    # recall50_c = tf.reduce_sum(recall50 * class_mask) / (count + 1e-3)
    # recall75_c = tf.reduce_sum(recall75 * class_mask) / (count + 1e-3)
    # recall50 = tf.reduce_sum(recall50) / (count + 1e-3)
    # recall75 = tf.reduce_sum(recall75) / (count + 1e-3)
    # avg_iou = tf.reduce_sum(iou_scores) / (count + 1e-3)
    # avg_obj = tf.reduce_sum(detect_mask * object_mask) / (count + 1e-3)
    # avg_noobj = tf.reduce_sum(detect_mask * (1-object_mask)) / (count_noobj + 1e-3)
    # avg_cat = tf.reduce_sum(pred_box_class * true_box_class) / (count + 1e-3)
    #
    """
    Warm-up training
    """
    batch_seen = tf.assign_add(batch_seen, 1.)
    # true_box_xy, true_box_wh, xywh_mask = tf.cond(tf.less(batch_seen, self.warmup_batches+1),
    # lambda: [true_box_xy + (0.5 + self.cell_grid[:,:grid_h,:grid_w,:,:]) * (1-object_mask),
    # true_box_wh + tf.zeros_like(true_box_wh) * (1-object_mask),
    # tf.ones_like(object_mask)],
    # lambda: [true_box_xy,
    # true_box_wh,
    # object_mask])
    # warm-up disabled: coordinate terms are only applied where objects exist
    xywh_mask = object_mask
    """
    Compare each true box to all anchor boxes
    """
    xywh_scale = tf.exp(true_box_wh) * anc / net_factor
    xywh_scale = tf.expand_dims(2 - xywh_scale[..., 0] * xywh_scale[..., 1], axis=4)  # the smaller the box, the bigger the scale
    xy_delta = xywh_mask * (pred_box_xy-true_box_xy) * xywh_scale
    wh_delta = xywh_mask * (pred_box_wh-true_box_wh) * xywh_scale
    #loss = tf.Print(loss, [tf.shape(xy_delta)], message='shape \t\t', summarize=1000)
    #loss = tf.Print(loss, [tf.shape(object_mask)], message='shape \t\t', summarize=1000)
    #loss = tf.Print(loss, [tf.shape(pred_box_conf)], message='shape \t\t', summarize=1000)
    #loss = tf.Print(loss, [tf.shape(true_box_conf)], message='shape \t\t', summarize=1000)
    #conf_delta = (object_mask * (pred_box_conf-true_box_conf) * 5) + ((1-object_mask) * conf_delta)
    # NOTE(review): the 5 and 2 weights here look like hard-coded copies of
    # OBJECT_SCALE / NO_OBJECT_SCALE-style factors -- confirm intent.
    obj_delta = (object_mask * (pred_box_conf-true_box_conf) * 5)
    no_obj_delta = ((1-object_mask) * conf_delta)*2
    class_delta = object_mask * (pred_box_class-true_box_class)
    #class_delta = object_mask * (pred_box_conf-true_box_conf)
    # closs = tf.reduce_sum(tf.square(conf_delta), list(range(1,5))) #+ \
    # tf.reduce_sum(tf.square(class_delta), list(range(1,5)))
    # sum of squares over grid, anchor and channel axes -> one loss per sample
    loss = tf.reduce_sum(tf.square(xy_delta), list(range(1,5))) + \
           tf.reduce_sum(tf.square(wh_delta), list(range(1,5))) + \
           tf.reduce_sum(tf.square(obj_delta), list(range(1,5))) + \
           tf.reduce_sum(tf.square(no_obj_delta), list(range(1,5))) + \
           tf.reduce_sum(tf.square(class_delta), list(range(1,5)))
    # noloss = tf.reduce_sum(tf.square(no_obj_delta), list(range(1,5)))
    # loss = tf.Print(loss, [noloss], message='shape \t\t', summarize=1000)
    # loss = tf.Print(loss, [tf.shape(closs)], message='conshape \t\t', summarize=1000)
    # return closs
    #loss = tf.cond(tf.less(batch_seen, self.warmup_batches+1), # add 10 to the loss if this is the warmup stage
    # lambda: loss + 10,
    # lambda: loss)
    # loss = tf.Print(loss, [avg_obj], message='\n\n avg_obj \t', summarize=1000)
    # loss = tf.Print(loss, [avg_noobj], message='\n avg_noobj \t\n', summarize=1000)
    # loss = tf.Print(loss, [grid_h, avg_iou], message='avg_iou \t\t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, avg_cat], message='avg_cat \t\t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, recall50], message='recall50 \t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, recall75], message='recall75 \t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, recall50_c], message='recall50_cat \t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, recall75_c], message='recall75_Cat \t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, count], message='count \t', summarize=1000)
    # loss = tf.Print(loss, [grid_h, tf.reduce_sum(loss)], message='loss: \t', summarize=1000)
    #
    return loss
# Configuration dict for the legacy BatchGenerator API (kept for reference;
# the generator below is constructed with keyword arguments instead).
generator_config = {
    'IMAGE_H' : IMAGE_H,
    'IMAGE_W' : IMAGE_W,
    'GRID_H' : GRID_H,
    'GRID_W' : GRID_W,
    'BOX' : BOX,
    'LABELS' : LABELS,
    'CLASS' : len(LABELS),
    'ANCHORS' : ANCHORS,
    'BATCH_SIZE' : BATCH_SIZE,
    'TRUE_BOX_BUFFER' : 50,
}
from operator import itemgetter
import random
### read saved pickle of parsed annotations
with open('train_images/annotations-checked-2.pickle', 'rb') as fp:
    all_imgs = pickle.load(fp)
num_ims = len(all_imgs)
indexes = np.arange(num_ims)
random.shuffle(indexes)
# validation split disabled: every image is used for training
num_val = 0#num_ims//10
#valid_imgs = list(itemgetter(*indexes[:num_val].tolist())(all_imgs))
train_imgs = list(itemgetter(*indexes[num_val:].tolist())(all_imgs))
def normalize(image):
    """Scale 8-bit pixel values into the [0, 1] range."""
    return image / 255.
# Training data generator; network input size is fixed (min == max) and
# augmentation jitter is disabled.
train_batch = BatchGenerator(
    instances = train_imgs,
    anchors = ANCHORS,
    labels = LABELS,
    downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3
    max_box_per_image = 1000,
    batch_size = BATCH_SIZE,
    min_net_size = IMAGE_H,
    max_net_size = IMAGE_H,
    shuffle = False,
    jitter = 0.0,
    norm = normalize
)
#train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize, jitter=False)
#valid_batch = BatchGenerator(valid_imgs, generator_config, norm=normalize, jitter=False)
# In[104]:
# **Setup a few callbacks and start the training**
# In[105]:
# Much smaller learning rate (and more epochs) when fine-tuning existing
# weights than when starting from the COCO checkpoint.
if FINE_TUNE:
    optimizer = Adam(lr=0.5e-6, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    EPOCHS = 200
else:
    optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    EPOCHS = 25
# optimizer = SGD(lr=1e-5, decay=0.0005, momentum=0.9)
model.compile(loss=yolo_loss, optimizer=optimizer)
wt_file = '../weights/wb-yolo.h5'
#optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)
# Stop once the *training* loss plateaus (no validation set is configured),
# and checkpoint only the best weights by the same metric.
early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=5,
                           mode='min',
                           verbose=1)
checkpoint = ModelCheckpoint(wt_file,
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             period=1)
start = time.time()
model.fit_generator(generator = train_batch,
                    steps_per_epoch = len(train_batch),
                    epochs = EPOCHS,
                    verbose = 1,
                    # validation_data = valid_batch,
                    # validation_steps = len(valid_batch),
                    callbacks = [checkpoint, early_stop],#, tensorboard],
                    max_queue_size = 3)
end = time.time()
print('Training took ' + str(end - start) + ' seconds')
model.save_weights(wt_file)
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
import time
from datetime import datetime, timedelta
from random import choice, randint

from telethon.events import StopPropagation
from telethon.tl.functions.account import UpdateProfileRequest

from userbot import AFKREASON, BOTLOG_CHATID, PM_AUTO_BAN, bot, owner
from userbot.events import geez_cmd, register
# ========================= CONSTANTS ============================
# Canned replies sent to people who message/mention the owner while AFK.
AFKSTR = [
    f"**Maaf {owner} Sedang OFF!**",
    f"**Maaf {owner} Sedang OFF Tunggu Sampai Online!**",
    f"**{owner} Sedang OFF Tunggulah Sampai Online**",
    f"**Maaf {owner} Sedang OFF!**",
]
USER_AFK = {}
afk_time = None      # datetime of when AFK mode was enabled
afk_start = {}
# The handlers below declare these as ``global`` and read/increment them,
# but they were never initialized at module level, so the first incoming
# message while AFK raised NameError.  Initialize them up front.
ISAFK = False        # whether AFK mode is currently active
COUNT_MSG = 0        # messages received while AFK
USERS = {}           # sender_id -> number of messages they sent while AFK
# =================================================================
@bot.on(geez_cmd(outgoing=True, pattern=r"off(?: |$)(.*)"))
async def set_afk(afk_e):
    """For .off command, allows you to inform people that you are afk when they message you.

    Resets the AFK bookkeeping globals, records the start time, announces
    the AFK state (with the optional reason) and logs to the bot-log chat.
    """
    string = afk_e.pattern_match.group(1)
    global ISAFK
    global AFKREASON
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    user = await bot.get_me()
    USER_AFK = {}
    afk_time = None
    afk_end = {}
    start_1 = datetime.now()
    # NOTE(review): afk_start is initialized as a dict at module level but is
    # reassigned to a datetime here -- confirm nothing depends on the dict.
    afk_start = start_1.replace(microsecond=0)
    if string:
        AFKREASON = string
        await afk_e.edit(
            f"**✆ {owner} Telah OFF **\
\n❖▸ **Karena :** `{string}`"
        )
    else:
        await afk_e.edit(f"**✆ {owner} Telah OFF !!**")
    if user.last_name:
        await afk_e.client(
            UpdateProfileRequest(
                # NOTE(review): "+ ''" is a no-op -- leftover from a removed
                # AFK suffix that used to be appended to the last name.
                first_name=user.first_name, last_name=user.last_name + ""
            )
        )
    else:
        await afk_e.client(
            UpdateProfileRequest(first_name=user.first_name, last_name="")
        )
    if BOTLOG_CHATID:
        await afk_e.client.send_message(BOTLOG_CHATID, f"#OFF\n**{owner} Telah OFF!**")
    ISAFK = True
    afk_time = datetime.now()
    # stop other handlers from also processing this outgoing command
    raise StopPropagation
@register(outgoing=True)
async def type_afk_is_not_true(notafk):
    """Clear AFK status automatically when you send any message while AFK.

    Restores the profile last name, announces the return, and reports to the
    bot-log chat how many messages arrived (and from whom) while AFK.
    """
    global ISAFK
    global COUNT_MSG
    global USERS
    global AFKREASON
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    user = await bot.get_me()
    last = user.last_name
    # set_afk() appends an *empty* suffix to the last name, so there is
    # nothing to strip when restoring it.  The previous code always sliced
    # 12 characters off (last[:-12]) -- a leftover from a removed AFK
    # suffix -- which silently corrupted the user's real last name.
    last1 = last if last else ""
    back_alive = datetime.now()
    afk_end = back_alive.replace(microsecond=0)
    if ISAFK:
        ISAFK = False
        msg = await notafk.respond(f"**{owner} Telah Kembali!**")
        # asyncio.sleep instead of time.sleep: time.sleep would block the
        # whole event loop (and every other handler) for 7 seconds.
        await asyncio.sleep(7)
        await msg.delete()
        await notafk.client(
            UpdateProfileRequest(first_name=user.first_name, last_name=last1)
        )
        if BOTLOG_CHATID:
            await notafk.client.send_message(
                BOTLOG_CHATID,
                "Anda Mendapatkan "
                + str(COUNT_MSG)
                + " Pesan Dari "
                + str(len(USERS))
                + " Obrolan Saat Anda OFFLINE",
            )
            # per-sender breakdown of messages received while AFK
            for i in USERS:
                name = await notafk.client.get_entity(i)
                name0 = str(name.first_name)
                await notafk.client.send_message(
                    BOTLOG_CHATID,
                    "["
                    + name0
                    + "](tg://user?id="
                    + str(i)
                    + ")"
                    + " Mengirim Mu "
                    + "`"
                    + str(USERS[i])
                    + " Pesan`",
                )
        COUNT_MSG = 0
        USERS = {}
        AFKREASON = None
@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
    """Notify people who mention you in a group that you are AFK.

    Replies (rate-limited per sender) with how long you have been away and
    the optional reason, and tallies the message for the return report.
    """
    global COUNT_MSG
    global USERS
    global ISAFK
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    user = await bot.get_me()  # pylint:disable=E0602
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "**Terakhir Online**"
    if mention.message.mentioned and not (await mention.get_sender()).bot and ISAFK:
        now = datetime.now()
        datime_since_afk = now - afk_time
        # total_seconds(), not .seconds: .seconds is only the sub-day
        # remainder, so the old code could never report a day or more.
        time = float(datime_since_afk.total_seconds())
        days = time // (24 * 3600)
        time %= 24 * 3600
        hours = time // 3600
        time %= 3600
        minutes = time // 60
        time %= 60
        seconds = time
        if days == 1:
            afk_since = "**Kemarin**"
        elif days > 1:
            if days > 6:
                # timedelta is imported from datetime; the old code called
                # datetime.timedelta, which raised AttributeError because
                # only the datetime *class* was imported.
                date = now + timedelta(
                    days=-days, hours=-hours, minutes=-minutes
                )
                # %H:%M (hour:minute); %H:%I printed the hour twice.
                afk_since = date.strftime("%A, %Y %B %m, %H:%M")
            else:
                wday = now + timedelta(days=-days)
                afk_since = wday.strftime("%A")
        elif hours >= 1:
            # >= 1 so that exactly one hour is not reported as minutes only
            afk_since = f"`{int(hours)} Jam {int(minutes)} Menit`"
        elif minutes > 0:
            afk_since = f"`{int(minutes)} Menit {int(seconds)} Detik`"
        else:
            afk_since = f"`{int(seconds)} Detik`"
        if mention.sender_id not in USERS:
            if AFKREASON:
                await mention.reply(
                    f"**✆ {owner} Sedang OFF** {afk_since} **Yang Lalu.**"
                    f"\n❖▸ **Karena :** `{AFKREASON}`"
                )
            else:
                await mention.reply(str(choice(AFKSTR)))
            USERS.update({mention.sender_id: 1})
        else:
            # only answer repeat senders occasionally to avoid spamming
            if USERS[mention.sender_id] % randint(2, 4) == 0:
                if AFKREASON:
                    await mention.reply(
                        f"**✆ {owner} Masih OFF!!** {afk_since} **Yang Lalu.**"
                        f"\n❖▸ **Karena :** `{AFKREASON}`"
                    )
                else:
                    await mention.reply(str(choice(AFKSTR)))
            USERS[mention.sender_id] = USERS[mention.sender_id] + 1
        COUNT_MSG = COUNT_MSG + 1
@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
    """Inform people who PM you that you are AFK.

    Skips bots and Telegram's service account (777000), honours PM-permit
    approval when PM_AUTO_BAN is enabled, rate-limits repeat senders, and
    tallies messages for the return report.
    """
    global ISAFK
    global USERS
    global COUNT_MSG
    # (duplicate ``global`` declarations from the original were removed)
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "**Belum Lama**"
    if (
        sender.is_private
        and sender.sender_id != 777000
        and not (await sender.get_sender()).bot
    ):
        if PM_AUTO_BAN:
            try:
                from userbot.modules.sql_helper.pm_permit_sql import is_approved
                apprv = is_approved(sender.sender_id)
            except AttributeError:
                # SQL helpers unavailable -> treat everyone as approved
                apprv = True
        else:
            apprv = True
        if apprv and ISAFK:
            now = datetime.now()
            datime_since_afk = now - afk_time  # pylint:disable=E0602
            # total_seconds(), not .seconds: .seconds is only the sub-day
            # remainder, so the old code could never report a day or more.
            time = float(datime_since_afk.total_seconds())
            days = time // (24 * 3600)
            time %= 24 * 3600
            hours = time // 3600
            time %= 3600
            minutes = time // 60
            time %= 60
            seconds = time
            if days == 1:
                afk_since = "**Kemarin**"
            elif days > 1:
                if days > 6:
                    # timedelta is imported from datetime; the old code's
                    # datetime.timedelta raised AttributeError.
                    date = now + timedelta(
                        days=-days, hours=-hours, minutes=-minutes
                    )
                    # %H:%M (hour:minute); %H:%I printed the hour twice.
                    afk_since = date.strftime("%A, %Y %B %m, %H:%M")
                else:
                    wday = now + timedelta(days=-days)
                    afk_since = wday.strftime("%A")
            elif hours >= 1:
                # >= 1 so exactly one hour is not reported as minutes only
                afk_since = f"`{int(hours)} Jam {int(minutes)} Menit`"
            elif minutes > 0:
                afk_since = f"`{int(minutes)} Menit {int(seconds)} Detik`"
            else:
                afk_since = f"`{int(seconds)} Detik`"
            if sender.sender_id not in USERS:
                if AFKREASON:
                    await sender.reply(
                        f"✆ **{owner} Sedang OFF** {afk_since} **Yang Lalu**."
                        f"\n❖▸ **Karena :** `{AFKREASON}`"
                    )
                else:
                    await sender.reply(str(choice(AFKSTR)))
                USERS.update({sender.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            elif apprv:
                # only answer repeat senders occasionally to avoid spamming
                if USERS[sender.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await sender.reply(
                            f"✆ **{owner} Sedang OFF!!** {afk_since} **Yang Lalu. **"
                            f"\n❖▸ **Karena :** `{AFKREASON}`"
                        )
                    else:
                        await sender.reply(str(choice(AFKSTR)))
                USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                COUNT_MSG = COUNT_MSG + 1
|
import React from 'react';
import moment from 'moment';
import {createComponentWithIntl} from '@ciscospark/react-test-utils';
import DaySeparator from '.';
describe('DaySeparator component', () => {
  // Fixed reference dates so the snapshots are stable no matter when the
  // suite actually runs.
  const today = moment('2001-01-31');
  const yesterday = moment(today).subtract(1, 'days');
  const aMonthAgo = moment(today).subtract(1, 'months');
  const twoYearsAgo = moment(today).subtract(2, 'years');

  it('renders properly for today', () => {
    const fromDate = aMonthAgo;
    const now = today;
    const toDate = today;
    const component = createComponentWithIntl(<DaySeparator
      fromDate={fromDate}
      now={now}
      toDate={toDate}
    />);
    expect(component).toMatchSnapshot();
  });

  it('renders properly for yesterday', () => {
    const fromDate = aMonthAgo;
    const now = today;
    const toDate = yesterday;
    const component = createComponentWithIntl(<DaySeparator
      fromDate={fromDate}
      now={now}
      toDate={toDate}
    />);
    expect(component).toMatchSnapshot();
  });

  it('renders properly for a month ago', () => {
    const fromDate = aMonthAgo;
    const now = today;
    const toDate = aMonthAgo;
    const component = createComponentWithIntl(<DaySeparator
      fromDate={fromDate}
      now={now}
      toDate={toDate}
    />);
    expect(component).toMatchSnapshot();
  });

  it('renders properly for more than a year ago', () => {
    // fromDate one day earlier than toDate so the separator spans a boundary
    const fromDate = moment(twoYearsAgo).subtract(1, 'days');
    const now = today;
    const toDate = twoYearsAgo;
    const component = createComponentWithIntl(<DaySeparator
      fromDate={fromDate}
      now={now}
      toDate={toDate}
    />);
    expect(component).toMatchSnapshot();
  });
});
|
//
// License type: BSD 3-Clause License
// License copy: https://github.com/Telecominfraproject/wlan-cloud-ucentralgw/blob/master/LICENSE
//
// Created by Stephane Bourque on 2021-03-04.
// Arilia Wireless Inc.
//
#pragma once
#include "framework/MicroService.h"
namespace OpenWifi {
    /// REST handler for /api/v1/blacklist/{serialNumber}.
    /// Registers GET/POST/PUT/DELETE/OPTIONS with the generic RESTAPIHandler
    /// base; the verb-specific behaviour is implemented in the .cpp file.
    class RESTAPI_blacklist : public RESTAPIHandler {
      public:
        RESTAPI_blacklist(const RESTAPIHandler::BindingMap &bindings, Poco::Logger &L, RESTAPI_GenericServer & Server , uint64_t TransactionId , bool Internal)
            : RESTAPIHandler(bindings, L,
                             std::vector<std::string>{Poco::Net::HTTPRequest::HTTP_GET,
                                                      Poco::Net::HTTPRequest::HTTP_POST,
                                                      Poco::Net::HTTPRequest::HTTP_PUT,
                                                      Poco::Net::HTTPRequest::HTTP_DELETE,
                                                      Poco::Net::HTTPRequest::HTTP_OPTIONS},
                             Server,
                             TransactionId,
                             Internal) {}
        /// URI template this handler is mounted on.
        static const std::list<const char *> PathName() { return std::list<const char *>{"/api/v1/blacklist/{serialNumber}"};}
        void DoGet() final;
        void DoDelete() final;
        void DoPost() final;
        void DoPut() final;
    };
}
|
"""Gather -- Collect all your plugins
Gather allows a way to register plugins.
It features the ability to register the plugins from any module,
in any package, in any distribution.
A given module can register plugins of multiple types.
In order to have anything registered from a package,
it needs to declare that it supports :code:`gather` in its `setup.py`:
.. code::
entry_points={
'gather': [
"dummy=ROOT_PACKAGE:dummy",
]
The :code:`ROOT_PACKAGE` should point to the Python name of the package:
i.e., what users are expected to :code:`import` at the top-level.
Note that while having special facilities to run functions as subcommands,
Gather can be used to collect anything.
"""
import importlib
import sys
import pkg_resources
import attr
import venusian
def _get_modules():
for entry_point in pkg_resources.iter_entry_points(group='gather'):
module = importlib.import_module(entry_point.module_name)
yield module
class GatherCollisionError(ValueError):
    """Two or more plugins registered for the same name.

    Raised by the ``exactly_one`` collection strategy (and surfaced via
    ``Collector.collect``) when one name is registered more than once.
    """
def _one_of(_registry, _effective_name, objct):
"""
Assign one of the possible options.
When given as a collection strategy to :code:`collect`,
this will assign one of the options to a name in case more
than one item is registered to the same name.
This is the default.
"""
return objct
def _all(registry, effective_name, objct):
"""
Assign all of the possible options.
Collect all registered items into a set,
and assign that set to a name. Note that
even if only one item is assigned to a name,
that name will be assigned to a set of length 1.
"""
myset = registry.get(effective_name, set())
myset.add(objct)
return myset
def _exactly_one(registry, effective_name, objct):
"""
Raise an error on colliding registration.
If more than one item is registered to the
same name, raise a :code:`GatherCollisionError`.
"""
if effective_name in registry:
raise GatherCollisionError("Attempt to double register",
registry, effective_name, objct)
return objct
@attr.s(frozen=True)
class Collector(object):
    """
    A plugin collector.

    A collector allows to *register* functions or classes by modules,
    and *collect*-ing them when they need to be used.
    """
    # Optional collector name (used only for identification/debugging).
    name = attr.ib(default=None)
    # Venusian scan depth; raise this when register() is wrapped by
    # another decorator so the right frame is attached.
    depth = attr.ib(default=1)

    # The three built-in collection strategies, exposed as class attributes
    # so callers can write e.g. ``collector.collect(Collector.all)``.
    one_of = staticmethod(_one_of)
    all = staticmethod(_all)
    exactly_one = staticmethod(_exactly_one)

    def register(self, name=None, transform=lambda x: x):
        """
        Register a class or function

        Args:
            name (str): optional. Name to register the class or function as.
                (default is name of object)
            transform (callable): optional. A one-argument function. Will be called,
                and the return value used in collection.
                Default is identity function

        This is meant to be used as a decorator:

        .. code::

            @COLLECTOR.register()
            def specific_subcommand(args):
                pass

            @COLLECTOR.register(name='another_specific_name')
            def main(args):
                pass
        """
        def callback(scanner, inner_name, objct):
            ("""
            Venusian_ callback, called from scan
            .. _Venusian: http://docs.pylonsproject.org/projects/"""
            """venusian/en/latest/api.html#venusian.attach
            """)
            # Only react to scans initiated by *this* collector.
            tag = getattr(scanner, 'tag', None)
            if tag is not self:
                return
            if name is None:
                effective_name = inner_name
            else:
                effective_name = name
            objct = transform(objct)
            scanner.update(effective_name, objct)
        def attach(func):
            """Attach callback to be called when object is scanned"""
            venusian.attach(func, callback, depth=self.depth)
            return func
        return attach

    def collect(self, strategy=one_of.__func__):
        """
        Collect all registered functions or classes.

        Scans every module that declares a ``gather`` entry point and
        gathers items registered against this collector, combining
        same-name registrations according to ``strategy``.

        Returns a dictionary mapping names to registered elements.
        """
        def ignore_import_error(_unused):
            """
            Ignore ImportError during collection.

            Some modules raise import errors for various reasons,
            and should be just treated as missing.
            """
            if not issubclass(sys.exc_info()[0], ImportError):
                raise  # pragma: no cover
        params = _ScannerParameters(strategy=strategy)
        scanner = venusian.Scanner(update=params.update, tag=self)
        for module in _get_modules():
            scanner.scan(module, onerror=ignore_import_error)
        # Collision errors are deferred until the full scan completes.
        params.raise_if_needed()
        return params.registry
@attr.s
class _ScannerParameters(object):
    """
    Parameters for scanner

    Update the registry respecting the strategy,
    and raise errors at the end (so one collision does not abort the scan).
    """
    # Deferred GatherCollisionError, raised by raise_if_needed()
    _please_raise = attr.ib(init=False, default=None)
    # Collection strategy callable: (registry, name, objct) -> stored value
    _strategy = attr.ib()
    # name -> collected object(s), filled in during the scan
    registry = attr.ib(init=False, default=attr.Factory(dict))

    def update(self, name, objct):
        """Update registry with name->objct"""
        try:
            res = self._strategy(self.registry, name, objct)
            self.registry[name] = res
        except GatherCollisionError as exc:
            # Remember the error but keep scanning; surfaced later.
            self._please_raise = exc

    def raise_if_needed(self):
        """Raise exception if any of the updates failed."""
        if self._please_raise is not None:
            raise self._please_raise
def run(argv, commands, version, output):
    """
    Run the specified subcommand.

    Args:
        argv (list of str): Arguments to be processed
        commands (mapping of str to callables): Commands (usually collected by a :code:`Collector`)
        version (str): Version to display if :code:`--version` is asked
        output (file): Where to write output to
    """
    if not argv:
        argv = argv + ['help']
    command = argv[0]
    if command in ('version', '--version'):
        output.write("Version {}\n".format(version))
        return
    unknown = command not in commands
    if command in ('help', '--help') or unknown:
        output.write("Available subcommands:\n")
        for known in commands.keys():
            output.write("\t{}\n".format(known))
        output.write("Run subcommand with '--help' for more information\n")
        return
    commands[command](argv)
@attr.s(frozen=True)
class Wrapper(object):
    """Add extra data to an object"""
    # The wrapped object itself.
    original = attr.ib()
    # The extra data glued onto it.
    extra = attr.ib()

    @classmethod
    def glue(cls, extra):
        """
        Glue extra data to an object

        Args:
            extra: what to add

        Returns:
            callable: function of one argument that returns a :code:`Wrapper`

        This method is useful mainly as the :code:`transform` parameter
        of a :code:`register` call.
        """
        def ret(original):
            """Return a :code:`Wrapper` with the original and extra"""
            return cls(original=original, extra=extra)
        return ret
# Public API of the gather package.
__all__ = ['Collector', 'run', 'Wrapper']
|
# def findProfession(level, pos):
# if level == 1:
# return "Engineer"
# tot_nodes = 1
# for i in range(1, level - 1):
# tot_nodes += i*2
# pos -= tot_nodes
# while pos not in range(1, 4):
# pos = (pos - 1)//4
# pos += 1
# if pos == 1 or pos == 4:
#
#
#
#
# print(findProfession(3, 12))
#
#
|
from typing import Set
from sqlalchemy.orm import joinedload, Session
from sqlalchemy.orm.query import Query
from aspen.app.views.api_utils import authz_sample_filters
from aspen.database.models import DataType, Sample, UploadedPathogenGenome
from aspen.database.models.usergroup import User
class FastaStreamer:
    """Streams FASTA-formatted sequence chunks for a set of samples.

    The query is built (with authorization filters applied) in the
    constructor; ``stream()`` lazily yields header and sequence strings
    suitable for concatenation into a FASTA response body.
    """
    def __init__(self, user: User, sample_ids: Set[str], db_session: Session):
        self.user = user
        # Groups that granted this user's group visibility into their
        # private identifiers.
        self.cansee_groups_private_identifiers: Set[int] = {
            cansee.owner_group_id
            for cansee in user.group.can_see
            if cansee.data_type == DataType.PRIVATE_IDENTIFIERS
        }
        # query for samples
        self.all_samples: Query = (
            db_session.query(Sample)
            .yield_per(
                5
            )  # Streams a few DB rows at a time but our query must return one row per resolved object.
            .options(
                joinedload(Sample.uploaded_pathogen_genome, innerjoin=True).undefer(
                    UploadedPathogenGenome.sequence
                ),
            )
        )
        # Enforce AuthZ
        self.all_samples = authz_sample_filters(self.all_samples, sample_ids, user)

    def stream(self):
        """Yield FASTA text chunks: a ">" header line, the sequence, then a newline."""
        for sample in self.all_samples:
            if sample.uploaded_pathogen_genome:
                pathogen_genome: UploadedPathogenGenome = (
                    sample.uploaded_pathogen_genome
                )
                # Drop embedded FASTA header (">") and comment (";") lines so
                # only the raw bases remain, concatenated without newlines.
                sequence: str = "".join(
                    [
                        line
                        for line in pathogen_genome.sequence.splitlines()  # type: ignore
                        if not (line.startswith(">") or line.startswith(";"))
                    ]
                )
                # Trim leading/trailing runs of N/n (unknown bases).
                stripped_sequence: str = sequence.strip("Nn")
                # use private id if the user has access to it, else public id
                if (
                    sample.submitting_group_id == self.user.group_id
                    or sample.submitting_group_id
                    in self.cansee_groups_private_identifiers
                    or self.user.system_admin
                ):
                    yield (f">{sample.private_identifier}\n")  # type: ignore
                else:
                    yield (f">{sample.public_identifier}\n")
                yield (stripped_sequence)
                yield ("\n")
|
from .. import env
from ..exceptions import EnvironmentFileNotFound
class YamlFileSpec(object):
    """Spec that loads a conda environment definition from a YAML file."""
    _environment = None

    def __init__(self, filename=None, **kwargs):
        # Extra keyword arguments are accepted (and ignored) so every spec
        # class shares a uniform constructor signature.
        self.filename = filename
        self.msg = None

    def can_handle(self):
        """Return True if ``filename`` parses as an environment file.

        On failure, stores a human-readable reason in ``self.msg`` and
        returns False.
        """
        try:
            self._environment = env.from_file(self.filename)
        except EnvironmentFileNotFound as err:
            self.msg = str(err)
            return False
        except TypeError:
            self.msg = "{} is not a valid yaml file.".format(self.filename)
            return False
        return True

    @property
    def environment(self):
        # Parse lazily on first access.
        if not self._environment:
            self.can_handle()
        return self._environment
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-avx512skx-mul32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
size_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
assert(channels != 0);
assert(output_width != 0);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx512.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx512.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx512.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
const int8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
}
const int8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
}
const int8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
}
const int8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
}
const int8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
}
const int8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
}
const int8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
}
const int8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
}
const int8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
}
const int8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
}
const int8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
}
const int8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
}
const int8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
}
const int8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
}
const int8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
}
const int8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 16; c -= 16) {
__m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
const __m512i vi0x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i0));
const __m512i vk0x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
i0 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
const __m512i vi1x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i1));
const __m512i vk1x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
i1 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
const __m512i vi2x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i2));
const __m512i vk2x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
i2 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
const __m512i vi3x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i3));
const __m512i vk3x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
i3 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
const __m512i vi4x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i4));
const __m512i vk4x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
i4 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
const __m512i vi5x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i5));
const __m512i vk5x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
i5 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
const __m512i vi6x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i6));
const __m512i vk6x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
i6 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
const __m512i vi7x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i7));
const __m512i vk7x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
i7 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
const __m512i vi8x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i8));
const __m512i vk8x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
i8 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
const __m512i vi9x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i9));
const __m512i vk9x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t))));
i9 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF));
const __m512i vi10x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i10));
const __m512i vk10x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(int8_t))));
i10 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF));
const __m512i vi11x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i11));
const __m512i vk11x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(int8_t))));
i11 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF));
const __m512i vi12x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i12));
const __m512i vk12x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(int8_t))));
i12 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF));
const __m512i vi13x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i13));
const __m512i vk13x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(int8_t))));
i13 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF));
const __m512i vi14x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i14));
const __m512i vk14x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(int8_t))));
i14 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF));
const __m512i vi15x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i15));
const __m512i vk15x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(int8_t))));
i15 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF));
const __m512i vi16x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i16));
const __m512i vk16x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(int8_t))));
i16 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF));
const __m512i vi17x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i17));
const __m512i vk17x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(int8_t))));
i17 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF));
const __m512i vi18x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i18));
const __m512i vk18x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(int8_t))));
i18 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF));
const __m512i vi19x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i19));
const __m512i vk19x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(int8_t))));
i19 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF));
const __m512i vi20x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i20));
const __m512i vk20x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(int8_t))));
i20 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF));
const __m512i vi21x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i21));
const __m512i vk21x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(int8_t))));
i21 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF));
const __m512i vi22x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i22));
const __m512i vk22x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(int8_t))));
i22 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF));
const __m512i vi23x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i23));
const __m512i vk23x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(int8_t))));
i23 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF));
const __m512i vi24x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i24));
const __m512i vk24x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(int8_t))));
i24 += 16;
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF));
w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t));
__m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps(w);
w = (const void*) ((uintptr_t) w + 16 * sizeof(float));
vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale0123456789ABCDEF);
vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
__m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
__m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
// Prepare mask for valid 8-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (c & 15)) - UINT32_C(1)));
{
__m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
const __m512i vi0x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i0));
const __m512i vk0x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
const __m512i vi1x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i1));
const __m512i vk1x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
const __m512i vi2x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i2));
const __m512i vk2x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
const __m512i vi3x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i3));
const __m512i vk3x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
const __m512i vi4x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i4));
const __m512i vk4x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
const __m512i vi5x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i5));
const __m512i vk5x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
const __m512i vi6x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i6));
const __m512i vk6x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
const __m512i vi7x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i7));
const __m512i vk7x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
const __m512i vi8x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i8));
const __m512i vk8x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
const __m512i vi9x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i9));
const __m512i vk9x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF));
const __m512i vi10x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i10));
const __m512i vk10x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF));
const __m512i vi11x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i11));
const __m512i vk11x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF));
const __m512i vi12x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i12));
const __m512i vk12x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF));
const __m512i vi13x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i13));
const __m512i vk13x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF));
const __m512i vi14x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i14));
const __m512i vk14x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF));
const __m512i vi15x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i15));
const __m512i vk15x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF));
const __m512i vi16x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i16));
const __m512i vk16x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF));
const __m512i vi17x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i17));
const __m512i vk17x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF));
const __m512i vi18x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i18));
const __m512i vk18x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF));
const __m512i vi19x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i19));
const __m512i vk19x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF));
const __m512i vi20x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i20));
const __m512i vk20x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF));
const __m512i vi21x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i21));
const __m512i vk21x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF));
const __m512i vi22x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i22));
const __m512i vk22x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF));
const __m512i vi23x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i23));
const __m512i vk23x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF));
const __m512i vi24x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i24));
const __m512i vk24x0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(int8_t))));
vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF));
__m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t)));
vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale0123456789ABCDEF);
vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
__m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
__m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_mask_storeu_epi8(output, vmask, vout0123456789ABCDEF);
output = (int8_t*) ((uintptr_t) output + c);
}
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
|
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from hasker.profiles.forms import SignUpForm, SettingsForm
def signup(request):
    """Register a new user, set their avatar, and log them in.

    GET renders an empty SignUpForm; a valid POST creates the user,
    attaches the uploaded avatar, authenticates, logs in, and redirects
    to 'main_page'. An invalid POST re-renders the bound form.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST, request.FILES)
        if form.is_valid():
            # Fix: form.save() used to run *outside* the atomic block, so a
            # failure of the avatar save left a user row without its avatar.
            # Both writes now commit (or roll back) together.
            with transaction.atomic():
                user_obj = form.save()
                user_obj.avatar = form.cleaned_data.get('avatar')
                user_obj.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # Re-authenticate with the raw password so login() gets a backend-annotated user.
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('main_page')
    else:
        form = SignUpForm()
    return render(request, 'profiles/signup.html', {'form': form})
@login_required
def settings(request):
    """Let the authenticated user edit their profile settings.

    GET (or an invalid POST) renders the settings form; a valid POST
    saves the changes — replacing the avatar only when a new file was
    uploaded — and redirects to 'main_page'.
    """
    if request.method != 'POST':
        return render(request, 'profiles/settings.html',
                      {'form': SettingsForm(instance=request.user)})

    form = SettingsForm(request.POST, request.FILES, instance=request.user)
    if not form.is_valid():
        return render(request, 'profiles/settings.html', {'form': form})

    user_obj = form.save(commit=False)
    with transaction.atomic():
        new_avatar = form.cleaned_data.get('avatar')
        if new_avatar:
            user_obj.avatar = new_avatar
        user_obj.save()
    return redirect('main_page')
|
from random import randint
class Person:
    """A participant in the social graph with named contacts and interests.

    Contacts are keyed by the contact's ``first_name``; interests are keyed
    by the interest's ``name``. Mutators return ``self`` for chaining.
    """

    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name
        self.contacts = {}   # first_name -> Person
        self.interests = {}  # interest name -> Interest

    def add_contact(self, contact):
        """Add (or replace) a contact, keyed by its first name."""
        self.contacts[contact.first_name] = contact
        return self

    def remove_contact(self, contact):
        """Remove a contact; accepts either a Person or its first-name key.

        Bug fix: the dict is keyed by ``first_name`` but the original popped
        the argument itself, so passing the same Person given to
        ``add_contact`` raised KeyError.
        """
        self.contacts.pop(getattr(contact, 'first_name', contact))
        return self

    def add_interest(self, interest):
        """Add (or replace) an interest, keyed by its name."""
        self.interests[interest.name] = interest
        return self

    def remove_interest(self, interest):
        """Remove an interest; accepts either an Interest or its name key.

        Bug fix: mirrors remove_contact — pop by the ``name`` key, not the
        object itself.
        """
        self.interests.pop(getattr(interest, 'name', interest))
        return self
class Interest:
    """A named interest with an optional associated activity.

    Both fields start empty; callers populate them after construction.
    """

    def __init__(self):
        self.name = ""
        self.activity = None
# Morality score constants drawn by wheel_of_morality().
moral = 5
amoral = 0
immoral = -5


def wheel_of_morality():
    """Infinite generator yielding a uniformly random morality score per draw."""
    outcomes = [moral, amoral, immoral]
    while True:
        yield outcomes[randint(0, len(outcomes) - 1)]
class State:
    # NOTE(review): these are *class* attributes — shared by every State
    # instance, so all States draw from the same wheel_of_morality() generator.
    # Confirm that sharing one generator (rather than one per instance) is intended.
    name=''
    morality = wheel_of_morality()
class Values:
    # Randomly generated personality/value scores for a simulated person.
    def __init__(self):
        # Allegiance data: a (currently unnamed) state with its own morality
        # wheel, plus a list of people the person is loyal to.
        self.loyalties={"state":{'name':'', 'morality':wheel_of_morality()}, "people":[]}
        self.love = randint(1,100)
        # NOTE(review): local variable, never used and discarded on return —
        # probably meant to be self.max_love; confirm intent.
        max_love = 100
        self.empathy = randint(1, 10)
        # Honesty mixes empathy with one draw from the state's morality wheel
        # (the wheel yields 5, 0, or -5), so honesty may be negative.
        self.honesty = self.empathy+next(self.loyalties['state']['morality'])
        self.respect={'for':{'others':self.empathy+self.love, 'self':self.love+self.honesty}}
class Session:
    """Interactive game session: owns the people/interest lists and the text menu."""

    def __init__(self):
        self.list_of_interests = []
        self.list_of_people = []
        # Layout settings for the text UI.
        self.page_width = 50
        self.filler_character = '='

    def main_menu(self):
        """Render the title menu and dispatch on the user's typed choice."""
        print("Social Sim".center(self.page_width, self.filler_character))
        for label in ("New Game", "Load Game", "Options", "Quit"):
            print(label.center(self.page_width, " "))
        nav = input('>')
        # Only "new game" is wired up so far; other entries fall through.
        if nav.lower() == "new game":
            self.newgame()

    def newgame(self):
        """Start a new game containing one randomly generated person."""
        print("You Got to the new game")
        self.list_of_people.append(Values())
        print(self.list_of_people[0])
# Disabled interactive entry point (kept for manual runs of the menu loop).
'''
begin = Session()
begin.main_menu()
'''
# Quick manual smoke test: generate one Values profile and show its honesty score.
newperson = Values()
print(newperson.honesty)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.category_encoding."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import preprocessing_test_utils
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
  """Batch `dataset`, optionally repeating it `repeat` times first.

  TPUs currently require fully defined input shapes, so under a TPU
  distribution strategy batches are built with drop_remainder=True.
  """
  if repeat:
    dataset = dataset.repeat(repeat)
  tpu_strategies = (tf.distribute.experimental.TPUStrategy,
                    tf.compat.v1.distribute.experimental.TPUStrategy)
  drop_remainder = isinstance(distribution, tpu_strategies)
  return dataset.batch(batch_size, drop_remainder=drop_remainder)
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        # (b/156783625): Outside compilation failed for eager mode only.
        distribution=strategy_combinations.strategies_minus_tpu,
        mode=["eager", "graph"]))
class CategoryEncodingDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  # Runs CategoryEncoding (multi-hot mode) under each non-TPU distribution
  # strategy, in both eager and graph mode.

  def test_distribution(self, distribution):
    # Two rows of 4 token ids each; every id is in [0, num_tokens).
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
    inp_dataset = tf.data.Dataset.from_tensor_slices(input_array)
    inp_dataset = batch_wrapper(inp_dataset, 2, distribution)

    # pyformat: disable
    # Expected multi-hot rows: 1 at index i iff token id i appears in the row.
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [1, 1, 0, 1, 0, 0]]
    # pyformat: enable
    num_tokens = 6
    tf.config.set_soft_device_placement(True)

    with distribution.scope():
      input_data = keras.Input(shape=(4,), dtype=tf.int32)
      layer = category_encoding.CategoryEncoding(
          num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(inp_dataset)
    self.assertAllEqual(expected_output, output_dataset)
if __name__ == "__main__":
tf.test.main()
|
import createSvgIcon from './utils/createSvgIcon.js';
import { jsx as _jsx } from "react/jsx-runtime";
// Auto-generated Material-UI icon component: wraps the 'PregnantWoman' SVG
// path in a themed SvgIcon via createSvgIcon.
export default createSvgIcon( /*#__PURE__*/_jsx("path", {
  d: "M9 4c0-1.11.89-2 2-2s2 .89 2 2-.89 2-2 2-2-.89-2-2zm7 9c-.01-1.34-.83-2.51-2-3 0-1.66-1.34-3-3-3s-3 1.34-3 3v7h2v5h3v-5h3v-4z"
}), 'PregnantWoman');
|
"""
Fractimation Data_Models Subpackage contains various modules and classes related to parameter and
state objects.
Public Modules :
* complex_range_params - Contains class for representing parameters associated with a range of
complex numbers
* formula_params - Contains class for representing parameters associated with a fractal formula
* image_params - Contains class for representing parameters associated with an image
"""
|
/**
* @author mrdoob / http://mrdoob.com/
* @author alteredq / http://alteredqualia.com/
*
* parameters = {
* color: <hex>,
* opacity: <float>,
* map: new THREE.Texture( <Image> ),
*
* size: <float>,
*
* blending: THREE.NormalBlending,
* depthTest: <bool>,
* depthWrite: <bool>,
*
* vertexColors: <bool>,
*
* fog: <bool>
* }
*/
// Constructor: initializes the material's defaults, then lets setValues()
// override any of them from the `parameters` object (see header comment).
THREE.ParticleBasicMaterial = function ( parameters ) {
	THREE.Material.call( this );
	// Defaults below are overridden by matching keys in `parameters`.
	this.color = new THREE.Color( 0xffffff );
	this.map = null;
	this.size = 1;
	this.sizeAttenuation = true;
	this.vertexColors = false;
	this.fog = true;
	this.setValues( parameters );
};
THREE.ParticleBasicMaterial.prototype = Object.create( THREE.Material.prototype );
// Returns a new ParticleBasicMaterial with this material's settings copied over.
// Note: `map` is copied by reference (textures are shared, not duplicated).
THREE.ParticleBasicMaterial.prototype.clone = function () {

	var copy = new THREE.ParticleBasicMaterial();

	// Copy base Material state first, then this subclass's own properties.
	THREE.Material.prototype.clone.call( this, copy );

	copy.color.copy( this.color );
	copy.map = this.map;
	copy.size = this.size;
	copy.sizeAttenuation = this.sizeAttenuation;
	copy.vertexColors = this.vertexColors;
	copy.fog = this.fog;

	return copy;

};
|
// This is the Base controller for our project. In every other controller, we inherit functions from our base controller, which make functions reusable in many places.
sap.ui.define(
  [
    "sap/ui/core/mvc/Controller",
    "sap/ui/core/UIComponent",
    "sap/ui/core/routing/History",
  ],
  function (Controller, UIComponent, History) {
    "use strict";

    return Controller.extend("sap.btp.myUI5App.controller.BaseController", {
      /**
       * @returns {sap.ui.core.routing.Router} the router of the owning component.
       */
      getRouter: function () {
        return UIComponent.getRouterFor(this);
      },

      /**
       * Navigates one step back in the browser history; when there is no
       * previous hash, navigates to the "home" route without adding a
       * history entry.
       */
      onNavBack: function () {
        var sPreviousHash = History.getInstance().getPreviousHash();
        if (sPreviousHash === undefined) {
          this.getRouter().navTo("home", {}, true /*no history*/);
        } else {
          window.history.go(-1);
        }
      },
    });
  }
);
|
from adafruit_blinka.agnostic import microcontroller
# Number of GPIO pins exposed per supported microcontroller family.
if microcontroller == "samd21":
    pin_count = 38
elif microcontroller == "esp8266":
    pin_count = 10
else:
    # Any other chip is not (yet) supported by this shim.
    raise NotImplementedError("Microcontroller not supported")
|
var _ = require('../lodash-local')
var util = require('../util')
// task definition function
// .task() simply passes through json raw
// Task definition helper.
// .task() is a pass-through: the raw json object IS the task definition.
var task = function (json) {
  return json
}

// Serializes a task definition to a pipe-builder code fragment.
// Works on a deep copy so the caller's object is untouched; the internal
// 'eid' key is stripped before serializing.
task.toCode = function (json, Flexio) {
  var copy = JSON.parse(JSON.stringify(json))
  delete copy['eid']
  return 'task(' + JSON.stringify(copy, null, 2) + ')'
}

module.exports = task
|
mycallback( {"CONTRIBUTOR OCCUPATION": "Retired", "CONTRIBUTION AMOUNT (F3L Bundled)": "50.00", "ELECTION CODE": "G2010", "MEMO CODE": "", "CONTRIBUTOR EMPLOYER": "Retired", "DONOR CANDIDATE STATE": "", "CONTRIBUTOR STREET 1": "4237 Trias Street", "CONTRIBUTOR MIDDLE NAME": "", "DONOR CANDIDATE FEC ID": "", "DONOR CANDIDATE MIDDLE NAME": "", "CONTRIBUTOR STATE": "CA", "DONOR CANDIDATE FIRST NAME": "", "CONTRIBUTOR FIRST NAME": "Mike", "BACK REFERENCE SCHED NAME": "", "DONOR CANDIDATE DISTRICT": "", "CONTRIBUTION DATE": "20100629", "DONOR COMMITTEE NAME": "", "MEMO TEXT/DESCRIPTION": "", "Reference to SI or SL system code that identifies the Account": "", "FILER COMMITTEE ID NUMBER": "C00441410", "DONOR CANDIDATE LAST NAME": "", "CONTRIBUTOR LAST NAME": "Chase", "_record_type": "fec.version.v7_0.SA", "CONDUIT STREET2": "", "CONDUIT STREET1": "", "DONOR COMMITTEE FEC ID": "", "CONTRIBUTION PURPOSE DESCRIP": "", "CONTRIBUTOR ZIP": "92103", "CONTRIBUTOR STREET 2": "", "CONDUIT CITY": "", "ENTITY TYPE": "IND", "CONTRIBUTOR CITY": "San Diego", "CONTRIBUTOR SUFFIX": "", "TRANSACTION ID": "INCA249", "DONOR CANDIDATE SUFFIX": "", "DONOR CANDIDATE OFFICE": "", "CONTRIBUTION PURPOSE CODE": "15", "ELECTION OTHER DESCRIPTION": "", "_src_file": "2011/20110504/727307.fec_1.yml", "CONDUIT STATE": "", "CONTRIBUTOR ORGANIZATION NAME": "", "BACK REFERENCE TRAN ID NUMBER": "", "DONOR CANDIDATE PREFIX": "", "CONTRIBUTOR PREFIX": "Mr.", "CONDUIT ZIP": "", "CONDUIT NAME": "", "CONTRIBUTION AGGREGATE F3L Semi-annual Bundled": "50.00", "FORM TYPE": "SA11AI"});
|
import reversion
from apps.catalog.models.core import *
from django.contrib.auth.models import User

# Register catalog models with django-reversion so that edits to their
# instances are version-tracked and can be inspected/rolled back.
reversion.register(Note)
reversion.register(User)
reversion.register(Comment)
reversion.register(Documentation)
reversion.register(Image)
reversion.register(Video)
reversion.register(Product)
# `follow` makes a Makey revision also snapshot these related objects, so a
# rollback restores the Makey together with its attachments/relations.
reversion.register(Makey, follow=['collaborators', 'comments', 'notes',
                                  'documentations', 'images', 'cover_pic',
                                  'videos', ])
|
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch {
namespace jit {

// Declaration of the ONNX scalar-type-analysis pass over `graph`
// (presumably mutating it in place — see the defining .cpp for details).
TORCH_API void ScalarTypeAnalysisForONNX(const std::shared_ptr<Graph>& graph);

} // namespace jit
} // namespace torch
|
import lldb # type: ignore[import]
# load into lldb instance with:
# command script import tools/lldb/deploy_debugger.py
# Hook the deploy interpreter's registration point so that embedded shared
# libraries get their debug info loaded into lldb automatically.
target = lldb.debugger.GetSelectedTarget()
bp = target.BreakpointCreateByRegex("__deploy_register_code")
# The callback body runs inside lldb each time the breakpoint fires: it reads
# the module-info struct (name ptr, file ptr, file size, load bias), dumps the
# embedded .so to a temp file, and registers it with the debugger.
bp.SetScriptCallbackBody("""\
process = frame.thread.GetProcess()
target = process.target
symbol_addr = frame.module.FindSymbol("__deploy_module_info").GetStartAddress()
info_addr = symbol_addr.GetLoadAddress(target)
e = lldb.SBError()
ptr_size = 8
str_addr = process.ReadPointerFromMemory(info_addr, e)
file_addr = process.ReadPointerFromMemory(info_addr + ptr_size, e)
file_size = process.ReadPointerFromMemory(info_addr + 2*ptr_size, e)
load_bias = process.ReadPointerFromMemory(info_addr + 3*ptr_size, e)
name = process.ReadCStringFromMemory(str_addr, 512, e)
r = process.ReadMemory(file_addr, file_size, e)
from tempfile import NamedTemporaryFile
from pathlib import Path
stem = Path(name).stem
with NamedTemporaryFile(prefix=stem, suffix='.so', delete=False) as tf:
    tf.write(r)
print("torch_deploy registering debug information for ", tf.name)
cmd1 = f"target modules add {tf.name}"
# print(cmd1)
lldb.debugger.HandleCommand(cmd1)
cmd2 = f"target modules load -f {tf.name} -s {hex(load_bias)}"
# print(cmd2)
lldb.debugger.HandleCommand(cmd2)
return False
""")
|
// Working state for the sorting/searching demo page.
var arrayToSort = []; // the array every algorithm operates on
var threshold = 100; // exclusive upper bound on the number of elements
var maximumSize = 100; // exclusive upper bound on each element's value
var bruteForceValue; // value the linear search looks for
var binarySearchValue; // value the binary search looks for
//the number of elements to place in the array
var numElements = Math.floor(Math.random() * threshold);
//display the array when the document loads
$(document).ready(function() {
    loop("myList");
});
//execute the bubble sort algorithm
$("#bubbleSort").click(function() {
    //current time on start
    $("#currentTime1").html(new Date());
    bubbleSort();
    // NOTE(review): loop() appends into the element and returns undefined, so
    // passing its result to .html() is suspect — confirm intended behaviour.
    $("#bubbleSortResult").html(loop("bubbleSortResult"));
    //current time once complete
    $("#endTime1").html(new Date());
});
//execute the insertion sort algorithm (not yet implemented)
$("#insertionSort").click(function() {
    //current time on start
    $("#currentTime2").html(new Date());
    $("#insertionSortResult").html(loop("insertionSortResult"));
    //write insertion sort here
    //current time once complete
    $("#endTime2").html(new Date());
});
//execute the selection sort algorithm (not yet implemented)
$("#selectionSort").click(function() {
    //current time on start
    $("#currentTime3").html(new Date());
    $("#selectionSortResult").html(loop("selectionSortResult"));
    //write selection sort here
    //current time once complete
    $("#endTime3").html(new Date());
});
//linear search: the array does not need to be sorted
$("#bruteForce").click(function() {
    bruteForceValue = $("#bruteForceValue").val();
    //runs the brute force (linear) search
    bruteForce();
});
//this algorithm requires the list to be sorted in order
$("#binarySearch").click(function() {
    binarySearchValue = $("#binarySearchValue").val();
    //runs the binary search
    binarySearch();
});
// Render arrayToSort into the element with id `myId` as "[a, b, c]".
// On the first call (empty array) it also populates arrayToSort with
// numElements random integers in [0, maximumSize).
// NOTE(review): has no return value, although some callers pass loop(...)
// to .html() — confirm that is intentional.
function loop(myId) {
    $("#" + myId).append("[");
    if(arrayToSort.length == 0)
    {
        //populate the array with random numbers
        for(var i = 0; i < numElements; i = i + 1)
        {
            var num = Math.floor(Math.random() * maximumSize);
            arrayToSort.push(num);
            if(i == numElements - 1)
                $("#" + myId).append(num); // last element: no trailing comma
            else
                $("#" + myId).append(num + ", ");
        }
    }
    else
    {
        // array already populated: just print its contents
        for(var i = 0; i < numElements; i = i + 1)
        {
            if(i == numElements - 1)
                $("#" + myId).append(arrayToSort[i]);
            else
                $("#" + myId).append(arrayToSort[i] + ", ");
        }
    }
    $("#" + myId).append("]");
}
// In-place ascending bubble sort of the global arrayToSort.
// Repeats full passes until a pass makes no swap.
function bubbleSort()
{
    var swapped;
    do {
        swapped = false;
        // One pass: bubble the largest remaining value toward the end.
        // Stops at length - 1 because each step compares a pair (i, i + 1).
        for (var idx = 0; idx < arrayToSort.length - 1; idx = idx + 1) {
            if (arrayToSort[idx] > arrayToSort[idx + 1]) {
                var hold = arrayToSort[idx];
                arrayToSort[idx] = arrayToSort[idx + 1];
                arrayToSort[idx + 1] = hold;
                swapped = true;
            }
        }
    } while (swapped);
}
// Linear search of the global arrayToSort for bruteForceValue; writes the
// outcome (and the index on success) into #bruteForceResult.
// Uses loose == on purpose: bruteForceValue comes from an <input> as a string.
function bruteForce() {
    var matched = false; // becomes true once the value is found
    var position;        // index of the matched element
    for (var start = 0; start < arrayToSort.length; start++)
    {
        if (arrayToSort[start] == bruteForceValue)
        {
            matched = true;
            position = start;
            break; // stop at the first match
        }
    }
    // Report once, after the scan. (The original reported from inside the
    // loop and was missing the function's closing brace, which made the
    // following binarySearch() a nested, unreachable declaration.)
    if (matched == true)
    {
        $("#bruteForceResult").html("The result is " + matched + " in position " + position);
    }
    else
    {
        $("#bruteForceResult").html("The result is " + matched);
    }
}
function binarySearch () {
// initial values for start, middle and end
var low = 0;//the low means where u start
var high = arrayToSort.length - 1;//the high means where u finish
var found = false;//we set the found to false
var position;//this tells u where the number is
while (low <= high) // While the middle low is greater than or equal to the high do this:
{
var mid = Math.floor((high + low)/2);// sets a variable name mid and calutate the mid point of the array
//then we look at the mid point inside of the array
if (binarySearchValue == arrayToSort[mid]) //if the number we put in equels to the number in the mid point
{
found = true;//if is that case then say we find the number
position = mid;
break;//it stops once a value is found
}
if (binarySearchValue < arrayToSort[mid] ) //if the number we put in is smaller then the mid point number
{
high = mid-1; //if it is that case then the highest number is going to be the mid point number and we minue one because the number we put in is not the mid point number
}
else // if the number we put in is bigger than mid point number
{
low = mid+ 1; // then we r going to make the low equal to the mid point and puls one because the number we put in is not the mid point number
}
}
if (found == true)
{
$("#binarySearchResult").html("The result is " + found + " in position " + position);// this just print out the result
}
else
{
$("#binarySearchResult").html("The result is " + found);// this just print out the result
}
}
|
# -*- coding: utf-8 -*-
#
# {{ cookiecutter.project_name }} documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../"))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.todo",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "{{ cookiecutter.project_name }}"
author = "{{ cookiecutter.author_name }}"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "{{ cookiecutter.version }}"
# The full version, including alpha/beta/rc tags.
release = "{{ cookiecutter.version }}"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "{{ cookiecutter.package_name }}doc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"{{ cookiecutter.package_name }}.tex",
"{{ cookiecutter.project_name }} Documentation",
"{{ cookiecutter.author_name }}",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"{{ cookiecutter.package_name }}",
"{{ cookiecutter.project_name }} Documentation",
["{{ cookiecutter.author_name }}"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"{{ cookiecutter.package_name }}",
"{{ cookiecutter.project_name }} Documentation",
"{{ cookiecutter.author_name }}",
"{{ cookiecutter.project_name }}",
"{{ cookiecutter.description }}",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
from recommonmark.parser import CommonMarkParser
# The suffix of source filenames.
# NOTE(review): this re-binds `source_suffix` (set to ".rst" earlier in this
# file) so Markdown sources are accepted as well. `source_parsers` is
# deprecated since Sphinx 1.8 — confirm the pinned Sphinx version supports it.
source_suffix = [".rst", ".md"]
source_parsers = {".md": CommonMarkParser}
|
/*!
* Express - request
* Copyright(c) 2010 TJ Holowaychuk <tj@vision-media.ca>
* MIT Licensed
*/
/**
* Module dependencies.
*/
var http = require('http')
, req = http.IncomingMessage.prototype
, utils = require('./utils')
, mime = require('mime');
/**
 * Default notification formatters.
 *
 * `%s` stringifies its argument; `%d` coerces it to a 32-bit integer via
 * `| 0`. Applications can add their own specifiers through `app.formatters`.
 */

var defaultFormatters = exports.formatters = {
  s: function(val){
    return String(val);
  },
  d: function(val){
    return val | 0;
  }
};
/**
 * Return the value of request header `name`, or `defaultValue` when the
 * header is absent. Lookup is case-insensitive, and `Referrer` / `Referer`
 * are treated as interchangeable spellings of the same header.
 *
 * Examples:
 *
 *     req.header('Content-Type');        // => "text/plain"
 *     req.header('content-type');        // => "text/plain"
 *     req.header('Accept');              // => undefined
 *     req.header('Accept', 'text/html'); // => "text/html"
 *
 * @param {String} name
 * @param {String} defaultValue
 * @return {String}
 * @api public
 */

req.header = function(name, defaultValue){
  var key = name.toLowerCase();
  if ('referer' == key || 'referrer' == key) {
    return this.headers.referrer
      || this.headers.referer
      || defaultValue;
  }
  return this.headers[key] || defaultValue;
};
/**
 * Get the `param` component of header `field`, defaulting to ''.
 *
 * Examples:
 *
 *     req.get('content-disposition', 'filename');
 *     // => "something.png"
 *
 *     req.get('content-disposition', 'rawr');
 *     // => ""
 *
 * @param {String} field
 * @param {String} param
 * @return {String}
 * @api public
 */

req.get = function(field, param){
  var header = this.header(field);
  if (!header) return '';
  // Match either a quoted value or a bare value terminated by ';'.
  var pattern = new RegExp(param + ' *= *(?:"([^"]+)"|([^;]+))', 'i');
  if (!pattern.exec(header)) return '';
  // exec() above populated the static RegExp capture slots.
  return RegExp.$1 || RegExp.$2;
};
/**
 * Check whether the _Accept_ header includes the given `type`.
 *
 * Returns `true` when the header is missing or is the wildcard. Otherwise
 * `type` is matched exactly first, then by subtype wildcard ("text/*").
 * Bare extensions such as "html" or ".json" are resolved through the mime
 * lookup table.
 *
 * Examples:
 *
 *     // Accept: text/html
 *     req.accepts('html');      // => true
 *
 *     // Accept: text/*; application/json
 *     req.accepts('text/plain');        // => true
 *     req.accepts('application/json');  // => true
 *     req.accepts('image/png');         // => false
 *
 * @param {String} type
 * @return {Boolean}
 * @api public
 */

req.accepts = function(type){
  var accept = this.header('Accept');
  // normalize extensions ".json" -> "json"
  if (type && '.' == type[0]) type = type.substr(1);
  // missing header or explicit wildcard accepts anything
  if (!accept || '*/*' == accept) return true;
  if (!type) return false;
  // allow "html" vs "text/html" etc
  if (!~type.indexOf('/')) type = mime.lookup(type);
  // direct match
  if (~accept.indexOf(type)) return true;
  // fall back to a subtype wildcard such as "text/*"
  var wildcard = type.split('/')[0] + '/*';
  return !! ~accept.indexOf(wildcard);
};
/**
 * Return the value of param `name` when present, or `defaultValue`.
 *
 * Lookup order:
 *   - route placeholders, ex: _/user/:id_
 *   - query string params, ex: ?id=12
 *   - urlencoded body params, ex: id=12
 *
 * Body params require `req.body` to be populated, e.g. by the
 * `connect.bodyParser` middleware.
 *
 * @param {String} name
 * @param {Mixed} defaultValue
 * @return {String}
 * @api public
 */

req.param = function(name, defaultValue){
  var routeParams = this.params;
  // route params like /user/:id
  if (routeParams
    && routeParams.hasOwnProperty(name)
    && undefined !== routeParams[name]) {
    return routeParams[name];
  }
  // query string params
  if (undefined !== this.query[name]) return this.query[name];
  // request body params via connect.bodyParser
  var body = this.body;
  if (body && undefined !== body[name]) return body[name];
  return defaultValue;
};
/**
 * Queue flash `msg` of the given `type`, or flush queued messages.
 *
 *   - `req.notify(type, msg, ...args)` queues a message (mini-markdown and
 *     HTML-escaped) and returns the new queue length. `%s`-style specifiers
 *     in `msg` are expanded using `app.formatters` merged over the default
 *     formatters; unknown specifiers are left as-is.
 *   - `req.notify(type)` returns and clears the messages of that type.
 *   - `req.notify()` returns and clears all messages.
 *
 * Requires sessions; throws otherwise.
 *
 * Examples:
 *
 *     req.notify('info', 'email sent');
 *     req.notify('error', 'email delivery failed');
 *     req.notify('info', 'email has been sent to %s.', userName);
 *
 *     app.formatters = {
 *       u: function(val){ return String(val).toUpperCase(); }
 *     };
 *
 * @param {String} type
 * @param {String} msg
 * @return {Array|Object|Number}
 * @api public
 */

req.notify = function(type, msg){
  var sess = this.session;
  if (null == sess) throw Error('req.notify() requires sessions');
  var msgs = sess.notifications = sess.notifications || {};
  // notification
  if (type && msg) {
    var i = 2
      , args = arguments
      , formatters = this.app.formatters || {};
    formatters.__proto__ = defaultFormatters;
    msg = utils.miniMarkdown(utils.escape(msg));
    msg = msg.replace(/%([a-zA-Z])/g, function(match, format){
      var formatter = formatters[format];
      // Unknown specifiers are kept verbatim: previously the callback fell
      // through returning undefined, which spliced the literal string
      // "undefined" into the message.
      return formatter ? formatter(args[i++]) : match;
    });
    return (msgs[type] = msgs[type] || []).push(msg);
  }
  // flush messages for a specific type
  if (type) {
    var arr = msgs[type];
    delete msgs[type];
    return arr || [];
  }
  // flush all messages
  sess.notifications = {};
  return msgs;
};
/**
 * Check if the incoming request contains the "Content-Type"
 * header field, and that it contains the given mime `type`.
 *
 * Examples:
 *
 *      // With Content-Type: text/html; charset=utf-8
 *      req.is('html');
 *      req.is('text/html');
 *      // => true
 *
 *      // When Content-Type is application/json
 *      req.is('json');
 *      req.is('application/json');
 *      // => true
 *
 *      req.is('html');
 *      // => false
 *
 * Ad-hoc callbacks can also be registered with Express, to perform
 * assertions against the request, for example if we need an expressive
 * way to check if our incoming request is an image, we can register "an image"
 * callback:
 *
 *       app.is('an image', function(req){
 *         return 0 == req.headers['content-type'].indexOf('image');
 *       });
 *
 *       app.is('an attachment', function(req){
 *         return 0 == req.headers['content-disposition'].indexOf('attachment');
 *       });
 *
 * Now within our route callbacks, we can use it to assert content types
 * such as "image/jpeg", "image/png", etc.
 *
 *      app.post('/image/upload', function(req, res, next){
 *        if (req.is('an image')) {
 *          // do something
 *        } else {
 *          next();
 *        }
 *      });
 *
 * @param {String} type
 * @return {Boolean}
 * @api public
 */

req.is = function(type){
  // app-registered named checks ("an image") take precedence
  var fn = this.app.is(type);
  if (fn) return fn(this);
  var contentType = this.headers['content-type'];
  // NOTE(review): returns undefined (not false) when the header is absent.
  if (!contentType) return;
  // allow "html" vs "text/html" etc
  if (!~type.indexOf('/')) type = mime.lookup(type);
  if (~type.indexOf('*')) {
    // wildcard match: "text/*" or "*/html"
    // (no semicolon on the next line — relies on automatic semicolon insertion)
    type = type.split('/')
    contentType = contentType.split('/');
    if ('*' == type[0] && type[1] == contentType[1]) return true;
    if ('*' == type[1] && type[0] == contentType[0]) return true;
  }
  // NOTE(review): if the wildcard branch ran without matching, both values
  // are now arrays, so this indexOf is an array search and yields false.
  return !! ~contentType.indexOf(type);
};
// Callback for isXMLHttpRequest / xhr
function isxhr() {
return this.header('X-Requested-With', '').toLowerCase() === 'xmlhttprequest';
}
/**
* Check if the request was an _XMLHttpRequest_.
*
* @return {Boolean}
* @api public
*/
// Expose the check under both the legacy long name and the short alias.
req.__defineGetter__('isXMLHttpRequest', isxhr);
req.__defineGetter__('xhr', isxhr);
|
// Show the share <div> first, then run this initialisation code.
/*
<div class="bdsharebuttonbox"><a href="#" class="bds_more" data-cmd="more"></a><a href="#" class="bds_qzone" data-cmd="qzone"></a><a href="#" class="bds_tsina" data-cmd="tsina"></a><a href="#" class="bds_tqq" data-cmd="tqq"></a><a href="#" class="bds_renren" data-cmd="renren"></a><a href="#" class="bds_weixin" data-cmd="weixin"></a></div>
*/
function baidu_share_show(){
// Baidu Share global config, then inject the share.js loader script.
window._bd_share_config={"common":{"bdSnsKey":{},"bdText":"","bdMini":"2","bdPic":"","bdStyle":"0","bdSize":"16"},"share":{},"image":{"viewList":["qzone","tsina","tqq","renren","weixin"],"viewText":"分享到:","viewSize":"16"},"selectShare":{"bdContainerClass":null,"bdSelectMiniList":["qzone","tsina","tqq","renren","weixin"]}};
// Append the loader <script> to <head> (or <body>); the `0[...]` indexing is
// only a trick to turn the appendChild expression into a statement.
with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('script')).src='http://bdimg.share.baidu.com/static/api/js/share.js?v=89860593.js?cdnversion='+~(-new Date()/36e5)];
}
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file is using request() which is not allowed.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import urllib2
import python_utils
class FakeClass(python_utils.OBJECT):
    """This is a fake docstring for invalid syntax purposes."""

    def __init__(self, fake_arg):
        # Stored only so the fixture has some state; never read.
        self.fake_arg = fake_arg

    def fake_method(self, source_url, data, headers):
        """This doesn't do anything.

        Args:
            source_url: str. The URL.
            data: str. Additional data to send to the server.
            headers: dict. The request headers.

        Returns:
            Request(object): Returns Request object.
        """
        # Use of Request() is not allowed.
        # NOTE: urllib2.Request is called deliberately — this file exists so
        # the linter test can detect the disallowed call. Do not "fix" it.
        return urllib2.Request(source_url, data, headers)
|
// Apollo client configuration: names this client ('DELISH [web]') and the
// graph it talks to — presumably consumed by Apollo tooling for schema /
// metrics association; confirm against the Apollo setup that reads this.
module.exports = {
  client: {
    name: 'DELISH [web]',
    service: 'themealdb-graph'
  }
}
|
/** \file
* \brief inheritance of file descriptors
*/
/*
* Copyright (c) 2010, 2012, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/spawn_client.h>
#include "testdesc.h"
/* Copying the actual handles is hard.
* We could try a deep copy, but really we should come up with a serialised
* format for each type. It also involves implementing the underlying
* resources such that the descriptors can actually be used in the new
* dispatcher.
*/
/**
 * \brief Serialise a FILE-type fd entry into the shared buffer.
 *
 * Writes sizeof(void *) bytes (the raw handle pointer) to \p dest, then
 * rewrites fds->handle to the buffer-relative \p offset so the child can
 * locate the data. Returns the number of bytes written.
 */
static size_t copy_file_fd(void *dest, lpaddr_t offset, struct fd_store *fds)
{
    size_t bytes = sizeof(void *);

    printf("FILE\n\thandle: %p\n", fds->handle);

    /* This following isn't correct at all - we're just copying the value of
     * the pointer, which is useless in the new dispatcher */
    printf("copying %zu bytes from %p to %p\n", bytes, &fds->handle, dest);
    memcpy(dest, &fds->handle, bytes);

    fds->handle = (void *)offset;
    printf("fd %d fixed handle is: %p\n", fds->num, fds->handle);

    return bytes;
}
/**
 * \brief Serialise a UNIX-socket fd entry into the shared buffer.
 *
 * Shallow-copies the _unix_socket struct to \p dest, then rewrites
 * fds->handle to the buffer-relative \p offset. Returns the byte count.
 */
static size_t copy_unixsock_fd(void *dest, lpaddr_t offset,
                               struct fd_store *fds)
{
    // shallow copy. doesn't really help us.
    struct _unix_socket *sock = fds->handle;
    size_t bytes = sizeof(struct _unix_socket);

    printf("adding UNIX socket (%p)\n\ttype: %x protocol: %x\n\tpassive: %d "
           "nonblkng: %d\n",
           fds->handle, sock->type, sock->protocol, sock->passive,
           sock->nonblocking);

    printf("copying %zu bytes from %p to %p\n", bytes, fds->handle, dest);
    memcpy(dest, fds->handle, bytes);

    fds->handle = (void *)offset;
    printf("fd %d fixed handle is: %p\n", fds->num, fds->handle);

    return bytes;
}
/**
* \brief Setup inherited file descriptors
*
*/
/**
 * \brief Setup inherited file descriptors
 *
 * Allocates and maps a frame, serialises the current fd table into it
 * (count, table entries, then per-type handle data), and returns the frame
 * capability in \p frame for the child to inherit.
 */
static errval_t spawn_setup_fds(struct capref *frame)
{
    errval_t err;
    void *fdspg;

    // Create frame (actually multiple pages) for fds
    err = frame_alloc(frame, FDS_SIZE, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_FDSPG);
    }

    // map it in so we can write to it
    err = vspace_map_one_frame(&fdspg, FDS_SIZE, *frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_FDSPG_TO_SELF);
    }

    /* Layout of FD page:
     * int num_fds
     * struct fd_store fdtab[num_fds]
     * uint8_t buf[]  // store of actual handle data. entries in fdtab above
     *                // point here (relative offset from the beginning of buf).
     * TODO: add the actual handle data!
     */
    int *num_fds = (int *)fdspg;
    *num_fds = 0;
    struct fd_store *fdtab = (struct fd_store *)(num_fds + 1);

    /* first copy all the fd table entries */
    struct fdtab_entry *fde;
    struct fd_store *fds;
    int i;
    for (i = MIN_FD; i < MAX_FD; i++) {
        fde = fdtab_get(i);
        if (fde->type != FDTAB_TYPE_AVAILABLE) {
            fds = &fdtab[*num_fds];
            fds->num = i;
            fds->type = fde->type;
            fds->handle = fde->handle;
            printf("added fd %d to fdtabs[%d]: %p as fd_store (%p: num: %d, "
                   "type: %d, (unfixed)handle: %p)\n",
                   i, *num_fds, &fdtab[*num_fds], fds, fds->num, fds->type,
                   fds->handle);
            (*num_fds)++;
        }
    }

    /* then copy all the handle data to the buffer */
    // Each copy_*_fd() helper writes its payload at `dest` and rewrites the
    // table entry's handle to the offset of that payload within `buf`.
    char *buf = (char *)&fdtab[*num_fds];
    char *dest = buf;
    genpaddr_t offset;
    size_t size;
    for (i = 0; i < *num_fds; i++) {
        fds = &fdtab[i];
        offset = (genpaddr_t)(dest - buf);
        switch (fds->type) {
        case FDTAB_TYPE_FILE:
            size = copy_file_fd(dest, offset, fds);
            break;
        case FDTAB_TYPE_UNIX_SOCKET:
            size = copy_unixsock_fd(dest, offset, fds);
            break;
        default:
            // nothing to copy
            size = 0;
            break;
        }
        dest += size;
    }

    // unmap frame
    err = vspace_unmap(fdspg);

    return err;
}
/**
 * \brief Spawn the child domain with the fd frame in its inherit cnode.
 *
 * \param fdcap  Frame capability holding the serialised fd table.
 * \return SYS_ERR_OK on success, or the failing allocation/spawn error.
 */
static errval_t spawn_child(struct capref fdcap)
{
    errval_t err;
    char *argv[2] = { "testdesc-child", NULL };
    domainid_t new_domain = -1;
    coreid_t core = 0;

    // allocate inheritcn
    struct capref inheritcn_cap;
    err = alloc_inheritcn_with_caps(&inheritcn_cap, fdcap,
                                    NULL_CAP, NULL_CAP);
    // BUGFIX: the allocation result was previously ignored; a failure here
    // would have passed a garbage capability to spawn_program_with_caps().
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to allocate inherit cnode");
        return err;
    }

    err = spawn_program_with_caps(core, argv[0], argv, NULL, inheritcn_cap,
                                  NULL_CAP, SPAWN_NEW_DOMAIN, &new_domain);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawn on core %d", core);
        return err;
    }

    return SYS_ERR_OK;
}
/**
 * Test driver: opens a file and a UNIX socket, serialises the fd table into
 * a frame, and spawns a child that inherits it.
 */
int main(int argc, char *argv[])
{
    errval_t err;

    printf("Test inheritance of file descriptors\n");

    // create some file handles
    // BUGFIX: open() with O_CREAT requires a third (mode) argument; omitting
    // it reads an indeterminate value from the variadic slot (UB per POSIX).
    int fd = open("test file", O_CREAT, 0644);
    printf("opened a file with fd: %d\n", fd);

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    printf("opened a socket with fd: %d\n", fd);

    struct capref fdcap;
    err = spawn_setup_fds(&fdcap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "could not setup fds!\n");
        return EXIT_FAILURE;
    }

    err = spawn_child(fdcap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "could not spawn child!\n");
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
|
/* Generated by CIL v. 1.7.0 */
/* print_CIL_Input is false */
struct _IO_FILE;
struct timeval;
extern float strtof(char const *str , char const *endptr ) ;
extern void signal(int sig , void *func ) ;
typedef struct _IO_FILE FILE;
extern int atoi(char const *s ) ;
extern double strtod(char const *str , char const *endptr ) ;
extern int fclose(void *stream ) ;
extern void *fopen(char const *filename , char const *mode ) ;
extern void abort() ;
extern void exit(int status ) ;
extern int raise(int sig ) ;
extern int fprintf(struct _IO_FILE *stream , char const *format , ...) ;
extern int strcmp(char const *a , char const *b ) ;
extern int rand() ;
extern unsigned long strtoul(char const *str , char const *endptr , int base ) ;
void RandomFunc(unsigned char input[1] , unsigned char output[1] ) ;
extern int strncmp(char const *s1 , char const *s2 , unsigned long maxlen ) ;
extern int gettimeofday(struct timeval *tv , void *tz , ...) ;
extern int printf(char const *format , ...) ;
int main(int argc , char *argv[] ) ;
void megaInit(void) ;
extern unsigned long strlen(char const *s ) ;
extern long strtol(char const *str , char const *endptr , int base ) ;
extern unsigned long strnlen(char const *s , unsigned long maxlen ) ;
extern void *memcpy(void *s1 , void const *s2 , unsigned long size ) ;
struct timeval {
long tv_sec ;
long tv_usec ;
};
extern void *malloc(unsigned long size ) ;
extern int scanf(char const *format , ...) ;
/* One-time global initialisation hook; the generator emitted no work here. */
void megaInit(void)
{
    /* intentionally empty */
}
/* Obfuscated (CIL-generated) transform: derives output[0] from input[0]. */
void RandomFunc(unsigned char input[1] , unsigned char output[1] )
{
  unsigned char state[1] ;
  unsigned char local1 ;

  {
  /* Seed: OR with a constant, shift right 3; the assignment truncates the
   * unsigned-long result to unsigned char. */
  state[0UL] = (input[0UL] | 51238316UL) >> (unsigned char)3;
  local1 = 0UL;
  /* NOTE(review): this loop body is dead code — local1 starts at 0 and the
   * condition is `< 0` on an unsigned bound, which is false immediately. */
  while (local1 < (unsigned char)0) {
    if (state[0UL] > local1) {
      if (state[0UL] > local1) {
        state[local1] = state[0UL] >> ((state[0UL] & (unsigned char)7) | 1UL);
      } else {
        state[local1] = state[0UL] << (((state[local1] >> (unsigned char)1) & (unsigned char)7) | 1UL);
      }
    } else
    if (state[0UL] > local1) {
      state[0UL] = state[local1] << (((state[local1] >> (unsigned char)3) & (unsigned char)7) | 1UL);
    } else {
      state[local1] = state[0UL] >> (((state[0UL] >> (unsigned char)1) & (unsigned char)7) | 1UL);
    }
    local1 += 2UL;
  }
  /* Final shift left 7: only the low bit of state survives (as 0x80 or 0). */
  output[0UL] = state[0UL] << (unsigned char)7;
  }
}
/* Entry point: reads one unsigned byte from argv[1] (base 10), feeds it to
 * RandomFunc and prints the resulting output byte, announcing "You win!"
 * when that byte equals 128.
 *
 * Fixes over the generated original:
 *   - main previously fell off the end without a return statement; an
 *     explicit `return 0;` is added.
 *   - the index loops are written as for-loops, which is the idiomatic form
 *     for a counted traversal.  Behavior and all output strings are
 *     unchanged. */
int main(int argc , char *argv[] )
{
  unsigned char input[1] ;
  unsigned char output[1] ;
  int randomFuns_i5 ;
  unsigned char randomFuns_value6 ;
  int randomFuns_main_i7 ;

  megaInit();
  if (argc != 2) {
    printf("Call this program with %i arguments\n", 1);
    /* NOTE: exit(-1) yields an implementation-defined shell status;
     * kept as-is because callers may depend on the exact value. */
    exit(-1);
  }
  /* Parse each command-line argument (exactly one here) as a base-10 byte. */
  for (randomFuns_i5 = 0; randomFuns_i5 < 1; randomFuns_i5 ++) {
    randomFuns_value6 = (unsigned char )strtoul(argv[randomFuns_i5 + 1], 0, 10);
    input[randomFuns_i5] = randomFuns_value6;
  }
  RandomFunc(input, output);
  if (output[0] == 128) {
    printf("You win!\n");
  }
  /* Echo every output byte (exactly one here) on its own line. */
  for (randomFuns_main_i7 = 0; randomFuns_main_i7 < 1; randomFuns_main_i7 ++) {
    printf("%u\n", output[randomFuns_main_i7]);
  }
  return 0;
}
|
from django import template
from django.contrib.contenttypes.models import ContentType
register = template.Library()
@register.assignment_tag
def has_unread_events(object, unread_events):
    """Return the unread events attached to ``object``.

    ``unread_events`` maps a ContentType id to an iterable of event objects;
    the result is the (possibly empty) list of those events whose
    ``object_id`` matches ``object.id``.
    """
    content_type = ContentType.objects.get_for_model(object)
    candidates = unread_events.get(content_type.id, [])
    # A list comprehension already yields [] when nothing matches, so no
    # separate empty-case branch is needed.
    return [event for event in candidates if event.object_id == object.id]
|
"""
Statistical tools for time series analysis
"""
from __future__ import annotations
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import deprecate_kwarg
from statsmodels.compat.python import Literal, lzip
from statsmodels.compat.scipy import _next_regular
from typing import Tuple
import warnings
import numpy as np
from numpy.linalg import LinAlgError
import pandas as pd
from scipy import stats
from scipy.interpolate import interp1d
from scipy.signal import correlate
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.sm_exceptions import (
CollinearityWarning,
InfeasibleTestError,
InterpolationWarning,
MissingDataError,
)
from statsmodels.tools.tools import Bunch, add_constant
from statsmodels.tools.validation import (
array_like,
bool_like,
dict_like,
float_like,
int_like,
string_like,
)
from statsmodels.tsa._bds import bds
from statsmodels.tsa._innovations import innovations_algo, innovations_filter
from statsmodels.tsa.adfvalues import mackinnoncrit, mackinnonp
from statsmodels.tsa.tsatools import add_trend, lagmat, lagmat2ds
# Public API of this module; kept explicit so ``from ... import *`` and
# documentation tooling expose exactly these names.
__all__ = [
    "acovf",
    "acf",
    "pacf",
    "pacf_yw",
    "pacf_ols",
    "ccovf",
    "ccf",
    "q_stat",
    "coint",
    "arma_order_select_ic",
    "adfuller",
    "kpss",
    "bds",
    "pacf_burg",
    "innovations_algo",
    "innovations_filter",
    "levinson_durbin_pacf",
    "levinson_durbin",
    "zivot_andrews",
    "range_unit_root_test",
]
# Square root of double-precision machine epsilon: used as a "numerically
# zero" threshold elsewhere in the module.
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
def _autolag(
mod,
endog,
exog,
startlag,
maxlag,
method,
modargs=(),
fitargs=(),
regresults=False,
):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array_like
nobs array containing endogenous variable
exog : array_like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {"aic", "bic", "t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
# TODO: can tcol be replaced by maxlag + 2?
# TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in results.items())
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in results.items())
elif method == "t-stat":
# stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
# Default values to ensure that always set
bestlag = startlag + maxlag
icbest = 0.0
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
bestlag = lag
if np.abs(icbest) >= stop:
# Break for first lag with a significant t-stat
break
else:
raise ValueError(f"Information Criterion {method} not understood.")
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
# this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
# Ng and Perron(2001), Lag length selection and the construction of unit root
# tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
# TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
# TODO: autolag is untested
def adfuller(
    x,
    maxlag: int | None = None,
    regression="c",
    autolag="AIC",
    store=False,
    regresults=False,
):
    """
    Augmented Dickey-Fuller unit root test.
    The Augmented Dickey-Fuller test can be used to test for a unit root in a
    univariate process in the presence of serial correlation.
    Parameters
    ----------
    x : array_like, 1d
        The data series to test.
    maxlag : {None, int}
        Maximum lag which is included in test, default value of
        12*(nobs/100)^{1/4} is used when ``None``.
    regression : {"c","ct","ctt","n"}
        Constant and trend order to include in regression.
        * "c" : constant only (default).
        * "ct" : constant and trend.
        * "ctt" : constant, and linear and quadratic trend.
        * "n" : no constant, no trend.
    autolag : {"AIC", "BIC", "t-stat", None}
        Method to use when automatically determining the lag length among the
        values 0, 1, ..., maxlag.
        * If "AIC" (default) or "BIC", then the number of lags is chosen
          to minimize the corresponding information criterion.
        * "t-stat" based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant
          using a 5%-sized test.
        * If None, then the number of included lags is set to maxlag.
    store : bool
        If True, then a result instance is returned additionally to
        the adf statistic. Default is False.
    regresults : bool, optional
        If True, the full regression results are returned. Default is False.
    Returns
    -------
    adf : float
        The test statistic.
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994, 2010).
    usedlag : int
        The number of lags used.
    nobs : int
        The number of observations used for the ADF regression and calculation
        of the critical values.
    critical values : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels. Based on MacKinnon (2010).
    icbest : float
        The maximized information criterion if autolag is not None.
    resstore : ResultStore, optional
        A dummy class with results attached as attributes.
    Notes
    -----
    The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
    root, with the alternative that there is no unit root. If the pvalue is
    above a critical size, then we cannot reject that there is a unit root.
    The p-values are obtained through regression surface approximation from
    MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
    to significant, then the critical values should be used to judge whether
    to reject the null.
    The autolag option and maxlag for it are described in Greene.
    References
    ----------
    .. [1] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
    .. [2] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
    .. [3] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
        unit-root and cointegration tests. `Journal of Business and Economic
        Statistics` 12, 167-76.
    .. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
        University, Dept of Economics, Working Papers. Available at
        http://ideas.repec.org/p/qed/wpaper/1227.html
    Examples
    --------
    See example notebook
    """
    x = array_like(x, "x")
    maxlag = int_like(maxlag, "maxlag", optional=True)
    regression = string_like(
        regression, "regression", options=("c", "ct", "ctt", "n")
    )
    autolag = string_like(
        autolag, "autolag", optional=True, options=("aic", "bic", "t-stat")
    )
    store = bool_like(store, "store")
    regresults = bool_like(regresults, "regresults")
    # Full regression results can only be attached to a result store.
    if regresults:
        store = True
    # Legacy encodings: None / integer trend orders map to the string codes.
    trenddict = {None: "n", 0: "c", 1: "ct", 2: "ctt"}
    if regression is None or isinstance(regression, int):
        regression = trenddict[regression]
    regression = regression.lower()
    nobs = x.shape[0]
    # Number of deterministic terms implied by the regression code
    # ("c"->1, "ct"->2, "ctt"->3, "n"->0).
    ntrend = len(regression) if regression != "n" else 0
    if maxlag is None:
        # from Greene referencing Schwert 1989
        maxlag = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
        # -1 for the diff
        maxlag = min(nobs // 2 - ntrend - 1, maxlag)
        if maxlag < 0:
            raise ValueError(
                "sample size is too short to use selected "
                "regression component"
            )
    elif maxlag > nobs // 2 - ntrend - 1:
        raise ValueError(
            "maxlag must be less than (nobs/2 - 1 - ntrend) "
            "where n trend is the number of included "
            "deterministic regressors"
        )
    xdiff = np.diff(x)
    # Columns: [level placeholder, lag 1, ..., lag maxlag] of the differences.
    xdall = lagmat(xdiff[:, None], maxlag, trim="both", original="in")
    nobs = xdall.shape[0]
    xdall[:, 0] = x[-nobs - 1 : -1]  # replace 0 xdiff with level of x
    xdshort = xdiff[-nobs:]
    if store:
        from statsmodels.stats.diagnostic import ResultsStore
        resstore = ResultsStore()
    if autolag:
        if regression != "n":
            fullRHS = add_trend(xdall, regression, prepend=True)
        else:
            fullRHS = xdall
        startlag = fullRHS.shape[1] - xdall.shape[1] + 1
        # 1 for level
        # search for lag length with smallest information criteria
        # Note: use the same number of observations to have comparable IC
        # aic and bic: smaller is better
        if not regresults:
            icbest, bestlag = _autolag(
                OLS, xdshort, fullRHS, startlag, maxlag, autolag
            )
        else:
            icbest, bestlag, alres = _autolag(
                OLS,
                xdshort,
                fullRHS,
                startlag,
                maxlag,
                autolag,
                regresults=regresults,
            )
            resstore.autolag_results = alres
        bestlag -= startlag  # convert to lag not column index
        # rerun ols with best autolag
        xdall = lagmat(xdiff[:, None], bestlag, trim="both", original="in")
        nobs = xdall.shape[0]
        xdall[:, 0] = x[-nobs - 1 : -1]  # replace 0 xdiff with level of x
        xdshort = xdiff[-nobs:]
        usedlag = bestlag
    else:
        usedlag = maxlag
        icbest = None
    # Final ADF regression at the selected lag length.
    if regression != "n":
        resols = OLS(
            xdshort, add_trend(xdall[:, : usedlag + 1], regression)
        ).fit()
    else:
        resols = OLS(xdshort, xdall[:, : usedlag + 1]).fit()
    # The ADF statistic is the t-value on the lagged level (first column).
    adfstat = resols.tvalues[0]
    # adfstat = (resols.params[0]-1.0)/resols.bse[0]
    # the "asymptotically correct" z statistic is obtained as
    # nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
    # I think this is the statistic that is used for series that are integrated
    # for orders higher than I(1), ie., not ADF but cointegration tests.
    # Get approx p-value and critical values
    pvalue = mackinnonp(adfstat, regression=regression, N=1)
    critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
    critvalues = {
        "1%": critvalues[0],
        "5%": critvalues[1],
        "10%": critvalues[2],
    }
    if store:
        resstore.resols = resols
        resstore.maxlag = maxlag
        resstore.usedlag = usedlag
        resstore.adfstat = adfstat
        resstore.critvalues = critvalues
        resstore.nobs = nobs
        resstore.H0 = (
            "The coefficient on the lagged level equals 1 - " "unit root"
        )
        resstore.HA = "The coefficient on the lagged level < 1 - stationary"
        resstore.icbest = icbest
        resstore._str = "Augmented Dickey-Fuller Test Results"
        return adfstat, pvalue, critvalues, resstore
    else:
        # Tuple length depends on autolag: icbest only exists when autolag
        # was used.
        if not autolag:
            return adfstat, pvalue, usedlag, nobs, critvalues
        else:
            return adfstat, pvalue, usedlag, nobs, critvalues, icbest
@deprecate_kwarg("unbiased", "adjusted")
def acovf(x, adjusted=False, demean=True, fft=True, missing="none", nlag=None):
    """
    Estimate autocovariances.
    Parameters
    ----------
    x : array_like
        Time series data. Must be 1d.
    adjusted : bool, default False
        If True, then denominators is n-k, otherwise n.
    demean : bool, default True
        If True, then subtract the mean x from each element of x.
    fft : bool, default True
        If True, use FFT convolution. This method should be preferred
        for long time series.
    missing : str, default "none"
        A string in ["none", "raise", "conservative", "drop"] specifying how
        the NaNs are to be treated. "none" performs no checks. "raise" raises
        an exception if NaN values are found. "drop" removes the missing
        observations and then estimates the autocovariances treating the
        non-missing as contiguous. "conservative" computes the autocovariance
        using nan-ops so that nans are removed when computing the mean
        and cross-products that are used to estimate the autocovariance.
        When using "conservative", n is set to the number of non-missing
        observations.
    nlag : {int, None}, default None
        Limit the number of autocovariances returned. Size of returned
        array is nlag + 1. Setting nlag when fft is False uses a simple,
        direct estimator of the autocovariances that only computes the first
        nlag + 1 values. This can be much faster when the time series is long
        and only a small number of autocovariances are needed.
    Returns
    -------
    ndarray
        The estimated autocovariances.
    References
    ----------
    .. [1] Parzen, E., 1963. On spectral analysis with missing observations
        and amplitude modulation. Sankhya: The Indian Journal of
        Statistics, Series A, pp.383-392.
    """
    adjusted = bool_like(adjusted, "adjusted")
    demean = bool_like(demean, "demean")
    fft = bool_like(fft, "fft", optional=False)
    missing = string_like(
        missing, "missing", options=("none", "raise", "conservative", "drop")
    )
    nlag = int_like(nlag, "nlag", optional=True)
    x = array_like(x, "x", ndim=1)
    missing = missing.lower()
    # Only inspect the data for NaNs when a policy other than "none" asks
    # for it.
    if missing == "none":
        deal_with_masked = False
    else:
        deal_with_masked = has_missing(x)
    if deal_with_masked:
        if missing == "raise":
            raise MissingDataError("NaNs were encountered in the data")
        notmask_bool = ~np.isnan(x)  # bool
        if missing == "conservative":
            # Must copy for thread safety
            x = x.copy()
            x[~notmask_bool] = 0
        else:  # "drop"
            x = x[notmask_bool]  # copies non-missing
        notmask_int = notmask_bool.astype(int)  # int
    if demean and deal_with_masked:
        # whether "drop" or "conservative":
        xo = x - x.sum() / notmask_int.sum()
        if missing == "conservative":
            xo[~notmask_bool] = 0
    elif demean:
        xo = x - x.mean()
    else:
        xo = x
    n = len(x)
    lag_len = nlag
    if nlag is None:
        lag_len = n - 1
    elif nlag > n - 1:
        raise ValueError("nlag must be smaller than nobs - 1")
    # Direct (non-FFT) estimator with a lag cap: compute only the first
    # lag_len + 1 cross-products.
    if not fft and nlag is not None:
        acov = np.empty(lag_len + 1)
        acov[0] = xo.dot(xo)
        for i in range(lag_len):
            acov[i + 1] = xo[i + 1 :].dot(xo[: -(i + 1)])
        if not deal_with_masked or missing == "drop":
            if adjusted:
                acov /= n - np.arange(lag_len + 1)
            else:
                acov /= n
        else:
            if adjusted:
                # Per-lag counts of overlapping non-missing pairs.
                divisor = np.empty(lag_len + 1, dtype=np.int64)
                divisor[0] = notmask_int.sum()
                for i in range(lag_len):
                    divisor[i + 1] = notmask_int[i + 1 :].dot(
                        notmask_int[: -(i + 1)]
                    )
                # Avoid division by zero when no pairs overlap at a lag.
                divisor[divisor == 0] = 1
                acov /= divisor
            else:  # biased, missing data but not "drop"
                acov /= notmask_int.sum()
        return acov
    # Build the per-lag denominator d used by both the FFT and full
    # correlate paths below.
    if adjusted and deal_with_masked and missing == "conservative":
        d = np.correlate(notmask_int, notmask_int, "full")
        d[d == 0] = 1
    elif adjusted:
        xi = np.arange(1, n + 1)
        d = np.hstack((xi, xi[:-1][::-1]))
    elif deal_with_masked:
        # biased and NaNs given and ("drop" or "conservative")
        d = notmask_int.sum() * np.ones(2 * n - 1)
    else:  # biased and no NaNs or missing=="none"
        d = n * np.ones(2 * n - 1)
    if fft:
        nobs = len(xo)
        # Pad to a fast FFT length for the circular-convolution trick.
        n = _next_regular(2 * nobs + 1)
        Frf = np.fft.fft(xo, n=n)
        acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1 :]
        acov = acov.real
    else:
        acov = np.correlate(xo, xo, "full")[n - 1 :] / d[n - 1 :]
    if nlag is not None:
        # Copy to allow gc of full array rather than view
        return acov[: lag_len + 1].copy()
    return acov
def q_stat(x, nobs):
    """
    Compute Ljung-Box Q Statistic.
    Parameters
    ----------
    x : array_like
        Array of autocorrelation coefficients. Can be obtained from acf.
    nobs : int, optional
        Number of observations in the entire sample (ie., not just the length
        of the autocorrelation function results.
    Returns
    -------
    q-stat : ndarray
        Ljung-Box Q-statistic for autocorrelation parameters.
    p-value : ndarray
        P-value of the Q statistic.
    See Also
    --------
    statsmodels.stats.diagnostic.acorr_ljungbox
        Ljung-Box Q-test for autocorrelation in time series based
        on a time series rather than the estimated autocorrelation
        function.
    Notes
    -----
    Designed to be used with acf.
    """
    x = array_like(x, "x")
    nobs = int_like(nobs, "nobs")
    # Lag indices 1..len(x); the Q statistic at lag k cumulates the scaled
    # squared autocorrelations up to k.
    lags = np.arange(1, len(x) + 1)
    q = nobs * (nobs + 2) * np.cumsum((1.0 / (nobs - lags)) * x ** 2)
    # Survival function of chi2 with k degrees of freedom gives the p-value.
    pvalues = stats.chi2.sf(q, lags)
    return q, pvalues
# NOTE: Changed unbiased to False
# see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(
    x,
    adjusted=False,
    nlags=None,
    qstat=False,
    fft=True,
    alpha=None,
    bartlett_confint=True,
    missing="none",
):
    """
    Calculate the autocorrelation function.
    Parameters
    ----------
    x : array_like
       The time series data.
    adjusted : bool, default False
       If True, then denominators for autocovariance are n-k, otherwise n.
    nlags : int, optional
        Number of lags to return autocorrelation for. If not provided,
        uses min(10 * np.log10(nobs), nobs - 1). The returned value
        includes lag 0 (ie., 1) so size of the acf vector is (nlags + 1,).
    qstat : bool, default False
        If True, returns the Ljung-Box q statistic for each autocorrelation
        coefficient. See q_stat for more information.
    fft : bool, default True
        If True, computes the ACF via FFT.
    alpha : scalar, default None
        If a number is given, the confidence intervals for the given level are
        returned. For instance if alpha=.05, 95 % confidence intervals are
        returned where the standard deviation is computed according to
        Bartlett's formula.
    bartlett_confint : bool, default True
        Confidence intervals for ACF values are generally placed at 2
        standard errors around r_k. The formula used for standard error
        depends upon the situation. If the autocorrelations are being used
        to test for randomness of residuals as part of the ARIMA routine,
        the standard errors are determined assuming the residuals are white
        noise. The approximate formula for any lag is that standard error
        of each r_k = 1/sqrt(N). See section 9.4 of [2] for more details on
        the 1/sqrt(N) result. For more elementary discussion, see section 5.3.2
        in [3].
        For the ACF of raw data, the standard error at a lag k is
        found as if the right model was an MA(k-1). This allows the possible
        interpretation that if all autocorrelations past a certain lag are
        within the limits, the model might be an MA of order defined by the
        last significant autocorrelation. In this case, a moving average
        model is assumed for the data and the standard errors for the
        confidence intervals should be generated using Bartlett's formula.
        For more details on Bartlett formula result, see section 7.2 in [2].
    missing : str, default "none"
        A string in ["none", "raise", "conservative", "drop"] specifying how
        the NaNs are to be treated. "none" performs no checks. "raise" raises
        an exception if NaN values are found. "drop" removes the missing
        observations and then estimates the autocovariances treating the
        non-missing as contiguous. "conservative" computes the autocovariance
        using nan-ops so that nans are removed when computing the mean
        and cross-products that are used to estimate the autocovariance.
        When using "conservative", n is set to the number of non-missing
        observations.
    Returns
    -------
    acf : ndarray
        The autocorrelation function for lags 0, 1, ..., nlags. Shape
        (nlags+1,).
    confint : ndarray, optional
        Confidence intervals for the ACF at lags 0, 1, ..., nlags. Shape
        (nlags + 1, 2). Returned if alpha is not None.
    qstat : ndarray, optional
        The Ljung-Box Q-Statistic for lags 1, 2, ..., nlags (excludes lag
        zero). Returned if q_stat is True.
    pvalues : ndarray, optional
        The p-values associated with the Q-statistics for lags 1, 2, ...,
        nlags (excludes lag zero). Returned if q_stat is True.
    Notes
    -----
    The acf at lag 0 (ie., 1) is returned.
    For very long time series it is recommended to use fft convolution instead.
    When fft is False uses a simple, direct estimator of the autocovariances
    that only computes the first nlag + 1 values. This can be much faster when
    the time series is long and only a small number of autocovariances are
    needed.
    If adjusted is true, the denominator for the autocovariance is adjusted
    for the loss of data.
    References
    ----------
    .. [1] Parzen, E., 1963. On spectral analysis with missing observations
       and amplitude modulation. Sankhya: The Indian Journal of
       Statistics, Series A, pp.383-392.
    .. [2] Brockwell and Davis, 1987. Time Series Theory and Methods
    .. [3] Brockwell and Davis, 2010. Introduction to Time Series and
       Forecasting, 2nd edition.
    """
    adjusted = bool_like(adjusted, "adjusted")
    nlags = int_like(nlags, "nlags", optional=True)
    qstat = bool_like(qstat, "qstat")
    fft = bool_like(fft, "fft", optional=False)
    alpha = float_like(alpha, "alpha", optional=True)
    missing = string_like(
        missing, "missing", options=("none", "raise", "conservative", "drop")
    )
    x = array_like(x, "x")
    # TODO: should this shrink for missing="drop" and NaNs in x?
    nobs = x.shape[0]
    if nlags is None:
        nlags = min(int(10 * np.log10(nobs)), nobs - 1)
    # Normalize the autocovariances by the lag-0 value to obtain
    # autocorrelations.
    avf = acovf(x, adjusted=adjusted, demean=True, fft=fft, missing=missing)
    acf = avf[: nlags + 1] / avf[0]
    if not (qstat or alpha):
        return acf
    _alpha = alpha if alpha is not None else 0.05
    if bartlett_confint:
        # Bartlett's formula: var(r_k) grows with the cumulative sum of the
        # squared lower-lag autocorrelations; lag 0 has zero variance.
        varacf = np.ones_like(acf) / nobs
        varacf[0] = 0
        varacf[1] = 1.0 / nobs
        varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1] ** 2)
    else:
        # White-noise assumption: constant variance 1/N at every lag.
        varacf = 1.0 / len(x)
    interval = stats.norm.ppf(1 - _alpha / 2.0) * np.sqrt(varacf)
    confint = np.array(lzip(acf - interval, acf + interval))
    if not qstat:
        return acf, confint
    qstat, pvalue = q_stat(acf[1:], nobs=nobs)  # drop lag 0
    if alpha is not None:
        return acf, confint, qstat, pvalue
    else:
        return acf, qstat, pvalue
def pacf_yw(x, nlags=None, method="adjusted"):
    """
    Partial autocorrelation estimated with non-recursive yule_walker.
    Parameters
    ----------
    x : array_like
        The observations of time series for which pacf is calculated.
    nlags : int, optional
        Number of lags to return autocorrelation for. If not provided,
        uses min(10 * np.log10(nobs), nobs - 1).
    method : {"adjusted", "mle"}, default "adjusted"
        The method for the autocovariance calculations in yule walker.
    Returns
    -------
    ndarray
        The partial autocorrelations, maxlag+1 elements.
    See Also
    --------
    statsmodels.tsa.stattools.pacf
        Partial autocorrelation estimation.
    statsmodels.tsa.stattools.pacf_ols
        Partial autocorrelation estimation using OLS.
    statsmodels.tsa.stattools.pacf_burg
        Partial autocorrelation estimation using Burg's method.
    Notes
    -----
    This solves yule_walker for each desired lag and contains
    currently duplicate calculations.
    """
    x = array_like(x, "x")
    nlags = int_like(nlags, "nlags", optional=True)
    nobs = x.shape[0]
    if nlags is None:
        nlags = min(int(10 * np.log10(nobs)), nobs - 1)
    method = string_like(method, "method", options=("adjusted", "mle"))
    # Lag 0 is 1 by definition; each subsequent PACF value is the highest-
    # order AR coefficient from a fresh Yule-Walker fit of that order.
    partials = [1.0]
    for order in range(1, nlags + 1):
        rho, _ = yule_walker(x, order, method=method)
        partials.append(rho[-1])
    return np.array(partials)
def pacf_burg(x, nlags=None, demean=True):
    """
    Calculate Burg's partial autocorrelation estimator.
    Parameters
    ----------
    x : array_like
        Observations of time series for which pacf is calculated.
    nlags : int, optional
        Number of lags to return autocorrelation for. If not provided,
        uses min(10 * np.log10(nobs), nobs - 1).
    demean : bool, optional
        Flag indicating to demean that data. Set to False if x has been
        previously demeaned.
    Returns
    -------
    pacf : ndarray
        Partial autocorrelations for lags 0, 1, ..., nlag.
    sigma2 : ndarray
        Residual variance estimates where the value in position m is the
        residual variance in an AR model that includes m lags.
    See Also
    --------
    statsmodels.tsa.stattools.pacf
        Partial autocorrelation estimation.
    statsmodels.tsa.stattools.pacf_yw
        Partial autocorrelation estimation using Yule-Walker.
    statsmodels.tsa.stattools.pacf_ols
        Partial autocorrelation estimation using OLS.
    References
    ----------
    .. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
        and forecasting. Springer.
    """
    x = array_like(x, "x")
    if demean:
        x = x - x.mean()
    nobs = x.shape[0]
    p = nlags if nlags is not None else min(int(10 * np.log10(nobs)), nobs - 1)
    if p > nobs - 1:
        raise ValueError("nlags must be smaller than nobs - 1")
    # d tracks the Burg denominator at each order; u and v are the backward
    # and forward prediction error sequences (time-reversed data initially).
    d = np.zeros(p + 1)
    d[0] = 2 * x.dot(x)
    pacf = np.zeros(p + 1)
    u = x[::-1].copy()
    v = x[::-1].copy()
    d[1] = u[:-1].dot(u[:-1]) + v[1:].dot(v[1:])
    pacf[1] = 2 / d[1] * v[1:].dot(u[:-1])
    last_u = np.empty_like(u)
    last_v = np.empty_like(v)
    # Burg recursion: update the error sequences and denominator order by
    # order; each reflection coefficient is the next partial autocorrelation.
    for i in range(1, p):
        last_u[:] = u
        last_v[:] = v
        u[1:] = last_u[:-1] - pacf[i] * last_v[1:]
        v[1:] = last_v[1:] - pacf[i] * last_u[:-1]
        d[i + 1] = (1 - pacf[i] ** 2) * d[i] - v[i] ** 2 - u[-1] ** 2
        pacf[i + 1] = 2 / d[i + 1] * v[i + 1 :].dot(u[i:-1])
    # Residual variance at each order follows from the denominators.
    sigma2 = (1 - pacf ** 2) * d / (2.0 * (nobs - np.arange(0, p + 1)))
    pacf[0] = 1  # Insert the 0 lag partial autocorrel
    return pacf, sigma2
@deprecate_kwarg("unbiased", "adjusted")
def pacf_ols(x, nlags=None, efficient=True, adjusted=False):
    """
    Calculate partial autocorrelations via OLS.
    Parameters
    ----------
    x : array_like
        Observations of time series for which pacf is calculated.
    nlags : int, optional
        Number of lags to return autocorrelation for. If not provided,
        uses min(10 * np.log10(nobs), nobs - 1).
    efficient : bool, optional
        If true, uses the maximum number of available observations to compute
        each partial autocorrelation. If not, uses the same number of
        observations to compute all pacf values.
    adjusted : bool, optional
        Adjust each partial autocorrelation by n / (n - lag).
    Returns
    -------
    ndarray
        The partial autocorrelations, (maxlag,) array corresponding to lags
        0, 1, ..., maxlag.
    See Also
    --------
    statsmodels.tsa.stattools.pacf
        Partial autocorrelation estimation.
    statsmodels.tsa.stattools.pacf_yw
        Partial autocorrelation estimation using Yule-Walker.
    statsmodels.tsa.stattools.pacf_burg
        Partial autocorrelation estimation using Burg's method.
    Notes
    -----
    This solves a separate OLS estimation for each desired lag using method in
    [1]_. Setting efficient to True has two effects. First, it uses
    `nobs - lag` observations of estimate each pacf. Second, it re-estimates
    the mean in each regression. If efficient is False, then the data are first
    demeaned, and then `nobs - maxlag` observations are used to estimate each
    partial autocorrelation.
    The inefficient estimator appears to have better finite sample properties.
    This option should only be used in time series that are covariance
    stationary.
    OLS estimation of the pacf does not guarantee that all pacf values are
    between -1 and 1.
    References
    ----------
    .. [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
       Time series analysis: forecasting and control. John Wiley & Sons, p. 66
    """
    x = array_like(x, "x")
    nlags = int_like(nlags, "nlags", optional=True)
    efficient = bool_like(efficient, "efficient")
    adjusted = bool_like(adjusted, "adjusted")
    nobs = x.shape[0]
    if nlags is None:
        nlags = min(int(10 * np.log10(nobs)), nobs - 1)
    out = np.empty(nlags + 1)
    out[0] = 1.0
    if efficient:
        # One regression per lag, each using as many rows as that lag allows;
        # a constant term re-estimates the mean every time.
        lagged, lead = lagmat(x, nlags, original="sep")
        lagged = add_constant(lagged)
        for lag in range(1, nlags + 1):
            beta = lstsq(lagged[lag:, : lag + 1], lead[lag:], rcond=None)[0]
            out[lag] = beta[-1]
    else:
        # Demean once, then use a common trimmed sample for every lag.
        x = x - np.mean(x)
        lagged, lead = lagmat(x, nlags, original="sep", trim="both")
        for lag in range(1, nlags + 1):
            beta = lstsq(lagged[:, :lag], lead, rcond=None)[0]
            # The highest-order coefficient is the PACF value (see [1]).
            out[lag] = beta[-1]
    if adjusted:
        # Small-sample bias adjustment n / (n - k) per lag.
        out *= nobs / (nobs - np.arange(nlags + 1))
    return out
def pacf(x, nlags=None, method="ywadjusted", alpha=None):
    """
    Partial autocorrelation estimate.
    Parameters
    ----------
    x : array_like
        Observations of time series for which pacf is calculated.
    nlags : int, optional
        Number of lags to return autocorrelation for. If not provided,
        uses min(10 * np.log10(nobs), nobs // 2 - 1). The returned value
        includes lag 0 (ie., 1) so size of the pacf vector is (nlags + 1,).
    method : str, default "ywadjusted"
        Specifies which method for the calculations to use.
        - "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in
          denominator for acovf. Default.
        - "ywm" or "ywmle" : Yule-Walker without adjustment.
        - "ols" : regression of time series on lags of it and on constant.
        - "ols-inefficient" : regression of time series on lags using a single
          common sample to estimate all pacf coefficients.
        - "ols-adjusted" : regression of time series on lags with a bias
          adjustment.
        - "ld" or "ldadjusted" : Levinson-Durbin recursion with bias
          correction.
        - "ldb" or "ldbiased" : Levinson-Durbin recursion without bias
          correction.
        - "burg" : Burg's partial autocorrelation estimator.
    alpha : float, optional
        If a number is given, the confidence intervals for the given level are
        returned. For instance if alpha=.05, 95 % confidence intervals are
        returned where the standard deviation is computed according to
        1/sqrt(len(x)).
    Returns
    -------
    pacf : ndarray
        The partial autocorrelations for lags 0, 1, ..., nlags. Shape
        (nlags+1,).
    confint : ndarray, optional
        Confidence intervals for the PACF at lags 0, 1, ..., nlags. Shape
        (nlags + 1, 2). Returned if alpha is not None.
    See Also
    --------
    statsmodels.tsa.stattools.acf
        Estimate the autocorrelation function.
    statsmodels.tsa.stattools.pacf
        Partial autocorrelation estimation.
    statsmodels.tsa.stattools.pacf_yw
        Partial autocorrelation estimation using Yule-Walker.
    statsmodels.tsa.stattools.pacf_ols
        Partial autocorrelation estimation using OLS.
    statsmodels.tsa.stattools.pacf_burg
        Partial autocorrelation estimation using Burg's method.
    Notes
    -----
    Based on simulation evidence across a range of low-order ARMA models,
    the best methods based on root MSE are Yule-Walker (MLW), Levinson-Durbin
    (MLE) and Burg, respectively. The estimators with the lowest bias included
    included these three in addition to OLS and OLS-adjusted.
    Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed
    consistently worse than the other options.
    """
    nlags = int_like(nlags, "nlags", optional=True)
    # Every accepted spelling of every method; aliases are resolved in the
    # dispatch chain below.
    methods = (
        "ols",
        "ols-inefficient",
        "ols-adjusted",
        "yw",
        "ywa",
        "ld",
        "ywadjusted",
        "yw_adjusted",
        "ywm",
        "ywmle",
        "yw_mle",
        "lda",
        "ldadjusted",
        "ld_adjusted",
        "ldb",
        "ldbiased",
        "ld_biased",
        "burg"
    )
    x = array_like(x, "x", maxdim=2)
    method = string_like(method, "method", options=methods)
    alpha = float_like(alpha, "alpha", optional=True)
    nobs = x.shape[0]
    if nlags is None:
        nlags = min(int(10 * np.log10(nobs)), nobs // 2 - 1)
    # PACF estimation needs at least 2 observations per lag.
    if nlags >= x.shape[0] // 2:
        raise ValueError(
            "Can only compute partial correlations for lags up to 50% of the "
            f"sample size. The requested nlags {nlags} must be < "
            f"{x.shape[0] // 2}."
        )
    # Dispatch on the (alias-expanded) method name.
    if method in ("ols", "ols-inefficient", "ols-adjusted"):
        efficient = "inefficient" not in method
        adjusted = "adjusted" in method
        ret = pacf_ols(x, nlags=nlags, efficient=efficient, adjusted=adjusted)
    elif method in ("yw", "ywa", "ywadjusted", "yw_adjusted"):
        ret = pacf_yw(x, nlags=nlags, method="adjusted")
    elif method in ("ywm", "ywmle", "yw_mle"):
        ret = pacf_yw(x, nlags=nlags, method="mle")
    elif method in ("ld", "lda", "ldadjusted", "ld_adjusted"):
        acv = acovf(x, adjusted=True, fft=False)
        ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
        ret = ld_[2]
    elif method == "burg":
        ret, _ = pacf_burg(x, nlags=nlags, demean=True)
    # inconsistent naming with ywmle
    else:  # method in ("ldb", "ldbiased", "ld_biased")
        acv = acovf(x, adjusted=False, fft=False)
        ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
        ret = ld_[2]
    if alpha is not None:
        # Normal approximation with constant variance 1/N for all lags >= 1.
        varacf = 1.0 / len(x)  # for all lags >=1
        interval = stats.norm.ppf(1.0 - alpha / 2.0) * np.sqrt(varacf)
        confint = np.array(lzip(ret - interval, ret + interval))
        confint[0] = ret[0]  # fix confidence interval for lag 0 to varpacf=0
        return ret, confint
    else:
        return ret
@deprecate_kwarg("unbiased", "adjusted")
def ccovf(x, y, adjusted=True, demean=True, fft=True):
    """
    Estimate the cross-covariance function between two series.

    Parameters
    ----------
    x, y : array_like
        The time series data to use in the calculation.
    adjusted : bool, optional
        If True, then denominators for crosscovariance is n-k, otherwise n.
    demean : bool, optional
        Flag indicating whether to demean x and y.
    fft : bool, default True
        If True, use FFT convolution.  This method should be preferred
        for long time series.

    Returns
    -------
    ndarray
        The estimated crosscovariance function.
    """
    x = array_like(x, "x")
    y = array_like(y, "y")
    adjusted = bool_like(adjusted, "adjusted")
    demean = bool_like(demean, "demean")
    fft = bool_like(fft, "fft", optional=False)

    nobs = len(x)
    # Optionally center each series around its own mean.
    x_cent = x - x.mean() if demean else x
    y_cent = y - y.mean() if demean else y
    # Per-lag denominators n - k when adjusted, a flat n otherwise.
    if adjusted:
        denom = np.arange(nobs, 0, -1)
    else:
        denom = nobs
    conv_method = "fft" if fft else "direct"
    full_xcov = correlate(x_cent, y_cent, "full", method=conv_method)
    # Keep only the non-negative lags (0, 1, ..., n - 1).
    return full_xcov[nobs - 1 :] / denom
@deprecate_kwarg("unbiased", "adjusted")
def ccf(x, y, adjusted=True, fft=True):
    """
    Estimate the cross-correlation function between two series.

    Parameters
    ----------
    x, y : array_like
        The time series data to use in the calculation.
    adjusted : bool
        If True, then denominators for cross-correlation is n-k, otherwise n.
    fft : bool, default True
        If True, use FFT convolution.  This method should be preferred
        for long time series.

    Returns
    -------
    ndarray
        The cross-correlation function of x and y.

    Notes
    -----
    If adjusted is true, the denominator for the autocovariance is adjusted.
    """
    x = array_like(x, "x")
    y = array_like(y, "y")
    adjusted = bool_like(adjusted, "adjusted")
    fft = bool_like(fft, "fft", optional=False)

    # Cross-covariance of the demeaned series ...
    cross_cov = ccovf(x, y, adjusted=adjusted, demean=True, fft=fft)
    # ... normalized by the product of the (population) standard deviations.
    scale = np.std(x) * np.std(y)
    return cross_cov / scale
# moved from sandbox.tsa.examples.try_ld_nitime, via nitime
# TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
    """
    Levinson-Durbin recursion for autoregressive processes.

    Parameters
    ----------
    s : array_like
        If isacov is False, then this is the time series. If iasacov is true
        then this is interpreted as autocovariance starting with lag 0.
    nlags : int, optional
        The largest lag to include in recursion or order of the autoregressive
        process.
    isacov : bool, optional
        Flag indicating whether the first argument, s, contains the
        autocovariances or the data series.

    Returns
    -------
    sigma_v : float
        The estimate of the error variance.
    arcoefs : ndarray
        The estimate of the autoregressive coefficients for a model including
        nlags.
    pacf : ndarray
        The partial autocorrelation function.
    sigma : ndarray
        The entire sigma array from intermediate result, last value is sigma_v.
    phi : ndarray
        The entire phi array from intermediate result, last column contains
        autoregressive coefficients for AR(nlags).

    Notes
    -----
    This function returns currently all results, but maybe we drop sigma and
    phi from the returns.

    If this function is called with the time series (isacov=False), then the
    sample autocovariance function is calculated with the default options
    (biased, no fft).
    """
    s = array_like(s, "s")
    nlags = int_like(nlags, "nlags")
    isacov = bool_like(isacov, "isacov")
    order = nlags
    if isacov:
        sxx_m = s
    else:
        # compute the (biased, non-fft) sample autocovariances up to `order`
        sxx_m = acovf(s, fft=False)[: order + 1]  # not tested
    # phi[j, k] is the j-th AR coefficient of the order-k model;
    # sig[k] is the innovation variance of the order-k model
    phi = np.zeros((order + 1, order + 1), "d")
    sig = np.zeros(order + 1)
    # initial points for the recursion: the closed-form AR(1) solution
    phi[1, 1] = sxx_m[1] / sxx_m[0]
    sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
    for k in range(2, order + 1):
        # reflection coefficient = partial autocorrelation at lag k
        phi[k, k] = (
            sxx_m[k] - np.dot(phi[1:k, k - 1], sxx_m[1:k][::-1])
        ) / sig[k - 1]
        # update the lower-order coefficients given the new reflection coef
        for j in range(1, k):
            phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]
        # the prediction-error variance shrinks by (1 - pacf_k ** 2)
        sig[k] = sig[k - 1] * (1 - phi[k, k] ** 2)
    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    # the diagonal of phi collects the partial autocorrelations; lag 0 is
    # defined to be 1
    pacf_ = np.diag(phi).copy()
    pacf_[0] = 1.0
    return sigma_v, arcoefs, pacf_, sig, phi  # return everything
def levinson_durbin_pacf(pacf, nlags=None):
    """
    Levinson-Durbin algorithm that returns the acf and ar coefficients.

    Parameters
    ----------
    pacf : array_like
        Partial autocorrelation array for lags 0, 1, ... p.
    nlags : int, optional
        Number of lags in the AR model.  If omitted, returns coefficients from
        an AR(p) and the first p autocorrelations.

    Returns
    -------
    arcoefs : ndarray
        AR coefficients computed from the partial autocorrelations.
    acf : ndarray
        The acf computed from the partial autocorrelations. Array returned
        contains the autocorrelations corresponding to lags 0, 1, ..., p.

    References
    ----------
    .. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
        and forecasting. Springer.
    """
    pacf = array_like(pacf, "pacf")
    nlags = int_like(nlags, "nlags", optional=True)
    pacf = np.squeeze(np.asarray(pacf))
    # the lag-0 partial autocorrelation is 1 by definition; validate then drop
    if pacf[0] != 1:
        raise ValueError(
            "The first entry of the pacf corresponds to lags 0 "
            "and so must be 1."
        )
    pacf = pacf[1:]
    n = pacf.shape[0]
    if nlags is not None:
        if nlags > n:
            raise ValueError(
                "Must provide at least as many values from the "
                "pacf as the number of lags."
            )
        # truncate to the requested AR model order
        pacf = pacf[:nlags]
        n = pacf.shape[0]
    acf = np.zeros(n + 1)
    acf[1] = pacf[0]
    # nu[k] is the cumulative prediction-error variance ratio after k+1 lags
    nu = np.cumprod(1 - pacf ** 2)
    arcoefs = pacf.copy()
    for i in range(1, n):
        # one Levinson-Durbin step: fold the lag-(i+1) reflection coefficient
        # into the first i AR coefficients, then extend the acf by one lag
        prev = arcoefs[: -(n - i)].copy()
        arcoefs[: -(n - i)] = prev - arcoefs[i] * prev[::-1]
        acf[i + 1] = arcoefs[i] * nu[i - 1] + prev.dot(acf[1 : -(n - i)][::-1])
    acf[0] = 1
    return arcoefs, acf
def breakvar_heteroskedasticity_test(
    resid, subset_length=1 / 3, alternative="two-sided", use_f=True
):
    r"""
    Test for heteroskedasticity of residuals

    Tests whether the sum-of-squares in the first subset of the sample is
    significantly different than the sum-of-squares in the last subset
    of the sample. Analogous to a Goldfeld-Quandt test. The null hypothesis
    is of no heteroskedasticity.

    Parameters
    ----------
    resid : array_like
        Residuals of a time series model.
        The shape is 1d (nobs,) or 2d (nobs, nvars).
    subset_length : {int, float}
        Length of the subsets to test (h in Notes below).
        If a float in 0 < subset_length < 1, it is interpreted as fraction.
        Default is 1/3.
    alternative : str, 'increasing', 'decreasing' or 'two-sided'
        This specifies the alternative for the p-value calculation. Default
        is two-sided.
    use_f : bool, optional
        Whether or not to compare against the asymptotic distribution
        (chi-squared) or the approximate small-sample distribution (F).
        Default is True (i.e. default is to compare against an F
        distribution).

    Returns
    -------
    test_statistic : {float, ndarray}
        Test statistic(s) H(h).
    p_value : {float, ndarray}
        p-value(s) of test statistic(s).

    Raises
    ------
    ValueError
        If `subset_length` is neither a fraction in (0, 1) nor a number >= 1,
        or if `alternative` is not one of the supported strings.

    Notes
    -----
    The null hypothesis is of no heteroskedasticity. That means different
    things depending on which alternative is selected:

    - Increasing: Null hypothesis is that the variance is not increasing
      throughout the sample; that the sum-of-squares in the later
      subsample is *not* greater than the sum-of-squares in the earlier
      subsample.
    - Decreasing: Null hypothesis is that the variance is not decreasing
      throughout the sample; that the sum-of-squares in the earlier
      subsample is *not* greater than the sum-of-squares in the later
      subsample.
    - Two-sided: Null hypothesis is that the variance is not changing
      throughout the sample. Both that the sum-of-squares in the earlier
      subsample is not greater than the sum-of-squares in the later
      subsample *and* that the sum-of-squares in the later subsample is
      not greater than the sum-of-squares in the earlier subsample.

    For :math:`h = [T/3]`, the test statistic is:

    .. math::

        H(h) = \sum_{t=T-h+1}^T  \tilde v_t^2
        \Bigg / \sum_{t=1}^{h} \tilde v_t^2

    This statistic can be tested against an :math:`F(h,h)` distribution.
    Alternatively, :math:`h H(h)` is asymptotically distributed according
    to :math:`\chi_h^2`; this second test can be applied by passing
    `use_f=False` as an argument.

    See section 5.4 of [1]_ for the above formula and discussion, as well
    as additional details.

    References
    ----------
    .. [1] Harvey, Andrew C. 1990. *Forecasting, Structural Time Series*
           *Models and the Kalman Filter.* Cambridge University Press.
    """
    squared_resid = np.asarray(resid, dtype=float) ** 2
    if squared_resid.ndim == 1:
        # work column-wise so 1d and 2d inputs share one code path
        squared_resid = squared_resid.reshape(-1, 1)
    nobs = len(resid)

    # Resolve the subset length h: a fraction in (0, 1) is scaled by the
    # sample size; any numeric value >= 1 is used as an observation count.
    # (Previously a float >= 1, a numpy integer, or a non-positive value left
    # h undefined and raised an opaque NameError.)
    if 0 < subset_length < 1:
        h = int(np.round(nobs * subset_length))
    elif subset_length >= 1:
        h = int(subset_length)
    else:
        raise ValueError(
            "subset_length must be a fraction in (0, 1) or a number >= 1"
        )

    # Numerator: sum of squares over the LAST h observations (later subset).
    numer_resid = squared_resid[-h:]
    numer_dof = (~np.isnan(numer_resid)).sum(axis=0)
    numer_squared_sum = np.nansum(numer_resid, axis=0)
    for i, dof in enumerate(numer_dof):
        if dof < 2:
            # NOTE: the numerator is the *later* subset; the original message
            # incorrectly said "Early subset".
            warnings.warn(
                "Later subset of data for variable %d"
                " has too few non-missing observations to"
                " calculate test statistic." % i,
                stacklevel=2,
            )
            numer_squared_sum[i] = np.nan

    # Denominator: sum of squares over the FIRST h observations (early subset).
    denom_resid = squared_resid[:h]
    denom_dof = (~np.isnan(denom_resid)).sum(axis=0)
    denom_squared_sum = np.nansum(denom_resid, axis=0)
    for i, dof in enumerate(denom_dof):
        if dof < 2:
            # NOTE: the denominator is the *early* subset; the original
            # message incorrectly said "Later subset".
            warnings.warn(
                "Early subset of data for variable %d"
                " has too few non-missing observations to"
                " calculate test statistic." % i,
                stacklevel=2,
            )
            denom_squared_sum[i] = np.nan

    test_statistic = numer_squared_sum / denom_squared_sum

    # Setup functions to calculate the p-values (defs instead of assigned
    # lambdas, per PEP 8 E731)
    if use_f:
        from scipy.stats import f

        def pval_lower(test_statistics):
            return f.cdf(test_statistics, numer_dof, denom_dof)

        def pval_upper(test_statistics):
            return f.sf(test_statistics, numer_dof, denom_dof)

    else:
        from scipy.stats import chi2

        def pval_lower(test_statistics):
            return chi2.cdf(numer_dof * test_statistics, denom_dof)

        def pval_upper(test_statistics):
            return chi2.sf(numer_dof * test_statistics, denom_dof)

    # Calculate the one- or two-sided p-values
    alternative = alternative.lower()
    if alternative in ["i", "inc", "increasing"]:
        p_value = pval_upper(test_statistic)
    elif alternative in ["d", "dec", "decreasing"]:
        test_statistic = 1.0 / test_statistic
        p_value = pval_upper(test_statistic)
    elif alternative in ["2", "2-sided", "two-sided"]:
        p_value = 2 * np.minimum(
            pval_lower(test_statistic), pval_upper(test_statistic)
        )
    else:
        raise ValueError("Invalid alternative.")

    # Scalars in, scalars out
    if len(test_statistic) == 1:
        return test_statistic[0], p_value[0]

    return test_statistic, p_value
def grangercausalitytests(x, maxlag, addconst=True, verbose=None):
    """
    Four tests for granger non causality of 2 time series.

    All four tests give similar results. `params_ftest` and `ssr_ftest` are
    equivalent based on F test which is identical to lmtest:grangertest in R.

    Parameters
    ----------
    x : array_like
        The data for testing whether the time series in the second column Granger
        causes the time series in the first column. Missing values are not
        supported.
    maxlag : {int, Iterable[int]}
        If an integer, computes the test for all lags up to maxlag. If an
        iterable, computes the tests only for the lags in maxlag.
    addconst : bool
        Include a constant in the model.
    verbose : bool
        Print results. Deprecated

        .. deprecated: 0.14

           verbose is deprecated and will be removed after 0.15 is released

    Returns
    -------
    dict
        All test results, dictionary keys are the number of lags. For each
        lag the values are a tuple, with the first element a dictionary with
        test statistic, pvalues, degrees of freedom, the second element are
        the OLS estimation results for the restricted model, the unrestricted
        model and the restriction (contrast) matrix for the parameter f_test.

    Notes
    -----
    TODO: convert to class and attach results properly

    The Null hypothesis for grangercausalitytests is that the time series in
    the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Grange causality means that past values of x2 have a
    statistically significant effect on the current value of x1, taking past
    values of x1 into account as regressors. We reject the null hypothesis
    that x2 does not Granger cause x1 if the pvalues are below a desired size
    of the test.

    The null hypothesis for all four test is that the coefficients
    corresponding to past values of the second time series are zero.

    `params_ftest`, `ssr_ftest` are based on F distribution

    `ssr_chi2test`, `lrtest` are based on chi-square distribution

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Granger_causality

    .. [2] Greene: Econometric Analysis

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from statsmodels.tsa.stattools import grangercausalitytests
    >>> import numpy as np
    >>> data = sm.datasets.macrodata.load_pandas()
    >>> data = data.data[["realgdp", "realcons"]].pct_change().dropna()

    All lags up to 4

    >>> gc_res = grangercausalitytests(data, 4)

    Only lag 4

    >>> gc_res = grangercausalitytests(data, [4])
    """
    x = array_like(x, "x", ndim=2)
    if not np.isfinite(x).all():
        raise ValueError("x contains NaN or inf values.")
    addconst = bool_like(addconst, "addconst")
    # verbose=None means "not supplied": warn when it was passed explicitly,
    # otherwise fall back to the historical printing behavior
    if verbose is not None:
        verbose = bool_like(verbose, "verbose")
        warnings.warn(
            "verbose is deprecated since functions should not print results",
            FutureWarning,
        )
    else:
        verbose = True  # old default
    try:
        # scalar maxlag: test every lag 1, ..., maxlag
        maxlag = int_like(maxlag, "maxlag")
        if maxlag <= 0:
            raise ValueError("maxlag must be a positive integer")
        lags = np.arange(1, maxlag + 1)
    except TypeError:
        # iterable maxlag: test only the listed lags
        lags = np.array([int(lag) for lag in maxlag])
        maxlag = lags.max()
        if lags.min() <= 0 or lags.size == 0:
            raise ValueError(
                "maxlag must be a non-empty list containing only "
                "positive integers"
            )
    # need enough observations to estimate the largest unrestricted model
    if x.shape[0] <= 3 * maxlag + int(addconst):
        raise ValueError(
            "Insufficient observations. Maximum allowable "
            "lag is {0}".format(int((x.shape[0] - int(addconst)) / 3) - 1)
        )
    resli = {}
    for mlg in lags:
        result = {}
        if verbose:
            print("\nGranger Causality")
            print("number of lags (no zero)", mlg)
        mxlg = mlg
        # create lagmat of both time series
        dta = lagmat2ds(x, mxlg, trim="both", dropex=1)
        # add constant
        if addconst:
            # restricted design: own lags only; unrestricted: both series' lags
            dtaown = add_constant(dta[:, 1 : (mxlg + 1)], prepend=False)
            dtajoint = add_constant(dta[:, 1:], prepend=False)
            # exactly one constant column is expected after add_constant;
            # anything else means x already contained a constant column
            if (
                dtajoint.shape[1] == (dta.shape[1] - 1)
                or (dtajoint.max(0) == dtajoint.min(0)).sum() != 1
            ):
                raise InfeasibleTestError(
                    "The x values include a column with constant values and so"
                    " the test statistic cannot be computed."
                )
        else:
            raise NotImplementedError("Not Implemented")
            # dtaown = dta[:, 1:mxlg]
            # dtajoint = dta[:, 1:]
        # Run ols on both models without and with lags of second variable
        res2down = OLS(dta[:, 0], dtaown).fit()
        res2djoint = OLS(dta[:, 0], dtajoint).fit()
        # print results
        # for ssr based tests see:
        # http://support.sas.com/rnd/app/examples/ets/granger/index.htm
        # the other tests are made-up
        # Granger Causality test using ssr (F statistic)
        if res2djoint.model.k_constant:
            tss = res2djoint.centered_tss
        else:
            tss = res2djoint.uncentered_tss
        # a (near-)perfect fit of the unrestricted VAR leaves no residual
        # variation, so none of the test statistics is defined
        if (
            tss == 0
            or res2djoint.ssr == 0
            or np.isnan(res2djoint.rsquared)
            or (res2djoint.ssr / tss) < np.finfo(float).eps
            or res2djoint.params.shape[0] != dtajoint.shape[1]
        ):
            raise InfeasibleTestError(
                "The Granger causality test statistic cannot be compute "
                "because the VAR has a perfect fit of the data."
            )
        fgc1 = (
            (res2down.ssr - res2djoint.ssr)
            / res2djoint.ssr
            / mxlg
            * res2djoint.df_resid
        )
        if verbose:
            print(
                "ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,"
                " df_num=%d"
                % (
                    fgc1,
                    stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
                    res2djoint.df_resid,
                    mxlg,
                )
            )
        result["ssr_ftest"] = (
            fgc1,
            stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
            res2djoint.df_resid,
            mxlg,
        )
        # Granger Causality test using ssr (ch2 statistic)
        fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
        if verbose:
            print(
                "ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, "
                "df=%d" % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
            )
        result["ssr_chi2test"] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
        # likelihood ratio test pvalue:
        lr = -2 * (res2down.llf - res2djoint.llf)
        if verbose:
            print(
                "likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d"
                % (lr, stats.chi2.sf(lr, mxlg), mxlg)
            )
        result["lrtest"] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
        # F test that all lag coefficients of exog are zero
        rconstr = np.column_stack(
            (np.zeros((mxlg, mxlg)), np.eye(mxlg, mxlg), np.zeros((mxlg, 1)))
        )
        ftres = res2djoint.f_test(rconstr)
        if verbose:
            print(
                "parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,"
                " df_num=%d"
                % (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)
            )
        result["params_ftest"] = (
            np.squeeze(ftres.fvalue)[()],
            np.squeeze(ftres.pvalue)[()],
            ftres.df_denom,
            ftres.df_num,
        )
        # keep the fitted models and contrast matrix alongside the statistics
        resli[mxlg] = (result, [res2down, res2djoint, rconstr])
    return resli
def coint(
    y0,
    y1,
    trend="c",
    method="aeg",
    maxlag=None,
    autolag: str | None = "aic",
    return_results=None,
):
    """
    Test for no-cointegration of a univariate equation.

    The null hypothesis is no cointegration. Variables in y0 and y1 are
    assumed to be integrated of order 1, I(1).

    This uses the augmented Engle-Granger two-step cointegration test.
    Constant or trend is included in 1st stage regression, i.e. in
    cointegrating equation.

    **Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8 autolag was always None, no the keyword is used and defaults to
    "aic". Use `autolag=None` to avoid the lag search.

    Parameters
    ----------
    y0 : array_like
        The first element in cointegrated system. Must be 1-d.
    y1 : array_like
        The remaining elements in cointegrated system.
    trend : str {"c", "ct"}
        The trend term included in regression for cointegrating equation.

        * "c" : constant.
        * "ct" : constant and linear trend.
        * also available quadratic trend "ctt", and no constant "n".

    method : {"aeg"}
        Only "aeg" (augmented Engle-Granger) is available.
    maxlag : None or int
        Argument for `adfuller`, largest or given number of lags.
    autolag : str
        Argument for `adfuller`, lag selection criterion.

        * If None, then maxlag lags are used without lag search.
        * If "AIC" (default) or "BIC", then the number of lags is chosen
          to minimize the corresponding information criterion.
        * "t-stat" based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant
          using a 5%-sized test.
    return_results : bool
        For future compatibility, currently only tuple available.
        If True, then a results instance is returned. Otherwise, a tuple
        with the test outcome is returned. Set `return_results=False` to
        avoid future changes in return.

    Returns
    -------
    coint_t : float
        The t-statistic of unit-root test on residuals.
    pvalue : float
        MacKinnon"s approximate, asymptotic p-value based on MacKinnon (1994).
    crit_value : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels based on regression curve. This depends on the number of
        observations.

    Notes
    -----
    The Null hypothesis is that there is no cointegration, the alternative
    hypothesis is that there is cointegrating relationship. If the pvalue is
    small, below a critical size, then we can reject the hypothesis that there
    is no cointegrating relationship.

    P-values and critical values are obtained through regression surface
    approximation from MacKinnon 1994 and 2010.

    If the two series are almost perfectly collinear, then computing the
    test is numerically unstable. However, the two series will be cointegrated
    under the maintained assumption that they are integrated. In this case
    the t-statistic will be set to -inf and the pvalue to zero.

    TODO: We could handle gaps in data by dropping rows with nans in the
    Auxiliary regressions. Not implemented yet, currently assumes no nans
    and no gaps in time series.

    References
    ----------
    .. [1] MacKinnon, J.G. 1994  "Approximate Asymptotic Distribution Functions
       for Unit-Root and Cointegration Tests." Journal of Business & Economics
       Statistics, 12.2, 167-76.
    .. [2] MacKinnon, J.G. 2010.  "Critical Values for Cointegration Tests."
       Queen"s University, Dept of Economics Working Papers 1227.
       http://ideas.repec.org/p/qed/wpaper/1227.html
    """
    y0 = array_like(y0, "y0")
    y1 = array_like(y1, "y1", ndim=2)
    trend = string_like(trend, "trend", options=("c", "n", "ct", "ctt"))
    # method is validated but currently unused: only "aeg" is implemented
    string_like(method, "method", options=("aeg",))
    maxlag = int_like(maxlag, "maxlag", optional=True)
    autolag = string_like(
        autolag, "autolag", optional=True, options=("aic", "bic", "t-stat")
    )
    return_results = bool_like(return_results, "return_results", optional=True)
    nobs, k_vars = y1.shape
    k_vars += 1  # add 1 for y0
    # first stage: estimate the cointegrating regression of y0 on y1 (plus
    # the requested deterministic terms)
    if trend == "n":
        xx = y1
    else:
        xx = add_trend(y1, trend=trend, prepend=False)
    res_co = OLS(y0, xx).fit()
    # second stage: ADF unit-root test on the first-stage residuals, with no
    # additional deterministic terms (regression="n")
    if res_co.rsquared < 1 - 100 * SQRTEPS:
        res_adf = adfuller(
            res_co.resid, maxlag=maxlag, autolag=autolag, regression="n"
        )
    else:
        warnings.warn(
            "y0 and y1 are (almost) perfectly colinear."
            "Cointegration test is not reliable in this case.",
            CollinearityWarning,
            stacklevel=2,
        )
        # Edge case where series are too similar
        res_adf = (-np.inf,)
    # no constant or trend, see egranger in Stata and MacKinnon
    if trend == "n":
        crit = [np.nan] * 3  # 2010 critical values not available
    else:
        crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
        # nobs - 1, the -1 is to match egranger in Stata, I do not know why.
        # TODO: check nobs or df = nobs - k
    pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
    return res_adf[0], pval_asy, crit
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
    """
    Fit an ARIMA model, returning None instead of raising on expected
    estimation failures.

    Parameters
    ----------
    y : array_like
        The series to fit.
    order : tuple
        The (p, d, q) order passed to ``ARIMA``.
    model_kw : dict
        Extra keyword arguments for the ``ARIMA`` constructor.
    trend : str
        Trend specification passed to ``ARIMA``.
    fit_kw : dict
        Extra keyword arguments for ``ARIMA.fit``.
    start_params : array_like, optional
        Starting parameters.  Used internally by the single retry; callers
        normally leave this as None.

    Returns
    -------
    ARIMAResults or None
        The fit results, or None if estimation failed.
    """
    from statsmodels.tsa.arima.model import ARIMA

    try:
        return ARIMA(y, order=order, **model_kw, trend=trend).fit(
            start_params=start_params, **fit_kw
        )
    except LinAlgError:
        # SVD convergence failure on badly misspecified models
        return None
    except ValueError as error:
        if start_params is not None:  # do not recurse again
            # user supplied start_params only get one chance
            return None
        # Retry only for the known "invalid initial parameters" failure.
        # (The previous condition used `"initial" not in error.args[0]`,
        # which was a tautology: it retried on *every* ValueError and made
        # the else branch unreachable.)
        elif "initial" in error.args[0] or "initial" in str(error):
            # try a little harder, should be handled in fit really
            start_params = [0.1] * sum(order)
            if trend == "c":
                start_params = [0.1] + start_params
            return _safe_arma_fit(
                y, order, model_kw, trend, fit_kw, start_params
            )
        else:
            return None
    except Exception:
        # unknown failure; treat as an unfittable model (but do not swallow
        # KeyboardInterrupt/SystemExit as the old bare `except:` did)
        return None
def arma_order_select_ic(
    y, max_ar=4, max_ma=2, ic="bic", trend="c", model_kw=None, fit_kw=None
):
    """
    Compute information criteria for many ARMA models.

    Parameters
    ----------
    y : array_like
        Array of time-series data.
    max_ar : int
        Maximum number of AR lags to use. Default 4.
    max_ma : int
        Maximum number of MA lags to use. Default 2.
    ic : str, list
        Information criteria to report. Either a single string or a list
        of different criteria is possible.
    trend : str
        The trend to use when fitting the ARMA models.
    model_kw : dict
        Keyword arguments to be passed to the ``ARMA`` model.
    fit_kw : dict
        Keyword arguments to be passed to ``ARMA.fit``.

    Returns
    -------
    Bunch
        Dict-like object with attribute access.  Each ic is an attribute with
        a DataFrame for the results.  The AR order used is the row index.
        The ma order used is the column index.  The minimum orders are
        available as ``ic_min_order``.

    Notes
    -----
    This method can be used to tentatively identify the order of an ARMA
    process, provided that the time series is stationary and invertible.
    This function computes the full exact MLE estimate of each model and can
    be, therefore a little slow.  An implementation using approximate
    estimates will be provided in the future.  In the meantime, consider
    passing {method : "css"} to fit_kw.

    Examples
    --------
    >>> from statsmodels.tsa.arima_process import arma_generate_sample
    >>> import statsmodels.api as sm
    >>> import numpy as np

    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> arparams = np.r_[1, -arparams]
    >>> maparam = np.r_[1, maparams]
    >>> nobs = 250
    >>> np.random.seed(2014)
    >>> y = arma_generate_sample(arparams, maparams, nobs)
    >>> res = sm.tsa.arma_order_select_ic(y, ic=["aic", "bic"], trend="n")
    >>> res.aic_min_order
    >>> res.bic_min_order
    """
    max_ar = int_like(max_ar, "max_ar")
    max_ma = int_like(max_ma, "max_ma")
    trend = string_like(trend, "trend", options=("n", "c"))
    model_kw = dict_like(model_kw, "model_kw", optional=True)
    fit_kw = dict_like(fit_kw, "fit_kw", optional=True)

    ar_range = list(range(max_ar + 1))
    ma_range = list(range(max_ma + 1))
    # normalize ic to a sequence of criterion names
    if isinstance(ic, str):
        ic = [ic]
    elif not isinstance(ic, (list, tuple)):
        raise ValueError("Need a list or a tuple for ic if not a string.")

    results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
    model_kw = model_kw if model_kw is not None else {}
    fit_kw = fit_kw if fit_kw is not None else {}
    y_arr = array_like(y, "y", contiguous=True)
    # fit every (ar, ma) combination on the grid; failed fits become NaN
    for ar_order in ar_range:
        for ma_order in ma_range:
            fitted = _safe_arma_fit(
                y_arr, (ar_order, 0, ma_order), model_kw, trend, fit_kw
            )
            if fitted is None:
                results[:, ar_order, ma_order] = np.nan
                continue
            for crit_idx, criterion in enumerate(ic):
                results[crit_idx, ar_order, ma_order] = getattr(
                    fitted, criterion
                )

    # one DataFrame per criterion: rows are AR orders, columns MA orders
    frames = [
        pd.DataFrame(tbl, columns=ma_range, index=ar_range) for tbl in results
    ]
    res = dict(zip(ic, frames))

    # attach the (ar, ma) pair minimizing each criterion
    min_res = {}
    for name, frame in res.items():
        delta = np.ascontiguousarray(np.abs(frame.min().min() - frame))
        ncols = delta.shape[1]
        flat_loc = np.argmin(delta)
        min_res[name + "_min_order"] = (flat_loc // ncols, flat_loc % ncols)
    res.update(min_res)

    return Bunch(**res)
def has_missing(data):
    """
    Returns True if "data" contains missing entries, otherwise False
    """
    # Summing propagates NaN, so a NaN total signals at least one missing
    # entry (same computation as the original single-expression form).
    total = np.sum(data)
    return np.isnan(total)
def kpss(
    x,
    regression: Literal["c", "ct"] = "c",
    nlags: Literal["auto", "legacy"] | int = "auto",
    store: bool = False,
) -> Tuple[float, float, int, dict[str, float]]:
    """
    Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.

    Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
    hypothesis that x is level or trend stationary.

    Parameters
    ----------
    x : array_like, 1d
        The data series to test.
    regression : str{"c", "ct"}
        The null hypothesis for the KPSS test.

        * "c" : The data is stationary around a constant (default).
        * "ct" : The data is stationary around a trend.
    nlags : {str, int}, optional
        Indicates the number of lags to be used. If "auto" (default), lags
        is calculated using the data-dependent method of Hobijn et al. (1998).
        See also Andrews (1991), Newey & West (1994), and Schwert (1989). If
        set to "legacy",  uses int(12 * (n / 100)**(1 / 4)) , as outlined in
        Schwert (1989).
    store : bool
        If True, then a result instance is returned additionally to
        the KPSS statistic (default is False).

    Returns
    -------
    kpss_stat : float
        The KPSS test statistic.
    p_value : float
        The p-value of the test. The p-value is interpolated from
        Table 1 in Kwiatkowski et al. (1992), and a boundary point
        is returned if the test statistic is outside the table of
        critical values, that is, if the p-value is outside the
        interval (0.01, 0.1).
    lags : int
        The truncation lag parameter.
    crit : dict
        The critical values at 10%, 5%, 2.5% and 1%. Based on
        Kwiatkowski et al. (1992).
    resstore : (optional) instance of ResultStore
        An instance of a dummy class with results attached as attributes.

    Notes
    -----
    To estimate sigma^2 the Newey-West estimator is used. If lags is "legacy",
    the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
    as outlined in Schwert (1989). The p-values are interpolated from
    Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
    outside the table of critical values, then a warning message is
    generated.

    Missing values are not handled.

    References
    ----------
    .. [1] Andrews, D.W.K. (1991). Heteroskedasticity and autocorrelation
       consistent covariance matrix estimation. Econometrica, 59: 817-858.
    .. [2] Hobijn, B., Frances, B.H., & Ooms, M. (2004). Generalizations of
       the KPSS-test for stationarity. Statistica Neerlandica, 52: 483-502.
    .. [3] Kwiatkowski, D., Phillips, P.C.B., Schmidt, P., & Shin, Y. (1992).
       Testing the null hypothesis of stationarity against the alternative of
       a unit root. Journal of Econometrics, 54: 159-178.
    .. [4] Newey, W.K., & West, K.D. (1994). Automatic lag selection in
       covariance matrix estimation. Review of Economic Studies, 61: 631-653.
    .. [5] Schwert, G. W. (1989). Tests for unit roots: A Monte Carlo
       investigation. Journal of Business and Economic Statistics, 7 (2):
       147-159.
    """
    x = array_like(x, "x")
    regression = string_like(regression, "regression", options=("c", "ct"))
    store = bool_like(store, "store")

    nobs = x.shape[0]
    hypo = regression

    # if m is not one, n != m * n
    if nobs != x.size:
        raise ValueError("x of shape {0} not understood".format(x.shape))

    if hypo == "ct":
        # p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
        # where beta is the trend, r_t a random walk and e_t a stationary
        # error term.
        resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
        crit = [0.119, 0.146, 0.176, 0.216]
    else:  # hypo == "c"
        # special case of the model above, where beta = 0 (so the null
        # hypothesis is that the data is stationary around r_0).
        resids = x - x.mean()
        crit = [0.347, 0.463, 0.574, 0.739]

    # resolve the truncation lag: fixed Schwert rule, data-dependent
    # selection, or a user-supplied integer
    if nlags == "legacy":
        nlags = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
        nlags = min(nlags, nobs - 1)
    elif nlags == "auto" or nlags is None:
        if nlags is None:
            # TODO: Remove before 0.14 is released
            warnings.warn(
                "None is not a valid value for nlags. It must be an integer, "
                "'auto' or 'legacy'. None will raise starting in 0.14",
                FutureWarning,
                stacklevel=2,
            )
        # autolag method of Hobijn et al. (1998)
        nlags = _kpss_autolag(resids, nobs)
        nlags = min(nlags, nobs - 1)
    elif isinstance(nlags, str):
        raise ValueError("nvals must be 'auto' or 'legacy' when not an int")
    else:
        nlags = int_like(nlags, "nlags", optional=False)

    if nlags >= nobs:
        raise ValueError(
            f"lags ({nlags}) must be < number of observations ({nobs})"
        )

    pvals = [0.10, 0.05, 0.025, 0.01]

    eta = np.sum(resids.cumsum() ** 2) / (nobs ** 2)  # eq. 11, p. 165
    s_hat = _sigma_est_kpss(resids, nobs, nlags)

    kpss_stat = eta / s_hat
    # p-value by linear interpolation in the published critical-value table
    p_value = np.interp(kpss_stat, crit, pvals)

    warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
    # interp clamps to the table boundary, so hitting an endpoint means the
    # statistic is outside the tabulated range
    if p_value == pvals[-1]:
        warnings.warn(
            warn_msg.format(direction="smaller"),
            InterpolationWarning,
            stacklevel=2,
        )
    elif p_value == pvals[0]:
        warnings.warn(
            warn_msg.format(direction="greater"),
            InterpolationWarning,
            stacklevel=2,
        )

    crit_dict = {"10%": crit[0], "5%": crit[1], "2.5%": crit[2], "1%": crit[3]}

    if store:
        from statsmodels.stats.diagnostic import ResultsStore

        rstore = ResultsStore()
        rstore.lags = nlags
        rstore.nobs = nobs

        stationary_type = "level" if hypo == "c" else "trend"
        rstore.H0 = "The series is {0} stationary".format(stationary_type)
        rstore.HA = "The series is not {0} stationary".format(stationary_type)

        return kpss_stat, p_value, crit_dict, rstore
    else:
        return kpss_stat, p_value, nlags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = np.sum(resids ** 2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[: nobs - i])
s_hat += 2 * resids_prod * (1.0 - (i / (lags + 1.0)))
return s_hat / nobs
def _kpss_autolag(resids, nobs):
"""
Computes the number of lags for covariance matrix estimation in KPSS test
using method of Hobijn et al (1998). See also Andrews (1991), Newey & West
(1994), and Schwert (1989). Assumes Bartlett / Newey-West kernel.
"""
covlags = int(np.power(nobs, 2.0 / 9.0))
s0 = np.sum(resids ** 2) / nobs
s1 = 0
for i in range(1, covlags + 1):
resids_prod = np.dot(resids[i:], resids[: nobs - i])
resids_prod /= nobs / 2.0
s0 += resids_prod
s1 += i * resids_prod
s_hat = s1 / s0
pwr = 1.0 / 3.0
gamma_hat = 1.1447 * np.power(s_hat * s_hat, pwr)
autolags = int(gamma_hat * np.power(nobs, pwr))
return autolags
def range_unit_root_test(x, store=False):
    """
    Range unit-root test for stationarity.

    Computes the Range Unit-Root (RUR) test for the null
    hypothesis that x is stationary.

    Parameters
    ----------
    x : array_like, 1d
        The data series to test.
    store : bool
        If True, then a result instance is returned additionally to
        the RUR statistic (default is False).

    Returns
    -------
    rur_stat : float
        The RUR test statistic.
    p_value : float
        The p-value of the test. The p-value is interpolated from
        Table 1 in Aparicio et al. (2006), and a boundary point
        is returned if the test statistic is outside the table of
        critical values, that is, if the p-value is outside the
        interval (0.01, 0.1).
    crit : dict
        The critical values at 10%, 5%, 2.5% and 1%. Based on
        Aparicio et al. (2006).
    resstore : (optional) instance of ResultStore
        An instance of a dummy class with results attached as attributes.

    Notes
    -----
    The p-values are interpolated from
    Table 1 of Aparicio et al. (2006). If the computed statistic is
    outside the table of critical values, then a warning message is
    generated.

    Missing values are not handled.

    References
    ----------
    .. [1] Aparicio, F., Escribano A., Sipols, A.E. (2006). Range Unit-Root (RUR)
        tests: robust against nonlinearities, error distributions, structural breaks
        and outliers. Journal of Time Series Analysis, 27 (4): 545-576.
    """
    x = array_like(x, "x")
    store = bool_like(store, "store")
    nobs = x.shape[0]
    # Reject anything that is not a flat 1-d series (e.g. a 2-d column stack).
    if nobs != x.size:
        raise ValueError("x of shape {0} not understood".format(x.shape))
    # Table from [1] has been replicated using 200,000 samples
    # Critical values for new n_obs values have been identified
    pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95]
    # Sample sizes for which the critical values below were simulated.
    n = np.array(
        [25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000]
    )
    # crit[i, j]: critical value at sample size n[i] and p-value pvals[j].
    crit = np.array(
        [
            [0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312],
            [0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613],
            [0.9070, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393],
            [0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049],
            [0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482],
            [0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2842],
            [1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584],
            [1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073],
            [1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439],
            [1.1204, 1.2295, 1.3303, 1.4656, 3.1054, 3.4632],
            [1.1309, 1.2347, 1.3378, 1.4693, 3.1165, 3.4717],
            [1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807],
        ]
    )
    # Interpolate each critical-value column to the actual sample size.
    # NOTE(review): interp1d raises for nobs outside [25, 5000] — confirm
    # callers guard against very short/long series.
    inter_crit = np.zeros((1, crit.shape[1]))
    for i in range(crit.shape[1]):
        f = interp1d(n, crit[:, i])
        inter_crit[0, i] = f(nobs)
    # Calculate RUR stat: count of new running extrema (records), scaled
    # by sqrt(n). shift(1) makes the comparison strict w.r.t. past values.
    xs = pd.Series(x)
    exp_max = xs.expanding(1).max().shift(1)
    exp_min = xs.expanding(1).min().shift(1)
    count = (xs > exp_max).sum() + (xs < exp_min).sum()
    rur_stat = count / np.sqrt(len(x))
    # Walk the critical values from the largest p-value down; stop at the
    # first one the statistic does not fall below.
    k = len(pvals) - 1
    for i in range(len(pvals) - 1, -1, -1):
        if rur_stat < inter_crit[0, i]:
            k = i
        else:
            break
    p_value = pvals[k]
    warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
    # Warn when the statistic lands on either boundary of the table.
    direction = ""
    if p_value == pvals[-1]:
        direction = "smaller"
    elif p_value == pvals[0]:
        direction = "larger"
    if direction:
        warnings.warn(
            warn_msg.format(direction=direction),
            InterpolationWarning,
            stacklevel=2,
        )
    crit_dict = {
        "10%": inter_crit[0, 3],
        "5%": inter_crit[0, 2],
        "2.5%": inter_crit[0, 1],
        "1%": inter_crit[0, 0],
    }
    if store:
        # Imported lazily to avoid a circular import with stats.diagnostic.
        from statsmodels.stats.diagnostic import ResultsStore

        rstore = ResultsStore()
        rstore.nobs = nobs
        rstore.H0 = "The series is not stationary"
        rstore.HA = "The series is stationary"
        return rur_stat, p_value, crit_dict, rstore
    else:
        return rur_stat, p_value, crit_dict
class ZivotAndrewsUnitRoot:
    """
    Class wrapper for Zivot-Andrews structural-break unit-root test
    """

    def __init__(self):
        """
        Critical values for the three different models specified for the
        Zivot-Andrews unit-root test.

        Notes
        -----
        The p-values are generated through Monte Carlo simulation using
        100,000 replications and 2000 data points.
        """
        # Maps model name ("c", "t", "ct") -> (percentile, statistic) table.
        self._za_critical_values = {}
        # constant-only model; pairs are (percentile * 100, critical value)
        self._c = (
            (0.001, -6.78442),
            (0.100, -5.83192),
            (0.200, -5.68139),
            (0.300, -5.58461),
            (0.400, -5.51308),
            (0.500, -5.45043),
            (0.600, -5.39924),
            (0.700, -5.36023),
            (0.800, -5.33219),
            (0.900, -5.30294),
            (1.000, -5.27644),
            (2.500, -5.03340),
            (5.000, -4.81067),
            (7.500, -4.67636),
            (10.000, -4.56618),
            (12.500, -4.48130),
            (15.000, -4.40507),
            (17.500, -4.33947),
            (20.000, -4.28155),
            (22.500, -4.22683),
            (25.000, -4.17830),
            (27.500, -4.13101),
            (30.000, -4.08586),
            (32.500, -4.04455),
            (35.000, -4.00380),
            (37.500, -3.96144),
            (40.000, -3.92078),
            (42.500, -3.88178),
            (45.000, -3.84503),
            (47.500, -3.80549),
            (50.000, -3.77031),
            (52.500, -3.73209),
            (55.000, -3.69600),
            (57.500, -3.65985),
            (60.000, -3.62126),
            (65.000, -3.54580),
            (70.000, -3.46848),
            (75.000, -3.38533),
            (80.000, -3.29112),
            (85.000, -3.17832),
            (90.000, -3.04165),
            (92.500, -2.95146),
            (95.000, -2.83179),
            (96.000, -2.76465),
            (97.000, -2.68624),
            (98.000, -2.57884),
            (99.000, -2.40044),
            (99.900, -1.88932),
        )
        self._za_critical_values["c"] = np.asarray(self._c)
        # trend-only model
        self._t = (
            (0.001, -83.9094),
            (0.100, -13.8837),
            (0.200, -9.13205),
            (0.300, -6.32564),
            (0.400, -5.60803),
            (0.500, -5.38794),
            (0.600, -5.26585),
            (0.700, -5.18734),
            (0.800, -5.12756),
            (0.900, -5.07984),
            (1.000, -5.03421),
            (2.500, -4.65634),
            (5.000, -4.40580),
            (7.500, -4.25214),
            (10.000, -4.13678),
            (12.500, -4.03765),
            (15.000, -3.95185),
            (17.500, -3.87945),
            (20.000, -3.81295),
            (22.500, -3.75273),
            (25.000, -3.69836),
            (27.500, -3.64785),
            (30.000, -3.59819),
            (32.500, -3.55146),
            (35.000, -3.50522),
            (37.500, -3.45987),
            (40.000, -3.41672),
            (42.500, -3.37465),
            (45.000, -3.33394),
            (47.500, -3.29393),
            (50.000, -3.25316),
            (52.500, -3.21244),
            (55.000, -3.17124),
            (57.500, -3.13211),
            (60.000, -3.09204),
            (65.000, -3.01135),
            (70.000, -2.92897),
            (75.000, -2.83614),
            (80.000, -2.73893),
            (85.000, -2.62840),
            (90.000, -2.49611),
            (92.500, -2.41337),
            (95.000, -2.30820),
            (96.000, -2.25797),
            (97.000, -2.19648),
            (98.000, -2.11320),
            (99.000, -1.99138),
            (99.900, -1.67466),
        )
        self._za_critical_values["t"] = np.asarray(self._t)
        # constant + trend model
        self._ct = (
            (0.001, -38.17800),
            (0.100, -6.43107),
            (0.200, -6.07279),
            (0.300, -5.95496),
            (0.400, -5.86254),
            (0.500, -5.77081),
            (0.600, -5.72541),
            (0.700, -5.68406),
            (0.800, -5.65163),
            (0.900, -5.60419),
            (1.000, -5.57556),
            (2.500, -5.29704),
            (5.000, -5.07332),
            (7.500, -4.93003),
            (10.000, -4.82668),
            (12.500, -4.73711),
            (15.000, -4.66020),
            (17.500, -4.58970),
            (20.000, -4.52855),
            (22.500, -4.47100),
            (25.000, -4.42011),
            (27.500, -4.37387),
            (30.000, -4.32705),
            (32.500, -4.28126),
            (35.000, -4.23793),
            (37.500, -4.19822),
            (40.000, -4.15800),
            (42.500, -4.11946),
            (45.000, -4.08064),
            (47.500, -4.04286),
            (50.000, -4.00489),
            (52.500, -3.96837),
            (55.000, -3.93200),
            (57.500, -3.89496),
            (60.000, -3.85577),
            (65.000, -3.77795),
            (70.000, -3.69794),
            (75.000, -3.61852),
            (80.000, -3.52485),
            (85.000, -3.41665),
            (90.000, -3.28527),
            (92.500, -3.19724),
            (95.000, -3.08769),
            (96.000, -3.03088),
            (97.000, -2.96091),
            (98.000, -2.85581),
            (99.000, -2.71015),
            (99.900, -2.28767),
        )
        self._za_critical_values["ct"] = np.asarray(self._ct)

    def _za_crit(self, stat, model="c"):
        """
        Linear interpolation for Zivot-Andrews p-values and critical values

        Parameters
        ----------
        stat : float
            The ZA test statistic
        model : {"c","t","ct"}
            The model used when computing the ZA statistic. "c" is default.

        Returns
        -------
        pvalue : float
            The interpolated p-value
        cvdict : dict
            Critical values for the test statistic at the 1%, 5%, and 10%
            levels

        Notes
        -----
        The p-values are linear interpolated from the quantiles of the
        simulated ZA test statistic distribution
        """
        table = self._za_critical_values[model]
        pcnts = table[:, 0]
        stats = table[:, 1]
        # ZA cv table contains quantiles multiplied by 100
        # stats is monotonically increasing, as np.interp requires.
        pvalue = np.interp(stat, stats, pcnts) / 100.0
        # Invert the table to pull the 1/5/10% critical values.
        cv = [1.0, 5.0, 10.0]
        crit_value = np.interp(cv, pcnts, stats)
        cvdict = {
            "1%": crit_value[0],
            "5%": crit_value[1],
            "10%": crit_value[2],
        }
        return pvalue, cvdict

    def _quick_ols(self, endog, exog):
        """
        Minimal implementation of LS estimator for internal use

        Returns the vector of t-statistics (coefficient / std. error),
        not the coefficients themselves.
        """
        xpxi = np.linalg.inv(exog.T.dot(exog))
        xpy = exog.T.dot(endog)
        nobs, k_exog = exog.shape
        b = xpxi.dot(xpy)
        e = endog - exog.dot(b)
        sigma2 = e.T.dot(e) / (nobs - k_exog)
        return b / np.sqrt(np.diag(sigma2 * xpxi))

    def _format_regression_data(self, series, nobs, const, trend, cols, lags):
        """
        Create the endog/exog data for the auxiliary regressions
        from the original (standardized) series under test.

        NOTE(review): `series` is normalized in place (`series /= ...`),
        so the caller's array is mutated.
        """
        # first-diff y and standardize for numerical stability
        endog = np.diff(series, axis=0)
        endog /= np.sqrt(endog.T.dot(endog))
        series /= np.sqrt(series.T.dot(series))
        # reserve exog space
        exog = np.zeros((endog[lags:].shape[0], cols + lags))
        exog[:, 0] = const
        # lagged y and dy
        exog[:, cols - 1] = series[lags : (nobs - 1)]
        exog[:, cols:] = lagmat(endog, lags, trim="none")[
            lags : exog.shape[0] + lags
        ]
        return endog, exog

    def _update_regression_exog(
        self, exog, regression, period, nobs, const, trend, cols, lags
    ):
        """
        Update the exog array for the next regression.

        Rewrites (in place) the break-dummy, trend, and trend-dummy
        columns of `exog` for candidate break `period`; the column layout
        depends on the model in `regression`.
        """
        cutoff = period - (lags + 1)
        if regression != "t":
            # models "c" and "ct": column 1 is the level-shift dummy.
            exog[:cutoff, 1] = 0
            exog[cutoff:, 1] = const
            exog[:, 2] = trend[(lags + 2) : (nobs + 1)]
            if regression == "ct":
                # column 3 is the trend-shift dummy.
                exog[:cutoff, 3] = 0
                exog[cutoff:, 3] = trend[1 : (nobs - period + 1)]
        else:
            # model "t": no level-shift dummy; column 2 is the trend shift.
            exog[:, 1] = trend[(lags + 2) : (nobs + 1)]
            exog[: (cutoff - 1), 2] = 0
            exog[(cutoff - 1) :, 2] = trend[0 : (nobs - period + 1)]
        return exog

    def run(self, x, trim=0.15, maxlag=None, regression="c", autolag="AIC"):
        """
        Zivot-Andrews structural-break unit-root test.

        The Zivot-Andrews test tests for a unit root in a univariate process
        in the presence of serial correlation and a single structural break.

        Parameters
        ----------
        x : array_like
            The data series to test.
        trim : float
            The percentage of series at begin/end to exclude from break-period
            calculation in range [0, 0.333] (default=0.15).
        maxlag : int
            The maximum lag which is included in test, default is
            12*(nobs/100)^{1/4} (Schwert, 1989).
        regression : {"c","t","ct"}
            Constant and trend order to include in regression.

            * "c" : constant only (default).
            * "t" : trend only.
            * "ct" : constant and trend.
        autolag : {"AIC", "BIC", "t-stat", None}
            The method to select the lag length when using automatic selection.

            * if None, then maxlag lags are used,
            * if "AIC" (default) or "BIC", then the number of lags is chosen
              to minimize the corresponding information criterion,
            * "t-stat" based choice of maxlag. Starts with maxlag and drops a
              lag until the t-statistic on the last lag length is significant
              using a 5%-sized test.

        Returns
        -------
        zastat : float
            The test statistic.
        pvalue : float
            The pvalue based on MC-derived critical values.
        cvdict : dict
            The critical values for the test statistic at the 1%, 5%, and 10%
            levels.
        baselag : int
            The number of lags used for period regressions.
        bpidx : int
            The index of x corresponding to endogenously calculated break period
            with values in the range [0..nobs-1].

        Notes
        -----
        H0 = unit root with a single structural break

        Algorithm follows Baum (2004/2015) approximation to original
        Zivot-Andrews method. Rather than performing an autolag regression at
        each candidate break period (as per the original paper), a single
        autolag regression is run up-front on the base model (constant + trend
        with no dummies) to determine the best lag length. This lag length is
        then used for all subsequent break-period regressions. This results in
        significant run time reduction but also slightly more pessimistic test
        statistics than the original Zivot-Andrews method, although no attempt
        has been made to characterize the size/power trade-off.

        References
        ----------
        .. [1] Baum, C.F. (2004). ZANDREWS: Stata module to calculate
            Zivot-Andrews unit root test in presence of structural break,"
            Statistical Software Components S437301, Boston College Department
            of Economics, revised 2015.

        .. [2] Schwert, G.W. (1989). Tests for unit roots: A Monte Carlo
            investigation. Journal of Business & Economic Statistics, 7:
            147-159.

        .. [3] Zivot, E., and Andrews, D.W.K. (1992). Further evidence on the
            great crash, the oil-price shock, and the unit-root hypothesis.
            Journal of Business & Economic Studies, 10: 251-270.
        """
        x = array_like(x, "x")
        trim = float_like(trim, "trim")
        maxlag = int_like(maxlag, "maxlag", optional=True)
        regression = string_like(
            regression, "regression", options=("c", "t", "ct")
        )
        autolag = string_like(
            autolag, "autolag", options=("aic", "bic", "t-stat"), optional=True
        )
        if trim < 0 or trim > (1.0 / 3.0):
            raise ValueError("trim value must be a float in range [0, 1/3)")
        nobs = x.shape[0]
        # Pick the lag length once, on the base ADF model (see Notes).
        if autolag:
            adf_res = adfuller(
                x, maxlag=maxlag, regression="ct", autolag=autolag
            )
            baselags = adf_res[2]
        elif maxlag:
            baselags = maxlag
        else:
            # Schwert (1989) rule of thumb.
            baselags = int(12.0 * np.power(nobs / 100.0, 1 / 4.0))
        # Exclude `trim` fraction of the sample at each end from the
        # candidate break periods.
        trimcnt = int(nobs * trim)
        start_period = trimcnt
        end_period = nobs - trimcnt
        if regression == "ct":
            basecols = 5
        else:
            basecols = 4
        # normalize constant and trend terms for stability
        c_const = 1 / np.sqrt(nobs)
        t_const = np.arange(1.0, nobs + 2)
        t_const *= np.sqrt(3) / nobs ** (3 / 2)
        # format the auxiliary regression data
        endog, exog = self._format_regression_data(
            x, nobs, c_const, t_const, basecols, baselags
        )
        # iterate through the time periods
        stats = np.full(end_period + 1, np.inf)
        for bp in range(start_period + 1, end_period + 1):
            # update intercept dummy / trend / trend dummy
            exog = self._update_regression_exog(
                exog,
                regression,
                bp,
                nobs,
                c_const,
                t_const,
                basecols,
                baselags,
            )
            # check exog rank on first iteration
            if bp == start_period + 1:
                o = OLS(endog[baselags:], exog, hasconst=1).fit()
                if o.df_model < exog.shape[1] - 1:
                    raise ValueError(
                        "ZA: auxiliary exog matrix is not full rank.\n"
                        "  cols (exc intercept) = {}  rank = {}".format(
                            exog.shape[1] - 1, o.df_model
                        )
                    )
                stats[bp] = o.tvalues[basecols - 1]
            else:
                # Subsequent periods reuse the cheap internal estimator,
                # which returns t-statistics directly.
                stats[bp] = self._quick_ols(endog[baselags:], exog)[
                    basecols - 1
                ]
        # return best seen: the minimum t-statistic over candidate breaks.
        zastat = np.min(stats)
        # NOTE(review): bpidx maps the period index back to x's indexing;
        # the -1 offsets the differencing step — confirm against [1].
        bpidx = np.argmin(stats) - 1
        crit = self._za_crit(zastat, regression)
        pval = crit[0]
        cvdict = crit[1]
        return zastat, pval, cvdict, baselags, bpidx

    def __call__(
        self, x, trim=0.15, maxlag=None, regression="c", autolag="AIC"
    ):
        # Delegate so the module-level singleton can be called like a
        # function, mirroring the other stattools tests.
        return self.run(
            x, trim=trim, maxlag=maxlag, regression=regression, autolag=autolag
        )
# Module-level singleton: a callable instance that mirrors the
# function-style API of the other unit-root tests in this module.
zivot_andrews = ZivotAndrewsUnitRoot()
# Surface run()'s docstring on the callable for help()/introspection.
zivot_andrews.__doc__ = zivot_andrews.run.__doc__
|
// Copyright 2018, Timothy Davison. All rights reserved.
#pragma once
#include "CoreMinimal.h"
#include "Components/SceneComponent.h"
#include "CollectionSpace.generated.h"
// Base scene component for interactive VR UI "spaces"; concrete spaces
// (e.g. UCollectionSpace below) derive from this.
UCLASS(ClassGroup = (Custom), meta = (BlueprintSpawnableComponent))
class LIFEBRUSH_API UInteractionSpace : public USceneComponent
{
	GENERATED_BODY()
public:
	UInteractionSpace();
};
// UObject-side declaration of the data-source interface (Unreal's
// UINTERFACE/IInterface pair); the native API lives in
// ICollectionSpaceDataSource below.
UINTERFACE(Blueprintable)
class UCollectionSpaceDataSource : public UInterface
{
	GENERATED_BODY()
};
// Implement this interface to supply the items that a UCollectionSpace
// displays as cells.
class ICollectionSpaceDataSource
{
	GENERATED_BODY()
public:
	// Number of items in the collection.
	UFUNCTION(BlueprintCallable, BlueprintNativeEvent, Category = "VRUI")
	int32 count();

	// The primitive component to display for the cell at the given index.
	UFUNCTION(BlueprintCallable, BlueprintNativeEvent, Category = "VRUI")
	UPrimitiveComponent * primitiveCellAt(int32 index);
};
// UObject-side declaration of the delegate interface; the native API lives
// in ICollectionSpaceDelegate below.
UINTERFACE(Blueprintable)
class UCollectionSpaceDelegate : public UInterface
{
	GENERATED_BODY()
};
// Implement this interface to be notified of user interactions with a
// UCollectionSpace.
class ICollectionSpaceDelegate
{
	GENERATED_BODY()
public:
	// Called when the user grabs a cell out of the collection.
	UFUNCTION(BlueprintCallable, BlueprintNativeEvent, Category = "VRUI")
	void didGrab(int32 itemAtIndex, FTransform grabTransform, UPrimitiveComponent * grabbedCell, FTransform cellTransform, FBox cellBounds);
};
// A pannable, grabbable row/grid of cells provided by a data source
// (ICollectionSpaceDataSource), with interaction callbacks delivered to a
// delegate (ICollectionSpaceDelegate).
UCLASS( ClassGroup=(Custom), meta=(BlueprintSpawnableComponent) )
class LIFEBRUSH_API UCollectionSpace : public UInteractionSpace
{
	GENERATED_BODY()

public:
	// Sets default values for this component's properties
	UCollectionSpace();

protected:
	// Called when the game starts
	virtual void BeginPlay() override;

public:
	// Called every frame
	virtual void TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction) override;

	// Inserts cells for the given data-source indices.
	virtual void insertItemsAt(TArray<uint32> indices);

	// Rebuilds all cells from the data source.
	virtual void reloadData();

	// Point on the collection nearest to the interaction point.
	FVector nearest(UPrimitiveComponent * interactionPoint);

	// Interaction events
	virtual void begin_oneHand(UPrimitiveComponent * interactionPoint);
	virtual void update_oneHand(float dt, UPrimitiveComponent * interactionPoint, FTransform lastTransform);
	virtual void end_oneHand(UPrimitiveComponent * interactionPoint);

	virtual void grab(UPrimitiveComponent * interactionPoint);
	virtual void query(UPrimitiveComponent * interactionPoint);

	// Indices of currently selected cells.
	virtual TSet<int32> selection();

public:
	// Half-size of each cell, in component space.
	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	float cellExtents = 10.0f;

	// Gap between adjacent cells.
	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	float cellSpacing = 5.0f;

	// Damping applied to pan velocity.
	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	float damping = 0.1f;

	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	float width = 0.0f;

	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	int32 rows = 1;

	// Mesh/material for the background panel behind the cells.
	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	UStaticMesh * backgroundMesh;

	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI")
	UMaterialInterface * backgroundMaterial;

	// Must implement ICollectionSpaceDataSource (enforced via MustImplement).
	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI", meta = (MustImplement = "CollectionSpaceDataSource"))
	UObject * dataSource;

	// Must implement ICollectionSpaceDelegate (enforced via MustImplement).
	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "VRUI", meta = (MustImplement = "CollectionSpaceDelegate"))
	UObject * delegate;

protected:
	// Layout helpers.
	void _layout();
	void _layoutCells();
	void _layoutBackground();

	float _totalWidth();
	float _stepSize();

	float _scaleForCell(UPrimitiveComponent& component);
	FVector _offsetForCell(const FTransform& toCell, UPrimitiveComponent& component);

	void _update_pan(float dt, UPrimitiveComponent * interactionPoint, FTransform lastTransform);

protected:
	UPROPERTY()
	UStaticMeshComponent * _boundsMesh;

protected:
	// Current one-hand interaction state.
	enum class InteractionMode
	{
		Pan,
		Grab,
		None
	};

	InteractionMode _interactionMode = InteractionMode::None;

	// Cached cell components and their local-space bounds.
	TArray<UPrimitiveComponent*> _cells;
	TArray<FBox> _localBounds;
	FBox _bounds;

	FVector _velocity = FVector::ZeroVector;

	TSet<int32> _selection;

	static const int Forward = 1;
};
|
// Generated by CoffeeScript 1.12.7
/*
@file
Handles the functionality of the photography game.
*/
(function() {
var extend = function(child, parent) { for (var key in parent) { if (hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; },
hasProp = {}.hasOwnProperty;
jQuery(document).ready(function() {
var addShotToInv, calculatePicValue, closeParent, currentGame, deg2rad, displayInv, distanceTravelled, effects, endGame, endTurn, event, eventManager, gameEvents, gameGlobal, gameInsanity, gameLocation, gamePhoto, gameTime, gameTutorial, generateMarkers, getParam, locations, photographyGame, player, playerInsanity, playerMarker, plusMode, processData, randomEvent, retrieveResources, saveItem, setValue, showResStatus, storyMode, submitUserData, timeManager, tutorialHandler, updateMarkers, validData;
event = (function() {
/*
Constructs the event object.
@constructor
@param {string} title
Title of the event.
@param {string} time
The game time of when the event occurred.
@param {string} content
The description of the event.
@param {boolean} special
Whether if the event is a special event.
@param {boolean} warn
Whether if the event is a warning.
*/
function event(title, time, content, special, warn) {
this.title = title;
this.time = time;
this.content = content;
this.special = special != null ? special : false;
this.warn = warn != null ? warn : false;
}
return event;
})();
randomEvent = (function(superClass) {
extend(randomEvent, superClass);
/*
Constructs the random event object. Extends events.
@constructor
@param {string} title
Title of the event.
@param {string} time
The game time of when the event occurred.
@param {string} content
The description of the event.
@param {boolean} special
Whether if the event is a special event.
@param {boolean} warn
Whether if the event is a warning.
@param {boolean} popup
Whether if the event has its own overlay, or added to the event log.
@param {integer} chance
The chance of the event occurance should it be selected.
@param {object} effects
The list of effects to affect the player by.
*/
function randomEvent(title, time, content, special, popup, chance, effects1) {
this.title = title;
this.time = time;
this.content = content;
this.special = special != null ? special : false;
this.popup = popup != null ? popup : false;
this.chance = chance;
this.effects = effects1;
randomEvent.__super__.constructor.call(this, this.title, this.time, this.content, this.special, this.warn);
}
return randomEvent;
})(event);
gamePhoto = (function() {
/*
Constructs the game photo object.
@constructor
@param {integer} value
The value of the photo.
@param {boolean} washed
Whether if the photo has been washed
@param {string} img
The image associated with the photo.
@param {string} title
The title of the photo.
@param {integer} quailty
The quailty of the photo.
*/
function gamePhoto(value1, washed, img1, title, quailty1) {
this.value = value1;
this.washed = washed;
this.img = img1;
this.title = title;
this.quailty = quailty1;
}
return gamePhoto;
})();
/*
Global variables and constants.
*/
locations = [];
validData = [];
gameGlobal = {
eventOccurrence: 0,
init: {
isStory: false,
isPlus: false,
stats: {
CAB: 1000,
workingCapital: 0,
assets: 0,
liabilities: 600,
insanity: 0
}
},
trackers: {
monthPassed: 0,
photosSold: 0,
moneyEarned: 0
},
turnConsts: {
interest: 1.5,
pictureWashingTime: 14,
stdLiabilities: 600,
alert: false,
randomEvents: [
new randomEvent('Machine Gun Fire!', 'currentTime', 'You wake up in a cold sweat. The sound of a german machine gun barks out from the window. How coud this be? Germans in Australia? You grab your rifle from under your pillow and rush to the window. You ready your rifle and aim, looking for the enemy. BANG! BANG! BARK! YAP! You look at the neighbours small terrier. Barking...', false, true, 30, effects = {
insanity: 20
}), new randomEvent('German Bombs!', 'currentTime', 'A loud explosion shakes the ground and you see a building crumble into dust in the distance. Sirens. We have been attacked! You rush to see the chaos, pushing the bystanders aside. They are not running, strangely calm. Do they not recognize death when the see it? Then you see it. A construction crew. Dynamite.', false, true, 20, effects = {
insanity: 30
}), new randomEvent('Air raid!', 'currentTime', 'The sound of engines fills the air. The twins propellers of a German byplane. You look up to the sky, a small dot. It may be far now, but the machine guns will be upon us soon. Cover. Need to get safe. You yell to the people around you. GET INSIDE! GET INSIDE NOW! They look at you confused. They dont understand. You look up again. A toy. You look to your side, a car.', false, true, 24, effects = {
insanity: 20
}), new randomEvent('Landmines!', 'currentTime', 'You scan the ground carefully as you walk along the beaten dirt path. A habit you learned after one of your squadmate had his legs blown off by a German M24 mine. You stop. Under a pile of leaves you spot it. The glimmer of metal. Shrapnel to viciously tear you apart. You are no sapper but this could kill someone. You throw a rock a it. The empty can of beans rolls away.', false, true, 20, effects = {
insanity: 10
}), new randomEvent('Dazed', 'currentTime', 'You aim the camera at the young couple who had asked you for a picture. Slowly. 3. 2. 1. Click. FLASH. You open your eyes. The fields. The soldiers are readying for a charge. OVER THE TOP. You shake yourself awake. The couple is looking at you worryingly. How long was I out?', false, true, 20, effects = {
insanity: 10
}), new randomEvent('The enemy charges!', 'currentTime', 'You are pacing along the street. Footsteps... You turn round and see a man running after you. Yelling. Immediately you run at him. Disarm and subdue you think. Disarm. You tackle him to the ground. He falls with a thud. Subdue. You raise your fist. As you prepare to bring it down on your assailant. Its your wallet. "Please stop! You dropped your wallet! Take it!', false, true, 20, effects = {
insanity: 20
})
]
}
};
/*
Submits session data to the server.
@param {object} data
the data to be submitted.
@return
AJAX deferred promise.
*/
submitUserData = function(data) {
return $.ajax({
url: '/routes/user.php',
type: 'POST',
data: data
});
};
/*
Display the response status to the DOM
@param {DOMElement} target
The DOM element to display the response to.
@param {object} res
The response to display.
*/
showResStatus = function(target, res) {
if (res.status === 'success') {
$(target).css('color', '');
return $(target).text(res.message);
} else {
$(target).css('color', 'red');
return $(target).text(res.message);
}
};
    /*
    Saves an item to the user's collection.
    */
saveItem = function(img, des) {
return submitUserData({
method: 'saveItem',
image: img,
description: des
}).then(function(res) {
return showResStatus('#savePicOverlay .status', JSON.parse(res));
});
};
    /*
    Gets the value of the parameter in the query string of a GET request.
    @param {string} name
      the key of the corresponding value to retrieve.
    @return
      The parameter's value, or 0 if it is absent.
    */
getParam = function(name) {
var results;
results = new RegExp('[\?&]' + name + '=([^&#]*)').exec(window.location.href);
return results[1] || 0;
};
    /*
    Retrieves the GET parameter from the query string. Sets up the interface and game constants accordingly.
    */
try {
storyMode = getParam('story') === 'true';
plusMode = getParam('plus') === 'true';
if (storyMode) {
gameGlobal.init.isStory = true;
$('.tutorial .init').text('Welcome to the photography game. As Mark, you must do your job for at least 4 month. Do not let your Working Capital drop below -$2000.');
$('#playAgain').text('Continue');
$('#playAgain').parent().attr('href', 'chapter3.html');
$('.skip').show();
$('.save, #endGame .score').hide();
$('#playAgain').addClass('continue');
if (plusMode) {
$('.continueScreen h3').text('Chapter 4 Completed');
$('.continueScreen p').remove();
$('.continueScreen h3').after('<p>Photography Game Mode Plus Now Avaliable</p>');
$('.continueScreen .buttonContainer a:first-child').attr('href', 'end.html').find('button').text('Continue to Finale');
}
}
if (getParam('diff') === 'extended') {
gameGlobal.init.stats = {
CAB: 2500,
workingCapital: 0,
assets: 0,
liabilities: 1000
};
}
if (plusMode) {
$('#tutorial .pPlus').text('In Plus Mode, you will have to deal with additional events and control your insanity meter. The game will end should it gets too high.');
gameGlobal.init.isPlus = true;
} else {
$('.pPlus').remove();
}
} catch (error) {}
/*
Skips the game when in story mode. Completes the chapter for the user.
*/
$('.skip, .continue').click(function(e) {
var id;
$('.continueScreen').show();
$('#selectionArea, #gameArea').hide();
id = '2';
if (gameGlobal.init.isPlus) {
id = '4';
}
return submitUserData({
method: 'chapterComplete',
chapterId: id
}).then(function(res) {
res = JSON.parse(res);
if (res.status === 'success') {
return 0;
}
});
});
/*
Retrieves resources from the dataset.
@param {integer} amount
The amount of resources to retrieve.
@return
AJAX deferred promise.
*/
retrieveResources = function(amount) {
var reqParam;
reqParam = {
resource_id: '9913b881-d76d-43f5-acd6-3541a130853d',
limit: amount
};
return $.ajax({
url: 'https://data.gov.au/api/action/datastore_search',
data: reqParam,
dataType: 'jsonp',
cache: true
});
};
    /*
    Converts degrees to radians.
    @param {float} deg
      The degree to convert to radians.
    @return
      The corresponding radian value of the input.
    */
deg2rad = function(deg) {
return deg * (Math.PI / 180);
};
/*
Calculates the distance travelled from two lat, lng coordinates.
@param {object} from
The initial lat, lng coordinates.
@param {object} to
The final lat, lng coordinates.
@return
The distance between the two points in km.
*/
distanceTravelled = function(from, to) {
var R, a, c, dLat, dLng, dist, lat1, lat2, lng1, lng2;
lat1 = from.lat;
lng1 = from.lng;
lat2 = to.lat;
lng2 = to.lng;
R = 6371;
dLat = deg2rad(lat2 - lat1);
dLng = deg2rad(lng2 - lng1);
a = Math.sin(dLat / 2) * Math.sin(dLat / 2) + Math.cos(deg2rad(lat1)) * Math.cos(deg2rad(lat2)) * Math.sin(dLng / 2) * Math.sin(dLng / 2);
c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
dist = R * c;
return dist;
};
tutorialHandler = (function() {
/*
Constructs the game tutorial object.
@constructor
@param {DOMElement} domPanels
The set of tutorial elements active in the DOM.
*/
function tutorialHandler(domPanels) {
this.domPanels = domPanels;
this.step = 0;
}
/*
Displays the panel in view.
*/
tutorialHandler.prototype.init = function() {
$(this.domPanels[this.step]).show();
return this.setButton();
};
/*
Switch to the next panel.
*/
tutorialHandler.prototype.next = function() {
this.step++;
$(this.domPanels[this.step]).show();
$(this.domPanels[this.step - 1]).hide();
return this.setButton();
};
/*
Switch to the previous panel
*/
tutorialHandler.prototype.prev = function() {
this.step--;
$(this.domPanels[this.step]).show();
$(this.domPanels[this.step + 1]).hide();
return this.setButton();
};
/*
Generates the avaliable buttons depending on the step.
@see this.step
*/
tutorialHandler.prototype.setButton = function() {
this.domPanels.find('.buttonContainer').remove();
if (this.step === 0) {
return this.domPanels.append($('<div class="buttonContainer"> <button class="prev hidden">Previous</button> <button class="next">Next</button> </div>'));
} else if (this.step === this.domPanels.length - 1) {
return this.domPanels.append($('<div class="buttonContainer"> <button class="prev">Previous</button> <button class="next hidden">Next</button> </div>'));
} else {
return this.domPanels.append($('<div class="buttonContainer"> <button class="prev">Previous</button> <button class="next">Next</button> </div>'));
}
};
return tutorialHandler;
})();
gameLocation = (function() {
/*
Constructs the game location object
@constructor
@param {object} position
The position of the location.
@param {string}
The name of the location.
@param {data}
Metadata associated with this position.
@param {boolean} rare
Whether if the location is a rare location or not
@param {string} icon
The icon to use for this location.
*/
function gameLocation(position, name1, data1, rare1, icon) {
this.position = position;
this.name = name1;
this.data = data1;
this.rare = rare1;
this.icon = icon;
this.marker;
this.value;
this.travelExpense;
this.travelTime;
}
/*
Adds the location to the map.
@param {object} map
The google map element to add to.
*/
gameLocation.prototype.addTo = function(map) {
var marker;
if (this.icon) {
marker = new google.maps.Marker({
position: this.position,
map: map,
icon: this.icon,
title: this.name
});
} else {
marker = new google.maps.Marker({
position: this.position,
map: map,
title: this.name
});
}
this.marker = marker;
return this.setListener(this.marker);
};
/*
Sets event listeners on a marker
@param {object} marker
The google maps marker object to bind the event listener to.
*/
gameLocation.prototype.setListener = function(marker) {
var self;
self = this;
marker.addListener('click', function() {
return player.moveTo(self);
});
return marker.addListener('mouseover', function() {
var travelDistance, travelTime;
travelDistance = parseInt(distanceTravelled(player.position, self.position));
travelTime = travelDistance / 232;
$('#locationInfoOverlay #title').text(self.data.description);
$('#locationInfoOverlay #position').text('Distance away ' + travelDistance + 'km');
$('#locationInfoOverlay #value').text('Potential Revenue $' + self.value);
$('#locationInfoOverlay #travelExpense').text('Travel Expense $' + parseInt((travelDistance * 0.6) / 10));
$('#locationInfoOverlay #travelTime').text('Travel Time: at least ' + travelTime.toFixed(2) + ' Hours');
return this.value = self.value;
});
};
return gameLocation;
})();
playerMarker = (function(superClass) {
extend(playerMarker, superClass);
/*
Constructs the player marker object. Extends the game location object.
@constructor
@param {object} position
The position of the player.
@param {string} name1
The name of the player.
@param {object} data1
Metadata associated with this player.
@deprecated @param {string} icon
The icon to use for this player (initTo() hardcodes its own icon URL).
@param {object} stats
JSON data of the player's stats (may be undefined at construction and
assigned afterwards -- see the instantiation code below the classes).
*/
function playerMarker(position, name1, data1, icon, stats1) {
this.position = position;
this.name = name1;
this.data = data1;
this.icon = icon;
this.stats = stats1;
playerMarker.__super__.constructor.call(this, this.position, this.name, this.data, this.icon);
// No-op field "declarations" (CoffeeScript compiler output).
this.playerMarker;
this.preStat;
this.inventory = [];
}
/*
Adds the player marker to the map as an animated SlidingMarker.
Note: the deprecated `icon` constructor argument is ignored here in favour
of a fixed icon URL.
@param {object} map
The google map element to add to.
*/
playerMarker.prototype.initTo = function(map) {
return this.playerMarker = new SlidingMarker({
position: this.position,
map: map,
icon: 'https://developers.google.com/maps/documentation/javascript/images/custom-marker.png',
title: this.name,
optimized: false,
zIndex: 100
});
};
/*
Moves the player marker to another location and calculates the result of
moving there: charges the travel expense, advances game time, logs an
event, re-rolls marker visibility and (in Plus mode) may fire a random
event.
@param {object} location
gameLocation object, for the player marker to move to.
*/
playerMarker.prototype.moveTo = function(location) {
var newStats, randEvent, timeTaken;
location.marker.setVisible(false);
// Same expense/time formulas as the mouseover preview in setListener().
location.travelExpense = parseInt((distanceTravelled(this.position, location.position) * 0.6) / 10);
location.travelTime = parseFloat((distanceTravelled(this.position, location.position) / 232).toFixed(2));
this.position = location.position;
this.playerAt = location;
this.playerMarker.setPosition(new google.maps.LatLng(location.position.lat, location.position.lng));
// NOTE: newStats aliases this.stats (no copy) -- mutations below are
// applied directly to the player's live stats object.
newStats = this.stats;
newStats.CAB -= player.playerAt.travelExpense;
// Actual trip takes the estimate plus up to 5 random extra hours.
timeTaken = location.travelTime + Math.random() * 5;
gameTime.incrementTime(timeTaken);
gameEvents.addEvent(new event('Moved to', gameTime.getFormatted(), location.name + ' in ' + timeTaken.toFixed(2) + ' hours'));
$('#takePic').show();
$('#takeDrink').show();
updateMarkers();
this.updateStats(newStats);
// Plus mode: every second move (eventOccurrence grows by 0.5) rolls for a
// random event against its percentage chance.
if (gameGlobal.init.isPlus) {
gameGlobal.eventOccurrence += 0.5;
if (gameGlobal.eventOccurrence > 1) {
randEvent = gameGlobal.turnConsts.randomEvents[Math.floor(Math.random() * gameGlobal.turnConsts.randomEvents.length)];
if (randEvent.chance > Math.random() * 100) {
gameEvents.addEvent(randEvent);
return gameGlobal.eventOccurrence = 0;
}
}
}
};
/*
Depreciates the player's inventory: every photo loses 10% of its value and
the total loss is deducted from the player's assets.
NOTE(review): the early `return` fires as soon as ANY item is worth < 1,
skipping depreciation of the remaining items AND the stats update --
confirm that is intended rather than a `continue`.
*/
playerMarker.prototype.depreciateInv = function() {
var depreciation, item, j, len, newStats, ref;
depreciation = 0;
ref = this.inventory;
for (j = 0, len = ref.length; j < len; j++) {
item = ref[j];
if (item.value < 1) {
return;
} else {
depreciation += item.value - item.value * 0.9;
item.value = item.value * 0.9;
}
}
// Alias of player.stats; `-= toFixed(2)` coerces the string back to number.
newStats = player.stats;
newStats.assets -= depreciation.toFixed(2);
if (depreciation > 0) {
gameEvents.addEvent(new event('Depreciation - ', gameTime.getFormatted(), 'Photos depreciated by $' + depreciation.toFixed(2), false, true));
}
return this.updateStats(newStats);
};
/*
Updates the player stats and animates the change in the DOM.
Derived figures: assets = stats.assets + CAB, working capital = assets -
liabilities. The previous displayed values are cached in this.preStat for
the end-of-turn bankruptcy check, and the CAB/working-capital readouts turn
red when the player is effectively broke.
@param {object} stats
The new stats to update to (usually an alias of this.stats).
*/
playerMarker.prototype.updateStats = function(stats) {
var animateText, assets, workingCapital;
// Tweens the displayed number from `from` to `to` over 500ms.
animateText = function(elem, from, to) {
return $({
current: from
}).animate({
current: to
}, {
duration: 500,
step: function() {
return $('#playerInfoOverlay #stats ' + elem + ' .val').text(this.current.toFixed());
}
});
};
// Reads this.stats; since callers pass an alias of this.stats the values
// here are the already-mutated ones -- TODO confirm this is intended.
assets = parseInt(this.stats.assets + this.stats.CAB);
workingCapital = parseInt(assets - this.stats.liabilities);
animateText('#CAB', parseInt($('#playerInfoOverlay #stats #CAB .val').text()), stats.CAB);
animateText('#liabilities', parseInt($('#playerInfoOverlay #stats #liabilities .val').text()), stats.liabilities);
animateText('#assets', parseInt($('#playerInfoOverlay #stats #assets .val').text()), assets);
animateText('#workingCapital', parseInt($('#playerInfoOverlay #stats #workingCapital .val').text()), workingCapital);
this.preStat = {
CAB: stats.CAB,
workingCapital: workingCapital,
assets: assets,
liabilities: stats.liabilities,
insanity: stats.insanity
};
if (workingCapital <= -1000 && this.stats.CAB <= 0) {
return $('#playerInfoOverlay #stats #workingCapital, #playerInfoOverlay #stats #CAB').css('color', 'red');
} else {
$('#playerInfoOverlay #stats #workingCapital, #playerInfoOverlay #stats #CAB').css('color', '');
return gameGlobal.turnConsts.alert = false;
}
};
return playerMarker;
})(gameLocation);
timeManager = (function() {
/*
Constructs the time manager object.
@constructor
@param {array} baseTime
The initial date/time to start the game with.
*/
function timeManager(baseTime) {
this.baseTime = baseTime;
this.timeCounter = 0;
this.dateCounter = 0;
this.monthCounter = 0;
this.yearCounter = 0;
}
/*
Increases the game time by hours.
@param {integer} hours
The hours to increase the game time by.
*/
timeManager.prototype.incrementTime = function(hours) {
var results1;
this.timeCounter += hours;
results1 = [];
while (this.timeCounter >= 24) {
this.incrementDays(1);
this.timeCounter -= 24;
if (this.timeCounter < 24) {
this.timeCounter = this.timeCounter % 24;
break;
} else {
results1.push(void 0);
}
}
return results1;
};
/*
Increases the game time by days.
@param {integer} days
The days to increase the game time by.
*/
timeManager.prototype.incrementDays = function(days) {
var i, results1;
i = 0;
while (i <= days) {
this.dateCounter++;
player.depreciateInv();
gameInsanity.setBar(gameInsanity.value * 0.95);
i++;
if (i >= days) {
i = 0;
break;
}
}
results1 = [];
while (this.dateCounter >= 30) {
this.incrementMonths(1);
this.dateCounter -= 30;
endTurn(this.getFormatted());
if (this.dateCounter < 30) {
this.dateCounter = this.dateCounter % 30;
break;
} else {
results1.push(void 0);
}
}
return results1;
};
/*
Increases the game time by months.
@param {integer} months
The monthes to increase the game time by.
*/
timeManager.prototype.incrementMonths = function(months) {
var results1;
this.monthCounter += months;
results1 = [];
while (this.monthCounter >= 12) {
this.incrementYears(1);
this.monthCounter -= 12;
if (this.monthCounter < 12) {
this.monthCounter = this.monthCounter % 12;
break;
} else {
results1.push(void 0);
}
}
return results1;
};
/*
Increases the game time by years.
@param {integer} years
The years to increase the game time by.
*/
timeManager.prototype.incrementYears = function(years) {
return this.yearCounter += years;
};
/*
Gets the current game time.
@return
Array containing the game time.
*/
timeManager.prototype.getAll = function() {
return [this.baseTime[0] + this.yearCounter, this.baseTime[1] + this.monthCounter, this.baseTime[2] + this.dateCounter, parseInt(this.baseTime[3]) + this.timeCounter];
};
/*
Gets the formatted current game time.
@return
Stringified and formatted game time.
*/
timeManager.prototype.getFormatted = function() {
var date, hours, minutes, month, year;
year = this.baseTime[0] + this.yearCounter;
month = this.baseTime[1] + this.monthCounter;
date = this.baseTime[2] + this.dateCounter;
hours = parseInt(this.baseTime[3]) + this.timeCounter;
minutes = parseInt((hours - Math.floor(hours)) * 60);
if (date > 30) {
date -= date - 30;
}
if (String(parseInt(minutes)).length === 2) {
return year + '/' + month + '/' + date + ' ' + String(Math.floor(hours)) + ':' + String(parseInt(minutes));
} else {
return year + '/' + month + '/' + date + ' ' + String(Math.floor(hours)) + ':' + String(parseInt(minutes)) + '0';
}
};
return timeManager;
})();
eventManager = (function() {
/*
Constructs the event manager that handles all game events.
@constructor
@param {DOMElement} domSelector
The DOM container the event log rows are prepended to.
@param {DOMElement} domOverlay
The overlay element used to display random (Plus mode) events.
*/
function eventManager(domSelector, domOverlay) {
this.domSelector = domSelector;
this.domOverlay = domOverlay;
this.events = [];
}
/*
Adds an event. Random events ('randomEvent' constructor) apply their stat
and insanity effects and are shown in the overlay; all other events are
appended to the log, styled by their warn/special flags.
@param {object} event
The event object to add to the event manager.
*/
eventManager.prototype.addEvent = function(event) {
var effectName, j, len, newStats, ref;
// 'currentTime' is a sentinel meaning "stamp with the game clock now".
if (event.time === 'currentTime') {
event.time = gameTime.getFormatted();
}
// NOTE: constructor.name checks break under minification -- be aware.
if (event.constructor.name === 'randomEvent') {
if (event.effects) {
gameInsanity.updateBar(event.effects.insanity);
// newStats aliases player.stats; every effect key (including
// 'insanity') is added onto the matching stat.
newStats = player.stats;
ref = Object.keys(event.effects);
for (j = 0, len = ref.length; j < len; j++) {
effectName = ref[j];
newStats[effectName] += event.effects[effectName];
}
player.updateStats(newStats);
}
this.domOverlay.find('.title').text(event.title);
this.domOverlay.find('.content').text(event.content);
return this.domOverlay.show();
} else {
this.events.push(event);
if (event.warn) {
return $('<div class="row"> <p class="time">' + event.time + '</p> <p class="title warn">' + event.title + '</p> <p class="content">' + event.content + '</p> </div>').hide().prependTo(this.domSelector).fadeIn();
} else if (event.special) {
return $('<div class="row"> <p class="time special">' + event.time + '</p> <p class="title special">' + event.title + '</p> <p class="content special">' + event.content + '</p> </div>').hide().prependTo(this.domSelector).fadeIn();
} else {
return $('<div class="row"> <p class="time">' + event.time + '</p> <p class="title">' + event.title + '</p> <p class="content">' + event.content + '</p> </div>').hide().prependTo(this.domSelector).fadeIn();
}
}
};
return eventManager;
})();
playerInsanity = (function() {
/*
Constructs playerInsanity object to handle player insanity events.
The value is rendered as a percentage-height bar; reaching 100 ends the
game.
@constructor
@param {DOMElement} domSelector
The DOM element containing the insanity bar.
@param {integer} initVal
The initial insanity value (0-100).
*/
function playerInsanity(domSelector, initVal) {
this.domSelector = domSelector;
this.initVal = initVal;
this.value = this.initVal;
}
/*
Sets the insanity bar to an absolute value.
@param {integer} value
The level of insanity to set to.
*/
playerInsanity.prototype.setBar = function(value) {
this.value = value;
return this.domSelector.find('.bar').css('height', this.value + '%');
};
/*
Updates the insanity level by a delta. Exceeding 100 triggers endGame().
@param {integer} value
The level to increase the current insanity level by (may be negative).
*/
playerInsanity.prototype.updateBar = function(value) {
if (this.value + value > 100) {
endGame();
return this.domSelector.find('.bar').css('height', '100%');
} else {
this.value += value;
// NOTE(review): when the result goes negative the internal value is
// still updated but the bar is not redrawn (empty branch) -- confirm
// whether it should be clamped to 0 instead.
if (this.value < 0) {
} else {
return this.domSelector.find('.bar').css('height', this.value + '%');
}
}
};
return playerInsanity;
})();
/*
Processes and validates an array of data.
@param {array} data
The set of data to process.
@return
The array of processed data/
*/
processData = function(data) {
var item, j, len, processedData, ref;
processedData = [];
ref = data.result.records;
for (j = 0, len = ref.length; j < len; j++) {
item = ref[j];
if (item['dcterms:spatial']) {
if (item['dcterms:spatial'].split(';')[1]) {
processedData.push(item);
}
}
}
return processedData;
};
/*
Generates google map markers from a set of data records.
Each record's 'dcterms:spatial' field is expected to look like
"Name;lat,lng" (guaranteed by processData()). Every marker is added to the
global map, registered in `locations`, and assigned a value via setValue().
@param {array} data
The set of records to generate markers from.
*/
generateMarkers = function(data) {
var i, j, lat, len, lng, marker, place;
marker = [];
i = 0;
for (j = 0, len = data.length; j < len; j++) {
place = data[j];
lat = parseFloat(place['dcterms:spatial'].split(';')[1].split(',')[0]);
lng = parseFloat(place['dcterms:spatial'].split(';')[1].split(',')[1]);
// 4th arg (rare) starts false; setValue() below may flip it to true.
marker[i] = new gameLocation({
lat: lat,
lng: lng
}, place['dcterms:spatial'].split(';')[0], {
'title': place['dc:title'],
'description': place['dc:description'],
'img': place['150_pixel_jpg']
}, false);
marker[i].addTo(googleMap);
locations.push(marker[i]);
setValue(marker[i]);
i++;
}
// Re-roll visibility so not all freshly created markers remain shown.
return updateMarkers();
};
/*
Assigns a dollar value to a location, scaled by its distance from the
player. Roughly 5% of locations are flagged rare and keep the full
(undivided) value; the rest are worth a tenth of it.
@param {object} location
gameLocation object whose value (and possibly rare flag) is set.
*/
setValue = function(location) {
var isRare = Math.random() <= 0.05;
var baseValue = Math.random() * distanceTravelled(player.position, location.position) + 100;
if (isRare) {
location.value = parseInt(baseValue);
return location.rare = true;
} else {
return location.value = parseInt(baseValue / 10);
}
};
/*
Updates the markers as the player moves: each visible location has roughly
a 20% chance (Math.random() >= 0.8) of being hidden, simulating photo
opportunities disappearing over time.
Fix: removed the unused `show` local and its wasted Math.random() call --
it was computed every iteration and never read.
@see playerMarker.prototype.moveTo()
*/
updateMarkers = function() {
var hide, j, len, location, results1;
results1 = [];
for (j = 0, len = locations.length; j < len; j++) {
location = locations[j];
hide = Math.random() >= 0.8;
if (hide) {
results1.push(location.marker.setVisible(false));
} else {
results1.push(void 0);
}
}
return results1;
};
/*
Instantiate the game components. The game clock starts at 1939/1/1 00:00.
*/
gameEvents = new eventManager($('#eventLog .eventContainer'), $('#randomEventOverlay'));
gameTime = new timeManager([1939, 1, 1, 0]);
gameTutorial = new tutorialHandler($('.tutorial'));
gameInsanity = new playerInsanity($('#insanityBar'), 0);
// Note: the 5th constructor arg (stats) is omitted here; player.stats is
// assigned from gameGlobal.init.stats two lines below initTo().
player = new playerMarker({
lat: -25.363,
lng: 151.044
}, 'player', {
'type': 'self'
}, 'https://developers.google.com/maps/documentation/javascript/images/custom-marker.png');
player.initTo(googleMap);
player.stats = gameGlobal.init.stats;
player.updateStats(player.stats);
photographyGame = (function() {
/*
Constructs the photography game.
@constructor
@param {boolean} debug
The debug state of the game.
*/
function photographyGame(debug) {
this.debug = debug;
this.score = 0;
}
/*
Initialize the photography game: load (or fetch and cache) the records
dataset, shuffle it, generate `amount` markers, start the tutorial and log
the opening event. Fetched data is cached in localStorage under
'photographyGameData'.
@param {integer} amount
The amount of markers to initialize the game with.
*/
photographyGame.prototype.init = function(amount) {
var localInit;
// Shared tail of both the cached and the freshly-fetched paths.
localInit = function() {
// Cheap shuffle; note sort-with-random is biased, acceptable here.
validData.sort(function() {
return 0.5 - Math.random();
});
generateMarkers(validData.slice(0, amount));
gameTutorial.init();
return gameEvents.addEvent(new event('Game started', gameTime.getFormatted(), ''));
};
if (localStorage.getItem('photographyGameData')) {
validData = processData(JSON.parse(localStorage.getItem('photographyGameData')));
// Cached data too small for the requested amount: re-fetch and re-cache.
if (amount > validData.length) {
return retrieveResources(3000).then(function(res) {
localStorage.setItem('photographyGameData', JSON.stringify(res));
validData = processData(res);
return localInit();
});
} else {
return localInit();
}
} else {
return retrieveResources(3000).then(function(res) {
localStorage.setItem('photographyGameData', JSON.stringify(res));
validData = processData(res);
return localInit();
});
}
};
/*
Saves the current user score to the database.
Game id 2 is the base game, 4 is Plus mode. The server response status is
reflected in the end-game dialog (red text on failure).
*/
photographyGame.prototype.saveScore = function() {
var gameId;
gameId = '2';
if (gameGlobal.init.isPlus) {
gameId = '4';
}
return submitUserData({
method: 'saveScore',
gameId: gameId,
value: this.score
}).then(function(res) {
res = JSON.parse(res);
if (res.status === 'success') {
$('#gameEnd .status').css('color', '');
return $('#gameEnd .status').text(res.message);
} else {
$('#gameEnd .status').css('color', 'red');
return $('#gameEnd .status').text(res.message);
}
});
};
return photographyGame;
})();
/*
Instantiate the photography game. Difficulty comes from the 'diff' URL
parameter: normal = 100 markers, extended = 500.
*/
currentGame = new photographyGame(false);
if (getParam('diff') === 'normal') {
currentGame.init(100);
} else if (getParam('diff') === 'extended') {
currentGame.init(500);
}
/*
Displays the end game screen with the survival summary and final score
(months survived * photos sold * money earned).
*/
endGame = function() {
$('#gameEnd .stat').text('You survived for ' + gameGlobal.trackers.monthPassed + ' Months, selling ' + gameGlobal.trackers.photosSold + ' photos and making over $' + gameGlobal.trackers.moneyEarned);
currentGame.score = gameGlobal.trackers.monthPassed * gameGlobal.trackers.photosSold * gameGlobal.trackers.moneyEarned;
$('#gameEnd .score').text('Your score: ' + currentGame.score + ' pt');
return $('#gameEnd').show();
};
/*
Ends the month: in story mode the game finishes after 3 months; otherwise
re-rolls the interest rate, charges liabilities, applies the bankruptcy
"alert" grace rule (a second consecutive broke month ends the game), and
gives hidden locations an 80% chance to reappear.
@param {string} date
The date which the month ended on.
*/
endTurn = function(date) {
var j, len, location, newStats, results1, show;
if (gameGlobal.init.isStory && gameGlobal.trackers.monthPassed >= 3) {
if (gameGlobal.init.isPlus) {
$('#gameEnd h4').text('You wake up one day, you feel pain all across your body...');
} else {
$('#gameEnd h4').text('You recieve a letter from the army. Now you can finally join the front lines.');
}
$('#gameEnd .score').hide();
endGame();
}
// New random interest rate (0-5%) for next month's loans.
gameGlobal.turnConsts.interest = (Math.random() * 5).toFixed(2);
gameEvents.addEvent(new event('The month comes to an end.', date, 'Paid $' + player.stats.liabilities + ' in expenses', true));
// Alias of player.stats: pay this month's liabilities, reset to baseline.
newStats = player.stats;
newStats.CAB -= player.stats.liabilities;
newStats.liabilities = gameGlobal.turnConsts.stdLiabilities;
player.updateStats(newStats);
// preStat holds last-displayed values; broke two months in a row (alert
// already set) ends the game, otherwise the first broke month sets alert.
if (player.preStat.workingCapital <= -1000 && player.preStat.CAB <= 0) {
if (gameGlobal.turnConsts.alert) {
endGame();
} else {
gameGlobal.trackers.monthPassed += 1;
}
gameGlobal.turnConsts.alert = true;
} else {
gameGlobal.trackers.monthPassed += 1;
}
if (gameGlobal.turnConsts.alert && player.preStat.workingCapital > -1000 && player.preStat.CAB > 0) {
gameGlobal.turnConsts.alert = false;
}
results1 = [];
for (j = 0, len = locations.length; j < len; j++) {
location = locations[j];
show = Math.random() > 0.2;
if (show) {
results1.push(location.marker.setVisible(true));
} else {
results1.push(void 0);
}
}
return results1;
};
/*
Displays the taking-picture screen: randomises the widths of the timing-bar
zones, resets the slider and buttons, and hides the action buttons while
the mini-game is open.
*/
$('#takePic').hide();
$('#takePic').click(function() {
$('#takingPic .section3').css('width', (Math.floor(Math.random() * (10 + 2))) + 1 + '%');
$('#takingPic .section2').css('width', (Math.floor(Math.random() * (19 + 2))) + '%');
$('#takingPic .section4').css('width', (Math.floor(Math.random() * (19 + 2))) + '%');
$('#takingPic .slider').css('left', 0);
$('#takingPic .start, #takingPic .stop').prop('disabled', false);
$('#takingPic .shotStats').hide();
$('#takingPic').show();
$('#takingPic .viewInv').hide();
$('#takingPic .close').hide();
$(this).hide();
return $('#takeDrink').hide();
});
/*
Starts the animation of the slider when taking the picture. If the player
never presses stop, the slider runs the full bar width and the shot is
scored at the end position.
*/
$('#takingPic .start').click(function() {
$(this).prop('disabled', true);
return $('#takingPic .slider').animate({
'left': $('#takingPic .section1').width() + $('#takingPic .section2').width() + $('#takingPic .section3').width() + $('#takingPic .section4').width() + $('#takingPic .section5').width() + 'px'
}, 1000, function() {
return calculatePicValue();
});
});
/*
Ends the animation of the slider when taking the picture and scores the
shot at the stopped position.
*/
$('#takingPic .stop').click(function() {
$(this).prop('disabled', true);
$('#takingPic .slider').stop();
$('#takingPic .close').show();
return calculatePicValue();
});
/*
Calculates the value of the picture based on the slider position.
Landing in both the blue and green zones gives a 1.4x multiplier and top
quality (0 = no blur); blue only is average (1); outside is 0.8x and
blurred (2). The quality number doubles as the CSS blur radius in the
inventory view. Taking a shot costs 24-33 game hours, and rare locations
additionally offer saving the picture to the player's collection.
Note: 'quailty' is a long-standing misspelling of 'quality' kept for
consistency with addShotToInv().
*/
calculatePicValue = function() {
var inBlue, inGreen, multiplier, quailty, sliderPosition, timeTaken;
$('#takingPic .viewInv').show();
$('#takingPic .shotStats').show();
multiplier = 1;
quailty = 1;
sliderPosition = parseInt($('#takingPic .slider').css('left'), 10);
// Blue zone spans sections 2-4; green zone spans section 3 only.
inBlue = ($('#takingPic .section1').position().left + $('#takingPic .section1').width()) <= sliderPosition && sliderPosition <= $('#takingPic .section5').position().left;
inGreen = ($('#takingPic .section2').position().left + $('#takingPic .section2').width()) <= sliderPosition && sliderPosition <= $('#takingPic .section4').position().left;
if (inBlue && inGreen) {
multiplier = 1.4;
quailty = 0;
$('.shotStats').text('You take a high quailty photo, this will surely sell for more!');
} else if (inBlue) {
$('.shotStats').text('You take a average photo.');
} else {
multiplier = 0.8;
quailty = 2;
$('.shotStats').text('The shot comes out all smudged...');
}
addShotToInv(multiplier, quailty);
timeTaken = Math.floor(Math.random() * 10) + 24;
gameTime.incrementTime(timeTaken);
gameEvents.addEvent(new event('Taking Pictures', gameTime.getFormatted(), 'You spend some time around ' + player.playerAt.name + '. ' + timeTaken + ' hours later, you finally take a picture of value.'));
if (player.playerAt.rare) {
gameEvents.addEvent(new event('Rare Picture -', gameTime.getFormatted(), 'You take a rare picture.', true));
// Story mode skips the save-picture collection feature.
if (!gameGlobal.init.isStory) {
if ($('#savePicOverlay .img img').length === 0) {
$('#savePicOverlay .img').append($('<img src="' + player.playerAt.data.img + '">'));
} else {
$('#savePicOverlay .img img').attr('src', player.playerAt.data.img);
}
$('#savePicOverlay .title').text(player.playerAt.data.title);
$('#savePicOverlay #confirmSavePic').prop('disabled', false);
return $('#savePicOverlay').show();
}
}
};
/*
Instantiates a gamePhoto and adds it to the player's inventory.
The photo's value is the current location's value times the multiplier; the
location's marker is hidden (photographed) and the photo value is added to
the player's assets.
@param {integer} multiplier
The scalar to multiply the value of the shot by.
@param {integer} quailty
The quality of the picture (0 best, 2 worst; used as blur radius).
*/
addShotToInv = function(multiplier, quailty) {
var newStats, photoValue, shotTaken;
photoValue = player.playerAt.value * multiplier;
// gamePhoto(value, washed, img, title, quality) -- starts unwashed.
shotTaken = new gamePhoto(photoValue, false, player.playerAt.data.img, player.playerAt.data.title, quailty);
player.inventory.push(shotTaken);
player.playerAt.marker.setVisible(false);
// Alias of player.stats.
newStats = player.stats;
newStats.assets += photoValue;
// NOTE(review): workingCapital is normally recomputed inside updateStats
// from assets - liabilities; mutating it directly here looks ineffective
// -- confirm intended.
newStats.workingCapital -= player.playerAt.travelExpense / 2;
return player.updateStats(newStats);
};
/*
Displays the player inventory and closes the previous element's parent
(e.g. the taking-picture dialog).
*/
$('.viewInv').click(function() {
closeParent(this);
return displayInv();
});
/*
Displays the player inventory.
*/
$('#checkInv').click(function() {
return displayInv();
});
/*
Generates the player inventory view: unwashed photos go to the camera roll
(their value is "potential"), washed photos to the washed list ("sellable").
Each photo is blurred by its quality value and labelled with value/title.
*/
displayInv = function() {
var item, j, len, picture, pictureContainer, potentialValue, ref, sellableValue;
$('#blockOverlay').show();
// Rebuild the list from scratch on every open.
$('#inventory .photoContainer').remove();
$('#inventory').show();
potentialValue = 0;
sellableValue = 0;
ref = player.inventory;
for (j = 0, len = ref.length; j < len; j++) {
item = ref[j];
pictureContainer = $('<div class="photoContainer"></div>');
picture = $('<div class="crop"> <img class="photo" src="' + item.img + '"/> </div>').css('filter', 'blur(' + item.quailty + 'px');
picture.appendTo(pictureContainer);
if (!item.washed) {
pictureContainer.appendTo($('#inventory .cameraRoll'));
potentialValue += item.value;
} else {
pictureContainer.appendTo($('#inventory .washedPics'));
sellableValue += item.value;
}
$('<aside> <p>Value $' + parseInt(item.value) + '</p> <p>' + item.title + '</p> </aside>').appendTo(pictureContainer);
}
$('#rollValue').text('Total value $' + parseInt(potentialValue + sellableValue));
return $('#sellableValue').text('Sellable Pictures value $' + parseInt(sellableValue));
};
/*
Displays the waiting screen; the confirm button stays disabled until the
day-count input is non-empty (see the shared keyup validator below).
*/
$('#wait').click(function() {
if ($('#waitTimeInput').val() === '') {
$('#waitTimeInput').parent().find('button.confirm').prop('disabled', true);
}
return $('#waitInfo').show();
});
/*
Waits and passes the game time by the entered number of days, logs the
event, and spawns new markers: half as many as the days waited, plus each
existing location gets a days/2-in-30 chance to reappear.
*/
$('#confirmWait').click(function() {
var j, len, location, results1, show;
gameTime.incrementDays(parseInt($('#waitTimeInput').val()));
if (parseInt($('#waitTimeInput').val()) !== 1) {
gameEvents.addEvent(new event('', gameTime.getFormatted(), 'You wait ' + $('#waitTimeInput').val() + ' days'));
} else {
gameEvents.addEvent(new event('', gameTime.getFormatted(), 'You wait ' + $('#waitTimeInput').val() + ' day'));
}
validData.sort(function() {
return 0.5 - Math.random();
});
generateMarkers(validData.slice(0, parseInt($('#waitTimeInput').val()) / 2));
results1 = [];
for (j = 0, len = locations.length; j < len; j++) {
location = locations[j];
show = Math.floor(Math.random() * 30) <= parseInt($('#waitTimeInput').val()) / 2;
if (show) {
results1.push(location.marker.setVisible(true));
} else {
results1.push(void 0);
}
}
return results1;
});
/*
Displays the pictures available for washing. If nothing is unwashed, shows
an informational dialog with no confirm button.
NOTE(review): when there ARE unwashed pictures, the whole inventory is
marked washed immediately -- before the user confirms -- while the time
cost is only charged in the confirm handler below. Cancelling still leaves
the photos washed; confirm whether that is intended.
*/
$('#washPic').click(function() {
var item, j, k, len, len1, notWashed, ref, ref1;
notWashed = [];
ref = player.inventory;
for (j = 0, len = ref.length; j < len; j++) {
item = ref[j];
if (!item.washed) {
notWashed.push(item);
}
}
if (notWashed.length === 0) {
$('#washPicOverlay p').text('There are no pictures to wash.');
$('#washPicOverlay').show();
return $('#washPicOverlay #confirmWashPic').hide();
} else {
ref1 = player.inventory;
for (k = 0, len1 = ref1.length; k < len1; k++) {
item = ref1[k];
item.washed = true;
}
$('#washPicOverlay p').text('Washing photos takes ' + gameGlobal.turnConsts.pictureWashingTime + ' days. Proceed?');
$('#washPicOverlay').show();
return $('#washPicOverlay #confirmWashPic').show();
}
});
/*
Washes all unwashed pictures in the player's inventory: advances the clock
by the washing time (plus up to 10 random hours) and logs the event.
*/
$('#confirmWashPic').click(function() {
gameTime.incrementTime(10 * Math.random());
gameTime.incrementDays(gameGlobal.turnConsts.pictureWashingTime);
return gameEvents.addEvent(new event('Washed pictures.', gameTime.getFormatted(), 'You wash all pictures in your camera.'));
});
/*
Displays the take-loan screen with the current interest rate.
*/
$('#takeLoan').click(function() {
$('#IR').text('Current interest rate ' + gameGlobal.turnConsts.interest + '%');
if ($('#loanInput').val() === '') {
$('#loanInput').parent().find('button.confirm').prop('disabled', true);
}
return $('#loanOverlay').show();
});
/*
Confirms the loan: adds principal + interest to liabilities and the
principal to cash at bank.
NOTE(review): the rate is displayed as a percentage but applied as
interest / 10 (not / 100) -- confirm which is intended.
*/
$('#confirmLoan').click(function() {
var newStats;
newStats = player.stats;
newStats.liabilities += parseInt($('#loanInput').val()) + parseInt($('#loanInput').val()) * (gameGlobal.turnConsts.interest / 10);
newStats.CAB += parseInt($('#loanInput').val());
player.updateStats(newStats);
return gameEvents.addEvent(new event('Bank loan.', gameTime.getFormatted(), 'You take a bank loan of $' + parseInt($('#loanInput').val())));
});
/*
Validates the input to ensure it is a non-empty number; enables/disables
the sibling confirm button accordingly.
*/
$('#loanInput, #waitTimeInput').keyup(function() {
if (!$.isNumeric($(this).val()) || $(this).val() === '') {
$(this).parent().find('.err').text('*Input must be a number');
return $(this).parent().find('button.confirm').prop('disabled', true);
} else {
$(this).parent().find('.err').text('');
return $(this).parent().find('button.confirm').prop('disabled', false);
}
});
/*
Displays the sell-pictures screen: sums the washed (sellable) photos and
their total value; the sell button is hidden when nothing is sellable.
*/
$('#sellPic').click(function() {
var j, len, photo, photosValue, ref, sellablePhotos;
sellablePhotos = 0;
photosValue = 0;
ref = player.inventory;
for (j = 0, len = ref.length; j < len; j++) {
photo = ref[j];
if (photo.washed) {
sellablePhotos += 1;
photosValue += photo.value;
}
}
$('#soldInfoOverlay p').text('Potential Earnings $' + parseInt(photosValue) + ' from ' + sellablePhotos + ' Photo/s');
if (sellablePhotos === 0) {
$('#soldInfoOverlay button').hide();
} else {
$('#soldInfoOverlay button').show();
}
return $('#soldInfoOverlay').show();
});
/*
Sells the washed photos in the player's inventory.
Each washed photo sells for its value plus a random bonus of up to 100% of
its value; unwashed photos are kept. Selling takes 1-3 days per photo, the
cash is added to CAB and the estimated value removed from assets.
*/
$('#sellPhotos').click(function() {
var earningsAct, earningsEst, j, len, newInventory, newStats, photo, photosSold, ref, timeTaken;
photosSold = 0;
earningsEst = 0;
earningsAct = 0;
newInventory = [];
// Alias of player.stats.
newStats = player.stats;
ref = player.inventory;
for (j = 0, len = ref.length; j < len; j++) {
photo = ref[j];
if (photo.washed) {
earningsAct += parseInt(photo.value + (photo.value * Math.random()));
earningsEst += photo.value;
photosSold += 1;
gameGlobal.trackers.photosSold += 1;
} else {
newInventory.push(photo);
}
}
// BUG FIX: moneyEarned was previously incremented by the RUNNING total
// (earningsAct) inside the loop, over-counting earnings once per photo
// sold. Add the final total exactly once instead.
gameGlobal.trackers.moneyEarned += earningsAct;
timeTaken = ((Math.random() * 2) + 1) * photosSold;
player.inventory = newInventory;
newStats.CAB += earningsAct;
newStats.assets -= earningsEst;
player.updateStats(newStats);
gameTime.incrementDays(parseInt(timeTaken));
if (parseInt(timeTaken) === 1) {
return gameEvents.addEvent(new event('Selling Pictures.', gameTime.getFormatted(), 'It took ' + parseInt(timeTaken) + ' day to finally sell everything. Earned $' + earningsAct + ' from selling ' + photosSold + ' Photo/s.'));
} else {
return gameEvents.addEvent(new event('Selling Pictures.', gameTime.getFormatted(), 'It took ' + parseInt(timeTaken) + ' days to finally sell everything. Earned $' + earningsAct + ' from selling ' + photosSold + ' Photo/s.'));
}
});
/*
Blocks the game when an overlay/interface is active ('#takeDrink' is the
only action that needs no overlay).
*/
$('#actions button').click(function() {
if ($(this).attr('id') !== 'takeDrink') {
return $('#blockOverlay').show();
}
});
/*
Closes the overlay.
*/
$('.confirm, .close').click(function() {
return closeParent(this);
});
/*
Saves the displayed rare picture to the player's collection, then disables
the button to prevent duplicate saves.
*/
$('#confirmSavePic').click(function() {
saveItem($('#savePicOverlay .img img').attr('src'), $('#savePicOverlay .title').text());
return $(this).prop('disabled', true);
});
/*
Closes the parent of the original DOM element, removes the blocking
overlay and clears any status text.
@param {DOMElement} self
The element whose parent should be hidden.
*/
closeParent = function(self) {
$(self).parent().hide();
$('#blockOverlay').hide();
return $('.status').text('');
};
/*
jQuery UI draggable handler for the actions panel.
*/
$('#actions').draggable();
$('#actions').mousedown(function() {
return $('#actions p').text('Actions');
});
/*
Saves the current user score.
*/
$('#saveScore').click(function() {
return currentGame.saveScore();
});
/*
Binds the tutorial's generated next/prev buttons via event delegation
(they are created after page load).
*/
$('body').on('click', '.tutorial .next', function() {
return gameTutorial.next();
});
$('body').on('click', '.tutorial .prev', function() {
return gameTutorial.prev();
});
/*
Handles new Plus mode mechanics: random-event responses (take a break /
see a doctor) and the take-a-drink stress reliever.
*/
$('#randomEventOverlay .break').click(function() {
// Taking a break costs 5 days and removes 25% of insanity.
gameTime.incrementDays(5);
gameEvents.addEvent(new event('You take sometime off...', gameTime.getFormatted(), ''));
return gameInsanity.setBar(gameInsanity.value * 0.75);
});
$('#randomEventOverlay .seeDoc').click(function() {
var newStats;
// The doctor costs 2 days and $500 but removes 75% of insanity and
// delays the next random-event roll (eventOccurrence reset to -1).
gameTime.incrementDays(2);
newStats = player.stats;
newStats.CAB -= 500;
player.updateStats(newStats);
gameEvents.addEvent(new event('You visit a nearby doctor...', gameTime.getFormatted(), ''));
gameInsanity.setBar(gameInsanity.value * 0.25);
return gameGlobal.eventOccurrence = -1;
});
$('#takeDrink').hide();
return $('#takeDrink').click(function() {
var newStats;
// A drink costs $50 and reduces insanity by 5 points.
$('#takePic').hide();
$(this).hide();
gameEvents.addEvent(new event('You go to a nearby pub', gameTime.getFormatted(), 'You spend $50 on some shots. It relieves some of your stress...'));
newStats = player.stats;
newStats.CAB -= 50;
player.updateStats(newStats);
return gameInsanity.updateBar(-5);
});
});
}).call(this);
|
import React, { Component } from "react";
import {
Container,
Header,
Title,
Content,
Button,
Icon,
Left,
Right,
Body,
Text,
ListItem,
List
} from "native-base";
import styles from "./styles";
// Rows for the header-examples list: `route` must match a screen registered
// with the navigator, `text` is the label shown in the list.
// NOTE(review): there is no "Header5" entry (the list jumps from Header4 to
// Header6) -- confirm whether that screen was removed intentionally.
const datas = [
  {
    route: "Header1",
    text: "Only Title"
  },
  {
    route: "Header2",
    text: "Icon Buttons"
  },
  {
    route: "Header3",
    text: "Text Buttons"
  },
  {
    route: "Header4",
    text: "Icon Button and Text Button"
  },
  {
    route: "Header6",
    text: "Multiple Icon Buttons"
  },
  {
    route: "Header7",
    text: "Title and Subtitle"
  },
  {
    route: "Header8",
    text: "Custom Background Color"
  }
];
// Showcase screen for NativeBase Header variants: a header with a drawer
// toggle on the left, plus a list of example screens (see `datas` above);
// tapping a row navigates to the corresponding header demo.
class HeaderNB extends Component {
  // eslint-disable-line
  render() {
    // Each list row shows the demo name and a forward chevron, and pushes
    // the route named in the row's data on press.
    return (
      <Container style={styles.container}>
        <Header>
          <Left>
            <Button
              transparent
              onPress={() => this.props.navigation.navigate("DrawerOpen")}
            >
              <Icon name="menu" />
            </Button>
          </Left>
          <Body>
            <Title>Headers</Title>
          </Body>
          <Right />
        </Header>
        <Content>
          <List
            dataArray={datas}
            renderRow={data => (
              <ListItem
                button
                onPress={() => this.props.navigation.navigate(data.route)}
              >
                <Left>
                  <Text>{data.text}</Text>
                </Left>
                <Right>
                  <Icon name="arrow-forward" style={{ color: "#999" }} />
                </Right>
              </ListItem>
            )}
          />
        </Content>
      </Container>
    );
  }
}
export default HeaderNB;
|
from .base import Unknown
from .dhl import DHLAWB
from .dhl import DHLEXPRESSBOXNUM
from .fedex import FedExExpress
from .fedex import FedExGround96
from .ontrac import OnTrac
from .ups import UPS
from .usps import USPSIMpb
from .usps import USPSS10
from .usps import USPS20
# Public registry of the barcode/tracking-number class names defined in this
# package, as strings matching the imports above.
# NOTE(review): 'Unknown' (imported from .base) is not listed here --
# confirm whether the fallback class is meant to be excluded from the
# registry.
barcode_classes = ['DHLAWB', 'DHLEXPRESSBOXNUM', 'FedExExpress', 'FedExGround96', 'OnTrac', 'UPS', 'USPSIMpb', 'USPSS10', 'USPS20']
|