from twilio.rest import Client
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
number = client.lookups.phone_numbers("+16502530000").fetch(
type="caller-name",
)
print(number.carrier['type'])
print(number.carrier['name'])
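# A follow-up sketch (assumption: Lookup v1 accepts a list for the `type` parameter,
# per Twilio's documentation): carrier and caller-name data can be requested together.
combined = client.lookups.phone_numbers("+16502530000").fetch(
    type=["carrier", "caller-name"],
)
print(combined.caller_name)  # dict with caller_name / caller_type when available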
|
import json
from dateutil import parser as datetime_parser
from occam.app import get_redis
from occam.runtime import OCCAM_SERVER_CONFIG_KEY
def get_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
return servers.items()
def iterate_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
    for server_name, server_location in servers.items():  # items() works on both Python 2 and 3, matching get_servers()
yield server_name, server_location
def sorted_by_time_element(l, element_getter=None):
if not element_getter:
element_getter = lambda x: x
key_getter = lambda x: datetime_parser.parse(element_getter(x))
return sorted(l, key=key_getter)
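# Usage sketch (illustrative data, not from the occam codebase): sort a list of dicts
# by an ISO-8601 timestamp pulled out of each element.
if __name__ == '__main__':
    events = [
        {'name': 'deploy', 'at': '2015-03-02T10:00:00'},
        {'name': 'build', 'at': '2015-03-01T09:30:00'},
    ]
    ordered = sorted_by_time_element(events, element_getter=lambda e: e['at'])
    print([e['name'] for e in ordered])  # ['build', 'deploy']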
|
import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
        if currently_speaking is None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import UploadBlackListView, DemoView, UdateBlackListView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^upload-blacklist$', login_required(UploadBlackListView.as_view()), name='upload-blacklist'),
url(r'^update-blacklist$', UdateBlackListView.as_view(), name='update-blacklist'),
url(r'^profile/', include('n_profile.urls')),
url(r'^demo$', DemoView.as_view(), name='demo'),
]
|
import requests
from flask import session, Blueprint, redirect
from flask import request
from grano import authz
from grano.lib.exc import BadRequest
from grano.lib.serialisation import jsonify
from grano.views.cache import validate_cache
from grano.core import db, url_for, app
from grano.providers import github, twitter, facebook
from grano.model import Account
from grano.logic import accounts
blueprint = Blueprint('sessions_api', __name__)
@blueprint.route('/api/1/sessions', methods=['GET'])
def status():
permissions = {}
if authz.logged_in():
for permission in request.account.permissions:
permissions[permission.project.slug] = {
'reader': permission.reader,
'editor': permission.editor,
'admin': permission.admin
}
keys = {
'p': repr(permissions),
'i': request.account.id if authz.logged_in() else None
}
validate_cache(keys=keys)
return jsonify({
'logged_in': authz.logged_in(),
'api_key': request.account.api_key if authz.logged_in() else None,
'account': request.account if request.account else None,
'permissions': permissions
})
def provider_not_enabled(name):
return jsonify({
'status': 501,
'name': 'Provider not configured: %s' % name,
'message': 'There are no OAuth credentials given for %s' % name,
}, status=501)
@blueprint.route('/api/1/sessions/logout', methods=['GET'])
def logout():
#authz.require(authz.logged_in())
session.clear()
return redirect(request.args.get('next_url', '/'))
@blueprint.route('/api/1/sessions/login/github', methods=['GET'])
def github_login():
if not app.config.get('GITHUB_CLIENT_ID'):
return provider_not_enabled('github')
callback=url_for('sessions_api.github_authorized')
session.clear()
if not request.args.get('next_url'):
raise BadRequest("No 'next_url' is specified.")
session['next_url'] = request.args.get('next_url')
return github.authorize(callback=callback)
@blueprint.route('/api/1/sessions/callback/github', methods=['GET'])
@github.authorized_handler
def github_authorized(resp):
next_url = session.get('next_url', '/')
    if resp is None or 'access_token' not in resp:
return redirect(next_url)
access_token = resp['access_token']
session['access_token'] = access_token, ''
res = requests.get('https://api.github.com/user?access_token=%s' % access_token,
verify=False)
data = res.json()
account = Account.by_github_id(data.get('id'))
data_ = {
'full_name': data.get('name'),
'login': data.get('login'),
'email': data.get('email'),
'github_id': data.get('id')
}
account = accounts.save(data_, account=account)
db.session.commit()
session['id'] = account.id
return redirect(next_url)
@blueprint.route('/api/1/sessions/login/twitter', methods=['GET'])
def twitter_login():
if not app.config.get('TWITTER_API_KEY'):
return provider_not_enabled('twitter')
callback=url_for('sessions_api.twitter_authorized')
session.clear()
if not request.args.get('next_url'):
raise BadRequest("No 'next_url' is specified.")
session['next_url'] = request.args.get('next_url')
return twitter.authorize(callback=callback)
@blueprint.route('/api/1/sessions/callback/twitter', methods=['GET'])
@twitter.authorized_handler
def twitter_authorized(resp):
next_url = session.get('next_url', '/')
    if resp is None or 'oauth_token' not in resp:
return redirect(next_url)
session['twitter_token'] = (resp['oauth_token'],
resp['oauth_token_secret'])
res = twitter.get('users/show.json?user_id=%s' % resp.get('user_id'))
account = Account.by_twitter_id(res.data.get('id'))
data_ = {
'full_name': res.data.get('name'),
'login': res.data.get('screen_name'),
'twitter_id': res.data.get('id')
}
account = accounts.save(data_, account=account)
db.session.commit()
session['id'] = account.id
return redirect(next_url)
@blueprint.route('/api/1/sessions/login/facebook', methods=['GET'])
def facebook_login():
if not app.config.get('FACEBOOK_APP_ID'):
return provider_not_enabled('facebook')
callback=url_for('sessions_api.facebook_authorized')
session.clear()
if not request.args.get('next_url'):
raise BadRequest("No 'next_url' is specified.")
session['next_url'] = request.args.get('next_url')
return facebook.authorize(callback=callback)
@blueprint.route('/api/1/sessions/callback/facebook', methods=['GET'])
@facebook.authorized_handler
def facebook_authorized(resp):
next_url = session.get('next_url', '/')
    if resp is None or 'access_token' not in resp:
return redirect(next_url)
session['facebook_token'] = (resp.get('access_token'), '')
data = facebook.get('/me').data
account = Account.by_facebook_id(data.get('id'))
data_ = {
'full_name': data.get('name'),
'login': data.get('username'),
'email': data.get('email'),
'facebook_id': data.get('id')
}
account = accounts.save(data_, account=account)
db.session.commit()
session['id'] = account.id
return redirect(next_url)
|
import json
import logging
from foxglove import glove
from httpx import Response
from .settings import Settings
logger = logging.getLogger('ext')
def lenient_json(v):
if isinstance(v, (str, bytes)):
try:
return json.loads(v)
except (ValueError, TypeError):
pass
return v
class ApiError(RuntimeError):
def __init__(self, method, url, status, response_text):
self.method = method
self.url = url
self.status = status
self.body = response_text
def __str__(self):
return f'{self.method} {self.url}, unexpected response {self.status}'
class ApiSession:
def __init__(self, root_url, settings: Settings):
self.settings = settings
self.root = root_url.rstrip('/') + '/'
async def get(self, uri, *, allowed_statuses=(200,), **data) -> Response:
return await self._request('GET', uri, allowed_statuses=allowed_statuses, **data)
async def delete(self, uri, *, allowed_statuses=(200,), **data) -> Response:
return await self._request('DELETE', uri, allowed_statuses=allowed_statuses, **data)
async def post(self, uri, *, allowed_statuses=(200, 201), **data) -> Response:
return await self._request('POST', uri, allowed_statuses=allowed_statuses, **data)
async def put(self, uri, *, allowed_statuses=(200, 201), **data) -> Response:
return await self._request('PUT', uri, allowed_statuses=allowed_statuses, **data)
async def _request(self, method, uri, allowed_statuses=(200, 201), **data) -> Response:
method, url, data = self._modify_request(method, self.root + str(uri).lstrip('/'), data)
kwargs = {}
headers = data.pop('headers_', None)
if headers is not None:
kwargs['headers'] = headers
if timeout := data.pop('timeout_', None):
kwargs['timeout'] = timeout
r = await glove.http.request(method, url, json=data or None, **kwargs)
if isinstance(allowed_statuses, int):
allowed_statuses = (allowed_statuses,)
if allowed_statuses != '*' and r.status_code not in allowed_statuses:
data = {
'request_real_url': str(r.request.url),
'request_headers': dict(r.request.headers),
'request_data': data,
'response_headers': dict(r.headers),
'response_content': lenient_json(r.text),
}
logger.warning(
'%s unexpected response %s /%s -> %s',
self.__class__.__name__,
method,
uri,
r.status_code,
extra={'data': data} if self.settings.verbose_http_errors else {},
)
raise ApiError(method, url, r.status_code, r.text)
else:
logger.debug('%s /%s -> %s', method, uri, r.status_code)
return r
def _modify_request(self, method, url, data):
return method, url, data
class Mandrill(ApiSession):
def __init__(self, settings):
super().__init__(settings.mandrill_url, settings)
def _modify_request(self, method, url, data):
data['key'] = self.settings.mandrill_key
return method, url, data
class MessageBird(ApiSession):
def __init__(self, settings):
super().__init__(settings.messagebird_url, settings)
def _modify_request(self, method, url, data):
data['headers_'] = {'Authorization': f'AccessKey {self.settings.messagebird_key}'}
return method, url, data
|
from distutils.core import setup
setup(
name='sequencehelpers',
py_modules=['sequencehelpers'],
version='0.2.1',
description="A library consisting of functions for interacting with sequences and iterables.",
author='Zach Swift',
author_email='cras.zswift@gmail.com',
url='https://github.com/2achary/sequencehelpers',
download_url='https://github.com/2achary/sequence/tarball/0.2.1',
keywords=['sequence', 'single', 'distinct'],
classifiers=[],
)
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kirppu', '0039_counter_private_key'),
]
operations = [
migrations.AlterUniqueTogether(
name='itemtype',
unique_together={('event', 'order')},
),
migrations.RemoveField(
model_name='itemtype',
name='key',
),
]
|
from django.contrib import admin
from .models import BackgroundImages, Widget
class WidgetAdmin(admin.ModelAdmin):
list_display = ('name', 'link', 'is_featured')
ordering = ('-id',)
class BackgroundAdmin(admin.ModelAdmin):
list_display = ('name', 'created_at')
ordering = ('-id',)
admin.site.register(Widget, WidgetAdmin)
admin.site.register(BackgroundImages, BackgroundAdmin)
|
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from DistributedMinigame import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.safezone import Walk
from toontown.toonbase import ToontownTimer
from direct.gui import OnscreenText
import MinigameAvatarScorePanel
from direct.distributed import DistributedSmoothNode
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
import TagGameGlobals
import Trajectory
class DistributedTagGame(DistributedMinigame):
DURATION = TagGameGlobals.DURATION
IT_SPEED_INCREASE = 1.3
IT_ROT_INCREASE = 1.3
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTagGame', [State.State('off', self.enterOff, self.exitOff, ['play']), State.State('play', self.enterPlay, self.exitPlay, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.walkStateData = Walk.Walk('walkDone')
self.scorePanels = []
self.initialPositions = ((0, 10, 0, 180, 0, 0),
(10, 0, 0, 90, 0, 0),
(0, -10, 0, 0, 0, 0),
(-10, 0, 0, -90, 0, 0))
base.localAvatar.isIt = 0
self.modelCount = 4
def getTitle(self):
return TTLocalizer.TagGameTitle
def getInstructions(self):
return TTLocalizer.TagGameInstructions
def getMaxDuration(self):
return self.DURATION
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.itText = OnscreenText.OnscreenText('itText', fg=(0.95, 0.95, 0.65, 1), scale=0.14, font=ToontownGlobals.getSignFont(), pos=(0.0, -0.8), wordwrap=15, mayChange=1)
self.itText.hide()
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.ground = loader.loadModel('phase_4/models/minigames/tag_arena')
self.music = base.loadMusic('phase_4/audio/bgm/MG_toontag.ogg')
self.tagSfx = base.loadSfx('phase_4/audio/sfx/MG_Tag_C.ogg')
self.itPointer = loader.loadModel('phase_4/models/minigames/bboard-pointer')
self.tracks = []
self.IT = None
return
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
self.ignoreAll()
del self.tracks
del self.IT
self.sky.removeNode()
del self.sky
self.itPointer.removeNode()
del self.itPointer
self.ground.removeNode()
del self.ground
del self.music
del self.tagSfx
self.itText.cleanup()
del self.itText
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.onstage(self)
self.ground.reparentTo(render)
self.sky.reparentTo(render)
myPos = self.avIdList.index(self.localAvId)
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.reparentTo(render)
base.localAvatar.loop('neutral')
camera.reparentTo(render)
camera.setPosHpr(0, -24, 16, 0, -30, 0)
base.camLens.setFar(450.0)
base.transitions.irisIn(0.4)
NametagGlobals.setMasterArrowsOn(1)
DistributedSmoothNode.activateSmoothing(1, 1)
self.IT = None
return
def offstage(self):
self.notify.debug('offstage')
DistributedSmoothNode.activateSmoothing(1, 0)
NametagGlobals.setMasterArrowsOn(0)
DistributedMinigame.offstage(self)
self.sky.reparentTo(hidden)
self.ground.reparentTo(hidden)
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
self.itText.hide()
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for avId in self.avIdList:
self.acceptTagEvent(avId)
myPos = self.avIdList.index(self.localAvId)
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avatar = self.getAvatar(avId)
if avatar:
avatar.startSmooth()
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.d_clearSmoothing()
base.localAvatar.sendCurrentPosition()
base.localAvatar.b_setAnimState('neutral', 1)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.gameFSM.request('play')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterPlay(self):
self.notify.debug('enterPlay')
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avName = self.getAvatarName(avId)
scorePanel = MinigameAvatarScorePanel.MinigameAvatarScorePanel(avId, avName)
scorePanel.setPos(-0.213, 0.0, 0.28 * i + 0.66)
scorePanel.reparentTo(base.a2dBottomRight)
self.scorePanels.append(scorePanel)
base.setCellsAvailable(base.rightCells, 0)
self.walkStateData.enter()
self.walkStateData.fsm.request('walking')
if base.localAvatar.isIt:
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.setTime(self.DURATION)
self.timer.countdown(self.DURATION, self.timerExpired)
base.playMusic(self.music, looping=1, volume=0.9)
base.localAvatar.setIdealCameraPos(Point3(0, -24, 8))
def exitPlay(self):
for task in self.tracks:
task.finish()
self.tracks = []
for avId in self.avIdList:
toon = self.getAvatar(avId)
if toon:
toon.getGeomNode().clearMat()
toon.scale = 1.0
toon.rescaleToon()
self.walkStateData.exit()
self.music.stop()
self.timer.destroy()
del self.timer
for panel in self.scorePanels:
panel.cleanup()
self.scorePanels = []
base.setCellsAvailable(base.rightCells, 1)
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed)
self.itPointer.reparentTo(hidden)
base.localAvatar.cameraIndex = 0
base.localAvatar.setCameraPositionByIndex(0)
def timerExpired(self):
self.notify.debug('local timer expired')
self.gameOver()
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def setIt(self, avId):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() != 'play':
self.notify.debug('Ignoring setIt after done playing')
return
self.itText.show()
self.notify.debug(str(avId) + ' is now it')
if avId == self.localAvId:
self.itText.setText(TTLocalizer.TagGameYouAreIt)
base.localAvatar.isIt = 1
base.localAvatar.controlManager.setSpeeds(OTPGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonJumpForce, OTPGlobals.ToonReverseSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
else:
self.itText.setText(TTLocalizer.TagGameSomeoneElseIsIt % self.getAvatarName(avId))
base.localAvatar.isIt = 0
base.localAvatar.setWalkSpeedNormal()
avatar = self.getAvatar(avId)
if avatar:
self.itPointer.reparentTo(avatar)
self.itPointer.setZ(avatar.getHeight())
base.playSfx(self.tagSfx)
toon = self.getAvatar(avId)
duration = 0.6
if not toon:
return
spinTrack = LerpHprInterval(toon.getGeomNode(), duration, Point3(0, 0, 0), startHpr=Point3(-5.0 * 360.0, 0, 0), blendType='easeOut')
growTrack = Parallel()
gs = 2.5
for hi in xrange(toon.headParts.getNumPaths()):
head = toon.headParts[hi]
growTrack.append(LerpScaleInterval(head, duration, Point3(gs, gs, gs)))
def bounceFunc(t, trajectory, node = toon.getGeomNode()):
node.setZ(trajectory.calcZ(t))
def bounceCleanupFunc(node = toon.getGeomNode(), z = toon.getGeomNode().getZ()):
node.setZ(z)
bounceTrack = Sequence()
startZ = toon.getGeomNode().getZ()
tLen = 0
zVel = 30
decay = 0.6
while tLen < duration:
trajectory = Trajectory.Trajectory(0, Point3(0, 0, startZ), Point3(0, 0, zVel), gravMult=5.0)
dur = trajectory.calcTimeOfImpactOnPlane(startZ)
if dur <= 0:
break
bounceTrack.append(LerpFunctionInterval(bounceFunc, fromData=0.0, toData=dur, duration=dur, extraArgs=[trajectory]))
tLen += dur
zVel *= decay
bounceTrack.append(Func(bounceCleanupFunc))
tagTrack = Sequence(Func(toon.animFSM.request, 'off'), Parallel(spinTrack, growTrack, bounceTrack), Func(toon.animFSM.request, 'Happy'))
self.tracks.append(tagTrack)
tagTrack.start()
if self.IT:
it = self.getAvatar(self.IT)
shrinkTrack = Parallel()
for hi in xrange(it.headParts.getNumPaths()):
head = it.headParts[hi]
scale = ToontownGlobals.toonHeadScales[it.style.getAnimal()]
shrinkTrack.append(LerpScaleInterval(head, duration, scale))
self.tracks.append(shrinkTrack)
shrinkTrack.start()
self.IT = avId
def acceptTagEvent(self, avId):
self.accept('enterdistAvatarCollNode-' + str(avId), self.sendTagIfIt, [avId])
def sendTagIfIt(self, avId, collisionEntry):
if base.localAvatar.isIt:
self.notify.debug('Tagging ' + str(avId))
self.sendUpdate('tag', [avId])
else:
self.notify.debug('Bumped ' + str(avId))
def setTreasureScore(self, scores):
if not self.hasLocalToon:
return
self.notify.debug('setTreasureScore: %s' % scores)
for i in xrange(len(self.scorePanels)):
self.scorePanels[i].setScore(scores[i])
|
"""Pipeline configuration parameters."""
from os.path import dirname, abspath, join
from sqlalchemy import create_engine
OS_TYPES_URL = ('https://raw.githubusercontent.com/'
'openspending/os-types/master/src/os-types.json')
PIPELINE_FILE = 'pipeline-spec.yaml'
SOURCE_DATAPACKAGE_FILE = 'source.datapackage.json'
SOURCE_FILE = 'source.description.yaml'
STATUS_FILE = 'pipeline-status.json'
SCRAPER_FILE = 'scraper.py'
SOURCE_ZIP = 'source.datapackage.zip'
FISCAL_ZIP_FILE = 'fiscal.datapackage.zip'
SOURCE_DB = 'source.db.xlsx'
DATAPACKAGE_FILE = 'datapackage.json'
ROOT_DIR = abspath(join(dirname(__file__), '..'))
DATA_DIR = join(ROOT_DIR, 'data')
SPECIFICATIONS_DIR = join(ROOT_DIR, 'specifications')
PROCESSORS_DIR = join(ROOT_DIR, 'common', 'processors')
CODELISTS_DIR = join(ROOT_DIR, 'codelists')
DROPBOX_DIR = join(ROOT_DIR, 'dropbox')
GEOCODES_FILE = join(ROOT_DIR, 'geography', 'geocodes.nuts.csv')
FISCAL_SCHEMA_FILE = join(SPECIFICATIONS_DIR, 'fiscal.schema.yaml')
FISCAL_MODEL_FILE = join(SPECIFICATIONS_DIR, 'fiscal.model.yaml')
FISCAL_METADATA_FILE = join(SPECIFICATIONS_DIR, 'fiscal.metadata.yaml')
DEFAULT_PIPELINE_FILE = join(SPECIFICATIONS_DIR, 'default-pipeline-spec.yaml')
TEMPLATE_SCRAPER_FILE = join(PROCESSORS_DIR, 'scraper_template.py')
DESCRIPTION_SCHEMA_FILE = join(SPECIFICATIONS_DIR, 'source.schema.json')
TEMPLATE_SOURCE_FILE = join(SPECIFICATIONS_DIR, SOURCE_FILE)
LOCAL_PATH_EXTRACTOR = 'ingest_local_file'
REMOTE_CSV_EXTRACTOR = 'simple_remote_source'
REMOTE_EXCEL_EXTRACTOR = 'stream_remote_excel'
DATAPACKAGE_MUTATOR = 'mutate_datapackage'
DB_URI = 'sqlite:///{}/metrics.sqlite'
DB_ENGINE = create_engine(DB_URI.format(ROOT_DIR))
VERBOSE = False
LOG_SAMPLE_SIZE = 15
JSON_FORMAT = dict(indent=4, ensure_ascii=False, default=repr)
SNIFFER_SAMPLE_SIZE = 5000
SNIFFER_MAX_FAILURE_RATIO = 0.01
IGNORED_FIELD_TAG = '_ignored'
UNKNOWN_FIELD_TAG = '_unknown'
WARNING_CUTOFF = 10
NUMBER_FORMATS = [
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ','},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': '.'},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ' '},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': ' '},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ''},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': '`'},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': '\''},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': ' '},
]
DATE_FORMATS = [
{'format': '%Y'},
{'format': '%d/%m/%Y'},
{'format': '%d//%m/%Y'},
{'format': '%d-%b-%Y'}, # abbreviated month
{'format': '%d-%b-%y'}, # abbreviated month
{'format': '%d. %b %y'}, # abbreviated month
{'format': '%b %y'}, # abbreviated month
{'format': '%d/%m/%y'},
{'format': '%d-%m-%Y'},
{'format': '%Y-%m-%d'},
{'format': '%y-%m-%d'},
{'format': '%y.%m.%d'},
{'format': '%Y.%m.%d'},
{'format': '%d.%m.%Y'},
{'format': '%d.%m.%y'},
{'format': '%d.%m.%Y %H:%M'},
{'format': '%Y-%m-%d %H:%M:%S'},
{'format': '%Y-%m-%d %H:%M:%S.%f'},
{'format': '%Y-%m-%dT%H:%M:%SZ'},
{'format': '%m/%d/%Y'},
{'format': '%m/%Y'},
{'format': '%y'},
]
|
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot.base_task import BaseTask
class IncubateEggs(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
last_km_walked = 0
def initialize(self):
self.ready_incubators = []
self.used_incubators = []
self.eggs = []
self.km_walked = 0
self.hatching_animation_delay = 4.20
self.max_iv = 45.0
self._process_config()
def _process_config(self):
self.longer_eggs_first = self.config.get("longer_eggs_first", True)
def work(self):
try:
self._check_inventory()
except:
return
if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:
self.used_incubators.sort(key=lambda x: x.get("km"))
km_left = self.used_incubators[0]['km']-self.km_walked
if km_left <= 0:
self._hatch_eggs()
else:
self.emit_event(
'next_egg_incubates',
formatted='Next egg ({km_needed} km) incubates in {distance_in_km:.2f} km',
data={
'km_needed': self.used_incubators[0]['km_needed'],
'distance_in_km': km_left
}
)
IncubateEggs.last_km_walked = self.km_walked
sorting = self.longer_eggs_first
self.eggs.sort(key=lambda x: x.get("km"), reverse=sorting)
if self.ready_incubators:
self._apply_incubators()
def _apply_incubators(self):
for incubator in self.ready_incubators:
if incubator.get('used', False):
continue
for egg in self.eggs:
if egg["used"] or egg["km"] == -1:
continue
self.emit_event(
'incubate_try',
level='debug',
formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}",
data={
'incubator_id': incubator['id'],
'egg_id': egg['id']
}
)
ret = self.bot.api.use_item_egg_incubator(
item_id=incubator["id"],
pokemon_id=egg["id"]
)
if ret:
code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0)
if code == 1:
self.emit_event(
'incubate',
formatted='Incubating a {distance_in_km} egg.',
data={
'distance_in_km': str(egg['km'])
}
)
egg["used"] = True
incubator["used"] = True
break
elif code == 5 or code == 7:
self.emit_event(
'incubator_already_used',
level='debug',
formatted='Incubator in use.',
)
incubator["used"] = True
break
elif code == 6:
self.emit_event(
'egg_already_incubating',
level='debug',
formatted='Egg already incubating',
)
egg["used"] = True
def _check_inventory(self, lookup_ids=[]):
inv = {}
response_dict = self.bot.get_inventory()
matched_pokemon = []
temp_eggs = []
temp_used_incubators = []
temp_ready_incubators = []
inv = reduce(
dict.__getitem__,
["responses", "GET_INVENTORY", "inventory_delta", "inventory_items"],
response_dict
)
for inv_data in inv:
inv_data = inv_data.get("inventory_item_data", {})
if "egg_incubators" in inv_data:
temp_used_incubators = []
temp_ready_incubators = []
incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
if isinstance(incubators, basestring): # checking for old response
incubators = [incubators]
for incubator in incubators:
if 'pokemon_id' in incubator:
start_km = incubator.get('start_km_walked', 9001)
km_walked = incubator.get('target_km_walked', 9001)
temp_used_incubators.append({
"id": incubator.get('id', -1),
"km": km_walked,
"km_needed": (km_walked - start_km)
})
else:
temp_ready_incubators.append({
"id": incubator.get('id', -1)
})
continue
if "pokemon_data" in inv_data:
pokemon = inv_data.get("pokemon_data", {})
if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon:
temp_eggs.append({
"id": pokemon.get("id", -1),
"km": pokemon.get("egg_km_walked_target", -1),
"used": False
})
elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:
pokemon.update({
"iv": [
pokemon.get('individual_attack', 0),
pokemon.get('individual_defense', 0),
pokemon.get('individual_stamina', 0)
]})
matched_pokemon.append(pokemon)
continue
if "player_stats" in inv_data:
self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
if temp_used_incubators:
self.used_incubators = temp_used_incubators
if temp_ready_incubators:
self.ready_incubators = temp_ready_incubators
if temp_eggs:
self.eggs = temp_eggs
return matched_pokemon
def _hatch_eggs(self):
response_dict = self.bot.api.get_hatched_eggs()
log_color = 'green'
try:
result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict)
except KeyError:
return
pokemon_ids = []
if 'pokemon_id' in result:
pokemon_ids = [id for id in result['pokemon_id']]
stardust = result.get('stardust_awarded', "error")
candy = result.get('candy_awarded', "error")
xp = result.get('experience_awarded', "error")
sleep(self.hatching_animation_delay)
self.bot.latest_inventory = None
try:
pokemon_data = self._check_inventory(pokemon_ids)
for pokemon in pokemon_data:
# pokemon ids seem to be offset by one
if pokemon['pokemon_id']!=-1:
pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']
else:
pokemon['name'] = "error"
except:
pokemon_data = [{"name":"error","cp":"error","iv":"error"}]
if not pokemon_ids or pokemon_data[0]['name'] == "error":
self.emit_event(
'egg_hatched',
data={
'pokemon': 'error',
'cp': 'error',
'iv': 'error',
'exp': 'error',
'stardust': 'error',
'candy': 'error',
}
)
return
for i in range(len(pokemon_data)):
msg = "Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies."
self.emit_event(
'egg_hatched',
formatted=msg,
data={
'pokemon': pokemon_data[i]['name'],
'cp': pokemon_data[i]['cp'],
'iv': "{} {}".format(
"/".join(map(str, pokemon_data[i]['iv'])),
sum(pokemon_data[i]['iv'])/self.max_iv
),
'exp': xp[i],
'stardust': stardust[i],
'candy': candy[i],
}
)
|
"""
Verify data doesn't have basic mistakes, like empty text fields or empty label
candidates.
```shell
parlai verify_data --task convai2 --datatype valid
```
"""
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.message import Message
from parlai.core.params import ParlaiParser
from parlai.utils.misc import TimeLogger, warn_once
from parlai.core.worlds import create_task
from parlai.core.script import ParlaiScript, register_script
import parlai.utils.logging as logging
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, True, 'Check tasks for common errors')
# Get command line arguments
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
parser.add_argument('-d', '--display-examples', type='bool', default=False)
parser.set_defaults(datatype='train:stream:ordered')
return parser
def report(world, counts, log_time):
report = world.report()
log = {
'missing_text': counts['missing_text'],
'missing_labels': counts['missing_labels'],
'missing_label_candidates': counts['missing_label_candidates'],
'empty_string_label_candidates': counts['empty_string_label_candidates'],
'label_candidates_with_missing_label': counts[
'label_candidates_with_missing_label'
],
'did_not_return_message': counts['did_not_return_message'],
}
text, log = log_time.log(report['exs'], world.num_examples(), log)
return text, log
def warn(txt, act, opt):
if opt.get('display_examples'):
print(txt + ":\n" + str(act))
else:
warn_once(txt)
def verify(opt):
if opt['datatype'] == 'train':
logging.warning("changing datatype from train to train:ordered")
opt['datatype'] = 'train:ordered'
opt.log()
# create repeat label agent and assign it to the specified task
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
log_every_n_secs = opt.get('log_every_n_secs', -1)
if log_every_n_secs <= 0:
log_every_n_secs = float('inf')
log_time = TimeLogger()
counts = {}
counts['missing_text'] = 0
counts['missing_labels'] = 0
counts['missing_label_candidates'] = 0
counts['empty_string_label_candidates'] = 0
counts['label_candidates_with_missing_label'] = 0
counts['did_not_return_message'] = 0
# Show some example dialogs.
while not world.epoch_done():
world.parley()
act = world.acts[0]
if not isinstance(act, Message):
counts['did_not_return_message'] += 1
if 'text' not in act and 'image' not in act:
warn("warning: missing text field:\n", act, opt)
counts['missing_text'] += 1
if 'labels' not in act and 'eval_labels' not in act:
warn("warning: missing labels/eval_labels field:\n", act, opt)
counts['missing_labels'] += 1
else:
if 'label_candidates' not in act:
counts['missing_label_candidates'] += 1
else:
labels = act.get('labels', act.get('eval_labels'))
is_label_cand = {}
for l in labels:
is_label_cand[l] = False
for c in act['label_candidates']:
if c == '':
warn("warning: empty string label_candidate:\n", act, opt)
counts['empty_string_label_candidates'] += 1
if c in is_label_cand:
if is_label_cand[c] is True:
warn(
"warning: label mentioned twice in candidate_labels:\n",
act,
opt,
)
is_label_cand[c] = True
for _, has in is_label_cand.items():
if has is False:
warn("warning: label missing in candidate_labels:\n", act, opt)
counts['label_candidates_with_missing_label'] += 1
if log_time.time() > log_every_n_secs:
text, log = report(world, counts, log_time)
print(text)
try:
# print dataset size if available
logging.info(
f'Loaded {world.num_episodes()} episodes with a '
f'total of {world.num_examples()} examples'
)
except AttributeError:
pass
counts['exs'] = int(world.report()['exs'])
return counts
def verify_data(opt):
counts = verify(opt)
print(counts)
return counts
@register_script('verify_data', hidden=True)
class VerifyData(ParlaiScript):
@classmethod
def setup_args(cls):
return setup_args()
def run(self):
return verify_data(self.opt)
if __name__ == '__main__':
VerifyData.main()
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils.user import get_enabled_system_users
from frappe.utils import cint
@frappe.whitelist()
def get_list(arg=None):
"""get list of messages"""
frappe.form_dict['limit_start'] = int(frappe.form_dict['limit_start'])
frappe.form_dict['limit_page_length'] = int(frappe.form_dict['limit_page_length'])
frappe.form_dict['user'] = frappe.session['user']
# set all messages as read
frappe.db.begin()
frappe.db.sql("""UPDATE `tabCommunication` set seen = 1
where
communication_type in ('Chat', 'Notification')
and reference_doctype = 'User'
and reference_name = %s""", frappe.session.user)
delete_notification_count_for("Messages")
frappe.local.flags.commit = True
if frappe.form_dict['contact'] == frappe.session['user']:
# return messages
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and (owner=%(contact)s
or reference_name=%(user)s
or owner=reference_name)
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
else:
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and ((owner=%(contact)s and reference_name=%(user)s)
or (owner=%(contact)s and reference_name=%(contact)s))
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def get_active_users():
data = frappe.db.sql("""select name,
(select count(*) from tabSessions where user=tabUser.name
and timediff(now(), lastupdate) < time("01:00:00")) as has_session
from tabUser
where enabled=1 and
ifnull(user_type, '')!='Website User' and
name not in ({})
order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
# make sure current user is at the top, using has_session = 100
users = [d.name for d in data]
if frappe.session.user in users:
data[users.index(frappe.session.user)]["has_session"] = 100
else:
# in case of administrator
data.append({"name": frappe.session.user, "has_session": 100})
return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
"""post message"""
d = frappe.new_doc('Communication')
d.communication_type = 'Notification' if parenttype else 'Chat'
d.subject = subject
d.content = txt
d.reference_doctype = 'User'
d.reference_name = contact
d.sender = frappe.session.user
d.insert(ignore_permissions=True)
delete_notification_count_for("Messages")
if notify and cint(notify):
if contact==frappe.session.user:
_notify([user.name for user in get_enabled_system_users()], txt)
else:
_notify(contact, txt, subject)
return d
@frappe.whitelist()
def delete(arg=None):
frappe.get_doc("Communication", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
from frappe.utils import get_fullname, get_url
try:
if not isinstance(contact, list):
contact = [frappe.db.get_value("User", contact, "email") or contact]
        frappe.sendmail(
recipients=contact,
sender= frappe.db.get_value("User", frappe.session.user, "email"),
subject=subject or "New Message from " + get_fullname(frappe.session.user),
message=frappe.get_template("templates/emails/new_message.html").render({
"from": get_fullname(frappe.session.user),
"message": txt,
"link": get_url()
}),
bulk=True)
except frappe.OutgoingEmailError:
pass
|
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.core.management.base import BaseCommand
from optparse import make_option
from py3compat import PY2
from snisi_core.models.Entities import AdministrativeEntity as AEntity
if PY2:
import unicodecsv as csv
else:
import csv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
help='CSV file',
action='store',
dest='filename'),
)
def handle(self, *args, **options):
headers = ['name', 'region', 'cercle_commune', 'commune_quartier']
f = open(options.get('filename'), 'w')
csv_writer = csv.DictWriter(f, fieldnames=headers)
csv_writer.writeheader()
csv_writer.writerow({
'name': "label",
'region': "Région",
'cercle_commune': "Cercle",
'commune_quartier': "Commune",
})
for region in AEntity.objects.filter(type__slug='region'):
logger.info(region)
is_bko = region.name == 'BAMAKO'
for cercle in AEntity.objects.filter(parent=region):
logger.info(cercle)
for commune in AEntity.objects.filter(parent=cercle):
logger.info(commune)
if not is_bko:
csv_writer.writerow({
'name': "choice_label",
'region': region.name,
'cercle_commune': cercle.name,
'commune_quartier': commune.name
})
continue
for vfq in AEntity.objects.filter(parent=commune):
for v in (region, cercle, commune, vfq):
if not len(v.name.strip()):
continue
csv_writer.writerow({
'name': "choice_label",
'region': region.name,
'cercle_commune': commune.name,
'commune_quartier': vfq.name
})
f.close()
|
import logging
import SoftLayer
client = SoftLayer.Client()
class IterableItems:
u"""Pagenate されているリストを全体を回せるようにする"""
def __init__(self, client, limit=10):
self.master_account = client['Account']
self.offset = 0
self.limit = limit
self.define_fetch_method()
self.fetched = self.fetch()
def define_fetch_method(self):
u"""継承側クラスで実装すること"""
# self.fetch_method に適切な pagenate メソッドを設定
raise NotImpementedError("Not implemented yet.")
def fetch(self):
items = self.fetch_method(limit=self.limit, offset=self.offset)
self.offset += self.limit
return items
def __iter__(self):
return self
def __next__(self):
if len(self.fetched) < 1:
raise StopIteration
item = self.fetched.pop()
if len(self.fetched) < 1: # prefetch for next
self.fetched = self.fetch()
return item
class Users(IterableItems):
u"""List of SoftLayer_User_Customer"""
def define_fetch_method(self):
self.fetch_method = self.master_account.getUsers
class VirtualGuests(IterableItems):
u"""List of SoftLayer_Virtual_Guest"""
def define_fetch_method(self):
self.fetch_method = self.master_account.getVirtualGuests
try:
master_account = client['Account']
print("## Account information ##")
user_mask="id, firstName, lastName, email"
account_info = master_account.getObject(mask=user_mask)
print(account_info)
# all child users
#for user in master_account.getUsers(limit=10, offset=0):
print("## Users ##");
for user in Users(client):
print("id:%d, %s" % (user['id'], user['username']))
# Virtual guest OSes
# for vg in client['Account'].getVirtualGuests(limit=10, offset=0):
print("## Virtual guests ##");
for vg in VirtualGuests(client):
print("AccountId=%s, ID=%d, hostname=%s"
% (vg['accountId'], vg['id'], vg['hostname']))
print("## Instances ##");
cci_manager = SoftLayer.CCIManager(client)
for cci in cci_manager.list_instances():
print("FQDN=%s, IP_addrs=%s, %s"
% (cci['fullyQualifiedDomainName'], cci['primaryIpAddress'], cci['primaryBackendIpAddress']))
print("## Billing items ##")
billing_mask = "id, parentId, description, currentHourlyCharge"
print(master_account.getAllBillingItems(mask=billing_mask))
except SoftLayer.SoftLayerAPIError as e:
print("Unable to retrieve account information faultCode%s, faultString=%s"
% (e.faultCode, e.faultString))
exit(1)
|
from .View import View
class MethuselahView(View):
type = "Methuselah"
trans = {
"stableAfter": {"pick": "l"}
}
|
from runner.koan import *
from collections import Counter
def score(dice):
'''
    Calculate the scores for results of up to five dice rolls
'''
return sum((score_of_three(k) * (v//3) + score_of_one(k) * (v%3) for k, v in Counter(dice).items()))
def score_of_three(num):
'''
Calculate score for set of three
'''
if num == 1:
return 1000
else:
return num*100
def score_of_one(num):
'''
Calculate score for a roll not in a set of three
'''
if num == 1:
return 100
elif num == 5:
return 50
else:
return 0
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1,5,5,1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2,3,4,6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1,1,1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2,2,2]))
self.assertEqual(300, score([3,3,3]))
self.assertEqual(400, score([4,4,4]))
self.assertEqual(500, score([5,5,5]))
self.assertEqual(600, score([6,6,6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2,5,2,2,3]))
self.assertEqual(550, score([5,5,5,5]))
self.assertEqual(1150, score([1,1,1,5,1]))
def test_ones_not_left_out(self):
self.assertEqual(300, score([1,2,2,2]))
self.assertEqual(350, score([1,5,2,2,2]))
|
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FeedbackList(ListResource):
def __init__(self, version, account_sid, message_sid):
"""
Initialize the FeedbackList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param message_sid: The message_sid
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackList
"""
super(FeedbackList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'message_sid': message_sid,
}
self._uri = '/Accounts/{account_sid}/Messages/{message_sid}/Feedback.json'.format(**self._solution)
def create(self, outcome=values.unset):
"""
Create a new FeedbackInstance
:param FeedbackInstance.Outcome outcome: The outcome
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
data = values.of({
'Outcome': outcome,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackList>'
class FeedbackPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the FeedbackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param message_sid: The message_sid
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackPage
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackPage
"""
super(FeedbackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FeedbackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackPage>'
class FeedbackInstance(InstanceResource):
class Outcome(object):
CONFIRMED = "confirmed"
UMCONFIRMED = "umconfirmed"
def __init__(self, version, payload, account_sid, message_sid):
"""
Initialize the FeedbackInstance
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
super(FeedbackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'message_sid': payload['message_sid'],
'outcome': payload['outcome'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'message_sid': message_sid,
}
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def message_sid(self):
"""
:returns: The message_sid
:rtype: unicode
"""
return self._properties['message_sid']
@property
def outcome(self):
"""
:returns: The outcome
:rtype: FeedbackInstance.Outcome
"""
return self._properties['outcome']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def uri(self):
"""
:returns: The uri
:rtype: unicode
"""
return self._properties['uri']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackInstance>'
|
""" Tests barbante.api.generate_product_templates_tfidf.
"""
import json
import nose.tools
import barbante.api.generate_product_templates_tfidf as script
import barbante.utils.logging as barbante_logging
import barbante.tests as tests
log = barbante_logging.get_logger(__name__)
def test_script():
""" Tests a call to script barbante.api.generate_product_templates_tfidf.
"""
result = script.main([tests.TEST_ENV])
log.debug(result)
result_json = json.dumps(result)
nose.tools.ok_(result_json) # a well-formed json is enough
if __name__ == '__main__':
test_script()
|
param = dict(
useAIon=True,
verbose=False,
chargePreXlinkIons=[1, 3],
chargePostXlinkIons=[2, 5],
basepeakint = 100.0,
dynamicrange = 0.001,
missedsites = 2,
minlength = 4,
maxlength = 51,
modRes = '',
modMass = 0.0,
linkermass = 136.10005,
ms1tol = dict(measure='ppm', val=5),
ms2tol = dict(measure='da', val=0.01),
minmz = 200,
maxmz = 2000,
mode = 'conservative',
patternstring = '^[ACDEFGHIKLMNPQRSTVWY]*K[ACDEFGHIKLMNPQRSTVWY]+$',
fixedMod = [],
neutralloss=dict(
h2oLoss=dict(
mass=-18.010565,
aa=set('ACDEFGHIKLMNPQRSTVWY')),
nh3Loss=dict(
mass=-17.026549,
aa=set('ACDEFGHIKLMNPQRSTVWY')),
h2oGain=dict(
mass=18.010565,
aa=set('ACDEFGHIKLMNPQRSTVWY'))))
mass = dict(
A=71.037114,
R=156.101111,
N=114.042927,
D=115.026943,
C=103.009184,
E=129.042593,
Q=128.058578,
G=57.021464,
H=137.058912,
I=113.084064,
L=113.084064,
K=128.094963,
M=131.040485,
F=147.068414,
P=97.052764,
S=87.032028,
T=101.047678,
W=186.079313,
Y=163.063329,
V=99.068414,
Hatom=1.007825032,
Oatom=15.99491462,
neutronmass = 1.008701,
BIonRes=1.0078246,
AIonRes=-26.9870904,
YIonRes=19.0183888,
isotopeInc = [1.008701/4, 1.008701/3, 1.008701/2, 1.008701/1])
modification = dict(
position=[],
deltaMass=[])
for i in range(len(param['fixedMod'])):
aa = param['fixedMod'][i][0]
delta = param['fixedMod'][i][1]
mass[aa] += delta
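# Illustrative note (not part of the original configuration): each fixedMod entry is a
# (residue, mass delta) pair folded into the residue mass table by the loop above. For
# example, carbamidomethylation of cysteine adds ~57.021464 Da:
#     param['fixedMod'] = [('C', 57.021464)]
# would leave mass['C'] at 103.009184 + 57.021464 = 160.030648 after the loop runs.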
|
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class IntegrationEntity(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'integration_id': 'str',
'created': 'datetime'
}
attribute_map = {
'integration_id': 'integrationId',
'created': 'created'
}
def __init__(self, integration_id=None, created=None, local_vars_configuration=None): # noqa: E501
"""IntegrationEntity - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._integration_id = None
self._created = None
self.discriminator = None
self.integration_id = integration_id
self.created = created
@property
def integration_id(self):
"""Gets the integration_id of this IntegrationEntity. # noqa: E501
The integration ID for this entity sent to and used in the Talon.One system. # noqa: E501
:return: The integration_id of this IntegrationEntity. # noqa: E501
:rtype: str
"""
return self._integration_id
@integration_id.setter
def integration_id(self, integration_id):
"""Sets the integration_id of this IntegrationEntity.
The integration ID for this entity sent to and used in the Talon.One system. # noqa: E501
:param integration_id: The integration_id of this IntegrationEntity. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and integration_id is None: # noqa: E501
raise ValueError("Invalid value for `integration_id`, must not be `None`") # noqa: E501
self._integration_id = integration_id
@property
def created(self):
"""Gets the created of this IntegrationEntity. # noqa: E501
The exact moment this entity was created. # noqa: E501
:return: The created of this IntegrationEntity. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this IntegrationEntity.
The exact moment this entity was created. # noqa: E501
:param created: The created of this IntegrationEntity. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IntegrationEntity):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IntegrationEntity):
return True
return self.to_dict() != other.to_dict()
|
from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
    # 6367 km is an approximate Earth radius (the mean radius is about 6371 km)
    km = 6367 * c
return km
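# Usage sketch (coordinates are illustrative): Paris (2.35 E, 48.86 N) to
# Berlin (13.40 E, 52.52 N) is roughly 880 km along the great circle.
if __name__ == '__main__':
    print("%.1f km" % haversine(2.35, 48.86, 13.40, 52.52))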
|
import os
import web
import simplejson as json
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.lib.const import VIRT_COMMAND_APPLY_SNAPSHOT
from karesansui.lib.utils import is_param, is_int
from karesansui.lib.virt.snapshot import KaresansuiVirtSnapshot
from karesansui.db.access.machine import findbyguest1
from karesansui.db.access.snapshot import findbyname_guestby1 as s_findbyname_guestby1
from karesansui.db.access._2pysilhouette import save_job_collaboration
from karesansui.db.access.machine2jobgroup import new as m2j_new
from karesansui.db.model._2pysilhouette import Job, JobGroup
from pysilhouette.command import dict2command
class GuestBy1CurrentSnapshot(Rest):
@auth
def _PUT(self, *param, **params):
(host_id, guest_id) = self.chk_guestby1(param)
if guest_id is None: return web.notfound()
if is_param(self.input, 'id') is False \
or is_int(self.input.id) is False:
return web.badrequest("Request data is invalid.")
snapshot_id = str(self.input.id)
snapshot = s_findbyname_guestby1(self.orm, snapshot_id, guest_id)
if snapshot is None:
pass
# ignore snapshots that is not in database.
#return web.badrequest("Request data is invalid.")
model = findbyguest1(self.orm, guest_id)
kvs = KaresansuiVirtSnapshot(readonly=False)
snapshot_list = []
try:
domname = kvs.kvc.uuid_to_domname(model.uniq_key)
if not domname: return web.notfound()
self.view.is_creatable = kvs.isSupportedDomain(domname)
try:
snapshot_list = kvs.listNames(domname)[domname]
except:
pass
finally:
kvs.finish()
        if snapshot_id not in snapshot_list:
self.logger.debug(_("The specified snapshot does not exist in database. - %s") % snapshot_id)
# ignore snapshots that is not in database.
#return web.notfound()
action_cmd = dict2command(
"%s/%s" % (karesansui.config['application.bin.dir'],
VIRT_COMMAND_APPLY_SNAPSHOT),
{"name" : domname, "id" : snapshot_id})
cmdname = 'Apply Snapshot'
_jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
_job = Job('%s command' % cmdname, 0, action_cmd)
_jobgroup.jobs.append(_job)
_machine2jobgroup = m2j_new(machine=model,
jobgroup_id=-1,
uniq_key=karesansui.sheconf['env.uniqkey'],
created_user=self.me,
modified_user=self.me,
)
save_job_collaboration(self.orm,
self.pysilhouette.orm,
_machine2jobgroup,
_jobgroup,
)
self.view.currentsnapshot = snapshot
return web.accepted(url=web.ctx.path)
urls = (
    r'/host/(\d+)/guest/(\d+)/currentsnapshot/?(\.part)?$', GuestBy1CurrentSnapshot,
)
|
class Stack (object):
def __init__ (self):
self.stack = []
def push (self, data):
self.stack.append(data)
def peek (self):
if self.isEmpty():
return None
return self.stack[-1]
def pop (self):
if self.isEmpty():
return None
return self.stack.pop()
def isEmpty (self):
return len(self.stack) == 0
def __str__ (self):
return ' '.join(str(x) for x in self.stack)
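# Small usage sketch (not part of the original module), showing LIFO behaviour:
if __name__ == '__main__':
    s = Stack()
    for item in (1, 2, 3):
        s.push(item)
    print(s)            # 1 2 3
    print(s.peek())     # 3
    print(s.pop())      # 3
    print(s.pop())      # 2
    print(s.isEmpty())  # False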
|
import os,sys
folder = "/media/kentir1/Development/Linux_Program/Fundkeep/"
def makinGetYear():
return os.popen("date +'%Y'").read()[:-1]
def makinGetMonth():
return os.popen("date +'%m'").read()[:-1]
def makinGetDay():
return os.popen("date +'%d'").read()[:-1]
def makinGetPrevYear(daypassed):
return os.popen("date --date='"+str(daypassed)+" day ago' +'%Y'").read()[:-1]
def makinGetPrevMonth(daypassed):
return os.popen("date --date='"+str(daypassed)+" day ago' +'%m'").read()[:-1]
def makinGetPrevDay(daypassed):
return os.popen("date --date='"+str(daypassed)+" day ago' +'%d'").read()[:-1]
f = open(folder+"data/last_entry","r")
le = f.read()
le_y=le[:4]
le_m=le[4:6]
le_d=le[6:]
os.system("gedit "+folder+"var/input")
f = open(folder+"var/input","r")
data = f.read()
f.close()
balance_out = int(data[:data.find(" ")])
balance_ket = data[data.find(" ")+1:-1]
print balance_ket
os.system("mkdir "+folder+"data")
os.system("mkdir "+folder+"data/"+makinGetYear())
os.system("mkdir "+folder+"data/"+makinGetYear()+"/"+makinGetMonth())
os.system("mkdir "+folder+"data/"+makinGetYear()+"/"+makinGetMonth()+"/"+makinGetDay())
balance_before = 0
dapet = 0
while (dapet == 0):
dpassed = 1
try:
f = open(folder+"data/"
+makinGetPrevYear(dpassed)
+"/"
+makinGetPrevMonth(dpassed)
+"/"
+makinGetPrevDay(dpassed)
+"/balance_after","r")
if (makinGetDay()=="01"):
t_day = 31
t_bulan = ("0"+str(int(makinGetMonth())-1))[-2:]
t_tahun = makinGetYear()
                if (int(makinGetMonth())==1):
                    t_bulan = "12"
                    t_tahun = str(int(makinGetYear())-1)
print t_bulan
dapet = 0
while (dapet==0):
try:
f = open(folder+"data/"+t_tahun+"/"+t_bulan+"/"+("0"+str(t_day))[-2:]+"/balance_after","r")
print t_day
dapet = 1
balance_before = int(f.read())
except:
t_day = t_day - 1
f.close()
else:
t_day = int(makinGetDay())-1
#~ t_bulan = ("0"+str(int(makinGetMonth())))[-2:]
t_bulan = makinGetMonth()
f = open(folder+"data/"+makinGetYear()+"/"+t_bulan+"/"+("0"+str(t_day))[-2:]+"/balance_after","r")
balance_before = int(f.read())
try:
f = open(folder+"data/"+t_tahun+"/"+t_bulan+"/"+("0"+str(t_day))[-2:]+"/balance_after","r")
except:
|
age = 28
print("Greetings on your " + str(age) + "th birthday")
|
from cachy import CacheManager
from cachy.serializers import PickleSerializer
class Cache(CacheManager):
_serializers = {
'pickle': PickleSerializer()
}
|
import unittest
import os
import os.path
import json
data_path = os.path.dirname(__file__)
os.environ['TIMEVIS_CONFIG'] = os.path.join(data_path, 'config.py')
import timevis
class TestExperiment(unittest.TestCase):
def setUp(self):
self.app = timevis.app.test_client()
self.url = '/api/v2/experiment'
def test_post(self):
name = os.path.join(data_path, 'post_exp.json')
with open(name) as file:
obj = json.load(file)
resp = self.app.post(self.url, data=json.dumps(obj),
content_type='application/json')
self.assertIsNotNone(resp.data)
def test_get(self):
resp = self.app.get(self.url)
self.assertIsNotNone(resp.data)
def test_put(self):
name = os.path.join(data_path, 'put_exp.json')
with open(name) as file:
obj = json.load(file)
resp = self.app.put(self.url, data=json.dumps(obj),
content_type='application/json')
self.assertIsNotNone(resp.data)
if __name__ == '__main__':
unittest.main()
|
import os
import unittest
from hashlib import md5
from django.conf import settings
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.diffviewer.diffutils import patch
from reviewboard.diffviewer.testing.mixins import DiffParserTestingMixin
from reviewboard.scmtools.core import (Branch, Commit, Revision, HEAD,
PRE_CREATION)
from reviewboard.scmtools.errors import SCMError, FileNotFoundError
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.scmtools.svn import SVNTool, recompute_svn_backend
from reviewboard.scmtools.svn.utils import (collapse_svn_keywords,
has_expanded_svn_keywords)
from reviewboard.scmtools.tests.testcases import SCMTestCase
from reviewboard.testing.testcase import TestCase
class _CommonSVNTestCase(DiffParserTestingMixin, SpyAgency, SCMTestCase):
"""Common unit tests for Subversion.
This is meant to be subclassed for each backend that wants to run
the common set of tests.
"""
backend = None
backend_name = None
fixtures = ['test_scmtools']
__test__ = False
def setUp(self):
super(_CommonSVNTestCase, self).setUp()
self._old_backend_setting = settings.SVNTOOL_BACKENDS
settings.SVNTOOL_BACKENDS = [self.backend]
recompute_svn_backend()
self.svn_repo_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'..', 'testdata', 'svn_repo'))
self.svn_ssh_path = ('svn+ssh://localhost%s'
% self.svn_repo_path.replace('\\', '/'))
self.repository = Repository.objects.create(
name='Subversion SVN',
path='file://%s' % self.svn_repo_path,
tool=Tool.objects.get(name='Subversion'))
try:
self.tool = self.repository.get_scmtool()
except ImportError:
raise unittest.SkipTest('The %s backend could not be used. A '
'dependency may be missing.'
% self.backend)
assert self.tool.client.__class__.__module__ == self.backend
def tearDown(self):
super(_CommonSVNTestCase, self).tearDown()
settings.SVNTOOL_BACKENDS = self._old_backend_setting
recompute_svn_backend()
def shortDescription(self):
desc = super(_CommonSVNTestCase, self).shortDescription()
desc = desc.replace('<backend>', self.backend_name)
return desc
def test_get_repository_info(self):
"""Testing SVN (<backend>) get_repository_info"""
info = self.tool.get_repository_info()
self.assertIn('uuid', info)
self.assertIsInstance(info['uuid'], str)
self.assertEqual(info['uuid'], '41215d38-f5a5-421f-ba17-e0be11e6c705')
self.assertIn('root_url', info)
self.assertIsInstance(info['root_url'], str)
self.assertEqual(info['root_url'], self.repository.path)
self.assertIn('url', info)
self.assertIsInstance(info['url'], str)
self.assertEqual(info['url'], self.repository.path)
def test_ssh(self):
"""Testing SVN (<backend>) with a SSH-backed Subversion repository"""
self._test_ssh(self.svn_ssh_path, 'trunk/doc/misc-docs/Makefile')
def test_ssh_with_site(self):
"""Testing SVN (<backend>) with a SSH-backed Subversion repository
with a LocalSite
"""
self._test_ssh_with_site(self.svn_ssh_path,
'trunk/doc/misc-docs/Makefile')
def test_get_file(self):
"""Testing SVN (<backend>) get_file"""
tool = self.tool
expected = (b'include ../tools/Makefile.base-vars\n'
b'NAME = misc-docs\n'
b'OUTNAME = svn-misc-docs\n'
b'INSTALL_DIR = $(DESTDIR)/usr/share/doc/subversion\n'
b'include ../tools/Makefile.base-rules\n')
# There are 3 versions of this test in order to get 100% coverage of
# the svn module.
rev = Revision('2')
filename = 'trunk/doc/misc-docs/Makefile'
value = tool.get_file(filename, rev)
self.assertIsInstance(value, bytes)
self.assertEqual(value, expected)
value = tool.get_file('/%s' % filename, rev)
self.assertIsInstance(value, bytes)
self.assertEqual(value, expected)
value = tool.get_file('%s/%s' % (self.repository.path, filename), rev)
self.assertIsInstance(value, bytes)
self.assertEqual(value, expected)
with self.assertRaises(FileNotFoundError):
tool.get_file('')
def test_file_exists(self):
"""Testing SVN (<backend>) file_exists"""
tool = self.tool
self.assertTrue(tool.file_exists('trunk/doc/misc-docs/Makefile'))
self.assertFalse(tool.file_exists('trunk/doc/misc-docs/Makefile2'))
with self.assertRaises(FileNotFoundError):
tool.get_file('hello', PRE_CREATION)
def test_get_file_with_special_url_chars(self):
"""Testing SVN (<backend>) get_file with filename containing
characters that are special in URLs and repository path as a URI
"""
value = self.tool.get_file('trunk/crazy& ?#.txt', Revision('12'))
self.assertTrue(isinstance(value, bytes))
self.assertEqual(value, b'Lots of characters in this one.\n')
def test_file_exists_with_special_url_chars(self):
"""Testing SVN (<backend>) file_exists with filename containing
characters that are special in URLs
"""
self.assertTrue(self.tool.file_exists('trunk/crazy& ?#.txt',
Revision('12')))
# These should not crash. We'll be testing both file:// URLs
# (which fail for anything lower than ASCII code 32) and for actual
# URLs (which support all characters).
self.assertFalse(self.tool.file_exists('trunk/%s.txt' % ''.join(
chr(c)
for c in range(32, 128)
)))
self.tool.client.repopath = 'svn+ssh://localhost:0/svn'
try:
self.assertFalse(self.tool.file_exists('trunk/%s.txt' % ''.join(
chr(c)
for c in range(128)
)))
except SCMError:
# Couldn't connect. Valid result.
pass
def test_normalize_path_with_special_chars_and_remote_url(self):
"""Testing SVN (<backend>) normalize_path with special characters
and remote URL
"""
client = self.tool.client
client.repopath = 'svn+ssh://example.com/svn'
path = client.normalize_path(''.join(
chr(c)
for c in range(128)
))
# This URL was generated based on modified code that directly used
        # Subversion's lookup table explicitly, ensuring we're getting the
# results we want from urllib.quote() and our list of safe characters.
self.assertEqual(
path,
"svn+ssh://example.com/svn/%00%01%02%03%04%05%06%07%08%09%0A"
"%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E"
"%1F%20!%22%23$%25&'()*+,-./0123456789:%3B%3C=%3E%3F@ABCDEFGH"
"IJKLMNOPQRSTUVWXYZ%5B%5C%5D%5E_%60abcdefghijklmnopqrstuvwxyz"
"%7B%7C%7D~%7F")
def test_normalize_path_with_special_chars_and_file_url(self):
"""Testing SVN (<backend>) normalize_path with special characters
and local file:// URL
"""
client = self.tool.client
client.repopath = 'file:///tmp/svn'
path = client.normalize_path(''.join(
chr(c)
for c in range(32, 128)
))
# This URL was generated based on modified code that directly used
        # Subversion's lookup table explicitly, ensuring we're getting the
# results we want from urllib.quote() and our list of safe characters.
self.assertEqual(
path,
"file:///tmp/svn/%20!%22%23$%25&'()*+,-./0123456789:%3B%3C=%3E"
"%3F@ABCDEFGHIJKLMNOPQRSTUVWXYZ%5B%5C%5D%5E_%60abcdefghijklmno"
"pqrstuvwxyz%7B%7C%7D~%7F")
# This should provide a reasonable error for each code in 0..32.
for i in range(32):
c = chr(i)
message = (
'Invalid character code %s found in path %r.'
% (i, c)
)
with self.assertRaisesMessage(SCMError, message):
client.normalize_path(c)
def test_normalize_path_with_absolute_repo_path(self):
"""Testing SVN (<backend>) normalize_path with absolute path"""
client = self.tool.client
client.repopath = '/var/lib/svn'
path = '/var/lib/svn/foo/bar'
self.assertEqual(client.normalize_path(path), path)
client.repopath = 'svn+ssh://example.com/svn/'
path = 'svn+ssh://example.com/svn/foo/bar'
self.assertEqual(client.normalize_path(path), path)
def test_normalize_path_with_rel_path(self):
"""Testing SVN (<backend>) normalize_path with relative path"""
client = self.tool.client
client.repopath = 'svn+ssh://example.com/svn'
self.assertEqual(client.normalize_path('foo/bar'),
'svn+ssh://example.com/svn/foo/bar')
self.assertEqual(client.normalize_path('/foo/bar'),
'svn+ssh://example.com/svn/foo/bar')
self.assertEqual(client.normalize_path('//foo/bar'),
'svn+ssh://example.com/svn/foo/bar')
self.assertEqual(client.normalize_path('foo&/b ar?/#file#.txt'),
'svn+ssh://example.com/svn/foo&/b%20ar%3F/'
'%23file%23.txt')
def test_revision_parsing(self):
"""Testing SVN (<backend>) revision number parsing"""
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'(working copy)'),
(b'', HEAD))
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b' (revision 0)'),
(b'', PRE_CREATION))
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'(revision 1)'),
(b'', b'1'))
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'(revision 23)'),
(b'', b'23'))
# Fix for bug 2176
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'\t(revision 4)'),
(b'', b'4'))
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision=b'2007-06-06 15:32:23 UTC (rev 10958)'),
(b'', b'10958'))
# Fix for bug 2632
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'(revision )'),
(b'', PRE_CREATION))
with self.assertRaises(SCMError):
self.tool.parse_diff_revision(filename=b'',
revision=b'hello')
# Verify that 'svn diff' localized revision strings parse correctly.
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision='(revisión: 5)'.encode('utf-8')),
(b'', b'5'))
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision='(リビジョン 6)'.encode('utf-8')),
(b'', b'6'))
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision='(版本 7)'.encode('utf-8')),
(b'', b'7'))
def test_revision_parsing_with_nonexistent(self):
"""Testing SVN (<backend>) revision parsing with "(nonexistent)"
revision indicator
"""
# English
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'(nonexistent)'),
(b'', PRE_CREATION))
# German
self.assertEqual(
self.tool.parse_diff_revision(filename=b'',
revision=b'(nicht existent)'),
(b'', PRE_CREATION))
# Simplified Chinese
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision='(不存在的)'.encode('utf-8')),
(b'', PRE_CREATION))
def test_revision_parsing_with_nonexistent_and_branches(self):
"""Testing SVN (<backend>) revision parsing with relocation
information and nonexistent revision specifier
"""
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision=b'(.../trunk) (nonexistent)'),
(b'trunk/', PRE_CREATION))
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision=b'(.../branches/branch-1.0) (nicht existent)'),
(b'branches/branch-1.0/', PRE_CREATION))
self.assertEqual(
self.tool.parse_diff_revision(
filename=b'',
revision=' (.../trunk) (不存在的)'.encode('utf-8')),
(b'trunk/', PRE_CREATION))
def test_interface(self):
"""Testing SVN (<backend>) with basic SVNTool API"""
self.assertFalse(self.tool.diffs_use_absolute_paths)
self.assertRaises(NotImplementedError,
lambda: self.tool.get_changeset(1))
def test_binary_diff(self):
"""Testing SVN (<backend>) parsing SVN diff with binary file"""
diff = (
b'Index: binfile\n'
b'============================================================'
b'=======\n'
b'Cannot display: file marked as a binary type.\n'
b'svn:mime-type = application/octet-stream\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'binfile',
orig_file_details=b'(unknown)',
modified_filename=b'binfile',
modified_file_details=b'(working copy)',
index_header_value=b'binfile',
binary=True,
data=diff)
def test_binary_diff_with_property_change(self):
"""Testing SVN (<backend>) parsing SVN diff with binary file with
property change
"""
diff = (
b'Index: binfile\n'
b'============================================================'
b'=======\n'
b'Cannot display: file marked as a binary type.\n'
b'svn:mime-type = application/octet-stream\n'
b'\n'
b'Property changes on: binfile\n'
b'____________________________________________________________'
b'_______\n'
b'Added: svn:mime-type\n'
b'## -0,0 +1 ##\n'
b'+application/octet-stream\n'
b'\\ No newline at end of property\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'binfile',
orig_file_details=b'(unknown)',
modified_filename=b'binfile',
modified_file_details=b'(working copy)',
index_header_value=b'binfile',
binary=True,
insert_count=1,
data=diff)
def test_keyword_diff(self):
"""Testing SVN (<backend>) parsing diff with keywords"""
# 'svn cat' will expand special variables in svn:keywords,
# but 'svn diff' doesn't expand anything. This causes the
# patch to fail if those variables appear in the patch context.
diff = (b'Index: Makefile\n'
b'==========================================================='
b'========\n'
b'--- Makefile (revision 4)\n'
b'+++ Makefile (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b' # $Id$\n'
b' # $Rev$\n'
b' # $Revision:: $\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n')
filename = 'trunk/doc/misc-docs/Makefile'
rev = Revision('4')
file = self.tool.get_file(filename, rev)
patch(diff, file, filename)
def test_unterminated_keyword_diff(self):
"""Testing SVN (<backend>) parsing diff with unterminated keywords"""
diff = (b'Index: Makefile\n'
b'==========================================================='
b'========\n'
b'--- Makefile (revision 4)\n'
b'+++ Makefile (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b' # $Id$\n'
b' # $Id:\n'
b' # $Rev$\n'
b' # $Revision:: $\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n')
filename = 'trunk/doc/misc-docs/Makefile'
rev = Revision('5')
file = self.tool.get_file(filename, rev)
patch(diff, file, filename)
def test_svn16_property_diff(self):
"""Testing SVN (<backend>) parsing SVN 1.6 diff with property changes
"""
diff = (
b'Index:\n'
b'======================================================'
b'=============\n'
b'--- (revision 123)\n'
b'+++ (working copy)\n'
b'Property changes on: .\n'
b'______________________________________________________'
b'_____________\n'
b'Modified: reviewboard:url\n'
b'## -1 +1 ##\n'
b'-http://reviews.reviewboard.org\n'
b'+http://reviews.reviewboard.org\n'
b'Index: binfile\n'
b'======================================================='
b'============\nCannot display: file marked as a '
b'binary type.\nsvn:mime-type = application/octet-stream\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'binfile',
orig_file_details=b'(unknown)',
modified_filename=b'binfile',
modified_file_details=b'(working copy)',
index_header_value=b'binfile',
binary=True,
data=diff)
def test_svn17_property_diff(self):
"""Testing SVN (<backend>) parsing SVN 1.7+ diff with property changes
"""
diff = (
b'Index .:\n'
b'======================================================'
b'=============\n'
b'--- . (revision 123)\n'
b'+++ . (working copy)\n'
b'\n'
b'Property changes on: .\n'
b'______________________________________________________'
b'_____________\n'
b'Modified: reviewboard:url\n'
b'## -0,0 +1,3 ##\n'
b'-http://reviews.reviewboard.org\n'
b'+http://reviews.reviewboard.org\n'
b'Added: myprop\n'
b'## -0,0 +1 ##\n'
b'+Property test.\n'
b'Index: binfile\n'
b'======================================================='
b'============\nCannot display: file marked as a '
b'binary type.\nsvn:mime-type = application/octet-stream\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'binfile',
orig_file_details=b'(unknown)',
modified_filename=b'binfile',
modified_file_details=b'(working copy)',
index_header_value=b'binfile',
binary=True,
data=diff)
def test_unicode_diff(self):
"""Testing SVN (<backend>) parsing diff with unicode characters"""
diff = (
'Index: Filé\n'
'==========================================================='
'========\n'
'--- Filé (revision 4)\n'
'+++ Filé (working copy)\n'
'@@ -1,6 +1,7 @@\n'
'+# foó\n'
' include ../tools/Makefile.base-vars\n'
' NAME = misc-docs\n'
' OUTNAME = svn-misc-docs\n'
).encode('utf-8')
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename='Filé'.encode('utf-8'),
orig_file_details=b'(revision 4)',
modified_filename='Filé'.encode('utf-8'),
modified_file_details=b'(working copy)',
index_header_value='Filé'.encode('utf-8'),
insert_count=1,
data=diff)
def test_diff_with_spaces_in_filenames(self):
"""Testing SVN (<backend>) parsing diff with spaces in filenames"""
diff = (
b'Index: File with spaces\n'
b'==========================================================='
b'========\n'
b'--- File with spaces (revision 4)\n'
b'+++ File with spaces (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'File with spaces',
orig_file_details=b'(revision 4)',
modified_filename=b'File with spaces',
modified_file_details=b'(working copy)',
index_header_value=b'File with spaces',
insert_count=1,
data=diff)
def test_diff_with_added_empty_file(self):
"""Testing parsing SVN diff with added empty file"""
diff = (
b'Index: empty-file\t(added)\n'
b'==========================================================='
b'========\n'
b'--- empty-file\t(revision 0)\n'
b'+++ empty-file\t(revision 0)\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'empty-file',
orig_file_details=b'(revision 0)',
modified_filename=b'empty-file',
modified_file_details=b'(revision 0)',
index_header_value=b'empty-file\t(added)',
data=diff)
def test_diff_with_deleted_empty_file(self):
"""Testing parsing SVN diff with deleted empty file"""
diff = (
b'Index: empty-file\t(deleted)\n'
b'==========================================================='
b'========\n'
b'--- empty-file\t(revision 4)\n'
b'+++ empty-file\t(working copy)\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'empty-file',
orig_file_details=b'(revision 4)',
modified_filename=b'empty-file',
modified_file_details=b'(working copy)',
index_header_value=b'empty-file\t(deleted)',
deleted=True,
data=diff)
def test_diff_with_nonexistent_revision_for_dest_file(self):
"""Testing parsing SVN diff with deleted file using "nonexistent"
destination revision
"""
diff = (
b'Index: deleted-file\n'
b'==========================================================='
b'========\n'
b'--- deleted-file\t(revision 4)\n'
b'+++ deleted-file\t(nonexistent)\n'
b'@@ -1,2 +0,0 @@\n'
b'-line 1\n'
b'-line 2\n'
)
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 1)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'deleted-file',
orig_file_details=b'(revision 4)',
modified_filename=b'deleted-file',
modified_file_details=b'(nonexistent)',
index_header_value=b'deleted-file',
deleted=True,
delete_count=2,
data=diff)
def test_idea_diff(self):
"""Testing parsing SVN diff with multi-file diff generated by IDEA
IDEs
"""
diff1 = (
b'Index: path/to/README\n'
b'IDEA additional info:\n'
b'Subsystem: org.reviewboard.org.test\n'
b'<+>ISO-8859-1\n'
b'=============================================================='
b'=====\n'
b'--- path/to/README\t(revision 4)\n'
b'+++ path/to/README\t(revision )\n'
b'@@ -1,6 +1,7 @@\n'
b' #\n'
b' #\n'
b' #\n'
b'+# test\n'
b' #\n'
b' #\n'
b' #\n'
)
diff2 = (
b'Index: path/to/README2\n'
b'IDEA additional info:\n'
b'Subsystem: org.reviewboard.org.test\n'
b'<+>ISO-8859-1\n'
b'=============================================================='
b'=====\n'
b'--- path/to/README2\t(revision 4)\n'
b'+++ path/to/README2\t(revision )\n'
b'@@ -1,6 +1,7 @@\n'
b' #\n'
b' #\n'
b' #\n'
b'+# test\n'
b' #\n'
b' #\n'
b' #\n'
)
diff = diff1 + diff2
parsed_files = self.tool.get_parser(diff).parse()
self.assertEqual(len(parsed_files), 2)
self.assert_parsed_diff_file(
parsed_files[0],
orig_filename=b'path/to/README',
orig_file_details=b'(revision 4)',
modified_filename=b'path/to/README',
modified_file_details=b'(revision )',
index_header_value=b'path/to/README',
insert_count=1,
data=diff1)
self.assert_parsed_diff_file(
parsed_files[1],
orig_filename=b'path/to/README2',
orig_file_details=b'(revision 4)',
modified_filename=b'path/to/README2',
modified_file_details=b'(revision )',
index_header_value=b'path/to/README2',
insert_count=1,
data=diff2)
def test_get_branches(self):
"""Testing SVN (<backend>) get_branches"""
branches = self.tool.get_branches()
self.assertEqual(len(branches), 3)
self.assertEqual(branches[0], Branch(id='trunk', name='trunk',
commit='12', default=True))
self.assertEqual(branches[1], Branch(id='branches/branch1',
name='branch1',
commit='7', default=False))
self.assertEqual(branches[2], Branch(id='top-level-branch',
name='top-level-branch',
commit='10', default=False))
def test_get_commits(self):
"""Testing SVN (<backend>) get_commits"""
commits = self.tool.get_commits(start='5')
self.assertEqual(len(commits), 5)
self.assertEqual(
commits[0],
Commit('chipx86',
'5',
'2010-05-21T09:33:40.893946',
'Add an unterminated keyword for testing bug #1523\n',
'4'))
commits = self.tool.get_commits(start='7')
self.assertEqual(len(commits), 7)
self.assertEqual(
commits[1],
Commit('david',
'6',
'2013-06-13T07:43:04.725088',
'Add a branches directory',
'5'))
def test_get_commits_with_branch(self):
"""Testing SVN (<backend>) get_commits with branch"""
commits = self.tool.get_commits(branch='/branches/branch1', start='5')
self.assertEqual(len(commits), 5)
self.assertEqual(
commits[0],
Commit('chipx86',
'5',
'2010-05-21T09:33:40.893946',
'Add an unterminated keyword for testing bug #1523\n',
'4'))
commits = self.tool.get_commits(branch='/branches/branch1', start='7')
self.assertEqual(len(commits), 6)
self.assertEqual(
commits[0],
Commit('david',
'7',
'2013-06-13T07:43:27.259554',
'Add a branch',
'5'))
self.assertEqual(
commits[1],
Commit('chipx86',
'5',
'2010-05-21T09:33:40.893946',
'Add an unterminated keyword for testing bug #1523\n',
'4'))
def test_get_commits_with_no_date(self):
"""Testing SVN (<backend>) get_commits with no date in commit"""
def _get_log(*args, **kwargs):
return [
{
'author': 'chipx86',
'revision': '5',
'message': 'Commit 1',
},
]
self.spy_on(self.tool.client.get_log, _get_log)
commits = self.tool.get_commits(start='5')
self.assertEqual(len(commits), 1)
self.assertEqual(
commits[0],
Commit('chipx86',
'5',
'',
'Commit 1'))
def test_get_commits_with_exception(self):
"""Testing SVN (<backend>) get_commits with exception"""
def _get_log(*args, **kwargs):
raise Exception('Bad things happened')
self.spy_on(self.tool.client.get_log, _get_log)
with self.assertRaisesMessage(SCMError, 'Bad things happened'):
self.tool.get_commits(start='5')
def test_get_change(self):
"""Testing SVN (<backend>) get_change"""
commit = self.tool.get_change('5')
self.assertEqual(md5(commit.message.encode('utf-8')).hexdigest(),
'928336c082dd756e3f7af4cde4724ebf')
self.assertEqual(md5(commit.diff).hexdigest(),
'56e50374056931c03a333f234fa63375')
def test_utf8_keywords(self):
"""Testing SVN (<backend>) with UTF-8 files with keywords"""
self.repository.get_file('trunk/utf8-file.txt', '9')
def test_normalize_patch_with_svn_and_expanded_keywords(self):
"""Testing SVN (<backend>) normalize_patch with expanded keywords"""
diff = (
b'Index: Makefile\n'
b'==========================================================='
b'========\n'
b'--- Makefile (revision 4)\n'
b'+++ Makefile (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b' # $Id$\n'
b' # $Rev: 123$\n'
b' # $Revision:: 123 $\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n'
)
normalized = self.tool.normalize_patch(
patch=diff,
filename='trunk/doc/misc-docs/Makefile',
revision='4')
self.assertEqual(
normalized,
b'Index: Makefile\n'
b'==========================================================='
b'========\n'
b'--- Makefile (revision 4)\n'
b'+++ Makefile (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b' # $Id$\n'
b' # $Rev$\n'
b' # $Revision:: $\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n')
def test_normalize_patch_with_svn_and_no_expanded_keywords(self):
"""Testing SVN (<backend>) normalize_patch with no expanded keywords"""
diff = (
b'Index: Makefile\n'
b'==========================================================='
b'========\n'
b'--- Makefile (revision 4)\n'
b'+++ Makefile (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b' # $Id$\n'
b' # $Rev$\n'
b' # $Revision:: $\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n'
)
normalized = self.tool.normalize_patch(
patch=diff,
filename='trunk/doc/misc-docs/Makefile',
revision='4')
self.assertEqual(
normalized,
b'Index: Makefile\n'
b'==========================================================='
b'========\n'
b'--- Makefile (revision 4)\n'
b'+++ Makefile (working copy)\n'
b'@@ -1,6 +1,7 @@\n'
b' # $Id$\n'
b' # $Rev$\n'
b' # $Revision:: $\n'
b'+# foo\n'
b' include ../tools/Makefile.base-vars\n'
b' NAME = misc-docs\n'
b' OUTNAME = svn-misc-docs\n')
class PySVNTests(_CommonSVNTestCase):
backend = 'reviewboard.scmtools.svn.pysvn'
backend_name = 'pysvn'
class SubvertpyTests(_CommonSVNTestCase):
backend = 'reviewboard.scmtools.svn.subvertpy'
backend_name = 'subvertpy'
class UtilsTests(SCMTestCase):
"""Unit tests for reviewboard.scmtools.svn.utils."""
def test_collapse_svn_keywords(self):
"""Testing collapse_svn_keywords"""
keyword_test_data = [
(b'Id',
b'/* $Id: test2.c 3 2014-08-04 22:55:09Z david $ */',
b'/* $Id$ */'),
(b'id',
b'/* $Id: test2.c 3 2014-08-04 22:55:09Z david $ */',
b'/* $Id$ */'),
(b'id',
b'/* $id: test2.c 3 2014-08-04 22:55:09Z david $ */',
b'/* $id$ */'),
(b'Id',
b'/* $id: test2.c 3 2014-08-04 22:55:09Z david $ */',
b'/* $id$ */')
]
for keyword, data, result in keyword_test_data:
self.assertEqual(collapse_svn_keywords(data, keyword),
result)
def test_has_expanded_svn_keywords(self):
"""Testing has_expanded_svn_keywords"""
self.assertTrue(has_expanded_svn_keywords(b'.. $ID: 123$ ..'))
self.assertTrue(has_expanded_svn_keywords(b'.. $id:: 123$ ..'))
self.assertFalse(has_expanded_svn_keywords(b'.. $Id:: $ ..'))
self.assertFalse(has_expanded_svn_keywords(b'.. $Id$ ..'))
self.assertFalse(has_expanded_svn_keywords(b'.. $Id ..'))
self.assertFalse(has_expanded_svn_keywords(b'.. $Id Here$ ..'))
class SVNAuthFormTests(TestCase):
"""Unit tests for SVNTool's authentication form."""
def test_fields(self):
"""Testing SVNTool authentication form fields"""
form = SVNTool.create_auth_form()
self.assertEqual(list(form.fields), ['username', 'password'])
self.assertEqual(form['username'].help_text, '')
self.assertEqual(form['username'].label, 'Username')
self.assertEqual(form['password'].help_text, '')
self.assertEqual(form['password'].label, 'Password')
@add_fixtures(['test_scmtools'])
def test_load(self):
"""Tetting SVNTool authentication form load"""
repository = self.create_repository(
tool_name='Subversion',
username='test-user',
password='test-pass')
form = SVNTool.create_auth_form(repository=repository)
form.load()
self.assertEqual(form['username'].value(), 'test-user')
self.assertEqual(form['password'].value(), 'test-pass')
@add_fixtures(['test_scmtools'])
def test_save(self):
"""Tetting SVNTool authentication form save"""
repository = self.create_repository(tool_name='Subversion')
form = SVNTool.create_auth_form(
repository=repository,
data={
'username': 'test-user',
'password': 'test-pass',
})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(repository.username, 'test-user')
self.assertEqual(repository.password, 'test-pass')
class SVNRepositoryFormTests(TestCase):
"""Unit tests for SVNTool's repository form."""
def test_fields(self):
"""Testing SVNTool repository form fields"""
form = SVNTool.create_repository_form()
self.assertEqual(list(form.fields), ['path', 'mirror_path'])
self.assertEqual(form['path'].help_text,
'The path to the repository. This will generally be '
'the URL you would use to check out the repository.')
self.assertEqual(form['path'].label, 'Path')
self.assertEqual(form['mirror_path'].help_text, '')
self.assertEqual(form['mirror_path'].label, 'Mirror Path')
@add_fixtures(['test_scmtools'])
def test_load(self):
"""Tetting SVNTool repository form load"""
repository = self.create_repository(
tool_name='Subversion',
path='https://svn.example.com/',
mirror_path='https://svn.mirror.example.com')
form = SVNTool.create_repository_form(repository=repository)
form.load()
self.assertEqual(form['path'].value(), 'https://svn.example.com/')
self.assertEqual(form['mirror_path'].value(),
'https://svn.mirror.example.com')
@add_fixtures(['test_scmtools'])
def test_save(self):
"""Tetting SVNTool repository form save"""
repository = self.create_repository(tool_name='Subversion')
form = SVNTool.create_repository_form(
repository=repository,
data={
'path': 'https://svn.example.com/',
'mirror_path': 'https://svn.mirror.example.com',
})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(repository.path, 'https://svn.example.com/')
self.assertEqual(repository.mirror_path,
'https://svn.mirror.example.com')
|
import os
from setuptools import setup
import sys
if sys.version_info < (2, 6):
raise Exception('Wiggelen requires Python 2.6 or higher.')
install_requires = []
try:
import argparse
except ImportError:
install_requires.append('argparse')
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict')
try:
with open('README.rst') as readme:
long_description = readme.read()
except IOError:
long_description = 'See https://pypi.python.org/pypi/wiggelen'
distmeta = {}
for line in open(os.path.join('wiggelen', '__init__.py')):
try:
field, value = (x.strip() for x in line.split('='))
except ValueError:
continue
if field == '__version_info__':
value = value.strip('[]()')
value = '.'.join(x.strip(' \'"') for x in value.split(','))
else:
value = value.strip('\'"')
distmeta[field] = value
setup(
name='wiggelen',
version=distmeta['__version_info__'],
description='Working with wiggle tracks in Python',
long_description=long_description,
author=distmeta['__author__'],
author_email=distmeta['__contact__'],
url=distmeta['__homepage__'],
license='MIT License',
platforms=['any'],
packages=['wiggelen'],
install_requires=install_requires,
entry_points = {
'console_scripts': ['wiggelen = wiggelen.commands:main']
},
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
keywords='bioinformatics'
)
|
"""Download GTFS file and generate JSON file.
Author: Panu Ranta, panu.ranta@iki.fi, https://14142.net/kartalla/about.html
"""
import argparse
import datetime
import hashlib
import json
import logging
import os
import resource
import shutil
import sys
import tempfile
import time
import zipfile
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('config', help='JSON configuration file')
parser.add_argument('--only-download', action='store_true', help='Only download GTFS file')
parser.add_argument('--use-no-q-dirs', action='store_true', help='Do not use Q dirs')
args = parser.parse_args()
_init_logging()
start_time = time.time()
logging.debug('started {}'.format(sys.argv))
config = _load_config(args.config)
gtfs_name = config['name']
downloaded_gtfs_zip = _download_gtfs(config['url'])
modify_date = _get_modify_date(downloaded_gtfs_zip)
gtfs_dir = _get_q_dir(config['gtfs_dir'], modify_date, not args.use_no_q_dirs)
gtfs_zip = _rename_gtfs_zip(gtfs_dir, downloaded_gtfs_zip, gtfs_name, modify_date)
if gtfs_zip and (not args.only_download):
log_dir = _get_q_dir(config['log_dir'], modify_date, not args.use_no_q_dirs)
_generate_json(gtfs_name, modify_date, gtfs_zip, config['json_dir'], log_dir)
logging.debug('took {} seconds, max mem: {} megabytes'.format(
int(time.time() - start_time), resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
def _init_logging():
log_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(funcName)s: %(message)s'
logging.basicConfig(filename='generate.log', format=log_format, level=logging.DEBUG)
def _progress(text):
print(text)
logging.debug(text)
def _progress_warning(text):
print('\033[31m{}\033[0m'.format(text))
logging.warning(text)
def _load_config(config_path):
with open(config_path) as config_file:
return json.load(config_file)
def _download_gtfs(url):
output_file, output_filename = tempfile.mkstemp(dir='.')
os.close(output_file)
curl_options = '--header "Accept-Encoding: gzip" --location'
command = 'curl {} "{}" > {}'.format(curl_options, url, output_filename)
_progress('downloading gtfs file into: {}'.format(os.path.relpath(output_filename)))
_execute_command(command)
return output_filename
def _execute_command(command):
if os.system(command) != 0:
raise SystemExit('failed to execute: {}'.format(command))
def _get_modify_date(zip_filename):
modify_times = _get_modify_times(zip_filename)
if len(modify_times) > 1:
_progress_warning('multiple modify times: {}'.format(modify_times))
return sorted(modify_times)[-1]
def _get_modify_times(zip_filename):
modify_times = set()
with zipfile.ZipFile(zip_filename) as zip_file:
for info in zip_file.infolist():
modify_times.add(datetime.datetime(*info.date_time).strftime('%Y%m%d'))
return modify_times
def _get_q_dir(base_dir, modify_date, create_q_dir):
if create_q_dir:
modify_month = int(modify_date[4:6])
q_dir = '{}_q{}'.format(modify_date[:4], 1 + ((modify_month - 1) // 3))
return os.path.join(base_dir, q_dir)
return base_dir
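# Example (illustrative): with create_q_dir=True, a modify_date of '20240517'
# maps to os.path.join(base_dir, '2024_q2'), since month 5 falls in the second quarter.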
def _rename_gtfs_zip(gtfs_dir, old_filename, gtfs_name, modify_date):
_create_dir(gtfs_dir)
new_filename = os.path.join(gtfs_dir, '{}_{}.zip'.format(gtfs_name, modify_date))
if os.path.isfile(new_filename):
if _compare_files(old_filename, new_filename):
_progress('downloaded gtfs file is identical to: {}'.format(new_filename))
os.remove(old_filename)
return None
_rename_existing_file(new_filename)
os.rename(old_filename, new_filename)
_progress('renamed: {} -> {}'.format(old_filename, new_filename))
return new_filename
def _create_dir(new_dir):
if not os.path.isdir(new_dir):
os.makedirs(new_dir)
def _compare_files(filename_a, filename_b):
return _get_hash(filename_a) == _get_hash(filename_b)
def _get_hash(filename):
file_hash = hashlib.sha256()
with open(filename, 'rb') as input_file:
file_hash.update(input_file.read())
return file_hash.digest()
def _generate_json(gtfs_name, modify_date, gtfs_zip, json_dir, log_dir):
_create_dir(json_dir)
date_output_file = os.path.join(json_dir, '{}_{}.json'.format(gtfs_name, modify_date))
_rename_existing_file(date_output_file)
_create_dir(log_dir)
log_path = os.path.join(log_dir, 'gtfs2json_{}_{}_{}.log'.format(gtfs_name, modify_date,
_get_now_timestamp()))
_progress('generating json for {}'.format(gtfs_zip))
command = '{}/gtfs2json.py --log-file {} {} {}'.format(os.path.dirname(__file__), log_path,
gtfs_zip, date_output_file)
_execute_command(command)
_create_base_output_file(date_output_file, os.path.join(json_dir, '{}.json'.format(gtfs_name)))
def _create_base_output_file(date_output_file, base_output_file):
if os.path.isfile(base_output_file):
_progress('deleting {}'.format(base_output_file))
os.remove(base_output_file)
_progress('copying {} to {}'.format(date_output_file, base_output_file))
shutil.copyfile(date_output_file, base_output_file)
def _rename_existing_file(filename):
if os.path.isfile(filename):
suffix = filename.split('.')[-1]
new_filename = filename.replace('.{}'.format(suffix),
'_{}.{}'.format(_get_now_timestamp(), suffix))
os.rename(filename, new_filename)
_progress_warning('renamed existing {} file {} -> {}'.format(suffix, filename,
new_filename))
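# Example (illustrative file name): an existing 'helsinki.json' would be renamed to
# something like 'helsinki_20240517_143000.json' before a new file takes its place.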
def _get_now_timestamp():
return datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
if __name__ == "__main__":
_main()
|
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
            This is expected to be num_channels * splice * num_stack * 3
            (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __call__(self, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
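# Example usage (illustrative sketch, TensorFlow 1.x style; the sizes below
# mirror the debug comments above: 40 channels, splice=5, num_stack=2, so
# input_size = 40 * 5 * 2 * 3 = 1200):
#
#     encoder = StudentCNNXEEncoder(input_size=1200, splice=5, num_stack=2,
#                                   parameter_init=0.1)
#     inputs_pl = tf.placeholder(tf.float32, shape=[None, 1200])
#     keep_prob_pl = tf.placeholder(tf.float32)
#     outputs = encoder(inputs_pl, keep_prob_pl, is_training=True)
#     # outputs: `[B, 2048]` after the final fully-connected layers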
|
from __future__ import unicode_literals
import pytest
from structures.insertion_sort import insertion_sort
@pytest.fixture
def sorted_list():
return [i for i in xrange(10)]
@pytest.fixture
def reverse_list():
return [i for i in xrange(9, -1, -1)]
@pytest.fixture
def average_list():
return [5, 9, 2, 4, 1, 6, 8, 7, 0, 3]
def test_sorted(sorted_list):
insertion_sort(sorted_list)
assert sorted_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_worst(reverse_list):
insertion_sort(reverse_list)
assert reverse_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_average(average_list):
insertion_sort(average_list)
assert average_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_repeats():
l = [3, 6, 7, 3, 9, 5, 2, 7]
insertion_sort(l)
assert l == [2, 3, 3, 5, 6, 7, 7, 9]
def test_multiple_types():
l = [3, 'foo', 2.8, True, []]
# python 2 sorting is crazy
insertion_sort(l)
assert l == [True, 2.8, 3, [], 'foo']
|
from flask import Flask
app = Flask(__name__)
app.config.from_object('blog.config')
from blog import views
|
import fileinput
def str_to_int(s):
return([ int(x) for x in s.split() ])
def proc_input(args):
(n, l) = str_to_int(args[0])
a = tuple(str_to_int(args[1]))
return(l, a)
def solve(args, verbose=False):
(l, a) = proc_input(args)
list_a = list(a)
list_a.sort()
max_dist = max(list_a[0] * 2, (l - list_a[-1]) * 2)
for x in xrange(len(a) - 1):
max_dist = max(max_dist, list_a[x + 1] - list_a[x])
if verbose:
print max_dist / float(2)
return max_dist / float(2)
def test():
assert(str_to_int('1 2 3') == [ 1, 2, 3 ])
assert(proc_input([ '2 5', '2 5' ]) == (5, (2, 5)))
assert(solve([ '2 5', '2 5' ]) == 2.0)
assert(solve([ '4 5', '0 1 2 3' ]) == 2.0)
assert(solve([ '7 15', '15 5 3 7 9 14 0' ]) == 2.5)
if __name__ == '__main__':
from sys import argv
if argv.pop() == 'test':
test()
else:
solve(list(fileinput.input()), verbose=True)
|
"""
Copyright (C), 2013, The Schilduil Team. All rights reserved.
"""
import sys
import pony.orm
import suapp.orm
from suapp.logdecorator import loguse, logging
__all__ = ["Wooster", "Drone", "Jeeves"]
class FlowException(Exception):
pass
class ApplicationClosed(FlowException):
pass
class Wooster:
"""
A Wooster represents a UI window/page.
GENERALLY THESE THINGS ARE REUSED SO YOU NEED TO BE VERY CAREFUL ABOUT SIDE EFFECTS.
In case you have something that cannot be reused do something like:
1/ Create a new class instance of a subclass of Wooster
2/ Call inflow on that
"""
def lock(self):
pass
def unlock(self):
pass
def inflow(self, jeeves, drone):
# The only thing it does is store the Jeeves object.
self.jeeves = jeeves
# MODE: Modal=1, Replace=2, Both=3
# jeeves.drone(self, name, mode, dataobject)
def close(self):
pass
def toJSON(self):
return "Wooster %s" % (hex(self.__hash__()))
class Drone(object):
"""
A drone is the connection between two vertices.
"""
def __init__(self, name, tovertex):
self.name = name
self.tovertex = tovertex
@loguse
def get_new_instance_clone(self, dataobject, mode):
"""
Clone the drone and add the dataobject and mode.
"""
drone = Drone(self.name, self.tovertex)
drone.dataobject = dataobject
drone.mode = mode
return drone
def toJSON(self):
return "Drone %s > %s" % (self.name, self.tovertex)
class Jeeves(object):
"""
    Jeeves is the controller that determines the flow.
It uses Drones to go from Wooster to Wooster.
"""
MODE_OPEN = 3
MODE_REPLACE = 2
MODE_MODAL = 1
@loguse
def __init__(self, app=None):
"""
Initializes the Jeeves with an empty flow and app name.
"""
self.flow = {"": {}}
self.app = app
self.views = {}
self.queries = {}
# TODO: I have no idea why I added ormscope: get rid of it?
self.ormscope = {}
def toJSON(self):
"""
Makes this object be made into json.
"""
return "Jeeves %s" % (hex(self.__hash__()))
@loguse
def whichDrone(self, fromname, outmessage, **kwargs):
"""
Finding the drone matching the outmessage.
"""
logging.getLogger(__name__).debug(
": Jeeves[%r].whichDrone : Flow: %s", self, self.flow
)
drone = None
try:
drone = self.flow[fromname][outmessage]
except:
try:
drone = self.flow[""][outmessage]
except:
                # TODO: do something other than bluntly exiting.
logging.getLogger(__name__).error(
": Jeeves[%r].whichDrone : Not found '%s' - exiting.",
self,
outmessage,
)
if outmessage == "EXIT":
raise ApplicationClosed()
else:
raise FlowException("Unknown outmessage: %s" % (outmessage))
return drone
@loguse("@") # Not logging the return value.
def _do_query_str(self, query_template, scope, parameters):
"""
Execute a query that is a string.
DEPRECATED
"""
query = query_template % parameters
exec("result = %s" % (query), scope)
return scope["result"]
@loguse("@") # Not logging the return value.
def pre_query(self, name, scope=None, params=None):
"""
        Returns the query and parameters.
The query and the default parameters are looked up in self.queries.
The parameters are next updated with the passed params.
The self.queries is filled by moduleloader from the loaded modlib's
view_definitions() function.
"""
if scope is None:
scope = {}
query_template, defaults = self.queries[name]
# Start with the default defined.
parameters = defaults.copy()
parameters.update(params)
# Making sure the paging parameters are integers.
try:
parameters["pagenum"] = int(parameters["pagenum"])
except:
parameters["pagenum"] = 1
try:
parameters["pagesize"] = int(parameters["pagesize"])
except:
parameters["pagesize"] = 10
logging.getLogger(__name__).debug(
"Paging #%s (%s)", parameters["pagenum"], parameters["pagesize"]
)
return (query_template, parameters)
@loguse("@") # Not loggin the return value.
def do_query(self, name, scope=None, params=None):
"""
        Executes a query by name and returns the result.
        The results are always UiOrmObject instances, obtained by applying
        UiOrmObject.uize to each row returned by the query.
"""
query_template, parameters = self.pre_query(name, scope, params)
if callable(query_template):
# A callable, so just call it.
result = query_template(params=parameters)
else:
# DEPRECATED: python code as a string.
result = self._do_query_str(query_template, scope, parameters)
return (suapp.orm.UiOrmObject.uize(r) for r in result)
@loguse
def do_fetch_set(self, module, table, primarykey, link):
"""
Fetches the result from a foreign key that is a set.
This will return the list of objects representing the rows in the
        database pointed to by the foreign key (whose name should be passed in
        link). The return type is a sequence of suapp.orm.UiOrmObject instances.
Usually you can follow the foreign key directly, but not in an
asynchronous target (UI) like the web where you need to fetch it anew.
For foreign keys that are not sets you can use do_fetch.
The module, table and primarykey are those from the object having the
foreign key and behave the same as with do_fetch. The extra parameter
link is the foreign key that is pointing to the set.
"""
origin = self.do_fetch(module, table, primarykey)
result = getattr(origin, link)
return (suapp.orm.UiOrmObject.uize(r) for r in result)
@loguse
def do_fetch(self, module, table, primarykey):
"""
Fetches a specific object from the database.
This will return the object representing a row in the
specified table from the database. The return type is
either a pony.orm.core.Entity or suapp.orm.UiOrmObject
subclass, depending on the class name specified in table.
Parameters:
- module: In what module the table is defined.
This should start with modlib.
- table: Class name of the object representing the table.
The class should be a subclass of either
- pony.orm.core.Entity
- suapp.orm.UiOrmObject
- primarykey: A string representing the primary key value
or a list of values (useful in case of a
multi variable primary key).
"""
if isinstance(primarykey, str):
primarykey = [primarykey]
module = sys.modules[module]
table_class = getattr(module, table)
params = {}
if issubclass(table_class, pony.orm.core.Entity):
pk_columns = table_class._pk_columns_
elif issubclass(table_class, suapp.orm.UiOrmObject):
pk_columns = table_class._ui_class._pk_columns_
else:
return None
if len(pk_columns) == 1:
if len(primarykey) == 1:
params[pk_columns[0]] = primarykey[0]
else:
i = 0
for column in pk_columns:
params[column] = primarykey[i]
i += 1
# Checking if the primary key is a foreign key.
for column in pk_columns:
logging.getLogger(__name__).debug(
"Primary key column: %s = %s", column, params[column]
)
logging.getLogger(__name__).debug("Fetching %s (%s)", table_class, params)
if issubclass(table_class, suapp.orm.UiOrmObject):
return table_class(**params)
else:
return table_class.get(**params)
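    # Illustrative example (module/class names here are hypothetical, not part
    # of this project): jeeves.do_fetch("modlib.birds", "Bird", "42") would
    # look up the Bird class in the modlib.birds module and return
    # Bird.get(<pk column>="42"), or Bird(<pk column>="42") when Bird is a
    # suapp.orm.UiOrmObject subclass.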
@loguse("@") # Not logging the return value.
def drone(self, fromvertex, name, mode, dataobject, **kwargs):
"""
Find the drone and execute it.
"""
# Find the drone
fromname = ""
result = None
if isinstance(fromvertex, Wooster):
fromname = fromvertex.name
else:
fromname = str(fromvertex)
drone_type = self.whichDrone(fromname, name, **kwargs)
# Clone a new instance of the drone and setting dataobject & mode.
drone = drone_type.get_new_instance_clone(dataobject, mode)
# If there is a callback, call it.
if "callback_drone" in kwargs:
try:
kwargs["callback_drone"](drone)
except:
pass
# Depending on the mode
# Some targets depend on what is returned from inflow.
if mode == self.MODE_MODAL:
if isinstance(fromvertex, Wooster):
fromvertex.lock()
drone.fromvertex = fromvertex
result = drone.tovertex.inflow(self, drone)
if isinstance(fromvertex, Wooster):
fromvertex.unlock()
elif mode == self.MODE_REPLACE:
drone.fromvertex = None
fromvertex.close()
result = drone.tovertex.inflow(self, drone)
elif mode == self.MODE_OPEN:
drone.fromvertex = fromvertex
result = drone.tovertex.inflow(self, drone)
return result
@loguse
def start(self, dataobject=None):
"""
Start the Jeeves flow.
"""
self.drone("", "START", self.MODE_MODAL, dataobject)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s %(levelname)s %(name)s %(message)s", level=logging.DEBUG
)
logging.getLogger("__main__").setLevel(logging.DEBUG)
modulename = "__main__"
print(
"__main__: %s (%s)"
% (
modulename,
logging.getLevelName(logging.getLogger(modulename).getEffectiveLevel()),
)
)
class Application(Wooster):
name = "APP"
def inflow(self, jeeves, drone):
self.jeeves = jeeves
print(
"""This is the Jeeves and Wooster library!
            Jeeves is Wooster's indispensable valet: a gentleman's personal
            gentleman. In fact this Jeeves can manage more than one Wooster
(so he might not be that personal) and guide information from one
Wooster to another in an organised way making all the Woosters
march to the drones.
"""
)
def lock(self):
pass
def unlock(self):
pass
def close(self):
pass
flow = Jeeves()
flow.flow = {"": {"START": Drone("START", Application())}}
flow.start()
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import array
from chainer.utils import type_check
class BilinearFunction(function.Function):
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 6:
raise type_check.InvalidType(
'%s or %s' % (in_types.size() == 3, in_types.size() == 6),
'%s == %s' % (in_types.size(), n_in))
e1_type, e2_type, W_type = in_types[:3]
type_check_prod = type_check.make_variable(numpy.prod, 'prod')
type_check.expect(
e1_type.dtype == numpy.float32,
e1_type.ndim >= 2,
e2_type.dtype == numpy.float32,
e2_type.ndim >= 2,
e1_type.shape[0] == e2_type.shape[0],
W_type.dtype == numpy.float32,
W_type.ndim == 3,
type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
)
if n_in == 6:
out_size = W_type.shape[2]
V1_type, V2_type, b_type = in_types[3:]
type_check.expect(
V1_type.dtype == numpy.float32,
V1_type.ndim == 2,
V1_type.shape[0] == W_type.shape[0],
V1_type.shape[1] == out_size,
V2_type.dtype == numpy.float32,
V2_type.ndim == 2,
V2_type.shape[0] == W_type.shape[1],
V2_type.shape[1] == out_size,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
e1 = array.as_mat(inputs[0])
e2 = array.as_mat(inputs[1])
W = inputs[2]
if not type_check.same_types(*inputs):
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(e1): {1}, type(e2): {2}'
.format(type(W), type(e1), type(e2)))
xp = cuda.get_array_module(*inputs)
if xp is numpy:
y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
else:
i_len, j_len = e1.shape
k_len = e2.shape[1]
# 'ij,ik->ijk'
e1e2 = e1[:, :, None] * e2[:, None, :]
# ijk->i[jk]
e1e2 = e1e2.reshape(i_len, j_len * k_len)
# jkl->[jk]l
W_mat = W.reshape(-1, W.shape[2])
# 'i[jk],[jk]l->il'
y = e1e2.dot(W_mat)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
y += e1.dot(V1)
y += e2.dot(V2)
y += b
return y,
def backward(self, inputs, grad_outputs):
e1 = array.as_mat(inputs[0])
e2 = array.as_mat(inputs[1])
W = inputs[2]
gy = grad_outputs[0]
xp = cuda.get_array_module(*inputs)
if xp is numpy:
gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)
ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)
ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)
else:
kern = cuda.reduce('T in0, T in1, T in2', 'T out',
'in0 * in1 * in2', 'a + b', 'out = a', 0,
'bilinear_product')
e1_b = e1[:, :, None, None] # ij
e2_b = e2[:, None, :, None] # ik
gy_b = gy[:, None, None, :] # il
W_b = W[None, :, :, :] # jkl
gW = kern(e1_b, e2_b, gy_b, axis=0) # 'ij,ik,il->jkl'
ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3)) # 'ik,jkl,il->ij'
ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3)) # 'ij,jkl,il->ik'
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
if len(inputs) == 6:
V1, V2, b = inputs[3:]
gV1 = e1.T.dot(gy)
gV2 = e2.T.dot(gy)
gb = gy.sum(0)
ge1 += gy.dot(V1.T)
ge2 += gy.dot(V2.T)
ret += gV1, gV2, gb
return ret
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
"""Applies a bilinear function based on given parameters.
This is a building block of Neural Tensor Network (see the reference paper
below). It takes two input variables and one or four parameters, and
outputs one variable.
To be precise, denote six input arrays mathematically by
:math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
:math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
:math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
:math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
:math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
:math:`b\\in \\mathbb{R}^{L}`,
where :math:`I` is mini-batch size.
In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
parameters.
The output of forward propagation is calculated as
.. math::
y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
\\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
Note that V1, V2, b are optional. If these are not given, then this
function omits the last three terms in the above equation.
.. note::
This function accepts an input variable ``e1`` or ``e2`` of a non-matrix
array. In this case, the leading dimension is treated as the batch
dimension, and the other dimensions are reduced to one dimension.
.. note::
In the original paper, :math:`J` and :math:`K`
must be equal and the author denotes :math:`[V^1 V^2]`
(concatenation of matrices) by :math:`V`.
Args:
e1 (~chainer.Variable): Left input variable.
e2 (~chainer.Variable): Right input variable.
W (~chainer.Variable): Quadratic weight variable.
V1 (~chainer.Variable): Left coefficient variable.
V2 (~chainer.Variable): Right coefficient variable.
b (~chainer.Variable): Bias variable.
Returns:
~chainer.Variable: Output variable.
See:
`Reasoning With Neural Tensor Networks for Knowledge Base Completion
<http://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
"""
flags = [V1 is None, V2 is None, b is None]
if any(flags):
if not all(flags):
raise ValueError('All coefficients and bias for bilinear() must '
'be None, if at least one of them is None.')
return BilinearFunction()(e1, e2, W)
else:
return BilinearFunction()(e1, e2, W, V1, V2, b)
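# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal NumPy-only sketch of the forward computation above, checking that
# the einsum path (CPU branch) and the reshape/dot path (GPU branch) produce
# the same y_{il} = sum_{jk} e1_{ij} e2_{ik} W_{jkl}. Shapes are illustrative.
def _bilinear_forward_reference(e1, e2, W):
    # einsum path, as in BilinearFunction.forward on numpy inputs
    y_einsum = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
    # reshape/dot path, mirroring the cupy branch above
    i_len, j_len = e1.shape
    k_len = e2.shape[1]
    e1e2 = (e1[:, :, None] * e2[:, None, :]).reshape(i_len, j_len * k_len)
    y_dot = e1e2.dot(W.reshape(-1, W.shape[2]))
    assert numpy.allclose(y_einsum, y_dot)
    return y_einsum
# e.g. _bilinear_forward_reference(numpy.ones((2, 3)), numpy.ones((2, 4)),
#                                  numpy.ones((3, 4, 5))) returns a (2, 5) array of 12s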
|
import datetime
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.url import make_url
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from ...engine import test_execute
class DialectTest(fixtures.TestBase):
__backend__ = True
__only_on__ = "mysql"
@testing.combinations(
(None, "cONnection was kILLEd", "InternalError", "pymysql", True),
(None, "cONnection aLREady closed", "InternalError", "pymysql", True),
(None, "something broke", "InternalError", "pymysql", False),
(2006, "foo", "OperationalError", "mysqldb", True),
(2006, "foo", "OperationalError", "pymysql", True),
(2007, "foo", "OperationalError", "mysqldb", False),
(2007, "foo", "OperationalError", "pymysql", False),
)
def test_is_disconnect(
self, arg0, message, exc_cls_name, dialect_name, is_disconnect
):
class Error(Exception):
pass
dbapi = mock.Mock()
dbapi.Error = Error
dbapi.ProgrammingError = type("ProgrammingError", (Error,), {})
dbapi.OperationalError = type("OperationalError", (Error,), {})
dbapi.InterfaceError = type("InterfaceError", (Error,), {})
dbapi.InternalError = type("InternalError", (Error,), {})
dialect = getattr(mysql, dialect_name).dialect(dbapi=dbapi)
error = getattr(dbapi, exc_cls_name)(arg0, message)
eq_(dialect.is_disconnect(error, None, None), is_disconnect)
def test_ssl_arguments_mysqldb(self):
from sqlalchemy.dialects.mysql import mysqldb
dialect = mysqldb.dialect()
self._test_ssl_arguments(dialect)
def test_ssl_arguments_oursql(self):
from sqlalchemy.dialects.mysql import oursql
dialect = oursql.dialect()
self._test_ssl_arguments(dialect)
def _test_ssl_arguments(self, dialect):
kwarg = dialect.create_connect_args(
make_url(
"mysql://scott:tiger@localhost:3306/test"
"?ssl_ca=/ca.pem&ssl_cert=/cert.pem&ssl_key=/key.pem"
)
)[1]
# args that differ among mysqldb and oursql
for k in ("use_unicode", "found_rows", "client_flag"):
kwarg.pop(k, None)
eq_(
kwarg,
{
"passwd": "tiger",
"db": "test",
"ssl": {
"ca": "/ca.pem",
"cert": "/cert.pem",
"key": "/key.pem",
},
"host": "localhost",
"user": "scott",
"port": 3306,
},
)
@testing.combinations(
("compress", True),
("connect_timeout", 30),
("read_timeout", 30),
("write_timeout", 30),
("client_flag", 1234),
("local_infile", 1234),
("use_unicode", False),
("charset", "hello"),
)
def test_normal_arguments_mysqldb(self, kwarg, value):
from sqlalchemy.dialects.mysql import mysqldb
dialect = mysqldb.dialect()
connect_args = dialect.create_connect_args(
make_url(
"mysql://scott:tiger@localhost:3306/test"
"?%s=%s" % (kwarg, value)
)
)
eq_(connect_args[1][kwarg], value)
def test_mysqlconnector_buffered_arg(self):
from sqlalchemy.dialects.mysql import mysqlconnector
dialect = mysqlconnector.dialect()
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db?buffered=true")
)[1]
eq_(kw["buffered"], True)
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db?buffered=false")
)[1]
eq_(kw["buffered"], False)
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db")
)[1]
eq_(kw["buffered"], True)
def test_mysqlconnector_raise_on_warnings_arg(self):
from sqlalchemy.dialects.mysql import mysqlconnector
dialect = mysqlconnector.dialect()
kw = dialect.create_connect_args(
make_url(
"mysql+mysqlconnector://u:p@host/db?raise_on_warnings=true"
)
)[1]
eq_(kw["raise_on_warnings"], True)
kw = dialect.create_connect_args(
make_url(
"mysql+mysqlconnector://u:p@host/db?raise_on_warnings=false"
)
)[1]
eq_(kw["raise_on_warnings"], False)
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db")
)[1]
assert "raise_on_warnings" not in kw
@testing.only_on("mysql")
def test_random_arg(self):
dialect = testing.db.dialect
kw = dialect.create_connect_args(
make_url("mysql://u:p@host/db?foo=true")
)[1]
eq_(kw["foo"], "true")
@testing.only_on("mysql")
@testing.skip_if("mysql+mysqlconnector", "totally broken for the moment")
@testing.fails_on("mysql+oursql", "unsupported")
def test_special_encodings(self):
for enc in ["utf8mb4", "utf8"]:
eng = engines.testing_engine(
options={"connect_args": {"charset": enc, "use_unicode": 0}}
)
conn = eng.connect()
eq_(conn.dialect._connection_charset, enc)
def test_no_show_variables(self):
from sqlalchemy.testing import mock
engine = engines.testing_engine()
def my_execute(self, statement, *args, **kw):
if statement.startswith("SHOW VARIABLES"):
statement = "SELECT 1 FROM DUAL WHERE 1=0"
return real_exec(self, statement, *args, **kw)
real_exec = engine._connection_cls.exec_driver_sql
with mock.patch.object(
engine._connection_cls, "exec_driver_sql", my_execute
):
with expect_warnings(
"Could not retrieve SQL_MODE; please ensure the "
"MySQL user has permissions to SHOW VARIABLES"
):
engine.connect()
def test_no_default_isolation_level(self):
from sqlalchemy.testing import mock
engine = engines.testing_engine()
real_isolation_level = testing.db.dialect.get_isolation_level
def fake_isolation_level(connection):
connection = mock.Mock(
cursor=mock.Mock(
return_value=mock.Mock(
fetchone=mock.Mock(return_value=None)
)
)
)
return real_isolation_level(connection)
with mock.patch.object(
engine.dialect, "get_isolation_level", fake_isolation_level
):
with expect_warnings(
"Could not retrieve transaction isolation level for MySQL "
"connection."
):
engine.connect()
def test_autocommit_isolation_level(self):
c = testing.db.connect().execution_options(
isolation_level="AUTOCOMMIT"
)
assert c.exec_driver_sql("SELECT @@autocommit;").scalar()
c = c.execution_options(isolation_level="READ COMMITTED")
assert not c.exec_driver_sql("SELECT @@autocommit;").scalar()
def test_isolation_level(self):
values = [
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SERIALIZABLE",
]
for value in values:
c = testing.db.connect().execution_options(isolation_level=value)
eq_(testing.db.dialect.get_isolation_level(c.connection), value)
class ParseVersionTest(fixtures.TestBase):
@testing.combinations(
((10, 2, 7), "10.2.7-MariaDB", (10, 2, 7, "MariaDB"), True),
(
(10, 2, 7),
"5.6.15.10.2.7-MariaDB",
(5, 6, 15, 10, 2, 7, "MariaDB"),
True,
),
((10, 2, 10), "10.2.10-MariaDB", (10, 2, 10, "MariaDB"), True),
((5, 7, 20), "5.7.20", (5, 7, 20), False),
((5, 6, 15), "5.6.15", (5, 6, 15), False),
(
(10, 2, 6),
"10.2.6.MariaDB.10.2.6+maria~stretch-log",
(10, 2, 6, "MariaDB", 10, 2, "6+maria~stretch", "log"),
True,
),
(
(10, 1, 9),
"10.1.9-MariaDBV1.0R050D002-20170809-1522",
(10, 1, 9, "MariaDB", "V1", "0R050D002", 20170809, 1522),
True,
),
)
def test_mariadb_normalized_version(
self, expected, raw_version, version, is_mariadb
):
dialect = mysql.dialect()
eq_(dialect._parse_server_version(raw_version), version)
dialect.server_version_info = version
eq_(dialect._mariadb_normalized_version_info, expected)
assert dialect._is_mariadb is is_mariadb
@testing.combinations(
(True, (10, 2, 7, "MariaDB")),
(True, (5, 6, 15, 10, 2, 7, "MariaDB")),
(False, (10, 2, 10, "MariaDB")),
(False, (5, 7, 20)),
(False, (5, 6, 15)),
(True, (10, 2, 6, "MariaDB", 10, 2, "6+maria~stretch", "log")),
)
def test_mariadb_check_warning(self, expect_, version):
dialect = mysql.dialect()
dialect.server_version_info = version
if expect_:
with expect_warnings(
".*before 10.2.9 has known issues regarding "
"CHECK constraints"
):
dialect._warn_for_known_db_issues()
else:
dialect._warn_for_known_db_issues()
class RemoveUTCTimestampTest(fixtures.TablesTest):
"""This test exists because we removed the MySQL dialect's
override of the UTC_TIMESTAMP() function, where the commit message
for this feature stated that "it caused problems with executemany()".
Since no example was provided, we are trying lots of combinations
here.
[ticket:3966]
"""
__only_on__ = "mysql"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("data", DateTime),
)
Table(
"t_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("idata", DateTime, default=func.utc_timestamp()),
Column("udata", DateTime, onupdate=func.utc_timestamp()),
)
def test_insert_executemany(self):
with testing.db.connect() as conn:
conn.execute(
self.tables.t.insert().values(data=func.utc_timestamp()),
[{"x": 5}, {"x": 6}, {"x": 7}],
)
def test_update_executemany(self):
with testing.db.connect() as conn:
timestamp = datetime.datetime(2015, 4, 17, 18, 5, 2)
conn.execute(
self.tables.t.insert(),
[
{"x": 5, "data": timestamp},
{"x": 6, "data": timestamp},
{"x": 7, "data": timestamp},
],
)
conn.execute(
self.tables.t.update()
.values(data=func.utc_timestamp())
.where(self.tables.t.c.x == bindparam("xval")),
[{"xval": 5}, {"xval": 6}, {"xval": 7}],
)
def test_insert_executemany_w_default(self):
with testing.db.connect() as conn:
conn.execute(
self.tables.t_default.insert(), [{"x": 5}, {"x": 6}, {"x": 7}]
)
def test_update_executemany_w_default(self):
with testing.db.connect() as conn:
timestamp = datetime.datetime(2015, 4, 17, 18, 5, 2)
conn.execute(
self.tables.t_default.insert(),
[
{"x": 5, "idata": timestamp},
{"x": 6, "idata": timestamp},
{"x": 7, "idata": timestamp},
],
)
conn.execute(
self.tables.t_default.update()
.values(idata=func.utc_timestamp())
.where(self.tables.t_default.c.x == bindparam("xval")),
[{"xval": 5}, {"xval": 6}, {"xval": 7}],
)
class SQLModeDetectionTest(fixtures.TestBase):
__only_on__ = "mysql"
__backend__ = True
def _options(self, modes):
def connect(con, record):
cursor = con.cursor()
cursor.execute("set sql_mode='%s'" % (",".join(modes)))
e = engines.testing_engine(
options={
"pool_events": [
(connect, "first_connect"),
(connect, "connect"),
]
}
)
return e
def test_backslash_escapes(self):
engine = self._options(["NO_BACKSLASH_ESCAPES"])
c = engine.connect()
assert not engine.dialect._backslash_escapes
c.close()
engine.dispose()
engine = self._options([])
c = engine.connect()
assert engine.dialect._backslash_escapes
c.close()
engine.dispose()
def test_ansi_quotes(self):
engine = self._options(["ANSI_QUOTES"])
c = engine.connect()
assert engine.dialect._server_ansiquotes
c.close()
engine.dispose()
def test_combination(self):
engine = self._options(["ANSI_QUOTES,NO_BACKSLASH_ESCAPES"])
c = engine.connect()
assert engine.dialect._server_ansiquotes
assert not engine.dialect._backslash_escapes
c.close()
engine.dispose()
class ExecutionTest(fixtures.TestBase):
"""Various MySQL execution special cases."""
__only_on__ = "mysql"
__backend__ = True
def test_charset_caching(self):
engine = engines.testing_engine()
cx = engine.connect()
meta = MetaData()
charset = engine.dialect._detect_charset(cx)
meta.reflect(cx)
eq_(cx.dialect._connection_charset, charset)
cx.close()
def test_sysdate(self):
d = testing.db.scalar(func.sysdate())
assert isinstance(d, datetime.datetime)
class AutocommitTextTest(test_execute.AutocommitTextTest):
__only_on__ = "mysql"
def test_load_data(self):
self._test_keyword("LOAD DATA STUFF")
def test_replace(self):
self._test_keyword("REPLACE THING")
|
from accounts.models import Practice
def create_practice(request, strategy, backend, uid, response={}, details={}, user=None, social=None, *args, **kwargs):
"""
    If the user already has a practice, keep it; otherwise create a new practice.
"""
practice, created = Practice.objects.update_or_create(user=user)
return None
|
from flask import Flask
from flask import render_template, request
app = Flask(__name__)
@app.route("/")
def main():
room = request.args.get('room', '')
if room:
return render_template('watch.html')
return render_template('index.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
|
from nose.tools import with_setup
import os
import hk_glazer as js2deg
import subprocess
import json
class TestClass:
@classmethod
def setup_class(cls):
cls.here = os.path.dirname(__file__)
cls.data = cls.here + '/data'
def test_1(self):
'''Test 1: Check that json_to_degree works when imported'''
with open(self.data + "/json_test_in.json") as config_file:
config_dict = json.load(config_file)
gen_str = js2deg.dict_to_dat(config_dict)
with open(self.data + "/json_test_out.txt") as verif_file:
test_str = verif_file.read()
assert(test_str == gen_str)
pass
def test_2(self):
'''Test 2: Check command line execution when saving to file'''
cmd = os.path.abspath(self.here + '/../../bin/hk_glazer')
print(cmd)
subprocess.check_call([cmd, "js2degree", self.data + "/json_test_in.json", "-o=test2.txt", "-s"])
with open("test2.txt") as file:
gen_str = file.read()
with open(self.data + "/json_test_out.txt") as file:
test_str = file.read()
assert(test_str == gen_str)
os.remove("test2.txt")
pass
def test_3(self):
'''Test 3: Command line execution when outfile already exists'''
cmd = os.path.abspath(self.here + '/../../bin/hk_glazer')
subprocess.check_call([cmd, "js2degree", self.data + "/json_test_in.json", "-o=test3.txt", "-s"])
try:
subprocess.check_call([cmd,"js2degree", self.data + "/json_test_in.json", "-o=test3.txt"])
except Exception as e:
#print(type(e))
assert(type(e) == subprocess.CalledProcessError)
pass
else:
assert(False)
finally:
os.remove("test3.txt")
|
from django.conf.urls import url
from . import views
from django.views.decorators.cache import cache_page
app_name = 'webinter'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^logout/$', views.logout_view, name='logout'),
]
|
"""
Money doctests as unittest Suite
"""
from __future__ import absolute_import
import doctest
import unittest
import money.six
FILES = (
'../../README.rst',
)
def load_tests(loader, tests, pattern):
# RADAR Python 2.x
if money.six.PY2:
# Doc tests are Python 3.x
return unittest.TestSuite()
return doctest.DocFileSuite(*FILES)
|
"""
一个ATR-RSI指标结合的交易策略,适合用在股指的1分钟和5分钟线上。
注意事项:
1. 作者不对交易盈利做任何保证,策略代码仅供参考
2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装
3. 将IF0000_1min.csv用ctaHistoryData.py导入MongoDB后,直接运行本文件即可回测策略
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
class AtrRsiStrategy(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'AtrRsiStrategy'
author = u'用Python的交易员'
    # Strategy parameters
    atrLength = 22          # window size for computing the ATR indicator
    atrMaLength = 10        # window size for the moving average of ATR
    rsiLength = 5           # window size for computing RSI
    rsiEntry = 16           # RSI entry signal offset
    trailingPercent = 0.8   # trailing stop, in percent
    initDays = 10           # number of days of data used for initialization
    # Strategy variables
    bar = None                  # bar (K-line) object
    barMinute = EMPTY_STRING    # minute of the current bar
    bufferSize = 100            # size of the data buffer
    bufferCount = 0             # number of bars buffered so far
    highArray = np.zeros(bufferSize)     # array of bar highs
    lowArray = np.zeros(bufferSize)      # array of bar lows
    closeArray = np.zeros(bufferSize)    # array of bar closes
    atrCount = 0                # number of ATR values buffered so far
    atrArray = np.zeros(bufferSize)      # array of ATR values
    atrValue = 0                # latest ATR value
    atrMa = 0                   # moving average of ATR
    rsiValue = 0                # latest RSI value
    rsiBuy = 0                  # RSI threshold for opening long positions
    rsiSell = 0                 # RSI threshold for opening short positions
    intraTradeHigh = 0          # highest price during the holding period, used for the trailing stop
    intraTradeLow = 0           # lowest price during the holding period, used for the trailing stop
    orderList = []              # list of IDs of working orders
    # Parameter list, storing the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'atrLength',
'atrMaLength',
'rsiLength',
'rsiEntry',
'trailingPercent']
    # Variable list, storing the names of the variables
varList = ['inited',
'trading',
'pos',
'atrValue',
'atrMa',
'rsiValue',
'rsiBuy',
'rsiSell']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(AtrRsiStrategy, self).__init__(ctaEngine, setting)
#----------------------------------------------------------------------
    def onInit(self):
        """Initialize the strategy (must be implemented by the user subclass)"""
        self.writeCtaLog(u'%s strategy initializing' % self.name)
        # Initialize the RSI entry thresholds
        self.rsiBuy = 50 + self.rsiEntry
        self.rsiSell = 50 - self.rsiEntry
        # Load historical data and initialize the strategy values by replaying it
        initData = self.loadBar(self.initDays)
        for bar in initData:
            self.onBar(bar)
        self.putEvent()
#----------------------------------------------------------------------
    def onStart(self):
        """Start the strategy (must be implemented by the user subclass)"""
        self.writeCtaLog(u'%s strategy started' % self.name)
        self.putEvent()
#----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (must be implemented by the user subclass)"""
        self.writeCtaLog(u'%s strategy stopped' % self.name)
        self.putEvent()
#----------------------------------------------------------------------
    def onTick(self, tick):
        """Handle a new tick (must be implemented by the user subclass)"""
        # Build minute bars from ticks
        tickMinute = tick.datetime.minute
        if tickMinute != self.barMinute:
            if self.bar:
                self.onBar(self.bar)
            bar = CtaBarData()
            bar.vtSymbol = tick.vtSymbol
            bar.symbol = tick.symbol
            bar.exchange = tick.exchange
            bar.open = tick.lastPrice
            bar.high = tick.lastPrice
            bar.low = tick.lastPrice
            bar.close = tick.lastPrice
            bar.date = tick.date
            bar.time = tick.time
            bar.datetime = tick.datetime    # the bar timestamp is the time of its first tick
            self.bar = bar                  # stored on self to avoid one attribute lookup per tick
            self.barMinute = tickMinute     # update the current minute
        else:                               # otherwise keep accumulating into the current bar
            bar = self.bar                  # local alias, again for speed
            bar.high = max(bar.high, tick.lastPrice)
            bar.low = min(bar.low, tick.lastPrice)
            bar.close = tick.lastPrice
#----------------------------------------------------------------------
    def onBar(self, bar):
        """Handle a new bar (must be implemented by the user subclass)"""
        # Cancel previously issued orders that have not yet filled (both limit and stop orders)
        for orderID in self.orderList:
            self.cancelOrder(orderID)
        self.orderList = []
        # Store the bar data
        self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
        self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
        self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
        self.closeArray[-1] = bar.close
        self.highArray[-1] = bar.high
        self.lowArray[-1] = bar.low
        self.bufferCount += 1
        if self.bufferCount < self.bufferSize:
            return
        # Compute the indicator values
        self.atrValue = talib.ATR(self.highArray,
                                  self.lowArray,
                                  self.closeArray,
                                  self.atrLength)[-1]
        self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]
        self.atrArray[-1] = self.atrValue
        self.atrCount += 1
        if self.atrCount < self.bufferSize:
            return
        self.atrMa = talib.MA(self.atrArray,
                              self.atrMaLength)[-1]
        self.rsiValue = talib.RSI(self.closeArray,
                                  self.rsiLength)[-1]
        # Decide whether to trade
        # Currently holding no position
        if self.pos == 0:
            self.intraTradeHigh = bar.high
            self.intraTradeLow = bar.low
            # ATR rising above its moving average means short-term volatility is picking up,
            # i.e. a trend is more likely, which is favourable for opening a CTA position
            if self.atrValue > self.atrMa:
                # In trending markets the RSI saturates in the overbought/oversold zones,
                # which is used here as the entry signal
                if self.rsiValue > self.rsiBuy:
                    # To improve the fill probability, the order is placed 5 index points beyond the market
                    self.buy(bar.close+5, 1)
                    return
                if self.rsiValue < self.rsiSell:
                    self.short(bar.close-5, 1)
                    return
        # Holding a long position
        if self.pos == 1:
            # Track the highest price during the long holding period, and reset the lowest price
            self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
            self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
            longStop = self.intraTradeHigh * (1-self.trailingPercent/100)
            # Issue a local stop order and record its ID so it can be cancelled later
            orderID = self.sell(longStop, 1, stop=True)
            self.orderList.append(orderID)
            return
        # Holding a short position
        if self.pos == -1:
            self.intraTradeLow = min(self.intraTradeLow, bar.low)
            self.intraTradeHigh = bar.high
            shortStop = self.intraTradeLow * (1+self.trailingPercent/100)
            orderID = self.cover(shortStop, 1, stop=True)
            self.orderList.append(orderID)
            return
        # Emit a status update event
        self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
pass
if __name__ == '__main__':
    # Allow backtesting by running this file directly
    # PyQt4 is imported to make sure matplotlib uses PyQt4 rather than PySide,
    # which would otherwise fail to initialize
    from ctaBacktesting import *
    from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine to bar-based backtesting mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
    engine.setStartDate('20120101')
    # Load the historical data into the engine
    engine.loadHistoryData(MINUTE_DB_NAME, 'IF0000')
    # Set the product-related parameters
    engine.setSlippage(0.2)     # one tick of the stock index
    engine.setRate(0.3/10000)   # commission of 0.3 per 10,000
    engine.setSize(300)         # contract multiplier of the stock index
    # Create the strategy instance inside the engine
    engine.initStrategy(AtrRsiStrategy, {})
    # Run the backtest
    engine.runBacktesting()
    # Show the backtest results
    engine.showBacktestingResult()
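# --- Hedged example (editor's addition, not part of the original strategy) ---
# A standalone sketch of the indicator logic in onBar() above: ATR rising above
# its own moving average is the volatility/trend filter, and RSI crossing the
# buy/sell thresholds picks the direction. The synthetic prices are illustrative
# only; the parameter values mirror the class attributes (22, 10, 5, 16).
def _atr_rsi_signal_example():
    rng = np.random.RandomState(0)
    close = 3000 + np.cumsum(rng.randn(200))
    high = close + np.abs(rng.randn(200))
    low = close - np.abs(rng.randn(200))
    atr = talib.ATR(high, low, close, 22)
    atrMa = talib.MA(atr[~np.isnan(atr)], 10)   # drop the NaN warm-up values first
    rsi = talib.RSI(close, 5)
    rsiBuy, rsiSell = 50 + 16, 50 - 16
    if atr[-1] > atrMa[-1]:                     # volatility picking up
        if rsi[-1] > rsiBuy:
            return 'open long'
        if rsi[-1] < rsiSell:
            return 'open short'
    return 'no entry'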
|
import os
import yaml
MONGO_USERNAME = os.getenv('MONGO_USERNAME', None)
MONGO_PASSWORD = os.getenv('MONGO_PASSWORD', None)
MONGODB_HOST = os.getenv('MONGODB_HOST', '127.0.0.1')
MONGODB_PORT = int(os.getenv('MONGODB_PORT', '27017'))
MONGODB_SERVERS = os.getenv('MONGODB_SERVERS') \
or '{}:{}'.format(MONGODB_HOST, MONGODB_PORT)
MONGODB_DEFAULT_URL = 'mongodb://{}'.format(MONGODB_SERVERS)
MONGO_URL = os.getenv('MONGO_URL') or MONGODB_DEFAULT_URL
MONGO_INCLUDES = os.getenv('MONGO_INCLUDES', '')
ES_URL = os.getenv('ES_URL', 'http://localhost:9200')
ES_INDEXES = yaml.safe_load(os.getenv('ES_INDEXES') or '{}')
ES_TIMEOUT_SECONDS = int(os.getenv('ES_TIMEOUT_SECONDS', '100'))
LOG_VERBOSITY = int(os.getenv('LOG_VERBOSITY', 2))
MONGO_CONNECTOR_CONFIG = 'mongo-connector.json'
DEFAULTS = {
'es': {
'url': ES_URL,
'indexes': ES_INDEXES
},
'mongo-connector': {
'mainAddress': MONGO_URL,
'authentication': {
'adminUsername': MONGO_USERNAME,
'password': MONGO_PASSWORD
},
'namespaces': {
'include': MONGO_INCLUDES.split(','),
},
'timezoneAware': True,
'docManagers': [
{
'docManager': 'elastic_doc_manager',
'targetURL': ES_URL,
"args": {
"clientOptions": {
"timeout": ES_TIMEOUT_SECONDS
}
}
}
],
'logging': {
'type': 'stream'
},
'verbosity': LOG_VERBOSITY,
'continueOnError': True
},
}
CONFIG_LOCATION = os.getenv('CONFIG_LOCATION')
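# --- Hedged example (editor's addition) ---
# A sketch of how the settings above could be written out as the
# mongo-connector config file named by MONGO_CONNECTOR_CONFIG. The helper is
# hypothetical; the original module only declares the values.
def write_mongo_connector_config(path=MONGO_CONNECTOR_CONFIG):
    import json
    with open(path, 'w') as fh:
        json.dump(DEFAULTS['mongo-connector'], fh, indent=2)
    return path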
|
import unittest
import play_file
class TestAssemblyReader(unittest.TestCase):
def test_version_reader(self):
assembly_reader = play_file.AssemblyReader()
version = assembly_reader.get_assembly_version('AssemblyInfo.cs')
self.assertEqual(version, '7.3.1.0210')
def test_version_writer(self):
new_version = '7.3.1.0228'
assembly_writer = play_file.AssemblyWriter()
version = assembly_writer.update_assembly_version('AssemblyInfo.cs', new_version)
self.assertEqual(version, new_version)
|
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
import requests
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js
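# --- Hedged example (editor's addition) ---
# A sketch of using the helpers above against the Telegram Bot API getUpdates
# endpoint. The token value is a placeholder, and this call is not part of the
# original module.
def get_updates(token):
    url = "https://api.telegram.org/bot{}/getUpdates".format(token)
    js = get_json_from_url(url)
    # a successful response looks like {"ok": true, "result": [...]}
    return js.get("result", [])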
|
import os
import sys
abs_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, abs_path)
__version__ = '0.27.0'
|
import json
import os
from pokemongo_bot import inventory
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.human_behaviour import action_delay
from pokemongo_bot.services.item_recycle_worker import ItemRecycler
from pokemongo_bot.tree_config_builder import ConfigException
from pokemongo_bot.worker_result import WorkerResult
DEFAULT_MIN_EMPTY_SPACE = 6
class RecycleItems(BaseTask):
"""
    Recycle unwanted items when the free space left in the inventory drops below the configured minimum (6 by default).
    You can use either the item's name or its id. For the full list of items see ../../data/items.json
    It's highly recommended to put this task before the move_to_fort and spin_fort tasks in the config file, so you'll most likely be able to loot.
    Example config:
{
"type": "RecycleItems",
"config": {
"min_empty_space": 6, # 6 by default
"item_filter": {
"Pokeball": {"keep": 20},
"Greatball": {"keep": 50},
"Ultraball": {"keep": 100},
"Potion": {"keep": 0},
"Super Potion": {"keep": 0},
"Hyper Potion": {"keep": 20},
"Max Potion": {"keep": 50},
"Revive": {"keep": 0},
"Max Revive": {"keep": 20},
"Razz Berry": {"keep": 20}
}
}
}
"""
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.items_filter = self.config.get('item_filter', {})
self.min_empty_space = self.config.get('min_empty_space', None)
self._validate_item_filter()
def _validate_item_filter(self):
"""
Validate user's item filter config
:return: Nothing.
:rtype: None
:raise: ConfigException: When an item doesn't exist in ../../data/items.json
"""
item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
for config_item_name, bag_count in self.items_filter.iteritems():
if config_item_name not in item_list.viewvalues():
if config_item_name not in item_list:
raise ConfigException(
"item {} does not exist, spelling mistake? (check for valid item names in data/items.json)".format(
config_item_name))
def should_run(self):
"""
Returns a value indicating whether the recycling process should be run.
:return: True if the recycling process should be run; otherwise, False.
:rtype: bool
"""
if inventory.Items.get_space_left() < (DEFAULT_MIN_EMPTY_SPACE if self.min_empty_space is None else self.min_empty_space):
return True
return False
def work(self):
"""
Start the process of recycling items if necessary.
:return: Returns whether or not the task went well
:rtype: WorkerResult
"""
# TODO: Use new inventory everywhere and then remove this inventory update
inventory.refresh_inventory()
worker_result = WorkerResult.SUCCESS
if self.should_run():
for item_in_inventory in inventory.items().all():
if self.item_should_be_recycled(item_in_inventory):
                # Make the bot appear more human
                action_delay(self.bot.config.action_wait_min, self.bot.config.action_wait_max)
                # If any recycling call returns an error, the whole task is treated as an error.
if ItemRecycler(self.bot, item_in_inventory, self.get_amount_to_recycle(item_in_inventory)).work() == WorkerResult.ERROR:
worker_result = WorkerResult.ERROR
return worker_result
def item_should_be_recycled(self, item):
"""
Returns a value indicating whether the item should be recycled.
:param item: The Item to test
        :return: True if the item should be recycled; otherwise, False.
:rtype: bool
"""
return (item.name in self.items_filter or str(item.id) in self.items_filter) and self.get_amount_to_recycle(item) > 0
def get_amount_to_recycle(self, item):
"""
Determine the amount to recycle accordingly to user config
:param item: Item to determine the amount to recycle.
:return: The amount to recycle
:rtype: int
"""
amount_to_keep = self.get_amount_to_keep(item)
return 0 if amount_to_keep is None else item.count - amount_to_keep
def get_amount_to_keep(self, item):
"""
Determine item's amount to keep in inventory.
:param item:
:return: Item's amount to keep in inventory.
:rtype: int
"""
        item_filter_config = self.items_filter.get(item.name, 0)
        if item_filter_config != 0:
            return item_filter_config.get('keep', 20)
        else:
            item_filter_config = self.items_filter.get(str(item.id), 0)
            if item_filter_config != 0:
                return item_filter_config.get('keep', 20)
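# --- Hedged example (editor's addition, not part of the original task) ---
# A standalone sketch of the keep/recycle arithmetic implemented by
# get_amount_to_keep() and get_amount_to_recycle() above, using a plain dict
# in place of the inventory item object.
def _amount_to_recycle_example(item_filter, name, count):
    keep = item_filter.get(name, {}).get('keep')
    # mirrors get_amount_to_recycle(); the caller only recycles when this is > 0
    return 0 if keep is None else count - keep
# e.g. _amount_to_recycle_example({"Pokeball": {"keep": 20}}, "Pokeball", 35) == 15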
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
node = parent = None
def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
# search for the node and its parent
self.findNodeAndParent(root, key)
if self.node == root and not root.left and not root.right:
return None
if self.node:
self.deleteNodeHelper(self.node, self.parent)
return root
def deleteNodeHelper(self, node, parent):
# if node is a leaf
if not node.left and not node.right:
if parent:
if parent.left == node:
parent.left = None
else:
parent.right = None
return
# if node has only one child
if not node.left or not node.right:
child = node.left if not node.right else node.right
node.val = child.val
node.left = child.left
node.right = child.right
return
# node has two children
successor, succesorParent = self.getNodeSuccessor(node)
node.val = successor.val
self.deleteNodeHelper(successor, succesorParent)
def getNodeSuccessor(self, node):
succesorParent = node
successor = node.right
while successor.left:
succesorParent = successor
successor = successor.left
return successor, succesorParent
def findNodeAndParent(self, root, key):
if not root:
return
if root.val == key:
self.node = root
return
self.parent = root
if key < root.val:
self.findNodeAndParent(root.left, key)
else:
self.findNodeAndParent(root.right, key)
root = TreeNode(10)
root.left = TreeNode(3)
root.left.left = TreeNode(2)
root.left.right = TreeNode(8)
root.left.right.left = TreeNode(7)
root.left.right.right = TreeNode(9)
root.right = TreeNode(15)
root.right.left = TreeNode(13)
root.right.right = TreeNode(17)
root.right.right.right = TreeNode(19)
ob = Solution()
root = TreeNode(50)
root = ob.deleteNode(root, 50)
print(root)
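# --- Hedged example (editor's addition) ---
# The demo above deletes the only node of a one-node tree, so it prints None.
# As an extra check, rebuild the ten-node tree, delete key 3, and confirm the
# in-order traversal is still sorted.
def _inorder(node, acc=None):
    if acc is None:
        acc = []
    if node:
        _inorder(node.left, acc)
        acc.append(node.val)
        _inorder(node.right, acc)
    return acc
check_root = TreeNode(10)
check_root.left = TreeNode(3)
check_root.left.left = TreeNode(2)
check_root.left.right = TreeNode(8)
check_root.left.right.left = TreeNode(7)
check_root.left.right.right = TreeNode(9)
check_root.right = TreeNode(15)
check_root.right.left = TreeNode(13)
check_root.right.right = TreeNode(17)
check_root.right.right.right = TreeNode(19)
check_root = Solution().deleteNode(check_root, 3)
print(_inorder(check_root))  # [2, 7, 8, 9, 10, 13, 15, 17, 19]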
|
"""
Collects all number values from the db.serverStatus() command, other
values are ignored.
* pymongo
"""
import diamond.collector
from diamond.collector import str_to_bool
import re
import zlib
try:
import pymongo
pymongo # workaround for pyflakes issue #13
except ImportError:
pymongo = None
try:
from pymongo import ReadPreference
ReadPreference # workaround for pyflakes issue #13
except ImportError:
ReadPreference = None
class MongoDBCollector(diamond.collector.Collector):
MAX_CRC32 = 4294967295
def __init__(self, *args, **kwargs):
self.__totals = {}
super(MongoDBCollector, self).__init__(*args, **kwargs)
def get_default_config_help(self):
config_help = super(MongoDBCollector, self).get_default_config_help()
config_help.update({
            'hosts': 'Array of hostname(:port) elements to get metrics from.'
                     ' Set an alias by prefixing host:port with alias@',
'host': 'A single hostname(:port) to get metrics from'
' (can be used instead of hosts and overrides it)',
'user': 'Username for authenticated login (optional)',
'passwd': 'Password for authenticated login (optional)',
'databases': 'A regex of which databases to gather metrics for.'
' Defaults to all databases.',
'ignore_collections': 'A regex of which collections to ignore.'
' MapReduce temporary collections (tmp.mr.*)'
' are ignored by default.',
'collection_sample_rate': 'Only send stats for a consistent subset '
'of collections. This is applied after collections are ignored via'
                ' ignore_collections. Sampling uses crc32 so it is consistent across'
' replicas. Value between 0 and 1. Default is 1',
'network_timeout': 'Timeout for mongodb connection (in seconds).'
' There is no timeout by default.',
'simple': 'Only collect the same metrics as mongostat.',
'translate_collections': 'Translate dot (.) to underscores (_)'
' in collection names.',
'ssl': 'True to enable SSL connections to the MongoDB server.'
' Default is False'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MongoDBCollector, self).get_default_config()
config.update({
'path': 'mongo',
'hosts': ['localhost'],
'user': None,
'passwd': None,
'databases': '.*',
'ignore_collections': '^tmp\.mr\.',
'network_timeout': None,
'simple': 'False',
'translate_collections': 'False',
'collection_sample_rate': 1,
'ssl': False
})
return config
def collect(self):
"""Collect number values from db.serverStatus()"""
if pymongo is None:
self.log.error('Unable to import pymongo')
return
# we need this for backwards compatibility
if 'host' in self.config:
self.config['hosts'] = [self.config['host']]
# convert network_timeout to integer
if self.config['network_timeout']:
self.config['network_timeout'] = int(
self.config['network_timeout'])
# convert collection_sample_rate to float
if self.config['collection_sample_rate']:
self.config['collection_sample_rate'] = float(
self.config['collection_sample_rate'])
# use auth if given
if 'user' in self.config:
user = self.config['user']
else:
user = None
if 'passwd' in self.config:
passwd = self.config['passwd']
else:
passwd = None
for host in self.config['hosts']:
if len(self.config['hosts']) == 1:
# one host only, no need to have a prefix
base_prefix = []
else:
matches = re.search('((.+)\@)?(.+)?', host)
alias = matches.group(2)
host = matches.group(3)
if alias is None:
base_prefix = [re.sub('[:\.]', '_', host)]
else:
base_prefix = [alias]
try:
# Ensure that the SSL option is a boolean.
if type(self.config['ssl']) is str:
self.config['ssl'] = str_to_bool(self.config['ssl'])
if ReadPreference is None:
conn = pymongo.Connection(
host,
network_timeout=self.config['network_timeout'],
ssl=self.config['ssl'],
slave_okay=True
)
else:
conn = pymongo.Connection(
host,
network_timeout=self.config['network_timeout'],
ssl=self.config['ssl'],
read_preference=ReadPreference.SECONDARY,
)
except Exception, e:
                self.log.error("Couldn't connect to mongodb: %s", e)
continue
# try auth
if user:
try:
conn.admin.authenticate(user, passwd)
except Exception, e:
                    self.log.error('User auth given, but could not authenticate'
                                   + ' with host: %s, err: %s' % (host, e))
                    return {}
data = conn.db.command('serverStatus')
self._publish_transformed(data, base_prefix)
if str_to_bool(self.config['simple']):
data = self._extract_simple_data(data)
self._publish_dict_with_prefix(data, base_prefix)
db_name_filter = re.compile(self.config['databases'])
ignored_collections = re.compile(self.config['ignore_collections'])
sample_threshold = self.MAX_CRC32 * self.config[
'collection_sample_rate']
for db_name in conn.database_names():
if not db_name_filter.search(db_name):
continue
db_stats = conn[db_name].command('dbStats')
db_prefix = base_prefix + ['databases', db_name]
self._publish_dict_with_prefix(db_stats, db_prefix)
for collection_name in conn[db_name].collection_names():
if ignored_collections.search(collection_name):
continue
if (self.config['collection_sample_rate'] < 1 and (
zlib.crc32(collection_name) & 0xffffffff
) > sample_threshold):
continue
collection_stats = conn[db_name].command('collstats',
collection_name)
if str_to_bool(self.config['translate_collections']):
collection_name = collection_name.replace('.', '_')
collection_prefix = db_prefix + [collection_name]
self._publish_dict_with_prefix(collection_stats,
collection_prefix)
def _publish_transformed(self, data, base_prefix):
""" Publish values of type: counter or percent """
self._publish_dict_with_prefix(data.get('opcounters', {}),
base_prefix + ['opcounters_per_sec'],
self.publish_counter)
self._publish_dict_with_prefix(data.get('opcountersRepl', {}),
base_prefix + ['opcountersRepl_per_sec'],
self.publish_counter)
self._publish_metrics(base_prefix + ['backgroundFlushing_per_sec'],
'flushes',
data.get('backgroundFlushing', {}),
self.publish_counter)
self._publish_dict_with_prefix(data.get('network', {}),
base_prefix + ['network_per_sec'],
self.publish_counter)
self._publish_metrics(base_prefix + ['extra_info_per_sec'],
'page_faults',
data.get('extra_info', {}),
self.publish_counter)
def get_dotted_value(data, key_name):
key_name = key_name.split('.')
for i in key_name:
data = data.get(i, {})
if not data:
return 0
return data
def compute_interval(data, total_name):
current_total = get_dotted_value(data, total_name)
total_key = '.'.join(base_prefix + [total_name])
last_total = self.__totals.get(total_key, current_total)
interval = current_total - last_total
self.__totals[total_key] = current_total
return interval
def publish_percent(value_name, total_name, data):
value = float(get_dotted_value(data, value_name) * 100)
interval = compute_interval(data, total_name)
key = '.'.join(base_prefix + ['percent', value_name])
self.publish_counter(key, value, time_delta=bool(interval),
interval=interval)
publish_percent('globalLock.lockTime', 'globalLock.totalTime', data)
publish_percent('indexCounters.btree.misses',
'indexCounters.btree.accesses', data)
locks = data.get('locks')
if locks:
if '.' in locks:
locks['_global_'] = locks['.']
del (locks['.'])
key_prefix = '.'.join(base_prefix + ['percent'])
db_name_filter = re.compile(self.config['databases'])
interval = compute_interval(data, 'uptimeMillis')
for db_name in locks:
if not db_name_filter.search(db_name):
continue
r = get_dotted_value(
locks,
'%s.timeLockedMicros.r' % db_name)
R = get_dotted_value(
locks,
'.%s.timeLockedMicros.R' % db_name)
value = float(r + R) / 10
if value:
self.publish_counter(
key_prefix + '.locks.%s.read' % db_name,
value, time_delta=bool(interval),
interval=interval)
w = get_dotted_value(
locks,
'%s.timeLockedMicros.w' % db_name)
W = get_dotted_value(
locks,
'%s.timeLockedMicros.W' % db_name)
value = float(w + W) / 10
if value:
self.publish_counter(
key_prefix + '.locks.%s.write' % db_name,
value, time_delta=bool(interval), interval=interval)
def _publish_dict_with_prefix(self, dict, prefix, publishfn=None):
for key in dict:
self._publish_metrics(prefix, key, dict, publishfn)
def _publish_metrics(self, prev_keys, key, data, publishfn=None):
"""Recursively publish keys"""
if not key in data:
return
value = data[key]
keys = prev_keys + [key]
if not publishfn:
publishfn = self.publish
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(keys, new_key, value)
elif isinstance(value, int) or isinstance(value, float):
publishfn('.'.join(keys), value)
elif isinstance(value, long):
publishfn('.'.join(keys), float(value))
def _extract_simple_data(self, data):
return {
'connections': data.get('connections'),
'globalLock': data.get('globalLock'),
'indexCounters': data.get('indexCounters')
}
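# --- Hedged example (editor's addition, not part of the collector) ---
# A sketch of the collection_sample_rate logic used in collect() above: a
# collection is kept when its crc32 value falls at or below
# MAX_CRC32 * collection_sample_rate, which yields the same subset on every
# replica because the hash depends only on the collection name.
def _collection_is_sampled(collection_name, sample_rate, max_crc32=4294967295):
    threshold = max_crc32 * sample_rate
    return (zlib.crc32(collection_name) & 0xffffffff) <= threshold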
|
import utilities as util
from utilities import parse_image_file, filterBoxes, voxel_2_world, mkdir
import numpy as np
import os
import json
import sys
from PIL import Image, ImageDraw
import SimpleITK as sitk
from env import *
def generate_scan_image(subset):
list_dirs = os.walk(TRUNK_DIR + subset)
jsobjs = []
output_dir = SAMPLE_DIR + subset
mkdir(output_dir)
for root, dirs, files in list_dirs:
for f in files:
if f.lower().endswith('mhd'):
key = os.path.splitext(f)[0]
numpyImage, numpyOrigin, numpySpacing = (
util.load_itk_image(
os.path.join(root, f)))
for z in range(numpyImage.shape[0]):
patch = numpyImage[z, 0:512, 0:512]
patch = util.normalizePlanes(patch)
im = Image.fromarray(patch * 255).convert('L')
output_filename = (
subset + "-" + key + "-" + str(z) + "-scan.bmp")
print(subset + '/' + output_filename)
im.save(os.path.join(
output_dir, output_filename))
jsobjs.append({
"image_path": subset + '/' + output_filename,
"rects":[]
}
)
with open(META_DIR + subset + '-scan.json', 'w') as f:
json.dump(jsobjs, f)
def get_image_map(data_root, input_file, threshold):
result_map = {}
with open(input_file) as f:
result_list = json.load(f)
for it in result_list:
key, subset, z = parse_image_file(it['file'])
src_file = os.path.join(
data_root, subset, key + ".mhd")
boxes = filterBoxes(it['box'], threshold)
if not result_map.get(src_file):
result_map[src_file] = []
result_map[src_file].append((key, z, boxes))
return result_map
def generate_result(result_map, output_file):
    with open(output_file, 'w') as fout:
fout.write("seriesuid,coordX,coordY,coordZ,probability\n")
for fkey, val in result_map.items():
itkimage = sitk.ReadImage(fkey)
for it in val:
                key, z, boxes = it
for box in boxes:
world_box = voxel_2_world(
[z, box[1], box[0]], itkimage)
csv_line = key + "," + str(world_box[2]) + "," + str(world_box[1]) + "," + str(world_box[0]) + "," + str(box[4])
fout.write(csv_line + "\n")
if __name__ == '__main__':
if sys.argv[1] == 'gen':
generate_scan_image(sys.argv[2])
else:
result_map = get_image_map(TRUNK_DIR, sys.argv[2], 0.01)
generate_result(result_map, OUTPUT_FILE)
|
from django.conf.urls import url
from audiotracks import feeds
from audiotracks import views
urlpatterns = [
url(r"^$", views.index, name="audiotracks"),
url(r"^(?P<page_number>\d+)/?$", views.index, name="audiotracks"),
url(r"^track/(?P<track_slug>.*)$", views.track_detail,
name="track_detail"),
url(r"^upload", views.upload_track, name="upload_track"),
url(r"^edit/(?P<track_id>.+)", views.edit_track, name="edit_track"),
url(r"^confirm_delete/(?P<track_id>\d+)$",
views.confirm_delete_track, name="confirm_delete_track"),
url(r"^delete$", views.delete_track, name="delete_track"),
url(r"^tracks$", views.user_index, name="user_index"),
url(r"^tracks/(?P<page_number>\d)/?$", views.user_index,
name="user_index"),
url(r"^feed/?$", feeds.choose_feed, name="tracks_feed"),
url(r"^player.js$", views.player_script, name="player_script"),
url(r"^m3u/?$", views.m3u, name="m3u"),
]
|
import bottle
import datetime
import time
@bottle.get('/')
def index():
return bottle.static_file('index.html', root='.')
@bottle.get('/stream')
def stream():
bottle.response.content_type = 'text/event-stream'
bottle.response.cache_control = 'no-cache'
while True:
yield 'data: %s\n\n' % str(datetime.datetime.now())
time.sleep(5)
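# --- Hedged example (editor's addition) ---
# A minimal client sketch for the /stream endpoint above: server-sent events
# arrive as "data: <payload>\n\n" frames, matching what the generator yields.
# Assumes the `requests` package is installed and the server is running on
# localhost:8080; neither is required by the original script.
def consume_stream(url='http://localhost:8080/stream', max_events=3):
    import requests
    events = []
    with requests.get(url, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith('data: '):
                events.append(line[len('data: '):])
                if len(events) >= max_events:
                    break
    return events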
if __name__ == '__main__':
bottle.run(host='0.0.0.0', port=8080, debug=True)
|
'''
destination.factory
'''
from destination.zeus import ZeusDestination
from destination.aws import AwsDestination
from exceptions import AutocertError
from config import CFG
from app import app
class DestinationFactoryError(AutocertError):
def __init__(self, destination):
msg = f'destination factory error with {destination}'
super(DestinationFactoryError, self).__init__(msg)
def create_destination(destination, ar, cfg, timeout, verbosity):
d = None
if destination == 'aws':
d = AwsDestination(ar, cfg, verbosity)
elif destination == 'zeus':
d = ZeusDestination(ar, cfg, verbosity)
else:
raise DestinationFactoryError(destination)
dests = list(CFG.destinations.zeus.keys())
if d.has_connectivity(timeout, dests):
return d
|
from .entity_health import EntityHealth
class PartitionHealth(EntityHealth):
"""Information about the health of a Service Fabric partition.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str or :class:`enum
<azure.servicefabric.models.enum>`
:param health_events: The list of health events reported on the entity.
:type health_events: list of :class:`HealthEvent
<azure.servicefabric.models.HealthEvent>`
:param unhealthy_evaluations:
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
:param health_statistics:
:type health_statistics: :class:`HealthStatistics
<azure.servicefabric.models.HealthStatistics>`
:param partition_id:
:type partition_id: str
:param replica_health_states: The list of replica health states associated
with the partition.
:type replica_health_states: list of :class:`ReplicaHealthState
<azure.servicefabric.models.ReplicaHealthState>`
"""
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'},
}
def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None, replica_health_states=None):
super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics)
self.partition_id = partition_id
self.replica_health_states = replica_health_states
|
import os,sys
from trans_rot_coords import *
import numpy as np
from read_energy_force_new import *
from grids_structures_general import DS,Grid_Quarts
from orient_struct_2 import OrientDS as OrientDS_2
from orient_struct_3 import OrientDS as OrientDS_3
AU2KCAL = 23.0605*27.2116
R2D = 180.0/3.14159265358979
pi4 = 0.78539816339744817
tMass = [15.999, 1.008, 1.008]
def get_com(coords):
x = [0,0,0]
totalM = 0
for i in range(len(coords)):
x = [ x[k]+ coords[i][k]*tMass[i] for k in range(3)]
totalM += tMass[i]
x = [x[k]/totalM for k in range(3)]
return x
def norm_prob(config,ndx,prob='wtr'):
if prob=='wtr':
v1 = np.array(config[ndx[1]]) - np.array(config[ndx[0]])
v2 = np.array(config[ndx[2]]) - np.array(config[ndx[0]])
vec = get_normal_unit(v1,v2)
return vec
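# --- Hedged example (editor's addition) ---
# A small illustration of the centre-of-mass formula used by get_com() above,
# x_com = sum_i m_i * x_i / sum_i m_i, with the O/H/H masses from tMass.
# The water geometry below is illustrative only.
def _com_example():
    water = [[0.000, 0.000, 0.117],   # O
             [0.000, 0.757, -0.469],  # H
             [0.000, -0.757, -0.469]] # H
    return get_com(water)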
class new_atom():
def __init__(self, line, ftype='gjf'):
if ftype=='gjf': self.addgjf(line)
elif ftype=='gms': self.addinp(line)
elif ftype=='pdb': self.addpdb(line)
def addgjf(self, line):
line = line.split()
self.a_nam = line[0]
self.x = [float(line[1]), float(line[2]), float(line[3])]
def addpdb(self, line):
self.line = line
self.i_atm = int(line[6:11])
self.a_nam = line[11:16].strip()
self.a_res = line[16:20].strip()
self.a_chn = line[20:22].strip()
self.i_res = int(line[22:26])
self.x = []
self.x.append(float(line[30:38]))
self.x.append(float(line[38:46]))
self.x.append(float(line[46:54]))
def addinp(self, line):
line = line.split()
self.a_nam = line[0]
self.x = [float(line[2]), float(line[3]), float(line[4])]
class coordinates():
def __init__(self, n1, n2, FragType, name=''):
## n1,n2 is the number of atoms in mole1 and mole2:
self.n1 = n1
self.n2 = n2
## records of operations of translation and rotation:
self.OperateNdx = []
self.Operation = []
## fragment type:
self.FT = FragType
## symmetry faces:
self.symface = DS[self.FT].symface
self.IsOriented = False
self.facendx = {'yx':2, 'xy':2,
'yz':0, 'zy':0,
'zx':1, 'xz':1,
'zarg':5,
'zben':6}
self.symm = [1,1,1]
self.center = 0
self.natoms = 0
self.original_atoms = []
self.name = name
def addatom(self, line, ftype='pdb'):
temp = new_atom(line, ftype)
self.original_atoms.append(temp)
self.natoms += 1
def addpdbatom(self, line):
self.original_atoms.append(new_atom(line, 'pdb'))
self.natoms += 1
def set_atom(self, i, atom):
if i>=len(self.original_atoms):
self.original_atoms.append( deepcopy(atom) )
self.natoms += 1
else: self.original_atoms[i] = deepcopy(atom)
def MirrorAll(self):
"""
According to the coords of the 1st atom in mole2.
"""
self.orignal_com = deepcopy(self.center2)
for face in self.symface:
fndx = self.facendx[face]
if self.center2[fndx] < 0.0:
self.symm[ fndx ] = -1
for i in range(self.n1, self.natoms):
self.atoms[i].x[fndx] *= -1
self._spherical_x()
def MirrorBackProperty(self):
for face in self.symface:
fndx = self.facendx[face]
if self.orignal_com[fndx] < 0.0:
self.symm[ fndx ] = -1
self.force[fndx] *= -1
for i in range(3):
if not i == fndx:
self.torque[i] *= -1
def ReorientToOrigin(self, cut=0.0000001):
self.atoms = deepcopy(self.original_atoms)
coord1 = get_com([self.atoms[0].x, self.atoms[1].x, self.atoms[2].x ])
coord2 = get_com([self.atoms[3].x, self.atoms[4].x, self.atoms[5].x ])
self.origin_center_coord = get_unit([coord2[i] - coord1[i] for i in range(3)])
dvec = DS[self.FT].calt_dvec( self.atoms[0].x, self.atoms[1].x, self.atoms[2].x )
for i in range(self.natoms):
self.atoms[i].x = translate(self.atoms[i].x, dvec)
self.OperateNdx.append(0)
self.Operation.append(np.array(dvec))
vec, ax0 = DS[self.FT].calt_vec1( self.atoms[0].x, self.atoms[1].x, self.atoms[2].x )
ang = angle(vec, ax0)
ax = get_normal(vec, ax0)
if ax[0]==0.0 and ax[1]==0.0 and ax[2]==0.0: pass
else:
for i in range(self.natoms):
self.atoms[i].x = rotate(self.atoms[i].x, ax, ang)
self.OperateNdx.append(1)
self.Operation.append([ax, ang])
vec, ax0 = DS[self.FT].calt_vec2( self.atoms[0].x, self.atoms[1].x, self.atoms[2].x )
ang = angle(vec, ax0)
if abs(ang)<cut: pass
else:
if abs(ang-np.pi)<cut: ax = [1,0,0]
else: ax = get_normal(vec, ax0)
for i in range(self.natoms):
self.atoms[i].x = rotate(self.atoms[i].x, ax, ang)
self.OperateNdx.append(2)
self.Operation.append([ax, ang])
self.IsOriented = True
self._spherical_x()
def ReorientToOldVec(self):
ax, ang = self.Operation[self.OperateNdx.index(2)]
self.force = rotate(self.force, ax, -1*ang)
self.torque = rotate(self.torque, ax, -1*ang)
ax, ang = self.Operation[self.OperateNdx.index(1)]
self.force = rotate(self.force, ax, -1*ang)
self.torque = rotate(self.torque, ax, -1*ang)
def _spherical_x(self):
"""
Calculate the coords in spherical coordination system for molecule 2.
"""
totalM = 0
x = [0,0,0]
for i in range(self.n1,self.natoms):
x = [ x[k]+self.atoms[i].x[k]*tMass[i-self.n1] for k in range(3)]
totalM += tMass[i-self.n1]
x = [x[k]/totalM for k in range(3)]
r = np.sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2])
#print "probe vector:", 4.0*x[0]/r, 4.0*x[1]/r, 4.0*x[2]/r
## phi of principal coords:
ang1 = np.pi*0.5 - np.arccos(x[2]/r)
## theta of principal coords (from -pi to pi):
if abs(x[0])<0.000001:
if x[1]>0: ang2 = np.pi*0.5
else: ang2 = np.pi*1.5
else:
ang2 = np.arctan(x[1]/x[0])
if x[0]<0: ang2 += np.pi
elif x[1]<0: ang2 += np.pi*2
self.r = r
self.ang1 = ang1
self.ang2 = ang2
self.center2 = x
def _spherical_orient(self):
"""
calculate the spherical coordinates for the orientational vector
"""
x = self.orientVec
r = length(x)
# phi, [-pi/2, pi/2]
ang1 = np.pi*0.5 - np.arccos(x[2]/r)
# theta, [0, 2*pi]
if abs(x[0])<0.000001:
if x[1]>0: ang2 = np.pi*0.5
else: ang2 = np.pi*1.5
else:
ang2 = np.arctan(x[1]/x[0])
if x[0]<0: ang2 += np.pi
elif x[1] <0: ang2 += np.pi*2
self.orient_ang1 = ang1
self.orient_ang2 = ang2
def indexing_orient_auto3(self,ri):
"""
find the index automatically for each subsection in which the orientational vector resides
"""
ang1 = self.orient_ang1
ang2 = self.orient_ang2
#print "<<<<<",ang1*R2D,ang2*R2D
OrientDS = self.OrientDS[ri]
#print "attention!!!"
#print OrientDS['wtr'].nGrid
if ang1<OrientDS['wtr'].PHI_angles[0] or ang1>OrientDS['wtr'].PHI_angles[-1]: ih = -1
for i in range(1,OrientDS['wtr'].nPhi):
if ang1 <= OrientDS['wtr'].PHI_angles[i]:
ih = i-1
break
ang1_ndx1 = ih
ang1_ndx2 = ih + 1
if ang1_ndx1 == OrientDS['wtr'].nPhi-2: # near the up vertex
ang1_ndx3 = ih -1
elif ang1_ndx1 == 0: # near the down vertex
ang1_ndx3 = ih + 2
else:
tmp1 = OrientDS['wtr'].PHI_angles[ih+2] - ang1
tmp2 = ang1 - OrientDS['wtr'].PHI_angles[ih-1]
if abs(tmp1) < abs(tmp2):
ang1_ndx3 = ih + 2
else:
ang1_ndx3 = ih - 1
phiList = [ang1_ndx1,ang1_ndx2,ang1_ndx3]
dgrids_sub_ndx = {}
dtheta_ndx = {}
# determine if use linear interpolation or use quadratic interpolation
if len(set(phiList)) == 2:
iflinear = 1
elif len(set(phiList)) == 3:
iflinear = 0
for kk in set(phiList):
dgrids_sub_ndx[kk] = []
dtheta_ndx[kk] = []
ip = -1
for i in range(1, OrientDS['wtr'].NTheta[kk]):
if ang2 <= OrientDS['wtr'].THETA_angles[kk][i]:
ip = i-1
break
if ip == -1: ip = OrientDS['wtr'].NTheta[kk]-1
#print kk, ip
ig = 0
for i in range(kk): ig += OrientDS['wtr'].NTheta[i]
ig += ip
dgrids_sub_ndx[kk].append(ig)
dtheta_ndx[kk].append(ip)
if ip == OrientDS['wtr'].NTheta[kk]-1:
if OrientDS['wtr'].NTheta[kk] == 1: #vertex
dgrids_sub_ndx[kk].append(ig)
dtheta_ndx[kk].append(0)
if iflinear == 0:
dgrids_sub_ndx[kk].append(ig)
dtheta_ndx[kk].append(0)
else:
dgrids_sub_ndx[kk].append(ig-OrientDS['wtr'].NTheta[kk]+1)
dtheta_ndx[kk].append(0+OrientDS['wtr'].NTheta[kk])
if iflinear == 0:
tmp1 = OrientDS['wtr'].THETA_angles[kk][1] - ang2 + 2*np.pi
tmp2 = ang2 - OrientDS['wtr'].THETA_angles[kk][ip-1]
if tmp1 < tmp2:
dgrids_sub_ndx[kk].append(ig-OrientDS['wtr'].NTheta[kk]+1+1)
dtheta_ndx[kk].append(0+OrientDS['wtr'].NTheta[kk]+1)
else:
dgrids_sub_ndx[kk].append(ig-1)
dtheta_ndx[kk].append(ip-1)
else:
dgrids_sub_ndx[kk].append(ig+1)
dtheta_ndx[kk].append(ip+1)
if iflinear == 0:
if ip+2 == OrientDS['wtr'].NTheta[kk]:
tmp1 = 2*np.pi - ang2
else:
tmp1 = OrientDS['wtr'].THETA_angles[kk][ip+2] - ang2
if ip == 0:
tmp2 = ang2 - OrientDS['wtr'].THETA_angles[kk][OrientDS['wtr'].NTheta[kk]-1] + 2*np.pi
else:
tmp2 = ang2 - OrientDS['wtr'].THETA_angles[kk][ip-1]
if tmp1 < tmp2:
if ip+2 == OrientDS['wtr'].NTheta[kk]:
dgrids_sub_ndx[kk].append(ig+1-OrientDS['wtr'].NTheta[kk]+1)
dtheta_ndx[kk].append(0+OrientDS['wtr'].NTheta[kk])
else:
dgrids_sub_ndx[kk].append(ig+2)
dtheta_ndx[kk].append(ip+2)
else:
if ip == 0:
dgrids_sub_ndx[kk].append(ig+OrientDS['wtr'].NTheta[kk]-1)
dtheta_ndx[kk].append(-1)
else:
dgrids_sub_ndx[kk].append(ig-1)
dtheta_ndx[kk].append(ip-1)
self.dgrids_sub_ndx[ri] = dgrids_sub_ndx
self.dtheta_ndx[ri] = dtheta_ndx
def indexing_auto3(self):
        if not self.IsOriented: raise Exception("Error: indexing before reorientation.")
r = self.r
ang1 = self.ang1
ang2 = self.ang2
#print "probe angles", ang1*R2D, ang2*R2D
## ndx of r:
ir = 10001
if r<DS[self.FT].R_NDX[0]: ir = -1
else:
for i in range(1,DS[self.FT].nDist):
if r<=DS[self.FT].R_NDX[i]:
ir = i-1
break
#print 'ir',ir
if ir>10000:
self.r_ndxs = [ir]
self.vbis = [0,0,0]
self.vnrm = [0,0,0]
self.dgrid_ndx_layer = {}
self.dtheta_ndx_layer = {}
return 10000,0,0
elif ir<0:
self.r_ndxs = [ir]
self.vbis = [0,0,0]
self.vnrm = [0,0,0]
self.dgrid_ndx_layer = {}
self.dtheta_ndx_layer = {}
return -1, 0,0
#print "r=%.1f"%r, ir
r_ndxs = [ir,ir+1]
# find 3 layers which are close to the query one
if ir == 0:
r_ndxs.append(ir+2)
elif ir == DS[self.FT].nDist -2:
r_ndxs.append(ir-1)
else:
tmp1 = r - DS[self.FT].R_NDX[ir-1]
tmp2 = DS[self.FT].R_NDX[ir+2] - r
if abs(tmp1) < abs(tmp2):
r_ndxs.append(ir-1)
else:
r_ndxs.append(ir+2)
## ndx of ang1 (Phi):
if ang1<DS[self.FT].PHI_angles[0]: ih = -1
for i in range(1, DS[self.FT].nPhi):
if ang1<=DS[self.FT].PHI_angles[i]:
ih = i-1
break
ang1_ndx1 = ih
ang1_ndx2 = ih + 1
if ang1_ndx1 == DS[self.FT].nPhi -2:
ang1_ndx3 = ih - 1
elif ang1_ndx1 == 0:
ang1_ndx3 = ih + 2
else:
tmp1 = DS[self.FT].PHI_angles[ih+2] - ang1
tmp2 = ang1 - DS[self.FT].PHI_angles[ih-1]
if tmp1 < tmp2:
ang1_ndx3 = ih+2
else:
ang1_ndx3 = ih-1
phiList = [ang1_ndx1,ang1_ndx2,ang1_ndx3]
dgrid_ndx_layer = {}
dtheta_ndx_layer = {}
# determine if use linear interpolation or use quadratic interpolation
if len(set(phiList)) == 2:
iflinear = 1
elif len(set(phiList)) == 3:
iflinear = 0
for kk in set(phiList):
dgrid_ndx_layer[kk] = []
dtheta_ndx_layer[kk] = []
## ndx_of_ang2 (Theta):
ip = -1
for i in range(1,DS[self.FT].NTheta[kk]):
if ang2<=DS[self.FT].THETA_angles[kk][i]:
ip = i-1
break
if ip==-1: ip = DS[self.FT].NTheta[kk]-1
ig = 0
for i in range(kk): ig += DS[self.FT].NTheta[i]
ig += ip
dgrid_ndx_layer[kk].append(ig)
dtheta_ndx_layer[kk].append(ip)
#print "check", kk, ip, ig
if ip == DS[self.FT].NTheta[kk]-1:
if DS[self.FT].NTheta[kk] == 1: #vertex
dgrid_ndx_layer[kk].append(ig)
dtheta_ndx_layer[kk].append(0)
if iflinear == 0:
dgrid_ndx_layer[kk].append(ig)
dtheta_ndx_layer[kk].append(0)
elif self.FT in ['cys','alc','bck','hid','trp','tyr','gln']:
dgrid_ndx_layer[kk].append(ig-DS[self.FT].NTheta[kk]+1)
dtheta_ndx_layer[kk].append(0+DS[self.FT].NTheta[kk])
if iflinear == 0:
tmp1 = DS[self.FT].THETA_angles[kk][1] - ang2 + 2*np.pi
tmp2 = ang2 - DS[self.FT].THETA_angles[kk][ip-1]
if tmp1 < tmp2:
dgrid_ndx_layer[kk].append(ig-DS[self.FT].NTheta[kk]+1+1)
dtheta_ndx_layer[kk].append(0+DS[self.FT].NTheta[kk]+1)
else:
dgrid_ndx_layer[kk].append(ig-1)
dtheta_ndx_layer[kk].append(ip-1)
else:
dgrid_ndx_layer[kk].append(ig-1)
dtheta_ndx_layer[kk].append(ip-1)
if iflinear == 0:
dgrid_ndx_layer[kk].append(ig-2)
dtheta_ndx_layer[kk].append(ip-2)
else:
dgrid_ndx_layer[kk].append(ig+1)
dtheta_ndx_layer[kk].append(ip+1)
if iflinear == 0:
if self.FT in ['cys','alc','bck','hid','trp','tyr','gln']:
if ip+2 == DS[self.FT].NTheta[kk]:
tmp1 = 2*np.pi -ang2
else:
tmp1 = DS[self.FT].THETA_angles[kk][ip+2] - ang2
if ip == 0:
tmp2 = ang2 - DS[self.FT].THETA_angles[kk][DS[self.FT].NTheta[kk]-1] + 2*np.pi
else:
tmp2 = ang2 - DS[self.FT].THETA_angles[kk][ip-1]
if tmp1 < tmp2:
if ip+2 == DS[self.FT].NTheta[kk]:
dgrid_ndx_layer[kk].append(ig+1-DS[self.FT].NTheta[kk]+1)
dtheta_ndx_layer[kk].append(0+DS[self.FT].NTheta[kk])
else:
dgrid_ndx_layer[kk].append(ig+2)
dtheta_ndx_layer[kk].append(ip+2)
else:
if ip == 0:
dgrid_ndx_layer[kk].append(ig+DS[self.FT].NTheta[kk]-1)
dtheta_ndx_layer[kk].append(-1)
else:
dgrid_ndx_layer[kk].append(ig-1)
dtheta_ndx_layer[kk].append(ip-1)
else:
if ip == DS[self.FT].NTheta[kk]-2:
dgrid_ndx_layer[kk].append(ig-1)
dtheta_ndx_layer[kk].append(ip-1)
elif ip == 0:
dgrid_ndx_layer[kk].append(ig+2)
dtheta_ndx_layer[kk].append(ip+2)
else:
tmp1 = DS[self.FT].THETA_angles[kk][ip+2] - ang2
tmp2 = ang2 - DS[self.FT].THETA_angles[kk][ip-1]
if tmp1 < tmp2:
dgrid_ndx_layer[kk].append(ig+2)
dtheta_ndx_layer[kk].append(ip+2)
else:
dgrid_ndx_layer[kk].append(ig-1)
dtheta_ndx_layer[kk].append(ip-1)
self.dgrid_ndx_layer = dgrid_ndx_layer
self.dtheta_ndx_layer = dtheta_ndx_layer
## calculate the vectors of bisector and normal of mole2:
a20 = self.atoms[self.n1].x
a21 = self.atoms[self.n1+1].x
a22 = self.atoms[self.n1+2].x
a20 = np.array(a20)
a21 = np.array(a21)
a22 = np.array(a22)
v0 = a21 - a20
v1 = a22 - a20
## These two vectors must be unit vector:
bisect = get_bisect_unit(v0,v1)
normal = get_normal_unit(v0,v1)
self.r_ndxs = r_ndxs
self.vbis = bisect
self.vnrm = normal
def calt_conf_energy(self, allconfigs, IsForce=False, ehigh=100.0):
ri_ndxs = self.r_ndxs
self.exit_before = False
for ri in ri_ndxs:
if ri>100:
self.properties = {'E':0.0}
return
elif ri<0:
                force_vec = [self.origin_center_coord[i] * ehigh for i in range(3)]
                self.properties = {'E':ehigh, "Fx": force_vec[0], "Fy": force_vec[1], "Fz": force_vec[2],
                                   "Tx": 0, "Ty": 0, "Tz": 0}
self.exit_before = True
return
bisv = self.vbis
nrmv = self.vnrm
dtheta_ndx_layer = self.dtheta_ndx_layer
grid_ndx_layer = []
for ih in self.dgrid_ndx_layer:
grid_ndx_layer += self.dgrid_ndx_layer[ih]
self.orientVec = bisv
#print "orient vector:%.5f\t%.5f\t%.5f\n"%(bisv[0]*4.0,bisv[1]*4.0,bisv[2]*4.0)
self._spherical_orient()
ang1 = self.orient_ang1
ang2 = self.orient_ang2
ang2 = (ang2*R2D+180)%360 #the original orientational vector of water is located at -x axis
ang2 = ang2/R2D
self.orient_ang2 = ang2
self.OrientDS = {}
self.orient_tr = {}
self.orient_pr = {}
self.dgrids_sub_ndx = {}
self.dtheta_ndx = {}
grids_sub_ndx = {}
dtheta_ndx = {}
wghx1 = {}
wghx2 = {}
wghy = {}
label = {}
for i in ri_ndxs:
dist = DS[self.FT].R_NDX[i] # choose corresponding orientational sampling based on distance
#print "which layer:", dist
if dist > 5.5000001:
cart_ndx, grids_sub_ndx_tmp, wghx_tmp, wghy_tmp = weights_in_subsection( bisv )
grids_sub_ndx[i] = grids_sub_ndx_tmp
wghx1[i] = wghx_tmp/pi4
wghx2[i] = wghx_tmp/pi4
wghy[i] = wghy_tmp/pi4
label[i] = 0
else:
if dist < 2.5000001:
OrientDS = OrientDS_2
elif dist > 2.5000001 and dist < 3.5000001:
OrientDS = OrientDS_3
else:
OrientDS = OrientDS_2
self.OrientDS[i] = OrientDS
self.indexing_orient_auto3(i)
dtheta_ndx[i] = self.dtheta_ndx[i]
if len(dtheta_ndx[i]) == 2: # not in this script
pass
#orient_pr =[]
#for kk in dtheta_ndx[i]:
# ip1=dtheta_ndx[i][kk][0]
# ip2=dtheta_ndx[i][kk][1]
# if ip1 == 0 and ip2 == 0: # vertex
# wtmp = 0
# elif ip1 == OrientDS['wtr'].NTheta[kk]-1:
# wtmp = (ang2-OrientDS['wtr'].THETA_angles[kk][ip1])/(2*np.pi+OrientDS['wtr'].THETA_angles[kk][0]-OrientDS['wtr'].THETA_angles[kk][ip1])
# else:
# wtmp = (ang2-OrientDS['wtr'].THETA_angles[kk][ip1])/(OrientDS['wtr'].THETA_angles[kk][ip2]-OrientDS['wtr'].THETA_angles[kk][ip1])
# orient_pr.append(wtmp)
#wghx1[i] = orient_pr[0]
#wghx2[i] = orient_pr[1]
#ihs = dtheta_ndx[i].keys()
#wghy[i] = (ang1 - OrientDS['wtr'].PHI_angles[ihs[0]])/(OrientDS['wtr'].PHI_angles[ihs[1]]-OrientDS['wtr'].PHI_angles[ihs[0]])
#label[i] = 1
##print "++++++",wghx1[i],wghx2[i],wghy[i]
#grids_sub_ndx[i] = self.dgrids_sub_ndx[i][ihs[0]] + self.dgrids_sub_ndx[i][ihs[1]]
if len(dtheta_ndx[i]) == 3:
ihs = dtheta_ndx[i].keys()
grids_sub_ndx[i] = self.dgrids_sub_ndx[i][ihs[0]] + self.dgrids_sub_ndx[i][ihs[1]] + self.dgrids_sub_ndx[i][ihs[2]]
label[i] = 2
#print "grids_sub_ndx:",grids_sub_ndx[i]
properties = {'E':[], 'Fx':[], 'Fy':[], 'Fz':[], 'Tx':[], 'Ty':[], 'Tz':[]}
propnames = ['E','Fx','Fy','Fz','Tx','Ty','Tz']
tempprop = deepcopy(properties)
for i in ri_ndxs:
for j in grid_ndx_layer:
prop = deepcopy(tempprop)
for ni in grids_sub_ndx[i]:
inpfiles = []
for k in range(DS[self.FT].nNorm[i]):
inpfile = 'r%3.2f/tempconf_d%3.2f_g%03d_c%02d.inp'%(DS[self.FT].R_NDX[i],DS[self.FT].R_NDX[i],j,ni+k*DS[self.FT].nConf[i])
inpfiles.append(inpfile)
xvecs = []
for ff in range(len(inpfiles)):
xconf = allconfigs.allcfg[i][j][ni][ff].xmole2
xvecs.append( norm_prob(xconf,[0,1,2],'wtr') )
nvec = len(xvecs)
if nvec == 2: # linear interpolation for normal vectors
w0, w1, ndx0, ndx1 = weights_for_normal_general( nrmv, xvecs)
#print 'test',i, j, ni, ndx0, ndx1
for pp in propnames:
p0 = allconfigs.get_prop(i,j,ni,ndx0,pp,w0, ehigh=ehigh)
p1 = allconfigs.get_prop(i,j,ni,ndx1,pp,w1, ehigh=ehigh)
p = p1*abs(w1) + p0*abs(w0)
prop[pp].append(p)
#print pp, inpfiles[ndx0],p0,w0,inpfiles[ndx1],p1,w1,p
elif nvec > 2: # quadratic interpolation for normal vectors
angNorm, ndx1, ndx2, ndx3 = get_neighors_for_normal(nrmv, xvecs)
angNorm_1 = ndx1*np.pi/nvec
angNorm_2 = ndx2*np.pi/nvec
angNorm_3 = ndx3*np.pi/nvec
#print "lagrange", i, j, ni, ndx1, ndx2, ndx3, angNorm*R2D, angNorm_1*R2D, angNorm_2*R2D, angNorm_3*R2D
for pp in propnames:
if ndx1 == nvec: ndx1 = 0
if ndx2 == nvec: ndx2 = 0
if ndx3 == nvec: ndx3 = 0
p1 = allconfigs.get_prop(i,j,ni,ndx1,pp,0, ehigh=ehigh)
p2 = allconfigs.get_prop(i,j,ni,ndx2,pp,0, ehigh=ehigh)
p3 = allconfigs.get_prop(i,j,ni,ndx3,pp,0, ehigh=ehigh)
points = [(angNorm_1,p1),(angNorm_2,p2),(angNorm_3,p3)]
p = lagrange_interp(points,angNorm)
prop[pp].append(p)
#print pp, inpfiles[ndx1],p1,inpfiles[ndx2],p2,inpfiles[ndx3],p3,p
for pp in propnames:
# on the level of orientation, theta and phi
if len(prop[pp]) == 4:
psub = bilinear_gen(prop[pp][0], prop[pp][1], prop[pp][2], prop[pp][3], wghx1[i], wghx2[i], wghy[i],label[i])
properties[pp].append(psub)
#print pp, prop[pp][0], prop[pp][1], prop[pp][2], prop[pp][3], grids_sub_ndx[i], wghx1[i], wghx2[i], wghy[i],psub
elif len(prop[pp]) == 9:
cn = 0
points_phi = []
for kk in dtheta_ndx[i]:
#print "here",kk, self.OrientDS[i]['wtr'].nPhi
angPhi = self.OrientDS[i]['wtr'].PHI_angles[kk]
#print "for orientation with phi=",angPhi*R2D
if len(set(dtheta_ndx[i][kk])) == 1: # vertex
p = prop[pp][cn]
points_phi.append((angPhi,p))
cn += 3
continue
points_theta = []
for ip in dtheta_ndx[i][kk]:
if ip >= self.OrientDS[i]['wtr'].NTheta[kk]:
angTheta = 2*np.pi + self.OrientDS[i]['wtr'].THETA_angles[kk][ip-self.OrientDS[i]['wtr'].NTheta[kk]]
elif ip < 0:
angTheta = self.OrientDS[i]['wtr'].THETA_angles[kk][ip] - 2*np.pi
else:
angTheta = self.OrientDS[i]['wtr'].THETA_angles[kk][ip]
points_theta.append((angTheta,prop[pp][cn]))
#print pp, angTheta*R2D, prop[pp][cn]
cn += 1
p = lagrange_interp(points_theta,ang2)
#print 'quadratic interpolation gives',p, 'for property', pp
points_phi.append((angPhi,p))
psub = lagrange_interp(points_phi,ang1)
#print 'interpolated orientational property of %s:'%pp,psub
properties[pp].append(psub)
## on the level of r, theta, phi
self.properties = {}
if len(dtheta_ndx_layer) == 2: # for grids near vertex of each layers, linear interpolation for grids and quadratic interpolation for layers; NOT IN THIS SCRIPT
pass
#Wghx = []
#For kk in dtheta_ndx_layer:
# ip1 = dtheta_ndx_layer[kk][0]
# ip2 = dtheta_ndx_layer[kk][1]
# if ip1 == 0 and ip2 == 0:
# wtmp = 0
# else:
# wtmp = (self.ang2-DS[self.FT].THETA_angles[kk][ip1])/(DS[self.FT].THETA_angles[kk][ip2]-DS[self.FT].THETA_angles[kk][ip1])
# wghx.append(wtmp)
#Ihs = dtheta_ndx_layer.keys()
#Wghy = (self.ang1-DS[self.FT].PHI_angles[ihs[0]])/(DS[self.FT].PHI_angles[ihs[1]]-DS[self.FT].PHI_angles[ihs[0]])
#For pp in propnames:
# psub_r = []
# for m in range(0,len(properties[pp]),4): # for each layer
# #print pp, properties[pp][m], properties[pp][m+1],properties[pp][m+2], properties[pp][m+3], wghx[0], wghx[1], wghy
# psub = bilinear_gen(properties[pp][m], properties[pp][m+1],properties[pp][m+2], properties[pp][m+3], wghx[0], wghx[1], wghy,1)
# psub_r.append(psub)
# if not len(psub_r) == 3:
# #print 'quadratic interpolation needs 3 layers'
# sys.exit()
# points = []
# for t in range(len(ri_ndxs)):
# dist = DS[self.FT].R_NDX[ri_ndxs[t]]
# points.append((dist,psub_r[t]))
# p = lagrange_interp(points,self.r)
# self.properties[pp] = p
elif len(dtheta_ndx_layer) == 3: # quadratic interpolation for layers and grids
for pp in propnames:
psub_r = []
for m in range(0,len(properties[pp]),9): # for each layer
count = 0
points_th = []
for kk in dtheta_ndx_layer:
if len(set(dtheta_ndx_layer[kk])) == 1: # vertex
p = properties[pp][m+count]
points_th.append((DS[self.FT].PHI_angles[kk],p))
count += 3
continue
ip1 = dtheta_ndx_layer[kk][0]
ip2 = dtheta_ndx_layer[kk][1]
ip3 = dtheta_ndx_layer[kk][2]
th1 = DS[self.FT].THETA_angles[kk][ip1]
th2 = DS[self.FT].THETA_angles[kk][ip2]
th3 = DS[self.FT].THETA_angles[kk][ip3]
points = [(th1,properties[pp][m+count]),(th2,properties[pp][m+count+1]),(th3,properties[pp][m+count+2])]
p = lagrange_interp(points,self.ang2)
points_th.append((DS[self.FT].PHI_angles[kk],p))
count += 3
p = lagrange_interp(points_th,self.ang1)
psub_r.append(p)
if not len(psub_r) == 3:
#print 'quadratic interpolation needs 3 layers'
sys.exit()
points = []
for t in range(len(ri_ndxs)):
dist = DS[self.FT].R_NDX[ri_ndxs[t]]
points.append((dist,psub_r[t]))
p = lagrange_interp(points,self.r)
self.properties[pp] = p
def reverse_force_toque(self):
Fx = self.properties['Fx']
Fy = self.properties['Fy']
Fz = self.properties['Fz']
self.force = [Fx, Fy, Fz]
Tx = self.properties['Tx']
Ty = self.properties['Ty']
Tz = self.properties['Tz']
self.torque = [Tx, Ty, Tz]
if self.exit_before:
return
self.MirrorBackProperty()
self.ReorientToOldVec()
def get_interp_energy(self):
return self.properties['E']
def get_interp_force(self):
return self.force
def get_interp_torque(self):
return self.torque
|
from scrapy.conf import settings
import pymongo
from datetime import datetime
from .models import PQDataModel
class ParliamentSearchPipeline(object):
def __init__(self):
self.connection = None
def process_item(self, items, spider):
if spider.name == "ls_questions":
questions = items['questions']
# self.insert_in_db(questions)
else:
raise ValueError("Invalid collection:", spider.name)
return items
def insert_in_db(self, questions):
with PQDataModel.batch_write() as batch:
records = []
for q in questions:
record = PQDataModel()
record.question_number = q['question_number']
record.question_origin = q['question_origin']
record.question_type = q['question_type']
record.question_session = q['question_session']
record.question_ministry = q['question_ministry']
record.question_member = q['question_member']
record.question_subject = q['question_subject']
record.question_type = q['question_type']
record.question_annex = q['question_annex']
record.question_url = q['question_url']
record.question_text = q['question_text']
record.question_url = q['question_url']
record.question_date = datetime.strptime(q['question_date'], '%d.%m.%Y')
records.append(record)
for record in records:
batch.save(record)
|
import arrow
from requests import HTTPError
import settings
from . import misc
from . import voting
from . import comments
from . import exceptions as exc
def merge_pr(api, urn, pr, votes, total, threshold):
""" merge a pull request, if possible, and use a nice detailed merge commit
message """
pr_num = pr["number"]
pr_title = pr['title']
pr_description = pr['body']
path = "/repos/{urn}/pulls/{pr}/merge".format(urn=urn, pr=pr_num)
record = voting.friendly_voting_record(votes)
if record:
record = "Vote record:\n" + record
votes_summary = formatted_votes_summary(votes, total, threshold)
pr_url = "https://github.com/{urn}/pull/{pr}".format(urn=urn, pr=pr_num)
title = "merging PR #{num}: {pr_title}".format(
num=pr_num, pr_title=pr_title)
desc = """
{pr_url}: {pr_title}
Description:
{pr_description}
:ok_woman: PR passed {summary}.
{record}
""".strip().format(
pr_url=pr_url,
pr_title=pr_title,
pr_description=pr_description,
summary=votes_summary,
record=record,
)
data = {
"commit_title": title,
"commit_message": desc,
"merge_method": "merge",
# if some clever person attempts to submit more commits while we're
# aggregating votes, this sha check will fail and no merge will occur
"sha": pr["head"]["sha"],
}
try:
resp = api("PUT", path, json=data)
return resp["sha"]
except HTTPError as e:
resp = e.response
# could not be merged
if resp.status_code == 405:
raise exc.CouldntMerge
# someone trying to be sneaky and change their PR commits during voting
elif resp.status_code == 409:
raise exc.CouldntMerge
else:
raise
def formatted_votes_summary(votes, total, threshold):
vfor = sum(v for v in votes.values() if v > 0)
vagainst = abs(sum(v for v in votes.values() if v < 0))
return "with a vote of {vfor} for and {vagainst} against, with a weighted total of {total:.1f} and a threshold of {threshold:.1f}" \
.strip().format(vfor=vfor, vagainst=vagainst, total=total, threshold=threshold)
def formatted_votes_short_summary(votes, total, threshold):
vfor = sum(v for v in votes.values() if v > 0)
vagainst = abs(sum(v for v in votes.values() if v < 0))
return "vote: {vfor}-{vagainst}, weighted total: {total:.1f}, threshold: {threshold:.1f}" \
.strip().format(vfor=vfor, vagainst=vagainst, total=total, threshold=threshold)
def label_pr(api, urn, pr_num, labels):
""" set a pr's labels (removes old labels) """
if not isinstance(labels, (tuple, list)):
labels = [labels]
path = "/repos/{urn}/issues/{pr}/labels".format(urn=urn, pr=pr_num)
data = labels
resp = api("PUT", path, json=data)
def close_pr(api, urn, pr):
""" https://developer.github.com/v3/pulls/#update-a-pull-request """
path = "/repos/{urn}/pulls/{pr}".format(urn=urn, pr=pr["number"])
data = {
"state": "closed",
}
return api("patch", path, json=data)
def get_pr_last_updated(pr_data):
""" a helper for finding the utc datetime of the last pr branch
modifications """
repo = pr_data["head"]["repo"]
if repo:
dt = repo["pushed_at"]
else:
dt = pr_data["created_at"]
return arrow.get(dt)
def get_pr_comments(api, urn, pr_num):
""" yield all comments on a pr, weirdly excluding the initial pr comment
itself (the one the owner makes) """
params = {
"per_page": settings.DEFAULT_PAGINATION
}
path = "/repos/{urn}/issues/{pr}/comments".format(urn=urn, pr=pr_num)
comments = api("get", path, params=params)
for comment in comments:
yield comment
def get_ready_prs(api, urn, window):
""" yield mergeable, non-WIP prs that have had no modifications for longer
than the voting window. these are prs that are ready to be considered for
merging """
open_prs = get_open_prs(api, urn)
for pr in open_prs:
pr_num = pr["number"]
now = arrow.utcnow()
updated = get_pr_last_updated(pr)
delta = (now - updated).total_seconds()
is_wip = "WIP" in pr["title"]
if not is_wip and delta > window:
# we check if its mergeable if its outside the voting window,
# because there seems to be a race where a freshly-created PR exists
# in the paginated list of PRs, but 404s when trying to fetch it
# directly
mergeable = get_is_mergeable(api, urn, pr_num)
if mergeable is True:
label_pr(api, urn, pr_num, [])
yield pr
elif mergeable is False:
label_pr(api, urn, pr_num, ["conflicts"])
if delta >= 60 * 60 * settings.PR_STALE_HOURS:
comments.leave_stale_comment(
api, urn, pr["number"], round(delta / 60 / 60))
close_pr(api, urn, pr)
# mergeable can also be None, in which case we just skip it for now
def voting_window_remaining_seconds(pr, window):
now = arrow.utcnow()
updated = get_pr_last_updated(pr)
delta = (now - updated).total_seconds()
return window - delta
def is_pr_in_voting_window(pr, window):
return voting_window_remaining_seconds(pr, window) <= 0
def get_pr_reviews(api, urn, pr_num):
""" get all pr reviews on a pr
https://help.github.com/articles/about-pull-request-reviews/ """
params = {
"per_page": settings.DEFAULT_PAGINATION
}
path = "/repos/{urn}/pulls/{pr}/reviews".format(urn=urn, pr=pr_num)
data = api("get", path, params=params)
return data
def get_is_mergeable(api, urn, pr_num):
return get_pr(api, urn, pr_num)["mergeable"]
def get_pr(api, urn, pr_num):
""" helper for fetching a pr. necessary because the "mergeable" field does
not exist on prs that come back from paginated endpoints, so we must fetch
the pr directly """
path = "/repos/{urn}/pulls/{pr}".format(urn=urn, pr=pr_num)
pr = api("get", path)
return pr
def get_open_prs(api, urn):
params = {
"state": "open",
"sort": "updated",
"direction": "asc",
"per_page": settings.DEFAULT_PAGINATION,
}
path = "/repos/{urn}/pulls".format(urn=urn)
data = api("get", path, params=params)
return data
def get_reactions_for_pr(api, urn, pr):
path = "/repos/{urn}/issues/{pr}/reactions".format(urn=urn, pr=pr)
params = {"per_page": settings.DEFAULT_PAGINATION}
reactions = api("get", path, params=params)
for reaction in reactions:
yield reaction
def post_accepted_status(api, urn, pr, voting_window, votes, total, threshold):
sha = pr["head"]["sha"]
remaining_seconds = voting_window_remaining_seconds(pr, voting_window)
remaining_human = misc.seconds_to_human(remaining_seconds)
votes_summary = formatted_votes_short_summary(votes, total, threshold)
post_status(api, urn, sha, "success",
"remaining: {time}, {summary}".format(time=remaining_human, summary=votes_summary))
def post_rejected_status(api, urn, pr, voting_window, votes, total, threshold):
sha = pr["head"]["sha"]
remaining_seconds = voting_window_remaining_seconds(pr, voting_window)
remaining_human = misc.seconds_to_human(remaining_seconds)
votes_summary = formatted_votes_short_summary(votes, total, threshold)
post_status(api, urn, sha, "failure",
"remaining: {time}, {summary}".format(time=remaining_human, summary=votes_summary))
def post_pending_status(api, urn, pr, voting_window, votes, total, threshold):
sha = pr["head"]["sha"]
remaining_seconds = voting_window_remaining_seconds(pr, voting_window)
remaining_human = misc.seconds_to_human(remaining_seconds)
votes_summary = formatted_votes_short_summary(votes, total, threshold)
post_status(api, urn, sha, "pending",
"remaining: {time}, {summary}".format(time=remaining_human, summary=votes_summary))
def post_status(api, urn, sha, state, description):
""" apply an issue label to a pr """
path = "/repos/{urn}/statuses/{sha}".format(urn=urn, sha=sha)
data = {
"state": state,
"description": description,
"context": "chaosbot"
}
api("POST", path, json=data)
|
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
import sys
import radical.pilot as rp
def pilot_state_cb (pilot, state):
""" this callback is invoked on all pilot state changes """
print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state)
if state == rp.FAILED:
sys.exit (1)
def unit_state_cb (unit, state):
""" this callback is invoked on all unit state changes """
print "[Callback]: ComputeUnit '%s' state: %s." % (unit.uid, state)
if state == rp.FAILED:
sys.exit (1)
if __name__ == "__main__":
# we can optionally pass session name to RP
if len(sys.argv) > 1:
session_name = sys.argv[1]
else:
session_name = None
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session(name=session_name)
print "session id: %s" % session.uid
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
pmgr = rp.PilotManager(session=session)
# Register our callback with the PilotManager. This callback will get
# called every time any of the pilots managed by the PilotManager
# change their state.
pmgr.register_callback(pilot_state_cb)
        # Define an X-core pilot on stampede that runs for N minutes and
        # uses $HOME/radical.pilot.sandbox as sandbox directory.
pdesc = rp.ComputePilotDescription()
pdesc.resource = "xsede.stampede"
pdesc.runtime = 15 # N minutes
pdesc.cores = 16 # X cores
pdesc.project = "TG-MCB090174"
# Launch the pilot.
pilot = pmgr.submit_pilots(pdesc)
cud_list = []
for unit_count in range(0, 4):
cu = rp.ComputeUnitDescription()
cu.pre_exec = ["module load python intel mvapich2 mpi4py"]
cu.executable = "python"
cu.arguments = ["helloworld_mpi.py"]
cu.input_staging = ["helloworld_mpi.py"]
# These two parameters are relevant to MPI execution:
# 'cores' sets the number of cores required by the task
            # 'mpi' identifies the task as an MPI task
cu.cores = 8
cu.mpi = True
cud_list.append(cu)
# Combine the ComputePilot, the ComputeUnits and a scheduler via
# a UnitManager object.
umgr = rp.UnitManager(
session=session,
scheduler=rp.SCHED_DIRECT_SUBMISSION)
# Register our callback with the UnitManager. This callback will get
# called every time any of the units managed by the UnitManager
# change their state.
umgr.register_callback(unit_state_cb)
# Add the previously created ComputePilot to the UnitManager.
umgr.add_pilots(pilot)
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
units = umgr.submit_units(cud_list)
# Wait for all compute units to reach a terminal state (DONE or FAILED).
umgr.wait_units()
if not isinstance(units, list):
units = [units]
for unit in units:
print "* Task %s - state: %s, exit code: %s, started: %s, finished: %s, stdout: %s" \
% (unit.uid, unit.state, unit.exit_code, unit.start_time, unit.stop_time, unit.stdout)
except Exception as e:
# Something unexpected happened in the pilot code above
print "caught Exception: %s" % e
raise
except (KeyboardInterrupt, SystemExit) as e:
# the callback called sys.exit(), and we can here catch the
# corresponding KeyboardInterrupt exception for shutdown. We also catch
# SystemExit (which gets raised if the main threads exits for some other
# reason).
print "need to exit now: %s" % e
finally:
# always clean up the session, no matter if we caught an exception or
# not.
print "closing session"
session.close ()
# the above is equivalent to
#
# session.close (cleanup=True, terminate=True)
#
# it will thus both clean out the session's database record, and kill
# all remaining pilots (none in our example).
|
from riotwatcher import *
from time import sleep
import logging
log = logging.getLogger('log')
def getTeamOfSummoner( summonerId, game ):
for p in game['participants']:
if p['summonerId'] == summonerId:
return p['teamId']
def getSummonerIdsOfOpponentTeam( summonerId, game ):
teamId = getTeamOfSummoner(summonerId, game)
summoners = []
for p in game['participants']:
if p['teamId'] != teamId:
summoners.append(p['summonerId'])
return summoners
def queryPastGameIdSets( w, summonerIds, past10 ):
sets = {}
rqs = 0
for id in summonerIds:
        response = w.get_match_list(id)
matchlist = []
if 'matches' in response:
matchlist = response['matches']
gamelist = []
if past10:
gamelist = w.get_recent_games(id)['games']
rqs += 2
if rqs >= 8:
sleep(10)
rqs = 0
log.debug('matches of summoner '+str(id)+': '+str(len(matchlist)))
s = set()
for match in matchlist:
s.add(match['matchId'])
for game in gamelist:
s.add(game['gameId'])
sets[id] = s
return sets
def computeFriendship( IdSets ):
searchedSets = set()
friendships = {}
for id in IdSets:
friendships[id] = {}
for id in IdSets:
searchedSets.add(id)
for gameId in IdSets[id]:
for id2 in IdSets:
if not id2 in searchedSets:
if gameId in IdSets[id2]:
if not id2 in friendships[id]:
friendships[id][id2] = 1
if not id in friendships[id2]:
friendships[id2][id] = 1
friendships[id][id2] += 1
friendships[id2][id] += 1
return friendships
def computePremades( friendshipRelations ):
premades = []
for id in friendshipRelations:
group = set(friendshipRelations[id].keys())
group.add(id)
if group not in premades:
premades.append(group)
finPremades = []
for group1 in premades:
finGroup = group1
for group2 in premades:
if group1 != group2 and len(group1 & group2) > 0:
finGroup = finGroup | group2
if finGroup not in finPremades:
finPremades.append(finGroup)
return finPremades
def getPremades( summonerName, lolAPIKey, past10 ):
    w = RiotWatcher(lolAPIKey, default_region=EUROPE_WEST)
id = w.get_summoner(name=summonerName)['id']
game = w.get_current_game(id)
participants = game['participants']
idToParticipantsMap = {}
for p in participants:
log.info(p['summonerName'].encode('utf8')+' '+str(p['summonerId'])+' '+str(p['teamId']))
idToParticipantsMap[p['summonerId']] = p
log.debug(getSummonerIdsOfOpponentTeam(id,game))
gameIdSets = queryPastGameIdSets( w, getSummonerIdsOfOpponentTeam(id,game), past10 )
friendshipRelations = computeFriendship(gameIdSets)
log.debug(friendshipRelations)
premades = computePremades(friendshipRelations)
premadesNames = []
for group in premades:
groupNames = []
if len(group) > 1:
for summonerId in group:
groupNames.append(idToParticipantsMap[summonerId]['summonerName'])
premadesNames.append(groupNames)
return premadesNames
|
import unittest
from datetime import datetime
import numpy as np
import pandas as pd
from excel_helper.helper import DataSeriesLoader
class TestDataFrameWithCAGRCalculation(unittest.TestCase):
def test_simple_CAGR(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['static_one']
print (res)
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_CAGR_ref_date_within_bounds(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['static_one']
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_CAGR_ref_date_before_start(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
# equivalent to dfl['test_ref_date_before_start']
self.assertRaises(AssertionError, dfl.__getitem__, 'test_ref_date_before_start')
def test_CAGR_ref_date_after_end(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
        # equivalent to dfl['test_ref_date_after_end']
self.assertRaises(AssertionError, dfl.__getitem__, 'test_ref_date_after_end')
def test_simple_CAGR_from_pandas(self):
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
xls = pd.ExcelFile('test.xlsx')
df = xls.parse('Sheet1')
ldr = DataSeriesLoader.from_dataframe(df, times, size=2)
res = ldr['static_one']
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_simple_CAGR_mm(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2015-01-01', '2016-01-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['mm']
print(res)
# assert res.loc[[datetime(2009, 1, 1)]][0] == 1
# assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
if __name__ == '__main__':
unittest.main()
|
from django.dispatch import Signal
pre_save = Signal(providing_args=['instance', 'action', ])
post_save = Signal(providing_args=['instance', 'action', ])
pre_delete = Signal(providing_args=['instance', 'action', ])
post_delete = Signal(providing_args=['instance', 'action', ])
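
# Hedged usage sketch (receiver name and behaviour are illustrative, not part of
# this module); left commented out so importing the module stays side-effect free:
#
# def log_pre_save(sender, instance=None, action=None, **kwargs):
#     print('about to save %r via action %r' % (instance, action))
#
# pre_save.connect(log_pre_save)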
|
'''
Import this module to have access to a global redis cache named GLOBAL_CACHE.
USAGE:
from caching import GLOBAL_CACHE
GLOBAL_CACHE.store('foo', 'bar')
GLOBAL_CACHE.get('foo')
>> bar
'''
from redis_cache import SimpleCache
try:
GLOBAL_CACHE
except NameError:
GLOBAL_CACHE = SimpleCache(limit=1000, expire=60*60*24, namespace="GLOBAL_CACHE")
else:
# Already defined...
pass
|
def selector(values, setBits):
maxBits = len(values)
def select(v):
out = []
for i in range(maxBits):
if (v & (1 << i)):
out.append(values[i])
return out
v = (2 ** setBits) - 1
endState = v << (maxBits - setBits)
yield select(v)
    while v != endState:
        # Gosper's hack: step to the next integer with the same number of set bits,
        # emulating fixed-width unsigned arithmetic via the "% (1 << maxBits)" terms.
        t = (v | (v - 1)) + 1
        v = t | ((((t & (-t % (1 << maxBits))) // (v & (-v % (1 << maxBits)))) >> 1) - 1)
yield select(v)
def normalize(perm):
ref = sorted(perm)
return [ref.index(x) for x in perm]
def contains_pattern(perm, patt):
if len(patt) > len(perm):
return False
for p in selector(perm, len(patt)):
if normalize(p) == patt:
return True
return False
if __name__ == '__main__':
print(contains_pattern(
[14, 12, 6, 10, 0, 9, 1, 11, 13, 16, 17, 3, 7, 5, 15, 2, 4, 8],
[3, 0, 1, 2]))
    print(True)  # expected output
|
limit = None
hello = str(limit) if limit is not None else ""
print(hello)
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'App.created_at'
db.add_column('mobile_apps_app', 'created_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'App.created_at'
db.delete_column('mobile_apps_app', 'created_at')
models = {
'core.level': {
'Meta': {'ordering': "['order']", 'object_name': 'Level'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mobile_apps.app': {
'Meta': {'object_name': 'App'},
'content_areas': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'content_areas'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Level']"}),
'cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'levels'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Level']"}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mobile_apps.Type']"})
},
'mobile_apps.type': {
'Meta': {'object_name': 'Type'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['mobile_apps']
|
import math
import urwid
from mitmproxy.tools.console import common
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import grideditor
class SimpleOverlay(urwid.Overlay):
def __init__(self, master, widget, parent, width, valign="middle"):
self.widget = widget
self.master = master
super().__init__(
widget,
parent,
align="center",
width=width,
valign=valign,
height="pack"
)
def keypress(self, size, key):
key = super().keypress(size, key)
if key == "esc":
signals.pop_view_state.send(self)
if key == "?":
self.master.view_help(self.widget.make_help())
else:
return key
class Choice(urwid.WidgetWrap):
def __init__(self, txt, focus, current):
if current:
s = "option_active_selected" if focus else "option_active"
else:
s = "option_selected" if focus else "text"
return super().__init__(
urwid.AttrWrap(
urwid.Padding(urwid.Text(txt)),
s,
)
)
def selectable(self):
return True
def keypress(self, size, key):
return key
class ChooserListWalker(urwid.ListWalker):
def __init__(self, choices, current):
self.index = 0
self.choices = choices
self.current = current
def _get(self, idx, focus):
c = self.choices[idx]
return Choice(c, focus, c == self.current)
def set_focus(self, index):
self.index = index
def get_focus(self):
return self._get(self.index, True), self.index
def get_next(self, pos):
if pos >= len(self.choices) - 1:
return None, None
pos = pos + 1
return self._get(pos, False), pos
def get_prev(self, pos):
pos = pos - 1
if pos < 0:
return None, None
return self._get(pos, False), pos
class Chooser(urwid.WidgetWrap):
def __init__(self, title, choices, current, callback):
self.choices = choices
self.callback = callback
choicewidth = max([len(i) for i in choices])
self.width = max(choicewidth, len(title) + 5)
self.walker = ChooserListWalker(choices, current)
super().__init__(
urwid.AttrWrap(
urwid.LineBox(
urwid.BoxAdapter(
urwid.ListBox(self.walker),
len(choices)
),
                    title=title
),
"background"
)
)
def selectable(self):
return True
def keypress(self, size, key):
key = common.shortcuts(key)
if key == "enter":
self.callback(self.choices[self.walker.index])
signals.pop_view_state.send(self)
return super().keypress(size, key)
def make_help(self):
text = []
keys = [
("enter", "choose option"),
("esc", "exit chooser"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
class OptionsOverlay(urwid.WidgetWrap):
def __init__(self, master, name, vals, vspace):
"""
vspace: how much vertical space to keep clear
"""
cols, rows = master.ui.get_cols_rows()
self.ge = grideditor.OptionsEditor(master, name, vals)
super().__init__(
urwid.AttrWrap(
urwid.LineBox(
urwid.BoxAdapter(self.ge, rows - vspace),
title=name
),
"background"
)
)
self.width = math.ceil(cols * 0.8)
def make_help(self):
return self.ge.make_help()
|
import os
ADMINS = (
# ('Eduardo Lopez', 'eduardo.biagi@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(os.path.dirname(__file__), 'highways.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TIME_ZONE = 'America/Mexico_City'
LANGUAGE_CODE = 'es-MX'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
MEDIA_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = 'bre7b$*6!iagzqyi1%q@%_ofbb)e!rawcnm9apx^%kf@b%)le!'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'project.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'carreteras',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), "templates"),
)
|
import MySQLdb as _mysql
from collections import namedtuple
import re
float_match = re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match
def is_number(string):
return bool(float_match(string))
class MySQLDatabase(object):
"""
This is the driver class that we will use
for connecting to our database. In here we'll
create a constructor (__init__) that will connect
to the database once the driver class is instantiated
and a destructor method that will close the database
connection once the driver object is destroyed.
"""
def __init__(self, database_name, username,
password, host='localhost'):
"""
Here we'll try to connect to the database
using the variables that we passed through
and if the connection fails we'll print out the error
"""
try:
self.db = _mysql.connect(db=database_name, host=host, user=username, passwd=password)
self.database_name = database_name
print "Connected to MySQL!"
except _mysql.Error, e:
print e
def __del__(self):
"""
Here we'll do a check to see if `self.db` is present.
This will only be the case if the connection was
successfully made in the initialiser.
Inside that condition we'll close the connection
"""
if hasattr(self, 'db'):
self.db.close()
print "MySQL Connection Closed"
def get_available_tables(self):
"""
This method will allow us to see what
tables are available to us when we're
running our queries
"""
cursor = self.db.cursor()
cursor.execute("SHOW TABLES;")
self.tables = cursor.fetchall()
cursor.close()
return self.tables
def convert_to_named_tuples(self, cursor):
results = None
names = " ".join(d[0] for d in cursor.description)
klass = namedtuple('Results', names)
try:
results = map(klass._make, cursor.fetchall())
except _mysql.ProgrammingError, e:
print e
return results
def get_columns_for_table(self, table_name):
"""
This method will enable us to interact
with our database to find what columns
are currently in a specific table
"""
cursor = self.db.cursor()
cursor.execute("SHOW COLUMNS FROM `%s`" % table_name)
self.columns = cursor.fetchall()
cursor.close()
return self.columns
def select(self, table, columns=None, named_tuples=False, **kwargs):
"""
We'll create our `select` method in order
to make it simpler for extracting data from
the database.
select(table_name, [list_of_column_names])
"""
sql_str = "SELECT "
# add columns or just use the wildcard
if not columns:
sql_str += " * "
else:
for column in columns:
sql_str += "%s, " % column
sql_str = sql_str[:-2] # remove the last comma!
        # add the FROM clause to the SELECT query
sql_str += " FROM `%s`.`%s`" % (self.database_name, table)
# if there's a JOIN clause attached
if kwargs.has_key('join'):
sql_str += " JOIN %s " % kwargs.get('join')
# if there's a WHERE clause attached
if kwargs.has_key('where'):
sql_str += " WHERE %s " % kwargs.get('where')
# if there's a LIMIT clause attached
if kwargs.has_key('limit'):
sql_str += " LIMIT %s " % kwargs.get('limit')
# Finalise out SQL string
sql_str += ";"
cursor = self.db.cursor()
cursor.execute(sql_str)
if named_tuples:
results = self.convert_to_named_tuples(cursor)
else:
results = cursor.fetchall()
cursor.close()
return results
def delete(self, table, **wheres):
"""
        This function will allow us to delete data from a given table
        based on whether or not a WHERE clause is present
"""
sql_str = "DELETE FROM `%s`.`%s`" % (self.database_name, table)
if wheres is not None:
first_where_clause = True
for where, term in wheres.iteritems():
if first_where_clause:
# This is the first WHERE clause
sql_str += " WHERE `%s`.`%s` %s" % (table, where, term)
first_where_clause = False
else:
# this is the second (additional) WHERE clause so we use AND
sql_str += " AND `%s`.`%s` %s" % (table, where, term)
sql_str += ";"
cursor = self.db.cursor()
cursor.execute(sql_str)
self.db.commit()
cursor.close()
# Only needs to compile one time so we put it here
float_match = re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match
def is_number(string):
return bool(float_match(string))
def insert(self, table, **column_names):
"""
Insert function
Example usages:-
db.insert('people', first_name='Ringo',
second_name='Starr', DOB=STR_TO_DATE('01-01-1999', '%d-%m-%Y'))
"""
sql_str = "INSERT INTO `%s`.`%s` " % (self.database_name, table)
if column_names is not None:
columns = "("
values = "("
for arg, value in column_names.iteritems():
columns += "`%s`, " % arg
# Check how we should add this to the columns string
if is_number(value) or arg == 'DOB':
# It's a number or date so we don't add the ''
values += "%s, " % value
else:
# It's a string so we add the ''
values += "5S, " % value
columns = columns[:-2] # Strip off the spare ',' from the end
values = values[:-2] # Same here too
columns += ") VALUES" # Add the connecting keyword and brace
values += ");" # Add the brace and like terminator
sql_str += "%s %s" % (columns, values)
cursor = self.db.cursor()
cursor.execute(sql_str)
self.db.commit()
cursor.close()
def update(self, table, where=None, **column_values):
sql_str = "UPDATE `%s`.`%s` SET " % (self.database_name, table)
if column_values is not None:
            for column_name, value in column_values.iteritems():
sql_str += "`%s`=" % column_name
# check how we should add this to the column string
if is_number(value):
# it's a number so we don't add ''
sql_str += "%s, " % value
else:
# it's a date or string so add the ''
sql_str += "'%s', " % value
sql_str = sql_str[:-2] # strip off the last , and space character
if where:
sql_str += " WHERE %s" % where
        cursor = self.db.cursor()
cursor.execute(sql_str)
self.db.commit()
cursor.close()
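
# Hedged usage sketch: the database name, credentials and 'people' table below are
# illustrative placeholders, not part of the original driver.
if __name__ == '__main__':
    db = MySQLDatabase('example_db', 'example_user', 'example_password')
    print db.get_available_tables()
    # fetch a few rows as named tuples from the hypothetical 'people' table
    print db.select('people', columns=['first_name', 'second_name'],
                    named_tuples=True, limit=5)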
|
"""
https://codility.com/programmers/task/equi_leader/
"""
from collections import Counter, defaultdict
def solution(A):
def _is_equi_leader(i):
prefix_count_top = running_counts[top]
suffix_count_top = total_counts[top] - prefix_count_top
return (prefix_count_top * 2 > i + 1) and (suffix_count_top * 2 > len(A) - i - 1)
total_counts = Counter(A)
running_counts = defaultdict(int)
top = A[0]
result = 0
for i in xrange(len(A) - 1):
n = A[i]
running_counts[n] += 1
top = top if running_counts[top] >= running_counts[n] else n
if _is_equi_leader(i):
result += 1
return result
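
# Quick self-check (a sketch, not part of the original solution): Codility's sample
# sequence [4, 3, 4, 4, 4, 2] has equi leaders at S = 0 and S = 2.
if __name__ == '__main__':
    print(solution([4, 3, 4, 4, 4, 2]))  # expected: 2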
|
import os
import shutil
from glob import glob
print 'Content-type:text/html\r\n\r\n'
print '<html>'
found_pages = glob('archive/*.py')
if found_pages:
path = "/cgi-bin/archive/"
moveto = "/cgi-bin/pages/"
files = os.listdir(path)
files.sort()
for f in files:
src = path+f
dst = moveto+f
shutil.move(src, dst)
print 'All pages restored'
    print '<meta http-equiv="refresh" content="1">'
if not found_pages:
print 'Nothing to restore'
print '</html>'
|
"""
@brief test log(time=0s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile
from pyquickhelper.ipythonhelper import upgrade_notebook, remove_execution_number
class TestConvertNotebooks(unittest.TestCase):
"""Converts notebooks from v3 to v4. Should not be needed anymore."""
def test_convert_notebooks(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.abspath(os.path.dirname(__file__))
fold2 = os.path.normpath(
os.path.join(fold, "..", "..", "_doc", "notebooks"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
# remove numbers
remove_execution_number(nbf, nbf)
fold2 = os.path.normpath(os.path.join(fold, "..", "..", "_unittests"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
if __name__ == "__main__":
unittest.main()
|
from itertools import permutations
import re
def create_formula(combination,numbers):
formula = ""
index = 0
for op in combination:
formula += str(numbers[index]) + op
index += 1
formula += numbers[index]
return formula
'''
Unnecessary function
'''
def evaluate(form):
result = 0
for index in range(len(form)):
if form[index] == "+":
result += int(form[index+1])
index += 1
elif form[index] == "-":
result -= int(form[index+1])
index += 1
elif form[index] == "*":
result *= int(form[index+1])
index += 1
elif form[index] == "/":
result //= int(form[index+1])
index += 1
else:
result += int(form[index])
return result
def countdown(numbers):
rightCombinations = []
finalScore = numbers.pop()
combinations = returnAllCombinations(len(numbers) - 1)
perms = list(permutations(numbers))
for combination in combinations:
for permut in perms:
formula = create_formula(combination,permut)
#form = re.split("([*+-/])",formula)
#if int(evaluate(form)) == int(finalScore):
if int(eval(formula)) == int(finalScore):
rightCombinations.append(formula)
return rightCombinations
def returnAllCombinations(size):
listFinal = []
for x in range(0,size):
if len(listFinal) == 0:
for y in range(0,4):
if y == 0:
listFinal.append("+")
elif y == 1:
listFinal.append("-")
elif y == 2:
listFinal.append("*")
else:
listFinal.append("/")
else:
newList = []
for l in listFinal:
for y in range(0,4):
newLine = list(l)
if y == 0:
newLine.append("+")
elif y == 1:
newLine.append("-")
elif y == 2:
newLine.append("*")
else:
newLine.append("/")
newList.append(newLine)
listFinal = list(newList)
return listFinal
out = open("output.txt",'w')
for line in open("input.txt",'r'):
for formula in countdown(line.split(" ")):
out.write(formula)
out.write("\n")
out.write("\n\n")
|
from msrest.serialization import Model
class Destination(Model):
"""Capture storage details for capture description.
:param name: Name for capture destination
:type name: str
:param storage_account_resource_id: Resource id of the storage account to
be used to create the blobs
:type storage_account_resource_id: str
:param blob_container: Blob container Name
:type blob_container: str
:param archive_name_format: Blob naming convention for archive, e.g.
{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}.
Here all the parameters (Namespace,EventHub .. etc) are mandatory
irrespective of order
:type archive_name_format: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'storage_account_resource_id': {'key': 'properties.storageAccountResourceId', 'type': 'str'},
'blob_container': {'key': 'properties.blobContainer', 'type': 'str'},
'archive_name_format': {'key': 'properties.archiveNameFormat', 'type': 'str'},
}
def __init__(self, name=None, storage_account_resource_id=None, blob_container=None, archive_name_format=None):
self.name = name
self.storage_account_resource_id = storage_account_resource_id
self.blob_container = blob_container
self.archive_name_format = archive_name_format
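
# Minimal construction sketch (all values below are illustrative placeholders, not
# taken from the original module); the naming convention string follows the format
# described in the class docstring.
if __name__ == '__main__':
    dest = Destination(
        name='example-capture-destination',
        storage_account_resource_id='<storage-account-resource-id>',
        blob_container='example-container',
        archive_name_format='{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}')
    print(dest.archive_name_format)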
|
"""94. Binary Tree Inorder Traversal
https://leetcode.com/problems/binary-tree-inorder-traversal/
Given a binary tree, return the in-order traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,3,2]
Follow up: Recursive solution is trivial, could you do it iteratively?
"""
from typing import List
from common.tree_node import TreeNode
class Solution:
def iterative_inorder_traversal(self, root: TreeNode) -> List[int]:
"""
iterative traversal
"""
ans = []
stack = []
while root or stack:
if root:
stack.append(root)
root = root.left
else:
root = stack.pop()
ans.append(root.val)
root = root.right
return ans
def recursive_inorder_traversal(self, root: TreeNode) -> List[int]:
"""
recursive traversal, process left if needed, then val, at last right
"""
if not root:
return []
ans = []
ans += self.recursive_inorder_traversal(root.left)
ans.append(root.val)
ans += self.recursive_inorder_traversal(root.right)
return ans
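
# Tiny smoke test (a sketch; assumes TreeNode(val) exposes assignable .left/.right
# attributes, which this file does not itself confirm). It builds the docstring's
# example tree [1,null,2,3] and prints [1, 3, 2] for both traversals.
if __name__ == '__main__':
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    s = Solution()
    print(s.iterative_inorder_traversal(root))  # [1, 3, 2]
    print(s.recursive_inorder_traversal(root))  # [1, 3, 2]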
|
from django.core.cache import cache
from django.shortcuts import render
from django.http import Http404
from styleguide.utils import (Styleguide, STYLEGUIDE_DIR_NAME,
STYLEGUIDE_DEBUG, STYLEGUIDE_CACHE_NAME,
STYLEGUIDE_ACCESS)
def index(request, module_name=None, component_name=None):
if not STYLEGUIDE_ACCESS(request.user):
raise Http404()
styleguide = None
if not STYLEGUIDE_DEBUG:
styleguide = cache.get(STYLEGUIDE_CACHE_NAME)
if styleguide is None:
styleguide = Styleguide()
cache.set(STYLEGUIDE_CACHE_NAME, styleguide, None)
if module_name is not None:
styleguide.set_current_module(module_name)
context = {'styleguide': styleguide}
index_path = "%s/index.html" % STYLEGUIDE_DIR_NAME
return render(request, index_path, context)
|
from quotes.models import Quote
from django.contrib import admin
class QuoteAdmin(admin.ModelAdmin):
list_display = ('message', 'name', 'program', 'class_of',
'submission_time')
admin.site.register(Quote, QuoteAdmin)
|
from tkinter import *
import tkinter
import HoursParser
class UserInterface(tkinter.Frame):
def __init__(self, master):
self.master = master
self.events_list = []
# Set window size
master.minsize(width=800, height=600)
master.maxsize(width=800, height=600)
# File Parser
self.parser = HoursParser.FileParser()
# Filename Label
self.file_select_text = tkinter.StringVar()
self.file_select_text.set(" ")
# Initialize Widgets
super().__init__(master)
self.pack()
# Label for Application
self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)")
self.title_label.pack(fill="both", expand="yes")
self.inner_label = Label(self.title_label, text="Choose Hours File")
self.inner_label.pack()
# Button for File Selection
self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press())
self.file_select_button.pack()
# Label for File Selection Button
self.file_select_label = Label(self.title_label, textvariable=self.file_select_text)
self.file_select_label.pack()
# Button for Parsing File
self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed())
self.file_parse_button.pack()
# List of Events
self.events_list_box = Listbox(self.title_label)
self.events_list_box.pack()
# Show Info Button
        self.show_info_button = Button(self.title_label, state="disabled", text="Show Info", command=lambda: self.show_event_info())
self.show_info_button.pack()
# Shows information about event
self.text_area = Text(self.title_label)
self.text_area.pack()
# Called when Select File button is pressed.
def file_button_press(self):
self.parser.choose_file()
self.file_select_text.set(self.parser.file_name)
if self.parser.file_name is not None:
self.file_parse_button['state'] = 'normal'
def parse_button_pressed(self):
self.events_list = self.parser.parse_file()
self.populate_list_box(self.events_list_box)
# Puts names of events in a list from parsed file.
def populate_list_box(self, list_box):
i = 0
for event in self.events_list:
list_box.insert(i, event.get_event_name())
i += 1
self.show_info_button['state'] = 'normal'
def show_event_info(self):
# Store Active Time Index
event_list_index = int(self.events_list_box.index(ACTIVE))
# Clear text box from previous event details
# if self.text_area.get(END) is not "\n":
# self.text_area.delete(0, 'end')
# Display Formatted Information about Event.
self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name())
self.text_area.insert(END, '\n')
self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location())
self.text_area.insert(END, '\n')
self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time())
self.text_area.insert(END, '\n')
self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time())
self.text_area.insert(END, '\n')
self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees())
self.text_area.insert(END, '\n')
root = Tk()
root.wm_title("Scheduler (Alpha)")
main_app = UserInterface(master=root)
main_app.mainloop()
|
required_states = ['accept', 'reject', 'init']
class TuringMachine(object):
def __init__(self, sigma, gamma, delta):
self.sigma = sigma
self.gamma = gamma
self.delta = delta
self.state = None
self.tape = None
self.head_position = None
return
def initialize(self, input_string):
for char in input_string:
assert char in self.sigma
self.tape = list(input_string)
self.state = 'init'
self.head_position = 0
return
def simulate_one_step(self, verbose=False):
        if self.state in ['accept', 'reject']:
            # already halted; report the halting state and do nothing further
            print("# %s" % self.state)
            return
cur_symbol = self.tape[self.head_position]
transition = self.delta[(self.state, cur_symbol)]
if verbose:
self.print_tape_contents()
template = "delta({q_old}, {s_old}) = ({q}, {s}, {arr})"
print(template.format(q_old=self.state,
s_old=cur_symbol,
q=transition[0],
s=transition[1],
arr=transition[2])
)
self.state = transition[0]
self.tape[self.head_position] = transition[1]
if(transition[2] == 'left'):
self.head_position = max(0, self.head_position - 1)
else:
assert(transition[2] == 'right')
if self.head_position == len(self.tape) - 1:
self.tape.append('#')
self.head_position +=1
if verbose:
self.print_tape_contents()
return
def print_tape_contents(self):
formatted = ''.join(char if i != self.head_position else '[%s]' % char
for i, char in enumerate(self.tape))
print(formatted)
def run(self, input_string, verbose=False):
self.initialize(input_string)
while self.state not in ['reject', 'accept']:
self.simulate_one_step(verbose)
return str(self.tape)
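
# Minimal usage sketch (the machine below is an illustrative assumption, not part of
# the original module): scan the input rightwards and accept on the first blank '#'.
if __name__ == '__main__':
    delta = {
        ('init', '0'): ('init', '0', 'right'),
        ('init', '1'): ('init', '1', 'right'),
        ('init', '#'): ('accept', '#', 'right'),
    }
    tm = TuringMachine(sigma=['0', '1'], gamma=['0', '1', '#'], delta=delta)
    print(tm.run('0110', verbose=True))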
|
import argparse
from os import path as os_path
import demo_project as demo
import traceback
def set_host_url_arg():
parser.add_argument('--host', required=True,
help='the url for the Materials Commons server')
def set_datapath_arg():
parser.add_argument('--datapath', required=True,
help='the path to the directory containing the files used by the build')
def set_apikey_arg():
    parser.add_argument('--apikey', required=True, help='apikey for the user building the demo project')
parser = argparse.ArgumentParser(description='Build Demo Project.')
set_host_url_arg()
set_datapath_arg()
set_apikey_arg()
args = parser.parse_args()
host = args.host
path = os_path.abspath(args.datapath)
key = args.apikey
try:
builder = demo.DemoProject(host, path, key)
# a basic get request that makes no changes; will fail if there is a problem with the host or key
flag = builder.does_project_exist()
project = builder.build_project()
if flag:
print "Refreshed project with name = " + project.name
else:
print "Built project with name = " + project.name
except Exception as err:
traceback.print_exc()
print 'Error: ', err
|
# -*- coding: utf-8 -*-
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
|
from pwn.internal.shellcode_helper import *
@shellcode_reqs(arch=['i386', 'amd64'], os=['linux', 'freebsd'])
def fork(parent, child = None, os = None, arch = None):
"""Fork this shit."""
if arch == 'i386':
if os in ['linux', 'freebsd']:
return _fork_i386(parent, child)
elif arch == 'amd64':
if os in ['linux', 'freebsd']:
return _fork_amd64(parent, child)
bug('OS/arch combination (%s, %s) was not supported for fork' % (os, arch))
def _fork_amd64(parent, child):
code = """
push SYS_fork
pop rax
syscall
test rax, rax
jne %s
""" % parent
if child is not None:
code += 'jmp %s\n' % child
return code
def _fork_i386(parent, child):
code = """
push SYS_fork
pop eax
int 0x80
test eax, eax
jne %s
""" % parent
if child is not None:
code += 'jmp %s\n' % child
return code
|
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_alert_list_layout",
#"cap_gis_location_xml_post_parse",
#"cap_gis_location_xml_post_render",
)
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_warning_priority",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geophysical (inc. landslide)")),
("Met", T("Meteorological (inc. flood)")),
("Safety", T("General emergency and public safety")),
("Security", T("Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue and recovery")),
("Fire", T("Fire suppression and rescue")),
("Health", T("Medical and public health")),
("Env", T("Pollution and other environmental")),
("Transport", T("Public and private transportation")),
("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
# @todo: can not be empty in alerts (validator!)
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
requires = IS_IN_SET(cap_alert_status_code_opts),
),
Field("msg_type",
label = T("Message Type"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
),
Field("source",
label = T("Source"),
default = self.generate_source,
),
Field("scope",
label = T("Scope"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "text",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
Field("note", "text",
label = T("Note"),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(),
),
# approved_on field for recording when the alert was approved
s3_datetime("approved_on",
readable = False,
writable = False,
),
*s3_meta_fields())
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event_type_id",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
filter_widgets = filter_widgets,
list_layout = cap_alert_list_layout,
list_orderby = "cap_info.expires desc",
onvalidation = self.cap_alert_form_validation,
# update the approved_on field on approve of the alert
onapprove = self.cap_alert_approve,
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
ADD_ALERT = T("Create Alert")
crud_strings[tablename] = Storage(
label_create = ADD_ALERT,
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# ---------------------------------------------------------------------
# Warning Priorities for CAP
tablename = "cap_warning_priority"
define_table(tablename,
Field("priority_rank", "integer",
label = T("Priority Rank"),
length = 2,
),
Field("event_code",
label = T("Event Code"),
),
Field("name", notnull = True, length = 64,
label = T("Name"),
),
Field("event_type",
label = T("Event Type"),
),
Field("urgency",
label = T("Urgency"),
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
label = T("Severity"),
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
label = T("Certainty"),
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("color_code",
label = T("Color Code"),
),
*s3_meta_fields())
priority_represent = S3Represent(lookup = tablename)
crud_strings[tablename] = Storage(
label_create = T("Create Warning Priority"),
title_display = T("Warning Priority Details"),
title_list = T("Warning Priorities"),
title_update = T("Edit Warning Priority"),
title_upload = T("Import Warning Priorities"),
label_list_button = T("List Warning Priorities"),
label_delete_button = T("Delete Warning Priority"),
msg_record_created = T("Warning Priority added"),
msg_record_modified = T("Warning Priority updated"),
msg_record_deleted = T("Warning Priority removed"),
msg_list_empty = T("No Warning Priorities currently registered")
)
# ---------------------------------------------------------------------
# CAP info priority
# @ToDo: i18n: Need label=T("")
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en",
requires = IS_EMPTY_OR(
IS_IN_SET(settings.get_cap_languages())
),
),
Field("category", "list:string",
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
required = True,
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(),
), # 1 or more allowed
self.event_type_id(empty = False,
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
Field("response_type", "list:string",
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(),
), # 0 or more allowed
Field("priority",
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(
db, "cap_warning_priority.id",
priority_represent
),
),
),
Field("urgency",
required = True,
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
required = True,
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
required = True,
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("audience", "text"),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
s3_datetime("effective",
default = "now",
),
s3_datetime("onset"),
s3_datetime("expires",
past = 0,
),
Field("sender_name"),
Field("headline"),
Field("description", "text"),
Field("instruction", "text"),
Field("contact", "text"),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
*s3_meta_fields())
# @ToDo: Move labels into main define_table (can then be lazy & performs better anyway)
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
onaccept = self.info_onaccept,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
),
Field("size", "integer",
writable = False,
),
Field("uri",
# needs a special validation
writable = False,
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
),
Field("digest",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm(#"name",
"info_id",
"resource_desc",
S3SQLInlineComponent("image",
label=T("Image"),
fields=["file",
],
),
S3SQLInlineComponent("document",
label=T("Document"),
fields=["file",
],
),
)
configure(tablename,
super_entity = "doc_entity",
crud_form = crud_form,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
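#
# Illustrative example (values assumed, not from any real alert): a CAP
# element <circle>38.91,-77.04 10.0</circle> would be stored as a
# gis_location with lat=38.91, lon=-77.04 plus a "radius" tag with value
# 10.0 (km), linked to the cap_area via the cap_area_location link table.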
tablename = "cap_area"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("name",
label = T("Area description"),
required = True,
),
Field("altitude", "integer"), # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
Field("ceiling", "integer"), # Feet above Sea-level in WGS84 (Maximum)
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("name",
"info_id",
# Not yet working with default formstyle or multiple=True
#S3SQLInlineComponent("location",
# name = "location",
# label = "",
# multiple = False,
# fields = [("", "location_id")],
# ),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
),
"altitude",
"ceiling",
)
area_represent = S3Represent(lookup=tablename)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
catalog_layers = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
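#
# Illustrative example (values assumed): an <area> carrying the geocode
# <geocode><valueName>SAME</valueName><value>006113</value></geocode>
# would be stored as a cap_area_tag record with tag="SAME" and
# value="006113" for the corresponding cap_area.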
tablename = "cap_area_tag"
define_table(tablename,
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
#configure(tablename,
# deduplicate = self.cap_area_tag_deduplicate,
# )
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y%m%d")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-time+-timezone+sequence-suffix
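# Illustrative example (settings assumed): with prefix "eden.example.org",
# OID "2.49.0.0.608.0", a previous alert id of 3 and no suffix configured,
# an alert created on 27 March 2017 would get the identifier
# "eden.example.org-2.49.0.0.608.0-20170327-004".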
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
oid = settings.get_cap_identifier_oid()
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%s-%03d%s%s" % \
(prefix, oid, _time, next_id, ["", "-"][bool(suffix)], suffix)
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def generate_source():
"""
Generate a source for CAP alert
"""
return "%s@%s" % (current.xml.domain,
current.deployment_settings.get_base_public_url())
# -------------------------------------------------------------------------
@staticmethod
def template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
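# web2py stores "list:string" values as a single bar-delimited string of
# the form "|item1|item2|", hence the [1:-1] strip and split("|") below.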
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_form_validation(form):
"""
On Validation for CAP alert form
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
return
# -------------------------------------------------------------------------
@staticmethod
def info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
if alert_id and cap_alert_is_template(alert_id):
db(itable.id == info_id).update(is_template = True)
return True
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_approve(record=None):
"""
Update the approved_on field when alert gets approved
"""
if not record:
return
alert_id = record["id"]
# Update approved_on at the time the alert is approved
if alert_id:
db = current.db
approved_on = record["approved_on"]
db(db.cap_alert.id == alert_id).update(approved_on = current.request.utcnow)
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event_type_id=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
r = current.db(query).select(table.is_template,
limitby=(0, 1)).first()
return r and r.is_template
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
record_id = record.id
table = s3db.cap_info
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Template"), None),
(T("Information template"), "info"),
#(T("Area"), "area"),
#(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record_id, record),
_href=URL(c="cap", f="template",
args=[record_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % record_id]),
_target="_blank",
)
auth = current.auth
# Display 'Submit for Approval' based on permission
# and deployment settings
if not r.record.approved_by and \
current.deployment_settings.get_cap_authorisation() and \
auth.s3_has_permission("update", "cap_alert", record_id=r.id):
# Get the user ids for the role alert_approver
db = current.db
agtable = db.auth_group
rows = db(agtable.role == "Alert Approver")._select(agtable.id)
group_rows = db(agtable.id.belongs(rows)).select(agtable.id)
if group_rows:
for group_row in group_rows:
group_id = group_row.id
user_ids = auth.s3_group_members(group_id) # List of user_ids
pe_ids = [] # List of pe_ids
for user_id in user_ids:
pe_ids.append(auth.s3_user_pe_id(int(user_id)))
submit_btn = A(T("Submit for Approval"),
_href = URL(f = "compose",
vars = {"cap_alert.id": record.id,
"pe_ids": pe_ids,
},
),
_class = "action-btn"
)
else:
submit_btn = None
else:
submit_btn = None
table = s3db.cap_area
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
# We have an Area, so we can add Locations
location_tab = (T("Location"), "location")
else:
location_tab = ""
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
location_tab,
(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record_id, record),
_href=URL(c="cap", f="alert",
args=[record_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
if submit_btn:
rheader.insert(1, TR(submit_btn))
elif tablename == "cap_area":
# Shouldn't ever be called
tabs = [(T("Area"), None),
(T("Locations"), "location"),
#(T("Geocodes"), "tag"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_area_location":
# Shouldn't ever be called
# We need the rheader only for the link back to the area.
rheader = DIV(TABLE(TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.area_id),
_href=URL(c="cap", f="area",
args=[record.area_id, "update"]))),
),
))
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
if tablename == "cap_area_location":
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
db(table.id == _id).update(alert_id = alert_id)
return func
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except ImportError:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
if start >=0 and end >=0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
if lon_min and lon_max and lat_min and lat_max and \
(lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
if not lat or not lon:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
if start >= 0 and end >= 0:
point_text = wkt[start + 1 : end]
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
if lat and lon:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
def cap_alert_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for CAP Alerts on the Home page.
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cap_alert.id"]
item_class = "thumbnail"
#raw = record._row
headline = record["cap_info.headline"]
location = record["cap_area.name"]
description = record["cap_info.description"]
sender = record["cap_info.sender_name"]
headline = A(headline,
# @ToDo: Link to nicely-formatted version of Display page
_href = URL(c="cap", f="alert", args=record_id),
)
headline = DIV(headline,
current.T("in %(location)s") % dict(location=location)
)
item = DIV(headline,
P(description),
P(sender, style="bold"),
_class=item_class,
_id=item_id,
)
return item
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
# @ToDo:
username = form_vars.get("user", None)
password = form_vars.get("password", None)
try:
file = fetch(url)
except urllib2.URLError:
response.error = str(sys.exc_info()[1])
return output
except urllib2.HTTPError:
response.error = str(sys.exc_info()[1])
return output
File = StringIO(file)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
|
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic import ListView, DetailView
from .models import Category, Product
from cart.forms import CartAddProductForm
def category_list(request):
return render(request, "shop/category_list.html",
{'nodes': Category.objects.all()})
'''
class CategoryList(ListView):
model = Category
template_name = "category_list.html"
'''
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
return render(request, "shop/product_list.html",
{'category': category,
'nodes': categories,
'products': products,})
'''
class ProductList(ListView):
model = DesignProduct
template_name = "shop/product_list.html"
'''
def product_detail(request, id, slug):
categories = Category.objects.all()
product = get_object_or_404(Product,
id=id,
slug=slug,
available=True)
cart_product_form = CartAddProductForm()
return render(request,
'shop/product_detail.html',
{'product': product,
'nodes': categories,
'cart_product_form': cart_product_form})
|
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder
missionName = '005'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName)) # "Start of cleaning the file"
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
raw_file = 'src'
raw_string_with_tabs = open(work_dir + raw_file , 'r').read()
raw_string_with_cr = raw_string_with_tabs.replace( '\t', '\n' )
raw_list = raw_string_with_cr.splitlines()
AddLog('subtitle' , 'Début de la fonction OdditiesFinder') # "Start of the OdditiesFinder function"
list_without_oddities = OdditiesFinder( raw_list )
ref_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter') # "Start of the StringFormatter function"
for line in list_without_oddities:
ref_list.append( StringFormatter( line ) )
AddLog('subtitle' , 'Début de la fonction OutFileCreate') # "Start of the OutFileCreate function"
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms masculins italiens') # 'prenoms masculins italiens' = "Italian male first names"
|
"""
title : testtermopi.py
description : This program runs termopi.py.
: It displays the status of the resources (CPU load and memory usage) consumed by a Raspberry Pi
: computer and the resources consumed by one or more containers instantiated on the Pi.
source :
author : Carlos Molina-Jimenez (Carlos.Molina@cl.cam.ac.uk)
date : 27 Mar 2017
institution : Computer Laboratory, University of Cambridge
version : 1.0
usage :
notes :
compile and run : % python termopi.py
: It imports pidict.py, dockerctl.py and picheck.py which are found in
: ./modules.
: You need to include "./modules" in the PYTHONPATH environment variable to
: tell Python where to find pidict.py, dockerctl.py and picheck.py.
: For example, in a bash shell, you need to include the following lines
: in your .bash_profile file located in your home directory (you can see it
: with # ls -la).
:
: PYTHONPATH="./modules"
: export PYTHONPATH
python_version : Python 2.7.12
====================================================
"""
from modules.tools.termopi import termopi # class with dictionary data structure
cpuUsageThreshold= 50
cpuLoadThreshold= 3
termo= termopi()
termo.prt_pi_resources()
termo.create_jsonfile_with_pi_status()
|
from . import packet
class Packet5(packet.Packet):
def __init__(self, player, slot):
super(Packet5, self).__init__(0x5)
self.add_data(player.playerID)
self.add_data(slot)
self.add_structured_data("<h", 0) # Stack
self.add_data(0) # Prefix
self.add_structured_data("<h", 0) # ItemID
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('managers', '0011_auto_20150422_2018'),
]
operations = [
migrations.AlterField(
model_name='managerprofile',
name='picture',
field=models.ImageField(default=b'/static/assets/admin/layout/img/avatar.jpg', upload_to=b'profiles'),
preserve_default=True,
),
]
|
from distutils.core import setup
version = '1.1.1'
setup(name='CacheGenerator',
version=version,
description="CacheGenerator for Django",
author="Ricardo Santos",
author_email="ricardo@getgears.com",
url="http://github.com/ricardovice/CacheGenerator/",
packages = ['cachegenerator']
)
|
"""Shared pytest fixtures and test data."""
import copy
import uuid
import pytest
from django.contrib.auth import get_user_model
from onfido.models import Applicant, Check, Event, Report
APPLICANT_ID = str(uuid.uuid4())
CHECK_ID = str(uuid.uuid4())
IDENTITY_REPORT_ID = str(uuid.uuid4())
DOCUMENT_REPORT_ID = str(uuid.uuid4())
DOCUMENT_ID = str(uuid.uuid4())
User = get_user_model()
@pytest.fixture
def user():
return User.objects.create_user(
"fred", first_name="Fred", last_name="Flinstone", email="fred@example.com"
)
@pytest.fixture
def applicant(user):
data = copy.deepcopy(TEST_APPLICANT)
return Applicant.objects.create_applicant(user=user, raw=data)
@pytest.fixture
def check(applicant):
data = copy.deepcopy(TEST_CHECK)
return Check.objects.create_check(applicant, raw=data)
@pytest.fixture
def identity_report(check):
data = copy.deepcopy(TEST_REPORT_IDENTITY_ENHANCED)
return Report.objects.create_report(check, raw=data)
@pytest.fixture
def document_report(check):
data = copy.deepcopy(TEST_REPORT_DOCUMENT)
return Report.objects.create_report(check, raw=data)
@pytest.fixture
def report(identity_report):
return identity_report
@pytest.fixture
def event(check):
data = copy.deepcopy(TEST_EVENT)
return Event().parse(data)
TEST_APPLICANT = {
"id": APPLICANT_ID,
"created_at": "2019-10-09T16:52:42Z",
"sandbox": True,
"first_name": "Jane",
"last_name": "Doe",
"email": None,
"dob": "1990-01-01",
"delete_at": None,
"href": f"/v3/applicants/{APPLICANT_ID}",
"id_numbers": [],
"address": {
"flat_number": None,
"building_number": None,
"building_name": None,
"street": "Second Street",
"sub_street": None,
"town": "London",
"state": None,
"postcode": "S2 2DF",
"country": "GBR",
"line1": None,
"line2": None,
"line3": None,
},
}
TEST_CHECK = {
"id": CHECK_ID,
"created_at": "2019-10-09T17:01:59Z",
"status": "in_progress",
"redirect_uri": None,
"result": None,
"sandbox": True,
"tags": [],
"results_uri": f"https://onfido.com/checks/{CHECK_ID}/reports",
"form_uri": None,
"paused": False,
"version": "3.0",
"report_ids": [IDENTITY_REPORT_ID],
"href": f"/v3/checks/{CHECK_ID}",
"applicant_id": APPLICANT_ID,
"applicant_provides_data": False,
}
TEST_REPORT_IDENTITY_ENHANCED = {
"created_at": "2019-10-03T15:54:20Z",
"href": f"/v3/reports/{IDENTITY_REPORT_ID}",
"id": IDENTITY_REPORT_ID,
"name": "identity_enhanced",
"properties": {
"matched_address": 19099121,
"matched_addresses": [
{"id": 19099121, "match_types": ["credit_agencies", "voting_register"]}
],
},
"result": "clear",
"status": "complete",
"sub_result": None,
"breakdown": {
"sources": {
"result": "clear",
"breakdown": {
"total_sources": {
"result": "clear",
"properties": {"total_number_of_sources": "3"},
}
},
},
"address": {
"result": "clear",
"breakdown": {
"credit_agencies": {
"result": "clear",
"properties": {"number_of_matches": "1"},
},
"telephone_database": {"result": "clear", "properties": {}},
"voting_register": {"result": "clear", "properties": {}},
},
},
"date_of_birth": {
"result": "clear",
"breakdown": {
"credit_agencies": {"result": "clear", "properties": {}},
"voting_register": {"result": "clear", "properties": {}},
},
},
"mortality": {"result": "clear"},
},
"check_id": CHECK_ID,
"documents": [],
}
TEST_REPORT_DOCUMENT = {
"created_at": "2019-10-03T14:05:48Z",
"documents": [{"id": DOCUMENT_ID}],
"href": f"/v3/reports/{DOCUMENT_REPORT_ID}",
"id": DOCUMENT_REPORT_ID,
"name": "document",
"properties": {
"nationality": "",
"last_name": "Names",
"issuing_country": "GBR",
"gender": "",
"first_name": "Report",
"document_type": "passport",
"document_numbers": [{"value": "123456789", "type": "document_number"}],
"date_of_expiry": "2030-01-01",
"date_of_birth": "1990-01-01",
},
"result": "clear",
"status": "complete",
"sub_result": "clear",
"breakdown": {
"data_comparison": {
"result": "clear",
"breakdown": {
"issuing_country": {"result": "clear", "properties": {}},
"gender": {"result": "clear", "properties": {}},
"date_of_expiry": {"result": "clear", "properties": {}},
"last_name": {"result": "clear", "properties": {}},
"document_type": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"first_name": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
},
},
"data_validation": {
"result": "clear",
"breakdown": {
"gender": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"document_expiration": {"result": "clear", "properties": {}},
"expiry_date": {"result": "clear", "properties": {}},
"mrz": {"result": "clear", "properties": {}},
},
},
"age_validation": {
"result": "clear",
"breakdown": {
"minimum_accepted_age": {"result": "clear", "properties": {}}
},
},
"image_integrity": {
"result": "clear",
"breakdown": {
"image_quality": {"result": "clear", "properties": {}},
"conclusive_document_quality": {"result": "clear", "properties": {}},
"supported_document": {"result": "clear", "properties": {}},
"colour_picture": {"result": "clear", "properties": {}},
},
},
"visual_authenticity": {
"result": "clear",
"breakdown": {
"fonts": {"result": "clear", "properties": {}},
"picture_face_integrity": {"result": "clear", "properties": {}},
"template": {"result": "clear", "properties": {}},
"security_features": {"result": "clear", "properties": {}},
"original_document_present": {"result": "clear", "properties": {}},
"digital_tampering": {"result": "clear", "properties": {}},
"other": {"result": "clear", "properties": {}},
"face_detection": {"result": "clear", "properties": {}},
},
},
"data_consistency": {
"result": "clear",
"breakdown": {
"date_of_expiry": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"issuing_country": {"result": "clear", "properties": {}},
"document_type": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
"gender": {"result": "clear", "properties": {}},
"first_name": {"result": "clear", "properties": {}},
"last_name": {"result": "clear", "properties": {}},
"nationality": {"result": "clear", "properties": {}},
},
},
"police_record": {"result": "clear"},
"compromised_document": {"result": "clear"},
},
"check_id": CHECK_ID,
}
TEST_EVENT = {
"payload": {
"resource_type": "check",
"action": "check.form_opened",
"object": {
"id": CHECK_ID,
"status": "complete",
"completed_at_iso8601": "2019-10-28T15:00:39Z",
"href": f"https://api.onfido.com/v3/checks/{CHECK_ID}",
},
}
}
|
"""
Log file parser for Cheetah by Johannes Niediek
This script reads out the reference settings
by sequentially following all crs, rbs, and gbd commands.
Please keep in mind that the following scenario is possible with Cheetah:
Start the recording
Stop the recording
Change the reference settings
Start the recording
If you do this, there will be .ncs files whose reference changes
at some point during the recording.
In most cases, this is probably not what you want,
so this script displays a warning message if you did it.
Cheetah ATLAS:
There is an undocumented channel number 32000038.
I reverse-engineered its use, but that might depend on the exact version
of ATLAS etc.
This script partially mirrors the system of variable definitions
in Cheetah. For complex arithmetic with variables, the script might fail.
Please check the GitHub repository (github.com/jniediek/combinato.git)
for updates and manual.
Contact me (jonied@posteo.de) for access to the repository.
"""
from __future__ import print_function, division
import os
import re
from collections import defaultdict
import datetime
from csv import writer as csv_writer
DATE_FNAME = 'start_stop_datetime.txt'
def parse_times(setting):
"""
read out the date and times of a recording
"""
def timestr2timeobj(time_str):
"""
convert a time string with milliseconds to a datetime object
"""
time, milli = time_str.split('.')
time = datetime.datetime.strptime(time, '%H:%M:%S')
time += datetime.timedelta(seconds=int(milli)/1000)
return time
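    # Example of the helper above (values follow from strptime's defaults):
    # timestr2timeobj('12:30:15.250') -> datetime(1900, 1, 1, 12, 30, 15, 250000);
    # only the .time() part is used when a folder date is available below.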
    tstart, tstop = [timestr2timeobj(rec[1])
                     for rec in (setting.start_rec, setting.stop_rec)]
if setting.folder is None:
folder_date_obj = None
else:
date_str = date_pattern.match(setting.folder).groups()[0]
folder_date_obj = datetime.datetime.strptime(date_str,
r'%Y-%m-%d_%H-%M-%S')
tstart = datetime.datetime.combine(folder_date_obj, tstart.time())
tstop = datetime.datetime.combine(folder_date_obj, tstop.time())
# by default assume that recording is stopped once every day
if tstop < tstart:
tstop += datetime.timedelta(days=1)
return folder_date_obj, tstart, tstop
class Setting(object):
"""
simple class that stores reference settings
"""
def __init__(self):
self.num2name = None
self.name2num = None
self.lrefs = None
self.grefs = None
self.crefs = None
self.start_rec = None
self.stop_rec = None
self.start_timestamp = None
self.stop_timestamp = None
self.folder = None
DEBUG = False
set_drs_strings = ('Processing line: -SetDRS', # old systems
'Processing line: -SetAcqEntReference') # new systems
set_channel_pattern = re.compile(r'Processing line:\s*-SetChannelNumber')
channel_number_pattern = re.compile(r'.*\"(.*)\" (\d.*)')
channel_number_pattern_var = re.compile(r'.* (.*) (.*)')
drs_command_pattern = re.compile(r'DRS Command\(b(\w) (\w*)\s{1,2}'
r'(\d*)\s{0,2}(\d*)')
variable_pattern = re.compile(r'.*(%\w*) = \"?(\w*)\"?')
date_pattern = re.compile(r'.*(\d{4}-\d{1,2}-\d{1,2}_'
                          r'\d{1,2}-\d{1,2}-\d{1,2}).*')
def board_num_to_chan(board, num):
return (board - 1) * 16 + num
def chan_to_board_num(chan):
return 2 * int(chan/32) + 1, chan % 32
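# Worked example for the two helpers above: for odd board numbers and local
# channel numbers below 32 they are inverses of each other, e.g.
# board_num_to_chan(3, 5) == 37 and chan_to_board_num(37) == (3, 5).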
def parser(fname):
"""
transform logfile into header, log, and ignored lines
"""
with open(fname, 'r') as fid:
lines = fid.readlines()
fid.close()
in_header = True
is_notice = False
ignored_lines = []
protocol = []
header = {}
for line in lines:
if line[:13] == '-* NOTICE *-':
is_notice = True
else:
is_notice = False
if in_header:
# this means header is over
if is_notice:
in_header = False
else:
if len(line) > 3:
key, value = line.split(':', 1)
header[key] = value.strip()
else:
if is_notice:
fields = line[15:].split(' - ', 4)
time = fields[0]
stamp = int(fields[1])
msg = fields[2].strip().replace('\r', '')
if len(fields) == 4:
msg2 = fields[3].strip().replace('\r', '')
else:
msg2 = ''
protocol.append((time, stamp, msg, msg2))
elif line.startswith('Log file successfully moved to'):
target = line.split()[-1]
# this indicates a log file move
# mov is our key
protocol.append((0, 0, 'mov', target))
else:
ignored_lines.append(line.strip())
try:
bn = 'Cheetah ' + header['Cheetah Build Number']
except KeyError:
bn = 'ATLAS ' + header['Cheetah ATLAS Build Number']
print(bn)
return header, protocol, ignored_lines
def all_defined_check(chnum2name, crefs):
"""
check if a reference has been defined for all existing channels
"""
# print(chnum2name)
for chnum in chnum2name:
board, lnum = chan_to_board_num(chnum)
try:
ref = crefs[chnum]
if DEBUG:
print('Channel {} (board {} channel {}) - {}'.
format(chnum, board, lnum, ref))
except KeyError:
print('No reference defined for channel {} ({})'.
format(chnum, chnum2name[chnum][0]))
def print_refs(lrefs, grefs):
"""
    overview of local and global references
"""
sorted_keys = sorted(lrefs.keys())
for board, ref in sorted_keys:
lref = lrefs[(board, ref)]
if lref in grefs:
gboard = grefs[lref]
stri = 'global, board {}'.format(gboard)
else:
stri = 'local'
print('board {} ref {} - {} ({})'.
format(board, ref, lrefs[(board, ref)], stri))
def analyze_drs(protocol):
"""
go through a protocol and analyze all drs settings
"""
# for each board, store the 8 local refs
# 32..35 are the 4 local reference wires
# 36, 37 are subject ground, patient ground
# 38 seems to be specific to ATLAS
# this is a (board, ref) -> local_num dict
local_refs = dict()
# 8 ref numbers can be driven globally
# this is a ref_num -> board dict
global_refs = dict()
# each channel has a reference which
# refers to its board's local referenes
# this is a ch_num -> ref_num dict
channel_refs = dict()
# name2num is unique
ch_name2num = dict()
# num2name is *not* unique, values are lists
ch_num2name = defaultdict(list)
# save the settings
all_settings = []
variables = dict()
temp_setting = None
for line in protocol:
time, timestamp, msg1, msg2 = line
if temp_setting is None:
temp_setting = Setting()
if msg1 == 'mov':
temp_setting.folder = msg2
elif '::SendDRSCommand()' in msg1:
# log all reference settings (command file and GUI interaction)
board, cmd, arg1, arg2 = drs_command_pattern.match(msg2).groups()
arg1 = int(arg1)
board = int(board, 16)
if cmd != 'hsp':
arg2 = int(arg2)
else:
arg2 = ''
if cmd == 'gbd':
# this is the global drive
# if a reference is driven globally, it overrides
# the local settings of that reference
if arg2 == 1:
global_refs[arg1] = board
print('{} is now driven by board {}'.format(arg1, board))
elif arg2 == 0:
if arg1 in global_refs:
del global_refs[arg1]
if cmd == 'rbs':
# each board stores 8 references
# arg1 is the stored number
# arg2 is the channel it points to
if (board, arg1) in local_refs:
if DEBUG:
print('board {} ref {} was {}, is now {}'.
format(board, arg1,
local_refs[(board, arg1)], arg2))
local_refs[(board, arg1)] = arg2
elif cmd == 'crs':
# each channel is indexed by board and local number
# arg1 is the local channel number
# arg2 is the local reference it points to
# try:
# local_ref = local_refs[(board, arg2)]
# except KeyError:
# print(msg2)
# raise Warning('Using undefined reference!')
chnum = board_num_to_chan(board, arg1)
channel_refs[chnum] = arg2
# print(cmd, board, arg1, chnum, local_ref)
elif 'StartRecording' in msg1:
temp_setting.num2name = ch_num2name.copy()
temp_setting.name2num = ch_name2num.copy()
temp_setting.lrefs = local_refs.copy()
temp_setting.grefs = global_refs.copy()
temp_setting.crefs = channel_refs.copy()
temp_setting.start_rec = (msg1, time)
temp_setting.start_timestamp = timestamp
elif 'StopRecording' in msg1:
# here, the setting is definite and has to be saved
temp_setting.stop_rec = (msg1, time)
temp_setting.stop_timestamp = timestamp
all_settings.append(temp_setting)
temp_setting = None
elif ' = ' in msg2:
# assigning a variable
var, val = variable_pattern.match(msg2).groups()
variables[var] = val
elif '%currentADChannel += 1' in msg2:
# this is a hack, but it seems to work well
print('Applying hack for += 1 syntax, check results!')
var, val = msg2.split('+=')
variables['%currentADChannel'] = str(int(variables['%currentADChannel']) + 1)
if set_channel_pattern.match(msg2):
# log channel numbers
if '%' in msg2:
var, ch_num = channel_number_pattern_var.match(msg2).groups()
var = var.strip()
ch_num = ch_num.strip()
try:
ch_name = variables[var]
except KeyError:
print('{}, but something is wrong with setting channel'
                          ' numbers. Check for errors'
' in the Cheetah logfile itself.'.format(msg2))
continue
if '%' in ch_num:
ch_num = variables[ch_num]
else:
result = channel_number_pattern.match(msg2)
if result is not None:
ch_name, ch_num = result.groups()
else:
print('Parser skipped the following line: ' + msg2)
continue
ch_num = int(ch_num)
if ch_name in ch_name2num:
raise Warning('channel number reset')
ch_name2num[ch_name] = ch_num
ch_num2name[ch_num].append(ch_name)
elif msg2.startswith(set_drs_strings):
# if needed, insert code to
# log reference settings from command file
pass
return all_settings
def create_rep(num2name, name2num, crefs, lrefs, grefs):
"""
create a human readable representation of the referencing
"""
all_defined_check(num2name, crefs)
if DEBUG:
print_refs(lrefs, grefs)
chnames = []
for num in sorted(num2name.keys()):
chnames += num2name[num]
out_str = []
for name in chnames:
try:
chan = name2num[name]
except KeyError:
print('Processing {}, but no channel number was '
'assigned. Check results carefully!'.format(name))
continue
ch_board, ch_board_num = chan_to_board_num(chan)
local_ref_num = crefs[chan] # gives the local ref number
# this is now a local number, so it's in 0..7
maybe_global = False
if local_ref_num in grefs:
ref_board = grefs[local_ref_num]
if ref_board != ch_board:
maybe_global = True
# here, I have to check whether the
# driving channel is the same number on my local board
# i.e., if b1_15 is b1_ref_2 and b1_ref_2 is gd
# and b3_7 has ref_2, then it's global only if b3_15 is b3_ref_2
else:
ref_board = ch_board
ref_num = lrefs[(ref_board, local_ref_num)]
ref_num2 = lrefs[(ch_board, local_ref_num)]
add_str = ''
if maybe_global:
# print('Special case, global ref {}, local ref {}'
# .format(ref_num, lrefs[(ch_board, local_ref_num)]))
if ref_num2 != 38:
add_str = ' ?'
if ref_num != ref_num2:
# print(ref_num, lrefs[(ch_board, local_ref_num)])
ref_board = ch_board
ref_num = ref_num2
else:
add_str = ' ???'
ref_board = ch_board
ref_num = ref_num2
pass
# print('Using channel 38')
if ref_board == ch_board:
board_str = 'local{}'.format(add_str)
else:
board_str = 'global{}'.format(add_str)
if ref_num > 31:
# these are the reference wires
if ref_num == 38:
ref_name = 'board {} Unknown Ground'.format(ref_board)
elif ref_num == 36:
ref_name = 'board {} Patient Ground'.format(ref_board)
else:
tnum = (ref_num - 32) * 8
refchan = board_num_to_chan(ref_board, tnum)
if refchan in num2name:
pref_name = num2name[refchan]
idx = 0
if len(pref_name) == 2:
if pref_name[0][0] == 'u':
idx = 1
ref_name = pref_name[idx][:-1] + ' reference wire'
else:
ref_name = 'board {} head stage {} reference wire'.\
format(ref_board, ref_num - 32)
else:
global_num = board_num_to_chan(ref_board, ref_num)
chlist = num2name[global_num]
if len(chlist):
ref_name = chlist[0]
else:
ref_name = 'UNDEF'
if name == ref_name:
board_str += ' ZERO'
out_str.append(('{:03d}'.format(chan), name, ref_name, board_str))
return out_str
def check_logfile(fname, write_csv=False, nback=0, write_datetime=False):
"""
    run over a Cheetah logfile and analyze reference settings etc.
"""
_, protocol, _ = parser(fname)
base_name = os.path.splitext(os.path.basename(fname))[0]
all_settings = analyze_drs(protocol)
for i_setting, setting in enumerate(all_settings):
print()
if setting.folder is None:
msg = 'Warning: Recording Stop -> Start without folder change!'
else:
msg = setting.folder
print('Start: {} ({})'.format(setting.start_rec[1],
setting.start_timestamp))
print('Stop: {} ({})'.format(setting.stop_rec[1],
setting.stop_timestamp))
# print('Duration: {} min'.
# format((setting.stop_rec[1] - setting.start_rec[1])))
out_str = create_rep(setting.num2name, setting.name2num,
setting.crefs, setting.lrefs, setting.grefs)
if write_csv:
setting = all_settings[-nback-1]
if setting.folder is None:
msg = 'Warning: Recording Stop -> Start without folder change!'
else:
msg = setting.folder
out_str = create_rep(setting.num2name, setting.name2num,
setting.crefs, setting.lrefs, setting.grefs)
outfname = base_name + '_{:02d}.csv'.\
format(len(all_settings) - nback - 1)
with open(outfname, 'w') as outf:
outf.write('# {} {} {}\n'.format(msg,
setting.start_rec[1],
setting.stop_rec[1]))
csvwriter = csv_writer(outf)
for line in out_str:
csvwriter.writerow(line)
if write_datetime:
setting = all_settings[-nback-1]
date, start, stop = parse_times(setting)
print(date, start, stop)
if date is None:
out = '# Date not guessed because Recording was stopped'\
' and re-started without folder change!\n'
else:
out = '# {}\ncreate_folder {}\n'.\
format(setting.folder, date.strftime('%Y-%m-%d %H:%M:%S'))
start_ts = setting.start_timestamp
stop_ts = setting.stop_timestamp
for name, d, t in (('start', start, start_ts),
('stop', stop, stop_ts)):
out += name + '_recording {} {} {}\n'.\
format(d.date().isoformat(), d.time().isoformat(), t)
diff_time = (stop_ts - start_ts)/1e6 - (stop - start).seconds
out += 'cheetah_ahead: {}\n'.format(diff_time)
if os.path.exists(DATE_FNAME):
print('{} exists, not overwriting!'.format(DATE_FNAME))
else:
with open(DATE_FNAME, 'w') as fid:
fid.write(out)
if __name__ == '__main__':
from argparse import ArgumentParser
aparser = ArgumentParser(epilog='Johannes Niediek (jonied@posteo.de)')
aparser.add_argument('--write-csv', action='store_true', default=False,
help='Write out to logfile_number.csv')
aparser.add_argument('--write-datetime', action='store_true',
default=False, help='Write start/stop timestamps to'
' file {}'.format(DATE_FNAME))
aparser.add_argument('--logfile', nargs=1,
help='Logfile, default: CheetahLogFile.txt')
aparser.add_argument('--nback', nargs=1, type=int,
help='Save last-n\'th setting', default=[0])
args = aparser.parse_args()
if not args.logfile:
logfile = 'CheetahLogFile.txt'
else:
logfile = args.logfile[0]
check_logfile(logfile, args.write_csv, args.nback[0], args.write_datetime)
|
import numpy as np
import matplotlib.pyplot as plt
import sys as sys
if len(sys.argv) < 2:
    print("Need an input file with many rows of 'id score'\n")
sys.exit(1)
fname = sys.argv[1]
vals = np.loadtxt(fname)
ids = vals[:,0]
score = vals[:,1]
score_max = 400  # max(score)
score = np.clip(score, 10, score_max)
score = score/score_max
NUM_COLS=300
fig = plt.figure(figsize=(12,9))
ax = fig.add_subplot(111)
ax.set_facecolor('0.50')  # grey background (set_axis_bgcolor in older matplotlib)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(len(vals)):
#print i, ids[i]
    row = int(ids[i]) // NUM_COLS
col = int(ids[i]) % NUM_COLS
cval = score[i] #score[i]*score[i] # Square the values to drop the lower end
cmap = plt.get_cmap('hot')
val = cmap(cval)
ax.add_patch(plt.Rectangle((col,row),1,1,color=val)); #, cmap=plt.cm.autumn))
ax.set_aspect('equal')
print(cmap(0.1))
print(cmap(0.9))
plt.xlim([0,NUM_COLS])
plt.ylim([0, 1 + int(max(ids)) // NUM_COLS])
plt.show()
|
from verbs.baseforms import forms
class SuspendForm(forms.VerbForm):
name = "Suspend"
slug = "suspend"
duration_min_time = forms.IntegerField()
|
from __future__ import absolute_import
import json
from twisted.internet import defer, error
from twisted.python import failure
from twisted.test import proto_helpers
from twisted.trial import unittest
from txjsonrpc import jsonrpc, jsonrpclib
class TestJSONRPC(unittest.TestCase):
def setUp(self):
self.deferred = defer.Deferred()
exposed = {
"foo" : lambda : setattr(self, "fooFired", True),
"bar" : lambda p : setattr(self, "barResult", p ** 2),
"baz" : lambda p, q : (q, p),
"late" : lambda p : self.deferred,
}
self.factory = jsonrpc.JSONRPCFactory(exposed.get)
self.proto = self.factory.buildProtocol(("127.0.0.1", 0))
self.tr = proto_helpers.StringTransportWithDisconnection()
self.proto.makeConnection(self.tr)
def assertSent(self, expected):
expected["jsonrpc"] = "2.0"
self.assertEqual(json.loads(self.tr.value()[2:]), expected)
def test_notify(self):
"""
notify() sends a valid JSON RPC notification.
"""
self.proto.notify("foo")
self.assertSent({"method" : "foo", "params" : []})
self.tr.clear()
self.proto.notify("bar", [3])
self.assertSent({"method" : "bar", u"params" : [3]})
def test_request(self):
"""
request() sends a valid JSON RPC request and returns a deferred.
"""
d = self.proto.request("foo")
self.assertSent({"id" : "1", "method" : "foo", "params" : []})
d.addCallback(lambda r : self.assertEqual(r, [2, 3, "bar"]))
receive = {"jsonrpc" : "2.0", "id" : "1", "result" : [2, 3, "bar"]}
self.proto.stringReceived(json.dumps(receive))
return d
def test_unhandledError(self):
"""
An unhandled error gets logged and disconnects the transport.
"""
v = failure.Failure(ValueError("Hey a value error"))
self.proto.unhandledError(v)
errors = self.flushLoggedErrors(ValueError)
self.assertEqual(errors, [v])
def test_invalid_json(self):
"""
Invalid JSON causes a JSON RPC ParseError and disconnects.
"""
self.proto.stringReceived("[1,2,")
err = {"id" : None, "error" : jsonrpclib.ParseError().toResponse()}
self.assertSent(err)
errors = self.flushLoggedErrors(jsonrpclib.ParseError)
self.assertEqual(len(errors), 1)
def test_invalid_request(self):
"""
An invalid request causes a JSON RPC InvalidRequest and disconnects.
"""
self.proto.stringReceived(json.dumps({"id" : 12}))
err = jsonrpclib.InvalidRequest({"reason" : "jsonrpc"})
self.assertSent({"id" : None, "error" : err.toResponse()})
errors = self.flushLoggedErrors(jsonrpclib.InvalidRequest)
self.assertEqual(len(errors), 1)
def test_unsolicited_result(self):
"""
An incoming result for an id that does not exist raises an error.
"""
receive = {"jsonrpc" : "2.0", "id" : "1", "result" : [2, 3, "bar"]}
self.proto.stringReceived(json.dumps(receive))
err = jsonrpclib.InternalError({
"exception" : "KeyError", "message" : "u'1'",
})
expect = {"jsonrpc" : "2.0", "id" : None, "error" : err.toResponse()}
sent = json.loads(self.tr.value()[2:])
tb = sent["error"]["data"].pop("traceback")
self.assertEqual(sent, expect)
self.assertTrue(tb)
# TODO: Raises original exception. Do we want InternalError instead?
errors = self.flushLoggedErrors(KeyError)
self.assertEqual(len(errors), 1)
def _errorTest(self, err):
d = self.proto.request("foo").addErrback(lambda f : self.assertEqual(
f.value.toResponse(), err.toResponse()
))
receive = {"jsonrpc" : "2.0", "id" : "1", "error" : {}}
receive["error"] = {"code" : err.code, "message" : err.message}
self.proto.stringReceived(json.dumps(receive))
return d
def test_parse_error(self):
self._errorTest(jsonrpclib.ParseError())
    def test_invalid_request_error(self):
        self._errorTest(jsonrpclib.InvalidRequest())
def test_method_not_found(self):
self._errorTest(jsonrpclib.MethodNotFound())
def test_invalid_params(self):
self._errorTest(jsonrpclib.InvalidParams())
def test_internal_error(self):
self._errorTest(jsonrpclib.InternalError())
def test_application_error(self):
self._errorTest(jsonrpclib.ApplicationError(code=2400, message="Go."))
def test_server_error(self):
self._errorTest(jsonrpclib.ServerError(code=-32020))
def test_received_notify(self):
receive = {"jsonrpc" : "2.0", "method" : "foo"}
self.proto.stringReceived(json.dumps(receive))
self.assertTrue(self.fooFired)
receive = {"jsonrpc" : "2.0", "method" : "bar", "params" : [2]}
self.proto.stringReceived(json.dumps(receive))
self.assertEqual(self.barResult, 4)
def test_received_notify_no_method(self):
receive = {"jsonrpc" : "2.0", "method" : "quux"}
self.proto.stringReceived(json.dumps(receive))
errors = self.flushLoggedErrors(jsonrpclib.MethodNotFound)
self.assertEqual(len(errors), 1)
def test_received_notify_wrong_param_type(self):
receive = {"jsonrpc" : "2.0", "method" : "foo", "params" : [1, 2]}
self.proto.stringReceived(json.dumps(receive))
receive = {"jsonrpc" : "2.0", "method" : "bar", "params" : "foo"}
self.proto.stringReceived(json.dumps(receive))
errors = self.flushLoggedErrors(TypeError)
self.assertEqual(len(errors), 2)
def test_received_request(self):
receive = {
"jsonrpc" : "2.0", "id" : "1", "method" : "baz", "params" : [1, 2]
}
self.proto.stringReceived(json.dumps(receive))
self.assertSent({"jsonrpc" : "2.0", "id" : "1", "result" : [2, 1]})
def test_received_request_deferred(self):
receive = {
"jsonrpc" : "2.0", "id" : "3",
"method" : "late", "params" : {"p" : 3}
}
self.proto.stringReceived(json.dumps(receive))
self.deferred.callback(27)
self.assertSent({"jsonrpc" : "2.0", "id" : "3", "result" : 27})
def test_received_request_no_method(self):
receive = {"jsonrpc" : "2.0", "id" : "3", "method" : "quux"}
self.proto.stringReceived(json.dumps(receive))
errors = self.flushLoggedErrors(jsonrpclib.MethodNotFound)
self.assertEqual(len(errors), 1)
sent = json.loads(self.tr.value()[2:])
self.assertIn("error", sent)
self.assertEqual(sent["error"]["code"], jsonrpclib.MethodNotFound.code)
def test_received_request_error(self):
receive = {
"jsonrpc" : "2.0", "id" : "1", "method" : "foo", "params" : [1, 2]
}
self.proto.stringReceived(json.dumps(receive))
response = json.loads(self.tr.value()[2:])
self.assertNotIn("result", response)
self.assertEqual(response["id"], "1")
self.assertEqual(response["error"]["data"]["exception"], "TypeError")
self.assertTrue(response["error"]["data"]["traceback"])
errors = self.flushLoggedErrors(TypeError)
self.assertEqual(len(errors), 1)
errors = self.flushLoggedErrors(error.ConnectionLost)
self.assertEqual(len(errors), 1)
def test_fail_all(self):
d1, d2 = self.proto.request("foo"), self.proto.request("bar", [1, 2])
exc = failure.Failure(ValueError("A ValueError"))
self.proto.failAll(exc)
d3 = self.proto.request("baz", "foo")
for d in d1, d2, d3:
d.addErrback(lambda reason: self.assertIs(reason, exc))
def test_connection_lost(self):
self.proto.connectionLost(failure.Failure(error.ConnectionLost("Bye")))
return self.proto.request("foo").addErrback(
lambda f : self.assertIs(f.type, error.ConnectionLost)
)
|
import pytest
from canon.seq.seqreader import SeqReader
from .. import resource
def test_read_seq():
reader = SeqReader(resource('seq/Quartz_500Mpa_.SEQ'))
reader.get_Om()
Z, _, N = reader.get_Zmap('orsnr___')
def test_merge_Zmap():
reader = SeqReader()
reader.read_seq(resource('seq/au30_a1_.SEQ'))
Z1, _, N1 = reader.get_Zmap('orsnr___')
reader.read_seq(resource('seq/au30_m1_.SEQ'))
Z2, _, N2 = reader.get_Zmap('orsnr___')
Z, N = SeqReader.merge_Zmap(Z1, Z2, N1, N2)
if __name__ == '__main__':
pytest.main()
|
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.descriptors import descsum_create
from test_framework.script import OP_NOP, CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import get_key, get_multisig, test_address
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
def test_importmulti(self, req, success, error_code=None,
error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal(
"\n".join(
sorted(warnings)), "\n".join(
sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
# Sync the timestamp to the wallet, so that importmulti works
self.nodes[1].syncwithvalidationinterfacequeue()
node0_address1 = self.nodes[0].getaddressinfo(
self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info(
"Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Unsuccessful labelling for internal addresses"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info(
"Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info(
"Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info(
"Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info(
"Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(
self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
iswatchonly=True,
ismine=False,
solvable=True)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info(
"Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info(
"Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info(
"Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info(
"Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info(
"Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info(
"Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should
# replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of
# watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Test that importing of a P2PKH address via descriptor without
# checksum fails
key = get_key(self.nodes[0])
self.log.info(
"Should fail to import a p2pkh address from descriptor with no checksum")
self.test_importmulti({"desc": "pkh(" + key.pubkey + ")",
"timestamp": "now",
"label": "Descriptor import test"},
success=False,
error_code=-5,
error_message='Missing checksum')
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
# hdkeypath=m/0'/0'/0' and 1'
addresses = [
"ecregtest:prvn9ycvgr5atuyh49sua3mapskh2mnnzg7t9yp6dt",
"ecregtest:pp3n087yx0njv2e5wcvltahfxqst7l66rutz8ceeat"]
# pkh subscripts corresponding to the above addresses
addresses += [
"ecregtest:qqdkxd2xnzftq2p8wr3sqqyw8lntap7tncs546s6pr",
"ecregtest:qpyryy83jfaec5u0gpzldk6teadsuq8zlyqh5l30uq",
]
desc = "sh(pkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info(
"Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info(
"Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
for address in addresses:
test_address(self.nodes[1], address, solvable=True, ismine=True)
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
# Note: in Core's test, this address refers to the sh(wpkh()) address.
# For a sh(pkh()) this does not refer to a key, so we use the subscript
# address instead, which returns the same privkey.
address = "ecregtest:qzh6rch6st3wjvp0h2ud87gn7xnxvf6h8yrk8gcg8t"
desc = "sh(pkh(" + wif_priv + "))"
self.log.info(
"Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
p2pkh_label = "P2PKH descriptor import"
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": p2pkh_label},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info(
"Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info(
"Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info(
"Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info(
"Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc': descsum_create("pkh([" + pub_fpr + pub_keypath[1:] + "]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc': descsum_create("pkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(
wallet_name="noprivkeys",
disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress()
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey
# wallet
self.log.info(
"Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress()
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info(
            'Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('sh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info(
"Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(
result[0]['error']['message'],
"Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'ecregtest:qp0v86h53rc92hjrlpwzpjtdlgzsxu25svv6g40fpl', # m/0'/0'/0
'ecregtest:qqasy0zlkdleqt4pkn8fs4ehm5gnnz6qpgdcpt90fq', # m/0'/0'/1
'ecregtest:qp0sp4wlhctvprqvdt2dgvqcfdjssu04xgey0l3syw', # m/0'/0'/2
'ecregtest:qrhn24tegn04cptfv4ldhtkduxq55zcwrycjfdj9vr', # m/0'/0'/3
'ecregtest:qzpqhett2uwltq803vrxv7zkqhft5vsnmcjeh50v0p', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range': [0, 4],
}]
)
self.log.info(result)
for i in range(0, 5):
addr = wrpc.getnewaddress('')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
|
import os
import json
from base import BaseController
from nqueens.models import Piece, Panel, Meta
class NQueensController(BaseController):
def __init__(self, view):
        super(NQueensController, self).__init__()
self._piece_data = None
self._piece_cache = None
self.view = view
@classmethod
def get_instance(cls, view):
return cls(view)
def pre_switch(self):
pass
def start(self):
dim = self.view.get_dimension()
# Cached factory, only 1 file read per list
pieces = [Piece.from_file(self._piece_data) for i in range(dim)]
panel = Panel(dim)
self.view.notify({
'func': 'update_panel',
'data': {
'pieces': {},
}
})
res = self.run(panel, pieces, idx=0, ci=0)
if res:
self.view.notify({
'func': 'update_panel',
'data': {
'pieces': panel.pieces,
}
})
else:
self.view.notify({
'func': 'display_error',
'data': {
'message': 'No solution found :(',
}
})
def run(self, panel, pieces, idx, ci):
dim = panel.dimension
# Base case
if idx == len(pieces):
return True
else:
# Ultra-fast because:
            # 1. All the pieces are the same (far fewer combinations to try)
# 2. We start from the previous index, this makes the panel smaller
# each time
# 3. Instead of keeping track of the killing positions we do a
# check each time a piece is added in order to avoid a kill
# (which is faster)
# 4. Python dict operations are astonishingly fast
for i in range(ci, dim):
for j in range(dim):
if panel.add_piece(pieces[idx], (i, j)):
if self.run(panel, pieces, idx+1, i):
return True
else:
panel.remove_piece(pieces[idx])
return False
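    # Entry point as used in start() above: self.run(panel, pieces, idx=0, ci=0)
    # places one piece per recursion level and backtracks on failure.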
def piece_selected(self, piece_name):
if not self._piece_cache:
self._piece_cache = Meta.get_piece_definitions()
self._piece_data = self._piece_cache.get(piece_name)
if self._piece_data:
self._piece_data = self._piece_data[1]
self.view.notify({
'func': 'enable_run',
'data': {
'enable': bool(self._piece_data),
}
})
@staticmethod
def get_pieces_attr(attr):
candidates = Meta.get_piece_definitions()
if all(attr in candidate[0].keys() for candidate in candidates.values()):
return [candidate[0][attr] for candidate in candidates.values()]
else:
return []
|
"""
Organizaţia Internaţională a Aviaţiei Civile propune un alfabet în care
fiecărei litere îi este asignat un cuvânt pentru a evita problemele în
înțelegerea mesajelor critice.
Pentru a se păstra un istoric al conversațiilor s-a decis transcrierea lor
conform următoarelor reguli:
- fiecare cuvânt este scris pe o singură linie
- literele din alfabet sunt separate de o virgulă
Următoarea sarcină ți-a fost asignată:
Scrie un program care să primească un fișier ce conține mesajul
brut (scris folosind alfabetul ICAO) și generează un fișier
numit icao_intrare ce va conține mesajul inițial.
Mai jos găsiți un dicționar ce conține o versiune a alfabetului ICAO:
"""
ICAO = {
'a': 'alfa', 'b': 'bravo', 'c': 'charlie', 'd': 'delta', 'e': 'echo',
'f': 'foxtrot', 'g': 'golf', 'h': 'hotel', 'i': 'india', 'j': 'juliett',
'k': 'kilo', 'l': 'lima', 'm': 'mike', 'n': 'november', 'o': 'oscar',
'p': 'papa', 'q': 'quebec', 'r': 'romeo', 's': 'sierra', 't': 'tango',
'u': 'uniform', 'v': 'victor', 'w': 'whiskey', 'x': 'x-ray', 'y': 'yankee',
'z': 'zulu'
}
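# Quick example of the mapping above: the ICAO words "mike", "alfa" and
# "november" stand for the letters "m", "a" and "n" respectively.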
def din_icao(fisier_intrare):
    """Receives the path to the file containing the raw (ICAO-encoded) message
    and generates a file named icao_intrare containing the original message.
    """
    try:
        with open(fisier_intrare, 'r') as in_file:
            content = in_file.read()
    except IOError:
        print("Error! Could not open file.")
        return
    final_message = ''
    for line in content.splitlines():
        for word in line.split():
            for key, value in ICAO.items():
                if value == word:
                    final_message += key
            final_message += ' '
    print(final_message)
if __name__ == "__main__":
din_icao("mesaj.icao")
|