text stringlengths 0 1.05M | meta dict |
|---|---|
""" An OpenAI gym environment that uses a WorldModel trained on DonkeyCar data as the environment.
To pass in the arguments needed during environment creation:
your_env = gym.make('DKWMEnv', vae=vae_object, ...etc...)
From: https://stackoverflow.com/questions/54259338/how-to-pass-arguments-to-openai-gym-environments-upon-init
See this for how to create new environments: https://github.com/openai/gym/blob/master/docs/creating-environments.md
"""
from time import sleep
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
def shuffled_circular(data, default=None):
    """Infinite generator over `data`, reshuffled in place on every pass.

    @param data A mutable sequence (list or np.ndarray) of samples, or None.
    @param default Value yielded forever when there is no data to sample.

    Yields each element of `data` once per pass, in a new random order each
    pass. If `data` is None or empty, yields `default` forever instead.
    (The empty check prevents an infinite non-yielding loop that would make
    next() hang.)
    """
    while True:
        if data is None or len(data) == 0:
            yield default
        else:
            # In-place shuffle: a new order every full pass through the data.
            np.random.shuffle(data)
            for sample in data:
                yield sample
class DKWMRendererBase(object):
    """No-op renderer interface; DKWMEnv uses this when no renderer is given.

    Subclasses (e.g. a DKWMRenderer) override these hooks to display decoded
    observations and text labels.
    """
    def __init__( self, window_width=None, window_height=None):
        pass
    def reset(self):
        # Called once at the start of each episode, after set_obs.
        pass
    def set_obs( self, next_obs ):
        # next_obs: decoded uint8 image (see DKWMEnv.zobs_to_obs).
        pass
    def clear_label( self, label_id ):
        pass
    def set_label( self, label_text, label_id, location=None ):
        # label_id is a caller-chosen key identifying the label to update.
        pass
    def render( self, mode=None ):
        # Should return an image when mode == "rgb_array"; base returns None.
        pass
    def close(self):
        pass
class DKWMRewardBase(object):
    """Base class for reward functions plugged into DKWMEnv.

    Subclasses must implement step(); reward_range is exposed by the env.
    """
    def __init__(self, reward_range=(-10.0, 10.0)):
        # (min, max) reward bounds; DKWMEnv copies this into its reward_range.
        self.reward_range = reward_range
    def reset(self):
        pass
    def step( self, z_obs=None, mu=None, var=None, obs=None, actions=None ):
        # Must return a scalar reward for the current transition.
        raise NotImplementedError
    def close(self):
        pass
class DKWMEnv(gym.Env):
    """A gym environment that steps a learned world model (VAE + MDRNN)
    instead of a real simulator. Observations are VAE latent vectors; the
    decoded image is available via info["decoded"] from step()."""

    metadata = {'render.modes': ['human']}

    def __init__(self, vae, rnn, reward_func, starts=None, renderer=None, max_steps=None, done_threshhold=0.9):
        """ @param vae A Variational Autoencoder with get_z_dim() and decode() methods.
            @param rnn An MDRNN with get_hidden_units(), sample_next_output() and sample_z() methods.
            @param reward_func A DKWMRewardBase-like object, or None to use the RNN's predicted reward.
            @param starts A list of mu/log_var pairs that can be sampled from to generate the first observation of each episode.
            @param renderer A DKWMRendererBase-like object, or None for a no-op renderer.
            @param max_steps Will return done=True after max_steps. None means no step limit.
            @param done_threshhold The RNN's predicted done value at or above which the episode ends.
        """
        super().__init__()
        self.z_dim = vae.get_z_dim()
        self.vae = vae
        self.rnn = rnn
        self.max_steps = max_steps
        self.done_threshhold = done_threshhold
        self.steps = 0
        if starts is not None:
            # Cycle forever through shuffled (mu, log_var) start pairs.
            self.starts = shuffled_circular(starts, default=np.zeros( self.z_dim ))
        else:
            self.starts = None
        # Actions are (steering, throttle), both in [-1, 1].
        self.action_space = spaces.Box(
            low = -1.0,
            high = 1.0,
            shape = (2,),
            dtype = np.float32
        )
        # z/latent space. Arbitrarily chosen min/max, should be based off normal distribution.
        self.observation_space = spaces.Box(
            low = -100.0,
            high = 100.0,
            shape = (self.z_dim,),
            dtype = np.float32
        )
        self.reward_func = reward_func
        if reward_func is None:
            self.reward_range = (-10.0, 10.0)
        else:
            self.reward_range = self.reward_func.reward_range
        if renderer is None:
            self.renderer = DKWMRendererBase()
        else:
            self.renderer = renderer

    def reset(self):
        """Start a new episode and return the initial latent observation."""
        if self.starts is None:
            self.zobs = np.zeros( self.z_dim )
        else:
            # Sample a concrete z vector from the stored (mu, log_var) pair.
            self.zobs = self.rnn.sample_z( *next(self.starts) )
        self.hidden = np.zeros(self.rnn.get_hidden_units())
        self.cell_values = np.zeros(self.rnn.get_hidden_units())
        self.reward = 0.0
        self.steps = 0
        next_obs = self.zobs_to_obs( self.zobs )
        self.renderer.set_obs( next_obs )
        self.renderer.reset()
        return self.zobs

    def step(self, action):
        """Advance the world model one step with the given action.

        @return (zobs, reward, done, info) where info["decoded"] holds the
        decoded uint8 image for the new latent observation.
        """
        # The MDRNN consumes [z, action, previous reward] as one vector.
        inputs = np.concatenate([self.zobs, action, [self.reward]])
        ret = self.rnn.sample_next_output(inputs, self.hidden, self.cell_values)
        self.zobs, mu, log_var, _, rew_pred, self.reward, self.hidden, self.cell_values, self.done = ret
        next_obs = self.zobs_to_obs( self.zobs )
        if self.reward_func is None:
            # No external reward function: use the RNN's prediction, clipped.
            self.reward = np.clip( rew_pred, self.reward_range[0], self.reward_range[1] )
        else:
            self.reward = self.reward_func.step( z_obs=self.zobs, mu=mu, var=log_var, obs=next_obs, actions=action )
        self.steps += 1
        # Fix: max_steps defaults to None (no limit); `steps >= None` raises
        # TypeError on Python 3, so guard the comparison explicitly.
        if self.max_steps is not None and self.steps >= self.max_steps:
            self.done = True
        elif self.done >= self.done_threshhold:
            # At this point self.done still holds the RNN's predicted done value.
            self.done = True
        else:
            self.done = False
        self.renderer.set_obs( next_obs )
        self.renderer.set_label( "Steering:\t{:+5.3f}\nThrottle:\t{:+5.3f}".format( *action ), "actions" )
        return self.zobs, self.reward, self.done, {"decoded": next_obs}

    def render(self, mode='human'):
        img = self.renderer.render( mode=mode )
        if "rgb_array" == mode:
            return img
        #if "human" == mode:
        #    sleep(0.13)

    def close(self):
        # Fix: reward_func may legitimately be None (see __init__); the
        # unconditional close() call crashed in that case.
        if self.reward_func is not None:
            self.reward_func.close()
        self.renderer.close()

    def zobs_to_obs(self, zobs):
        """Decode a latent vector into a uint8 image via the VAE.

        Assumes vae.decode returns values in [0, 1]; scales to [0, 255].
        """
        next_obs = np.squeeze( self.vae.decode(zobs.reshape( (1,self.z_dim) )) ) * 255.0
        next_obs = np.floor( next_obs )
        next_obs = np.clip( next_obs, 0, 255 )
        return next_obs.astype( np.uint8 )
def sample_code():
    """Illustrative usage of the dkwm-v0 environment.

    NOTE(review): not runnable as written — it depends on the commented-out
    imports below and on undefined path variables (vae_path, mdrnn_path,
    starts_path). Treat as documentation, not executable code.
    """
    #import gym
    #import malpi.dkwm.gym_envs
    #from malpi.dkwm.gym_envs.lane_reward import LaneReward
    #from malpi.dkwm.gym_envs.renderer import DKWMRenderer
    #from malpi.dkwm import vae
    #from malpi.dkwm import mdrnn
    #import numpy as np
    #from gym import spaces
    print( "Gym: {} at {}".format( gym.__version__, gym.__file__ ) )
    z_dim = 512
    # NOTE(review): this rebinds the local name `vae` from the module to the
    # model instance; subsequent `vae.` calls are instance methods.
    vae = vae.KerasVAE(z_dim=z_dim, dropout=None)
    vae.set_weights( vae_path, by_name=True )
    rnn = mdrnn.RNN(z_dim=z_dim, action_dim=2 )
    rnn.set_weights( mdrnn_path, by_name=True )
    rew = LaneReward( z_dim=z_dim, weights=vae_path, reward_range=(-10.0, 10.0) )
    # starts_path is an .npz with 'mu' and 'log_var' arrays; zip into pairs.
    starts = np.load( starts_path )
    starts = list(zip(starts['mu'], starts['log_var']))
    obs_size=128
    renderer = DKWMRenderer( window_width=obs_size*2, window_height=obs_size*2 )
    # Passing arguments like this requires OpenAI gym >= 0.12.4
    env = gym.make('dkwm-v0', vae=vae, rnn=rnn, reward_func=rew, starts=starts, renderer=renderer)
    print( "Env: {}".format( env ) )
    obs = env.reset()
    print( "Obs: {}".format( obs.shape ) )
    for i in range(10):
        # NOTE(review): `act` is sampled but a second, different sample is
        # passed to step(), so the printed action is not the one executed.
        act = env.action_space.sample()
        z_obs, reward, done, info = env.step( env.action_space.sample() )
        obs = info["decoded"]
        print( "Step act/obs/rew/done/z: {} {} {} {} {}".format( act, obs.shape, reward, done, z_obs.shape ) )
        # Sample output:
        # Step act/obs/rew/done/z: [ 0.3011232 -0.97818303] (128, 128, 3) 0 False (512,)
    env.close()
| {
"repo_name": "Bleyddyn/malpi",
"path": "malpi/dkwm/gym_envs/dkwm_env.py",
"copies": "1",
"size": "6831",
"license": "mit",
"hash": -361775677509184060,
"line_mean": 31.0704225352,
"line_max": 128,
"alpha_frac": 0.5920070268,
"autogenerated": false,
"ratio": 3.312803103782735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9340623213770308,
"avg_score": 0.012837383362485324,
"num_lines": 213
} |
'''An opengl demo for PyQt5
test on Windows with Python3.6
'''
import array
from PyQt5 import QtGui
vertexShaderCode = '''#version 410
layout(location=0) in vec4 position;
layout(location=1) in vec4 color;
out vec4 theColor;
void main()
{
gl_Position = position;
theColor = color;
}
'''
fragmentShaderCode = '''#version 410
in vec4 theColor;
out vec4 outColor;
void main()
{
outColor = theColor;
}
'''
class Triangle(QtGui.QOpenGLWindow):
    """Renders a single color-interpolated triangle with a core-profile
    OpenGL 4.1 context, using Qt's shader/VAO/VBO wrapper classes."""

    def __init__(self, parent=None):
        super(Triangle, self).__init__(QtGui.QOpenGLWindow.NoPartialUpdate, parent)
        # Request a core-profile context before the window is shown.
        format_ = QtGui.QSurfaceFormat()
        format_.setRenderableType(QtGui.QSurfaceFormat.OpenGL)
        format_.setProfile(QtGui.QSurfaceFormat.CoreProfile)
        # PyQt5 5.8.2 only has 4.1.0 opengl binding
        format_.setVersion(4, 1)
        self.setFormat(format_)
        self.gl = None  # set in initializeGL via versionFunctions()
        self.program = QtGui.QOpenGLShaderProgram()
        self.vao = QtGui.QOpenGLVertexArrayObject()

    def __del__(self):
        self.program.deleteLater()
        self.vao.destroy()

    def initializeGL(self):
        """One-time GL setup: function resolver, shaders, VAO/VBO layout."""
        self.gl = self.context().versionFunctions()
        self.gl.initializeOpenGLFunctions()
        #
        self.printContextInformation()
        #
        self.program.create()
        self.program.addShaderFromSourceCode(QtGui.QOpenGLShader.Vertex, vertexShaderCode)
        self.program.addShaderFromSourceCode(QtGui.QOpenGLShader.Fragment, fragmentShaderCode)
        self.program.link()
        assert self.program.isLinked()
        # Non-interleaved layout: 3 positions (vec4) then 3 colors (vec4).
        vertexData = array.array('f', [
            # position
            0.0, 0.5, 0.0, 1.0,
            0.5, -0.366, 0.0, 1.0,
            -0.5, -0.366, 0.0, 1.0,
            # colors
            1.0, 0.0, 0.0, 1.0,
            0.0, 1.0, 0.0, 1.0,
            0.0, 0.0, 1.0, 1.0
        ])
        vertexMem = memoryview(vertexData)
        vbo = QtGui.QOpenGLBuffer()
        vbo.create()
        vbo.setUsagePattern(vbo.StaticDraw)
        vbo.bind()
        # vbo.allocate(vertexData, vertexData.buffer_info()[1]*vertexData.itemsize)
        vbo.allocate(vertexData, vertexMem.nbytes)
        self.vao.create()
        self.vao.bind()
        vbo.bind()
        self.program.bind()
        # Attribute 0: positions start at byte offset 0.
        self.program.enableAttributeArray(0)
        self.program.setAttributeBuffer(0, self.gl.GL_FLOAT, 0, 4, 0)
        # Attribute 1: colors start after the 12 position floats.
        self.program.enableAttributeArray(1)
        self.program.setAttributeBuffer(1, self.gl.GL_FLOAT, 12*vertexMem.itemsize, 4, 0)
        self.vao.release()
        self.program.release()
        # The VAO keeps the buffer binding alive; the wrapper can be destroyed.
        vbo.destroy()

    # def resizeGL(self, w, h):
    #     super(Triangle, self).resizeGL(w, h)

    def paintGL(self):
        # NOTE(review): glClearColor is set *after* glClear, so the very first
        # frame clears with the previous/default clear color.
        self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT)
        self.gl.glClearColor(0.0, 0.0, 0.0, 1.0)
        self.program.bind()
        self.vao.bind()
        self.gl.glDrawArrays(self.gl.GL_TRIANGLES, 0, 3)
        self.program.release()
        self.vao.release()

    def printContextInformation(self):
        """Log whether the context is desktop GL or GLES, plus its version."""
        glType = 'OpenGL ES' if self.context().isOpenGLES() else "OpenGL"
        glVersion = self.gl.glGetString(self.gl.GL_VERSION)
        print('{} : {}'.format(glType, glVersion))
if __name__ == '__main__':
    import sys
    # QGuiApplication (not QApplication): no QtWidgets are used here.
    app = QtGui.QGuiApplication(sys.argv)
    window = Triangle()
    window.setTitle('PyQt5 Opengl demo')
    window.resize(1280, 720)
    window.show()
    sys.exit(app.exec_())
| {
"repo_name": "mackst/opengl-samples",
"path": "triangle_PyQt5.py",
"copies": "1",
"size": "3444",
"license": "mit",
"hash": -8638971356057837000,
"line_mean": 24.5111111111,
"line_max": 94,
"alpha_frac": 0.6019163763,
"autogenerated": false,
"ratio": 3.292543021032505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43944593973325047,
"avg_score": null,
"num_lines": null
} |
'''An opengl demo for PyQt5
test on Windows with Python3.6
'''
import array
from PySide2 import QtGui
GL_FLOAT = 0x1406
GL_COLOR_BUFFER_BIT = 0x00004000
GL_TRIANGLES = 0x0004
GL_VERSION = 0x1F02
vertexShaderCode = '''#version 410
layout(location=0) in vec4 position;
layout(location=1) in vec4 color;
out vec4 theColor;
void main()
{
gl_Position = position;
theColor = color;
}
'''
fragmentShaderCode = '''#version 410
in vec4 theColor;
out vec4 outColor;
void main()
{
outColor = theColor;
}
'''
class Triangle(QtGui.QOpenGLWindow, QtGui.QOpenGLFunctions):
    """PySide2 port of the triangle demo. Mixes in QOpenGLFunctions so GL
    calls are methods on self rather than on a versionFunctions() object."""

    def __init__(self, parent=None):
        # super(Triangle, self).__init__(QtGui.QOpenGLWindow.NoPartialUpdate, parent)
        # Both bases are initialized explicitly because of the dual inheritance.
        QtGui.QOpenGLWindow.__init__(self, QtGui.QOpenGLWindow.NoPartialUpdate, parent)
        QtGui.QOpenGLFunctions.__init__(self)
        format_ = QtGui.QSurfaceFormat()
        format_.setRenderableType(QtGui.QSurfaceFormat.OpenGL)
        format_.setProfile(QtGui.QSurfaceFormat.CoreProfile)
        format_.setVersion(4, 6)
        self.setFormat(format_)
        self.program = QtGui.QOpenGLShaderProgram()
        self.vao = QtGui.QOpenGLVertexArrayObject()

    def __del__(self):
        self.program.deleteLater()
        self.vao.destroy()

    def initializeGL(self):
        """One-time GL setup: shaders, VAO/VBO layout (see PyQt5 variant)."""
        # self.gl = self.context().versionFunctions()
        # self.gl = self.context().functions()
        #
        self.initializeOpenGLFunctions()
        self.printContextInformation()
        #
        self.program.create()
        self.program.addShaderFromSourceCode(QtGui.QOpenGLShader.Vertex, vertexShaderCode)
        self.program.addShaderFromSourceCode(QtGui.QOpenGLShader.Fragment, fragmentShaderCode)
        self.program.link()
        assert self.program.isLinked()
        # Non-interleaved layout: 3 positions (vec4) then 3 colors (vec4).
        vertexData = array.array('f', [
            # position
            0.0, 0.5, 0.0, 1.0,
            0.5, -0.366, 0.0, 1.0,
            -0.5, -0.366, 0.0, 1.0,
            # colors
            1.0, 0.0, 0.0, 1.0,
            0.0, 1.0, 0.0, 1.0,
            0.0, 0.0, 1.0, 1.0
        ])
        vertexMem = memoryview(vertexData)
        vbo = QtGui.QOpenGLBuffer()
        vbo.create()
        vbo.setUsagePattern(vbo.StaticDraw)
        vbo.bind()
        # vbo.allocate(vertexData, vertexData.buffer_info()[1]*vertexData.itemsize)
        vbo.allocate(vertexData, vertexMem.nbytes)
        self.vao.create()
        self.vao.bind()
        vbo.bind()
        self.program.bind()
        # Attribute 0: positions at byte offset 0; attribute 1: colors after
        # the 12 position floats.
        self.program.enableAttributeArray(0)
        self.program.setAttributeBuffer(0, GL_FLOAT, 0, 4, 0)
        self.program.enableAttributeArray(1)
        self.program.setAttributeBuffer(1, GL_FLOAT, 12*vertexMem.itemsize, 4, 0)
        self.vao.release()
        self.program.release()
        vbo.destroy()

    # def resizeGL(self, w, h):
    #     super(Triangle, self).resizeGL(w, h)

    def paintGL(self):
        # NOTE(review): clear color is set after glClear; first frame uses the
        # previous/default clear color.
        self.glClear(GL_COLOR_BUFFER_BIT)
        self.glClearColor(0.0, 0.0, 0.0, 1.0)
        self.program.bind()
        self.vao.bind()
        self.glDrawArrays(GL_TRIANGLES, 0, 3)
        self.program.release()
        self.vao.release()

    def printContextInformation(self):
        """Log whether the context is desktop GL or GLES, plus its version."""
        glType = 'OpenGL ES' if self.context().isOpenGLES() else "OpenGL"
        # glVersion = self.gl.glGetString(GL_VERSION)
        glVersion = self.glGetString(GL_VERSION)
        print('{} : {}'.format(glType, glVersion))
if __name__ == '__main__':
    import sys
    app = QtGui.QGuiApplication(sys.argv)
    window = Triangle()
    window.setTitle('PySide2 Opengl demo')
    window.resize(1280, 720)
    window.show()
    def cleanup():
        # Explicitly drop the window before the app exits so __del__ releases
        # GL resources while a context still exists.
        global window
        del window
    app.aboutToQuit.connect(cleanup)
    sys.exit(app.exec_())
| {
"repo_name": "mackst/opengl-samples",
"path": "triangle_PySide2.py",
"copies": "1",
"size": "3775",
"license": "mit",
"hash": 4647046154100015000,
"line_mean": 24.3355704698,
"line_max": 94,
"alpha_frac": 0.6098013245,
"autogenerated": false,
"ratio": 3.3348056537102475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9327922943307643,
"avg_score": 0.02333680698052101,
"num_lines": 149
} |
"""An OpRegularizer that applies L1 regularization on batch-norm gammas."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import generic_regularizers
from morph_net.framework import tpu_util
import tensorflow.compat.v1 as tf
class GammaL1Regularizer(generic_regularizers.OpRegularizer):
  """An OpRegularizer that L1-regularizes batch-norm gamma."""

  def __init__(self, gamma, gamma_threshold):
    """Builds the regularizer from a batch-norm gamma tensor.

    Args:
      gamma: A tf.Tensor of shape (n_channels,) holding the gamma values.
      gamma_threshold: Float; a channel whose |gamma| exceeds this value is
        considered 'alive'.
    """
    self._gamma = tpu_util.maybe_convert_to_variable(gamma)
    self._gamma_threshold = gamma_threshold
    # L1 penalty per channel is simply |gamma|; liveness is thresholded on it.
    gamma_magnitude = tf.abs(self._gamma)
    self._regularization_vector = gamma_magnitude
    self._alive_vector = gamma_magnitude > gamma_threshold

  @property
  def regularization_vector(self):
    return self._regularization_vector

  @property
  def alive_vector(self):
    """Returns a tf.Tensor of shape (n_channels,) with alive bits."""
    return self._alive_vector
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/op_regularizers/gamma_l1_regularizer.py",
"copies": "1",
"size": "1168",
"license": "apache-2.0",
"hash": 4904675211820868000,
"line_mean": 31.4444444444,
"line_max": 75,
"alpha_frac": 0.7183219178,
"autogenerated": false,
"ratio": 3.8675496688741724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007536438333539782,
"num_lines": 36
} |
"""An OpRegularizer that applies regularization on Gating probabilities.
This regularizer targets the gating probability of a LogisticSigmoidGating OP.
It can do so directly by minimizing the log odds ratio of the probability
log(p/1-p), or by minimizing the L1 of the sampled mask.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import generic_regularizers
from morph_net.framework import tpu_util
import tensorflow.compat.v1 as tf
class ProbGatingRegularizer(generic_regularizers.OpRegularizer):
  """An OpRegularizer that regularizes gating probabilities."""

  def __init__(self, logits, mask, regularize_on_mask=True,
               alive_threshold=0.1, mask_as_alive_vector=True):
    """Builds the regularizer from gating logits and a sampled mask.

    Args:
      logits: A tf.Tensor of shape (n_channels,) with the log odds ratio of
        each channel being on: log(p / 1-p).
      mask: A tf.Tensor of the same shape as `logits`; the sampled
        mask/gating vector.
      regularize_on_mask: Bool. If True the mask is the regularization
        vector; otherwise the probabilities are. Default True.
      alive_threshold: Float. Values below this are considered dead, whether
        applied to the sampled mask or to the probabilities.
      mask_as_alive_vector: Bool. If True the alive vector is the thresholded
        sampled mask; otherwise the thresholded probabilities.

    Raises:
      ValueError: If `logits` or `mask` is not a 1D tensor.
    """
    # Both inputs must be per-channel vectors; validate logits first.
    for tensor, message in ((logits, 'logits tensor should be 1D.'),
                            (mask, 'mask tensor should be 1D.')):
      if len(tensor.shape.as_list()) != 1:
        raise ValueError(message)
    self._logits = tpu_util.maybe_convert_to_variable(logits)
    self._mask = mask
    self._probs = tf.sigmoid(self._logits)
    if mask_as_alive_vector:
      alive_source = self._mask
    else:
      alive_source = self._probs
    self._alive_vector = alive_source >= alive_threshold
    if regularize_on_mask:
      self._regularization_vector = self._mask
    else:
      self._regularization_vector = self._probs

  @property
  def regularization_vector(self):
    return self._regularization_vector

  @property
  def alive_vector(self):
    """Returns a tf.Tensor of shape (n_channels,) with alive bits."""
    return self._alive_vector

  @property
  def is_probabilistic(self):
    return True
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/op_regularizers/prob_gating_regularizer.py",
"copies": "1",
"size": "2490",
"license": "apache-2.0",
"hash": 6977722329755907000,
"line_mean": 36.1641791045,
"line_max": 79,
"alpha_frac": 0.7048192771,
"autogenerated": false,
"ratio": 3.9461172741679875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006274735146003803,
"num_lines": 67
} |
"""An optimizer that switches between several methods."""
import tensorflow as tf
from tensorflow.python.training import optimizer
class CompositeOptimizer(optimizer.Optimizer):
  """Optimizer that switches between several methods.
  """

  def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name='Composite'):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor; True selects optimizer1, False optimizer2.
      use_locking: Bool. If True apply use locks to prevent concurrent
        updates to variables.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Applies gradients with whichever optimizer `switch` selects."""
    def _apply_first():
      return self._optimizer1.apply_gradients(grads_and_vars,
                                              global_step, name)

    def _apply_second():
      return self._optimizer2.apply_gradients(grads_and_vars,
                                              global_step, name)

    return tf.cond(self._switch, _apply_first, _apply_second)

  def get_slot(self, var, name):
    """Returns the named slot for `var`; it must live in only one optimizer."""
    slot1 = self._optimizer1.get_slot(var, name)
    slot2 = self._optimizer2.get_slot(var, name)
    if slot1 and slot2:
      raise LookupError('Slot named %s for variable %s populated for both '
                        'optimizers' % (name, var.name))
    return slot1 or slot2

  def get_slot_names(self):
    """Returns the sorted union of slot names from both optimizers."""
    names = self._optimizer1.get_slot_names() + self._optimizer2.get_slot_names()
    return sorted(names)
| {
"repo_name": "hang-qi/models",
"path": "syntaxnet/dragnn/python/composite_optimizer.py",
"copies": "2",
"size": "1926",
"license": "apache-2.0",
"hash": 911418610909789600,
"line_mean": 34.0181818182,
"line_max": 80,
"alpha_frac": 0.6225337487,
"autogenerated": false,
"ratio": 4.397260273972603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6019794022672603,
"avg_score": null,
"num_lines": null
} |
# An organization tool which uses the [eyeD3 API](https://eyed3.nicfit.net/index.html) to arrange .mp3 files into folders based on the artist name, album title, and album year as specified in the the .mp3 file's metadata.
import os
import shutil
import glob
import datetime
import eyed3
#http://eyed3.nicfit.net/index.html
#http://eyed3.nicfit.net/api/modules.html
# NOTE(review): hard-coded, user-specific path; the script only works on this
# machine as written.
desktopDirectory = "C:/Users/Ajay/Desktop/"
os.chdir(desktopDirectory)
# Append-mode log: each run adds a timestamped section.
MP3DirectoryChangeLogTxt = open(desktopDirectory + "MP3DirectoryChangeLog.txt", "a")
MP3DirectoryChangeLogTxt.write("==================================================\n")
MP3DirectoryChangeLogTxt.write(str(datetime.datetime.now()) + "\t")
# Only .mp3 files directly on the desktop (glob is non-recursive here).
MP3Files = glob.glob("*.mp3")
MP3FileCount = len(MP3Files)
MP3DirectoryChangeLogTxt.write("Found {0} files\n\n".format(str(MP3FileCount)))
if MP3FileCount != 0:
    for MP3File in MP3Files:
        MP3DirectoryChangeLogTxt.write("Found \t\t{0}\n".format(MP3File))
        fileName = MP3File
        oldFilePath = desktopDirectory + fileName
        if os.path.exists(oldFilePath):
            audioFile = eyed3.load(oldFilePath)
            # Keep only the first artist of a '/'-separated list.
            # NOTE(review): raises AttributeError if the artist tag is missing
            # (None), unlike the album tag which is handled below.
            artist = audioFile.tag.artist.rsplit('/')[0]
            title = audioFile.tag.title
            album = audioFile.tag.album
            originalReleaseDate = audioFile.tag.original_release_date
            if audioFile.tag.album is None:
                album = "Unknown Album"
            # Folder layout: Music/<artist>/<album> or Music/<artist>/<album> (<year>).
            if originalReleaseDate is None:
                newFilePath = desktopDirectory + "Music/{0}/{1}".format(artist, album)
            else:
                newFilePath = desktopDirectory + "Music/{0}/{1} ({2})".format(artist, album, originalReleaseDate)
            if not os.path.exists(newFilePath):
                os.makedirs(newFilePath)
                # MP3DirectoryChangeLogTxt.write("Directory " + newFilePath + " created successfully\n")
            shutil.move(oldFilePath, newFilePath)
            # TODO: rename file
            # os.rename(fileName, artist + " - " + title)
            MP3DirectoryChangeLogTxt.write("Moved to \t{0}/{1}\n\n".format(newFilePath, fileName))
        else:
            MP3DirectoryChangeLogTxt.write("{0} does not exist\n\n".format(oldFilePath))
            continue
        # TODO: access newFilePath
        # if os.path.exists(newFilePath):
        #     MP3DirectoryChangeLogTxt.write(fileName + " exists in new directory")
MP3DirectoryChangeLogTxt.close()
| {
"repo_name": "AjayAujla/PythonUtilities",
"path": "OrganizeMusic.py",
"copies": "1",
"size": "2451",
"license": "mit",
"hash": -1926059623333608400,
"line_mean": 38.5322580645,
"line_max": 221,
"alpha_frac": 0.6401468788,
"autogenerated": false,
"ratio": 3.753445635528331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9866911284475335,
"avg_score": 0.005336245970599155,
"num_lines": 62
} |
# A normal class instance can serve as a metaclass too
class MetaObj:
    """Demo: a normal class *instance* used as a metaclass. Its __call__ runs
    at the end of the client class statement and must return the new class."""
    def __call__(self, classname, supers, classdict):
        # Mimics the type.__call__ protocol with self-defined New/Init steps.
        print('In MetaObj.call: ', classname, supers, classdict, sep='\n...')
        Class = self.__New__(classname, supers, classdict)
        self.__Init__(Class, classname, supers, classdict)
        return Class
    def __New__(self, classname, supers, classdict):
        # Delegates actual class creation to type().
        print('In MetaObj.new: ', classname, supers, classdict, sep='\n...')
        return type(classname, supers, classdict)
    def __Init__(self, Class, classname, supers, classdict):
        print('In MetaObj.init:', classname, supers, classdict, sep='\n...')
        print('...init class object:', list(Class.__dict__.keys()))
class Eggs:
    pass
print('making class')
class Spam(Eggs, metaclass=MetaObj()): # MetaObj is normal class instance
    data = 1 # Called at end of statement
    def meth(self, arg):
        return self.data + arg
print('making instance')
X = Spam()
print('data:', X.data, X.meth(2))
| {
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"path": "notebooks/scripts/book_code/code/metaclass4.py",
"copies": "1",
"size": "1063",
"license": "apache-2.0",
"hash": -5098011466827891000,
"line_mean": 35.6551724138,
"line_max": 82,
"alpha_frac": 0.6058325494,
"autogenerated": false,
"ratio": 3.742957746478873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4848790295878873,
"avg_score": null,
"num_lines": null
} |
# A normal superclass can provide __call__ for instance metaclass too
# Instances inherit from classes and their supers normally
class SuperMetaObj:
    """Demo: the metaclass-instance's __call__ can come from a superclass;
    instances inherit it via normal attribute lookup."""
    def __call__(self, classname, supers, classdict):
        print('In SuperMetaObj.call: ', classname, supers, classdict, sep='\n...')
        Class = self.__New__(classname, supers, classdict)
        self.__Init__(Class, classname, supers, classdict)
        return Class
class SubMetaObj(SuperMetaObj):
    """Supplies the New/Init steps that the inherited __call__ invokes."""
    def __New__(self, classname, supers, classdict):
        # Delegates actual class creation to type().
        print('In SubMetaObj.new: ', classname, supers, classdict, sep='\n...')
        return type(classname, supers, classdict)
    def __Init__(self, Class, classname, supers, classdict):
        print('In SubMetaObj.init:', classname, supers, classdict, sep='\n...')
        print('...init class object:', list(Class.__dict__.keys()))
class Eggs:
    pass
print('making class')
class Spam(Eggs, metaclass=SubMetaObj()): # meta is a normal class instance
    data = 1 # Called at end of statement
    def meth(self, arg):
        return self.data + arg
print('making instance')
X = Spam()
print('data:', X.data, X.meth(2))
| {
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"path": "notebooks/scripts/book_code/code/metaclass4-super.py",
"copies": "1",
"size": "1184",
"license": "apache-2.0",
"hash": -2358559555562719000,
"line_mean": 37.1935483871,
"line_max": 82,
"alpha_frac": 0.6317567568,
"autogenerated": false,
"ratio": 3.770700636942675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4902457393742675,
"avg_score": null,
"num_lines": null
} |
""" A Nose Plugin for EARL.
See Also:
http://nose.python-hosting.com/
http://www.w3.org/TR/EARL10-Schema/
"""
import logging
from nose.plugins import Plugin
from rdflib.term import URIRef, BNode, Literal
from rdflib.namespace import RDF, RDFS
from rdflib.graph import Graph
from rdflib.namespace import NamespaceDict as Namespace
from rdflib.util import date_time
log = logging.getLogger(__name__)
EARL = Namespace("http://www.w3.org/ns/earl#")
class EARLPlugin(Plugin):
    """
    Activate the EARL plugin to generate a report of the test results
    using EARL.

    NOTE: Python 2 only (print statements below).
    """
    name = 'EARL'
    def begin(self):
        # Fresh RDF graph per test run; results accumulate via the add* hooks.
        self.graph = Graph()
        self.graph.bind("earl", EARL.uri)
    def finalize(self, result):
        # TODO: add plugin options for specifying where to send
        # output.
        # Serializes the collected results to a timestamped RDF/XML file.
        self.graph.serialize("file:results-%s.rdf" % date_time(), format="pretty-xml")
    def addDeprecated(self, test):
        # Non-success outcomes are only printed, not recorded in the graph.
        print "Deprecated: %s" % test
    def addError(self, test, err, capt):
        print "Error: %s" % test
    def addFailure(self, test, err, capt, tb_info):
        print "Failure: %s" % test
    def addSkip(self, test):
        print "Skip: %s" % test
    def addSuccess(self, test, capt):
        # Records an earl:TestResult with outcome earl:pass for the test.
        result = BNode() # TODO: coin URIRef
        self.graph.add((result, RDFS.label, Literal(test)))
        self.graph.add((result, RDFS.comment, Literal(type(test))))
        self.graph.add((result, RDF.type, EARL.TestResult))
        self.graph.add((result, EARL.outcome, EARL["pass"]))
        # etc
# Example of the EARL TestResult RDF this plugin aims to produce (reference
# only; the bare string below is discarded at import time).
"""
<earl:TestResult rdf:about="#result">
<earl:outcome rdf:resource="http://www.w3.org/ns/earl#fail"/>
<dc:title xml:lang="en">Invalid Markup (code #353)</dc:title>
<dc:description rdf:parseType="Literal" xml:lang="en">
<div xmlns="http://www.w3.org/1999/xhtml">
<p>The <code>table</code> element is not allowed to appear
inside a <code>p</code> element</p>
</div>
</dc:description>
<dc:date rdf:datatype="http://www.w3.org/2001/XMLSchema#date">2006-08-13</dc:date>
<earl:pointer rdf:resource="#xpointer"/>
<earl:info rdf:parseType="Literal" xml:lang="en">
<div xmlns="http://www.w3.org/1999/xhtml">
<p>It seems the <code>p</code> element has not been closed</p>
</div>
</earl:info>
</earl:TestResult>
"""
| {
"repo_name": "bhavanaananda/DataStage",
"path": "test/RDFDatabank/rdfextras/tools/EARLPlugin.py",
"copies": "3",
"size": "2313",
"license": "mit",
"hash": 2288939637219537200,
"line_mean": 27.9125,
"line_max": 86,
"alpha_frac": 0.6411586684,
"autogenerated": false,
"ratio": 3.155525238744884,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5296683907144885,
"avg_score": null,
"num_lines": null
} |
"""An OSLOM Runner for Python."""
import sys
import os
import argparse
import re
import tempfile
import shutil
import time
import subprocess
import itertools
import logging
import simplejson as json
# Defaults
DEF_MIN_CLUSTER_SIZE = 0  # 0 keeps clusters of any size
DEF_OSLOM_EXEC = "oslom_dir"
# Default OSLOM CLI flags — presumably weighted mode plus run counts; see the
# OSLOM documentation to confirm.
DEF_OSLOM_ARGS = ["-w", "-r", "10", "-hr", "10"]
# Constants
OSLOM_LOG_FILE = "oslom.log"  # OSLOM stdout/stderr capture file name
class IdRemapper(object):
    """Maps string Ids into 32 bits signed integer Ids starting from 0."""

    INT_MAX = 2147483647  # largest assignable integer Id

    def __init__(self):
        """Construct a new Id Remapper class instance."""
        self.curr_id = 0       # next integer Id to hand out
        self.mapping = {}      # str Id -> int Id
        self.r_mapping = {}    # int Id -> str Id (reverse lookup)

    def get_int_id(self, str_id):
        """Get a unique 32 bits signed integer for the given string Id."""
        if str_id not in self.mapping:
            if self.curr_id == IdRemapper.INT_MAX:
                return None  # Integer Id space exhausted
            assigned = self.curr_id
            self.mapping[str_id] = assigned
            self.r_mapping[assigned] = str_id
            self.curr_id = assigned + 1
        return self.mapping[str_id]

    def get_str_id(self, int_id):
        """Get the original string Id for the given signed integer Id."""
        if int_id in self.r_mapping:
            return self.r_mapping[int_id]
        return None

    def store_mapping(self, path):
        """Store the current Id mappings into a TSV file."""
        with open(path, "w") as writer:
            # Python 2 API (iteritems), matching the rest of this module.
            for str_id, int_id in self.mapping.iteritems():
                writer.write("{}\t{}\n".format(str_id, int_id))
class OslomRunner(object):
    """Handles the execution of OSLOM."""
    TMP_EDGES_FILE = "edges.tsv"
    OUTPUT_FILE = "tp"              # OSLOM's cluster listing output file
    SEED_FILE = "time_seed.dat"     # random seed file OSLOM writes
    IDS_MAPPING_FILE = "ids_mapping.tsv"
    ARGS_FILE = "args.txt"
    # Matches OSLOM cluster header lines: "#module <id> size: <n> bs: <score>"
    RE_INFOLINE = re.compile(r"#module (.+) size: (.+) bs: (.+)", re.I)
    def __init__(self, working_dir):
        """Construct a new OSLOM Runner class instance."""
        self.id_remapper = IdRemapper()
        self.working_dir = working_dir
        self.last_result = None
    def get_path(self, filename):
        """Get the full path to a file using the current working directory."""
        return os.path.join(self.working_dir, filename)
    def store_edges(self, edges):
        """Store the temporary network edges input file with re-mapped Ids.

        edges: iterable of (source, target, weight) triples with string Ids.
        """
        with open(self.get_path(OslomRunner.TMP_EDGES_FILE), "w") as writer:
            for edge in edges:
                writer.write("{}\t{}\t{}\n".format(
                    self.id_remapper.get_int_id(edge[0]),
                    self.id_remapper.get_int_id(edge[1]),
                    edge[2]))
    def run(self, oslom_exec, oslom_args, log_filename):
        """Run OSLOM and wait for the process to finish.

        Returns a dict with the args used, return code, elapsed time and
        OSLOM's output directory; also cached in self.last_result.
        """
        args = [oslom_exec, "-f", self.get_path(OslomRunner.TMP_EDGES_FILE)]
        args.extend(oslom_args)
        with open(log_filename, "w") as logwriter:
            start_time = time.time()
            # stdout and stderr are both captured into the log file.
            retval = subprocess.call(
                args, cwd=self.working_dir,
                stdout=logwriter, stderr=subprocess.STDOUT)
            self.last_result = {
                "args": args, "retval": retval,
                "time": time.time() - start_time,
                # OSLOM names its output dir after the input file.
                "output_dir": self.get_path(
                    "{}_oslo_files".format(OslomRunner.TMP_EDGES_FILE))
            }
            return self.last_result
    def read_clusters(self, min_cluster_size):
        """Read and parse OSLOM clusters output file.

        Returns {"num_found": total clusters, "clusters": those meeting
        min_cluster_size, with original string node Ids restored}.
        """
        num_found = 0
        clusters = []
        with open(self.get_path(OslomRunner.OUTPUT_FILE), "r") as reader:
            # Read the output file every two lines (Python 2 izip_longest):
            # line1 = "#module ..." header, line2 = space-separated node Ids.
            for line1, line2 in itertools.izip_longest(*[reader] * 2):
                info = OslomRunner.RE_INFOLINE.match(line1.strip()).groups()
                nodes = line2.strip().split(" ")
                if len(nodes) >= min_cluster_size: # Apply min_cluster_size
                    clusters.append({
                        "id": int(info[0]),
                        "bs": float(info[2]),  # bs = cluster's b-score
                        "nodes": [{"id": self.id_remapper.get_str_id(int(n))} for n in nodes],
                    })
                num_found += 1
        return {"num_found": num_found, "clusters": clusters}
    def store_output_files(self, dir_path):
        """Store OSLOM output files to a directory.

        Copies OSLOM's files, the seed file, the command line used, and the
        string-to-int Id mapping. No-op if run() has not been called.
        """
        if self.last_result:
            for entry in os.listdir(self.last_result["output_dir"]):
                path = os.path.join(self.last_result["output_dir"], entry)
                if os.path.isfile(path):
                    shutil.copy(path, os.path.join(dir_path, entry))
            shutil.copy(
                self.get_path(OslomRunner.SEED_FILE),
                os.path.join(dir_path, OslomRunner.SEED_FILE))
            args_file = os.path.join(dir_path, OslomRunner.ARGS_FILE)
            with open(args_file, "w") as writer:
                writer.write("{}\n".format(" ".join(self.last_result["args"])))
            self.id_remapper.store_mapping(
                os.path.join(dir_path, OslomRunner.IDS_MAPPING_FILE))
    def cleanup(self):
        """Clean the working directory."""
        shutil.rmtree(self.working_dir)
def run(args):
    """Run the full OSLOM pipeline from parsed command-line arguments.

    Reads the TSV edges file, executes OSLOM, writes the clusters JSON
    file plus the raw OSLOM output files, and removes the temporary
    working directory.  Returns True on success, False when the OSLOM
    subprocess exits with a non-zero status.
    """
    # OSLOM works inside its own temporary directory.
    oslom_runner = OslomRunner(tempfile.mkdtemp())

    # Start from a fresh OSLOM output directory.
    shutil.rmtree(args.oslom_output, ignore_errors=True)
    os.makedirs(args.oslom_output)

    # Load the input edges (TSV: source, target, weight per line).
    logging.info("reading edges file: %s", args.edges)

    def _parse_edge(line):
        source, target, weight = line.strip().split("\t", 2)
        return (source, target, weight)

    with open(args.edges, "r") as reader:
        edges = [_parse_edge(line) for line in reader]
    logging.info("%d edge(s) found", len(edges))

    # Write the edges into the working directory with re-mapped ids.
    logging.info("writing temporary edges file with re-mapped Ids ...")
    oslom_runner.store_edges(edges)

    # Execute OSLOM, logging into the output directory.
    logging.info("running OSLOM ...")
    result = oslom_runner.run(
        args.oslom_exec, args.oslom_args,
        os.path.join(args.oslom_output, OSLOM_LOG_FILE))
    if result["retval"] != 0:
        logging.error("error running OSLOM, check the log file")
        return False
    logging.info("OSLOM executed in %.3f secs", result["time"])

    # Parse the clusters OSLOM produced.
    logging.info("reading OSLOM clusters output file ...")
    clusters = oslom_runner.read_clusters(args.min_cluster_size)
    logging.info(
        "found %d cluster(s) and %d with size >= %d",
        clusters["num_found"], len(clusters["clusters"]), args.min_cluster_size)

    # Write the filtered clusters as compact JSON.
    logging.info("writing output clusters file: %s", args.output_clusters)
    with open(args.output_clusters, "w") as writer:
        json.dump(clusters["clusters"], writer, separators=(",", ":"))

    # Store the raw OSLOM output files alongside the log.
    logging.info("writing OSLOM output files ...")
    oslom_runner.store_output_files(args.oslom_output)

    oslom_runner.cleanup()
    logging.info("finished")
    return True
def run_in_memory(args, edges):
    """Run OSLOM on an in-memory edge list, return in-memory results.

    @return a (clusters, oslom_log) tuple; clusters is None when the
            OSLOM subprocess exits with a non-zero status.
    """
    oslom_runner = OslomRunner(tempfile.mkdtemp())

    # Write the edges into the working directory with re-mapped ids.
    logging.info("writing temporary edges file with re-mapped Ids ...")
    oslom_runner.store_edges(edges)

    # Execute OSLOM, keeping its log next to the edges file.
    logging.info("running OSLOM ...")
    log_file = os.path.join(oslom_runner.working_dir, OSLOM_LOG_FILE)
    result = oslom_runner.run(args.oslom_exec, args.oslom_args, log_file)
    with open(log_file, "r") as reader:
        oslom_log = reader.read()
    if result["retval"] != 0:
        logging.error("error running OSLOM, check the log")
        return (None, oslom_log)
    logging.info("OSLOM executed in %.3f secs", result["time"])

    # Parse the clusters OSLOM produced.
    logging.info("reading OSLOM clusters output file ...")
    clusters = oslom_runner.read_clusters(args.min_cluster_size)
    logging.info(
        "found %d cluster(s) and %d with size >= %d",
        clusters["num_found"], len(clusters["clusters"]), args.min_cluster_size)

    oslom_runner.cleanup()
    logging.info("finished")
    return (clusters, oslom_log)
def main():
    """Command-line entry point for the OSLOM runner."""
    # Log lines carry the script name so they are easy to grep.
    name = os.path.splitext(os.path.basename(__file__))[0]
    logging.basicConfig(
        format="%(asctime)s [%(process)s] %(levelname)s {} - %(message)s".format(name),
        level=logging.INFO)

    parser = argparse.ArgumentParser(description=__doc__)
    # Required file/directory options share the same shape.
    required_options = (
        ("--edges", "FILENAME", "input network edges file in TSV format"),
        ("--output-clusters", "FILENAME", "output clusters file in JSON format"),
        ("--oslom-output", "DIRECTORY", "output directory for OSLOM files"),
    )
    for flag, metavar, helptext in required_options:
        parser.add_argument(flag, metavar=metavar, required=True, help=helptext)
    parser.add_argument("--min-cluster-size", metavar="INTEGER", type=int,
                        default=DEF_MIN_CLUSTER_SIZE,
                        help="minimum cluster size (default: %(default)s)")
    parser.add_argument("--oslom-exec", metavar="EXECUTABLE",
                        default=DEF_OSLOM_EXEC,
                        help="OSLOM executable program to use "
                             "(default: %(default)s)")
    parser.add_argument("oslom_args", metavar="OSLOM_ARG", nargs="*",
                        default=DEF_OSLOM_ARGS,
                        help="argument to pass to OSLOM (don't pass '-f' !) "
                             "(default: %(default)s)")

    # Exit status 0 when run() returns True, 1 otherwise.
    sys.exit(not run(parser.parse_args()))
# Standard entry-point guard: run the CLI only when this file is
# executed directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
| {
"repo_name": "hhromic/python-oslom-runner",
"path": "oslom/runner.py",
"copies": "1",
"size": "10932",
"license": "apache-2.0",
"hash": 2322373414513459700,
"line_mean": 38.6086956522,
"line_max": 94,
"alpha_frac": 0.6081229418,
"autogenerated": false,
"ratio": 3.6733870967741935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4781510038574193,
"avg_score": null,
"num_lines": null
} |
"""A notebook manager that uses Azure blob storage.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import azure
from azure.storage import BlobService
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Instance
from IPython.utils import tz
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class AzureNotebookManager(NotebookManager):
    """A notebook manager that stores notebooks as Azure blobs.

    Blob names are the notebook ids; the human-readable notebook name
    is kept in the blob's 'nbname' metadata entry.
    """

    account_name = Unicode('', config=True, help='Azure storage account name.')
    account_key = Unicode('', config=True, help='Azure storage account key.')
    container = Unicode('', config=True, help='Container name for notebooks.')
    blob_service_host_base = Unicode('.blob.core.windows.net', config=True,
        help='The basename for the blob service URL. If running on the preview site this '
             'will be .blob.core.azure-preview.com.')

    def _blob_service_host_base_changed(self, new):
        # Traitlets change handler: keep the azure module's global host
        # base in sync with the configured trait value.
        self._update_service_host_base(new)

    blob_service = Instance('azure.storage.BlobService')

    def _blob_service_default(self):
        # Lazily build the blob service from the configured credentials.
        return BlobService(account_name=self.account_name, account_key=self.account_key)

    def __init__(self, **kwargs):
        super(AzureNotebookManager, self).__init__(**kwargs)
        self._update_service_host_base(self.blob_service_host_base)
        self._create_container()

    def _update_service_host_base(self, shb):
        # NOTE: this mutates module-level state in the azure package,
        # so it affects every BlobService instance in the process.
        azure.BLOB_SERVICE_HOST_BASE = shb

    def _create_container(self):
        self.blob_service.create_container(self.container)

    def load_notebook_names(self):
        """On startup load the notebook ids and names from Azure.

        The blob names are the notebook ids and the notebook names are
        stored as blob metadata.
        """
        self.mapping = {}
        blobs = self.blob_service.list_blobs(self.container)
        ids = [blob.name for blob in blobs]
        for id in ids:
            md = self.blob_service.get_blob_metadata(self.container, id)
            name = md['x-ms-meta-nbname']
            self.mapping[id] = name

    def list_notebooks(self):
        """List all notebooks in the container.

        This version uses `self.mapping` as the authoritative notebook list.
        """
        data = [dict(notebook_id=id, name=name) for id, name in self.mapping.items()]
        data = sorted(data, key=lambda item: item['name'])
        return data

    def read_notebook_object(self, notebook_id):
        """Get the object representation of a notebook by notebook_id.

        Raises 404 if the notebook is unknown, 500 on storage or
        JSON parsing failures.
        """
        if not self.notebook_exists(notebook_id):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            s = self.blob_service.get_blob(self.container, notebook_id)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are not swallowed here.
            raise web.HTTPError(500, u'Notebook cannot be read.')
        try:
            # v1 and v2 and json in the .ipynb files.
            nb = current.reads(s, u'json')
        except Exception:
            raise web.HTTPError(500, u'Unreadable JSON notebook.')
        # Todo: The last modified should actually be saved in the notebook document.
        # We are just using the current datetime until that is implemented.
        last_modified = tz.utcnow()
        return last_modified, nb

    def write_notebook_object(self, nb, notebook_id=None):
        """Save an existing notebook object by notebook_id.

        A new id is allocated when notebook_id is None.  Returns the
        notebook id that was written.
        """
        try:
            new_name = nb.metadata.name
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
        if notebook_id is None:
            notebook_id = self.new_notebook_id(new_name)
        if notebook_id not in self.mapping:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            data = current.writes(nb, u'json')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        # The display name travels as blob metadata next to the data.
        metadata = {'nbname': new_name}
        try:
            self.blob_service.put_blob(self.container, notebook_id, data, 'BlockBlob',
                                       x_ms_meta_name_values=metadata)
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        self.mapping[notebook_id] = new_name
        return notebook_id

    def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id."""
        if not self.notebook_exists(notebook_id):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            self.blob_service.delete_blob(self.container, notebook_id)
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while deleting notebook: %s' % e)
        else:
            # Only drop the mapping entry once the blob is really gone.
            self.delete_notebook_id(notebook_id)

    def info_string(self):
        return "Serving notebooks from Azure storage: %s, %s" % (self.account_name, self.container)
| {
"repo_name": "noslenfa/tdjangorest",
"path": "uw/lib/python2.7/site-packages/IPython/html/services/notebooks/azurenbmanager.py",
"copies": "2",
"size": "5554",
"license": "apache-2.0",
"hash": 9222624714568249000,
"line_mean": 37.8391608392,
"line_max": 118,
"alpha_frac": 0.5894850558,
"autogenerated": false,
"ratio": 4.278890600924499,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0038877862473824546,
"num_lines": 143
} |
"""A notebook manager that uses mongodb for storage.
Based on Travis Harrison's shocknbmanager and the azurenbmanager
Authors:
* Steve Chan <sychan@lbl.gov>
Copyright (C) 2013 The Regents of the University of California
Department of Energy contract-operators of the Lawrence Berkeley National Laboratory
1 Cyclotron Road, Berkeley, CA 94720
Copyright (C) 2013 The KBase Project
Distributed unspecified open source license as of 8/14/2013
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import dateutil.parser
from bson.json_util import dumps
from unicodedata import normalize
from tornado import web
from pymongo import MongoClient
from pymongo.read_preferences import ReadPreference
from IPython.html.services.notebooks.nbmanager import NotebookManager
#from IPython.config.configurable import LoggingConfigurable
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Dict, Bool, List, TraitError
#from IPython.utils import tz
# To log narrative itself
from biokbase.narrative.common import kblogging
g_log = kblogging.get_logger("narrative.base")
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class MongoNotebookManager(NotebookManager):
    """A notebook manager that uses MongoDB for storage.

    The MongoDB backend simply wraps the JSON notebook in an enclosing
    dict and pushes it into MongoDB.  The dict has the following fields:

        {
            '_id'           : mongodb UUID (set manually to notebook_id),
            'owner'         : username of the owner of this notebook,
            'doc_type'      : 'ipynb',
            'ipynb'         : actual ipython notebook dict,
            'ipynb_chkpt'   : dict for checkpointed notebook,
            'created'       : creation/update timestamp,
            'chkpt_created' : timestamp for ipynb_chkpt
        }
    """
    mongodb_uri = Unicode('mongodb://localhost/', config=True, help='MongoDB connection URI')
    mongodb_database = Unicode('narrative', config=True, help='MongoDB database')
    mongodb_collection = Unicode('notebooks', config=True, help='MongoDB collection')
    ipynb_type = Unicode(u'ipynb')
    allowed_formats = List([u'json'])
    node_format = ipynb_type

    def __init__(self, **kwargs):
        """Verify that we can connect to the MongoDB instance."""
        super(MongoNotebookManager, self).__init__(**kwargs)
        if not self.mongodb_uri:
            raise web.HTTPError(412, u"Missing MongoDB connection URI.")
        if not self.mongodb_database:
            raise web.HTTPError(412, u"Missing MongoDB database.")
        if not self.mongodb_collection:
            raise web.HTTPError(412, u"Missing MongoDB collection.")
        try:
            self.mclient = MongoClient(self.mongodb_uri,
                                       read_preference=ReadPreference.PRIMARY_PREFERRED)
            self.db = self.mclient[self.mongodb_database]
            self.collection = self.db[self.mongodb_collection]
        except Exception as e:
            raise web.HTTPError(500, u"Unable to connect to MongoDB service at %s: %s " % (self.mongodb_uri, e))

    # setup a mapping dict for MongoDB/notebook_id <-> Notebook name
    mapping = Dict()
    # Map notebook names to notebook_ids
    rev_mapping = Dict()

    def list_notebooks(self):
        """List all notebooks in MongoDB.

        The _id field used by MongoDB is a UUID like the notebook_id, so
        we directly use the notebook_id for the MongoDB _id field.
        The name field comes out of document.ipynb.metadata.name.
        """
        all_ipynb = self.collection.find({'doc_type': self.ipynb_type})
        all2 = list(all_ipynb)
        self.mapping = {doc['_id']: doc['ipynb']['metadata']['name'] for doc in all2}
        self.rev_mapping = {doc['ipynb']['metadata']['name']: doc['_id'] for doc in all2}
        data = [dict(notebook_id=it[0], name=it[1]) for it in self.mapping.items()]
        data = sorted(data, key=lambda item: item['name'])
        return data

    def new_notebook_id(self, name):
        """Generate a new notebook_id for a name and store its mappings."""
        notebook_id = super(MongoNotebookManager, self).new_notebook_id(name)
        self.rev_mapping[name] = notebook_id
        return notebook_id

    def delete_notebook_id(self, notebook_id):
        """Delete a notebook's id in the mapping."""
        name = self.mapping[notebook_id]
        super(MongoNotebookManager, self).delete_notebook_id(notebook_id)
        del self.rev_mapping[name]

    def notebook_exists(self, notebook_id):
        """Does a notebook exist?"""
        exists = super(MongoNotebookManager, self).notebook_exists(notebook_id)
        if not exists:
            return False
        return exists

    def get_name(self, notebook_id):
        """Get a notebook name, raising 404 if not found."""
        try:
            name = self.mapping[notebook_id]
        except KeyError:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        return name

    def read_notebook_object(self, notebook_id):
        """Get the Notebook representation of a notebook by notebook_id."""
        doc = self.collection.find_one({'_id': notebook_id})
        if doc is None:
            # Fixed format string: the original 'Notebook % not found'
            # raised ValueError instead of producing the message.
            raise web.HTTPError(500, u'Notebook %s not found' % notebook_id)
        # Convert from MongoDB doc to plain JSON and then to notebook format.
        jsonnb = dumps(doc['ipynb'])
        nb = current.reads(jsonnb, u'json')
        last_modified = dateutil.parser.parse(doc['created'])
        return last_modified, nb

    def write_notebook_object(self, nb, notebook_id=None):
        """Save an existing notebook object by notebook_id."""
        try:
            new_name = normalize('NFC', nb.metadata.name)
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
        # Carry over some of the metadata stuff from ShockNBManager.
        try:
            if notebook_id is None:
                notebook_id = self.new_notebook_id(new_name)
            if not hasattr(nb.metadata, 'owner'):
                nb.metadata.owner = 'public'
            if not hasattr(nb.metadata, 'type'):
                nb.metadata.type = 'generic'
            if not hasattr(nb.metadata, 'description'):
                nb.metadata.description = ''
            nb.metadata.created = datetime.datetime.utcnow().isoformat()
            nb.metadata.format = self.node_format
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error setting notebook attributes: %s' % e)
        if notebook_id not in self.mapping:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            doc = {'_id': notebook_id,
                   'owner': nb.metadata.owner,
                   'doc_type': self.ipynb_type,
                   'created': nb.metadata.created,
                   'ipynb': nb
                   }
            # Preserve the old checkpoint if it is there.
            old = self.collection.find_one({'_id': notebook_id})
            if old and 'ipynb_chkpt' in old:
                doc['ipynb_chkpt'] = old['ipynb_chkpt']
                doc['chkpt_created'] = old['chkpt_created']
            saved_id = self.collection.save(doc, manipulate=True, safe=True)
        except Exception as e:
            raise web.HTTPError(500, u'%s saving notebook: %s' % (type(e), e))
        self.mapping[saved_id] = new_name
        return saved_id

    def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id."""
        if notebook_id is None:
            raise web.HTTPError(400, u'Missing notebook_id')
        doc = self.collection.find_one({'_id': notebook_id})
        if doc is None:
            raise web.HTTPError(404, u'Notebook not found')
        self.log.debug("unlinking notebook %s", notebook_id)
        self.collection.remove({'_id': notebook_id})
        self.delete_notebook_id(notebook_id)

    # public checkpoint API
    # Checkpoints in the MongoDB manager are just another field in the
    # overall MongoDB document. We copy the ipynb field into the
    # ipynb_chkpt field (and vice versa for revert).

    def create_checkpoint(self, notebook_id):
        """Create a checkpoint from the current state of a notebook."""
        # only the one checkpoint ID:
        checkpoint_id = u"checkpoint"
        doc = self.collection.find_one({'_id': notebook_id})
        if doc is None:
            # Fixed format string ('% n' was an invalid conversion).
            raise web.HTTPError(500, u'Notebook %s not found' % notebook_id)
        chkpt_created = datetime.datetime.utcnow()
        self.collection.update({'_id': notebook_id},
                               {'$set': {'ipynb_chkpt': doc['ipynb'],
                                         'chkpt_created': chkpt_created.isoformat()}})
        # return the checkpoint info
        return {'checkpoint_id': checkpoint_id, 'last_modified': chkpt_created}

    def list_checkpoints(self, notebook_id):
        """List the checkpoints for a given notebook.

        This notebook manager currently only supports one checkpoint
        per notebook.  Returns an empty list when the notebook (or its
        checkpoint) does not exist.
        """
        checkpoint_id = u"checkpoint"
        doc = self.collection.find_one({'_id': notebook_id})
        # Guard against a missing notebook: 'in None' raises TypeError.
        if doc is not None and 'ipynb_chkpt' in doc:
            return [{'checkpoint_id': checkpoint_id,
                     'last_modified': dateutil.parser.parse(doc['chkpt_created'])}]
        return []

    def restore_checkpoint(self, notebook_id, checkpoint_id):
        """Restore a notebook to a checkpointed state."""
        doc = self.collection.find_one({'_id': notebook_id})
        if doc:
            if 'ipynb_chkpt' in doc:
                doc['ipynb'] = doc['ipynb_chkpt']
                doc['created'] = doc['chkpt_created']
                self.collection.save(doc, manipulate=True, safe=True)
                self.log.debug("copying ipynb_chkpt to ipynb for %s", notebook_id)
            else:
                self.log.debug("checkpoint for %s does not exist" % notebook_id)
                raise web.HTTPError(404,
                                    u'Notebook checkpoint does not exist: %s' % notebook_id)
        else:
            # Fixed: the original called the Logger object directly
            # (self.log(...)), which raises TypeError.
            self.log.error("notebook %s does not exist" % notebook_id)
            raise web.HTTPError(404,
                                u'Notebook %s does not exist' % notebook_id)

    def delete_checkpoint(self, notebook_id, checkpoint_id):
        """Delete a notebook's checkpoint."""
        doc = self.collection.find_one({'_id': notebook_id})
        if doc:
            if 'ipynb_chkpt' in doc:
                self.collection.update({'_id': notebook_id},
                                       {'$unset': {'ipynb_chkpt': 1,
                                                   'chkpt_created': 1}})
            else:
                raise web.HTTPError(404,
                                    u'Notebook checkpoint does not exist: %s' % notebook_id)
        else:
            raise web.HTTPError(404,
                                u'Notebook %s does not exist' % notebook_id)

    def log_info(self):
        self.log.info("Serving notebooks from MongoDB URI %s" % self.mongodb_uri)
        self.log.info("Serving notebooks from MongoDB db %s" % self.mongodb_database)
        self.log.info("Serving notebooks from MongoDB collection %s" % self.mongodb_collection)

    def info_string(self):
        return "Serving notebooks from mongodb database %s and collection %s" % (self.mongodb_database,
                                                                                 self.mongodb_collection)
| {
"repo_name": "nlharris/narrative",
"path": "src/biokbase/narrative/mongonbmanager.py",
"copies": "4",
"size": "11865",
"license": "mit",
"hash": 36785415298946820,
"line_mean": 42.6213235294,
"line_max": 119,
"alpha_frac": 0.583817952,
"autogenerated": false,
"ratio": 4.080123796423659,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009909037353880698,
"num_lines": 272
} |
"""A notebook manager that uses Shock storage.
https://github.com/MG-RAST/Shock
Authors:
* Travis Harrison
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import cStringIO
import datetime
import requests
import json
import dateutil.parser
from collections import defaultdict
from tornado import web
from IPython.html.services.notebooks.nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Instance
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ShockNotebookManager(NotebookManager):
    """A notebook manager that uses MG-RAST Shock for storage.

    Notebook versions are stored as separate Shock nodes sharing a
    'nbid' attribute; the newest node per nbid is the current version.
    """
    shock_url = Unicode('', config=True, help='Shock server url')
    oauth_url = Unicode('', config=True, help='OAuth server url')
    user_token = Unicode('', config=True, help='OAuth user bearer token (OAuth v2.0)')
    # These class-level defaults are replaced per-instance in __init__
    # and set_notebook_names.
    user_email = None
    node_format = 'ipynb'
    shock_map = {}
    auth_header = {}

    def __init__(self, **kwargs):
        """Verify Shock authentication credentials and set the auth header."""
        super(ShockNotebookManager, self).__init__(**kwargs)
        if not self.shock_url:
            raise web.HTTPError(412, u"Missing Shock server URI.")
        if not (self.oauth_url and self.user_token):
            raise web.HTTPError(412, u"Missing credintals for Shock Authentication.")
        self.auth_header = {'headers': {'Authorization': 'OAuth %s' % self.user_token}}
        self.user_email = self._get_oauth(self.user_token, 'email')

    def set_notebook_names(self):
        """Load the notebook ids and names from Shock.

        The uuid and name are stored as Shock metadata.
        1. Skip nb nodes with no files
        2. Skip nb nodes tagged as 'deleted'
        3. If multiple nb with same uuid, get latest timestamp
        """
        self.mapping = {}
        self.shock_map = {}
        nb_vers = defaultdict(list)
        query_path = '?query&format=' + self.node_format + '&limit=0'
        query_result = self._get_shock_node(query_path, 'json')
        if query_result is not None:
            for node in query_result:
                if node['file']['size'] and ('nbid' in node['attributes']) and node['attributes']['nbid'] and ('name' in node['attributes']) and node['attributes']['name']:
                    nb_vers[node['attributes']['nbid']].append(node)
        # only get listing of latest for each notebook uuid set
        for uuid in nb_vers.iterkeys():
            nodes = sorted(nb_vers[uuid], key=lambda x: x['attributes']['created'], reverse=True)
            # if latest is flaged deleted - don't show
            if ('deleted' in nodes[0]['attributes']) and nodes[0]['attributes']['deleted']:
                continue
            self.mapping[uuid] = nodes[0]['attributes']['name']
            self.shock_map[uuid] = nodes[0]

    def list_notebooks(self):
        """List all notebooks in the container.

        This version uses `self.mapping` as the authoritative notebook list.
        """
        self.set_notebook_names()
        data = [dict(notebook_id=uuid, name=name) for uuid, name in self.mapping.items()]
        data = sorted(data, key=lambda item: item['name'])
        return data

    def delete_notebook_id(self, notebook_id):
        """Delete a notebook's id in the mapping.

        This doesn't delete the actual notebook, only its entry in the mapping.
        """
        del self.mapping[notebook_id]
        del self.shock_map[notebook_id]

    def notebook_exists(self, notebook_id):
        """Does a notebook exist?"""
        # Known only when both the name and node mappings carry the id.
        return (notebook_id in self.mapping) and (notebook_id in self.shock_map)

    def read_notebook_object(self, notebook_id):
        """Get the object representation of a notebook by notebook_id."""
        if not self.notebook_exists(notebook_id):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            node_path = '/%s?download' % self.shock_map[notebook_id]['id']
            node_data = self._get_shock_node(node_path, 'data')
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are not swallowed here.
            raise web.HTTPError(500, u'Notebook cannot be read')
        try:
            # v1 and v2 and json in the .ipynb files.
            nb = current.reads(node_data, u'json')
        except Exception:
            raise web.HTTPError(500, u'Unreadable JSON notebook.\n%s' % node_data)
        dt = self.shock_map[notebook_id]['attributes']['created']
        last_modified = dateutil.parser.parse(dt) if dt else datetime.datetime.utcnow().isoformat()
        return last_modified, nb

    def write_notebook_object(self, nb, notebook_id=None):
        """Save an existing notebook object by notebook_id."""
        try:
            new_name = nb.metadata.name
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
        try:
            if notebook_id is None:
                notebook_id = self.new_notebook_id(new_name)
            if not hasattr(nb.metadata, 'owner'):
                nb.metadata.owner = 'public'
            if not hasattr(nb.metadata, 'access'):
                nb.metadata.access = []
            if not hasattr(nb.metadata, 'type'):
                nb.metadata.type = 'generic'
            if not hasattr(nb.metadata, 'description'):
                nb.metadata.description = ''
            nb.metadata.created = datetime.datetime.utcnow().isoformat()
            nb.metadata.format = self.node_format
            nb.metadata.nbid = notebook_id
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error setting notebook attributes: %s' % e)
        if notebook_id not in self.mapping:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            data = json.dumps(nb)
            attr = json.dumps(nb.metadata)
            shock_node = self._post_shock_node(new_name, data, attr)
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        self.mapping[notebook_id] = new_name
        self.shock_map[notebook_id] = shock_node
        return notebook_id

    def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id.

        Currently can not delete in shock,
        instead we create a new copy flagged as deleted.
        """
        if not self.notebook_exists(notebook_id):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        last_modified, nb = self.read_notebook_object(notebook_id)
        nb.metadata['deleted'] = 1
        self.write_notebook_object(nb, notebook_id)
        self.delete_notebook_id(notebook_id)

    def _get_oauth(self, token, key=None):
        # Resolve the user profile for a Globus Goauth token; returns
        # the requested key (or the whole profile dict when key=None).
        name = token.split('|')[0].split('=')[1]
        url = self.oauth_url + "/" + name
        try:
            rget = requests.get(url, headers={'Authorization': 'Globus-Goauthtoken %s' % token})
        except Exception as e:
            raise web.HTTPError(504, u'Unable to connect to OAuth server %s: %s' % (url, e))
        if not (rget.ok and rget.text):
            raise web.HTTPError(504, u'Unable to connect to OAuth server %s: %s' % (url, rget.raise_for_status()))
        rj = rget.json
        if not (rj and isinstance(rj, dict)):
            raise web.HTTPError(401, u'OAuth Authorization failed for %s.' % name)
        return rj[key] if key and key in rj else rj

    def _get_shock_node(self, path, format):
        # GET a Shock node; returns the 'data' member for json format,
        # the raw response text otherwise.
        url = self.shock_url + '/node' + path
        try:
            rget = requests.get(url, **self.auth_header)
        except Exception as e:
            raise web.HTTPError(504, u'Unable to connect to Shock server %s: %s' % (url, e))
        if not (rget.ok and rget.text):
            raise web.HTTPError(504, u'Unable to connect to Shock server %s: %s' % (url, rget.raise_for_status()))
        if format == 'json':
            rj = rget.json
            if not (rj and isinstance(rj, dict) and all([key in rj for key in ['status', 'data', 'error']])):
                raise web.HTTPError(415, u'Return data not valid Shock format')
            if rj['error']:
                raise web.HTTPError(rj['status'], 'Shock error: ' + rj['error'])
            return rj['data']
        else:
            return rget.text

    def _post_shock_node(self, name, data, attr):
        # Upload notebook data + attributes as a new Shock node and
        # adjust its read ACLs according to owner/access attributes.
        url = self.shock_url + '/node'
        data_hdl = cStringIO.StringIO(data)
        attr_hdl = cStringIO.StringIO(attr)
        files = {"upload": ('%s.ipynb' % name, data_hdl), "attributes": ('%s_metadata.json' % name, attr_hdl)}
        try:
            kwargs = {'files': files}
            kwargs.update(self.auth_header)
            rpost = requests.post(url, **kwargs)
            rj = rpost.json
        except Exception as e:
            raise web.HTTPError(504, u'Unable to connect to Shock server %s: %s' % (url, e))
        if not (rpost.ok and rj and isinstance(rj, dict) and all([key in rj for key in ['status', 'data', 'error']])):
            raise web.HTTPError(500, u'Unable to POST to Shock server %s: %s' % (url, rpost.raise_for_status()))
        if rj['error']:
            raise web.HTTPError(rj['status'], 'Shock error: ' + rj['error'])
        attr = rj['data']['attributes']
        # remove read ACLs for public notebook
        if ('owner' in attr) and (attr['owner'] == 'public'):
            self._edit_shock_acl(rj['data']['id'], 'delete', 'read', [self.user_email])
        # add shared users to node read ACLs
        elif ('owner' in attr) and ('access' in attr) and attr['access']:
            self._edit_shock_acl(rj['data']['id'], 'put', 'read', attr['access'])
        else:
            raise web.HTTPError(415, u'POST data not valid Shock OAuth format: %s' % rj['error'])
        return rj['data']

    def _edit_shock_acl(self, node, action, mode, emails):
        # PUT/DELETE on a node's ACL list for the given mode ('read').
        url = '%s/node/%s/acl' % (self.shock_url, node)
        kwargs = {'params': {mode: ','.join(emails)}}
        kwargs.update(self.auth_header)
        try:
            if action == 'put':
                result = requests.put(url, **kwargs)
            elif action == 'delete':
                result = requests.delete(url, **kwargs)
            else:
                raise web.HTTPError(500, u'Invalid Shock ACL action: %s' % action)
            rj = result.json
        except Exception as e:
            raise web.HTTPError(504, u'Unable to connect to Shock server %s: %s' % (url, e))
        if not (result.ok and rj and isinstance(rj, dict) and all([key in rj for key in ['status', 'data', 'error']])):
            raise web.HTTPError(500, u'Unable to PUT to Shock server %s: %s' % (url, result.raise_for_status()))
        if rj['error']:
            raise web.HTTPError(rj['status'], 'Shock error: ' + rj['error'])
        return

    def log_info(self):
        self.log.info("Serving notebooks from Shock storage %s" % self.shock_url)
| {
"repo_name": "aekazakov/narrative",
"path": "src/biokbase/narrative/shocknbmanager.py",
"copies": "4",
"size": "11410",
"license": "mit",
"hash": 2974782915371769300,
"line_mean": 44.0988142292,
"line_max": 172,
"alpha_frac": 0.5701139351,
"autogenerated": false,
"ratio": 3.88227288193263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.645238681703263,
"avg_score": null,
"num_lines": null
} |
"""A notebook manager that uses the KBase workspace for storage.
Authors:
* Steve Chan <sychan@lbl.gov>
* Bill Riehl <wjriehl@lbl.gov>
Copyright (C) 2013 The Regents of the University of California
Department of Energy contract-operators of the Lawrence Berkeley National Laboratory
1 Cyclotron Road, Berkeley, CA 94720
Copyright (C) 2013 The KBase Project
Distributed unspecified open source license as of 9/27/2013
"""
# System
import datetime
import dateutil.parser
import os
import json
import re
import importlib
# Third-party
from unicodedata import normalize
from tornado.web import HTTPError
# IPython
# from IPython import nbformat
import nbformat
from nbformat import (
sign,
validate,
ValidationError
)
from notebook.services.contents.manager import ContentsManager
from traitlets.traitlets import (
Unicode,
Dict,
Bool,
List,
TraitError
)
from IPython.utils import tz
# Local
from .manager_util import base_model
from .narrativeio import (
KBaseWSManagerMixin,
PermissionsError
)
from .kbasecheckpoints import KBaseCheckpoints
import biokbase.narrative.ws_util as ws_util
from biokbase.workspace.client import Workspace
from biokbase.narrative.common.url_config import URLS
from biokbase.narrative.common import util
import biokbase.auth
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class KBaseWSManager(KBaseWSManagerMixin, ContentsManager):
"""
A notebook manager that uses the KBase workspace for storage.
The Workspace backend simply adds a few metadata fields into the
notebook object and pushes it into the workspace as the 'data'
part of a workspace object
Additional metadata fields
{
'id' : User specified title for the narrative alphanumeric + _
'creator' : {username of the creator of this notebook},
'description' : 'description of notebook',
'data_dependencies' : { list of kbase id strings }
'format' : self.node_format
'workspace' : the workspace that it was loaded from or saved to
}
This handler expects that on every request, the session attribute for an
instance will be populated by the front end handlers. That's gross, but
that's what we're running with for now.
Note: you'll probably see "That's gross, but..." a lot in this rev of the
code
Notebooks are identified with workspace identifiers of the format
{workspace_name}.{object_name}
Object format:
(New)
{
'dependencies' : List of workspace refs,
'notebook' : {
<mostly, the IPython notebook object>,
'metadata' :
}
}
"""
kbasews_uri = Unicode(URLS.workspace, config=True, help='Workspace service endpoint URI')
ipynb_type = Unicode('ipynb')
allowed_formats = List([u'json'])
node_format = ipynb_type
ws_type = Unicode(ws_util.ws_narrative_type, config=True, help='Type to store narratives within workspace service')
# regex for parsing out workspace_id and object_id from
# a "ws.{workspace}.{object}" string
ws_regex = re.compile('^ws\.(?P<wsid>\d+)\.obj\.(?P<objid>\d+)(\.(?P<ver>\d+))?')
# regex for parsing out fully qualified workspace name and object name
ws_regex2 = re.compile('^(?P<wsname>[\w:]+)/(?P<objname>[\w]+)')
# regex for matching KBase typed-object identifiers (e.g. "kb|g.1234")
kbid_regex = re.compile('^(kb\|[a-zA-Z]+\..+)')
# regex from pretty name path
objid_regex = re.compile('^.*\s-\s(?P<obj_long_id>ws\.(?P<wsid>\d+)\.obj\.(?P<objid>\d+))\s-\s')
# This is a regular expression to make sure that the workspace ID
# doesn't contain non-legit characters in the object ID field
# We use it like this to to translate names:
# wsid_regex.sub('',"Hello! Freaking World! 123".replace(' ','_'))
# to get an id of 'Hello_Freaking_World_123'
# We will enforce validation on the narrative naming GUI, but this is
# a safety net
wsid_regex = re.compile('[\W]+', re.UNICODE)
def __init__(self, *args, **kwargs):
    """Initialize the manager and verify a workspace endpoint is configured.

    Raises:
        HTTPError(412): if no KBase workspace service endpoint URI is set.
    """
    super(KBaseWSManager, self).__init__(*args, **kwargs)
    if not self.kbasews_uri:
        raise HTTPError(412, u"Missing KBase workspace service endpoint URI.")
    # Removed dead code: the original bound `mapping = Dict()` and
    # `rev_mapping = Dict()` to locals.  Those are traitlets *descriptors*,
    # meaningful only as class attributes; as locals they were discarded
    # immediately and had no effect.
    # Per-request session info (user id, token, ...); populated by the
    # front-end handlers on every request.
    self.kbase_session = {}
def _checkpoints_class_default(self):
    """Traitlets default: use the KBase-backed checkpoints implementation."""
    return KBaseCheckpoints
def get_userid(self):
    """Return the current user id (if logged in), or None."""
    token = biokbase.auth.Token()
    # The session dict takes precedence; the auth token is the fallback.
    fallback = token.user_id if token is not None else None
    return self.kbase_session.get(u'user_id', fallback)
def _clean_id(self, id):
    """Turn spaces into underscores, then drop every other non-word char."""
    underscored = id.replace(' ', '_')
    return self.wsid_regex.sub('', underscored)
#####
# API part 1: methods that must be implemented in subclasses.
#####
def dir_exists(self, path):
    """Only the root "directory" exists; it is the listing of all Narratives.

    An empty path refers to that root, so it exists; any other path does not
    name a directory in this backend.
    """
    return not path
def is_hidden(self, path):
    """We can only see what gets returned from Workspace lookup,
    so nothing should be hidden"""
    return False
def file_exists(self, path):
    """We only support narratives right now, so look up
    a narrative from that path.

    Raises HTTPError(404) for malformed paths and HTTPError(403) when the
    user lacks permission on the referenced narrative.
    """
    path = path.strip('/')
    # Translate "ws.X.obj.Y[.ver]" into a "X/Y[/ver]" workspace reference.
    obj_ref = self._obj_ref_from_path(path)
    if obj_ref is None:
        raise HTTPError(404, u'Path "{}" is not a valid Narrative path'.format(path))
    self.log.warn(u'looking up whether a narrative exists')
    try:
        self.log.warn(u'trying to get narrative {}'.format(obj_ref))
        return self.narrative_exists(obj_ref)
    except PermissionsError as e:
        # Permission failures surface as 403 rather than pretending
        # the narrative is missing.
        self.log.warn(u'found a 403 error')
        raise HTTPError(403, u"You do not have permission to view the narrative with id {}".format(path))
    # except Exception as e:
    #     self.log.debug('got a 500 error')
    #     raise HTTPError(500, e)
def exists(self, path):
    """Return True if *path* names the root directory or an existing narrative."""
    path = path.strip('/')
    # The empty path is the virtual directory that holds all narratives.
    return True if not path else self.file_exists(path)
def _wsobj_to_model(self, nar, content=True):
    """Convert workspace object metadata *nar* into an IPython contents model."""
    nar_id = u'ws.{}.obj.{}'.format(nar['wsid'], nar['objid'])
    display_name = u'{} - {} - {}'.format(nar['saved_by'], nar_id, nar['name'])
    model = base_model(display_name, nar_id)
    model.update({
        u'format': u'json',
        u'last_modified': nar[u'save_date'],
        u'type': u'notebook',
    })
    return model
def _obj_ref_from_path(self, path):
    """Translate a "ws.X.obj.Y[.ver]" path into a "X/Y[/ver]" workspace ref.

    Returns None when *path* cannot be parsed into workspace and object ids.
    """
    parsed = self._parse_path(path)
    if parsed is None or u'wsid' not in parsed or u'objid' not in parsed:
        return None
    pieces = [parsed[u'wsid'], parsed[u'objid']]
    if parsed[u'ver'] is not None:
        pieces.append(parsed[u'ver'])
    return u'/'.join(pieces)
def _parse_path(self, path):
    """Parse "ws.X.obj.Y[.ver]" into a dict with wsid/objid/ver group strings.

    Returns None when *path* does not match the expected pattern; 'ver' is
    None when no version suffix is present.
    """
    match = self.ws_regex.match(path)
    if match is None:
        return None
    return {key: match.group(key) for key in (u'wsid', u'objid', u'ver')}
def get(self, path, content=True, type=None, format=None):
    """Get the model of a file or directory with or without content.

    An empty path (or type == 'directory') returns the listing of all
    narratives; otherwise the narrative named by *path* is fetched from
    the workspace service.
    """
    path = path.strip('/')
    model = base_model(path, path)
    if self.exists(path) and type != u'directory':
        #It's a narrative object, so try to fetch it.
        obj_ref = self._parse_path(path)
        if not obj_ref:
            raise HTTPError(404, u'Unknown Narrative "{}"'.format(path))
        try:
            nar_obj = self.read_narrative(u'{}/{}'.format(obj_ref[u'wsid'], obj_ref[u'objid']), content)
            model[u'type'] = u'notebook'
            user = self.get_userid()
            if content:
                model['format'] = u'json'
                # Round-trip through JSON so nbformat gets a clean v4 notebook.
                nb = nbformat.reads(json.dumps(nar_obj['data']), 4)
                nb['metadata'].pop('orig_nbformat', None)
                self.mark_trusted_cells(nb, path)
                model['content'] = nb
                model['name'] = nar_obj['data']['metadata'].get('name', 'Untitled')
                # Stash the current narrative/workspace in the process env.
                util.kbase_env.narrative = 'ws.{}.obj.{}'.format(obj_ref['wsid'], obj_ref['objid'])
                util.kbase_env.workspace = model['content'].metadata.ws_name
            if user is not None:
                model['writable'] = self.narrative_writable(u'{}/{}'.format(obj_ref['wsid'], obj_ref['objid']), user)
            self.log.info(u'Got narrative {}'.format(model['name']))
        except HTTPError:
            # Pass through HTTP errors (404/403/...) unchanged.
            raise
        except PermissionsError as e:
            raise HTTPError(403, e)
        except Exception as e:
            raise HTTPError(500, u'An error occurred while fetching your narrative: {}'.format(e))
    if not path or type == 'directory':
        #if it's the empty string, look up all narratives, treat them as a dir
        model['type'] = type
        model['format'] = u'json'
        if content:
            contents = []
            nar_list = self.list_narratives()
            for nar in nar_list:
                contents.append(self._wsobj_to_model(nar, content=False))
            model['content'] = contents
    return model
def save(self, model, path):
    """Save the file or directory and return the model with no content.

    Save implementations should call self.run_pre_save_hook(model=model, path=path)
    prior to writing any data.

    Raises HTTPError(400) on malformed models, 403 on permission failures,
    and 500 on any other write error.
    """
    path = path.strip('/')
    # Removed dead code: the original computed `match = self.ws_regex.match(path)`
    # and never used it.
    if 'type' not in model:
        raise HTTPError(400, u'No IPython model type provided')
    if model['type'] != 'notebook':
        raise HTTPError(400, u'We currently only support saving Narratives!')
    if 'content' not in model and model['type'] != 'directory':
        raise HTTPError(400, u'No Narrative content found while trying to save')
    self.log.debug(u"writing Narrative %s." % path)
    nb = nbformat.from_dict(model['content'])
    self.check_and_sign(nb, path)
    try:
        result = self.write_narrative(self._obj_ref_from_path(path), nb, self.get_userid())
        # write_narrative returns (notebook, wsid, objid).
        new_id = u"ws.%s.obj.%s" % (result[1], result[2])
        util.kbase_env.narrative = new_id
        nb = result[0]
        self.validate_notebook_model(model)
        validation_message = model.get(u'message', None)
        # Re-fetch the saved model (without content) and carry over any
        # validation message.
        model = self.get(path, content=False)
        if validation_message:
            model[u'message'] = validation_message
        return model
    except PermissionsError as err:
        # NOTE(review): err.message presumably exists on PermissionsError —
        # confirm; plain Exceptions have no .message on Python 3.
        raise HTTPError(403, err.message)
    except Exception as err:
        raise HTTPError(500, u'An error occurred while saving your Narrative: {}'.format(err))
def delete_file(self, path):
    """Delete file or directory by path.

    Not supported by this manager: narrative deletion is performed by other
    KBase services, so this always raises HTTPError(501).
    """
    raise HTTPError(501, u'Narrative deletion not implemented here. Deletion should be handled elsewhere.')
def rename_file(self, path, new_name):
    """Rename a file from old_path to new_path.

    This gets tricky in KBase since we don't deal with paths, but with
    actual file names. For now, assume that 'old_path' won't actually
    change, but the 'new_path' is actually the new Narrative name.
    """
    path = path.strip('/')
    try:
        self.rename_narrative(self._obj_ref_from_path(path), self.get_userid(), new_name)
    except PermissionsError as err:
        # NOTE(review): permission failures are deliberately swallowed here
        # (best-effort rename); the commented-out raise below shows the
        # stricter alternative — confirm this is still the desired policy.
        pass
        # raise HTTPError(403, err.message)
    except Exception as err:
        raise HTTPError(500, u'An error occurred while renaming your Narrative: {}'.format(err))
# API part 2: methods that have useable default
# implementations, but can be overridden in subclasses.
def delete(self, path):
    """Delete a file/directory and any associated checkpoints.

    Since delete_file() always raises 501 in this manager, this effectively
    rejects all deletions.
    """
    path = path.strip('/')
    if not path:
        raise HTTPError(400, "Can't delete root")
    self.delete_file(path)
    self.checkpoints.delete_all_checkpoints(path)
def rename(self, old_path, new_path):
    """Rename a file and any checkpoints associated with that file."""
    self.rename_file(old_path, new_path)
    self.checkpoints.rename_all_checkpoints(old_path, new_path)
def update(self, model, path):
    """Update the file's path

    For use in PATCH requests, to enable renaming a file without
    re-uploading its contents. Only used for renaming at the moment.
    """
    self.log.warn(u'update')
    self.log.warn(model)
    self.log.warn(path)
    path = path.strip('/')
    new_path = model.get('path', path).strip('/')
    # Strip a trailing .ipynb extension; workspace ids don't carry one.
    if new_path.endswith('.ipynb'):
        new_path = new_path[:-len('.ipynb')]
    self.rename(path, new_path)
    # Note: re-fetches the model at the OLD path (the ws.X.obj.Y id does
    # not change on rename).
    model = self.get(path, content=False)
    self.log.warn(model)
    return model
def increment_filename(self, filename, path='', insert=''):
    """Increment a filename until it is unique.

    Parameters
    ----------
    filename : unicode
        The name of a file, including extension
    path : unicode
        The API path of the target's directory
    insert : unicode
        Separator inserted before the counter (e.g. '-Copy').

    Returns
    -------
    name : unicode
        A filename that is unique, based on the input filename.
    """
    path = path.strip('/')
    basename, ext = os.path.splitext(filename)
    # Bug fix: the original iterated `itertools.count()` but this module
    # never imports itertools, so the first call raised NameError.  A plain
    # counter loop needs no import and behaves identically.
    counter = 0
    while True:
        insert_i = '{}{}'.format(insert, counter) if counter else ''
        name = u'{basename}{insert}{ext}'.format(basename=basename,
                                                 insert=insert_i, ext=ext)
        if not self.exists(u'{}/{}'.format(path, name)):
            return name
        counter += 1
def validate_notebook_model(self, model):
    """Add failed-validation message to model

    Runs nbformat schema validation on model['content']; on failure a
    human-readable 'message' key is added.  The model is returned either way.
    """
    try:
        validate(model['content'])
    except ValidationError as e:
        # default=... keeps json.dumps from blowing up on non-serializable
        # instance fragments in the failing sub-document.
        model['message'] = u'Notebook Validation failed: {}:\n{}'.format(
            e.message, json.dumps(e.instance, indent=1, default=lambda obj: '<UNKNOWN>'),
        )
    return model
def new_untitled(self, path='', type='', ext=''):
    """Create a new untitled file or directory in path

    path must be a directory

    File extension can be specified.

    Use `new` to create files with a fully specified path (including filename).
    """
    path = path.strip('/')
    if not self.dir_exists(path):
        raise HTTPError(404, 'No such directory: %s' % path)
    model = {}
    if type:
        model['type'] = type
    # Infer the type from the extension when not given explicitly.
    if ext == '.ipynb':
        model.setdefault('type', 'notebook')
    else:
        model.setdefault('type', 'file')
    insert = ''
    # untitled_directory / untitled_notebook / untitled_file are
    # ContentsManager traits providing the base names.
    if model['type'] == 'directory':
        untitled = self.untitled_directory
        insert = ' '
    elif model['type'] == 'notebook':
        untitled = self.untitled_notebook
        ext = '.ipynb'
    elif model['type'] == 'file':
        untitled = self.untitled_file
    else:
        raise HTTPError(400, "Unexpected model type: %r" % model['type'])
    name = self.increment_filename(untitled + ext, path, insert=insert)
    path = u'{0}/{1}'.format(path, name)
    return self.new(model, path)
def new(self, model=None, path=''):
    """Create a new file or directory and return its model with no content.

    To create a new untitled entity in a directory, use `new_untitled`.
    """
    path = path.strip('/')
    if model is None:
        model = {}
    if path.endswith('.ipynb'):
        model.setdefault('type', 'notebook')
    else:
        model.setdefault('type', 'file')
    # no content, not a directory, so fill out new-file model
    if 'content' not in model and model['type'] != 'directory':
        if model['type'] == 'notebook':
            # Bug fix: the original called new_notebook(), which was never
            # imported (NameError).  Use the v4 factory from the
            # already-imported nbformat package instead.
            model['content'] = nbformat.v4.new_notebook()
            model['format'] = 'json'
        else:
            model['content'] = ''
            model['type'] = 'file'
            model['format'] = 'text'
    model = self.save(model, path)
    return model
def copy(self, from_path, to_path=None):
    """Copy an existing file and return its new model.

    If to_path not specified, it will be the parent directory of from_path.
    If to_path is a directory, filename will increment `from_path-Copy#.ext`.

    from_path must be a full path to a file.
    """
    path = from_path.strip('/')
    if to_path is not None:
        to_path = to_path.strip('/')
    if '/' in path:
        from_dir, from_name = path.rsplit('/', 1)
    else:
        from_dir = ''
        from_name = path
    model = self.get(path)
    model.pop('path', None)
    model.pop('name', None)
    if model['type'] == 'directory':
        raise HTTPError(400, "Can't copy directories")
    if to_path is None:
        to_path = from_dir
    if self.dir_exists(to_path):
        # Bug fix: copy_pat was never defined in this module (it lives in
        # notebook's filemanager), so this branch raised NameError.  The
        # pattern strips an existing "-Copy<N>" suffix before re-numbering.
        copy_pat = re.compile(r'\-Copy\d*\.')
        name = copy_pat.sub(u'.', from_name)
        to_name = self.increment_filename(name, to_path, insert='-Copy')
        to_path = u'{0}/{1}'.format(to_path, to_name)
    model = self.save(model, to_path)
    return model
def log_info(self):
    """Log a one-line description of this contents manager."""
    self.log.info(self.info_string())
def trust_notebook(self, path):
    """Explicitly trust a notebook

    Parameters
    ----------
    path : string
        The path of a notebook

    Fetches the notebook, marks every cell trusted via the notary, and
    saves it back (which also re-signs it).
    """
    model = self.get(path)
    nb = model['content']
    self.log.warn("Trusting notebook %s", path)
    self.notary.mark_cells(nb, True)
    self.save(model, path)
def check_and_sign(self, nb, path=''):
    """Check for trusted cells, and sign the notebook.

    Called as a part of saving notebooks.

    Parameters
    ----------
    nb : dict
        The notebook dict
    path : string
        The notebook's path (for logging)
    """
    if not self.notary.check_cells(nb):
        # Untrusted content: save it unsigned, but leave a trace in the log.
        self.log.warn("Saving untrusted notebook %s", path)
    else:
        self.notary.sign(nb)
def mark_trusted_cells(self, nb, path=''):
    """Mark cells as trusted if the notebook signature matches.

    Called as a part of loading notebooks.

    Parameters
    ----------
    nb : dict
        The notebook object (in current nbformat)
    path : string
        The notebook's path (for logging)

    Note: signature checking is currently disabled — every notebook is
    marked fully trusted regardless of its signature.
    """
    # commenting out, but leaving behind for a while.
    # trusted = self.notary.check_signature(nb)
    # if not trusted:
    #     self.log.warn("Notebook %s is not trusted", path)
    # self.notary.mark_cells(nb, trusted)
    self.log.warn("Notebook %s is totally trusted", path)
    # all notebooks are trustworthy, because KBase is Pollyanna.
    self.notary.mark_cells(nb, True)
def should_list(self, name):
    """Should this file/directory name be displayed in a listing?"""
    # Bug fix: fnmatch was never imported at module level, so this method
    # raised NameError whenever hide_globs was non-empty.  A function-scope
    # import keeps the fix local to this block.
    from fnmatch import fnmatch
    return not any(fnmatch(name, glob) for glob in self.hide_globs)
def info_string(self):
    """Describe this manager and its configured workspace endpoint."""
    return "Workspace Narrative Service with workspace endpoint at %s" % self.kbasews_uri
"repo_name": "msneddon/narrative",
"path": "src/biokbase/narrative/contents/kbasewsmanager.py",
"copies": "1",
"size": "20417",
"license": "mit",
"hash": -3595895161004785000,
"line_mean": 34.8210526316,
"line_max": 121,
"alpha_frac": 0.5754028506,
"autogenerated": false,
"ratio": 3.970633994554648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5046036845154648,
"avg_score": null,
"num_lines": null
} |
"""A notebook manager that uses the KBase workspace for storage.
Based on Travis Harrison's shocknbmanager and the azurenbmanager
Authors:
* Steve Chan <sychan@lbl.gov>
* Bill Riehl <wjriehl@lbl.gov>
Copyright (C) 2013 The Regents of the University of California
Department of Energy contract-operators of the Lawrence Berkeley National Laboratory
1 Cyclotron Road, Berkeley, CA 94720
Copyright (C) 2013 The KBase Project
Distributed unspecified open source license as of 9/27/2013
"""
# System
import datetime
import dateutil.parser
import os
import json
import re
import importlib
# Third-party
from unicodedata import normalize
from tornado import web
# IPython
from IPython.html.services.notebooks.nbmanager import NotebookManager
from IPython.config.configurable import LoggingConfigurable
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Dict, Bool, List, TraitError
from IPython.utils import tz
# Local
import biokbase.narrative.ws_util as ws_util
from biokbase.workspace.client import Workspace
import biokbase.narrative.common.service as service
from biokbase.narrative.common import util
import biokbase.auth
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class KBaseWSNotebookManager(NotebookManager):
"""
A notebook manager that uses the KBase workspace for storage.
The Workspace backend simply adds a few metadata fields into the
notebook object and pushes it into the workspace as the 'data'
part of a workspace object
Additional metadata fields
{
'id' : User specified title for the narrative alphanumeric + _
'creator' : {username of the creator of this notebook},
'description' : 'description of notebook',
'data_dependencies' : { list of kbase id strings }
'format' : self.node_format
'workspace' : the workspace that it was loaded from or saved to
}
This handler expects that on every request, the session attribute for an
instance will be populated by the front end handlers. That's gross, but
that's what we're running with for now.
Note: you'll probably see "That's gross, but..." a lot in this rev of the
code
Notebooks are identified with workspace identifiers of the format
{workspace_name}.{object_name}
Object format:
(New)
{
'dependencies' : List of workspace refs,
'notebook' : {
<mostly, the IPython notebook object>,
'metadata' :
}
}
"""
kbasews_uri = Unicode(service.URLS.workspace, config=True, help='Workspace service endpoint URI')
ipynb_type = Unicode('ipynb')
allowed_formats = List([u'json'])
node_format = ipynb_type
ws_type = Unicode(ws_util.ws_narrative_type, config=True, help='Type to store narratives within workspace service')
# regex for parsing out workspace_id and object_id from
# a "ws.{workspace}.{object}" string
ws_regex = re.compile('^ws\.(?P<wsid>\d+)\.obj\.(?P<objid>\d+)')
# regex for parsing out fully qualified workspace name and object name
ws_regex2 = re.compile('^(?P<wsname>[\w:]+)/(?P<objname>[\w]+)')
# regex for matching KBase typed-object identifiers (e.g. "kb|g.1234")
kbid_regex = re.compile('^(kb\|[a-zA-Z]+\..+)')
# This is a regular expression to make sure that the workspace ID
# doesn't contain non-legit characters in the object ID field
# We use it like this to to translate names:
# wsid_regex.sub('',"Hello! Freaking World! 123".replace(' ','_'))
# to get an id of 'Hello_Freaking_World_123'
# We will enforce validation on the narrative naming GUI, but this is
# a safety net
wsid_regex = re.compile('[\W]+', re.UNICODE)
def __init__(self, **kwargs):
    """Verify that we can connect to the configured WS instance.

    Raises:
        web.HTTPError(412): if no workspace endpoint URI is configured.
        web.HTTPError(500): if the workspace service cannot be reached.
    """
    # Bug fix: the original called super(NotebookManager, self).__init__,
    # which starts the MRO lookup *after* NotebookManager and silently skips
    # NotebookManager's own initializer.  The sibling manager class uses the
    # correct super(<own class>, self) pattern.
    super(KBaseWSNotebookManager, self).__init__(**kwargs)
    if not self.kbasews_uri:
        raise web.HTTPError(412, u"Missing KBase workspace service endpoint URI.")
    # Verify we can poke the Workspace service at that URI by just checking its
    # version
    try:
        wsclient = self.wsclient()
        wsclient.ver()
    except Exception as e:
        raise web.HTTPError(500, u"Unable to connect to workspace service"
                            u" at %s: %s " % (self.kbasews_uri, e))
    # Removed dead code: `mapping = Dict()` / `rev_mapping = Dict()` bound
    # traitlets descriptors to locals and discarded them immediately.
    # Per-request session info injected by the front-end handlers.
    self.kbase_session = {}
def get_userid(self):
    """Return the current user id (if logged in), or None."""
    token = biokbase.auth.Token()
    # Session value wins; fall back to the auth token's user id.
    default_id = token.user_id if token is not None else None
    return self.kbase_session.get('user_id', default_id)
def wsclient(self):
    """Return a workspace client object for the workspace
    endpoint in kbasews_uri

    A fresh client is constructed on every call.
    """
    return Workspace(self.kbasews_uri)
def _clean_id(self, id):
    """Turn spaces into underscores, then drop every other non-word char."""
    underscored = id.replace(' ', '_')
    return self.wsid_regex.sub('', underscored)
def list_notebooks(self):
    """List all notebooks in WSS

    For the ID field, we use "{ws_id}.{obj_id}"
    The obj_id field is sanitized version of document.ipynb.metadata.name

    Returns a list of dicts with two keys: 'name' and 'notebook_id'. 'name'
    should be of the format 'workspace name/Narrative name' and id should have
    the format 'ws.###.obj.###'
    """
    self.log.debug("Listing Narratives")
    self.log.debug("kbase_session = %s" % str(self.kbase_session))
    wsclient = self.wsclient()
    # Renamed from `all`, which shadowed the builtin.
    ws_meta = ws_util.get_wsobj_meta(wsclient)
    self.mapping = {}
    for ws_id in ws_meta.keys():
        pretty_name = "%s/%s" % (ws_meta[ws_id]['workspace'],
                                 ws_meta[ws_id]['meta'].get('name', "undefined"))
        self.mapping[ws_id] = pretty_name
    self.rev_mapping = {name: ws_id for ws_id, name in self.mapping.items()}
    data = [dict(notebook_id=ws_id, name=name) for ws_id, name in self.mapping.items()]
    return sorted(data, key=lambda item: item['name'])
def new_notebook(self):
    """
    Create an empty notebook and push it into the workspace without an object_id
    or name, so that the WSS generates a new object ID for us. Then return
    that.

    This will likely only be called as a developer tool from http://<base url>/narrative
    or from starting up locally.
    """
    wsclient = self.wsclient()
    user_id = self.get_userid()
    # Verify that our own home workspace exists, note that we aren't doing this
    # as a general thing for other workspaces
    #
    # This is needed for running locally - a workspace is required.
    try:
        (homews, homews_id) = ws_util.check_homews(wsclient, user_id)
    except Exception as e:
        raise web.HTTPError(401, u'User must be logged in to access their workspaces')
    # Have IPython create a new, empty notebook
    nb = current.new_notebook()
    # Timestamped default name, NFC-normalized then sanitized.
    new_name = normalize('NFC', u"Untitled %s" % (datetime.datetime.now().strftime("%y%m%d_%H%M%S")))
    new_name = self._clean_id(new_name)
    # Add in basic metadata to the ipynb object
    try:
        nb.metadata.ws_name = os.environ.get('KB_WORKSPACE_ID', homews)
        nb.metadata.creator = user_id
        nb.metadata.type = self.ws_type
        nb.metadata.description = ''
        nb.metadata.name = new_name
        nb.metadata.data_dependencies = []
        nb.metadata.job_ids = { 'methods' : [], 'apps' : [], 'job_usage' : { 'run_time': 0, 'queue_time': 0 } }
        nb.metadata.format = self.node_format
    except Exception as e:
        raise web.HTTPError(400, u'Unexpected error setting notebook attributes: %s' %e)
    try:
        # ObjectSaveData-shaped structure for the workspace service.
        wsobj = {
            'type' : self.ws_type,
            'data' : nb,
            'provenance' : [],
            'meta' : nb.metadata.copy(),
        }
        # We flatten the data_dependencies array into a json string so that the
        # workspace service will accept it
        wsobj['meta']['data_dependencies'] = json.dumps(wsobj['meta']['data_dependencies'])
        # Same for jobs list
        wsobj['meta']['job_ids'] = json.dumps(wsobj['meta']['job_ids'])
        wsid = homews_id
        self.log.debug("calling ws_util.put_wsobj")
        res = ws_util.put_wsobj(wsclient, wsid, wsobj)
        self.log.debug("save_object returned %s" % res)
    except Exception as e:
        raise web.HTTPError(500, u'%s saving Narrative: %s' % (type(e),e))
    # use "ws.ws_id.obj.object_id" as the identifier
    id = "ws.%s.obj.%s" % (res['wsid'], res['objid'])
    self._set_narrative_env(id)
    # update the mapping
    self.list_notebooks()
    return id
def delete_notebook_id(self, notebook_id):
    """Delete a notebook's id in the mapping.

    Raises web.HTTPError(400) when no user identity is available; silently
    does nothing when the id is not in the current mapping.
    """
    self.log.debug("delete_notebook_id(%s)"%(notebook_id))
    user_id = self.get_userid()
    if user_id is None:
        raise web.HTTPError(400, u'Cannot determine valid user identity!')
    if notebook_id in self.mapping:
        # Removed dead code: the original fetched the mapped name into a
        # local that was never used.
        super(KBaseWSNotebookManager, self).delete_notebook_id(notebook_id)
def notebook_exists(self, notebook_id):
    """Does a Narrative with notebook_id exist?

    Returns True if a Narrative with id notebook_id (format = ws.XXX.obj.YYY) exists,
    and returns False otherwise.
    """
    user_id = self.get_userid()
    if user_id is None:
        raise web.HTTPError(400, u'Cannot determine valid user identity!')
    # Look for it in the currently loaded map
    exists = super(KBaseWSNotebookManager, self).notebook_exists(notebook_id)
    self.log.debug("notebook_exists(%s) = %s"%(notebook_id,exists))
    if not exists:
        # The notebook doesn't exist among the notebooks we've loaded, lets see
        # if it exists at all in the workspace service
        self.log.debug("Checking other workspace")
        m = self.ws_regex.match(notebook_id)
        if not m:
            return False
        self.log.debug("Checking other workspace %s for %s"%(m.group('wsid'),m.group('objid')))
        try:
            objmeta = ws_util.get_wsobj_meta(self.wsclient(), ws_id=m.group('wsid'))
        # Fixed Python-2-only `except X, err` syntax; the `as` form is valid
        # on Python 2.6+ and Python 3.
        except ws_util.PermissionsError as err:
            return False # XXX: kind of a lie!
        if notebook_id in objmeta:
            self.mapping[notebook_id] = notebook_id
            return True
        else:
            return False
    return exists
def get_name(self, notebook_id):
    """Get a notebook name, raising 404 if not found"""
    # Bug fix: the original logged the literal string "%s" — the debug call
    # had a %s placeholder but never applied the notebook_id argument.
    self.log.debug("Checking for name of Narrative %s" % notebook_id)
    # Refresh the id -> name mapping before the lookup.
    self.list_notebooks()
    try:
        name = self.mapping[notebook_id]
        self.log.debug("get_name(%s) = %s" % (notebook_id, name))
    except KeyError:
        raise web.HTTPError(404, u'Narrative does not exist: %s' % notebook_id)
    return name
def read_notebook_object(self, notebook_id):
    """Get the Notebook representation of a notebook by notebook_id.

    There are now new and legacy versions of Narratives that need to be handled.
    The old version just included the Notebook object as the Narrative object,
    with an optional Metadata field.
    The new version has a slightly more structured Metadata field, with a
    required data_dependencies array.
    This really shouldn't affect reading the object much, but should be kept
    in mind.
    """
    self.log.debug("Reading Narrative %s." % notebook_id)
    user_id = self.get_userid()
    if user_id is None:
        raise web.HTTPError(400, u'Missing user identity from kbase_session object')
    try:
        wsobj = ws_util.get_wsobj(self.wsclient(), notebook_id, self.ws_type)
    # Fixed Python-2-only `except X, e` syntax (valid on 2.6+ and 3.x).
    except ws_util.BadWorkspaceID as e:
        raise web.HTTPError(500, u'Narrative %s not found: %s' % (notebook_id, e))
    if 'notebook' in wsobj['data']:
        # New-style object: the notebook is nested under 'notebook'.
        # Bug fix: the original subscripted the json.dumps function itself
        # (json.dumps['data']['notebook']) -> TypeError; serialize the
        # nested payload instead.
        jsonnb = json.dumps(wsobj['data']['notebook'])
    else:
        # Legacy object: the data IS the notebook.
        jsonnb = json.dumps(wsobj['data'])
    nb = current.reads(jsonnb, u'json')
    # Set the notebook metadata workspace to the workspace this came from
    nb.metadata.ws_name = wsobj['metadata']['workspace']
    last_modified = dateutil.parser.parse(wsobj['metadata']['save_date'])
    self.log.debug("Narrative successfully read")
    # Stash last read NB in env
    self._set_narrative_env(notebook_id)
    os.environ['KB_WORKSPACE_ID'] = nb.metadata.ws_name
    return last_modified, nb
# def extract_data_dependencies(self, nb):
# """
# This is an internal method that parses out the cells in the notebook nb
# and returns an array of type:value parameters based on the form input
# specification and the values entered by the user.
# I the cell metadata, we look under:
# kb-cell.method.properties.parameters.paramN.type
# for anything that isn't a string or numeric, and we combine that type with
# the corresponding value found under
# kb-cell.widget_state[0].state.paramN
# We create an array of type:value pairs from the params and return that
# """
# # set of types that we ignore
# ignore = set(['string','Unicode','Numeric','Integer','List','a number'])
# deps = set()
# # What default workspace are we going to use?
# ws = os.environ.get('KB_WORKSPACE_ID',nb.metadata.ws_name)
# for wksheet in nb.get('worksheets'):
# for cell in wksheet.get('cells'):
# try:
# allparams = cell['metadata']['kb-cell']['method']['properties']['parameters']
# except KeyError:
# continue
# params = [param for param in allparams.keys() if allparams[param]['type'] not in ignore]
# try:
# paramvals = cell['metadata']['kb-cell']['widget_state'][0]['state']
# except KeyError:
# continue
# for param in params:
# try:
# paramval = paramvals[param]
# # Is this a fully qualified workspace name?
# if (self.ws_regex.match(paramval) or
# self.ws_regex2.match(paramval) or
# self.kbid_regex.match(paramval)):
# dep = "%s %s" % (allparams[param]['type'], paramval)
# else:
# dep = "%s %s" % (allparams[param]['type'], paramval)
# deps.add(dep)
# except KeyError:
# continue
# return list(deps)
def extract_cell_info(self, nb):
    """
    This is an internal method that returns, as a dict, how many kb-method,
    kb-app, and IPython cells exist in the notebook object.
    For app and method cells, it counts them based on their method/app ids
    In the end, it returns a dict like this:
    {
        'method': {
            'my_method' : 2,
            'your_method' : 1
        },
        'app': {
            'my app' : 3
        },
        'ipython': {
            'code' : 5,
            'markdown' : 6
        }
    }
    """
    cell_types = {'method' : {},
                  'app' : {},
                  'output': 0,
                  'ipython' : {'markdown': 0, 'code': 0}}
    for wksheet in nb.get('worksheets'):
        for cell in wksheet.get('cells'):
            meta = cell['metadata']
            if 'kb-cell' in meta:
                t = None
                # It's a KBase cell! So, either an app or method.
                if 'type' in meta['kb-cell'] and meta['kb-cell']['type'] == 'function_output':
                    cell_types['output'] = cell_types['output'] + 1
                else:
                    if 'app' in meta['kb-cell']:
                        t = 'app'
                    elif 'method' in meta['kb-cell']:
                        t = 'method'
                    else:
                        # that should cover our cases
                        continue
                    if t is not None:
                        try:
                            # Count per app/method id; missing 'info'/'id'
                            # keys are skipped silently.
                            count = 1
                            app_id = meta['kb-cell'][t]['info']['id']
                            if app_id in cell_types[t]:
                                count = cell_types[t][app_id] + 1
                            cell_types[t][app_id] = count
                        except KeyError:
                            continue
            else:
                # Plain IPython cell: tally by its cell_type (code/markdown).
                t = cell['cell_type']
                cell_types['ipython'][t] = cell_types['ipython'][t] + 1
    return cell_types
def write_notebook_object(self, nb, notebook_id=None):
"""Save an existing notebook object by notebook_id."""
self.log.debug("writing Narrative %s." % notebook_id)
wsclient = self.wsclient()
user_id = self.get_userid()
if user_id is None:
raise web.HTTPError(400, u'Cannot determine user identity from '
u'session information')
# we don't rename anymore--- we only set the name in the metadata
#try:
# new_name = normalize('NFC', nb.metadata.name)
#except AttributeError:
# raise web.HTTPError(400, u'Missing Narrative name')
#new_name = self._clean_id(new_name)
# Verify that our own home workspace exists, note that we aren't doing this
# as a general thing for other workspaces
wsclient = self.wsclient()
(homews, homews_id) = ws_util.check_homews(wsclient, user_id)
# Carry over some of the metadata stuff from ShockNBManager
try:
if not hasattr(nb.metadata, 'name'):
nb.metadata.name = 'Untitled'
if not hasattr(nb.metadata, 'ws_name'):
nb.metadata.ws_name = os.environ.get('KB_WORKSPACE_ID',homews)
if not hasattr(nb.metadata, 'creator'):
nb.metadata.creator = user_id
if not hasattr(nb.metadata, 'type'):
nb.metadata.type = self.ws_type
if not hasattr(nb.metadata, 'description'):
nb.metadata.description = ''
# These are now stored on the front end explicitly as a list of object references
# This gets auto-updated on the front end, and is easier to manage.
if not hasattr(nb.metadata, 'data_dependencies'):
nb.metadata.data_dependencies = list()
if not hasattr(nb.metadata, 'job_ids'):
nb.metadata.job_ids = { 'methods' : [], 'apps' : [], 'job_usage': { 'queue_time': 0, 'run_time': 0 } }
if 'methods' not in nb.metadata['job_ids']:
nb.metadata.job_ids['methods'] = list()
if 'apps' not in nb.metadata['job_ids']:
nb.metadata.job_ids['apps'] = list()
if 'job_usage' not in nb.metadata['job_ids']:
nb.metadata.job_ids['job_usage'] = { 'queue_time': 0, 'run_time': 0 }
nb.metadata.format = self.node_format
except Exception as e:
raise web.HTTPError(400, u'Unexpected error setting Narrative attributes: %s' %e)
# First, init wsid and wsobj, since they'll be used later
wsid = '' # the workspace id
wsobj = dict() # the workspace object
# Figure out the workspace and object ids. If we have a notebook_id, get it from there
# otherwise, figure out the workspace id from the metadata.
if notebook_id:
m = self.ws_regex.match(notebook_id)
else:
m = None
# After this logic wsid is guaranteed to get set.
# The objid of wsobj might not, but that's fine! The workspace will just assign a new id if so.
if m:
# wsid, objid = ws.XXX.obj.YYY
wsid = m.group('wsid')
wsobj['objid'] = m.group('objid')
elif nb.metadata.ws_name == homews:
wsid = homews_id
#wsobj['name'] = new_name
else:
wsid = ws_util.get_wsid(nb.metadata.ws_name)
#wsobj['name'] = new_name
# With that set, update the workspace metadata with the new info.
try:
updated_metadata = {
"is_temporary":"false",
"narrative_nice_name":nb.metadata.name
};
ws_util.alter_workspace_metadata(wsclient, None, updated_metadata, ws_id=wsid)
except Exception as e:
raise web.HTTPError(500, u'Error saving Narrative: %s, %s' % (e.__str__(), wsid))
# Now we can save the Narrative object.
try:
# 'wsobj' = the ObjectSaveData type from the workspace client
# requires type, data (the Narrative typed object), provenance,
# optionally, user metadata
#
# requires ONE AND ONLY ONE of objid (existing object id, number) or name (string)
wsobj.update({ 'type' : self.ws_type,
'data' : nb,
'provenance' : [
{
'service' : 'narrative',
'description': 'saved through the narrative interface'
}
],
'meta' : nb.metadata.copy(),
})
# We flatten the data_dependencies array into a json string so that the
# workspace service will accept it
wsobj['meta']['data_dependencies'] = json.dumps(wsobj['meta']['data_dependencies'])
wsobj['meta']['methods'] = json.dumps(self.extract_cell_info(nb))
# Sort out job info we want to keep
# Gonna look like this, so init it that way
nb_job_usage = nb.metadata.job_ids.get('job_usage', {'queue_time':0, 'run_time':0})
job_info = {
'queue_time': nb_job_usage.get('queue_time', 0),
'run_time': nb_job_usage.get('run_time', 0),
'running': 0,
'completed': 0,
'error': 0
}
for job in nb.metadata.job_ids['methods'] + nb.metadata.job_ids['apps']:
status = job.get('status', 'running')
if status.startswith('complete'):
job_info['completed'] += 1
elif 'error' in status:
job_info['error'] += 1
else:
job_info['running'] += 1
wsobj['meta']['job_info'] = json.dumps(job_info)
if 'job_ids' in wsobj['meta']:
wsobj['meta'].pop('job_ids')
# # ------
# # If we're given a notebook id, try to parse it for the save parameters
# if notebook_id:
# m = self.ws_regex.match(notebook_id)
# else:
# m = None
# if m:
# # wsid, objid = ws.XXX.obj.YYY
# wsid = m.group('wsid')
# wsobj['objid'] = m.group('objid')
# elif nb.metadata.ws_name == homews:
# wsid = homews_id
# #wsobj['name'] = new_name
# else:
# wsid = ws_util.get_wsid(nb.metadata.ws_name)
# #wsobj['name'] = new_name
# # --------
self.log.debug("calling ws_util.put_wsobj")
res = ws_util.put_wsobj(wsclient, wsid, wsobj)
self.log.debug("save_object returned %s" % res)
# we no longer update names
# Now that we've saved the object, if its Narrative name (new_name) has changed,
# update that in the Workspace
#if (res['name'] != new_name):
# identity = { 'wsid' : res['wsid'], 'objid' : res['objid'] }
# res = ws_util.rename_wsobj(wsclient, identity, new_name)
except Exception as e:
raise web.HTTPError(500, u'%s saving Narrative: %s' % (type(e),e))
# use "ws.ws_id.obj.object_id" as the identifier
id = "ws.%s.obj.%s" % (res['wsid'], res['objid'])
self.mapping[id] = "%s/%s" % (res['workspace'], res['name'])
self._set_narrative_env(id)
return id
def delete_notebook(self, notebook_id):
    """Delete the Narrative workspace object referenced by notebook_id.

    Parameters
    ----------
    notebook_id : str
        A 'ws.<wsid>.obj.<objid>' style identifier.

    Raises
    ------
    web.HTTPError(400)
        If the user identity cannot be determined or the id is missing.
    ws_util.BadWorkspaceID
        If the id does not match the expected ws.<id>.obj.<id> pattern.
    """
    wsclient = self.wsclient()
    user_id = self.get_userid()
    if user_id is None:
        raise web.HTTPError(400, u'Cannot determine user identity from session information')
    if notebook_id is None:
        raise web.HTTPError(400, u'Missing Narrative id')
    # Log once (the original logged the same message twice) and use lazy
    # %-style args so formatting is skipped when debug is disabled.
    self.log.debug("deleting Narrative %s", notebook_id)
    m = self.ws_regex.match(notebook_id)
    if m:
        res = ws_util.delete_wsobj(wsclient, m.group('wsid'), m.group('objid'))
        self.log.debug("delete object result: %s", res)
    else:
        raise ws_util.BadWorkspaceID(notebook_id)
    # Drop the id from the in-memory mapping as well.
    self.delete_notebook_id(notebook_id)
# public checkpoint API
# The workspace service handles versioning and has every ancient version stored
# in it - support for that will be handled by a workspace browser tool, and
# not the narrative
def create_checkpoint(self, notebook_id):
    """Create a checkpoint from the current state of a notebook"""
    # Only a single well-known checkpoint id is ever used; the workspace
    # service keeps the real version history, so this method is a stub
    # that just reports a fresh timestamp.
    checkpoint_id = u"checkpoint"
    chkpt_created = datetime.datetime.utcnow()
    # Record which narrative is active in the shared environment.
    self._set_narrative_env(notebook_id)
    # This is a no-op for now
    # return the checkpoint info
    return { 'checkpoint_id' : checkpoint_id , 'last_modified' : chkpt_created }
def list_checkpoints(self, notebook_id):
    """Return the checkpoints for the given notebook.

    Version history lives entirely in the workspace service, so this
    manager never has checkpoints of its own; the result is always an
    empty list.
    """
    return []
def restore_checkpoint(self, notebook_id, checkpoint_id):
    """Restore a notebook to a checkpointed state.

    No-op: the workspace service owns version history, so there is
    nothing to restore here.
    """

def delete_checkpoint(self, notebook_id, checkpoint_id):
    """Delete a notebook's checkpoint.

    No-op for the same reason as restore_checkpoint.
    """

def log_info(self):
    """Log a one-line description of this notebook manager."""
    self.log.info("Service Narratives from the KBase Workspace service")
def info_string(self):
    """Return a human-readable description of the backing service."""
    endpoint = self.kbasews_uri
    return "Workspace Narrative Service with workspace endpoint at %s" % endpoint

def _set_narrative_env(self, id_):
    """Publish the active narrative id into the shared kbase environment."""
    util.kbase_env.narrative = id_
#
# This is code that patches the regular expressions used in the default routes
# of tornado handlers. IPython installs handlers that recognize a UUID as the
# kbase notebook id, but we're using workspace_name.object_id so the routes
# need to be updated otherwise you can't reach the handlers.
#
# We use these to modify the routes installed by the notebook
# handlers in the main IPython code without having to change the IPython code
# directly
#
def handler_route_replace(handlers, oldre, newre):
    """Look for a regex in a tornado routing table and replace it with a new one.

    Parameters
    ----------
    handlers : list of (route_regex, handler) tuples
        Mutated in place; handler objects are preserved.
    oldre : str
        The old route pattern text. It is escaped before matching because
        the route strings are themselves regexes and we want a literal match.
    newre : str
        Replacement pattern text.
    """
    needle = re.escape(oldre)
    # enumerate() replaces the former `range(0, len(handlers))` index loop;
    # the redundant `len(handlers) > 0` guard is unnecessary (an empty list
    # simply yields no iterations).
    for i, (route, handler) in enumerate(handlers):
        new_route = re.sub(needle, newre, route)
        if new_route != route:
            handlers[i] = (new_route, handler)
# Patch the url regex to match our workspace identifiers
import IPython.html.base.handlers
# Both the notebook page handlers and the notebook REST service handlers
# register routes that capture a UUID-shaped notebook id; patch the
# default_handlers table of each module so the KBase id form is routed.
tgt_handlers = ('IPython.html.notebook.handlers',
                'IPython.html.services.notebooks.handlers')
for handlerstr in tgt_handlers:
    IPython.html.base.handlers.app_log.debug("Patching routes in %s.default_handler" % handlerstr)
    handler = importlib.import_module(handlerstr)
    # Swap the UUID capture group for the ws.<num>.obj.<num> form.
    handler_route_replace(handler.default_handlers, r'(?P<notebook_id>\w+-\w+-\w+-\w+-\w+)', r'(?P<notebook_id>ws\.\d+\.obj\.\d+)')
# Load the plupload handler
import upload_handler
upload_handler.insert_plupload_handler()
| {
"repo_name": "nlharris/narrative",
"path": "src/biokbase/narrative/kbasewsmanager.py",
"copies": "4",
"size": "28981",
"license": "mit",
"hash": 565373236168708860,
"line_mean": 41.1848617176,
"line_max": 130,
"alpha_frac": 0.56347262,
"autogenerated": false,
"ratio": 3.983095107201759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6546567727201759,
"avg_score": null,
"num_lines": null
} |
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import io
import os
import glob
import shutil
from unicodedata import normalize
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Dict, Bool, TraitError
from IPython.utils import tz
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
    """Notebook manager backed by .ipynb files on the local filesystem."""

    # Optionally mirror each saved .ipynb with a .py script next to it.
    save_script = Bool(False, config=True,
        help="""Automatically create a Python script when saving the notebook.
        For easier use of import, %run and %load across notebooks, a
        <notebook-name>.py script will be created next to any
        <notebook-name>.ipynb on each save. This can also be set with the
        short `--script` flag.
        """
    )

    checkpoint_dir = Unicode(config=True,
        help="""The location in which to keep notebook checkpoints
        By default, it is notebook-dir/.ipynb_checkpoints
        """
    )

    def _checkpoint_dir_default(self):
        """Default the checkpoint dir to notebook-dir/.ipynb_checkpoints."""
        return os.path.join(self.notebook_dir, '.ipynb_checkpoints')

    def _checkpoint_dir_changed(self, name, old, new):
        """do a bit of validation of the checkpoint dir"""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            abs_new = os.path.abspath(new)
            self.checkpoint_dir = abs_new
            return
        if os.path.exists(new) and not os.path.isdir(new):
            raise TraitError("checkpoint dir %r is not a directory" % new)
        if not os.path.exists(new):
            self.log.info("Creating checkpoint dir %s", new)
            try:
                os.mkdir(new)
            except OSError:
                # Was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit; mkdir failures are OSError.
                raise TraitError("Couldn't create checkpoint dir %r" % new)

    filename_ext = Unicode(u'.ipynb')

    # Map notebook names back to notebook_ids (inverse of self.mapping).
    rev_mapping = Dict()
def get_notebook_names(self):
    """List all notebook names in the notebook dir."""
    pattern = os.path.join(self.notebook_dir, '*' + self.filename_ext)
    names = []
    for full_path in glob.glob(pattern):
        stem = os.path.splitext(os.path.basename(full_path))[0]
        # NFC-normalize so visually-identical names map to one entry.
        names.append(normalize('NFC', stem))
    return names
def list_notebooks(self):
    """List all notebooks in the notebook dir."""
    names = self.get_notebook_names()
    data = []
    for name in names:
        # Lazily assign ids: files discovered on disk that have never been
        # seen before get a fresh notebook_id here.
        if name not in self.rev_mapping:
            notebook_id = self.new_notebook_id(name)
        else:
            notebook_id = self.rev_mapping[name]
        data.append(dict(notebook_id=notebook_id,name=name))
    data = sorted(data, key=lambda item: item['name'])
    return data

def new_notebook_id(self, name):
    """Generate a new notebook_id for a name and store its mappings."""
    # Keep rev_mapping (name -> id) in sync with the base class's
    # mapping (id -> name).
    notebook_id = super(FileNotebookManager, self).new_notebook_id(name)
    self.rev_mapping[name] = notebook_id
    return notebook_id

def delete_notebook_id(self, notebook_id):
    """Delete a notebook's id in the mapping."""
    name = self.mapping[notebook_id]
    super(FileNotebookManager, self).delete_notebook_id(notebook_id)
    del self.rev_mapping[name]

def notebook_exists(self, notebook_id):
    """Does a notebook exist?"""
    # Known id AND the backing .ipynb file is still on disk.
    exists = super(FileNotebookManager, self).notebook_exists(notebook_id)
    if not exists:
        return False
    path = self.get_path_by_name(self.mapping[notebook_id])
    return os.path.isfile(path)

def get_name(self, notebook_id):
    """get a notebook name, raising 404 if not found"""
    try:
        name = self.mapping[notebook_id]
    except KeyError:
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    return name

def get_path(self, notebook_id):
    """Return a full path to a notebook given its notebook_id."""
    name = self.get_name(notebook_id)
    return self.get_path_by_name(name)

def get_path_by_name(self, name):
    """Return a full path to a notebook given its name."""
    filename = name + self.filename_ext
    path = os.path.join(self.notebook_dir, filename)
    return path

def read_notebook_object_from_path(self, path):
    """read a notebook object from a path

    Returns (last_modified, nb): the file's mtime as an aware UTC
    datetime and the parsed notebook object.
    Raises web.HTTPError(400) if the file cannot be parsed.
    """
    info = os.stat(path)
    last_modified = tz.utcfromtimestamp(info.st_mtime)
    with open(path,'r') as f:
        s = f.read()
        try:
            # v1 and v2 and json in the .ipynb files.
            nb = current.reads(s, u'json')
        except ValueError as e:
            msg = u"Unreadable Notebook: %s" % e
            raise web.HTTPError(400, msg, reason=msg)
    return last_modified, nb

def read_notebook_object(self, notebook_id):
    """Get the Notebook representation of a notebook by notebook_id."""
    path = self.get_path(notebook_id)
    if not os.path.isfile(path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    last_modified, nb = self.read_notebook_object_from_path(path)
    # Always use the filename as the notebook name.
    # Eventually we will get rid of the notebook name in the metadata
    # but for now, that name is just an empty string. Until the notebooks
    # web service knows about names in URLs we still pass the name
    # back to the web app using the metadata though.
    nb.metadata.name = os.path.splitext(os.path.basename(path))[0]
    return last_modified, nb
def write_notebook_object(self, nb, notebook_id=None):
    """Save an existing notebook object by notebook_id.

    Writes the .ipynb (and optional .py sibling), then — if the notebook's
    name changed — migrates the id mappings, the old files and any
    checkpoints to the new name.  Returns the notebook_id.
    """
    try:
        new_name = normalize('NFC', nb.metadata.name)
    except AttributeError:
        raise web.HTTPError(400, u'Missing notebook name')
    if notebook_id is None:
        notebook_id = self.new_notebook_id(new_name)
    if notebook_id not in self.mapping:
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    # Capture the pre-save name and checkpoints so renames can be
    # detected and migrated after the write succeeds.
    old_name = self.mapping[notebook_id]
    old_checkpoints = self.list_checkpoints(notebook_id)
    path = self.get_path_by_name(new_name)
    # Right before we save the notebook, we write an empty string as the
    # notebook name in the metadata. This is to prepare for removing
    # this attribute entirely post 1.0. The web app still uses the metadata
    # name for now.
    nb.metadata.name = u''
    try:
        self.log.debug("Autosaving notebook %s", path)
        with open(path,'w') as f:
            current.write(nb, f, u'json')
    except Exception as e:
        raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s' % e)
    # save .py script as well
    if self.save_script:
        pypath = os.path.splitext(path)[0] + '.py'
        self.log.debug("Writing script %s", pypath)
        try:
            with io.open(pypath,'w', encoding='utf-8') as f:
                current.write(nb, f, u'py')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s' % e)
    # remove old files if the name changed
    if old_name != new_name:
        # update mapping
        self.mapping[notebook_id] = new_name
        self.rev_mapping[new_name] = notebook_id
        del self.rev_mapping[old_name]
        # remove renamed original, if it exists
        old_path = self.get_path_by_name(old_name)
        if os.path.isfile(old_path):
            self.log.debug("unlinking notebook %s", old_path)
            os.unlink(old_path)
        # cleanup old script, if it exists
        if self.save_script:
            old_pypath = os.path.splitext(old_path)[0] + '.py'
            if os.path.isfile(old_pypath):
                self.log.debug("unlinking script %s", old_pypath)
                os.unlink(old_pypath)
        # rename checkpoints to follow file
        for cp in old_checkpoints:
            checkpoint_id = cp['checkpoint_id']
            old_cp_path = self.get_checkpoint_path_by_name(old_name, checkpoint_id)
            new_cp_path = self.get_checkpoint_path_by_name(new_name, checkpoint_id)
            if os.path.isfile(old_cp_path):
                self.log.debug("renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
                os.rename(old_cp_path, new_cp_path)
    return notebook_id

def delete_notebook(self, notebook_id):
    """Delete notebook by notebook_id.

    Removes the notebook's checkpoint files first, then the .ipynb itself,
    then drops the id from the mappings.
    """
    nb_path = self.get_path(notebook_id)
    if not os.path.isfile(nb_path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    # clear checkpoints
    for checkpoint in self.list_checkpoints(notebook_id):
        checkpoint_id = checkpoint['checkpoint_id']
        path = self.get_checkpoint_path(notebook_id, checkpoint_id)
        self.log.debug(path)
        if os.path.isfile(path):
            self.log.debug("unlinking checkpoint %s", path)
            os.unlink(path)
    self.log.debug("unlinking notebook %s", nb_path)
    os.unlink(nb_path)
    self.delete_notebook_id(notebook_id)
def increment_filename(self, basename):
    """Return a non-used filename of the form basename<int>.

    Probes basename0, basename1, ... until a name whose file does not
    exist is found; used to build unique Untitled/Copy names.
    """
    counter = 0
    while os.path.isfile(self.get_path_by_name(u'%s%i' % (basename, counter))):
        counter += 1
    return u'%s%i' % (basename, counter)
# Checkpoint-related utilities

def get_checkpoint_path_by_name(self, name, checkpoint_id):
    """Return a full path to a notebook checkpoint, given its name and checkpoint id."""
    # Checkpoint files live in checkpoint_dir as <name>-<checkpoint_id>.ipynb
    filename = u"%s-%s%s" % (name, checkpoint_id, self.filename_ext)
    return os.path.join(self.checkpoint_dir, filename)

def get_checkpoint_path(self, notebook_id, checkpoint_id):
    """find the path to a checkpoint"""
    # get_name raises 404 for unknown ids before we build the path.
    return self.get_checkpoint_path_by_name(self.get_name(notebook_id), checkpoint_id)

def get_checkpoint_info(self, notebook_id, checkpoint_id):
    """construct the info dict for a given checkpoint"""
    stats = os.stat(self.get_checkpoint_path(notebook_id, checkpoint_id))
    return dict(
        checkpoint_id=checkpoint_id,
        last_modified=tz.utcfromtimestamp(stats.st_mtime),
    )
# public checkpoint API
def create_checkpoint(self, notebook_id):
    """Create a checkpoint from the current state of a notebook"""
    nb_path = self.get_path(notebook_id)
    # only the one checkpoint ID:
    checkpoint_id = u"checkpoint"
    cp_path = self.get_checkpoint_path(notebook_id, checkpoint_id)
    self.log.debug("creating checkpoint for notebook %s", notebook_id)
    if not os.path.exists(self.checkpoint_dir):
        os.mkdir(self.checkpoint_dir)
    # copy2 preserves timestamps/permissions along with the content.
    shutil.copy2(nb_path, cp_path)
    # return the checkpoint info
    return self.get_checkpoint_info(notebook_id, checkpoint_id)

def list_checkpoints(self, notebook_id):
    """list the checkpoints for a given notebook
    This notebook manager currently only supports one checkpoint per notebook.
    """
    checkpoint_id = u"checkpoint"
    path = self.get_checkpoint_path(notebook_id, checkpoint_id)
    if not os.path.exists(path):
        return []
    else:
        return [self.get_checkpoint_info(notebook_id, checkpoint_id)]

def restore_checkpoint(self, notebook_id, checkpoint_id):
    """restore a notebook to a checkpointed state"""
    self.log.info("restoring Notebook %s from checkpoint %s", notebook_id, checkpoint_id)
    nb_path = self.get_path(notebook_id)
    cp_path = self.get_checkpoint_path(notebook_id, checkpoint_id)
    if not os.path.isfile(cp_path):
        self.log.debug("checkpoint file does not exist: %s", cp_path)
        raise web.HTTPError(404,
            u'Notebook checkpoint does not exist: %s-%s' % (notebook_id, checkpoint_id)
        )
    # ensure notebook is readable (never restore from an unreadable notebook)
    last_modified, nb = self.read_notebook_object_from_path(cp_path)
    shutil.copy2(cp_path, nb_path)
    self.log.debug("copying %s -> %s", cp_path, nb_path)

def delete_checkpoint(self, notebook_id, checkpoint_id):
    """delete a notebook's checkpoint"""
    path = self.get_checkpoint_path(notebook_id, checkpoint_id)
    if not os.path.isfile(path):
        raise web.HTTPError(404,
            u'Notebook checkpoint does not exist: %s-%s' % (notebook_id, checkpoint_id)
        )
    self.log.debug("unlinking %s", path)
    os.unlink(path)

def info_string(self):
    """One-line description of where notebooks are served from."""
    return "Serving notebooks from local directory: %s" % self.notebook_dir
| {
"repo_name": "marcoantoniooliveira/labweb",
"path": "oscar/lib/python2.7/site-packages/IPython/html/services/notebooks/filenbmanager.py",
"copies": "2",
"size": "14448",
"license": "bsd-3-clause",
"hash": 657297923716838400,
"line_mean": 39.2451253482,
"line_max": 101,
"alpha_frac": 0.5761351052,
"autogenerated": false,
"ratio": 4.1410146173688736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01680617382563731,
"num_lines": 359
} |
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
import shutil
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path
def sort_key(item):
    """Case-insensitive sorting."""
    # Models are dicts carrying a 'name' entry; sort on its lowercase form.
    name = item['name']
    return name.lower()
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
    """Notebook manager storing .ipynb files on the local filesystem,
    addressed by API-style (name, path) pairs."""

    # Optionally mirror each saved .ipynb with a .py script next to it.
    save_script = Bool(False, config=True,
        help="""Automatically create a Python script when saving the notebook.
        For easier use of import, %run and %load across notebooks, a
        <notebook-name>.py script will be created next to any
        <notebook-name>.ipynb on each save. This can also be set with the
        short `--script` flag.
        """
    )

    # Root directory of all served notebooks; defaults to the cwd.
    notebook_dir = Unicode(getcwd(), config=True)

    def _notebook_dir_changed(self, name, old, new):
        """Do a bit of validation of the notebook dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            self.notebook_dir = os.path.abspath(new)
            return
        if not os.path.exists(new) or not os.path.isdir(new):
            raise TraitError("notebook dir %r is not a directory" % new)

    checkpoint_dir = Unicode('.ipynb_checkpoints', config=True,
        help="""The directory name in which to keep notebook checkpoints
        This is a path relative to the notebook's own directory.
        By default, it is .ipynb_checkpoints
        """
    )
def _copy(self, src, dest):
"""copy src to dest
like shutil.copy2, but log errors in copystat
"""
shutil.copyfile(src, dest)
try:
shutil.copystat(src, dest)
except OSError as e:
self.log.debug("copystat on %s failed", dest, exc_info=True)
def get_notebook_names(self, path=''):
    """List all notebook names in the notebook dir and path."""
    path = path.strip('/')
    if not os.path.isdir(self._get_os_path(path=path)):
        raise web.HTTPError(404, 'Directory not found: ' + path)
    names = glob.glob(self._get_os_path('*'+self.filename_ext, path))
    # Unlike the older manager, names here keep their .ipynb extension.
    names = [os.path.basename(name)
             for name in names]
    return names

def path_exists(self, path):
    """Does the API-style path (directory) actually exist?

    Parameters
    ----------
    path : string
        The path to check. This is an API path (`/` separated,
        relative to base notebook-dir).

    Returns
    -------
    exists : bool
        Whether the path is indeed a directory.
    """
    path = path.strip('/')
    os_path = self._get_os_path(path=path)
    return os.path.isdir(os_path)

def is_hidden(self, path):
    """Does the API style path correspond to a hidden directory or file?

    Parameters
    ----------
    path : string
        The path to check. This is an API path (`/` separated,
        relative to base notebook-dir).

    Returns
    -------
    exists : bool
        Whether the path is hidden.
    """
    path = path.strip('/')
    os_path = self._get_os_path(path=path)
    return is_hidden(os_path, self.notebook_dir)

def _get_os_path(self, name=None, path=''):
    """Given a notebook name and a URL path, return its file system
    path.

    Parameters
    ----------
    name : string
        The name of a notebook file with the .ipynb extension
    path : string
        The relative URL path (with '/' as separator) to the named
        notebook.

    Returns
    -------
    path : string
        A file system path that combines notebook_dir (location where
        server started), the relative path, and the filename with the
        current operating system's url.
    """
    if name is not None:
        path = path + '/' + name
    return to_os_path(path, self.notebook_dir)

def notebook_exists(self, name, path=''):
    """Returns a True if the notebook exists. Else, returns False.

    Parameters
    ----------
    name : string
        The name of the notebook you are checking.
    path : string
        The relative path to the notebook (with '/' as separator)

    Returns
    -------
    bool
    """
    path = path.strip('/')
    nbpath = self._get_os_path(name, path=path)
    return os.path.isfile(nbpath)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def list_dirs(self, path):
    """List the directories for a given API style path."""
    path = path.strip('/')
    os_path = self._get_os_path('', path)
    if not os.path.isdir(os_path):
        raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
    elif is_hidden(os_path, self.notebook_dir):
        self.log.info("Refusing to serve hidden directory, via 404 Error")
        raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
    dir_names = os.listdir(os_path)
    dirs = []
    for name in dir_names:
        os_path = self._get_os_path(name, path)
        if os.path.isdir(os_path) and not is_hidden(os_path, self.notebook_dir)\
                and self.should_list(name):
            try:
                model = self.get_dir_model(name, path)
            except IOError:
                # Directory vanished between listdir and stat: skip it.
                # (The old code fell through to the append below, which
                # raised NameError on the first failure or duplicated the
                # previous iteration's model on later ones.)
                continue
            dirs.append(model)
    dirs = sorted(dirs, key=sort_key)
    return dirs

# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def get_dir_model(self, name, path=''):
    """Get the directory model given a directory name and its API style path"""
    path = path.strip('/')
    os_path = self._get_os_path(name, path)
    if not os.path.isdir(os_path):
        raise IOError('directory does not exist: %r' % os_path)
    info = os.stat(os_path)
    last_modified = tz.utcfromtimestamp(info.st_mtime)
    created = tz.utcfromtimestamp(info.st_ctime)
    # Create the directory model.
    model = {}
    model['name'] = name
    model['path'] = path
    model['last_modified'] = last_modified
    model['created'] = created
    model['type'] = 'directory'
    return model
def list_notebooks(self, path):
    """Returns a list of dictionaries that are the standard model
    for all notebooks in the relative 'path'.

    Parameters
    ----------
    path : str
        the URL path that describes the relative path for the
        listed notebooks

    Returns
    -------
    notebooks : list of dicts
        a list of the notebook models without 'content'
    """
    path = path.strip('/')
    notebook_names = self.get_notebook_names(path)
    notebooks = [self.get_notebook(name, path, content=False)
                 for name in notebook_names if self.should_list(name)]
    notebooks = sorted(notebooks, key=sort_key)
    return notebooks

def get_notebook(self, name, path='', content=True):
    """ Takes a path and name for a notebook and returns its model

    Parameters
    ----------
    name : str
        the name of the notebook
    path : str
        the URL path that describes the relative path for
        the notebook

    Returns
    -------
    model : dict
        the notebook model. If contents=True, returns the 'contents'
        dict in the model as well.
    """
    path = path.strip('/')
    if not self.notebook_exists(name=name, path=path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % name)
    os_path = self._get_os_path(name, path)
    info = os.stat(os_path)
    last_modified = tz.utcfromtimestamp(info.st_mtime)
    created = tz.utcfromtimestamp(info.st_ctime)
    # Create the notebook model.
    model ={}
    model['name'] = name
    model['path'] = path
    model['last_modified'] = last_modified
    model['created'] = created
    model['type'] = 'notebook'
    if content:
        with io.open(os_path, 'r', encoding='utf-8') as f:
            try:
                nb = current.read(f, u'json')
            except Exception as e:
                raise web.HTTPError(400, u"Unreadable Notebook: %s %s" % (os_path, e))
        # Mark trusted cells so the frontend can run them without prompting.
        self.mark_trusted_cells(nb, name, path)
        model['content'] = nb
    return model

def save_notebook(self, model, name='', path=''):
    """Save the notebook model and return the model with no content."""
    path = path.strip('/')
    if 'content' not in model:
        raise web.HTTPError(400, u'No notebook JSON data provided')
    # One checkpoint should always exist
    if self.notebook_exists(name, path) and not self.list_checkpoints(name, path):
        self.create_checkpoint(name, path)
    # A model may carry a new name/path, in which case the existing file
    # (and its checkpoints) are renamed before the new content is written.
    new_path = model.get('path', path).strip('/')
    new_name = model.get('name', name)
    if path != new_path or name != new_name:
        self.rename_notebook(name, path, new_name, new_path)
    # Save the notebook file
    os_path = self._get_os_path(new_name, new_path)
    nb = current.to_notebook_json(model['content'])
    self.check_and_sign(nb, new_name, new_path)
    # Blank the metadata name; the filename is authoritative.
    if 'name' in nb['metadata']:
        nb['metadata']['name'] = u''
    try:
        self.log.debug("Autosaving notebook %s", os_path)
        with io.open(os_path, 'w', encoding='utf-8') as f:
            current.write(nb, f, u'json')
    except Exception as e:
        raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s %s' % (os_path, e))
    # Save .py script as well
    if self.save_script:
        py_path = os.path.splitext(os_path)[0] + '.py'
        self.log.debug("Writing script %s", py_path)
        try:
            with io.open(py_path, 'w', encoding='utf-8') as f:
                current.write(nb, f, u'py')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s %s' % (py_path, e))
    model = self.get_notebook(new_name, new_path, content=False)
    return model

def update_notebook(self, model, name, path=''):
    """Update the notebook's path and/or name"""
    path = path.strip('/')
    new_name = model.get('name', name)
    new_path = model.get('path', path).strip('/')
    if path != new_path or name != new_name:
        self.rename_notebook(name, path, new_name, new_path)
    model = self.get_notebook(new_name, new_path, content=False)
    return model

def delete_notebook(self, name, path=''):
    """Delete notebook by name and path."""
    path = path.strip('/')
    os_path = self._get_os_path(name, path)
    if not os.path.isfile(os_path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)
    # clear checkpoints
    for checkpoint in self.list_checkpoints(name, path):
        checkpoint_id = checkpoint['id']
        cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
        if os.path.isfile(cp_path):
            self.log.debug("Unlinking checkpoint %s", cp_path)
            os.unlink(cp_path)
    self.log.debug("Unlinking notebook %s", os_path)
    os.unlink(os_path)
def rename_notebook(self, old_name, old_path, new_name, new_path):
    """Rename a notebook, moving its checkpoints and optional .py script."""
    old_path = old_path.strip('/')
    new_path = new_path.strip('/')
    if new_name == old_name and new_path == old_path:
        return
    new_os_path = self._get_os_path(new_name, new_path)
    old_os_path = self._get_os_path(old_name, old_path)
    # Should we proceed with the move?
    if os.path.isfile(new_os_path):
        raise web.HTTPError(409, u'Notebook with name already exists: %s' % new_os_path)
    if self.save_script:
        old_py_path = os.path.splitext(old_os_path)[0] + '.py'
        new_py_path = os.path.splitext(new_os_path)[0] + '.py'
        if os.path.isfile(new_py_path):
            raise web.HTTPError(409, u'Python script with name already exists: %s' % new_py_path)
    # Move the notebook file
    try:
        shutil.move(old_os_path, new_os_path)
    except Exception as e:
        raise web.HTTPError(500, u'Unknown error renaming notebook: %s %s' % (old_os_path, e))
    # Move the checkpoints
    old_checkpoints = self.list_checkpoints(old_name, old_path)
    for cp in old_checkpoints:
        checkpoint_id = cp['id']
        old_cp_path = self.get_checkpoint_path(checkpoint_id, old_name, old_path)
        new_cp_path = self.get_checkpoint_path(checkpoint_id, new_name, new_path)
        if os.path.isfile(old_cp_path):
            self.log.debug("Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
            shutil.move(old_cp_path, new_cp_path)
    # Move the .py script, if one was ever written.  (Previously this
    # moved unconditionally and raised if save_script was enabled after
    # the notebook's last save, when no script exists yet.)
    if self.save_script and os.path.isfile(old_py_path):
        shutil.move(old_py_path, new_py_path)
# Checkpoint-related utilities
def get_checkpoint_path(self, checkpoint_id, name, path=''):
"""find the path to a checkpoint"""
path = path.strip('/')
basename, _ = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=self.filename_ext,
)
os_path = self._get_os_path(path=path)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
if not os.path.exists(cp_dir):
os.mkdir(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
def get_checkpoint_model(self, checkpoint_id, name, path=''):
"""construct the info dict for a given checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
stats = os.stat(cp_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id = checkpoint_id,
last_modified = last_modified,
)
return info
# public checkpoint API
def create_checkpoint(self, name, path=''):
"""Create a checkpoint from the current state of a notebook"""
path = path.strip('/')
nb_path = self._get_os_path(name, path)
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
self.log.debug("creating checkpoint for notebook %s", name)
self._copy(nb_path, cp_path)
# return the checkpoint info
return self.get_checkpoint_model(checkpoint_id, name, path)
def list_checkpoints(self, name, path=''):
"""list the checkpoints for a given notebook
This notebook manager currently only supports one checkpoint per notebook.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.exists(os_path):
return []
else:
return [self.get_checkpoint_model(checkpoint_id, name, path)]
def restore_checkpoint(self, checkpoint_id, name, path=''):
    """Overwrite a notebook with the contents of one of its checkpoints."""
    path = path.strip('/')
    self.log.info("restoring Notebook %s from checkpoint %s", name, checkpoint_id)
    target = self._get_os_path(name, path)
    source = self.get_checkpoint_path(checkpoint_id, name, path)
    if not os.path.isfile(source):
        self.log.debug("checkpoint file does not exist: %s", source)
        raise web.HTTPError(
            404,
            u'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)
        )
    # Parse the checkpoint first: never restore from an unreadable file.
    with io.open(source, 'r', encoding='utf-8') as f:
        current.read(f, u'json')
    self._copy(source, target)
    self.log.debug("copying %s -> %s", source, target)
def delete_checkpoint(self, checkpoint_id, name, path=''):
    """Remove a notebook's checkpoint file; 404 if it does not exist."""
    path = path.strip('/')
    target = self.get_checkpoint_path(checkpoint_id, name, path)
    if not os.path.isfile(target):
        raise web.HTTPError(
            404,
            u'Notebook checkpoint does not exist: %s%s-%s' % (path, name, checkpoint_id)
        )
    self.log.debug("unlinking %s", target)
    os.unlink(target)
def info_string(self):
    """One-line startup banner describing where notebooks are served from."""
    return "Serving notebooks from local directory: {0}".format(self.notebook_dir)
| {
"repo_name": "poojavade/Genomics_Docker",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/html/services/notebooks/filenbmanager.py",
"copies": "1",
"size": "18231",
"license": "apache-2.0",
"hash": -4483239905273025000,
"line_mean": 36.98125,
"line_max": 115,
"alpha_frac": 0.5556469749,
"autogenerated": false,
"ratio": 4.095013477088949,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012688654214214641,
"num_lines": 480
} |
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
import shutil
import uuid
from tornado import web
from IPython.html.services.notebooks.nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path
from IPython.utils.tz import utcnow, tzUTC
# Object-store style metadata header names (presumably for a Swift /
# CloudFiles-backed bookstore variant -- TODO confirm; they are not
# referenced by the filesystem manager in this file).
METADATA_NBNAME = 'x-object-meta-nbname'
METADATA_CHK_ID = 'x-object-meta-checkpoint-id'
METADATA_LAST_MODIFIED = 'x-object-meta-nb-last-modified'
METADATA_NB_ID = 'x-object-meta-notebook-id'
# strftime format: locale time, then locale date.
DATE_FORMAT = "%X-%x"
# Error-message templates (str.format placeholders).
NB_DNEXIST_ERR = 'Notebook does not exist: {}'
NB_SAVE_UNK_ERR = 'Unexpected error while saving notebook: {}'
NB_DEL_UNK_ERR = 'Unexpected error while deleting notebook: {}'
CHK_SAVE_UNK_ERR = 'Unexpected error while saving checkpoint: {}'
# Maximum number of checkpoints reported by list_checkpoints().
MAX_HISTORY_SIZE = 15
def sort_key(item):
    """Sort key for model dicts: the 'name' field, lower-cased."""
    name = item['name']
    return name.lower()
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
    """Notebook manager backed by the local filesystem (bookstore variant)."""

    # User-Agent string reported by bookstore.
    user_agent = "bookstore v{version}".format(version='1.0.0')

    # Container name (object-store terminology) under which notebooks live.
    container_name = Unicode('notebooks', config=True,
        help='Container name for notebooks.')

    save_script = Bool(False, config=True,
        help="""Automatically create a Python script when saving the notebook.
        For easier use of import, %run and %load across notebooks, a
        <notebook-name>.py script will be created next to any
        <notebook-name>.ipynb on each save. This can also be set with the
        short `--script` flag.
        """
    )

    # Root directory served by this manager; defaults to the server's CWD.
    notebook_dir = Unicode(getcwd(), config=True)

    def _notebook_dir_changed(self, name, old, new):
        """Traitlets change handler: normalize and validate notebook_dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            self.notebook_dir = os.path.abspath(new)
            return
        if not os.path.exists(new) or not os.path.isdir(new):
            raise TraitError("notebook dir %r is not a directory" % new)

    checkpoint_dir = Unicode('.ipynb_checkpoints', config=True,
        help="""The directory name in which to keep notebook checkpoints
        This is a path relative to the notebook's own directory.
        By default, it is .ipynb_checkpoints
        """
    )
def _copy(self, src, dest):
    """Copy ``src`` to ``dest``.

    Like :func:`shutil.copy2`, except that a failure to copy file
    metadata (``copystat``) is logged at debug level instead of raised.
    """
    shutil.copyfile(src, dest)
    try:
        shutil.copystat(src, dest)
    except OSError:
        # Metadata copy is best-effort; some filesystems routinely
        # refuse copystat.
        self.log.debug("copystat on %s failed", dest, exc_info=True)
def get_notebook_names(self, path=''):
    """Return the basenames of all notebook files in directory ``path``."""
    path = path.strip('/')
    if not os.path.isdir(self._get_os_path(path=path)):
        raise web.HTTPError(404, 'Directory not found: ' + path)
    pattern = self._get_os_path('*' + self.filename_ext, path)
    return [os.path.basename(match) for match in glob.glob(pattern)]
def path_exists(self, path):
    """Does the API-style path (directory) actually exist?

    Parameters
    ----------
    path : string
        API path to check (`/`-separated, relative to the notebook dir).

    Returns
    -------
    bool
        Whether the path is indeed a directory.
    """
    return os.path.isdir(self._get_os_path(path=path.strip('/')))
def is_hidden(self, path):
    """Does the API-style path correspond to a hidden directory or file?

    Parameters
    ----------
    path : string
        API path to check (`/`-separated, relative to the notebook dir).

    Returns
    -------
    bool
        Whether the path is hidden.
    """
    os_path = self._get_os_path(path=path.strip('/'))
    return is_hidden(os_path, self.notebook_dir)
def _get_os_path(self, name=None, path=''):
    """Translate an API path (plus optional notebook name) into a
    filesystem path under ``notebook_dir``.

    Parameters
    ----------
    name : string or None
        Notebook filename (with extension), appended to ``path`` if given.
    path : string
        `/`-separated API path relative to ``notebook_dir``.

    Returns
    -------
    string
        A filesystem path in the current operating system's convention.
    """
    api_path = path if name is None else path + '/' + name
    return to_os_path(api_path, self.notebook_dir)
def notebook_exists(self, name, path=''):
    """Return True if notebook ``name`` exists at API path ``path``.

    Parameters
    ----------
    name : string
        The notebook filename being checked.
    path : string
        The relative path to the notebook (with '/' as separator).

    Returns
    -------
    bool
    """
    nb_path = self._get_os_path(name, path=path.strip('/'))
    return os.path.isfile(nb_path)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def list_dirs(self, path):
    """List the directory models for a given API style path.

    Hidden and unlistable entries are skipped; directories that cannot
    be stat'ed are skipped as well. Raises HTTPError(404) when ``path``
    itself does not exist or is hidden.
    """
    path = path.strip('/')
    os_path = self._get_os_path('', path)
    if not os.path.isdir(os_path):
        raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
    elif is_hidden(os_path, self.notebook_dir):
        self.log.info("Refusing to serve hidden directory, via 404 Error")
        raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
    dirs = []
    for name in os.listdir(os_path):
        entry_path = self._get_os_path(name, path)
        if not (os.path.isdir(entry_path)
                and not is_hidden(entry_path, self.notebook_dir)
                and self.should_list(name)):
            continue
        try:
            model = self.get_dir_model(name, path)
        except IOError:
            # BUG FIX: previously the append ran even after an IOError,
            # adding a stale (or undefined) model. Unreadable
            # directories are now skipped entirely.
            continue
        dirs.append(model)
    return sorted(dirs, key=sort_key)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def get_dir_model(self, name, path=''):
    """Build the directory model for ``name`` under API path ``path``.

    Raises IOError if the target is not an existing directory.
    """
    path = path.strip('/')
    os_path = self._get_os_path(name, path)
    if not os.path.isdir(os_path):
        raise IOError('directory does not exist: %r' % os_path)
    stat = os.stat(os_path)
    return {
        'name': name,
        'path': path,
        'last_modified': tz.utcfromtimestamp(stat.st_mtime),
        'created': tz.utcfromtimestamp(stat.st_ctime),
        'type': 'directory',
    }
def list_notebooks(self, path):
    """Return content-free notebook models for every notebook in ``path``.

    Parameters
    ----------
    path : str
        URL path describing the relative location of the notebooks.

    Returns
    -------
    list of dict
        Notebook models without a 'content' key, sorted by name.
    """
    path = path.strip('/')
    models = [
        self.get_notebook(name, path, content=False)
        for name in self.get_notebook_names(path)
        if self.should_list(name)
    ]
    return sorted(models, key=sort_key)
def get_notebook(self, name, path='', content=True):
    """Build the model for notebook ``name`` at API path ``path``.

    When ``content`` is True, the parsed notebook JSON is attached under
    the 'content' key (with trusted cells marked). Raises HTTPError(404)
    for a missing notebook and HTTPError(400) for an unreadable one.
    """
    path = path.strip('/')
    if not self.notebook_exists(name=name, path=path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % name)
    os_path = self._get_os_path(name, path)
    stat = os.stat(os_path)
    model = {
        'name': name,
        'path': path,
        'last_modified': tz.utcfromtimestamp(stat.st_mtime),
        'created': tz.utcfromtimestamp(stat.st_ctime),
        'type': 'notebook',
    }
    if content:
        with io.open(os_path, 'r', encoding='utf-8') as f:
            try:
                nb = current.read(f, u'json')
            except Exception as e:
                raise web.HTTPError(400, u"Unreadable Notebook: %s %s" % (os_path, e))
        self.mark_trusted_cells(nb, name, path)
        model['content'] = nb
    return model
def save_notebook(self, model, name='', path=''):
    """Save the notebook model and return the model with no content.

    Order of operations matters:
      1. ensure at least one checkpoint exists before overwriting,
      2. rename/move the notebook if the model asks for it,
      3. sign and write the notebook JSON,
      4. optionally mirror it to a .py script (``save_script`` trait).
    Raises HTTPError(400) on missing content or any write failure.
    """
    path = path.strip('/')
    if 'content' not in model:
        raise web.HTTPError(400, u'No notebook JSON data provided')
    # One checkpoint should always exist: snapshot the current file
    # before it is overwritten below.
    if self.notebook_exists(name, path) and not self.list_checkpoints(name, path):
        self.create_checkpoint(name, path)
    new_path = model.get('path', path).strip('/')
    new_name = model.get('name', name)
    if path != new_path or name != new_name:
        self.rename_notebook(name, path, new_name, new_path)
    # Save the notebook file
    os_path = self._get_os_path(new_name, new_path)
    nb = current.to_notebook_json(model['content'])
    self.check_and_sign(nb, new_name, new_path)
    # The filename, not the embedded metadata, is authoritative for the
    # notebook's name, so blank out any stored name.
    if 'name' in nb['metadata']:
        nb['metadata']['name'] = u''
    try:
        self.log.debug("Autosaving notebook %s", os_path)
        with io.open(os_path, 'w', encoding='utf-8') as f:
            current.write(nb, f, u'json')
    except Exception as e:
        raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s %s' % (os_path, e))
    # Save .py script as well
    if self.save_script:
        py_path = os.path.splitext(os_path)[0] + '.py'
        self.log.debug("Writing script %s", py_path)
        try:
            with io.open(py_path, 'w', encoding='utf-8') as f:
                current.write(nb, f, u'py')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s %s' % (py_path, e))
    model = self.get_notebook(new_name, new_path, content=False)
    return model
def update_notebook(self, model, name, path=''):
    """Rename/move a notebook per ``model``; return its content-free model."""
    path = path.strip('/')
    new_name = model.get('name', name)
    new_path = model.get('path', path).strip('/')
    if (name, path) != (new_name, new_path):
        self.rename_notebook(name, path, new_name, new_path)
    return self.get_notebook(new_name, new_path, content=False)
def delete_notebook(self, name, path=''):
    """Delete notebook ``name`` at ``path``, along with its checkpoints."""
    path = path.strip('/')
    os_path = self._get_os_path(name, path)
    if not os.path.isfile(os_path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)
    # Remove every checkpoint belonging to this notebook first.
    for checkpoint in self.list_checkpoints(name, path):
        cp_path = self.get_checkpoint_path(checkpoint['id'], name, path)
        if os.path.isfile(cp_path):
            self.log.debug("Unlinking checkpoint %s", cp_path)
            os.unlink(cp_path)
    self.log.debug("Unlinking notebook %s", os_path)
    os.unlink(os_path)
def rename_notebook(self, old_name, old_path, new_name, new_path):
    """Rename a notebook.

    Moves the notebook file, then all of its checkpoints, then the
    companion .py script (when ``save_script`` is enabled). Raises
    HTTPError(409) when the target notebook or script already exists.
    """
    old_path = old_path.strip('/')
    new_path = new_path.strip('/')
    if new_name == old_name and new_path == old_path:
        return
    new_os_path = self._get_os_path(new_name, new_path)
    old_os_path = self._get_os_path(old_name, old_path)
    # Should we proceed with the move?
    if os.path.isfile(new_os_path):
        raise web.HTTPError(409, u'Notebook with name already exists: %s' % new_os_path)
    if self.save_script:
        old_py_path = os.path.splitext(old_os_path)[0] + '.py'
        new_py_path = os.path.splitext(new_os_path)[0] + '.py'
        if os.path.isfile(new_py_path):
            raise web.HTTPError(409, u'Python script with name already exists: %s' % new_py_path)
    # Move the notebook file
    try:
        shutil.move(old_os_path, new_os_path)
    except Exception as e:
        raise web.HTTPError(500, u'Unknown error renaming notebook: %s %s' % (old_os_path, e))
    # Move the checkpoints
    old_checkpoints = self.list_checkpoints(old_name, old_path)
    for cp in old_checkpoints:
        checkpoint_id = cp['id']
        old_cp_path = self.get_checkpoint_path(checkpoint_id, old_name, old_path)
        new_cp_path = self.get_checkpoint_path(checkpoint_id, new_name, new_path)
        if os.path.isfile(old_cp_path):
            self.log.debug("Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
            shutil.move(old_cp_path, new_cp_path)
    # Move the .py script
    # NOTE(review): assumes the old .py exists whenever save_script is
    # on; shutil.move raises if it was never written -- verify callers.
    if self.save_script:
        shutil.move(old_py_path, new_py_path)
# Checkpoint-related utilities
def get_checkpoints_home(self, path=''):
    """Return (creating on first use) the checkpoint directory for ``path``."""
    base = self._get_os_path(path=path.strip('/'))
    cp_home = os.path.join(base, self.checkpoint_dir)
    if not os.path.exists(cp_home):
        os.mkdir(cp_home)
    return cp_home
def get_checkpoint_path(self, checkpoint_id, name, path=''):
    """Return the filesystem path of checkpoint ``checkpoint_id`` for
    notebook ``name``; creates the checkpoint directory on first use."""
    path = path.strip('/')
    basename, _ = os.path.splitext(name)
    # Checkpoint files are named "<basename>-<checkpoint_id><ext>".
    filename = u"{name}-{checkpoint_id}{ext}".format(
        name=basename,
        checkpoint_id=checkpoint_id,
        ext=self.filename_ext,
    )
    os_path = self._get_os_path(path=path)
    cp_dir = os.path.join(os_path, self.checkpoint_dir)
    if not os.path.exists(cp_dir):
        os.mkdir(cp_dir)
    cp_path = os.path.join(cp_dir, filename)
    return cp_path
def get_checkpoint_model(self, checkpoint_id, name, path=''):
    """Build the model dict (``id``, ``last_modified``) for one checkpoint."""
    path = path.strip('/')
    cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
    stats = os.stat(cp_path)
    # Modification time of the checkpoint file, as an aware UTC datetime.
    last_modified = tz.utcfromtimestamp(stats.st_mtime)
    info = dict(
        id = checkpoint_id,
        last_modified = last_modified,
    )
    return info
# public checkpoint API
def new_checkpoint_id(self):
    """Generate a fresh, unique checkpoint id (a random UUID4 string).

    Formats via ``u''.format`` instead of the builtin ``unicode`` so the
    code also runs on Python 3, where ``unicode`` no longer exists; the
    result is identical on Python 2.
    """
    return u'{0}'.format(uuid.uuid4())
def create_checkpoint(self, name, path=''):
    """Create a new checkpoint from the current state of a notebook.

    Returns the model dict of the checkpoint just created. Unlike the
    stock IPython manager, each call produces a new UUID-named
    checkpoint rather than overwriting a single one.
    """
    path = path.strip('/')
    nb_path = self._get_os_path(name, path)
    checkpoint_id = self.new_checkpoint_id()
    # get_checkpoint_path() creates the notebook's checkpoint directory
    # on demand. The old `os.mkdir(self.checkpoint_dir)` call here was
    # a bug: checkpoint_dir is a bare directory *name*, so the mkdir
    # happened relative to the server's CWD instead of next to the
    # notebook.
    cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
    self.log.debug("creating checkpoint for notebook %s", name)
    self._copy(nb_path, cp_path)
    # return the checkpoint info
    return self.get_checkpoint_model(checkpoint_id, name, path)
def list_checkpoints(self, name, path=''):
    """List the checkpoints for a given notebook, newest first.

    At most MAX_HISTORY_SIZE checkpoints are reported; older ones are
    ignored (but left on disk).
    """
    path = path.strip('/')
    stem, _ = os.path.splitext(name)
    # get_checkpoints_home() also creates the checkpoint dir on demand.
    cp_home = self.get_checkpoints_home(path)
    pattern = os.path.join(cp_home, stem + '-*' + self.filename_ext)
    cp_files = glob.glob(pattern)
    cp_files.sort(key=os.path.getmtime, reverse=True)  # newest first
    models = []
    # Truncate before stat-ing so we never build models we then discard.
    for cp_file in cp_files[:MAX_HISTORY_SIZE]:
        cp_stem, _ = os.path.splitext(os.path.basename(cp_file))
        # Checkpoint files are named "<stem>-<checkpoint_id><ext>";
        # strip the "<stem>-" prefix from the *basename* to recover the
        # id. (The old code split the full path on "<stem>-", which
        # broke whenever the directory path contained that substring.)
        checkpoint_id = cp_stem[len(stem) + 1:]
        models.append(self.get_checkpoint_model(checkpoint_id, name, path))
    return models
def restore_checkpoint(self, checkpoint_id, name, path=''):
    """Overwrite notebook ``name`` with checkpoint ``checkpoint_id``.

    Raises HTTPError(404) when the checkpoint file is missing.
    """
    path = path.strip('/')
    self.log.info("restoring Notebook %s from checkpoint %s", name, checkpoint_id)
    nb_path = self._get_os_path(name, path)
    cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
    if not os.path.isfile(cp_path):
        self.log.debug("checkpoint file does not exist: %s", cp_path)
        raise web.HTTPError(404,
            u'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)
        )
    # ensure notebook is readable (never restore from an unreadable notebook)
    with io.open(cp_path, 'r', encoding='utf-8') as f:
        current.read(f, u'json')
    self._copy(cp_path, nb_path)
    self.log.debug("copying %s -> %s", cp_path, nb_path)
def delete_checkpoint(self, checkpoint_id, name, path=''):
    """Delete one checkpoint file; HTTPError(404) if it does not exist."""
    path = path.strip('/')
    cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
    if not os.path.isfile(cp_path):
        raise web.HTTPError(404,
            u'Notebook checkpoint does not exist: %s%s-%s' % (path, name, checkpoint_id)
        )
    self.log.debug("unlinking %s", cp_path)
    os.unlink(cp_path)
def info_string(self):
    """One-line startup banner describing where notebooks are served from."""
    return "Serving notebooks from local directory: {0}".format(self.notebook_dir)
| {
"repo_name": "wusung/ipython-notebook-store",
"path": "bookstore/filenotebookmanager.py",
"copies": "1",
"size": "20614",
"license": "apache-2.0",
"hash": 6155367249229893000,
"line_mean": 37.3396946565,
"line_max": 115,
"alpha_frac": 0.5489473174,
"autogenerated": false,
"ratio": 4.096581875993641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145529193393641,
"avg_score": null,
"num_lines": null
} |
## another case of deja-vu
## this time, we want the slashdot style (what Yahoo said to do) only allow
## certain tags... we'll make it an option
## we'll have to tie this in some way to our HTML body displayer...
##
## Ok, there are basically four types of tags:
## 1) safe - ie, <b>, <i>, etc.
## 2) render problems - <table><form><body><frame> - these we either strip,
## or we have to ensure they match
## 3) definitely evil independent tags that we always strip
## 4) definitely evil tags which denote a region, we strip the entire region
from PassSGMLParser import PassSGMLParser
from urllib import basejoin
import string, sys
import neo_cgi
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
class SafeHtml (PassSGMLParser):
    """SGML parser that re-emits only HTML considered safe (Python 2).

    Tags fall into four buckets (see the module comments above):
      _safeTags  -- always passed through (after attribute cleanup)
      _matchTags -- passed through, but open/close counts are tracked so
                    unbalanced tags can be closed at the end of input
      _skipTags  -- the tag itself is dropped, its contents are kept
      _stripTags -- the tag AND everything inside it are dropped
    """
    _safeTags = {"P":1, "LI":1, "DD":1, "DT":1, "EM":1, "BR":1, "CITE":1,
                 "DFN":1, "Q":1, "STRONG":1, "IMG":1, "HR":1,
                 "TR":1, "TD":1, "TH":1, "CAPTION":1, "THEAD":1, "TFOOT":1,
                 "TBODY":1}
    _matchTags = {"TABLE":1, "OL":1, "UL":1, "DL":1, "CENTER":1, "DIV":1, "PRE":1,
                  "SUB":1, "SUP":1, "BIG":1, "SMALL":1, "CODE":1,
                  "B":1, "I":1, "A":1, "TT":1, "BLOCKQUOTE":1, "U":1,
                  "H1":1, "H2":1, "H3":1, "H4":1, "H5":1, "H6":1, "FONT":1}
    _skipTags = {"FORM":1, "HTML":1, "BODY":1, "EMBED":1, "AREA":1, "MAP":1,
                 "FRAME":1, "FRAMESET":1, "IFRAME":1, "META":1}
    _stripTags = {"HEAD":1, "JAVA":1, "APPLET":1, "OBJECT":1,
                  "JAVASCRIPT":1, "LAYER":1, "STYLE":1, "SCRIPT":1}
    def __init__ (self, fp, extra_safe=1, base=None, map_urls=None, new_window=1):
        # extra_safe: when true, unknown tags are dropped instead of passed.
        self._extra_safe = extra_safe
        PassSGMLParser.__init__ (self, fp, extra_safe)
        self._matchDict = {}    # open-count per _matchTags tag, for re-balancing
        self._stripping = 0     # > 0 while inside a _stripTags region
        self._base = base       # base URL for resolving relative links
        self._map_urls = map_urls  # mapping for "cid:" URLs (e.g. inline mail parts)
        self._new_window = new_window  # force links to open in a new window
    def safe_start_strip (self):
        # Enter a stripped region; flush pending output first so nothing
        # written before the strip started is lost.
        if self._stripping == 0:
            self.flush()
        self._stripping = self._stripping + 1
    def safe_end_strip (self):
        # Leave a stripped region; clamp at zero for unbalanced input.
        self.flush()
        self._stripping = self._stripping - 1
        if self._stripping < 0: self._stripping = 0
    def write (self, data):
        # Output is suppressed entirely while inside a stripped region.
        if self._stripping == 0:
            PassSGMLParser.write(self, data)
    def cleanup_attrs (self, tag, attrs):
        """Sanitize a tag's attribute list: drop JS hooks, rewrite URLs."""
        new_attrs = []
        tag = string.lower(tag)
        if self._new_window and tag == "a":
            new_attrs.append(('target', '_blank'))
        for name, value in attrs:
            name = string.lower(name)
            if name[:2] == "on": continue ## skip any javascript events
            if string.lower(value)[:11] == "javascript:": continue
            if self._map_urls and name in ["action", "href", "src", "lowsrc", "background"] and value[:4] == 'cid:':
                try:
                    value = self._map_urls[value[4:]]
                except KeyError:
                    pass
            else:
                # Resolve relative URLs against the document base, if any.
                if self._base and name in ["action", "href", "src", "lowsrc", "background"]:
                    value = basejoin (self._base, value)
            # Route all outbound URLs through Google's redirector.
            if name in ["action", "href", "src", "lowsrc", "background"]:
                value = 'http://www.google.com/url?sa=D&q=%s' % (neo_cgi.urlEscape(value))
            # An explicit target would override the '_blank' added above.
            if self._new_window and tag == "a" and name == "target": continue
            new_attrs.append ((name, value))
        return new_attrs
    def unknown_starttag(self, tag, attrs):
        tag = string.upper(tag)
        if SafeHtml._stripTags.has_key(tag):
            self.safe_start_strip()
        elif SafeHtml._skipTags.has_key(tag):
            # Drop the tag itself but keep its contents.
            pass
        elif SafeHtml._matchTags.has_key(tag):
            # Track nesting depth so close() can re-balance the output.
            if self._matchDict.has_key(tag):
                self._matchDict[tag] = self._matchDict[tag] + 1
            else:
                self._matchDict[tag] = 1
            self.write_starttag (tag, self.cleanup_attrs(tag, attrs))
        elif SafeHtml._safeTags.has_key(tag):
            self.write_starttag (tag, self.cleanup_attrs(tag, attrs))
        elif not self._extra_safe:
            # Lenient mode: pass unknown tags through (sanitized).
            self.write_starttag (tag, self.cleanup_attrs(tag, attrs))
    def unknown_endtag(self, tag):
        tag = string.upper(tag)
        if SafeHtml._stripTags.has_key(tag):
            self.safe_end_strip()
        elif self._stripping == 0:
            if SafeHtml._skipTags.has_key(tag):
                pass
            elif SafeHtml._matchTags.has_key(tag):
                if self._matchDict.has_key(tag):
                    self._matchDict[tag] = self._matchDict[tag] - 1
                self.write_endtag (tag)
            elif SafeHtml._safeTags.has_key(tag):
                self.write_endtag (tag)
            elif not self._extra_safe:
                self.write_endtag (tag)
    def close (self):
        # Emit closing tags for anything left open, so the sanitized
        # fragment cannot leak unbalanced markup into the host page.
        self._stripping = 0
        for tag in self._matchDict.keys():
            if self._matchDict[tag] > 0:
                for x in range (self._matchDict[tag]):
                    self.write_endtag(tag)
        PassSGMLParser.close(self)
def SafeHtmlString (s, really_safe=1, map_urls=None):
    """Filter the HTML string ``s`` through SafeHtml and return the result."""
    out = StringIO()
    html_filter = SafeHtml(out, really_safe, map_urls=map_urls)
    html_filter.feed(s)
    html_filter.close()
    return out.getvalue()
| {
"repo_name": "WillYee/clearsilver",
"path": "python/examples/base/SafeHtml.py",
"copies": "11",
"size": "5434",
"license": "bsd-2-clause",
"hash": 5203651883341068000,
"line_mean": 36.475862069,
"line_max": 110,
"alpha_frac": 0.5875966139,
"autogenerated": false,
"ratio": 3.0022099447513813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03175139865940524,
"num_lines": 145
} |
# another comment
from indentml.parser import QqTag
class QqXMLFormatter(object):
    """Format a QqTag tree into LaTeX-flavoured text.

    NOTE(review): despite the name, the handlers emit LaTeX environments
    (``\\begin{...}``/``\\end{...}``), not XML -- presumably the indentml
    tags are being lowered to LaTeX here; confirm against qqmbr docs.
    """
    def __init__(self, root: QqTag=None, allowed_tags=None):
        # Root of the tag tree to format; may be supplied later.
        self.root = root
        self.allowed_tags = allowed_tags or set()
        # LaTeX environments that are auto-numbered, mapped to their
        # capitalized display names (unused in the handlers below).
        self.enumerateable_envs = {name: name.capitalize() for name in ['remark', 'theorem', 'example', 'exercise',
                                                                        'definition', 'proposition', 'lemma',
                                                                        'question', 'corollary']}
    def uses_tags(self):
        # The set of tag names this formatter is allowed to handle.
        return self.allowed_tags
    def format(self, content) -> str:
        """
        Concatenate the formatted text of all children of ``content``.

        :param content: a QqTag or any iterable of QqTags/strings;
            None yields the empty string
        :return: str: text of tag
        """
        if content is None:
            return ""
        out = []
        for child in content:
            # Plain strings pass through unchanged; tags are dispatched.
            if isinstance(child, str):
                out.append(child)
            else:
                out.append(self.handle(child))
        return "".join(out)
    def handle(self, tag):
        """Dispatch ``tag`` to handle_<name> if available, else handle_dummy."""
        handlers_available = ['h1', 'h2', 'eq', 'paragraph']
        name = tag.name
        default_handler = 'handle_dummy'
        if name in handlers_available:
            return getattr(self, 'handle_'+name)(tag)
        elif hasattr(self, default_handler):
            return getattr(self, default_handler)(tag)
        else:
            # Unreachable while handle_dummy exists; kept as safe default.
            return ""
    def handle_dummy(self, tag): # The original 'handle' function we wrote on December 1 (almost)
        """
        Fallback handler: wrap the tag's content in a LaTeX environment
        named after the tag itself.

        Parameters
        ----------
        tag: QqTag

        Returns "\\begin{tag name} ... \\end{tag name}"
        """
        name = tag.name
        return """
\\begin{{{name}}}
{content}
\end{{{name}}}
""".format(name=name, content=self.format(tag))
    def handle_h1(self, tag): # h1 = chapter
        """
        Wrap the tag content in a LaTeX ``chapter`` environment.

        Parameters
        ----------
        tag: QqTag
        """
        return """
\\begin{{{name}}}
{content}
\end{{{name}}}
""".format(name="chapter", content=self.format(tag))
    def handle_h2(self, tag): # h2 = section
        """
        Wrap the tag content in a LaTeX ``section`` environment.

        Parameters
        ----------
        tag: QqTag
        """
        return """
\\begin{{{name}}}
{content}
\end{{{name}}}
""".format(name="section", content=self.format(tag))
    def handle_paragraph(self, tag): #paragraph = subsection
        """
        Wrap the tag content in a ``subsection``, carrying a \\label over
        when one is present.

        NOTE(review): the author flagged the label branch as broken;
        QqTag.find/split usage below needs verifying against indentml.
        """
        if tag.find('\label'): #this does not work, but have a look
            print(tag)  # leftover debugging output
            label = tag.split('\label ')[-1]
            return """
\\begin{{{name}}} \label{{{label}}}
{content}
\end{{{name}}}
""".format(name="subsection", content=self.format(tag), label=label)
        else:
            return """
\\begin{{{name}}}
{content}
\end{{{name}}}
""".format(name="subsection", content=self.format(tag))
    def handle_eq(self, tag: QqTag) -> str:
        """
        eq tag corresponds to \\[ \\] or $$ $$ display formula without number.

        Example:
            \\eq
                x^2 + y^2 = z^2

        :param tag:
        :return:
            $$x^2 + y^2 = z^2$$
        """
        return """
$${content}$$
""".format(content=self.format(tag))
    def do_format(self):
        # Entry point: format the whole tree from the stored root.
        return self.format(self.root)
| {
"repo_name": "ischurov/qqmbr",
"path": "qqmbr/qqxml.py",
"copies": "1",
"size": "3741",
"license": "mit",
"hash": -164475809783462700,
"line_mean": 24.4489795918,
"line_max": 115,
"alpha_frac": 0.4774124566,
"autogenerated": false,
"ratio": 4.048701298701299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5026113755301299,
"avg_score": null,
"num_lines": null
} |
# another compareg: https://softwarerecs.stackexchange.com/questions/18134/python-library-for-taking-camera-images
# Timestamp: http://startgrid.blogspot.com/2012/08/tutorial-creating-timestamp-on.html
# short note on camera software
# started with opencv but it is such a large library
# used fswebcam for a while but it produced intermitant black frames. However it had build in boarders
# now using v4lctl for taking pictures and imagemagick for banners
import time
import datetime
import wget
import os
from subprocess import call
#from my_globals import settings #import settings from my_globals
import my_globals
import logging
def remove_image(filename):
    """Best-effort deletion of ``filename``; a missing file is not an error."""
    try:
        os.remove(filename)
    except OSError:
        # Nothing to remove (or not removable) -- fine either way.
        pass
def get_cat_picture(filename):
    """Download a stand-in webcam frame to ``filename``.

    Used when FAKEWEBCAM is enabled: fetches a public NYC traffic-cam
    image instead of reading the local camera. Any existing file at
    ``filename`` is removed first.
    """
    url = "http://207.251.86.238/cctv448.jpg" # NY trafic cam
    remove_image(filename)
    # bar=None silences wget's console progress bar; the return value
    # (the output path) was previously bound to an unused variable.
    wget.download(url, out=filename, bar=None)
def get_Picture():
    """Capture a webcam image (or download a fake one) and stamp a banner on it.

    The image is written to settings['storage_dir'] + settings['img_name'],
    then ImageMagick's `convert` splices a timestamp banner onto the top.
    """
    logging.info (" --- Getting picture ------------------")
    filename = my_globals.settings["storage_dir"] + my_globals.settings["img_name"] # full path to image
    logging.debug("Img Filename: "+ filename)
    remove_image(filename)
    if my_globals.FAKEWEBCAM == 1: # get fake picture
        get_cat_picture(filename)
    else: # get picture from webcam
        # v4lctl grabs a single jpeg frame from the camera.
        # (fswebcam was used previously but produced intermittent black
        # frames -- see module header.)
        device = "/dev/video0"
        resolution = "1280x720"
        call(["v4lctl", "-c", device, "snap", "jpeg", resolution, filename])
    # Overlay a timestamp banner with ImageMagick.
    # BUG FIX 1: the banner claimed UTC but used the local clock; use
    # utcnow() so the label is honest.
    # BUG FIX 2: the command was previously assembled as a single shell
    # string (shell=True), so unusual characters in `filename` could be
    # interpreted by the shell. An argument list avoids both the
    # injection risk and the manual quoting of the banner text.
    text = "Smartsettia - %s UTC" % datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
    call(["convert", filename,
          "-gravity", "North", "-background", "YellowGreen",
          "-splice", "0x18", "-annotate", "+0+2", text,
          filename])
"repo_name": "uidaho/smartsettia-unit",
"path": "script/webcam.py",
"copies": "1",
"size": "3171",
"license": "mit",
"hash": -3630195178673902000,
"line_mean": 43.676056338,
"line_max": 121,
"alpha_frac": 0.6278776411,
"autogenerated": false,
"ratio": 3.6075085324232083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47353861735232083,
"avg_score": null,
"num_lines": null
} |
# another cool data type in python is the tuple
# tuples can contain everything, even strings and other tuples
my_tuple = (1, 2, 3)
print my_tuple
print type(my_tuple)
# we said everything
another_tuple = (1, 2, 1.234, 'Hey')
print another_tuple
print type(another_tuple)
# Mmmm, EVERYTHING, I really mean it
amazing_tuple = (1, 2, 1.23456, 'a string', 'another string', (3.2, 3.3))
print amazing_tuple
# see? that tuple contains another tuple
# tuples are a cool way to store your data.
# but, bear in mind, once you define a tuple, you cannot alter a
# specific part of it. Of course, you are free to replace the whole
# variable with another one, but, while we are talking about tuples,
# we can't change anything inside one.
# let's start over, we have a simple tuple like this:
a = (1, 2, 3, 4, 5, 6)
# ok!
# First, I want to make sure how many elements it has.
# it's 6 of course, but I need a python function to tell it to me,
# right?
# yup! and that function is len(). len() returns the length of a tuple.
# it also works for other data types, but here, we only want to get the
# length of a tuple
print len(a) # return: 6
# What if I want to know what the 4th item of my tuple is?
# simple, we use indices
print a[3]
# did I make a mistake? probably not, because it shows the true value of
# the 4th item. So, what is wrong with that [3] index?
# Mmmm, it's a little tricky. In python, indices start from zero.
# here is our tuple, and the next line shows its index numbers
#
# a = ( 1 , 2 , 3 , 4 , 5 , 6 )
# 0 1 2 3 4 5
#
# There's another way to get access to tuple elements, we can use
# negative indices:
#
#
# a = ( 1 , 2 , 3 , 4 , 5 , 6 )
# -6 -5 -4 -3 -2 -1
# so, these two lines produce the same result:
print "reverse indexing"
print a[3]
print a[-4]
# Lists
# lists are similar to tuples, but you can change their values:
# they are mutable, NOT immutable like tuples
a_list = [1, 2, 3, 4]
print a_list
a_list[2] = 300
print a_list
# while using lists, lots of methods are available
# for instance, we can append new elements to a list
a_list.append(10)
# now, a_list is [1, 2, 300, 4, 10]
# or remove elements with pop
a_list.pop(2)
# and so forth :)
| {
"repo_name": "iamvee/Python-Course",
"path": "Topics/00.Data_Types/03.tuples_and_lists.py",
"copies": "1",
"size": "2237",
"license": "mit",
"hash": -7777209009923675000,
"line_mean": 25.630952381,
"line_max": 73,
"alpha_frac": 0.6504246759,
"autogenerated": false,
"ratio": 2.8753213367609254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4025746012660925,
"avg_score": null,
"num_lines": null
} |
"""Another example on how to modify the Seamless environment,
adding Go support via "go build"
"""
import traceback
from seamless.highlevel import Context, Cell, Transformer
ctx = Context()
env = ctx.environment
# Define a transformer with signature
# (a: int, b: int) -> int
# and a=13, b=16
ctx.tf = Transformer()
ctx.tf.a = 13
ctx.tf.b = 16
ctx.compute()
ctx.tf.example.a = 0
ctx.tf.example.b = 0
ctx.tf.result.example = 0
print(ctx.tf.schema)
print(ctx.tf.result.schema)
print(ctx.tf.a.value)
# Set the language as Go
# => fail, unknown language
try:
ctx.tf.language = "go"
except KeyError as exc:
traceback.print_exc(limit=0)
print()
# Have a look how languages are defined...
languages = env.get_languages("cson")
print("\n".join(languages.splitlines()[:10]))
print()
# Create a new language "go"
languages = env.get_languages("plain")
languages["go"] = {
"extension": "go",
"mode": "compiled",
}
env.set_languages(languages, "plain")
# Set the language as Go => success
ctx.tf.language = "go"
# Generate C header
ctx.compute()
print(ctx.tf.header.value)
print()
# Write some Go code
# Go can directly use C header declarations
# Here, we make deliberately a mistake, and the compiler will catch it
ctx.tf.code = """
package main
/*
int transform(int a, int b, int *result); //copied from ctx.tf.header.value
*/
import "C"
//export transform
func transform(a C.int, b C.int) C.int { //wrong signature! will give compiler error
return a + b + 2000
}
func main(){}
"""
ctx.compute()
# ... but first, we get a complaint that there is no Go compiler
print(ctx.tf.exception)
print()
# Have a look how compilers are defined...
compilers = env.get_compilers("cson")
print("\n".join(compilers.splitlines()[:20]))
print()
# Set up "go build" as the Go compiler
# "go build" will produce a single archive (.a file)
# for the entire package (all .go files).
# Therefore, the compiler mode is "package"
languages = env.get_languages("plain")
languages["go"] = {
"extension": "go",
"mode": "compiled",
"compiler": "go build"
}
env.set_languages(languages, "plain")
compilers = env.get_compilers("plain")
compilers["go build"] = {
"mode": "package",
"options": ["-buildmode=c-archive"],
"debug_options": ["-buildmode=c-archive", '-gcflags "all=-N -l"'],
"profile_options": [],
"public_options": [],
"compile_flag": "",
"output_flag": "-o",
}
env.set_compilers(compilers, "plain")
ctx.translate()
ctx.tf.clear_exception()
ctx.compute()
# Now we get either:
# 1. The compiler error mentioned above (conflicting types for "transform")
# or:
# 2. A complaint that 'go' is not available
print(ctx.tf.exception)
ctx.tf.clear_exception()
print()
# This will give a much nicer error for case 2.
# as Seamless will now refuse to translate
env.set_which(["go"], "plain")
try:
ctx.translate()
except ValueError:
traceback.print_exc(limit=0)
exit(0)
# If we get here, then there is a working compiler
# all we need to do is fix the function signature
ctx.tf.code = """
package main
/*
int transform(int a, int b, int *result); //copied from ctx.tf.header.value
*/
import "C"
//export transform
func transform(a C.int, b C.int, result *C.int) C.int { //correct
*result = a + b + 2000
return 0
}
func main(){}
"""
ctx.compute()
print(ctx.tf.exception) # None
print(ctx.tf.result.value) # 2029
ctx.tf.a = 80
ctx.compute()
print(ctx.tf.result.value) # 2096
# We can even launch debugging
# GDB is not very good, but other debuggers (Delve) exist
# TODO: source file mapping (Seamless only does this for gcc compilers)
ctx.tf.debug = True
ctx.tf.a = 18
ctx.compute()
print(ctx.tf.result.value)
| {
"repo_name": "sjdv1982/seamless",
"path": "tests/highlevel/environment2.py",
"copies": "1",
"size": "3694",
"license": "mit",
"hash": -1438550479872588300,
"line_mean": 22.2327044025,
"line_max": 85,
"alpha_frac": 0.6786681104,
"autogenerated": false,
"ratio": 3.088628762541806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4267296872941806,
"avg_score": null,
"num_lines": null
} |
# Another spell checker
import re
import collections
class SpellChecker(object):
    """ Norvig-style statistical spell checker.

    Word frequencies are learned from a plain-text corpus; candidate
    corrections are generated at edit distance 1 and 2 and ranked by
    corpus frequency.
    """

    def __init__(self, corpus_path='big.txt'):
        """ Train the frequency model.

        @param corpus_path  Plain-text training corpus. Defaults to
                            'big.txt' (the previously hard-coded path),
                            so existing callers are unaffected.
        """
        print('[...] Initializing spell checker')
        # Every word gets a default count of 1 (add-one smoothing), so an
        # unseen candidate is never ranked with a zero count.
        model = collections.defaultdict(lambda: 1)
        with open(corpus_path, 'r') as f:
            for word in re.findall('[a-z]+', f.read().lower()):
                model[word] += 1
        self.NWORDS = model
        self.alphabet = 'abcdefghijklmnopqrstuvwxyz'

    def edits1(self, word):
        """ Return the set of all strings at edit distance 1 from *word*
        (deletes, adjacent transposes, replaces, and inserts). """
        s = [(word[:i], word[i:]) for i in range(len(word) + 1)]
        deletes = [a + b[1:] for a, b in s if b]
        transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b) > 1]
        replaces = [a + c + b[1:] for a, b in s for c in self.alphabet if b]
        inserts = [a + c + b for a, b in s for c in self.alphabet]
        return set(deletes + transposes + replaces + inserts)

    def known_edits2(self, word):
        """ Return known words at edit distance 2 from *word*.

        Only words present in the model are kept, which keeps the set
        small despite the combinatorial number of distance-2 edits. """
        return set(e2 for e1 in self.edits1(word)
                   for e2 in self.edits1(e1) if e2 in self.NWORDS)

    def known(self, words):
        """ Return the subset of *words* present in the trained model. """
        return set(w for w in words if w in self.NWORDS)

    def correct(self, word):
        """ Return the most probable correction for *word*.

        Preference order: the word itself if known, then known distance-1
        candidates, then known distance-2 candidates, then *word*
        unchanged. Ties are broken by corpus frequency. """
        candidates = (self.known([word]) or self.known(self.edits1(word))
                      or self.known_edits2(word) or [word])
        return max(candidates, key=self.NWORDS.get)
| {
"repo_name": "WilliamDiakite/ExperimentationsACA",
"path": "processing/text_processing/spell_checker.py",
"copies": "1",
"size": "1286",
"license": "mit",
"hash": 3820882804178967000,
"line_mean": 25.2448979592,
"line_max": 103,
"alpha_frac": 0.6423017107,
"autogenerated": false,
"ratio": 2.747863247863248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8781066097209114,
"avg_score": 0.021819772270827015,
"num_lines": 49
} |
""" Another (sub)set of models, this one contains only those with literature
driven RL (or related) terms. Prior analyses were more exploratory.
In this set we allow for seperate regressors matching behavoiral accuracy,
as well as inverted and positive-value-only coding schemes. """
from roi.base import Mean
class Rewardrecode(Mean):
    """ A Roi analysis class, customized for the catreward project.

    Unlike Catreward, this reads in the average bold data from a
    text file.

    Every model_* method fits the same GLM pipeline and differs only in
    which parametric regressors enter the design matrix, so the shared
    pipeline is factored into _fit_parametric(). The model_* names and
    signatures are unchanged from the original flat implementation.
    """

    def __init__(self, TR, roi_name, trials, durations, data):
        Mean.__init__(self, TR, roi_name, trials, durations, data)

        # The bold data is looked up by roi name.
        self.data['meta']['bold'] = self.roi_name
        self.create_bold(preprocess=True)
        self.create_hrf(function_name='double_gamma')

    def _fit_parametric(self, data_to_use):
        """ Shared model pipeline: record the regressor names, build a
        parametric (non-orthogonalized, non-boxcar) design matrix from
        them, and fit with z-score normalization.

        @param data_to_use  List of regressor names present in self.data.
        """
        self.data['meta']['dm'] = data_to_use
        self.create_dm_param(names=data_to_use, orth=False, box=False)
        self.fit(norm='zscore')

    # --
    # Accuracy
    def model_0101(self):
        """ Behavioral accuracy. """
        self._fit_parametric(['acc'])

    def model_0102(self):
        """ Behavioral accuracy, diminished by (exponential) similarity. """
        self._fit_parametric(['acc_exp'])

    def model_0103(self):
        """ Behavioral accuracy, diminished by (gaussian) similarity. """
        self._fit_parametric(['acc_gauss'])

    # --
    # Gains and losses
    def model_0201(self):
        """ Gains and losses. """
        self._fit_parametric(['gl'])

    def model_0202(self):
        """ Gains and losses, diminished by (exponential) similarity. """
        self._fit_parametric(['gl_exp'])

    def model_0203(self):
        """ Gains and losses, diminished by (gaussian) similarity. """
        self._fit_parametric(['gl_gauss'])

    # --
    # g/l into 2 regressors
    def model_0301(self):
        """ Gains and losses, in 2 regressors. """
        self._fit_parametric(['gl_1', 'gl_0'])

    def model_0302(self):
        """ Gains and losses, diminished by (exponential) similarity,
        in 2 regressors. """
        self._fit_parametric(['gl_exp_1', 'gl_exp_0'])

    def model_0303(self):
        """ Gains and losses, diminished by (gaussian) similarity,
        in 2 regressors. """
        self._fit_parametric(['gl_gauss_1', 'gl_gauss_0'])

    # --
    # Acc coding
    ## RPE
    def model_0401(self):
        """ RPE - derived from accuracy. """
        self._fit_parametric(['rpe_acc'])

    def model_0402(self):
        """ RPE - derived from accuracy diminished by (exponential)
        similarity. """
        self._fit_parametric(['rpe_acc_exp'])

    def model_0403(self):
        """ RPE - derived from accuracy diminished by (gaussian)
        similarity. """
        self._fit_parametric(['rpe_acc_gauss'])

    # --
    # Acc coding
    ## VALUE
    def model_0501(self):
        """ Value - derived from accuracy. """
        self._fit_parametric(['value_acc'])

    def model_0502(self):
        """ Value - derived from accuracy diminished by (exponential)
        similarity. """
        self._fit_parametric(['value_acc_exp'])

    def model_0503(self):
        """ Value - derived from accuracy diminished by (gaussian)
        similarity. """
        self._fit_parametric(['value_acc_gauss'])

    # --
    # g/l coding
    ## RPE
    def model_0701(self):
        """ RPE - derived from gains and losses. """
        self._fit_parametric(['rpe_gl'])

    def model_0702(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. """
        self._fit_parametric(['rpe_gl_exp'])

    def model_0703(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. """
        self._fit_parametric(['rpe_gl_gauss'])

    # --
    # g/l coding
    ## VALUE
    def model_0801(self):
        """ Value - derived from gains and losses. """
        self._fit_parametric(['value_gl'])

    def model_0802(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. """
        self._fit_parametric(['value_gl_exp'])

    def model_0803(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. """
        self._fit_parametric(['value_gl_gauss'])

    # --
    # g/l coding, into 2 regressors
    ## RPE
    def model_0901(self):
        """ RPE - derived from gains and losses. """
        self._fit_parametric(['rpe_gl_1', 'rpe_gl_0'])

    def model_0902(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. """
        self._fit_parametric(['rpe_gl_exp_1', 'rpe_gl_exp_0'])

    def model_0903(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. """
        self._fit_parametric(['rpe_gl_gauss_1', 'rpe_gl_gauss_0'])

    # --
    # g/l coding, into 2 regressors
    ## VALUE
    def model_1001(self):
        """ Value - derived from gains and losses. """
        self._fit_parametric(['value_gl_1', 'value_gl_0'])

    def model_1002(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. """
        self._fit_parametric(['value_gl_exp_1', 'value_gl_exp_0'])

    def model_1003(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. """
        self._fit_parametric(['value_gl_gauss_1', 'value_gl_gauss_0'])

    # --
    # INVERTED VALUES
    # --
    # Gains and losses INVERTED
    def model_1101(self):
        """ Gains and losses. Reward coding inversed. """
        self._fit_parametric(['gl_invert'])

    def model_1102(self):
        """ Gains and losses, diminished by (exponential) similarity.
        Reward coding inversed. """
        self._fit_parametric(['gl_exp_invert'])

    def model_1103(self):
        """ Gains and losses, diminished by (gaussian) similarity.
        Reward coding inversed. """
        self._fit_parametric(['gl_gauss_invert'])

    # --
    # g/l into 2 regressors INVERTED
    def model_1201(self):
        """ Gains and losses, in 2 regressors.
        Reward coding inversed. """
        self._fit_parametric(['gl_invert_1', 'gl_invert_0'])

    def model_1202(self):
        """ Gains and losses, diminished by (exponential) similarity,
        in 2 regressors. Reward coding inversed. """
        self._fit_parametric(['gl_exp_invert_1', 'gl_exp_invert_0'])

    def model_1203(self):
        """ Gains and losses, diminished by (gaussian) similarity,
        in 2 regressors. Reward coding inversed. """
        self._fit_parametric(['gl_gauss_invert_1', 'gl_gauss_invert_0'])

    # --
    # Acc coding INVERTED
    ## RPE
    def model_1301(self):
        """ RPE - derived from accuracy. Reward coding inversed. """
        self._fit_parametric(['rpe_acc_invert'])

    def model_1302(self):
        """ RPE - derived from accuracy diminished by (exponential)
        similarity. Reward coding inversed. """
        self._fit_parametric(['rpe_acc_exp_invert'])

    def model_1303(self):
        """ RPE - derived from accuracy diminished by (gaussian)
        similarity. Reward coding inversed. """
        self._fit_parametric(['rpe_acc_gauss_invert'])

    # --
    # Acc coding
    ## VALUE
    def model_1401(self):
        """ Value - derived from accuracy. Reward coding inversed. """
        self._fit_parametric(['value_acc_invert'])

    def model_1402(self):
        """ Value - derived from accuracy diminished by (exponential)
        similarity. Reward coding inversed. """
        self._fit_parametric(['value_acc_exp_invert'])

    def model_1403(self):
        """ Value - derived from accuracy diminished by (gaussian)
        similarity. Reward coding inversed. """
        self._fit_parametric(['value_acc_gauss_invert'])

    # --
    # g/l coding
    ## RPE
    def model_1601(self):
        """ RPE - derived from gains and losses. Reward coding inversed. """
        self._fit_parametric(['rpe_gl_invert'])

    def model_1602(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. Reward coding inversed. """
        self._fit_parametric(['rpe_gl_exp_invert'])

    def model_1603(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding inversed. """
        self._fit_parametric(['rpe_gl_gauss_invert'])

    # --
    # g/l coding
    ## VALUE
    def model_1701(self):
        """ Value - derived from gains and losses. Reward coding inversed. """
        self._fit_parametric(['value_gl_invert'])

    def model_1702(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. Reward coding inversed. """
        self._fit_parametric(['value_gl_exp_invert'])

    def model_1703(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding inversed. """
        self._fit_parametric(['value_gl_gauss_invert'])

    # --
    # g/l coding, into 2 regressors
    ## RPE
    def model_1801(self):
        """ RPE - derived from gains and losses. Reward coding inversed. """
        self._fit_parametric(['rpe_gl_invert_1', 'rpe_gl_invert_0'])

    def model_1802(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. Reward coding inversed. """
        self._fit_parametric(['rpe_gl_exp_invert_1', 'rpe_gl_exp_invert_0'])

    def model_1803(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding inversed. """
        self._fit_parametric(['rpe_gl_gauss_invert_1', 'rpe_gl_gauss_invert_0'])

    # --
    # g/l coding, into 2 regressors
    ## VALUE
    def model_1901(self):
        """ Value - derived from gains and losses. Reward coding inversed. """
        self._fit_parametric(['value_gl_invert_1', 'value_gl_invert_0'])

    def model_1902(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. Reward coding inversed. """
        self._fit_parametric(['value_gl_exp_invert_1', 'value_gl_exp_invert_0'])

    def model_1903(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding inversed. """
        self._fit_parametric(
                ['value_gl_gauss_invert_1', 'value_gl_gauss_invert_0'])

    # --
    # POSITIVE CODING
    # --
    # Gains and losses, positive only
    def model_2001(self):
        """ Gains and losses. Reward coding was positive only. """
        self._fit_parametric(['gl_pos'])

    def model_2002(self):
        """ Gains and losses, diminished by (exponential) similarity.
        Reward coding was positive only. """
        self._fit_parametric(['gl_exp_pos'])

    def model_2003(self):
        """ Gains and losses, diminished by (gaussian) similarity.
        Reward coding was positive only. """
        self._fit_parametric(['gl_gauss_pos'])

    # --
    # g/l into 2 regressors pos only
    def model_2101(self):
        """ Gains and losses, in 2 regressors.
        Reward coding was positive only. """
        self._fit_parametric(['gl_pos_1', 'gl_pos_0'])

    def model_2102(self):
        """ Gains and losses, diminished by (exponential) similarity,
        in 2 regressors. Reward coding was positive only. """
        self._fit_parametric(['gl_exp_pos_1', 'gl_exp_pos_0'])

    def model_2103(self):
        """ Gains and losses, diminished by (gaussian) similarity,
        in 2 regressors. Reward coding was positive only. """
        self._fit_parametric(['gl_gauss_pos_1', 'gl_gauss_pos_0'])

    # --
    # g/l coding
    ## RPE
    def model_2501(self):
        """ RPE - derived from gains and losses.
        Reward coding was positive only. """
        self._fit_parametric(['rpe_gl_pos'])

    def model_2502(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['rpe_gl_exp_pos'])

    def model_2503(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['rpe_gl_gauss_pos'])

    # --
    # g/l coding
    ## VALUE
    def model_2601(self):
        """ Value - derived from gains and losses.
        Reward coding was positive only. """
        self._fit_parametric(['value_gl_pos'])

    def model_2602(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['value_gl_exp_pos'])

    def model_2603(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['value_gl_gauss_pos'])

    # --
    # g/l coding, into 2 regressors
    ## RPE
    def model_2701(self):
        """ RPE - derived from gains and losses.
        Reward coding was positive only. """
        self._fit_parametric(['rpe_gl_pos_1', 'rpe_gl_pos_0'])

    def model_2702(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['rpe_gl_exp_pos_1', 'rpe_gl_exp_pos_0'])

    def model_2703(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['rpe_gl_gauss_pos_1', 'rpe_gl_gauss_pos_0'])

    # --
    # g/l coding, into 2 regressors
    ## VALUE
    def model_2801(self):
        """ Value - derived from gains and losses.
        Reward coding was positive only. """
        self._fit_parametric(['value_gl_pos_1', 'value_gl_pos_0'])

    def model_2802(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['value_gl_exp_pos_1', 'value_gl_exp_pos_0'])

    def model_2803(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. Reward coding was positive only. """
        self._fit_parametric(['value_gl_gauss_pos_1', 'value_gl_gauss_pos_0'])

    # --
    # CONTROL MODELS
    def model_29(self):
        """ Outcome similarity (exponential). """
        self._fit_parametric(['exp'])

    def model_30(self):
        """ Outcome similarity (gaussian). """
        self._fit_parametric(['gauss'])

    def model_31(self):
        """ Behavioral/category responses as separate regressors. """
        self._fit_parametric(['resp1', 'resp6'])

    def model_32(self):
        """ Outcome and contra-outcome similarities (exponential),
        as separate regressors. """
        self._fit_parametric(['exp', 'exp_opp'])

    def model_33(self):
        """ Outcome and contra-outcome similarities (gaussian),
        as separate regressors. """
        self._fit_parametric(['gauss', 'gauss_opp'])

    def model_34(self):
        """ Gabor angle parameter. """
        self._fit_parametric(['angle'])

    def model_35(self):
        """ Gabor width parameter. """
        self._fit_parametric(['width'])
| {
"repo_name": "parenthetical-e/fmri",
"path": "catreward/roi/exps/base2.py",
"copies": "1",
"size": "26356",
"license": "bsd-2-clause",
"hash": 5347236819236171000,
"line_mean": 28.98407281,
"line_max": 78,
"alpha_frac": 0.5733798756,
"autogenerated": false,
"ratio": 3.282191780821918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9300733225153781,
"avg_score": 0.010967686253627477,
"num_lines": 879
} |
#Anoushka Alavilli
#tutorial3.py
# A ggame demo: a ball bounces horizontally across a green background;
# space toggles motion, 'r' reverses direction, clicking teleports the ball.
from ggame import App, RectangleAsset, ImageAsset, SoundAsset
from ggame import LineStyle, Color, Sprite, Sound

SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480

green = Color(0x00ff00, 1)
black = Color(0, 1)
noline = LineStyle(0, black)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, green)
bg = Sprite(bg_asset, (0, 0))

# Sounds -- created once. (The original file re-created the identical
# assets a second time further down; the duplicate block was removed.)
pew1_asset = SoundAsset("sounds/pew1.mp3")
pew1 = Sound(pew1_asset)
pop_asset = SoundAsset("sounds/reappear.mp3")
pop = Sound(pop_asset)

# A ball! This is already in the ggame-tutorials repository
ball_asset = ImageAsset("images/orb-150545_640.png")
ball = Sprite(ball_asset, (0, 0))
# Original image is too big. Scale it to 1/10 its original size
ball.scale = 0.1

# custom attributes
ball.dir = 1    # +1 = moving right, -1 = moving left
ball.go = True  # whether the ball is currently moving

def reverse(b):
    """ Flip the horizontal direction of sprite b. """
    b.dir *= -1

def step():
    """ Per-frame update: advance the ball and bounce off the edges. """
    if ball.go:
        ball.x += ball.dir
        if ball.x + ball.width > SCREEN_WIDTH or ball.x < 0:
            # Undo the move that crossed the edge, then turn around.
            ball.x -= ball.dir
            reverse(ball)

# Handle the space key: pause/resume the ball.
def spaceKey(event):
    ball.go = not ball.go

# Handle the "reverse" key
def reverseKey(event):
    reverse(ball)

# Handle the mouse click: move the ball to the click position.
def mouseClick(event):
    ball.x = event.x
    ball.y = event.y

myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run(step)
myapp.listenKeyEvent('keydown', 'space', spaceKey)
myapp.listenKeyEvent('keydown', 'r', reverseKey)
myapp.listenMouseEvent('click', mouseClick)
| {
"repo_name": "anoushkaalavilli/ggame-tutorials",
"path": "tutorial3.py",
"copies": "1",
"size": "1569",
"license": "mit",
"hash": -6537469672937807000,
"line_mean": 25.15,
"line_max": 69,
"alpha_frac": 0.6998087954,
"autogenerated": false,
"ratio": 2.6548223350253806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8814081307963375,
"avg_score": 0.008109964492401138,
"num_lines": 60
} |
""" An overlay for drawing "infinite" vertical and horizontal lines.
This module defines the CoordinateLineOverlay class, a Chaco overlay
for Plot (and similar) objects.
"""
from __future__ import with_statement
from traits.api import Instance, Float, Array
from enable.api import black_color_trait, LineStyle, Component
from chaco.api import AbstractOverlay
class CoordinateLineOverlay(AbstractOverlay):
    """ Overlay that draws "infinite" horizontal and vertical lines across
    its component, at positions given in data coordinates.
    Lines listed in `index_data` are drawn perpendicular to the index axis,
    and lines in `value_data` perpendicular to the value axis; the
    component's `orientation` determines which axis maps to screen-x.
    """
    # The data coordinates of the lines to be drawn perpendicular to the
    # index axis.
    index_data = Array
    # The data coordinates of the lines to be drawn perpendicular to the
    # value axis.
    value_data = Array
    # Width of the lines.
    line_width = Float(1.0)
    # Color of the lines.
    color = black_color_trait
    # Style of the lines ('solid', 'dash' or 'dot').
    line_style = LineStyle
    # The component that this tool overlays. This must be a Component with
    # the following attributes:
    #   x, y, x2, y2
    #       The screen coordinates of the corners of the component.
    #   orientation ('h' or 'v')
    #       The orientation of the component, either horizontal or vertical.
    #       This is the orientation of the index axis.
    #   index_mapper
    #       index_mapper.map_screen maps `index_data` to screen coordinates.
    #   value_mapper
    #       value_mapper.map_screen maps `value_data` to screen coordinates.
    # Typically this will be a Plot instance.
    component = Instance(Component)
    #----------------------------------------------------------------------
    # Override AbstractOverlay methods
    #----------------------------------------------------------------------
    def overlay(self, component, gc, view_bounds, mode="normal"):
        """ Draw the coordinate lines on top of the component.
        Part of the AbstractOverlay API; called by the drawing framework.
        """
        comp = self.component
        # Map the data coordinates to screen coordinates.
        x_pts = comp.index_mapper.map_screen(self.index_data)
        y_pts = comp.value_mapper.map_screen(self.value_data)
        if comp.orientation == "v":
            # With a vertical index axis, index maps to screen-y and value
            # to screen-x, so the two point sets trade roles.
            x_pts, y_pts = y_pts, x_pts
        with gc:
            # Set the line color and style parameters.
            gc.set_stroke_color(self.color_)
            gc.set_line_width(self.line_width)
            gc.set_line_dash(self.line_style_)
            # Draw the vertical lines.
            for screen_x in x_pts:
                self._draw_vertical_line(gc, screen_x)
            # Draw the horizontal lines.
            for screen_y in y_pts:
                self._draw_horizontal_line(gc, screen_y)
    #----------------------------------------------------------------------
    # Private methods
    #----------------------------------------------------------------------
    def _draw_vertical_line(self, gc, screen_x):
        """ Stroke a full-height vertical line at `screen_x`, skipping
        positions outside the component's horizontal extent. """
        if screen_x < self.component.x or screen_x > self.component.x2:
            return
        gc.move_to(screen_x, self.component.y)
        gc.line_to(screen_x, self.component.y2)
        gc.stroke_path()
    def _draw_horizontal_line(self, gc, screen_y):
        """ Stroke a full-width horizontal line at `screen_y`, skipping
        positions outside the component's vertical extent. """
        if screen_y < self.component.y or screen_y > self.component.y2:
            return
        gc.move_to(self.component.x, screen_y,)
        gc.line_to(self.component.x2, screen_y)
        gc.stroke_path()
| {
"repo_name": "ContinuumIO/chaco",
"path": "chaco/overlays/coordinate_line_overlay.py",
"copies": "3",
"size": "3158",
"license": "bsd-3-clause",
"hash": 4411595335378269000,
"line_mean": 34.4831460674,
"line_max": 78,
"alpha_frac": 0.5702976567,
"autogenerated": false,
"ratio": 4.128104575163399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005913660555884092,
"num_lines": 89
} |
"""An overlay that aligns itself to the plot
"""
from traits.api import Enum, Any
from container_overlay import ContainerOverlay
class AlignedContainerOverlay(ContainerOverlay):
    """ Container overlay that aligns itself to the plot

    This overlay takes an alignment which specifies which corner of its
    container it should align itself with ("ur", "ul", "ll", or "lr").
    For tooltip or customizable behaviour, an alternate_position trait
    can be specified which gives the point to draw the component at, and
    the align is used to lay the container out relative to that position.
    """

    # XXX allow 'c' for center as an alignment option?
    # XXX make this alignment stuff a Container subclass? A generic mixin?

    # Corner of the container the box aligns itself with:
    #
    # * "ur": upper right
    # * "ul": upper left
    # * "ll": lower left
    # * "lr": lower right
    align = Enum("ur", "ul", "ll", "lr")

    # Optional alternate position for the root of the box, as a
    # length-2 sequence relative to the component's origin; subclasses
    # may set this (e.g. for tooltips).
    alternate_position = Any

    def overlay(self, other, gc, view_bounds, mode):
        """Lay the container out against *other*, then draw it."""
        self._compute_position(other)
        self.draw(gc, view_bounds, mode)

    # XXX should this method really be _do_layout?
    def _compute_position(self, component):
        """ Given the alignment and size of the overlay, position it.
        """
        if self.layout_needed:
            self.do_layout()
        valign, halign = self.align
        anchor = self.alternate_position
        if anchor:
            # Lay the box out relative to the supplied anchor point.
            ax, ay = anchor
            if valign == "u":
                self.outer_y = component.y + ay
            else:
                self.outer_y2 = component.y + ay
            if halign == "r":
                self.outer_x = component.x + ax
            else:
                self.outer_x2 = component.x + ax
        else:
            # Pin the box to the requested corner of the component.
            if valign == "u":
                self.outer_y2 = component.y2
            else:
                self.outer_y = component.y
            if halign == "r":
                self.outer_x2 = component.x2
            else:
                self.outer_x = component.x
        # Attempt to keep the box entirely within the component; prefer
        # expanding toward the top-right if we cover the whole component.
        # Only assign when out of bounds, so trait notifications fire
        # exactly as often as before.
        if self.x2 > component.x2:
            self.x2 = component.x2
        if self.y2 > component.y2:
            self.y2 = component.y2
        if self.x < component.x:
            self.x = component.x
        if self.y < component.y:
            self.y = component.y
| {
"repo_name": "burnpanck/chaco",
"path": "chaco/overlays/aligned_container_overlay.py",
"copies": "3",
"size": "2600",
"license": "bsd-3-clause",
"hash": 8896666761208180000,
"line_mean": 33.2105263158,
"line_max": 75,
"alpha_frac": 0.5761538462,
"autogenerated": false,
"ratio": 4.16,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6236153846200001,
"avg_score": null,
"num_lines": null
} |
"""An overview window showing thumbnails of the image set.
"""
import sys
import math
from PySide import QtCore, QtGui
class ThumbnailWidget(QtGui.QLabel):
    """A clickable thumbnail label for one image of the set."""

    def __init__(self, image):
        super().__init__()
        # Draw a 4px frame that stays invisible (painted in the
        # background color) until the widget is marked active.
        self.setFrameStyle(self.Box | self.Plain)
        self.setLineWidth(4)
        pal = self.palette()
        pal.setColor(self.foregroundRole(), pal.color(self.backgroundRole()))
        self.setPalette(pal)
        self.image = image
        self.setImagePixmap()

    def _getOverviewWindow(self):
        """Walk up the parent chain to the enclosing OverviewWindow."""
        widget = self
        while not isinstance(widget, OverviewWindow):
            widget = widget.parent()
        return widget

    def setImagePixmap(self):
        """Display the image's thumbnail pixmap."""
        self.setPixmap(self.image.getThumbPixmap())

    def mousePressEvent(self, event):
        """A left click makes this thumbnail's image the current one."""
        if event.button() == QtCore.Qt.LeftButton:
            self._getOverviewWindow().imageViewer.moveCurrentTo(self.image)

    def markActive(self, isActive):
        """Show (isActive=True) or hide the blue highlight frame."""
        pal = self.palette()
        if isActive:
            frameColor = QtCore.Qt.blue
        else:
            frameColor = pal.color(self.backgroundRole())
        pal.setColor(self.foregroundRole(), frameColor)
        self.setPalette(pal)
class OverviewWindow(QtGui.QMainWindow):
    """Main window showing a scrollable grid of thumbnails.

    Thumbnails mirror `imageViewer.selection`; the currently selected
    image's thumbnail is highlighted via ThumbnailWidget.markActive().
    """
    def __init__(self, imageViewer):
        super().__init__()
        # The viewer whose image selection this window displays.
        self.imageViewer = imageViewer
        # Number of thumbnail columns in the grid.
        self.numcolumns = 4
        self.setWindowTitle("Overview")
        self.mainLayout = QtGui.QGridLayout()
        self._populate()
        centralWidget = QtGui.QWidget()
        centralWidget.setLayout(self.mainLayout)
        scrollArea = QtGui.QScrollArea()
        scrollArea.setWidget(centralWidget)
        scrollArea.setAlignment(QtCore.Qt.AlignCenter)
        self.setCentralWidget(scrollArea)
        self.closeAct = QtGui.QAction("&Close", self,
                triggered=self.close)
        self.fileMenu = QtGui.QMenu("&File", self)
        self.fileMenu.addAction(self.closeAct)
        self.menuBar().addMenu(self.fileMenu)
        # Set the width of the window such that the scrollArea just
        # fits. We need to add 24 to the central widget, 20 for the
        # vertical scroll bar and 4 for the border.
        width = centralWidget.size().width() + 24
        size = self.size()
        size.setWidth(width)
        self.resize(size)
        self.activeWidget = None
        # Highlight the currently selected image, if there is one.
        try:
            image = self.imageViewer.selection[self.imageViewer.cur]
            self.markActive(image)
        except IndexError:
            # Empty selection or cur out of range: nothing to highlight.
            pass
    def _populate(self):
        """Populate the mainLayout with thumbnail images.
        """
        images = self.imageViewer.selection
        ncol = self.numcolumns
        # c counts successfully created thumbnails and determines the
        # grid cell (row = c // ncol, column = c % ncol).
        c = 0
        for i in images:
            try:
                thumb = ThumbnailWidget(i)
            except Exception as e:
                # A failed thumbnail is reported but does not abort the
                # rest of the grid.
                print(str(e), file=sys.stderr)
            else:
                self.mainLayout.addWidget(thumb, c // ncol, c % ncol,
                                          QtCore.Qt.AlignCenter)
                c += 1
    def updateThumbs(self):
        """Update the mainLayout with thumbnail images.
        """
        # Note: this code is based on the assumption that no image
        # will ever be added to self.imageViewer.selection and thus we
        # only need to consider removing ThumbnailWidgets, but not to
        # add any. Furthermore, we assume the order given by
        # self.mainLayout.itemAt() is the order that the widgets
        # have been added to self.mainLayout and thus the same as
        # self.imageViewer.selection.
        numImages = len(self.imageViewer.selection)
        # Re-bind surviving widgets to the image now at their position.
        for i in range(numImages):
            widget = self.mainLayout.itemAt(i).widget()
            image = self.imageViewer.selection[i]
            if widget.image is not image:
                widget.image = image
                widget.setImagePixmap()
        # Drop trailing widgets for images that were removed.
        while self.mainLayout.count() > numImages:
            item = self.mainLayout.takeAt(numImages)
            item.widget().deleteLater()
    def getThumbnailWidget(self, image):
        """Return the ThumbnailWidget showing *image*, or None."""
        for i in range(self.mainLayout.count()):
            w = self.mainLayout.itemAt(i).widget()
            if w.image is image:
                return w
        else:
            # for/else: only reached when no widget matched.
            return None
    def markActive(self, image):
        """Move the highlight frame to the thumbnail of *image*."""
        if self.activeWidget:
            self.activeWidget.markActive(False)
        self.activeWidget = self.getThumbnailWidget(image)
        if self.activeWidget:
            self.activeWidget.markActive(True)
| {
"repo_name": "RKrahl/photo-tools",
"path": "photo/qt/overviewWindow.py",
"copies": "1",
"size": "4586",
"license": "apache-2.0",
"hash": -5513328793468195000,
"line_mean": 31.9928057554,
"line_max": 70,
"alpha_frac": 0.6011774967,
"autogenerated": false,
"ratio": 4.262081784386617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5363259281086616,
"avg_score": null,
"num_lines": null
} |
# An Python interface to the Scintilla control.
#
# Exposes Python classes that allow you to use Scintilla as
# a "standard" MFC edit control (eg, control.GetTextLength(), control.GetSel()
# plus many Scintilla specific features (eg control.SCIAddStyledText())
from pywin.mfc import window
from pywin import default_scintilla_encoding
import win32con
import win32ui
import win32api
import array
import struct
import string
import os
from . import scintillacon
# Load Scintilla.dll to get access to the control.
# We expect to find this in the same directory as win32ui.pyd
dllid = None
if win32ui.debug: # If running _d version of Pythonwin...
	try:
		dllid = win32api.LoadLibrary(os.path.join(os.path.split(win32ui.__file__)[0], "Scintilla_d.DLL"))
	except win32api.error: # Not there - we dont _need_ a debug ver, so ignore this error.
		pass
if dllid is None:
	# Release build (or debug DLL missing): load the release DLL from
	# the win32ui directory.
	try:
		dllid = win32api.LoadLibrary(os.path.join(os.path.split(win32ui.__file__)[0], "Scintilla.DLL"))
	except win32api.error:
		pass
if dllid is None:
	# Still not there - lets see if Windows can find it by searching?
	# Note: this last attempt is deliberately NOT wrapped in try/except,
	# so a totally missing DLL raises at import time.
	dllid = win32api.LoadLibrary("Scintilla.DLL")
# null_byte is str in py2k, bytes on py3k
null_byte = "\0".encode('ascii')
## These are from Richedit.h - need to add to win32con or commctrl
EM_GETTEXTRANGE = 1099
EM_EXLINEFROMCHAR = 1078
EM_FINDTEXTEX = 1103
EM_GETSELTEXT = 1086
EM_EXSETSEL = win32con.WM_USER + 55
class ScintillaNotification:
	"""Attribute bag holding the fields of one Scintilla notification.

	Every keyword argument passed to the constructor becomes an
	instance attribute of the same name.
	"""
	def __init__(self, **args):
		for name, value in args.items():
			setattr(self, name, value)
class ScintillaControlInterface:
	"""Mixin exposing the raw Scintilla (SCI_*) message API.

	Classes mixing this in must provide SendMessage and SendScintilla
	methods that forward a message id plus wParam/lParam to the
	underlying control.  Strings are encoded with
	default_scintilla_encoding before being handed to the control.
	"""
	def SCIUnpackNotifyMessage(self, msg):
		# Decode a raw SCNotification struct into a ScintillaNotification.
		format = "iiiiPiiiPPiiii"
		bytes = win32ui.GetBytes( msg, struct.calcsize(format) )
		position, ch, modifiers, modificationType, text_ptr, \
				length, linesAdded, msg, wParam, lParam, line, \
				foldLevelNow, foldLevelPrev, margin \
				= struct.unpack(format, bytes)
		return ScintillaNotification(position=position,ch=ch,
				modifiers=modifiers, modificationType=modificationType,
				text_ptr = text_ptr, length=length, linesAdded=linesAdded,
				msg = msg, wParam = wParam, lParam = lParam,
				line = line, foldLevelNow = foldLevelNow, foldLevelPrev = foldLevelPrev,
				margin = margin)
	def SCIAddText(self, text):
		self.SendMessage(scintillacon.SCI_ADDTEXT, text.encode(default_scintilla_encoding))
	def SCIAddStyledText(self, text, style = None):
		# If style is None, text is assumed to be a "native" Scintilla buffer.
		# If style is specified, text is a normal string, and the style is
		# assumed to apply to the entire string.
		if style is not None:
			text = list(map(lambda char, style=style: char+chr(style), text))
			text = ''.join(text)
		self.SendMessage(scintillacon.SCI_ADDSTYLEDTEXT, text.encode(default_scintilla_encoding))
	def SCIInsertText(self, text, pos=-1):
		# SCIInsertText allows unicode or bytes - but if they are bytes,
		# the caller must ensure it is encoded correctly.
		if isinstance(text, str):
			text = text.encode(default_scintilla_encoding)
		self.SendScintilla(scintillacon.SCI_INSERTTEXT, pos, text + null_byte)
	def SCISetSavePoint(self):
		self.SendScintilla(scintillacon.SCI_SETSAVEPOINT)
	def SCISetUndoCollection(self, collectFlag):
		self.SendScintilla(scintillacon.SCI_SETUNDOCOLLECTION, collectFlag)
	def SCIBeginUndoAction(self):
		self.SendScintilla(scintillacon.SCI_BEGINUNDOACTION)
	def SCIEndUndoAction(self):
		self.SendScintilla(scintillacon.SCI_ENDUNDOACTION)
	def SCIGetCurrentPos(self):
		return self.SendScintilla(scintillacon.SCI_GETCURRENTPOS)
	def SCIGetCharAt(self, pos):
		# Must ensure char is unsigned!
		return chr(self.SendScintilla(scintillacon.SCI_GETCHARAT, pos) & 0xFF)
	def SCIGotoLine(self, line):
		self.SendScintilla(scintillacon.SCI_GOTOLINE, line)
	def SCIBraceMatch(self, pos, maxReStyle):
		return self.SendScintilla(scintillacon.SCI_BRACEMATCH, pos, maxReStyle)
	def SCIBraceHighlight(self, pos, posOpposite):
		return self.SendScintilla(scintillacon.SCI_BRACEHIGHLIGHT, pos, posOpposite)
	def SCIBraceBadHighlight(self, pos):
		return self.SendScintilla(scintillacon.SCI_BRACEBADLIGHT, pos)
	####################################
	# Styling
#	def SCIColourise(self, start=0, end=-1):
#		NOTE - dependent on of we use builtin lexer, so handled below.
	def SCIGetEndStyled(self):
		return self.SendScintilla(scintillacon.SCI_GETENDSTYLED)
	def SCIStyleSetFore(self, num, v):
		return self.SendScintilla(scintillacon.SCI_STYLESETFORE, num, v)
	def SCIStyleSetBack(self, num, v):
		return self.SendScintilla(scintillacon.SCI_STYLESETBACK, num, v)
	def SCIStyleSetEOLFilled(self, num, v):
		return self.SendScintilla(scintillacon.SCI_STYLESETEOLFILLED, num, v)
	def SCIStyleSetFont(self, num, name, characterset=0):
		buff = (name + "\0").encode(default_scintilla_encoding)
		self.SendScintilla(scintillacon.SCI_STYLESETFONT, num, buff)
		self.SendScintilla(scintillacon.SCI_STYLESETCHARACTERSET, num, characterset)
	def SCIStyleSetBold(self, num, bBold):
		self.SendScintilla(scintillacon.SCI_STYLESETBOLD, num, bBold)
	def SCIStyleSetItalic(self, num, bItalic):
		self.SendScintilla(scintillacon.SCI_STYLESETITALIC, num, bItalic)
	def SCIStyleSetSize(self, num, size):
		self.SendScintilla(scintillacon.SCI_STYLESETSIZE, num, size)
	def SCIGetViewWS(self):
		return self.SendScintilla(scintillacon.SCI_GETVIEWWS)
	def SCISetViewWS(self, val):
		# Normalize val to a boolean before sending.
		self.SendScintilla(scintillacon.SCI_SETVIEWWS, not (val==0))
		self.InvalidateRect()
	def SCISetIndentationGuides(self, val):
		self.SendScintilla(scintillacon.SCI_SETINDENTATIONGUIDES, val)
	def SCIGetIndentationGuides(self):
		return self.SendScintilla(scintillacon.SCI_GETINDENTATIONGUIDES)
	def SCISetIndent(self, val):
		self.SendScintilla(scintillacon.SCI_SETINDENT, val)
	def SCIGetIndent(self, val):
		# NOTE(review): the `val` parameter is unused; kept for
		# backwards compatibility with existing callers.
		return self.SendScintilla(scintillacon.SCI_GETINDENT)
	def SCIGetViewEOL(self):
		return self.SendScintilla(scintillacon.SCI_GETVIEWEOL)
	def SCISetViewEOL(self, val):
		self.SendScintilla(scintillacon.SCI_SETVIEWEOL, not(val==0))
		self.InvalidateRect()
	def SCISetTabWidth(self, width):
		self.SendScintilla(scintillacon.SCI_SETTABWIDTH, width, 0)
	def SCIStartStyling(self, pos, mask):
		self.SendScintilla(scintillacon.SCI_STARTSTYLING, pos, mask)
	def SCISetStyling(self, pos, attr):
		self.SendScintilla(scintillacon.SCI_SETSTYLING, pos, attr)
	def SCISetStylingEx(self, ray): # ray is an array.
		address, length = ray.buffer_info()
		self.SendScintilla(scintillacon.SCI_SETSTYLINGEX, length, address)
	def SCIGetStyleAt(self, pos):
		return self.SendScintilla(scintillacon.SCI_GETSTYLEAT, pos)
	def SCISetMarginWidth(self, width):
		# Convenience: sets the width of margin 1 only.
		self.SendScintilla(scintillacon.SCI_SETMARGINWIDTHN, 1, width)
	def SCISetMarginWidthN(self, n, width):
		self.SendScintilla(scintillacon.SCI_SETMARGINWIDTHN, n, width)
	def SCISetFoldFlags(self, flags):
		self.SendScintilla(scintillacon.SCI_SETFOLDFLAGS, flags)
	# Markers
	def SCIMarkerDefineAll(self, markerNum, markerType, fore, back):
		# Convenience: define a marker and set both its colors.
		self.SCIMarkerDefine(markerNum, markerType)
		self.SCIMarkerSetFore(markerNum, fore)
		self.SCIMarkerSetBack(markerNum, back)
	def SCIMarkerDefine(self, markerNum, markerType):
		self.SendScintilla(scintillacon.SCI_MARKERDEFINE, markerNum, markerType)
	def SCIMarkerSetFore(self, markerNum, fore):
		self.SendScintilla(scintillacon.SCI_MARKERSETFORE, markerNum, fore)
	def SCIMarkerSetBack(self, markerNum, back):
		self.SendScintilla(scintillacon.SCI_MARKERSETBACK, markerNum, back)
	def SCIMarkerAdd(self, lineNo, markerNum):
		self.SendScintilla(scintillacon.SCI_MARKERADD, lineNo, markerNum)
	def SCIMarkerDelete(self, lineNo, markerNum):
		self.SendScintilla(scintillacon.SCI_MARKERDELETE, lineNo, markerNum)
	def SCIMarkerDeleteAll(self, markerNum=-1):
		# markerNum -1 deletes all marker types.
		self.SendScintilla(scintillacon.SCI_MARKERDELETEALL, markerNum)
	def SCIMarkerGet(self, lineNo):
		return self.SendScintilla(scintillacon.SCI_MARKERGET, lineNo)
	def SCIMarkerNext(self, lineNo, markerNum):
		return self.SendScintilla(scintillacon.SCI_MARKERNEXT, lineNo, markerNum)
	def SCICancel(self):
		self.SendScintilla(scintillacon.SCI_CANCEL)
	# AutoComplete
	def SCIAutoCShow(self, text):
		# text may be a string or a list/tuple of completion words.
		if type(text) in [type([]), type(())]:
			text = ' '.join(text)
		buff = (text + "\0").encode(default_scintilla_encoding)
		return self.SendScintilla(scintillacon.SCI_AUTOCSHOW, 0, buff)
	def SCIAutoCCancel(self):
		self.SendScintilla(scintillacon.SCI_AUTOCCANCEL)
	def SCIAutoCActive(self):
		return self.SendScintilla(scintillacon.SCI_AUTOCACTIVE)
	def SCIAutoCComplete(self):
		return self.SendScintilla(scintillacon.SCI_AUTOCCOMPLETE)
	def SCIAutoCStops(self, stops):
		buff = (stops + "\0").encode(default_scintilla_encoding)
		self.SendScintilla(scintillacon.SCI_AUTOCSTOPS, 0, buff)
	def SCIAutoCSetAutoHide(self, hide):
		self.SendScintilla(scintillacon.SCI_AUTOCSETAUTOHIDE, hide)
	def SCIAutoCSetFillups(self, fillups):
		self.SendScintilla(scintillacon.SCI_AUTOCSETFILLUPS, fillups)
	# Call tips
	def SCICallTipShow(self, text, pos=-1):
		# pos -1 means "at the start of the current selection".
		if pos==-1: pos = self.GetSel()[0]
		buff = (text + "\0").encode(default_scintilla_encoding)
		self.SendScintilla(scintillacon.SCI_CALLTIPSHOW, pos, buff)
	def SCICallTipCancel(self):
		self.SendScintilla(scintillacon.SCI_CALLTIPCANCEL)
	def SCICallTipActive(self):
		return self.SendScintilla(scintillacon.SCI_CALLTIPACTIVE)
	def SCICallTipPosStart(self):
		return self.SendScintilla(scintillacon.SCI_CALLTIPPOSSTART)
	def SCINewline(self):
		self.SendScintilla(scintillacon.SCI_NEWLINE)
	# Lexer etc
	def SCISetKeywords(self, keywords, kw_list_no = 0):
		buff = (keywords+"\0").encode(default_scintilla_encoding)
		self.SendScintilla(scintillacon.SCI_SETKEYWORDS, kw_list_no, buff)
	def SCISetProperty(self, name, value):
		# Both name and value are passed as pointers to NUL-terminated
		# buffers; the arrays keep the buffers alive for the call.
		name_buff = array.array('b', (name + '\0').encode(default_scintilla_encoding))
		val_buff = array.array("b", (str(value)+'\0').encode(default_scintilla_encoding))
		address_name_buffer = name_buff.buffer_info()[0]
		address_val_buffer = val_buff.buffer_info()[0]
		self.SendScintilla(scintillacon.SCI_SETPROPERTY, address_name_buffer, address_val_buffer)
	def SCISetStyleBits(self, nbits):
		self.SendScintilla(scintillacon.SCI_SETSTYLEBITS, nbits)
	# Folding
	def SCIGetFoldLevel(self, lineno):
		return self.SendScintilla(scintillacon.SCI_GETFOLDLEVEL, lineno)
	def SCIToggleFold(self, lineno):
		return self.SendScintilla(scintillacon.SCI_TOGGLEFOLD, lineno)
	def SCIEnsureVisible(self, lineno):
		self.SendScintilla(scintillacon.SCI_ENSUREVISIBLE, lineno)
	def SCIGetFoldExpanded(self, lineno):
		return self.SendScintilla(scintillacon.SCI_GETFOLDEXPANDED, lineno)
	# right edge
	def SCISetEdgeColumn(self, edge):
		self.SendScintilla(scintillacon.SCI_SETEDGECOLUMN, edge)
	def SCIGetEdgeColumn(self):
		return self.SendScintilla(scintillacon.SCI_GETEDGECOLUMN)
	def SCISetEdgeMode(self, mode):
		self.SendScintilla(scintillacon.SCI_SETEDGEMODE, mode)
	def SCIGetEdgeMode(self):
		return self.SendScintilla(scintillacon.SCI_GETEDGEMODE)
	def SCISetEdgeColor(self, color):
		self.SendScintilla(scintillacon.SCI_SETEDGECOLOUR, color)
	def SCIGetEdgeColor(self):
		# NOTE(review): the setter uses SCI_SETEDGECOLOUR but this getter
		# uses SCI_GETEDGECOLOR - confirm scintillacon defines both
		# spellings, otherwise this raises AttributeError when called.
		return self.SendScintilla(scintillacon.SCI_GETEDGECOLOR)
	# Multi-doc
	def SCIGetDocPointer(self):
		return self.SendScintilla(scintillacon.SCI_GETDOCPOINTER)
	def SCISetDocPointer(self, p):
		return self.SendScintilla(scintillacon.SCI_SETDOCPOINTER, 0, p)
	def SCISetWrapMode(self, mode):
		return self.SendScintilla(scintillacon.SCI_SETWRAPMODE, mode)
	def SCIGetWrapMode(self):
		return self.SendScintilla(scintillacon.SCI_GETWRAPMODE)
class CScintillaEditInterface(ScintillaControlInterface):
	"""Makes a Scintilla control behave like a "standard" MFC edit control.

	Provides GetSel/SetSel, FindText, GetLine, GetTextRange etc on top of
	the raw SCI_* message interface from ScintillaControlInterface.
	"""
	def close(self):
		# Drop the colorizer so its back-reference to this control is released.
		self.colorizer = None
	# Fix: Clear was previously defined twice with identical bodies;
	# keep a single definition.
	def Clear(self):
		self.SendScintilla(win32con.WM_CLEAR)
	def FindText(self, flags, range, findText):
		""" LPARAM for EM_FINDTEXTEX:
			typedef struct _findtextex {
				CHARRANGE chrg;
				LPCTSTR lpstrText;
				CHARRANGE chrgText;} FINDTEXTEX;
			typedef struct _charrange {
				LONG cpMin;
				LONG cpMax;} CHARRANGE;
		Returns (rc, (found_start, found_end)).
		"""
		findtextex_fmt='llPll'
		## Scintilla does not handle unicode in EM_FINDTEXT msg (FINDTEXTEX struct)
		txt_buff = (findText+'\0').encode(default_scintilla_encoding)
		txt_array = array.array('b', txt_buff)
		# The FINDTEXTEX struct is built in a mutable array so Scintilla
		# can write the chrgText result back into it.
		ft_buff = struct.pack(findtextex_fmt, range[0], range[1], txt_array.buffer_info()[0], 0, 0)
		ft_array = array.array('b', ft_buff)
		rc = self.SendScintilla(EM_FINDTEXTEX, flags, ft_array.buffer_info()[0])
		ftUnpacked = struct.unpack(findtextex_fmt, ft_array)
		return rc, (ftUnpacked[3], ftUnpacked[4])
	def GetSel(self):
		"""Return the selection as an ordered (start, end) tuple."""
		currentPos = self.SendScintilla(scintillacon.SCI_GETCURRENTPOS)
		anchorPos = self.SendScintilla(scintillacon.SCI_GETANCHOR)
		# Fix: an unreachable `return currentPos` after this if/else has
		# been removed - both branches already return.
		if currentPos < anchorPos:
			return (currentPos, anchorPos)
		else:
			return (anchorPos, currentPos)
	def GetSelText(self):
		"""Return the currently selected text, decoded to str."""
		start, end = self.GetSel()
		txtBuf = array.array('b', null_byte * (end-start+1))
		addressTxtBuf = txtBuf.buffer_info()[0]
		# EM_GETSELTEXT is documented as returning the number of chars
		# not including the NULL, but scintilla includes the NULL. A
		# quick glance at the scintilla impl doesn't make this
		# obvious - the NULL is included in the 'selection' object
		# and reflected in the length of that 'selection' object.
		# I expect that is a bug in scintilla and may be fixed by now,
		# but we just blindly assume that the last char is \0 and
		# strip it.
		self.SendScintilla(EM_GETSELTEXT, 0, addressTxtBuf)
		# Fix: array.tostring() was removed in Python 3.9; tobytes() is
		# the identical replacement (available since 3.2).
		return txtBuf.tobytes()[:-1].decode(default_scintilla_encoding)
	def SetSel(self, start=0, end=None):
		"""Set the selection.

		start may be a (start, end) tuple (then end must be None).
		end defaulting to start gives an empty selection (caret move);
		negative positions mean end-of-text.
		"""
		if type(start)==type(()):
			assert end is None, "If you pass a point in the first param, the second must be None"
			start, end = start
		elif end is None:
			end = start
		if start < 0: start = self.GetTextLength()
		if end < 0: end = self.GetTextLength()
		assert start <= self.GetTextLength(), "The start postion is invalid (%d/%d)" % (start, self.GetTextLength())
		assert end <= self.GetTextLength(), "The end postion is invalid (%d/%d)" % (end, self.GetTextLength())
		# Pack a CHARRANGE and pass its address via EM_EXSETSEL.
		cr = struct.pack('ll', start, end)
		crBuff = array.array('b', cr)
		addressCrBuff = crBuff.buffer_info()[0]
		rc = self.SendScintilla(EM_EXSETSEL, 0, addressCrBuff)
	def GetLineCount(self):
		return self.SendScintilla(win32con.EM_GETLINECOUNT)
	def LineFromChar(self, charPos=-1):
		"""Return the line number containing charPos (default: selection start)."""
		if charPos==-1: charPos = self.GetSel()[0]
		assert charPos >= 0 and charPos <= self.GetTextLength(), "The charPos postion (%s) is invalid (max=%s)" % (charPos, self.GetTextLength())
		# EM_EXLINEFROMCHAR puts charPos in lParam, not wParam
		return self.SendScintilla(EM_EXLINEFROMCHAR, 0, charPos)
	def LineIndex(self, line):
		return self.SendScintilla(win32con.EM_LINEINDEX, line)
	def ScrollCaret(self):
		return self.SendScintilla(win32con.EM_SCROLLCARET)
	def GetCurLineNumber(self):
		return self.LineFromChar(self.SCIGetCurrentPos())
	def GetTextLength(self):
		return self.SendScintilla(scintillacon.SCI_GETTEXTLENGTH)
	def GetTextRange(self, start = 0, end = -1, decode = True):
		"""Return document text in [start, end); end=-1 means end-of-text.

		Returns str when decode is true, otherwise raw bytes.
		"""
		if end == -1: end = self.SendScintilla(scintillacon.SCI_GETTEXTLENGTH)
		assert end>=start, "Negative index requested (%d/%d)" % (start, end)
		assert start >= 0 and start <= self.GetTextLength(), "The start postion is invalid"
		assert end >= 0 and end <= self.GetTextLength(), "The end postion is invalid"
		initer = null_byte * (end - start + 1)
		buff = array.array('b', initer)
		addressBuffer = buff.buffer_info()[0]
		# Pack a TEXTRANGE struct pointing at our receive buffer.
		tr = struct.pack('llP', start, end, addressBuffer)
		trBuff = array.array('b', tr)
		addressTrBuff = trBuff.buffer_info()[0]
		num_bytes = self.SendScintilla(EM_GETTEXTRANGE, 0, addressTrBuff)
		# Fix: array.tostring() was removed in Python 3.9; tobytes() is
		# the identical replacement (available since 3.2).
		ret = buff.tobytes()[:num_bytes]
		if decode:
			ret = ret.decode(default_scintilla_encoding)
		return ret
	def ReplaceSel(self, str):
		buff = (str + "\0").encode(default_scintilla_encoding)
		self.SendScintilla(scintillacon.SCI_REPLACESEL, 0, buff)
	def GetLine(self, line=-1):
		"""Return the text of the given line (default: current line)."""
		if line == -1: line = self.GetCurLineNumber()
		start = self.LineIndex(line)
		end = self.LineIndex(line+1)
		return self.GetTextRange(start, end)
	def SetReadOnly(self, flag = 1):
		return self.SendScintilla(win32con.EM_SETREADONLY, flag)
	def LineScroll(self, lines, cols=0):
		return self.SendScintilla(win32con.EM_LINESCROLL, cols, lines)
	def GetFirstVisibleLine(self):
		return self.SendScintilla(win32con.EM_GETFIRSTVISIBLELINE)
	def SetWordWrap(self, mode):
		if mode != win32ui.CRichEditView_WrapNone:
			raise ValueError("We dont support word-wrap (I dont think :-)")
class CScintillaColorEditInterface(CScintillaEditInterface):
	"""Adds plug-in syntax-colorizer support to the edit interface."""
	def _GetColorizer(self):
		# Create the colorizer lazily on first use.
		if not hasattr(self, "colorizer"):
			self.colorizer = self._MakeColorizer()
		return self.colorizer
	def _MakeColorizer(self):
		# Give parent a chance to hook.
		make = getattr(self.GetParentFrame(), "_MakeColorizer", None)
		if make is not None:
			return make()
		from . import formatter
		return formatter.BuiltinPythonSourceFormatter(self)
	def Colorize(self, start=0, end=-1):
		colorizer = self._GetColorizer()
		if colorizer is not None:
			colorizer.Colorize(start, end)
	def ApplyFormattingStyles(self, bReload=1):
		colorizer = self._GetColorizer()
		if colorizer is not None:
			colorizer.ApplyFormattingStyles(bReload)
	# The Parent window will normally hook
	def HookFormatter(self, parent = None):
		colorizer = self._GetColorizer()
		# No need if we have no color!
		if colorizer is not None:
			colorizer.HookFormatter(parent)
class CScintillaEdit(window.Wnd, CScintillaColorEditInterface):
	"""A stand-alone Scintilla control wrapped as an MFC window."""
	def __init__(self, wnd=None):
		if wnd is None:
			wnd = win32ui.CreateWnd()
		window.Wnd.__init__(self, wnd)
	def SendScintilla(self, msg, w=0, l=0):
		# For a plain control, Scintilla messages are ordinary
		# window messages.
		return self.SendMessage(msg, w, l)
	def CreateWindow(self, style, rect, parent, id):
		self._obj_.CreateWindow("Scintilla", "Scintilla", style,
		                        rect, parent, id, None)
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lib/site-packages/pythonwin/pywin/scintilla/control.py",
"copies": "7",
"size": "17921",
"license": "mit",
"hash": 5117654561724590000,
"line_mean": 40.2926267281,
"line_max": 139,
"alpha_frac": 0.7505719547,
"autogenerated": false,
"ratio": 2.90878104203863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7159352996738629,
"avg_score": null,
"num_lines": null
} |
# An Python interface to the Scintilla control.
#
# Exposes Python classes that allow you to use Scintilla as
# a "standard" MFC edit control (eg, control.GetTextLength(), control.GetSel()
# plus many Scintilla specific features (eg control.SCIAddStyledText())
from pywin.mfc import window
import win32con
import win32ui
import win32api
import array
import struct
import string
import os
from scintillacon import *
# Load Scintilla.dll to get access to the control.
# We expect to find this in the same directory as win32ui.pyd
dllid = None
if win32ui.debug: # If running _d version of Pythonwin...
	try:
		dllid = win32api.LoadLibrary(os.path.join(os.path.split(win32ui.__file__)[0], "Scintilla_d.DLL"))
	except win32api.error: # Not there - we dont _need_ a debug ver, so ignore this error.
		pass
if dllid is None:
	# Release build (or debug DLL missing): load the release DLL from
	# the win32ui directory.
	try:
		dllid = win32api.LoadLibrary(os.path.join(os.path.split(win32ui.__file__)[0], "Scintilla.DLL"))
	except win32api.error:
		pass
if dllid is None:
	# Still not there - lets see if Windows can find it by searching?
	# Note: this last attempt is deliberately NOT wrapped in try/except,
	# so a totally missing DLL raises at import time.
	dllid = win32api.LoadLibrary("Scintilla.DLL")
# These message constants are from Richedit.h.
EM_GETTEXTRANGE = 1099
EM_EXLINEFROMCHAR = 1078
EM_FINDTEXTEX = 1103
EM_GETSELTEXT = 1086
EM_EXSETSEL = win32con.WM_USER + 55
class ScintillaNotification:
	"""Attribute bag holding the fields of one Scintilla notification.

	Every keyword argument passed to the constructor becomes an
	instance attribute of the same name.
	"""
	def __init__(self, **args):
		for name, value in args.items():
			setattr(self, name, value)
class ScintillaControlInterface:
def SCIUnpackNotifyMessage(self, msg):
format = "iiiiPiiiPPiiii"
bytes = win32ui.GetBytes( msg, struct.calcsize(format) )
position, ch, modifiers, modificationType, text_ptr, \
length, linesAdded, msg, wParam, lParam, line, \
foldLevelNow, foldLevelPrev, margin \
= struct.unpack(format, bytes)
return ScintillaNotification(position=position,ch=ch,
modifiers=modifiers, modificationType=modificationType,
text_ptr = text_ptr, length=length, linesAdded=linesAdded,
msg = msg, wParam = wParam, lParam = lParam,
line = line, foldLevelNow = foldLevelNow, foldLevelPrev = foldLevelPrev,
margin = margin)
def SCIAddText(self, text):
self.SendMessage(SCI_ADDTEXT, buffer(text))
def SCIAddStyledText(self, text, style = None):
# If style is None, text is assumed to be a "native" Scintilla buffer.
# If style is specified, text is a normal string, and the style is
# assumed to apply to the entire string.
if style is not None:
text = map(lambda char, style=style: char+chr(style), text)
text = string.join(text, '')
self.SendMessage(SCI_ADDSTYLEDTEXT, buffer(text))
def SCIInsertText(self, text, pos=-1):
sma = array.array('c', text+"\0")
(a,l) = sma.buffer_info()
self.SendScintilla(SCI_INSERTTEXT, pos, a)
def SCISetSavePoint(self):
self.SendScintilla(SCI_SETSAVEPOINT)
def SCISetUndoCollection(self, collectFlag):
self.SendScintilla(SCI_SETUNDOCOLLECTION, collectFlag)
def SCIBeginUndoAction(self):
self.SendScintilla(SCI_BEGINUNDOACTION)
def SCIEndUndoAction(self):
self.SendScintilla(SCI_ENDUNDOACTION)
def SCIGetCurrentPos(self):
return self.SendScintilla(SCI_GETCURRENTPOS)
def SCIGetCharAt(self, pos):
# Must ensure char is unsigned!
return chr(self.SendScintilla(SCI_GETCHARAT, pos) & 0xFF)
def SCIGotoLine(self, line):
self.SendScintilla(SCI_GOTOLINE, line)
def SCIBraceMatch(self, pos, maxReStyle):
return self.SendScintilla(SCI_BRACEMATCH, pos, maxReStyle)
def SCIBraceHighlight(self, pos, posOpposite):
return self.SendScintilla(SCI_BRACEHIGHLIGHT, pos, posOpposite)
def SCIBraceBadHighlight(self, pos):
return self.SendScintilla(SCI_BRACEBADLIGHT, pos)
####################################
# Styling
# def SCIColourise(self, start=0, end=-1):
# NOTE - dependent on of we use builtin lexer, so handled below.
def SCIGetEndStyled(self):
return self.SendScintilla(SCI_GETENDSTYLED)
def SCIStyleSetFore(self, num, v):
return self.SendScintilla(SCI_STYLESETFORE, num, v)
def SCIStyleSetBack(self, num, v):
return self.SendScintilla(SCI_STYLESETBACK, num, v)
def SCIStyleSetEOLFilled(self, num, v):
return self.SendScintilla(SCI_STYLESETEOLFILLED, num, v)
def SCIStyleSetFont(self, num, name, characterset=0):
buff = array.array('c', name + "\0")
addressBuffer = buff.buffer_info()[0]
self.SendScintilla(SCI_STYLESETFONT, num, addressBuffer)
self.SendScintilla(SCI_STYLESETCHARACTERSET, num, characterset)
def SCIStyleSetBold(self, num, bBold):
self.SendScintilla(SCI_STYLESETBOLD, num, bBold)
def SCIStyleSetItalic(self, num, bItalic):
self.SendScintilla(SCI_STYLESETITALIC, num, bItalic)
def SCIStyleSetSize(self, num, size):
self.SendScintilla(SCI_STYLESETSIZE, num, size)
def SCIGetViewWS(self):
return self.SendScintilla(SCI_GETVIEWWS)
def SCISetViewWS(self, val):
self.SendScintilla(SCI_SETVIEWWS, not (val==0))
self.InvalidateRect()
def SCISetIndentationGuides(self, val):
self.SendScintilla(SCI_SETINDENTATIONGUIDES, val)
def SCIGetIndentationGuides(self):
return self.SendScintilla(SCI_GETINDENTATIONGUIDES)
def SCISetIndent(self, val):
self.SendScintilla(SCI_SETINDENT, val)
def SCIGetIndent(self, val):
return self.SendScintilla(SCI_GETINDENT)
def SCIGetViewEOL(self):
return self.SendScintilla(SCI_GETVIEWEOL)
def SCISetViewEOL(self, val):
self.SendScintilla(SCI_SETVIEWEOL, not(val==0))
self.InvalidateRect()
    # Thin wrappers around individual Scintilla (SCI_*) window messages.
    # Each method simply forwards its arguments to self.SendScintilla();
    # see the Scintilla API documentation for the semantics of each message.
    def SCISetTabWidth(self, width):
        # Tab width is measured in space-character widths.
        self.SendScintilla(SCI_SETTABWIDTH, width, 0)
    def SCIStartStyling(self, pos, mask):
        # Begin a styling run at position pos; mask selects the style bits.
        self.SendScintilla(SCI_STARTSTYLING, pos, mask)
    def SCISetStyling(self, pos, attr):
        # Style the next pos characters with style attr.
        self.SendScintilla(SCI_SETSTYLING, pos, attr)
    def SCISetStylingEx(self, ray): # ray is an array.
        # Bulk styling: pass the raw buffer address/length of an
        # array.array of style bytes directly to the control.
        address, length = ray.buffer_info()
        self.SendScintilla(SCI_SETSTYLINGEX, length, address)
    def SCIGetStyleAt(self, pos):
        # Return the style number at the given document position.
        return self.SendScintilla(SCI_GETSTYLEAT, pos)
    def SCISetMarginWidth(self, width):
        # Convenience form: sets the width of margin 1 only.
        self.SendScintilla(SCI_SETMARGINWIDTHN, 1, width)
    def SCISetMarginWidthN(self, n, width):
        self.SendScintilla(SCI_SETMARGINWIDTHN, n, width)
    def SCISetFoldFlags(self, flags):
        self.SendScintilla(SCI_SETFOLDFLAGS, flags)

    # Markers
    def SCIMarkerDefineAll(self, markerNum, markerType, fore, back):
        # Define a marker and set both of its colours in one call.
        self.SCIMarkerDefine(markerNum, markerType)
        self.SCIMarkerSetFore(markerNum, fore)
        self.SCIMarkerSetBack(markerNum, back)
    def SCIMarkerDefine(self, markerNum, markerType):
        self.SendScintilla(SCI_MARKERDEFINE, markerNum, markerType)
    def SCIMarkerSetFore(self, markerNum, fore):
        self.SendScintilla(SCI_MARKERSETFORE, markerNum, fore)
    def SCIMarkerSetBack(self, markerNum, back):
        self.SendScintilla(SCI_MARKERSETBACK, markerNum, back)
    def SCIMarkerAdd(self, lineNo, markerNum):
        self.SendScintilla(SCI_MARKERADD, lineNo, markerNum)
    def SCIMarkerDelete(self, lineNo, markerNum):
        self.SendScintilla(SCI_MARKERDELETE, lineNo, markerNum)
    def SCIMarkerDeleteAll(self, markerNum=-1):
        # markerNum == -1 deletes every marker on every line.
        self.SendScintilla(SCI_MARKERDELETEALL, markerNum)
    def SCIMarkerGet(self, lineNo):
        # Return the bit-mask of markers present on the line.
        return self.SendScintilla(SCI_MARKERGET, lineNo)
    def SCIMarkerNext(self, lineNo, markerNum):
        # Return the next line at or after lineNo carrying markerNum.
        return self.SendScintilla(SCI_MARKERNEXT, lineNo, markerNum)
    def SCICancel(self):
        self.SendScintilla(SCI_CANCEL)

    # AutoComplete
    def SCIAutoCShow(self, text):
        """Show the auto-completion list; text may be a string or a
        list/tuple of completion words."""
        if type(text) in [type([]), type(())]:
            text = string.join(text)
        # Pass the NUL-terminated word list by raw buffer address.
        buff = array.array('c', text + "\0")
        addressBuffer = buff.buffer_info()[0]
        return self.SendScintilla(SCI_AUTOCSHOW, 0, addressBuffer)
    def SCIAutoCCancel(self):
        self.SendScintilla(SCI_AUTOCCANCEL)
    def SCIAutoCActive(self):
        return self.SendScintilla(SCI_AUTOCACTIVE)
    def SCIAutoCComplete(self):
        return self.SendScintilla(SCI_AUTOCCOMPLETE)
    def SCIAutoCStops(self, stops):
        # Characters in `stops` dismiss the auto-completion list.
        buff = array.array('c', stops + "\0")
        addressBuffer = buff.buffer_info()[0]
        self.SendScintilla(SCI_AUTOCSTOPS, 0, addressBuffer)
    def SCIAutoCSetAutoHide(self, hide):
        self.SendScintilla(SCI_AUTOCSETAUTOHIDE, hide)
    def SCIAutoCSetFillups(self, fillups):
        self.SendScintilla(SCI_AUTOCSETFILLUPS, fillups)

    # Call tips
    def SCICallTipShow(self, text, pos=-1):
        """Show a call tip at pos (default: the selection start)."""
        if pos==-1: pos = self.GetSel()[0]
        if isinstance(text, unicode):
            # I'm really not sure what the correct encoding
            # to use is - but it has gotta be better than total
            # failure due to the array module
            text = text.encode("mbcs")
        buff = array.array('c', text + "\0")
        addressBuffer = buff.buffer_info()[0]
        self.SendScintilla(SCI_CALLTIPSHOW, pos, addressBuffer)
    def SCICallTipCancel(self):
        self.SendScintilla(SCI_CALLTIPCANCEL)
    def SCICallTipActive(self):
        return self.SendScintilla(SCI_CALLTIPACTIVE)
    def SCICallTipPosStart(self):
        return self.SendScintilla(SCI_CALLTIPPOSSTART)
    def SCINewline(self):
        self.SendScintilla(SCI_NEWLINE)

    # Lexer etc
    def SCISetKeywords(self, keywords, kw_list_no = 0):
        # keywords is a space-separated word list for keyword set kw_list_no.
        ar = array.array('c', keywords+"\0")
        (a,l) = ar.buffer_info()
        self.SendScintilla(SCI_SETKEYWORDS, kw_list_no, a)
    def SCISetProperty(self, name, value):
        # Both name and value are passed as NUL-terminated buffers.
        name_buff = array.array('c', name + "\0")
        val_buff = array.array("c", str(value) + "\0")
        address_name_buffer = name_buff.buffer_info()[0]
        address_val_buffer = val_buff.buffer_info()[0]
        self.SendScintilla(SCI_SETPROPERTY, address_name_buffer, address_val_buffer)
    def SCISetStyleBits(self, nbits):
        self.SendScintilla(SCI_SETSTYLEBITS, nbits)

    # Folding
    def SCIGetFoldLevel(self, lineno):
        return self.SendScintilla(SCI_GETFOLDLEVEL, lineno)
    def SCIToggleFold(self, lineno):
        return self.SendScintilla(SCI_TOGGLEFOLD, lineno)
    def SCIEnsureVisible(self, lineno):
        self.SendScintilla(SCI_ENSUREVISIBLE, lineno)
    def SCIGetFoldExpanded(self, lineno):
        return self.SendScintilla(SCI_GETFOLDEXPANDED, lineno)

    # right edge
    def SCISetEdgeColumn(self, edge):
        self.SendScintilla(SCI_SETEDGECOLUMN, edge)
    def SCIGetEdgeColumn(self):
        return self.SendScintilla(SCI_GETEDGECOLUMN)
    def SCISetEdgeMode(self, mode):
        self.SendScintilla(SCI_SETEDGEMODE, mode)
    def SCIGetEdgeMode(self):
        return self.SendScintilla(SCI_GETEDGEMODE)
    def SCISetEdgeColor(self, color):
        # NOTE(review): set uses the COLOUR spelling, get uses COLOR -
        # presumably both constants exist upstream; confirm if edited.
        self.SendScintilla(SCI_SETEDGECOLOUR, color)
    def SCIGetEdgeColor(self):
        return self.SendScintilla(SCI_GETEDGECOLOR)

    # Multi-doc
    def SCIGetDocPointer(self):
        return self.SendScintilla(SCI_GETDOCPOINTER)
    def SCISetDocPointer(self, p):
        return self.SendScintilla(SCI_SETDOCPOINTER, 0, p)
    def SCISetWrapMode(self, mode):
        return self.SendScintilla(SCI_SETWRAPMODE, mode)
    def SCIGetWrapMode(self):
        return self.SendScintilla(SCI_GETWRAPMODE)
class CScintillaEditInterface(ScintillaControlInterface):
    """Richedit-style (EM_*/WM_*) editing operations implemented on top
    of the raw Scintilla message wrappers."""
    def close(self):
        # Drop the colorizer reference to break reference cycles on close.
        self.colorizer = None
    def Clear(self):
        """Delete the current selection."""
        # The original source defined this method twice, identically;
        # the duplicate definition has been removed.
        self.SendScintilla(win32con.WM_CLEAR)
    def FindText(self, flags, range, findText):
        """Search for findText inside the (min, max) character `range`.

        Returns (rc, (found_start, found_end)).

        LPARAM for EM_FINDTEXTEX:
        typedef struct _findtextex {
        CHARRANGE chrg;
        LPCTSTR lpstrText;
        CHARRANGE chrgText;} FINDTEXTEX;
        typedef struct _charrange {
        LONG cpMin;
        LONG cpMax;} CHARRANGE;
        """
        findtextex_fmt='llPll'
        buff = array.array('c', findText + "\0")
        addressBuffer = buff.buffer_info()[0]
        ft = struct.pack(findtextex_fmt, range[0], range[1], addressBuffer, 0, 0)
        ftBuff = array.array('c', ft)
        addressFtBuff = ftBuff.buffer_info()[0]
        rc = self.SendScintilla(EM_FINDTEXTEX, flags, addressFtBuff)
        # Unpack the chrgText members that the control filled in.
        ftUnpacked = struct.unpack(findtextex_fmt, ftBuff.tostring())
        return rc, (ftUnpacked[3], ftUnpacked[4])
    def GetSel(self):
        """Return the selection as an ordered (start, end) tuple."""
        currentPos = self.SendScintilla(SCI_GETCURRENTPOS)
        anchorPos = self.SendScintilla(SCI_GETANCHOR)
        if currentPos < anchorPos:
            return (currentPos, anchorPos)
        else:
            return (anchorPos, currentPos)
        # (an unreachable "return currentPos;" after the if/else was removed)
    def GetSelText(self):
        """Return the text of the current selection."""
        start, end = self.GetSel()
        txtBuf = array.array('c', " " * ((end-start)+1))
        addressTxtBuf = txtBuf.buffer_info()[0]
        self.SendScintilla(EM_GETSELTEXT, 0, addressTxtBuf)
        # Trim the trailing NUL written by the control.
        return txtBuf.tostring()[:-1]
    def SetSel(self, start=0, end=None):
        """Set the selection.

        Accepts either a (start, end) tuple in the first argument, or two
        integers.  A negative position means "end of document".
        """
        if type(start)==type(()):
            assert end is None, "If you pass a point in the first param, the second must be None"
            start, end = start
        elif end is None:
            end = start
        if start < 0: start = self.GetTextLength()
        if end < 0: end = self.GetTextLength()
        assert start <= self.GetTextLength(), "The start position is invalid (%d/%d)" % (start, self.GetTextLength())
        assert end <= self.GetTextLength(), "The end position is invalid (%d/%d)" % (end, self.GetTextLength())
        cr = struct.pack('ll', start, end)
        crBuff = array.array('c', cr)
        addressCrBuff = crBuff.buffer_info()[0]
        rc = self.SendScintilla(EM_EXSETSEL, 0, addressCrBuff)
    def GetLineCount(self):
        return self.SendScintilla(win32con.EM_GETLINECOUNT)
    def LineFromChar(self, charPos=-1):
        """Return the line number holding charPos (default: selection start)."""
        if charPos==-1: charPos = self.GetSel()[0]
        assert charPos >= 0 and charPos <= self.GetTextLength(), "The charPos position (%s) is invalid (max=%s)" % (charPos, self.GetTextLength())
        #return self.SendScintilla(EM_EXLINEFROMCHAR, charPos)
        # EM_EXLINEFROMCHAR puts charPos in lParam, not wParam
        return self.SendScintilla(EM_EXLINEFROMCHAR, 0, charPos)
    def LineIndex(self, line):
        # Character index of the first character of `line`.
        return self.SendScintilla(win32con.EM_LINEINDEX, line)
    def ScrollCaret(self):
        return self.SendScintilla(win32con.EM_SCROLLCARET)
    def GetCurLineNumber(self):
        return self.LineFromChar(self.SCIGetCurrentPos())
    def GetTextLength(self):
        return self.SendScintilla(win32con.WM_GETTEXTLENGTH)
    def GetTextRange(self, start = 0, end = -1):
        """Return document text between start and end (end == -1 means
        the end of the document)."""
        if end == -1: end = self.SendScintilla(win32con.WM_GETTEXTLENGTH)
        assert end>=start, "Negative index requested (%d/%d)" % (start, end)
        assert start >= 0 and start <= self.GetTextLength(), "The start position is invalid"
        assert end >= 0 and end <= self.GetTextLength(), "The end position is invalid"
        initer = "=" * (end - start + 1)
        buff = array.array('c', initer)
        addressBuffer = buff.buffer_info()[0]
        # EM_GETTEXTRANGE takes a TEXTRANGE struct: (cpMin, cpMax, lpstrText).
        tr = struct.pack('llP', start, end, addressBuffer)
        trBuff = array.array('c', tr)
        addressTrBuff = trBuff.buffer_info()[0]
        numChars = self.SendScintilla(EM_GETTEXTRANGE, 0, addressTrBuff)
        return buff.tostring()[:numChars]
    def ReplaceSel(self, str):
        """Replace the current selection with `str`."""
        buff = array.array('c', str + "\0")
        self.SendScintilla(SCI_REPLACESEL, 0, buff.buffer_info()[0])
        buff = None
    def GetLine(self, line=-1):
        """Return the text of `line` (default: the current line)."""
        if line == -1: line = self.GetCurLineNumber()
        start = self.LineIndex(line)
        end = self.LineIndex(line+1)
        return self.GetTextRange(start, end)
    def SetReadOnly(self, flag = 1):
        return self.SendScintilla(win32con.EM_SETREADONLY, flag)
    def LineScroll(self, lines, cols=0):
        return self.SendScintilla(win32con.EM_LINESCROLL, cols, lines)
    def GetFirstVisibleLine(self):
        return self.SendScintilla(win32con.EM_GETFIRSTVISIBLELINE)
    def SetWordWrap(self, mode):
        # Modernised from the Python-2-only "<>" / "raise E, msg" forms;
        # both replacements are also valid Python 2.
        if mode != win32ui.CRichEditView_WrapNone:
            raise ValueError("We dont support word-wrap (I dont think :-)")
class CScintillaColorEditInterface(CScintillaEditInterface):
    ################################
    # Plug-in colorizer support
    def _GetColorizer(self):
        """Return the colorizer, creating it lazily on first access."""
        if not hasattr(self, "colorizer"):
            self.colorizer = self._MakeColorizer()
        return self.colorizer
    def _MakeColorizer(self):
        """Create a colorizer, giving the parent frame first refusal."""
        hook = getattr(self.GetParentFrame(), "_MakeColorizer", None)
        if hook is not None:
            return hook()
        import formatter
        return formatter.BuiltinPythonSourceFormatter(self)
    def Colorize(self, start=0, end=-1):
        """Colorize the given document range, if a colorizer is available."""
        colorizer = self._GetColorizer()
        if colorizer is not None:
            colorizer.Colorize(start, end)
    def ApplyFormattingStyles(self, bReload=1):
        """Re-apply formatting styles, optionally reloading them first."""
        colorizer = self._GetColorizer()
        if colorizer is not None:
            colorizer.ApplyFormattingStyles(bReload)
    # The Parent window will normally hook
    def HookFormatter(self, parent = None):
        """Allow the colorizer to hook formatting notifications."""
        colorizer = self._GetColorizer()
        if colorizer is not None:  # No need if we have no color!
            colorizer.HookFormatter(parent)
class CScintillaEdit(window.Wnd, CScintillaColorEditInterface):
    """A stand-alone Scintilla control: MFC window plus the colorized
    editing interface."""
    def __init__(self, wnd=None):
        # Wrap an existing MFC window object, or create a fresh one.
        if wnd is None:
            wnd = win32ui.CreateWnd()
        window.Wnd.__init__(self, wnd)
    def SendScintilla(self, msg, w=0, l=0):
        # Route all SCI_*/EM_* messages through the normal window procedure.
        return self.SendMessage(msg, w, l)
    def CreateWindow(self, style, rect, parent, id):
        """Create the underlying window of the registered "Scintilla" class."""
        self._obj_.CreateWindow(
            "Scintilla",
            "Scintilla",
            style,
            rect,
            parent,
            id,
            None)
| {
"repo_name": "Southpaw-TACTIC/Team",
"path": "src/python/Lib/site-packages/pythonwin/pywin/scintilla/control.py",
"copies": "1",
"size": "16522",
"license": "epl-1.0",
"hash": -4646137268723455000,
"line_mean": 36.6932084309,
"line_max": 139,
"alpha_frac": 0.7163781625,
"autogenerated": false,
"ratio": 2.911878745153331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41282569076533304,
"avg_score": null,
"num_lines": null
} |
"""An Python re-implementation of hierarchical module import.
This code is intended to be read, not executed. However, it does work
-- all you need to do to enable it is "import knee".
(The name is a pun on the klunkier predecessor of this module, "ni".)
"""
import sys, imp, __builtin__
# Replacement for __import__()
def import_hook(name, globals=None, locals=None, fromlist=None):
    """Emulate the builtin __import__.

    Returns the head package when fromlist is empty (matching the builtin's
    odd convention), otherwise the tail module itself after making sure the
    names in fromlist are importable submodules.
    """
    parent = determine_parent(globals)
    q, tail = find_head_package(parent, name)
    m = load_tail(q, tail)
    if not fromlist:
        return q
    if hasattr(m, "__path__"):
        # Only packages (which have __path__) can own submodules.
        ensure_fromlist(m, fromlist)
    return m
def determine_parent(globals):
    """Return the package to import relative to, or None for absolute.

    A package imports relative to itself; a submodule imports relative to
    its containing package; a top-level module imports absolutely.
    """
    if not globals or not globals.has_key("__name__"):
        return None
    pname = globals['__name__']
    if globals.has_key("__path__"):
        # The importing module is itself a package.
        parent = sys.modules[pname]
        assert globals is parent.__dict__
        return parent
    if '.' in pname:
        # A submodule: its parent package is everything before the last dot.
        i = pname.rfind('.')
        pname = pname[:i]
        parent = sys.modules[pname]
        assert parent.__name__ == pname
        return parent
    return None
def find_head_package(parent, name):
    """Import the first dotted component of `name`.

    Returns (head_module, remaining_tail).  Tries a relative import below
    `parent` first, then falls back to a top-level (absolute) import.
    Raises ImportError when neither succeeds.
    """
    if '.' in name:
        i = name.find('.')
        head = name[:i]
        tail = name[i+1:]
    else:
        head = name
        tail = ""
    if parent:
        qname = "%s.%s" % (parent.__name__, head)
    else:
        qname = head
    q = import_module(head, qname, parent)
    if q: return q, tail
    if parent:
        # The relative lookup failed; retry as an absolute import.
        qname = head
        parent = None
        q = import_module(head, qname, parent)
        if q: return q, tail
    raise ImportError, "No module named " + qname
def load_tail(q, tail):
    """Import each remaining dotted component below head package `q`.

    Returns the last (innermost) module imported.
    """
    m = q
    while tail:
        # Peel one component off the front of the dotted tail.
        i = tail.find('.')
        if i < 0: i = len(tail)
        head, tail = tail[:i], tail[i+1:]
        mname = "%s.%s" % (m.__name__, head)
        m = import_module(head, mname, m)
        if not m:
            raise ImportError, "No module named " + mname
    return m
def ensure_fromlist(m, fromlist, recursive=0):
    """Import any submodules of package `m` named in `fromlist`.

    A "*" entry expands to m.__all__; the `recursive` flag stops a "*"
    inside __all__ from expanding a second time.
    """
    for sub in fromlist:
        if sub == "*":
            if not recursive:
                try:
                    all = m.__all__
                except AttributeError:
                    # No __all__: nothing to expand for "*".
                    pass
                else:
                    ensure_fromlist(m, all, 1)
            continue
        if sub != "*" and not hasattr(m, sub):
            subname = "%s.%s" % (m.__name__, sub)
            submod = import_module(sub, subname, m)
            if not submod:
                raise ImportError, "No module named " + subname
def import_module(partname, fqname, parent):
    """Low-level worker: find and load a single module.

    Already-loaded modules come straight from sys.modules.  Returns the
    module object, or None when it cannot be found.
    """
    try:
        return sys.modules[fqname]
    except KeyError:
        pass
    try:
        fp, pathname, stuff = imp.find_module(partname,
                                              parent and parent.__path__)
    except ImportError:
        return None
    try:
        m = imp.load_module(fqname, fp, pathname, stuff)
    finally:
        # find_module may return an open file object; always close it.
        if fp: fp.close()
    if parent:
        # Make the submodule reachable as an attribute of its package.
        setattr(parent, partname, m)
    return m
# Replacement for reload()
def reload_hook(module):
    """Emulate the builtin reload() for modules imported by this hook."""
    name = module.__name__
    if '.' not in name:
        # Top-level module: reload absolutely.
        return import_module(name, name, None)
    # Submodule: reload relative to its parent package.
    i = name.rfind('.')
    pname = name[:i]
    parent = sys.modules[pname]
    return import_module(name[i+1:], name, parent)
# Save the original hooks
# (kept in module globals so the replacements can be backed out by hand)
original_import = __builtin__.__import__
original_reload = __builtin__.reload
# Now install our hooks
__builtin__.__import__ = import_hook
__builtin__.reload = reload_hook
| {
"repo_name": "Lh4cKg/sl4a",
"path": "python/src/Demo/imputil/knee.py",
"copies": "40",
"size": "3482",
"license": "apache-2.0",
"hash": 5068636659474182000,
"line_mean": 26.6349206349,
"line_max": 73,
"alpha_frac": 0.5476737507,
"autogenerated": false,
"ratio": 3.805464480874317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003487984719698825,
"num_lines": 126
} |
# A question entirely solved by recurrences.
# Memoization to cache answers is helpful.
# Apart from that I see no need for DP.
#
# It is helpful to note that if T(n) is the required answer
# for 3xn grid, then:
# T(0) = 1
# T(1) = 0
# T(2) = 3
# T(3) = 0
#
# There are a few ways to find the correct recurrence.
# Most popular is http://stackoverflow.com/a/16388865/2844164
# My favourite method (http://abitofcs.blogspot.in/2014/10/a-bit-of-dp-spoj-m3tile.html) is
# Let S(n,k) be the ways to fill up a grid where 1 to `k` rows
# of the columns 1 to `n` are already filled up. Then the
# following recurrences exist.
# For k = 0: S(n,0) = S(n-2,0) + 2*S(n-1,1)
# + Case-1: || S(n,0) = S(n-2,0)
# ==
# + Case-2: | S(n,0) = 2*S(n-1,1)
# ==
# For k = 1: S(n,1) = S(n-1,0) + S(n-2,2)
# + Case-1: =
# | S(n,1) = S(n-1,0)
# + Case-2: =
# == S(n,1) = 2*S(n-1,2)
# ==
# For k = 2: S(n,2) = S(n-1,1)
# + Case-1: =
# = S(n,2) = S(n-1,1)
# ==
# Solving the recurrences, we get:
# S(n,0) = 4*S(n-2,0) - S(n-4,0)
# where S(n,0) = T(n).
#
# The solution recurrence is
# T(n) = 4*T(n-2) - T(n-4)
#
# Constraints of the questions make sure that n<31.
# Calculate and memoize
# dp[i] holds T(i), the number of domino tilings of a 3 x i grid;
# the seed values T(0)=1, T(1)=0, T(2)=3, T(3)=0 are derived above.
dp = [1,0,3,0] + [None]*27
for i in xrange(4,31):
    # Recurrence derived above: T(n) = 4*T(n-2) - T(n-4).
    dp[i] = 4*dp[i-2] - dp[i-4]
# Read grid widths until the -1 sentinel, answering each from the table.
n = int(raw_input())
while n != -1:
    print dp[n]
    n = int(raw_input())
| {
"repo_name": "babhishek21/oj-sols",
"path": "spoj/M3TILE.py",
"copies": "1",
"size": "1399",
"license": "mit",
"hash": -3333090388318665700,
"line_mean": 25.9038461538,
"line_max": 91,
"alpha_frac": 0.5439599714,
"autogenerated": false,
"ratio": 2.1424196018376724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3186379573237672,
"avg_score": null,
"num_lines": null
} |
# A reimplementation of the XDS indexer to work for harder cases, for example
# cases where the whole sweep needs to be read into memory in IDXREF to get
# a decent indexing solution (these do happen) and also cases where the
# crystal is highly mosaic. Perhaps. This will now be directly inherited from
# the original XDSIndexer and only the necessary method overloaded (as I
# should have done this in the first place.)
import logging
import math
import os
import dxtbx
from dials.array_family import flex
from dials.util.ascii_art import spot_counts_per_image_plot
from dxtbx.model import Experiment, ExperimentList
from dxtbx.serialize.xds import to_crystal, to_xds
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import banner
from xia2.lib.bits import auto_logfiler
from xia2.Modules.Indexer.XDSIndexer import XDSIndexer
from xia2.Wrappers.Dials.ImportXDS import ImportXDS
from xia2.Wrappers.XDS.XDS import XDSException
logger = logging.getLogger("xia2.Modules.Indexer.XDSIndexerII")
class XDSIndexerII(XDSIndexer):
    """An extension of XDSIndexer using all available images."""

    def __init__(self):
        super().__init__()
        # Use the "ii" (all-images) selection strategy by default.
        self._index_select_images = "ii"
        # Lazily decided in _index(): "i" (wedges) or "ii" (whole sweep).
        self._i_or_ii = None

    # helper functions

    def _index_select_images_ii(self):
        """Select correct images based on image headers.

        Returns a single wedge covering (at most) one full turn of data,
        and records the background image range as a side effect.
        """
        phi_width = self.get_phi_width()

        if phi_width == 0.0:
            raise RuntimeError("cannot use still images")

        # use five degrees for the background calculation
        five_deg = int(round(5.0 / phi_width)) - 1
        turn = int(round(360.0 / phi_width)) - 1

        if five_deg < 5:
            five_deg = 5

        images = self.get_matching_images()

        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block? if it is the
        # former then we have a problem, as we want *all* the images in the
        # sweep...

        wedges = []

        min_images = PhilIndex.params.xia2.settings.input.min_images

        if len(images) < 3 and len(images) < min_images:
            raise RuntimeError(
                "This INDEXER cannot be used for only %d images" % len(images)
            )

        # including > 360 degrees in indexing does not add fresh information
        start = min(images)
        end = max(images)
        if (end - start) > turn:
            end = start + turn

        logger.debug("Adding images for indexer: %d -> %d", start, end)

        wedges.append((start, end))

        # FIXME this should have a wrapper function!
        if start + five_deg in images:
            self._background_images = (start, start + five_deg)
        else:
            self._background_images = (start, end)

        return wedges

    def _index_prepare(self):
        """Run spot finding (via the base class) and log a per-image plot."""
        logger.notice(banner("Spotfinding %s" % self.get_indexer_sweep_name()))
        super()._index_prepare()

        reflections_file = spot_xds_to_reflection_file(
            self._indxr_payload["SPOT.XDS"],
            working_directory=self.get_working_directory(),
        )
        refl = flex.reflection_table.from_file(reflections_file)
        logger.info(spot_counts_per_image_plot(refl))

    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        self._index_remove_masked_regions()
        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            logger.debug("Selecting I or II, chose %s", self._i_or_ii)

        idxref = self.Idxref()

        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly
        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        if self._i_or_ii == "i":
            blocks = self._index_select_images_i()

            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame
                )

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame
                )

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly
        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            logger.debug("Set lattice: %s", self._indxr_input_lattice)
            logger.debug("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.
        # (to_xds is imported at module level; a redundant local import
        # that used to live here has been removed.)
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()
                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!
            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    logger.debug("XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif (
                    "insufficient percentage (< 70%)" in str(e)
                    or "insufficient percentage (< 50%)" in str(e)
                ) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()

                    # compare solutions
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (
                            math.fabs((cell[j] - original_cell[j]) / original_cell[j])
                            > 0.02
                            and check
                        ):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (
                            math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                            and check
                        ):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                    logger.debug("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                    e
                ) or "insufficient percentage (< 50%)" in str(e):
                    logger.debug("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...
        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        (
            self._indxr_lattice,
            self._indxr_cell,
            self._indxr_mosaic,
        ) = idxref.get_indexing_solution()

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        # this information gets lost when re-creating the models from the
        # XDS results - however is not refined so can simply copy from the
        # input - https://github.com/xia2/xia2/issues/372
        models.get_detector()[0].set_thickness(
            converter.get_detector()[0].get_thickness()
        )

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )
        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

    def decide_i_or_ii(self):
        """Trial-run both indexing strategies and pick the better one.

        Falls back to "ii" if either trial raises.
        """
        logger.debug("Testing II or I indexing")

        try:
            fraction_etc_i = self.test_i()
            fraction_etc_ii = self.test_ii()

            if not fraction_etc_i and fraction_etc_ii:
                return "ii"
            if fraction_etc_i and not fraction_etc_ii:
                return "i"

            logger.debug("I: %.2f %.2f %.2f" % fraction_etc_i)
            logger.debug("II: %.2f %.2f %.2f" % fraction_etc_ii)

            # Prefer "i" only when it indexes a larger fraction with
            # smaller rmsd and rms phi.
            if (
                fraction_etc_i[0] > fraction_etc_ii[0]
                and fraction_etc_i[1] < fraction_etc_ii[1]
                and fraction_etc_i[2] < fraction_etc_ii[2]
            ):
                return "i"
            return "ii"
        except Exception as e:
            logger.debug(str(e), exc_info=True)
            return "ii"

    def test_i(self):
        """Trial IDXREF run using the wedge-based ("i") image selection."""
        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        # set the phi start etc correctly
        blocks = self._index_select_images_i()

        for block in blocks[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in blocks[1:]:
            idxref.add_spot_range(block[0], block[1])

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
        idxref.run()

        return idxref.get_fraction_rmsd_rmsphi()

    def test_ii(self):
        """Trial IDXREF run using the whole-sweep ("ii") image selection."""
        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
        idxref.run()

        return idxref.get_fraction_rmsd_rmsphi()
def spot_xds_to_reflection_file(spot_xds, working_directory):
    """Convert an XDS SPOT.XDS file into a DIALS reflection file.

    Runs the dials ImportXDS wrapper in `working_directory` and returns
    the path of the reflection file it produced.
    """
    xds_importer = ImportXDS()
    xds_importer.set_working_directory(working_directory)
    auto_logfiler(xds_importer)
    xds_importer.set_spot_xds(spot_xds)
    xds_importer.run()
    return xds_importer.get_reflection_filename()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Indexer/XDSIndexerII.py",
"copies": "1",
"size": "13625",
"license": "bsd-3-clause",
"hash": 8029959414767275000,
"line_mean": 34.8552631579,
"line_max": 87,
"alpha_frac": 0.5757798165,
"autogenerated": false,
"ratio": 3.788932146829811,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98628502012183,
"avg_score": 0.00037235242230225485,
"num_lines": 380
} |
"""An rle reader.
Use:
rle_string = rle.string # See rle.string
data = parse_rle(rle_string, xborder = 10, yborder = 10)
data['matrix'] : 2d numpy array matrix.
data['rulestring'] : rule_string in the form "B(somedigits)/S(somedigits)"
data['dimensions'] : {'y' : integer, 'x' : integer}
"""
import re
import numpy as np
def _find_xy(string):
nospace = string.replace(" ", "")
xstring = re.findall('x=\d+', nospace)[0]
x = int(re.findall('\d+', xstring)[0])
ystring = re.findall('y=\d+', nospace)[0]
y = int(re.findall('\d+', ystring)[0])
return y, x
def _find_rule(string):
nospace = string.replace(" ", '')
rulestring = re.findall('[bB]\d+/[sS]\d+', nospace)[0].lower()
return rulestring
def _find_pattern(string):
nospace = string.replace(" ", "")
nospace = nospace.replace(re.findall('x=\d+', nospace)[0], '')
nospace = nospace.replace(re.findall('y=\d+', nospace)[0], '')
norule = nospace.replace(re.findall('b\d+/s\d+', nospace, re.IGNORECASE)[0], '')
nonew = norule.replace("\n", "")
patternstring = re.findall('[bo$\d+]+!', nonew)[0]
return patternstring
def _classify(rle_list):
out_list = list()
sample = ['b', 'o', '$', '!']
spec_index = 0
for i in range(len(rle_list)):
if rle_list[i] in sample:
b = 1
while (rle_list[i-b:i].isdigit()):
b += 1
b -= 1
if not rle_list[i-1:i].isdigit():
out_list.append((1, rle_list[i]))
else:
out_list.append((int(rle_list[i-b:i]), rle_list[i]))
return out_list
def _make_matrix(string):
    """Build the 2d 0/1 cell matrix described by an rle string.

    Returns a numpy integer array of shape (y, x) with live cells set
    to 1.
    """
    dims = _find_xy(string)
    pattern_list = _classify(_find_pattern(string))
    # The np.int alias was removed in NumPy 1.24; the builtin int gives
    # the same default integer dtype on all supported versions.
    matrix = np.zeros(dims, dtype=int)
    xcount = 0
    ycount = 0
    for count, tag in pattern_list:
        if tag == 'o':
            # Run of live cells.
            for _ in range(count):
                matrix[ycount][xcount] = 1
                xcount += 1
        elif tag == 'b':
            # Run of dead cells: just advance the column.
            xcount += count
        elif tag == '$':
            # End-of-row marker(s): move down and reset the column.
            ycount += count
            xcount = 0
        elif tag == '!':
            # End of pattern.
            break
    return matrix
def _frame(arr, xborder=0, yborder=0):
new_arr = list(arr)
xframe = [0] * xborder
for i in range(len(arr)):
new_arr[i] = xframe + arr[i] + xframe
yframe = [0] * (len(new_arr[0]))
for i in range(yborder):
new_arr.insert(0, yframe)
new_arr.append(yframe)
return new_arr
def parse_rle(string, xborder=5, yborder=5):
    """Parse an rle string into matrix, dimensions and rulestring.

    The returned matrix is padded with a zero border of the requested
    widths; dimensions describe the padded matrix.
    """
    cells = _make_matrix(string).tolist()
    framed = np.array(_frame(cells, xborder, yborder))
    shape = {'y': len(framed), 'x': len(framed[0])}
    return {"matrix": framed, "dimensions": shape,
            "rulestring": _find_rule(string)}
# Sample RLE input (the period-60 "twogun" pattern), used as rle.string
# per the module docstring.  NOTE(review): this name shadows the stdlib
# `string` module inside this file.
string = """
#N twogun
#O V. Everett Boyer and Doug Petrie
#C The smallest known period-60 gun; it uses two copies of the Gosper
#C glider gun.
x = 39, y = 27, rule = b3/s23
27bo11b$25bobo11b$15b2o6b2o12b2o$14bo3bo4b2o12b2o$3b2o8bo5bo3b2o14b$3b
2o8bo3bob2o4bobo11b$13bo5bo7bo11b$14bo3bo20b$15b2o22b$26bo12b$27b2o10b
$26b2o11b4$21b2o16b$9bobo10b2o15b$9bo2bo8bo17b$2o10b2o11b2o12b$2o8bo3b
2o8bobo12b$5b2o5b2o9bo6b2o7b$4bo4bo2bo10bo2bo2bo2bo6b$9bobo11bo6b3o6b$
24bobo5b3o4b$25b2o6bobo3b$35bo3b$35b2o!
"""
| {
"repo_name": "JacobPaulette/PyLife",
"path": "rle.py",
"copies": "1",
"size": "3429",
"license": "mit",
"hash": 6763275576439078000,
"line_mean": 28.5603448276,
"line_max": 84,
"alpha_frac": 0.5780110819,
"autogenerated": false,
"ratio": 2.6397228637413397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8639230886516698,
"avg_score": 0.015700611824928174,
"num_lines": 116
} |
"""An RSS feed mining tool developed for Full Fact https://fullfact.org/"""
import argparse
import pathlib
import sys
import time
from xml.etree.ElementTree import ElementTree
import yaml
from tinydb import TinyDB
from . import PKG_DIR
from .content import fetch_article_content
from .feeds import fetch_new_entries
from .haystack import new_haystack, write_to_haystack
from .nlp import paragraphs_to_sentences
from .storage import insert_entry, serialization
def read_config(filename):
    """Parse the YAML configuration file.

    Returns (data_sources, config): data_sources is a list of feed
    dictionaries (each with its mapping key stored under 'name'), and
    config is the general settings mapping with 'output-folder' converted
    to a pathlib.Path.
    """
    # safe_load: the config is plain mappings/lists, and yaml.load without
    # an explicit Loader is deprecated (and a TypeError on PyYAML >= 6).
    # The with-block also closes the file, which the previous
    # yaml.load(stream=open(filename)) call leaked.
    with open(filename) as stream:
        content = yaml.safe_load(stream)

    feeds = content['feeds']
    config = content['config']

    # Use path objects
    config['output-folder'] = pathlib.Path(config['output-folder'])

    # fix user-agent header if lowercase
    if 'user-agent' in config['request-headers']:
        config['request-headers']['User-Agent'] = config['request-headers']['user-agent']
        del config['request-headers']['user-agent']

    # convert feeds into list of dictionaries
    data_sources = []
    for k in feeds.keys():
        s = feeds[k]
        s.update({'name': k})
        data_sources.append(s)

    return data_sources, config
def main(args):
    """Run the miner: read config, fetch feed entries, write haystack XML.

    `args` comes from get_parser(); either a config file path must be
    given, or --generate-config to emit an example config and exit.
    """
    if not args.config_filepath:
        if not args.generate_config:
            raise ValueError('Expected arguments undefined.')
        else:
            # Copy the packaged example config into the working
            # directory, then exit without mining anything.
            reference_config_path = PKG_DIR / 'example_config.yml'
            content = reference_config_path.read_text()
            with open('example_config.yml', 'w') as output_config:
                output_config.write(content)
            sys.exit()

    # Read YAML file for config and feeds
    data_sources, config = read_config(args.config_filepath)

    # Initialise Data Storage
    name = config['meta-db-name'] + '.json'
    db = TinyDB(name, storage=serialization)

    # Batch timestamp; reused in the output filename below.
    now = time.strftime('%Y-%m-%d-%H:%M', time.localtime())
    haystack = new_haystack(batch=now, id='RSS-Feeds')

    for source in data_sources:
        print(source['name'] + ' - ', end='')
        sys.stdout.flush()

        # Try to get more entries
        entries = fetch_new_entries(source=source, db=db)

        if entries:
            print('Fetching content ', end='')
            sys.stdout.flush()
            for ent in entries:
                # Fetch Content
                paragraphs = fetch_article_content(ent, content_tag=source['content-tag'],
                                                   header=config['request-headers'])
                # Split into individual sentences
                sentences = paragraphs_to_sentences(paragraphs)
                if sentences:
                    write_to_haystack(haystack, source=source, entity=ent, sentences=sentences)
                # Store metadata in data base
                insert_entry(ent, db)
                # Rate-limit requests to the remote server.
                time.sleep(0.5)
                print('.', end='')
                sys.stdout.flush()
        print('')
        sys.stdout.flush()

    # Write output
    tree = ElementTree(haystack)
    output_filename = 'feeds{date}.xml'.format(date=now)
    output_dir = config['output-folder']
    output_dir.mkdir(exist_ok=True)
    filepath = output_dir / output_filename
    tree.write(filepath.as_posix())
    print('Wrote haystack to {}'.format(filepath.as_posix()))
def get_parser():
    """Build and return the command-line argument parser."""
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument(
        '-c', '--config', dest='config_filepath',
        help='Filename for the configuration yaml file to use.')
    cli.add_argument(
        '-g', '--generate-config', dest='generate_config',
        action='store_true',
        help='Generate an example config file in the current directory for further editing')
    return cli
def cli_entry_point():
    """This is the function that gets executed first from command line"""
    # Parse Arguments
    args = get_parser().parse_args()
    # Run program
    main(args)


if __name__ == '__main__':
    # If executed directly, this script will kick off the same entry point
    # as if the command line utility was invoked
    cli_entry_point()
| {
"repo_name": "MrKriss/full-fact-rss-miner",
"path": "package/src/rss_miner/main.py",
"copies": "1",
"size": "4057",
"license": "mit",
"hash": 4597383072384571000,
"line_mean": 28.6131386861,
"line_max": 95,
"alpha_frac": 0.6184372689,
"autogenerated": false,
"ratio": 4.1062753036437245,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004025624807776132,
"num_lines": 137
} |
"""
Sets up the application to run.
"""
##########################################################################
## Imports
##########################################################################
from ingestor import *
from generate import *
from twisted.internet import reactor
##########################################################################
## Runables
##########################################################################
DPORT = 1025 # Default Port
class App(object):
    """Server-side wrapper: serves ingestor connections over TCP."""

    # Protocol factory used to build a handler per incoming connection.
    Factory = IngestorFactory

    def __init__(self, port=DPORT):
        self.port = port

    def run(self):
        # Bind the factory to the port, then hand control to the event loop.
        reactor.listenTCP(self.port, self.Factory())
        reactor.run()
class Connection(object):
    """Client-side wrapper: connects to a running App instance."""

    # Subclasses must supply a concrete protocol factory.
    Factory = None

    def __init__(self, host, port=DPORT):
        self.host = host
        self.port = port

    def run(self):
        # Dial the server, then hand control to the Twisted event loop.
        reactor.connectTCP(self.host, self.port, self.Factory())
        reactor.run()
class Generator(Connection):
    """
    A class wrapper for the Sensor simulator
    """
    # Client-side protocol factory that emits simulated sensor readings.
    Factory = GeneratorFactory
if __name__ == '__main__':
    # Start the ingestion server on the default port when executed directly.
    app = App()
    app.run()
| {
"repo_name": "bbengfort/ansible",
"path": "ansible/app.py",
"copies": "1",
"size": "1496",
"license": "mit",
"hash": 8238658569223519000,
"line_mean": 21,
"line_max": 74,
"alpha_frac": 0.4953208556,
"autogenerated": false,
"ratio": 4.2259887005649714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5221309556164971,
"avg_score": null,
"num_lines": null
} |
"""Ansible callback plugin to print a nicely formatted summary of failures.
The file / module name is prefixed with `zz_` to make this plugin be loaded last
by Ansible, thus making its output the last thing that users see.
"""
from collections import defaultdict
import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
class CallbackModule(CallbackBase):
    """Aggregates failed task results and prints a summary after the run."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'failure_summary'
    CALLBACK_NEEDS_WHITELIST = False

    def __init__(self):
        super(CallbackModule, self).__init__()
        self.__task_failures = []
        self.__playbook_file = ''

    def v2_playbook_on_start(self, playbook):
        super(CallbackModule, self).v2_playbook_on_start(playbook)
        # pylint: disable=protected-access; Ansible gives us no public API to
        # get the file name of the current playbook from a callback plugin.
        self.__playbook_file = playbook._file_name

    def v2_runner_on_failed(self, result, ignore_errors=False):
        super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
        if ignore_errors:
            return
        self.__task_failures.append(result)

    def v2_playbook_on_stats(self, stats):
        super(CallbackModule, self).v2_playbook_on_stats(stats)
        # pylint: disable=broad-except; capturing exceptions broadly is
        # intentional, to isolate arbitrary failures in this callback plugin.
        try:
            if self.__task_failures:
                summary = failure_summary(self.__task_failures, self.__playbook_file)
                self._display.display(summary)
        except Exception:
            warning = stringc(
                u'An error happened while generating a summary of failures:\n'
                u'{}'.format(traceback.format_exc()), C.COLOR_WARN)
            self._display.v(warning)
def failure_summary(failures, playbook):
    """Return a summary of failed tasks, including details on health checks."""
    if not failures:
        return u''
    # NOTE: because we don't have access to task_vars from callback plugins, we
    # store the playbook context in the task result when the
    # openshift_health_check action plugin is used, and we use this context to
    # customize the error message.
    # pylint: disable=protected-access; Ansible gives us no sufficient public
    # API on TaskResult objects.
    context = None
    for failure in failures:
        candidate = failure._result.get('playbook_context')
        if candidate:
            context = candidate
            break
    deduped = deduplicate_failures([failure_to_dict(failure) for failure in failures])
    lines = [u'', u'', u'Failure summary:', u'']
    width = len(str(len(deduped)))
    first_line_format = u' {{:>{width}}}. '.format(width=width)
    first_line_len = len(first_line_format.format(0))
    follow_indent = u' ' * first_line_len
    follow_extra_indent = u' ' * (first_line_len + 10)
    for number, failure in enumerate(deduped, 1):
        entries = format_failure(failure)
        lines.append(u'\n{}{}'.format(first_line_format.format(number), entries[0]))
        for entry in entries[1:]:
            entry = entry.replace(u'\n', u'\n' + follow_extra_indent)
            lines.append(u'{}{}'.format(follow_indent, entry))
    failed_checks = set()
    for failure in deduped:
        failed_checks.update(name for name, _ in failure['checks'])
    if failed_checks:
        lines.append(check_failure_footer(failed_checks, context, playbook))
    return u'\n'.join(lines)
def failure_to_dict(failed_task_result):
    """Extract information out of a failed TaskResult into a dict.

    The intent is to transform a TaskResult object into something easier to
    manipulate. TaskResult is ansible.executor.task_result.TaskResult.
    """
    # pylint: disable=protected-access; Ansible gives us no sufficient public
    # API on TaskResult objects.
    result_data = failed_task_result._result
    failed_checks = []
    for name, check in sorted(result_data.get('checks', {}).items()):
        if check.get('failed'):
            failed_checks.append((name, check.get('msg', FAILED_NO_MSG)))
    return {
        'host': failed_task_result._host.get_name(),
        'play': play_name(failed_task_result._task),
        'task': failed_task_result.task_name,
        'msg': result_data.get('msg', FAILED_NO_MSG),
        'checks': tuple(failed_checks),
    }
def play_name(obj):
    """Given a task or block, return the name of its parent play.

    This is loosely inspired by ansible.playbook.base.Base.dump_me.
    """
    # pylint: disable=protected-access; Ansible gives us no sufficient public
    # API to implement this.
    # Walk up the _parent chain iteratively until a node carrying _play is hit.
    while obj:
        if hasattr(obj, '_play'):
            return obj._play.get_name()
        obj = getattr(obj, '_parent')
    return ''
def deduplicate_failures(failures):
    """Group together similar failures from different hosts.

    Returns a new list where identical failures from different hosts are
    merged into one entry whose 'host' is a sorted tuple of host names. The
    relative order of failures is preserved. If the failure contents are
    unhashable, the original list is returned unchanged.
    """
    def signature(failure):
        # Everything except the host identifies a failure for grouping.
        return tuple(sorted((k, v) for k, v in failure.items() if k != 'host'))

    groups = defaultdict(list)
    for failure in failures:
        key = signature(failure)
        try:
            groups[key].append(failure)
        except TypeError:
            # Unhashable failure contents: give up and return input untouched.
            return failures
    deduped = []
    for failure in failures:
        key = signature(failure)
        if key not in groups:
            continue
        members = groups.pop(key)
        failure['host'] = tuple(sorted(member['host'] for member in members))
        deduped.append(failure)
    return deduped
def format_failure(failure):
    """Return a list of pretty-formatted text entries describing a failure, including
    relevant information about it. Expect that the list of text entries will be joined
    by a newline separator when output to the user."""
    hosts = failure['host']
    if isinstance(hosts, string_types):
        host_text = hosts
    else:
        host_text = u', '.join(hosts)
    fields = [
        (u'Hosts', host_text),
        (u'Play', failure['play']),
        (u'Task', failure['task']),
        (u'Message', stringc(failure['msg'], C.COLOR_ERROR)),
    ]
    checks = failure['checks']
    if checks:
        fields.append((u'Details', format_failed_checks(checks)))
    row_format = '{:10}{}'
    return [row_format.format(label + u':', value) for label, value in fields]
def format_failed_checks(checks):
    """Return pretty-formatted text describing checks that failed."""
    rendered = u'\n\n'.join(
        u'check "{}":\n{}'.format(name, message) for name, message in checks)
    return stringc(rendered, C.COLOR_ERROR)
def check_failure_footer(failed_checks, context, playbook):
    """Return a textual explanation about checks depending on context.

    The purpose of specifying context is to vary the output depending on what
    the user was expecting to happen (based on which playbook they ran). The
    only use currently is to vary the message depending on whether the user was
    deliberately running checks or was trying to install/upgrade and checks are
    just included. Other use cases may arise.

    :param failed_checks: iterable of failed check names
    :param context: playbook context string (e.g. 'health'), or None/other
    :param playbook: file name of the playbook that was run
    :return: multi-line unicode string; starts with a blank line
    """
    checks = ','.join(sorted(failed_checks))
    summary = [u'']
    if context in ['pre-install', 'health', 'adhoc']:
        # User was expecting to run checks, less explanation needed.
        summary.extend([
            u'You may configure or disable checks by setting Ansible '
            u'variables. To disable those above, set:',
            u' openshift_disable_check={checks}'.format(checks=checks),
            u'Consult check documentation for configurable variables.',
        ])
    else:
        # User may not be familiar with the checks, explain what checks are in
        # the first place.
        # BUGFIX: the original concatenation produced "results,explicitly"
        # (missing space between the adjacent string literals).
        summary.extend([
            u'The execution of "{playbook}" includes checks designed to fail '
            u'early if the requirements of the playbook are not met. One or '
            u'more of these checks failed. To disregard these results, '
            u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook),
            u' openshift_disable_check={checks}'.format(checks=checks),
            u'Failing check names are shown in the failure details above. '
            u'Some checks may be configurable by variables if your requirements '
            u'are different from the defaults; consult check documentation.',
        ])
    summary.append(
        u'Variables can be set in the inventory or passed on the command line '
        u'using the -e flag to ansible-playbook.'
    )
    return u'\n'.join(summary)
| {
"repo_name": "ivanhorvath/openshift-tools",
"path": "openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py",
"copies": "14",
"size": "9289",
"license": "apache-2.0",
"hash": 8591786346551739000,
"line_mean": 38.5276595745,
"line_max": 99,
"alpha_frac": 0.6544299709,
"autogenerated": false,
"ratio": 4.18046804680468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005220465808935155,
"num_lines": 235
} |
"""Ansible callback plugin to print a summary completion status of installation
phases.
"""
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
DOCUMENTATION = '''
'''
EXAMPLES = '''
---------------------------------------------
Example display of a successful playbook run:
PLAY RECAP *********************************************************************
master01.example.com : ok=158 changed=16 unreachable=0 failed=0
node01.example.com : ok=469 changed=74 unreachable=0 failed=0
node02.example.com : ok=157 changed=17 unreachable=0 failed=0
localhost : ok=24 changed=0 unreachable=0 failed=0
INSTALLER STATUS ***************************************************************
Initialization : Complete
etcd Install : Complete
NFS Install : Not Started
Load balancer Install : Not Started
Master Install : Complete
Master Additional Install : Complete
Node Install : Complete
GlusterFS Install : Not Started
Hosted Install : Complete
Metrics Install : Not Started
Logging Install : Not Started
Service Catalog Install : Not Started
-----------------------------------------------------
Example display if a failure occurs during execution:
INSTALLER STATUS ***************************************************************
Initialization : Complete
etcd Install : Complete
NFS Install : Not Started
Load balancer Install : Not Started
Master Install : In Progress
This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
Master Additional Install : Not Started
Node Install : Not Started
GlusterFS Install : Not Started
Hosted Install : Not Started
Metrics Install : Not Started
Logging Install : Not Started
Service Catalog Install : Not Started
'''
class CallbackModule(CallbackBase):
    """This callback summarizes installation phase status."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'installer_checkpoint'
    CALLBACK_NEEDS_WHITELIST = False

    def __init__(self):
        super(CallbackModule, self).__init__()

    def v2_playbook_on_stats(self, stats):
        """Print an 'INSTALLER STATUS' banner summarizing each install phase.

        Phase status values are read from Ansible custom stats under the
        '_run' key; phases absent from the stats are shown as 'Not Started'.
        """
        # Set the order of the installer phases
        installer_phases = [
            'installer_phase_initialize',
            'installer_phase_etcd',
            'installer_phase_nfs',
            'installer_phase_loadbalancer',
            'installer_phase_master',
            'installer_phase_master_additional',
            'installer_phase_node',
            'installer_phase_glusterfs',
            'installer_phase_hosted',
            'installer_phase_metrics',
            'installer_phase_logging',
            'installer_phase_servicecatalog',
            'installer_phase_management',
        ]
        # Define the attributes of the installer phases
        phase_attributes = {
            'installer_phase_initialize': {
                'title': 'Initialization',
                'playbook': ''
            },
            'installer_phase_etcd': {
                'title': 'etcd Install',
                'playbook': 'playbooks/byo/openshift-etcd/config.yml'
            },
            'installer_phase_nfs': {
                'title': 'NFS Install',
                'playbook': 'playbooks/byo/openshift-nfs/config.yml'
            },
            'installer_phase_loadbalancer': {
                'title': 'Load balancer Install',
                'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml'
            },
            'installer_phase_master': {
                'title': 'Master Install',
                'playbook': 'playbooks/byo/openshift-master/config.yml'
            },
            'installer_phase_master_additional': {
                'title': 'Master Additional Install',
                'playbook': 'playbooks/byo/openshift-master/additional_config.yml'
            },
            'installer_phase_node': {
                'title': 'Node Install',
                'playbook': 'playbooks/byo/openshift-node/config.yml'
            },
            'installer_phase_glusterfs': {
                'title': 'GlusterFS Install',
                'playbook': 'playbooks/byo/openshift-glusterfs/config.yml'
            },
            'installer_phase_hosted': {
                'title': 'Hosted Install',
                'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml'
            },
            'installer_phase_metrics': {
                'title': 'Metrics Install',
                'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
            },
            'installer_phase_logging': {
                'title': 'Logging Install',
                'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
            },
            'installer_phase_servicecatalog': {
                'title': 'Service Catalog Install',
                'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
            },
            'installer_phase_management': {
                'title': 'Management Install',
                'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml'
            },
        }
        # Find the longest phase title
        max_column = 0
        for phase in phase_attributes:
            max_column = max(max_column, len(phase_attributes[phase]['title']))
        if '_run' in stats.custom:
            self._display.banner('INSTALLER STATUS')
            for phase in installer_phases:
                phase_title = phase_attributes[phase]['title']
                # Pad so the status column lines up across all phases.
                padding = max_column - len(phase_title) + 2
                if phase in stats.custom['_run']:
                    phase_status = stats.custom['_run'][phase]
                    self._display.display(
                        '{}{}: {}'.format(phase_title, ' ' * padding, phase_status),
                        color=self.phase_color(phase_status))
                    # An incomplete phase (other than initialization) can be
                    # resumed by re-running that phase's own playbook.
                    if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
                        self._display.display(
                            '\tThis phase can be restarted by running: {}'.format(
                                phase_attributes[phase]['playbook']))
                else:
                    # Phase was not found in custom stats
                    self._display.display(
                        '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'),
                        color=C.COLOR_SKIP)
            self._display.display("", screen_only=True)

    def phase_color(self, status):
        """ Return color code for installer phase"""
        valid_status = [
            'In Progress',
            'Complete',
        ]
        if status not in valid_status:
            self._display.warning('Invalid phase status defined: {}'.format(status))
        if status == 'Complete':
            phase_color = C.COLOR_OK
        elif status == 'In Progress':
            # 'In Progress' at playbook end means the phase did not finish,
            # hence the error color.
            phase_color = C.COLOR_ERROR
        else:
            phase_color = C.COLOR_WARN
        return phase_color
| {
"repo_name": "rhdedgar/openshift-tools",
"path": "openshift/installer/vendored/openshift-ansible-3.7.0/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py",
"copies": "1",
"size": "7241",
"license": "apache-2.0",
"hash": -2344724517227394000,
"line_mean": 37.7219251337,
"line_max": 95,
"alpha_frac": 0.531004005,
"autogenerated": false,
"ratio": 4.708062418725618,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009484792005552109,
"num_lines": 187
} |
"""Ansible callback plugin to print a summary completion status of installation
phases.
"""
from datetime import datetime
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
    """This callback summarizes installation phase status."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'installer_checkpoint'
    CALLBACK_NEEDS_WHITELIST = False

    def __init__(self):
        super(CallbackModule, self).__init__()

    def v2_playbook_on_stats(self, stats):
        """Print an 'INSTALLER STATUS' banner from phase data in custom stats.

        Each entry under stats.custom['_run'] is expected to carry 'status',
        and optionally 'title', 'start'/'end' timestamps, 'playbook' and
        'message' keys recorded by the playbooks.
        """
        phases = stats.custom['_run']
        # Find the longest phase title
        max_column = 0
        for phase in phases:
            max_column = max(max_column, len(phases[phase].get('title', '')))
        # Sort the phases by start time
        ordered_phases = sorted(phases, key=lambda x: (phases[x].get('start', 0)))
        self._display.banner('INSTALLER STATUS')
        # Display status information for each phase
        for phase in ordered_phases:
            phase_title = phases[phase].get('title', '')
            # Pad so the status column lines up across all phases.
            padding = max_column - len(phase_title) + 2
            phase_status = phases[phase]['status']
            phase_time = phase_time_delta(phases[phase])
            if phase_title:
                self._display.display(
                    '{}{}: {} ({})'.format(phase_title, ' ' * padding, phase_status, phase_time),
                    color=self.phase_color(phase_status))
                # If the phase is not complete, tell the user what playbook to rerun
                if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
                    self._display.display(
                        '\tThis phase can be restarted by running: {}'.format(
                            phases[phase]['playbook']))
                # Display any extra messages stored during the phase
                if 'message' in phases[phase]:
                    self._display.display(
                        '\t{}'.format(
                            phases[phase]['message']))

    def phase_color(self, status):
        """ Return color code for installer phase"""
        valid_status = [
            'In Progress',
            'Complete',
        ]
        if status not in valid_status:
            self._display.warning('Invalid phase status defined: {}'.format(status))
        if status == 'Complete':
            phase_color = C.COLOR_OK
        elif status == 'In Progress':
            # 'In Progress' at playbook end means the phase did not finish,
            # hence the error color.
            phase_color = C.COLOR_ERROR
        else:
            phase_color = C.COLOR_WARN
        return phase_color
def phase_time_delta(phase):
    """Return the elapsed time between a phase's start and end as a string.

    Timestamps are expected in '%Y%m%d%H%M%SZ' form; an empty string is
    returned when no start time was recorded.
    """
    start_stamp = phase.get('start')
    if not start_stamp:
        return ''
    time_format = '%Y%m%d%H%M%SZ'
    started = datetime.strptime(start_stamp, time_format)
    if 'end' in phase:
        finished = datetime.strptime(phase['end'], time_format)
    else:
        # The phase failed so set the end time to now
        finished = datetime.now()
    return str(finished - started).split(".")[0]  # Trim microseconds
| {
"repo_name": "jwhonce/openshift-ansible",
"path": "roles/installer_checkpoint/callback_plugins/installer_checkpoint.py",
"copies": "3",
"size": "3112",
"license": "apache-2.0",
"hash": -4389648990408254000,
"line_mean": 34.7701149425,
"line_max": 97,
"alpha_frac": 0.5832262211,
"autogenerated": false,
"ratio": 4.352447552447552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6435673773547552,
"avg_score": null,
"num_lines": null
} |
"""
A blocking client that can read data from the Twisted Server
"""
##########################################################################
## Imports
##########################################################################
import json
import socket
NL = "\r\n"
class AnsibleConnection(object):
    """Blocking TCP client that reads newline-delimited JSON from the server."""

    def __init__(self, host, port, timeout=3):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.connect()

    def connect(self):
        # Open a TCP stream and apply the read timeout to all recv calls.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        self.sock.settimeout(self.timeout)

    def close(self):
        self.sock.close()

    def fetch(self):
        """Return one decoded JSON message, or None if the read times out."""
        buffered = ""
        # Accumulate chunks until a complete NL-terminated message arrives.
        while NL not in buffered:
            try:
                buffered += self.sock.recv(1024)
            except socket.timeout:
                return None
        return json.loads(buffered)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
connect = AnsibleConnection
if __name__ == '__main__':
    # Demo (Python 2): fetch and pretty-print ten messages from a local server.
    with connect('localhost', 1025) as conn:
        for x in xrange(0, 10):
            print json.dumps(conn.fetch(), indent=2)
| {
"repo_name": "bbengfort/ansible",
"path": "ansible/client.py",
"copies": "1",
"size": "1637",
"license": "mit",
"hash": -8029701579175442000,
"line_mean": 23.4328358209,
"line_max": 74,
"alpha_frac": 0.5369578497,
"autogenerated": false,
"ratio": 4.022113022113022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059070871813022,
"avg_score": null,
"num_lines": null
} |
"""
Data Generators - simulates incoming data from sensors.
"""
##########################################################################
## Imports
##########################################################################
import time
import random
import string
from ingestor import get_timestamp
from twisted.protocols import basic
from twisted.internet import reactor, protocol
from twisted.internet.task import deferLater
from twisted.internet.protocol import ClientFactory
from twisted.internet.error import ReactorNotRunning
##########################################################################
## Generator Protocol
##########################################################################
def random_action():
    """Return a random three-letter action code whose middle letter is a vowel."""
    first = random.choice(string.ascii_uppercase)
    middle = random.choice('AEIOU')
    last = random.choice(string.ascii_uppercase)
    return ''.join((first, middle, last))
class GeneratorClient(basic.LineReceiver):
    """Simulated sensor: sends an "ACTION value" line roughly once per second."""

    def __init__(self, action=None, **kwargs):
        # Random three-letter action code unless one is supplied.
        self.action = action or random_action()
        # Starting sensor value in [0, 100).
        self.value = random.random() * 100

    def connectionMade(self):
        print "[%s] connection made" % get_timestamp()
        self.generate()

    def dataReceived(self, data):
        """
        Don't do anything when data is received
        """
        pass

    def defer(self):
        # Schedule the next generate() call one second from now.
        defer = deferLater(reactor, 1, lambda: None)
        defer.addErrback(self.onerror)
        defer.addCallback(self.generate)
        return defer

    def onerror(self, failure):
        # Only KeyboardInterrupt is absorbed here; other failures re-raise.
        failure.trap(KeyboardInterrupt)
        self.loseConnection()

    def generate(self, increment=None):
        # Random walk: apply a signed random step, send it, then reschedule.
        increment = increment or random.random() * 10
        increment = increment * -1 if random.randrange(2) > 0 else increment
        self.value += increment
        self.send_action()
        self.defer()

    def send_action(self):
        # Emit the current reading as an "ACTION value" line.
        line = "%s %0.3f" % (self.action, self.value)
        print "[%s] sending: \"%s\"" % (get_timestamp(), line)
        self.sendLine(line)
class GeneratorFactory(ClientFactory):
    """Client factory that stops the reactor once the connection ends."""

    def buildProtocol(self, addr):
        return GeneratorClient()

    def clientConnectionFailed(self, connector, reason):
        self.shutdown()

    def clientConnectionLost(self, connector, reason):
        self.shutdown()

    def shutdown(self):
        print "[%s] connection lost" % get_timestamp()
        try:
            reactor.stop()
        except ReactorNotRunning:
            # Reactor already stopped (e.g. both failed and lost callbacks fired).
            pass
if __name__ == '__main__':
    # Connect a simulated sensor to a local ingestion server.
    factory = GeneratorFactory()
    reactor.connectTCP('127.0.0.1', 1025, factory)
    reactor.run()
| {
"repo_name": "bbengfort/ansible",
"path": "ansible/generate.py",
"copies": "1",
"size": "2820",
"license": "mit",
"hash": 4219393817898404000,
"line_mean": 27.2,
"line_max": 76,
"alpha_frac": 0.5840425532,
"autogenerated": false,
"ratio": 4.318529862174579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5402572415374579,
"avg_score": null,
"num_lines": null
} |
"""
The Basic LineReceiver Protocol for ingesting action predicates.
Right now, just implementing websockets using Twisted...
"""
##########################################################################
## Imports
##########################################################################
import json
import time
from datetime import datetime
from collections import defaultdict
from twisted.protocols import basic
from twisted.internet.protocol import Factory
HISTORY = 10
def get_timestamp(fmt="%Y-%m-%d %H:%M:%S"):
    """Render the current local time using the given strftime format."""
    now = datetime.now()
    return now.strftime(fmt)
def parse_action(line):
    """Split an "ACTION value" line into an (ACTION, float(value)) pair."""
    name, raw_value = line.split()
    return name.upper(), float(raw_value)
class Ingestor(basic.LineReceiver):
    """Line protocol: ingests "ACTION value" lines and rebroadcasts full state."""

    def __init__(self, factory):
        # Shared factory holds the client list and per-action value history.
        self.factory = factory

    def connectionMade(self):
        print "[%s] client connected" % get_timestamp()
        self.factory.clients.append(self)
        # Send the newcomer the current state snapshot immediately.
        self.sendLine(self.json_state())

    def connectionLost(self, reason):
        print "[%s] client disconnected" % get_timestamp()
        if self in self.factory.clients:
            self.factory.clients.remove(self)

    def lineReceived(self, line):
        # NOTE(review): "recieved" is a typo in this log message; left as-is.
        print "[%s] recieved: \"%s\"" % (get_timestamp(), line)
        action, value = parse_action(line)
        self.factory.actions[action].append({'x': time.time(), 'y': value})
        # Cap the per-action history at the most recent HISTORY samples.
        if len(self.factory.actions[action]) > HISTORY:
            self.factory.actions[action] = self.factory.actions[action][-1 * HISTORY:]
        self.broadcast()

    def broadcast(self, message=None):
        # Fan the message (default: full state) out to every OTHER client.
        message = message or self.json_state()
        for protocol in self.factory.clients:
            if protocol != self:
                protocol.sendLine(message)

    def json_state(self):
        # Serialize the complete action history as JSON.
        return json.dumps(self.factory.actions)
class IngestorFactory(Factory):
    """Builds Ingestor protocols and holds the shared client/action state."""

    def __init__(self):
        # Connected protocols; each Ingestor appends/removes itself here.
        self.clients = []
        # Rolling history of {'x': timestamp, 'y': value} samples per action.
        self.actions = defaultdict(list)

    def buildProtocol(self, addr):
        return Ingestor(self)
| {
"repo_name": "bbengfort/ansible",
"path": "ansible/ingestor.py",
"copies": "1",
"size": "2295",
"license": "mit",
"hash": 4954352241896145000,
"line_mean": 28.0506329114,
"line_max": 86,
"alpha_frac": 0.6209150327,
"autogenerated": false,
"ratio": 3.9365351629502574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5057450195650257,
"avg_score": null,
"num_lines": null
} |
# Ansible lookup plugin for getting the first available file
# (c) 2015,2016 David Lundgren <dlundgren@syberisle.net>
# For each item, if the file exists along any of the regular search paths, the first entry found is returned.
# This operates differently from the file or found-file plugins as it is not an error if the file is not found.
DOCUMENTATION = """
author: David Lundgren
lookup: vars_files
options:
lookup_vars_paths:
type: list
default: []
ini:
- key: lookup_vars_paths
section: defaults
yaml:
key: defaults.lookup_vars_paths
"""
import os
from ansible import utils
from ansible import constants as C
from ansible.plugins.lookup import LookupBase
# ansible 2.4: newer Ansible exposes plugin-doc helpers; use them to register
# this plugin's configurable options from the DOCUMENTATION string above.
try:
    from ansible.plugins import get_plugin_class
    from ansible.parsing.plugin_docs import read_docstring

    # load the definitions (resolve .pyc -> .py so the source text is read
    # even when this module was loaded from bytecode)
    dstring = read_docstring(__file__.replace('.pyc', '.py'), verbose=False, ignore_errors=False)
    doc = dstring.get('doc', False)
    if doc and isinstance(doc.get('options'), dict):
        C.config.initialize_plugin_configuration_definitions('lookup', 'vars_files', doc['options'])
except Exception:
    # Pre-2.4 Ansible lacks these APIs; skip option registration silently.
    # (Was a bare ``except:`` with a no-op ``None`` statement, which also
    # swallowed KeyboardInterrupt/SystemExit.)
    pass
class LookupModule(LookupBase):
    """Return the first existing ``vars/<term>`` file along the search paths.

    Unlike the ``file``/``first_found`` lookups it is not an error when a term
    matches no file; such terms are simply omitted from the result.
    """

    def run(self, terms, variables=None, **kwargs):
        """For each term, append the first matching path found (if any)."""
        ret = []
        paths = self.get_paths(variables)
        for term in terms:
            for path in paths:
                candidate = os.path.abspath(os.path.join(path, "vars", term))
                if os.path.exists(candidate):
                    ret.append(candidate)
                    break
        return ret

    def get_paths(self, vars):
        """Build the ordered, de-duplicated list of directories to search.

        Order: configured ``lookup_vars_paths`` entries that exist, the file
        currently being parsed (when known), the playbook directory, then the
        loader's basedir.
        """
        paths = []
        basedir = self.get_basedir(vars)
        try:
            # Ansible 2.4
            lookupPaths = C.config.get_config_value('lookup_vars_paths', None, 'lookup', 'vars_files')
        except AttributeError:
            # Ansible 2.3
            lookupPaths = C.get_config(C.p, C.DEFAULTS, 'lookup_vars_paths', None, [], value_type='list')
        except TypeError:
            # Ansible 2.2.x and below
            lookupPaths = C.get_config(C.p, C.DEFAULTS, 'lookup_vars_paths', None, [], islist=True)
        for path in lookupPaths:
            path = utils.path.unfrackpath(path)
            if os.path.exists(path):
                paths.append(path)
        if '_original_file' in vars:
            paths.append(self._loader.path_dwim_relative(basedir, '', vars['_original_file']))
        if 'playbook_dir' in vars:
            paths.append(vars['playbook_dir'])
        paths.append(self._loader.path_dwim(basedir))
        # Drop duplicates while preserving order. (Replaces an O(n^2)
        # side-effecting list comprehension using ``unq.count(i)``.)
        seen = set()
        unique_paths = []
        for path in paths:
            if path not in seen:
                seen.add(path)
                unique_paths.append(path)
        return unique_paths
"repo_name": "dlundgren/ansible-plugins",
"path": "plugins/lookup/overrides/vars_files.py",
"copies": "1",
"size": "2767",
"license": "mit",
"hash": -3830800646878701000,
"line_mean": 34.0379746835,
"line_max": 115,
"alpha_frac": 0.5999277196,
"autogenerated": false,
"ratio": 3.9415954415954415,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006680943782872274,
"num_lines": 79
} |
# {{ ansible_managed }}
# -*- coding: utf-8 -*-
import os
from django.utils.translation import ugettext_lazy as _
from horizon.utils import secret_key
from openstack_dashboard import exceptions
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = False
COMPRESS_OFFLINE = True
# WEBROOT is the location relative to Webserver root
# should end with a slash.
WEBROOT = '/'
#LOGIN_URL = WEBROOT + 'auth/login/'
#LOGOUT_URL = WEBROOT + 'auth/logout/'
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
#ALLOWED_HOSTS = ['horizon.example.com', ]
ALLOWED_HOSTS = ['*']
# Set SSL proxy settings:
# Pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SESSION_ENGINE = "{{ horizon.session_engine }}"
# The absolute path to the directory where message files are collected.
# The message file must have a .json file extension. When the user logins to
# horizon, the message files collected are processed and displayed to the user.
#MESSAGES_PATH=None
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# Versions specified here should be integers or floats, not strings.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be 2.0 or 3.
OPENSTACK_API_VERSIONS = {
# "data-processing": 1.1,
"identity": 3,
# "volume": 2,
# "compute": 2,
}
# Set this to True if running on multi-domain model. When this is enabled, it
# will require user to enter the Domain name in addition to username for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# NOTE: This value must be the ID of the default domain, NOT the name.
# Also, you will most likely have a value in the keystone policy file like this
# "cloud_admin": "rule:admin_required and domain_id:<your domain id>"
# This value must match the domain id specified there.
#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
# Set this to True to enable panels that provide the ability for users to
# manage Identity Providers (IdPs) and establish a set of rules to map
# federation protocol attributes to Identity API attributes.
# This extension requires v3.0+ of the Identity API.
#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False
# Set Console type:
# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
# Set to None explicitly if you want to deactivate the console.
#CONSOLE_TYPE = "AUTO"
# If provided, a "Report Bug" link will be displayed in the site header
# which links to the value of this setting (ideally a URL containing
# information on how to report issues).
#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"
# Show backdrop element outside the modal, do not close the modal
# after clicking on backdrop.
#HORIZON_CONFIG["modal_backdrop"] = "static"
{% if horizon.password_validator.enabled|bool %}
# Specify a regular expression to validate user passwords.
HORIZON_CONFIG["password_validator"] = {
"regex": "{{ horizon.password_validator.regex }}",
"help_text": _("{{ horizon.password_validator.help_text }}"),
}
{% endif %}
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
#HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for forms including the login form and
# the database creation workflow if so desired.
#HORIZON_CONFIG["password_autocomplete"] = "off"
# Setting this to True will disable the reveal button for password fields,
# including on the login form.
#HORIZON_CONFIG["disable_password_reveal"] = False
{% if horizon.customize %}
HORIZON_CONFIG["customization_module"] = "horizon-customization.horizon_customization"
{% endif %}
HORIZON_CONFIG["help_url"] = "http://docs.openstack.org/newton"
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However,
# there may be situations where you would want to set this explicitly, e.g.
# when multiple dashboard instances are distributed on different machines
# (usually behind a load-balancer). Either you have to make sure that a session
# gets all requests routed to the same dashboard instance or you set the same
# SECRET_KEY for all of them.
SECRET_KEY = "{{ secrets.horizon_secret_key }}"
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to login again. To use
# memcached set CACHES to something like
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# },
#}
{% macro memcached_hosts() -%}
{% for host in groups['controller'] -%}
{% if loop.last -%}
'{{ hostvars[host][primary_interface]['ipv4']['address'] }}:{{ memcached.port }}'
{%- else -%}
'{{ hostvars[host][primary_interface]['ipv4']['address'] }}:{{ memcached.port }}',
{%- endif -%}
{% endfor -%}
{% endmacro -%}
CACHES = {
'default': {
'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION' : [
{{ memcached_hosts() }}
]
}
}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
#EMAIL_HOST = 'smtp.my-company.com'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = 'djangomail'
#EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
#AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
#]
OPENSTACK_HOST = "{{ endpoints.main }}"
OPENSTACK_KEYSTONE_URL = "https://%s:{{ endpoints.keystone.port.haproxy_api }}/v{{horizon.keystone_api_version}}" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
SESSION_TIMEOUT = {{ horizon.session_timeout }}
{% if keystone.federation.enabled|bool -%}
OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = True
{% endif %}
#TODO: parameterize strings that get shown on the login page
{% if keystone.federation.enabled|bool and (keystone.federation.sp.oidc.enabled|bool or keystone.federation.sp.saml.horizon_enabled|bool) -%}
WEBSSO_ENABLED = True
WEBSSO_CHOICES = (
("credentials", _("{{ horizon.websso.choices.credentials }}")),
{% if keystone.federation.sp.oidc.enabled|bool %}
{% for sp in keystone.federation.sp.oidc.providers_info %}
("{{ sp.idp_name }}-OIDC-IDP", _("{{ sp.horizon_name }}")),
{% endfor %}
{% endif %}
{% if keystone.federation.sp.saml.horizon_enabled|bool %}
{% for sp in keystone.federation.sp.saml.providers %}
{% if sp.enabled|bool and not sp.k2k_enabled %}
("{{ sp.keystone_id }}", _("{{ sp.horizon_name }}")),
{% endif %}
{% endfor %}
{% endif %}
)
{% if keystone.federation.sp.saml.enabled|bool or keystone.federation.sp.oidc.enabled|bool %}
WEBSSO_IDP_MAPPING = {
{% if keystone.federation.sp.oidc.enabled|bool %}
{% for sp in keystone.federation.sp.oidc.providers_info -%}
"{{ sp.idp_name }}-OIDC-IDP": ("{{ sp.idp_name }}", "{{ sp.protocol_name}}"),
{% endfor -%}
{% endif %}
{% if keystone.federation.sp.saml.enabled|bool %}
{% for sp in keystone.federation.sp.saml.providers %}
{% if sp.enabled|bool and not sp.k2k_enabled %}
"{{ sp.keystone_id }}": ("{{ sp.keystone_id }}", "saml2")
{% endif %}
{% endfor %}
{% endif %}
}
{% endif %}
WEBSSO_INITIAL_CHOICE = "credentials"
{% endif -%}
{% if keystone.federation.enabled|bool and keystone.federation.idp.k2k.enabled|bool -%}
# Show K2K provider selection dropdown at login screen.
K2K_SELECTION_AT_LOGIN_ENABLED = True
# Initial choice, should be the idp id.
# If user selects the idp, then k2k authentication is skipped.
K2K_INITIAL_CHOICE = "local"
{% if keystone.federation.idp.k2k.service_providers|length > 0 %}
# Service Provider choices; the value returned should be the SP ID.
K2K_CHOICES = (
("local", _("Identity Provider")),
{% for sp in keystone.federation.idp.k2k.service_providers %}
{% if not loop.last %}
("{{ sp.id }}", _("Service Provider {{ sp.id }} ")),
{% else %}
("{{ sp.id }}", _("Service Provider {{ sp.id }} "))
{% endif %}
{% endfor %}
)
{% endif %}
{% endif %}
# Disable SSL certificate checks (useful for self-signed certificates):
OPENSTACK_SSL_NO_VERIFY = {{ insecure | default('false') | bool }}
# The CA certificate to use to verify SSL connections
OPENSTACK_SSL_CACERT = '{{ ca_bundle }}'
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
{% if keystone.ldap_domain.enabled|bool %}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = '{{ keystone.ldap_domain.domain }}'
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
{% endif %}
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True,
}
# Setting this to True, will add a new "Retrieve Password" action on instance,
# allowing Admin session password retrieval/decryption.
#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
# The Launch Instance user experience has been significantly enhanced.
# You can choose whether to enable the new launch instance experience,
# the legacy experience, or both. The legacy experience will be removed
# in a future release, but is available as a temporary backup setting to ensure
# compatibility with existing deployments. Further development will not be
# done on the legacy experience. Please report any problems with the new
# experience via the Launchpad tracking system.
#
# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
# determine the experience to enable. Set them both to true to enable
# both.
{% if horizon.legacy_instance_panel|default('True')|bool %}
LAUNCH_INSTANCE_LEGACY_ENABLED = True
{% else %}
LAUNCH_INSTANCE_LEGACY_ENABLED = False
{% endif %}
{% if horizon.nextgen_instance_panel|default('False')|bool %}
LAUNCH_INSTANCE_NG_ENABLED = True
{% else %}
LAUNCH_INSTANCE_NG_ENABLED = False
{% endif %}
# A dictionary of settings which can be used to provide the default values for
# properties found in the Launch Instance modal.
#LAUNCH_INSTANCE_DEFAULTS = {
# 'config_drive': False,
#}
# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
# can_set_mount_point to True will add the option to set the mount point
# from the UI.
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': False,
'requires_keypair': False,
}
{% if swift.enabled|bool %}
# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
# services provided by cinder that are not exposed by its extension API.
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
{% endif %}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': True,
'enable_ipv6': True,
'enable_distributed_router': False,
'enable_ha_router': False,
{% if neutron.lbaas.enabled|bool %}
'enable_lb': True,
{% else %}
'enable_lb': False,
{% endif %}
'enable_firewall': True,
'enable_vpn': True,
'enable_fip_topology_check': True,
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
'profile_support': None,
#'profile_support': 'cisco',
# Set which provider network types are supported. Only the network types
# in this list will be available to choose from when creating a network.
# Network types include local, flat, vlan, gre, and vxlan.
'supported_provider_types': ['*'],
# Set which VNIC types are supported for port binding. Only the VNIC
# types in this list will be available to choose from when creating a
# port.
# VNIC types include 'normal', 'macvtap' and 'direct'.
# Set to empty list or None to disable VNIC type selection.
'supported_vnic_types': ['*'],
}
# The OPENSTACK_HEAT_STACK settings can be used to disable password
# field required while launching the stack.
OPENSTACK_HEAT_STACK = {
'enable_user_pass': False,
}
# Enables upload from remote location
IMAGES_ALLOW_LOCATION = True
HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
#OPENSTACK_IMAGE_BACKEND = {
# 'image_formats': [
# ('', _('Select format')),
# ('aki', _('AKI - Amazon Kernel Image')),
# ('ami', _('AMI - Amazon Machine Image')),
# ('ari', _('ARI - Amazon Ramdisk Image')),
# ('docker', _('Docker')),
# ('iso', _('ISO - Optical Disk Image')),
# ('ova', _('OVA - Open Virtual Appliance')),
# ('qcow2', _('QCOW2 - QEMU Emulator')),
# ('raw', _('Raw')),
# ('vdi', _('VDI - Virtual Disk Image')),
# ('vhd', _('VHD - Virtual Hard Disk')),
# ('vmdk', _('VMDK - Virtual Machine Disk')),
# ],
#}
# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type"),
}
# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
# custom properties should not be displayed in the Image Custom Properties
# table.
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
#SECONDARY_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The size of chunk in bytes for downloading objects from Swift
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 30
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
#CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
#}
# Set this to True to display an 'Admin Password' field on the Change Password
# form to verify that it is indeed the admin logged-in who wants to change
# the password.
#ENFORCE_PASSWORD_CHECK = False
# Modules that provide /auth routes that can be used to handle different types
# of user authentication. Add auth plugins that require extra route handling to
# this list.
#AUTHENTICATION_URLS = [
# 'openstack_auth.urls',
#]
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files.
# Please ensure that your identity policy file matches the one being used on
# your keystone servers. There is an alternate policy file that may be used
# in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
# This file is not included in the Horizon repository by default but can be
# found at
# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \
# policy.v3cloudsample.json
# Having matching policy files on the Horizon and Keystone servers is essential
# for normal operation. This holds true for all services and their policy files.
POLICY_FILES = {
'identity': '/etc/openstack-dashboard/keystone_policy.json',
'compute': '/etc/openstack-dashboard/nova_policy.json',
{% if cinder.enabled|default('False')|bool %}
'volume': '/etc/openstack-dashboard/cinder_policy.json',
{% endif %}
'image': '/etc/openstack-dashboard/glance_policy.json',
{% if heat.enabled|default('False')|bool %}
'orchestration': '/etc/openstack-dashboard/heat_policy.json',
{% endif %}
'network': '/etc/openstack-dashboard/neutron_policy.json',
{% if ceilometer.enabled|default('False')|bool -%}
'telemetry': '/etc/openstack-dashboard/ceilometer_policy.json',
{% endif -%}
}
# TODO: (david-lyle) remove when plugins support adding settings.
# Note: Only used when trove-dashboard plugin is configured to be used by
# Horizon.
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
#TROVE_ADD_USER_PERMS = []
#TROVE_ADD_DATABASE_PERMS = []
# Change this setting to the appropriate list of tuples containing
# a key, label and static directory containing two files:
# _variables.scss and _styles.scss
#AVAILABLE_THEMES = [
# ('default', 'Default', 'themes/default'),
# ('material', 'Material', 'themes/material'),
#]
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_dashboard': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'novaclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'cinderclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'keystoneclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'glanceclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'neutronclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'heatclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'ceilometerclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'swiftclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_auth': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nose.plugins.manager': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
'scss': {
'handlers': ['null'],
'propagate': False,
},
},
}
# 'direction' should not be specified for all_tcp/udp/icmp.
# It is specified in the form.
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': _('All TCP'),
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': _('All UDP'),
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': _('All ICMP'),
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
'from_port': '1433',
'to_port': '1433',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
# Deprecation Notice:
#
# The setting FLAVOR_EXTRA_KEYS has been deprecated.
# Please load extra spec metadata into the Glance Metadata Definition Catalog.
#
# The sample quota definitions can be found in:
# <glance_source>/etc/metadefs/compute-quota.json
#
# The metadata definition catalog supports CLI and API:
# $glance --os-image-api-version 2 help md-namespace-import
# $glance-manage db_load_metadefs <directory_with_definition_files>
#
# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
# TODO: (david-lyle) remove when plugins support settings natively
# Note: This is only used when the Sahara plugin is configured and enabled
# for use in Horizon.
# Indicate to the Sahara data processing service whether or not
# automatic floating IP allocation is in effect. If it is not
# in effect, the user will be prompted to choose a floating IP
# pool for use in their cluster. False by default. You would want
# to set this to True if you were running Nova Networking with
# auto_assign_floating_ip = True.
#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
# The hash algorithm to use for authentication tokens. This must
# match the hash algorithm that the identity server and the
# auth_token middleware are using. Allowed values are the
# algorithms supported by Python's hashlib library.
#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
# Hashing tokens from Keystone keeps the Horizon session data smaller, but it
# doesn't work in some cases when using PKI tokens. Uncomment this value and
# set it to False if using PKI tokens and there are 401 errors due to token
# hashing.
#OPENSTACK_TOKEN_HASH_ENABLED = True
# AngularJS requires some settings to be made available to
# the client side. Some settings are required by in-tree / built-in horizon
# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
# form of ['SETTING_1','SETTING_2'], etc.
#
# You may remove settings from this list for security purposes, but do so at
# the risk of breaking a built-in horizon feature. These settings are required
# for horizon to function properly. Only remove them if you know what you
# are doing. These settings may in the future be moved to be defined within
# the enabled panel configuration.
# You should not add settings to this list for out of tree extensions.
# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
'LAUNCH_INSTANCE_DEFAULTS',
'OPENSTACK_IMAGE_FORMATS']
# Additional settings can be made available to the client side for
# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
# !! Please use extreme caution as the settings are transferred via HTTP/S
# and are not encrypted on the browser. This is an experimental API and
# may be deprecated in the future without notice.
#REST_API_ADDITIONAL_SETTINGS = []
# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
# Scripting (XFS) vulnerability, so this option allows extra security hardening
# where iframes are not used in deployment. Default setting is True.
# For more information see:
# http://tinyurl.com/anticlickjack
#DISALLOW_IFRAME_EMBED = True
{% if openstack_install_method != 'distro' %}
STATIC_ROOT = "{{ horizon.horizon_lib_dir }}/lib/python2.7/site-packages/openstack_dashboard/static"
{% endif %}
| {
"repo_name": "panxia6679/ursula",
"path": "roles/horizon/templates/etc/openstack-dashboard/local_settings.py",
"copies": "1",
"size": "28670",
"license": "mit",
"hash": -680072743102626600,
"line_mean": 35.1994949495,
"line_max": 141,
"alpha_frac": 0.6605859784,
"autogenerated": false,
"ratio": 3.647118687189925,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9730072891297056,
"avg_score": 0.015526354858573763,
"num_lines": 792
} |
# {{ ansible_managed }}
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well
#####################################
# General Configuration #
#####################################
#
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
SECRET_KEY='!!!PLEASE_CHANGE_ME!!!!'
# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]
# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
TIME_ZONE = 'UTC'
# Set the default short date format. See strftime(3) for supported sequences.
#DATE_FORMAT = '%m/%d'
# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.io/"
# Logging
LOG_ROTATION = False
LOG_ROTATION_COUNT = 1
LOG_RENDERING_PERFORMANCE = False
LOG_CACHE_PERFORMANCE = False
# Enable full debug page display on exceptions (Internal Server Error pages)
DEBUG = False
# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
MEMCACHE_HOSTS = ['memcached:11211']
# Metric data and graphs are cached for one minute by default. If defined,
# DEFAULT_CACHE_POLICY is a list of tuples of minimum query time ranges mapped
# to the cache duration for the results. This allows for larger queries to be
# cached for longer periods of times. All times are in seconds. If the policy is
# empty or undefined, all results will be cached for DEFAULT_CACHE_DURATION.
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
#DEFAULT_CACHE_POLICY = [(0, 60), # default is 60 seconds
# (7200, 120), # >= 2 hour queries are cached 2 minutes
# (21600, 180)] # >= 6 hour queries are cached 3 minutes
#MEMCACHE_KEY_PREFIX = 'graphite'
# This lists the memcached options. Default is an empty dict.
# Accepted options depend on the Memcached implementation and the Django version.
# Until Django 1.10, options are used only for pylibmc.
# Starting from 1.11, options are used for both python-memcached and pylibmc.
#MEMCACHE_OPTIONS = { 'socket_timeout': 0.5 }
# Set URL_PREFIX when deploying graphite-web to a non-root location
#URL_PREFIX = '/graphite'
# Graphite uses Django Tagging to support tags in Events. By default each
# tag is limited to 50 characters in length.
#MAX_TAG_LENGTH = 50
# Interval for the Auto-Refresh feature in the Composer, measured in seconds.
AUTO_REFRESH_INTERVAL = 600
#####################################
# Filesystem Paths #
#####################################
#
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'
# Most installs done outside of a separate tree such as /opt/graphite will
# need to change these settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT.
#CONF_DIR = '/opt/graphite/conf'
#STORAGE_DIR = '/opt/graphite/storage'
#STATIC_ROOT = '/opt/graphite/static'
LOG_DIR = '/opt/graphite/storage/log'
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
## Data directories
#
# NOTE: If any directory is unreadable in STANDARD_DIRS it will break metric browsing
#
#CERES_DIR = '/opt/graphite/storage/ceres'
#WHISPER_DIR = '/opt/graphite/storage/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#
# Data directories using the "Standard" metrics finder (i.e. not Ceres)
#STANDARD_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#####################################
# Email Configuration #
#####################################
#
# This is used for emailing rendered graphs. The default backend is SMTP.
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#
# To drop emails on the floor, enable the Dummy backend instead.
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
#####################################
# Authentication Configuration #
#####################################
#
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
#LDAP_USE_TLS = False
## Manual URI / query setup
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
# User DN template to use for binding (and authentication) against the
# LDAP server. %(username) is replaced with the username supplied at
# graphite login.
#LDAP_USER_DN_TEMPLATE = "CN=%(username)s,OU=users,DC=mycompany,DC=com"
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW) # Use ldap.OPT_X_TLS_DEMAND to force TLS
#ldap.set_option(ldap.OPT_REFERRALS, 0) # Enable for Active Directory
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
#ldap.set_option(ldap.OPT_DEBUG_LEVEL, 65535) # To enable verbose debugging
# See http://www.python-ldap.org/ for further details on these options.
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True
# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'
###############################
# Authorization for Dashboard #
###############################
# By default, there is no security on dashboards - any user can add, change or delete them.
# This section provides 3 different authorization models, of varying strictness.
# If set to True, users must be logged in to save or delete dashboards. Defaults to False
DASHBOARD_REQUIRE_AUTHENTICATION = True
# If set to the name of a user group, dashboards can be saved and deleted by any user in this
# group. Groups can be set in the Django Admin app, or in LDAP. Defaults to None.
# NOTE: Ignored if DASHBOARD_REQUIRE_AUTHENTICATION is not set
#DASHBOARD_REQUIRE_EDIT_GROUP = 'dashboard-editors-group'
# If set to True, dashboards can be saved or deleted by any user having the appropriate
# (change or delete) permission (as set in the Django Admin app). Defaults to False
# NOTE: Ignored if DASHBOARD_REQUIRE_AUTHENTICATION is not set
#DASHBOARD_REQUIRE_PERMISSIONS = True
##########################
# Database Configuration #
##########################
#
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN MIGRATIONS AFTER SETTING UP A NEW DATABASE
# http://graphite.readthedocs.io/en/latest/config-database-setup.html
#
#
# The following built-in database engines are available:
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/opt/graphite/storage/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
#
#########################
# Cluster Configuration #
#########################
#
# To avoid excessive DNS lookups you want to stick to using IP addresses only
# in this entire section.
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
# Creates a pool of worker threads to which tasks can be dispatched. This makes
# sense if there are multiple CLUSTER_SERVERS because then the communication
# with them can be parallelized
# The number of threads is equal to:
# POOL_WORKERS_PER_BACKEND * len(CLUSTER_SERVERS) + POOL_WORKERS
# Be careful when increasing the number of threads, in particular if you start
# multiple graphite-web processes (with uwsgi or similar) as this will increase
# memory consumption (and number of connections to memcached).
#USE_WORKER_POOL = True
# The number of worker threads that should be created per backend server.
# It makes sense to have more than one thread per backend server if
# the graphite-web web server itself is multi threaded and can handle multiple
# incoming requests at once.
#POOL_WORKERS_PER_BACKEND = 1
# A baseline number of workers that should always be created, no matter how many
# cluster servers are configured. These are used for other tasks that can be
# off-loaded from the request handling threads.
#POOL_WORKERS = 1
# This setting controls whether https is used to communicate between cluster members
#INTRACLUSTER_HTTPS = False
# These are timeout values (in seconds) for requests to remote webapps
#REMOTE_FIND_TIMEOUT = 3.0 # Timeout for metric find requests
#REMOTE_FETCH_TIMEOUT = 3.0 # Timeout to fetch series data
#REMOTE_RETRY_DELAY = 60.0 # Time before retrying a failed remote webapp
# Try to detect when a cluster server is localhost and don't forward queries
#REMOTE_EXCLUDE_LOCAL = False
# Number of retries for a specific remote data fetch.
#MAX_FETCH_RETRIES = 2
#FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
# If the query doesn't fall entirely within the FIND_TOLERANCE window
# we disregard the window. This prevents unnecessary remote fetches
# caused when carbon's cache skews node.intervals, giving the appearance
# remote systems have data we don't have locally, which we probably do.
#FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
#REMOTE_STORE_USE_POST = False # Use POST instead of GET for remote requests
# During a rebalance of a consistent hash cluster, after a partition event on a replication > 1 cluster,
# or in other cases we might receive multiple TimeSeries data for a metric key. Merge them together rather
# than choosing the "most complete" one (pre-0.9.14 behaviour).
#REMOTE_STORE_MERGE_RESULTS = True
# Provide a list of HTTP headers that you want forwarded on from this host
# when making a request to a remote webapp server in CLUSTER_SERVERS
#REMOTE_STORE_FORWARD_HEADERS = [] # An iterable of HTTP header names
## Prefetch cache
# set to True to fetch all metrics using a single http request per remote server
# instead of one http request per target, per remote server.
# Especially useful when generating graphs with more than 4-5 targets or if
# there's significant latency between this server and the backends.
#REMOTE_PREFETCH_DATA = False
## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
# If you are running multiple carbon-caches on this machine (typically behind
# a relay using consistent hashing), you'll need to list the ip address, cache
# query port, and instance name of each carbon-cache instance on the local
# machine (NOT every carbon-cache in the entire cluster). The default cache
# query port is 7002 and a common scheme is to use 7102 for instance b, 7202
# for instance c, etc.
# If you're using consistent hashing, please keep an order of hosts the same as
# order of DESTINATIONS in your relay - otherwise you'll get cache misses.
#
# You *should* use 127.0.0.1 here in most cases.
#
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0
#CARBONLINK_RETRY_DELAY = 15 # Seconds to blacklist a failed remote server
#
# Type of metric hashing function.
# The default `carbon_ch` is Graphite's traditional consistent-hashing implementation.
# Alternatively, you can use `fnv1a_ch`, which supports the Fowler-Noll-Vo hash
# function (FNV-1a) hash implementation offered by the carbon-c-relay project
# https://github.com/grobian/carbon-c-relay
#
# Supported values: carbon_ch, fnv1a_ch
#
#CARBONLINK_HASHING_TYPE = 'carbon_ch'
# A "keyfunc" is a user-defined python function that is given a metric name
# and returns a string that should be used when hashing the metric name.
# This is important when your hashing has to respect certain metric groupings.
#CARBONLINK_HASHING_KEYFUNC = "/opt/graphite/bin/keyfuncs.py:my_keyfunc"
# Prefix for internal carbon statistics.
#CARBON_METRIC_PREFIX='carbon'
# The replication factor to use with consistent hashing.
# This should usually match the value configured in Carbon.
#REPLICATION_FACTOR = 1
#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *
| {
"repo_name": "kiv-box/graphite_stack_single",
"path": "graphite-web/conf/local_settings.py",
"copies": "1",
"size": "15030",
"license": "mit",
"hash": 7852382464633420000,
"line_mean": 40.4049586777,
"line_max": 108,
"alpha_frac": 0.7222887558,
"autogenerated": false,
"ratio": 3.6454038321610476,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9811452730288179,
"avg_score": 0.011247971534573551,
"num_lines": 363
} |
""" Ansible module for building projects with CMake.
See the DOCUMENTATION and EXAMPLES strings below for more information.
"""
from __future__ import print_function
from glob import glob
from os.path import abspath
from subprocess import Popen
from subprocess import PIPE
from ansible.module_utils.basic import AnsibleModule
__all__ = "main",
__version__ = "0.2.0" # PEP 0440 with Semantic Versioning
DOCUMENTATION = """
module: cmake
short_description: Build a project using CMake.
notes:
- U(github.com/mdklatt/ansible-cmake-module)
version_added: "2.6"
author: Michael Klatt
options:
build_type:
description: CMake build type to use.
required: false
default: Debug
choices: [Debug, Release, RelWithDebInfo, MinSizeRel]
binary_dir:
description: Destination for binaries.
required: true
source_dir:
description: |
Location of C(CMakeLists.txt). This is required the first time a project
is built, or use it to tell CMake to regenerate the build files.
required: false
cache_vars:
description: A dictionary of cache variables to define.
required: false
target:
description: The name of the target to build.
required: false
creates:
description: |
If the given path exists (wildcards are allowed), the CMake command will
not be executed.
required: false
executable:
description: Path to the C(cmake) executable.
required: false
""" # must be valid YAML
EXAMPLES = """
# Build a binary.
- cmake:
source_dir: /path/to/project
binary_dir: /path/to/broject/build
# Build and install a binary if it doesn't already exist.
- cmake:
source_dir: /path/to/project
binary_dir: /path/to/project/build
target: install
creates: /path/to/installed/binary
# Clean a built project (source_dir is not required).
- cmake:
binary_dir: /path/to/project/build
target: clean
""" # plain text
_ARGS_SPEC = {
# MUST use list for choices.
"build_type": {
"default": "Debug",
"choices": ["Debug", "Release", "RelWithDebInfo", "MinSizeRel"],
},
"binary_dir": {"required": True},
"source_dir": {"default": None},
"cache_vars": {"type": "dict"},
"target": {},
"creates": {"default": ""}, # empty path never exists
"executable": {"default": "cmake"},
}
def main():
    """ Execute the module.

    Reads parameters via AnsibleModule, optionally runs the CMake config
    step (when source_dir is given), then the build step, and reports the
    result with exit_json/fail_json.
    """
    def cmake(args):
        """ Execute a cmake command in the build directory. """
        # Any output to STDOUT or STDERR must be captured.
        args = [module.params["executable"]] + list(args)
        process = Popen(args, stdout=PIPE, stderr=PIPE, cwd=binary)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            module.fail_json(msg=stderr, stdout=stdout, rc=process.returncode)
        return

    def config():
        """ Execute the CMake config step. """
        args = []
        for key, value in cache_vars.items():
            # Coerce to str: cache_vars is a free-form dict, so values may
            # be bools/ints; the original "=".join(var) raised TypeError
            # for any non-string value.
            args.extend(("-D", "%s=%s" % (key, value)))
        source = abspath(module.params["source_dir"])
        args.append(source)
        cmake(args)
        return

    def build():
        """ Execute the CMake build step. """
        args = ["--build", binary]
        if module.params["target"]:
            args.extend(("--target", module.params["target"]))
        cmake(args)
        return

    module = AnsibleModule(_ARGS_SPEC, supports_check_mode=True)
    # Skip all work if the 'creates' path already exists.
    required = not glob(module.params["creates"])
    if module.check_mode:
        module.exit_json(changed=required)  # calls exit(0)
    if required:
        binary = abspath(module.params["binary_dir"])
        cache_vars = {
            "CMAKE_BUILD_TYPE": module.params["build_type"],
        }
        try:
            cache_vars.update(module.params["cache_vars"])
        except TypeError:  # parameter is None
            pass
        if module.params["source_dir"]:
            config()
        build()
    module.exit_json(changed=required, rc=0, **module.params)  # calls exit(0)
# Make the module executable.
if __name__ == "__main__":
    # main() returns None, so SystemExit(None) exits with status 0.
    raise SystemExit(main())
| {
"repo_name": "mdklatt/ansible-cmake-module",
"path": "src/cmake.py",
"copies": "1",
"size": "4070",
"license": "mit",
"hash": 2610537777597762600,
"line_mean": 26.3154362416,
"line_max": 78,
"alpha_frac": 0.6272727273,
"autogenerated": false,
"ratio": 3.883587786259542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 149
} |
"""Ansible playbook tools"""
from collections import OrderedDict
import os.path
import sys
import yaml
def get_tags_from_playbook(playbook_file):
    """Get the unique tags available in an Ansible playbook.

    Follows ``import_playbook`` entries recursively (paths resolved
    relative to the playbook's own directory).  The implicit "always"
    tag is excluded, first-seen order is preserved, and a warning is
    written to stderr when the playbook defines no tags.
    """
    tags = []
    playbook_path = os.path.dirname(playbook_file)
    with open(playbook_file) as playbook_fp:
        playbook = yaml.safe_load(playbook_fp)
    for item in playbook:
        if "import_playbook" in item:
            # Recurse into imported playbooks and merge their tags.
            import_playbook = os.path.join(playbook_path, item["import_playbook"])
            tags.extend(get_tags_from_playbook(import_playbook))
        elif "tags" in item:
            # "tags" may be a single string or a list of strings.
            if isinstance(item["tags"], list):
                tags.extend(item["tags"])
            else:
                tags.append(item["tags"])
        else:
            # Debug leftover from the original: echo plays without tags.
            # Kept as-is to preserve observable output.
            print(item)
    # Remove duplicates while maintaining order.
    tags = list(OrderedDict.fromkeys(tags))
    # "always" runs regardless of --tags, so it is not a selectable tag.
    if "always" in tags:
        tags.remove("always")
    if not tags:
        sys.stderr.write("%s has no tags\n" % playbook_file)
    return tags
| {
"repo_name": "markosamuli/macos-machine",
"path": "machine/playbook.py",
"copies": "1",
"size": "1137",
"license": "mit",
"hash": -7311439656643516000,
"line_mean": 29.7297297297,
"line_max": 86,
"alpha_frac": 0.5848724714,
"autogenerated": false,
"ratio": 4.180147058823529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031065548306927616,
"num_lines": 37
} |
# Mapping of style name -> (open SGR code, close SGR code).  Each close
# code restores the default for that attribute class only (22 resets
# weight, 39 resets foreground, 49 resets background), rather than
# resetting every attribute at once.
ANSI_CODES = {
    'reset': (0, 0),
    'bold': (1, 22),
    'faint': (2, 22),
    'italic': (3, 23),
    'underline': (4, 24),
    'blinking': (5, 25),
    'inverse': (7, 27),
    'invisible': (8, 28),
    'strikethrough': (9, 29),
    'black': (30, 39),
    'red': (31, 39),
    'green': (32, 39),
    'yellow': (33, 39),
    'blue': (34, 39),
    'magenta': (35, 39),
    'cyan': (36, 39),
    'white': (37, 39),
    'light_black': (90, 39),
    'light_red': (91, 39),
    'light_green': (92, 39),
    'light_yellow': (93, 39),
    'light_blue': (94, 39),
    'light_magenta': (95, 39),
    'light_cyan': (96, 39),
    'light_white': (97, 39),
    'on_black': (40, 49),
    'on_red': (41, 49),
    'on_green': (42, 49),
    'on_yellow': (43, 49),
    'on_blue': (44, 49),
    'on_magenta': (45, 49),
    'on_cyan': (46, 49),
    'on_white': (47, 49),
    'on_light_black': (100, 49),
    'on_light_red': (101, 49),
    'on_light_green': (102, 49),
    'on_light_yellow': (103, 49),
    'on_light_blue': (104, 49),
    'on_light_magenta': (105, 49),
    'on_light_cyan': (106, 49),
    'on_light_white': (107, 49)
}

# Setting up a few handy aliases & alternative spellings for ease of use
ANSI_CODES['gray'] = ANSI_CODES['light_black']
ANSI_CODES['grey'] = ANSI_CODES['light_black']
ANSI_CODES['on_gray'] = ANSI_CODES['on_light_black']
ANSI_CODES['on_grey'] = ANSI_CODES['on_light_black']
class AnsiStyle(object):
    """A single ANSI style, holding the SGR codes that enable and disable it."""

    def __init__(self, open_code, close_code):
        self.open_code = open_code
        self.close_code = close_code

    @property
    def open(self):
        """Escape sequence that turns the style on."""
        return '\x1b[%im' % self.open_code

    @property
    def close(self):
        """Escape sequence that turns the style off."""
        return '\x1b[%im' % self.close_code

    def __call__(self, text):
        """Wrap *text* between the open and close sequences."""
        return ''.join((self.open, text, self.close))

    def __repr__(self):
        return '<%s open_code=%r, close_code=%r>' % (
            self.__class__.__name__, self.open_code, self.close_code)
class AnsiStyler(object):
    """Expose a mapping of style names as attributes returning AnsiStyle objects."""

    def __init__(self, styles):
        self.styles = styles

    def __getattr__(self, name):
        # Unknown style names behave like any other missing attribute.
        if name in self.styles:
            open_code, close_code = self.styles[name]
            return AnsiStyle(open_code, close_code)
        raise AttributeError(
            "%r object has no attribute %r" %
            (self.__class__.__name__, name)
        )

    def __dir__(self):
        # Advertise the dynamic style attributes alongside the real ones.
        names = dir(type(self))
        names += list(self.__dict__)
        names += list(self.styles)
        return names

    def __iter__(self):
        return iter(self.styles)

    def __repr__(self):
        return '<%s styles=%r>' % (
            self.__class__.__name__, sorted(self.styles))


# Ready-to-use styler covering the full ANSI_CODES table.
styles = AnsiStyler(ANSI_CODES)
| {
"repo_name": "fgimian/painter",
"path": "painter/ansi_styles.py",
"copies": "1",
"size": "2714",
"license": "mit",
"hash": 4442513639723536000,
"line_mean": 24.3644859813,
"line_max": 72,
"alpha_frac": 0.5092114959,
"autogenerated": false,
"ratio": 3.022271714922049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4031483210822049,
"avg_score": null,
"num_lines": null
} |
""" ANSI Color codes module """
class _Text:
    """
    ANSI text color codes for easy formatting
    """
    black = "\033[30m"
    red = "\033[31m"
    green = "\033[32m"
    yellow = "\033[33m"
    blue = "\033[34m"
    magenta = "\033[35m"
    cyan = "\033[36m"
    white = "\033[37m"
    b_black = "\033[90m"
    b_red = "\033[91m"
    b_green = "\033[92m"
    b_yellow = "\033[93m"
    b_blue = "\033[94m"
    b_magenta = "\033[95m"
    b_cyan = "\033[96m"
    b_white = "\033[97m"
    default = "\033[37m"


class _Background:
    """
    ANSI background color codes for easy formatting
    """
    black = "\033[40m"
    red = "\033[41m"
    green = "\033[42m"
    yellow = "\033[43m"
    blue = "\033[44m"
    magenta = "\033[45m"
    cyan = "\033[46m"
    white = "\033[47m"
    default = "\033[40m"
    b_black = "\033[100m"
    b_red = "\033[101m"
    b_green = "\033[102m"
    b_yellow = "\033[103m"
    b_blue = "\033[104m"
    b_magenta = "\033[105m"
    b_cyan = "\033[106m"
    b_white = "\033[107m"


class _Attributes:
    """
    ANSI console attribute codes for easy formatting
    """
    off = "\033[0m"
    bold = "\033[1m"
    score = "\033[4m"
    blink = "\033[5m"
    reverse = "\033[7m"
    hidden = "\033[8m"


def paint(text, color):
    """Wrap *text* in the named foreground color, resetting afterwards.

    Raises ValueError for unknown color names.
    """
    if hasattr(_Text, color):
        return f"{getattr(_Text, color)}{text}{_Attributes.off}"
    raise ValueError(f"invalid color name: {color}")


def back(text, color):
    """Wrap *text* in the named background color, resetting afterwards.

    Raises ValueError for unknown color names.
    """
    if hasattr(_Background, color):
        return f"{getattr(_Background, color)}{text}{_Attributes.off}"
    raise ValueError(f"invalid color name: {color}")


def attr(text, attribute):
    """Wrap *text* in the named console attribute, resetting afterwards.

    Raises ValueError for unknown attribute names.
    """
    if hasattr(_Attributes, attribute):
        # BUG FIX: the original referenced an undefined name `color` here,
        # raising NameError for every valid attribute.
        return f"{getattr(_Attributes, attribute)}{text}{_Attributes.off}"
    raise ValueError(f"invalid attribute name: {attribute}")
| {
"repo_name": "SpoopySaitama/snake",
"path": "cogs/utils/colors.py",
"copies": "2",
"size": "1800",
"license": "mit",
"hash": 5571378224083937000,
"line_mean": 23,
"line_max": 70,
"alpha_frac": 0.5605555556,
"autogenerated": false,
"ratio": 2.985074626865672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45456301824656725,
"avg_score": null,
"num_lines": null
} |
# SGR reset sequence: clears all colors and attributes.
ANSI_RESET = "\x1b[0m"
def rgb(r, g, b):
    """Returns a xterm256 color index that represents the specified RGB color.

    Each argument should be an integer in the range [0, 5]."""
    for component in (r, g, b):
        if not 0 <= component <= 5:
            raise ValueError("Value out of range")
    # The 6x6x6 color cube occupies palette indices 16-231.
    return 16 + 36 * r + 6 * g + b
def gray(graylevel):
    """Returns a xterm256 color index that represents the specified gray level.

    The argument should be an integer in the range [0, 25]: 0 maps to
    black (index 0), 25 to white (index 231), and 1-24 onto the 24-step
    grayscale ramp at indices 232-255."""
    if not 0 <= graylevel <= 25:
        raise ValueError("Value out of range")
    if graylevel == 0:
        return 0
    if graylevel == 25:
        return 231
    return 231 + graylevel
def sequence(fore=None, back=None):
    """Build the SGR escape selecting xterm256 foreground/background colors.

    Returns an empty string when neither color is given."""
    codes = []
    if fore is not None:
        codes += [38, 5, fore]
    if back is not None:
        codes += [48, 5, back]
    if not codes:
        return ""
    return "\x1b[{}m".format(";".join(str(c) for c in codes))
def wrap(text, fore=None, back=None):
    """Return *text* wrapped in color escapes; plain str if no colors given."""
    text = str(text)
    if fore is None and back is None:
        return text
    return sequence(fore, back) + text + ANSI_RESET


def ignore(text, fore=None, back=None):
    """Signature-compatible no-op: return *text* as a plain string."""
    return str(text)


def wrap_for_stream(stream):
    """Pick wrap() for interactive (TTY) streams, ignore() otherwise."""
    try:
        tty = stream.isatty()
    except AttributeError:
        # Objects without isatty() (e.g. pipes wrapped oddly) get no color.
        return ignore
    return wrap if tty else ignore
| {
"repo_name": "yellcorp/floppy-recovery",
"path": "utils/xterm.py",
"copies": "1",
"size": "1395",
"license": "mit",
"hash": 5467394577387333000,
"line_mean": 23.4736842105,
"line_max": 79,
"alpha_frac": 0.5906810036,
"autogenerated": false,
"ratio": 3.558673469387755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9643872016847403,
"avg_score": 0.0010964912280701754,
"num_lines": 57
} |
"""An SMTP and POP3 server that anonymize all traffic using mixminion
Syntax: python minionProxy.py [-Vh] [-H imap_host] [-P pop3_port] [-S smtp_port] [-L local_host]
-h, --help - prints this help message
-V, --version - prints the version
-H, --host - The remote IMAP host to connect and fetch messages from.
-P, --pop3port - The local POP3 port (default is 20110)
-S, --smtpport - The local SMTP port (default is 20025)
-L, --localhost - The local address servers bind to (default "127.0.0.1")
The minionProxy acts as a local SMTP and POP3 server, and your
favorite email client can be configured to talk to it. It extracts the
subject line, the nickname contained in the "from" field, and the body
of the message and relays them anonymously through the mixminion
network to all receivers. The nickname is the portion of the "from"
field that does not contain the email address e.g. for Red Monkey
<red.monkey@jungle.za> the nickname "Red Monkey" will be
extracted. This can be set by most email clients. Note that not all
mixminion exit nodes support custom nicknames.
Each anonymous message sent out has attached a Single Use Reply Block,
that can be used by your correspondant to reply (once) to your
message. The email portion of the "from" field is encoded as the
recipient email address (e.g. "red.monkey@jungle.za" for the example
above)
(Only) Anonymous messages available on the IMAP server specified are
automatically decoded and can be downloaded using the simple POP3
protocol. Note that the username and password you specify (using your
mail client) for the POP3 server is used in fact to authenticate with
the IMAP server. If the messages contain single use reply blocks the
address of the form "xxxxxx@nym.taz" can be used to reply. Otherwise
it is not possible to reply (and the reply address is
"anonymous@nym.taz")
You need to have the mixminion program on your path for minionProxy
to work. Download it at: http://mixminion.net
For comments and BUGS contact "George.Danezis@cl.cam.ac.uk" """
from IMAPproxy import *
from minionSMTP import *

import getpass
import asyncore
import sys
import getopt
program = sys.argv[0]
__version__ = 'Mixminion SMTP/POP3 form IMAP proxy - 0.0.1'
if __name__ == '__main__':
import __main__
imap_address = None
local_host = '127.0.0.1'
smtp_port = 20025
imap_port = 20110
# Parse the command line arguments
# -V, --version - gives the version
# -h, --help - gives some help
try:
opts, args = getopt.getopt(
sys.argv[1:], 'VhH:P:S:L:',
['version', 'help','host','pop3port','smtpport','localhost'])
except getopt.error, e:
print e
print opts
for opt, arg in opts:
if opt in ('-H', '--host'):
imap_address = arg
print opt,arg,imap_address
elif opt in ('-I', '--imapport'):
try:
imap_port = int(arg)
except ValueError:
print 'POP3 port is not a number'
pass
elif opt in ('-L', '--localhost'):
local_host = arg
elif opt in ('-S', '--smtlport'):
try:
imap_port = int(arg)
except:
print 'SMTP port is not a number'
pass
elif opt in ('-h', '--help'):
print __doc__
sys.exit(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
print 'Mixminion password:'
mm_Pass = getpass.getpass()
if imap_address != None:
proxy1 = IMAPproxy((local_host, imap_port),imap_address,mm_Pass)
proxy2 = minionSMTP((local_host,smtp_port),mm_Pass)
try:
asyncore.loop()
except KeyboardInterupt:
print 'Bye...'
pass
| {
"repo_name": "nmathewson/mixminion",
"path": "etc/minionProxy.py",
"copies": "6",
"size": "3824",
"license": "mit",
"hash": -5747209665741122000,
"line_mean": 36.4901960784,
"line_max": 96,
"alpha_frac": 0.6391213389,
"autogenerated": false,
"ratio": 3.7162293488824103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004541565571525184,
"num_lines": 102
} |
"""A NSUPDATE server class for gevent, emulating apple's mDNSResponder SPS server"""
# Copyright (c) 2013 Russell Cloran
# Copyright (c) 2014 Joey Korkames
import traceback
import struct
import logging
import dns.name
import dns.flags
import dns.message
import dns.reversename
import dns.edns
import ipaddress
import netifaces
import binascii
# https://github.com/aosm/mDNSResponder/commits/master
# http://www.opensource.apple.com/source/mDNSResponder/mDNSResponder-522.1.11/mDNSCore/mDNS.c
# [SLEEPER]BeginSleepProcessing(),NetWakeResolve(),SendSPSRegistration() -> [SPS]mDNSCoreReceiveUpdate() -> [SLEEPER]mDNSCoreReceive(),mDNSCoreReceiveUpdateR()
# [WAKER]*L3 -> [SPS]*BPF,SendResponses(),SendWakeup(),WakeOnResolve++,mDNSSendWakeOnResolve() -> [SLEEPER]
# http://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-11
# EDNS option codes used by the Sleep Proxy protocol; dnspython does not
# define them, so register the numeric codes on the dns.edns module.
dns.edns.UL = 2
dns.edns.OWNER = 4


class UpdateLeaseOption(dns.edns.Option):
    """EDNS option for Dynamic DNS Update Leases
    http://tools.ietf.org/html/draft-sekar-dns-ul-01"""

    def __init__(self, lease):
        # lease: requested lease lifetime in seconds (unsigned 32-bit).
        super(UpdateLeaseOption, self).__init__(dns.edns.UL)
        self.lease = lease

    def to_wire(self, file):
        # Wire format is a single network-order unsigned 32-bit lease value.
        data = struct.pack("!L", self.lease)
        file.write(data)

    @classmethod
    def from_wire(cls, otype, wire, current, olen):
        # Parse the 4-byte lease field out of the raw option payload.
        data = wire[current:current + olen]
        (lease,) = struct.unpack("!L", data)
        return cls(lease)

    def __repr__(self):
        return "%s[OPT#%s](%s)" % (
            self.__class__.__name__,
            self.otype,
            self.lease
        )

# Register the option class so dns.message parsing instantiates it.
dns.edns._type_to_class.update({dns.edns.UL: UpdateLeaseOption})
#SetupOwnerOpt() mDNS.c
class OwnerOption(dns.edns.Option):
    """EDNS option for DNS-SD Sleep Proxy Service client mac address hinting
    http://tools.ietf.org/html/draft-cheshire-edns0-owner-option-00"""

    def __init__(self, ver=0, seq=1, pmac=None, wmac=None, passwd=None):
        super(OwnerOption, self).__init__(dns.edns.OWNER)
        self.ver = ver      # option format version
        self.seq = seq      # sleep/wake sequence number
        # MAC addresses are normalized to bare lowercase hex text.
        self.pmac = self._mac2text(pmac)    # primary (registering) MAC
        self.wmac = self._mac2text(wmac)    # wake target MAC, if different
        self.passwd = passwd                # optional WOL password

    @staticmethod
    def _mac2text(mac):
        """Normalize a wire-format MAC to bare lowercase hex text."""
        if not mac: return mac
        #if len(mac) == 6: mac.encode('hex') #this was a wire-format binary
        mac = binascii.hexlify(mac)
        return mac.lower().translate(None,'.:-') #del common octet delimiters

    def to_wire(self, file):
        """Serialize as version, seq, primary MAC, optional wake MAC, password.

        BUG FIX: the original referenced undefined locals (`ver`, `seq`,
        `passwd`) instead of the instance attributes and concatenated raw
        integers onto a string, so serialization always raised NameError.
        """
        data = struct.pack('!BB', self.ver, self.seq)
        data += binascii.unhexlify(self.pmac)
        if self.wmac and self.pmac != self.wmac:
            # The wake MAC appears on the wire only when it differs.
            data += binascii.unhexlify(self.wmac)
        if self.passwd:
            data += self.passwd
        file.write(data)

    @classmethod
    def from_wire(cls, otype, wire, current, olen):
        """Parse the variable-length owner option (8/14/18/20 byte forms)."""
        data = wire[current:current + olen]
        if olen == 20:
            opt = (ver, seq, pmac, wmac, passwd) = struct.unpack('!BB6s6s6s',data)
        elif olen == 18:
            opt = (ver, seq, pmac, wmac, passwd) = struct.unpack('!BB6s6s4s',data)
        elif olen == 14:
            opt = (ver, seq, pmac, wmac) = struct.unpack("!BB6s6s",data)
        elif olen == 8:
            opt = (ver, seq, pmac) = struct.unpack("!BB6s",data)
        return cls(*opt)

    def __repr__(self):
        return "%s[OPT#%s](%s, %s, %s, %s, %s)" % (
            self.__class__.__name__,
            self.otype,
            self.ver,
            self.seq,
            self.pmac,
            self.wmac,
            self.passwd
        )

dns.edns._type_to_class.update({dns.edns.OWNER: OwnerOption})
from sleepproxy.manager import manage_host
from gevent.server import DatagramServer
#https://github.com/surfly/gevent/blob/master/gevent/server.py#L106
__all__ = ['SleepProxyServer']


class SleepProxyServer(DatagramServer):
    """Gevent UDP server accepting DNS UPDATE registrations from Sleep
    Proxy Service clients and handing the parsed host info to the
    manager so sleeping machines can be proxied and woken later."""
    # #@classmethod
    # #def get_listener(self, address, family=None):
    # # #return _udp_socket(address, reuse_addr=self.reuse_addr, family=family)
    # # sock = socket.socket(family=family, type=socket.SOCK_DGRAM)
    # # #if family == socket.AF_INET6: logging.warning("dual-stacking!"); sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
    # # if family == socket.AF_INET6: logging.warning("disabling dual-stacking!"); sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, True)
    # # sock.bind(address)
    # # return sock

    def handle(self, message, raddress):
        # Decode and validate an incoming SPS registration (a DNS UPDATE
        # with EDNS options), then record the sleeping host.
        try:
            #with ignored(dns.message.BadEDNS):
            message = dns.message.from_wire(message, ignore_trailing=True)
        except dns.message.BadEDNS:
            #yosemite's discoveryd sends an OPT record per active NIC, dnspython doesn't like more than 1 OPT record
            # https://github.com/rthalley/dnspython/blob/master/dns/message.py#L642
            # so turn off Wi-Fi for ethernet-connected clients
            pass #or send back an nxdomain or servfail
        except: #no way to just catch dns.exceptions.*
            logging.warning("Error decoding DNS message from %s" % raddress[0])
            logging.debug(traceback.format_exc())
            return
        if message.edns < 0:
            logging.warning("Received non-EDNS message from %s, ignoring" % raddress[0])
            return
        if not (message.opcode() == 5 and message.authority):
            # opcode 5 == UPDATE; registrations carry records in authority.
            logging.warning("Received non-UPDATE message from %s, ignoring" % raddress[0])
            return
        logging.debug("Received SPS registration from %s, parsing" % raddress[0])
        info = {'records': [], 'addresses': []}
        # Try to guess the interface this came in on
        # todo - precompute this table on new()?
        for iface in netifaces.interfaces():
            ifaddresses = netifaces.ifaddresses(iface)
            for af, addresses in ifaddresses.items():
                if af not in (netifaces.AF_INET, netifaces.AF_INET6): continue
                for address in addresses:
                    mask = address['netmask']
                    if af == netifaces.AF_INET6: mask = (mask.count('f') * 4) # convert linux masks to prefix length...gooney
                    if address['addr'].find('%') > -1: continue #more linux ipv6 stupidity
                    iface_net = ipaddress.ip_interface('%s/%s' % (address['addr'], mask)).network
                    if ipaddress.ip_address(raddress[0]) in iface_net:
                        info['mymac'] = ifaddresses[netifaces.AF_LINK][0]['addr']
                        info['myif'] = iface
        for rrset in message.authority:
            rrset.rdclass %= dns.rdataclass.UNIQUE #remove cache-flush bit
            #rrset.rdata = rrset.rdata.decode(utf-8)
            info['records'].append(rrset)
            self._add_addresses(info, rrset)
        logging.debug('NSUPDATE START--\n\n%s\n\n%s\n\n--NSUPDATE END' % (message,message.options))
        for option in message.options:
            if option.otype == dns.edns.UL:
                info['ttl'] = option.lease #send-WOL-no-later-than timer TTL
            if option.otype == dns.edns.OWNER:
                info['othermac'] = option.pmac #WOL target mac
                #if option.passwd: # password required in wakeup packet
                # mDNS.c:SendSPSRegistrationForOwner() doesn't seem to add a password
        self._answer(raddress, message)
        if len(message.options) == 2:
            # need both an owner and an update-lease option, else its just a post-wake notification (incremented seq number)
            manage_host(info)

    def _add_addresses(self, info, rrset):
        # Collect the sleeper's IP addresses from reverse-DNS PTR records.
        if rrset.rdtype != dns.rdatatype.PTR: return
        if rrset.rdclass != dns.rdataclass.IN: return
        #if not rrset.name.to_text().endswith('.in-addr.arpa.'): return #TODO: support SYN sniffing for .ip6.arpa. hosts
        if not rrset.name.to_text().endswith('.arpa.'): return #all we care about are reverse-dns records
        info['addresses'].append(dns.reversename.to_address(rrset.name))

    def _answer(self, address, query):
        # Acknowledge a registration with a matching DNS UPDATE response.
        response = dns.message.make_response(query)
        response.flags = dns.flags.QR | dns.opcode.to_flags(dns.opcode.UPDATE)
        #needs a single OPT record to confirm registration: 0 TTL 4500 48 . OPT Max 1440 Lease 7200 Vers 0 Seq 21 MAC D4:9A:20:DE:9D:38
        response.use_edns(edns=True, ednsflags=dns.rcode.NOERROR, payload=query.payload, options=[query.options[0]]) #payload should be 1440, theoretical udp-over-eth maxsz stdframe
        logging.warning("Confirming SPS registration @%s with %s[%s] for %s secs" % (query.options[1].seq, address[0], query.options[1].pmac, query.options[0].lease))
        logging.debug('RESPONSE--\n\n%s\n\n%s\n\n--RESPONSE END' % (response,response.options))
        self.socket.sendto(response.to_wire(), address)
| {
"repo_name": "kfix/SleepProxyServer",
"path": "sleepproxy/dnsserve.py",
"copies": "1",
"size": "8824",
"license": "bsd-2-clause",
"hash": 2033440557068590000,
"line_mean": 41.2200956938,
"line_max": 181,
"alpha_frac": 0.6193336355,
"autogenerated": false,
"ratio": 3.446875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45662086355,
"avg_score": null,
"num_lines": null
} |
# Experiment comparing three ways of "iterating" MD5: chaining raw digests,
# chaining hex digests, and true nested md5(md5(...)).
answ = '4cbf522049d7cc0e46a337fcc3cad331'
match = ''

import hashlib
import binascii
# Hash algorithms always present in hashlib: md5(), sha1(), sha224(), sha256(),
# sha384(), and sha512(). Additional algorithms may be available from OpenSSL.

hashdict = {'TelenorRemoteAdmin': '4caacf3fd454786eed5e429b7b40ce2d',
            'qwer1234': '4cbf522049d7cc0e46a337fcc3cad331',
            'dfgt5653': 'c46b1eaa51f853eeb8eae453ba620657'}


def md5(hash):
    """Return the raw 16-byte MD5 digest of *hash* (bytes in, bytes out)."""
    return hashlib.md5(hash).digest()


def md5iter(times, hash):
    """Apply raw-digest MD5 *times* times and return the hex text of the result."""
    for _ in range(times):
        hash = md5(hash)
    return binascii.hexlify(hash)


qw = hashlib.md5(b"qwer1234")
qv = hashlib.md5(b"qwer1234")
qm = md5(b"qwer1234")

for i in range(6):
    print(qw.hexdigest(), qv.hexdigest(), binascii.hexlify(qm))
    qw.update(qw.digest())                      # append raw digest to the stream
    qv.update(bytes(qv.hexdigest(), 'ascii'))   # append hex digest to the stream
    qm = md5(qm)                                # true nested md5
    # BUG FIX: hexdigest is a method; the original compared the bound
    # method object to the target string, which was always False.
    if qw.hexdigest() == answ:
        match = str(i)
    if qv.hexdigest() == answ:
        match = str(i)

print('DONE', match)
print(md5iter(6, b'qwer1234'))
# if __name__ == '__main__':
# usr = 'admin'
# pwd = 'qwer1234'
#
# s1 = '- md5(md5($pass)), Double MD5'
# s2 = '- md5(md5($pass).$pass)'
# s3 = '- md5($username.0.$pass)'
#
# decode(s1, usr, pwd, '')
# decode(s2, usr, pwd, '')
# decode(s3, usr, pwd, '')
| {
"repo_name": "wittrup/crap",
"path": "crack/hashing/hashiter.py",
"copies": "1",
"size": "1305",
"license": "mit",
"hash": 5384718746054141000,
"line_mean": 25.6326530612,
"line_max": 178,
"alpha_frac": 0.632183908,
"autogenerated": false,
"ratio": 2.594433399602386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8695226798384961,
"avg_score": 0.006278101843484937,
"num_lines": 49
} |
# Quiz grading script.  `widget_inputs` (checkbox widget states) and
# `grade_result` (output dict) are injected by the grading platform
# before this file runs — TODO confirm against the platform harness.
answer1 = widget_inputs["check1"]
answer2 = widget_inputs["check2"]
answer3 = widget_inputs["check3"]
answer4 = widget_inputs["check4"]

# Expected answers: check1=False, check2=False, check3=True, check4=False.
is_correct = False
comments = []

def commentizer(new):
    # Collect feedback, adding each hint at most once.
    if new not in comments:
        comments.append(new)

# First check seeds is_correct; each later check ANDs into it so a single
# wrong answer makes the overall result False.
if answer1 == False:
    is_correct = True
else:
    is_correct = is_correct and False
    commentizer("Check the first answer. If the images had the same resolution with different file sizes, then compression might be correct.")

if answer2 == False:
    is_correct = is_correct and True
else:
    is_correct = is_correct and False
    commentizer("Check the second answer. Are the images the same size on the page? If so, the display resolutions are the same.")

if answer3 == True:
    is_correct = is_correct and True
else:
    is_correct = is_correct and False
    commentizer("Check the third answer. Do the original images have the same resolutions? Open up DevTools to find out.")

if answer4 == False:
    is_correct = is_correct and True
else:
    is_correct = is_correct and False
    commentizer("Check the fourth answer. Are the file types different? Open up DevTools to find out.")

if is_correct:
    commentizer("Nice job! Remember, there's no need to send images with natural resolutions higher than their display resolutions (unless you need to scale up for high DPI devices, but you'll learn about that later in the course).")

# Hints are separated by blank lines in the rendered feedback.
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct | {
"repo_name": "udacity/responsive-images",
"path": "grading_scripts/2_17_q.py",
"copies": "1",
"size": "1482",
"license": "mit",
"hash": -1375862521177584400,
"line_mean": 35.1707317073,
"line_max": 233,
"alpha_frac": 0.7226720648,
"autogenerated": false,
"ratio": 3.6774193548387095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985736531248292,
"avg_score": 0.008545221431158101,
"num_lines": 41
} |
answer1 = widget_inputs["radio1"]
answer2 = widget_inputs["radio2"]
answer3 = widget_inputs["radio3"]
answer4 = widget_inputs["radio4"]
answer5 = widget_inputs["radio5"]
answer6 = widget_inputs["radio6"]
answer7 = widget_inputs["radio7"]
answer8 = widget_inputs["radio8"]
answer9 = widget_inputs["radio9"]
answer10 = widget_inputs["radio10"]
is_correct = False
comments = []
def commentizer(new):
    """Record feedback *new*, skipping duplicates."""
    if new not in comments:
        comments.append(new)
# Pair each pass-condition with the feedback shown when it fails.
_checks = [
    (answer2 == True, "Check the Chrome logo."),
    (answer3 == True, "Check the kitten photo."),
    (answer6 == True, "Check the Mexican flag. It's vector, believe it or not."),
    (answer8 == True, "Check the repeat background."),
    (answer9 == True, "Check the gradient background."),
]
is_correct = all(passed for passed, _ in _checks)
for passed, feedback in _checks:
    if not passed:
        commentizer(feedback)
if is_correct:
    commentizer("Great job! You're starting to learn how to decide between raster and vector options.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
"repo_name": "udacity/responsive-images",
"path": "grading_scripts/2_14_q.py",
"copies": "1",
"size": "1414",
"license": "mit",
"hash": -7444165287377427000,
"line_mean": 25.6981132075,
"line_max": 103,
"alpha_frac": 0.698019802,
"autogenerated": false,
"ratio": 3.31924882629108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.451726862829108,
"avg_score": null,
"num_lines": null
} |
answer1 = widget_inputs["radio1"]
answer2 = widget_inputs["radio2"]
answer3 = widget_inputs["radio3"]
answer4 = widget_inputs["radio4"]
is_correct = False
comments = []
def commentizer(new):
    """Record feedback *new*, skipping duplicates."""
    if new not in comments:
        comments.append(new)
# Pair each pass-condition with the feedback shown when it fails.
_checks = [
    (answer1 == True,
     "Check the first one. Remember, an SVG animation will animate the rotation of an image, as opposed to a gif which is a series of raster images displayed one after another."),
    (answer4 == True,
     "Check the second one. Will the image be reused? If so, an external file probably makes more sense."),
]
is_correct = all(passed for passed, _ in _checks)
for passed, feedback in _checks:
    if not passed:
        commentizer(feedback)
if is_correct:
    commentizer("Great job!")
    commentizer(" I love the internet :)")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
"repo_name": "udacity/responsive-images",
"path": "grading_scripts/3_12.4_q.py",
"copies": "1",
"size": "1045",
"license": "mit",
"hash": 2391542710361420000,
"line_mean": 33.8666666667,
"line_max": 189,
"alpha_frac": 0.7167464115,
"autogenerated": false,
"ratio": 3.3069620253164556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9483215423258065,
"avg_score": 0.00809860271167808,
"num_lines": 30
} |
"""Answer a data request with the following
1) lat/lon of 'F0' first point
2) Picture of location? Hmmm, I did huc12 previously
3) elevation of F0 and G1 through G6 nodes
4) horizontal distance from F0 to G1-G6
5) OFE positions along this path
6) Generic properties, perhaps can ignore?
"""
from __future__ import print_function
import psycopg2
import numpy as np
from geopandas import read_postgis
import pandas as pd
from simpledbf import Dbf5
# Module-level connection: importing this module requires a reachable
# PostgreSQL database named "idep".
PGCONN = psycopg2.connect(database="idep")
# HUC-12 watershed identifiers to process, one ID per line.
HUCS = """101702031901
101702031904
070600010905
070600010906
102300020302
102300020303
102400010103
102300060405
071000030504
071000030802
070801020403
070801020504
070801070302
070801070101
102400130102
102400130201
102802010404
102802010209
070801010701
070801010702""".split(
    "\n"
)
# Scenario code for each grid order 1-6 (index 0 unused); used both in SQL
# queries and in the /i/... slope-file directory layout.
SCEN2CODE = [None, 12, 13, 14, 0, 15, 16]
def find_ca(zst, fpath, gorder):
    """Return the contributing area for *fpath* at grid order *gorder*.

    Looks up the unique (fpath, gridorder) row in the zonal-stats frame
    *zst*; when the requested order has no single matching row, falls back
    to successively smaller grid orders.  Returns None if nothing matches.
    """
    for order in range(gorder, 0, -1):
        subset = zst[(zst["fpath"] == fpath) & (zst["gridorder"] == order)]
        if len(subset.index) == 1:
            return subset.iloc[0]["area"]
    print("Whoa, double failure!")
    return None
def dohuc(huc):
    """Build the per-flowpath attribute table for one HUC-12 watershed.

    Reads zonal statistics from zst<huc>.dbf, loads scenario-0 flowpath
    geometries from PostGIS, and fills in F0 coordinates/elevation, per
    grid-order elevations, lengths and contributing areas, and OFE positions
    parsed from WEPP .slp files.  Returns a DataFrame indexed by fpath.
    """
    cursor = PGCONN.cursor()
    # Zonal stats for this HUC, produced upstream as a DBF file.
    zdbf = Dbf5("zst%s.dbf" % (huc,), codec="utf-8")
    zst = zdbf.to_dataframe()
    zst.columns = ["value", "count", "area", "max", "fpath", "gridorder"]
    zst.sort_values(["fpath", "gridorder"], inplace=True, ascending=True)
    # print(zst)
    df = read_postgis(
        """
    SELECT fpath, fid, st_transform(geom, 4326) as geo, huc_12 from flowpaths
    where huc_12 = %s and scenario = 0
    """,
        PGCONN,
        params=(huc,),
        geom_col="geo",
        index_col="fpath",
    )
    # Pre-create all output columns so assignment by .at works below.
    for col in ["F0_lon", "F0_lat", "F0_elev"]:
        df[col] = None
    for gorder in range(1, 7):
        df["G%s_elev" % (gorder,)] = None
        df["G%s_len" % (gorder,)] = None
        df["G%s_contribarea" % (gorder,)] = None
    for ofe in range(1, 18):
        df["ofe%s_pos" % (ofe,)] = None
    for fpath, row in df.iterrows():
        # 1) lat/lon of 'F0' first point (first vertex of the geometry)
        (df.at[fpath, "F0_lon"], df.at[fpath, "F0_lat"]) = np.asarray(
            row["geo"].xy
        )[:, 0]
        # 3) elevation of F0 and G1 through G6 nodes
        for gorder in range(1, 7):
            # Contributing area
            df.at[fpath, "G%s_contribarea" % (gorder,)] = find_ca(
                zst, fpath, gorder
            )
            cursor.execute(
                """
            select max(elevation), min(elevation), max(length)
            from flowpaths p JOIN flowpath_points t on (p.fid = t.flowpath)
            where p.scenario = %s and huc_12 = %s and fpath = %s
            """,
                (SCEN2CODE[gorder], huc, fpath),
            )
            row2 = cursor.fetchone()
            df.at[fpath, "F0_elev"] = row2[0]  # overwrite each time
            df.at[fpath, "G%s_elev" % (gorder,)] = row2[1]
            # 4) horizontal distance from F0 to G1-G6
            df.at[fpath, "G%s_len" % (gorder,)] = row2[2]
        # 5) OFE positions along this path, parsed from the WEPP slope file.
        # NOTE(review): the file handle is never closed here; assumes the
        # .slp file exists for every flowpath -- confirm.
        slpfn = "/i/%s/slp/%s/%s/%s_%s.slp" % (
            SCEN2CODE[6],
            huc[:8],
            huc[8:],
            huc,
            fpath,
        )
        lines = open(slpfn).readlines()
        # Line 6 of a .slp file holds the OFE count; each OFE then spans
        # two lines starting at line 8 (0-based index 7).
        ofes = int(lines[5])
        pos = 0
        for ofe, ln in enumerate(range(7, 7 + ofes * 2, 2)):
            pos += float(lines[ln].split()[1])
            df.at[fpath, "ofe%s_pos" % (ofe + 1,)] = pos
    del df["geo"]
    del df["fid"]
    # 6) Generic properties, perhaps can ignore?
    return df
def main():
    """Process every HUC in HUCS and write the combined table to result.csv."""
    frames = []
    for huc in HUCS:
        frames.append(dohuc(huc))
    pd.concat(frames).to_csv("result.csv")
if __name__ == "__main__":
    main()
| {
"repo_name": "akrherz/dep",
"path": "scripts/gridorder/flowpath_attrs.py",
"copies": "2",
"size": "3969",
"license": "mit",
"hash": 6958308869863463000,
"line_mean": 28.1838235294,
"line_max": 77,
"alpha_frac": 0.5575711766,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.95575711766,
"avg_score": 0,
"num_lines": 136
} |
""" Answer extraction for QA """
from operator import itemgetter
from collections import defaultdict
import importlib
import string
import logging as log
import nltk
from ner import SocketNER
from inquire import config
# Coarse question-class codes (TREC-style taxonomy) -> name of the module in
# this package that implements extraction for that class.
question_types = {
    'DESC': 'description',
    'ENTY': 'entity',
    'ABBR': 'abbreviation',
    'HUM': 'human',
    'LOC': 'location',
    'NUM': 'numeric',
}
def get_extractor(coarse, fine):
    """Resolve the extractor for a (coarse, fine) question-type pair.

    Dynamically imports the package module registered for *coarse* in
    question_types and delegates to its own get_extractor().  Raises
    NoExtractorError when no such module exists.
    """
    log.debug("getting coarse extractor for '{}'".format(coarse))
    # http://stackoverflow.com/questions/301134/dynamic-module-import-in-python
    try:
        module_name = __package__ + '.' + question_types[coarse]
        coarse_module = importlib.import_module(module_name)
    except (ImportError, KeyError):
        log.warn("Extractor for coarse type '{}' not implemented".format(coarse))
        raise NoExtractorError(coarse)
    return coarse_module.get_extractor(coarse, fine)
class NoExtractorError(Exception):
    """Raised when no extractor module exists for a coarse question type."""
class InvalidExtractorError(Exception):
    """Raised when an extractor is constructed with missing/invalid arguments."""
class BaseExtractor(object):
    # Common machinery for answer extractors: holds the question and the
    # candidate documents, and provides tokenization/cleaning plus candidate
    # ranking.  NOTE(review): this is Python 2 code (unicode, iteritems).
    def __init__(self, question, docs):
        # docs: iterable of document strings; question: the question string.
        self.docs = docs
        self.question = question
        self.lem = nltk.stem.wordnet.WordNetLemmatizer()
        # Translation map that deletes all ASCII punctuation characters.
        self.delete_punctuation_map = dict((ord(char), None) for char in string.punctuation)
    def preprocess(self, pos=False, ner=False, tok_q=True):
        # Tokenize/lemmatize the question, and optionally POS-tag (pos=True)
        # or NER-tag (ner=True, via a local Stanford NER socket) every doc.
        log.debug("preprocessing documents")
        if tok_q:
            self.tok_question = unicode(self.question).translate(self.delete_punctuation_map)
            self.tok_question = nltk.word_tokenize(self.tok_question.lower())
            self.tok_question = [self.lem.lemmatize(word) for word in self.tok_question]
        if pos:
            # self.tok_docs = [nltk.word_tokenize(doc) for doc in self.docs]
            self.pos_docs = [nltk.pos_tag(nltk.word_tokenize(doc)) for doc in self.docs]
        if ner:
            self.ner = SocketNER(host='localhost', port=config.NER_PORT, collapse=False)
            self.ne_docs = [self.ner.get_entities(doc) for doc in self.docs]
    def clean(self, s):
        # Lowercase, strip punctuation and lemmatize a single token.
        return self.lem.lemmatize(unicode(s).translate(self.delete_punctuation_map).lower())
    def sort_candidates(self, candidates):
        """
        Takes a dict with frequencies {'a':2, 'b':4, 'c':1} and sorts them.
        Returns the list of sorted candidates with percentages.
        """
        if len(candidates) == 0:
            return None
        # automatically creates nested dict when they don't exist
        # Group surface forms that clean() maps to the same key, so spelling
        # variants pool their counts.
        cleaned = defaultdict(dict)
        for item, count in candidates.iteritems():
            cleaned[self.clean(item)][item] = count
        results = {}
        for item, options in cleaned.iteritems():
            # Keep the most frequent surface form but credit it with the
            # group's total count.
            selected_option, max_count, total_count = None, 0, 0
            for option, count in options.iteritems():
                total_count += count
                if count > max_count:
                    selected_option, max_count = option, count
            results[selected_option] = total_count
        results = sorted(results.iteritems(), key=itemgetter(1), reverse=True)
        total = sum(count for item, count in results)
        # trim to first 10 items; counts are normalized to fractions of total
        return [(item, count/float(total)) for item, count in results][:10]
    def answer(self):
        """
        Answer should return a sorted list of answer tuples with their confidence
        """
        return "I don't know how to answer that type of question yet"
class NETagExtractor(BaseExtractor):
    """ extractor that uses named entity tagging """
    def __init__(self, question, docs, tag=None):
        # tag: the NER label (e.g. PERSON, LOCATION) that candidate answers
        # must carry; required.
        super(NETagExtractor, self).__init__(question, docs)
        if not tag:
            raise InvalidExtractorError("No tag provided for NETagExtractor")
        self.tag = tag
    def answer(self):
        # Return the ranked candidate list (see sort_candidates) built from
        # named entities of type self.tag found in the documents.
        self.preprocess(ner=True)
        candidates = {}
        # count up occurrences of the same NE
        for doc in self.ne_docs:
            for entity in doc:
                # entities come first in this output
                # don't count things that are part of the question
                # TODO: fuzzy match this so we don't get spelling errors of the
                # question as answers
                # TODO: are we including punctuation in the comparison? (see: eval-1929)
                if entity[0] == self.tag and self.clean(entity[1]) not in self.tok_question:
                    candidates[entity[1]] = candidates.get(entity[1], 0) + 1
        # sort candidates by frequency
        return self.sort_candidates(candidates)
class POSTagExtractor(BaseExtractor):
    """ extractor that uses part-of-speech tagging """
    def __init__(self, question, docs, tags=None):
        # tags: collection of POS tags (e.g. NN, NNP) that candidate answer
        # words must carry; required.
        super(POSTagExtractor, self).__init__(question, docs)
        if not tags:
            raise InvalidExtractorError("No tag provided for POSTagExtractor")
        self.tags = tags
    def answer(self):
        # Return the ranked candidate list built from words whose POS tag is
        # in self.tags and that do not already appear in the question.
        self.preprocess(pos=True)
        candidates = {}
        # count up occurrences of the same POS
        for doc in self.pos_docs:
            for word in doc:
                # don't count things that are part of the question
                # TODO: fuzzy match this so we don't get spelling errors of the
                # question as answers
                if word[1] in self.tags and self.clean(word[0]) not in self.tok_question:
                    candidates[word[0]] = candidates.get(word[0], 0) + 1
        # sort candidates by frequency
        return self.sort_candidates(candidates)
| {
"repo_name": "jcelliott/inquire",
"path": "inquire/extraction/extractors.py",
"copies": "1",
"size": "5468",
"license": "mit",
"hash": -3691992798613264400,
"line_mean": 37.780141844,
"line_max": 93,
"alpha_frac": 0.6228968544,
"autogenerated": false,
"ratio": 4.117469879518072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240366733918073,
"avg_score": null,
"num_lines": null
} |
""" Answer for https://stackoverflow.com/questions/35757560 """
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
corpus = ["The dog ate a sandwich and I ate a sandwich",
"The wizard transfigured a sandwich"]
vectorizer = TfidfVectorizer(stop_words='english')
tfidfs = vectorizer.fit_transform(corpus)
columns = [k for (v, k) in sorted((v, k) for k, v in vectorizer.vocabulary_.items())]
tfidfs = pd.DataFrame(tfidfs.todense(), columns=columns)
# ate dog sandwich transfigured wizard
# 0 0.75 0.38 0.54 0.00 0.00
# 1 0.00 0.00 0.45 0.63 0.63
df = (1 / pd.DataFrame([vectorizer.idf_], columns=columns))
# ate dog sandwich transfigured wizard
# 0 0.71 0.71 1.0 0.71 0.71
corp = [txt.lower().split() for txt in corpus]
corp = [[w for w in d if w in vectorizer.vocabulary_] for d in corp]
tfs = pd.DataFrame([Counter(d) for d in corp]).fillna(0).astype(int)
# ate dog sandwich transfigured wizard
# 0 2 1 2 0 0
# 1 0 0 1 1 1
# The first document's TFIDF vector:
tfidf0 = tfs.iloc[0] * (1. / df)
tfidf0 = tfidf0 / np.linalg.norm(tfidf0)
# ate dog sandwich transfigured wizard
# 0 0.754584 0.377292 0.536893 0.0 0.0
tfidf1 = tfs.iloc[1] * (1. / df)
tfidf1 = tfidf1 / np.linalg.norm(tfidf1)
# ate dog sandwich transfigured wizard
# 0 0.0 0.0 0.449436 0.631667 0.631667
| {
"repo_name": "totalgood/nlpia",
"path": "src/nlpia/posts/so_tfidf.py",
"copies": "1",
"size": "1567",
"license": "mit",
"hash": 5983500431527357000,
"line_mean": 38.175,
"line_max": 85,
"alpha_frac": 0.6145500957,
"autogenerated": false,
"ratio": 2.7157712305026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38303213262026,
"avg_score": null,
"num_lines": null
} |
# Answer
s0 = 'azzf'
def palindrome_string(s):
    # NOTE(review): this function ignores its parameter `s` and operates on
    # the module-level `s0` instead -- almost certainly a bug; confirm intent.
    # NOTE(review): Python 2 only -- bytearray('azzf') and chr(123) lookups on
    # a bytearray both fail under Python 3.
    mid,left = divmod(len(s0), 2) # (3,0)
    s1 = bytearray(s0)
    # Loop until the first half mirrors the last half (i.e. s1 is a palindrome).
    while s1[:mid] != s1[-mid:][::-1]:
        for i in range(mid):
            a,b = s1[i], s1[-i-1]
            if a > b:
                s1[-i-1] = a
            elif a < b:
                # mirror the left byte and carry into the next position
                s1[-i-1] = a
                s1[-i-2] += 1
            else:
                pass
        # 123 is ord('{'), i.e. one past 'z': a carry overflowed past 'z'.
        if 123 in s1:
            loc_inc = s1.index(chr(123))
        while 123 in s1:
            chr_loc = s1.index(chr(123))
            s1[chr_loc] = 97
            s1[chr_loc-1] += 1
    return str(s1.decode())
# Old that takes forever
# NOTE(review): dead scratch code -- syntactically invalid (bare `if` below,
# Python 2 `print` statements) and references `mid`/`left` from the function
# above.  Kept only as a record of the earlier attempt.
s1 = s0[:mid + left] + s0[:mid][::-1]
s1, s0 = bytearray(s1), bytearray(s0)
while s1[:mid] != s1[-mid:][::-1]:
    i = 1
    while s1[-i] == 122:
        s1[-i] = 97
        if s1[-i-1] != 122:
            s1[-i-1] += 1
        else:
            i += 1
    if
        s1[-1] += 1
        print s1
print s1
# Nested Loops: Comfortable Numbers
# NOTE(review): snippet assumes `L` and `R` are defined elsewhere and uses
# Python 2 `xrange`; `y in seg` over an xrange is O(n) per test.
seg,pairs = xrange(L,R+1),[]
for x in seg:
    # a and b are "comfortable" when |a-b| <= digit-sum(a)
    bound = sum(map(int, str(x)))
    pairs.append([(x,y) for y in range(x-bound, x+bound+1) if (y!=x and y in seg)])
P = [(x,y) for x,y in sum(pairs,[]) if x < y]
# Nested Loops: Weak Numbers
# NOTE(review): assumes `n` is defined elsewhere; Python 2 (`xrange`, list
# `filter`).  The last comprehension reads `i` from the enclosing scope, not
# its own loop variable -- looks like a bug; confirm.
divs = lambda x: [y for y in range(1,x) if not x%y]
D = [len(divs(x)) for x in xrange(1,n+1)]
Weak = [len(filter(lambda x: x > D[i], D[:i])) for i in xrange(1,n+1)]
# Rectangle Rotation
# NOTE(review): scratch example input for rectangleRotation below; relies on
# Python 2 integer division (`-b/2`).
a,b = 6,4
P = [(x,y) for x in range(-b/2,b/2+1) for y in range(-a/2,a/2+1)]
def rectangleRotation(a, b):
    """Count the lattice points covered by an a x b rectangle centered on
    the origin and rotated 45 degrees.

    In the rectangle's own (rotated) frame, a point (x, y) is inside iff
    |x + y| <= a / sqrt(2) and |x - y| <= b / sqrt(2).  With u = x + y and
    w = x - y, integer (x, y) correspond one-to-one to integer pairs (u, w)
    of equal parity, so we count those pairs directly.  The bounds a/sqrt(2)
    and b/sqrt(2) are irrational for positive integers, so truncation with
    int() never drops a boundary point.

    The previous implementation enumerated candidates over too small a
    bounding box and called all() with four positional arguments (a
    TypeError), so it could not produce a correct answer.
    """
    half_diag_a = a / 2 ** 0.5  # half-extent along the x + y diagonal axis
    half_diag_b = b / 2 ** 0.5  # half-extent along the x - y diagonal axis
    count = 0
    for u in range(-int(half_diag_a), int(half_diag_a) + 1):
        for w in range(-int(half_diag_b), int(half_diag_b) + 1):
            # u and w must share parity for x = (u+w)/2, y = (u-w)/2 to be
            # integers.
            if (u - w) % 2 == 0:
                count += 1
    return count
| {
"repo_name": "rodriggs/pipeline",
"path": "data_transfer_object/__init__.py",
"copies": "1",
"size": "2860",
"license": "mit",
"hash": -2523392714996909600,
"line_mean": 30.7777777778,
"line_max": 113,
"alpha_frac": 0.5174825175,
"autogenerated": false,
"ratio": 2.2274143302180685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32448968477180684,
"avg_score": null,
"num_lines": null
} |
# NOTE(review): Python 2 script (print statements); `verbose` is expected to
# be defined by the environment that runs this solution -- confirm.  The file
# handle `f` is never closed.
# read in the text file
f = open('beginner.txt', 'r')
# store the text file into a list, one entry per line
values = f.readlines()
# if you've set verbose=True (or verbose=1.), print out the list
if verbose:
    print "My list of lines:", values
# turn each value in the list from a string to an integer
# Note the syntax of my for loop
# len(values) gives the length of the list called values
# range returns a list of integers from 0 to len(values)-1
for i in range(len(values)):
    # values[i] gives the i-th element of the list values.
    # values[i] is then a string object which ends in a new line, '\n', so
    # we use the .strip() method to remove the new line symbol.
    # we then use the int() function to convert the remaining string to an
    # int, and reassign the ith element of values with this integer
    values[i] = int(values[i].strip())
if verbose:
    print "My list of values (as ints):", values
######################### SORTING THE LIST #####################################
# Selection sort: repeatedly pull the minimum out of `values` into sortedlist.
# make a sorted list that we will fill with values
sortedlist = []
# loop through our list of integer values, using similar syntax to above
for i in range(len(values)):
    # store the 1st value in the list as our current minimum
    currentmin = values[0]
    # loop through the remaining values in the list. Note the alternative
    # syntax for the for loop.
    # the [1:] takes all the elements except the 0th element of values
    for value in values[1:]:
        if value < currentmin: # check which thing is lower, the value or our current minimum
            # if the value is lower, assign that to be the current min.
            currentmin = value
    # add the minimum value we found to the sorted list we created earlier
    sortedlist.append(currentmin)
    # remove that value from the values list
    values.remove(currentmin)
# Yay, we've created a sorted list. Let's have a look at it.
print "My sorted list:", sortedlist
| {
"repo_name": "astro313/REU2017",
"path": "beginner_soln.py",
"copies": "1",
"size": "2188",
"license": "mit",
"hash": 6111083891715289000,
"line_mean": 38.7818181818,
"line_max": 94,
"alpha_frac": 0.6526508227,
"autogenerated": false,
"ratio": 4.03690036900369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.518955119170369,
"avg_score": null,
"num_lines": null
} |
# NOTE(review): Python 2 script; `f`, `np` and `plt` are defined in an
# earlier part of the file not shown here -- confirm.  `Nentries / 2` relies
# on Python 2 integer division when used as a slice index.
# Note that each line contains one gaussian time series
for line in f.readlines():
    ################### CONVERT THE DATA TO A NUMPY ARRAY ####################
    # strip the new line character (\n) off the end of the line, then split
    # the line into a list of entries
    data = line.strip().split(',')
    # convert the list into a numpy array (and convert all the entries from
    # strings to floats)
    dataarray = np.array(data, dtype='float')
################################ MAKE A PLOT ###################################
    # determine the number of entries in your array, and then plot your data,
    # one data point per entry.
    Nentries = len(dataarray)
    plt.plot(np.arange(0, Nentries, 1), dataarray, '.')
    # Note: arange generates a numpy array with syntax (first entry,length,step).
    # Further note: the '.' makes the it plot points - you can also use plt.scatter()
    # To display the plot to the screen we would use plt.show() here. However,
    # We're going to plot all the data on top of each other and show at the end,
    # so we don't use plt.show() right now
####################### CALCULATE SIGNAL TO NOISE RATIO ########################
    # find the peak flux
    peakflux = np.max(dataarray)
    # here are two different ways of calculating the RMS.
    # Note that we cheat here: we know the pulse is in the first half of the
    # time series, so we just calculate the rms noise on the second half
    rmsflux = np.std(dataarray[Nentries / 2:])
    rmsflux2 = np.sqrt(np.mean(dataarray[Nentries / 2:]**2))
    # print out your signal to noise calculation
    print '%(SN).2f\t%(peak).2f\t%(rms).2f\t\t%(rms2).2f' % {'SN': peakflux / rmsflux, 'peak': peakflux, 'rms': rmsflux, 'rms2': rmsflux2}
################################### END LOOP ###################################
############################# DISPLAY THE PLOT #################################
plt.xlabel('time (s)')
plt.ylabel('Flux (mJy)')
plt.show()
| {
"repo_name": "astro313/REU2017",
"path": "intermediate_soln.py",
"copies": "1",
"size": "2413",
"license": "mit",
"hash": 3883647921487865000,
"line_mean": 44.5283018868,
"line_max": 138,
"alpha_frac": 0.5785329465,
"autogenerated": false,
"ratio": 3.8856682769726247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49642012234726246,
"avg_score": null,
"num_lines": null
} |
''' answers_views
annotating answers
'''
from flask import (render_template, request, flash, redirect, url_for, session
, abort)
from decorators import *
from models import *
from ivrhub import app
# URL for a single answer, nested under org -> form -> response -> question.
answer_route = '/organizations/<org_label>/forms/<form_label>/responses'
answer_route += '/<response_sid>/answers/<question_label>'
@app.route(answer_route, methods=['GET', 'POST'])
@verification_required
@csrf_protect
def answers(org_label, form_label, response_sid, question_label):
    ''' add notes to an answer
    much more limited than other (similar) routes
    no provision for viewing the answer on its own or for seeing all answers
    no provision for deleting answers
    '''
    # Each lookup below walks one level of the org/form/response/question
    # hierarchy; any miss logs, flashes a warning and redirects upward.
    user = User.objects(email=session['email'])[0]
    # find the relevant organization
    orgs = Organization.objects(label=org_label)
    if not orgs:
        app.logger.error('%s tried to access an organization that does not \
            exist' % session['email'])
        flash('Organization "%s" not found, sorry!' % org_label, 'warning')
        return redirect(url_for('organizations'))
    org = orgs[0]
    # permission-check: member of the org, or a site admin
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to access an organization but was denied \
            for want of admin rights' % session['email'])
        abort(404)
    forms = Form.objects(label=form_label, organization=org)
    if not forms:
        app.logger.error('%s tried to access a form that does not \
            exist' % session['email'])
        flash('Form "%s" does not exist, sorry!' % form_label, 'warning')
        return redirect(url_for('organizations', org_label=org_label))
    form = forms[0]
    responses = Response.objects(call_sid=response_sid, form=form)
    if not responses:
        app.logger.error('%s tried to access a response that does not \
            exist' % session['email'])
        flash('Response "%s" does not exist, sorry!' % response_sid, 'warning')
        return redirect(url_for('responses', org_label=org_label
            , form_label=form_label))
    response = responses[0]
    questions = Question.objects(form=form, label=question_label)
    if not questions:
        app.logger.error('%s tried to access an question that does not \
            exist' % session['email'])
        flash('Question "%s" does not exist, sorry!' % question_label
            , 'warning')
        return redirect(url_for('responses', org_label=org_label
            , form_label=form_label, response_sid=response_sid))
    question = questions[0]
    answers = Answer.objects(response=response, question=question)
    if not answers:
        app.logger.error('%s tried to access an answer that does not \
            exist' % session['email'])
        flash('An answer to question "%s" does not exist for this response \
            , sorry!' % question_label, 'warning')
        return redirect(url_for('responses', org_label=org_label
            , form_label=form_label, response_sid=response_sid))
    answer = answers[0]
    if request.method == 'POST':
        # Only the 'info' form is supported; it updates the free-text notes.
        form_type = request.form.get('form_type', '')
        if form_type == 'info':
            answer.notes = request.form.get('notes', '')
        else:
            # bad 'form_type'
            abort(404)
        try:
            answer.save()
            flash('Changes saved successfully.', 'success')
        except:
            # on save failure, discard the in-memory edit and report
            answer.reload()
            app.logger.error('%s experienced an error saving info about the \
                answer to question "%s" for the response initiated at %s' \
                % (session['email'], question.name, response.initiation_time))
            flash('Error saving changes, sorry :/', 'error')
        return redirect(url_for('responses', org_label=org_label
            , form_label=form_label, response_sid=response_sid))
    if request.method == 'GET':
        # ?edit=true renders the edit form; otherwise bounce to the responses
        # table where the answer is displayed.
        if request.args.get('edit', '') == 'true':
            return render_template('answer_edit.html', answer=answer)
        else:
            flash('View the answer in the table below.', 'info')
            return redirect(url_for('responses', org_label=org_label
                , form_label=form_label, response_sid=response_sid))
| {
"repo_name": "aquaya/ivrhub",
"path": "ivrhub/answer_views.py",
"copies": "1",
"size": "4274",
"license": "mit",
"hash": -5924125180934332000,
"line_mean": 39.7047619048,
"line_max": 79,
"alpha_frac": 0.6186242396,
"autogenerated": false,
"ratio": 4.043519394512772,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01475943562312691,
"num_lines": 105
} |
""" answer to """
import numpy as np
# terminiology:
# - segment are prefixed by `s` and followed by upper case nodes: e.g. `sAB` is the length of the semgnet from A to B
# - angles are prefixed with `ang` and followed by upper case nodes: e.g. `angABC` is the angle between A, B, and C
# - areas are prefixed with `area` and followed by upper case nodes: e.g. `areaABC` is the area comprised between nodes A, B, and C
#
# new definitions:
# - we define a new node H, between A and B so that angAHC = angDHC = 90 deg
## helper function to convert ang to rad and reverse
def angToRad(ang):
    """Convert an angle from degrees to radians."""
    return np.pi * ang / 180
def radtoAng(rad):
    """Convert an angle from radians to degrees."""
    ratio = rad / np.pi
    return ratio * 180
## end helper
# var definition
angCAD = angToRad(80)
angDCB = angToRad(25)
# var from the exercise
# NOTE(review): sAB is reassigned further down, overwriting this given
# length -- confirm that is intended.  Final `print` is Python 2 syntax.
sAB = 4.9
sAD = 3.8
# get height length (H is the foot of the perpendicular from C onto AD)
sHC = sAB * np.sin(angCAD)
# get distance from A to H
sAH = sAB * np.cos(angCAD)
# get distance from H to D
sHD = sAD - sAH
# get angle HCD
angHCD = np.arctan(sHD / sHC)
# get angle HCB
angHCB = angHCD + angDCB
# get HB
sHB = sHC * np.tan(angHCB)
# get distance from A to B
sAB = sAH + sHB
# get two triangles areas (shared height sHC)
areaACD = sHC * sAD / 2
areaACB = sAB * sHC / 2
# get asked area
areaDCB = areaACB - areaACD
print areaDCB
"repo_name": "jboissard/mathExperiments",
"path": "quoraAnswer.py",
"copies": "1",
"size": "1227",
"license": "apache-2.0",
"hash": -4120841009554108000,
"line_mean": 21.7407407407,
"line_max": 129,
"alpha_frac": 0.6862265689,
"autogenerated": false,
"ratio": 2.6790393013100435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8794043031748492,
"avg_score": 0.014244567692310355,
"num_lines": 54
} |
# 64 precomputed 64-bit rows of the linear transform matrix; together with
# sbox/tau/C below these appear to be the constants of the GOST R 34.11-2012
# "Streebog" hash -- TODO confirm against the specification.
A = [
    0x8e20faa72ba0b470, 0x47107ddd9b505a38, 0xad08b0e0c3282d1c, 0xd8045870ef14980e,
    0x6c022c38f90a4c07, 0x3601161cf205268d, 0x1b8e0b0e798c13c8, 0x83478b07b2468764,
    0xa011d380818e8f40, 0x5086e740ce47c920, 0x2843fd2067adea10, 0x14aff010bdd87508,
    0x0ad97808d06cb404, 0x05e23c0468365a02, 0x8c711e02341b2d01, 0x46b60f011a83988e,
    0x90dab52a387ae76f, 0x486dd4151c3dfdb9, 0x24b86a840e90f0d2, 0x125c354207487869,
    0x092e94218d243cba, 0x8a174a9ec8121e5d, 0x4585254f64090fa0, 0xaccc9ca9328a8950,
    0x9d4df05d5f661451, 0xc0a878a0a1330aa6, 0x60543c50de970553, 0x302a1e286fc58ca7,
    0x18150f14b9ec46dd, 0x0c84890ad27623e0, 0x0642ca05693b9f70, 0x0321658cba93c138,
    0x86275df09ce8aaa8, 0x439da0784e745554, 0xafc0503c273aa42a, 0xd960281e9d1d5215,
    0xe230140fc0802984, 0x71180a8960409a42, 0xb60c05ca30204d21, 0x5b068c651810a89e,
    0x456c34887a3805b9, 0xac361a443d1c8cd2, 0x561b0d22900e4669, 0x2b838811480723ba,
    0x9bcf4486248d9f5d, 0xc3e9224312c8c1a0, 0xeffa11af0964ee50, 0xf97d86d98a327728,
    0xe4fa2054a80b329c, 0x727d102a548b194e, 0x39b008152acb8227, 0x9258048415eb419d,
    0x492c024284fbaec0, 0xaa16012142f35760, 0x550b8e9e21f7a530, 0xa48b474f9ef5dc18,
    0x70a6a56e2440598e, 0x3853dc371220a247, 0x1ca76e95091051ad, 0x0edd37c48a08a6d8,
    0x07e095624504536c, 0x8d70c431ac02a736, 0xc83862965601dd1b, 0x641c314b2b8ee083
]
# 8-bit substitution box (256 entries); rebound below to its symbolic
# vectorial decomposition before use.
sbox = [
    0xFC, 0xEE, 0xDD, 0x11, 0xCF, 0x6E, 0x31, 0x16, 0xFB, 0xC4, 0xFA, 0xDA, 0x23, 0xC5, 0x04, 0x4D,
    0xE9, 0x77, 0xF0, 0xDB, 0x93, 0x2E, 0x99, 0xBA, 0x17, 0x36, 0xF1, 0xBB, 0x14, 0xCD, 0x5F, 0xC1,
    0xF9, 0x18, 0x65, 0x5A, 0xE2, 0x5C, 0xEF, 0x21, 0x81, 0x1C, 0x3C, 0x42, 0x8B, 0x01, 0x8E, 0x4F,
    0x05, 0x84, 0x02, 0xAE, 0xE3, 0x6A, 0x8F, 0xA0, 0x06, 0x0B, 0xED, 0x98, 0x7F, 0xD4, 0xD3, 0x1F,
    0xEB, 0x34, 0x2C, 0x51, 0xEA, 0xC8, 0x48, 0xAB, 0xF2, 0x2A, 0x68, 0xA2, 0xFD, 0x3A, 0xCE, 0xCC,
    0xB5, 0x70, 0x0E, 0x56, 0x08, 0x0C, 0x76, 0x12, 0xBF, 0x72, 0x13, 0x47, 0x9C, 0xB7, 0x5D, 0x87,
    0x15, 0xA1, 0x96, 0x29, 0x10, 0x7B, 0x9A, 0xC7, 0xF3, 0x91, 0x78, 0x6F, 0x9D, 0x9E, 0xB2, 0xB1,
    0x32, 0x75, 0x19, 0x3D, 0xFF, 0x35, 0x8A, 0x7E, 0x6D, 0x54, 0xC6, 0x80, 0xC3, 0xBD, 0x0D, 0x57,
    0xDF, 0xF5, 0x24, 0xA9, 0x3E, 0xA8, 0x43, 0xC9, 0xD7, 0x79, 0xD6, 0xF6, 0x7C, 0x22, 0xB9, 0x03,
    0xE0, 0x0F, 0xEC, 0xDE, 0x7A, 0x94, 0xB0, 0xBC, 0xDC, 0xE8, 0x28, 0x50, 0x4E, 0x33, 0x0A, 0x4A,
    0xA7, 0x97, 0x60, 0x73, 0x1E, 0x00, 0x62, 0x44, 0x1A, 0xB8, 0x38, 0x82, 0x64, 0x9F, 0x26, 0x41,
    0xAD, 0x45, 0x46, 0x92, 0x27, 0x5E, 0x55, 0x2F, 0x8C, 0xA3, 0xA5, 0x7D, 0x69, 0xD5, 0x95, 0x3B,
    0x07, 0x58, 0xB3, 0x40, 0x86, 0xAC, 0x1D, 0xF7, 0x30, 0x37, 0x6B, 0xE4, 0x88, 0xD9, 0xE7, 0x89,
    0xE1, 0x1B, 0x83, 0x49, 0x4C, 0x3F, 0xF8, 0xFE, 0x8D, 0x53, 0xAA, 0x90, 0xCA, 0xD8, 0x85, 0x61,
    0x20, 0x71, 0x67, 0xA4, 0x2D, 0x2B, 0x09, 0x5B, 0xCB, 0x9B, 0x25, 0xD0, 0xBE, 0xE5, 0x6C, 0x52,
    0x59, 0xA6, 0x74, 0xD2, 0xE6, 0xF4, 0xB4, 0xC0, 0xD1, 0x66, 0xAF, 0xC2, 0x39, 0x4B, 0x63, 0xB6
]
# Byte-transposition table for the P step: output cell i takes input cell
# tau[i] (a transpose of the 8x8 state).
tau = [
    0, 8, 16, 24, 32, 40, 48, 56,
    1, 9, 17, 25, 33, 41, 49, 57,
    2, 10, 18, 26, 34, 42, 50, 58,
    3, 11, 19, 27, 35, 43, 51, 59,
    4, 12, 20, 28, 36, 44, 52, 60,
    5, 13, 21, 29, 37, 45, 53, 61,
    6, 14, 22, 30, 38, 46, 54, 62,
    7, 15, 23, 31, 39, 47, 55, 63
]
# Twelve 64-byte round constants, one per round of the key schedule.
C = [
    [
    0xb1,0x08,0x5b,0xda,0x1e,0xca,0xda,0xe9,0xeb,0xcb,0x2f,0x81,0xc0,0x65,0x7c,0x1f,
    0x2f,0x6a,0x76,0x43,0x2e,0x45,0xd0,0x16,0x71,0x4e,0xb8,0x8d,0x75,0x85,0xc4,0xfc,
    0x4b,0x7c,0xe0,0x91,0x92,0x67,0x69,0x01,0xa2,0x42,0x2a,0x08,0xa4,0x60,0xd3,0x15,
    0x05,0x76,0x74,0x36,0xcc,0x74,0x4d,0x23,0xdd,0x80,0x65,0x59,0xf2,0xa6,0x45,0x07
    ],
    [
    0x6f,0xa3,0xb5,0x8a,0xa9,0x9d,0x2f,0x1a,0x4f,0xe3,0x9d,0x46,0x0f,0x70,0xb5,0xd7,
    0xf3,0xfe,0xea,0x72,0x0a,0x23,0x2b,0x98,0x61,0xd5,0x5e,0x0f,0x16,0xb5,0x01,0x31,
    0x9a,0xb5,0x17,0x6b,0x12,0xd6,0x99,0x58,0x5c,0xb5,0x61,0xc2,0xdb,0x0a,0xa7,0xca,
    0x55,0xdd,0xa2,0x1b,0xd7,0xcb,0xcd,0x56,0xe6,0x79,0x04,0x70,0x21,0xb1,0x9b,0xb7
    ],
    [
    0xf5,0x74,0xdc,0xac,0x2b,0xce,0x2f,0xc7,0x0a,0x39,0xfc,0x28,0x6a,0x3d,0x84,0x35,
    0x06,0xf1,0x5e,0x5f,0x52,0x9c,0x1f,0x8b,0xf2,0xea,0x75,0x14,0xb1,0x29,0x7b,0x7b,
    0xd3,0xe2,0x0f,0xe4,0x90,0x35,0x9e,0xb1,0xc1,0xc9,0x3a,0x37,0x60,0x62,0xdb,0x09,
    0xc2,0xb6,0xf4,0x43,0x86,0x7a,0xdb,0x31,0x99,0x1e,0x96,0xf5,0x0a,0xba,0x0a,0xb2
    ],
    [
    0xef,0x1f,0xdf,0xb3,0xe8,0x15,0x66,0xd2,0xf9,0x48,0xe1,0xa0,0x5d,0x71,0xe4,0xdd,
    0x48,0x8e,0x85,0x7e,0x33,0x5c,0x3c,0x7d,0x9d,0x72,0x1c,0xad,0x68,0x5e,0x35,0x3f,
    0xa9,0xd7,0x2c,0x82,0xed,0x03,0xd6,0x75,0xd8,0xb7,0x13,0x33,0x93,0x52,0x03,0xbe,
    0x34,0x53,0xea,0xa1,0x93,0xe8,0x37,0xf1,0x22,0x0c,0xbe,0xbc,0x84,0xe3,0xd1,0x2e
    ],
    [
    0x4b,0xea,0x6b,0xac,0xad,0x47,0x47,0x99,0x9a,0x3f,0x41,0x0c,0x6c,0xa9,0x23,0x63,
    0x7f,0x15,0x1c,0x1f,0x16,0x86,0x10,0x4a,0x35,0x9e,0x35,0xd7,0x80,0x0f,0xff,0xbd,
    0xbf,0xcd,0x17,0x47,0x25,0x3a,0xf5,0xa3,0xdf,0xff,0x00,0xb7,0x23,0x27,0x1a,0x16,
    0x7a,0x56,0xa2,0x7e,0xa9,0xea,0x63,0xf5,0x60,0x17,0x58,0xfd,0x7c,0x6c,0xfe,0x57
    ],
    [
    0xae,0x4f,0xae,0xae,0x1d,0x3a,0xd3,0xd9,0x6f,0xa4,0xc3,0x3b,0x7a,0x30,0x39,0xc0,
    0x2d,0x66,0xc4,0xf9,0x51,0x42,0xa4,0x6c,0x18,0x7f,0x9a,0xb4,0x9a,0xf0,0x8e,0xc6,
    0xcf,0xfa,0xa6,0xb7,0x1c,0x9a,0xb7,0xb4,0x0a,0xf2,0x1f,0x66,0xc2,0xbe,0xc6,0xb6,
    0xbf,0x71,0xc5,0x72,0x36,0x90,0x4f,0x35,0xfa,0x68,0x40,0x7a,0x46,0x64,0x7d,0x6e
    ],
    [
    0xf4,0xc7,0x0e,0x16,0xee,0xaa,0xc5,0xec,0x51,0xac,0x86,0xfe,0xbf,0x24,0x09,0x54,
    0x39,0x9e,0xc6,0xc7,0xe6,0xbf,0x87,0xc9,0xd3,0x47,0x3e,0x33,0x19,0x7a,0x93,0xc9,
    0x09,0x92,0xab,0xc5,0x2d,0x82,0x2c,0x37,0x06,0x47,0x69,0x83,0x28,0x4a,0x05,0x04,
    0x35,0x17,0x45,0x4c,0xa2,0x3c,0x4a,0xf3,0x88,0x86,0x56,0x4d,0x3a,0x14,0xd4,0x93
    ],
    [
    0x9b,0x1f,0x5b,0x42,0x4d,0x93,0xc9,0xa7,0x03,0xe7,0xaa,0x02,0x0c,0x6e,0x41,0x41,
    0x4e,0xb7,0xf8,0x71,0x9c,0x36,0xde,0x1e,0x89,0xb4,0x44,0x3b,0x4d,0xdb,0xc4,0x9a,
    0xf4,0x89,0x2b,0xcb,0x92,0x9b,0x06,0x90,0x69,0xd1,0x8d,0x2b,0xd1,0xa5,0xc4,0x2f,
    0x36,0xac,0xc2,0x35,0x59,0x51,0xa8,0xd9,0xa4,0x7f,0x0d,0xd4,0xbf,0x02,0xe7,0x1e
    ],
    [
    0x37,0x8f,0x5a,0x54,0x16,0x31,0x22,0x9b,0x94,0x4c,0x9a,0xd8,0xec,0x16,0x5f,0xde,
    0x3a,0x7d,0x3a,0x1b,0x25,0x89,0x42,0x24,0x3c,0xd9,0x55,0xb7,0xe0,0x0d,0x09,0x84,
    0x80,0x0a,0x44,0x0b,0xdb,0xb2,0xce,0xb1,0x7b,0x2b,0x8a,0x9a,0xa6,0x07,0x9c,0x54,
    0x0e,0x38,0xdc,0x92,0xcb,0x1f,0x2a,0x60,0x72,0x61,0x44,0x51,0x83,0x23,0x5a,0xdb
    ],
    [
    0xab,0xbe,0xde,0xa6,0x80,0x05,0x6f,0x52,0x38,0x2a,0xe5,0x48,0xb2,0xe4,0xf3,0xf3,
    0x89,0x41,0xe7,0x1c,0xff,0x8a,0x78,0xdb,0x1f,0xff,0xe1,0x8a,0x1b,0x33,0x61,0x03,
    0x9f,0xe7,0x67,0x02,0xaf,0x69,0x33,0x4b,0x7a,0x1e,0x6c,0x30,0x3b,0x76,0x52,0xf4,
    0x36,0x98,0xfa,0xd1,0x15,0x3b,0xb6,0xc3,0x74,0xb4,0xc7,0xfb,0x98,0x45,0x9c,0xed
    ],
    [
    0x7b,0xcd,0x9e,0xd0,0xef,0xc8,0x89,0xfb,0x30,0x02,0xc6,0xcd,0x63,0x5a,0xfe,0x94,
    0xd8,0xfa,0x6b,0xbb,0xeb,0xab,0x07,0x61,0x20,0x01,0x80,0x21,0x14,0x84,0x66,0x79,
    0x8a,0x1d,0x71,0xef,0xea,0x48,0xb9,0xca,0xef,0xba,0xcd,0x1d,0x7d,0x47,0x6e,0x98,
    0xde,0xa2,0x59,0x4a,0xc0,0x6f,0xd8,0x5d,0x6b,0xca,0xa4,0xcd,0x81,0xf3,0x2d,0x1b
    ],
    [
    0x37,0x8e,0xe7,0x67,0xf1,0x16,0x31,0xba,0xd2,0x13,0x80,0xb0,0x04,0x49,0xb1,0x7a,
    0xcd,0xa4,0x3c,0x32,0xbc,0xdf,0x1d,0x77,0xf8,0x20,0x12,0xd4,0x30,0x21,0x9f,0x9b,
    0x5d,0x80,0xef,0x9d,0x18,0x91,0xcc,0x86,0xe7,0x1d,0xa4,0xaa,0x88,0xe1,0x28,0x52,
    0xfa,0xf4,0x17,0xd5,0xd9,0xb2,0x1b,0x99,0x48,0xbc,0x92,0x4a,0xf1,0x1b,0xd7,0x20
    ]
]
from arybo.lib import MBA, simplify, simplify_inplace
from pytanque import symbol, Vector
import copy, random, sys
# Build the symbolic execution context: 8-bit and 64-bit mixed
# boolean-arithmetic (MBA) spaces from arybo.
mba8 = MBA(8)
mba64 = MBA(64)
# 32 random constant message bytes; some bits are replaced below by symbols.
data = [mba8.from_cst(random.randint(0,255)) for i in range(32)]
# Number of symbolic input bits, taken from the command line.
nbits = int(sys.argv[1])
idxes = list(range(nbits))
random.shuffle(idxes)
for i in range(nbits):
    # Inject symbol "i<k>" into a random bit of a distinct random byte.
    data[idxes[i]].vec[random.randint(0,7)] = symbol("i%d" % i)
# Turn the S-box lookup table into a vectorial boolean expression; note that
# `sbox` is rebound from the table to a callable decomposition here.
sbox_E,X = mba8.permut2expr(sbox)
sbox = sbox_E.vectorial_decomp([X])
def S(K):
    """Byte-wise S-box substitution over the 64-byte Stribog state."""
    substituted = []
    for idx in range(64):
        substituted.append(mba8.from_vec(simplify_inplace(sbox(K[idx].vec))))
    return substituted
def P(K):
    """Transpose the 64 state bytes according to the tau permutation table."""
    permuted = []
    for pos in range(64):
        permuted.append(K[tau[pos]])
    return permuted
def L(K):
    """Stribog linear step: multiply each 64-bit row of the state by the
    binary matrix A over GF(2).

    Mutates the bit vectors of K in place and also returns it.
    """
    state = K
    for i in range(8):  # one 64-bit row (8 bytes) per iteration
        v = mba64.from_cst(0)
        for k in range(8):
            for j in range(8):
                # XOR in matrix row A[k*8+j] when bit (7-j) of byte k is set.
                v ^= mba64.from_vec(mba64.from_cst(A[k*8+j]).vec * state[i*8+k].vec[7-j])
        simplify_inplace(v)
        for k in range(8):
            for b in range(8):
                # Scatter the 64 result bits back into the row's eight bytes.
                state[i*8+k].vec[b] = simplify_inplace(v[(7-k)*8+b])
    return state
def SPL(K):
    """Compose the Stribog round transforms: substitute, permute, linear."""
    return L(P(S(K)))
def flat_vec(L):
    """Concatenate the bit vectors of the elements of L into one flat Vector."""
    width = len(L[0].vec)
    total = width * len(L)
    flat = Vector(total)
    for bit in range(total):
        flat[bit] = L[bit // width][bit % width]
    return flat
def xor512(A, B):
    """Byte-wise XOR of two 64-byte states, simplifying each result byte."""
    out = []
    for idx in range(64):
        out.append(simplify_inplace(A[idx] ^ B[idx]))
    return out
def KeySchedule(K, i):
    """Derive the next round key: XOR with round constant C[i], then SPL."""
    mixed = xor512(K, C[i])
    return SPL(mixed)
def simp_inplace(L):
    """Simplify every expression in L in place and return the list."""
    for expr in L:
        simplify_inplace(expr)
    return L
def E(K, m):
    """Stribog block cipher E: 12 rounds of SPL with an evolving round key.

    @param K initial round key (64-byte state)
    @param m message block (64-byte state)
    @return encrypted 64-byte state
    """
    # NOTE(review): removed a leftover `print(i)` per-round debug statement.
    state = xor512(m, K)
    for i in range(12):
        state = SPL(state)
        K = KeySchedule(K, i)
        state = xor512(state, K)
    return state
def g_N(N, h, m):
    """Stribog compression function g_N(h, m) with counter state N."""
    key = SPL(xor512(N, h))
    return xor512(xor512(E(key, m), h), m)
def hash_X_256(IV, message):
    """Hash a single 256-bit message block with a simplified Stribog scheme.

    @param IV 64-byte initial hash state
    @param message 32 bytes placed in the upper half of the padded block
    NOTE(review): handles exactly one block, so the bit-length counter N is
    the constant 256 and the checksum Sigma equals the padded block itself.
    """
    m = [mba8.from_cst(0) for i in range(64)]
    N = [mba8.from_cst(0) for i in range(64)]
    v512 = [mba8.from_cst(0) for i in range(64)]
    v0 = [mba8.from_cst(0) for i in range(64)]
    hash_ = [simplify(IV[i]) for i in range(64)]
    m[32:64] = message
    m[31] |= 1  # padding: set the bit just below the 256-bit message
    hash_ = g_N(N, hash_, m)
    # Encode N = 256 into the 512-bit counter (byte 62 is the 2^8 place).
    v512[63] = mba8.from_cst(0)
    v512[62] = mba8.from_cst(1)
    N = v512
    Sigma = m
    # Finalisation: absorb the length counter, then the checksum.
    hash_ = g_N(v0, hash_, N)
    hash_ = g_N(v0, hash_, Sigma)
    return hash_
def hash_512_256(message):
    """Hash `message` using an all-zero IV (the Stribog-256 variant)."""
    zero_iv = [mba8.from_cst(0) for _ in range(64)]
    return hash_X_256(zero_iv, message)
def print_ar2(ar):
    """Print symbolic byte objects (anything exposing .to_cst()) as hex."""
    print(' '.join("%02x" % item.to_cst() for item in ar))
def print_ar(ar):
    """Print a sequence of integers as space-separated two-digit hex."""
    print(' '.join("%02x" % value for value in ar))
# Drive the symbolic hash over the partially-symbolic input bytes and print
# the resulting symbolic state.
print("Compute hash...")
H = hash_512_256(data)
print("Done!")
print(H)
| {
"repo_name": "quarkslab/arybo",
"path": "examples/stribog.py",
"copies": "1",
"size": "9714",
"license": "bsd-3-clause",
"hash": -8711922549137011000,
"line_mean": 40.8706896552,
"line_max": 96,
"alpha_frac": 0.7004323657,
"autogenerated": false,
"ratio": 1.5119066147859923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7311925870523582,
"avg_score": 0.08008262199248192,
"num_lines": 232
} |
#ant1 201410
#Support functions for image display in Planets4X game
from PIL import Image
def ColorAsAlpha(im, colorin, alphalevel):
    '''Set the alpha of every pixel whose RGB equals colorin's RGB to
    alphalevel.  im is a PIL RGBA image; colorin is an RGB or RGBA tuple.
    ant1 20100826'''
    pixels = []
    for px in im.getdata():
        if px[0] == colorin[0] and px[1] == colorin[1] and px[2] == colorin[2]:
            px = (px[0], px[1], px[2], alphalevel)
        pixels.append(px)
    im.putdata(pixels)
    return im
def ColorSwitch(im, colorin, colorout):
    '''Replace every pixel matching colorin's RGB with colorout.

    im is a PIL "RGB" or "RGBA" image; colorin/colorout are RGB or RGBA
    tuples.  In RGBA mode the replacement keeps colorin's alpha when one is
    supplied, otherwise it preserves the pixel's own alpha.
    ant1 20141028'''
    k = []
    for i in im.getdata():
        match = True
        for j in range(3):
            if i[j] != colorin[j]:
                match = False
        if match:
            if im.mode == "RGBA":
                # BUGFIX: the original read colorin[3] unconditionally, which
                # raises IndexError for the documented plain-RGB colorin.
                alpha = colorin[3] if len(colorin) > 3 else i[3]
                i = (colorout[0], colorout[1], colorout[2], alpha)
            elif im.mode == "RGB":
                i = (colorout[0], colorout[1], colorout[2])
        k.append(i)
    im.putdata(k)
    return im
def MainColor(im):
    """Return the most frequent color in the image (RGBA tuple).

    BUGFIX: the original tested im.format (the *file* format, e.g. 'PNG')
    instead of im.mode, and discarded the new image returned by convert().
    """
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    appear = 0
    main_color = 0
    # getcolors() yields (count, color) pairs; keep the highest count.
    for count, color in im.getcolors():
        if appear < count:
            appear = count
            main_color = color
    return main_color
def fit_textinbox(string, box_size, font, font_size):
    ''' string, tuple, font file ex:'arialbd.ttf', integer
    string is string to be printed
    box_size : size in pixels of the area where string should fit
    font : truetype/opentype file to be used, ex: 'arialbd.ttf'
    font_size : initial guess for size of the font, integer
    Returns an ImageFont shrunk (never below 3 pt) until the string fits
    box_size; falls back to PIL's default bitmap font on font-load failure.
    ant1 20100822'''
    # BUGFIX: ImageFont was never imported, so the old bare `except:` always
    # fired and the function silently returned the default font every time.
    from PIL import ImageFont
    try:
        f = ImageFont.truetype(font, font_size)
        w, h = f.getsize(string)
        while (w > box_size[0] or h > box_size[1]) and font_size > 3:
            font_size -= 1
            f = ImageFont.truetype(font, font_size)
            w, h = f.getsize(string)
        if (w > box_size[0] or h > box_size[1]) and font_size == 3:
            f = ImageFont.load_default()
    except (OSError, IOError):
        # Font file missing/unreadable: use the built-in bitmap font.
        f = ImageFont.load_default()
    return f
def DisplayZoom(im, fact):
    """Return a copy of im scaled up by integer factor `fact` via pixel
    replication.  Bilevel/grayscale/palette images ("1", "L", "P") are
    converted to RGB first; fact is clamped to at least 1.
    """
    if fact < 1:
        fact = 1
    fact = int(fact)
    # if im.size[0]*fact>760: fact=int(760/im.size[0])
    # Simplified the original enumerate-over-["1","L","P"] substring loop.
    if im.mode in ("1", "L", "P"):
        im = im.convert("RGB")
    if fact == 1:
        return im.copy()
    Xim = Image.new(im.mode, (im.size[0] * fact, im.size[1] * fact))
    for w in range(im.size[0]):
        for h in range(im.size[1]):
            px = im.getpixel((w, h))  # hoisted out of the replication loops
            for f in range(fact):
                for g in range(fact):
                    Xim.putpixel((w * fact + f, h * fact + g), px)
    return Xim
| {
"repo_name": "ant1b/Planets4X",
"path": "Planets4X_archives/Planets4X_v00.07/ImageSupport.py",
"copies": "1",
"size": "2704",
"license": "mit",
"hash": -5516549123296641000,
"line_mean": 27.7659574468,
"line_max": 90,
"alpha_frac": 0.5325443787,
"autogenerated": false,
"ratio": 3.2,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42325443787,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
import logging
import struct
import threading
import sys
# ANT+ public network key (8 bytes) required to open ANT+ device channels.
NETWORK_KEY= [0xb9, 0xa5, 0x21, 0xfb, 0xbd, 0x72, 0xc3, 0x45]
class Monitor():
    """Tracks the latest ANT+ heart-rate and cadence/speed readings and
    rewrites a single status line on stdout."""

    def __init__(self):
        # Readings start as "n/a" until the first matching page arrives.
        # NOTE(review): dropped the stray C-style trailing semicolons.
        self.heartrate = "n/a"
        self.cadence = "n/a"
        self.speed = "n/a"

    def on_data_heartrate(self, data):
        """Record the computed heart rate (broadcast page byte 7)."""
        self.heartrate = str(data[7])
        self.display()

    def on_data_cadence_speed(self, data):
        """Record the little-endian 16-bit cumulative revolution counters
        at bytes 2-3 (pedal) and 6-7 (wheel)."""
        self.cadence = str(data[3]*256 + data[2])
        self.speed = str(data[7]*256 + data[6])
        self.display()

    def display(self):
        """Redraw the status line in place using backspace characters."""
        string = "Hearthrate: " + self.heartrate + " Pedal revolutions: " + self.cadence + " Wheel revolutions: " + self.speed
        sys.stdout.write(string)
        sys.stdout.flush()
        sys.stdout.write("\b" * len(string))
def main():
    """Open two ANT+ receive channels (heart rate; combined cadence/speed)
    and stream broadcast pages into a Monitor until stopped."""
    # logging.basicConfig()
    monitor = Monitor()
    node = Node()
    node.set_network_key(0x00, NETWORK_KEY)
    # Heart-rate channel.  set_id(0, 120, 0) -- presumably (device number,
    # device type 120 = HR, transmission type); verify against openant docs.
    channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel.on_broadcast_data = monitor.on_data_heartrate
    channel.on_burst_data = monitor.on_data_heartrate
    channel.set_period(8070)
    channel.set_search_timeout(12)
    channel.set_rf_freq(57)
    channel.set_id(0, 120, 0)
    # Combined cadence & speed channel (device type 121, period 8085).
    channel_cadence_speed = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel_cadence_speed.on_broadcast_data = monitor.on_data_cadence_speed
    channel_cadence_speed.on_burst_data = monitor.on_data_cadence_speed
    channel_cadence_speed.set_period(8085)
    channel_cadence_speed.set_search_timeout(30)
    channel_cadence_speed.set_rf_freq(57)
    channel_cadence_speed.set_id(0, 121, 0)
    try:
        channel.open()
        channel_cadence_speed.open()
        node.start()  # blocks pumping ANT messages
    finally:
        node.stop()  # always release the ANT stick
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "jforge/openant",
"path": "examples/cadence_speed_heart.py",
"copies": "1",
"size": "3127",
"license": "mit",
"hash": 2585055084866475500,
"line_mean": 31.5729166667,
"line_max": 126,
"alpha_frac": 0.6952350496,
"autogenerated": false,
"ratio": 3.4552486187845304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4650483668384531,
"avg_score": null,
"num_lines": null
} |
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
import logging
import struct
import threading
import sys
# ANT+ public network key (8 bytes) required to open ANT+ device channels.
NETWORK_KEY = [0xB9, 0xA5, 0x21, 0xFB, 0xBD, 0x72, 0xC3, 0x45]
class Monitor:
    """Accumulates the most recent ANT+ heart-rate and cadence/speed
    readings and continuously rewrites one status line on stdout."""

    def __init__(self):
        # All readings start as "n/a" until the first matching page arrives.
        self.heartrate = "n/a"
        self.cadence = "n/a"
        self.speed = "n/a"

    def on_data_heartrate(self, data):
        """Record the computed heart rate (page byte 7) and refresh."""
        self.heartrate = str(data[7])
        self.display()

    def on_data_cadence_speed(self, data):
        """Record the 16-bit little-endian revolution counters at bytes
        2-3 (pedal) and 6-7 (wheel), then refresh."""
        self.cadence = str(data[3] * 256 + data[2])
        self.speed = str(data[7] * 256 + data[6])
        self.display()

    def display(self):
        """Redraw the status line in place via backspace characters."""
        parts = (
            "Hearthrate: ",
            self.heartrate,
            " Pedal revolutions: ",
            self.cadence,
            " Wheel revolutions: ",
            self.speed,
        )
        line = "".join(parts)
        sys.stdout.write(line)
        sys.stdout.flush()
        sys.stdout.write("\b" * len(line))
def main():
    """Open two ANT+ receive channels (heart rate; combined cadence/speed)
    and stream broadcast pages into a Monitor until stopped."""
    # logging.basicConfig()
    monitor = Monitor()
    node = Node()
    node.set_network_key(0x00, NETWORK_KEY)
    # Heart-rate channel.  set_id(0, 120, 0) -- presumably (device number,
    # device type 120 = HR, transmission type); verify against openant docs.
    channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel.on_broadcast_data = monitor.on_data_heartrate
    channel.on_burst_data = monitor.on_data_heartrate
    channel.set_period(8070)
    channel.set_search_timeout(12)
    channel.set_rf_freq(57)
    channel.set_id(0, 120, 0)
    # Combined cadence & speed channel (device type 121, period 8085).
    channel_cadence_speed = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel_cadence_speed.on_broadcast_data = monitor.on_data_cadence_speed
    channel_cadence_speed.on_burst_data = monitor.on_data_cadence_speed
    channel_cadence_speed.set_period(8085)
    channel_cadence_speed.set_search_timeout(30)
    channel_cadence_speed.set_rf_freq(57)
    channel_cadence_speed.set_id(0, 121, 0)
    try:
        channel.open()
        channel_cadence_speed.open()
        node.start()  # blocks pumping ANT messages
    finally:
        node.stop()  # always release the ANT stick
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "Tigge/openant",
"path": "examples/cadence_speed_heart.py",
"copies": "1",
"size": "3153",
"license": "mit",
"hash": -1896475997238575400,
"line_mean": 29.6116504854,
"line_max": 80,
"alpha_frac": 0.6758642563,
"autogenerated": false,
"ratio": 3.4916943521594686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46675586084594684,
"avg_score": null,
"num_lines": null
} |
"""Antenna-based gain solver based on StefCal."""
#
# Ludwig Schwardt
# 22 April 2013
#
import numpy as np
def stefcal(vis, num_ants, antA, antB, weights=1.0, num_iters=10, ref_ant=0, init_gain=None):
    """Solve for antenna gains using StefCal (array dot product version).
    The observed visibilities are provided in a NumPy array of any shape and
    dimension, as long as the last dimension represents baselines. The gains
    are then solved in parallel for the rest of the dimensions. For example,
    if the *vis* array has shape (T, F, B) containing *T* dumps / timestamps,
    *F* frequency channels and *B* baselines, the resulting gain array will be
    of shape (T, F, num_ants), where *num_ants* is the number of antennas.
    In order to get a proper solution it is important to include the conjugate
    visibilities as well by reversing antenna pairs, e.g. by forming
    full_vis = np.concatenate((vis, vis.conj()), axis=-1)
    full_antA = np.r_[antA, antB]
    full_antB = np.r_[antB, antA]
    Parameters
    ----------
    vis : array of complex, shape (M, ..., N)
        Complex cross-correlations between antennas A and B, assuming *N*
        baselines or antenna pairs on the last dimension
    num_ants : int
        Number of antennas
    antA, antB : array of int, shape (N,)
        Antenna indices associated with visibilities
    weights : float or array of float, shape (M, ..., N), optional
        Visibility weights (positive real numbers)
    num_iters : int, optional
        Number of iterations
    ref_ant : int, optional
        Index of reference antenna that will be forced to have a gain of 1.0
    init_gain : array of complex, shape(num_ants,) or None, optional
        Initial gain vector (all equal to 1.0 by default)
    Returns
    -------
    gains : array of complex, shape (M, ..., num_ants)
        Complex gains per antenna
    Notes
    -----
    The model visibilities are assumed to be 1, implying a point source model.
    The algorithm is iterative but should converge in a small number of
    iterations (10 to 30).
    """
    # Each row of this array contains the indices of baselines with the same antA
    baselines_per_antA = np.array([(antA == m).nonzero()[0] for m in range(num_ants)])
    # Each row of this array contains corresponding antB indices with same antA
    antB_per_antA = antB[baselines_per_antA]
    weighted_vis = weights * vis
    weighted_vis = weighted_vis[..., baselines_per_antA]
    # Initial estimate of gain vector
    gain_shape = tuple(list(vis.shape[:-1]) + [num_ants])
    # BUGFIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `complex` is the documented replacement.
    g_curr = np.ones(gain_shape, dtype=complex) if init_gain is None else init_gain
    for n in range(num_iters):
        # Basis vector (collection) represents gain_B* times model (assumed 1)
        g_basis = g_curr[..., antB_per_antA]
        # Do scalar least-squares fit of basis vector to vis vector for whole collection in parallel
        g_new = (g_basis * weighted_vis).sum(axis=-1) / (g_basis.conj() * g_basis).sum(axis=-1)
        # Normalise g_new to match g_curr so that taking their average and diff
        # make sense (without copy() the elements of g_new are mangled up)
        g_new /= g_new[..., ref_ant][..., np.newaxis].copy()
        print("Iteration %d: mean absolute gain change = %f" %
              (n + 1, 0.5 * np.abs(g_new - g_curr).mean()))
        # Avoid getting stuck during iteration
        g_curr = 0.5 * (g_new + g_curr)
    return g_curr
def mean(a, axis=None):
    """Mean that treats complex arrays in polar form: the result magnitude
    is the mean of element magnitudes, and its phase is the circular
    (vector-averaged) mean of element phases.  Real arrays fall through to
    np.mean."""
    if a.dtype.kind != 'c':
        return np.mean(a, axis=axis)
    magnitude = np.sqrt(a.real ** 2 + a.imag ** 2).mean(axis=axis)
    phases = np.arctan2(a.imag, a.real)
    mean_angle = np.arctan2(np.sin(phases).sum(axis=axis),
                            np.cos(phases).sum(axis=axis))
    return magnitude * np.exp(1j * mean_angle)
def bandpass_cal(vis, num_ants, antA, antB, weights=1.0, num_iters=10,
                 ref_ant=-1, init_gain=None, delay_chans=slice(200, 800)):
    """Solve per-channel antenna gains via stefcal and, when ref_ant < 0,
    normalise them by a robust average amplitude and median phase instead
    of a single reference antenna.

    delay_chans selects the channel range used to estimate the average
    phase slope.  NOTE(review): assumes the second-to-last axis of the gain
    array is frequency -- confirm against callers.
    """
    gains = stefcal(vis, num_ants, antA, antB, weights, num_iters,
                    ref_ant if ref_ant >= 0 else 0, init_gain)
    # Trimmed mean of per-channel phase differences: discard the lowest and
    # highest 25% across antennas before averaging.
    percentile = 0.25
    offset = int(np.round(percentile * num_ants))
    dphase = np.diff(np.angle(gains[..., delay_chans, :]), axis=-2)
    # Wrap phase differences into [-pi, pi].
    dphase[dphase > np.pi] -= 2 * np.pi
    dphase[dphase < -np.pi] += 2 * np.pi
    dphase.sort(axis=-1)
    avg_phase_slope = dphase[..., offset:-offset].mean(axis=-1)
    mean_phase = np.zeros(gains.shape[:-1])
    mean_phase[..., delay_chans.start + 1:delay_chans.stop] = avg_phase_slope
    # NOTE(review): np.cumsum without axis= flattens multi-dim input, and
    # mean_phase is never applied to `gains` below -- dead code or a
    # missing correction step?  Verify intent.
    mean_phase = np.cumsum(mean_phase)
    if ref_ant < 0:
        # Robust (trimmed-mean) amplitude across antennas.
        amp = np.abs(gains)
        amp.sort(axis=-1)
        percentile = 0.25
        offset = int(np.round(percentile * num_ants))
        avg_amp = amp[..., offset:-offset].mean(axis=-1)
        # avg_amp = np.mean(np.abs(g_curr), axis=-1)
        middle_angle = np.arctan2(np.median(gains.imag, axis=-1),
                                  np.median(gains.real, axis=-1))
        gains /= (avg_amp * np.exp(1j * middle_angle))[..., np.newaxis]
    return gains
# Quick test of StefCal by running this as a script
if __name__ == '__main__':
    M = 100 # Number of dumps
    N = 2 # Number of antennas / inputs
    K = 10000 # Number of integrations per dump
    noise_power = 10.0 # Compared to signal power of 1.0
    ref_ant = 1
    # We want lower triangle, but use upper triangle indices instead because
    # of 'C' ordering of arrays
    vech = np.triu_indices(N, 1)
    antA, antB = np.meshgrid(np.arange(N), np.arange(N))
    antA, antB = antA[vech], antB[vech]
    # Include both triangles when solving (i.e. visibility + its conjugate)
    antA, antB = np.r_[antA, antB], np.r_[antB, antA]
    # Generate random gain vector with magnitudes around 1 and random phase
    abs_g = np.abs(1 + 0.1 * np.random.randn(N))
    g = abs_g * np.exp(2j * np.pi * np.random.rand(N))
    # Fix reference gain to have magnitude 1 and phase 0
    g_norm = g / g[ref_ant]
    # Assume simple point source model + diagonal noise contribution
    ggH = np.outer(g, g.conj())
    Rn = noise_power * np.eye(N)
    R = ggH + Rn
    # NOTE(review): np.complex was removed in NumPy 1.24; this line needs
    # the builtin `complex` on modern NumPy.
    vis = np.zeros((M, len(antA)), dtype=np.complex)
    for m in range(M):
        # Generate random sample covariance matrix V from true covariance R
        L = np.linalg.cholesky(R)
        X = np.random.randn(R.shape[0], K) + 1.0j * np.random.randn(R.shape[0], K)
        V = 0.5 * np.dot(X, X.conj().transpose())
        V = np.dot(L, np.dot(V, L.conj().transpose())) / K
        # Extract cross-correlations from covariance matrix and stack them into vector
        vis[m] = V[(antA, antB)]
    print(
        '\nTesting StefCal:\n----------------')
    g_estm = stefcal(vis, N, antA, antB, num_iters=10, ref_ant=ref_ant)
    compare = '\n'.join([("%+5.3f%+5.3fj -> %+5.3f%+5.3fj" %
                          (gt.real, gt.imag, ge.real, ge.imag))
                         for gt, ge in np.c_[g_norm, g_estm.mean(axis=0)]])
    print(
        '\nOriginal gain -> Mean estimated gain vector:\n' + compare)
    print(
        'StefCal mean absolute gain error = %f' % \
(np.abs(g_estm.mean(axis=0) - g_norm).mean(),)) | {
"repo_name": "SKA-ScienceDataProcessor/algorithm-reference-library",
"path": "util/stefcal.py",
"copies": "1",
"size": "7165",
"license": "apache-2.0",
"hash": -3913275405251715000,
"line_mean": 41.1529411765,
"line_max": 100,
"alpha_frac": 0.6107466853,
"autogenerated": false,
"ratio": 3.234762979683973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9297205361963263,
"avg_score": 0.009660860604141817,
"num_lines": 170
} |
from __future__ import absolute_import, print_function, division
import array
import datetime
import logging
import threading
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.easy.channel import Channel
from ant.easy.node import Node, Message
import ant.fs.command
from ant.fs.beacon import Beacon
from ant.fs.command import (LinkCommand, DownloadRequest, DownloadResponse,
AuthenticateCommand, AuthenticateResponse, DisconnectCommand,
UploadRequest, UploadResponse, UploadDataCommand, UploadDataResponse,
EraseRequestCommand, EraseResponse)
from ant.fs.commandpipe import CreateFile, Response, Time, TimeResponse
from ant.fs.file import Directory
from ant.fs.commons import crc
# Module-level logger for ANT-FS session tracing.
_logger = logging.getLogger("ant.fs.manager")
class AntFSException(Exception):
    """Base class for ANT-FS errors, carrying a message and optional errno."""

    def __init__(self, error, errno=None):
        Exception.__init__(self, error, errno)
        self._error = error
        self._errno = errno

    def get_error(self):
        """Return the message, prefixed with the errno when one was given."""
        if self._errno is None:
            return self._error
        return str(self._errno) + ": " + self._error
class AntFSDownloadException(AntFSException):
    """Raised when an ANT-FS download request or transfer fails."""
    # The original __init__ only forwarded to the base class; omitting the
    # redundant override leaves construction behaviour unchanged.
class AntFSUploadException(AntFSException):
    """Raised when an ANT-FS upload request or data transfer fails."""
    # The original __init__ only forwarded to the base class; omitting the
    # redundant override leaves construction behaviour unchanged.
class AntFSEraseException(AntFSException):
    """Raised when an ANT-FS erase request fails."""
    # The original __init__ only forwarded to the base class; omitting the
    # redundant override leaves construction behaviour unchanged.
class AntFSAuthenticationException(AntFSException):
    """Raised when pairing or passkey authentication is rejected."""
    # The original __init__ only forwarded to the base class; omitting the
    # redundant override leaves construction behaviour unchanged.
class AntFSCreateFileException(AntFSException):
    """Raised when the device refuses a file-creation request."""
    # The original __init__ only forwarded to the base class; omitting the
    # redundant override leaves construction behaviour unchanged.
class AntFSTimeException(AntFSException):
    """Raised when setting the device time fails."""
    # The original __init__ only forwarded to the base class; omitting the
    # redundant override leaves construction behaviour unchanged.
class Application:
    """Drives an ANT-FS session: starts an ANT node in a worker thread,
    splits incoming channel data into beacons and ANT-FS commands, and
    implements link / authentication / transport operations (download,
    upload, erase, set time).

    Subclasses override setup_channel(), on_link(), on_authentication()
    and on_transport().
    """

    _serial_number = 1337
    _frequency = 19     # 0 to 124, x - 2400 (in MHz)

    def __init__(self):
        self._queue = queue.Queue()    # parsed ANT-FS commands from the device
        self._beacons = queue.Queue()  # parsed ANT-FS beacons
        self._node = Node()
        try:
            NETWORK_KEY = [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
            self._node.set_network_key(0x00, NETWORK_KEY)
            print("Request basic information...")
            m = self._node.request_message(Message.ID.RESPONSE_CAPABILITIES)
            print(" Capabilities: ", m[2])
            # m = self._node.request_message(Message.ID.RESPONSE_VERSION)
            # print " ANT version: ", struct.unpack("<10sx", m[2])[0]
            #m = self._node.request_message(Message.ID.RESPONSE_SERIAL_NUMBER)
            #print " Serial number:", struct.unpack("<I", m[2])[0]
            print("Starting system...")
            #NETWORK_KEY= [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
            #self._node.set_network_key(0x00, NETWORK_KEY)
            print("Key done...")
            self._channel = self._node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
            # All incoming data (broadcast and burst) funnels through _on_data.
            self._channel.on_broadcast_data = self._on_data
            self._channel.on_burst_data = self._on_data
            self.setup_channel(self._channel)
            self._worker_thread = threading.Thread(target=self._worker, name="ant.fs")
            self._worker_thread.start()
        except Exception:
            # Shut the node down if setup fails half-way, then re-raise
            # (bare raise preserves the original traceback).
            self.stop()
            raise

    def _worker(self):
        # Runs in self._worker_thread; blocks pumping ANT messages.
        self._node.start()

    def _main(self):
        """Session state machine: link -> authenticate -> transport -> disconnect."""
        try:
            _logger.debug("Link level")
            beacon = self._get_beacon()
            if self.on_link(beacon):
                # Give the client a few beacons to reach the auth state.
                for i in range(0, 5):
                    beacon = self._get_beacon()
                    if beacon.get_client_device_state() == Beacon.ClientDeviceState.AUTHENTICATION:
                        _logger.debug("Auth layer")
                        if self.on_authentication(beacon):
                            _logger.debug("Authenticated")
                            beacon = self._get_beacon()
                            self.on_transport(beacon)
                        self.disconnect()
                        break
        finally:
            _logger.debug("Run 5")
            self.stop()

    def _on_beacon(self, data):
        b = Beacon.parse(data)
        self._beacons.put(b)

    def _on_command(self, data):
        c = ant.fs.command.parse(data)
        self._queue.put(c)

    def _on_data(self, data):
        """Demultiplex raw channel data: 0x43 starts a beacon (possibly
        followed by a command in the same burst), 0x44 a bare command."""
        if data[0] == 0x43:
            self._on_beacon(data[:8])
            if len(data[8:]) > 0:
                self._on_command(data[8:])
        elif data[0] == 0x44:
            self._on_command(data)

    def _get_beacon(self):
        # Blocks until the device sends its next beacon.
        b = self._beacons.get()
        self._beacons.task_done()
        return b

    def _get_command(self, timeout=15.0):
        # Blocks up to `timeout` seconds; raises queue.Empty on expiry.
        _logger.debug("Get command, t%d, s%d", timeout, self._queue.qsize())
        c = self._queue.get(True, timeout)
        self._queue.task_done()
        return c

    def _send_command(self, c):
        # An 8-byte command fits one acknowledged packet; larger ones burst.
        data = c.get()
        if len(data) == 8:
            self._channel.send_acknowledged_data(data)
        else:
            self._channel.send_burst_transfer(data)

    # Application actions are defined from here
    # =======================================================================

    # These should be overloaded:

    def setup_channel(self, channel):
        """Hook: configure the ANT channel (period, frequency, id, ...)."""
        pass

    def on_link(self, beacon):
        """Hook: handle the link-level beacon; return truthy to proceed."""
        pass

    def on_authentication(self, beacon):
        """Hook: authenticate; return truthy when accepted."""
        pass

    def on_transport(self, beacon):
        """Hook: perform transport-level work (downloads, uploads, ...)."""
        pass

    # Shouldn't have to touch these:

    def start(self):
        self._main()

    def stop(self):
        self._node.stop()

    def _send_commandpipe(self, data):
        # The command pipe is the reserved file index 0xfffe.
        self.upload(0xfffe, data)

    def _get_commandpipe(self):
        return ant.fs.commandpipe.parse(self.download(0xfffe))

    def create(self, typ, data, callback=None):
        """Create a file of type `typ` on the device, upload `data` into it
        and return its index.  `callback`, if given, receives upload
        progress as a fraction in [0, 1]."""
        request = CreateFile(len(data), 0x80, [typ, 0x00, 0x00], [0x00, 0xff, 0xff])
        self._send_commandpipe(request.get())
        result = self._get_commandpipe()
        if result.get_response() != Response.Response.OK:
            raise AntFSCreateFileException("Could not create file",
                    result.get_response())
        # Inform the application that the upload request was successfully created
        if callback is not None:
            callback(0)
        self.upload(result.get_index(), data, callback)
        return result.get_index()

    def upload(self, index, data, callback=None):
        """Upload `data` to file `index` in device-negotiated blocks.

        Raises AntFSUploadException on a rejected request or failed block.
        """
        iteration = 0
        while True:
            # Request Upload
            # Continue using Last Data Offset (special MAX_ULONG value)
            request_offset = 0 if iteration == 0 else 0xffffffff
            self._send_command(UploadRequest(index, len(data), request_offset))
            upload_response = self._get_command()
            if upload_response._get_argument("response") != UploadResponse.Response.OK:
                raise AntFSUploadException("Upload request failed",
                        upload_response._get_argument("response"))
            # Upload the next block at the offset the device reports.
            offset = upload_response._get_argument("last_data_offset")
            max_block = upload_response._get_argument("maximum_block_size")
            data_packet = data[offset:offset + max_block]
            crc_seed = upload_response._get_argument("crc")
            crc_val = crc(data_packet, upload_response._get_argument("crc"))
            # Pad with 0 to even 8 bytes
            missing_bytes = 8 - (len(data_packet) % 8)
            if missing_bytes != 8:
                data_packet.extend(array.array('B', [0] * missing_bytes))
            self._send_command(UploadDataCommand(crc_seed, offset, data_packet, crc_val))
            upload_data_response = self._get_command()
            if upload_data_response._get_argument("response") != UploadDataResponse.Response.OK:
                raise AntFSUploadException("Upload data failed",
                        upload_data_response._get_argument("response"))
            if callback is not None and len(data) != 0:
                callback((offset + len(data_packet)) / len(data))
            if offset + len(data_packet) >= len(data):
                break
            iteration += 1

    def download(self, index, callback=None):
        """Download file `index`, retrying on timeouts, returning its
        contents as an array('B').  `callback` receives progress fractions."""
        offset = 0
        crc = 0   # NOTE: shadows the imported crc() within this method only
        data = array.array('B')
        while True:
            _logger.debug("Download %d, o%d, c%d", index, offset, crc)
            self._send_command(DownloadRequest(index, offset, True, crc))
            _logger.debug("Wait for response...")
            try:
                response = self._get_command()
                if response._get_argument("response") == DownloadResponse.Response.OK:
                    remaining = response._get_argument("remaining")
                    offset = response._get_argument("offset")
                    total = offset + remaining
                    data[offset:total] = response._get_argument("data")[:remaining]
                    # TODO: check CRC
                    if callback is not None and response._get_argument("size") != 0:
                        callback(total / response._get_argument("size"))
                    if total == response._get_argument("size"):
                        return data
                    crc = response._get_argument("crc")
                    offset = total
                else:
                    raise AntFSDownloadException("Download request failed: ",
                            response._get_argument("response"))
            except queue.Empty:
                # Timed out waiting; loop re-sends the request from `offset`.
                _logger.debug("Download %d timeout", index)

    def download_directory(self, callback=None):
        # File index 0 is the ANT-FS directory.
        data = self.download(0, callback)
        return Directory.parse(data)

    def set_time(self, time=None):
        """
        :param time: datetime in UTC, or None to set to current time
        """
        # BUGFIX: the default used to be `time=datetime.datetime.utcnow()`,
        # evaluated once at import, so "current time" was really the module
        # load time and passing None (as documented) raised TypeError.
        if time is None:
            time = datetime.datetime.utcnow()
        utc_tai_diff_seconds = 35
        # ANT-FS epoch is 1989-12-31 00:00:00.
        offset = time - datetime.datetime(1989, 12, 31, 0, 0, 0)
        t = Time(int(offset.total_seconds()) + utc_tai_diff_seconds, 0xffffffff, 0)
        self._send_commandpipe(t.get())
        result = self._get_commandpipe()
        if result.get_response() != TimeResponse.Response.OK:
            raise AntFSTimeException("Failed to set time", result.get_response())

    def erase(self, index):
        self._send_command(EraseRequestCommand(index))
        response = self._get_command()
        if response._get_argument("response") != EraseResponse.Response.ERASE_SUCCESSFUL:
            raise AntFSDownloadException("Erase request failed: ",
                    response._get_argument("response"))

    def link(self):
        """Move the session from the search channel to the negotiated
        private frequency/period via the LINK command."""
        self._channel.request_message(Message.ID.RESPONSE_CHANNEL_ID)
        self._send_command(LinkCommand(self._frequency, 4, self._serial_number))
        # New period, search timeout
        self._channel.set_period(4096)
        self._channel.set_search_timeout(3)
        self._channel.set_rf_freq(self._frequency)

    def authentication_serial(self):
        """Request the client's serial number and friendly name."""
        self._send_command(AuthenticateCommand(
                AuthenticateCommand.Request.SERIAL,
                self._serial_number))
        response = self._get_command()
        return (response.get_serial(), response.get_data_string())

    def authentication_passkey(self, passkey):
        """Authenticate with a previously paired passkey."""
        self._send_command(AuthenticateCommand(
                AuthenticateCommand.Request.PASSKEY_EXCHANGE,
                self._serial_number, passkey))
        response = self._get_command()
        if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
            return response.get_data_array()
        else:
            raise AntFSAuthenticationException("Passkey authentication failed",
                    response._get_argument("type"))

    def authentication_pair(self, friendly_name):
        """Initiate pairing; the user confirms on the device (30 s timeout)."""
        data = array.array('B', map(ord, list(friendly_name)))
        self._send_command(AuthenticateCommand(
                AuthenticateCommand.Request.PAIRING,
                self._serial_number, data))
        response = self._get_command(30)
        if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
            return response.get_data_array()
        else:
            raise AntFSAuthenticationException("Pair authentication failed",
                    response._get_argument("type"))

    def disconnect(self):
        d = DisconnectCommand(DisconnectCommand.Type.RETURN_LINK, 0, 0)
        self._send_command(d)
| {
"repo_name": "jforge/openant",
"path": "ant/fs/manager.py",
"copies": "1",
"size": "14912",
"license": "mit",
"hash": 1669999059778428000,
"line_mean": 35.7290640394,
"line_max": 118,
"alpha_frac": 0.5861051502,
"autogenerated": false,
"ratio": 4.127318018267368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5213423168467368,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
# http://reveng.sourceforge.net/crc-catalogue/16.htm#crc.cat.arc
def crc(data, seed=0x0000):
    """CRC-16/ARC over an iterable of byte values (reflected algorithm,
    polynomial 0xA001), starting from `seed`."""
    checksum = seed
    for value in data:
        checksum ^= value
        for _ in range(8):
            lsb_set = checksum & 0x0001
            checksum >>= 1
            if lsb_set:
                checksum ^= 0xa001
    return checksum
| {
"repo_name": "jforge/openant",
"path": "ant/fs/commons.py",
"copies": "1",
"size": "1518",
"license": "mit",
"hash": 6252535277960930000,
"line_mean": 38.9473684211,
"line_max": 77,
"alpha_frac": 0.7048748353,
"autogenerated": false,
"ratio": 4.091644204851752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027255639097744363,
"num_lines": 38
} |
from __future__ import absolute_import, print_function
import array
from collections import OrderedDict
import copy
import logging
import struct
# Module-level logger for ANT-FS command packing/parsing.
_logger = logging.getLogger("ant.fs.command")


class Command:
    """Base class for ANT-FS commands and responses.

    A command is an ordered set of named arguments packed via `_format`
    (always beginning with the 0x44 marker byte and the command id) into an
    array('B') ready for an ANT acknowledged or burst transfer.
    """

    class Type:
        # Commands
        LINK = 0x02
        DISCONNECT = 0x03
        AUTHENTICATE = 0x04
        PING = 0x05
        DOWNLOAD_REQUEST = 0x09
        UPLOAD_REQUEST = 0x0A
        ERASE_REQUEST = 0x0B
        UPLOAD_DATA = 0x0C
        # Responses
        AUTHENTICATE_RESPONSE = 0x84
        DOWNLOAD_RESPONSE = 0x89
        UPLOAD_RESPONSE = 0x8A
        ERASE_RESPONSE = 0x8B
        UPLOAD_DATA_RESPONSE = 0x8C

    _format = "<BB"
    _id = None  # subclasses set this to one of Command.Type

    def __init__(self):
        # Ordered so packing/parsing follow _format's field order.
        self._arguments = OrderedDict()
        self._add_argument('x', 0x44)   # ANT-FS marker byte
        self._add_argument('id', self._id)

    def _add_argument(self, name, value):
        self._arguments[name] = value

    def _get_argument(self, name):
        return self._arguments[name]

    def _get_arguments(self):
        return self._arguments.values()

    def get_id(self):
        return self._id

    def get(self):
        """Pack the arguments into an array('B') using _format."""
        arguments = list(self._get_arguments())  # dropped stray semicolon
        data = struct.pack(self._format, *arguments)
        lst = array.array('B', data)
        _logger.debug("packing %r in %r,%s", data, lst, type(lst))
        return lst

    @classmethod
    def _parse_args(cls, data):
        return struct.unpack(cls._format, data)

    @classmethod
    def _parse(cls, data):
        """Build a command instance from raw bytes, verifying marker and id."""
        args = cls._parse_args(data)
        assert args[0] == 0x44
        assert args[1] == cls._id
        return cls(*args[2:])

    def _debug(self):
        """Pretty-print the argument table (debugging aid)."""
        max_key_length, max_value_length = 0, 0
        for key, value in self._arguments.items():
            max_key_length = max(len(str(key)), max_key_length)
            max_value_length = max(len(str(value)), max_value_length)
        max_length = max_key_length + max_value_length + 3
        print("=" * max_length)
        print(self.__class__.__name__)
        print("-" * max_length)
        for key, value in self._arguments.items():
            print(str(key) + ":", " " * (max_length - len(key)), str(value))
        print("=" * max_length)
class LinkCommand(Command):
    """ANT-FS LINK command: asks the client to hop to the given frequency
    and period, tagged with the host serial number."""

    _id = Command.Type.LINK
    _format = Command._format + "BBI"

    def __init__(self, channel_frequency, channel_period, host_serial_number):
        Command.__init__(self)
        fields = (("channel_frequency", channel_frequency),
                  ("channel_period", channel_period),
                  ("host_serial_number", host_serial_number))
        for name, value in fields:
            self._add_argument(name, value)
class DisconnectCommand(Command):
    """ANT-FS DISCONNECT command: ends the session, returning the client
    to the link or broadcast state."""

    class Type:
        RETURN_LINK = 0
        RETURN_BROADCAST = 1

    _id = Command.Type.DISCONNECT
    _format = Command._format + "BBBxxx"

    def __init__(self, command_type, time_duration, application_specific_duration):
        Command.__init__(self)
        fields = (("command_type", command_type),
                  ("time_duration", time_duration),
                  ("application_specific_duration", application_specific_duration))
        for name, value in fields:
            self._add_argument(name, value)
class AuthenticateBase(Command):
    """Shared packing/parsing for authenticate commands and responses: an
    8-byte header followed by a variable-length, zero-padded payload."""

    _format = None  # packed/parsed by hand in get() / _parse_args()

    def __init__(self, x_type, serial_number, data=[]):
        # NOTE(review): mutable default [] is only safe because the argument
        # is never mutated here; callers should still pass their own list.
        Command.__init__(self)
        self._add_argument("type", x_type)
        self._add_argument("serial_number", serial_number)
        self._add_argument("data", data)

    def _pad(self, data):
        # Pad a copy of data with zeros up to the next 8-byte boundary.
        padded_data = copy.copy(data)
        missing = 8 - len(padded_data) % 8
        if missing < 8:
            padded_data.extend([0x00] * missing)
        return padded_data

    def get_serial(self):
        return self._get_argument("serial_number")

    def get_data_string(self):
        # None when there is no payload, else payload bytes as characters.
        if not self._get_argument("data"):
            return None
        else:
            return "".join(map(chr, self._get_argument("data")))

    def get_data_array(self):
        return self._get_argument("data")

    def get(self):
        # Header: marker, id, type, payload length, serial number; the
        # payload itself is appended zero-padded to 8-byte alignment.
        arguments = list(self._get_arguments())
        data = list(self._get_argument("data"))
        lst = array.array('B', struct.pack("<BBBBI", arguments[0],
                          arguments[1], arguments[2],
                          len(data), arguments[3]))
        padded = self._pad(data)
        lst.extend(array.array('B', padded))
        return lst

    @classmethod
    def _parse_args(cls, data):
        # Byte 3 of the header declares the payload length.
        header = struct.unpack("<BBBxI", data[0:8])
        data_length = data[3]
        return header + (data[8:8 + data_length],)
class AuthenticateCommand(AuthenticateBase):
    """Host-to-client authenticate request (serial query, pairing, or
    passkey exchange)."""

    class Request:
        PASS_THROUGH = 0
        SERIAL = 1
        PAIRING = 2
        PASSKEY_EXCHANGE = 3

    _id = Command.Type.AUTHENTICATE

    def __init__(self, command_type, host_serial_number, data=[]):
        AuthenticateBase.__init__(self, command_type, host_serial_number, data)
class AuthenticateResponse(AuthenticateBase):
    """Client-to-host authenticate response carrying accept/reject status."""

    class Response:
        NOT_AVAILABLE = 0
        ACCEPT = 1
        REJECT = 2

    _id = Command.Type.AUTHENTICATE_RESPONSE

    def __init__(self, response_type, client_serial_number, data=[]):
        AuthenticateBase.__init__(self, response_type, client_serial_number, data)
class PingCommand(Command):
    """ANT-FS PING command; carries no payload beyond the common header."""
    _id = Command.Type.PING
class DownloadRequest(Command):
    """Host -> client request to download a slice of a file."""

    _id = Command.Type.DOWNLOAD_REQUEST
    _format = Command._format + "HIx?HI"

    def __init__(self, data_index, data_offset, initial_request, crc_seed,
                 maximum_block_size=0):
        Command.__init__(self)
        for field, value in (
                ("data_index", data_index),
                ("data_offset", data_offset),
                ("initial_request", initial_request),
                ("crc_seed", crc_seed),
                ("maximum_block_size", maximum_block_size)):
            self._add_argument(field, value)
class DownloadResponse(Command):
    """Client response to a download request: status, counters, payload, CRC."""

    class Response:
        OK = 0
        NOT_EXIST = 1
        NOT_READABLE = 2
        NOT_READY = 3
        INVALID_REQUEST = 4
        INCORRECT_CRC = 5

    _id = Command.Type.DOWNLOAD_RESPONSE
    _format = None  # variable-length payload; parsed by hand below

    def __init__(self, response, remaining, offset, size, data, crc):
        Command.__init__(self)
        for field, value in (
                ("response", response),
                ("remaining", remaining),
                ("offset", offset),
                ("size", size),
                ("data", data),
                ("crc", crc)):
            self._add_argument(field, value)

    @classmethod
    def _parse_args(cls, data):
        # 16-byte fixed header, variable payload, 8-byte CRC footer.
        header = struct.unpack("<BBBxIII", data[0:16])
        payload = (data[16:-8],)
        footer = struct.unpack("<6xH", data[-8:])
        return header + payload + footer
class UploadRequest(Command):
    """Host -> client request to begin an upload at a given offset."""

    _id = Command.Type.UPLOAD_REQUEST
    _format = Command._format + "HI4xI"

    def __init__(self, data_index, max_size, data_offset):
        Command.__init__(self)
        for field, value in (
                ("data_index", data_index),
                ("max_size", max_size),
                ("data_offset", data_offset)):
            self._add_argument(field, value)
class UploadResponse(Command):
    """Client response to an upload request: status and upload limits."""

    class Response:
        OK = 0
        NOT_EXIST = 1
        NOT_WRITEABLE = 2
        NOT_ENOUGH_SPACE = 3
        INVALID_REQUEST = 4
        NOT_READY = 5

    _id = Command.Type.UPLOAD_RESPONSE
    _format = Command._format + "BxIII6xH"

    def __init__(self, response, last_data_offset, maximum_file_size,
                 maximum_block_size, crc):
        Command.__init__(self)
        for field, value in (
                ("response", response),
                ("last_data_offset", last_data_offset),
                ("maximum_file_size", maximum_file_size),
                ("maximum_block_size", maximum_block_size),
                ("crc", crc)):
            self._add_argument(field, value)
class UploadDataCommand(Command):
    """ANT-FS UPLOAD_DATA command: a burst transfer carrying file content.

    Variable-length payload, so serialization/parsing bypass the fixed
    _format path.
    """

    _id = Command.Type.UPLOAD_DATA
    _format = None

    def __init__(self, crc_seed, data_offset, data, crc):
        Command.__init__(self)
        self._add_argument("crc_seed", crc_seed)
        self._add_argument("data_offset", data_offset)
        self._add_argument("data", data)
        self._add_argument("crc", crc)

    def get(self):
        """Serialize as 8-byte header + raw data + 8-byte CRC footer."""
        arguments = list(self._get_arguments())
        header = struct.pack("<BBHI", *arguments[:4])
        footer = struct.pack("<6xH", self._get_argument("crc"))
        data = array.array('B', header)
        data.extend(self._get_argument("data"))
        data.extend(array.array('B', footer))
        return data

    @classmethod
    def _parse_args(cls, data):
        # BUG FIX: the payload/CRC tail of this expression used to sit on
        # a separate statement (missing line continuation), so only the
        # header tuple was returned and _parse() then failed to rebuild
        # the command.  Parenthesize so the whole tuple is returned.
        return (struct.unpack("<BBHI", data[0:8])
                + (data[8:-8],)
                + struct.unpack("<6xH", data[-8:]))
class UploadDataResponse(Command):
    """Client acknowledgement of an UPLOAD_DATA burst."""

    class Response:
        OK = 0
        FAILED = 1

    _format = Command._format + "B5x"
    _id = Command.Type.UPLOAD_DATA_RESPONSE

    def __init__(self, response):
        Command.__init__(self)
        self._add_argument("response", response)
class EraseRequestCommand(Command):
    """ANT-FS ERASE_REQUEST command for a single file index."""

    _format = Command._format + "I2x"
    _id = Command.Type.ERASE_REQUEST

    def __init__(self, data_file_index):
        Command.__init__(self)
        self._add_argument("data_file_index", data_file_index)
class EraseResponse(Command):
    """Client response to an erase request."""

    class Response:
        ERASE_SUCCESSFUL = 0
        ERASE_FAILED = 1
        NOT_READY = 2

    _format = Command._format + "B5x"
    _id = Command.Type.ERASE_RESPONSE

    def __init__(self, response):
        Command.__init__(self)
        self._add_argument("response", response)
# Dispatch table: frame command id -> class used by parse() below.
_classes = {
    # Commands
    Command.Type.LINK: LinkCommand,
    Command.Type.DISCONNECT: DisconnectCommand,
    Command.Type.AUTHENTICATE: AuthenticateCommand,
    Command.Type.PING: PingCommand,
    Command.Type.DOWNLOAD_REQUEST: DownloadRequest,
    Command.Type.UPLOAD_REQUEST: UploadRequest,
    Command.Type.ERASE_REQUEST: EraseRequestCommand,
    Command.Type.UPLOAD_DATA: UploadDataCommand,
    # Responses
    Command.Type.AUTHENTICATE_RESPONSE: AuthenticateResponse,
    Command.Type.DOWNLOAD_RESPONSE: DownloadResponse,
    Command.Type.UPLOAD_RESPONSE: UploadResponse,
    Command.Type.ERASE_RESPONSE: EraseResponse,
    Command.Type.UPLOAD_DATA_RESPONSE: UploadDataResponse}
def parse(data):
    """Parse a raw ANT-FS frame into the matching Command subclass."""
    _logger.debug("parsing data %r", data)
    mark, command_type = struct.unpack("<BB", data[0:2])
    assert mark == 0x44
    return _classes[command_type]._parse(data)
| {
"repo_name": "jforge/openant",
"path": "ant/fs/command.py",
"copies": "1",
"size": "11429",
"license": "mit",
"hash": -1424589343109327400,
"line_mean": 29.972899729,
"line_max": 104,
"alpha_frac": 0.6217516843,
"autogenerated": false,
"ratio": 3.734967320261438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48567190045614383,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
import array
from collections import OrderedDict
import logging
import struct
from ant.fs.command import Command
_logger = logging.getLogger("ant.fs.commandpipe")
class CommandPipe(object):
    """Base class for ANT-FS command-pipe messages.

    Every message starts with a command byte and a sequence number drawn
    from a class-wide counter; subclasses append their own fields via
    _id/_format.
    """

    class Type:
        REQUEST = 0x01
        RESPONSE = 0x02
        TIME = 0x03
        CREATE_FILE = 0x04
        DIRECTORY_FILTER = 0x05
        SET_AUTHENTICATION_PASSKEY = 0x06
        SET_CLIENT_FRIENDLY_NAME = 0x07
        FACTORY_RESET_COMMAND = 0x08

    _format = "<BxxB"
    _id = None
    _sequence = 0  # class-wide counter shared by all messages

    def __init__(self):
        self._arguments = OrderedDict()
        self._add_argument('command', self._id)
        CommandPipe._sequence += 1
        self._add_argument('sequence', CommandPipe._sequence)

    def _add_argument(self, name, value):
        self._arguments[name] = value

    def _get_argument(self, name):
        return self._arguments[name]

    def _get_arguments(self):
        return self._arguments.values()

    def __getattr__(self, attr):
        # Synthesize a get_<name>() accessor for every stored argument.
        if attr.startswith("get_"):
            name = attr[4:]
            if name in self._arguments:
                return lambda: self._arguments[name]
        raise AttributeError("No such attribute")

    def get(self):
        """Serialize all arguments according to _format."""
        packed = struct.pack(self._format, *list(self._get_arguments()))
        result = array.array('B', packed)
        _logger.debug("packing %r in %r,%s", packed, result, type(result))
        return result

    @classmethod
    def _parse_args(cls, data):
        return struct.unpack(cls._format, data)

    @classmethod
    def _parse(cls, data):
        """Rebuild an instance from raw bytes, keeping the wire sequence."""
        args = cls._parse_args(data)
        assert args[0] == cls._id
        instance = cls(*args[2:])
        # Preserve the sequence number that came off the wire rather than
        # the locally generated one.
        instance._arguments["sequence"] = args[1]
        return instance

    def _debug(self):
        """Pretty-print all arguments to stdout."""
        widest_key = 0
        widest_value = 0
        for key, value in self._arguments.items():
            widest_key = max(len(str(key)), widest_key)
            widest_value = max(len(str(value)), widest_value)
        width = widest_key + widest_value + 3
        print("=" * width)
        print(self.__class__.__name__)
        print("-" * width)
        for key, value in self._arguments.items():
            print(str(key) + ":", " " * (width - len(key)), str(value))
        print("=" * width)
class Request(CommandPipe):
    """Command-pipe request identified by a request id byte."""

    _format = CommandPipe._format + "Bxxx"
    _id = CommandPipe.Type.REQUEST

    def __init__(self, request_id):
        CommandPipe.__init__(self)
        self._add_argument('request_id', request_id)
class Response(CommandPipe):
    """Generic command-pipe response (request id + status)."""

    class Response:
        OK = 0
        FAILED = 1
        REJECTED = 2
        NOT_SUPPORTED = 3

    _id = CommandPipe.Type.RESPONSE
    _format = CommandPipe._format + "BxBx"

    def __init__(self, request_id, response):
        CommandPipe.__init__(self)
        for field, value in (('request_id', request_id),
                             ('response', response)):
            self._add_argument(field, value)
class Time(CommandPipe):
    """Command-pipe Time message: current/system time plus format."""

    class Format:
        DIRECTORY = 0
        SYSTEM = 1
        COUNTER = 2

    _id = CommandPipe.Type.TIME
    _format = CommandPipe._format + "IIBxxx"

    def __init__(self, current_time, system_time, time_format):
        CommandPipe.__init__(self)
        for field, value in (('current_time', current_time),
                             ('system_time', system_time),
                             ('time_format', time_format)):
            self._add_argument(field, value)
class TimeResponse(Response):
    """Response to a Time command: base fields plus 8 reserved pad bytes."""

    _format = Response._format + "xxxxxxxx"
    # No extra fields, so Response.__init__(request_id, response) is
    # inherited unchanged.
class CreateFile(Request):
    """Command-pipe request asking the client to create a new file."""

    _id = CommandPipe.Type.CREATE_FILE
    _format = None  # variable layout; serialized by hand in get()

    def __init__(self, size, data_type, identifier, identifier_mask):
        CommandPipe.__init__(self)
        self._add_argument('size', size)
        self._add_argument('data_type', data_type)
        self._add_argument('identifier', identifier)
        self._add_argument('identifier_mask', identifier_mask)

    def get(self):
        """Serialize: 9-byte header, 3-byte identifier, pad, 3-byte mask."""
        arguments = list(self._get_arguments())
        data = array.array('B', struct.pack(CommandPipe._format + "IB", *arguments[:4]))
        data.extend(self._get_argument("identifier"))
        data.extend([0])
        data.extend(self._get_argument("identifier_mask"))
        return data

    @classmethod
    def _parse_args(cls, data):
        # BUG FIX: this unpacked with Command._format ("<BB"), a 7-byte
        # layout, against the 9-byte header that get() packs with
        # CommandPipe._format ("<BxxB") -- struct.unpack raised on every
        # call.  Mirror get()'s layout instead.
        return struct.unpack(CommandPipe._format + "IB", data[0:9]) \
            + (data[9:12],) + (data[13:16],)
class CreateFileResponse(Response):
    """Response to CreateFile carrying the newly assigned file index."""

    _format = Response._format + "BBBBHxx"

    def __init__(self, request_id, response, data_type, identifier, index):
        Response.__init__(self, request_id, response)
        for field, value in (('data_type', data_type),
                             ('identifier', identifier),
                             ('index', index)):
            self._add_argument(field, value)

    @classmethod
    def _parse_args(cls, data):
        base = Response._parse_args(data[:8])
        index = struct.unpack("<H", data[12:14])[0]
        return base + (data[8], data[9:12], index)
# Dispatch table: command byte -> message class (None = not implemented).
_classes = {
    CommandPipe.Type.REQUEST: Request,
    CommandPipe.Type.RESPONSE: Response,
    CommandPipe.Type.TIME: Time,
    CommandPipe.Type.CREATE_FILE: CreateFile,
    CommandPipe.Type.DIRECTORY_FILTER: None,
    CommandPipe.Type.SET_AUTHENTICATION_PASSKEY: None,
    CommandPipe.Type.SET_CLIENT_FRIENDLY_NAME: None,
    CommandPipe.Type.FACTORY_RESET_COMMAND: None}
# Extended response classes, keyed by the originating request id.
_responses = {
    CommandPipe.Type.TIME: TimeResponse,
    CommandPipe.Type.CREATE_FILE: CreateFileResponse}
def parse(data):
    """Parse raw command-pipe data into the matching message class."""
    commandpipe_type = _classes[data[0]]
    # Replies come back as Response frames; when the frame is longer than
    # a plain 8-byte response, re-dispatch on the request id byte.
    if commandpipe_type == Response:
        if data[4] in _responses and len(data) > 8:
            commandpipe_type = _responses[data[4]]
    return commandpipe_type._parse(data)
| {
"repo_name": "jforge/openant",
"path": "ant/fs/commandpipe.py",
"copies": "1",
"size": "6913",
"license": "mit",
"hash": 4139704367677866000,
"line_mean": 31.3037383178,
"line_max": 106,
"alpha_frac": 0.6395197454,
"autogenerated": false,
"ratio": 3.7652505446623095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49047702900623097,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
import datetime
import logging
import struct
_logger = logging.getLogger("ant.fs.file")
class Directory:
    """Parsed ANT-FS directory: a 16-byte header plus File entries."""

    def __init__(self, version, time_format, current_system_time,
                 last_modified, files):
        self._version = version
        self._time_format = time_format
        self._current_system_time = current_system_time
        self._last_modified = last_modified
        self._files = files

    def get_version(self):
        return self._version

    def get_time_format(self):
        return self._time_format

    def get_current_system_time(self):
        return self._current_system_time

    def get_last_modified(self):
        return self._last_modified

    def get_files(self):
        return self._files

    def print_list(self):
        """Dump the directory as a tab-separated table to stdout."""
        print("Index\tType\tFIT Type\tFIT Number\tSize\tDate\tFIT Flags\tFlags")
        for f in self.get_files():
            print(f.get_index(), "\t", f.get_type(), "\t",
                  f.get_fit_sub_type(), "\t", f.get_fit_file_number(), "\t",
                  f.get_size(), "\t", f.get_date(), "\t", f._typ_flags, "\t",
                  f.get_flags_string())

    @staticmethod
    def parse(data):
        """Parse a raw directory blob: 16-byte header + 16 bytes per file."""
        _logger.debug("Parse '%s' as directory", data)
        header = struct.unpack("<BBB5xII", data[:16])
        version, _structure_length, time_format, system_time, last_modified = header
        entries = []
        for offset in range(16, len(data), 16):
            chunk = data[offset:offset + 16]
            _logger.debug(" - (%d - %d) %d, %s", offset, offset + 16, len(chunk), chunk)
            entries.append(File.parse(chunk))
        # The version byte packs major/minor into the high/low nibbles.
        return Directory(((version & 0xf0) >> 4, version & 0x0f),
                         time_format, system_time, last_modified, entries)
class File:
    """One 16-byte entry of an ANT-FS directory."""

    class Type:
        FIT = 0x80

    class Identifier:
        # FIT file sub-types.
        DEVICE = 1
        SETTING = 2
        SPORT = 3
        ACTIVITY = 4
        WORKOUT = 5
        COURSE = 6
        SCHEDULES = 7
        WEIGHT = 9
        TOTALS = 10
        GOALS = 11
        BLOOD_PRESSURE = 14
        MONITORING_A = 15
        ACTIVITY_SUMMARY = 20
        MONITORING_DAILY = 28
        MONITORING_B = 32

    def __init__(self, index, typ, ident, typ_flags, flags, size, date):
        self._index = index
        self._type = typ
        self._ident = ident
        self._typ_flags = typ_flags
        self._flags = flags
        self._size = size
        self._date = date

    def get_index(self):
        return self._index

    def get_type(self):
        return self._type

    def get_identifier(self):
        return self._ident

    def get_fit_sub_type(self):
        return self._ident[0]

    def get_fit_file_number(self):
        return struct.unpack("<xH", self._ident)[0]

    def get_size(self):
        return self._size

    def get_date(self):
        return self._date

    # Each accessor returns the masked bit value (non-zero when set).
    def is_readable(self):
        return self._flags & 0b10000000

    def is_writable(self):
        return self._flags & 0b01000000

    def is_erasable(self):
        return self._flags & 0b00100000

    def is_archived(self):
        return self._flags & 0b00010000

    def is_append_only(self):
        return self._flags & 0b00001000

    def is_encrypted(self):
        return self._flags & 0b00000100

    def get_flags_string(self):
        """Render the flags as an ls-style string, e.g. 'rwe---'."""
        bits = ((self.is_readable(), "r"),
                (self.is_writable(), "w"),
                (self.is_erasable(), "e"),
                (self.is_archived(), "A"),
                (self.is_append_only(), "a"),
                (self.is_encrypted(), "c"))
        return "".join(ch if bit else "-" for bit, ch in bits)

    @staticmethod
    def parse(data):
        """Parse one 16-byte directory entry into a File."""
        _logger.debug("Parse '%s' (%d) as file %s", data, len(data), type(data))
        # The 3-byte identifier cannot be expressed in struct (no i3
        # code), so it is sliced out of the raw bytes separately.
        index, data_type, data_flags, flags, file_size, file_date = \
            struct.unpack("<HB3xBBII", data)
        # ANT-FS timestamps count from 1989-12-31; 631065600 shifts them
        # onto the Unix epoch.
        stamp = datetime.datetime.fromtimestamp(file_date + 631065600)
        return File(index, data_type, data[3:6], data_flags, flags,
                    file_size, stamp)
| {
"repo_name": "jforge/openant",
"path": "ant/fs/file.py",
"copies": "1",
"size": "5477",
"license": "mit",
"hash": -3259529306105505000,
"line_mean": 30.6589595376,
"line_max": 110,
"alpha_frac": 0.6003286471,
"autogenerated": false,
"ratio": 3.746238030095759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9815526487728616,
"avg_score": 0.006208037893428589,
"num_lines": 173
} |
# http://reveng.sourceforge.net/crc-catalogue/16.htm#crc.cat.arc
def crc(data, seed=0x0000):
    """Compute CRC-16/ARC over *data* (reflected poly 0xA001).

    @param data Iterable of byte values.
    @param seed Initial remainder (0 for the standard checksum).
    """
    remainder = seed
    for byte in data:
        remainder ^= byte
        for _ in range(8):
            lsb_set = remainder & 0x0001
            remainder >>= 1
            if lsb_set:
                remainder ^= 0xa001
    return remainder
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/commons.py",
"copies": "1",
"size": "1463",
"license": "mit",
"hash": 24684835045567064,
"line_mean": 39.6388888889,
"line_max": 77,
"alpha_frac": 0.7019822283,
"autogenerated": false,
"ratio": 4.098039215686274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5300021443986275,
"avg_score": null,
"num_lines": null
} |
import array
import collections
import copy
import logging
import struct
_logger = logging.getLogger("garmin.ant.fs.command")
class Command:
    """Base class for ANT-FS link-layer commands (Python 2 variant of
    this module; _debug() below uses print statements)."""

    class Type:
        # Command ids (host -> client)
        LINK = 0x02
        DISCONNECT = 0x03
        AUTHENTICATE = 0x04
        PING = 0x05
        DOWNLOAD_REQUEST = 0x09
        UPLOAD_REQUEST = 0x0A
        ERASE_REQUEST = 0x0B
        UPLOAD_DATA = 0x0C
        # Response ids (client -> host): request id | 0x80
        AUTHENTICATE_RESPONSE = 0x84
        DOWNLOAD_RESPONSE = 0x89
        UPLOAD_RESPONSE = 0x8A
        ERASE_RESPONSE = 0x8B
        UPLOAD_DATA_RESPONSE = 0x8C

    # Every frame starts with the 0x44 marker byte and the command id.
    _format = "<BB"
    _id = None

    def __init__(self):
        # Ordered so fields serialize in the order they were added.
        self._arguments = collections.OrderedDict()
        self._add_argument('x', 0x44)  # frame marker
        self._add_argument('id', self._id)

    def _add_argument(self, name, value):
        self._arguments[name] = value

    def _get_argument(self, name):
        return self._arguments[name]

    def _get_arguments(self):
        return self._arguments.values()

    def get_id(self):
        return self._id

    def get(self):
        """Serialize all arguments according to _format."""
        data = struct.pack(self._format, *self._get_arguments())
        lst = array.array('B', data)
        _logger.debug("packing %r in %r,%s", data, lst, type(lst))
        return lst

    @classmethod
    def _parse_args(cls, data):
        return struct.unpack(cls._format, data)

    @classmethod
    def _parse(cls, data):
        """Rebuild an instance from raw bytes; validates marker and id."""
        args = cls._parse_args(data)
        assert args[0] == 0x44
        assert args[1] == cls._id
        return cls(*args[2:])

    def _debug(self):
        # Pretty-print all arguments.
        # NOTE(review): Python 2 print statements -- not Python 3 compatible.
        max_key_length, max_value_length = 0, 0
        for key, value in self._arguments.items():
            max_key_length = max(len(str(key)), max_key_length)
            max_value_length = max(len(str(value)), max_value_length)
        max_length = max_key_length + max_value_length + 3
        print "=" * max_length
        print self.__class__.__name__
        print "-" * max_length
        for key, value in self._arguments.items():
            print str(key) + ":", " " * (max_length - len(key)), str(value)
        print "=" * max_length
class LinkCommand(Command):
    """ANT-FS LINK command: move the client to the dedicated link channel."""

    _format = Command._format + "BBI"
    _id = Command.Type.LINK

    def __init__(self, channel_frequency, channel_period, host_serial_number):
        Command.__init__(self)
        for field, value in (
                ("channel_frequency", channel_frequency),
                ("channel_period", channel_period),
                ("host_serial_number", host_serial_number)):
            self._add_argument(field, value)
class DisconnectCommand(Command):
    """ANT-FS DISCONNECT command: ask the client to leave the link."""

    class Type:
        # Post-disconnect behaviour requested of the client.
        RETURN_LINK = 0
        RETURN_BROADCAST = 1

    _format = Command._format + "BBBxxx"
    _id = Command.Type.DISCONNECT

    def __init__(self, command_type, time_duration, application_specific_duration):
        Command.__init__(self)
        for field, value in (
                ("command_type", command_type),
                ("time_duration", time_duration),
                ("application_specific_duration", application_specific_duration)):
            self._add_argument(field, value)
class AuthenticateBase(Command):
    """Shared implementation for the AUTHENTICATE command/response pair."""

    # Variable-length payload; serialized by hand in get().
    _format = None

    def __init__(self, x_type, serial_number, data=None):
        """@param data Optional payload byte values (defaults to empty)."""
        Command.__init__(self)
        self._add_argument("type", x_type)
        self._add_argument("serial_number", serial_number)
        # Fix: avoid the shared mutable default argument ([]); each
        # instance now gets its own fresh list.
        self._add_argument("data", [] if data is None else data)

    def _pad(self, data):
        """Return a copy of *data* zero-padded to a multiple of 8 bytes."""
        padded_data = copy.copy(data)
        missing = 8 - len(padded_data) % 8
        if missing < 8:
            padded_data.extend([0x00] * missing)
        return padded_data

    def get_serial(self):
        return self._get_argument("serial_number")

    def get_data_string(self):
        """Return the payload decoded as a string, or None when empty."""
        if self._get_argument("data") == []:
            return None
        else:
            return "".join(map(chr, self._get_argument("data")))

    def get_data_array(self):
        return self._get_argument("data")

    def get(self):
        """Serialize: marker, id, type, payload length, serial, padded payload."""
        # Fix: materialize the arguments once instead of calling
        # _get_arguments() four times (also keeps indexing working when
        # values() is a dict view under Python 3).
        arguments = list(self._get_arguments())
        data = list(self._get_argument("data"))
        lst = array.array('B', struct.pack("<BBBBI", arguments[0],
                                           arguments[1], arguments[2],
                                           len(data), arguments[3]))
        padded = self._pad(data)
        lst.extend(array.array('B', padded))
        return lst

    @classmethod
    def _parse_args(cls, data):
        # Byte 3 of the raw frame is the payload length (packed by get()).
        header = struct.unpack("<BBBxI", data[0:8])
        data_length = data[3]
        return header + (data[8:8 + data_length],)
class AuthenticateCommand(AuthenticateBase):
    """Host -> client AUTHENTICATE request."""

    class Request:
        # Authentication strategies defined by ANT-FS.
        PASS_THROUGH = 0
        SERIAL = 1
        PAIRING = 2
        PASSKEY_EXCHANGE = 3

    _id = Command.Type.AUTHENTICATE

    def __init__(self, command_type, host_serial_number, data=None):
        # Fix: None default instead of a shared mutable []; normalize
        # here so this fix does not depend on the base class.
        AuthenticateBase.__init__(self, command_type, host_serial_number,
                                  [] if data is None else data)
class AuthenticateResponse(AuthenticateBase):
    """Client -> host AUTHENTICATE response."""

    class Response:
        # Outcome of the authentication attempt.
        NOT_AVAILABLE = 0
        ACCEPT = 1
        REJECT = 2

    _id = Command.Type.AUTHENTICATE_RESPONSE

    def __init__(self, response_type, client_serial_number, data=None):
        # Fix: None default instead of a shared mutable []; normalize
        # here so this fix does not depend on the base class.
        AuthenticateBase.__init__(self, response_type, client_serial_number,
                                  [] if data is None else data)
class PingCommand(Command):
    """ANT-FS PING command; carries no payload beyond the common header."""
    _id = Command.Type.PING
class DownloadRequest(Command):
    """Host -> client request to download a slice of a file."""

    _format = Command._format + "HIx?HI"
    _id = Command.Type.DOWNLOAD_REQUEST

    def __init__(self, data_index, data_offset, initial_request, crc_seed,
                 maximum_block_size=0):
        Command.__init__(self)
        for field, value in (
                ("data_index", data_index),
                ("data_offset", data_offset),
                ("initial_request", initial_request),
                ("crc_seed", crc_seed),
                ("maximum_block_size", maximum_block_size)):
            self._add_argument(field, value)
class DownloadResponse(Command):
    """Client response to a download request: status, counters, payload, CRC."""

    class Response:
        OK = 0
        NOT_EXIST = 1
        NOT_READABLE = 2
        NOT_READY = 3
        INVALID_REQUEST = 4
        INCORRECT_CRC = 5

    _id = Command.Type.DOWNLOAD_RESPONSE
    _format = None  # variable-length payload; parsed by hand below

    def __init__(self, response, remaining, offset, size, data, crc):
        Command.__init__(self)
        for field, value in (
                ("response", response),
                ("remaining", remaining),
                ("offset", offset),
                ("size", size),
                ("data", data),
                ("crc", crc)):
            self._add_argument(field, value)

    @classmethod
    def _parse_args(cls, data):
        # 16-byte fixed header, variable payload, 8-byte CRC footer.
        header = struct.unpack("<BBBxIII", data[0:16])
        payload = (data[16:-8],)
        footer = struct.unpack("<6xH", data[-8:])
        return header + payload + footer
class UploadRequest(Command):
    """Host -> client request to begin an upload at a given offset."""

    _format = Command._format + "HI4xI"
    _id = Command.Type.UPLOAD_REQUEST

    def __init__(self, data_index, max_size, data_offset):
        Command.__init__(self)
        for field, value in (
                ("data_index", data_index),
                ("max_size", max_size),
                ("data_offset", data_offset)):
            self._add_argument(field, value)
class UploadResponse(Command):
    """Client response to an upload request: status and upload limits."""

    class Response:
        OK = 0
        NOT_EXIST = 1
        NOT_WRITEABLE = 2
        NOT_ENOUGH_SPACE = 3
        INVALID_REQUEST = 4
        NOT_READY = 5

    _format = Command._format + "BxIII6xH"
    _id = Command.Type.UPLOAD_RESPONSE

    def __init__(self, response, last_data_offset, maximum_file_size,
                 maximum_block_size, crc):
        Command.__init__(self)
        for field, value in (
                ("response", response),
                ("last_data_offset", last_data_offset),
                ("maximum_file_size", maximum_file_size),
                ("maximum_block_size", maximum_block_size),
                ("crc", crc)):
            self._add_argument(field, value)
class UploadDataCommand(Command):
    """ANT-FS UPLOAD_DATA command: a burst transfer carrying file content."""

    _id = Command.Type.UPLOAD_DATA
    _format = None  # variable-length payload; serialized by hand

    def __init__(self, crc_seed, data_offset, data, crc):
        Command.__init__(self)
        self._add_argument("crc_seed", crc_seed)
        self._add_argument("data_offset", data_offset)
        self._add_argument("data", data)
        self._add_argument("crc", crc)

    def get(self):
        """Serialize as 8-byte header + raw data + 8-byte CRC footer."""
        # Fix: materialize the arguments with list() so the [:4] slice
        # also works when values() is a dict view (Python 3), matching
        # the sibling copy of this module.
        arguments = list(self._get_arguments())
        header = struct.pack("<BBHI", *arguments[:4])
        footer = struct.pack("<6xH", self._get_argument("crc"))
        data = array.array('B', header)
        data.extend(self._get_argument("data"))
        data.extend(array.array('B', footer))
        return data

    @classmethod
    def _parse_args(cls, data):
        # 8-byte header, variable payload, 8-byte CRC footer.
        return (struct.unpack("<BBHI", data[0:8])
                + (data[8:-8],)
                + struct.unpack("<6xH", data[-8:]))
class UploadDataResponse(Command):
    """Client acknowledgement of an UPLOAD_DATA burst."""

    class Response:
        OK = 0
        FAILED = 1

    _format = Command._format + "B5x"
    _id = Command.Type.UPLOAD_DATA_RESPONSE

    def __init__(self, response):
        Command.__init__(self)
        self._add_argument("response", response)
class EraseRequestCommand(Command):
    """ANT-FS ERASE_REQUEST command for a single file index."""

    # NOTE(review): the sibling copy of this module pads the frame to
    # 8 bytes with "I2x" -- confirm which layout the device expects.
    _format = Command._format + "I"
    _id = Command.Type.ERASE_REQUEST

    def __init__(self, data_file_index):
        Command.__init__(self)
        self._add_argument("data_file_index", data_file_index)
class EraseResponse(Command):
    """Client response to an erase request."""

    class Response:
        ERASE_SUCCESSFUL = 0
        ERASE_FAILED = 1
        NOT_READY = 2

    # NOTE(review): the sibling copy of this module uses "B5x" (8-byte
    # frame) -- confirm which layout the device expects.
    _format = Command._format + "B"
    _id = Command.Type.ERASE_RESPONSE

    def __init__(self, response):
        Command.__init__(self)
        self._add_argument("response", response)
# Dispatch table: frame command id -> class used by parse() below.
_classes = {
    # Commands
    Command.Type.LINK: LinkCommand,
    Command.Type.DISCONNECT: DisconnectCommand,
    Command.Type.AUTHENTICATE: AuthenticateCommand,
    Command.Type.PING: PingCommand,
    Command.Type.DOWNLOAD_REQUEST: DownloadRequest,
    Command.Type.UPLOAD_REQUEST: UploadRequest,
    Command.Type.ERASE_REQUEST: EraseRequestCommand,
    Command.Type.UPLOAD_DATA: UploadDataCommand,
    # Responses
    Command.Type.AUTHENTICATE_RESPONSE: AuthenticateResponse,
    Command.Type.DOWNLOAD_RESPONSE: DownloadResponse,
    Command.Type.UPLOAD_RESPONSE: UploadResponse,
    Command.Type.ERASE_RESPONSE: EraseResponse,
    Command.Type.UPLOAD_DATA_RESPONSE: UploadDataResponse}
def parse(data):
    """Parse a raw ANT-FS frame into the matching Command subclass."""
    _logger.debug("parsing data %r", data)
    mark, command_type = struct.unpack("<BB", data[0:2])
    assert mark == 0x44
    return _classes[command_type]._parse(data)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/command.py",
"copies": "1",
"size": "11844",
"license": "mit",
"hash": 3465298511343774000,
"line_mean": 31.272479564,
"line_max": 90,
"alpha_frac": 0.5923674434,
"autogenerated": false,
"ratio": 3.8268174474959613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4919184890895961,
"avg_score": null,
"num_lines": null
} |
import array
import collections
import logging
import struct
_logger = logging.getLogger("garmin.ant.fs.commandpipe")
class CommandPipe:
    """Base class for ANT-FS command-pipe messages (Python 2 variant of
    this module; _debug() below uses print statements)."""

    class Type:
        REQUEST = 0x01
        RESPONSE = 0x02
        TIME = 0x03
        CREATE_FILE = 0x04
        DIRECTORY_FILTER = 0x05
        SET_AUTHENTICATION_PASSKEY = 0x06
        SET_CLIENT_FRIENDLY_NAME = 0x07
        FACTORY_RESET_COMMAND = 0x08

    # Common header: command byte, two pad bytes, sequence byte.
    _format = "<BxxB"
    _id = None

    def __init__(self):
        self._arguments = collections.OrderedDict()
        self._add_argument('command', self._id)
        # NOTE(review): sequence is always 0 here; the sibling Python 3
        # module keeps a class-wide incrementing counter instead.
        self._add_argument('sequence', 0)

    def _add_argument(self, name, value):
        self._arguments[name] = value

    def _get_argument(self, name):
        return self._arguments[name]

    def _get_arguments(self):
        return self._arguments.values()

    def get(self):
        """Serialize all arguments according to _format."""
        data = struct.pack(self._format, *self._get_arguments())
        lst = array.array('B', data)
        _logger.debug("packing %r in %r,%s", data, lst, type(lst))
        return lst

    @classmethod
    def _parse_args(cls, data):
        return struct.unpack(cls._format, data)

    @classmethod
    def _parse(cls, data):
        """Rebuild an instance from raw bytes; validates the command id."""
        args = cls._parse_args(data)
        assert args[0] == cls._id
        return cls(*args[2:])

    def _debug(self):
        # Pretty-print all arguments.
        # NOTE(review): Python 2 print statements -- not Python 3 compatible.
        max_key_length, max_value_length = 0, 0
        for key, value in self._arguments.items():
            max_key_length = max(len(str(key)), max_key_length)
            max_value_length = max(len(str(value)), max_value_length)
        max_length = max_key_length + max_value_length + 3
        print "=" * max_length
        print self.__class__.__name__
        print "-" * max_length
        for key, value in self._arguments.items():
            print str(key) + ":", " " * (max_length - len(key)), str(value)
        print "=" * max_length
class Request(CommandPipe):
    """Command-pipe request identified by a request id byte."""

    _id = CommandPipe.Type.REQUEST
    _format = CommandPipe._format + "Bxxx"

    def __init__(self, request_id):
        CommandPipe.__init__(self)
        # BUG FIX: request_id was accepted but never stored, so get()
        # (which packs one value per non-pad field in _format) raised
        # struct.error for every Request.  The sibling Python 3 module
        # stores it.
        self._add_argument('request_id', request_id)
class Response(CommandPipe):
    """Generic command-pipe response (request id + status)."""

    class Response:
        OK = 0
        FAILED = 1
        REJECTED = 2
        NOT_SUPPORTED = 3

    _id = CommandPipe.Type.RESPONSE
    _format = CommandPipe._format + "BxBx"

    def __init__(self, request_id, response):
        CommandPipe.__init__(self)
        self._add_argument('request_id', request_id)
        self._add_argument('response', response)

    def get_request_id(self):
        return self._get_argument("request_id")

    def get_response(self):
        return self._get_argument("response")
class Time(CommandPipe):
    """Command-pipe Time message: current/system time plus format."""

    class Format:
        DIRECTORY = 0
        SYSTEM = 1
        COUNTER = 2

    _id = CommandPipe.Type.TIME
    _format = CommandPipe._format + "IIBxxx"

    def __init__(self, current_time, system_time, time_format):
        CommandPipe.__init__(self)
        # BUG FIX: the three payload fields were accepted but never
        # stored, so get() could not pack the IIB fields declared in
        # _format and raised struct.error.  The sibling Python 3 module
        # stores them.
        self._add_argument('current_time', current_time)
        self._add_argument('system_time', system_time)
        self._add_argument('time_format', time_format)
class CreateFile(CommandPipe):
    """Command-pipe request asking the client to create a new file."""

    _id = CommandPipe.Type.CREATE_FILE
    _format = None  # variable layout; serialized by hand in get()

    def __init__(self, size, data_type, identifier, identifier_mask):
        CommandPipe.__init__(self)
        self._add_argument('size', size)
        self._add_argument('data_type', data_type)
        self._add_argument('identifier', identifier)
        self._add_argument('identifier_mask', identifier_mask)

    def get(self):
        """Serialize: 9-byte header, 3-byte identifier, pad, 3-byte mask."""
        # list() so the slice works on both Python 2 lists and Python 3
        # dict-value views.
        arguments = list(self._get_arguments())
        data = array.array('B', struct.pack(CommandPipe._format + "IB",
                                            *arguments[:4]))
        data.extend(self._get_argument("identifier"))
        data.extend([0])
        data.extend(self._get_argument("identifier_mask"))
        return data

    @classmethod
    def _parse_args(cls, data):
        # BUG FIX: this referenced Command._format, but Command is not
        # imported in this module (NameError on every call), and its
        # "<BB" layout does not match the 9-byte header that get() packs
        # with CommandPipe._format ("<BxxB").  Mirror get()'s layout.
        return struct.unpack(CommandPipe._format + "IB", data[0:9]) \
            + (data[9:12],) + (data[13:16],)
class CreateFileResponse(Response):
    """Response to CreateFile carrying the newly assigned file index."""

    _format = Response._format + "BBBBHxx"

    def __init__(self, request_id, response, data_type, identifier, index):
        Response.__init__(self, request_id, response)
        for field, value in (('data_type', data_type),
                             ('identifier', identifier),
                             ('index', index)):
            self._add_argument(field, value)

    def get_data_type(self):
        return self._get_argument("data_type")

    def get_identifier(self):
        return self._get_argument("identifier")

    def get_index(self):
        return self._get_argument("index")

    @classmethod
    def _parse_args(cls, data):
        base = Response._parse_args(data[:8])
        index = struct.unpack("<H", data[12:14])[0]
        return base + (data[8], data[9:12], index)
# Dispatch table: command byte -> message class (None = not implemented).
_classes = {
    CommandPipe.Type.REQUEST: Request,
    CommandPipe.Type.RESPONSE: Response,
    CommandPipe.Type.TIME: Time,
    CommandPipe.Type.CREATE_FILE: CreateFile,
    CommandPipe.Type.DIRECTORY_FILTER: None,
    CommandPipe.Type.SET_AUTHENTICATION_PASSKEY: None,
    CommandPipe.Type.SET_CLIENT_FRIENDLY_NAME: None,
    CommandPipe.Type.FACTORY_RESET_COMMAND: None}
# Extended response classes, keyed by the originating request id.
# NOTE(review): the sibling Python 3 module also maps Type.TIME to a
# TimeResponse here -- confirm whether Time responses are expected.
_responses = {
    CommandPipe.Type.CREATE_FILE: CreateFileResponse}
def parse(data):
    """Parse raw command-pipe data into the matching message class."""
    commandpipe_type = _classes[data[0]]
    if commandpipe_type == Response:
        # BUG FIX: re-dispatch to an extended response class only when
        # the frame is actually longer than a plain 8-byte Response
        # (matches the sibling Python 3 module).  Without the length
        # guard, a plain Response whose request-id byte happened to
        # equal CREATE_FILE was parsed as a 16-byte CreateFileResponse
        # and crashed in struct.unpack.
        if data[4] in _responses and len(data) > 8:
            commandpipe_type = _responses[data[4]]
    return commandpipe_type._parse(data)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/commandpipe.py",
"copies": "1",
"size": "6600",
"license": "mit",
"hash": 4156722777131538000,
"line_mean": 31.6732673267,
"line_max": 77,
"alpha_frac": 0.6063636364,
"autogenerated": false,
"ratio": 3.8619075482738445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4968271184673845,
"avg_score": null,
"num_lines": null
} |
import array
import datetime
import logging
import threading
import queue
from ant.easy.channel import Channel
from ant.easy.node import Node, Message
import ant.fs.command
from ant.fs.beacon import Beacon
from ant.fs.command import (
LinkCommand,
DownloadRequest,
DownloadResponse,
AuthenticateCommand,
AuthenticateResponse,
DisconnectCommand,
UploadRequest,
UploadResponse,
UploadDataCommand,
UploadDataResponse,
EraseRequestCommand,
EraseResponse,
)
from ant.fs.commandpipe import CreateFile, Response, Time, TimeResponse
from ant.fs.file import Directory
from ant.fs.commons import crc
_logger = logging.getLogger("ant.fs.manager")
class AntFSException(Exception):
    """Base class for ANT-FS failures: a message plus an optional
    protocol error number."""

    def __init__(self, error, errno=None):
        Exception.__init__(self, error, errno)
        self._error = error
        self._errno = errno

    def get_error(self):
        """Return 'errno: message' when an error number is present."""
        if self._errno is None:
            return self._error
        return str(self._errno) + ": " + self._error
class AntFSDownloadException(AntFSException):
    """Raised when an ANT-FS download fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSUploadException(AntFSException):
    """Raised when an ANT-FS upload fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSEraseException(AntFSException):
    """Raised when an ANT-FS erase request fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSAuthenticationException(AntFSException):
    """Raised when ANT-FS authentication fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSCreateFileException(AntFSException):
    """Raised when the client refuses to create a file."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSTimeException(AntFSException):
    """Raised when setting/reading the client time fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class Application:
    """Drives an ANT-FS session (link, authenticate, transport) against a
    client device; subclasses override the on_* hooks."""
    _serial_number = 1337
    _frequency = 19  # RF channel offset 0-124; actual frequency is (2400 + value) MHz
    def __init__(self):
        """Set up the ANT node and channel and start the receive worker.

        Creates the frame queues, programs the ANT-FS network key,
        queries device capabilities, registers this object's data
        callbacks on a new channel, and spawns the worker thread that
        runs the node's message loop.  On any failure the node is
        stopped before the exception is re-raised.
        """
        self._queue = queue.Queue()    # parsed command frames
        self._beacons = queue.Queue()  # parsed beacons
        self._node = Node()  # presumably opens the ANT USB device -- see ant.easy.node
        try:
            # Public ANT-FS network key.
            NETWORK_KEY = [0xA8, 0xA4, 0x23, 0xB9, 0xF5, 0x5E, 0x63, 0xC1]
            self._node.set_network_key(0x00, NETWORK_KEY)
            print("Request basic information...")
            m = self._node.request_message(Message.ID.RESPONSE_CAPABILITIES)
            print(" Capabilities: ", m[2])
            # m = self._node.request_message(Message.ID.RESPONSE_ANT_VERSION)
            # print " ANT version: ", struct.unpack("<10sx", m[2])[0]
            # m = self._node.request_message(Message.ID.RESPONSE_SERIAL_NUMBER)
            # print " Serial number:", struct.unpack("<I", m[2])[0]
            print("Starting system...")
            # NETWORK_KEY= [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
            # self._node.set_network_key(0x00, NETWORK_KEY)
            print("Key done...")
            self._channel = self._node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
            # Broadcast and burst data both funnel through _on_data.
            self._channel.on_broadcast_data = self._on_data
            self._channel.on_burst_data = self._on_data
            self.setup_channel(self._channel)
            self._worker_thread = threading.Thread(target=self._worker, name="ant.fs")
            self._worker_thread.start()
        except Exception as e:
            self.stop()
            raise e
    def _worker(self):
        """Worker-thread target: run the ANT node's message loop
        (presumably blocks until the node is stopped -- see ant.easy.node)."""
        self._node.start()
    def _main(self):
        """Drive the ANT-FS state machine: link, authenticate, transport,
        then disconnect; always stops the node on exit."""
        try:
            _logger.debug("Link level")
            beacon = self._get_beacon()
            if self.on_link(beacon):
                # Wait for up to 5 beacons for the client to reach the
                # authentication state.
                for i in range(0, 5):
                    beacon = self._get_beacon()
                    if (
                        beacon.get_client_device_state()
                        == Beacon.ClientDeviceState.AUTHENTICATION
                    ):
                        _logger.debug("Auth layer")
                        if self.on_authentication(beacon):
                            _logger.debug("Authenticated")
                            beacon = self._get_beacon()
                            self.on_transport(beacon)
                            self.disconnect()
                        # One authentication attempt only, successful or not.
                        break
        finally:
            _logger.debug("Run 5")
            self.stop()
    def _on_beacon(self, data):
        """Parse a raw beacon frame and queue it for the protocol logic."""
        b = Beacon.parse(data)
        self._beacons.put(b)
    def _on_command(self, data):
        """Parse a raw command frame and queue it for the protocol logic."""
        c = ant.fs.command.parse(data)
        self._queue.put(c)
def _on_data(self, data):
# print "_on_data", data, len(data)
if data[0] == 0x43:
self._on_beacon(data[:8])
if len(data[8:]) > 0:
self._on_command(data[8:])
elif data[0] == 0x44:
self._on_command(data)
def _get_beacon(self):
b = self._beacons.get()
self._beacons.task_done()
return b
def _get_command(self, timeout=15.0):
_logger.debug("Get command, t%d, s%d", timeout, self._queue.qsize())
c = self._queue.get(True, timeout)
self._queue.task_done()
return c
def _send_command(self, c):
data = c.get()
if len(data) == 8:
self._channel.send_acknowledged_data(data)
else:
self._channel.send_burst_transfer(data)
# Application actions are defined from here
# =======================================================================
# These should be overloaded:
def setup_channel(self, channel):
pass
def on_link(self, beacon):
pass
def on_authentication(self, beacon):
pass
def on_transport(self, beacon):
pass
# Shouldn't have to touch these:
def start(self):
self._main()
def stop(self):
self._node.stop()
def _send_commandpipe(self, data):
# print "send commandpipe", data
self.upload(0xFFFE, data)
def _get_commandpipe(self):
# print "get commandpipe"
return ant.fs.commandpipe.parse(self.download(0xFFFE))
def create(self, typ, data, callback=None):
# print "create", typ
request = CreateFile(len(data), 0x80, [typ, 0x00, 0x00], [0x00, 0xFF, 0xFF])
self._send_commandpipe(request.get())
result = self._get_commandpipe()
# result._debug()
if result.get_response() != Response.Response.OK:
raise AntFSCreateFileException(
"Could not create file", result.get_response()
)
# print "create result", result, result.get_index(), result.get_data_type(), result.get_identifier()
# d = self.download_directory()
# Inform the application that the upload request was successfully created
if callback is not None:
callback(0)
self.upload(result.get_index(), data, callback)
return result.get_index()
def upload(self, index, data, callback=None):
# print "upload", index, len(data)
iteration = 0
while True:
# Request Upload
# Continue using Last Data Offset (special MAX_ULONG value)
request_offset = 0 if iteration == 0 else 0xFFFFFFFF
self._send_command(UploadRequest(index, len(data), request_offset))
upload_response = self._get_command()
# upload_response._debug()
if upload_response._get_argument("response") != UploadResponse.Response.OK:
raise AntFSUploadException(
"Upload request failed", upload_response._get_argument("response")
)
# Upload data
offset = upload_response._get_argument("last_data_offset")
max_block = upload_response._get_argument("maximum_block_size")
# print " uploading", offset, "to", offset + max_block
data_packet = data[offset : offset + max_block]
crc_seed = upload_response._get_argument("crc")
crc_val = crc(data_packet, upload_response._get_argument("crc"))
# Pad with 0 to even 8 bytes
missing_bytes = 8 - (len(data_packet) % 8)
if missing_bytes != 8:
data_packet.extend(array.array("B", [0] * missing_bytes))
# print " adding", str(missing_bytes), "padding"
# print " packet", len(data_packet)
# print " crc ", crc_val, "from seed", crc_seed
self._send_command(
UploadDataCommand(crc_seed, offset, data_packet, crc_val)
)
upload_data_response = self._get_command()
# upload_data_response._debug()
if (
upload_data_response._get_argument("response")
!= UploadDataResponse.Response.OK
):
raise AntFSUploadException(
"Upload data failed", upload_data_response._get_argument("response")
)
if callback is not None and len(data) != 0:
callback((offset + len(data_packet)) / len(data))
if offset + len(data_packet) >= len(data):
# print " done"
break
# print " one more"
iteration += 1
def download(self, index, callback=None):
offset = 0
initial = True
crc = 0
data = array.array("B")
while True:
_logger.debug("Download %d, o%d, c%d", index, offset, crc)
self._send_command(DownloadRequest(index, offset, True, crc))
_logger.debug("Wait for response...")
try:
response = self._get_command()
if response._get_argument("response") == DownloadResponse.Response.OK:
remaining = response._get_argument("remaining")
offset = response._get_argument("offset")
total = offset + remaining
data[offset:total] = response._get_argument("data")[:remaining]
# print "rem", remaining, "offset", offset, "total", total, "size", response._get_argument("size")
# TODO: check CRC
if callback is not None and response._get_argument("size") != 0:
callback(total / response._get_argument("size"))
if total == response._get_argument("size"):
return data
crc = response._get_argument("crc")
offset = total
else:
raise AntFSDownloadException(
"Download request failed: ", response._get_argument("response")
)
except queue.Empty:
_logger.debug("Download %d timeout", index)
# print "recover from download failure"
def download_directory(self, callback=None):
data = self.download(0, callback)
return Directory.parse(data)
def set_time(self, time=datetime.datetime.utcnow()):
"""
:param time: datetime in UTC, or None to set to current time
"""
utc_tai_diff_seconds = 35
offset = time - datetime.datetime(1989, 12, 31, 0, 0, 0)
t = Time(int(offset.total_seconds()) + utc_tai_diff_seconds, 0xFFFFFFFF, 0)
self._send_commandpipe(t.get())
result = self._get_commandpipe()
if result.get_response() != TimeResponse.Response.OK:
raise AntFSTimeException("Failed to set time", result.get_response())
def erase(self, index):
self._send_command(EraseRequestCommand(index))
response = self._get_command()
if (
response._get_argument("response")
!= EraseResponse.Response.ERASE_SUCCESSFUL
):
raise AntFSDownloadException(
"Erase request failed: ", response._get_argument("response")
)
def link(self):
self._channel.request_message(Message.ID.RESPONSE_CHANNEL_ID)
self._send_command(LinkCommand(self._frequency, 4, self._serial_number))
# New period, search timeout
self._channel.set_period(4096)
self._channel.set_search_timeout(10)
self._channel.set_rf_freq(self._frequency)
def authentication_serial(self):
self._send_command(
AuthenticateCommand(AuthenticateCommand.Request.SERIAL, self._serial_number)
)
response = self._get_command()
return (response.get_serial(), response.get_data_string())
def authentication_passkey(self, passkey):
self._send_command(
AuthenticateCommand(
AuthenticateCommand.Request.PASSKEY_EXCHANGE,
self._serial_number,
passkey,
)
)
response = self._get_command()
if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
return response.get_data_array()
else:
raise AntFSAuthenticationException(
"Passkey authentication failed", response._get_argument("type")
)
def authentication_pair(self, friendly_name):
data = array.array("B", map(ord, list(friendly_name)))
self._send_command(
AuthenticateCommand(
AuthenticateCommand.Request.PAIRING, self._serial_number, data
)
)
response = self._get_command(30)
if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
return response.get_data_array()
else:
raise AntFSAuthenticationException(
"Pair authentication failed", response._get_argument("type")
)
def disconnect(self):
d = DisconnectCommand(DisconnectCommand.Type.RETURN_LINK, 0, 0)
self._send_command(d)
| {
"repo_name": "Tigge/openant",
"path": "ant/fs/manager.py",
"copies": "1",
"size": "14937",
"license": "mit",
"hash": -3456147101239535600,
"line_mean": 33.5763888889,
"line_max": 118,
"alpha_frac": 0.5784963513,
"autogenerated": false,
"ratio": 4.105827377680044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184323728980044,
"avg_score": null,
"num_lines": null
} |
import array
import logging
import struct
import threading
import traceback
import Queue
from ant.easy.channel import Channel
from ant.easy.node import Node, Message
import ant.fs.command
from ant.fs.beacon import Beacon
from ant.fs.command import LinkCommand, DownloadRequest, DownloadResponse, \
AuthenticateCommand, AuthenticateResponse, DisconnectCommand, \
UploadRequest, UploadResponse, UploadDataCommand, UploadDataResponse
from ant.fs.commandpipe import CreateFile, CreateFileResponse, Response
from ant.fs.file import Directory, File
from ant.fs.commons import crc
_logger = logging.getLogger("garmin.ant.fs.manager")
class AntFSException(Exception):
    """Base class for all ANT-FS errors.

    Carries a human-readable message and an optional error code as
    reported by the device.
    """
    def __init__(self, error, errno=None):
        Exception.__init__(self, error, errno)
        self._error = error
        self._errno = errno
    def get_error(self):
        """Return the message, prefixed with the error code if one was given."""
        # Identity comparison with None (PEP 8) instead of '!= None';
        # errno=0 still renders its "0: " prefix, as before.
        if self._errno is not None:
            return str(self._errno) + ": " + self._error
        else:
            return self._error
class AntFSDownloadException(AntFSException):
    """Raised when an ANT-FS download (or erase) request fails."""
    def __init__(self, error, errno=None):
        super(AntFSDownloadException, self).__init__(error, errno)
class AntFSUploadException(AntFSException):
    """Raised when an ANT-FS upload request or data transfer fails."""
    def __init__(self, error, errno=None):
        super(AntFSUploadException, self).__init__(error, errno)
class AntFSAuthenticationException(AntFSException):
    """Raised when passkey or pairing authentication is rejected."""
    def __init__(self, error, errno=None):
        super(AntFSAuthenticationException, self).__init__(error, errno)
class Application:
    """Host-side ANT-FS session driver (Python 2 revision).

    Owns the ANT node/channel, feeds received beacons and commands into
    queues, and walks the ANT-FS state machine
    (link -> authentication -> transport).  Subclasses override
    setup_channel()/on_link()/on_authentication()/on_transport().
    """
    # Serial number this host reports to the client device.
    _serial_number = 1337
    _frequency = 19  # 0 to 124, x - 2400 (in MHz)
    def __init__(self):
        """Set up the ANT node, channel callbacks and the receive thread."""
        self._queue = Queue.Queue()  # parsed ANT-FS commands/responses
        self._beacons = Queue.Queue()  # parsed client beacons
        self._node = Node()
        try:
            NETWORK_KEY= [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
            self._node.set_network_key(0x00, NETWORK_KEY)
            print "Request basic information..."
            m = self._node.request_message(Message.ID.RESPONSE_CAPABILITIES)
            print "  Capabilities: ", m[2]
            #m = self._node.request_message(Message.ID.RESPONSE_VERSION)
            #print " ANT version:  ", struct.unpack("<10sx", m[2])[0]
            #m = self._node.request_message(Message.ID.RESPONSE_SERIAL_NUMBER)
            #print " Serial number:", struct.unpack("<I", m[2])[0]
            print "Starting system..."
            #NETWORK_KEY= [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
            #self._node.set_network_key(0x00, NETWORK_KEY)
            print "Key done..."
            self._channel = self._node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
            # Broadcast and burst traffic share one handler (_on_data).
            self._channel.on_broadcast_data = self._on_data
            self._channel.on_burst_data = self._on_data
            self.setup_channel(self._channel)
            self._worker_thread =threading.Thread(target=self._worker, name="ant.fs")
            self._worker_thread.start()
        except Exception as e:
            # Tear the node down if setup fails part-way through.
            self.stop()
            raise e
    def _worker(self):
        # Background thread body: drives the ANT message loop.
        self._node.start()
    def _main(self):
        """Run the ANT-FS state machine, then always stop the node."""
        try:
            _logger.debug("Link level")
            beacon = self._get_beacon()
            if self.on_link(beacon):
                # Give the client a few beacons to reach the auth state.
                for i in range(0, 5):
                    beacon = self._get_beacon()
                    if beacon.get_client_device_state() == Beacon.ClientDeviceState.AUTHENTICATION:
                        _logger.debug("Auth layer")
                        if self.on_authentication(beacon):
                            _logger.debug("Authenticated")
                            beacon = self._get_beacon()
                            self.on_transport(beacon)
                        self.disconnect()
                        break
        #except Exception as e:
        #    print e
        #    traceback.print_exc()
        #    for line in traceback.format_exc().splitlines():
        #        _logger.error("%r", line)
        finally:
            _logger.debug("Run 5")
            self.stop()
    def _on_beacon(self, data):
        # Channel callback: parse and queue a client beacon.
        b = Beacon.parse(data)
        self._beacons.put(b)
    def _on_command(self, data):
        # Channel callback: parse and queue an ANT-FS command/response.
        c = ant.fs.command.parse(data)
        self._queue.put(c)
    def _on_data(self, data):
        # 0x43 marks a beacon (optionally followed by a burst payload);
        # 0x44 marks a stand-alone command.
        #print "_on_data", data, len(data)
        if data[0] == 0x43:
            self._on_beacon(data[:8])
            if len(data[8:]) > 0:
                self._on_command(data[8:])
        elif data[0] == 0x44:
            self._on_command(data)
    def _get_beacon(self):
        """Block until the next client beacon arrives and return it."""
        b = self._beacons.get()
        self._beacons.task_done()
        return b
    def _get_command(self, timeout=3.0):
        """Block for the next command; raises Queue.Empty on timeout."""
        _logger.debug("Get command, t%d, s%d", timeout, self._queue.qsize())
        c = self._queue.get(True, timeout)
        self._queue.task_done()
        return c
    def _send_command(self, c):
        # 8-byte commands fit one acknowledged frame; larger ones burst.
        data = c.get()
        if len(data) == 8:
            self._channel.send_acknowledged_data(data)
        else:
            self._channel.send_burst_transfer(data)
    # Application actions are defined from here
    # =======================================================================
    # These should be overloaded:
    def setup_channel(self, channel):
        pass
    def on_link(self, beacon):
        pass
    def on_authentication(self, beacon):
        pass
    def on_transport(self, beacon):
        pass
    # Shouldn't have to touch these:
    def start(self):
        """Run the session on the calling thread until it completes."""
        self._main()
    def stop(self):
        """Shut down the underlying ANT node."""
        self._node.stop()
    def erase(self, index):
        # Not implemented in this revision.
        pass
    def _send_commandpipe(self, data):
        # The command pipe lives at the reserved file index 0xfffe.
        #print "send commandpipe", data
        self.upload(0xfffe, data)
    def _get_commandpipe(self):
        #print "get commandpipe"
        return ant.fs.commandpipe.parse(self.download(0xfffe))
    def create(self, typ, data, callback=None):
        """Create a file of *typ*, upload *data* into it, return its index."""
        #print "create", typ
        request = CreateFile(len(data), 0x80, [typ, 0x00, 0x00], [0x00, 0xff, 0xff])
        self._send_commandpipe(request.get())
        result = self._get_commandpipe()
        #result._debug()
        if result.get_response() != Response.Response.OK:
            raise AntFSCreateFileException("Could not create file",
                    result.get_response())
        #print "create result", result, result.get_index(), result.get_data_type(), result.get_identifier()
        #d = self.download_directory()
        self.upload(result.get_index(), data, callback)
        return result.get_index()
    def upload(self, index, data, callback=None):
        """Upload *data* into file *index*, block by block.

        callback, if given, receives the fraction uploaded so far.
        Raises AntFSUploadException if the device reports an error.
        """
        #print "upload", index, len(data)
        iteration = 0
        while True:
            # Request Upload
            # Continue using Last Data Offset (special MAX_ULONG value)
            request_offset = 0 if iteration == 0 else 0xffffffff
            self._send_command(UploadRequest(index, len(data), request_offset))
            upload_response = self._get_command()
            #upload_response._debug()
            if upload_response._get_argument("response") != UploadResponse.Response.OK:
                raise AntFSUploadException("Upload request failed",
                        upload_response._get_argument("response"))
            # Upload data
            offset = upload_response._get_argument("last_data_offset")
            max_block = upload_response._get_argument("maximum_block_size")
            #print " uploading", offset, "to", offset + max_block
            data_packet = data[offset:offset + max_block]
            crc_seed = upload_response._get_argument("crc")
            # CRC is computed over the unpadded block, seeded by the device.
            crc_val = crc(data_packet, upload_response._get_argument("crc"))
            # Pad with 0 to even 8 bytes
            missing_bytes = 8 - (len(data_packet) % 8)
            if missing_bytes != 8:
                data_packet.extend(array.array('B', [0] * missing_bytes))
                #print " adding", str(missing_bytes), "padding"
            #print " packet", len(data_packet)
            #print " crc  ", crc_val, "from seed", crc_seed
            self._send_command(UploadDataCommand(crc_seed, offset, data_packet, crc_val))
            upload_data_response = self._get_command()
            #upload_data_response._debug()
            if upload_data_response._get_argument("response") != UploadDataResponse.Response.OK:
                raise AntFSUploadException("Upload data failed",
                        upload_data_response._get_argument("response"))
            # NOTE(review): progress is offset/len(data), i.e. before the
            # current block is counted, so the final callback never
            # reports 1.0 — confirm whether that is intended.
            if callback != None and len(data) != 0:
                callback(float(offset) / float(len(data)))
            if offset + len(data_packet) >= len(data):
                #print " done"
                break
            #print " one more"
            iteration += 1
    def download(self, index, callback=None):
        """Download file *index* and return its content as array('B').

        callback, if given, receives the fraction downloaded so far.
        Raises AntFSDownloadException if the device reports an error;
        a Queue.Empty timeout simply retries the request.
        """
        offset = 0
        initial = True
        crc = 0
        data = array.array('B')
        while True:
            _logger.debug("Download %d, o%d, c%d", index, offset, crc)
            self._send_command(DownloadRequest(index, offset, True, crc))
            _logger.debug("Wait for response...")
            try:
                response = self._get_command()
                if response._get_argument("response") == DownloadResponse.Response.OK:
                    remaining = response._get_argument("remaining")
                    offset = response._get_argument("offset")
                    total = offset + remaining
                    data[offset:total] = response._get_argument("data")[:remaining]
                    #print "rem", remaining, "offset", offset, "total", total, "size", response._get_argument("size")
                    # TODO: check CRC
                    if callback != None and response._get_argument("size") != 0:
                        callback(float(total) / float(response._get_argument("size")))
                    if total == response._get_argument("size"):
                        return data
                    crc = response._get_argument("crc")
                    offset = total
                else:
                    raise AntFSDownloadException("Download request failed: ",
                            response._get_argument("response"))
            except Queue.Empty:
                # Timeout: loop around and re-request from the same offset.
                _logger.debug("Download %d timeout", index)
                #print "recover from download failure"
    def download_directory(self, callback=None):
        """Download and parse the device directory (file index 0)."""
        data = self.download(0, callback)
        return Directory.parse(data)
    def link(self):
        """Move the session from the search channel to the private link."""
        self._channel.request_message(Message.ID.RESPONSE_CHANNEL_ID)
        self._send_command(LinkCommand(self._frequency, 4, self._serial_number))
        # New period, search timeout
        self._channel.set_period(4096)
        self._channel.set_search_timeout(3)
        self._channel.set_rf_freq(self._frequency)
    def authentication_serial(self):
        """Ask the client for its serial number and friendly name."""
        self._send_command(AuthenticateCommand(
                AuthenticateCommand.Request.SERIAL,
                self._serial_number))
        response = self._get_command()
        return (response.get_serial(), response.get_data_string())
    def authentication_passkey(self, passkey):
        """Authenticate with a stored passkey; returns the response data."""
        self._send_command(AuthenticateCommand(
                AuthenticateCommand.Request.PASSKEY_EXCHANGE,
                self._serial_number, passkey))
        response = self._get_command()
        if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
            return response.get_data_array()
        else:
            raise AntFSAuthenticationException("Passkey authentication failed",
                    response._get_argument("type"))
    def authentication_pair(self, friendly_name):
        """Request pairing; returns the passkey data on acceptance."""
        data = array.array('B', map(ord, list(friendly_name)))
        self._send_command(AuthenticateCommand(
                AuthenticateCommand.Request.PAIRING,
                self._serial_number, data))
        # Longer timeout: the user has to confirm on the device.
        response = self._get_command(30)
        if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
            return response.get_data_array()
        else:
            raise AntFSAuthenticationException("Pair authentication failed",
                    response._get_argument("type"))
    def disconnect(self):
        """Tell the client to return to the link (search) state."""
        d = DisconnectCommand(DisconnectCommand.Type.RETURN_LINK, 0, 0)
        self._send_command(d)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/manager.py",
"copies": "1",
"size": "13590",
"license": "mit",
"hash": 5905373989090769000,
"line_mean": 35.7297297297,
"line_max": 117,
"alpha_frac": 0.5741721854,
"autogenerated": false,
"ratio": 4.163602941176471,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.022329020934042182,
"num_lines": 370
} |
import array
from ant.fs.command import parse, DownloadRequest, DownloadResponse,\
AuthenticateCommand
def authenticate_command():
    """Check wire encoding of serial-request and pairing authenticate commands."""
    # Serial-number request: type byte 0x01, zero-length payload.
    serial_cmd = AuthenticateCommand(
        AuthenticateCommand.Type.REQUEST_SERIAL, 123456789)
    assert serial_cmd.get() == array.array('B',
            [0x44, 0x04, 0x01, 0x00, 0x15, 0xcd, 0x5b, 0x7])
    # Pairing request: type byte 0x02, 'hello' payload padded to 8 bytes.
    pairing_cmd = AuthenticateCommand(
        AuthenticateCommand.Type.REQUEST_PAIRING, 987654321,
        map(ord, 'hello'))
    assert pairing_cmd.get() == array.array('B',
            [0x44, 0x04, 0x02, 0x05, 0xb1, 0x68, 0xde, 0x3a,
             0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x00, 0x00, 0x00])
def download_request():
    """Check that a raw download-request frame parses to DownloadRequest."""
    raw = array.array('B', [0x44, 0x09, 0x5f, 0x00, 0x00, 0xba, 0x00,
            0x00, 0x00, 0x00, 0x9e, 0xc2, 0x00, 0x00, 0x00, 0x00])
    parsed = parse(raw)
    assert isinstance(parsed, DownloadRequest)
def download_response():
    """Check that a captured download-response burst parses to DownloadResponse."""
    # Download response
    # Raw frame captured from a device; kept verbatim as the fixture.
    download_response = array.array('B', [68, 137, 0, 0, 241, 1, 0, 0, 0, 186, 0,
        0, 241, 187, 0, 0, 56, 4, 83, 78, 255, 255, 1, 12, 255, 255, 255,3, 72,
        129, 233, 42, 96, 64, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 10,
        42, 0, 0, 73, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 2, 120,
        255, 99, 255, 2, 192, 129, 233, 42, 121, 0, 0, 0, 21, 3, 255, 71, 0, 0,
        19, 0, 33, 253, 4, 134, 2, 4, 134, 3, 4, 133, 4, 4, 133, 5, 4, 133, 6, 4,
        133, 7, 4, 134, 8, 4, 134, 9, 4, 134, 10, 4, 134, 27, 4, 133, 28, 4, 133,
        29, 4, 133, 30, 4, 133, 254, 2, 132, 11, 2, 132, 12, 2, 132, 13,2, 132,
        14, 2, 132, 19, 2, 132, 20, 2, 132, 21, 2, 132, 22, 2, 132, 0, 1, 0, 1,
        1, 0, 15, 1, 2, 16, 1, 2, 17, 1, 2, 18, 1, 2, 23, 1, 0, 24, 1, 0, 25, 1,
        0, 26, 1, 2, 7, 150, 130, 233, 42, 234, 120, 233, 42, 19, 218, 10, 41,
        131, 80, 137, 8, 208, 206, 10, 41, 220, 95, 137, 8, 22, 176, 32, 0,22,
        176, 32, 0, 88, 34, 9, 0, 255, 255, 255, 255, 172, 1, 11, 41, 164, 238,
        139, 8, 58, 63, 10, 41, 131, 80, 137, 8, 0, 0, 137, 2, 0, 0, 234, 10, 57,
        14, 255, 255, 255, 255, 184, 0, 227, 0, 9, 1, 164, 172, 255, 255, 255, 7,
        1, 255, 2, 150, 130, 233, 42, 1, 0, 0, 0, 8, 9, 1, 72, 0, 0, 18, 0, 34,
        253, 4, 134, 2, 4, 134, 3, 4, 133, 4, 4, 133, 7, 4, 134, 8, 4, 134, 9, 4,
        134, 10, 4, 134, 29, 4, 133, 30, 4, 133, 31, 4, 133, 32, 4, 133, 254, 2,
        132, 11, 2, 132, 13, 2, 132, 14, 2, 132, 15, 2, 132, 20, 2, 132, 21, 2,
        132, 22, 2, 132, 23, 2, 132, 25, 2, 132, 26, 2, 132, 0, 1, 0, 1, 1, 0, 5,
        1, 0, 6, 1, 0, 16, 1, 2, 17, 1, 2, 18, 1, 2, 19, 1, 2, 24, 1, 2, 27, 1, 2,
        28, 1, 0, 8, 150, 130, 233, 42, 234, 120, 233, 42, 19, 218, 10, 41, 131,
        80, 137, 8, 22, 176, 32, 0, 22, 176, 32, 0, 88, 34, 9, 0, 255, 255, 255,
        255, 172, 1, 11, 41, 164, 238, 139, 8, 58, 63, 10, 41, 131, 80, 137, 8, 0,
        0, 137, 2, 0, 0, 234, 10, 57, 14, 255, 255, 255, 255, 184, 0, 227, 0, 0,
        0, 1, 0, 9, 1, 1, 0, 164, 172, 255, 255, 46, 255, 0, 73, 0, 0, 34, 0, 7,
        253, 4, 134, 0, 4, 134, 1, 2, 132, 2, 1, 0, 3, 1, 0, 4, 1, 0, 6, 1, 2, 9,
        150, 130, 233, 42, 22, 176, 32, 0, 1, 0, 0, 26, 1, 255, 233, 66, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    a = parse(download_response)
    assert isinstance(a, DownloadResponse)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/test/command_test.py",
"copies": "1",
"size": "4464",
"license": "mit",
"hash": 7379738992366373000,
"line_mean": 50.9069767442,
"line_max": 82,
"alpha_frac": 0.5719086022,
"autogenerated": false,
"ratio": 2.599883517763541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8569027353712511,
"avg_score": 0.020552953250205983,
"num_lines": 86
} |
import array
from ant.fs.commandpipe import parse, CreateFile
def main():
# Test create file
data = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09]
request = CreateFile(len(data), 0x80, [0x04, 0x00, 0x00], [0x00, 0xff, 0xff])
print request
print request.get()
# Test create file response
response_data = array.array('B', [2, 0, 0, 0, 4, 0, 0, 0, 128, 4, 123, 0, 103, 0, 0, 0])
response = parse(response_data)
assert response.get_request_id() == 0x04
assert response.get_response() == 0x00
assert response.get_data_type() == 0x80 #FIT
assert response.get_identifier() == array.array('B', [4, 123, 0])
assert response.get_index() == 103
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/test/commandpipe_test.py",
"copies": "1",
"size": "1838",
"license": "mit",
"hash": -5167285978668780000,
"line_mean": 41.7441860465,
"line_max": 92,
"alpha_frac": 0.7149075082,
"autogenerated": false,
"ratio": 3.575875486381323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4790782994581323,
"avg_score": null,
"num_lines": null
} |
import array
# Most significant bit first (big-endian)
# x^16+x^12+x^5+1 = (1) 0001 0000 0010 0001 = 0x1021
def crc(data):
    """MSB-first CRC-16 with polynomial 0x1021 over an iterable of bytes.

    Returns the tuple ("done", bin(remainder)) — a debugging aid, not a
    production API.
    """
    remainder = 0
    # A popular variant complements the remainder here.
    for value in data:
        remainder ^= value << 8  # n = 16 in this example
        for _ in range(8):  # one step per input bit
            top_bit_set = remainder & 0x8000
            remainder <<= 1
            if top_bit_set:
                remainder ^= 0x1021
        remainder &= 0xffff  # trim the remainder to 16 bits
    return "done", bin(remainder)
# Demo/self-check: print the CRC of the ASCII bytes of "Wikipedia"
# (presumably the worked example from the Wikipedia CRC article the
# header comment above is based on — TODO confirm expected value).
print crc(array.array("B", "Wikipedia"))
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/test/commons.py",
"copies": "1",
"size": "1764",
"license": "mit",
"hash": 7757229784319395000,
"line_mean": 40.023255814,
"line_max": 77,
"alpha_frac": 0.6859410431,
"autogenerated": false,
"ratio": 3.92,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51059410431,
"avg_score": null,
"num_lines": null
} |
import datetime
import logging
import struct
_logger = logging.getLogger("garmin.ant.fs.file")
class Directory:
    """Parsed ANT-FS directory: header fields plus a list of File entries."""
    def __init__(self, version, time_format, current_system_time,
            last_modified, files):
        self._version = version
        self._time_format = time_format
        self._current_system_time = current_system_time
        self._last_modified = last_modified
        self._files = files
    def get_version(self):
        """Return the (major, minor) directory version tuple."""
        return self._version
    def get_files(self):
        """Return the list of File entries."""
        return self._files
    def print_list(self):
        """Dump a tab-separated listing of all files to stdout."""
        print "Index\tType\tFIT Type\tFIT Number\tSize\tDate\tFIT Flags\tFlags"
        for f in self.get_files():
            print f.get_index(), "\t", f.get_type(), "\t",\
                  f.get_fit_sub_type(), "\t", f.get_fit_file_number(), "\t",\
                  f.get_size(), "\t", f.get_date(), "\t", f._typ_flags, "\t",\
                  f.get_flags_string()
    @staticmethod
    def parse(data):
        """Parse a raw directory download: 16-byte header + 16-byte records."""
        _logger.debug("Parse '%s' as directory", data)
        # Header
        version, structure_length, time_format, current_system_time, \
        last_modified = struct.unpack("<BBB5xII", data[:16])
        # The version byte packs major in the high nibble, minor in the low.
        version_major = (version & 0xf0) >> 4
        version_minor = (version & 0x0f)
        files = []
        # Each subsequent 16-byte record describes one file.
        for offset in range(16 , len(data), 16):
            item_data = data[offset:offset + 16]
            _logger.debug(" - (%d - %d) %d, %s", offset, offset + 16, len(item_data), item_data)
            files.append(File.parse(item_data))
        return Directory((version_major, version_minor), time_format,
                current_system_time, last_modified, files)
class File:
    """One entry of an ANT-FS directory listing."""
    class Type:
        FIT = 0x80
    class Identifier:
        DEVICE = 1
        SETTING = 2
        SPORT_SETTING = 3
        ACTIVITY = 4
        WORKOUT = 5
        COURSE = 6
        WEIGHT = 9
        TOTALS = 10
        GOALS = 11
        BLOOD_PRESSURE = 14
        ACTIVITY_SUMMARY = 20
    def __init__(self, index, typ, ident, typ_flags, flags, size, date):
        self._index = index
        self._type = typ
        self._ident = ident
        self._typ_flags = typ_flags
        self._flags = flags
        self._size = size
        self._date = date
    def get_index(self):
        """Directory index of this file."""
        return self._index
    def get_type(self):
        """Raw data-type byte (0x80 for FIT files)."""
        return self._type
    def get_identifier(self):
        """The three raw identifier bytes (sub type + file number)."""
        return self._ident
    def get_fit_sub_type(self):
        """FIT sub type: first identifier byte."""
        return self._ident[0]
    def get_fit_file_number(self):
        """FIT file number: little-endian 16-bit value after the sub type."""
        return struct.unpack("<xH", self._ident)[0]
    def get_size(self):
        """File size in bytes."""
        return self._size
    def get_date(self):
        """File timestamp as stored in this entry."""
        return self._date
    def get_flags_string(self):
        """Render the general flags as 'rweaA'; set bits show as '-'."""
        letter_bits = (("r", 0b00001000), ("w", 0b00010000),
                       ("e", 0b00100000), ("a", 0b01000000),
                       ("A", 0b10000000))
        return "".join(letter if self._flags & bit == 0 else "-"
                       for letter, bit in letter_bits)
    @staticmethod
    def parse(data):
        """Build a File from one 16-byte directory record."""
        _logger.debug("Parse '%s' (%d) as file %s", data, len(data), type(data))
        # i1, i2, i3 -> three byte integer, not supported by struct
        index, data_type, data_flags, flags, file_size, file_date = \
            struct.unpack("<HB3xBBII", data)
        # 631065600 shifts the device epoch (1989-12-31) to the Unix epoch.
        file_date = datetime.datetime.fromtimestamp(file_date + 631065600)
        identifier = data[3:6]
        return File(index, data_type, identifier, data_flags, flags,
                    file_size, file_date)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/file.py",
"copies": "1",
"size": "4725",
"license": "mit",
"hash": 8841981054005178000,
"line_mean": 32.75,
"line_max": 96,
"alpha_frac": 0.5961904762,
"autogenerated": false,
"ratio": 3.729281767955801,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9760968489035065,
"avg_score": 0.012900751024147393,
"num_lines": 140
} |
# http://reveng.sourceforge.net/crc-catalogue/16.htm#crc.cat.arc
def crc(data, seed=0x0000):
    """Compute the CRC-16/ARC checksum of *data* (iterable of byte values).

    Reflected polynomial 0xA001, starting from *seed* (0 by default, so
    crc of empty data is the seed itself).
    """
    remainder = seed
    for value in data:
        remainder ^= value
        for _ in range(8):
            low_bit_set = remainder & 0x0001
            remainder >>= 1
            if low_bit_set:
                remainder ^= 0xA001
    return remainder
| {
"repo_name": "Tigge/openant",
"path": "ant/fs/commons.py",
"copies": "1",
"size": "1460",
"license": "mit",
"hash": 405633578248670850,
"line_mean": 39.5555555556,
"line_max": 77,
"alpha_frac": 0.7034246575,
"autogenerated": false,
"ratio": 4.112676056338028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5316100713838028,
"avg_score": null,
"num_lines": null
} |
import array
from collections import OrderedDict
import copy
import logging
import struct
_logger = logging.getLogger("ant.fs.command")
class Command:
    """Base class for ANT-FS commands and responses.

    Every message starts with the marker byte 0x44 followed by the
    command id; subclasses extend _format and register further fields
    with _add_argument in declaration order.
    """
    class Type:
        # Commands
        LINK = 0x02
        DISCONNECT = 0x03
        AUTHENTICATE = 0x04
        PING = 0x05
        DOWNLOAD_REQUEST = 0x09
        UPLOAD_REQUEST = 0x0A
        ERASE_REQUEST = 0x0B
        UPLOAD_DATA = 0x0C
        # Responses
        AUTHENTICATE_RESPONSE = 0x84
        DOWNLOAD_RESPONSE = 0x89
        UPLOAD_RESPONSE = 0x8A
        ERASE_RESPONSE = 0x8B
        UPLOAD_DATA_RESPONSE = 0x8C
    _format = "<BB"
    _id = None

    def __init__(self):
        self._arguments = OrderedDict()
        self._add_argument("x", 0x44)  # fixed ANT-FS marker byte
        self._add_argument("id", self._id)

    def _add_argument(self, name, value):
        self._arguments[name] = value

    def _get_argument(self, name):
        return self._arguments[name]

    def _get_arguments(self):
        return self._arguments.values()

    def get_id(self):
        return self._id

    def get(self):
        """Pack the registered arguments into an array('B')."""
        packed = struct.pack(self._format, *self._get_arguments())
        result = array.array("B", packed)
        _logger.debug("packing %r in %r,%s", packed, result, type(result))
        return result

    @classmethod
    def _parse_args(cls, data):
        return struct.unpack(cls._format, data)

    @classmethod
    def _parse(cls, data):
        """Decode *data*, verify marker and id, and build an instance."""
        args = cls._parse_args(data)
        assert args[0] == 0x44
        assert args[1] == cls._id
        return cls(*args[2:])

    def _debug(self):
        """Pretty-print the argument table to stdout."""
        widths = [0, 0]
        for key, value in self._arguments.items():
            widths[0] = max(widths[0], len(str(key)))
            widths[1] = max(widths[1], len(str(value)))
        max_length = widths[0] + widths[1] + 3
        print("=" * max_length)
        print(self.__class__.__name__)
        print("-" * max_length)
        for key, value in self._arguments.items():
            print(str(key) + ":", " " * (max_length - len(key)), str(value))
        print("=" * max_length)
class LinkCommand(Command):
    """Link command (0x02): move the client to a dedicated channel."""
    _id = Command.Type.LINK
    _format = Command._format + "BBI"
    def __init__(self, channel_frequency, channel_period, host_serial_number):
        Command.__init__(self)
        for name, value in (
            ("channel_frequency", channel_frequency),
            ("channel_period", channel_period),
            ("host_serial_number", host_serial_number),
        ):
            self._add_argument(name, value)
class DisconnectCommand(Command):
    """Disconnect command (0x03): drop the client back to link/broadcast."""
    class Type:
        RETURN_LINK = 0
        RETURN_BROADCAST = 1
    _id = Command.Type.DISCONNECT
    _format = Command._format + "BBBxxx"
    def __init__(self, command_type, time_duration, application_specific_duration):
        Command.__init__(self)
        for name, value in (
            ("command_type", command_type),
            ("time_duration", time_duration),
            ("application_specific_duration", application_specific_duration),
        ):
            self._add_argument(name, value)
class AuthenticateBase(Command):
    """Shared wire format for authenticate commands and responses.

    Layout: common 2-byte header, type byte, payload-length byte, 4-byte
    serial number, then the variable-length payload zero-padded to a
    multiple of 8 bytes.
    """
    # Variable-length payload: encoding is done by hand in get().
    _format = None

    def __init__(self, x_type, serial_number, data=None):
        """Store the type byte, serial number and optional payload.

        @param data optional payload byte values; omitted means empty.
        """
        Command.__init__(self)
        self._add_argument("type", x_type)
        self._add_argument("serial_number", serial_number)
        # A fresh list per call avoids the shared-mutable-default pitfall
        # of the previous 'data=[]' signature; callers that omit *data*
        # still observe an empty list.
        self._add_argument("data", [] if data is None else data)

    def _pad(self, data):
        """Return a copy of *data* zero-padded to a multiple of 8 bytes."""
        padded_data = copy.copy(data)
        missing = 8 - len(padded_data) % 8
        if missing < 8:
            padded_data.extend([0x00] * missing)
        return padded_data

    def get_serial(self):
        """Return the serial number carried by this message."""
        return self._get_argument("serial_number")

    def get_data_string(self):
        """Return the payload decoded as text, or None if it is empty."""
        if not self._get_argument("data"):
            return None
        else:
            return "".join(map(chr, self._get_argument("data")))

    def get_data_array(self):
        """Return the raw payload."""
        return self._get_argument("data")

    def get(self):
        """Serialize to array('B'): header, length, serial, padded payload."""
        arguments = list(self._get_arguments())
        data = list(self._get_argument("data"))
        lst = array.array(
            "B",
            struct.pack(
                "<BBBBI",
                arguments[0],
                arguments[1],
                arguments[2],
                len(data),
                arguments[3],
            ),
        )
        padded = self._pad(data)
        lst.extend(array.array("B", padded))
        return lst

    @classmethod
    def _parse_args(cls, data):
        """Split an incoming frame: 8-byte header, then length-prefixed data."""
        header = struct.unpack("<BBBxI", data[0:8])
        # Byte 3 of the frame holds the payload length.
        data_length = data[3]
        return header + (data[8 : 8 + data_length],)
class AuthenticateCommand(AuthenticateBase):
    """Authenticate command (0x04) sent by the host."""
    class Request:
        PASS_THROUGH = 0
        SERIAL = 1
        PAIRING = 2
        PASSKEY_EXCHANGE = 3
    _id = Command.Type.AUTHENTICATE

    def __init__(self, command_type, host_serial_number, data=None):
        """*data* defaults to an empty payload.

        Uses None instead of the previous mutable [] default; an empty
        list is passed through explicitly, so behavior is unchanged.
        """
        AuthenticateBase.__init__(
            self, command_type, host_serial_number, [] if data is None else data
        )
class AuthenticateResponse(AuthenticateBase):
    """Authenticate response (0x84) sent by the client."""
    class Response:
        NOT_AVAILABLE = 0
        ACCEPT = 1
        REJECT = 2
    _id = Command.Type.AUTHENTICATE_RESPONSE

    def __init__(self, response_type, client_serial_number, data=None):
        """*data* defaults to an empty payload.

        Uses None instead of the previous mutable [] default; an empty
        list is passed through explicitly, so behavior is unchanged.
        """
        AuthenticateBase.__init__(
            self, response_type, client_serial_number, [] if data is None else data
        )
class PingCommand(Command):
    """Ping command (0x05): just the common header, no extra fields."""
    _id = Command.Type.PING
class DownloadRequest(Command):
    """Download request (0x09) sent by the host."""
    _id = Command.Type.DOWNLOAD_REQUEST
    _format = Command._format + "HIx?HI"
    def __init__(
        self, data_index, data_offset, initial_request, crc_seed, maximum_block_size=0
    ):
        Command.__init__(self)
        for name, value in (
            ("data_index", data_index),
            ("data_offset", data_offset),
            ("initial_request", initial_request),
            ("crc_seed", crc_seed),
            ("maximum_block_size", maximum_block_size),
        ):
            self._add_argument(name, value)
class DownloadResponse(Command):
    """Download response (0x89) sent by the client.

    Wire layout on success: 16-byte header, the raw data block, then an
    8-byte footer whose last two bytes hold the CRC.
    """
    class Response:
        OK = 0
        NOT_EXIST = 1
        NOT_READABLE = 2
        NOT_READY = 3
        INVALID_REQUEST = 4
        INCORRECT_CRC = 5
    _id = Command.Type.DOWNLOAD_RESPONSE
    # Variable-length payload: decoding is done by hand in _parse_args.
    _format = None
    def __init__(self, response, remaining, offset, size, data, crc):
        Command.__init__(self)
        self._add_argument("response", response)
        self._add_argument("remaining", remaining)
        self._add_argument("offset", offset)
        self._add_argument("size", size)
        self._add_argument("data", data)
        self._add_argument("crc", crc)
    @classmethod
    def _parse_args(cls, data):
        # Error responses carry no payload, so only the 16-byte header is
        # meaningful; substitute an empty data block and a zero CRC.
        if data[2] == DownloadResponse.Response.OK:
            return (
                struct.unpack("<BBBxIII", data[0:16])
                + (data[16:-8],)
                + struct.unpack("<6xH", data[-8:])
            )
        else:
            return (
                struct.unpack("<BBBxIII", data[0:16]) + (array.array("B", []),) + (0,)
            )
class UploadRequest(Command):
    """Host request asking the client to accept an upload."""

    _id = Command.Type.UPLOAD_REQUEST
    _format = Command._format + "HI4xI"

    def __init__(self, data_index, max_size, data_offset):
        Command.__init__(self)
        # Register the arguments in wire order (see _format above).
        for name, value in (
            ("data_index", data_index),
            ("max_size", max_size),
            ("data_offset", data_offset),
        ):
            self._add_argument(name, value)
class UploadResponse(Command):
    """Client reply to an UploadRequest, describing upload capabilities."""

    class Response:
        OK = 0
        NOT_EXIST = 1
        NOT_WRITEABLE = 2
        NOT_ENOUGH_SPACE = 3
        INVALID_REQUEST = 4
        NOT_READY = 5

    _id = Command.Type.UPLOAD_RESPONSE
    _format = Command._format + "BxIII6xH"

    def __init__(
        self, response, last_data_offset, maximum_file_size, maximum_block_size, crc
    ):
        Command.__init__(self)
        # Register the arguments in wire order (see _format above).
        for name, value in (
            ("response", response),
            ("last_data_offset", last_data_offset),
            ("maximum_file_size", maximum_file_size),
            ("maximum_block_size", maximum_block_size),
            ("crc", crc),
        ):
            self._add_argument(name, value)
class UploadDataCommand(Command):
    """Carries a block of file data from host to client during an upload."""

    _id = Command.Type.UPLOAD_DATA
    # Variable-length payload, packed by hand in get().
    _format = None

    def __init__(self, crc_seed, data_offset, data, crc):
        Command.__init__(self)
        self._add_argument("crc_seed", crc_seed)
        self._add_argument("data_offset", data_offset)
        self._add_argument("data", data)
        self._add_argument("crc", crc)

    def get(self):
        """Serialize as: 8-byte header, payload, 8-byte footer (pad + CRC)."""
        arguments = list(self._get_arguments())
        # arguments[:4] are the first four registered values packed as
        # two bytes, crc_seed (H) and data_offset (I).
        header = struct.pack("<BBHI", *arguments[:4])
        footer = struct.pack("<6xH", self._get_argument("crc"))
        data = array.array("B", header)
        data.extend(self._get_argument("data"))
        data.extend(array.array("B", footer))
        return data

    @classmethod
    def _parse_args(cls, data):
        # Inverse of get(): 8-byte header, payload, then 6 pad bytes + CRC.
        return (
            struct.unpack("<BBHI", data[0:8])
            + (data[8:-8],)
            + struct.unpack("<6xH", data[-8:])
        )
class UploadDataResponse(Command):
    """Client acknowledgement of an UploadDataCommand."""

    class Response:
        OK = 0
        FAILED = 1

    _format = Command._format + "B5x"
    _id = Command.Type.UPLOAD_DATA_RESPONSE

    def __init__(self, response):
        Command.__init__(self)
        # Single argument: the Response code above.
        self._add_argument("response", response)
class EraseRequestCommand(Command):
    """Host request asking the client to erase a data file."""

    _format = Command._format + "I2x"
    _id = Command.Type.ERASE_REQUEST

    def __init__(self, data_file_index):
        Command.__init__(self)
        # Single argument: the index of the file to erase.
        self._add_argument("data_file_index", data_file_index)
class EraseResponse(Command):
    """Client result for an erase request."""

    class Response:
        ERASE_SUCCESSFUL = 0
        ERASE_FAILED = 1
        NOT_READY = 2

    _format = Command._format + "B5x"
    _id = Command.Type.ERASE_RESPONSE

    def __init__(self, response):
        Command.__init__(self)
        # Single argument: the Response code above.
        self._add_argument("response", response)
# Dispatch table mapping a command-type byte to the class that parses it;
# used by parse() below.
_classes = {
    # Commands
    Command.Type.LINK: LinkCommand,
    Command.Type.DISCONNECT: DisconnectCommand,
    Command.Type.AUTHENTICATE: AuthenticateCommand,
    Command.Type.PING: PingCommand,
    Command.Type.DOWNLOAD_REQUEST: DownloadRequest,
    Command.Type.UPLOAD_REQUEST: UploadRequest,
    Command.Type.ERASE_REQUEST: EraseRequestCommand,
    Command.Type.UPLOAD_DATA: UploadDataCommand,
    # Responses
    Command.Type.AUTHENTICATE_RESPONSE: AuthenticateResponse,
    Command.Type.DOWNLOAD_RESPONSE: DownloadResponse,
    Command.Type.UPLOAD_RESPONSE: UploadResponse,
    Command.Type.ERASE_RESPONSE: EraseResponse,
    Command.Type.UPLOAD_DATA_RESPONSE: UploadDataResponse,
}
def parse(data):
    """Parse a raw ANT-FS command buffer into the matching Command subclass.

    @param data: bytes-like buffer starting with the 0x44 marker byte,
        followed by the command-type byte.
    """
    _logger.debug("parsing data %r", data)
    mark, command_type = struct.unpack("<BB", data[0:2])
    # NOTE(review): assert is stripped under python -O; consider raising
    # ValueError for a malformed marker instead.
    assert mark == 0x44
    command_class = _classes[command_type]
    return command_class._parse(data)
| {
"repo_name": "Tigge/openant",
"path": "ant/fs/command.py",
"copies": "1",
"size": "11746",
"license": "mit",
"hash": -89785591063087120,
"line_mean": 29.1179487179,
"line_max": 86,
"alpha_frac": 0.6082070492,
"autogenerated": false,
"ratio": 3.756315957787016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48645230069870166,
"avg_score": null,
"num_lines": null
} |
import array
from collections import OrderedDict
import logging
import struct
from ant.fs.command import Command
_logger = logging.getLogger("ant.fs.commandpipe")
class CommandPipe(object):
    """Base class for ANT-FS command-pipe messages.

    Subclasses set _id (a Type value) and _format (a struct format string
    covering all fixed-size arguments, or None for variable-length bodies).
    Arguments are stored in an OrderedDict so they pack in insertion order.
    """

    class Type:
        # Command-pipe message identifiers.
        REQUEST = 0x01
        RESPONSE = 0x02
        TIME = 0x03
        CREATE_FILE = 0x04
        DIRECTORY_FILTER = 0x05
        SET_AUTHENTICATION_PASSKEY = 0x06
        SET_CLIENT_FRIENDLY_NAME = 0x07
        FACTORY_RESET_COMMAND = 0x08

    # Common header: command byte, two pad bytes, sequence byte.
    _format = "<BxxB"
    _id = None
    # Class-wide sequence counter shared by every message instance.
    _sequence = 0

    def __init__(self):
        self._arguments = OrderedDict()
        self._add_argument("command", self._id)
        # Every message constructed bumps the shared sequence number.
        CommandPipe._sequence += 1
        self._add_argument("sequence", CommandPipe._sequence)

    def _add_argument(self, name, value):
        self._arguments[name] = value

    def _get_argument(self, name):
        return self._arguments[name]

    def _get_arguments(self):
        return self._arguments.values()

    def __getattr__(self, attr):
        # Get arguments with get_*
        # (synthesizes a zero-argument accessor for each stored argument)
        if attr.startswith("get_"):
            name = attr[4:]
            if name in self._arguments:
                return lambda: self._arguments[name]
        raise AttributeError("No such attribute")

    def get(self):
        """Pack the arguments into an array('B') ready for transfer."""
        arguments = list(self._get_arguments())
        data = struct.pack(self._format, *arguments)
        lst = array.array("B", data)
        _logger.debug("packing %r in %r,%s", data, lst, type(lst))
        return lst

    @classmethod
    def _parse_args(cls, data):
        return struct.unpack(cls._format, data)

    @classmethod
    def _parse(cls, data):
        """Build an instance of cls from raw bytes.

        args[0] is the command id, args[1] the sequence number; the
        remaining values are forwarded to the subclass constructor.
        """
        args = cls._parse_args(data)
        assert args[0] == cls._id
        instance = cls(*args[2:])
        # Keep the sequence number from the wire rather than the one
        # assigned by __init__ above.
        instance._arguments["sequence"] = args[1]
        return instance

    def _debug(self):
        # Pretty-print all arguments, for interactive debugging.
        max_key_length, max_value_length = 0, 0
        for key, value in self._arguments.items():
            max_key_length = max(len(str(key)), max_key_length)
            max_value_length = max(len(str(value)), max_value_length)
        max_length = max_key_length + max_value_length + 3
        print("=" * max_length)
        print(self.__class__.__name__)
        print("-" * max_length)
        for key, value in self._arguments.items():
            print(str(key) + ":", " " * (max_length - len(key)), str(value))
        print("=" * max_length)
class Request(CommandPipe):
    """Command-pipe request for the item identified by request_id."""

    _format = CommandPipe._format + "Bxxx"
    _id = CommandPipe.Type.REQUEST

    def __init__(self, request_id):
        CommandPipe.__init__(self)
        # Single argument: the id of the requested item.
        self._add_argument("request_id", request_id)
class Response(CommandPipe):
    """Generic command-pipe response to a previous request."""

    class Response:
        OK = 0
        FAILED = 1
        REJECTED = 2
        NOT_SUPPORTED = 3

    _id = CommandPipe.Type.RESPONSE
    _format = CommandPipe._format + "BxBx"

    def __init__(self, request_id, response):
        CommandPipe.__init__(self)
        # Register in wire order: the request being answered, then the
        # Response code above.
        for name, value in (("request_id", request_id), ("response", response)):
            self._add_argument(name, value)
class Time(CommandPipe):
    """Command-pipe TIME message carrying device and system timestamps."""

    class Format:
        # How the device keeps time.
        DIRECTORY = 0
        SYSTEM = 1
        COUNTER = 2

    _id = CommandPipe.Type.TIME
    _format = CommandPipe._format + "IIBxxx"

    def __init__(self, current_time, system_time, time_format):
        CommandPipe.__init__(self)
        # Register in wire order (see _format above).
        for name, value in (
            ("current_time", current_time),
            ("system_time", system_time),
            ("time_format", time_format),
        ):
            self._add_argument(name, value)
class TimeResponse(Response):
    """Response to a TIME request; eight trailing pad bytes on the wire."""

    _format = Response._format + "xxxxxxxx"

    def __init__(self, request_id, response):
        Response.__init__(self, request_id, response)
class CreateFile(Request):
    """Command-pipe request asking the device to create a new file."""

    _id = CommandPipe.Type.CREATE_FILE
    # Variable-length body (identifier fields), packed by hand in get().
    _format = None

    def __init__(self, size, data_type, identifier, identifier_mask):
        CommandPipe.__init__(self)
        self._add_argument("size", size)
        self._add_argument("data_type", data_type)
        self._add_argument("identifier", identifier)
        self._add_argument("identifier_mask", identifier_mask)

    def get(self):
        """Serialize: header + size + data_type, 3-byte identifier, one
        zero pad byte, then the 3-byte identifier mask."""
        arguments = list(self._get_arguments())
        data = array.array("B", struct.pack(CommandPipe._format + "IB", *arguments[:4]))
        data.extend(self._get_argument("identifier"))
        data.extend([0])
        data.extend(self._get_argument("identifier_mask"))
        return data

    @classmethod
    def _parse_args(cls, data):
        # NOTE(review): this uses Command._format (from ant.fs.command)
        # whereas get() packs with CommandPipe._format - confirm both
        # formats describe the same 4-byte header.
        # Byte 12 (the zero pad written by get()) is skipped between the
        # identifier and identifier_mask slices.
        return (
            struct.unpack(Command._format + "IB", data[0:9])
            + (data[9:12],)
            + (data[13:16],)
        )
class CreateFileResponse(Response):
    """Response to CreateFile; reports the index assigned to the new file."""

    _format = Response._format + "BBBBHxx"

    def __init__(self, request_id, response, data_type, identifier, index):
        Response.__init__(self, request_id, response)
        self._add_argument("data_type", data_type)
        self._add_argument("identifier", identifier)
        self._add_argument("index", index)

    @classmethod
    def _parse_args(cls, data):
        # First 8 bytes are the standard Response header; then the data
        # type byte, the 3-byte identifier, and the 16-bit file index.
        return Response._parse_args(data[:8]) + (
            data[8],
            data[9:12],
            struct.unpack("<H", data[12:14])[0],
        )
# Maps a command-pipe message id to the class that parses it; entries
# marked None are not implemented.
_classes = {
    CommandPipe.Type.REQUEST: Request,
    CommandPipe.Type.RESPONSE: Response,
    CommandPipe.Type.TIME: Time,
    CommandPipe.Type.CREATE_FILE: CreateFile,
    CommandPipe.Type.DIRECTORY_FILTER: None,
    CommandPipe.Type.SET_AUTHENTICATION_PASSKEY: None,
    CommandPipe.Type.SET_CLIENT_FRIENDLY_NAME: None,
    CommandPipe.Type.FACTORY_RESET_COMMAND: None,
}

# Specialized Response subclasses, keyed by the id of the request they
# answer (found at byte 4 of a generic RESPONSE message).
_responses = {
    CommandPipe.Type.TIME: TimeResponse,
    CommandPipe.Type.CREATE_FILE: CreateFileResponse,
}
def parse(data):
    """Parse raw command-pipe bytes into the matching CommandPipe subclass.

    Generic RESPONSE messages are upgraded to a request-specific response
    class (e.g. TimeResponse) when one is registered for the original
    request id found at data[4].
    """
    commandpipe_type = _classes[data[0]]
    if commandpipe_type == Response:
        if data[4] in _responses and len(data) > 8:
            commandpipe_type = _responses[data[4]]
    # NOTE(review): unimplemented types map to None and would raise
    # AttributeError here - confirm callers never see those ids.
    return commandpipe_type._parse(data)
| {
"repo_name": "Tigge/openant",
"path": "ant/fs/commandpipe.py",
"copies": "1",
"size": "6956",
"license": "mit",
"hash": 3118821827656961500,
"line_mean": 30.3333333333,
"line_max": 88,
"alpha_frac": 0.6293847039,
"autogenerated": false,
"ratio": 3.7866086009798585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9915740242829043,
"avg_score": 0.00005061241016297196,
"num_lines": 222
} |
import array
import unittest
import datetime
from ant.fs.commandpipe import (
parse,
CreateFile,
Request,
CommandPipe,
Time,
TimeResponse,
)
class CreateFileTest(unittest.TestCase):
    """Exercises CreateFile construction and CreateFileResponse parsing."""

    def runTest(self):
        # Test create file
        data = b"\x01\x02\x03\x04\x05\x06\x07\x08\x09"
        # NOTE(review): request is only constructed (exercising __init__);
        # its serialized form is never asserted.
        request = CreateFile(len(data), 0x80, [0x04, 0x00, 0x00], [0x00, 0xFF, 0xFF])
        # Test create file response
        response_data = array.array(
            "B", b"\x02\x00\x00\x00\x04\x00\x00\x00\x80\x04\x7b\x00\x67\x00\x00\x00"
        )
        response = parse(response_data)
        self.assertEqual(response.get_request_id(), 0x04)
        self.assertEqual(response.get_response(), 0x00)
        self.assertEqual(response.get_data_type(), 0x80)  # FIT
        self.assertEqual(response.get_identifier(), array.array("B", b"\x04\x7b\x00"))
        self.assertEqual(response.get_index(), 103)
class TimeTest(unittest.TestCase):
    """Exercises Time/TimeResponse serialization and parsing.

    Timestamps are expressed as seconds since the ANT epoch,
    1989-12-31 00:00:00.
    """

    def runTest(self):
        # Test time request
        request = Request(CommandPipe.Type.TIME)
        self.assertEqual(
            request.get(),
            array.array(
                "B",
                # Header + the current value of the shared sequence counter.
                b"\x01\x00\x00"
                + CommandPipe._sequence.to_bytes(1, byteorder="big")
                + b"\x03\x00\x00\x00",
            ),
        )
        # Test time parse
        response_data = array.array(
            "B", b"\x03\x00\x00\x0F\x78\xB5\xCA\x25\xC8\xA0\xF4\x29\x01\x00\x00\x00"
        )
        response = parse(response_data)
        self.assertIsInstance(response, Time)
        self.assertEqual(response.get_command(), 0x03)
        self.assertEqual(response.get_sequence(), 0x0F)
        # Expected values are seconds since the ANT epoch (1989-12-31).
        current_time = (
            datetime.datetime(2010, 2, 2, 10, 42, 0)
            - datetime.datetime(1989, 12, 31, 0, 0, 0)
        ).total_seconds()
        self.assertEqual(response.get_current_time(), current_time)
        system_time = (
            datetime.datetime(2012, 4, 20, 23, 10, 0)
            - datetime.datetime(1989, 12, 31, 0, 0, 0)
        ).total_seconds()
        self.assertEqual(response.get_system_time(), system_time)
        self.assertEqual(response.get_time_format(), 1)
        # Test time create
        current_time = (
            datetime.datetime(2015, 1, 4, 21, 23, 30)
            - datetime.datetime(1989, 12, 31, 0, 0, 0)
        ).total_seconds()
        system_time = (
            datetime.datetime(2012, 4, 20, 23, 10, 0)
            - datetime.datetime(1989, 12, 31, 0, 0, 0)
        ).total_seconds()
        time = Time(int(current_time), int(system_time), Time.Format.COUNTER)
        self.assertEqual(
            time.get(),
            array.array(
                "B",
                b"\x03\x00\x00"
                + CommandPipe._sequence.to_bytes(1, byteorder="big")
                + b"\x52\x63\x0c\x2f\xc8\xa0\xf4\x29\x02\x00\x00\x00",
            ),
        )
        # Test time request response
        response_data = array.array(
            "B", b"\x02\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        response = parse(response_data)
        self.assertIsInstance(response, TimeResponse)
| {
"repo_name": "Tigge/openant",
"path": "ant/tests/fs/test_commandpipe.py",
"copies": "1",
"size": "4299",
"license": "mit",
"hash": -1573662784471485000,
"line_mean": 36.0603448276,
"line_max": 86,
"alpha_frac": 0.6166550361,
"autogenerated": false,
"ratio": 3.4474739374498795,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9561611030554303,
"avg_score": 0.0005035885991152261,
"num_lines": 116
} |
import datetime
import logging
import struct
import sys
_logger = logging.getLogger("ant.fs.file")
class Directory:
    """Parsed ANT-FS directory file: header fields plus a list of File entries."""

    def __init__(self, version, time_format, current_system_time, last_modified, files):
        # version is the (major, minor) tuple produced by parse().
        self._version = version
        self._time_format = time_format
        self._current_system_time = current_system_time
        self._last_modified = last_modified
        self._files = files

    def get_version(self):
        return self._version

    def get_time_format(self):
        return self._time_format

    def get_current_system_time(self):
        return self._current_system_time

    def get_last_modified(self):
        return self._last_modified

    def get_files(self):
        return self._files

    def print_list(self):
        """Print a human-readable table of the directory entries."""
        print("Index\tType\tFIT Type\tFIT Number\tSize\tDate\tFIT Flags\tFlags")
        for f in self.get_files():
            print(
                f.get_index(),
                "\t",
                f.get_type(),
                "\t",
                f.get_fit_sub_type(),
                "\t",
                f.get_fit_file_number(),
                "\t",
                f.get_size(),
                "\t",
                f.get_date(),
                "\t",
                f._typ_flags,
                "\t",
                f.get_flags_string(),
            )

    @staticmethod
    def parse(data):
        """Parse a raw directory file: a 16-byte header followed by
        consecutive 16-byte file entries."""
        _logger.debug("Parse '%s' as directory", data)
        # Header
        (
            version,
            structure_length,
            time_format,
            current_system_time,
            last_modified,
        ) = struct.unpack("<BBB5xII", data[:16])
        # The packed version byte holds major/minor in its two nibbles.
        version_major = (version & 0xF0) >> 4
        version_minor = version & 0x0F
        files = []
        for offset in range(16, len(data), 16):
            item_data = data[offset : offset + 16]
            _logger.debug(
                " - (%d - %d) %d, %s", offset, offset + 16, len(item_data), item_data
            )
            files.append(File.parse(item_data))
        return Directory(
            (version_major, version_minor),
            time_format,
            current_system_time,
            last_modified,
            files,
        )
class File:
    """A single 16-byte entry in an ANT-FS directory file."""

    class Type:
        # Data type values; 0x80 marks FIT files.
        FIT = 0x80

    class Identifier:
        # FIT sub-types (first identifier byte) for FIT files.
        DEVICE = 1
        SETTING = 2
        SPORT = 3
        ACTIVITY = 4
        WORKOUT = 5
        COURSE = 6
        SCHEDULES = 7
        WAYPOINTS = 8
        WEIGHT = 9
        TOTALS = 10
        GOALS = 11
        BLOOD_PRESSURE = 14
        MONITORING_A = 15
        ACTIVITY_SUMMARY = 20
        MONITORING_DAILY = 28
        MONITORING_B = 32
        SEGMENT = 34
        SEGMENT_LIST = 35

    def __init__(self, index, typ, ident, typ_flags, flags, size, date):
        self._index = index
        self._type = typ
        self._ident = ident
        self._typ_flags = typ_flags
        self._flags = flags
        self._size = size
        self._date = date

    def get_index(self):
        return self._index

    def get_type(self):
        return self._type

    def get_identifier(self):
        return self._ident

    def get_fit_sub_type(self):
        # First identifier byte holds the FIT sub-type.
        return self._ident[0]

    def get_fit_file_number(self):
        # Identifier bytes 1-2, little-endian 16-bit number.
        return struct.unpack("<xH", self._ident)[0]

    def get_size(self):
        return self._size

    def get_date(self):
        return self._date

    # Flag accessors return the raw masked bit (a truthy int), not a bool.
    def is_readable(self):
        return self._flags & 0b10000000

    def is_writable(self):
        return self._flags & 0b01000000

    def is_erasable(self):
        return self._flags & 0b00100000

    def is_archived(self):
        return self._flags & 0b00010000

    def is_append_only(self):
        return self._flags & 0b00001000

    def is_encrypted(self):
        return self._flags & 0b00000100

    def get_flags_string(self):
        """Return an ls-style summary such as "rwe---" of the flag bits."""
        s = "r" if self.is_readable() else "-"
        s += "w" if self.is_writable() else "-"
        s += "e" if self.is_erasable() else "-"
        s += "A" if self.is_archived() else "-"
        s += "a" if self.is_append_only() else "-"
        s += "c" if self.is_encrypted() else "-"
        return s

    @staticmethod
    def parse(data):
        """Parse one 16-byte directory entry into a File instance."""
        _logger.debug("Parse '%s' (%d) as file %s", data, len(data), type(data))
        # i1, i2, i3 -> three byte integer, not supported by struct
        index, data_type, data_flags, flags, file_size, file_date = struct.unpack(
            "<HB3xBBII", data
        )
        # Dates count seconds from the ANT epoch (1989-12-31 00:00 UTC),
        # which is 631065600 seconds after the Unix epoch.
        file_date = datetime.datetime.fromtimestamp(
            file_date + 631065600, datetime.timezone.utc
        )
        # The three identifier bytes overlap the '3x' pad in the format above.
        identifier = data[3:6]
        return File(
            index, data_type, identifier, data_flags, flags, file_size, file_date
        )
| {
"repo_name": "Tigge/openant",
"path": "ant/fs/file.py",
"copies": "1",
"size": "5824",
"license": "mit",
"hash": -8530480695355583000,
"line_mean": 27.4097560976,
"line_max": 88,
"alpha_frac": 0.5674793956,
"autogenerated": false,
"ratio": 3.8518518518518516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4919331247451852,
"avg_score": null,
"num_lines": null
} |
"""
Ant-style ``**/*`` globbing functionality, as used by `xpybuild.pathsets.FindPaths`.
"""
from xpybuild.utils.buildexceptions import BuildException
import re, logging, sys
from xpybuild.utils.flatten import flatten
_log = logging.getLogger('antglob')
#def dbg(s, *args): # for manual use with doc tests
# sys.stderr.write((s%args)+'\n')
# sys.stderr.flush()
class GlobPatternSet(object):
    """
    Holds a set of one or more ant-style glob patterns, which can be used to
    filter a set of paths based on whether each path matches any of the
    specified patterns.

    Typically there will be one GlobPatternSet for includes and another for
    excludes.

    This object is immutable and thread-safe.

    Glob patterns may contain '*' (indicating zero or more non-slash
    characters) or '**' (indicating zero or more characters including slashes).
    The '?' character is not supported.
    """
    __patternCache = {} # static cache, since there will be lots of these; mostly to save memory, but also a bit of CPU
    __patternCache_get = __patternCache.get # NOTE(review): unused fast-lookup alias; kept for compatibility

    # define these so we can use fast 'is' comparisons
    STAR = '*'
    STARSTAR = '**'

    @staticmethod
    def create(patterns):
        """
        Create a new GlobPatternSet for matching any of the specified patterns.

        Patterns ending with a '/' can only match directories, and patterns without
        can only match files.

        @param patterns: a string, or a list of one or more pattern strings.
        Glob patterns may contain '*' (indicating zero or more non-slash
        characters) or '**' (indicating zero or more characters including slashes).
        Backslashes are not permitted. Empty pattern strings are ignored.
        """
        if isinstance(patterns, list): patterns = tuple(patterns) # make it hashable
        if patterns in GlobPatternSet.__patternCache:
            return GlobPatternSet.__patternCache[patterns]
        p = GlobPatternSet(patterns)
        GlobPatternSet.__patternCache[patterns] = p
        return p

    def __init__(self, patterns):
        """ Do not call the ``GlobPatternSet()`` constructor - use L{create} instead of constructing directly.
        """
        patterns = flatten(patterns)
        # values are [ELEMENTS, ORIGINAL_PATTERNS_INDEX]
        self.filepatterns = []
        self.dirpatterns = [] # with no trailing slash
        self.origpatterns = [] # original file+dir patterns
        self.hasStarStarPrefixPattern = False # set to true if there are any file or dir patterns starting **/
        # some booleans to save calculations later
        self.allfiles = False # ** i.e. match all files regardless
        self.alldirs = False
        for p in patterns:
            if not p: continue
            if '?' in p:
                # would require some more work (maybe regexes), but a rare case so don't bother
                raise BuildException('Invalid pattern ("?" is not supported at present): %s'%p)
            if '\\' in p:
                raise BuildException('Invalid pattern (must use forward slashes not backslashes): %s'%p)
            if p.endswith('**/*'): p = p[:-2] # normalize since this is pointless
            if '**/*/' in p:
                # we disallow this because it messes up our ability to decide how many path elements the ** should eat;
                # breaks cases like >>> antGlobMatch('**/*/f/*.e', 'f/PPP/a/b/c/d/e/PPP/f/foo.e') (ought to be True)
                # in theory we could solve that case by counting from the back, but the potential presence of
                # **s later in the pattern would make that a very inefficient operation.
                # There isn't really a compelling case for supporting this so disable it to avoid people
                # shooting themselves in the foot
                raise BuildException('Invalid pattern (**/* sequences are not permitted): %s'%p)
            self.origpatterns.append(p)
            if p.startswith('**'): self.hasStarStarPrefixPattern = True
            if p[-1] == '/':
                if p == '**/': self.alldirs = True
                elements = [self.__canonicalizePatternElement(e) for e in p[:-1].split('/')]
                self.dirpatterns.append([elements, len(self.origpatterns)-1])
            else:
                if p == '**': self.allfiles = True
                elements = [self.__canonicalizePatternElement(e) for e in p.split('/')]
                self.filepatterns.append([elements, len(self.origpatterns)-1])
            for e in elements:
                if '**' in e and e != '**':
                    raise BuildException('Invalid pattern (pattern elements containing "**" must not have any other characters): %s'%p)
        if self.allfiles: assert len(self.filepatterns)==1, 'No point specifying additional file patterns after adding **'
        if self.alldirs: assert len(self.dirpatterns)==1, 'No point specifying additional directory patterns after adding **/'
        self.nofiles = self.filepatterns == []
        self.nodirs = self.dirpatterns == []

    @staticmethod
    def __canonicalizePatternElement(element):
        # allow faster comparisons with these common strings
        if element == '*': return GlobPatternSet.STAR
        if element == '**': return GlobPatternSet.STARSTAR
        return element

    def __str__(self):
        """
        >>> str(GlobPatternSet.create(['**/a', '*/b/', '**/c']))
        "['**/a', '*/b/', '**/c']"
        """
        return str(self.origpatterns)

    def __repr__(self):
        """
        >>> repr(GlobPatternSet.create(['**/a', '*/b/', '**/c']))
        "GlobPatternSet['**/a', '*/b/', '**/c']"
        """
        return 'GlobPatternSet%s'%self.__str__()

    def getPathMatches(self, rootdir, filenames=None, dirnames=None, unusedPatternsTracker=None):
        """
        Check these patterns against one or more basenames (dir names and filenames)
        within a single root directory and return the list which match at least
        one pattern.

        Using this method is a lot more efficient than checking each file
        in a directory independently, especially when there are many files.

        @param rootdir: The parent of the file/dir names to be matched.
        Must use forward slashes not backslashes, and must end with a slash.
        Can be empty but not None.

        @param filenames: A list of base file names within the rootdir, to be
        matched. There must be no empty names in the list, but an empty/None list
        can be specified.
        No slash characters may be present in the names.

        @param dirnames: A list of base directory names within the rootdir, to be
        matched. There must be no empty names in the list, but an empty/None list
        can be specified.
        No slash characters may be present in the names, except optionally
        as a suffix.

        @param unusedPatternsTracker: Optionally specify an instance of
        `GlobUnusedPatternTracker` which will be notified of the patterns that
        are used, allow an error to be produced for any that are not used.

        @returns: If either filenames or dirnames is None then the result is a
        single list of basenames. If both filenames and dirnames are lists
        (even if empty) the result is a tuple (filenames, dirnames).
        Directory entries will have a trailing slash only if they did in the input
        dirnames list.
        """
        # this is an algorithm similar to ant's globbing... but not totally identical (to keep things simple), it doesn't support '?' for example
        # we also distinguish between file and dir patterns
        # we avoid using regexes for efficiency reasons
        operations = []
        if unusedPatternsTracker is None:
            unusedPatternsTrackerIndex = None
        else: # speed up lookups since this is frequent
            unusedPatternsTrackerIndex = unusedPatternsTracker._used
        if filenames is not None:
            fileresults = []
            if filenames != []:
                if self.allfiles: # special-case ** to make it fast
                    fileresults = filenames
                    if unusedPatternsTracker is not None: unusedPatternsTrackerIndex[self.filepatterns[0][1]] = True
                elif not self.nofiles:
                    operations.append((self.filepatterns, filenames, False, fileresults))
            results = fileresults
        if dirnames is not None:
            dirresults = []
            if dirnames != []:
                if self.alldirs: # special-case **/ to make it fast
                    dirresults = dirnames
                    if unusedPatternsTracker is not None: unusedPatternsTrackerIndex[self.dirpatterns[0][1]] = True
                elif not self.nodirs:
                    operations.append((self.dirpatterns, dirnames, True, dirresults))
            results = dirresults
        if dirnames is not None and filenames is not None: results = (fileresults, dirresults)
        if len(operations) == 0: return results

        # this is _very_ performance critical code. we do less validation than
        # normal to keep it fast - caller needs to pass in clean inputs
        if rootdir is None:
            rootdir = ''
        else:
            assert rootdir=='' or rootdir[-1]=='/', 'Root directory must end with a slash: %s'%rootdir
        if len(rootdir)>0:
            rootdir = rootdir[:-1].split('/')
        else:
            rootdir = []
        rootdirlen = len(rootdir)

        STAR = GlobPatternSet.STAR
        STARSTAR = GlobPatternSet.STARSTAR

        for (patternlist, basenames, isdir, thisresultlist) in operations:
            #if not basenames: continue
            assert '/' not in basenames[0], 'Slashes are not permitted in the base names passed to this function: %s'%basenames[0] # sanity check for correct usage

            # start by finding out which patterns match against basenames in this directory
            basenamepatterns = [] # patterns to match against basenames
            for (patternelements, origpatternindex) in patternlist:
                finalpattern = GlobPatternSet.__matchSinglePath(patternelements, rootdir, rootdirlen)
                if finalpattern is None: continue
                if finalpattern is STARSTAR: finalpattern = STAR #canonicalize further
                basenamepatterns.append( (finalpattern, origpatternindex) )
                if finalpattern is STAR: break # no point doing any others
            if len(basenamepatterns) == 0: continue

            if basenamepatterns[0][0] is STAR:
                #special-case this common case
                thisresultlist.extend(basenames)
                if unusedPatternsTracker is not None: unusedPatternsTrackerIndex[basenamepatterns[0][1]] = True
            else:
                for basename in basenames:
                    origbasename = basename
                    if isdir and (basename[-1] == '/'): basename = basename[:-1] # strip off optional dir suffix
                    for (basenamepattern, origpatternindex) in basenamepatterns:
                        if GlobPatternSet.__elementMatch(basenamepattern, basename):
                            # fix: append the original (possibly slash-suffixed) name,
                            # matching the documented contract and the fast paths
                            # above; origbasename was previously assigned but unused
                            thisresultlist.append(origbasename)
                            if unusedPatternsTracker is not None: unusedPatternsTrackerIndex[origpatternindex] = True
                            break
        return results

    @staticmethod
    def __elementMatch(elementPattern, element):
        # Returns true if a single path element matches a single pattern element.
        # NB: do this case sensitively, because that's what unix will do anyway
        if elementPattern is GlobPatternSet.STAR or elementPattern is GlobPatternSet.STARSTAR:
            return True
        # simple cases, efficient implementation
        star1 = elementPattern.find('*')
        if star1 == -1: return elementPattern==element
        star2 = elementPattern.find('*', star1+1)
        if star2 == -1: # copes with: a* *a and a*b
            # fix: require element to be long enough to hold both prefix and
            # suffix without overlap (previously 'ab*ba' wrongly matched 'aba')
            return (len(element) >= len(elementPattern)-1
                and element.startswith(elementPattern[:star1])
                and element.endswith(elementPattern[star1+1:]))
        # more complex cases will have to be less efficient
        elementPattern = elementPattern.split('*')
        # fix: anchor the end as well - re.match only anchors the start, so
        # e.g. 'a*b*c' previously also matched 'abcX'
        return re.match('.*'.join(map(re.escape, elementPattern))+'$', element)

    @staticmethod
    def __matchSinglePath(pattern, rootdir, lenrootdir):
        # matches all elements of pattern against rootdir/basename where basename
        # could be anything. Returns None if no match is possible, or the pattern element
        # string that basenames must match or .STAR/STARSTAR if any.
        #dbg('matching: %s, %s, %s', pattern, rootdir, lenrootdir)
        lenpattern = len(pattern)
        STARSTAR = GlobPatternSet.STARSTAR
        # nb: pattern is a list of pattern elements; rootdir is rstripped and split by /
        e = y = 0
        while e < lenpattern-1 and y < lenrootdir:
            patternelement = pattern[e]
            #dbg('loop patternelement: %s, %s, %s, %s', patternelement, e, rootdir[y], y)
            if patternelement is STARSTAR:
                if e == lenpattern-2: # this is just an optimization for a common case **/pattern
                    # all that remains is the pattern to match against the basename
                    return pattern[lenpattern-1]
                else:
                    # consume as much as we need to of ** until we get another match
                    # be completely greedy; in theory we could try to ignore matches that are too early, i.e. don't leave
                    # enough remaining elements to possibly match (e.g. antGlobMatch('**/PPP/f/*.e', 'f/PPP/a/b/c/d/e/PPP/f/foo.e') )
                    # but in practice that gets too hard to handle in case there are further **s later in the pattern.
                    # don't consume the final pattern element since we'll need that to match the basename
                    e += 1
                    patternelement = pattern[e]
                    #dbg('INNER loop patternelement: %s, %s, %s, %s', patternelement, e, rootdir[y], y)
                    while y < lenrootdir and not GlobPatternSet.__elementMatch(patternelement, rootdir[y]):
                        y += 1
                    #dbg('INNER loop done: %s, %s, %s', patternelement, e, y)
            else:
                #dbg('normal patternelement: %s, %s, %s, %s', patternelement, e, rootdir[y], y)
                if not GlobPatternSet.__elementMatch(patternelement, rootdir[y]):
                    return None
            # advance past the element just matched (for ** this is the
            # element that terminated the greedy consumption)
            e += 1
            y += 1
        if y == lenrootdir:
            # we've used up all the rootdir
            while e < lenpattern-1 and pattern[e] is STARSTAR:
                e += 1
            if e == lenpattern-1:
                return pattern[-1]
            if e == lenpattern-2:
                if pattern[-1] is STARSTAR:
                    return pattern[-2]
            return None
        # we have some rootdir left over, only ok if we have a final **
        if e == lenpattern-1 and pattern[-1] is STARSTAR:
            return STARSTAR
        return None

    @staticmethod
    def __dirCouldMatchIncludePattern(includePattern, isdir, d):
        # Cheap conservative check: could directory d (or a descendant)
        # possibly match includePattern?
        # nb: we already checked it doesn't start with ** before calling this function
        d = d.split('/')
        if isdir:
            p = includePattern
        else:
            p = includePattern[:-1] # strip off trailing filename
        if GlobPatternSet.STARSTAR not in includePattern and len(d) > len(p):
            # don't go into a dir structure that's more deeply nested than the pattern
            #log.debug('   maybe vetoing %s based on counts : %s', d, p)
            return False
        i = 0
        while i < len(d) and i < len(p) and p[i]:
            if GlobPatternSet.STAR in p[i]: return True # any kind of wildcard and we give up trying to match
            if d[i] != p[i]:
                #log.debug('   maybe vetoing %s due to not matching %s', d, includePattern)
                return False
            i += 1
        return True

    def removeUnmatchableDirectories(self, rootdir, dirnames):
        """
        Modifies the specifies list of dirnames, removing any which cannot possibly
        match this include pattern. This is a useful optimization for os.walk.

        As this function is intended as a quick optimization it may leave in some
        dirs that could not match, but will definitely not remove any that could.

        @param rootdir: The parent of the file/dir names to be matched.
        Must use forward slashes not backslashes, and must end with a slash.
        Can be empty but not None.

        @param dirnames: a list of directory basenames contained in rootdir

        @returns: the same dirnames instance passed in (with any modifications made).

        >>> GlobPatternSet.create(['**']).removeUnmatchableDirectories('abc/def/', ['dir1', 'dir2'])
        ['dir1', 'dir2']

        >>> GlobPatternSet.create(['**/']).removeUnmatchableDirectories('abc/def/', ['dir1', 'dir2'])
        ['dir1', 'dir2']

        >>> GlobPatternSet.create(['**/foo']).removeUnmatchableDirectories('abc/def/', ['dir1', 'dir2'])
        ['dir1', 'dir2']

        >>> GlobPatternSet.create(['a/b/c/d/**']).removeUnmatchableDirectories('abc/def/', ['dir1', 'dir2'])
        []

        >>> GlobPatternSet.create(['abc/def/dir1/d/**']).removeUnmatchableDirectories('abc/def/', ['dir1', 'dir2'])
        ['dir1']

        >>> GlobPatternSet.create(['abc/def/dir1/d/**/']).removeUnmatchableDirectories('abc/def/', ['dir1', 'dir2'])
        ['dir1']
        """
        # quick optimizations where we do nothing - everything could match a global wildcard
        if self.allfiles or self.alldirs or self.hasStarStarPrefixPattern: return dirnames
        assert rootdir=='' or rootdir[-1]=='/', 'Root directory must end with a slash: %s'%rootdir
        dirnames[:] = [d for d in dirnames if
            any(self.__dirCouldMatchIncludePattern(e, False, rootdir+d) for e, _ in self.filepatterns) or
            any(self.__dirCouldMatchIncludePattern(e, True, rootdir+d) for e, _ in self.dirpatterns)
            ]
        return dirnames

    def matches(self, path):
        """
        Returns True if the specified path matches any of the patterns in this set
        or False if not.

        Note that L{getPathMatches} is considerably more efficient than this method
        when there are several paths to be matched in the same directory.

        @param path: A path string. Must not be empty, must not contains
        backslashes, and must end with a slash if it is a directory.
        """
        if path[-1]=='/':
            path = path[:-1].split('/')
            return self.getPathMatches('/'.join(path[:-1])+'/' if len(path)>1 else None, dirnames=[path[-1]]) != []
        else:
            path = path.split('/')
            return self.getPathMatches('/'.join(path[:-1])+'/' if len(path)>1 else None, filenames=[path[-1]]) != []
class GlobUnusedPatternTracker(object):
    """
    Records which glob patterns have been involved in at least one match,
    so callers can warn about or reject patterns that never matched.

    This object should not be shared between threads.
    """

    def __init__(self, patternSet):
        self._patterns = patternSet.origpatterns
        # One usage flag per pattern, all initially unused.
        self._used = [False] * len(self._patterns)

    def getUnusedPatterns(self):
        """
        Return the list of patterns that have not been used so far.
        """
        return [p for p, used in zip(self._patterns, self._used) if not used]
def antGlobMatch(pattern, path):
    """
    Matches a path against an ant-style glob pattern which may contain
    ``*`` or ``**``.

    If the path is a directory, it must end with a slash.

    Patterns ending with a ``/`` can only match directories, and patterns without
    can only match files.

    >>> antGlobMatch('', '')
    True
    >>> antGlobMatch('*', 'a')
    True
    >>> antGlobMatch('*.b', 'a')
    False
    >>> antGlobMatch('*.b', 'a.b')
    True
    >>> antGlobMatch('b.*', 'b')
    False
    >>> antGlobMatch('b.*', 'b.a')
    True
    >>> antGlobMatch('a*b', 'ab')
    True
    >>> antGlobMatch('a*b', '')
    False
    >>> antGlobMatch('a*b', 'axxxb')
    True
    >>> antGlobMatch('a*b', 'axxx')
    False
    >>> antGlobMatch('a*b', 'xxxb')
    False
    >>> antGlobMatch('a/b.*/c', 'a/b.x/c')
    True
    >>> antGlobMatch('a/b.*/c', 'a/b/c')
    False
    >>> antGlobMatch('**', 'a')
    True
    >>> antGlobMatch('**', 'a/b/c')
    True
    >>> antGlobMatch('**/c', 'a/b/c')
    True
    >>> antGlobMatch('**/*c', 'c')
    True
    >>> antGlobMatch('**/b/c', 'a/b/c')
    True
    >>> antGlobMatch('**/d', 'a/b/c')
    False
    >>> antGlobMatch('a/**/b', 'a/b/c')
    False
    >>> antGlobMatch('a/b/**', 'a/b/c/d')
    True
    >>> antGlobMatch('a/b/**/*', 'a/b/c/d')
    True
    >>> antGlobMatch('a/**/b', 'a/b')
    True
    >>> antGlobMatch('a/b/**', 'a/b')
    True
    >>> antGlobMatch('a/b', 'a/b/c/d')
    False
    >>> antGlobMatch('a/**/d/e', 'a/b/c/d/e')
    True
    >>> antGlobMatch('*x/**/', 'x/a/b/')
    True
    >>> antGlobMatch('*x/**', 'x/a/b')
    True
    >>> antGlobMatch('*x/**/', 'x/a/b')
    False
    >>> antGlobMatch('*x/**', 'x/a/b/')
    False
    >>> antGlobMatch('*[[*', '[[') and antGlobMatch('*[[*', 'xx[[') and antGlobMatch('*ab*', 'abxx')
    True
    >>> antGlobMatch('*[]*', '[')
    False
    >>> antGlobMatch('aa*.b*c*/', 'aa.bc/') and antGlobMatch('aa*.b*c*/', 'aaxxx.bxcxx/')
    True
    >>> antGlobMatch('aa*b*c*', 'xaabc')
    False
    >>> antGlobMatch('aa*.*c*', 'aaYc')
    False
    >>> antGlobMatch('**/*.x', 'a/b.x/c.x')
    True
    >>> antGlobMatch('**/*.x', 'a/b.x/c.x/d.x')
    True
    >>> antGlobMatch('**/*.x', 'a/c.x/y/d.x')
    True
    >>> antGlobMatch('**/*.x', 'a/y/c.x/d.x')
    True
    >>> antGlobMatch('**/**/*.x', 'a/y/c.x/d.x')
    True
    >>> antGlobMatch('**/*.x/', 'a/y/c.x/d.x/')
    True
    >>> antGlobMatch('a/b/c/d/e/**', 'a/b/c')
    False
    >>> antGlobMatch('**/PPP/**/*.e', 'a/b/c/d/e/f/PPP/g/h/i/foo.e')
    True
    >>> antGlobMatch('**/PPP/**/*.e', 'PPP/g/h/i/foo.e')
    True
    >>> antGlobMatch('**/PPP/**/*.e', 'a/b/c/d/e/f/PPP/foo.e')
    True
    >>> antGlobMatch('**/PPP/**/*.e', 'a/b/PPP/g/h/i/j/k/l/m/n/o/foo.e')
    True
    >>> antGlobMatch('**/PPP/**/*.e', 'a/b/c/d/e/f/g/h/i/foo.e')
    False
    >>> antGlobMatch('**/PPP/**/**/**/PPP/**/**/*.e', 'f/PPP/x/PPP/foo.e')
    True
    >>> antGlobMatch('**/PPP/f/*.e', 'f/PPP/a/b/c/d/e/PPP/f/foo.e') # this one is debatable (could decide not to match first PPP to allow second to match, but in general hard to reliably detect that condition given any later **s would invalidate any attempt to do it by counting); this behaviour is simpler to describe and more efficient to implement
    False
    >>> antGlobMatch('*/**/f/*.e', 'f/PPP/a/b/c/d/e/PPP/f/foo.e')
    True
    """
    # Historical quirk: an empty path matches only the empty pattern.
    if not path: return not pattern # not useful or ideal, but for compatibility with older users keep this behaviour the same
    # Delegate to GlobPatternSet, which compiles the pattern and implements
    # the actual * / ** matching semantics used by the doctests above.
    return GlobPatternSet.create(pattern).matches(path)
"repo_name": "xpybuild/xpybuild",
"path": "xpybuild/utils/antglob.py",
"copies": "1",
"size": "21342",
"license": "apache-2.0",
"hash": 7064831275719730000,
"line_mean": 32.9856687898,
"line_max": 347,
"alpha_frac": 0.6727110861,
"autogenerated": false,
"ratio": 3.274317275237803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8871338868436986,
"avg_score": 0.11513789858016339,
"num_lines": 628
} |
from __future__ import absolute_import, print_function
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
import logging
import struct
import threading
import sys
NETWORK_KEY= [0xb9, 0xa5, 0x21, 0xfb, 0xbd, 0x72, 0xc3, 0x45]
def on_data(data):
    """Print the current heart rate in place on stdout.

    Writes the reading, then emits backspaces so the next reading
    overwrites it instead of scrolling the terminal.

    data -- ANT broadcast payload; byte 7 carries the heart rate
            (ANT+ HRM broadcast page layout -- confirm against device profile).
    """
    # Bug fix: "Hearthrate" typo in the user-visible string (the sibling
    # scan example spells it "Heartrate"); also use the extracted variable.
    heartrate = data[7]
    string = "Heartrate: " + str(heartrate) + " "
    sys.stdout.write(string)
    sys.stdout.flush()
    # Rewind the cursor so the next sample overwrites this one.
    sys.stdout.write("\b" * len(string))
def main():
    """Open an ANT+ receive channel and stream heart-rate readings to stdout.

    Blocks in node.start() until interrupted; the node is always stopped on exit.
    """
    # logging.basicConfig()
    node = Node()
    # ANT+ network key on network 0.
    node.set_network_key(0x00, NETWORK_KEY)
    channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel.on_broadcast_data = on_data
    channel.on_burst_data = on_data
    # NOTE(review): 8070 counts and RF freq 57 look like the standard ANT+
    # HRM channel period (~4.06 Hz) and 2457 MHz -- confirm against the
    # ANT+ HRM device profile. Device type 120 is presumably "heart rate
    # monitor"; 0s are wildcards for device number / transmission type.
    channel.set_period(8070)
    channel.set_search_timeout(12)
    channel.set_rf_freq(57)
    channel.set_id(0, 120, 0)
    try:
        channel.open()
        node.start()
    finally:
        # Always release the ANT node, even if open()/start() raised.
        node.stop()
if __name__ == "__main__":
    main()
| {
"repo_name": "jforge/openant",
"path": "examples/hearth_rate_monitor.py",
"copies": "1",
"size": "2128",
"license": "mit",
"hash": 5815614907119842000,
"line_mean": 29.4,
"line_max": 77,
"alpha_frac": 0.7161654135,
"autogenerated": false,
"ratio": 3.643835616438356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9770489167845848,
"avg_score": 0.01790237241850145,
"num_lines": 70
} |
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
import logging
import struct
import threading
import sys
NETWORK_KEY = [0xB9, 0xA5, 0x21, 0xFB, 0xBD, 0x72, 0xC3, 0x45]
def on_data(data):
    """Print the heart rate in place and, when extended message bytes are
    present, report the transmitting device's number and type.

    data -- ANT broadcast payload; byte 7 carries the heart rate, and
            extended messages append device info after the 8 payload bytes
            (presumed ANT+ extended format -- confirm against spec).
    """
    heartrate = data[7]
    string = "Heartrate: " + str(heartrate) + " [BPM]"
    sys.stdout.write(string)
    sys.stdout.flush()
    sys.stdout.write("\b" * len(string))
    # Bug fix: the device-info block reads data[9..11], so it needs at least
    # 12 bytes; the old guard (len > 8) raised IndexError for lengths 9-11.
    if len(data) > 11:
        print(data)
        deviceNumberLSB = data[9]
        deviceNumberMSB = data[10]
        deviceNumber = "{}".format(deviceNumberLSB + (deviceNumberMSB << 8))
        deviceType = "{}".format(data[11])
        print("New Device Found: %s of type %s" % (deviceNumber, deviceType))
def main():
    """Scan for ANT+ broadcasters (heart-rate wildcard search) with extended
    messages enabled, logging protocol traffic to example.log."""
    logging.basicConfig(filename="example.log", level=logging.DEBUG)
    node = Node()
    node.set_network_key(0x00, NETWORK_KEY)
    # NOTE(review): the extra 0x00, 0x01 arguments are presumably the network
    # number and an extended-assignment flag -- confirm against openant's
    # new_channel signature.
    channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE, 0x00, 0x01)
    channel.on_broadcast_data = on_data
    channel.on_burst_data = on_data
    channel.on_acknowledge = on_data
    # Wildcard id (device type 120 is presumably ANT+ HRM -- confirm);
    # extended messages carry the broadcaster's identity back to on_data.
    channel.set_id(0, 120, 0)
    channel.enable_extended_messages(1)
    # 0xFF = search indefinitely.
    channel.set_search_timeout(0xFF)
    channel.set_period(8070)
    channel.set_rf_freq(57)
    try:
        channel.open()
        node.start()
    finally:
        # Always release the ANT node, even if open()/start() raised.
        node.stop()
if __name__ == "__main__":
    main()
| {
"repo_name": "Tigge/openant",
"path": "examples/scan.py",
"copies": "1",
"size": "2513",
"license": "mit",
"hash": -525958061769856800,
"line_mean": 31.2179487179,
"line_max": 78,
"alpha_frac": 0.7027457222,
"autogenerated": false,
"ratio": 3.569602272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9772347994927273,
"avg_score": 0,
"num_lines": 78
} |
'''(Anti-)Affinity Server Groups Pool plugin for Cloudify's OpenStack plugin'''
from os import sep, path
import filelock
import json
from tempfile import gettempdir
from cloudify import ctx, manager
from cloudify.exceptions import NonRecoverableError, RecoverableError
from cloudify.decorators import operation
from openstack_plugin_common import with_nova_client
# Metadata
__author__ = "Joshua Cornutt"
__copyright__ = "Copyright 2015, Gigaspaces"
__license__ = "Proprietary"
__maintainer__ = "Joshua Cornutt"
__email__ = "josh@gigaspaces.com"
__status__ = "Development"
# Shared Server Group bookkeeping file: lives in the system temp directory so
# every deployment on this manager reads/writes the same JSON data.
CFG_FILE_PATH = gettempdir() + sep + 'affinityplugin.json'
# Cross-process lock guarding all access to CFG_FILE_PATH.
LOCK = filelock.FileLock('cloudify_affinity_plugin')
LOCK.timeout = 60  # seconds to wait before filelock.Timeout is raised
def discover_dependent_hosts():
    '''Find any dependent node hosts of the Facilitator module.

    Returns the host (VM) node instances of every node in this deployment
    that declares a depends_on relationship targeting this instance.
    '''
    hosts = []
    # Use the REST API to enumerate every node instance in this deployment.
    rest = manager.get_rest_client()
    instances = rest.node_instances.list(deployment_id=ctx.deployment.id)
    for instance in instances:
        node = manager.get_node_instance(instance.id)
        for rel in node.relationships:
            ctx.logger.info('Relationship: {0}' . format(rel))
            # A dependent node is one whose depends_on relationship points
            # at this Facilitator instance; collect its host VM.
            if (rel.get('type') == 'cloudify.relationships.depends_on'
                    and rel.get('target_id') == ctx.instance.id):
                hosts.append(manager.get_node_instance(node.host_id))
    return hosts
# Returns a list of OpenStack Server Groups
# Guaranteed not to be an empty array
def get_openstack_server_groups(nova_client):
    '''Queries OpenStack for a list of all Server Groups.

    :param nova_client: authenticated Nova client
    :returns: non-empty list of dicts with keys ``id`` (OpenStack Server
        Group ID), ``deployment`` (Cloudify deployment using it, or None)
        and ``removed`` (True once the group no longer exists in OpenStack)
    :raises NonRecoverableError: if Server Groups cannot be listed, or the
        listing is empty
    '''
    _data = []
    # NOTE(review): hard-coded test stub -- everything below this return is
    # currently unreachable. Remove the block to run against real OpenStack.
    if True:
        return [{
            'id': 'test-1-1-1-1',
            'deployment': None,
            'removed': False
        }, {
            'id': 'test-2-2-2-2',
            'deployment': None,
            'removed': False
        }, {
            'id': 'test-3-3-3-3',
            'deployment': None,
            'removed': False
        }]
    try:
        # This will fail when executing against an OpenStack instance that
        # does NOT support, or allow, server_groups.list()
        groups = nova_client.server_groups.list()
    except Exception as ex:
        # Bug fix: the error object was previously constructed but never
        # raised (narrowed from BaseException so Ctrl-C still propagates).
        raise NonRecoverableError('nova.server_groups.list() is not allowed '
                                  '(please update the policy.json file to allow '
                                  'access to this call) [Exception: {0}]'
                                  . format(ex))
    if not groups:
        # Bug fix: previously the error was discarded and an empty list was
        # silently returned, violating the documented guarantee above.
        raise NonRecoverableError('nova.server_groups.list() returned '
                                  'an empty result set. No Server Groups '
                                  'are available for use.')
    # Iterate through each Server Group returned
    for group in groups:
        ctx.logger.info('group: {0}' . format(group))
        ctx.logger.info(' id: {0}' . format(group.id))
        ctx.logger.info(' name: {0}' . format(group.name))
        ctx.logger.info(' members: {0}' . format(group.members))
        ctx.logger.info(' policies: {0}' . format(group.policies))
        _data.append({
            'id': group.id,      # OpenStack ID of the Server Group
            'deployment': None,  # Cloudify deployment using the Server Group
            'removed': False     # True once the group vanished from OpenStack
        })
    return _data
def server_group_exists_in_config(_osid, _cfg):
    '''Checks if an OpenStack Server Group (_osid) exists in the
    configuration data (_cfg).

    Returns the (possibly empty) list of matching entries, so callers can
    use the result as a truth value.
    '''
    found = []
    for entry in _cfg:
        if entry.get('id') == _osid:
            found.append(entry)
    return found
# This is NOT a thread safe operation.
# Use external locking.
def get_config_data():
    '''Load our configuration data (JSON).

    Not thread safe -- callers must hold the external lock.
    '''
    with open(CFG_FILE_PATH, 'r') as handle:
        raw = handle.read()
    return json.loads(raw)
# This is NOT a thread safe operation.
# Use external locking.
def overwrite_config_data(_cfg):
    '''Overwrite our configuration data (JSON).

    Not thread safe -- callers must hold the external lock.
    '''
    serialized = json.dumps(_cfg)
    with open(CFG_FILE_PATH, 'w') as handle:
        handle.write(serialized)
def print_config_data():
    '''Prints the current configuration data to ctx.logger'''
    for entry in get_config_data():
        values = (entry.get('id'), entry.get('deployment'), entry.get('removed'))
        ctx.logger.debug('Configuration data:\n'
                         'id: {0}, deployment: {1}, removed: {2}'
                         . format(*values))
# This is NOT a thread safe operation.
# Use external locking.
def create_config_data_if_needed(_os_data):
    '''Create, or update, our configuration file.

    Takes the live OpenStack Server Groups (_os_data) and either dumps them
    to file (create) or reconciles the existing file with OpenStack:
    groups OpenStack now has are added, and groups that vanished are
    flagged with removed=True.

    Not thread safe -- callers must hold the external lock.
    '''
    _cfg = _os_data
    _cfg_exists = path.exists(CFG_FILE_PATH)
    ctx.logger.info('{0} configuration file: {1}' . format(
        'Updating' if _cfg_exists else 'Creating',
        CFG_FILE_PATH
    ))
    if _cfg_exists:
        # The file exists: read it and resolve inconsistencies.
        _cfg = get_config_data()
        for group in _os_data:
            # Config is outdated: add in the new Server Group
            if not server_group_exists_in_config(group.get('id'), _cfg):
                ctx.logger.info('Adding {0} to the configuration file'
                                . format(group.get('id')))
                _cfg.append({
                    'id': group.get('id'),
                    'deployment': None,
                    'removed': False
                })
        for k, group in enumerate(_cfg):
            # Config has Server Groups that no longer exist in OpenStack
            if not server_group_exists_in_config(group.get('id'), _os_data):
                ctx.logger.info('Marking {0} for removal from the '
                                'configuration file'
                                . format(group.get('id')))
                # Bug fix: entries are dicts -- the old code set a (useless)
                # attribute on the dict object, raising AttributeError.
                _cfg[k]['removed'] = True
    # Write our changes to our configuration file
    overwrite_config_data(_cfg)
# This is NOT a thread safe operation.
# Use external locking.
def acquire_server_group():
    '''Searches for a free, clean Server Group for this deployment to use.

    Marks the first available entry as owned by this deployment, persists
    the change, and returns the entry dict. Returns None when no Server
    Group is both unclaimed and not flagged as removed.

    Not thread safe -- callers must hold the external lock.
    '''
    _cfg = get_config_data()
    for k, group in enumerate(_cfg):
        # Usable = not claimed by any deployment and still alive in OpenStack.
        if (not group.get('deployment')) and (not group.get('removed')):
            # Let other deployments know we are using this Server Group.
            _cfg[k]['deployment'] = ctx.deployment.id
            # Consistency fix: persist via the shared helper instead of
            # duplicating the file-writing logic inline.
            overwrite_config_data(_cfg)
            # group is the same dict as _cfg[k], so the returned entry
            # carries the deployment assignment.
            return group
    return None
def release_server_groups():
    '''Releases every Server Group held by the active deployment.

    Also drops entries flagged as removed (or corrupted entries missing the
    flag entirely) from the configuration data.

    Not thread safe -- callers must hold the external lock.
    '''
    _cfg = get_config_data()
    # Bug fix: the old predicate `x.get('removed', True)` KEPT the
    # removed/corrupted entries and discarded the healthy ones -- the exact
    # opposite of the stated intent. Keep only entries explicitly not
    # removed; a missing key counts as corrupted and is dropped.
    _cfg[:] = [x for x in _cfg if not x.get('removed', True)]
    # Release any Server Groups held by the current deployment.
    for k, group in enumerate(_cfg):
        if group.get('deployment') == ctx.deployment.id:
            _cfg[k]['deployment'] = None
    # Replace the configuration file with the new configuration data.
    overwrite_config_data(_cfg)
# Entry point for the Facilitator
@operation
@with_nova_client
def configure(nova_client, **kwargs):
    '''Entry point for the Facilitator module.

    Reconciles the Server Group bookkeeping file with OpenStack, acquires a
    free Server Group for this deployment, and tags dependent hosts with it.

    :raises NonRecoverableError: if no Server Group is available
    :raises RecoverableError: if the bookkeeping file lock times out
    '''
    del kwargs
    ctx.logger.info('Querying OpenStack for (Anti-)Affinity Server Groups')
    _os_data = get_openstack_server_groups(nova_client)
    try:
        with LOCK:
            ctx.logger.info('Creating / Updating configuration data')
            create_config_data_if_needed(_os_data)
            print_config_data()
            ctx.logger.info('Selecting an OpenStack Server Group to use')
            selected_group = acquire_server_group()
            print_config_data()
            if not selected_group:
                # Bug fix: the error was previously constructed but never
                # raised, so the workflow carried on with no group.
                raise NonRecoverableError('There are no available OpenStack '
                                          'Server Groups for use')
            ctx.logger.info('Using OpenStack Server Group {0}'
                            . format(selected_group.get('id')))
            # Iterate through each dependent node instance and
            # update their runtime_properties
            hosts = discover_dependent_hosts()
            for host in hosts:
                ctx.logger.info('Assigning (Anti-)Affinity Group '
                                '{0} to host {1}'
                                . format(selected_group.get('id'), host.id))
                # NOTE(review): the actual assignment is still disabled.
                #host.runtime_properties['server_group'] = selected_group.get('id')
    except filelock.Timeout:
        # Bug fix: previously constructed but never raised, silently
        # swallowing lock timeouts.
        raise RecoverableError('Timeout waiting for OpenStack Server Groups '
                               'configuration file to be released for use')
@operation
def delete(**kwargs):
    '''Cloudify "delete" lifecycle operation.

    Releases every Server Group held by this deployment.

    :raises RecoverableError: if the bookkeeping file lock times out
    '''
    del kwargs
    try:
        with LOCK:
            ctx.logger.info('Releasing (Anti-)Affinity Group(s) '
                            'from deployment {0}'
                            . format(ctx.deployment.id))
            release_server_groups()
    except filelock.Timeout:
        # Bug fix: the error was previously constructed but never raised,
        # silently swallowing lock timeouts.
        raise RecoverableError('Timeout waiting for OpenStack Server Groups '
                               'configuration file to be released for use')
| {
"repo_name": "01000101/cloudify-test-app",
"path": "affinity/plugins/affinity/plugin/tasks.py",
"copies": "1",
"size": "10206",
"license": "mit",
"hash": 1579324263875012400,
"line_mean": 34.4375,
"line_max": 79,
"alpha_frac": 0.589457182,
"autogenerated": false,
"ratio": 4.205191594561187,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008849657287157288,
"num_lines": 288
} |
#antialias graph
import getsdata
# Image side length of the stock "picture" fed to the CNN (from the dataset).
DIM = getsdata.DIM
TRAIN_DIR = "/home/banditcat/tfmuse/train/"   # checkpoint directory
TRAIN_FILE = "t"                              # checkpoint file prefix
STOCK_DIR = getsdata.STOCK_DIR
# CNN hyperparameters.
PATCH_SIZE = 5          # conv kernel side
L1_FEATURES = 32        # feature maps in conv layer 1
L2_FEATURES = 64        # feature maps in conv layer 2
DENSE_FEATURES = 1024   # units in the fully-connected layer
BATCH_SIZE = 50
STEPS = 15              # training runs STEPS * 1000 iterations
# Trading-strategy knobs.
NUM_STOCKS = 3                  # max positions held per day
GOOD_TICKER_THRESHHOLD = 0      # predicted point gain above this = buy signal
BAD_TICKER_THRESHHOLD = -1      # predicted point loss below this = short signal
POINTS_CAP = 0                  # <=0 disables prediction-weighted sizing
VOLUME_PRICE_MINIMUM = 20000 / NUM_STOCKS   # liquidity floor (volume * price)
import numpy
import math
import datetime
import calendar
import tensorflow as tf
import matplotlib.pyplot as mpl
# Interactive TkAgg backend so the figures below display without blocking.
mpl.rcParams['backend'] = 'TkAgg'
mpl.rcParams['interactive'] = 'True'
def weight_variable(shape):
    """Create a weight Variable initialized from a truncated normal (stddev 0.01)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01))
def bias_variable(shape):
    """Create a bias Variable initialized to the constant 0.01."""
    return tf.Variable(tf.constant(0.01, shape=shape))
def conv2d(x, W):
    """2-D convolution with unit strides and SAME (zero) padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves each spatial dim)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
#vars
# Placeholders: flattened DIM x DIM stock image, one-hot label over DIM price
# buckets, plus per-sample metadata used by the trading signals below.
x = tf.placeholder( tf.float32, [ None, DIM * DIM ] )
y_ = tf.placeholder( tf.float32, [ None, DIM ] )
lasts = tf.placeholder( tf.int64, [ None ] )      # last known price bucket
prices = tf.placeholder( tf.float32, [ None ] )   # current share price
volumes = tf.placeholder( tf.float32, [ None ] )  # current trade volume
#Conv 1
W_conv1 = weight_variable( [ PATCH_SIZE, PATCH_SIZE, 1, L1_FEATURES ] )
b_conv1 = bias_variable( [ L1_FEATURES ] )
x_image = tf.reshape(x, [ -1, DIM, DIM, 1 ])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
#Conv 2
W_conv2 = weight_variable( [ PATCH_SIZE, PATCH_SIZE, L1_FEATURES, L2_FEATURES ])
b_conv2 = bias_variable( [ L2_FEATURES ] )
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Dense layer
# Two 2x2 poolings shrink each spatial dim by 4, hence DIM // 4 below.
W_fc1 = weight_variable( [ ( DIM // 4 ) * ( DIM // 4 ) * L2_FEATURES, DENSE_FEATURES ] )
b_fc1 = bias_variable( [ DENSE_FEATURES ] )
h_pool2_flat = tf.reshape( h_pool2, [-1, ( DIM // 4 ) * ( DIM // 4 ) * L2_FEATURES ] )
h_fc1 = tf.nn.relu( tf.matmul( h_pool2_flat, W_fc1 ) + b_fc1 )
#Dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#Softmax
W_fc2 = weight_variable( [ DENSE_FEATURES, DIM ] )
b_fc2 = bias_variable( [ DIM ] )
y_conv = tf.nn.softmax( tf.matmul( h_fc1_drop, W_fc2 ) + b_fc2 )
#model
# NOTE(review): manual softmax + log cross-entropy is numerically fragile
# (log(0) when a softmax output underflows) -- legacy-TF style kept as-is.
cross_entropy = -tf.reduce_sum( y_ * tf.log( y_conv ) )
train_step = tf.train.AdamOptimizer( 1e-4 ).minimize( cross_entropy )
# Predicted and actual price buckets; accuracy is exact bucket agreement.
score = tf.argmax( y_conv, 1 )
actual_score = tf.argmax( y_, 1 )
correct_prediction = tf.equal( score, actual_score )
accuracy = tf.reduce_mean( tf.cast( correct_prediction, tf.float32 ) )
# Point moves relative to the last known bucket.
predictions = score - lasts
actuals = actual_score - lasts
# Trading signals: predicted rise/fall beyond the thresholds, intersected
# with the liquidity floor (volume * price).
growth_bool = tf.greater( predictions, tf.cast( tf.fill( tf.shape( predictions ), GOOD_TICKER_THRESHHOLD ), tf.int64 ) )
shrink_bool = tf.less( predictions, tf.cast( tf.fill( tf.shape( predictions ), BAD_TICKER_THRESHHOLD ), tf.int64 ) )
vol_price_bool = tf.greater( volumes * prices, VOLUME_PRICE_MINIMUM )
growth_combined_bool = tf.cast( tf.cast( growth_bool, tf.float32 ) * tf.cast( vol_price_bool, tf.float32 ), tf.bool )
growth_tickers = tf.reshape( tf.where( growth_combined_bool ), [ -1 ] )
shrink_combined_bool = tf.cast( tf.cast( shrink_bool, tf.float32 ) * tf.cast( vol_price_bool, tf.float32 ), tf.bool )
shrink_tickers = tf.reshape( tf.where( shrink_combined_bool ), [ -1 ] )
def get_imc1():
    """Conv-1 weights plus biases flattened to 2-D for imshow visualization."""
    combined = W_conv1 + b_conv1
    return tf.reshape(combined, [-1, L1_FEATURES * PATCH_SIZE])
def get_imc2():
    """Conv-2 weights plus biases flattened to 2-D for imshow visualization."""
    combined = W_conv2 + b_conv2
    return tf.reshape(combined, [-1, L2_FEATURES * PATCH_SIZE])
def get_imdl():
    """Dense-layer weights plus biases flattened to 2-D for imshow visualization."""
    combined = W_fc1 + b_fc1
    return tf.reshape(combined, [-1, DENSE_FEATURES])
def get_imsm():
    """Softmax-layer weights plus biases flattened to 2-D for imshow visualization.

    NOTE(review): reshapes by L1_FEATURES rather than DIM -- presumably just a
    display width choice; verify the intended layout.
    """
    combined = W_fc2 + b_fc2
    return tf.reshape(combined, [-1, L1_FEATURES])
# --- Train (or restore) the model -----------------------------------------
saver = tf.train.Saver()
sess = tf.Session()
chkpt = tf.train.latest_checkpoint( TRAIN_DIR )
if chkpt != None:
    # Resume from the latest checkpoint instead of retraining.
    print( "Loading", chkpt )
    saver.restore( sess, chkpt )
else:
    mnist = getsdata.read_data_sets( STOCK_DIR )
    # NOTE(review): initialize_all_variables() is the legacy TF 0.x API.
    sess.run( tf.initialize_all_variables() )
    for i in range( STEPS * 1000 + 1 ):
        batch = mnist.train.next_batch( BATCH_SIZE )
        sess.run( train_step, feed_dict = { x: batch[ 0 ], y_: batch[ 1 ], keep_prob: 0.25 } )
        if i % 100 == 0:
            train_accuracy = accuracy.eval( session=sess, feed_dict = {
                x:batch[0], y_: batch[1], keep_prob: 1.0})
            print( "step %d, training accuracy %g epoch %d" % ( i, train_accuracy, mnist.train.epochs_completed ) )
        if i % 1000 == 0:
            # Periodically report held-out accuracy and checkpoint.
            print( "Test accuracy %g" % accuracy.eval( session = sess, feed_dict = {
                x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0 } ) )
            saver.save( sess, TRAIN_DIR + TRAIN_FILE, global_step = i )
# --- Visualize the learned weights per layer -------------------------------
imc1 = get_imc1()
imc2 = get_imc2()
imdl = get_imdl()
imsm = get_imsm()
mpl.figure( 1, figsize = ( 5, 5 ), dpi = 129.582774711 )
mpl.subplot( 411 )
mpl.imshow( imc1.eval(session=sess), interpolation='nearest', cmap="prism")
mpl.axis("off")
mpl.subplot( 412 )
mpl.imshow( imc2.eval(session=sess), interpolation='nearest', cmap="prism")
mpl.axis("off")
mpl.subplot( 413 )
mpl.imshow( imdl.eval(session=sess), interpolation='nearest', cmap="prism")
mpl.axis("off")
mpl.subplot( 414 )
mpl.imshow( imsm.eval(session=sess), interpolation='nearest', cmap="prism")
mpl.axis("off")
mpl.draw()
mpl.show()
# Show the first ten input "stock images" for one prediction date.
foog, _, _, _, _, _ = getsdata.build_predictions( STOCK_DIR, 2016, 1, 1 )
mpl.figure( 2, figsize = ( 5, 5 ), dpi = 129.582774711 )
foog = foog.reshape( -1, DIM, DIM )[ 0:10 ]
foog = foog.reshape( -1, DIM )
mpl.imshow( foog, interpolation='nearest', cmap="hot")
mpl.axis( "off" )
mpl.draw()
mpl.show()
# --- Backtest bookkeeping --------------------------------------------------
feb = []
tot = 100.0          # compounded return, starting at 100%
month = 1
year = 2015
total_perfs = []     # per-day portfolio performance multipliers
## Do the prediction
# money = 1000000.0
# timgs, tticks, tlsts = getsdata.build_today()
# print( len( timgs ), "SAdasd" )
# timgs = timgs.reshape( -1, DIM * DIM )
# ttgp = []
# tgprds = growth_tickers.eval( session = sess, feed_dict = { x: timgs, lasts: tlsts, keep_prob: 1.0 } )
# # Remove low-volume
# for i in tgprds:
#     vol, _ = getsdata.get_volume_and_price_for_today( tticks[ i ] + "\n" )
#     if vol >= 10000:
#         ttgp.append( i )
# tgprds = ttgp
# tticks = numpy.array( tticks )
# cnt = len( tgprds )
# if len( tgprds ) > 0:
#     for i in tgprds:
#         vol, prc = getsdata.get_volume_and_price_for_today( tticks[ i ] + "\n" )
#         tospend = money / cnt
#         cnt -= 1
#         to_buy = round( tospend / prc )
#         money -= to_buy * prc
#         if money < 0:
#             money += prc
#             to_buy -= 1
#         print( tticks[ i ], " Volume:", vol, " Price:", prc, "To buy:", to_buy, "Money left:", money )
# else:
#     print( "None!", money )
# Verify
# --- Walk-forward backtest over 2015-01 .. 2016-03 -------------------------
for year in range( 2015, 2017 ):
    for month in range( 1, 13 ):
        monthtot = 1.0
        # Day range 1..32 deliberately overshoots; build_predictions
        # presumably returns empty tickers on invalid dates -- confirm.
        for day in range( 1, 33 ):
            if year < 2016 or month < 4:
                foof, fool, ticks, lsts, prcs, vols = getsdata.build_predictions( STOCK_DIR, year, month, day )
                if len( ticks ) != 0:
                    foof = foof.reshape( -1, DIM * DIM )
                    ticks = numpy.array( ticks )
                    # Buy candidates passing the growth + liquidity filters.
                    gprds = growth_tickers.eval( session = sess, feed_dict = { x: foof, lasts: lsts, keep_prob: 1.0,
                                                                              prices: prcs, volumes: vols } )
                    #Added this to select number of stocks
                    prds = predictions.eval( session = sess, feed_dict = { x: foof, lasts: lsts, keep_prob: 1.0 } )
                    evls = actuals.eval( session = sess, feed_dict = { x: foof, y_: fool, lasts: lsts, keep_prob: 1.0 } )
                    # Keep the NUM_STOCKS candidates with the strongest
                    # predicted point gain.
                    numpy.random.shuffle( gprds )
                    gprds = numpy.intersect1d( numpy.argsort( prds ), gprds )[ -NUM_STOCKS : ]
                    #numpy.random.shuffle( gprds )
                    #gprds.sort( kind = "mergesort" )
                    #gprds = gprds[ 0 : 5 ]
                    sprds = shrink_tickers.eval( session = sess, feed_dict = { x: foof, lasts: lsts, keep_prob: 1.0,
                                                                              prices: prcs, volumes: vols } )
                    #gprds = numpy.random.choice( numpy.arange( 0, len( ticks ) - 1 ), len( ticks ) // 100, replace = False )
                    wkdy = datetime.date( year, month, day ).weekday()
                    print( calendar.day_name[ wkdy ], year, month, day )
                    if len( gprds ) != 0:
                        prfs = []
                        nprds = []
                        for i in gprds:
                            name = ticks[ i ]
                            prf, prc, vol = getsdata.get_profit( ticks[ i ] + "\n", STOCK_DIR, year, month, day )
                            #if prf != 1.0:
                            print( name, prf, "\t Price", prc, "\t Volume", vol, "\t Predicted points:", prds[ i ], "\t Actual points:", evls[ i ] )
                            prfs.append( prf )
                            nprds.append( prds[ i ] )
                        prfs = numpy.array( prfs )
                        #BUGBUG weighting gives bad resukts
                        # Position weighting by predicted points (disabled
                        # when POINTS_CAP <= 0: equal weights instead).
                        nprds = numpy.array( nprds )
                        nprds = numpy.clip( nprds, -1000000000.0, POINTS_CAP )
                        if POINTS_CAP <= 0:
                            nprds = numpy.ones( len( nprds ) )
                        nprdss = nprds.sum()
                        nprds = numpy.array( [ x / nprdss for x in nprds ] )
                        # Weighted day performance; compound into totals.
                        tprf = ( prfs * nprds ).sum()
                        total_perfs.append( tprf )
                        print( tprf )
                        tot *= tprf
                        monthtot *= tprf
                        print( tot )
                        print()
                    else:
                        print( "====================================== NONE =======================================" )
        print( "Month total", monthtot )
# --- Summary statistics ----------------------------------------------------
total_perfs = numpy.array( total_perfs )
total_avg = numpy.average( total_perfs )
total_var = numpy.var( total_perfs )
total_std = numpy.std( total_perfs )
num_days = 349
total_buying_days = len( total_perfs )
daily_rate = ( tot / 100 ) ** ( 1 / num_days )
three_monthly_rate = daily_rate ** ( 30.42 * 3 )
print( "\nExpected return:", tot, "percent."
       "\nTotal days trading:", total_buying_days,
       "\nOverall average stock performance:", total_avg,
       "\nDaily rate:", daily_rate,
       "\nThree month rate:", three_monthly_rate,
       "\nOverall performance variance, standard deviation:", total_var, ",", total_std,
       "\nSharpe ratio over three months assuming 0.30% t-bill rate:", ( three_monthly_rate - 1.003 ) / total_std )
sess.close()
| {
"repo_name": "BanditCat/tfstocks",
"path": "emnist.py",
"copies": "1",
"size": "10133",
"license": "apache-2.0",
"hash": -7423618805212884000,
"line_mean": 34.6795774648,
"line_max": 134,
"alpha_frac": 0.5915326162,
"autogenerated": false,
"ratio": 2.7784480394845077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3869980655684508,
"avg_score": null,
"num_lines": null
} |
import math
# Flattening tolerances (AGG-style adaptive Bezier subdivision parameters).
curve_distance_epsilon = 1e-30       # distances below this are treated as zero
curve_collinearity_epsilon = 1e-30   # cross-product threshold for collinearity
curve_angle_tolerance_epsilon = 0.01 # angle tolerances below this are ignored
curve_recursion_limit = 32           # hard cap on subdivision recursion depth
m_cusp_limit = 0.0                   # 0.0 disables the cusp condition
m_angle_tolerance = 10 * math.pi / 180.0   # max angle between segments (radians)
m_approximation_scale = 1.0          # world-to-output scale of the approximation
m_distance_tolerance_square = (0.5 / m_approximation_scale)**2  # squared flatness tolerance
epsilon = 1e-10  # endpoint-snap tolerance used by quadratic()/cubic()
def calc_sq_distance(x1, y1, x2, y2):
    """Return the squared Euclidean distance between (x1, y1) and (x2, y2)."""
    delta_x = x2 - x1
    delta_y = y2 - y1
    return delta_x * delta_x + delta_y * delta_y
def quadratic_recursive(points, x1, y1, x2, y2, x3, y3, level=0):
    """Adaptively flatten the quadratic Bezier (p1, p2, p3), appending the
    generated interior points to `points` (endpoints are NOT appended; the
    `quadratic()` wrapper adds them).

    Subdivision stops when the curve segment is flat enough (distance
    tolerance) and, optionally, straight enough (angle tolerance), or when
    curve_recursion_limit is reached. Module-level tolerances control the
    behavior.
    """
    if level > curve_recursion_limit:
        return

    # Calculate all the mid-points of the line segments
    # -------------------------------------------------
    x12 = (x1 + x2) / 2.
    y12 = (y1 + y2) / 2.
    x23 = (x2 + x3) / 2.
    y23 = (y2 + y3) / 2.
    # (x123, y123) is the point on the curve at t=0.5 (de Casteljau).
    x123 = (x12 + x23) / 2.
    y123 = (y12 + y23) / 2.

    dx = x3 - x1
    dy = y3 - y1
    # Cross product of (p3-p1) and (p3-p2): distance of the control point
    # from the chord, scaled by the chord length.
    d = math.fabs((x2 - x3) * dy - (y2 - y3) * dx)

    if d > curve_collinearity_epsilon:
        # Regular case
        # ------------
        if d * d <= m_distance_tolerance_square * (dx * dx + dy * dy):
            # If the curvature doesn't exceed the distance_tolerance value
            # we tend to finish subdivisions.
            if m_angle_tolerance < curve_angle_tolerance_epsilon:
                points.append((x123, y123))
                return

            # Angle & Cusp Condition
            da = math.fabs(
                math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
            if da >= math.pi:
                da = 2 * math.pi - da

            if da < m_angle_tolerance:
                # Finally we can stop the recursion
                points.append((x123, y123))
                return
    else:
        # Collinear case
        # --------------
        da = dx * dx + dy * dy
        if da == 0:
            # p1 == p3: distance of the control point from that point.
            d = calc_sq_distance(x1, y1, x2, y2)
        else:
            # Parametric projection of p2 onto the chord p1->p3.
            d = ((x2 - x1) * dx + (y2 - y1) * dy) / da
            if d > 0 and d < 1:
                # Simple collinear case, 1---2---3, we can leave just two
                # endpoints
                return
            # p2 projects outside the chord: use distance to the nearer
            # endpoint (or to the projection, for the remaining case).
            if(d <= 0):
                d = calc_sq_distance(x2, y2, x1, y1)
            elif d >= 1:
                d = calc_sq_distance(x2, y2, x3, y3)
            else:
                d = calc_sq_distance(x2, y2, x1 + d * dx, y1 + d * dy)
        if d < m_distance_tolerance_square:
            points.append((x2, y2))
            return

    # Continue subdivision
    # --------------------
    quadratic_recursive(points, x1, y1, x12, y12, x123, y123, level + 1)
    quadratic_recursive(points, x123, y123, x23, y23, x3, y3, level + 1)
def cubic_recursive(points, x1, y1, x2, y2, x3, y3, x4, y4, level=0):
    """Adaptively flatten the cubic Bezier (p1..p4), appending the generated
    interior points to `points` (endpoints are NOT appended; the `cubic()`
    wrapper adds them).

    The switch value `s` encodes which control points are significant
    (off the chord p1->p4): 0 = all collinear, 1 = only p3, 2 = only p2,
    3 = both (regular case). Module-level tolerances control termination.
    """
    if level > curve_recursion_limit:
        return

    # Calculate all the mid-points of the line segments
    # -------------------------------------------------
    x12 = (x1 + x2) / 2.
    y12 = (y1 + y2) / 2.
    x23 = (x2 + x3) / 2.
    y23 = (y2 + y3) / 2.
    x34 = (x3 + x4) / 2.
    y34 = (y3 + y4) / 2.
    x123 = (x12 + x23) / 2.
    y123 = (y12 + y23) / 2.
    x234 = (x23 + x34) / 2.
    y234 = (y23 + y34) / 2.
    # (x1234, y1234) is the point on the curve at t=0.5 (de Casteljau).
    x1234 = (x123 + x234) / 2.
    y1234 = (y123 + y234) / 2.

    # Try to approximate the full cubic curve by a single straight line
    # -----------------------------------------------------------------
    dx = x4 - x1
    dy = y4 - y1

    # Cross products: distances of p2 and p3 from the chord, scaled by its length.
    d2 = math.fabs(((x2 - x4) * dy - (y2 - y4) * dx))
    d3 = math.fabs(((x3 - x4) * dy - (y3 - y4) * dx))

    s = int((d2 > curve_collinearity_epsilon) << 1) + \
        int(d3 > curve_collinearity_epsilon)

    if s == 0:
        # All collinear OR p1==p4
        # ----------------------
        k = dx * dx + dy * dy
        if k == 0:
            d2 = calc_sq_distance(x1, y1, x2, y2)
            d3 = calc_sq_distance(x4, y4, x3, y3)
        else:
            # Parametric projections of p2 and p3 onto the chord p1->p4.
            k = 1. / k
            da1 = x2 - x1
            da2 = y2 - y1
            d2 = k * (da1 * dx + da2 * dy)
            da1 = x3 - x1
            da2 = y3 - y1
            d3 = k * (da1 * dx + da2 * dy)
            if d2 > 0 and d2 < 1 and d3 > 0 and d3 < 1:
                # Simple collinear case, 1---2---3---4
                # We can leave just two endpoints
                return
            # Out-of-range projections: squared distance to the nearer
            # endpoint, else to the projected point.
            if d2 <= 0:
                d2 = calc_sq_distance(x2, y2, x1, y1)
            elif d2 >= 1:
                d2 = calc_sq_distance(x2, y2, x4, y4)
            else:
                d2 = calc_sq_distance(x2, y2, x1 + d2 * dx, y1 + d2 * dy)
            if d3 <= 0:
                d3 = calc_sq_distance(x3, y3, x1, y1)
            elif d3 >= 1:
                d3 = calc_sq_distance(x3, y3, x4, y4)
            else:
                d3 = calc_sq_distance(x3, y3, x1 + d3 * dx, y1 + d3 * dy)
        # Keep only the farther control point if it is within tolerance.
        if d2 > d3:
            if d2 < m_distance_tolerance_square:
                points.append((x2, y2))
                return
        else:
            if d3 < m_distance_tolerance_square:
                points.append((x3, y3))
                return
    elif s == 1:
        # p1,p2,p4 are collinear, p3 is significant
        # -----------------------------------------
        if d3 * d3 <= m_distance_tolerance_square * (dx * dx + dy * dy):
            if m_angle_tolerance < curve_angle_tolerance_epsilon:
                points.append((x23, y23))
                return

            # Angle Condition
            # ---------------
            da1 = math.fabs(
                math.atan2(y4 - y3, x4 - x3) - math.atan2(y3 - y2, x3 - x2))
            if da1 >= math.pi:
                da1 = 2 * math.pi - da1

            if da1 < m_angle_tolerance:
                points.extend([(x2, y2), (x3, y3)])
                return

            if m_cusp_limit != 0.0:
                if da1 > m_cusp_limit:
                    points.append((x3, y3))
                    return
    elif s == 2:
        # p1,p3,p4 are collinear, p2 is significant
        # -----------------------------------------
        if d2 * d2 <= m_distance_tolerance_square * (dx * dx + dy * dy):
            if m_angle_tolerance < curve_angle_tolerance_epsilon:
                points.append((x23, y23))
                return

            # Angle Condition
            # ---------------
            da1 = math.fabs(
                math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
            if da1 >= math.pi:
                da1 = 2 * math.pi - da1

            if da1 < m_angle_tolerance:
                points.extend([(x2, y2), (x3, y3)])
                return

            if m_cusp_limit != 0.0:
                if da1 > m_cusp_limit:
                    points.append((x2, y2))
                    return
    elif s == 3:
        # Regular case
        # ------------
        if (d2 + d3) * (d2 + d3) <= m_distance_tolerance_square * (dx * dx + dy * dy):  # noqa
            # If the curvature doesn't exceed the distance_tolerance value
            # we tend to finish subdivisions.
            if m_angle_tolerance < curve_angle_tolerance_epsilon:
                points.append((x23, y23))
                return

            # Angle & Cusp Condition
            # ----------------------
            k = math.atan2(y3 - y2, x3 - x2)
            da1 = math.fabs(k - math.atan2(y2 - y1, x2 - x1))
            da2 = math.fabs(math.atan2(y4 - y3, x4 - x3) - k)
            if da1 >= math.pi:
                da1 = 2 * math.pi - da1
            if da2 >= math.pi:
                da2 = 2 * math.pi - da2

            if da1 + da2 < m_angle_tolerance:
                # Finally we can stop the recursion
                # ---------------------------------
                points.append((x23, y23))
                return

            if m_cusp_limit != 0.0:
                if da1 > m_cusp_limit:
                    points.append((x2, y2))
                    return
                if da2 > m_cusp_limit:
                    points.append((x3, y3))
                    return

    # Continue subdivision
    # --------------------
    cubic_recursive(
        points, x1, y1, x12, y12, x123, y123, x1234, y1234, level + 1)
    cubic_recursive(
        points, x1234, y1234, x234, y234, x34, y34, x4, y4, level + 1)
def quadratic(p1, p2, p3):
    """Flatten the quadratic Bezier curve (p1, p2, p3) into a polyline.

    Returns a list of (x, y) points that always includes both endpoints.
    """
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    points = []
    quadratic_recursive(points, x1, y1, x2, y2, x3, y3)
    # Bug fix: degenerate/collinear curves (e.g. p2 on the segment p1->p3)
    # produce no interior points at all, and the endpoint snapping below
    # would raise IndexError on points[0]. Fall back to the chord itself.
    if not points:
        return [(x1, y1), (x3, y3)]
    # Snap the polyline so it starts and ends exactly on the endpoints.
    dx, dy = points[0][0] - x1, points[0][1] - y1
    if (dx * dx + dy * dy) > epsilon:
        points.insert(0, (x1, y1))
    dx, dy = points[-1][0] - x3, points[-1][1] - y3
    if (dx * dx + dy * dy) > epsilon:
        points.append((x3, y3))
    return points
def cubic(p1, p2, p3, p4):
    """Flatten the cubic Bezier curve (p1, p2, p3, p4) into a polyline.

    Returns a list of (x, y) points that always includes both endpoints.
    """
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    x4, y4 = p4
    points = []
    cubic_recursive(points, x1, y1, x2, y2, x3, y3, x4, y4)
    # Bug fix: degenerate/collinear curves produce no interior points at
    # all, and the endpoint snapping below would raise IndexError on
    # points[0]. Fall back to the chord itself.
    if not points:
        return [(x1, y1), (x4, y4)]
    # Snap the polyline so it starts and ends exactly on the endpoints.
    dx, dy = points[0][0] - x1, points[0][1] - y1
    if (dx * dx + dy * dy) > epsilon:
        points.insert(0, (x1, y1))
    dx, dy = points[-1][0] - x4, points[-1][1] - y4
    if (dx * dx + dy * dy) > epsilon:
        points.append((x4, y4))
    return points
def arc(cx, cy, rx, ry, a1, a2, ccw=False):
    """Approximate an axis-aligned elliptical arc as a list of (x, y) vertices.

    The arc is centered at (cx, cy) with radii rx/ry, sweeping from angle a1
    to a2 (radians); ccw selects the winding direction. The angular step is
    sized so the chord error stays within 1/8 unit at unit scale.
    """
    mean_radius = (abs(rx) + abs(ry)) / 2.0
    step = math.acos(mean_radius / (mean_radius + 0.125 / 1.0)) * 2.0
    two_pi = math.pi * 2.0
    # Normalize the sweep so it proceeds monotonically in the chosen direction.
    if ccw:
        while a2 < a1:
            a2 += two_pi
    else:
        while a1 < a2:
            a1 += two_pi
        step = -step
    vertices = []
    theta = a1
    # Stop a quarter-step early so the explicit endpoint below isn't doubled.
    while (theta < a2 - step / 4) == ccw:
        vertices.append((cx + math.cos(theta) * rx, cy + math.sin(theta) * ry))
        theta += step
    # Always land exactly on the requested end angle.
    vertices.append((cx + math.cos(a2) * rx, cy + math.sin(a2) * ry))
    return vertices
def elliptical_arc(x0, y0, rx, ry, angle, large_arc_flag, sweep_flag, x2, y2):
    """Flatten an SVG elliptical arc (endpoint form) into a polyline.

    Implements the endpoint-to-center conversion from the SVG 1.1
    implementation notes (appendix F.6.5/F.6.6), samples the resulting
    axis-aligned arc with arc(), then rotates it by `angle`.

    Parameters
    ----------
    x0, y0 : start point of the arc (current point)
    rx, ry : ellipse radii (absolute values are used per the spec)
    angle : x-axis rotation of the ellipse, in radians
    large_arc_flag, sweep_flag : SVG arc selection flags
    x2, y2 : end point of the arc

    Returns
    -------
    list of (x, y) tuples.
    """
    radii_ok = True
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    # Radii must be treated as non-negative (SVG spec F.6.6.1).
    if rx < 0.0:
        rx = -rx
    if ry < 0.0:
        ry = -ry  # BUGFIX: original assigned `-rx` here, corrupting ry
    # Midpoint between the current and the final points
    dx2 = (x0 - x2) / 2.0
    dy2 = (y0 - y2) / 2.0
    # Transform into the ellipse's rotated frame: (x1, y1)
    x1 = cos_a * dx2 + sin_a * dy2
    y1 = -sin_a * dx2 + cos_a * dy2
    # Scale the radii up if they are too small to span the endpoints (F.6.6.2)
    prx, pry = rx * rx, ry * ry
    px1, py1 = x1 * x1, y1 * y1
    radii_check = px1 / prx + py1 / pry
    if radii_check > 1.0:
        rx = math.sqrt(radii_check) * rx
        ry = math.sqrt(radii_check) * ry
        prx = rx * rx
        pry = ry * ry
        if radii_check > 10.0:
            radii_ok = False  # noqa
    # Center in the rotated frame: (cx1, cy1)
    if large_arc_flag == sweep_flag:
        sign = -1
    else:
        sign = +1
    denom = prx * py1 + pry * px1
    if denom == 0.0:
        # Degenerate arc: start and end points coincide -- nothing to sweep.
        return [(x2, y2)]
    sq = (prx * pry - prx * py1 - pry * px1) / denom
    coef = sign * math.sqrt(max(sq, 0))
    cx1 = coef * ((rx * y1) / ry)
    cy1 = coef * -((ry * x1) / rx)
    # Center in the original frame: (cx, cy)
    sx2 = (x0 + x2) / 2.0
    sy2 = (y0 + y2) / 2.0
    cx = sx2 + (cos_a * cx1 - sin_a * cy1)
    cy = sy2 + (sin_a * cx1 + cos_a * cy1)
    # Unit vectors from the center toward the start (u) and end (v) points
    ux = (x1 - cx1) / rx
    uy = (y1 - cy1) / ry
    vx = (-x1 - cx1) / rx
    vy = (-y1 - cy1) / ry
    # Start angle
    n = math.sqrt(ux * ux + uy * uy)
    p = ux
    sign = -1.0 if uy < 0 else +1.0
    v = min(max(p / n, -1.0), 1.0)
    start_angle = sign * math.acos(v)
    # Sweep angle
    n = math.sqrt((ux * ux + uy * uy) * (vx * vx + vy * vy))
    p = ux * vx + uy * vy
    sign = -1.0 if ux * vy - uy * vx < 0 else +1.0
    v = min(max(p / n, -1.0), +1.0)
    sweep_angle = sign * math.acos(v)
    # Pick the sweep direction requested by sweep_flag (F.6.5.6).
    if not sweep_flag and sweep_angle > 0:
        sweep_angle -= math.pi * 2.0
    elif sweep_flag and sweep_angle < 0:
        sweep_angle += math.pi * 2.0
    start_angle = math.fmod(start_angle, 2.0 * math.pi)
    sweep_angle = min(max(sweep_angle, -2.0 * math.pi), 2.0 * math.pi)
    # Sample the axis-aligned arc, then rotate it by `angle` about the center.
    vertices = arc(cx, cy, rx, ry, start_angle, start_angle + sweep_angle, sweep_flag)
    c = math.cos(angle)
    s = math.sin(angle)
    # BUGFIX: arc() returns a plain list of tuples; the original applied
    # NumPy-style slicing (V[:, 0]) to it, which raises TypeError.
    rotated = []
    for px, py in vertices:
        dx, dy = px - cx, py - cy
        rotated.append((c * dx - s * dy + cx, s * dx + c * dy + cy))
    return rotated
| {
"repo_name": "ktritz/vispy",
"path": "vispy/util/svg/geometry.py",
"copies": "19",
"size": "15753",
"license": "bsd-3-clause",
"hash": -7571036668125745000,
"line_mean": 31.9560669456,
"line_max": 94,
"alpha_frac": 0.4967307814,
"autogenerated": false,
"ratio": 3.1785714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 478
} |
from antioch.core import models
from django.contrib import admin
class VerbInline(admin.TabularInline):
    """Inline editor listing the verbs defined on an Object."""
    model = models.Verb
    # Verb has several FKs to Object; anchor the inline on the defining object.
    fk_name = 'origin'
    extra = 1
    # Code bodies are too large for a tabular row.
    exclude = ('code',)
    readonly_fields = ('filename', 'ability', 'method', 'owner')
class PropertyInline(admin.TabularInline):
    """Inline editor listing the properties defined on an Object."""
    model = models.Property
    # Property has several FKs to Object; anchor the inline on the defining object.
    fk_name = 'origin'
    extra = 1
    readonly_fields = ('name', 'value', 'owner')
class ObjectAdmin(admin.ModelAdmin):
    """Admin for Objects, with inline editing of their verbs and properties."""
    list_display = ('__str__', 'unique_name', 'owner', 'location')
    inlines = [
        VerbInline,
        PropertyInline,
    ]
# Object uses the customized admin above; the rest get stock ModelAdmins.
admin.site.register(models.Object, ObjectAdmin)
admin.site.register(models.Verb)
admin.site.register(models.Property)
admin.site.register(models.Permission)
class AccessAdmin(admin.ModelAdmin):
    """Admin for ACL entries, exposing computed display columns."""
    list_display = ('rule', 'actor', 'action', 'entity', 'origin')

    def actor(self, access):
        """Who the rule applies to (a specific object or a group name)."""
        return access.actor()

    def action(self, access):
        """Name of the governed permission."""
        return access.permission.name

    def entity(self, access):
        """The thing being protected by the rule."""
        return access.entity()

    def origin(self, access):
        """The Object the protected entity lives on."""
        return access.origin()
# Access rules use the customized AccessAdmin; players use the stock admin.
admin.site.register(models.Access, AccessAdmin)
admin.site.register(models.Player)
admin.site.register(models.Task) | {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/admin.py",
"copies": "1",
"size": "1292",
"license": "mit",
"hash": -1839875877234735400,
"line_mean": 23.8653846154,
"line_max": 66,
"alpha_frac": 0.6648606811,
"autogenerated": false,
"ratio": 3.588888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47537495699888893,
"avg_score": null,
"num_lines": null
} |
from django.core import validators
from django.db import models
class URLField(models.CharField):
    """CharField that validates its value as an https:// URL only."""
    default_validators = [validators.URLValidator(schemes=['https'])]
class Object(models.Model):
    """A game-world entity; may own, contain, and inherit from other Objects."""
    class Meta:
        db_table = 'object'
    name = models.CharField(max_length=255)
    # Whether this object's name should be treated as unique.
    # NOTE(review): no DB constraint here -- presumably enforced in app code; confirm.
    unique_name = models.BooleanField()
    owner = models.ForeignKey('self', related_name='+', blank=True, null=True, on_delete=models.SET_NULL,)
    location = models.ForeignKey('self', related_name='contents', blank=True, null=True, on_delete=models.SET_NULL)
    # Inheritance graph, ordered by Relationship.weight.
    parents = models.ManyToManyField('self', related_name='children', blank=True, symmetrical=False, through='Relationship')
    observers = models.ManyToManyField('self', related_name='observing', blank=True, symmetrical=False, through='Observation')
    def __str__(self):
        # e.g. "#42 (widget)"
        return "#%s (%s)" % (self.id, self.name)
class Relationship(models.Model):
    """Through-table for Object.parents: one child-parent inheritance edge."""
    class Meta:
        db_table = 'object_relation'
        unique_together = [['child', 'parent']]
    child = models.ForeignKey(Object, related_name='parent', on_delete=models.CASCADE)
    parent = models.ForeignKey(Object, related_name='child', on_delete=models.CASCADE)
    # Ordering of a child's parents; see ObjectExchange.get_parents.
    weight = models.IntegerField(default=0)
class Observation(models.Model):
    """Through-table for Object.observers: one observer watching one object."""
    class Meta:
        db_table = 'object_observer'
    object = models.ForeignKey(Object, related_name='observer', on_delete=models.CASCADE)
    observer = models.ForeignKey(Object, related_name='object', on_delete=models.CASCADE)
class Alias(models.Model):
    """An alternate name by which an Object can be addressed."""
    class Meta:
        verbose_name_plural = 'aliases'
        db_table = 'object_alias'
    object = models.ForeignKey(Object, related_name='aliases', on_delete=models.CASCADE)
    alias = models.CharField(max_length=255)
#TODO: add support for additional URL types and connection details
class Repository(models.Model):
    """An external source-code repository that verb code can be loaded from."""
    class Meta:
        db_table = 'repository'
    slug = models.SlugField()
    # https-only; see URLField above.
    url = URLField(max_length=255)
    # Path prefix within the repository.
    prefix = models.CharField(max_length=255)
class Verb(models.Model):
    """A piece of executable code attached to an Object."""
    class Meta:
        db_table = 'verb'
    code = models.TextField(blank=True, null=True)
    # Alternatively, the code may live in an external Repository checkout.
    repo = models.ForeignKey(Repository, related_name='+', blank=True, null=True, on_delete=models.SET_NULL)
    filename = models.CharField(max_length=255, blank=True, null=True)
    ref = models.CharField(max_length=255, blank=True, null=True)
    owner = models.ForeignKey(Object, related_name='+', blank=True, null=True, on_delete=models.SET_NULL)
    origin = models.ForeignKey(Object, related_name='verbs', on_delete=models.CASCADE)
    # ability: rendered with a leading '@'; method: rendered with trailing '()'.
    ability = models.BooleanField()
    method = models.BooleanField()
    def __str__(self):
        return "%s {#%s on %s}" % (
            self.annotated(), self.id, self.origin
        )
    def annotated(self):
        """Return the verb name decorated with its ability/method markers."""
        ability_decoration = ['', '@'][self.ability]
        method_decoration = ['', '()'][self.method]
        verb_name = self.name()
        return ''.join([ability_decoration, verb_name, method_decoration])
    def name(self):
        """Return the verb's first name; raises IndexError if it has none."""
        return self.names.all()[0].name
class VerbName(models.Model):
    """One of possibly several names by which a Verb can be invoked."""
    class Meta:
        db_table = 'verb_name'
    verb = models.ForeignKey(Verb, related_name='names', on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    def __str__(self):
        return "%s {#%s on %s}" % (
            self.name, self.verb.id, self.verb.origin
        )
class Property(models.Model):
    """A named value attached to an Object."""
    class Meta:
        verbose_name_plural = 'properties'
        db_table = 'property'
    name = models.CharField(max_length=255)
    # Serialized value; interpretation depends on `type` below.
    value = models.TextField(blank=True, null=True)
    type = models.CharField(max_length=255, choices=[(x,x) for x in ('string', 'python', 'dynamic')])
    owner = models.ForeignKey(Object, related_name='+', null=True, on_delete=models.SET_NULL)
    origin = models.ForeignKey(Object, related_name='properties', on_delete=models.CASCADE)
    def __str__(self):
        return '%s {#%s on %s}' % (self.name, self.id, self.origin)
class Permission(models.Model):
    """A named permission (e.g. an action) referenced by Access rules."""
    class Meta:
        db_table = 'permission'
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
class Access(models.Model):
    """One ACL rule allowing or denying a permission on an object, verb, or property."""
    class Meta:
        verbose_name_plural = 'access controls'
        db_table = 'access'
    # Exactly one of object/verb/property is expected to be set -- TODO confirm.
    object = models.ForeignKey(Object, related_name='acl', null=True, on_delete=models.CASCADE)
    verb = models.ForeignKey(Verb, related_name='acl', null=True, on_delete=models.CASCADE)
    property = models.ForeignKey(Property, related_name='acl', null=True, on_delete=models.CASCADE)
    rule = models.CharField(max_length=5, choices=[(x,x) for x in ('allow', 'deny')])
    permission = models.ForeignKey(Permission, related_name='usage', on_delete=models.CASCADE)
    # 'accessor' rules target a specific object; 'group' rules target a group below.
    type = models.CharField(max_length=8, choices=[(x,x) for x in ('accessor', 'group')])
    accessor = models.ForeignKey(Object, related_name='rights', null=True, on_delete=models.CASCADE)
    group = models.CharField(max_length=8, null=True, choices=[(x,x) for x in ('everyone', 'owners', 'wizards')])
    # NOTE(review): presumably orders rule evaluation -- confirm semantics.
    weight = models.IntegerField(default=0)
    def actor(self):
        """Return the specific accessor object or the group name, per `type`."""
        return self.accessor if self.type == 'accessor' else self.group
    def entity(self):
        """Human-readable name of the protected entity."""
        if self.object:
            return 'self'
        elif self.verb:
            return ''.join([
                ['', '@'][self.verb.ability],
                self.verb.names.all()[:1][0].name,
                ['', '()'][self.verb.method],
            ])
        else:
            return self.property.name
    def origin(self):
        """Return the Object the protected entity belongs to."""
        if self.object:
            return self.object
        elif self.verb:
            return self.verb.origin
        else:
            return self.property.origin
    def __str__(self):
        try:
            return '%(rule)s %(actor)s %(permission)s on %(entity)s (%(weight)s)' % dict(
                rule = self.rule,
                actor = self.actor(),
                permission = self.permission.name,
                entity = self.entity(),
                weight = self.weight,
            )
        except Exception as e:
            # Deliberate best-effort: admin listings must not crash on a bad row.
            import traceback
            traceback.print_exc();
            return str(e)
class Player(models.Model):
    """A login account; wraps an in-game avatar Object with auth-related state."""
    class Meta:
        db_table = 'player'
        app_label = 'core'
    def __str__(self):
        return self.email
    def is_authenticated(self):
        # NOTE(review): modern Django expects `is_authenticated` to be a
        # property; as a method this may misbehave with contrib.auth -- confirm.
        return True
    @property
    def is_active(self):
        return True
    @property
    def is_staff(self):
        # Wizards may use the admin site.
        return self.wizard
    @property
    def is_superuser(self):
        return self.wizard
    def has_module_perms(self, app):
        return True
    def has_perm(self, perm):
        return True
    @property
    def email(self):
        # Synthesized address; raises AttributeError if avatar is None.
        return self.avatar.name + '@antioch.net'
    avatar = models.ForeignKey(Object, null=True, on_delete=models.SET_NULL)
    session_id = models.CharField(max_length=255, null=True, blank=True)
    wizard = models.BooleanField(default=False)
    enabled = models.BooleanField(default=True)
    # Hashed password -- TODO confirm hashing scheme against antioch.util.hash_password.
    crypt = models.CharField(max_length=255, null=True, blank=True)
    last_login = models.DateTimeField(null=True)
    last_logout = models.DateTimeField(null=True)
class Task(models.Model):
    """A deferred verb invocation scheduled to run after `delay` from `created`."""
    class Meta:
        db_table = 'task'
    user = models.ForeignKey(Object, related_name='tasks', on_delete=models.CASCADE)
    origin = models.ForeignKey(Object, related_name='+', on_delete=models.CASCADE)
    verb_name = models.CharField(max_length=255)
    # Serialized call arguments -- presumably ason/JSON; confirm against writer.
    args = models.TextField()
    kwargs = models.TextField()
    created = models.DateTimeField()
    delay = models.IntegerField()
    killed = models.BooleanField(default=False)
    # Populated when the task fails.
    error = models.CharField(max_length=255, blank=True, null=True)
    trace = models.TextField(blank=True, null=True)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/models.py",
"copies": "1",
"size": "8021",
"license": "mit",
"hash": -8654298277764845000,
"line_mean": 34.0262008734,
"line_max": 126,
"alpha_frac": 0.6273531979,
"autogenerated": false,
"ratio": 3.8158896289248334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49432428268248335,
"avg_score": null,
"num_lines": null
} |
"""
A variety of antioch error classes
"""
class UserError(Exception):
    """
    Base class for all errors that should be reported to the user who
    "caused" them.  Optional extra `data` may be supplied at construction
    and will be shown to "wizard" users.
    """
    def __init__(self, message, data=None):
        self._message = message
        self.data = data

    def __str__(self):
        return str(self._message)

    def __repr__(self):
        suffix = "\n " + str(self.data) if self.data else ""
        return str(self._message) + suffix
class TestError(Exception):
    """
    Special error used by tests; carries an optional message and data
    payload, mirroring UserError's repr behaviour.
    """
    def __init__(self, message=None, data=None):
        self._message = message
        self.data = data

    def __str__(self):
        return str(self._message)

    def __repr__(self):
        suffix = "\n " + str(self.data) if self.data else ""
        return str(self._message) + suffix
class UsageError(UserError):
    """
    Convenience exception for verbs that wish to print an error message
    and exit, rolling back any changes made so far.
    """
class AmbiguousObjectError(UserError):
    """
    Raised when a single object was expected but several matched.

    The generated message lists the matching items so the user can pick
    the correct one; a custom `message` overrides the generated text.
    """
    def __init__(self, name, matches, message=None):
        self.name = name
        if message:
            result = message
        else:
            result = "When you say, \"" + self.name + "\", do you mean "
            last = len(matches) - 1
            for index in range(len(matches)):
                if 0 < index <= last:
                    result += ", "
                if index == last:
                    result += " or "
                result += str(matches[index])
            result += "?"
        UserError.__init__(self, result, matches)
class AmbiguousVerbError(UserError):
    """
    Raised when, while searching for a verb, multiple possibilities were
    found.  The message lists each object defining the verb.  Whether
    this should be shown to non-wizardly users is an open question, as
    there is little they can do about it.
    """
    def __init__(self, name, matches):
        self.name = name
        result = "More than one object defines \"" + self.name + "\": "
        last = len(matches) - 1
        # BUGFIX: the original called matches.index(match) inside the loop,
        # which is O(n^2) and returns the wrong position when `matches`
        # contains duplicate entries; enumerate() yields the true index.
        for index, match in enumerate(matches):
            if index > 0 and index <= last:
                result = result + ", "
            if index == last:
                result = result + " and "
            result = result + str(match)
        result = result + "."
        UserError.__init__(self, result, matches)
class PermissionError(UserError):
    """
    The user tried to do something he doesn't have permission for. (duh?)

    NOTE(review): this shadows Python's builtin PermissionError within
    this module -- confirm that is intentional.
    """
class AccessError(PermissionError):
    """
    A more specific kind of PermissionError, recording who attempted
    which action on what.
    """
    def __init__(self, accessor, access_str, subject):
        self.subject = subject
        self.accessor = accessor
        self.access_str = access_str
        message = "%s is not allowed to '%s' on %s" % (str(accessor), access_str, str(subject))
        PermissionError.__init__(self, message)
class RecursiveError(UserError):
    """
    Raised if the user attempts to put a container inside one of its contents,
    or set the parent of an object to one of its children. This is unlikely
    to happen because of user error, and may not need to be presented to the
    user, but will be for the time being.
    """
    pass
class QuotaError(UserError):
    """
    Raised if the user tries to create objects he does not have enough quota for.
    """
    pass
class NoSuchObjectError(UserError):
    """
    Most often raised by the parser when a verb requests an object that
    is not there.  If the verb is written correctly, it means the user
    tried to manipulate a non-existent object (a typo or other mistake).
    """
    def __init__(self, name):
        message = "There is no '" + str(name) + "' here."
        UserError.__init__(self, message)
class NoSuchPrepositionError(UserError):
    """
    Raised when the programmer attempts to retrieve the object for a
    preposition that was not used in the parsed sentence.
    """
    def __init__(self, prep):
        UserError.__init__(self, "I don't understand you.", prep)
class NoSuchVerbError(UserError):
    """
    Raised by the parser when it cannot find a verb for a sentence; the
    attempted verb name is attached as the error data.
    """
    def __init__(self, name):
        UserError.__init__(self, "I don't know how to do that.", name)
class NoSuchPropertyError(UserError):
    """
    Raised by the system when it cannot find a needed property; the
    message optionally names the object it was looked up on.
    """
    def __init__(self, name, origin=None):
        if origin:
            suffix = ' on %s.' % origin
        else:
            suffix = '.'
        UserError.__init__(self, "There is no '" + str(name) + "' property defined" + suffix)
class ExecutionError(UserError):
    """
    Raised when user code causes some kind of exception; the offending
    code and the original exception are kept for inspection.
    """
    def __init__(self, code, e):
        self.code = code
        self.e = e
        message = "An error occurred in user code: %s" % e
        UserError.__init__(self, message)
"repo_name": "philchristensen/antioch",
"path": "antioch/core/errors.py",
"copies": "1",
"size": "5570",
"license": "mit",
"hash": 1700023661697592600,
"line_mean": 32.1607142857,
"line_max": 126,
"alpha_frac": 0.6003590664,
"autogenerated": false,
"ratio": 4.128984432913269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229343499313268,
"avg_score": null,
"num_lines": null
} |
"""
Connect the verb environment to the database
This is the connecting thread between the database and verb code
during a transaction. it is responsible for loading and saving objects,
verbs, properties and permissions, as well as caching objects loaded
during a single verb transaction.
"""
import crypt, string, random, time, logging, collections
from antioch import celery_config
from antioch.core import interface, errors, models
from antioch.util import sql, ason, hash_password
from django.conf import settings
from django.db import transaction
from pygments import highlight
from pygments.lexers.python import Python3TracebackLexer
from pygments.formatters import HtmlFormatter
# Maps ACL group names to membership tests with the signature
# (exchange, accessor, subject) -> bool.
group_definitions = dict(
    owners = lambda x,a,s: a.owns(s),
    wizards = lambda x,a,s: x.is_wizard(a.get_id()),
    everyone = lambda x,a,s: True,
)

# When True, fatal (non-UserError) exceptions roll the transaction back
# instead of committing it; see ObjectExchange.__exit__.
rollback_after_fatal_errors = True

log = logging.getLogger(__name__)
def extract_id(literal):
    """
    Given an object literal, return the object ID.

    Accepts either a bare integer (returned unchanged) or a string of the
    form "#<id>", "#<id> (name)" or "#<id>(name)".  Anything else yields
    None.
    """
    if isinstance(literal, int):
        return literal
    if isinstance(literal, str) and literal.startswith('#'):
        # The id runs from after '#' up to the first '(' or space, if any.
        for stop_char in ("(", " "):
            end = literal.find(stop_char)
            if end != -1:
                break
        else:
            end = len(literal)
        return int(literal[1:end])
    return None
class ConnectionWrapper(object):
    """
    Thin adapter around a Django database connection, providing simple
    query/operation helpers and vendor-specific last-insert-id lookup.
    """
    def __init__(self, connection):
        self.connection = connection

    def isType(self, type):
        """Return True if the underlying connection's vendor matches `type`."""
        return self.connection.vendor == type

    def getLastInsertId(self, obj_type):
        """
        Return the last auto-generated primary key for `obj_type`'s table.

        @raise RuntimeError: if the database vendor is unsupported
        """
        if self.isType('postgresql'):
            result = self.runQuery("SELECT currval(pg_get_serial_sequence('%s','id'));" % obj_type)
            return result[0]['currval']
        elif self.isType('sqlite'):
            result = self.runQuery("SELECT last_insert_rowid();")
            return result[0]['last_insert_rowid()']
        elif self.isType('mysql'):
            result = self.runQuery("SELECT LAST_INSERT_ID();")
            return result[0]['LAST_INSERT_ID()']
        else:
            # BUGFIX: the original raised `UnsupportedError`, a name that is
            # defined nowhere and would itself raise NameError.
            raise RuntimeError("Unsupported database type.")

    def runOperation(self, query, *args, **kwargs):
        """Execute a statement, discarding any result."""
        with self.connection.cursor() as cursor:
            cursor.execute(query, *args)

    def runQuery(self, query, *args, **kwargs):
        """Execute a query and return its rows as a list of column->value dicts."""
        with self.connection.cursor() as cursor:
            cursor.execute(query, *args)
            columns = [col[0] for col in cursor.description]
            return [dict(zip(columns, row)) for row in cursor.fetchall()]
class ObjectExchange(object):
"""
Database transaction layer.
This class contains all the queries used to interact with the relational
database. It doesn't take into consideration of who is calling it; that it
dealt with by the object model itself.
"""
permission_list = None
    def __init__(self, connection=None, wrapper=None, queue=False, ctx=None):
        """
        Create a new object exchange.

        @param connection: raw DB connection, wrapped unless `wrapper` is given
        @param wrapper: pre-built ConnectionWrapper (takes precedence)
        @param queue: if True, buffer outgoing user messages until flush()
        @param ctx: context user (object or integer id) for rights enforcement
        @raise RuntimeError: if a queue is requested without a context
        """
        self.cache = collections.OrderedDict()
        self.connection = wrapper or ConnectionWrapper(connection)
        # NOTE(review): ConnectionWrapper(...) is always truthy, so this check
        # can never fire; it probably meant to test the raw arguments.
        if(self.connection is None):
            raise RuntimeError("No connection provided.")
        self.use_queue = queue
        self.queue = [] if queue else None
        self.default_grants_active = False
        self.load_permissions()
        if(queue and not ctx):
            raise RuntimeError("Exchanges can't use queues without a context.")
        self.ctx = ctx
        # Accept either an object id or an object for the context.
        if(isinstance(ctx, int)):
            self.ctx_id = ctx
            self.ctx = self.get_object(ctx)
        elif(ctx):
            self.ctx_id = ctx.id
    def __enter__(self):
        """
        Within the context, wrap everything in a database transaction
        (a savepoint) and queue messages; see __exit__ for disposition.
        """
        self.begin()
        return self
    def __exit__(self, etype, e, trace):
        """
        Close the transaction: commit on success or UserError, roll back
        (optionally) on fatal errors, and report errors to the user's
        message queue.  Returning True suppresses the exception.
        """
        try:
            # Wizards may opt in to full tracebacks via a context property.
            show_all_traces = self.ctx and self.ctx.get('show_all_traces', False).value
        except:
            # Deliberate best-effort: a missing property/context means "off".
            show_all_traces = False
        try:
            if(etype is errors.TestError):
                self.commit()
                return False
            elif(etype is EnvironmentError):
                self.rollback()
                return False
            elif(isinstance(e, errors.UserError) and not show_all_traces):
                # Expected user-level failure: keep the work, tell the user.
                self.commit()
                err = str(e)
                log.info('Sending normal exception to user: %s' % err)
                if(self.queue is not None):
                    self.send_message(self.ctx.get_id(), dict(
                        command = 'write',
                        text = highlight(err, Python3TracebackLexer(), HtmlFormatter()),
                        is_error = True,
                        escape_html = False
                    ))
                return True
            elif(etype is not None):
                # Fatal error: optionally roll back, then send the traceback.
                if(rollback_after_fatal_errors):
                    self.rollback()
                else:
                    self.commit()
                import traceback, io
                # NOTE(review): this rebinding shadows the just-imported `io`
                # module; it works, but is fragile.
                io = io.StringIO()
                traceback.print_exception(etype, e, trace, None, io)
                log.error('Sending fatal exception to user: %s' % str(e))
                if(self.queue is not None):
                    self.send_message(self.ctx.get_id(), dict(
                        command = 'write',
                        text = highlight(io.getvalue(), Python3TracebackLexer(), HtmlFormatter()),
                        is_error = True,
                        escape_html = False
                    ))
                return True
            else:
                self.commit()
        finally:
            # Always deliver queued messages and clear the cache.
            self.flush()
    def begin(self):
        """
        Start a database transaction (implemented as a Django savepoint).
        """
        self.sid = transaction.savepoint()
        # self.connection.runOperation('BEGIN')
    def commit(self):
        """
        Complete the database transaction by releasing the savepoint
        created in begin().
        """
        # self.connection.runOperation('COMMIT')
        transaction.savepoint_commit(self.sid)
    def rollback(self):
        """
        Roll back the database transaction to the savepoint created in
        begin().
        """
        # self.connection.runOperation('ROLLBACK')
        transaction.savepoint_rollback(self.sid)
def send_message(self, user_id, msg):
if not(self.use_queue):
log.warning("attempted to send a message to user #%s on an unqueued exchange: %s" % (user_id, msg))
return
self.queue.append((self.get_object(user_id), msg))
    def flush(self):
        """
        Clear the object cache and publish all pending messages to each
        connected recipient's AMQP queue via kombu.
        """
        self.cache.clear()
        # NOTE(review): OrderedDict has no `_order` attribute in Python 3;
        # this only sets an unused instance attribute -- likely vestigial.
        self.cache._order = []
        if(self.queue):
            with celery_config.app.default_connection() as conn:
                from kombu import Exchange, Queue
                unbound_exchange = Exchange('antioch',
                    type = 'direct',
                    auto_delete = False,
                    durable = True,
                )
                channel = conn.channel()
                exchange = unbound_exchange(channel)
                exchange.declare()
                for user, msg in self.queue:
                    # Drop messages destined for users with no live connection.
                    if not(user.is_connected_player()):
                        log.debug("ignoring message for unconnected player %s" % user)
                        continue
                    queue_id = '-'.join([settings.USER_QUEUE, str(user.id)])
                    log.debug("flushing message to #%s: %s" % (queue_id, msg))
                    exchange.publish(exchange.Message(ason.dumps(msg), content_type="application/json"), routing_key=queue_id)
def get_context(self):
"""
Return the user this exchange is acting in the context of.
"""
return self.ctx
    def load_permissions(self):
        """
        Pre-load the list of existing permissions as a name->id mapping.

        Cached on the class, so the database is hit only once per process.
        """
        if not(ObjectExchange.permission_list):
            results = self.connection.runQuery(sql.build_select('permission'))
            ObjectExchange.permission_list = dict([(x['name'], x['id']) for x in results])
    def activate_default_grants(self):
        """
        Setup the default grants verb (`set_default_permissions`).

        Loads the system object (#1) and its `set_default_permissions`
        verb into the cache.  Idempotent per exchange instance.
        """
        if(self.default_grants_active):
            return
        # default_permissions=False avoids recursing back into this method.
        system = self.instantiate('object', default_permissions=False, id=1)
        result = self.connection.runQuery(sql.interp(
            """SELECT v.*
            FROM verb_name vn
            INNER JOIN verb v ON v.id = vn.verb_id
            WHERE vn.name = 'set_default_permissions'
            AND v.origin_id = %s
            """, system.get_id()))
        self.instantiate('verb', default_permissions=False, *result)
        self.default_grants_active = True
def instantiate(self, obj_type, record=None, *additions, **fields):
"""
Instantiate an object either by loading its record by ID from the database, or creating a new one.
"""
records = []
if(record):
records.append(record)
if(additions):
records.extend(additions)
default_permissions = fields.pop('default_permissions', True)
if(fields):
records.append(fields)
results = []
for record in records:
object_id = record.get('id', None)
object_key = '%s-%s' % (obj_type, object_id)
if(object_key in self.cache):
obj = self.cache[object_key]
else:
# no ID passed, we're creating a new object
if(object_id is None):
def fail(record):
raise RuntimeError("Don't know how to make an object of type '%s'" % obj_type)
if(self.ctx and 'owner_id' not in record):
record['owner_id'] = ctx.get_id()
maker = getattr(self, '_mk%s' % obj_type, fail)
obj = maker(record)
self.save(obj)
if(default_permissions):
try:
self.activate_default_grants()
system = self.get_object(1)
set_default_permissions = system.set_default_permissions
except (errors.NoSuchObjectError, errors.NoSuchVerbError) as e:
set_default_permissions = lambda *a: None
set_default_permissions(obj)
else:
obj = self.load(obj_type, object_id)
results.append(obj)
if(len(records) == 1):
return results[0]
return results
def _mkobject(self, record):
"""
Instantiate a interface.Object
"""
obj = interface.Object(self)
obj._name = record.get('name', '')
obj._unique_name = record.get('unique_name', False)
obj._owner_id = record.get('owner_id', None)
obj._location_id = record.get('location_id', None)
return obj
    def _mkverb(self, record):
        """
        Instantiate a interface.Verb from a raw database record dict.

        Note the ordering below: when a 'name' is supplied the verb is
        saved first so it has an id before add_name() runs.
        """
        origin = self.instantiate('object', id=record['origin_id'])
        v = interface.Verb(origin)
        v._code = record.get('code', '')
        v._filename = record.get('filename', None)
        v._ref = record.get('ref', None)
        v._owner_id = record.get('owner_id', None)
        v._ability = record.get('ability', False)
        v._method = record.get('method', False)
        v._origin_id = record['origin_id']
        if('repo' in record):
            # Resolve a repository slug into its primary key.
            repo = models.Repository.objects.get(slug=record['repo'])
            v._repo_id = repo.id
        if('name' in record):
            self.save(v)
            v.add_name(record['name'])
        return v
def _mkproperty(self, record):
"""
Instantiate a interface.Property
"""
origin = self.instantiate('object', id=record['origin_id'])
p = interface.Property(origin)
p._name = record['name']
p._origin_id = record['origin_id']
p._type = record.get('type', 'string')
p._owner_id = record.get('owner_id', None)
val = record.get('value', '')
p._value = ason.loads(val, exchange=self) if val else val
return p
def _mkpermission(self, record):
"""
Instantiate a interface.Permission
"""
origin = None
for origin_type in ('object', 'verb', 'property'):
origin_id = record.get('%s_id' % origin_type, None)
if(origin_id):
origin = self.instantiate(origin_type, id=origin_id)
break
assert origin is not None, "Can't determine an origin for permission record: %s" % record
perm = interface.Permission(origin)
perm.object_id = record.get('object_id', None)
perm.verb_id = record.get('verb_id', None)
perm.property_id = record.get('property_id', None)
perm.rule = record.get('rule', 'allow')
perm.permission_id = record.get('permission_id', None)
perm.type = record.get('type', 'group')
perm.subject_id = record.get('subject_id', None)
perm.group = record.get('group', 'everyone')
return perm
def load(self, obj_type, obj_id):
"""
Load a specific object from the database.
"""
obj_key = '%s-%s' % (obj_type, obj_id)
if(obj_key in self.cache):
return self.cache[obj_key]
items = self.connection.runQuery(sql.build_select(obj_type, id=obj_id))
if not(items):
raise errors.NoSuchObjectError("%s #%s" % (obj_type, obj_id))
def fail(record):
raise RuntimeError("Don't know how to make an object of type '%s'" % obj_type)
maker = getattr(self, '_mk%s' % obj_type, fail)
obj = maker(items[0])
if not(obj.get_id()):
obj.set_id(obj_id)
self.cache[obj_key] = obj
return obj
def save(self, obj):
"""
Save the provided model back into the database.
"""
obj_type = type(obj).__name__.lower()
obj_id = obj.get_id()
if(obj_type == 'object'):
attribs = dict(
name = obj._name,
unique_name = str(int(obj._unique_name)),
owner_id = obj._owner_id,
location_id = obj._location_id,
)
elif(obj_type == 'verb'):
attribs = dict(
code = obj._code,
filename = obj._filename,
repo_id = obj._repo_id,
ref = obj._ref,
owner_id = obj._owner_id,
origin_id = obj._origin_id,
ability = str(int(obj._ability)),
method = str(int(obj._method)),
)
elif(obj_type == 'property'):
def check(v):
if(v is None):
return False
elif(v is ""):
return False
return True
attribs = dict(
name = obj._name,
value = ason.dumps(obj._value) if check(obj._value) else obj._value,
owner_id = obj._owner_id,
origin_id = obj._origin_id,
type = obj._type,
)
else:
raise RuntimeError("Don't know how to save an object of type '%s'" % obj_type)
if(obj_id):
self.connection.runOperation(sql.build_update(obj_type, attribs, dict(id=obj_id)))
else:
self.connection.runOperation(sql.build_insert(obj_type, attribs))
obj.set_id(self.connection.getLastInsertId(obj_type))
object_key = '%s-%s' % (obj_type, obj.get_id())
if(object_key not in self.cache):
self.cache[object_key] = obj
    def get_object(self, key, return_list=False):
        """
        Return the object specified by the provided key.

        The key may be an integer id, a "#id"-style literal (optionally
        followed by "(name)" or " name"), or a case-insensitive object
        name.  If return_list is True, ambiguous name lookups return a
        list of matching objects instead of raising.

        @raise errors.NoSuchObjectError: name matched nothing (unless return_list)
        @raise errors.AmbiguousObjectError: name matched several (unless return_list)
        @raise ValueError: for unsupported key types
        """
        if(isinstance(key, str)):
            key = key.strip()
            # Numeric strings are treated as plain ids.
            try:
                key = int(key)
            except:
                pass
        if(key in ('', 'none', 'None', 'null', 'NULL', None)):
            return None
        items = None
        if(isinstance(key, str)):
            if(key.startswith('#')):
                # "#id", "#id (name)" or "#id name" -- extract the id portion.
                end = key.find("(")
                if(end == -1):
                    end = key.find( " ")
                if(end == -1):
                    end = len(key)
                key = int(key[1:end])
            else:
                # Case-insensitive lookup by object name.
                items = self.connection.runQuery(sql.build_select('object', name=sql.RAW(sql.interp('LOWER(%%s) = LOWER(%s)', key))))
                if(len(items) == 0):
                    if(return_list):
                        return []
                    else:
                        raise errors.NoSuchObjectError(key)
                elif(len(items) > 1):
                    if(return_list):
                        return self.instantiate('object', *items)
                    else:
                        raise errors.AmbiguousObjectError(key, items)
                else:
                    return self.instantiate('object', items[0])
        if(isinstance(key, int)):
            # -1 is used as an explicit "no object" sentinel.
            if(key == -1):
                return None
            return self.load('object', key)
        else:
            raise ValueError("Invalid key type: %r" % repr(key))
def get_aliases(self, object_id):
"""
Return all aliases for the given object ID.
"""
result = self.connection.runQuery(sql.interp("SELECT alias FROM object_alias WHERE object_id = %s", object_id))
return [x['alias'] for x in result]
def add_alias(self, object_id, alias):
"""
Add an aliases for the provided object.
"""
self.connection.runOperation(sql.build_insert('object_alias', object_id=object_id, alias=alias));
def remove_alias(self, object_id, alias):
"""
Remove an aliases for the provided object.
"""
self.connection.runOperation(sql.build_delete('object_alias', object_id=object_id, alias=alias));
    def get_observers(self, object_id):
        """
        Get a list of objects currently observing the provided object.
        """
        result = self.instantiate('object', *self.connection.runQuery(sql.interp(
            """SELECT o.*
            FROM object o
            INNER JOIN object_observer oo ON oo.observer_id = o.id
            WHERE oo.object_id = %s
            """, object_id)))
        # instantiate() returns a bare object for a single row; normalize to a list.
        if not(isinstance(result, (list, tuple))):
            result = [result]
        return result
    def get_observing(self, object_id):
        """
        Get the object that the provided object is observing, or None.
        """
        result = self.instantiate('object', *self.connection.runQuery(sql.interp(
            """SELECT o.*
            FROM object o
            INNER JOIN object_observer oo ON oo.object_id = o.id
            WHERE oo.observer_id = %s
            """, object_id)))
        # Multiple rows: pick the first; empty: None; single row: as-is.
        if(isinstance(result, (list, tuple))):
            return result[0] if result else None
        return result
def clear_observers(self, object_id):
"""
Make all current observers stop paying attention to the provided object.
"""
self.connection.runOperation(sql.build_delete('object_observer', object_id=object_id));
def add_observer(self, object_id, observer_id):
"""
Add an observer for the provided object.
"""
self.connection.runOperation(sql.build_insert('object_observer', object_id=object_id, observer_id=observer_id));
def remove_observer(self, object_id, observer_id):
"""
Remove an observer for the provided object.
"""
self.connection.runOperation(sql.build_delete('object_observer', object_id=object_id, observer_id=observer_id));
    def get_parents(self, object_id, recurse=False):
        """
        Return a list of immediate parents for the given object.
        Optionally, pass recurse=True to fetch complete ancestry.
        """
        #NOTE: the heavier a parent weight is, the more influence its inheritance has.
        # e.g., if considering inheritance by left-to-right, the leftmost ancestors will
        # have the heaviest weights.
        # `parent_ids` accumulates all rows; `ancestor_ids` holds the frontier.
        parent_ids = ancestor_ids = self.connection.runQuery(sql.interp(
            """SELECT parent_id AS id
            FROM object_relation
            WHERE child_id = %s
            ORDER BY weight DESC
            """, object_id))
        while(recurse):
            # Walk one generation up per iteration until no ancestors remain.
            # NOTE(review): a cycle in the parent graph would loop forever here,
            # and duplicates are not filtered -- confirm data is acyclic.
            ancestor_ids = self.connection.runQuery(sql.interp(
                """SELECT parent_id AS id
                FROM object_relation
                WHERE child_id IN %s
                ORDER BY weight DESC
                """, [x['id'] for x in ancestor_ids]))
            if(ancestor_ids):
                parent_ids.extend(ancestor_ids)
            else:
                recurse = False
        result = self.instantiate('object', *parent_ids)
        # instantiate() returns a bare object for a single record; normalize.
        return [result] if isinstance(result, interface.Object) else result
def has_parent(self, child_id, object_id):
    """
    Does this child have the provided object as an ancestor?

    @param child_id: id of the object whose ancestry is searched
    @param object_id: id of the candidate ancestor
    @return: True if object_id appears anywhere in the ancestry
    """
    frontier = [
        x['id'] for x in self.connection.runQuery(sql.interp(
            """SELECT parent_id AS id
            FROM object_relation
            WHERE child_id = %s
            ORDER BY weight DESC
            """, child_id))
    ]
    # Bugfix: track ids we've already expanded so a cyclic relation graph
    # can't drive this loop forever (the original re-queried the same ids
    # indefinitely on a cycle).
    visited = set()
    while(frontier):
        if(object_id in frontier):
            return True
        visited.update(frontier)
        rows = self.connection.runQuery(sql.interp(
            """SELECT parent_id AS id
            FROM object_relation
            WHERE child_id IN %s
            ORDER BY weight DESC
            """, frontier))
        frontier = [x['id'] for x in rows if x['id'] not in visited]
    return False
def remove_parent(self, child_id, parent_id):
    """
    Remove the given parent from this child's list of immediate ancestors.
    """
    delete = sql.interp(
        "DELETE FROM object_relation WHERE child_id = %s AND parent_id = %s",
        child_id, parent_id)
    self.connection.runOperation(delete)
def add_parent(self, child_id, parent_id):
    """
    Add the given parent to this child's list of immediate ancestors.

    New relations start with a weight of 0.
    """
    insert = sql.interp(
        "INSERT INTO object_relation (child_id, parent_id, weight) VALUES (%s, %s, 0)",
        child_id, parent_id)
    self.connection.runOperation(insert)
def has(self, origin_id, item_type, name, recurse=True, unrestricted=True):
    """
    Does the given origin item have a certain verb or property in its ancestry?

    @param origin_id: id of the object to start searching from
    @param item_type: 'verb' or 'property'
    @param name: the verb/property name to look for
    @param recurse: when True, continue up the inheritance graph
    @param unrestricted: when False, a found verb must also be executable
        and a found property must also be readable
    """
    if(item_type not in ('property', 'verb')):
        # Bugfix: the message previously interpolated the builtin `type`
        # instead of the offending argument.
        raise ValueError("Invalid item type: %s" % item_type)
    a = None
    parents = [origin_id]
    # breadth-first walk up the inheritance graph
    while(parents):
        object_id = parents.pop(0)
        if(item_type == 'verb'):
            a = self.connection.runQuery(sql.interp(
                """SELECT v.id
                FROM verb v
                INNER JOIN verb_name vn ON v.id = vn.verb_id
                WHERE vn.name = %s
                AND v.origin_id = %s
                """, name, object_id))
        elif(item_type == 'property'):
            a = self.connection.runQuery(sql.interp(
                """SELECT p.id
                FROM property p
                WHERE p.name = %s
                AND p.origin_id = %s
                """, name, object_id))
        if(a):
            if(unrestricted):
                return True
            elif(item_type == 'verb'):
                item = self.instantiate('verb', a[0])
                return item.is_executable()
            elif(item_type == 'property'):
                item = self.instantiate('property', a[0])
                return item.is_readable()
        elif(recurse):
            # nothing defined here; queue this object's parents
            results = self.connection.runQuery(sql.interp("SELECT parent_id FROM object_relation WHERE child_id = %s", object_id))
            parents.extend([result['parent_id'] for result in results])
    return False
def get_ancestors(self, descendent_id):
    """
    Return all ancestors of the provided object.

    @param descendent_id: id of the object whose ancestry is fetched
    @return: a list of instantiated ancestor objects
    """
    ancestors = []
    # Bugfix: the original re-expanded every id it popped, so a cyclic (or
    # heavily diamond-shaped) relation graph would loop forever / explode.
    # Expand each child id at most once.
    expanded = set()
    pending = [descendent_id]
    while(pending):
        child_id = pending.pop()
        if(child_id in expanded):
            continue
        expanded.add(child_id)
        results = self.connection.runQuery(sql.interp("SELECT parent_id FROM object_relation WHERE child_id = %s", child_id))
        parent_ids = [result['parent_id'] for result in results]
        pending.extend(parent_ids)
        ancestors.extend(parent_ids)
    return [self.instantiate('object', id=x) for x in ancestors]
def get_ancestor_with(self, descendent_id, attribute_type, name):
    """
    Return the ancestor object that provides the given attribute.

    @param descendent_id: id of the object to start searching from
    @param attribute_type: 'verb' or 'property'
    @param name: the attribute name to search for
    @return: the providing object, or None if no ancestor defines it
    """
    if(attribute_type not in ('property', 'verb')):
        # Bugfix: the message previously interpolated the builtin `type`
        # instead of the offending argument.
        raise ValueError("Invalid attribute type: %s" % attribute_type)
    a = None
    parents = [descendent_id]
    # breadth-first walk up the inheritance graph
    while(parents):
        object_id = parents.pop(0)
        if(attribute_type == 'verb'):
            a = self.connection.runQuery(sql.interp(
                """SELECT v.origin_id AS id
                FROM verb v
                INNER JOIN verb_name vn ON v.id = vn.verb_id
                WHERE vn.name = %s
                AND v.origin_id = %s
                """, name, object_id))
        elif(attribute_type == 'property'):
            a = self.connection.runQuery(sql.interp(
                """SELECT p.origin_id AS id
                FROM property p
                WHERE p.name = %s
                AND p.origin_id = %s
                """, name, object_id))
        if(a):
            break
        else:
            # not defined here; queue this object's parents
            results = self.connection.runQuery(sql.interp("SELECT parent_id FROM object_relation WHERE child_id = %s", object_id))
            parents.extend([result['parent_id'] for result in results])
    if not(a):
        return None
    return self.instantiate('object', a[0])
def get_verb(self, origin_id, name, recurse=True):
    """
    Get a verb by this name, recursing by default.

    Walks the inheritance graph breadth-first starting at origin_id and
    returns the first matching verb, or None. Instances are cached per
    verb id; a cached instance is returned as-is.
    """
    v = None
    parents = [origin_id]
    while(parents):
        parent_id = parents.pop(0)
        v = self.connection.runQuery(sql.interp(
            """SELECT v.*
            FROM verb v
            INNER JOIN verb_name vn ON vn.verb_id = v.id
            WHERE vn.name = %s
            AND v.origin_id = %s
            """, name, parent_id))
        if(v or not recurse):
            break
        else:
            # no match here; queue this object's parents for the next pass
            results = self.connection.runQuery(sql.interp("SELECT parent_id FROM object_relation WHERE child_id = %s", parent_id))
            parents.extend([result['parent_id'] for result in results])
    if not(v):
        return None
    # return self.instantiate('verb', v[0], default_permissions=(name != 'set_default_permissions'))
    verb_id = v[0]['id']
    if('verb-%s' % verb_id in self.cache):
        return self.cache['verb-%s' % verb_id]
    v = self._mkverb(v[0])
    v.set_id(verb_id)
    # records where the lookup started, not where the verb was found
    v._source_id = origin_id
    return v
def remove_verb(self, origin_id, name):
    """
    Remove a verb defined directly on the given object.

    A no-op when no such verb exists.
    """
    verb = self.get_verb(origin_id, name)
    if not(verb):
        return
    self.connection.runOperation(sql.build_delete('verb', id=verb.get_id()))
def get_verb_list(self, origin_id):
    """
    Get a list of verb id and names dictionaries.

    Aggregates all names per verb; the aggregate function differs per
    backend (array_agg on PostgreSQL, group_concat elsewhere).
    """
    query = """SELECT v.id, v.ability, v.method, %s(vn.name) AS names
    FROM verb v
    INNER JOIN verb_name vn ON v.id = vn.verb_id
    WHERE v.origin_id = %%s
    GROUP BY v.id
    """
    if(self.connection.isType('postgresql')):
        # array_agg yields a list of names; flatten to a comma-separated string
        rows = self.connection.runQuery(sql.interp(query % "array_agg", origin_id))
        return [dict(id=row['id'], ability=row['ability'], method=row['method'], names=','.join(row['names'])) for row in rows]
    # group_concat (MySQL/SQLite) already returns a comma-separated string
    rows = self.connection.runQuery(sql.interp(query % "group_concat", origin_id))
    return [dict(id=row['id'], ability=row['ability'], method=row['method'], names=row['names']) for row in rows]
def get_property_list(self, origin_id):
    """
    Get a list of property id, type and name dictionaries.
    """
    rows = self.connection.runQuery(sql.interp(
        """SELECT p.id, p.name, p.type
        FROM property p
        WHERE p.origin_id = %s
        """, origin_id))
    return [{'id': row['id'], 'type': row['type'], 'name': row['name']} for row in rows]
def get_verb_names(self, verb_id):
    """
    Get a list of names for the given verb.
    """
    rows = self.connection.runQuery(sql.interp("SELECT name FROM verb_name WHERE verb_id = %s", verb_id))
    return [row['name'] for row in rows]
def add_verb_name(self, verb_id, name):
    """
    Add another name for a given verb.
    """
    insert = sql.build_insert('verb_name', verb_id=verb_id, name=name)
    self.connection.runOperation(insert)
def remove_verb_name(self, verb_id, name):
    """
    Remove a name for a given verb.
    """
    delete = sql.build_delete('verb_name', verb_id=verb_id, name=name)
    self.connection.runOperation(delete)
def get_property(self, origin_id, name, recurse=True):
    """
    Get a property defined on an ancestor of the given origin_id.

    Walks the inheritance graph breadth-first starting at origin_id and
    returns the first matching property, or None. Instances are cached
    per property id; a cached instance is returned as-is.
    """
    p = None
    parents = [origin_id]
    while(parents):
        parent_id = parents.pop(0)
        p = self.connection.runQuery(sql.interp(
            """SELECT p.*
            FROM property p
            WHERE p.name = %s
            AND p.origin_id = %s
            """, name, parent_id))
        if(p or not recurse):
            break
        else:
            # no match here; queue this object's parents for the next pass
            results = self.connection.runQuery(sql.interp("SELECT parent_id FROM object_relation WHERE child_id = %s", parent_id))
            parents.extend([result['parent_id'] for result in results])
    if not(p):
        return None
    # return self.instantiate('property', p[0])
    property_id = p[0]['id']
    if('property-%s' % property_id in self.cache):
        return self.cache['property-%s' % property_id]
    p = self._mkproperty(p[0])
    p.set_id(property_id)
    # records where the lookup started, not where the property was found
    p._source_id = origin_id
    return p
def remove_property(self, origin_id, name):
    """
    Remove a property defined on the given object.

    A no-op when no such property exists.
    """
    prop = self.get_property(origin_id, name)
    if not(prop):
        return
    self.connection.runOperation(sql.build_delete('property', id=prop.get_id()))
def refs(self, key):
    """
    How many objects in the store share the name given?
    """
    rows = self.connection.runQuery(sql.interp("SELECT COUNT(*) AS count FROM object WHERE name = %s", key))
    return rows[0]['count']
def is_unique_name(self, key):
    """
    Has the given key been designated as a unique name?
    """
    # The doubled %% survives the inner interp() call so the outer SQL
    # layer still sees a placeholder on the column side.
    # NOTE(review): the apparent net effect is a case-insensitive match on
    # `name` for rows flagged unique_name -- confirm against how
    # sql.build_select renders RAW values.
    result = self.connection.runQuery(sql.build_select('object', dict(
        name = sql.RAW(sql.interp('LOWER(%%s) = LOWER(%s)', key)),
        unique_name = True
    )))
    return bool(result)
def remove(self, obj_type, object_id):
    """
    Destroy an object in the database.
    """
    self.connection.runOperation(sql.build_delete(obj_type, id=object_id))
    # evict any cached instance so stale wrappers aren't handed out later
    cache_key = '%s-%s' % (obj_type, object_id)
    self.cache.pop(cache_key, None)
def is_player(self, object_id):
    """
    Is the given object the avatar for a player?
    """
    rows = self.connection.runQuery(sql.interp("SELECT id FROM player WHERE avatar_id = %s", object_id))
    return len(rows) > 0
def is_wizard(self, avatar_id):
    """
    Does the given player have wizard rights?
    """
    rows = self.connection.runQuery(sql.interp("SELECT id FROM player WHERE wizard = '1' AND avatar_id = %s", avatar_id))
    return len(rows) > 0
def is_connected_player(self, avatar_id):
    """
    Is the given player currently logged on?

    A player counts as connected when their last login is more recent
    than their last logout.
    """
    # Each backend spells "the Unix epoch" differently; the NULLable
    # login/logout stamps are COALESCEd to it so the comparison always
    # has two concrete values.
    epoch_exprs = (
        ('postgresql', "to_timestamp(0)"),
        ('sqlite', "date(0,'unixepoch')"),
        ('mysql', "from_unixtime(0)"),
    )
    for db_type, epoch in epoch_exprs:
        if(self.connection.isType(db_type)):
            timestamp_function = epoch
            break
    else:
        raise UnsupportedError("Unsupported database type.")
    rows = self.connection.runQuery(sql.interp(
        """SELECT 1 AS connected
        FROM player
        WHERE COALESCE(last_login, %s) > COALESCE(last_logout, %s)
        AND avatar_id = %%s
        """ % (timestamp_function, timestamp_function), avatar_id))
    return bool(rows)
def get_avatar_id(self, player_id):
    """
    Return the avatar object id for the given player record.

    NOTE(review): raises IndexError when no player row exists -- confirm
    callers always pass a valid player_id.
    """
    result = self.connection.runQuery(sql.build_select('player', dict(id=player_id)))
    return result[0]['avatar_id']
def set_player(self, object_id, player=None, wizard=None, passwd=None, test_salt=None, **attribs):
    """
    Edit the player attributes of an object.

    @param player: True enables the player record; False disables it and
        locks the password. NOTE(review): `enabled` and `wizard` are
        unconditionally rewritten from these flags, so passing None acts
        the same as False -- confirm callers expect that.
    @param passwd: when given, stores a new password hash
    @param test_salt: fixed salt for deterministic hashing in tests
    @param attribs: additional player columns to write
    """
    crypt = None
    if(passwd is not None):
        crypt = attribs['crypt'] = hash_password(passwd, salt=test_salt)
    elif(player is False):
        # '!' can never match a real hash, effectively locking the account
        crypt = attribs['crypt'] = '!'
    # NOTE(review): the `crypt` local is assigned but never read afterwards
    attribs['enabled'] = str(int(player is True))
    attribs['wizard'] = str(int(wizard is True))
    if(self.is_player(object_id)):
        # NOTE(review): this guard looks unreachable since enabled/wizard
        # are always set above -- confirm before removing
        if not(attribs):
            return
        self.connection.runOperation(sql.build_update('player', attribs, dict(avatar_id=object_id)))
    else:
        self.connection.runOperation(sql.build_insert('player', dict(avatar_id=object_id, **attribs)))
def login_player(self, avatar_id, session_id):
    """
    Register a player as logged in.
    """
    values = dict(session_id=session_id, last_login=sql.RAW('now()'))
    self.connection.runOperation(sql.build_update('player', values, dict(avatar_id=avatar_id)))
def logout_player(self, avatar_id):
    """
    Register a player as logged out.
    """
    values = dict(last_logout=sql.RAW('now()'))
    self.connection.runOperation(sql.build_update('player', values, dict(avatar_id=avatar_id)))
def get_last_client_ip(self, avatar_id):
    """
    Get the last IP used to login as this player.

    Returns None when no session record exists.
    """
    rows = self.connection.runQuery(sql.build_select('session', user_id=avatar_id))
    if not(rows):
        return None
    return rows[0]['last_client_ip']
def get_contents(self, container_id, recurse=False):
    """
    Get the immediate contents of a provided object.
    Optionally supply recurse=True to fetch all contents.

    @param container_id: id of the containing object
    @param recurse: when True, include contents of contents, etc.
    """
    all_ids = location_ids = self.connection.runQuery(sql.interp(
        """SELECT id
        FROM object
        WHERE location_id = %s
        """, container_id))
    # Bugfix: guard against an empty IN() list -- previously the recursive
    # query ran even when the container was empty, producing malformed SQL.
    while(recurse and location_ids):
        location_ids = self.connection.runQuery(sql.interp(
            """SELECT id
            FROM object
            WHERE location_id IN %s
            """, [x['id'] for x in location_ids]))
        if(location_ids):
            all_ids.extend(location_ids)
        else:
            recurse = False
    result = self.instantiate('object', *all_ids)
    # a single row instantiates to a bare object; normalize to a list
    return [result] if isinstance(result, interface.Object) else result
def find(self, container_id, name):
    """
    Find an object immediately inside the provided container.

    Matches, case-insensitively, on any of: the object's own name, a
    'name' property defined on the object, or one of its aliases.
    """
    match_ids = self.connection.runQuery(sql.interp(
        """SELECT id
        FROM object
        WHERE LOWER(name) = LOWER(%s)
        AND location_id = %s
        """, name, container_id))
    # The search term is wrapped in double quotes here -- presumably
    # property values are stored JSON-encoded; TODO confirm encoding.
    match_ids.extend(self.connection.runQuery(sql.interp(
        """SELECT o.id
        FROM property p
        INNER JOIN object o ON p.origin_id = o.id
        WHERE p.name = 'name'
        AND LOWER(p.value) = LOWER(%s)
        AND o.location_id = %s
        """, '"%s"' % name, container_id)))
    match_ids.extend(self.connection.runQuery(sql.interp(
        """SELECT o.id
        FROM object o
        INNER JOIN object_alias oa ON oa.object_id = o.id
        WHERE LOWER(oa.alias) = LOWER(%s)
        AND o.location_id = %s
        """, name, container_id)))
    return self.instantiate('object', *match_ids)
def contains(self, container_id, object_id, recurse=False):
    """
    Is the provided object immediately contained by the provided container object?
    Optionally supply recurse=True to check for any containment.
    """
    # The ORDER BY CASE sorts the sought object to the front when present,
    # so only the first returned row ever needs to be inspected.
    location_ids = self.connection.runQuery(sql.interp(
        """SELECT id
        FROM object
        WHERE location_id = %s
        ORDER BY CASE WHEN id = %s THEN 0 ELSE 1 END
        """, container_id, object_id))
    if(location_ids and location_ids[0]['id'] == object_id):
        return True
    # descend one containment level per iteration until found or exhausted
    while(recurse):
        container_ids = [x['id'] for x in location_ids]
        if(container_ids):
            location_ids = self.connection.runQuery(sql.interp(
                """SELECT id
                FROM object
                WHERE location_id IN %s
                ORDER BY CASE WHEN id = %s THEN 0 ELSE 1 END
                """, container_ids, object_id))
        if(location_ids):
            if(location_ids[0]['id'] == object_id):
                return True
        else:
            recurse = False
    return False
def get_access(self, object_id, type):
    """
    Return the access list for a particular entity.

    @param object_id: id of the entity whose rules are fetched
    @param type: one of 'object', 'verb' or 'property'; selects which
        foreign-key column of the access table to match
    @raise ValueError: for any other type value
    """
    # `type` is interpolated directly into the SQL as a column-name prefix;
    # whitelist it so a bad caller can't inject arbitrary SQL.
    if(type not in ('object', 'verb', 'property')):
        raise ValueError("Invalid access type: %s" % type)
    return self.connection.runQuery(sql.interp(
        """SELECT a.*, p.name AS permission_name
        FROM access a
        INNER JOIN permission p ON a.permission_id = p.id
        WHERE %s_id = %%s
        ORDER BY a.weight
        """ % type, object_id))
def update_access(self, access_id, rule, access, accessor, permission, weight, subject, deleted):
    """
    Modify an access rule.

    @param access_id: id of an existing rule, or falsy to create a new one
    @param rule: 'allow' or 'deny'
    @param access: rule type, 'group' or 'accessor'
    @param accessor: a group name string, or an object with get_id()
    @param permission: permission name; must already exist
    @param weight: evaluation weight for the rule
    @param subject: the object/verb/property the rule applies to
    @param deleted: when truthy, delete the rule instead of saving it
    """
    # load the existing row (with its permission name) when editing
    record = {} if not access_id else self.connection.runQuery(sql.interp(
        """SELECT a.*, p.name AS permission
        FROM access a
        INNER JOIN permission p ON a.permission_id = p.id
        WHERE a.id = %s
        """, access_id))
    if(record):
        record = record[0]
    else:
        record = {}
    if(deleted):
        self.connection.runOperation(sql.build_delete('access', id=access_id))
        return
    record['rule'] = rule
    record['type'] = access
    record['weight'] = weight
    # `group` is a reserved word; quote it per-backend
    quoted_group = '`group`' if self.connection.isType('mysql') else '"group"'
    # drop any unquoted copy loaded from the SELECT above
    record.pop('group', '')
    if(access == 'group'):
        record[quoted_group] = accessor
        record['accessor_id'] = None
    else:
        record[quoted_group] = None
        record['accessor_id'] = accessor.get_id()
    # only re-resolve the permission id when the name actually changed
    if(record.pop('permission', '') != permission):
        if(permission not in self.permission_list):
            raise ValueError("Unknown permission: %s" % permission)
        record['permission_id'] = self.permission_list[permission]
    # attach the rule to the matching subject column
    if(subject.get_type() == 'object'):
        record['object_id'] = subject.get_id()
    elif(subject.get_type() == 'verb'):
        record['verb_id'] = subject.get_id()
    elif(subject.get_type() == 'property'):
        record['property_id'] = subject.get_id()
    if(access_id):
        self.connection.runOperation(sql.build_update('access', record, dict(id=access_id)))
    else:
        self.connection.runOperation(sql.build_insert('access', **record))
def is_allowed(self, accessor, permission, subject):
    """
    Is `accessor` allowed to do `permission` on `subject`?.

    Rules are fetched heaviest-first; each matching rule overwrites the
    running result, so the last (lightest) matching rule wins.
    """
    if(permission not in self.permission_list):
        import warnings
        warnings.warn("Unknown permission encountered: %s" % permission)
        return False
    permission_id = self.permission_list[permission]
    # 'anything' rules apply regardless of the requested permission
    anything_id = self.permission_list['anything']
    access_query = sql.build_select('access', dict(
        object_id = subject.get_id() if isinstance(subject, interface.Object) else None,
        verb_id = subject.get_id() if isinstance(subject, interface.Verb) else None,
        property_id = subject.get_id() if isinstance(subject, interface.Property) else None,
        permission_id = (permission_id, anything_id),
        __order_by = 'weight DESC',
    ))
    access = self.connection.runQuery(access_query)
    result = False
    for rule in access:
        rule_type = (rule['rule'] == 'allow')
        if(rule['type'] == 'group'):
            if(rule['group'] not in group_definitions):
                # Bugfix: previously formatted with rule['accessor'], a key
                # access rows don't have, so an unknown group raised
                # KeyError instead of the intended ValueError.
                raise ValueError("Unknown group: %s" % rule['group'])
            if(group_definitions[rule['group']](self, accessor, subject)):
                result = rule_type
        elif(rule['type'] == 'accessor'):
            if(rule['accessor_id'] == accessor.get_id()):
                result = rule_type
    return result
def allow(self, subject, accessor, permission, create=False):
    """
    Add an allow rule.
    """
    self._grant('allow', subject, accessor, permission, create=create)
def deny(self, subject, accessor, permission, create=False):
    """
    Add a deny rule.
    """
    self._grant('deny', subject, accessor, permission, create=create)
def _grant(self, rule, subject, accessor, permission, create=False):
    """
    Add an access rule.

    @param rule: 'allow' or 'deny'
    @param subject: the object/verb/property the rule protects
    @param accessor: an int object id, or a group name string
    @param permission: permission name
    @param create: when True, create the permission if it doesn't exist
    """
    if(isinstance(accessor, str) and accessor not in group_definitions):
        raise ValueError("Unknown group: %s" % accessor)
    if(permission in self.permission_list):
        permission_id = self.permission_list[permission]
    elif(create):
        self.connection.runOperation(sql.build_insert('permission', dict(
            name = permission
        )))
        permission_id = self.connection.getLastInsertId('permission')
    else:
        raise ValueError("No such permission %r" % permission)
    # `group` is a reserved word; quote it per-backend
    quoted_group = '`group`' if self.connection.isType('mysql') else '"group"'
    self.connection.runOperation(sql.build_insert('access', {
        # exactly one of these three subject columns will be non-NULL
        'object_id' : subject.get_id() if isinstance(subject, interface.Object) else None,
        'verb_id' : subject.get_id() if isinstance(subject, interface.Verb) else None,
        'property_id' : subject.get_id() if isinstance(subject, interface.Property) else None,
        'rule' : rule,
        'permission_id' : permission_id,
        # an int accessor targets a specific object; a str names a group
        'type' : 'accessor' if isinstance(accessor, int) else 'group',
        'accessor_id' : accessor if isinstance(accessor, int) else None,
        quoted_group : accessor if isinstance(accessor, str) else None,
        'weight' : 0,
    }))
def validate_password(self, avatar_id, password):
    """
    Match the given password for the provided avatar.

    Returns False when no player row exists for the avatar.
    """
    saved_crypt = self.connection.runQuery(sql.interp(
        """SELECT crypt
        FROM player
        WHERE avatar_id = %s
        """, avatar_id))
    if not(saved_crypt):
        return False
    saved_crypt = saved_crypt[0]['crypt']
    # Classic crypt(3): the first two characters of the stored value are
    # the salt. NOTE(review): set_player() stores hash_password() output --
    # confirm both schemes produce crypt-compatible strings.
    return crypt.crypt(password, saved_crypt[0:2]) == saved_crypt
def iterate_task(self, responder):
    """
    Check for waiting tasks using the given ampoule TransactionChild.
    Returns False if there's no task waiting
    Returns None if an exception occurs
    Returns True if it processes a task
    """
    # NOTE(review): `interval '1 second'` is PostgreSQL syntax; this query
    # will likely fail on MySQL/SQLite -- confirm tasks are pg-only.
    next_task = self.connection.runQuery(
        """SELECT t.*
        FROM task t
        WHERE t.created + (t.delay * interval '1 second') < NOW()
        AND t.killed = 0
        ORDER BY t.created ASC
        LIMIT 1
        """)
    if not(next_task):
        return False
    try:
        responder.run_task(
            user_id = next_task[0]['user_id'],
            task_id = next_task[0]['id'],
        )
    except Exception as e:
        import traceback
        trace = traceback.format_exc()
        err = '%s: %s' % (e.__class__.__name__, str(e))
        # mark the task killed with failure details instead of retrying forever
        self.connection.runOperation(sql.build_update('task', dict(killed=True, error=err, trace=trace), dict(id=next_task[0]['id'])))
        return None
    else:
        # success: consume and remove the task
        self.connection.runOperation(sql.build_delete('task', dict(id=next_task[0]['id'])))
        return True
def register_task(self, user_id, delay, origin_id, verb_name, args, kwargs):
    """
    Register a delayed verb call.

    Returns the id of the newly created task record.
    """
    record = dict(
        user_id = user_id,
        delay = delay,
        origin_id = origin_id,
        verb_name = verb_name,
        args = args,
        kwargs = kwargs,
    )
    self.connection.runOperation(sql.build_insert('task', record))
    return self.connection.getLastInsertId('task')
def get_task(self, task_id):
    """
    Fetch the record for the provided task id, or None if not found.
    """
    rows = self.connection.runQuery(sql.build_select('task', id=task_id))
    if not(rows):
        return None
    return rows[0]
def get_tasks(self, user_id=None):
    """
    Get a list of waiting tasks, optionally limited to one user.
    """
    query = sql.build_select('task', user_id=user_id) if user_id else sql.build_select('task')
    return self.connection.runQuery(query)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/exchange.py",
"copies": "1",
"size": "48923",
"license": "mit",
"hash": 149457760698841860,
"line_mean": 36.6041506533,
"line_max": 149,
"alpha_frac": 0.519019684,
"autogenerated": false,
"ratio": 4.340993788819876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5360013472819876,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.