text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'Daan Wierstra and Tom Schaul'
from pybrain.rl.learners.blackboxoptimizers.blackboxoptimizer import BlackBoxOptimizer
from pybrain.tools.rankingfunctions import TopLinearRanking
from pybrain.utilities import flat2triu, triu2flat
from scipy import eye, multiply, ones, dot, array, outer, rand, zeros, diag, reshape, randn, exp
from scipy.linalg import cholesky, inv, det
class VanillaGradientEvolutionStrategies(BlackBoxOptimizer):
""" Vanilla gradient-based evolution strategy. """
# mandatory parameters
online = False
learningRate = 1.
learningRateSigma = None # default: the same than learningRate
initialFactorSigma = None # default: identity matrix
# NOT YET SUPPORTED:
diagonalOnly = False
batchSize = 100
momentum = None
elitism = False
shapingFunction = TopLinearRanking(topFraction = 0.5)
# initialization parameters
rangemins = None
rangemaxs = None
initCovariances = None
vanillaScale = False
# use of importance sampling to get away with fewer samples:
importanceMixing = True
forcedRefresh = 0.01
def __init__(self, evaluator, evaluable, **parameters):
BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
self.numParams = self.xdim + self.xdim * (self.xdim+1) / 2
if self.momentum != None:
self.momentumVector = zeros(self.numParams)
if self.learningRateSigma == None:
self.learningRateSigma = self.learningRate
if self.rangemins == None:
self.rangemins = -ones(self.xdim)
if self.rangemaxs == None:
self.rangemaxs = ones(self.xdim)
if self.initCovariances == None:
if self.diagonalOnly:
self.initCovariances = ones(self.xdim)
else:
self.initCovariances = eye(self.xdim)
self.x = rand(self.xdim) * (self.rangemaxs-self.rangemins) + self.rangemins
self.sigma = dot(eye(self.xdim), self.initCovariances)
self.factorSigma = cholesky(self.sigma)
self.reset()
def reset(self):
self.generation = 0
self.evalsDone = 0
# keeping track of history
self.allSamples = []
self.allFitnesses = []
self.allPs = []
self.allGenerated = [0]
self.allCenters = [self.x.copy()]
self.allFactorSigmas = [self.factorSigma.copy()]
# for baseline computation
self.phiSquareWindow = zeros((self.batchSize, self.numParams))
def _produceNewSample(self, z = None, p = None):
if z == None:
p = randn(self.xdim)
z = dot(self.factorSigma.T, p) + self.x
if p == None:
p = dot(inv(self.factorSigma).T, (z-self.x))
self.allPs.append(p)
self.allSamples.append(z)
fit = self.evaluator(z)
self.evalsDone += 1
self.allFitnesses.append(fit)
if fit > self.bestEvaluation:
self.bestEvaluation = fit
self.bestEvaluable = z.copy()
return z, fit
def _produceSamples(self):
""" Append batchsize new samples and evaluate them. """
if self.generation == 0 or not self.importanceMixing:
for _ in range(self.batchSize):
self._produceNewSample()
self.allGenerated.append(self.batchSize + self.allGenerated[-1])
else:
olds = len(self.allSamples)
oldDetFactorSigma = det(self.allFactorSigmas[-2])
newDetFactorSigma = det(self.factorSigma)
invA = inv(self.factorSigma)
# All pdfs computed here are off by a coefficient of 1/power(2.0*pi, self.numParams/2.)
# but as only their relative values matter, we ignore it.
# stochastically reuse old samples, according to the change in distribution
for s in range(olds-self.batchSize, olds):
oldPdf = exp(-0.5*dot(self.allPs[s],self.allPs[s])) / oldDetFactorSigma
sample = self.allSamples[s]
newPs = dot(invA.T, (sample-self.x))
newPdf = exp(-0.5*dot(newPs,newPs)) / newDetFactorSigma
r = rand()
if r < (1-self.forcedRefresh) * newPdf / oldPdf:
self.allSamples.append(sample)
self.allFitnesses.append(self.allFitnesses[s])
self.allPs.append(newPs)
# never use only old samples
if (olds+self.batchSize) - len(self.allSamples) < self.batchSize * self.forcedRefresh:
break
self.allGenerated.append(self.batchSize - (len(self.allSamples) - olds) + self.allGenerated[-1])
# add the remaining ones
oldInvA = inv(self.allFactorSigmas[-2])
while len(self.allSamples) < olds + self.batchSize:
r = rand()
if r < self.forcedRefresh:
self._produceNewSample()
else:
p = randn(self.xdim)
newPdf = exp(-0.5*dot(p,p)) / newDetFactorSigma
sample = dot(self.factorSigma.T, p) + self.x
oldPs = dot(oldInvA.T, (sample-self.allCenters[-2]))
oldPdf = exp(-0.5*dot(oldPs,oldPs)) / oldDetFactorSigma
if r < 1 - oldPdf/newPdf:
self._produceNewSample(sample, p)
def _batchLearn(self, maxSteps):
""" Batch learning. """
while (self.evalsDone < maxSteps
and not self.bestEvaluation >= self.desiredEvaluation):
# produce samples and evaluate them
try:
self._produceSamples()
# shape their fitnesses
shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
# update parameters (unbiased: divide by batchsize)
update = self._calcBatchUpdate(shapedFits)
if self.elitism:
self.x = self.bestEvaluable
else:
self.x += self.learningRate * update[:self.xdim]
self.factorSigma += self.learningRateSigma * flat2triu(update[self.xdim:], self.xdim)
self.sigma = dot(self.factorSigma.T, self.factorSigma)
except ValueError:
print 'Numerical Instability. Stopping.'
break
if self._hasConverged():
print 'Premature convergence. Stopping.'
break
if self.verbose:
print 'G:', self.generation, 'Evals:', self.evalsDone, 'MaxG:', max(self.allFitnesses[-self.batchSize:])
self.allCenters.append(self.x.copy())
self.allFactorSigmas.append(self.factorSigma.copy())
self.generation += 1
def _learnStep(self):
""" Online learning. """
# produce one sample and evaluate
self._produceNewSample()
if len(self.allSamples) <= self.batchSize:
return
# shape the fitnesses of the last samples
shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
# update parameters
update = self._calcOnlineUpdate(shapedFits)
self.x += self.learningRate * update[:self.xdim]
self.factorSigma += self.learningRateSigma * reshape(update[self.xdim:], (self.xdim, self.xdim))
self.sigma = dot(self.factorSigma.T, self.factorSigma)
if len(self.allSamples) % self.batchSize == 0:
self.generation += 1
print self.generation, len(self.allSamples), max(self.allFitnesses[-self.batchSize:])
def _calcBatchUpdate(self, fitnesses):
gradient = self._calcVanillaBatchGradient(self.allSamples[-self.batchSize:], fitnesses)
if self.momentum != None:
self.momentumVector *= self.momentum
self.momentumVector += gradient
return self.momentumVector
else:
return gradient
def _calcOnlineUpdate(self, fitnesses):
gradient = self._calcVanillaOnlineGradient(self.allSamples[-1], fitnesses[-self.batchSize:])
if self.momentum != None:
self.momentumVector *= self.momentum
self.momentumVector += gradient
return self.momentumVector
else:
return gradient
def _logDerivX(self, sample, x, invSigma):
return dot(invSigma, (sample - x))
def _logDerivsX(self, samples, x, invSigma):
samplesArray = array(samples)
tmpX = multiply(x, ones((len(samplesArray), self.xdim)))
return dot(invSigma, (samplesArray - tmpX).T).T
def _logDerivFactorSigma(self, sample, x, invSigma, factorSigma):
logDerivSigma = 0.5 * dot(dot(invSigma, outer(sample-x, sample-x)), invSigma) - 0.5 * invSigma
if self.vanillaScale:
logDerivSigma = multiply(outer(diag(abs(self.factorSigma)), diag(abs(self.factorSigma))), logDerivSigma)
return triu2flat(dot(factorSigma, (logDerivSigma+logDerivSigma.T)))
def _logDerivsFactorSigma(self, samples, x, invSigma, factorSigma):
return [self._logDerivFactorSigma(sample, x, invSigma, factorSigma) for sample in samples]
def _calcVanillaBatchGradient(self, samples, shapedfitnesses):
invSigma = inv(self.sigma)
phi = zeros((len(samples), self.numParams))
phi[:, :self.xdim] = self._logDerivsX(samples, self.x, invSigma)
logDerivFactorSigma = self._logDerivsFactorSigma(samples, self.x, invSigma, self.factorSigma)
phi[:, self.xdim:] = array(logDerivFactorSigma)
Rmat = outer(shapedfitnesses, ones(self.numParams))
# optimal baseline
self.phiSquareWindow = multiply(phi, phi)
baselineMatrix = self._calcBaseline(shapedfitnesses)
gradient = sum(multiply(phi, (Rmat - baselineMatrix)), 0)
return gradient
def _calcVanillaOnlineGradient(self, sample, shapedfitnesses):
invSigma = inv(self.sigma)
phi = zeros(self.numParams)
phi[:self.xdim] = self._logDerivX(sample, self.x, invSigma)
logDerivSigma = self._logDerivFactorSigma(sample, self.x, invSigma, self.factorSigma)
phi[self.xdim:] = logDerivSigma.flatten()
index = len(self.allSamples) % self.batchSize
self.phiSquareWindow[index] = multiply(phi, phi)
baseline = self._calcBaseline(shapedfitnesses)
gradient = multiply((ones(self.numParams)*shapedfitnesses[-1] - baseline), phi)
return gradient
def _calcBaseline(self, shapedfitnesses):
paramWeightings = dot(ones(self.batchSize), self.phiSquareWindow)
baseline = dot(shapedfitnesses, self.phiSquareWindow) / paramWeightings
return baseline
def _hasConverged(self):
""" When the largest eigenvalue is smaller than 10e-20, we assume the
algorithms has converged. """
eigs = abs(diag(self.factorSigma))
return min(eigs) < 1e-10
def _revertToSafety(self):
""" When encountering a bad matrix, this is how we revert to a safe one. """
self.factorSigma = eye(self.xdim)
self.x = self.bestEvaluable
self.allFactorSigmas[-1][:] = self.factorSigma
self.sigma = dot(self.factorSigma.T, self.factorSigma)
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/rl/learners/blackboxoptimizers/ves.py",
"copies": "1",
"size": "11775",
"license": "bsd-3-clause",
"hash": -8266704419852137000,
"line_mean": 39.4639175258,
"line_max": 120,
"alpha_frac": 0.5894692144,
"autogenerated": false,
"ratio": 3.946045576407507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035514790807507,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from pybrain.utilities import abstractMethod, Named
from pybrain.structure.moduleslice import ModuleSlice
class Connection(Named):
    """ Links two modules: feeds the output of the first module into the
    input of the second, optionally transforming the data on the way.
    Errors are transmitted backwards between the same two modules. """

    inmod = None
    outmod = None
    paramdim = 0

    def __init__(self, inmod, outmod, name = None,
                 inSliceFrom = 0, inSliceTo = None, outSliceFrom = 0, outSliceTo = None):
        """ A connection always needs an input and an output module; slices
        on their buffers may optionally be specified.

        :arg inmod: input module
        :arg outmod: output module
        :key inSliceFrom: starting index on the buffer of inmod (default = 0)
        :key inSliceTo: ending index on the buffer of inmod (default = last)
        :key outSliceFrom: starting index on the buffer of outmod (default = 0)
        :key outSliceTo: ending index on the buffer of outmod (default = last)
        """
        self._name = name
        self.inSliceFrom = inSliceFrom
        self.outSliceFrom = outSliceFrom
        # missing end bounds default to the full buffer widths
        self.inSliceTo = inSliceTo if inSliceTo is not None else inmod.outdim
        self.outSliceTo = outSliceTo if outSliceTo is not None else outmod.indim
        # a ModuleSlice stands in for a window on its base module: connect
        # to the base and shift the slice bounds by the slice's offset
        if isinstance(inmod, ModuleSlice):
            self.inmod = inmod.base
            self.inSliceFrom += inmod.outOffset
            self.inSliceTo += inmod.outOffset
        else:
            self.inmod = inmod
        if isinstance(outmod, ModuleSlice):
            self.outmod = outmod.base
            self.outSliceFrom += outmod.inOffset
            self.outSliceTo += outmod.inOffset
        else:
            self.outmod = outmod
        self.indim = self.inSliceTo - self.inSliceFrom
        self.outdim = self.outSliceTo - self.outSliceFrom
        # record constructor arguments for xml serialization; slice bounds
        # are only recorded when they differ from the defaults
        self.setArgs(inmod = self.inmod, outmod = self.outmod)
        if self.inSliceFrom > 0:
            self.setArgs(inSliceFrom = self.inSliceFrom)
        if self.outSliceFrom > 0:
            self.setArgs(outSliceFrom = self.outSliceFrom)
        if self.inSliceTo < self.inmod.outdim:
            self.setArgs(inSliceTo = self.inSliceTo)
        if self.outSliceTo < self.outmod.indim:
            self.setArgs(outSliceTo = self.outSliceTo)

    def forward(self, inmodOffset=0, outmodOffset=0):
        """Forward-propagate: take the (sliced) output buffer of the incoming
        module at row inmodOffset, transform it if applicable, and add it to
        the (sliced) input buffer of the outgoing module at row outmodOffset."""
        src = self.inmod.outputbuffer[inmodOffset, self.inSliceFrom:self.inSliceTo]
        dst = self.outmod.inputbuffer[outmodOffset, self.outSliceFrom:self.outSliceTo]
        self._forwardImplementation(src, dst)

    def backward(self, inmodOffset=0, outmodOffset=0):
        """Back-propagate: take the error found at the outgoing module (row
        outmodOffset), apply the inverse of the forward transformation, and
        add it to the incoming module's output-error buffer (row inmodOffset).
        If appropriate, also compute the parameter derivatives."""
        outerr = self.outmod.inputerror[outmodOffset, self.outSliceFrom:self.outSliceTo]
        inerr = self.inmod.outputerror[inmodOffset, self.inSliceFrom:self.inSliceTo]
        inbuf = self.inmod.outputbuffer[inmodOffset, self.inSliceFrom:self.inSliceTo]
        self._backwardImplementation(outerr, inerr, inbuf)

    def _forwardImplementation(self, inbuf, outbuf):
        # to be provided by concrete subclasses
        abstractMethod()

    def _backwardImplementation(self, outerr, inerr, inbuf):
        # to be provided by concrete subclasses
        abstractMethod()

    def __repr__(self):
        """A simple representation (this should probably be expanded by
        subclasses). """
        params = {
            'class': self.__class__.__name__,
            'name': self.name,
            'inmod': self.inmod.name,
            'outmod': self.outmod.name
        }
        return "<%(class)s '%(name)s': '%(inmod)s' -> '%(outmod)s'>" % params
"repo_name": "cmorgan/pybrain",
"path": "pybrain/structure/connections/connection.py",
"copies": "31",
"size": "4459",
"license": "bsd-3-clause",
"hash": -1020667426499859800,
"line_mean": 39.5454545455,
"line_max": 128,
"alpha_frac": 0.642296479,
"autogenerated": false,
"ratio": 4.024368231046932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from pybrain.utilities import abstractMethod, Named
from pybrain.structure.moduleslice import ModuleSlice
class Connection(Named):
    """ A connection links 2 modules, more precisely: the output of the first module
    to the input of the second. It can potentially transform the information on the way.
    It also transmits errors backwards between the same modules. """

    inmod = None
    outmod = None
    paramdim = 0

    def __init__(self, inmod, outmod, name = None,
                 inSliceFrom = 0, inSliceTo = None, outSliceFrom = 0, outSliceTo = None):
        """ Every connection requires an input and an output module. Optionally, it is possible to define slices on the buffers.

        @param inmod: input module
        @param outmod: output module
        @param inSliceFrom: starting index on the buffer of inmod (default = 0)
        @param inSliceTo: ending index on the buffer of inmod (default = last)
        @param outSliceFrom: starting index on the buffer of outmod (default = 0)
        @param outSliceTo: ending index on the buffer of outmod (default = last)
        """
        self._name = name
        self.inSliceFrom = inSliceFrom
        self.outSliceFrom = outSliceFrom
        # BUGFIX: compare against None explicitly -- an explicit slice bound
        # of 0 is falsy and was previously replaced by the default.
        if inSliceTo is not None:
            self.inSliceTo = inSliceTo
        else:
            self.inSliceTo = inmod.outdim
        if outSliceTo is not None:
            self.outSliceTo = outSliceTo
        else:
            self.outSliceTo = outmod.indim
        # when connecting to a ModuleSlice, use its base module and shift
        # the slice bounds by the slice's offset
        if isinstance(inmod, ModuleSlice):
            self.inmod = inmod.base
            self.inSliceFrom += inmod.outOffset
            self.inSliceTo += inmod.outOffset
        else:
            self.inmod = inmod
        if isinstance(outmod, ModuleSlice):
            self.outmod = outmod.base
            self.outSliceFrom += outmod.inOffset
            self.outSliceTo += outmod.inOffset
        else:
            self.outmod = outmod
        self.indim = self.inSliceTo - self.inSliceFrom
        self.outdim = self.outSliceTo - self.outSliceFrom
        # arguments for for xml; only non-default slice bounds are recorded
        self.setArgs(inmod = self.inmod, outmod = self.outmod)
        if self.inSliceFrom > 0:
            self.setArgs(inSliceFrom = self.inSliceFrom)
        if self.outSliceFrom > 0:
            self.setArgs(outSliceFrom = self.outSliceFrom)
        if self.inSliceTo < self.inmod.outdim:
            self.setArgs(inSliceTo = self.inSliceTo)
        if self.outSliceTo < self.outmod.indim:
            self.setArgs(outSliceTo = self.outSliceTo)

    def forward(self, inmodOffset=0, outmodOffset=0):
        """Propagate the information from the incoming module's output buffer,
        adding it to the outgoing node's input buffer, and possibly transforming
        it on the way.

        For this transformation use inmodOffset as an offset for the inmod and
        outmodOffset as an offset for the outmodules offset."""
        self._forwardImplementation(
            self.inmod.outputbuffer[inmodOffset, self.inSliceFrom:self.inSliceTo],
            self.outmod.inputbuffer[outmodOffset, self.outSliceFrom:self.outSliceTo])

    def backward(self, inmodOffset=0, outmodOffset=0):
        """Propagate the error found at the outgoing module, adding it to the
        incoming module's output-error buffer and doing the inverse
        transformation of forward propagation.

        For this transformation use inmodOffset as an offset for the inmod and
        outmodOffset as an offset for the outmodules offset.

        If appropriate, also compute the parameter derivatives. """
        self._backwardImplementation(
            self.outmod.inputerror[outmodOffset, self.outSliceFrom:self.outSliceTo],
            self.inmod.outputerror[inmodOffset, self.inSliceFrom:self.inSliceTo],
            self.inmod.outputbuffer[inmodOffset, self.inSliceFrom:self.inSliceTo])

    def _forwardImplementation(self, inbuf, outbuf):
        # to be provided by concrete subclasses
        abstractMethod()

    def _backwardImplementation(self, outerr, inerr, inbuf):
        # to be provided by concrete subclasses
        abstractMethod()

    def __repr__(self):
        """A simple representation (this should probably be expanded by
        subclasses). """
        params = {
            'class': self.__class__.__name__,
            'name': self.name,
            'inmod': self.inmod.name,
            'outmod': self.outmod.name
        }
        return "<%(class)s '%(name)s': '%(inmod)s' -> '%(outmod)s'>" % params
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/structure/connections/connection.py",
"copies": "1",
"size": "4555",
"license": "bsd-3-clause",
"hash": 4645916295458093000,
"line_mean": 40.7981651376,
"line_max": 128,
"alpha_frac": 0.627442371,
"autogenerated": false,
"ratio": 4.118444846292948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02099885028328187,
"num_lines": 109
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import append, zeros
from pybrain.utilities import abstractMethod, Named
class Module(Named):
    """A module has an input and an output buffer and does some processing
    to produce the output from the input -- the "forward" method.
    Optionally it can have a "backward" method too, which processes a given
    output error to derive the input error.

    Input, output and errors are (flat) scipy arrays.

    A module memorizes the buffers for all input-output pairs it encounters
    until .reset() is called."""

    # Flag that marks modules that treat a sequence of samples not as
    # independent.
    sequential = False

    # Flag which at the same time provides info on how many trainable parameters
    # the module might contain.
    paramdim = 0

    # An offset that is added upon any array access. Useful for implementing
    # things like time.
    offset = 0

    # List of (buffername, dimension) pairs; subclasses may append to it
    # before or after Module.__init__ runs (see the note in __init__).
    bufferlist = None

    def __init__(self, indim, outdim, name=None, **args):
        """Create a Module with an input dimension of indim and an output
        dimension of outdim."""
        self.setArgs(name=name, **args)
        # Make sure that it does not matter wether Module.__init__ is called
        # before or after adding elements to bufferlist in subclasses.
        # TODO: it should be possible to use less than these buffers. For some
        # methods, an error is not completely necessary. (e.g. evolution)
        self.bufferlist = [] if not self.bufferlist else self.bufferlist
        self.bufferlist += [('inputbuffer', indim),
                            ('inputerror', indim),
                            ('outputbuffer', outdim),
                            ('outputerror', outdim), ]
        self.indim = indim
        self.outdim = outdim
        # Those buffers are 2D arrays (time, dim)
        self._resetBuffers()

    def _resetBuffers(self, length=1):
        """Reallocate all buffers as zeroed (length, dim) arrays; with the
        default length of 1 the time offset is rewound as well."""
        for buffername, dim in self.bufferlist:
            setattr(self, buffername, zeros((length, dim)))
        if length==1:
            self.offset = 0

    def _growBuffers(self):
        """Double the size of the modules buffers in its first dimension and
        keep the current values."""
        currentlength = getattr(self, self.bufferlist[0][0]).shape[0]
        # Save the current buffers
        tmp = [getattr(self, n) for n, _ in self.bufferlist]
        # Explicit Module._resetBuffers call -- presumably to bypass any
        # subclass override during the resize; TODO confirm.
        Module._resetBuffers(self, currentlength * 2)
        # copy the saved contents into the front half of the new buffers
        for previous, (buffername, _dim) in zip(tmp, self.bufferlist):
            buffer_ = getattr(self, buffername)
            buffer_[:currentlength] = previous

    def forward(self):
        """Produce the output from the input."""
        self._forwardImplementation(self.inputbuffer[self.offset],
                                    self.outputbuffer[self.offset])

    def backward(self):
        """Produce the input error from the output error."""
        self._backwardImplementation(self.outputerror[self.offset],
                                     self.inputerror[self.offset],
                                     self.outputbuffer[self.offset],
                                     self.inputbuffer[self.offset])

    def reset(self):
        """Set all buffers, past and present, to zero."""
        self.offset = 0
        for buffername, l in self.bufferlist:
            buf = getattr(self, buffername)
            # zero in place (broadcast over the time dimension), keeping size
            buf[:] = zeros(l)

    def shift(self, items):
        """Shift all buffers up or down a defined number of items on offset axis.
        Negative values indicate backward shift."""
        if items == 0:
            return
        self.offset += items
        for buffername, _ in self.bufferlist:
            buf = getattr(self, buffername)
            assert abs(items) <= len(buf), "Cannot shift further than length of buffer."
            # rows shifted past the edge are discarded; vacated rows are zeroed
            fill = zeros((abs(items), len(buf[0])))
            if items < 0:
                buf[:] = append(buf[-items:], fill, 0)
            else:
                buf[:] = append(fill ,buf[0:-items] , 0)

    def activateOnDataset(self, dataset):
        """Run the module's forward pass on the given dataset unconditionally
        and return the output."""
        dataset.reset()
        self.reset()
        out = zeros((len(dataset), self.outdim))
        for i, sample in enumerate(dataset):
            # FIXME: Can we always assume that sample[0] is the input data?
            out[i, :] = self.activate(sample[0])
        self.reset()
        dataset.reset()
        return out

    def activate(self, inpt):
        """Do one transformation of an input and return the result."""
        assert len(self.inputbuffer[self.offset]) == len(inpt), str((len(self.inputbuffer[self.offset]), len(inpt)))
        self.inputbuffer[self.offset] = inpt
        self.forward()
        # copy so the caller cannot mutate the internal buffer
        return self.outputbuffer[self.offset].copy()

    def backActivate(self, outerr):
        """Do one transformation of an output error outerr backward and return
        the error on the input."""
        self.outputerror[self.offset] = outerr
        self.backward()
        return self.inputerror[self.offset].copy()

    def _forwardImplementation(self, inbuf, outbuf):
        """Actual forward transformation function. To be overwritten in
        subclasses."""
        abstractMethod()

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        """Converse of the module's transformation function. Can be overwritten
        in subclasses, does not have to.

        Should also compute the derivatives of the parameters."""
        # NOTE(review): unlike _forwardImplementation there is no
        # abstractMethod() call here -- possibly truncated in this copy;
        # verify against upstream.
| {
"repo_name": "hassaanm/stock-trading",
"path": "src/pybrain/structure/modules/module.py",
"copies": "3",
"size": "5625",
"license": "apache-2.0",
"hash": -769360206077423600,
"line_mean": 38.6126760563,
"line_max": 116,
"alpha_frac": 0.6083555556,
"autogenerated": false,
"ratio": 4.39453125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001302556904634008,
"num_lines": 142
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import dot, argmax
from random import shuffle
from trainer import Trainer
from pybrain.utilities import fListToString
from pybrain.auxiliary import GradientDescent
class BackpropTrainer(Trainer):
"""Trainer that trains the parameters of a module according to a
supervised dataset (potentially sequential) by backpropagating the errors
(through time)."""
def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
momentum=0., verbose=False, batchlearning=False,
weightdecay=0.):
"""Create a BackpropTrainer to train the specified `module` on the
specified `dataset`.
The learning rate gives the ratio of which parameters are changed into
the direction of the gradient. The learning rate decreases by `lrdecay`,
which is used to to multiply the learning rate after each training
step. The parameters are also adjusted with respect to `momentum`, which
is the ratio by which the gradient of the last timestep is used.
If `batchlearning` is set, the parameters are updated only at the end of
each epoch. Default is False.
`weightdecay` corresponds to the weightdecay rate, where 0 is no weight
decay at all.
"""
Trainer.__init__(self, module)
self.setData(dataset)
self.verbose = verbose
self.batchlearning = batchlearning
self.weightdecay = weightdecay
self.epoch = 0
self.totalepochs = 0
# set up gradient descender
self.descent = GradientDescent()
self.descent.alpha = learningrate
self.descent.momentum = momentum
self.descent.alphadecay = lrdecay
self.descent.init(module.params)
def train(self):
"""Train the associated module for one epoch."""
assert len(self.ds) > 0, "Dataset cannot be empty."
self.module.resetDerivatives()
errors = 0
ponderation = 0.
shuffledSequences = []
for seq in self.ds._provideSequences():
shuffledSequences.append(seq)
shuffle(shuffledSequences)
for seq in shuffledSequences:
e, p = self._calcDerivs(seq)
errors += e
ponderation += p
if not self.batchlearning:
gradient = self.module.derivs - self.weightdecay * self.module.params
new = self.descent(gradient, errors)
if new is not None:
self.module.params[:] = new
self.module.resetDerivatives()
if self.verbose:
print "Total error:", errors / ponderation
if self.batchlearning:
self.module._setParameters(self.descent(self.module.derivs))
self.epoch += 1
self.totalepochs += 1
return errors / ponderation
def _calcDerivs(self, seq):
"""Calculate error function and backpropagate output errors to yield
the gradient."""
self.module.reset()
for sample in seq:
self.module.activate(sample[0])
error = 0
ponderation = 0.
for offset, sample in reversed(list(enumerate(seq))):
# need to make a distinction here between datasets containing
# importance, and others
target = sample[1]
outerr = target - self.module.outputbuffer[offset]
if len(sample) > 2:
importance = sample[2]
error += 0.5 * dot(importance, outerr ** 2)
ponderation += sum(importance)
self.module.backActivate(outerr * importance)
else:
error += 0.5 * sum(outerr ** 2)
ponderation += len(target)
# FIXME: the next line keeps arac from producing NaNs. I don't
# know why that is, but somehow the __str__ method of the
# ndarray class fixes something,
str(outerr)
self.module.backActivate(outerr)
return error, ponderation
def _checkGradient(self, dataset=None, silent=False):
"""Numeric check of the computed gradient for debugging purposes."""
if dataset:
self.setData(dataset)
res = []
for seq in self.ds._provideSequences():
self.module.resetDerivatives()
self._calcDerivs(seq)
e = 1e-6
analyticalDerivs = self.module.derivs.copy()
numericalDerivs = []
for p in range(self.module.paramdim):
storedoldval = self.module.params[p]
self.module.params[p] += e
righterror, dummy = self._calcDerivs(seq)
self.module.params[p] -= 2 * e
lefterror, dummy = self._calcDerivs(seq)
approxderiv = (righterror - lefterror) / (2 * e)
self.module.params[p] = storedoldval
numericalDerivs.append(approxderiv)
r = zip(analyticalDerivs, numericalDerivs)
res.append(r)
if not silent:
print r
return res
def testOnData(self, dataset=None, verbose=False):
"""Compute the MSE of the module performance on the given dataset.
If no dataset is supplied, the one passed upon Trainer initialization is
used."""
if dataset == None:
dataset = self.ds
dataset.reset()
if verbose:
print '\nTesting on data:'
errors = []
importances = []
ponderatedErrors = []
for seq in dataset._provideSequences():
self.module.reset()
e, i = dataset._evaluateSequence(self.module.activate, seq, verbose)
importances.append(i)
errors.append(e)
ponderatedErrors.append(e / i)
if verbose:
print 'All errors:', ponderatedErrors
assert sum(importances) > 0
avgErr = sum(errors) / sum(importances)
if verbose:
print 'Average error:', avgErr
print ('Max error:', max(ponderatedErrors), 'Median error:',
sorted(ponderatedErrors)[len(errors) / 2])
return avgErr
def testOnClassData(self, dataset=None, verbose=False,
return_targets=False):
"""Return winner-takes-all classification output on a given dataset.
If no dataset is given, the dataset passed during Trainer
initialization is used. If return_targets is set, also return
corresponding target classes.
"""
if dataset == None:
dataset = self.ds
dataset.reset()
out = []
targ = []
for seq in dataset._provideSequences():
self.module.reset()
for input, target in seq:
res = self.module.activate(input)
out.append(argmax(res))
targ.append(argmax(target))
if return_targets:
return out, targ
else:
return out
def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
continueEpochs=10, validationProportion=0.25):
"""Train the module on the dataset until it converges.
Return the module with the parameters that gave the minimal validation
error.
If no dataset is given, the dataset passed during Trainer
initialization is used. validationProportion is the ratio of the dataset
that is used for the validation dataset.
If maxEpochs is given, at most that many epochs
are trained. Each time validation error hits a minimum, try for
continueEpochs epochs to find a better one."""
epochs = 0
if dataset == None:
dataset = self.ds
if verbose == None:
verbose = self.verbose
# Split the dataset randomly: validationProportion of the samples for
# validation.
trainingData, validationData = (
dataset.splitWithProportion(1 - validationProportion))
if not (len(trainingData) > 0 and len(validationData)):
raise ValueError("Provided dataset too small to be split into training " +
"and validation sets with proportion " + str(validationProportion))
self.ds = trainingData
bestweights = self.module.params.copy()
bestverr = self.testOnData(validationData)
trainingErrors = []
validationErrors = [bestverr]
while True:
trainingErrors.append(self.train())
validationErrors.append(self.testOnData(validationData))
if epochs == 0 or validationErrors[-1] < bestverr:
# one update is always done
bestverr = validationErrors[-1]
bestweights = self.module.params.copy()
if maxEpochs != None and epochs >= maxEpochs:
self.module.params[:] = bestweights
break
epochs += 1
if len(validationErrors) >= continueEpochs * 2:
# have the validation errors started going up again?
# compare the average of the last few to the previous few
old = validationErrors[-continueEpochs * 2:-continueEpochs]
new = validationErrors[-continueEpochs:]
if min(new) > max(old):
self.module.params[:] = bestweights
break
trainingErrors.append(self.testOnData(trainingData))
self.ds = dataset
if verbose:
print 'train-errors:', fListToString(trainingErrors, 6)
print 'valid-errors:', fListToString(validationErrors, 6)
return trainingErrors, validationErrors
| {
"repo_name": "rbalda/neural_ocr",
"path": "env/lib/python2.7/site-packages/pybrain/supervised/trainers/backprop.py",
"copies": "1",
"size": "10063",
"license": "mit",
"hash": 6585253547639681000,
"line_mean": 40.4115226337,
"line_max": 96,
"alpha_frac": 0.5789525986,
"autogenerated": false,
"ratio": 4.590784671532846,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5669737270132846,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import dot, rand, ones, eye, zeros, outer, isnan, multiply, argmax, product, log
from numpy.random import normal, multivariate_normal
from numpy import sort
from scipy.stats import norm
from copy import deepcopy
from pybrain.utilities import drawIndex, fListToString
from pybrain.tools.functions import multivariateNormalPdf
from pybrain.tools.rankingfunctions import TopLinearRanking
from pybrain.optimization.distributionbased.distributionbased import DistributionBasedOptimizer
class FEM(DistributionBasedOptimizer):
""" Fitness Expectation-Maximization (PPSN 2008).
"""
# fundamental parameters
numberOfCenters = 1
diagonalOnly = False
forgetFactor = 0.1
muMultiplier = 1.
windowSize = 50
adaptiveShaping = False
shapingFunction = TopLinearRanking(topFraction=0.5)
minimumCenterWeight = 0.01
# advanced improvement parameters
# elitism: always keep best mu in distribution
elitism = False
# sampleElitism: every $windowSize samples, produce best sample ever
sampleElitism = False
oneFifthRule = False
useAnticipatedMeanShift = False
# rank-mu update, presumably
doMadnessUpdate = False
mutative = False
# initialization parameters
rangemins = None
rangemaxs = None
initCovariances = None
def _additionalInit(self):
assert self.numberOfCenters == 1, 'Mixtures of Gaussians not supported yet.'
xdim = self.numParameters
self.alphas = ones(self.numberOfCenters) / float(self.numberOfCenters)
self.mus = []
self.sigmas = []
if self.rangemins == None:
self.rangemins = -ones(xdim)
if self.rangemaxs == None:
self.rangemaxs = ones(xdim)
if self.initCovariances == None:
if self.diagonalOnly:
self.initCovariances = ones(xdim)
else:
self.initCovariances = eye(xdim)
for _ in range(self.numberOfCenters):
self.mus.append(rand(xdim) * (self.rangemaxs - self.rangemins) + self.rangemins)
self.sigmas.append(dot(eye(xdim), self.initCovariances))
self.samples = range(self.windowSize)
self.fitnesses = zeros(self.windowSize)
self.generation = 0
self.allsamples = []
self.muevals = []
self.allmus = []
self.allsigmas = []
self.allalphas = []
self.allUpdateSizes = []
self.allfitnesses = []
self.meanShifts = [zeros((self.numParameters)) for _ in range(self.numberOfCenters)]
self._oneEvaluation(self._initEvaluable)
def _produceNewSample(self):
""" returns a new sample, its fitness and its densities """
chosenOne = drawIndex(self.alphas, True)
mu = self.mus[chosenOne]
if self.useAnticipatedMeanShift:
if len(self.allsamples) % 2 == 1 and len(self.allsamples) > 1:
if not(self.elitism and chosenOne == self.bestChosenCenter):
mu += self.meanShifts[chosenOne]
if self.diagonalOnly:
sample = normal(mu, self.sigmas[chosenOne])
else:
sample = multivariate_normal(mu, self.sigmas[chosenOne])
if self.sampleElitism and len(self.allsamples) > self.windowSize and len(self.allsamples) % self.windowSize == 0:
sample = self.bestEvaluable.copy()
fit = self._oneEvaluation(sample)
if ((not self.minimize and fit >= self.bestEvaluation)
or (self.minimize and fit <= self.bestEvaluation)
or len(self.allsamples) == 0):
# used to determine which center produced the current best
self.bestChosenCenter = chosenOne
self.bestSigma = self.sigmas[chosenOne].copy()
if self.minimize:
fit = -fit
self.allfitnesses.append(fit)
self.allsamples.append(sample)
return sample, fit
def _computeDensities(self, sample):
""" compute densities, and normalize """
densities = zeros(self.numberOfCenters)
for c in range(self.numberOfCenters):
if self.diagonalOnly:
pdf = product([norm.pdf(x, self.mus[c][i], self.sigmas[c][i]) for i, x in enumerate(sample)])
else:
pdf = multivariateNormalPdf(sample, self.mus[c], self.sigmas[c])
if pdf > 1e40:
pdf = 1e40
elif pdf < 1e-40:
pdf = 1e-40
if isnan(pdf):
print 'NaN!'
pdf = 0.
densities[c] = self.alphas[c] * pdf
densities /= sum(densities)
return densities
def _computeUpdateSize(self, densities, sampleIndex):
""" compute the the center-update-size for each sample
using transformed fitnesses """
# determine (transformed) fitnesses
transformedfitnesses = self.shapingFunction(self.fitnesses)
# force renormaliziation
transformedfitnesses /= max(transformedfitnesses)
updateSize = transformedfitnesses[sampleIndex] * densities
return updateSize * self.forgetFactor
def _updateMus(self, updateSize, lastSample):
for c in range(self.numberOfCenters):
oldmu = self.mus[c]
self.mus[c] *= 1. - self.muMultiplier * updateSize[c]
self.mus[c] += self.muMultiplier * updateSize[c] * lastSample
# don't update with the ones that were produced with a mean shift
if ((self.useAnticipatedMeanShift and len(self.allsamples) % self.windowSize == 1)
or (not self.useAnticipatedMeanShift and self.numberOfCenters > 1)):
self.meanShifts[c] *= 1. - self.forgetFactor
self.meanShifts[c] += self.mus[c] - oldmu
if self.doMadnessUpdate and len(self.allsamples) > 2 * self.windowSize:
self.mus[c] = zeros(self.numParameters)
updateSum = 0.
for i in range(self.windowSize):
self.mus[c] += self.allsamples[-i - 1] * self.allUpdateSizes[-i - 1][c]
updateSum += self.allUpdateSizes[-i - 1][c]
self.mus[c] /= updateSum
if self.elitism:
# dirty hack! TODO: koshify
self.mus[0] = self.bestEvaluable.copy()
def _updateSigmas(self, updateSize, lastSample):
for c in range(self.numberOfCenters):
self.sigmas[c] *= (1. - updateSize[c])
dif = self.mus[c] - lastSample
if self.diagonalOnly:
self.sigmas[c] += updateSize[c] * multiply(dif, dif)
else:
self.sigmas[c] += updateSize[c] * 1.2 * outer(dif, dif)
def _updateAlphas(self, updateSize):
for c in range(self.numberOfCenters):
x = updateSize[c]
x /= sum(updateSize)
self.alphas[c] = (1.0 - self.forgetFactor) * self.alphas[c] + self.forgetFactor * x
self.alphas /= sum(self.alphas)
for c in range(self.numberOfCenters):
if self.alphas[c] < self.minimumCenterWeight:
# center-splitting
if self.verbose:
print 'Split!'
bestCenter = argmax(self.alphas)
totalWeight = self.alphas[c] + self.alphas[bestCenter]
self.alphas[c] = totalWeight / 2
self.alphas[bestCenter] = totalWeight / 2
self.mus[c] = self.mus[bestCenter].copy()
self.sigmas[c] = 4.0 * self.sigmas[bestCenter].copy()
self.sigmas[bestCenter] *= 0.25
break
def _updateShaping(self):
""" Daan: "This won't work. I like it!" """
assert self.numberOfCenters == 1
possible = self.shapingFunction.getPossibleParameters(self.windowSize)
matchValues = []
pdfs = [multivariateNormalPdf(s, self.mus[0], self.sigmas[0])
for s in self.samples]
for p in possible:
self.shapingFunction.setParameter(p)
transformedFitnesses = self.shapingFunction(self.fitnesses)
#transformedFitnesses /= sum(transformedFitnesses)
sumValue = sum([x * log(y) for x, y in zip(pdfs, transformedFitnesses) if y > 0])
normalization = sum([x * y for x, y in zip(pdfs, transformedFitnesses) if y > 0])
matchValues.append(sumValue / normalization)
self.shapingFunction.setParameter(possible[argmax(matchValues)])
if len(self.allsamples) % 100 == 0:
print possible[argmax(matchValues)]
print fListToString(matchValues, 3)
def _learnStep(self):
k = len(self.allsamples) % self.windowSize
sample, fit = self._produceNewSample()
self.samples[k], self.fitnesses[k] = sample, fit
self.generation += 1
if len(self.allsamples) < self.windowSize:
return
if self.verbose and len(self.allsamples) % 100 == 0:
print len(self.allsamples), min(self.fitnesses), max(self.fitnesses)#, self.alphas
updateSize = self._computeUpdateSize(self._computeDensities(sample), k)
self.allUpdateSizes.append(deepcopy(updateSize))
if sum(updateSize) > 0:
# update parameters
if self.numberOfCenters > 1:
self._updateAlphas(updateSize)
if not self.mutative:
self._updateMus(updateSize, sample)
self._updateSigmas(updateSize, sample)
else:
self._updateSigmas(updateSize, sample)
self._updateMus(updateSize, sample)
if self.adaptiveShaping:
self._updateShaping()
# storage, e.g. for plotting
self.allalphas.append(deepcopy(self.alphas))
self.allsigmas.append(deepcopy(self.sigmas))
self.allmus.append(deepcopy(self.mus))
if self.oneFifthRule and len(self.allsamples) % 10 == 0 and len(self.allsamples) > 2 * self.windowSize:
lastBatch = self.allfitnesses[-self.windowSize:]
secondLast = self.allfitnesses[-2 * self.windowSize:-self.windowSize]
sortedLast = sort(lastBatch)
sortedSecond = sort(secondLast)
index = int(self.windowSize * 0.8)
if sortedLast[index] >= sortedSecond[index]:
self.sigmas = [1.2 * sigma for sigma in self.sigmas]
#print "+"
else:
self.sigmas = [0.5 * sigma for sigma in self.sigmas]
#print "-"
| {
"repo_name": "rbalda/neural_ocr",
"path": "env/lib/python2.7/site-packages/pybrain/optimization/distributionbased/fem.py",
"copies": "1",
"size": "10990",
"license": "mit",
"hash": -3695932602404504600,
"line_mean": 40.0074626866,
"line_max": 125,
"alpha_frac": 0.5818926297,
"autogenerated": false,
"ratio": 3.916607270135424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9894924617392757,
"avg_score": 0.02071505648853349,
"num_lines": 268
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import dot, rand, ones, eye, zeros, outer, isnan, multiply
from numpy.random import multivariate_normal
from numpy import average
from pybrain.utilities import drawIndex
from blackboxoptimizer import BlackBoxOptimizer
from pybrain.tools.rankingfunctions import RankingFunction
from pybrain.tools.functions import multivariateNormalPdf, multivariateCauchy
class FEM(BlackBoxOptimizer):
""" Fitness expectation-maximization"""
batchsize = 50 #a.k.a: lambda
numberOfCenters = 1 #a.k.a: k
rangemins = None
rangemaxs = None
initCovariances = None
onlineLearning = True
forgetFactor = 0.5
elitist = False
evalMus = True
rankingFunction = RankingFunction()
useCauchy = False
# TODO: interface changed: make coherent
online = False
def __init__(self, evaluator, evaluable, **parameters):
BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
self.alphas = ones(self.numberOfCenters)/self.numberOfCenters
self.mus = []
self.sigmas = []
self.tau = 1.
if self.rangemins == None:
self.rangemins = -ones(self.xdim)
if self.rangemaxs == None:
self.rangemaxs = ones(self.xdim)
if self.initCovariances == None:
self.initCovariances = eye(self.xdim)
if self.elitist and self.numberOfCenters == 1 and not self.noisyEvaluator:
# in the elitist case seperate evaluations are not necessary.
# CHECKME: maybe in the noisy case?
self.evalMus = False
assert not(self.useCauchy and self.numberOfCenters > 1)
for dummy in range(self.numberOfCenters):
self.mus.append(rand(self.xdim) * (self.rangemaxs-self.rangemins) + self.rangemins)
self.sigmas.append(dot(eye(self.xdim), self.initCovariances))
self.reset()
def reset(self):
self.samples = range(self.batchsize)
self.densities = zeros((self.batchsize, self.numberOfCenters))
self.fitnesses = zeros(self.batchsize)
self.generation = 0
self.allsamples = []
self.muevals = []
self.allmus = []
self.allsigmas =[]
def _stoppingCriterion(self):
if self.evalMus:
evals = len(self.allsamples)+len(self.muevals)
else:
evals = len(self.allsamples)
return (self.bestEvaluation >= self.desiredEvaluation or evals >= self.maxEvaluations)
def _batchLearn(self, maxSteps):
if self.verbose:
print
print "==================="
print "Fitness Expectation Maximization"
print "==================="
if self.onlineLearning:
print "ONLINE"
print "Forget-factor:", self.forgetFactor
else:
print 'OFFLINE'
print 'Distribution:',
if self.useCauchy:
print 'Cauchy'
else:
print 'Gaussian'
print "Batch-size:", self.batchsize
print "Elitist:", self.elitist
print 'Ranking function:', self.rankingFunction.name
if self.numberOfCenters > 1:
print "Number of centers:", self.numberOfCenters
print
# go through a number of generations
while not self._stoppingCriterion():
for k in range(self.batchsize):
self.samples[k], self.fitnesses[k], self.densities[k] = self._produceNewSample()
if self.onlineLearning and self.generation >= 1:
self._updateWeightings()
self._updateParameters(k)
if self._stoppingCriterion(): break
if not self.onlineLearning:
self._updateWeightings()
self._updateParameters()
#print diag(self.sigmas[0])
# evaluate the mu points seperately (for filtered progression values)
if self.evalMus:
for m in self.mus:
me = self.evaluator(m)
if me > self.bestEvaluation:
self.bestEvaluation, self.bestEvaluable = me, m
self.muevals.append(me)
import copy
self.allsigmas.append(copy.deepcopy(self.sigmas))
self.allmus.append(copy.deepcopy(self.mus))
else:
self.muevals.append(self.bestEvaluation)
if self.verbose:
print 'gen:', self.generation, 'max,min,avg:',max(self.fitnesses), min(self.fitnesses), average(self.fitnesses),
if self.evalMus: print ' mu-fitness(es):', self.muevals[-len(self.mus):]
else: print
self.generation += 1
self.notify()
def _produceNewSample(self):
""" returns a new sample, its fitness and its densities """
sample = self._generateSample()
fit = self.evaluator(sample)
if fit >= self.bestEvaluation:
self.bestEvaluation = fit
self.bestEvaluable = sample.copy()
self.allsamples.append(sample)
# compute densities, and normalize
densities = zeros(self.numberOfCenters)
if self.numberOfCenters > 1:
for c in range(self.numberOfCenters):
densities[c] = self.alphas[c] * multivariateNormalPdf(sample, self.mus[c], self.sigmas[c])
densities /= sum(densities)
return sample, fit, densities
def _generateSample(self):
""" generate a new sample from the current distribution. """
if self.useCauchy:
# Cauchy distribution
chosenOne = drawIndex(self.alphas, True)
return multivariateCauchy(self.mus[chosenOne], self.sigmas[chosenOne])
else:
# Normal distribution
chosenOne = drawIndex(self.alphas, True)
return multivariate_normal(self.mus[chosenOne], self.sigmas[chosenOne])
def _updateWeightings(self):
""" update the weightings using transformed fitnesses """
# determine (transformed) fitnesses
transformedfitnesses = self.rankingFunction(self.fitnesses)
# force renormaliziation
transformedfitnesses /= max(transformedfitnesses)
if self.numberOfCenters > 1:
self.weightings = multiply(outer(transformedfitnesses, ones(self.numberOfCenters)), self.densities)
else:
self.weightings = transformedfitnesses.reshape(self.batchsize, 1)
if self.onlineLearning:
for c in range(self.numberOfCenters):
self.weightings[:,c] /= max(self.weightings[:,c])
else:
#CHECKME: inconsistency?
self.weightings /= sum(self.weightings)
def _cauchyUpdate(self, weightings):
""" computation of parameter updates if the distribution is Cauchy """
# make sure the weightings sum to 1
weightings = weightings / sum(weightings)
newSigma = zeros((self.xdim, self.xdim))
newMu = zeros(self.xdim)
for d in range(self.xdim):
# build a list of tuples of (value, weight)
tuplist = zip(map(lambda s: s[d], self.samples), weightings)
tuplist.sort()
# determine the values at the 1/4 and 3/4 points of cumulative weighting
cum = 0
quart = None
for elem, w in tuplist:
cum += w
if cum >= 0.25 and not quart:
quart = elem
if cum >= 0.75:
threequart = elem
break
assert threequart != quart
newMu[d] = (quart + threequart)/2
newSigma[d,d] = threequart - newMu[d]
return newMu, newSigma
def _gaussianUpdate(self, weightings, indices, oldMu):
""" computation of parameter updates if the distribution is gaussian """
newMu = zeros(self.xdim)
newSigma = zeros((self.xdim, self.xdim))
for i in indices:
newMu += weightings[i] * self.samples[i]
# THIS LINE IS A HACK! REMOVE IT!
newMu = self.forgetFactor * oldMu + (1-self.forgetFactor) * newMu
for i in indices:
dif = self.samples[i]-newMu
newSigma += weightings[i] * outer(dif, dif)
return newMu, newSigma
def _updateParameters(self, sampleIndex = None):
for c in range(self.numberOfCenters):
weightings = self.weightings[:,c]
if self.onlineLearning:
lr = self.forgetFactor * weightings[sampleIndex]
self.alphas[c] = (1.0-lr) * self.alphas[c] + lr
else:
self.alphas[c] = sum(weightings)
# determine the updates to the parameters, depending on the distribution used
if self.useCauchy:
newMu, newSigma = self._cauchyUpdate(weightings)
else:
# gaussian case
if self.onlineLearning:
newMu, newSigma = self._gaussianUpdate(weightings, [sampleIndex], self.mus[c])
else:
newMu, newSigma = self._gaussianUpdate(weightings, range(self.batchsize), self.mus[c],)
# CHECKME: redundant!?
#newMu /= sum(weightings)
#newSigma /= sum(weightings)
# update the mus
if self.elitist:
self.mus[c] = self.bestEvaluable.copy()
else:
if self.onlineLearning:
# use the forget-factor
self.mus[c] = (1-lr) * self.mus[c] + lr * newMu
else:
self.mus[c] = newMu
# update the sigmas
if True in isnan(newSigma):
print "NaNs! We'll ignore them."
else:
if self.onlineLearning:
# use the forget-factor
self.sigmas[c] = (1-lr) * self.sigmas[c] + self.forgetFactor * newSigma
else:
self.sigmas[c] = newSigma
# nomalize alphas
self.alphas /= sum(self.alphas)
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/rl/learners/blackboxoptimizers/fem.py",
"copies": "1",
"size": "10725",
"license": "bsd-3-clause",
"hash": 7575903952360249000,
"line_mean": 39.4754716981,
"line_max": 128,
"alpha_frac": 0.5465734266,
"autogenerated": false,
"ratio": 4.232438831886346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016295022694090056,
"num_lines": 265
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import eye, multiply, ones, dot, array, outer, rand, zeros, diag, randn, exp
from scipy.linalg import cholesky, inv, det
from pybrain.optimization.distributionbased.distributionbased import DistributionBasedOptimizer
from pybrain.tools.rankingfunctions import TopLinearRanking
from pybrain.utilities import flat2triu, triu2flat
from pybrain.auxiliary import importanceMixing
class VanillaGradientEvolutionStrategies(DistributionBasedOptimizer):
""" Vanilla gradient-based evolution strategy. """
# mandatory parameters
online = False
learningRate = 0.01
learningRateSigma = None # default: the same than learningRate
initialFactorSigma = None # default: identity matrix
# NOT YET SUPPORTED:
diagonalOnly = False
batchSize = 100
momentum = None
elitism = False
shapingFunction = TopLinearRanking(topFraction=0.5)
# initialization parameters
rangemins = None
rangemaxs = None
initCovariances = None
vanillaScale = False
# use of importance sampling to get away with fewer samples:
importanceMixing = True
forcedRefresh = 0.01
mustMaximize = True
def _additionalInit(self):
xdim = self.numParameters
assert not self.diagonalOnly, 'Diagonal-only not yet supported'
self.numDistrParams = xdim + xdim * (xdim + 1) / 2
if self.momentum != None:
self.momentumVector = zeros(self.numDistrParams)
if self.learningRateSigma == None:
self.learningRateSigma = self.learningRate
if self.batchSize is None:
self.batchSize = 10 * self.numParameters
if self.rangemins == None:
self.rangemins = -ones(xdim)
if self.rangemaxs == None:
self.rangemaxs = ones(xdim)
if self.initCovariances == None:
if self.diagonalOnly:
self.initCovariances = ones(xdim)
else:
self.initCovariances = eye(xdim)
self.x = rand(xdim) * (self.rangemaxs - self.rangemins) + self.rangemins
self.sigma = dot(eye(xdim), self.initCovariances)
self.factorSigma = cholesky(self.sigma)
# keeping track of history
self.allSamples = []
self.allFitnesses = []
self.allGenerated = [0]
self.allCenters = [self.x.copy()]
self.allFactorSigmas = [self.factorSigma.copy()]
# for baseline computation
self.phiSquareWindow = zeros((self.batchSize, self.numDistrParams))
if self.storeAllDistributions:
self._allDistributions = [(self.x.copy(), self.sigma.copy())]
def _produceNewSample(self, z=None):
if z == None:
p = randn(self.numParameters)
z = dot(self.factorSigma.T, p) + self.x
self.allSamples.append(z)
fit = self._oneEvaluation(z)
self.allFitnesses.append(fit)
return z, fit
def _produceSamples(self):
""" Append batchsize new samples and evaluate them. """
if self.numLearningSteps == 0 or not self.importanceMixing:
for _ in range(self.batchSize):
self._produceNewSample()
self.allGenerated.append(self.batchSize + self.allGenerated[-1])
# using new importance mixing code
else:
oldpoints = self.allSamples[-self.batchSize:]
oldDetFactorSigma = det(self.allFactorSigmas[-2])
newDetFactorSigma = det(self.factorSigma)
invA = inv(self.factorSigma)
offset = len(self.allSamples) - self.batchSize
oldInvA = inv(self.allFactorSigmas[-2])
oldX = self.allCenters[-2]
def oldpdf(s):
p = dot(oldInvA.T, (s- oldX))
return exp(-0.5 * dot(p, p)) / oldDetFactorSigma
def newpdf(s):
p = dot(invA.T, (s - self.x))
return exp(-0.5 * dot(p, p)) / newDetFactorSigma
def newSample():
p = randn(self.numParameters)
return dot(self.factorSigma.T, p) + self.x
reused, newpoints = importanceMixing(oldpoints, oldpdf, newpdf,
newSample, self.forcedRefresh)
self.allGenerated.append(self.allGenerated[-1]+len(newpoints))
for i in reused:
self.allSamples.append(self.allSamples[offset+i])
self.allFitnesses.append(self.allFitnesses[offset+i])
for s in newpoints:
self._produceNewSample(s)
def _learnStep(self):
if self.online:
self._onlineLearn()
else:
self._batchLearn()
def _batchLearn(self):
""" Batch learning. """
xdim = self.numParameters
# produce samples and evaluate them
try:
self._produceSamples()
# shape their fitnesses
shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
# update parameters (unbiased: divide by batchsize)
update = self._calcBatchUpdate(shapedFits)
if self.elitism:
self.x = self.bestEvaluable
else:
self.x += self.learningRate * update[:xdim]
self.factorSigma += self.learningRateSigma * flat2triu(update[xdim:], xdim)
self.sigma = dot(self.factorSigma.T, self.factorSigma)
except ValueError:
print 'Numerical Instability. Stopping.'
self.maxLearningSteps = self.numLearningSteps
if self._hasConverged():
print 'Premature convergence. Stopping.'
self.maxLearningSteps = self.numLearningSteps
if self.verbose:
print 'Evals:', self.numEvaluations,
self.allCenters.append(self.x.copy())
self.allFactorSigmas.append(self.factorSigma.copy())
if self.storeAllDistributions:
self._allDistributions.append((self.x.copy(), self.sigma.copy()))
def _onlineLearn(self):
""" Online learning. """
# produce one sample and evaluate
xdim = self.numParameters
self._produceNewSample()
if len(self.allSamples) <= self.batchSize:
return
# shape the fitnesses of the last samples
shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
# update parameters
update = self._calcOnlineUpdate(shapedFits)
self.x += self.learningRate * update[:xdim]
self.factorSigma += self.learningRateSigma * flat2triu(update[xdim:], xdim)
self.sigma = dot(self.factorSigma.T, self.factorSigma)
if self.storeAllDistributions:
self._allDistributions.append(self.x.copy(), self.sigma.copy())
def _calcBatchUpdate(self, fitnesses):
gradient = self._calcVanillaBatchGradient(self.allSamples[-self.batchSize:], fitnesses)
if self.momentum != None:
self.momentumVector *= self.momentum
self.momentumVector += gradient
return self.momentumVector
else:
return gradient
def _calcOnlineUpdate(self, fitnesses):
gradient = self._calcVanillaOnlineGradient(self.allSamples[-1], fitnesses[-self.batchSize:])
if self.momentum != None:
self.momentumVector *= self.momentum
self.momentumVector += gradient
return self.momentumVector
else:
return gradient
def _logDerivX(self, sample, x, invSigma):
return dot(invSigma, (sample - x))
def _logDerivsX(self, samples, x, invSigma):
samplesArray = array(samples)
tmpX = multiply(x, ones((len(samplesArray), self.numParameters)))
return dot(invSigma, (samplesArray - tmpX).T).T
def _logDerivFactorSigma(self, sample, x, invSigma, factorSigma):
logDerivSigma = 0.5 * dot(dot(invSigma, outer(sample - x, sample - x)), invSigma) - 0.5 * invSigma
if self.vanillaScale:
logDerivSigma = multiply(outer(diag(abs(self.factorSigma)), diag(abs(self.factorSigma))), logDerivSigma)
return triu2flat(dot(factorSigma, (logDerivSigma + logDerivSigma.T)))
def _logDerivsFactorSigma(self, samples, x, invSigma, factorSigma):
return [self._logDerivFactorSigma(sample, x, invSigma, factorSigma) for sample in samples]
def _calcVanillaBatchGradient(self, samples, shapedfitnesses):
invSigma = inv(self.sigma)
phi = zeros((len(samples), self.numDistrParams))
phi[:, :self.numParameters] = self._logDerivsX(samples, self.x, invSigma)
logDerivFactorSigma = self._logDerivsFactorSigma(samples, self.x, invSigma, self.factorSigma)
phi[:, self.numParameters:] = array(logDerivFactorSigma)
Rmat = outer(shapedfitnesses, ones(self.numDistrParams))
# optimal baseline
self.phiSquareWindow = multiply(phi, phi)
baselineMatrix = self._calcBaseline(shapedfitnesses)
gradient = sum(multiply(phi, (Rmat - baselineMatrix)), 0)
return gradient
def _calcVanillaOnlineGradient(self, sample, shapedfitnesses):
invSigma = inv(self.sigma)
phi = zeros(self.numDistrParams)
phi[:self.numParameters] = self._logDerivX(sample, self.x, invSigma)
logDerivSigma = self._logDerivFactorSigma(sample, self.x, invSigma, self.factorSigma)
phi[self.numParameters:] = logDerivSigma.flatten()
index = len(self.allSamples) % self.batchSize
self.phiSquareWindow[index] = multiply(phi, phi)
baseline = self._calcBaseline(shapedfitnesses)
gradient = multiply((ones(self.numDistrParams) * shapedfitnesses[-1] - baseline), phi)
return gradient
def _calcBaseline(self, shapedfitnesses):
paramWeightings = dot(ones(self.batchSize), self.phiSquareWindow)
baseline = dot(shapedfitnesses, self.phiSquareWindow) / paramWeightings
return baseline
def _hasConverged(self):
""" When the largest eigenvalue is smaller than 10e-20, we assume the
algorithms has converged. """
eigs = abs(diag(self.factorSigma))
return min(eigs) < 1e-10
def _revertToSafety(self):
""" When encountering a bad matrix, this is how we revert to a safe one. """
self.factorSigma = eye(self.numParameters)
self.x = self.bestEvaluable
self.allFactorSigmas[-1][:] = self.factorSigma
self.sigma = dot(self.factorSigma.T, self.factorSigma)
| {
"repo_name": "arnaudsj/pybrain",
"path": "pybrain/optimization/distributionbased/ves.py",
"copies": "5",
"size": "10520",
"license": "bsd-3-clause",
"hash": 2806415885453108700,
"line_mean": 36.8417266187,
"line_max": 116,
"alpha_frac": 0.6331749049,
"autogenerated": false,
"ratio": 3.8563049853372435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6989479890237243,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import size, zeros, ndarray, array
from numpy.random import randn
from pybrain.structure.evolvables.evolvable import Evolvable
class ParameterContainer(Evolvable):
    """ Shared interface for objects carrying data that can change during
    execution (i.e. trainable parameters) and that must be storable to and
    retrievable from files without loss. """
    # spread (std-dev) used for random initialization and for mutation
    stdParams = 1.
    mutationStd = 0.1
    # when set, only the owner may (re)assign the params or derivs arrays
    owner = None
    # whether a derivative array is maintained alongside the parameters
    hasDerivatives = False
    def __init__(self, paramdim = 0, **args):
        """ Initialize all parameters with random values, normally
        distributed around 0.

        :key stdParams: standard deviation of the values (default: 1).
        """
        self.setArgs(**args)
        self.paramdim = paramdim
        if paramdim <= 0:
            return
        self._params = zeros(self.paramdim)
        # derivatives are only enabled for Module / Connection instances
        # (imported locally -- a module-level import would be circular)
        from pybrain.structure.modules.module import Module
        from pybrain.structure.connections.connection import Connection
        if isinstance(self, (Module, Connection)):
            self.hasDerivatives = True
        if self.hasDerivatives:
            self._derivs = zeros(self.paramdim)
        self.randomize()
    @property
    def params(self):
        """ @rtype: an array of numbers. """
        return self._params
    def __len__(self):
        return self.paramdim
    def _setParameters(self, p, owner = None):
        """ :key p: an array of numbers """
        if isinstance(p, list):
            p = array(p)
        assert isinstance(p, ndarray)
        if self.owner == self:
            # self-owned array: only in-place value updates are allowed,
            # never a swap to a different array object
            self._params[:] = p
        else:
            if self.owner != owner:
                raise Exception("Parameter ownership mismatch: cannot set to new array.")
            self._params = p
        self.paramdim = size(self.params)
    @property
    def derivs(self):
        """ :rtype: an array of numbers. """
        return self._derivs
    def _setDerivatives(self, d, owner = None):
        """ :key d: an array of numbers of self.paramdim """
        assert self.owner == owner
        assert size(d) == self.paramdim
        self._derivs = d
    def resetDerivatives(self):
        """ :note: this method only sets the values to zero, it does not initialize the array. """
        assert self.hasDerivatives
        self._derivs *= 0
    def randomize(self):
        """ Draw fresh parameters from N(0, stdParams**2). """
        self._params[:] = self.stdParams * randn(self.paramdim)
        if self.hasDerivatives:
            self.resetDerivatives()
    def mutate(self):
        """ Perturb every parameter with Gaussian noise of std mutationStd. """
        self._params += self.mutationStd * randn(self.paramdim)
| {
"repo_name": "rbalda/neural_ocr",
"path": "env/lib/python2.7/site-packages/pybrain/structure/parametercontainer.py",
"copies": "1",
"size": "3215",
"license": "mit",
"hash": 1592284671947426000,
"line_mean": 34.7333333333,
"line_max": 100,
"alpha_frac": 0.5968895801,
"autogenerated": false,
"ratio": 4.416208791208791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019178838490561138,
"num_lines": 89
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import size, zeros
from numpy.random import randn
from pybrain.structure.evolvables.evolvable import Evolvable
class ParameterContainer(Evolvable):
    """ Shared interface for objects carrying data that can change during
    execution (i.e. trainable parameters) and that must be storable to and
    retrievable from files without loss. """
    # spread (std-dev) used for random initialization and for mutation
    stdParams = 1.
    mutationStd = 0.1
    # when set, only the owner may (re)assign the params or derivs arrays
    owner = None
    # whether a derivative array is maintained alongside the parameters
    hasDerivatives = False
    def __init__(self, paramdim = 0, **args):
        """ initialize all parameters with random values, normally distributed around 0
        @param stdParams: standard deviation of the values (default: 1).
        """
        self.setArgs(**args)
        self.paramdim = paramdim
        if paramdim <= 0:
            return
        self._params = zeros(self.paramdim)
        # derivatives are only enabled for Module / Connection instances
        # (imported locally -- a module-level import would be circular)
        from pybrain.structure.modules.module import Module
        from pybrain.structure.connections.connection import Connection
        if isinstance(self, (Module, Connection)):
            self.hasDerivatives = True
        if self.hasDerivatives:
            self._derivs = zeros(self.paramdim)
        self.randomize()
    @property
    def params(self):
        """ @rtype: an array of numbers. """
        return self._params
    def __len__(self):
        return self.paramdim
    def _setParameters(self, p, owner = None):
        """ @param p: an array of numbers """
        # only the registered owner may swap in a new parameter array
        assert self.owner == owner
        self._params = p
        self.paramdim = size(self.params)
    @property
    def derivs(self):
        """ @rtype: an array of numbers. """
        return self._derivs
    def _setDerivatives(self, d, owner = None):
        """ @param d: an array of numbers of self.paramdim """
        assert self.owner == owner
        assert size(d) == self.paramdim
        self._derivs = d
    def resetDerivatives(self):
        """ @note: this method only sets the values to zero, it does not initialize the array. """
        assert self.hasDerivatives
        self._derivs *= 0
    def randomize(self):
        """ Draw fresh parameters from N(0, stdParams**2). """
        self._params[:] = self.stdParams * randn(self.paramdim)
        if self.hasDerivatives:
            self.resetDerivatives()
    def mutate(self):
        """ Perturb every parameter with Gaussian noise of std mutationStd. """
        self._params += self.mutationStd * randn(self.paramdim)
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/structure/parametercontainer.py",
"copies": "1",
"size": "2787",
"license": "bsd-3-clause",
"hash": 5409903261788317000,
"line_mean": 34.2911392405,
"line_max": 100,
"alpha_frac": 0.6106925009,
"autogenerated": false,
"ratio": 4.3546875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.029421487865965455,
"num_lines": 79
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import tanh
from neuronlayer import NeuronLayer
from module import Module
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.tools.functions import sigmoid, sigmoidPrime, tanhPrime
class LSTMLayer(NeuronLayer, ParameterContainer):
"""Long short-term memory cell layer.
The input consists of 4 parts, in the following order:
- input gate
- forget gate
- cell input
- output gate
"""
sequential = True
peepholes = False
maxoffset = 0
# Transfer functions and their derivatives
f = lambda _, x: sigmoid(x)
fprime = lambda _, x: sigmoidPrime(x)
g = lambda _, x: tanh(x)
gprime = lambda _, x: tanhPrime(x)
h = lambda _, x: tanh(x)
hprime = lambda _, x: tanhPrime(x)
    def __init__(self, dim, peepholes = False, name = None):
        """ :param dim: number of memory cells
        :param peepholes: if True, maintain per-cell peephole weights from
            the internal state to the three gates
        :param name: optional module name """
        self.setArgs(dim = dim, peepholes = peepholes)
        # Internal buffers:
        self.bufferlist = [
            ('ingate', dim),
            ('outgate', dim),
            ('forgetgate', dim),
            ('ingatex', dim),
            ('outgatex', dim),
            ('forgetgatex', dim),
            ('state', dim),
            ('ingateError', dim),
            ('outgateError', dim),
            ('forgetgateError', dim),
            ('stateError', dim),
        ]
        # the input is 4 concatenated parts of size dim each
        # (ingate, forgetgate, cell input, outgate); the output is dim-sized
        Module.__init__(self, 4*dim, dim, name)
        if self.peepholes:
            # 3 peephole weight vectors: ingate, forgetgate, outgate
            ParameterContainer.__init__(self, dim*3)
            # re-slice the weight/derivative views onto the fresh arrays
            self._setParameters(self.params)
            self._setDerivatives(self.derivs)
def _setParameters(self, p, owner = None):
ParameterContainer._setParameters(self, p, owner)
dim = self.outdim
self.ingatePeepWeights = self.params[:dim]
self.forgetgatePeepWeights = self.params[dim:dim*2]
self.outgatePeepWeights = self.params[dim*2:]
def _setDerivatives(self, d, owner = None):
ParameterContainer._setDerivatives(self, d, owner)
dim = self.outdim
self.ingatePeepDerivs = self.derivs[:dim]
self.forgetgatePeepDerivs = self.derivs[dim:dim*2]
self.outgatePeepDerivs = self.derivs[dim*2:]
def _isLastTimestep(self):
"""Tell wether the current offset is the maximum offset."""
return self.maxoffset == self.offset
def _forwardImplementation(self, inbuf, outbuf):
self.maxoffset = max(self.offset + 1, self.maxoffset)
dim = self.outdim
# slicing the input buffer into the 4 parts
try:
self.ingatex[self.offset] = inbuf[:dim]
except IndexError:
raise str((self.offset, self.ingatex.shape))
self.forgetgatex[self.offset] = inbuf[dim:dim*2]
cellx = inbuf[dim*2:dim*3]
self.outgatex[self.offset] = inbuf[dim*3:]
# peephole treatment
if self.peepholes and self.offset > 0:
self.ingatex[self.offset] += self.ingatePeepWeights * self.state[self.offset-1]
self.forgetgatex[self.offset] += self.forgetgatePeepWeights * self.state[self.offset-1]
self.ingate[self.offset] = self.f(self.ingatex[self.offset])
self.forgetgate[self.offset] = self.f(self.forgetgatex[self.offset])
self.state[self.offset] = self.ingate[self.offset] * self.g(cellx)
if self.offset > 0:
self.state[self.offset] += self.forgetgate[self.offset] * self.state[self.offset-1]
if self.peepholes:
self.outgatex[self.offset] += self.outgatePeepWeights * self.state[self.offset]
self.outgate[self.offset] = self.f(self.outgatex[self.offset])
outbuf[:] = self.outgate[self.offset] * self.h(self.state[self.offset])
def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
dim = self.outdim
cellx = inbuf[dim*2:dim*3]
self.outgateError[self.offset] = self.fprime(self.outgatex[self.offset]) * outerr * self.h(self.state[self.offset])
self.stateError[self.offset] = outerr * self.outgate[self.offset] * self.hprime(self.state[self.offset])
if not self._isLastTimestep():
self.stateError[self.offset] += self.stateError[self.offset+1] * self.forgetgate[self.offset+1]
if self.peepholes:
self.stateError[self.offset] += self.ingateError[self.offset+1] * self.ingatePeepWeights
self.stateError[self.offset] += self.forgetgateError[self.offset+1] * self.forgetgatePeepWeights
if self.peepholes:
self.stateError[self.offset] += self.outgateError[self.offset] * self.outgatePeepWeights
cellError = self.ingate[self.offset] * self.gprime(cellx) * self.stateError[self.offset]
if self.offset > 0:
self.forgetgateError[self.offset] = self.fprime(self.forgetgatex[self.offset]) * self.stateError[self.offset] * self.state[self.offset-1]
self.ingateError[self.offset] = self.fprime(self.ingatex[self.offset]) * self.stateError[self.offset] * self.g(cellx)
# compute derivatives
if self.peepholes:
self.outgatePeepDerivs += self.outgateError[self.offset] * self.state[self.offset]
if self.offset > 0:
self.ingatePeepDerivs += self.ingateError[self.offset] * self.state[self.offset-1]
self.forgetgatePeepDerivs += self.forgetgateError[self.offset] * self.state[self.offset-1]
inerr[:dim] = self.ingateError[self.offset]
inerr[dim:dim*2] = self.forgetgateError[self.offset]
inerr[dim*2:dim*3] = cellError
inerr[dim*3:] = self.outgateError[self.offset]
def whichNeuron(self, inputIndex = None, outputIndex = None):
if inputIndex != None:
return inputIndex % self.dim
if outputIndex != None:
return outputIndex
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/structure/modules/lstm.py",
"copies": "1",
"size": "6002",
"license": "bsd-3-clause",
"hash": 2770442308338941000,
"line_mean": 39.8367346939,
"line_max": 149,
"alpha_frac": 0.6101299567,
"autogenerated": false,
"ratio": 3.5347467608951706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46448767175951705,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import tanh
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.structure.modules.module import Module
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.tools.functions import sigmoid, sigmoidPrime, tanhPrime
class LSTMLayer(NeuronLayer, ParameterContainer):
    """Long short-term memory cell layer.
    The input consists of 4 parts, in the following order:
    - input gate
    - forget gate
    - cell input
    - output gate
    """
    # samples form an ordered sequence: cell state carries over timesteps
    sequential = True
    # peephole connections (cell state -> gates) are off by default
    peepholes = False
    # highest timestep written so far; used by the backward pass to detect
    # the last timestep
    maxoffset = 0
    # Transfer functions and their derivatives (class-level lambdas bound as
    # methods, so the first argument is the unused `self`)
    f = lambda _, x: sigmoid(x)
    fprime = lambda _, x: sigmoidPrime(x)
    g = lambda _, x: tanh(x)
    gprime = lambda _, x: tanhPrime(x)
    h = lambda _, x: tanh(x)
    hprime = lambda _, x: tanhPrime(x)
    def __init__(self, dim, peepholes = False, name = None):
        """
        :arg dim: number of cells
        :key peepholes: enable peephole connections (from state to gates)? """
        self.setArgs(dim = dim, peepholes = peepholes)
        # Internal buffers, created dynamically:
        self.bufferlist = [
            ('ingate', dim),
            ('outgate', dim),
            ('forgetgate', dim),
            ('ingatex', dim),
            ('outgatex', dim),
            ('forgetgatex', dim),
            ('state', dim),
            ('ingateError', dim),
            ('outgateError', dim),
            ('forgetgateError', dim),
            ('stateError', dim),
        ]
        # input holds the 4 concatenated parts, output is one value per cell
        Module.__init__(self, 4*dim, dim, name)
        if self.peepholes:
            # one peephole weight per cell for each of the 3 gates
            ParameterContainer.__init__(self, dim*3)
            self._setParameters(self.params)
            self._setDerivatives(self.derivs)
    def _setParameters(self, p, owner = None):
        """Slice the flat parameter array into the 3 peephole-weight views."""
        ParameterContainer._setParameters(self, p, owner)
        dim = self.outdim
        self.ingatePeepWeights = self.params[:dim]
        self.forgetgatePeepWeights = self.params[dim:dim*2]
        self.outgatePeepWeights = self.params[dim*2:]
    def _setDerivatives(self, d, owner = None):
        """Slice the flat derivative array into the 3 peephole-derivative views."""
        ParameterContainer._setDerivatives(self, d, owner)
        dim = self.outdim
        self.ingatePeepDerivs = self.derivs[:dim]
        self.forgetgatePeepDerivs = self.derivs[dim:dim*2]
        self.outgatePeepDerivs = self.derivs[dim*2:]
    def _isLastTimestep(self):
        """Tell whether the current offset is the maximum offset."""
        return self.maxoffset == self.offset
    def _forwardImplementation(self, inbuf, outbuf):
        self.maxoffset = max(self.offset + 1, self.maxoffset)
        dim = self.outdim
        # slicing the input buffer into the 4 parts
        try:
            self.ingatex[self.offset] = inbuf[:dim]
        except IndexError:
            # raising a plain string is illegal in Python 3 (and deprecated
            # since 2.6); raise a real exception carrying the debug context
            raise IndexError('offset %s out of range for buffer of shape %s'
                             % (self.offset, self.ingatex.shape))
        self.forgetgatex[self.offset] = inbuf[dim:dim*2]
        cellx = inbuf[dim*2:dim*3]
        self.outgatex[self.offset] = inbuf[dim*3:]
        # peephole treatment: in/forget gates additionally see the previous state
        if self.peepholes and self.offset > 0:
            self.ingatex[self.offset] += self.ingatePeepWeights * self.state[self.offset-1]
            self.forgetgatex[self.offset] += self.forgetgatePeepWeights * self.state[self.offset-1]
        self.ingate[self.offset] = self.f(self.ingatex[self.offset])
        self.forgetgate[self.offset] = self.f(self.forgetgatex[self.offset])
        # new state = gated cell input (+ gated previous state, if any)
        self.state[self.offset] = self.ingate[self.offset] * self.g(cellx)
        if self.offset > 0:
            self.state[self.offset] += self.forgetgate[self.offset] * self.state[self.offset-1]
        # the output gate peeks at the *current* state
        if self.peepholes:
            self.outgatex[self.offset] += self.outgatePeepWeights * self.state[self.offset]
        self.outgate[self.offset] = self.f(self.outgatex[self.offset])
        outbuf[:] = self.outgate[self.offset] * self.h(self.state[self.offset])
    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        dim = self.outdim
        cellx = inbuf[dim*2:dim*3]
        self.outgateError[self.offset] = self.fprime(self.outgatex[self.offset]) * outerr * self.h(self.state[self.offset])
        self.stateError[self.offset] = outerr * self.outgate[self.offset] * self.hprime(self.state[self.offset])
        if not self._isLastTimestep():
            # error also flows back from the next timestep's state
            self.stateError[self.offset] += self.stateError[self.offset+1] * self.forgetgate[self.offset+1]
            if self.peepholes:
                self.stateError[self.offset] += self.ingateError[self.offset+1] * self.ingatePeepWeights
                self.stateError[self.offset] += self.forgetgateError[self.offset+1] * self.forgetgatePeepWeights
        if self.peepholes:
            self.stateError[self.offset] += self.outgateError[self.offset] * self.outgatePeepWeights
        cellError = self.ingate[self.offset] * self.gprime(cellx) * self.stateError[self.offset]
        if self.offset > 0:
            self.forgetgateError[self.offset] = self.fprime(self.forgetgatex[self.offset]) * self.stateError[self.offset] * self.state[self.offset-1]
        self.ingateError[self.offset] = self.fprime(self.ingatex[self.offset]) * self.stateError[self.offset] * self.g(cellx)
        # compute derivatives
        if self.peepholes:
            self.outgatePeepDerivs += self.outgateError[self.offset] * self.state[self.offset]
            if self.offset > 0:
                self.ingatePeepDerivs += self.ingateError[self.offset] * self.state[self.offset-1]
                self.forgetgatePeepDerivs += self.forgetgateError[self.offset] * self.state[self.offset-1]
        inerr[:dim] = self.ingateError[self.offset]
        inerr[dim:dim*2] = self.forgetgateError[self.offset]
        inerr[dim*2:dim*3] = cellError
        inerr[dim*3:] = self.outgateError[self.offset]
    def whichNeuron(self, inputIndex = None, outputIndex = None):
        """Map a flat buffer index to the index of the cell it belongs to."""
        if inputIndex is not None:
            return inputIndex % self.dim
        if outputIndex is not None:
            return outputIndex
| {
"repo_name": "blueburningcoder/pybrain",
"path": "pybrain/structure/modules/lstm.py",
"copies": "26",
"size": "5992",
"license": "bsd-3-clause",
"hash": -6223830950947725000,
"line_mean": 39.2147651007,
"line_max": 149,
"alpha_frac": 0.6338451268,
"autogenerated": false,
"ratio": 3.4417001723147616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import zeros
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.structure.connections import FullConnection
from pybrain.utilities import combineLists
# CHECKME: allow modules that do not inherit from NeuronLayer? and treat them as single neurons?
class NeuronDecomposableNetwork(object):
    """ A Network, that allows accessing parameters decomposed by their
    corresponding individual neuron. """
    # ESP style treatment: weights of connections that feed an output module
    # are attributed to the *sending* neuron instead of the receiving one
    espStyleDecomposition = True
    def addModule(self, m):
        # only layers of neurons can be decomposed neuron-by-neuron
        assert isinstance(m, NeuronLayer)
        super(NeuronDecomposableNetwork, self).addModule(m)
    def sortModules(self):
        super(NeuronDecomposableNetwork, self).sortModules()
        self._constructParameterInfo()
        # maps each neuron to the list of parameter indices attributed to it
        self.decompositionIndices = {}
        for neuron in self._neuronIterator():
            self.decompositionIndices[neuron] = []
        for w in range(self.paramdim):
            inneuron, outneuron = self.paramInfo[w]
            if self.espStyleDecomposition and outneuron[0] in self.outmodules:
                self.decompositionIndices[inneuron].append(w)
            else:
                self.decompositionIndices[outneuron].append(w)
    def _neuronIterator(self):
        """Yield every neuron as a (module, index-within-module) tuple."""
        for m in self.modules:
            for n in range(m.dim):
                yield (m, n)
    def _constructParameterInfo(self):
        """ construct a dictionary with information about each parameter:
        The key is the index in self.params, and the value is a tuple containing
        (inneuron, outneuron), where a neuron is a tuple of its module and an index.
        """
        self.paramInfo = {}
        index = 0
        for x in self._containerIterator():
            if isinstance(x, FullConnection):
                for w in range(x.paramdim):
                    inbuf, outbuf = x.whichBuffers(w)
                    self.paramInfo[index+w] = ((x.inmod, x.inmod.whichNeuron(outputIndex = inbuf)),
                                               (x.outmod, x.outmod.whichNeuron(inputIndex = outbuf)))
            elif isinstance(x, NeuronLayer):
                for n in range(x.paramdim):
                    self.paramInfo[index+n] = ((x,n),(x,n))
            else:
                # a bare `raise` outside an except block is invalid; raise an
                # explicit error for unsupported container types instead
                raise TypeError('Unsupported parameter container: %s' % x)
            index += x.paramdim
    def getDecomposition(self):
        """ return a list of arrays, each corresponding to one neuron's relevant parameters """
        res = []
        for neuron in self._neuronIterator():
            nIndices = self.decompositionIndices[neuron]
            if len(nIndices) > 0:
                tmp = zeros(len(nIndices))
                for i, ni in enumerate(nIndices):
                    tmp[i] = self.params[ni]
                res.append(tmp)
        return res
    def setDecomposition(self, decomposedParams):
        """ set parameters by neuron decomposition,
        each corresponding to one neuron's relevant parameters """
        # nindex advances only for neurons that own parameters, mirroring
        # the order produced by getDecomposition()
        nindex = 0
        for neuron in self._neuronIterator():
            nIndices = self.decompositionIndices[neuron]
            if len(nIndices) > 0:
                for i, ni in enumerate(nIndices):
                    self.params[ni] = decomposedParams[nindex][i]
                nindex += 1
    @staticmethod
    def convertNormalNetwork(n):
        """ convert a normal network into a decomposable one """
        if isinstance(n, RecurrentNetwork):
            res = RecurrentDecomposableNetwork()
            for c in n.recurrentConns:
                res.addRecurrentConnection(c)
        else:
            res = FeedForwardDecomposableNetwork()
        for m in n.inmodules:
            res.addInputModule(m)
        for m in n.outmodules:
            res.addOutputModule(m)
        for m in n.modules:
            res.addModule(m)
        for c in combineLists(n.connections.values()):
            res.addConnection(c)
        res.name = n.name
        res.sortModules()
        return res
class FeedForwardDecomposableNetwork(NeuronDecomposableNetwork, FeedForwardNetwork):
    """Feed-forward network whose parameters can be accessed per neuron."""
    pass
class RecurrentDecomposableNetwork(NeuronDecomposableNetwork, RecurrentNetwork):
pass | {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/structure/networks/neurondecomposable.py",
"copies": "1",
"size": "4464",
"license": "bsd-3-clause",
"hash": 2671272494600325600,
"line_mean": 37.8260869565,
"line_max": 101,
"alpha_frac": 0.6046146953,
"autogenerated": false,
"ratio": 4.363636363636363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017375279244844454,
"num_lines": 115
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from scipy import zeros
from pybrain.utilities import abstractMethod, Named
class Module(Named):
    """A module has an input and an output buffer and does some processing
    to produce the output from the input -- the "forward" method.
    Optionally it can have a "backward" method too, which processes a given
    output error to derive the input error.
    Input, output and errors are (flat) scipy arrays.
    A module memorizes the buffers for all input-output pairs it encounters
    until .reset() is called."""
    # Flag that marks modules that treat a sequence of samples not as
    # independent.
    sequential = False
    # Flag which at the same time provides info on how many trainable parameters
    # the module might contain.
    paramdim = 0
    # An offset that is added upon any array access. Useful for implementing
    # things like time.
    offset = 0
    # List of (buffername, dimension) pairs; subclasses may pre-populate it
    # before Module.__init__ runs (see __init__ below).
    bufferlist = None
    def __init__(self, indim, outdim, name=None, **args):
        """Create a Module with an input dimension of indim and an output
        dimension of outdim."""
        self.setArgs(name=name, **args)
        # Make sure that it does not matter wether Module.__init__ is called
        # before or after adding elements to bufferlist in subclasses.
        # TODO: it should be possible to use less than these buffers. For some
        # methods, an error is not completely necessary. (e.g. evolution)
        self.bufferlist = [] if not self.bufferlist else self.bufferlist
        self.bufferlist += [('inputbuffer', indim),
                            ('inputerror', indim),
                            ('outputbuffer', outdim),
                            ('outputerror', outdim), ]
        self.indim = indim
        self.outdim = outdim
        # Those buffers are 2D arrays (time, dim)
        self._resetBuffers()
    def _resetBuffers(self, length=1):
        """Reset buffers to a length (in time dimension) of 1."""
        # each buffer becomes a fresh zero array; old contents are discarded
        for buffername, dim in self.bufferlist:
            setattr(self, buffername, zeros((length, dim)))
    def _growBuffers(self):
        """Double the size of the modules buffers in its first dimension and
        keep the current values."""
        # doubling gives amortized O(1) growth per appended timestep
        currentlength = getattr(self, self.bufferlist[0][0]).shape[0]
        # Save the current buffers
        tmp = [getattr(self, n) for n, _ in self.bufferlist]
        Module._resetBuffers(self, currentlength * 2)
        for previous, (buffername, _dim) in zip(tmp, self.bufferlist):
            buffer_ = getattr(self, buffername)
            buffer_[:currentlength] = previous
    def forward(self):
        """Produce the output from the input."""
        self._forwardImplementation(self.inputbuffer[self.offset],
                                    self.outputbuffer[self.offset])
    def backward(self):
        """Produce the input error from the output error."""
        self._backwardImplementation(self.outputerror[self.offset],
                                     self.inputerror[self.offset],
                                     self.outputbuffer[self.offset],
                                     self.inputbuffer[self.offset])
    def reset(self):
        """Set all buffers, past and present, to zero."""
        self.offset = 0
        for buffername, l in self.bufferlist:
            buf = getattr(self, buffername)
            # zeros(l) broadcasts over the time dimension, clearing every row
            buf[:] = zeros(l)
    def activateOnDataset(self, dataset):
        """Run the module's forward pass on the given dataset unconditionally
        and return the output."""
        dataset.reset()
        self.reset()
        out = zeros((len(dataset), self.outdim))
        for i, sample in enumerate(dataset):
            # FIXME: Can we always assume that sample[0] is the input data?
            out[i, :] = self.activate(sample[0])
        self.reset()
        dataset.reset()
        return out
    def activate(self, inpt):
        """Do one transformation of an input and return the result."""
        assert len(self.inputbuffer[self.offset]) == len(inpt), str((len(self.inputbuffer[self.offset]), len(inpt)))
        self.inputbuffer[self.offset] = inpt
        self.forward()
        # copy so later buffer mutation cannot affect the returned array
        return self.outputbuffer[self.offset].copy()
    def backActivate(self, outerr):
        """Do one transformation of an output error outerr backward and return
        the error on the input."""
        self.outputerror[self.offset] = outerr
        self.backward()
        return self.inputerror[self.offset].copy()
    def _forwardImplementation(self, inbuf, outbuf):
        """Actual forward transformation function. To be overwritten in
        subclasses."""
        abstractMethod()
    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        """Converse of the module's transformation function. Can be overwritten
        in subclasses, does not have to.
        Should also compute the derivatives of the parameters."""
        # NOTE: deliberately a no-op by default (unlike _forwardImplementation,
        # which is abstract) -- see the docstring above.
| {
"repo_name": "rbalda/neural_ocr",
"path": "env/lib/python2.7/site-packages/pybrain/structure/modules/module.py",
"copies": "3",
"size": "5106",
"license": "mit",
"hash": 5177746975442639000,
"line_mean": 39.848,
"line_max": 117,
"alpha_frac": 0.6010575793,
"autogenerated": false,
"ratio": 4.54270462633452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025103384522346166,
"num_lines": 125
} |
__author__ = 'Daan Wierstra and Tom Schaul'
import pickle
from scipy import dot, argmax
from random import shuffle
from trainer import Trainer
from pybrain.utilities import fListToString
from pybrain.auxiliary import GradientDescent
class BackpropTrainer(Trainer):
"""Trainer that trains the parameters of a module according to a
supervised dataset (potentially sequential) by backpropagating the errors
(through time)."""
def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
momentum=0., verbose=False, batchlearning=False,
weightdecay=0.):
"""Create a BackpropTrainer to train the specified `module` on the
specified `dataset`.
The learning rate gives the ratio of which parameters are changed into
the direction of the gradient. The learning rate decreases by `lrdecay`,
which is used to to multiply the learning rate after each training
step. The parameters are also adjusted with respect to `momentum`, which
is the ratio by which the gradient of the last timestep is used.
If `batchlearning` is set, the parameters are updated only at the end of
each epoch. Default is False.
`weightdecay` corresponds to the weightdecay rate, where 0 is no weight
decay at all.
"""
Trainer.__init__(self, module)
self.setData(dataset)
self.verbose = verbose
self.batchlearning = batchlearning
self.weightdecay = weightdecay
self.epoch = 0
self.totalepochs = 0
# set up gradient descender
self.descent = GradientDescent()
self.descent.alpha = learningrate
self.descent.momentum = momentum
self.descent.alphadecay = lrdecay
self.descent.init(module.params)
def train(self):
"""Train the associated module for one epoch."""
assert len(self.ds) > 0, "Dataset cannot be empty."
self.module.resetDerivatives()
errors = 0
ponderation = 0.
shuffledSequences = []
for seq in self.ds._provideSequences():
shuffledSequences.append(seq)
shuffle(shuffledSequences)
for seq in shuffledSequences:
e, p = self._calcDerivs(seq)
errors += e
ponderation += p
if not self.batchlearning:
gradient = self.module.derivs - self.weightdecay * self.module.params
new = self.descent(gradient, errors)
if new is not None:
self.module.params[:] = new
self.module.resetDerivatives()
if self.verbose:
print "Total error:", errors / ponderation
if self.batchlearning:
self.module._setParameters(self.descent(self.module.derivs))
self.epoch += 1
self.totalepochs += 1
return errors / ponderation
def _calcDerivs(self, seq):
"""Calculate error function and backpropagate output errors to yield
the gradient."""
self.module.reset()
for sample in seq:
self.module.activate(sample[0])
error = 0
ponderation = 0.
for offset, sample in reversed(list(enumerate(seq))):
# need to make a distinction here between datasets containing
# importance, and others
target = sample[1]
outerr = target - self.module.outputbuffer[offset]
if len(sample) > 2:
importance = sample[2]
error += 0.5 * dot(importance, outerr ** 2)
ponderation += sum(importance)
self.module.backActivate(outerr * importance)
else:
error += 0.5 * sum(outerr ** 2)
ponderation += len(target)
# FIXME: the next line keeps arac from producing NaNs. I don't
# know why that is, but somehow the __str__ method of the
# ndarray class fixes something,
str(outerr)
self.module.backActivate(outerr)
return error, ponderation
def _checkGradient(self, dataset=None, silent=False):
"""Numeric check of the computed gradient for debugging purposes."""
if dataset:
self.setData(dataset)
res = []
for seq in self.ds._provideSequences():
self.module.resetDerivatives()
self._calcDerivs(seq)
e = 1e-6
analyticalDerivs = self.module.derivs.copy()
numericalDerivs = []
for p in range(self.module.paramdim):
storedoldval = self.module.params[p]
self.module.params[p] += e
righterror, dummy = self._calcDerivs(seq)
self.module.params[p] -= 2 * e
lefterror, dummy = self._calcDerivs(seq)
approxderiv = (righterror - lefterror) / (2 * e)
self.module.params[p] = storedoldval
numericalDerivs.append(approxderiv)
r = zip(analyticalDerivs, numericalDerivs)
res.append(r)
if not silent:
print r
return res
def testOnData(self, dataset=None, verbose=False):
"""Compute the MSE of the module performance on the given dataset.
If no dataset is supplied, the one passed upon Trainer initialization is
used."""
if dataset == None:
dataset = self.ds
dataset.reset()
if verbose:
print '\nTesting on data:'
errors = []
importances = []
ponderatedErrors = []
for seq in dataset._provideSequences():
self.module.reset()
e, i = dataset._evaluateSequence(self.module.activate, seq, verbose)
importances.append(i)
errors.append(e)
ponderatedErrors.append(e / i)
if verbose:
print 'All errors:', ponderatedErrors
assert sum(importances) > 0
avgErr = sum(errors) / sum(importances)
if verbose:
print 'Average error:', avgErr
print ('Max error:', max(ponderatedErrors), 'Median error:',
sorted(ponderatedErrors)[len(errors) / 2])
return avgErr
def testOnClassData(self, dataset=None, verbose=False,
return_targets=False):
"""Return winner-takes-all classification output on a given dataset.
If no dataset is given, the dataset passed during Trainer
initialization is used. If return_targets is set, also return
corresponding target classes.
"""
if dataset == None:
dataset = self.ds
dataset.reset()
out = []
targ = []
for seq in dataset._provideSequences():
self.module.reset()
for input, target in seq:
res = self.module.activate(input)
out.append(argmax(res))
targ.append(argmax(target))
if return_targets:
return out, targ
else:
return out
def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
continueEpochs=10, validationProportion=0.25, outFile=None):
"""Train the module on the dataset until it converges.
Return the module with the parameters that gave the minimal validation
error.
If no dataset is given, the dataset passed during Trainer
initialization is used. validationProportion is the ratio of the dataset
that is used for the validation dataset.
If maxEpochs is given, at most that many epochs
are trained. Each time validation error hits a minimum, try for
continueEpochs epochs to find a better one."""
epochs = 0
if dataset == None:
dataset = self.ds
if verbose == None:
verbose = self.verbose
# Split the dataset randomly: validationProportion of the samples for
# validation.
trainingData, validationData = (
dataset.splitWithProportion(1 - validationProportion))
if not (len(trainingData) > 0 and len(validationData)):
raise ValueError("Provided dataset too small to be split into training " +
"and validation sets with proportion " + str(validationProportion))
self.ds = trainingData
bestweights = self.module.params.copy()
bestverr = self.testOnData(validationData)
trainingErrors = []
validationErrors = [bestverr]
while True:
trainingErrors.append(self.train())
validationErrors.append(self.testOnData(validationData))
if epochs == 0 or validationErrors[-1] < bestverr:
# one update is always done
bestverr = validationErrors[-1]
bestweights = self.module.params.copy()
if maxEpochs != None and epochs >= maxEpochs:
self.module.params[:] = bestweights
break
epochs += 1
if outFile != None :
pfile = open(outFile + '.net', 'w')
pickle.dump(self.module, pfile)
f = open(outFile, 'a')
f.write(str(epochs) + '\n')
f.write('train-errors:' + fListToString(trainingErrors, 6) + '\n')
f.write('valid-errors:' + fListToString(validationErrors, 6) + '\n')
f.close()
if len(validationErrors) >= continueEpochs * 2:
# have the validation errors started going up again?
# compare the average of the last few to the previous few
old = validationErrors[-continueEpochs * 2:-continueEpochs]
new = validationErrors[-continueEpochs:]
if min(new) > max(old):
self.module.params[:] = bestweights
break
trainingErrors.append(self.testOnData(trainingData))
self.ds = dataset
if verbose:
print 'train-errors:', fListToString(trainingErrors, 6)
print 'valid-errors:', fListToString(validationErrors, 6)
return trainingErrors, validationErrors
| {
"repo_name": "hassaanm/stock-trading",
"path": "src/pybrain/supervised/trainers/backprop.py",
"copies": "1",
"size": "10288",
"license": "apache-2.0",
"hash": 4294103652845233000,
"line_mean": 39.6640316206,
"line_max": 96,
"alpha_frac": 0.5867029549,
"autogenerated": false,
"ratio": 4.467216673903604,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001612492627778181,
"num_lines": 253
} |
__author__ = 'Daan Wierstra, Tom Schaul and Sun Yi'
from ves import VanillaGradientEvolutionStrategies
from pybrain.utilities import triu2flat, blockCombine
from scipy.linalg import inv, pinv2
from scipy import outer, dot, multiply, zeros, diag, mat, sum
class ExactNES(VanillaGradientEvolutionStrategies):
    """ A new version of NES, using the exact instead of the approximate
    Fisher Information Matrix, as well as a number of other improvements.
    """
    # 4 kinds of baselines can be used:
    NOBASELINE = 0
    UNIFORMBASELINE = 1
    SPECIFICBASELINE = 2
    BLOCKBASELINE = 3
    # the most robust one is also the default:
    baselineType = BLOCKBASELINE
    def _calcBatchUpdate(self, fitnesses):
        """Compute the (baseline-corrected) natural-gradient update over all
        distribution parameters (mean and factorized covariance) from the
        most recent batch of samples."""
        samples = self.allSamples[-self.batchSize:]
        d = self.xdim
        invA = inv(self.factorSigma)
        invSigma = inv(self.sigma)
        diagInvA = diag(diag(invA))
        # efficient computation of V, which corresponds to inv(Fisher)*logDerivs
        V = zeros((self.numParams,self.batchSize))
        # u is used to compute the uniform baseline
        u = zeros(self.numParams)
        for i in range(self.batchSize):
            # log-derivatives w.r.t. mean (first d entries) and
            # w.r.t. the (flattened) upper-triangular covariance factor
            s = dot(invA.T, (samples[i] - self.x))
            R = outer(s, dot(invA, s)) - diagInvA
            flatR = triu2flat(R)
            u[:d] += fitnesses[i] * (samples[i]-self.x)
            u[d:] += fitnesses[i] * flatR
            V[:d,i] += samples[i]-self.x
            V[d:,i] += flatR
        # walk the coordinates backwards, building the inverse-Fisher
        # blocks incrementally and applying them to u and V in place
        j = self.numParams -1
        D = 1/invSigma[-1, -1]
        # G corresponds to the blocks of the inv(Fisher)
        G = 1/(invSigma[-1, -1]+invA[-1,-1]**2)
        u[j] = dot(G, u[j])
        V[j,:] = dot(G, V[j,:])
        j -= 1
        for k in reversed(range(d-1)):
            p = invSigma[k+1:,k]
            w = invSigma[k,k]
            wg = w + invA[k,k]**2
            q = dot(D, p)
            c = dot(p, q)
            r = 1/(w-c)
            rg = 1/ (wg-c)
            t = -(1+r*c)/w
            tg = -(1+rg*c)/wg
            # grow the current blocks by one coordinate (block-matrix
            # inversion via the Schur complement)
            G = blockCombine([[rg, tg*q],
                              [mat(tg*q).T, D+rg*outer(q,q)]])
            D = blockCombine([[r , t *q],
                              [mat(t *q).T, D+r *outer(q,q)]])
            u[j-(d-k-1):j+1] = dot(G, u[j-(d-k-1):j+1])
            V[j-(d-k-1):j+1,:] = dot(G, V[j-(d-k-1):j+1,:])
            j -= d-k
        # determine the update vector, according to different baselines.
        if self.baselineType == self.BLOCKBASELINE:
            # one scalar baseline per coordinate block, weighted by the
            # squared entries of V
            update = zeros(self.numParams)
            vsquare = multiply(V, V)
            j = self.numParams-1
            for k in reversed(range(self.xdim)):
                b0 = sum(vsquare[j-(d-k-1):j+1,:], 0)
                b = dot(b0, fitnesses) / sum(b0)
                update[j-(d-k-1):j+1] = dot(V[j-(d-k-1):j+1, :], (fitnesses - b))
                j -= d-k
            b0 = sum(vsquare[:j+1,:], 0)
            b = dot(b0, fitnesses) / sum(b0)
            update[:j+1] = dot(V[:j+1, :], (fitnesses - b))
        elif self.baselineType == self.SPECIFICBASELINE:
            # one baseline per single parameter
            update = zeros(self.numParams)
            vsquare = multiply(V, V)
            for j in range(self.numParams):
                b = dot(vsquare[j,:], fitnesses) / sum(vsquare[j,:])
                update[j] = dot(V[j,:], (fitnesses - b))
        elif self.baselineType == self.UNIFORMBASELINE:
            # a single baseline shared by all parameters
            v = sum(V, 1)
            update = u - dot(v, u)/dot(v, v) * v
        elif self.baselineType == self.NOBASELINE:
            update = dot(V, fitnesses)
        else:
            raise NotImplementedError('No such baseline implemented')
        return update / self.batchSize
class OriginalNES(VanillaGradientEvolutionStrategies):
    """ In the same framework, the formulation of the original NES algorithm (CEC-2008)
    is now much simpler too. """
    def _calcBatchUpdate(self, fitnesses):
        """Least-squares regression of the batch fitnesses onto the
        log-derivatives (plus a constant baseline column), via pseudo-inverse."""
        invSigma = inv(self.sigma)
        samples = self.allSamples[-self.batchSize:]
        phi = zeros((self.batchSize, self.numParams+1))
        phi[:, :self.xdim] = self._logDerivsX(samples, self.x, invSigma)
        phi[:, self.xdim:-1] = self._logDerivsFactorSigma(samples, self.x, invSigma, self.factorSigma)
        # last column of ones acts as the fitness baseline ...
        phi[:, -1] = 1
        # ... and is dropped from the returned update
        update = dot(pinv2(phi), fitnesses)[:-1]
        return update
    def _logDerivsFactorSigma(self, samples, mu, invSigma, factorSigma):
        """ Compute the log-derivatives w.r.t. the factorized covariance matrix components.
        This implementation should be faster than the one in Vanilla. """
        res = zeros((len(samples), self.numParams-self.xdim))
        invA = inv(factorSigma)
        diagInvA = diag(diag(invA))
        for i, sample in enumerate(samples):
            s = dot(invA.T, (sample-mu))
            R = outer(s, dot(invA, s)) - diagInvA
            # only the upper triangle of R is a free parameter
            res[i] = triu2flat(R)
        return res
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/rl/learners/blackboxoptimizers/enes.py",
"copies": "1",
"size": "4991",
"license": "bsd-3-clause",
"hash": 9040246722059842000,
"line_mean": 35.7058823529,
"line_max": 102,
"alpha_frac": 0.5281506712,
"autogenerated": false,
"ratio": 3.322902796271638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9228061764783916,
"avg_score": 0.02459834053754435,
"num_lines": 136
} |
__author__ = 'Daan Wierstra, Tom Schaul and Sun Yi'
from .ves import VanillaGradientEvolutionStrategies
from pybrain.utilities import triu2flat, blockCombine
from scipy.linalg import inv, pinv2
from scipy import outer, dot, multiply, zeros, diag, mat, sum
class ExactNES(VanillaGradientEvolutionStrategies):
    """ A new version of NES, using the exact instead of the approximate
    Fisher Information Matrix, as well as a number of other improvements.
    (GECCO 2009).
    """
    # 4 kinds of baselines can be used:
    NOBASELINE = 0
    UNIFORMBASELINE = 1
    SPECIFICBASELINE = 2
    BLOCKBASELINE = 3
    #: Type of baseline. The most robust one is also the default.
    baselineType = BLOCKBASELINE
    learningRate = 1.
    def _calcBatchUpdate(self, fitnesses):
        """Compute the (baseline-corrected) natural-gradient update over all
        distribution parameters (mean and factorized covariance) from the
        most recent batch of samples."""
        samples = self.allSamples[-self.batchSize:]
        d = self.numParameters
        invA = inv(self.factorSigma)
        invSigma = inv(self.sigma)
        diagInvA = diag(diag(invA))
        # efficient computation of V, which corresponds to inv(Fisher)*logDerivs
        V = zeros((self.numDistrParams, self.batchSize))
        # u is used to compute the uniform baseline
        u = zeros(self.numDistrParams)
        for i in range(self.batchSize):
            # log-derivatives w.r.t. mean (first d entries) and
            # w.r.t. the (flattened) upper-triangular covariance factor
            s = dot(invA.T, (samples[i] - self.x))
            R = outer(s, dot(invA, s)) - diagInvA
            flatR = triu2flat(R)
            u[:d] += fitnesses[i] * (samples[i] - self.x)
            u[d:] += fitnesses[i] * flatR
            V[:d, i] += samples[i] - self.x
            V[d:, i] += flatR
        # walk the coordinates backwards, building the inverse-Fisher
        # blocks incrementally and applying them to u and V in place
        j = self.numDistrParams - 1
        D = 1 / invSigma[-1, -1]
        # G corresponds to the blocks of the inv(Fisher)
        G = 1 / (invSigma[-1, -1] + invA[-1, -1] ** 2)
        u[j] = dot(G, u[j])
        V[j, :] = dot(G, V[j, :])
        j -= 1
        for k in reversed(list(range(d - 1))):
            p = invSigma[k + 1:, k]
            w = invSigma[k, k]
            wg = w + invA[k, k] ** 2
            q = dot(D, p)
            c = dot(p, q)
            r = 1 / (w - c)
            rg = 1 / (wg - c)
            t = -(1 + r * c) / w
            tg = -(1 + rg * c) / wg
            # grow the current blocks by one coordinate (block-matrix
            # inversion via the Schur complement)
            G = blockCombine([[rg, tg * q],
                              [mat(tg * q).T, D + rg * outer(q, q)]])
            D = blockCombine([[r , t * q],
                              [mat(t * q).T, D + r * outer(q, q)]])
            u[j - (d - k - 1):j + 1] = dot(G, u[j - (d - k - 1):j + 1])
            V[j - (d - k - 1):j + 1, :] = dot(G, V[j - (d - k - 1):j + 1, :])
            j -= d - k
        # determine the update vector, according to different baselines.
        if self.baselineType == self.BLOCKBASELINE:
            # one scalar baseline per coordinate block, weighted by the
            # squared entries of V
            update = zeros(self.numDistrParams)
            vsquare = multiply(V, V)
            j = self.numDistrParams - 1
            for k in reversed(list(range(self.numParameters))):
                b0 = sum(vsquare[j - (d - k - 1):j + 1, :], 0)
                b = dot(b0, fitnesses) / sum(b0)
                update[j - (d - k - 1):j + 1] = dot(V[j - (d - k - 1):j + 1, :], (fitnesses - b))
                j -= d - k
            b0 = sum(vsquare[:j + 1, :], 0)
            b = dot(b0, fitnesses) / sum(b0)
            update[:j + 1] = dot(V[:j + 1, :], (fitnesses - b))
        elif self.baselineType == self.SPECIFICBASELINE:
            # one baseline per single parameter
            update = zeros(self.numDistrParams)
            vsquare = multiply(V, V)
            for j in range(self.numDistrParams):
                b = dot(vsquare[j, :], fitnesses) / sum(vsquare[j, :])
                update[j] = dot(V[j, :], (fitnesses - b))
        elif self.baselineType == self.UNIFORMBASELINE:
            # a single baseline shared by all parameters
            v = sum(V, 1)
            update = u - dot(v, u) / dot(v, v) * v
        elif self.baselineType == self.NOBASELINE:
            update = dot(V, fitnesses)
        else:
            raise NotImplementedError('No such baseline implemented')
        return update / self.batchSize
class OriginalNES(VanillaGradientEvolutionStrategies):
    """ Reference implementation of the original Natural Evolution Strategies algorithm (CEC-2008). """

    learningRate = 1.

    def _calcBatchUpdate(self, fitnesses):
        """ Estimate the gradient over the last batch by regressing the fitness
        values onto the log-derivatives of the search distribution, via a
        pseudo-inverse least-squares fit. """
        xdim = self.numParameters
        invSigma = inv(self.sigma)
        samples = self.allSamples[-self.batchSize:]
        # Design matrix phi: one row per sample. Columns are the
        # log-derivatives w.r.t. the mean (first xdim columns), then w.r.t.
        # the factorized covariance; the trailing column of ones acts as a
        # fitness baseline term.
        phi = zeros((self.batchSize, self.numDistrParams + 1))
        phi[:, :xdim] = self._logDerivsX(samples, self.x, invSigma)
        phi[:, xdim:-1] = self._logDerivsFactorSigma(samples, self.x, invSigma, self.factorSigma)
        phi[:, -1] = 1
        # Least-squares solution; drop the last coefficient (baseline).
        update = dot(pinv2(phi), fitnesses)[:-1]
        return update

    def _logDerivsFactorSigma(self, samples, mu, invSigma, factorSigma):
        """ Compute the log-derivatives w.r.t. the factorized covariance matrix components.
        This implementation should be faster than the one in Vanilla. """
        res = zeros((len(samples), self.numDistrParams - self.numParameters))
        invA = inv(factorSigma)
        diagInvA = diag(diag(invA))
        for i, sample in enumerate(samples):
            # s = A^-T (x - mu); R holds the derivative terms, with the
            # diagonal corrected by diag(inv(A)).
            s = dot(invA.T, (sample - mu))
            R = outer(s, dot(invA, s)) - diagInvA
            # triu2flat (pybrain helper) flattens the upper triangle row-wise.
            res[i] = triu2flat(R)
        return res
| {
"repo_name": "sepehr125/pybrain",
"path": "pybrain/optimization/distributionbased/nes.py",
"copies": "25",
"size": "5171",
"license": "bsd-3-clause",
"hash": 4192387093650943000,
"line_mean": 36.2014388489,
"line_max": 103,
"alpha_frac": 0.536259911,
"autogenerated": false,
"ratio": 3.297831632653061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007973921254551947,
"num_lines": 139
} |
class dstat_plugin(dstat):
    """
    CPU frequency in percentage as reported by ACPI.

    One column per CPU: current frequency as a percentage of the maximum
    frequency, averaged over the sampling interval.
    """
    def __init__(self):
        self.name = 'frequency'
        self.type = 'p'
        self.width = 4
        self.scale = 34

    def check(self):
        # Fail early if any CPU lacks a readable cpufreq sysfs interface.
        for cpu in glob.glob('/sys/devices/system/cpu/cpu[0-9]*'):
            if not os.access(cpu+'/cpufreq/scaling_cur_freq', os.R_OK):
                raise Exception('Cannot access acpi %s frequency information' % os.path.basename(cpu))

    def vars(self):
        # One column per CPU, sorted by name (cpu0, cpu1, ...).
        ret = []
        for name in glob.glob('/sys/devices/system/cpu/cpu[0-9]*'):
            ret.append(os.path.basename(name))
        ret.sort()
        return ret
#       return os.listdir('/sys/devices/system/cpu/')

    def nick(self):
        return [name.lower() for name in self.vars]

    def extract(self):
        for cpu in self.vars:
            # Renamed locals (maxfreq/curfreq) so the builtin max() is no
            # longer shadowed; values are read from sysfs (kHz).
            for line in dopen('/sys/devices/system/cpu/'+cpu+'/cpufreq/scaling_max_freq').readlines():
                l = line.split()
                maxfreq = int(l[0])
            for line in dopen('/sys/devices/system/cpu/'+cpu+'/cpufreq/scaling_cur_freq').readlines():
                l = line.split()
                curfreq = int(l[0])
            ### Need to close because of bug in sysfs (?)
            dclose('/sys/devices/system/cpu/'+cpu+'/cpufreq/scaling_cur_freq')
            # Accumulate the percentage; averaged over elapsed time below.
            self.set1[cpu] = self.set1[cpu] + curfreq * 100.0 / maxfreq
            if op.update:
                self.val[cpu] = self.set1[cpu] / elapsed
            else:
                self.val[cpu] = self.set1[cpu]
            if step == op.delay:
                self.set1[cpu] = 0
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_cpufreq.py",
"copies": "8",
"size": "1664",
"license": "apache-2.0",
"hash": 2358538679360418000,
"line_mean": 32.28,
"line_max": 102,
"alpha_frac": 0.5300480769,
"autogenerated": false,
"ratio": 3.488469601677149,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.801851767857715,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Percentage of remaining battery power as reported by ACPI.
    """
    def __init__(self):
        self.name = 'battery'
        self.type = 'p'
        self.width = 4
        self.scale = 34
        # Which kernel interface provides battery data; decided in check().
        self.battery_type = "none"

    def check(self):
        # Prefer the legacy procfs ACPI interface, fall back to sysfs.
        if os.path.exists('/proc/acpi/battery/'):
            self.battery_type = "procfs"
        elif glob.glob('/sys/class/power_supply/BAT*'):
            self.battery_type = "sysfs"
        else:
            raise Exception, "No ACPI battery information found."

    def vars(self):
        # One column per battery that reports itself as present.
        ret = []
        if self.battery_type == "procfs":
            for battery in os.listdir('/proc/acpi/battery/'):
                for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
                    l = line.split()
                    if len(l) < 2: continue
                    if l[0] == 'present:' and l[1] == 'yes':
                        ret.append(battery)
        elif self.battery_type == "sysfs":
            for battery in glob.glob('/sys/class/power_supply/BAT*'):
                for line in dopen(battery+'/present').readlines():
                    if int(line[0]) == 1:
                        ret.append(os.path.basename(battery))
        ret.sort()
        return ret

    def nick(self):
        return [name.lower() for name in self.vars]

    def extract(self):
        for battery in self.vars:
            if self.battery_type == "procfs":
                # The 'last full capacity' line is the 100% reference value.
                for line in dopen('/proc/acpi/battery/'+battery+'/info').readlines():
                    l = line.split()
                    if len(l) < 4: continue
                    if l[0] == 'last':
                        full = int(l[3])
                        break
                for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
                    l = line.split()
                    if len(l) < 3: continue
                    if l[0] == 'remaining':
                        current = int(l[2])
                        break
                # NOTE(review): a truly empty battery (current == 0) is shown
                # as -1 here — confirm whether that is intended.
                if current:
                    self.val[battery] = current * 100.0 / full
                else:
                    self.val[battery] = -1
            elif self.battery_type == "sysfs":
                # sysfs 'capacity' is already a percentage.
                for line in dopen('/sys/class/power_supply/'+battery+'/capacity').readlines():
                    current = int(line)
                    break
                if current:
                    self.val[battery] = current
                else:
                    self.val[battery] = -1
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_battery.py",
"copies": "4",
"size": "2648",
"license": "apache-2.0",
"hash": -738157285798286000,
"line_mean": 36.2957746479,
"line_max": 94,
"alpha_frac": 0.4652567976,
"autogenerated": false,
"ratio": 4.061349693251533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003585311872221954,
"num_lines": 71
} |
class dstat_plugin(dstat):
    """
    Interrupt and context-switch rates of a remote host, polled over SNMP.
    Server address and community string are taken from the DSTAT_SNMPSERVER
    and DSTAT_SNMPCOMMUNITY environment variables.
    """
    def __init__(self):
        self.name = 'system'
        self.nick = ('int', 'csw')
        self.vars = ('intr', 'ctxt')
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.server = os.getenv('DSTAT_SNMPSERVER') or '192.168.1.1'
        self.community = os.getenv('DSTAT_SNMPCOMMUNITY') or 'public'

    def check(self):
        try:
            # Imported lazily so dstat still works without pysnmp installed.
            global cmdgen
            from pysnmp.entity.rfc3413.oneliner import cmdgen
        except:
            raise Exception, 'Needs pysnmp and pyasn1 modules'

    def extract(self):
        # Numeric OIDs under 1.3.6.1.4.1.2021.11 — presumably the UCD-SNMP
        # raw interrupt / context-switch counters; verify against the MIB.
        self.set2['intr'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,59,0)))
        self.set2['ctxt'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,60,0)))
        # NOTE(review): sibling plugins test op.update here; bare 'update'
        # relies on a dstat global of that name — confirm it exists.
        if update:
            for name in self.vars:
                self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_snmp_sys.py",
"copies": "4",
"size": "1057",
"license": "apache-2.0",
"hash": 9062723107006619000,
"line_mean": 32.03125,
"line_max": 97,
"alpha_frac": 0.5591296121,
"autogenerated": false,
"ratio": 3.011396011396011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5570525623496012,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Total CPU usage (usr/sys/idl percentages) of a remote host, polled over
    SNMP. Server address and community string come from the DSTAT_SNMPSERVER
    and DSTAT_SNMPCOMMUNITY environment variables.
    """
    def __init__(self):
        self.name = 'total cpu'
        self.vars = ( 'usr', 'sys', 'idl' )
        self.type = 'p'
        self.width = 3
        self.scale = 34
        self.server = os.getenv('DSTAT_SNMPSERVER') or '192.168.1.1'
        self.community = os.getenv('DSTAT_SNMPCOMMUNITY') or 'public'

    def check(self):
        try:
            # Imported lazily so dstat still works without pysnmp installed.
            global cmdgen
            from pysnmp.entity.rfc3413.oneliner import cmdgen
        except:
            raise Exception, 'Needs pysnmp and pyasn1 modules'

    def extract(self):
        # Raw cpu tick counters; symbolic MIB names kept below for reference.
        self.set2['usr'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,50,0)))
        self.set2['sys'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,52,0)))
        self.set2['idl'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,53,0)))
#       self.set2['usr'] = int(snmpget(self.server, self.community, (('UCD-SNMP-MIB', 'ssCpuRawUser'), 0)))
#       self.set2['sys'] = int(snmpget(self.server, self.community, (('UCD-SNMP-MIB', 'ssCpuRawSystem'), 0)))
#       self.set2['idl'] = int(snmpget(self.server, self.community, (('UCD-SNMP-MIB', 'ssCpuRawIdle'), 0)))
        # NOTE(review): sibling plugins test op.update here; bare 'update'
        # relies on a dstat global of that name — confirm it exists.
        if update:
            for name in self.vars:
                # Each counter delta as a share of the total tick delta.
                if sum(self.set2.values()) > sum(self.set1.values()):
                    self.val[name] = 100.0 * (self.set2[name] - self.set1[name]) / (sum(self.set2.values()) - sum(self.set1.values()))
                else:
                    self.val[name] = 0
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_snmp_cpu.py",
"copies": "4",
"size": "1635",
"license": "apache-2.0",
"hash": -1125329061263014900,
"line_mean": 42.0263157895,
"line_max": 134,
"alpha_frac": 0.5620795107,
"autogenerated": false,
"ratio": 2.924865831842576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5486945342542576,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Number of active dbus sessions.
    """
    def __init__(self):
        self.name = 'dbus'
        self.nick = ('sys', 'ses')
        self.vars = ('system', 'session')
        self.type = 'd'
        self.width = 3
        self.scale = 100

    def check(self):
#       dstat.info(1, 'The dbus module is an EXPERIMENTAL module.')
        try:
            global dbus
            import dbus
            try:
                # Proxy for the bus daemon itself, used to enumerate services.
                self.sysbus = dbus.Bus(dbus.Bus.TYPE_SYSTEM).get_service('org.freedesktop.DBus').get_object('/org/freedesktop/DBus', 'org.freedesktop.DBus')
                try:
                    self.sesbus = dbus.Bus(dbus.Bus.TYPE_SESSION).get_service('org.freedesktop.DBus').get_object('/org/freedesktop/DBus', 'org.freedesktop.DBus')
                except:
                    # A session bus may legitimately be absent (e.g. headless).
                    self.sesbus = None
            except:
                raise Exception, 'Unable to connect to dbus message bus'
        except:
            raise Exception, 'Needs python-dbus module'

    def extract(self):
        # -1 presumably excludes the bus daemon's own entry — confirm.
        self.val['system'] = len(self.sysbus.ListServices()) - 1
        try:
            self.val['session'] = len(self.sesbus.ListServices()) - 1
        except:
            # No session bus available (sesbus is None).
            self.val['session'] = -1
#       print dir(b); print dir(s); print dir(d); print d.ListServices()
#       print dir(d)
#       print d.ListServices()
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_dbus.py",
"copies": "8",
"size": "1426",
"license": "apache-2.0",
"hash": 8652286392009474000,
"line_mean": 33.7804878049,
"line_max": 161,
"alpha_frac": 0.5441795231,
"autogenerated": false,
"ratio": 3.5829145728643215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8127094095964321,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Number of read and write transactions per device.
    Displays the number of read and write I/O transactions per device.
    """
    def __init__(self):
        self.nick = ('reads', 'writs' )
        self.type = 'd'
        self.scale = 1000
        # Matches partitions and dm/md aggregates, which are excluded from
        # the 'total' and from auto-discovered columns.
        self.diskfilter = re.compile('^(dm-\d+|md\d+|[hsv]d[a-z]+\d+)$')
        self.open('/proc/diskstats')
        self.cols = 2

    def discover(self, *objlist):
        # All diskstats devices with at least one non-zero counter.
        ret = []
        for l in self.splitlines():
            if len(l) < 13: continue
            if l[3:] == ['0',] * 11: continue
            name = l[2]
            ret.append(name)
        for item in objlist: ret.append(item)
        if not ret:
            raise Exception, "No suitable block devices found to monitor"
        return ret

    def vars(self):
        # Columns: the -D list if given, 'total' by default, else every
        # whole disk (partitions/aggregates filtered out).
        ret = []
        if op.disklist:
            varlist = op.disklist
        elif not op.full:
            varlist = ('total',)
        else:
            varlist = []
            for name in self.discover:
                if self.diskfilter.match(name): continue
                if name not in blockdevices(): continue
                varlist.append(name)
#           if len(varlist) > 2: varlist = varlist[0:2]
            varlist.sort()
        for name in varlist:
            if name in self.discover + ['total'] + op.diskset.keys():
                ret.append(name)
        return ret

    def name(self):
        return ['dsk/'+sysfs_dev(name) for name in self.vars]

    def extract(self):
        for name in self.vars: self.set2[name] = (0, 0)
        for l in self.splitlines():
            if len(l) < 13: continue
            if l[3] == '0' and l[7] == '0': continue
            name = l[2]
            if l[3:] == ['0',] * 11: continue
            # Fields l[3]/l[7] are the diskstats read/write completion counters.
            if not self.diskfilter.match(name):
                self.set2['total'] = ( self.set2['total'][0] + long(l[3]), self.set2['total'][1] + long(l[7]) )
            if name in self.vars and name != 'total':
                self.set2[name] = ( self.set2[name][0] + long(l[3]), self.set2[name][1] + long(l[7]))
            # User-defined disk sets (-D) aggregate devices matching a pattern.
            for diskset in self.vars:
                if diskset in op.diskset.keys():
                    for disk in op.diskset[diskset]:
                        if re.match('^'+disk+'$', name):
                            self.set2[diskset] = ( self.set2[diskset][0] + long(l[3]), self.set2[diskset][1] + long(l[7]) )
        for name in self.set2.keys():
            self.val[name] = (
                (self.set2[name][0] - self.set1[name][0]) / elapsed,
                (self.set2[name][1] - self.set1[name][1]) / elapsed,
            )
        if step == op.delay:
            self.set1.update(self.set2)
#   S_VALUE(ioj->rd_ios, ioi->rd_ios, itv),
#   S_VALUE(ioj->wr_ios, ioi->wr_ios, itv),
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_disk_tps.py",
"copies": "1",
"size": "2829",
"license": "apache-2.0",
"hash": 7452889950388287000,
"line_mean": 35.2692307692,
"line_max": 123,
"alpha_frac": 0.4966419229,
"autogenerated": false,
"ratio": 3.3920863309352516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9294777751180779,
"avg_score": 0.018790100530894506,
"num_lines": 78
} |
class dstat_plugin(dstat):
    """
    Number of read and write transactions per device.
    Displays the number of read and write I/O transactions per device.

    NOTE(review): the previous docstring described bandwidth utilization,
    but the code reads the diskstats read/write completion counters
    (fields 3 and 7), identically to the other dstat_disk_tps variant.
    """
    def __init__(self):
        self.nick = ('reads', 'writs' )
        self.type = 'd'
        self.scale = 1000
        # Matches partitions and dm/md aggregates, which are excluded from
        # the 'total' and from auto-discovered columns.
        self.diskfilter = re.compile('^(dm-[0-9]+|md[0-9]+|[hsv]d[a-z]+[0-9]+)$')
        self.open('/proc/diskstats')
        self.cols = 2

    def discover(self, *objlist):
        # All diskstats devices with at least one non-zero counter.
        ret = []
        for l in self.splitlines():
            if len(l) < 13: continue
            if l[3:] == ['0',] * 11: continue
            name = l[2]
            ret.append(name)
        for item in objlist: ret.append(item)
        if not ret:
            raise Exception, "No suitable block devices found to monitor"
        return ret

    def vars(self):
        # Columns: the -D list if given, 'total' by default, else every
        # whole disk found under /sys/block.
        ret = []
        if op.disklist:
            varlist = op.disklist
        elif not op.full:
            varlist = ('total',)
        else:
            varlist = []
            blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
            for name in self.discover:
                if self.diskfilter.match(name): continue
                if name not in blockdevices: continue
                varlist.append(name)
#           if len(varlist) > 2: varlist = varlist[0:2]
            varlist.sort()
        for name in varlist:
            if name in self.discover + ['total'] + op.diskset.keys():
                ret.append(name)
        return ret

    def name(self):
        return ['dsk/'+name for name in self.vars]

    def extract(self):
        for name in self.vars: self.set2[name] = (0, 0)
        for l in self.splitlines():
            if len(l) < 13: continue
            if l[3] == '0' and l[7] == '0': continue
            name = l[2]
            if l[3:] == ['0',] * 11: continue
            # Fields l[3]/l[7] are the diskstats read/write completion counters.
            if not self.diskfilter.match(name):
                self.set2['total'] = ( self.set2['total'][0] + long(l[3]), self.set2['total'][1] + long(l[7]) )
            if name in self.vars and name != 'total':
                self.set2[name] = ( self.set2[name][0] + long(l[3]), self.set2[name][1] + long(l[7]))
            # User-defined disk sets (-D) aggregate devices matching a pattern.
            for diskset in self.vars:
                if diskset in op.diskset.keys():
                    for disk in op.diskset[diskset]:
                        if re.match('^'+disk+'$', name):
                            self.set2[diskset] = ( self.set2[diskset][0] + long(l[3]), self.set2[diskset][1] + long(l[7]) )
        for name in self.set2.keys():
            self.val[name] = (
                (self.set2[name][0] - self.set1[name][0]) / elapsed,
                (self.set2[name][1] - self.set1[name][1]) / elapsed,
            )
        if step == op.delay:
            self.set1.update(self.set2)
#   S_VALUE(ioj->rd_ios, ioi->rd_ios, itv),
#   S_VALUE(ioj->wr_ios, ioi->wr_ios, itv),
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_disk_tps.py",
"copies": "3",
"size": "3052",
"license": "apache-2.0",
"hash": 7831346218263361000,
"line_mean": 36.6790123457,
"line_max": 123,
"alpha_frac": 0.5121231979,
"autogenerated": false,
"ratio": 3.436936936936937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5449060134836937,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Percentage of remaining battery power as reported by ACPI.
    """
    def __init__(self):
        self.name = 'battery'
        self.type = 'p'
        self.width = 4
        self.scale = 34

    def check(self):
        # This variant supports only the legacy procfs ACPI interface.
        if not os.path.exists('/proc/acpi/battery/'):
            raise Exception, "No ACPI battery information found."

    def vars(self):
        # One column per battery that reports itself as present.
        ret = []
        for battery in os.listdir('/proc/acpi/battery/'):
            for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
                l = line.split()
                if len(l) < 2: continue
                if l[0] == 'present:' and l[1] == 'yes':
                    ret.append(battery)
        ret.sort()
        return ret

    def nick(self):
        return [name.lower() for name in self.vars]

    def extract(self):
        for battery in self.vars:
            # The 'last full capacity' line is the 100% reference value.
            for line in dopen('/proc/acpi/battery/'+battery+'/info').readlines():
                l = line.split()
                if len(l) < 4: continue
                if l[0] == 'last':
                    full = int(l[3])
                    break
            for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
                l = line.split()
                if len(l) < 3: continue
                if l[0] == 'remaining':
                    current = int(l[2])
                    break
            # NOTE(review): a truly empty battery (current == 0) is shown
            # as -1 here — confirm whether that is intended.
            if current:
                self.val[battery] = current * 100.0 / full
            else:
                self.val[battery] = -1
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/dstat_for_server/plugins/dstat_battery.py",
"copies": "4",
"size": "1607",
"license": "apache-2.0",
"hash": 919497425492015000,
"line_mean": 31.14,
"line_max": 82,
"alpha_frac": 0.4741754823,
"autogenerated": false,
"ratio": 3.9099756690997567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6384151151399756,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    CPU usage of the dstat process itself.

    Reports user-space time, system (kernel) time, and their sum for the
    running dstat process, in milliseconds of cputime per second of wall
    time. On a single-core machine the maximum total is 1000ms; each extra
    core adds another 1000ms. Useful for judging the overhead of dstat and
    of the plugins that were selected.
    """
    def __init__(self):
        self.name = 'dstat cpu'
        self.vars = ('user', 'system', 'total')
        self.type = 'd'
        self.width = 4
        self.scale = 100

    def extract(self):
        # Snapshot our own rusage counters.
        usage = resource.getrusage(resource.RUSAGE_SELF)
        utime = float(usage.ru_utime)
        stime = float(usage.ru_stime)
        self.set2['user'] = utime
        self.set2['system'] = stime
        self.set2['total'] = utime + stime
        # Per-interval delta, converted from seconds to milliseconds.
        for key in self.vars:
            self.val[key] = (self.set2[key] - self.set1[key]) * 1000.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_dstat_cpu.py",
"copies": "1",
"size": "1144",
"license": "apache-2.0",
"hash": 3322234994332524500,
"line_mean": 32.6470588235,
"line_max": 83,
"alpha_frac": 0.6206293706,
"autogenerated": false,
"ratio": 3.3746312684365782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4495260639036578,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Provide memory information related to the dstat process.
    The various values provide information about the memory usage of the
    dstat process. This plugin gives you the possibility to follow memory
    usage changes of dstat over time. It may help to vizualise the
    performance of Dstat and its selection of plugins.
    """
    def __init__(self):
        self.name = 'dstat memory usage'
        self.vars = ('virtual', 'resident', 'shared', 'data')
        self.type = 'd'
        # statm of dstat's own pid (ownpid is a dstat framework global).
        self.open('/proc/%s/statm' % ownpid)

    def extract(self):
        l = self.splitline()
#       l = linecache.getline('/proc/%s/schedstat' % self.pid, 1).split()
        # statm reports sizes in pages; convert to KiB via the page size.
        self.val['virtual'] = long(l[0]) * pagesize / 1024
        self.val['resident'] = long(l[1]) * pagesize / 1024
        self.val['shared'] = long(l[2]) * pagesize / 1024
#       self.val['text'] = long(l[3]) * pagesize / 1024
#       self.val['library'] = long(l[4]) * pagesize / 1024
        self.val['data'] = long(l[5]) * pagesize / 1024
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_dstat_mem.py",
"copies": "5",
"size": "1109",
"license": "apache-2.0",
"hash": 3890729920546638300,
"line_mean": 38.6071428571,
"line_max": 74,
"alpha_frac": 0.607754734,
"autogenerated": false,
"ratio": 3.3403614457831323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6448116179783132,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Provide more information related to the dstat process.
    The dstat cputime is the total cputime dstat requires per second. On a
    system with one cpu and one core, the total cputime is 1000ms. On a system
    with 2 cores the total is 2000ms. It may help to vizualise the performance
    of Dstat and its selection of plugins.
    """
    def __init__(self):
        self.name = 'dstat'
        self.vars = ('cputime', 'latency')
        self.type = 'd'
        self.width = 4
        self.scale = 100
        # schedstat of dstat's own pid (ownpid is a dstat framework global).
        self.open('/proc/%s/schedstat' % ownpid)

    def extract(self):
        l = self.splitline()
#       l = linecache.getline('/proc/%s/schedstat' % self.pid, 1).split()
        # First two schedstat fields: time on cpu and time waiting to run —
        # presumably in nanoseconds; verify against kernel documentation.
        self.set2['cputime'] = long(l[0])
        self.set2['latency'] = long(l[1])
        for name in self.vars:
            self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_dstat.py",
"copies": "1",
"size": "1051",
"license": "apache-2.0",
"hash": 3011366744095862300,
"line_mean": 31.84375,
"line_max": 80,
"alpha_frac": 0.595623216,
"autogenerated": false,
"ratio": 3.2741433021806854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9358543324800523,
"avg_score": 0.0022446386760323544,
"num_lines": 32
} |
class dstat_plugin(dstat):
    """
    Total amount of read and write throughput (in bytes) on a GPFS filesystem.
    """
    def __init__(self):
        self.name = 'gpfs i/o'
        self.nick = ('read', 'write')
        # mmpmon 'io_s' output labels the byte counters '_br_' and '_bw_'.
        self.vars = ('_br_', '_bw_')

    def check(self):
        if os.access('/usr/lpp/mmfs/bin/mmpmon', os.X_OK):
            try:
                # Keep a persistent pipe to mmpmon and reset its counters once.
                self.stdin, self.stdout, self.stderr = dpopen('/usr/lpp/mmfs/bin/mmpmon -p -s')
                self.stdin.write('reset\n')
                readpipe(self.stdout)
            except IOError:
                raise Exception, 'Cannot interface with gpfs mmpmon binary'
            return True
        raise Exception, 'Needs GPFS mmpmon binary'

    def extract(self):
        try:
            self.stdin.write('io_s\n')
#           readpipe(self.stderr)
            for line in readpipe(self.stdout):
                if not line: continue
                l = line.split()
                # Each counter value follows its label on the same line.
                for name in self.vars:
                    self.set2[name] = long(l[l.index(name)+1])
            for name in self.vars:
                self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        except IOError, e:
            # Pipe to mmpmon broke; report -1 for every column.
            if op.debug > 1: print '%s: lost pipe to mmpmon, %s' % (self.filename, e)
            for name in self.vars: self.val[name] = -1
        except Exception, e:
            if op.debug > 1: print '%s: exception %s' % (self.filename, e)
            for name in self.vars: self.val[name] = -1
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_gpfs.py",
"copies": "5",
"size": "1617",
"license": "apache-2.0",
"hash": -6915810745624728000,
"line_mean": 34.9333333333,
"line_max": 95,
"alpha_frac": 0.5213358071,
"autogenerated": false,
"ratio": 3.4258474576271185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00464759199875119,
"num_lines": 45
} |
# MySQL client options (credentials/host) read once from the environment and
# passed verbatim to the mysql command line in check() below.
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL')
class dstat_plugin(dstat):
def __init__(self):
self.name = 'innodb io ops '
self.nick = ('rea', 'wri', 'syn')
self.vars = ('read', 'write', 'sync')
self.type = 'f'
self.width = 3
self.scale = 1000
def check(self):
if os.access('/usr/bin/mysql', os.X_OK):
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/bin/mysql -n %s' % mysql_options)
except IOError:
raise Exception, 'Cannot interface with MySQL binary'
return True
raise Exception, 'Needs MySQL binary'
def extract(self):
try:
self.stdin.write('show engine innodb status\G\n')
line = greppipe(self.stdout, 'OS file reads ')
if line:
l = line.split()
self.set2['read'] = l[0].rstrip(',')
self.set2['write'] = l[4].rstrip(',')
self.set2['sync'] = l[8]
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError, e:
if op.debug > 1: print '%s: lost pipe to mysql, %s' % (self.filename, e)
for name in self.vars: self.val[name] = -1
except Exception, e:
if op.debug > 1: print '%s: exception' % (self.filename, e)
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_innodb_io.py",
"copies": "1",
"size": "1617",
"license": "apache-2.0",
"hash": 3302783962264956000,
"line_mean": 32,
"line_max": 101,
"alpha_frac": 0.5139146568,
"autogenerated": false,
"ratio": 3.4551282051282053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9439930713536226,
"avg_score": 0.005822429678395627,
"num_lines": 49
} |
# MySQL client options (credentials/host) read once from the environment and
# passed verbatim to the mysql command line in check() below.
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL')
class dstat_plugin(dstat):
def __init__(self):
self.name = 'innodb ops'
self.nick = ('ins', 'upd', 'del', 'rea')
self.vars = ('inserted', 'updated', 'deleted', 'read')
self.type = 'f'
self.width = 3
self.scale = 1000
def check(self):
if os.access('/usr/bin/mysql', os.X_OK):
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/bin/mysql -n %s' % mysql_options)
except IOError:
raise Exception, 'Cannot interface with MySQL binary'
return True
raise Exception, 'Needs MySQL binary'
def extract(self):
try:
self.stdin.write('show engine innodb status\G\n')
line = greppipe(self.stdout, 'Number of rows inserted')
if line:
l = line.split()
self.set2['inserted'] = int(l[4].rstrip(','))
self.set2['updated'] = int(l[6].rstrip(','))
self.set2['deleted'] = int(l[8].rstrip(','))
self.set2['read'] = int(l[10])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError, e:
if op.debug > 1: print '%s: lost pipe to mysql, %s' % (self.filename, e)
for name in self.vars: self.val[name] = -1
except Exception, e:
if op.debug > 1: print '%s: exception' % (self.filename, e)
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_innodb_ops.py",
"copies": "4",
"size": "1728",
"license": "apache-2.0",
"hash": -9054044663240183000,
"line_mean": 33.56,
"line_max": 101,
"alpha_frac": 0.5185185185,
"autogenerated": false,
"ratio": 3.456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.59745185185,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Extended NFSv3 client operation rates, one column per RPC procedure,
    read from the 'proc3' line of /proc/net/rpc/nfs.
    """
    def __init__(self):
        self.name = 'extended nfs3 client operations'
        self.nick = ('null', 'gatr', 'satr', 'look', 'aces', 'rdln', 'read', 'writ', 'crea', 'mkdr', 'syml', 'mknd', 'rm', 'rmdr', 'ren', 'link', 'rdir', 'rdr+', 'fstt', 'fsnf', 'path', 'cmmt')
        self.vars = ('null', 'getattr', 'setattr', 'lookup', 'access', 'readlink', 'read', 'write', 'create', 'mkdir', 'symlink', 'mknod', 'remove', 'rmdir', 'rename', 'link', 'readdir', 'readdirplus', 'fsstat', 'fsinfo', 'pathconf', 'commit')
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.open('/proc/net/rpc/nfs')

    def check(self):
        # Bug fix: '%' was applied to info()'s return value (None), raising
        # TypeError; interpolate self.filename into the message instead
        # (cf. the correct form used by the dstat_nfs3 plugin).
        info(1, 'Module %s is still experimental.' % self.filename)

    def extract(self):
        for l in self.splitlines():
            # Only the proc3 line carries the NFSv3 procedure counters;
            # per-procedure counts start at field index 2.
            if not l or l[0] != 'proc3': continue
            for i, name in enumerate(self.vars):
                self.set2[name] = long(l[i+2])
        for name in self.vars:
            self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_nfs3_ops.py",
"copies": "4",
"size": "1193",
"license": "apache-2.0",
"hash": -5810560103220747000,
"line_mean": 44.8846153846,
"line_max": 243,
"alpha_frac": 0.5431684828,
"autogenerated": false,
"ratio": 2.9676616915422884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5510830174342288,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Extended NFSv3 server operation rates, one column per RPC procedure,
    read from the 'proc3' line of /proc/net/rpc/nfsd.
    """
    def __init__(self):
        self.name = 'extended nfs3 server operations'
        self.nick = ('null', 'gatr', 'satr', 'look', 'aces', 'rdln', 'read', 'writ', 'crea', 'mkdr', 'syml', 'mknd', 'rm', 'rmdr', 'ren', 'link', 'rdir', 'rdr+', 'fstt', 'fsnf', 'path', 'cmmt')
        self.vars = ('null', 'getattr', 'setattr', 'lookup', 'access', 'readlink', 'read', 'write', 'create', 'mkdir', 'symlink', 'mknod', 'remove', 'rmdir', 'rename', 'link', 'readdir', 'readdirplus', 'fsstat', 'fsinfo', 'pathconf', 'commit')
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.open('/proc/net/rpc/nfsd')

    def check(self):
        # Bug fix: '%' was applied to info()'s return value (None), raising
        # TypeError; interpolate self.filename into the message instead
        # (cf. the correct form used by the dstat_nfsd3 plugin).
        info(1, 'Module %s is still experimental.' % self.filename)

    def extract(self):
        for l in self.splitlines():
            # Only the proc3 line carries the NFSv3 procedure counters;
            # per-procedure counts start at field index 2.
            if not l or l[0] != 'proc3': continue
            for i, name in enumerate(self.vars):
                self.set2[name] = long(l[i+2])
        for name in self.vars:
            self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_nfsd3_ops.py",
"copies": "4",
"size": "1194",
"license": "apache-2.0",
"hash": 6084406598566101000,
"line_mean": 44.9230769231,
"line_max": 243,
"alpha_frac": 0.5435510888,
"autogenerated": false,
"ratio": 2.9627791563275436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5506330245127544,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Summary NFSv3 client activity from /proc/net/rpc/nfs, grouped into
    read, write, readdir, inode-op, filesystem-op and commit columns.
    """
    def __init__(self):
        self.name = 'nfs3 client'
        self.nick = ('read', 'writ', 'rdir', 'inod', 'fs', 'cmmt')
        self.vars = ('read', 'write', 'readdir', 'inode', 'filesystem', 'commit')
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.open('/proc/net/rpc/nfs')

    def check(self):
        info(1, 'Module %s is still experimental.' % self.filename)

    def extract(self):
        for l in self.splitlines():
            # Only the proc3 line carries the NFSv3 procedure counters.
            if not l or l[0] != 'proc3': continue
            self.set2['read'] = long(l[8])
            self.set2['write'] = long(l[9])
            # Remaining per-procedure counters grouped into summary buckets.
            self.set2['readdir'] = long(l[17]) + long(l[18])
            self.set2['inode'] = long(l[3]) + long(l[4]) + long(l[5]) + long(l[6]) + long(l[7]) + long(l[10]) + long(l[11]) + long(l[12]) + long(l[13]) + long(l[14]) + long(l[15]) + long(l[16])
            self.set2['filesystem'] = long(l[19]) + long(l[20]) + long(l[21])
            self.set2['commit'] = long(l[22])
        for name in self.vars:
            self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_nfs3.py",
"copies": "4",
"size": "1254",
"license": "apache-2.0",
"hash": -4183282887807630000,
"line_mean": 40.8,
"line_max": 193,
"alpha_frac": 0.5119617225,
"autogenerated": false,
"ratio": 2.8179775280898878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003415773930553356,
"num_lines": 30
} |
class dstat_plugin(dstat):
    """
    Summary NFSv3 server activity from /proc/net/rpc/nfsd, grouped into
    read, write, readdir, inode-op, filesystem-op and commit columns.
    Field offsets differ slightly from the client-side plugin.
    """
    def __init__(self):
        self.name = 'nfs3 server'
        self.nick = ('read', 'writ', 'rdir', 'inod', 'fs', 'cmmt')
        self.vars = ('read', 'write', 'readdir', 'inode', 'filesystem', 'commit')
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.open('/proc/net/rpc/nfsd')

    def check(self):
        info(1, 'Module %s is still experimental.' % self.filename)

    def extract(self):
        for l in self.splitlines():
            # Only the proc3 line carries the NFSv3 procedure counters.
            if not l or l[0] != 'proc3': continue
            self.set2['read'] = long(l[8])
            self.set2['write'] = long(l[9])
            # Remaining per-procedure counters grouped into summary buckets.
            self.set2['readdir'] = long(l[18]) + long(l[19])
            self.set2['inode'] = long(l[3]) + long(l[4]) + long(l[5]) + long(l[6]) + long(l[7]) + long(l[10]) + long(l[11]) + long(l[12]) + long(l[13]) + long(l[14]) + long(l[15]) + long(l[16]) + long(l[17])
            self.set2['filesystem'] = long(l[20]) + long(l[21]) + long(l[22])
            self.set2['commit'] = long(l[23])
        for name in self.vars:
            self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_nfsd3.py",
"copies": "4",
"size": "1271",
"license": "apache-2.0",
"hash": 1029410707070497700,
"line_mean": 38.71875,
"line_max": 207,
"alpha_frac": 0.5114083399,
"autogenerated": false,
"ratio": 2.8057395143487858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003191445970282352,
"num_lines": 32
} |
class dstat_plugin(dstat):
    """
    Temperature readings, from IBM-ACPI (/proc/acpi/ibm/thermal) when
    available, otherwise from the generic ACPI thermal zones.
    """
    def __init__(self):
        self.name = 'thermal'
        self.type = 'd'
        self.width = 3
        self.scale = 20
        if os.path.exists('/proc/acpi/ibm/thermal'):
            # Fixed sensor order of the IBM-ACPI interface; only sensors
            # reporting a positive value become columns.
            self.namelist = ['cpu', 'pci', 'hdd', 'cpu', 'ba0', 'unk', 'ba1', 'unk']
            self.nick = []
            for line in dopen('/proc/acpi/ibm/thermal'):
                l = line.split()
                for i, name in enumerate(self.namelist):
                    if int(l[i+1]) > 0:
                        self.nick.append(name)
            self.vars = self.nick
        elif os.path.exists('/proc/acpi/thermal_zone/'):
            # One column per ACPI thermal zone directory.
            self.vars = os.listdir('/proc/acpi/thermal_zone/')
#           self.nick = [name.lower() for name in self.vars]
            self.nick = []
            for name in self.vars:
                self.nick.append(name.lower())
        else:
            raise Exception, 'Needs kernel ACPI or IBM-ACPI support'

    def check(self):
        if not os.path.exists('/proc/acpi/ibm/thermal') and not os.path.exists('/proc/acpi/thermal_zone/'):
            raise Exception, 'Needs kernel ACPI or IBM-ACPI support'

    def extract(self):
        if os.path.exists('/proc/acpi/ibm/thermal'):
            for line in dopen('/proc/acpi/ibm/thermal'):
                l = line.split()
                for i, name in enumerate(self.namelist):
                    if int(l[i+1]) > 0:
                        self.val[name] = int(l[i+1])
        elif os.path.exists('/proc/acpi/thermal_zone/'):
            for zone in self.vars:
                # 'temperature' lines look like 'temperature: <value> C'.
                for line in dopen('/proc/acpi/thermal_zone/'+zone+'/temperature').readlines():
                    l = line.split()
                    self.val[zone] = int(l[1])
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_thermal.py",
"copies": "4",
"size": "1806",
"license": "apache-2.0",
"hash": -4876748690479531000,
"line_mean": 40.0454545455,
"line_max": 107,
"alpha_frac": 0.5083056478,
"autogenerated": false,
"ratio": 3.4864864864864864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5994792134286486,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """dstat plugin: temperatures via sysfs LNXTHERM, IBM-ACPI or /proc ACPI."""
    def __init__(self):
        self.name = 'thermal'
        self.type = 'd'
        self.width = 3
        self.scale = 20
        if os.path.exists('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'):
            # Modern sysfs thermal zone: use the attribute names as columns.
            self.vars = os.listdir('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/')
            self.nick = []
            for name in self.vars:
                self.nick.append(name.lower())
        elif os.path.exists('/proc/acpi/ibm/thermal'):
            # ThinkPad/IBM ACPI: fixed-order sensor list on a single line.
            self.namelist = ['cpu', 'pci', 'hdd', 'cpu', 'ba0', 'unk', 'ba1', 'unk']
            self.nick = []
            for line in dopen('/proc/acpi/ibm/thermal'):
                l = line.split()
                for i, name in enumerate(self.namelist):
                    if int(l[i+1]) > 0:
                        self.nick.append(name)
            self.vars = self.nick
        elif os.path.exists('/proc/acpi/thermal_zone/'):
            self.vars = os.listdir('/proc/acpi/thermal_zone/')
            # self.nick = [name.lower() for name in self.vars]
            self.nick = []
            for name in self.vars:
                self.nick.append(name.lower())
        else:
            raise Exception, 'Needs kernel ACPI or IBM-ACPI support'
    def check(self):
        # NOTE(review): this probes LNXTHERM:00 while __init__/extract use
        # LNXTHERM:01 -- looks inconsistent; confirm which device is intended.
        if not os.path.exists('/proc/acpi/ibm/thermal') and \
           not os.path.exists('/proc/acpi/thermal_zone/') and \
           not os.path.exists('/sys/bus/acpi/devices/LNXTHERM:00/thermal_zone/'):
            raise Exception, 'Needs kernel ACPI or IBM-ACPI support'
    def extract(self):
        """Read current temperatures from whichever interface is present."""
        if os.path.exists('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'):
            for zone in self.vars:
                # Skip subdirectories; only plain attribute files are read.
                if os.path.isdir('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'+zone) == False:
                    for line in dopen('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'+zone).readlines():
                        l = line.split()
                        # Non-numeric attributes are reported as 0.
                        if l[0].isdigit() == True:
                            self.val[zone] = int(l[0])
                        else:
                            self.val[zone] = 0
        elif os.path.exists('/proc/acpi/ibm/thermal'):
            for line in dopen('/proc/acpi/ibm/thermal'):
                l = line.split()
                for i, name in enumerate(self.namelist):
                    if int(l[i+1]) > 0:
                        self.val[name] = int(l[i+1])
        elif os.path.exists('/proc/acpi/thermal_zone/'):
            for zone in self.vars:
                for line in dopen('/proc/acpi/thermal_zone/'+zone+'/temperature').readlines():
                    l = line.split()
                    self.val[zone] = int(l[1])
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_thermal.py",
"copies": "2",
"size": "2730",
"license": "apache-2.0",
"hash": 4162006691888896500,
"line_mean": 41.65625,
"line_max": 106,
"alpha_frac": 0.506959707,
"autogenerated": false,
"ratio": 3.464467005076142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9951014325582901,
"avg_score": 0.004082477298648317,
"num_lines": 64
} |
class dstat_plugin(dstat):
    """
    Number of packets received and sent per interface.

    Parses /proc/net/dev; a 'total' pseudo-interface aggregates all real
    interfaces (loopback, bonds, 'face' header artefacts and VLAN
    sub-interfaces are excluded from the total).
    """
    def __init__(self):
        self.nick = ('#recv', '#send')
        self.type = 'f'
        self.width = 5
        self.scale = 1000
        # Interfaces that must NOT contribute to the 'total' column.
        self.totalfilter = re.compile('^(lo|bond[0-9]+|face|.+\.[0-9]+)$')
        self.open('/proc/net/dev')
        self.cols = 2
    def discover(self, *objlist):
        """Return the sorted interface names that have seen any traffic."""
        ret = []
        for l in self.splitlines(replace=':'):
            if len(l) < 17: continue
            # Skip interfaces with zero received and zero sent packets.
            if l[2] == '0' and l[10] == '0': continue
            name = l[0]
            if name not in ('lo', 'face'):
                ret.append(name)
        ret.sort()
        for item in objlist: ret.append(item)
        return ret
    def vars(self):
        """Select the interfaces to display, honouring -N/--full options."""
        ret = []
        if op.netlist:
            varlist = op.netlist
        elif not op.full:
            # BUGFIX: this used to be the tuple ('total',), on which the
            # varlist.sort() below raised AttributeError; use a list.
            varlist = ['total']
        else:
            # Framework is assumed to have resolved self.discover to a list
            # by this point -- TODO confirm against the dstat core.
            varlist = self.discover
        varlist.sort()
        for name in varlist:
            if name in self.discover + ['total', 'lo']:
                ret.append(name)
        if not ret:
            raise Exception("No suitable network interfaces found to monitor")
        return ret
    def name(self):
        return ['pkt/'+name for name in self.vars]
    def extract(self):
        """Sample packet counters and compute per-second rates."""
        self.set2['total'] = [0, 0]
        for l in self.splitlines(replace=':'):
            if len(l) < 17: continue
            if l[2] == '0' and l[10] == '0': continue
            name = l[0]
            # Columns: l[2] = packets received, l[10] = packets sent.
            if name in self.vars :
                self.set2[name] = ( long(l[2]), long(l[10]) )
            if not self.totalfilter.match(name):
                self.set2['total'] = ( self.set2['total'][0] + long(l[2]), self.set2['total'][1] + long(l[10]))
        # 'update', 'elapsed', 'step' and 'op' are dstat framework globals.
        if update:
            for name in self.set2.keys():
                self.val[name] = (
                    (self.set2[name][0] - self.set1[name][0]) * 1.0 / elapsed,
                    (self.set2[name][1] - self.set1[name][1]) * 1.0 / elapsed,
                )
        if step == op.delay:
            self.set1.update(self.set2)
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_net_packets.py",
"copies": "3",
"size": "2198",
"license": "apache-2.0",
"hash": 9033042897884385000,
"line_mean": 32.303030303,
"line_max": 111,
"alpha_frac": 0.4767970883,
"autogenerated": false,
"ratio": 3.488888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00431911530517228,
"num_lines": 66
} |
class dstat_plugin(dstat):
    """
    Power usage information from ACPI.
    Displays the power usage in mwh of your system's battery using ACPI
    information. This information is only available when the battery is
    being used (or being charged).
    """
    def __init__(self):
        self.name = 'power'
        self.nick = ( 'usage', )
        self.vars = ( 'rate', )
        self.type = 'f'
        self.width = 5
        self.scale = 1
        # Accumulated draw across sampling steps; reset once displayed.
        self.rate = 0
        self.batteries = []
        # Keep only batteries whose state file has at least one parsable line.
        for battery in os.listdir('/proc/acpi/battery/'):
            for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
                l = line.split()
                if len(l) < 2: continue
                self.batteries.append(battery)
                break
    def check(self):
        if not self.batteries:
            raise Exception, 'No battery information found, no power usage statistics'
    def extract(self):
        """Accumulate watts drawn from every discharging battery."""
        amperes_drawn = 0
        voltage = 0
        watts_drawn = 0
        for battery in self.batteries:
            for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
                l = line.split()
                if len(l) < 3: continue
                if l[0] == 'present:' and l[1] != 'yes': continue
                # Only a discharging battery reports meaningful draw.
                if l[0:2] == ['charging','state:'] and l[2] != 'discharging':
                    voltage = 0
                    break
                if l[0:2] == ['present','voltage:']:
                    voltage = int(l[2]) / 1000.0
                elif l[0:2] == ['present','rate:'] and l[3] == 'mW':
                    watts_drawn = int(l[2]) / 1000.0
                elif l[0:2] == ['present','rate:'] and l[3] == 'mA':
                    # Rate in mA: convert to watts via the reported voltage.
                    amperes_drawn = int(l[2]) / 1000.0
        self.rate = self.rate + watts_drawn + voltage * amperes_drawn
        ### Return error if we found no information
        if self.rate == 0:
            self.rate = -1
        # 'op', 'elapsed' and 'step' are dstat framework globals.
        if op.update:
            self.val['rate'] = self.rate / elapsed
        else:
            self.val['rate'] = self.rate
        if step == op.delay:
            self.rate = 0
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/rs-sysmon2/plugins/dstat_power.py",
"copies": "3",
"size": "2190",
"license": "apache-2.0",
"hash": -7955416247595141000,
"line_mean": 32.6923076923,
"line_max": 86,
"alpha_frac": 0.4945205479,
"autogenerated": false,
"ratio": 3.674496644295302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5669017192195303,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Top interrupt
    Displays the name of the most frequent interrupt
    """
    def __init__(self):
        self.name = 'most frequent'
        self.vars = ('interrupt',)
        self.type = 's'
        self.width = 20
        self.scale = 0
        # Previous sample of per-interrupt counters (256 slots).
        self.intset1 = [ 0 ] * 256
        self.open('/proc/stat')
        # Rebind the method to its result: an interrupt-number -> name map.
        self.names = self.names()
    def names(self):
        """Map interrupt numbers (as strings) to cleaned-up device names."""
        ret = {}
        for line in dopen('/proc/interrupts'):
            l = line.split()
            # 'cpunr' is a framework global: number of CPU columns to skip.
            if len(l) <= cpunr: continue
            l1 = l[0].split(':')[0]
            ### Cleanup possible names from /proc/interrupts
            l2 = ' '.join(l[cpunr+2:])
            l2 = l2.replace('_hcd:', '/')
            l2 = re.sub('@pci[:\d+\.]+', '', l2)
            ret[l1] = l2
        return ret
    def extract(self):
        """Find the interrupt with the highest per-second rate this step."""
        self.output = ''
        self.val['total'] = 0.0
        for line in self.splitlines():
            if line[0] == 'intr':
                # line[1] is the grand total, line[2] is irq 0; slice from 3
                # so that list index i corresponds to interrupt i+1.
                self.intset2 = [ long(int) for int in line[3:] ]
        for i in range(len(self.intset2)):
            total = (self.intset2[i] - self.intset1[i]) * 1.0 / elapsed
            ### Put the highest value in self.val
            if total > self.val['total']:
                if str(i+1) in self.names.keys():
                    self.val['name'] = self.names[str(i+1)]
                else:
                    self.val['name'] = 'int ' + str(i+1)
                self.val['total'] = total
        if step == op.delay:
            self.intset1 = self.intset2
        if self.val['total'] != 0.0:
            self.output = '%-15s%s' % (self.val['name'], cprint(self.val['total'], 'd', 5, 1000))
    def showcsv(self):
        return '%s / %f' % (self.val['name'], self.val['total'])
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_top_int.py",
"copies": "1",
"size": "1824",
"license": "apache-2.0",
"hash": -3672554239119188000,
"line_mean": 29.9152542373,
"line_max": 97,
"alpha_frac": 0.4709429825,
"autogenerated": false,
"ratio": 3.3653136531365315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4336256635636531,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Top most expensive block I/O process.
    Displays the name of the most expensive block I/O process.
    """
    def __init__(self):
        self.name = 'most expensive'
        self.vars = ('block i/o process',)
        self.type = 's'
        self.width = 22
        self.scale = 0
        # Previous sample of per-pid I/O counters; compared against pidset2.
        self.pidset1 = {}
    def check(self):
        if not os.access('/proc/self/io', os.R_OK):
            raise Exception, 'Kernel has no I/O accounting, use at least 2.6.20'
    def extract(self):
        """Scan /proc/<pid>/io and report the heaviest block-I/O process."""
        self.output = ''
        self.pidset2 = {}
        self.val['usage'] = 0.0
        for pid in proc_pidlist():
            try:
                ### Reset values
                if not self.pidset2.has_key(pid):
                    self.pidset2[pid] = {'read_bytes:': 0, 'write_bytes:': 0}
                if not self.pidset1.has_key(pid):
                    self.pidset1[pid] = {'read_bytes:': 0, 'write_bytes:': 0}
                ### Extract name
                name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
                ### Extract counters
                for l in proc_splitlines('/proc/%s/io' % pid):
                    if len(l) != 2: continue
                    self.pidset2[pid][l[0]] = int(l[1])
            # Processes may vanish mid-scan; skip them silently.
            except IOError:
                continue
            except IndexError:
                continue
            read_usage = (self.pidset2[pid]['read_bytes:'] - self.pidset1[pid]['read_bytes:']) * 1.0 / elapsed
            write_usage = (self.pidset2[pid]['write_bytes:'] - self.pidset1[pid]['write_bytes:']) * 1.0 / elapsed
            usage = read_usage + write_usage
            ### Get the process that spends the most jiffies
            if usage > self.val['usage']:
                self.val['usage'] = usage
                self.val['read_usage'] = read_usage
                self.val['write_usage'] = write_usage
                self.val['pid'] = pid
                self.val['name'] = getnamebypid(pid, name)
            # st = os.stat("/proc/%s" % pid)
        if step == op.delay:
            self.pidset1 = self.pidset2
        if self.val['usage'] != 0.0:
            self.output = '%-*s%s %s' % (self.width-11, self.val['name'][0:self.width-11], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
        ### Debug (show PID)
        # self.output = '%*s %-*s%s %s' % (5, self.val['pid'], self.width-17, self.val['name'][0:self.width-17], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
    def showcsv(self):
        return '%s / %d:%d' % (self.val['name'], self.val['read_usage'], self.val['write_usage'])
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_top_bio.py",
"copies": "1",
"size": "2746",
"license": "apache-2.0",
"hash": 3737035171122390500,
"line_mean": 38.2285714286,
"line_max": 204,
"alpha_frac": 0.5032774945,
"autogenerated": false,
"ratio": 3.3776137761377614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4380891270637761,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Top most expensive I/O process
    Displays the name of the most expensive I/O process
    """
    def __init__(self):
        self.name = 'most expensive'
        self.vars = ('i/o process',)
        self.type = 's'
        self.width = 22
        self.scale = 0
        # Previous sample of per-pid rchar/wchar counters.
        self.pidset1 = {}
    def check(self):
        if not os.access('/proc/self/io', os.R_OK):
            raise Exception, 'Kernel has no I/O accounting, use at least 2.6.20'
    def extract(self):
        """Scan /proc/<pid>/io (rchar/wchar) and report the heaviest process."""
        self.output = ''
        self.pidset2 = {}
        self.val['usage'] = 0.0
        for pid in proc_pidlist():
            try:
                ### Reset values
                if not self.pidset2.has_key(pid):
                    self.pidset2[pid] = {'rchar:': 0, 'wchar:': 0}
                if not self.pidset1.has_key(pid):
                    self.pidset1[pid] = {'rchar:': 0, 'wchar:': 0}
                ### Extract name
                name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
                ### Extract counters
                for l in proc_splitlines('/proc/%s/io' % pid):
                    if len(l) != 2: continue
                    self.pidset2[pid][l[0]] = int(l[1])
            # Processes may vanish mid-scan; skip them silently.
            except IOError:
                continue
            except IndexError:
                continue
            read_usage = (self.pidset2[pid]['rchar:'] - self.pidset1[pid]['rchar:']) * 1.0 / elapsed
            write_usage = (self.pidset2[pid]['wchar:'] - self.pidset1[pid]['wchar:']) * 1.0 / elapsed
            usage = read_usage + write_usage
            # if usage > 0.0:
            # print '%s %s:%s' % (pid, read_usage, write_usage)
            ### Get the process that spends the most jiffies
            if usage > self.val['usage']:
                self.val['usage'] = usage
                self.val['read_usage'] = read_usage
                self.val['write_usage'] = write_usage
                self.val['pid'] = pid
                self.val['name'] = getnamebypid(pid, name)
        if step == op.delay:
            self.pidset1 = self.pidset2
        if self.val['usage'] != 0.0:
            self.output = '%-*s%s %s' % (self.width-11, self.val['name'][0:self.width-11], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
        ### Debug (show PID)
        # self.output = '%*s %-*s%s %s' % (5, self.val['pid'], self.width-17, self.val['name'][0:self.width-17], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
    def showcsv(self):
        return '%s / %d:%d' % (self.val['name'], self.val['read_usage'], self.val['write_usage'])
# vim:ts=4:sw=4:et
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_top_io.py",
"copies": "1",
"size": "2730",
"license": "apache-2.0",
"hash": 6107580023951060000,
"line_mean": 37.4507042254,
"line_max": 204,
"alpha_frac": 0.495970696,
"autogenerated": false,
"ratio": 3.357933579335793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4353904275335793,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Top most expesnive block I/O process.
    Displays the name of the most expensive block I/O process.
    """
    def __init__(self):
        self.name = 'most expensive'
        self.vars = ('block i/o process',)
        self.type = 's'
        self.width = 22
        self.scale = 0
        # Previous/current samples of per-pid block-I/O counters.
        self.pidset1 = {}; self.pidset2 = {}
    def check(self):
        if not os.access('/proc/self/io', os.R_OK):
            raise Exception, 'Kernel has no I/O accounting, use at least 2.6.20'
    def extract(self):
        """Scan /proc/<pid>/io and report the heaviest block-I/O process."""
        self.val['usage'] = 0.0
        self.val['block i/o process'] = ''
        for pid in proc_pidlist():
            try:
                ### Reset values
                if not self.pidset2.has_key(pid):
                    self.pidset2[pid] = {'read_bytes:': 0, 'write_bytes:': 0}
                if not self.pidset1.has_key(pid):
                    self.pidset1[pid] = {'read_bytes:': 0, 'write_bytes:': 0}
                ### Extract name
                name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
                ### Extract counters
                for l in proc_splitlines('/proc/%s/io' % pid):
                    if len(l) != 2: continue
                    self.pidset2[pid][l[0]] = int(l[1])
            # Processes may vanish mid-scan; skip them silently.
            except IOError:
                continue
            except IndexError:
                continue
            read_usage = (self.pidset2[pid]['read_bytes:'] - self.pidset1[pid]['read_bytes:']) * 1.0 / elapsed
            write_usage = (self.pidset2[pid]['write_bytes:'] - self.pidset1[pid]['write_bytes:']) * 1.0 / elapsed
            usage = read_usage + write_usage
            ### Get the process that spends the most jiffies
            if usage > self.val['usage']:
                self.val['usage'] = usage
                self.val['read_usage'] = read_usage
                self.val['write_usage'] = write_usage
                self.val['pid'] = pid
                self.val['name'] = getnamebypid(pid, name)
            # st = os.stat("/proc/%s" % pid)
        if step == op.delay:
            # Unlike the sibling plugin, copy per-pid dicts in place so
            # pidset1 keeps entries for pids that disappeared this step.
            for pid in self.pidset2.keys():
                self.pidset1[pid].update(self.pidset2[pid])
        if self.val['usage'] != 0.0:
            self.val['block i/o process'] = '%-*s%s %s' % (self.width-11, self.val['name'][0:self.width-11], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
        ### Debug (show PID)
        # self.val['block i/o process'] = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
    def showcsv(self):
        return '%s / %d:%d' % (self.val['name'], self.val['read_usage'], self.val['write_usage'])
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "rs-sysmon2/plugins/dstat_top_bio.py",
"copies": "3",
"size": "2741",
"license": "apache-2.0",
"hash": 3491228826989262300,
"line_mean": 38.1571428571,
"line_max": 201,
"alpha_frac": 0.5027362277,
"autogenerated": false,
"ratio": 3.396530359355638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5399266587055638,
"avg_score": null,
"num_lines": null
} |
### Dstat most expensive process plugin
### Displays the name of the most expensive process
### More information:
### http://lwn.net/Articles/317814/
class dstat_plugin(dstat):
    """dstat plugin: process with the highest out-of-memory kill score."""
    def __init__(self):
        self.name = 'out of memory'
        self.vars = ('kill score',)
        self.type = 's'
        self.width = 18
        self.scale = 0
    def check(self):
        if not os.access('/proc/self/oom_score', os.R_OK):
            raise Exception, 'Kernel does not support /proc/pid/oom_score, use at least 2.6.11.'
    def extract(self):
        """Scan /proc/<pid>/oom_score and keep the highest scorer."""
        self.output = ''
        self.val['max'] = 0.0
        for pid in proc_pidlist():
            try:
                ### Extract name
                name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
                ### Using dopen() will cause too many open files
                l = proc_splitline('/proc/%s/oom_score' % pid)
            # Processes may vanish mid-scan; skip them silently.
            except IOError:
                continue
            except IndexError:
                continue
            if len(l) < 1: continue
            oom_score = int(l[0])
            ### Is it a new topper ?
            if oom_score <= self.val['max']: continue
            self.val['max'] = oom_score
            self.val['name'] = getnamebypid(pid, name)
            self.val['pid'] = pid
        if self.val['max'] != 0.0:
            self.output = '%-*s%s' % (self.width-4, self.val['name'][0:self.width-4], cprint(self.val['max'], 'f', 4, 1000))
        ### Debug (show PID)
        # self.output = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
    def showcsv(self):
        return '%s / %d%%' % (self.val['name'], self.val['max'])
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_top_oom.py",
"copies": "5",
"size": "1721",
"license": "apache-2.0",
"hash": 8777200711671029000,
"line_mean": 30.2909090909,
"line_max": 124,
"alpha_frac": 0.5090063916,
"autogenerated": false,
"ratio": 3.3877952755905514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6396801667190551,
"avg_score": null,
"num_lines": null
} |
### Example content for /proc/bc/<veid>/ioacct
# read 2773011640320
# write 2095707136000
# dirty 4500342390784
# cancel 4080624041984
# missed 0
# syncs_total 2
# fsyncs_total 1730732
# fdatasyncs_total 3266
# range_syncs_total 0
# syncs_active 0
# fsyncs_active 0
# fdatasyncs_active 0
# range_syncs_active 0
# vfs_reads 3717331387
# vfs_read_chars 3559144863185798078
# vfs_writes 901216138
# vfs_write_chars 23864660931174682
# io_pbs 16
class dstat_plugin(dstat):
    """dstat plugin: per-container I/O accounting from OpenVZ beancounters.

    Reads /proc/bc/<veid>/ioacct and publishes per-second deltas of the
    read/write/dirty/cancel/missed counters (see the sample file format in
    the comments above this class).
    """
    def __init__(self):
        self.nick = ['read', 'write', 'dirty', 'cancel', 'missed']
        self.cols = len(self.nick)
    def check(self):
        if not os.path.exists('/proc/vz'):
            raise Exception, 'System does not have OpenVZ support'
        elif not os.path.exists('/proc/bc'):
            raise Exception, 'System does not have (new) OpenVZ beancounter support'
        elif not glob.glob('/proc/bc/*/ioacct'):
            raise Exception, 'System does not have any OpenVZ containers'
        info(1, 'Module %s is still experimental.' % self.filename)
    def name(self):
        return ['ve/'+name for name in self.vars]
    def vars(self):
        """List container IDs to display ('total' only unless --full)."""
        ret = []
        if not op.full:
            varlist = ['total',]
        else:
            varlist = [os.path.basename(veid) for veid in glob.glob('/proc/vz/*')]
        ret = varlist
        return ret
    def extract(self):
        """Sample each container's ioacct counters and compute rates."""
        global update
        for veid in self.vars:
            # NOTE(review): 'total' is re-initialised for every container and
            # then read before assignment below -- looks like it would raise
            # KeyError on first use; confirm against the dstat framework's
            # initialisation of self.set2.
            self.set2['total'] = {}
            for line in dopen('/proc/bc/%s/ioacct' % veid).readlines():
            # for line in dopen('ioacct.%d' % (update % 3)).readlines():
                l = line.split()
                if len(l) != 2: continue
                if l[0] not in self.nick: continue
                index = self.nick.index(l[0])
                self.set2[veid][index] = long(l[1])
                self.set2['total'][index] = self.set2['total'][index] + long(l[1])
            # print veid, self.val[veid], self.set2[veid][0], self.set2[veid][1]
            # print veid, self.val[veid], self.set1[veid][0], self.set1[veid][1]
            for i in range(len(self.nick)):
                self.val[veid][i] = (self.set2[veid][i] - self.set1[veid][i]) / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/dstat_for_server/plugins/dstat_vz_io.py",
"copies": "3",
"size": "2861",
"license": "apache-2.0",
"hash": 1366098103703546600,
"line_mean": 41.0735294118,
"line_max": 87,
"alpha_frac": 0.4732610975,
"autogenerated": false,
"ratio": 3.626108998732573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003708793420554075,
"num_lines": 68
} |
global socket
import socket
global struct
import struct
### FIXME: Implement millisecond granularity as well
### FIXME: Interrupts socket if data is overdue (more than 250ms ?)
class dstat_plugin(dstat):
    """
    Time from an NTP server.
    BEWARE: this dstat plugin typically takes a lot longer to run than
    system plugins and for that reason it is important to use an NTP server
    located nearby as well as make sure that it does not impact your other
    counters too much.
    """
    def __init__(self):
        self.name = 'ntp'
        self.nick = ('date/time',)
        self.vars = ('time',)
        self.timefmt = os.getenv('DSTAT_TIMEFMT') or '%d-%m %H:%M:%S'
        self.ntpserver = os.getenv('DSTAT_NTPSERVER') or '0.fedora.pool.ntp.org'
        self.type = 's'
        self.width = len(time.strftime(self.timefmt, time.localtime()))
        self.scale = 0
        # Offset between the NTP epoch (1900) and the Unix epoch (1970).
        self.epoch = 2208988800L
        # socket.setdefaulttimeout(0.25)
        self.socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
        # Short timeout so a slow NTP server cannot stall the sampling loop.
        self.socket.settimeout(0.25)
    def gettime(self):
        """Query the NTP server and return Unix seconds from its reply."""
        # 48-byte request; first byte 0x1b sets LI/VN/mode for a client query.
        self.socket.sendto( '\x1b' + 47 * '\0', ( self.ntpserver, 123 ))
        data, address = self.socket.recvfrom(1024)
        # Word 10 of the 12 network-order 32-bit words is used as the
        # server's timestamp (seconds); rebase it to the Unix epoch.
        return struct.unpack( '!12I', data )[10] - self.epoch
    def check(self):
        try:
            self.gettime()
        except socket.gaierror:
            raise Exception, 'Failed to connect to NTP server %s.' % self.ntpserver
        except socket.error:
            raise Exception, 'Error connecting to NTP server %s.' % self.ntpserver
    def extract(self):
        try:
            self.val['time'] = time.strftime(self.timefmt, time.localtime(self.gettime()))
        except:
            # Any failure (timeout, bad reply) renders a themed dash.
            self.val['time'] = theme['error'] + '-'.rjust(self.width-1) + ' '
    def showcsv(self):
        return time.strftime(self.timefmt, time.localtime(self.gettime()))
# vim:ts=4:sw=4:et
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_ntp.py",
"copies": "8",
"size": "1949",
"license": "apache-2.0",
"hash": -6668046228503562000,
"line_mean": 32.6034482759,
"line_max": 90,
"alpha_frac": 0.6177526937,
"autogenerated": false,
"ratio": 3.511711711711712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8129464405411713,
"avg_score": null,
"num_lines": null
} |
#Version: 2.2
#VEID user nice system uptime idle strv uptime used maxlat totlat numsched
#302 142926 0 10252 152896388 852779112954062 0 427034187248480 1048603937010 0 0 0
#301 27188 0 7896 152899846 853267000490282 0 427043845492614 701812592320 0 0 0
class dstat_plugin(dstat):
    """dstat plugin: per-container CPU usage from OpenVZ /proc/vz/vestat.

    Each VEID row carries user/nice/system jiffies and uptime (see the
    sample rows in the comments above); usage is shown as a percentage of
    the interval's total jiffies, per container plus a 'total' aggregate.
    """
    def __init__(self):
        self.nick = ('usr', 'sys', 'idl', 'nic')
        self.type = 'p'
        self.width = 3
        self.scale = 34
        self.open('/proc/vz/vestat')
        self.cols = 4
    def check(self):
        info(1, 'Module %s is still experimental.' % self.filename)
    def discover(self, *args):
        """Return the sorted list of VEIDs found in /proc/vz/vestat."""
        ret = []
        for l in self.splitlines():
            # Skip the header line and malformed/short rows.
            if len(l) < 6 or l[0] == 'VEID': continue
            ret.append(l[0])
        ret.sort()
        for item in args: ret.append(item)
        return ret
    def name(self):
        ret = []
        for name in self.vars:
            if name == 'total':
                ret.append('total ve usage')
            else:
                ret.append('ve ' + name + ' usage')
        return ret
    def vars(self):
        """Select the VEIDs to display ('total' only unless --full)."""
        ret = []
        # Renamed from 'list' to avoid shadowing the builtin.
        if not op.full:
            varlist = ('total', )
        else:
            varlist = self.discover
        for name in varlist:
            if name in self.discover + ['total']:
                ret.append(name)
        return ret
    def extract(self):
        """Sample per-VEID jiffies and compute percentage usage."""
        self.set2['total'] = [0, 0, 0, 0]
        # BUGFIX: the loop variable was named 'line' while the body read the
        # unbound name 'l' (NameError at runtime); iterate as 'l', matching
        # discover() above.
        for l in self.splitlines():
            if len(l) < 6 or l[0] == 'VEID': continue
            name = l[0]
            # Columns: l[1]=user, l[2]=nice, l[3]=system, l[4]=uptime;
            # idle is uptime minus (user + nice + system).
            self.set2[name] = ( long(l[1]), long(l[3]), long(l[4]) - long(l[1]) - long(l[2]) - long(l[3]), long(l[2]) )
            self.set2['total'] = ( self.set2['total'][0] + long(l[1]), self.set2['total'][1] + long(l[3]), self.set2['total'][2] + long(l[4]) - long(l[1]) - long(l[2]) - long(l[3]), self.set2['total'][3] + long(l[2]) )
        for name in self.vars:
            for i in range(4):
                # Share of total jiffies spent in each state this interval.
                self.val[name][i] = 100.0 * (self.set2[name][i] - self.set1[name][i]) / (sum(self.set2[name]) - sum(self.set1[name]))
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "rs-sysmon2/plugins/dstat_vz_cpu.py",
"copies": "4",
"size": "2295",
"license": "apache-2.0",
"hash": -5664816957251502000,
"line_mean": 36.0161290323,
"line_max": 218,
"alpha_frac": 0.4906318083,
"autogenerated": false,
"ratio": 3.0805369127516777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5571168721051679,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dai Tianyu (dtysky)'
from PIL import Image
import os,re
def hemorrhage(im,border=None,opcity=0.5):
    """Enlarge *im* by 'bleeding' its edge pixels outward into a border.

    The top/bottom edge rows are replicated upward/downward with the given
    opacity, then the left/right edge columns of the intermediate image are
    replicated sideways, yielding an RGBA image of size
    (w + left + right, h + top + bottom); the trailing 'return im_res'
    below hands it back.

    Parameters:
        im     -- PIL image; converted to RGBA internally.
        border -- dict with 'top'/'bottom'/'left'/'right' pixel counts
                  (default: 10 on each side).
        opcity -- opacity of the bled border, 0.0-1.0 [sic: 'opacity'].
    """
    # Build the default per call to avoid the shared-mutable-default pitfall.
    if border is None:
        border = {'top':10,'bottom':10,'left':10,'right':10}
    opcity = int(opcity * 255)      # scale 0..1 float to a 0..255 alpha value
    xsize,ysize = im.size
    im = im.convert('RGBA')
    res_xsize = xsize + border['left'] + border['right']
    res_ysize = ysize + border['top'] + border['bottom']
    hem_buffer = {}
    alpha = {'all':None,'tb':None,'lr':None}
    alpha['all'] = Image.new('L',(xsize,ysize),color=255)       # opaque paste mask
    alpha['tb'] = Image.new('L',(xsize,1),color=opcity)         # mask for row strips
    hem_buffer['top'] = im.crop((0,0,xsize,1))                  # first pixel row
    hem_buffer['bottom'] = im.crop((0,ysize - 1,xsize,ysize))   # last pixel row
    im_tmp = Image.new('RGBA',(xsize,res_ysize))
    im_tmp.paste(im,(0,border['top']),mask=alpha['all'])
    for i in range(border['top']):
        box = (0,i,xsize,i + 1)
        im_tmp.paste(hem_buffer['top'],box,mask=alpha['tb'])
    for i in range(border['bottom']):
        # BUGFIX: the bottom strip rows previously started at y = ysize,
        # overwriting the image's lower rows and leaving the outermost
        # border['top'] rows empty; offset by border['top'] so the bleed
        # fills the rows below the pasted image.
        box = (0,ysize + border['top'] + i,xsize,ysize + border['top'] + i + 1)
        im_tmp.paste(hem_buffer['bottom'],box,mask=alpha['tb'])
    alpha['lr'] = Image.new('L',(1,res_ysize),color=opcity)     # mask for column strips
    hem_buffer['left'] = im_tmp.crop((0,0,1,res_ysize))         # first column (incl. bleeds)
    hem_buffer['right'] = im_tmp.crop((xsize - 1,0,xsize,res_ysize))
    im_res = Image.new('RGBA',(res_xsize,res_ysize))
    im_res.paste(im_tmp,(border['left'],0))
    for i in range(border['left']):
        box = (i,0,i + 1,res_ysize)
        im_res.paste(hem_buffer['left'],box,mask=alpha['lr'])
    for i in range(border['right']):
        # BUGFIX: same off-by-offset as the bottom strips -- columns must
        # start after the pasted image, i.e. offset by border['left'].
        box = (xsize + border['left'] + i,0,xsize + border['left'] + i + 1,res_ysize)
        im_res.paste(hem_buffer['right'],box,mask=alpha['lr'])
return im_res | {
"repo_name": "dtysky/ImageHemorrhage",
"path": "PyProject/ImageHemorrhage.py",
"copies": "1",
"size": "1496",
"license": "mit",
"hash": -6574934327774350000,
"line_mean": 36.425,
"line_max": 81,
"alpha_frac": 0.6296791444,
"autogenerated": false,
"ratio": 2.3485086342229198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.347818777862292,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dak'
import sys
from BeautifulSoup import BeautifulSoup
# Text colors retrieved from css file
# Maps the CSS class of each swatch <li> to its text colour (RGBA hex).
text_colors = {
    'color' : '0xFFFFFFDE',
    'color light': '0xFFFFFFDE',
    'color light-strong': '0xFFFFFFFF',
    'color dark': '0x000000DE',
    'color dark-when-small' : '0x000000DE',
    'color dark-strong' : '0x00000000'
}
# Args check
if len(sys.argv) != 3 :
    print "Syntax: python generator.py <htmlfile> <outputfile>"
    exit(1)
# Go directly to our div
parsed_html = BeautifulSoup(open(sys.argv[1]).read())
div = parsed_html.find('div', attrs={'id':'ui-color-palette-extended-color-palette'})
if not div:
    print "Cannot find color palette section"
    exit(1)
# Every section is a color palette (primary + secondary)
sections = div.findAll('section', attrs={'class':'color-group'})
print "Found %d colors:" % len(sections)
# Intro is index struct
# 'intro' accumulates the Swift index struct; 'code' the per-colour structs.
intro = ['struct MaterialColors {\n']
code = []
for section in sections:
    # Get name from first li element
    color = section.find('li', attrs={'class': 'color main-color'})
    colorName = color.find('span').text.replace(' ', '')
    intro.append('\tstatic let %s = _MaterialColor%s.self\n' % (colorName, colorName))
    code.append('struct _MaterialColor%s {\n' % colorName)
    # Remaining <li> elements are the individual hue swatches.
    lis = section.findAll('li')[1:]
    for li in lis:
        textColor = text_colors[li['class']]
        spans = li.findAll('span')
        colorHue = spans[0].text
        # HTML colour is '#rrggbb'; emit '0xRRGGBBFF' (opaque alpha).
        colorHex = '0x' + spans[1].text[1:].upper() + 'FF'
        # Hues starting with 'A' (accent shades, presumably) keep their
        # name; others get a 'P' prefix to form a valid identifier.
        constantName = 'P' + colorHue
        if colorHue[0] == 'A':
            constantName = colorHue
        code.append('\tstatic let %s\t: (HUE: UInt, TEXT: UInt)\t= (HUE: %s, TEXT: %s)\n' % (constantName, colorHex, textColor))
    code.append('}\n\n')
    print "\tProcessed %s color" % colorName
intro.append('}\n\n')
# Write results to file
with open(sys.argv[2], 'w') as fout:
    fout.writelines(intro)
fout.writelines(code) | {
"repo_name": "daktales/MaterialDesignColorsSwift",
"path": "Scripts/generator.py",
"copies": "2",
"size": "1940",
"license": "mit",
"hash": -1650783714432978700,
"line_mean": 29.8095238095,
"line_max": 128,
"alpha_frac": 0.6345360825,
"autogenerated": false,
"ratio": 3.1960461285008237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9812833332354363,
"avg_score": 0.003549775729292283,
"num_lines": 63
} |
__author__ = 'dalegaspi'
import requests
def main():
    """Fetch the newest AP photo matching a keyword and save its thumbnail."""
    # sign up for developer key at http://developer.ap.org/
    apikey = '** your API key **'
    keyword_search = 'star wars'
    payload = requests.get('http://api.ap.org/v2/search/photo',
                           headers={'accept': 'application/json'},
                           params={'apikey': apikey, 'q': keyword_search, 'count': '1'})
    # requests does not automatically handle BOM for UTF-8, hence specifying the encoding
    # with utf-8-sig
    payload.encoding = 'utf-8-sig'
    payload = payload.json()
    suggested_term = payload['suggestedTerm']['title']
    print 'suggested term: "{0}"'.format(suggested_term)
    print 'title of the most recent image about "{0}" is "{1}"'.format(keyword_search, payload['entries'][0]['title'])
    # The thumbnail URL is the contentLinks entry whose rel is 'thumbnail'.
    img_url = next(u['href'] for u in payload['entries'][0]['contentLinks'] if u['rel'] == 'thumbnail')
    print 'downloading thumbnail link {0}'.format(img_url)
    img_payload = requests.get(img_url, params={'apikey': apikey})
    if img_payload.status_code == 200:
        # Write the raw bytes to a fixed temp location.
        f = open('/tmp/thumbnail.jpg', 'wb')
        f.write(img_payload.content)
        f.close()
if __name__ == "__main__":
    main()
| {
"repo_name": "TheAssociatedPress/APISamples",
"path": "APContentAPI/python/apiclient.py",
"copies": "1",
"size": "1261",
"license": "mit",
"hash": 6142776464927444000,
"line_mean": 33.0277777778,
"line_max": 118,
"alpha_frac": 0.5852498017,
"autogenerated": false,
"ratio": 3.730769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9790967309500842,
"avg_score": 0.005010344593677928,
"num_lines": 36
} |
__author__ = 'dale mcdiarmid'
import re
import os.path
from six import string_types
def modify_list(values=[], pattern='', replacement='', ignorecase=False):
    ''' Perform a `re.sub` on every item in the list'''
    flags = re.I if ignorecase else 0
    compiled = re.compile(pattern, flags=flags)
    rewritten = []
    for item in values:
        rewritten.append(compiled.sub(replacement, item))
    return rewritten
def append_to_list(values=[], suffix=''):
    '''Append *suffix* to each element; a comma-separated string is split first.'''
    if isinstance(values, string_types):
        values = values.split(',')
    result = []
    for value in values:
        result.append(str(value + suffix))
    return result
def array_to_str(values=[], separator=','):
    """Join *values* into one string delimited by *separator*."""
    joined = separator.join(values)
    return joined
def extract_role_users(users={}, exclude_users=[]):
    """Flatten a user->details mapping into 'role:user' strings.

    Users listed in *exclude_users* or lacking a "roles" key are skipped.
    """
    role_users = []
    # .items() works on both Python 2 and 3; the original .iteritems() is
    # Python-2-only, even though this file otherwise uses six for 2/3 compat.
    for user, details in users.items():
        if user not in exclude_users and "roles" in details:
            for role in details["roles"]:
                role_users.append(role + ":" + user)
    return role_users
def filename(filename=''):
    """Return the basename of *filename* without its extension."""
    base = os.path.basename(filename)
    return os.path.splitext(base)[0]
def remove_reserved(user_roles={}):
    """Return the names whose details do NOT carry a truthy metadata._reserved flag."""
    result = []
    for name, details in user_roles.items():
        # De Morgan of the original "not A or not B or not C" condition.
        is_reserved = ("metadata" in details
                       and "_reserved" in details["metadata"]
                       and details["metadata"]["_reserved"])
        if not is_reserved:
            result.append(name)
    return result
def filter_reserved(users_role={}):
    """Return the names whose details carry a truthy metadata._reserved flag."""
    return [name for name, details in users_role.items()
            if "metadata" in details
            and "_reserved" in details["metadata"]
            and details["metadata"]["_reserved"]]
# Ansible filter plugin entry point: exposes this module's helpers as
# Jinja2 template filters.
class FilterModule(object):
    # Ansible calls filters() once; the returned mapping's keys are the
    # filter names available inside templates.
    def filters(self):
        return {'modify_list': modify_list,
                'append_to_list': append_to_list,
                'filter_reserved': filter_reserved,
                'array_to_str': array_to_str,
                'extract_role_users': extract_role_users,
                'remove_reserved': remove_reserved,
                'filename': filename} | {
"repo_name": "AtlasOfLivingAustralia/ala-install",
"path": "ansible/roles/ansible-elasticsearch/filter_plugins/custom.py",
"copies": "2",
"size": "1915",
"license": "apache-2.0",
"hash": -6675523234492988000,
"line_mean": 32.6140350877,
"line_max": 119,
"alpha_frac": 0.6412532637,
"autogenerated": false,
"ratio": 3.8071570576540754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5448410321354076,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dale mcdiarmid'
import re
import os.path
def modify_list(values=[], pattern='', replacement='', ignorecase=False):
    '''Run `re.sub(pattern, replacement, ...)` over every element of *values*.'''
    regex = re.compile(pattern, flags=re.I if ignorecase else 0)
    return [regex.sub(replacement, entry) for entry in values]
def append_to_list(values=[], suffix=''):
    """Append *suffix* to each element; a comma-separated string is split first.

    NOTE: `basestring` makes this Python-2-only, consistent with the
    `iteritems` usage elsewhere in this file.
    """
    items = values.split(',') if isinstance(values, basestring) else values
    return [str(item + suffix) for item in items]
def array_to_str(values=[], separator=','):
    """Join *values* into one string delimited by *separator*."""
    result = separator.join(values)
    return result
def extract_role_users(users={}, exclude_users=[]):
    """Flatten a user->details mapping into 'role:user' strings.

    *exclude_users* is a new optional parameter (default keeps the old
    behavior) matching the newer revision of this plugin elsewhere in the
    tree. Users without a "roles" key are skipped.
    """
    role_users = []
    # .items() works on both Python 2 and 3; the original .iteritems() is
    # Python-2-only.
    for user, details in users.items():
        if user in exclude_users or "roles" not in details:
            continue
        for role in details["roles"]:
            role_users.append(role + ":" + user)
    return role_users
def filename(filename=''):
    """Return the basename of *filename* without its extension."""
    stem, _ext = os.path.splitext(os.path.basename(filename))
    return stem
def filter_reserved(user_roles={}):
    """Return names WITHOUT a truthy metadata._reserved flag.

    NOTE(review): despite the name this returns the NON-reserved entries;
    the newer revision of this plugin renames this logic to `remove_reserved`.
    """
    keep = []
    for name, details in user_roles.items():
        flagged = ("metadata" in details
                   and "_reserved" in details["metadata"]
                   and details["metadata"]["_reserved"])
        if not flagged:
            keep.append(name)
    return keep
class FilterModule(object):
    """Expose this module's helpers as Jinja2 template filters for Ansible."""

    def filters(self):
        # Ansible calls this once; keys are the filter names used in templates.
        filter_map = {
            'modify_list': modify_list,
            'append_to_list': append_to_list,
            'array_to_str': array_to_str,
            'extract_role_users': extract_role_users,
            'filter_reserved': filter_reserved,
            'filename': filename,
        }
        return filter_map
| {
"repo_name": "IHTSDO/ihtsdo-ansible",
"path": "roles/IHTSDO.elastic/filter_plugins/custom.py",
"copies": "2",
"size": "1524",
"license": "apache-2.0",
"hash": -1810588791846381800,
"line_mean": 29.48,
"line_max": 119,
"alpha_frac": 0.6312335958,
"autogenerated": false,
"ratio": 3.726161369193154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5357394964993154,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dalton'
from player import Player
class Action():
    """A menu action binding a Player method to a display name and hotkey."""

    def __init__(self, method, name, hotkey, **kwargs):
        self.method = method    # unbound Player method to invoke
        self.hotkey = hotkey    # single-key shortcut shown in menus
        self.name = name        # human-readable label
        self.kwargs = kwargs    # extra arguments forwarded to the method

    def __str__(self):
        return '{}: {}'.format(self.hotkey, self.name)
class MoveNorth(Action):
    """Move the player one tile north."""
    def __init__(self):
        super().__init__(Player.move_north, 'Move North', 'n')
class MoveSouth(Action):
    """Move the player one tile south."""
    def __init__(self):
        super().__init__(Player.move_south, 'Move South', 's')
class MoveEast(Action):
    """Move the player one tile east."""
    def __init__(self):
        super().__init__(Player.move_east, 'Move East', 'e')
class MoveWest(Action):
    """Move the player one tile west."""
    def __init__(self):
        super().__init__(Player.move_west, 'Move West', 'w')
class ViewInventory(Action):
    """Print the player's inventory"""
    def __init__(self):
        super().__init__(Player.print_inventory, 'View inventory', 'i')
class Attack(Action):
    """Attack the given enemy with the player's best weapon."""
    def __init__(self, enemy):
        # enemy stays a keyword so it lands in Action's **kwargs and is
        # forwarded to Player.attack by do_action.
        super().__init__(Player.attack, 'Attack', 'a', enemy=enemy)
class Flee(Action):
    """Move the player to a random adjacent tile (escape combat)."""
    def __init__(self, tile):
        # tile is forwarded via **kwargs to Player.flee by do_action.
        super().__init__(method=Player.flee, name='Flee', hotkey='f', tile=tile) | {
"repo_name": "dalrrard/text-adventure-tut",
"path": "adventuretutorial/actions.py",
"copies": "1",
"size": "1299",
"license": "mit",
"hash": 8522518629449418000,
"line_mean": 25,
"line_max": 90,
"alpha_frac": 0.5996920708,
"autogenerated": false,
"ratio": 3.4365079365079363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9529012656531435,
"avg_score": 0.0014374701553002646,
"num_lines": 50
} |
__author__ = 'dalton'
import random
import items, world
class Player():
    """The adventurer: position, hit points, inventory, and available actions."""

    def __init__(self):
        # Starting kit: some gold plus a rock (the weakest weapon).
        self.inventory = [items.Gold(15), items.Rock()]
        self.hp = 100
        self.location_x, self.location_y = world.starting_position
        self.victory = False

    def is_alive(self):
        return self.hp > 0

    def print_inventory(self):
        for item in self.inventory:
            print(item, '\n')

    def move(self, dx, dy):
        # Movement is unchecked here; valid destinations are constrained by
        # the actions each tile offers.
        self.location_x += dx
        self.location_y += dy
        print(world.tile_exists(self.location_x, self.location_y).intro_text())

    def move_north(self):
        self.move(dx=0, dy=-1)

    def move_south(self):
        self.move(dx=0, dy=1)

    def move_east(self):
        self.move(dx=1, dy=0)

    def move_west(self):
        self.move(dx=-1, dy=0)

    def attack(self, enemy):
        # Pick the highest-damage Weapon in the inventory.
        # NOTE(review): if the inventory holds no Weapon, best_weapon stays
        # None and best_weapon.name below raises AttributeError -- confirm
        # callers only offer Attack when a weapon exists.
        best_weapon = None
        max_dmg = 0
        for i in self.inventory:
            if isinstance(i, items.Weapon):
                if i.damage > max_dmg:
                    max_dmg = i.damage
                    best_weapon = i
        print('You attack {} with {} for {} damage!'.format(enemy.name, best_weapon.name, best_weapon.damage))
        enemy.hp -= best_weapon.damage
        if not enemy.is_alive():
            print('You killed {}!'.format(enemy.name))
        else:
            print('{} HP is {}'.format(enemy.name, enemy.hp))

    def do_action(self, action, **kwargs):
        # Resolve the Action's unbound method to this instance's bound method.
        action_method = getattr(self, action.method.__name__)
        if action_method:
            action_method(**kwargs)

    def flee(self, tile):
        """Moves the player randomly to an adjacent tile"""
        available_moves = tile.adjacent_moves()
        r = random.randint(0, len(available_moves) - 1)
        self.do_action(available_moves[r]) | {
"repo_name": "dalrrard/text-adventure-tut",
"path": "adventuretutorial/player.py",
"copies": "1",
"size": "1803",
"license": "mit",
"hash": -5216338075921278000,
"line_mean": 28.0967741935,
"line_max": 110,
"alpha_frac": 0.556849695,
"autogenerated": false,
"ratio": 3.4739884393063583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4530838134306358,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dalton'
import world
from player import Player
# Rooms the player has already entered (LootRooms only pay out once).
visited = []


def play():
    """Main game loop: load the map, then prompt for actions until death or victory."""
    world.load_tiles()
    player = Player()
    room = world.tile_exists(player.location_x, player.location_y)
    print(room.intro_text())
    while player.is_alive() and not player.victory:
        room = world.tile_exists(player.location_x, player.location_y)
        # A LootRoom modifies the player only on the first visit.
        already_looted = (room in visited) and ('LootRoom' in str(room.__class__.__bases__))
        if not already_looted:
            visited.append(room)
            room.modify_player(player)
        # check again since the room could have changed the player's state
        if player.is_alive() and not player.victory:
            print('Choose an action:\n')
            available_actions = room.available_actions()
            for action in available_actions:
                print(action)
            action_input = input('Action: ')
            print('\n')
            for action in available_actions:
                if action_input == action.hotkey:
                    player.do_action(action, **action.kwargs)
                    break


if __name__ == '__main__':
    play()
| {
"repo_name": "dalrrard/text-adventure-tut",
"path": "adventuretutorial/game.py",
"copies": "1",
"size": "1129",
"license": "mit",
"hash": 2889269496770251000,
"line_mean": 32.2058823529,
"line_max": 79,
"alpha_frac": 0.5686448184,
"autogenerated": false,
"ratio": 4.090579710144928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5159224528544928,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dalton'
class Item():
    """The base class for all items"""

    def __init__(self, name, description, value):
        self.name = name
        self.description = description
        self.value = value

    def __str__(self):
        template = "{}\n====={}\nValue: {}\n"
        return template.format(self.name, self.description, self.value)
class Gold(Item):
    """A pile of coins; its value equals the coin count."""

    def __init__(self, amt):
        self.amt = amt
        super().__init__("Gold",
                         "A round coin with worn markings on the front.",
                         self.amt)
class Weapon(Item):
    """An item that can deal damage in combat."""

    def __init__(self, name, description, value, damage):
        self.damage = damage
        super().__init__(name, description, value)

    def __str__(self):
        template = "{}\n====={}\nValue: {}\nDamage: {}\n"
        return template.format(self.name, self.description, self.value, self.damage)
class Rock(Weapon):
    """The starter weapon: free and weak."""

    def __init__(self):
        super().__init__("Rock",
                         "A fist-sized rock, suitable for bludgeoning.",
                         0,
                         5)
class Dagger(Weapon):
    """A modest blade: worth 10 gold, deals 10 damage."""
    def __init__(self):
        super().__init__(name="Dagger",
                         description="A small dagger with some rust. Somewhat more dangerous than a rock.",
                         value=10,
                         damage=10) | {
"repo_name": "dalrrard/text-adventure-tut",
"path": "adventuretutorial/items.py",
"copies": "1",
"size": "1373",
"license": "mit",
"hash": -2430616262608933400,
"line_mean": 28.8695652174,
"line_max": 114,
"alpha_frac": 0.5156591406,
"autogenerated": false,
"ratio": 4.014619883040936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017615231298871545,
"num_lines": 46
} |
__author__ = 'Damien'
from flask import Flask, render_template,request,session, \
flash,redirect,url_for,g
from functools import wraps
import sqlite3
# --- Application configuration -------------------------------------------
DATABASE = 'blog.db'
# NOTE(review): hard-coded credentials and secret key are acceptable in a
# tutorial, but must come from the environment/config in any real deployment.
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = 'hard_to_guess'
app = Flask(__name__)
app.config['DEBUG'] = True
# Pulls the UPPERCASE names above into app.config.
app.config.from_object(__name__)
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
@app.route('/', methods=['GET', 'POST'])
def login():
    """Render the login form; on POST, check credentials and start a session."""
    error = None
    if request.method == 'POST':
        # Short-circuit preserved: the password field is only read when the
        # username matches.
        bad_creds = (request.form['username'] != app.config['USERNAME'] or
                     request.form['password'] != app.config['PASSWORD'])
        if bad_creds:
            error = 'Invalid credentials. Please try again.'
        else:
            session['logged_in'] = True
            return redirect(url_for('main'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """End the session and bounce back to the login page."""
    # pop with a default so a missing key never raises.
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('login'))
def login_required(test):
    """Decorator: redirect to the login page unless the session is authenticated."""
    @wraps(test)
    def wrap(*args, **kwargs):
        # Guard clause: bounce unauthenticated users immediately.
        if 'logged_in' not in session:
            flash('You need to login first.')
            return redirect(url_for('login'))
        return test(*args, **kwargs)
    return wrap
@app.route('/add', methods=['POST'])
@login_required
def add():
    """Insert a new blog post from the submitted form, then return to the main page.

    Flashes an error when either field is empty; flashes success otherwise.
    """
    title = request.form['title']
    post = request.form['post']
    if not title or not post:
        flash('All fields are required. Please try again.')
    else:
        g.db = connect_db()
        try:
            # Use the already-fetched values (the original re-read request.form)
            # and a parameterized query -- user input never reaches raw SQL.
            g.db.execute('INSERT into posts (title, post) values (?,?)', [title, post])
            g.db.commit()
        finally:
            # Close even if the INSERT raises, so connections don't leak.
            g.db.close()
        flash('New entry was successfully posted!')
    return redirect(url_for('main'))
@app.route('/main')
@login_required
def main():
    """Show all blog posts (title + body), newest schema order."""
    g.db = connect_db()
    try:
        cur = g.db.execute('select * from posts')
        posts = [dict(title=row[0], post=row[1]) for row in cur.fetchall()]
    finally:
        # Close even if the query raises, so connections don't leak.
        g.db.close()
    return render_template('main.html', posts=posts)
# Dev-server entry point; DEBUG is also enabled above via app.config.
if __name__ == '__main__':
    app.run(debug=True) | {
"repo_name": "dmanier/flask-blog",
"path": "blog.py",
"copies": "1",
"size": "2078",
"license": "cc0-1.0",
"hash": -8269445947634668000,
"line_mean": 27.4794520548,
"line_max": 113,
"alpha_frac": 0.6082771896,
"autogenerated": false,
"ratio": 3.546075085324232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9608674646719879,
"avg_score": 0.009135525640870494,
"num_lines": 73
} |
__author__ = '@DamonLPollard'
import argparse
SECTOR_SIZE = 0x200  # 512-byte sectors; the MBR occupies sector 0
# NOTE: str.decode('hex') is Python-2-only; these are raw byte signatures
# compared against fixed offsets in the master boot record.
XBOX_ONE_NT_DISK_SIGNATURE = '12345678'.decode('hex')
XBOX_ONE_BOOT_SIGNATURE = '99cc'.decode('hex')
PC_BOOT_SIGNATURE = '55aa'.decode('hex')
def update_boot_signature(sector, signature):
print 'NEW Boot Signature: \t0x{0}'.format(signature.encode('hex'))
return sector[:-2] + signature
def update_nt_disk_signature(sector, signature):
print 'NEW NT Disk Signature: \t0x{0}'.format(signature.encode('hex'))
return sector[:0x1b8] + signature + sector[0x1bc:]
def main():
    # Reads sector 0 (the MBR) of the given physical drive, prints the current
    # NT disk / boot signatures, and optionally toggles them between the
    # Xbox One and PC values. Only writes when a signature actually changed.
    parser = argparse.ArgumentParser(description='Xbox One External HDD Tool')
    parser.add_argument('-d', '--drive', required=True, help='The target physical drive')
    parser.add_argument('-i', '--ignore', action='store_true',
                        help="Ignore the 'Xbox One NT Disk Signature' sanity check")
    parser.add_argument('-bs', '--bootsignature', action='store_true', help="Update 'Boot Signature'")
    parser.add_argument('-ds', '--disksignature', action='store_true', help="Update 'NT Disk Signature'")
    args = parser.parse_args()
    changes = False
    try:
        # r+b: read-modify-write the raw device in place.
        with open(args.drive, 'r+b') as disk:
            disk.seek(0)
            master_boot_record = disk.read(SECTOR_SIZE)
            nt_disk_signature = master_boot_record[0x1b8:0x1bc]
            boot_signature = master_boot_record[0x1fe:0x200]
            print
            print 'NT Disk Signature: \t0x{0}'.format(nt_disk_signature.encode('hex'))
            print 'Boot Signature: \t0x{0}'.format(boot_signature.encode('hex'))
            # Sanity check: only operate on disks carrying the Xbox One NT
            # signature unless the user passed --ignore.
            if nt_disk_signature == XBOX_ONE_NT_DISK_SIGNATURE or args.ignore == True:
                if boot_signature == XBOX_ONE_BOOT_SIGNATURE:
                    if args.bootsignature:
                        print 'Operation: \t\tXbox One->PC'
                        master_boot_record = update_boot_signature(master_boot_record, PC_BOOT_SIGNATURE)
                        changes = True
                elif boot_signature == PC_BOOT_SIGNATURE:
                    if args.bootsignature:
                        print 'Operation: \t\tPC->Xbox One'
                        master_boot_record = update_boot_signature(master_boot_record, XBOX_ONE_BOOT_SIGNATURE)
                        changes = True
                else:
                    raise Exception('Error: Unexpected Boot Signature.')
                if args.disksignature:
                    master_boot_record = update_nt_disk_signature(master_boot_record, XBOX_ONE_NT_DISK_SIGNATURE)
                    changes = True
                if changes:
                    print
                    print 'Writing new MBR ...',
                    disk.seek(0)
                    disk.write(master_boot_record)
                    print 'done.'
            else:
                raise Exception('Error: Unexpected NT Disk Signature.')
    except Exception as ex:
        # Any failure (I/O or signature mismatch) prints the message and
        # exits non-zero.
        print(ex)
        exit(-1)
if __name__ == "__main__":
    main() | {
"repo_name": "gitFurious/xboxoneexternal",
"path": "xboxoneexternal.py",
"copies": "1",
"size": "2998",
"license": "mit",
"hash": 3828826675840712700,
"line_mean": 37.4487179487,
"line_max": 113,
"alpha_frac": 0.5730486991,
"autogenerated": false,
"ratio": 3.9603698811096435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002768992541948869,
"num_lines": 78
} |
__author__ = 'Damon Pollard (@DamonLPollard)'
import re
import random
import requests
# Impersonate desktop Chrome; the Live login flow can vary by user agent.
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) ' \
             'AppleWebKit/537.36 (KHTML, like Gecko) ' \
             'Chrome/32.0.1700.107 Safari/537.36'
# Waypoint sign-in URL; on success it redirects through to a spartanToken grant.
SIGNIN = "https://app.halowaypoint.com/oauth/signin" \
         "?returnUrl=https%3a%2f%2fapp.halowaypoint.com%2foauth%2fspartanToken" \
         "&locale=en-US"
# Regexes that scrape hidden form fields out of the Live login page HTML/JS.
R_PPFT = "<input type=\"hidden\" name=\"PPFT\" id=\"i0327\" value=\"(.+?)\"\/>"
R_PPSX = "I:'(.+?)'"
R_URLPOST = "urlPost:'(.+?)'"
class HaloFour():
    """ Class for retrieving a Halo Waypoint Token (required to authenticate with the Halo Waypoint API)
    allow_redirects is False where applicable in order to not disguise Location Header redirects.
    """
    def __init__(self, username, password):
        # Microsoft Live credentials used to drive the OAuth sign-in flow.
        self.username = username
        self.password = password

    def get_new_token(self):
        # Walks the Live/Waypoint OAuth flow in five requests and returns a
        # HaloWaypointToken. NOTE(review): verify=False below disables TLS
        # certificate validation on several hops -- confirm this is intended.
        s = requests.Session()
        # 1) Waypoint sign-in; its Location header points at Live's
        #    oauth20 authorize endpoint.
        response_one = s.get(SIGNIN,
                             headers={
                                 'user-agent': USER_AGENT,
                                 'host': 'app.halowaypoint.com'
                             },
                             allow_redirects=False
                             )
        oauth20_authorize = response_one.headers['Location']
        # 2) Fetch the Live login page and scrape its hidden form fields.
        response_two = s.get(oauth20_authorize,
                             headers={
                                 'user-agent': USER_AGENT,
                                 'host': 'login.live.com'
                             }
                             )
        ppft = re.search(R_PPFT, response_two.text).group(1)
        ppsx = re.search(R_PPSX, response_two.text).group(1)
        urlpost = re.search(R_URLPOST, response_two.text).group(1)
        # 3) Post the credentials plus the scraped fields; the extra keys
        #    mimic the browser login form (screen size, login options, ...).
        response_three = s.post(urlpost,
                                data={
                                    'PPFT': ppft,
                                    'login': self.username,
                                    'passwd': self.password,
                                    'LoginOptions': '3',
                                    'NewUser': '1',
                                    'PPSX': ppsx,
                                    'type': '11',
                                    'i3': random.randrange(5000, 10000),
                                    'm1': '1920',
                                    'm2': '1080',
                                    'm3': '0',
                                    'i12': '1',
                                    'i17': '0',
                                    'i18': '__MobileLogin|1,',
                                },
                                headers={
                                    'user-agent': USER_AGENT,
                                    'referer': oauth20_authorize,
                                    'host': 'login.live.com',
                                    'origin': 'https://login.live.com'
                                },
                                verify=False,
                                allow_redirects=False
                                )
        callback_url = response_three.headers['Location']
        # 4) Follow Live's redirect back to Waypoint manually.
        response_four = s.get(callback_url,
                              headers={
                                  'user-agent': USER_AGENT,
                                  'referer': oauth20_authorize,
                                  'host': 'www.halowaypoint.com'
                              },
                              verify=False,
                              allow_redirects=False
                              )
        spartantoken_url = response_four.headers['Location']
        # 5) Exchange the final redirect for the spartan token JSON payload.
        response_five = s.get(spartantoken_url,
                              headers={
                                  'user-agent': USER_AGENT,
                                  'referer': oauth20_authorize,
                                  'host': 'app.halowaypoint.com'
                              },
                              verify=False
                              )
        j = response_five.json()
        return self.HaloWaypointToken(j['SpartanToken'],
                                      j['Gamertag'],
                                      j['AnalyticsToken'],
                                      self.username,
                                      self.password
                                      )

    class HaloWaypointToken:
        # Simple value object bundling the spartan token with the
        # credentials that produced it.
        def __init__(self, spartan_token, gamertag, analytics_token, live_username, live_password):
            self.spartan_token = spartan_token
            self.gamertag = gamertag
            self.analytics_token = analytics_token
            self.live_username = live_username
            self.live_password = live_password
| {
"repo_name": "gitFurious/PyHaloFour",
"path": "pyhalo/authentication.py",
"copies": "2",
"size": "4638",
"license": "mit",
"hash": 2534617140339531000,
"line_mean": 39.3304347826,
"line_max": 104,
"alpha_frac": 0.4010349288,
"autogenerated": false,
"ratio": 4.7326530612244895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.613368799002449,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Damon Pollard (@DamonLPollard)'
import requests
class HaloFour():
    """Thin REST client for the Halo 4 Waypoint API.

    Every request authenticates via the X-343-Authorization-Spartan header
    taken from the supplied waypoint token.
    NOTE(review): verify=False disables TLS certificate validation on every
    call -- confirm this is intentional.
    """
    def __init__(self, waypoint_token):
        self.waypoint_token = waypoint_token

    def get_api_version(self):
        url = "https://app.halowaypoint.com/en-US/home/version"
        return self._fetch_json(url)

    def get_api_services(self):
        url = "https://settings.svc.halowaypoint.com/RegisterClientService.svc" \
              "/register/webapp/AE5D20DCFA0347B1BCE0A5253D116752"
        return self._fetch_json(url)

    def get_user_achievements(self, params):
        url = "https://haloplayer.svc.halowaypoint.com/HaloPlayer/GetOtherUserAchievements"
        return self._fetch_json(url, params)

    def get_playlists(self):
        url = "https://presence.svc.halowaypoint.com/en-US/h4/playlists"
        return self._fetch_json(url)

    def get_global_challenges(self):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/challenges"
        return self._fetch_json(url)

    def get_player_challenges(self):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/challenges" % self.waypoint_token.gamertag
        return self._fetch_json(url)

    def get_game_metadata(self, params=None):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/metadata"
        return self._fetch_json(url, params)

    def get_playercard(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/playercard" % gamertag
        return self._fetch_json(url)

    def get_multiple_playercards(self, params):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/playercards"
        return self._fetch_json(url, params)

    def get_service_record(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord" % gamertag
        return self._fetch_json(url)

    def get_game_history(self, gamertag, params=None):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/matches" % gamertag
        return self._fetch_json(url, params)

    def get_game_details(self, game_id):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/matches/%s" % game_id
        return self._fetch_json(url)

    def get_commendations(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/commendations" % gamertag
        return self._fetch_json(url)

    # Kept for reference: this endpoint answered HTTP 500 when last tried.
    # HTTP 500
    #def get_ranks(self, gamertag):
    #    url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/ranks" % gamertag
    #    return self._fetch_json(url)

    def get_campaign_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/campaign" % gamertag
        return self._fetch_json(url)

    def get_spartanops_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/spartanops" % gamertag
        return self._fetch_json(url)

    def get_wargame_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/wargames" % gamertag
        return self._fetch_json(url)

    def get_customgame_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/custom" % gamertag
        return self._fetch_json(url)

    def get_spartan_image(self, gamertag, pose, params=None):
        url = "https://spartans.svc.halowaypoint.com/players/%s/h4/spartans/%s" % (gamertag, pose)
        return self._fetch_png(url, params)

    def _fetch_json(self, url, params=None):
        # Shared GET helper for JSON endpoints; returns the decoded body.
        r = requests.get(url,
                         headers={
                             'user-agent': 'PyHalo/0.1 (%s)' % self.waypoint_token.live_username,
                             'accept': 'application/json',
                             'Accept-Encoding': 'gzip,deflate',
                             'X-343-Authorization-Spartan': self.waypoint_token.spartan_token
                         },
                         params=params,
                         verify=False
                         )
        return r.json()

    def _fetch_png(self, url, params=None):
        # Shared GET helper for image endpoints; returns the raw bytes.
        r = requests.get(url,
                         headers={
                             'user-agent': 'PyHalo/0.1 (%s)' % self.waypoint_token.live_username,
                             'accept': 'image/png',
                             'Accept-Encoding': 'gzip,deflate',
                             'X-343-Authorization-Spartan': self.waypoint_token.spartan_token
                         },
                         params=params,
                         verify=False
                         )
        return r.content
| {
"repo_name": "gitFurious/PyHaloFour",
"path": "pyhalo/api.py",
"copies": "2",
"size": "4649",
"license": "mit",
"hash": -8208576937866891000,
"line_mean": 40.5089285714,
"line_max": 112,
"alpha_frac": 0.6024951602,
"autogenerated": false,
"ratio": 3.285512367491166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9869442406728277,
"avg_score": 0.003713024192578059,
"num_lines": 112
} |
__author__ = 'damons'
__email__ = 'stephen.m.damon@vanderbilt.edu'
__purpose__ = "Run Xnatinfo on the database."
__version__ = '1.0.0'
__modifications__ = '13 August 2015 - Original write' \
'26 August 2015 - Remove host since only running on localhost'
__todo__ = ['Add in scans', 'Add in sessions/subjects/assessors', 'Add in scan type']
import psycopg2
from psycopg2 import OperationalError
import sys
from copy import deepcopy
# Select all the assessors
SELECT_CALL = "SELECT proctype, procstatus FROM proc_genprocdata WHERE id IN (SELECT id FROM xnat_experimentData WHERE project='%s' AND label LIKE '%s-x-%%')"
def parse_args():
    """Set up the ArgumentParser and evaluate the command line."""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-p', '--project', dest='project',
                        help='Project ID on XNAT', required=True)
    parser.add_argument('-u', '--user', dest='user', help='DB owner username',
                        required=True)
    parser.add_argument('-d', '--dbname', dest='dbname',
                        help='Database name', required=True)
    parser.add_argument('-c', '--cred', dest='password',
                        help='Database username password', required=True)
    return parser.parse_args()
if __name__ == '__main__':
ARGS = parse_args()
# Try to connect and exit if we catch the most common OperationalError
# will add more as they are encountered.
try:
conn = psycopg2.connect(dbname=ARGS.dbname,
password=ARGS.password, user=ARGS.user)
except OperationalError as operr:
print "FATAL: Caught an OperationalError. Please check your dbname, " \
"host ip address, username, and password"
sys.exit(1)
cur = conn.cursor()
cur.execute(SELECT_CALL % (ARGS.project, ARGS.project))
result = cur.fetchall()
# count, complete, ready_to_complete, uploading, ready_to_upload, job_failed, job_running, NEED_TO_RUN, need_inputs, no_data, unknown
COUNT = [0,0,0,0,0,0,0,0,0,0,0]
procs = {}
known_list = ['COMPLETE', 'READY_TO_COMPLETE', 'UPLOADING', 'READY_TO_UPLOAD', 'JOB_FAILED', 'JOB_RUNNING', 'NEED_TO_RUN', 'NEED_INPUTS', 'NO_DATA']
i=0
# This is gross and needs some TLC
for r in result:
if r[0] not in procs.keys():
procs[r[0]] = deepcopy(COUNT)
if r[1] == 'COMPLETE':
procs[r[0]][1] = procs[r[0]][1] +1
procs[r[0]][0] +=1
elif r[1] == 'READY_TO_COMPLETE':
procs[r[0]][2] +=1
procs[r[0]][0] +=1
elif r[1] == 'UPLOADING':
procs[r[0]][3] +=1
procs[r[0]][0] +=1
elif r[1] == 'READY_TO_UPLOAD':
procs[r[0]][4] +=1
procs[r[0]][0] +=1
elif r[1] == 'JOB_FAILED':
procs[r[0]][5] +=1
procs[r[0]][0] +=1
elif r[1] == 'JOB_RUNNING':
procs[r[0]][6] +=1
procs[r[0]][0] +=1
elif r[1] == 'NEED_TO_RUN':
procs[r[0]][7] +=1
procs[r[0]][0] +=1
elif r[1] == 'NEED_INPUTS':
procs[r[0]][8] +=1
procs[r[0]][0] +=1
elif r[1] == 'NO_DATA':
procs[r[0]][9] +=1
procs[r[0]][0] +=1
else:
procs[r[0]][10] +=1
procs[r[0]][0] +=1
i+=1
aslen = 25
for key in procs.keys():
if key is not None:
if len(key) > aslen:
aslen=len(key)
print ' %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s ' % (-1*aslen, 'Process type', -5, 'Count',-8,'COMPLETE',-17, 'READY_TO_COMPLETE',-9, 'UPLOADING',-15, 'READY_TO_UPLOAD',-10, 'JOB_FAILED',-11,'JOB_RUNNING',-11, 'NEED_TO_RUN',-11, 'NEED_INPUTS',-7,'NO_DATA',-7,'UNKNOWN')
for key in sorted(procs) :
print ' %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s | %*s ' % (-1*aslen, key, -5, procs[key][0], -8, procs[key][1], -17, procs[key][2],-9, procs[key][3],-15, procs[key][4],-10, procs[key][5],-11, procs[key][6],-11, procs[key][7],-11, procs[key][8],-7, procs[key][9],-7, procs[key][10])
| {
"repo_name": "VUIIS/psql_xnat_tools",
"path": "psql_xnatinfo.py",
"copies": "1",
"size": "4122",
"license": "mit",
"hash": 6543196692298437000,
"line_mean": 39.8118811881,
"line_max": 314,
"alpha_frac": 0.5276564774,
"autogenerated": false,
"ratio": 2.948497854077253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8859071367756686,
"avg_score": 0.023416592744113382,
"num_lines": 101
} |
__author__ = 'damons'
__email__ = 'stephen.m.damon@vanderbilt.edu'
__purpose__ = "Run Xnatsessionupdate on the database. This gives a " \
"HUGE improvement rather than using pyxnat's set method" \
"time Xnatsessionupdate -p VUSTP -a:" \
" real 0m40.719s" \
" user 0m0.547s" \
" sys 0m0.152s" \
"time psql_xnatsessionupdate -p VUSTP -a ...:" \
" real 0m3.341s" \
" user 0m0.410s" \
" sys 0m0.133s"
__version__ = '1.0.0'
__modifications__ = '13 August 2015 - Original write' \
'26 August 2015 - Remove host since only running on localhost' \
'27 August 2015 - Add in autocommit=True to autoexecute calls on an execute command' \
' - semantics fix' \
' - strip out XnatUtils and get all session labels from the db.'
import psycopg2
from psycopg2 import OperationalError
import sys
# This call is just for debugging to make sure it worked.
# Probably won't ever be used
SELECT_CALL = "SELECT original FROM xnat_experimentData WHERE project='%s' AND label IN (%s);"
# This call is used to update the database
UPDATE_CALL_USER = "UPDATE xnat_experimentData SET original=' ' WHERE project='%s' AND label IN (%s);"
# If the user wants to run all of the sessions, grab them via the database and not XnatUtils
UPDATE_CALL_ALL = "UPDATE xnat_experimentData SET original=' ' WHERE project='%s' AND label IN " \
"(SELECT label FROM xnat_experimentData WHERE project='%s' AND label NOT LIKE '%s-x-%%');"
def parse_args():
    """Set up the ArgumentParser and evaluate the command line."""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-p', '--project', dest='project',
                        help='Project ID on XNAT', required=True)
    parser.add_argument('-s', '--session', dest='session',
                        help='Session(s) on Xnat. Comma separate if multiple',
                        required=False)
    parser.add_argument('-a', '--all', dest='all',
                        help='Reset all sessions in the project',
                        action='store_true',
                        default=False)
    parser.add_argument('-u', '--user', dest='user', help='DB owner username',
                        required=True)
    parser.add_argument('-d', '--dbname', dest='dbname',
                        help='Database name', required=True)
    parser.add_argument('-c', '--cred', dest='password',
                        help='Database username password', required=True)
    return parser.parse_args()
if __name__ == '__main__':
ARGS = parse_args()
if ARGS.session:
USER_SESSIONS = ARGS.session.split(',')
# Try to connect and exit if we catch the most common OperationalError
# will add more as they are encountered.
try:
conn = psycopg2.connect(dbname=ARGS.dbname,
password=ARGS.password, user=ARGS.user)
conn.autocommit = True
except OperationalError as operr:
print "FATAL: Caught an OperationalError. Please check your dbname, " \
"host ip address, username, and password"
sys.exit(1)
cur = conn.cursor()
if ARGS.all:
# Grab every session from the project and reset
cur.execute(UPDATE_CALL_ALL % (ARGS.project, ARGS.project, ARGS.project))
else:
# We assume the user gave the correct sessions.
# TODO run the query to make sure these are all correct
SESSIONS = USER_SESSIONS
# *************** BEGIN debugging code
# cur.execute(SELECT_CALL % (ARGS.project, (','.join("'" + item + "'" for item in SESSIONS))))
# res= cur.fetchall()
# for r in res:
# print r
# *************** END debugging code
cur.execute(UPDATE_CALL_USER % (ARGS.project, (','.join("'" + item + "'" for item in SESSIONS))))
| {
"repo_name": "VUIIS/psql_xnat_tools",
"path": "psql_xnatsessionupdate.py",
"copies": "1",
"size": "3944",
"license": "mit",
"hash": -6119464142675416000,
"line_mean": 42.8222222222,
"line_max": 108,
"alpha_frac": 0.5740365112,
"autogenerated": false,
"ratio": 3.893385982230997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9958068718327435,
"avg_score": 0.001870755020712223,
"num_lines": 90
} |
__author__ = 'damons'
import assessor_create_util
import sys
"""
This taks a 2 column csv file. The first column should be the assessor label
and the second should be the xsitype. Due to variable insertion concers in
psycopg2, the map between xsitype and the associated table in postgres is
hardcoded. The method get_statement_from_xsitype can easily be extended
by updating the map to handle extra xsitypes.
"""
# Entry point: read "<assessor_label>,<xsitype>" rows from the CSV named on
# the command line and insert the matching assessor records into XNAT's
# postgres schema, skipping rows whose project/subject/experiment are
# missing or whose assessor already exists.
if __name__ == '__main__':
    # Get the connection to postgres and the cursor
    conn = assessor_create_util.get_connection(password='')
    cursor = assessor_create_util.get_cursor(conn)
    if len(sys.argv) != 2:
        sys.stderr.write('Error. must pass <csv_file>\n')
        sys.exit(1)
    csv_file = sys.argv[1]
    with open(csv_file, 'r') as f:
        data = f.readlines()
    for line in data:
        assessor = line.split(',')[0]
        xsi_type = line.split(',')[1].strip()
        assessor_label = assessor
        # Labels follow PROJECT-x-SUBJECT-x-EXPERIMENT-x-...-x-PROCTYPE.
        project_id = assessor_label.split('-x-')[0]
        subject_label = assessor_label.split('-x-')[1]
        experiment_label = assessor_label.split('-x-')[2]
        proctype = assessor_label.split('-x-')[-1]
        print "START ASSESSOR %s" % assessor_label
        # Check that the project, subject, experiment, and assessor label exist
        if not assessor_create_util.check_if_project_exits(cursor, project_id):
            print "Project ID %s does not exist. Cannot create assessor %s" % (project_id, assessor_label)
            continue
        if not assessor_create_util.check_if_subject_exists(cursor, project_id, subject_label):
            print "Subject Label %s does not exist for Project ID %s. Cannot create assessor %s" % (subject_label, project_id, assessor_label)
            continue
        if not assessor_create_util.check_if_experiment_exists(cursor, project_id, experiment_label):
            print "Experiment Label %s does not exist for Project ID %s. Cannot create assessor %s" % (experiment_label, project_id, assessor_label)
            continue
        if assessor_create_util.check_if_assessor_exists(cursor, project_id, assessor_label):
            print "Assessor Label %s already exists for Project ID %s. Cannot create." % (assessor_label, project_id )
            continue
        if not assessor_create_util.check_if_xsitype_exists(cursor, xsi_type):
            print "xsitype %s does not exist. Cannot create." % xsi_type
            continue
        # By default set the procstatus to NEED_INPUTS so we can let dax_build handle the rest of the checking.
        procstatus = 'NEED_INPUTS'
        # Reserve a lot of the (metadata) ids
        experiment_id = assessor_create_util.get_next_experiment_id(cursor)
        xdat_change_info_id = assessor_create_util.get_xdat_change_info_id(cursor)
        xnat_experimentdata_meta_data_meta_data_id = assessor_create_util.get_experimentdata_meta_data_id(cursor)
        xnat_deriveddata_meta_data_id = assessor_create_util.get_deriveddata_meta_data_id(cursor)
        xnat_imageassessordata_meta_data_meta_data_id = assessor_create_util.get_imageassesordata_meta_data_id(cursor)
        proc_genprocdata_meta_data_meta_data_id = assessor_create_util.get_procgenprocdata_meta_data_id(cursor)
        # experimentdata
        assessor_create_util.insert_xnat_experimentdata_meta_data(conn, cursor, xdat_change_info_id, xnat_experimentdata_meta_data_meta_data_id)
        assessor_create_util.insert_xnat_experimentdata(conn, cursor, xnat_experimentdata_meta_data_meta_data_id, assessor_label, project_id, experiment_id)
        # deriveddata
        assessor_create_util.insert_xnat_deriveddata_meta_data(conn, cursor, xdat_change_info_id, xnat_deriveddata_meta_data_id)
        assessor_create_util.insert_xnat_deriveddata(conn, cursor, xnat_deriveddata_meta_data_id, experiment_id)
        # image assessordata
        session_id = assessor_create_util.get_experiment_id_from_label(cursor, project_id, experiment_label)
        assessor_create_util.insert_xnat_imageassessordata_meta_data(conn, cursor, xdat_change_info_id, xnat_imageassessordata_meta_data_meta_data_id)
        assessor_create_util.insert_xnat_imageassessordata(conn, cursor, xnat_imageassessordata_meta_data_meta_data_id, experiment_id, session_id[0])
        # finally, the assesor type
        assessor_create_util.insert_proc_genprocdata_meta_data(conn, cursor, xdat_change_info_id, proc_genprocdata_meta_data_meta_data_id)
        assessor_create_util.insert_proc_genprocdata(conn, cursor, proc_genprocdata_meta_data_meta_data_id, experiment_id)
        assessor_create_util.insert_need_inputs_status(conn, cursor, procstatus, proctype, experiment_id)
        print "FINISHED ASSESSOR %s" % assessor_label
| {
"repo_name": "VUIIS/psql_xnat_tools",
"path": "create_assessor.py",
"copies": "2",
"size": "4715",
"license": "mit",
"hash": -3524034686664108500,
"line_mean": 52.5795454545,
"line_max": 156,
"alpha_frac": 0.6992576882,
"autogenerated": false,
"ratio": 3.260719225449516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9936100999085624,
"avg_score": 0.004775182912778503,
"num_lines": 88
} |
__author__ = 'damons'
import psycopg2
import sys
import re
import datetime
def get_connection(dbname='xnat', user='xnat', password=''):
    """
    Open a psycopg2 connection to the XNAT postgres database.

    :param dbname: The postgres database name
    :param user: The postgres username
    :param password: The associated password with the postgres user
    :return: psycopg2 connection object (the process exits on failure)
    """
    try:
        return psycopg2.connect(dbname=dbname, user=user,
                                password=password, host=None)
    except psycopg2.OperationalError:
        # Most common failure mode: bad credentials or unreachable server.
        print("FATAL: Caught an OperationalError. Please check your dbname, "
              "host ip address, username, and password")
        sys.exit(1)
def get_cursor(connection):
    """
    Create a cursor on an open database connection.

    :param connection: psycopg2 Connection object
    :return: psycopg2 cursor for *connection*
    """
    return connection.cursor()
def get_user_id_by_name(cursor, name):
    """
    Look up the xdat_user_id for an XNAT login name.

    :param cursor: psycopg2 cursor
    :param name: XNAT login (xdat_user.login) to search for
    :return: xdat_user_id of the first matching row
    """
    # Bug fix: psycopg2 requires bind parameters as a sequence/mapping;
    # passing the bare string raised TypeError at execute time.
    cursor.execute("""SELECT xdat_user_id from xdat_user where login=%s""", (name,))
    result = cursor.fetchone()
    return result[0]
def get_xdat_change_info_id(cursor):
    """
    Draw the next value from the xdat_change_info id sequence.

    :param cursor: psycopg2 cursor
    :return: single-element row tuple holding the new id
    """
    seq_sql = """SELECT nextval('public.xdat_change_info_xdat_change_info_id_seq') AS xdat_change_info_id"""
    cursor.execute(seq_sql)
    rows = cursor.fetchall()
    return rows[0]
def get_experimentdata_meta_data_id(cursor):
    """
    Draw the next xnat_experimentdata meta data id from its sequence.

    :param cursor: psycopg2 cursor
    :return: single-element row tuple holding the new id
    """
    seq_sql = """SELECT nextval('public.xnat_experimentdata_meta_data_meta_data_id_seq') AS meta_data_id"""
    cursor.execute(seq_sql)
    rows = cursor.fetchall()
    return rows[0]
def get_deriveddata_meta_data_id(cursor):
    """
    Draw the next xnat_deriveddata meta data id from its sequence.

    :param cursor: psycopg2 cursor
    :return: single-element row tuple holding the new id
    """
    seq_sql = """SELECT nextval('public.xnat_deriveddata_meta_data_meta_data_id_seq') AS meta_data_id"""
    cursor.execute(seq_sql)
    rows = cursor.fetchall()
    return rows[0]
def get_imageassesordata_meta_data_id(cursor):
    """
    Draw the next xnat_imageassessordata meta data id from its sequence.

    Note the (historical) typo in the function name; kept for callers.

    :param cursor: psycopg2 cursor
    :return: single-element row tuple holding the new id
    """
    seq_sql = """SELECT nextval('public.xnat_imageassessordata_meta_data_meta_data_id_seq') AS meta_data_id"""
    cursor.execute(seq_sql)
    rows = cursor.fetchall()
    return rows[0]
def get_procgenprocdata_meta_data_id(cursor):
    """
    Draw the next proc_genprocdata meta data id from its sequence.

    :param cursor: psycopg2 cursor
    :return: single-element row tuple holding the new id
    """
    seq_sql = """SELECT nextval('public.proc_genprocdata_meta_data_meta_data_id_seq') AS meta_data_id"""
    cursor.execute(seq_sql)
    rows = cursor.fetchall()
    return rows[0]
def insert_xnat_experimentdata_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row for an experiment.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key of the new meta data row
    :return: None
    """
    # All three timestamp columns get the same creation time.
    now = get_timestamp()
    cmd = """INSERT INTO xnat_experimentData_meta_data (xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES ((%s),'active',1,(%s),(%s),(%s),0,1,(%s),1)"""
    cursor.execute(cmd, (xft_version, now, now, now, meta_data_id))
    conn.commit()
def insert_xnat_experimentdata(conn, cursor, experimentdata_info, label, project, id):
    """
    Insert the core xnat_experimentData row (extension hard-coded to 586).

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param experimentdata_info: row tuple whose first element is the meta data id
    :param label: experiment label
    :param project: project id
    :param id: experiment id
    :return: None
    """
    cmd = """INSERT INTO xnat_experimentData (experimentdata_info,label,project,extension,id) VALUES ((%s),(%s),(%s),586,(%s))"""
    # The meta data id arrives as a one-element row; store it as a string.
    info_value = str(int(experimentdata_info[0]))
    cursor.execute(cmd, (info_value, label, project, id))
    conn.commit()
def insert_xnat_deriveddata_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row for derived data.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key of the new meta data row
    :return: None
    """
    # All three timestamp columns get the same creation time.
    now = get_timestamp()
    cmd = """INSERT INTO xnat_derivedData_meta_data (xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES ((%s),'active',1,(%s),(%s),(%s),0,1,(%s),1)"""
    cursor.execute(cmd, (xft_version, now, now, now, meta_data_id))
    conn.commit()
def insert_xnat_deriveddata(conn, cursor, deriveddata_info, experiment_id):
    """
    Insert the xnat_derivedData row linking meta data to an experiment.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param deriveddata_info: meta data id for the derived data row
    :param experiment_id: experiment id the row belongs to
    :return: None
    """
    statement = """INSERT INTO xnat_derivedData (deriveddata_info,id) VALUES ((%s),(%s))"""
    values = (deriveddata_info, experiment_id)
    cursor.execute(statement, values)
    conn.commit()
def insert_xnat_imageassessordata_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row for an image assessor.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key of the new meta data row
    :return: None
    """
    # All three timestamp columns get the same creation time.
    now = get_timestamp()
    cmd = """INSERT INTO xnat_imageAssessorData_meta_data (xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES ((%s),'active',1,(%s),(%s),(%s),0,1,(%s),1)"""
    cursor.execute(cmd, (xft_version, now, now, now, meta_data_id))
    conn.commit()
def insert_xnat_imageassessordata(conn, cursor, meta_data_id, assessor_id, session_id):
    """
    Insert the xnat_imageAssessorData row tying an assessor to its session.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param meta_data_id: image assessor meta data id
    :param assessor_id: assessor experiment id
    :param session_id: parent imaging session id
    :return: None
    """
    statement = """INSERT INTO xnat_imageAssessorData (imageassessordata_info,id,imagesession_id) VALUES ((%s),(%s),(%s))"""
    values = (meta_data_id, assessor_id, session_id)
    cursor.execute(statement, values)
    conn.commit()
def insert_proc_genprocdata_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row for a genProc assessor.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key of the new meta data row
    :return: None
    """
    # All three timestamp columns get the same creation time.
    now = get_timestamp()
    cmd = """INSERT INTO proc_genProcData_meta_data (xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES ((%s),'active',1,(%s),(%s),(%s),0,1,(%s),1)"""
    cursor.execute(cmd, (xft_version, now, now, now, meta_data_id))
    conn.commit()
def insert_proc_genprocdata(conn, cursor, meta_data_id, experiment_id):
    """
    Insert the proc_genProcData row for an assessor experiment.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param meta_data_id: genProc meta data id
    :param experiment_id: assessor experiment id
    :return: None
    """
    statement = """INSERT INTO proc_genProcData (genprocdata_info,id) VALUES ((%s),(%s))"""
    values = (meta_data_id, experiment_id)
    cursor.execute(statement, values)
    conn.commit()
def insert_need_inputs_status(conn, cursor, procstatus, proctype, experiment_id):
    """
    Set procstatus/proctype on an existing proc_genProcData row.

    Despite the 'insert' name this performs an UPDATE keyed on the id.

    :param conn: psycopg2 connection (committed after the update)
    :param cursor: psycopg2 cursor
    :param procstatus: new processing status value
    :param proctype: new processing type value
    :param experiment_id: id of the row to update
    :return: None
    """
    statement = """UPDATE proc_genProcData SET procstatus=(%s), proctype=(%s) WHERE id=(%s)"""
    values = (procstatus, proctype, experiment_id)
    cursor.execute(statement, values)
    conn.commit()
def get_next_experiment_id(cursor):
    """
    Compute the next unused experiment ID of the form <INSTANCE>_E#####.

    These IDs should really be some sort of serial8 or something of the like,
    but this collects the used IDs (current and historical), takes the largest,
    and increments its numeric tail.

    :param cursor: psycopg2 cursor
    :return: the new ID (in a tuple)
    """
    cmd = """SELECT DISTINCT id FROM (SELECT id FROM xnat_experimentData WHERE id LIKE 'DEV_VUIISXNAT99_E%' UNION SELECT DISTINCT id FROM xnat_experimentData_history WHERE id LIKE 'DEV_VUIISXNAT99_E%') SRCHA"""
    cursor.execute(cmd)
    experiment_ids = cursor.fetchall()
    # Sort here rather than using postgres to mimic XNAT; the numeric tail is
    # zero padded, so lexicographic order matches numeric order.
    experiment_ids.sort()
    try:
        last_id = experiment_ids[-1]
    except IndexError:
        # No experiments yet: seed so the first generated id is ..._E00001.
        last_id = ('DEV_VUIISXNAT99_E00000',)
    # I don't know if numbers are OK in the XNAT instance name. Thus, split the
    # '_' and take the last element
    parts = last_id[0].split('_')
    last_id_digits = re.findall(r'\d+', parts[-1])
    # Drop the numeric tail to reconstruct the XNAT instance name prefix.
    prefix = '_'.join(parts[:-1])
    # I don't know if the zeropadding is a hard limit. This mimics the current install
    new_id = "%s_E%05d" % (prefix, int(last_id_digits[0]) + 1)
    return (new_id,)
def get_timestamp():
    """
    :return: current UTC time as 'YYYY-MM-DD HH:MM:SS.mmm' (millisecond precision)
    """
    now = datetime.datetime.utcnow()
    # strftime emits six microsecond digits; trim the last three to get ms.
    return now.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
def get_timestamp_from_datetime_object(obj):
    """
    Format a datetime as 'YYYY-MM-DD HH:MM:SS.mmm' (millisecond precision).

    :param obj: datetime.datetime instance
    :return: formatted timestamp string
    """
    full = obj.strftime('%Y-%m-%d %H:%M:%S.%f')
    # Trim microseconds down to three digits.
    return full[:-3]
def check_if_project_exits(cursor, project_id):
    """
    Return True when a project row with the given ID exists.

    (Function name typo is historical; kept for callers.)

    :param cursor: psycopg2 cursor
    :param project_id: XNAT project ID to look up
    :return: True when found, False otherwise
    """
    cursor.execute("""SELECT id from xnat_projectdata WHERE id=(%s)""", (project_id,))
    return bool(cursor.fetchall())
def check_if_subject_exists(cursor, project_id, subject_label):
    """
    Return True when a subject with this label exists in the project.

    :param cursor: psycopg2 cursor
    :param project_id: project the subject should belong to
    :param subject_label: subject label to look up
    :return: True when found, False otherwise
    """
    cursor.execute("""SELECT id from xnat_subjectdata where project=(%s) AND label=(%s)""",
                   (project_id, subject_label,))
    return bool(cursor.fetchall())
def check_if_experiment_exists(cursor, project_id, experiment_label):
    """
    Return True when an experiment with this label exists in the project.

    :param cursor: psycopg2 cursor
    :param project_id: project the experiment should belong to
    :param experiment_label: experiment label to look up
    :return: True when found, False otherwise
    """
    cursor.execute("""SELECT id from xnat_experimentdata where project=(%s) AND label=(%s)""",
                   (project_id, experiment_label,))
    return bool(cursor.fetchall())
def check_if_assessor_exists(cursor, project_id, assessor_label):
    """
    Return True when an assessor with this label exists in the project.

    Assessors live in xnat_experimentdata, so this mirrors the experiment check.

    :param cursor: psycopg2 cursor
    :param project_id: project the assessor should belong to
    :param assessor_label: assessor label to look up
    :return: True when found, False otherwise
    """
    cursor.execute("""SELECT id from xnat_experimentdata where project=(%s) AND label=(%s)""",
                   (project_id, assessor_label,))
    return bool(cursor.fetchall())
def check_if_xsitype_exists(cursor, xsitype):
    """
    Check whether the table backing *xsitype* exists by probing it.

    :param cursor: psycopg2 cursor
    :param xsitype: xsi type name (e.g. 'proc:genProcData')
    :return: True when the xsitype is known and its table is queryable
    """
    # bad idea to pass in table names. So we have them hard coded
    cmd = get_statement_from_xsitype(xsitype)
    if cmd is None:
        return False
    try:
        cursor.execute(cmd)
    except psycopg2.ProgrammingError:
        return False
    return True
def get_experiment_id_from_label(cursor, project_id, experiment_label):
    """
    Fetch the experiment ID for a (project, label) pair.

    :param cursor: psycopg2 cursor
    :param project_id: project the experiment belongs to
    :param experiment_label: label of the experiment
    :return: first matching row tuple, or None when absent
    """
    query = """SELECT id FROM xnat_experimentdata where project=(%s) and label=(%s)"""
    cursor.execute(query, (project_id, experiment_label,))
    return cursor.fetchone()
def execute_wrapper(cursor, template, values):
    """
    Wrap the execution of a statement and catch the base class error.

    :param cursor: psycopg2 cursor
    :param template: string template of a SQL statement
    :param values: tuple of values to insert into the SQL template
    :return: None
    """
    try:
        cursor.execute(template, values)
    except psycopg2.Error as err:
        # Surface both the SQLSTATE code and the server message, then abort.
        for message in ('[ERROR]: Caught psycopg2 error code %s\n' % err.pgcode,
                        '[ERROR]: Message is %s\n' % err.pgerror):
            sys.stderr.write(message)
        # TODO
        # Should we cache everything that is done so we can "undo" it on exception?
        sys.exit(1)
def get_statement_from_xsitype(xsitype):
    """
    Map an xsi type name to its hard-coded probe statement.

    Table names cannot be bound as SQL parameters, so the statements are
    hard coded rather than built from the caller-supplied name.

    :param xsitype: xsi type name (e.g. 'proc:genProcData')
    :return: the probe SQL string, or None for unknown types
    """
    mp = {'proc:genProcData': """SELECT 1 from proc_genprocdata""",
          'fs:fsData': """SELECT 1 from fs_fsdata"""}
    # dict.get gives None-for-missing directly, replacing the
    # `not in mp.keys()` linear membership test.
    return mp.get(xsitype)
| {
"repo_name": "VUIIS/psql_xnat_tools",
"path": "assessor_create_util.py",
"copies": "1",
"size": "11557",
"license": "mit",
"hash": -3634568369988509000,
"line_mean": 29.5740740741,
"line_max": 264,
"alpha_frac": 0.637449165,
"autogenerated": false,
"ratio": 3.3644832605531296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9474040843292411,
"avg_score": 0.0055783164521438465,
"num_lines": 378
} |
__author__ = 'damons'
import subprocess
import sys
import os
if __name__ == '__main__':
    # Usage: get_version_history.py <masimatlab spiders directory>
    MASIMATLAB_SPIDERS = sys.argv[1]
    if not os.path.isdir(MASIMATLAB_SPIDERS):
        sys.stderr.write('Directory %s does not exist' % MASIMATLAB_SPIDERS)
        # Bug fix: without this exit the script continued into os.listdir()
        # on a nonexistent directory and crashed with an unhandled OSError.
        sys.exit(1)
    files = os.listdir(MASIMATLAB_SPIDERS)
    for f in files:
        print(f)
        # Full svn history for this spider file.
        output = subprocess.Popen(["svn", "log",
                                   os.path.join(MASIMATLAB_SPIDERS, f)],
                                  stdout=subprocess.PIPE).communicate()[0]
        output = output.replace('------------------------------------------------------------------------', '')
        output = output.split('\n')
        output_keep = list()
        for line in output:
            # Indent everything except revision header lines (which start 'r').
            if not line.startswith('r') and line != '\n':
                output_keep.append('\t%s\n' % line)
            elif not line.startswith('\n'):
                output_keep.append('%s\n' % line)
        rst_file = f.replace('py', 'rst').replace('Spider_', '')
        # NOTE(review): rst_file is a bare filename, so isfile/open below
        # resolve against the CWD, not MASIMATLAB_SPIDERS -- confirm intended.
        if not os.path.isfile(rst_file):
            print("Warning: %s does not exist for %s. Please generate" % (rst_file, f))
        else:
            with open(rst_file, 'a') as rst:
                rst.writelines(output_keep)
| {
"repo_name": "VUIIS/dax_spiders",
"path": "docs/news/spiders/spider_level/get_version_history.py",
"copies": "1",
"size": "1234",
"license": "mit",
"hash": -8599040835590297000,
"line_mean": 33.2777777778,
"line_max": 111,
"alpha_frac": 0.5008103728,
"autogenerated": false,
"ratio": 3.785276073619632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4786086446419632,
"avg_score": null,
"num_lines": null
} |
__author__ = 'damons'
import psycopg2
import datetime
import os
import paramiko
def parse_args():
    """Set up the ArgumentParser"""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-p', '--project', dest='project', required=True,
                        help='Project ID on XNAT')
    parser.add_argument('-u', '--user', dest='user', required=True,
                        help='DB owner username')
    parser.add_argument('-d', '--dbname', dest='dbname', required=True,
                        help='Database name')
    parser.add_argument('-c', '--cred', dest='password', required=True,
                        help='Database username password')
    return parser.parse_args()
def get_xdat_change_info_id(cursor):
    """
    Draw the next id from the abstractresource sequence.

    NOTE(review): despite the name, this queries the
    xnat_abstractresource sequence -- confirm that is intended.

    :param cursor: psycopg2 cursor
    :return: the new id as an int
    """
    cursor.execute("""SELECT nextval('public.xnat_abstractresource_xnat_abstractresource_id_seq')""")
    row = cursor.fetchone()
    return int(row[0])
def get_meta_data_id(cursor):
    """
    Draw the next xnat_resource meta data id from its sequence.

    :param cursor: psycopg2 cursor
    :return: the new id as an int
    """
    cursor.execute("""SELECT nextval('public.xnat_resource_meta_data_meta_data_id_seq')""")
    row = cursor.fetchone()
    return int(row[0])
def get_meta_data_id2(cursor):
    """
    Draw the next xnat_abstractresource meta data id from its sequence.

    :param cursor: psycopg2 cursor
    :return: the new id as an int
    """
    cursor.execute("""SELECT nextval('public.xnat_abstractresource_meta_data_meta_data_id_seq')""")
    row = cursor.fetchone()
    return int(row[0])
def get_xnat_abstractresource_id(cursor):
    """
    Draw the next xnat_abstractresource id from its sequence.

    :param cursor: psycopg2 cursor
    :return: the new id as an int
    """
    cursor.execute("""SELECT nextval('public.xnat_abstractresource_xnat_abstractresource_id_seq')""")
    row = cursor.fetchone()
    return int(row[0])
def get_resourcecatalog_id(cursor):
    """
    Draw the next xnat_resourcecatalog meta data id from its sequence.

    :param cursor: psycopg2 cursor
    :return: the new id as an int
    """
    cursor.execute("""SELECT nextval('public.xnat_resourcecatalog_meta_data_meta_data_id_seq')""")
    row = cursor.fetchone()
    return int(row[0])
def get_timestamp():
    """
    :return: current UTC time as 'YYYY-MM-DD HH:MM:SS.mmm' (millisecond precision)
    """
    now = datetime.datetime.utcnow()
    # strftime emits six microsecond digits; drop the last three to get ms.
    return now.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
def insert_xnat_resource_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row into xnat_resource_meta_data.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key for the new row
    :return: None
    """
    date = get_timestamp()
    print(date)
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text (string interpolation breaks on quotes and is injection-prone).
    cmd = """INSERT INTO xnat_resource_meta_data (xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES (%s,'active',1,%s,%s,%s,0,1,%s,1)"""
    cursor.execute(cmd, (xft_version, date, date, date, meta_data_id))
    conn.commit()
def insert_xnat_abstractResource_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row into xnat_abstractResource_meta_data.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key for the new row
    :return: None
    """
    date = get_timestamp()
    print(date)
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text (string interpolation breaks on quotes and is injection-prone).
    cmd = """INSERT INTO xnat_abstractResource_meta_data (xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES (%s,'active',1,%s,%s,%s,0,1,%s,1)"""
    cursor.execute(cmd, (xft_version, date, date, date, meta_data_id))
    conn.commit()
def insert_xnat_abstractResource(conn, cursor, abstractresource_info, xnat_abstractresource_id, resource_name):
    """
    Insert an xnat_abstractResource row (extension hard-coded to 234).

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param abstractresource_info: meta data id for the row
    :param xnat_abstractresource_id: primary key for the row
    :param resource_name: resource label
    :return: None
    """
    date = get_timestamp()
    print(date)
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text (the label in particular broke on quotes).
    cmd = """INSERT INTO xnat_abstractResource (abstractresource_info,xnat_abstractresource_id,label,extension) VALUES (%s,%s,%s,234)"""
    cursor.execute(cmd, (abstractresource_info, xnat_abstractresource_id, resource_name))
    conn.commit()
def insert_xnat_resource(conn, cursor, xnat_abstractresource_id, resource_info, uri):
    """
    Insert an xnat_resource row.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xnat_abstractresource_id: owning abstract resource id
    :param resource_info: resource meta data id
    :param uri: path of the resource on disk
    :return: None
    """
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text (the uri in particular broke on quotes).
    cmd = """INSERT INTO xnat_resource (xnat_abstractresource_id,resource_info,uri) VALUES (%s,%s,%s)"""
    cursor.execute(cmd, (xnat_abstractresource_id, resource_info, uri))
    conn.commit()
def insert_xnat_resourceCatalog_meta_data(conn, cursor, xft_version, meta_data_id):
    """
    Insert an 'active' meta data row into xnat_resourceCatalog_meta_data.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xft_version: xft version value for the new row
    :param meta_data_id: primary key for the new row
    :return: None
    """
    date = get_timestamp()
    print(date)
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text (string interpolation breaks on quotes and is injection-prone).
    cmd = """INSERT INTO xnat_resourceCatalog_meta_data(xft_version,status,activation_user_xdat_user_id,activation_date,row_last_modified,insert_date,modified,insert_user_xdat_user_id,meta_data_id,shareable) VALUES (%s,'active',1,%s,%s,%s,0,1,%s,1)"""
    cursor.execute(cmd, (xft_version, date, date, date, meta_data_id))
    conn.commit()
def insert_xnat_resourceCatalog(conn, cursor, xnat_abstractresource_id, resourcecatalog_info):
    """
    Insert an xnat_resourceCatalog row.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param xnat_abstractresource_id: owning abstract resource id
    :param resourcecatalog_info: catalog meta data id
    :return: None
    """
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text.
    cmd = """INSERT INTO xnat_resourceCatalog (xnat_abstractresource_id,resourcecatalog_info) VALUES (%s,%s)"""
    cursor.execute(cmd, (xnat_abstractresource_id, resourcecatalog_info))
    conn.commit()
def insert_img_assessor_out_resource(conn, cursor, assessor_id, xnat_abstractresource_id):
    """
    Link an out-resource to an image assessor.

    :param conn: psycopg2 connection (committed after the insert)
    :param cursor: psycopg2 cursor
    :param assessor_id: xnat_imageAssessorData id
    :param xnat_abstractresource_id: abstract resource id to link
    :return: None
    """
    # Security/correctness fix: bind values instead of %-formatting them into
    # the SQL text (the assessor id broke on quotes and was injection-prone).
    cmd = """INSERT INTO img_assessor_out_resource (xnat_imageAssessorData_id,xnat_abstractResource_xnat_abstractresource_id) VALUES (%s,%s)"""
    cursor.execute(cmd, (assessor_id, xnat_abstractresource_id))
    conn.commit()
def load_queue(upload_dir):
    """
    List assessor entries awaiting upload.

    :param upload_dir: path of the DAX upload queue directory
    :return: directory entries minus DAX bookkeeping folders
    """
    # Bookkeeping folders that never hold assessor results. ('TRASH' was
    # listed twice in the original; a set also makes membership O(1) and
    # filters every occurrence instead of list.remove's first-only.)
    known_bad = {'TRASH', 'DISKQ', 'FlagFiles', 'PBS', 'OUTLOG'}
    return [entry for entry in os.listdir(upload_dir) if entry not in known_bad]
def get_assessor_id_from_assessor_label(cursor, assessor):
    """
    Look up the experiment ID for an assessor label.

    :param cursor: psycopg2 cursor
    :param assessor: assessor label
    :return: the ID value, or None when no such assessor exists
    """
    # Security/correctness fix: bind the label instead of %-formatting it
    # into the SQL text (quote-unsafe and injection-prone).
    cursor.execute("""SELECT id from xnat_experimentdata where label=(%s)""", (assessor,))
    result = cursor.fetchone()
    # fetchone() returns None for no match; the original relied on catching
    # the TypeError from indexing None to produce the same result.
    if result is None:
        return None
    return result[0]
def get_archive_path(cursor, project_id):
    """
    Fetch the archive path whose trailing component is *project_id*.

    :param cursor: psycopg2 cursor
    :param project_id: XNAT project ID
    :return: single-element row tuple with the path, or None
    """
    # Security/correctness fix: bind the LIKE pattern as a parameter instead
    # of %-formatting it into the SQL text; the % wildcard now lives in the
    # bound value rather than the statement.
    pattern = '%/' + project_id + '/'
    cursor.execute("""SELECT archivepath from arc_pathinfo where archivepath LIKE (%s)""", (pattern,))
    return cursor.fetchone()
def mkdir_p(sftp, remote_directory):
    # NOTE(review): the debug print below precedes the intended docstring, so
    # the triple-quoted string is a no-op expression, not a real docstring.
    print remote_directory
    """Change to this directory, recursively making new folders if needed.
    Returns True if any folders were created."""
    if remote_directory == '/':
        # absolute path so change directory to root
        sftp.chdir('/')
        return
    if remote_directory == '':
        # top-level relative directory must exist
        return
    try:
        sftp.chdir(remote_directory) # sub-directory exists
    except IOError:
        # Directory is missing: recursively ensure the parent exists first,
        # then create and enter this component.
        dirname, basename = os.path.split(remote_directory.rstrip('/'))
        print basename
        mkdir_p(sftp, dirname) # make parent directories
        sftp.mkdir(basename) # sub-directory missing, so created it
        sftp.chdir(basename)
        return True
if __name__ == '__main__':
    ARGS = parse_args()
    # NOTE(review): hard-coded SFTP host and blank username/password --
    # these must be filled in before the script can run.
    ssh = paramiko.Transport('129.59.91.137',22)
    ssh.connect(username='', password='')
    sftp = paramiko.SFTPClient.from_transport(ssh)
    # Try to connect and exit if we catch the most common OperationalError
    # will add more as they are encountered.
    conn = psycopg2.connect(dbname=ARGS.dbname,
                            password=ARGS.password, user=ARGS.user,
                            host='')
    cursor = conn.cursor()
    # NOTE(review): upload_dir is blank -- must be set to the DAX queue path.
    upload_dir =''
    queue = load_queue(upload_dir)
    for assessor in queue:
        print "Begin assessor %s" % assessor
        fullpath = os.path.join(upload_dir,assessor)
        if os.path.exists(os.path.join(fullpath,'READY_TO_UPLOAD.txt')):
            print "\tREADY flag exists. Begin upload"
            assessor_id = get_assessor_id_from_assessor_label(cursor,assessor)
            if assessor_id is None:
                print "\tAssessor %s does not exist. Skipping..." % assessor
                continue
            # Assessor labels look like PROJECT-x-SESSION-x-...; field 0 is the project.
            archive_path = get_archive_path(cursor,assessor.split('-x-')[0])
            if not archive_path:
                print "Could not fetch archive path for project %s" % assessor.split('-x-')[0]
                continue
            resources = os.listdir(fullpath)
            for resource in resources:
                if os.path.isdir(os.path.join(fullpath,resource)):
                    os.chdir(os.path.join(fullpath,resource))
                    # Mirror the local resource tree to the remote archive via SFTP.
                    for f in os.walk('.'):
                        #mkdir_p(sftp,f[0].replace(archive_))
                        tail_of_dir = f[0].replace(archive_path[0],'')
                        uri = os.path.join(archive_path[0],'arc001',assessor.split('-x-')[1],'ASSESSORS',assessor,resource,tail_of_dir)
                        print uri
                        mkdir_p(sftp,uri)
                        for fl in f[2]:
                            sftp.put(os.path.join(f[0],fl),os.path.join(uri,fl))
                    # Allocate all ids needed for the database rows.
                    xft_version = get_xdat_change_info_id(cursor)
                    meta_data_id =get_meta_data_id(cursor)
                    abstract_meta_data_id = get_meta_data_id2(cursor)
                    xnat_abstractresource_id = get_xnat_abstractresource_id(cursor)
                    resroucecatalog_id = get_resourcecatalog_id(cursor)
                    # xnat_resource_meta_data
                    insert_xnat_resource_meta_data(conn,cursor,xft_version,meta_data_id)
                    # NOTE(review): this reuses meta_data_id (the xnat_resource
                    # one) rather than abstract_meta_data_id -- confirm intended.
                    insert_xnat_abstractResource_meta_data(conn,cursor,xft_version,meta_data_id)
                    insert_xnat_abstractResource(conn,cursor,abstract_meta_data_id,xnat_abstractresource_id,resource)
                    # NOTE(review): this %s-formats the archive_path *tuple*
                    # (not archive_path[0]) and indexes split('-x-')[2] where
                    # the upload loop above used [1] -- both look suspect; verify.
                    uri = '%sarc001/%s/ASSESSORS/%s/%s/%s_catalog.xml' % (archive_path,
                                                                         assessor.split('-x-')[2],
                                                                         assessor,
                                                                         resource,
                                                                         resource)
                    insert_xnat_resourceCatalog_meta_data(conn,cursor,xft_version,meta_data_id)
                    insert_xnat_resource(conn,cursor,xnat_abstractresource_id,resroucecatalog_id,uri)
                    insert_xnat_resourceCatalog(conn,cursor,xnat_abstractresource_id,resroucecatalog_id)
                    insert_img_assessor_out_resource(conn,cursor,assessor_id,xnat_abstractresource_id)
        else:
            print "\tREADY flag does NOT exist. skipping..."
| {
"repo_name": "VUIIS/psql_xnat_tools",
"path": "psql_upload.py",
"copies": "1",
"size": "10283",
"license": "mit",
"hash": 4260305824998676000,
"line_mean": 36.5291970803,
"line_max": 312,
"alpha_frac": 0.6125644267,
"autogenerated": false,
"ratio": 3.5954545454545452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4708018972154545,
"avg_score": null,
"num_lines": null
} |
__author__ = 'damons'
__purpose__ = 'Every now and then a database transaction trashes and leaves a project in a perpetually ' \
'corrupt state the purpose of this script is to find those and delete them'
from dax import XnatUtils
import psycopg2
import sys
from psycopg2 import OperationalError
def parse_args():
    """Set up the ArgumentParser"""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-p', '--project', dest='project', required=True,
                        help='Project ID on XNAT')
    parser.add_argument('-u', '--user', dest='user', required=True,
                        help='DB owner username')
    parser.add_argument('-d', '--dbname', dest='dbname', required=True,
                        help='Database name')
    parser.add_argument('-c', '--cred', dest='password', required=True,
                        help='Database username password')
    return parser.parse_args()
if __name__ == '__main__':
    # NOTE(review): -p/--project is required by parse_args but never used
    # below -- the loop scans ALL projects on the XNAT instance.
    ARGS = parse_args()
    # Try to connect and exit if we catch the most common OperationalError
    # will add more as they are encountered.
    try:
        conn = psycopg2.connect(dbname=ARGS.dbname,
                                password=ARGS.password, user=ARGS.user)
    except OperationalError as operr:
        print "FATAL: Caught an OperationalError. Please check your dbname, " \
              "host ip address, username, and password"
        sys.exit(1)
    cur = conn.cursor()
    XNAT = XnatUtils.get_interface()
    PROJECTS = XNAT.select('/projects').get()
    for PROJECT in PROJECTS:
        sessions = XnatUtils.list_experiments(XNAT, PROJECT)
        for session in sessions:
            # Corrupt assessors show up as "sessions" whose label contains
            # the assessor separator '-x-'; delete them straight from the DB.
            if '-x-' in session['session_label']:
                sys.stdout.write('Found corrupt assessors %s for project %s\n' % (session['session_label'],
                                                                                  PROJECT))
                cur.execute("""DELETE FROM xnat_experimentdata where label=(%s)""",(session['session_label'],))
                conn.commit()
    XNAT.disconnect()
    conn.close()
"repo_name": "VUIIS/psql_xnat_tools",
"path": "psql_remove_corrupt_assessors.py",
"copies": "1",
"size": "2072",
"license": "mit",
"hash": -3012601117962615300,
"line_mean": 39.6470588235,
"line_max": 111,
"alpha_frac": 0.5916988417,
"autogenerated": false,
"ratio": 4.237218813905931,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.532891765560593,
"avg_score": null,
"num_lines": null
} |
__author__ = 'damyers'
import re
import requests
import json
import time
import datetime
from lxml import html
def main():
    """
    Scrape application listings from appdb.winehq and store them in an json object.

    Fetches every results page for each rating and writes the collected
    entries (plus a scrape date) to winehq.json.

    :return: None (writes winehq.json)
    """
    winehq_file = 'winehq.json'
    ratings = ['Platinum', 'Gold']
    main_url = "https://appdb.winehq.org/objectManager.php?bIsQueue=false&bIsRejected=false&sClass=application&sTitle=Browse+Applications&iItemsPerPage=200&iPage=1&iappVersion-ratingOp0=5&sappVersion-ratingData0={rating}%3Cbr%20/%3E&sOrderBy=appId&bAscending=true&iItemsPerPage=200&iPage={page}"
    page_regex = re.compile("Page 1 of ([0-9]*)")
    results = {}
    results['date'] = str(datetime.datetime.now())
    for rating in ratings:
        time.sleep(2)  # be polite to the server between requests
        first_page = requests.get(main_url.format(rating=rating, page=1))
        first_tree = html.fromstring(first_page.text)
        # Pull the total page count out of the "Page 1 of N" bold text.
        page_range = [result.group(1) for item in [b.text_content() for b in first_tree.xpath('//b')] for result in [page_regex.search(item)] if result][0]
        items = [td.text_content() for td in first_tree.xpath('//td')]
        for pos in range(3, len(items), 3):
            results[items[pos + 1]] = {"app": items[pos], "description": items[pos + 2], "rating": rating}
        # Bug fix: the original iterated range(int(page_range)), re-fetching
        # pages 0 and 1 (page 1 was already handled above) and never fetching
        # the last page.  Continue from page 2 through page_range inclusive.
        for page_number in range(2, int(page_range) + 1):
            time.sleep(2)
            page = requests.get(main_url.format(rating=rating, page=page_number))
            tree = html.fromstring(page.text)
            items = [td.text_content() for td in tree.xpath('//td')]
            for pos in range(3, len(items), 3):
                # NOTE(review): this keys on items[pos] with a "winehq" field
                # while the first page keyed on items[pos + 1] with an "app"
                # field -- one of the two layouts is presumably wrong; confirm
                # against the live table markup before unifying.
                results[items[pos]] = {"winehq": items[pos + 1], "description": items[pos + 2], "rating": rating}
    with open(winehq_file, 'w') as winehq_raw:
        json.dump(results, winehq_raw, indent=4)
if __name__ == "__main__":
main() | {
"repo_name": "moird/linux-game-report",
"path": "tools/winhqscrape.py",
"copies": "1",
"size": "1875",
"license": "mit",
"hash": 6086463394899159000,
"line_mean": 39.7826086957,
"line_max": 295,
"alpha_frac": 0.6368,
"autogenerated": false,
"ratio": 3.210616438356164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4347416438356164,
"avg_score": null,
"num_lines": null
} |
__author__ = "Dan C Williams"
__version__ = "0.01"
__date__ = "Oct-9-2016"
__email__ = "dan.c.williams@gmail.com"
__status__ = "Development"
import netaddr
import collections
import re
import yaml
class MyDumper(yaml.Dumper):
    # Dumper subclass used below so list items are indented under their
    # parent key in the emitted YAML.
    def increase_indent(self, flow=False, indentless=False):
        # Force indentless=False regardless of what the emitter requests.
        return super(MyDumper, self).increase_indent(flow, False)
def main():
    """
    Parse the cleared prefix-list dump CLEAR_TEST_PL_DATA.txt and write
    initial_load.yaml with 'allowed' and 'remediation' sections keyed by VRF.
    """
    raw_lines = [line.rstrip('\n') for line in open('CLEAR_TEST_PL_DATA.txt') if line[:-1]]
    black_list = ['!']
    split_list = []
    pl_list = []
    pl_names_dict = {}
    pl_dict = collections.defaultdict(list)
    pl_dict_final = {}
    delete_list = []
    temporary_list = []
    pl_description_list = []
    pl_default_route = []
    pl_desc_dict = collections.defaultdict(list)
    pl_default_dict = collections.defaultdict(list)
    yaml_dict = {}
    for i, line in enumerate(raw_lines): # Identify garbage in data set
        for j in black_list:
            if j in line:
                temporary_list.append(line)
    for del_lines in temporary_list: # CLEARS OUT GARBAGE
        raw_lines.remove(del_lines)
    for i, line in enumerate(raw_lines):
        temporary_list = line.split() #SPLITTING LINES INTO LIST
        split_list.append(temporary_list) #ADDING LIST TO LIST OF LIST
        temporary_list = [] #Reset TEMP List
    for i, line in enumerate(split_list): #Grab PL name and description and place them in pl_descripiton_list
        pl_names_dict[line[2]] = ''
        if line[3] == 'description': # in a list of lists for further processing
            temporary_list.append(line[2])
            temporary_list.append(' '.join(line[4::]))
            pl_description_list.append(temporary_list)
            delete_list.append(line)
        # NOTE(review): this indexes line[6] unconditionally; a line with
        # fewer than 7 tokens (e.g. a short description line) would raise
        # IndexError -- confirm the input format guarantees 7+ tokens.
        if line [6] == '0.0.0.0/0': # Finds and removes default routes
            temporary_list.append(line[2])
            temporary_list.append(line[6])
            pl_default_route.append(temporary_list)
            delete_list.append(line)
        temporary_list = []
    for del_lines in delete_list: # CLEARS OUT DESCRIPTIONS & DEFAULT ROUTES
        split_list.remove(del_lines)
    for i, line in enumerate(split_list): # Grab PL name and network and place them
        temporary_list.append(line[2]) # in a list of lists for further processing
        temporary_list.append(line[6])
        pl_list.append(temporary_list)
        temporary_list = []
    for key, value in pl_description_list: #create pl_desc_dict using the key and description string
        tempString = value
        pl_desc_dict[key] = tempString
    for key, value in pl_default_route: #create pl_default_dict using the key and network
        pl_default_dict[key].append(netaddr.IPNetwork(value))
    for key, value in pl_list: #create pl_dict using the key and network
        pl_dict[key].append(netaddr.IPNetwork(value))
    # Collapse adjacent networks per prefix list and sort them.
    for key, value in pl_dict.items():
        value = netaddr.cidr_merge(value)
        value.sort()
        pl_dict_final[key] = value
    d = collections.OrderedDict(sorted(pl_names_dict.items()))
    yaml_dict['allowed'] = {}
    yaml_dict['remediation'] = {}
    # Build the output structure; the VRF key is extracted from the prefix
    # list name with the pattern digit:4-digits (e.g. '1:1234').
    # NOTE(review): the regex is an unescaped literal ('\d' in a plain
    # string); use a raw string when this can be touched as a code change.
    for key, blank in d.items():
        if "ALLOWED" in key:
            m = re.findall('\d:\d{4}', key)
            if m:
                vrf = m
                yaml_dict['allowed'][vrf[0]] = {}
                if key in pl_desc_dict:
                    yaml_dict['allowed'][vrf[0]]['description'] = pl_desc_dict[key]
                yaml_dict['allowed'][vrf[0]]['prefix'] = {}
                if key in pl_dict_final:
                    temp_list = []
                    for i, ip_address in enumerate(pl_dict_final[key]):
                        temp_list.append(str(ip_address))
                    yaml_dict['allowed'][vrf[0]]['prefix'] = temp_list
        if "REMEDIATION" in key:
            m = re.findall('\d:\d{4}', key)
            if m:
                vrf = m
                yaml_dict['remediation'][vrf[0]] = {}
                if key in pl_desc_dict:
                    yaml_dict['remediation'][vrf[0]]['description'] = pl_desc_dict[key]
                yaml_dict['remediation'][vrf[0]]['prefix'] = {}
                if key in pl_dict_final:
                    temp_list = []
                    for i, ip_address in enumerate(pl_dict_final[key]):
                        temp_list.append(str(ip_address))
                    yaml_dict['remediation'][vrf[0]]['prefix'] = temp_list
    with open('initial_load.yaml', 'w') as outfile:
        stream = yaml.dump(yaml_dict, Dumper=MyDumper, default_flow_style=False)
        outfile.write('---\n')
        outfile.write(stream)
        outfile.close()
    print('COMPLETE')
if __name__ == "__main__":
main()
| {
"repo_name": "dancwilliams/Prefix_List_Script",
"path": "EXTRA_SCRIPTS/INITIAL_YAML/initial_yaml.py",
"copies": "1",
"size": "4652",
"license": "mit",
"hash": 4362936724134158300,
"line_mean": 35.34375,
"line_max": 109,
"alpha_frac": 0.5808254514,
"autogenerated": false,
"ratio": 3.575710991544965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4656536442944965,
"avg_score": null,
"num_lines": null
} |
import getpass, sys, telnetlib, time
def command_telnet(num, totalTries):
    """
    Interactive prompt: read one command and dispatch to testlink()/rssi().

    :param num: 0 on the first invocation (prints the full help text),
                nonzero on retries (prints the error variant)
    :param totalTries: number of failed attempts so far; exits at 4
    """
    if num == 0: #1st time run
        print("Commands: \"testlink AP-IP SU-ID\"")
        print("or \"rssi IP\"")
    else:
        print("Improper syntax. Reiterating commands:")
        print("\"testlink AP-IP SU-ID\"")
        print("\"rssi IP\"")
    print("Special Command: \"c\" to close")
    command = input("Enter command: ") #input() use to be raw_input()
    parserCommand = command.split(" ") #Splits the command string w/ spaces being the delimeter
    #Check and see if sent syntax is correct
    if parserCommand[0].lower() == "testlink":
        print("Works, telneting in...")
        testlink()
    elif parserCommand[0].lower() == "rssi":
        print("Works, telneting in...")
        rssi()
    elif parserCommand[0].lower() == "c":
        print("Closing the program.")
        sys.exit()
    else:
        # Unrecognized command: count the failure and re-prompt (recursively)
        # until four total failures, then give up.
        totalTries += 1
        if totalTries >= 4:
            print("Too many tries. Closing program.")
            sys.exit()
        else:
            command_telnet(1, totalTries)
def rssi():
    """Telnet to the hard-coded radio, run 'rssi', and print the session output."""
    host = "10.128.41.93"
    password = b"Password"
    session = telnetlib.Telnet(host)
    session.read_until(b"Password: ")
    session.write(password + b"\n")
    session.read_until(b"#>")
    for cmd in (b"rssi", b"exit"):
        session.write(cmd + b"\n")
    print(session.read_all())
def testlink():
    """Telnet to the hard-coded AP, run 'su testrflink 47', and print the output."""
    host = "10.128.41.6"
    password = b"WisperISP123"
    session = telnetlib.Telnet(host)
    session.read_until(b"Password: ")
    session.write(password + b"\n")
    # Numbered prints are progress markers for debugging the session flow.
    print("1")
    session.read_until(b"#>", 2)
    print("2")
    session.write(b"su testrflink 47" + b"\n")
    print("3")
    session.write(b"exit" + b"\n")
    print("4")
    print(session.read_all())
command_telnet(0, 0)
| {
"repo_name": "Testermous/TrangoInferface",
"path": "TrangoRSSITestlink.py",
"copies": "1",
"size": "1647",
"license": "mpl-2.0",
"hash": 4060825373158295000,
"line_mean": 22.8695652174,
"line_max": 92,
"alpha_frac": 0.6405585914,
"autogenerated": false,
"ratio": 2.618441971383148,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8244563470276911,
"avg_score": 0.10288741850124725,
"num_lines": 69
} |
#read input file
def readInputData(filename='input.txt'):
    """
    Read a space-separated integer grid from *filename*.

    Generalized: the filename is a parameter (defaulting to the original
    hard-coded 'input.txt'); the handle is closed via a context manager
    (the original leaked it); blank lines -- e.g. from a trailing
    newline -- are skipped instead of crashing on int('').

    :param filename: path of the grid file
    :return: list of rows, each a list of ints
    """
    with open(filename, 'r') as f:
        lines = f.read().split('\n')
    return [[int(token) for token in line.split(' ')] for line in lines if line]
#parameter: 2 dimensions list
def solve(list):
    """
    Return the largest product of four adjacent cells in a square grid.

    Checks rows, columns, downward diagonals, and upward (anti) diagonals.
    Generalized: the anti-diagonal bound is derived from the grid size;
    the original hard-coded `j <= 16`, which only worked for 20x20 grids
    and raised IndexError on smaller ones.

    (Parameter name `list` shadows the builtin; kept for interface
    compatibility with existing callers.)
    """
    best = 0
    length = len(list)
    for i in range(length):
        for j in range(length):
            # right
            if j + 3 < length:
                prod = list[i][j] * list[i][j + 1] * list[i][j + 2] * list[i][j + 3]
                if prod > best:
                    best = prod
            # down
            if i + 3 < length:
                prod = list[i][j] * list[i + 1][j] * list[i + 2][j] * list[i + 3][j]
                if prod > best:
                    best = prod
            # downward diagonal
            if i + 3 < length and j + 3 < length:
                prod = list[i][j] * list[i + 1][j + 1] * list[i + 2][j + 2] * list[i + 3][j + 3]
                if prod > best:
                    best = prod
            # upward (anti) diagonal -- bound by the grid size, not a constant
            if i >= 3 and j + 3 < length:
                prod = list[i][j] * list[i - 1][j + 1] * list[i - 2][j + 2] * list[i - 3][j + 3]
                if prod > best:
                    best = prod
    return best
# Script entry point: read the grid from input.txt and print the answer.
# Parenthesized call form works as a Python 2 print statement (single
# argument) AND as a Python 3 print() call.
print(solve(readInputData()))
"repo_name": "vinhqdang/my_mooc",
"path": "MOOC-work/projecteuler/problem11/problem11.py",
"copies": "1",
"size": "1411",
"license": "mit",
"hash": 8965666043978920000,
"line_mean": 30.1136363636,
"line_max": 95,
"alpha_frac": 0.4145995748,
"autogenerated": false,
"ratio": 3.327830188679245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.908372105089881,
"avg_score": 0.03174174251608698,
"num_lines": 44
} |
import pyrene3d
# Example scene: 10 columns of spinning primitives rendered with pyrene3d.
pyrene3d.init()
pyrene3d.set_window_size(1280, 720)
pyrene3d.set_window_title('spinning primitives')

# Build a scene with ten columns, each holding a pyramid, a cube and a sphere.
scene = pyrene3d.Scene()
objects = []
for i in range(10):
    pyramid = scene.add_object('pyramid')
    cube = scene.add_object('cube')
    sphere = scene.add_object('sphere:10000')  # NOTE(review): ':10000' presumably a tessellation/detail parameter -- confirm
    objects += [pyramid, cube, sphere]
    # Columns are spaced 2 units apart on x; the three shapes sit at
    # different z offsets within a column.
    scene.translate_object(sphere, i * 2, 0.0, -1.7, True, False)
    scene.translate_object(pyramid, i * 2, 0.0, -0.2, True, False)
    scene.translate_object(cube, i * 2, 0.0, 1.5, True, False)

camera = pyrene3d.Camera()
camera.translate(-2.5, 2.0, 0.0, False)
camera.focus(1.0, 0.0, 0.0, False)
pyrene3d.set_scene(scene)
pyrene3d.set_camera(camera)

def tick_callback(dt):
    """Per-frame callback: spin every object a small step around the y axis."""
    for object in objects:
        scene.rotate_object(object, 0.0, 0.01, 0.0, True)

pyrene3d.set_tick_callback(tick_callback)
pyrene3d.run()
pyrene3d.free()
| {
"repo_name": "DAN-2029/Pyrene3D",
"path": "example.py",
"copies": "1",
"size": "1026",
"license": "mit",
"hash": 6175938744093948000,
"line_mean": 26.7297297297,
"line_max": 92,
"alpha_frac": 0.6929824561,
"autogenerated": false,
"ratio": 2.577889447236181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8760325893491433,
"avg_score": 0.0021092019689495146,
"num_lines": 37
} |
__author__ = 'Daniel Bugl'
__copyright__ = "Copyright 2013, Daniel Bugl"
__credits__ = ["Daniel Bugl"]
__license__ = "BSD"
__version__ = "0.1.0"
__maintainer__ = "Daniel Bugl"
__email__ = "daniel.bugl@touchlay.com"
__status__ = "Prototype"
from . import environment
class LatexDocument:
    """A LaTeX document split into a preamble (header) and a body (content).

    Both parts are ``environment.LatexEnvironment`` instances; every
    accessor below simply forwards to one (or both) of them, so this class
    is a thin facade over the environment API.
    """

    def __init__(self):
        # The preamble lives in the global environment; the body lives
        # inside the \begin{document} ... \end{document} environment.
        self.__header = environment.LatexEnvironment()  # global environment
        self.__content = environment.LatexEnvironment("document")  # document environment
        # Prefix/suffix strings applied when appending text and comments.
        self.text_append_prefix = ""
        self.text_append_suffix = ""
        self.comment_prefix = "% "
        self.comment_append_prefix = ""
        self.comment_append_suffix = ""

    def getDocument(self, no_prefix=True):
        """Render the whole document: header string followed by content string."""
        rendered_header = self.__header.getString(no_prefix)
        rendered_content = self.__content.getString(no_prefix)
        return rendered_header + rendered_content

    def getLines(self):
        """Return every line of the document (header lines, then content lines)."""
        return self.__header.getLines() + self.__content.getLines()

    def getLinesHeader(self):
        """Return the lines of the header only."""
        return self.__header.getLines()

    def getLinesContent(self):
        """Return the lines of the content only."""
        return self.__content.getLines()

    def addHeaderLine(self, line):
        """Append a LatexLine to the header."""
        return self.__header.addLine(line)

    def addContentLine(self, line):
        """Append a LatexLine to the content."""
        return self.__content.addLine(line)

    def setHeader(self, header):
        """Replace the header lines with the given list."""
        return self.__header.setLines(header)

    def setContent(self, content):
        """Replace the content lines with the given list."""
        return self.__content.setLines(content)

    def setHeaderLine(self, index, line):
        """Overwrite the header line at *index*."""
        return self.__header.setLine(index, line)

    def setContentLine(self, index, line):
        """Overwrite the content line at *index*."""
        return self.__content.setLine(index, line)
"repo_name": "omnidan/python-latex",
"path": "latex/document.py",
"copies": "1",
"size": "2209",
"license": "bsd-3-clause",
"hash": -783291623281662500,
"line_mean": 34.6451612903,
"line_max": 89,
"alpha_frac": 0.6360344047,
"autogenerated": false,
"ratio": 3.9801801801801804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5116214584880181,
"avg_score": null,
"num_lines": null
} |
import sys
import random
# Global game configuration and state ("g"-prefixed names are module globals).
gMinLetters = 4  # minimum secret-word length the player may choose
gMaxLetters = 12  # maximum secret-word length the player may choose
gMaxAttempts = 10  # upper bound on guesses per round
gGameTitle = "Akemi's Word Game"
debug = 0 # non-zero to print out debug info
gListOfReadWords = list()  # every line read from the words file (set in __main__)
gWordList = list()  # candidates of the currently chosen length (rebuilt by GetRandomWord)
#gWordList = [ 'blahh', 'hello', 'world', 'foool', 'barrr' ]
# Determines if passed in string is an integer
def Is_Str_Integer( str ) :
    """Return True when the argument parses as an int, False otherwise.

    FIX: narrowed the original bare ``except`` (which also swallowed
    KeyboardInterrupt/SystemExit) to the two exceptions int() actually
    raises for bad input: ValueError (non-numeric text) and TypeError
    (None and other unparsable types).
    """
    try :
        int( str )
        return True
    except ( ValueError, TypeError ) :
        return False
# Get string from user
def GetInput( str ) :
    """Prompt with the given string and return the line the user typed.

    Portable across Python 2 (raw_input) and Python 3 (input).

    BUG FIX: the original compared ``sys.version[0] < '3'`` lexically,
    which misclassifies any future two-digit major (e.g. '10'); compare
    ``sys.version_info[0]`` numerically instead.
    """
    if ( sys.version_info[0] < 3 ) :
        retStr = raw_input( str )
    else :
        retStr = input( str )
    return ( retStr )
# Pick valid number of words in secret word
def PickNumLetters() :
    """Prompt until the user enters a valid secret-word length.

    Returns an int in [gMinLetters, gMaxLetters].  (Python 2 module:
    uses print statements.)
    """
    while ( True ) :
        numLettersStr = GetInput( "Enter the number of letters in secret word [%d-%d]: " %(gMinLetters, gMaxLetters) )
        if ( ( Is_Str_Integer( numLettersStr ) is False ) ) :
            print "ERROR: Invalid number of letters"
            continue
        else :
            numLetters = int( numLettersStr )
            if ( ( numLetters < gMinLetters ) or ( numLetters > gMaxLetters ) ) :
                print "ERROR: enter a number between", gMinLetters, "and", gMaxLetters
            else :
                # Valid number of letters chosen
                break
    return ( numLetters )
def PickNumAttempts() :
    """Prompt until the user enters a valid number of guesses.

    Returns an int in [1, gMaxAttempts].  (Python 2 module: uses print
    statements.)
    """
    while ( True ) :
        numAttemptsStr = GetInput( "Enter the number of guesses [1-%d]: " %(gMaxAttempts) )
        if ( ( Is_Str_Integer( numAttemptsStr ) is False ) ) :
            print "ERROR: Invalid number of attempts"
            continue
        else :
            numAttempts = int( numAttemptsStr )
            if ( ( numAttempts < 1 ) or ( numAttempts > gMaxAttempts ) ) :
                print "ERROR: enter a number between 1 and", gMaxAttempts
            else :
                # Valid number of letters chosen
                break
    return ( numAttempts )
def GetRandomWord( listOfWords, numLetters ) :
    """Return a random all-lowercase word of length numLetters, or None.

    Side effect: rebuilds the module-level gWordList with every candidate
    word.  Words whose first character is not a lowercase letter (proper
    nouns etc.) are filtered out.
    """
    # TODO: optimize so that the list doesn't have to be re-created
    # if the same number of letters are chosen
    del gWordList[:]
    # Keep only words of the requested length starting with a lowercase letter.
    for word in listOfWords :
        if len( word ) == numLetters and 'a' <= word[0] <= 'z' :
            gWordList.append( word )
    # No candidates of this length -> no word to offer.
    if not gWordList :
        return None
    # Reselect if the same word comes up twice?????
    return gWordList[ random.randint( 0, len( gWordList ) - 1 ) ]
def GetGuessWord( chance, numLetters ) :
    """Prompt until a valid guess is entered; return it lower-cased.

    A valid guess has exactly numLetters characters, each in 'a'..'z'.
    NOTE(review): the a-z check runs BEFORE .lower(), so uppercase input
    is rejected rather than normalized -- confirm that is intended.

    :param chance: guesses remaining (only used in the prompt text)
    :param numLetters: required guess length
    """
    while ( True ) :
        guessWord = GetInput( "Guess %d: " %(chance) )
        # Check for correct num letters
        if ( len( guessWord ) != numLetters ) :
            print "Guess word does not contain %d letters, guess again" %(numLetters)
        else :
            invalidLetter = False
            for letter in guessWord :
                if ( ( letter < 'a' ) or ( letter > 'z' ) ) :
                    invalidLetter = True
                    break
            if ( invalidLetter == True ) :
                print "Guess word does not contain all letters, guess again"
            else :
                # Valid guess
                break
    return ( guessWord.lower() )
def OpenWordsFile() :
    """Open the word-list file; return the open handle, or None on failure.

    The caller is responsible for closing the returned handle.

    FIX: narrowed the original bare ``except`` to IOError (alias of
    OSError on Python 3) so only genuine file errors produce the message;
    parenthesized print works on both Python 2 and 3.
    """
    fileHandle = None
    wordFile = "SINGLE.TXT"
    try:
        fileHandle = open( wordFile )
    except IOError:
        print( 'ERROR: Unable to open word file: ' + wordFile )
    return ( fileHandle )
if __name__ == '__main__':
    # Interactive game driver (Python 2: uses print statements).
    wordListHandle = None
    changeParams = True # User has to pick params first time
    chances = 5  # default guess count, replaced by PickNumAttempts below
    # Open and read large words file once
    wordListHandle = OpenWordsFile()
    if ( wordListHandle == None ) :
        print "Unable to open file containing the list of words"
        quit()
    gListOfReadWords = wordListHandle.read().splitlines()
    # Welcome banner and legend for the per-guess clue characters.
    print "\n-----------------------------------------------------------------------------"
    print "Welcome to "+gGameTitle+"!"
    print "Guess at the randomly chosen n-letter word"
    print "X = correct letter, wrong location"
    print "O = correct letter and location"
    print "_ = letter not used in word"
    # What if a letter is used more than once? Are plurals legal?
    # Main game loop; stay inside until player wants to exit
    while ( True ) :
        resultList = list()
        if ( changeParams != 0 ) :
            # User picks the number of characters in secret word
            numLetters = PickNumLetters()
            # User picks the number of attempts at the secret word
            chances = PickNumAttempts()
        # Computer picks a random word containing the selected characters
        randomWord = GetRandomWord( gListOfReadWords, numLetters )
        if ( debug != 0 ) :
            print "Random word = ", randomWord
            print "Chances = ", chances
            print "Number of letters = ", numLetters
            #quit()
        # Display the screen for the game; a line for each number of attempts
        while ( chances > 0 ) :
            wrongLetters = 0
            # User guesses the word
            guess = GetGuessWord( chances, numLetters )
            del resultList[:]
            # Game gives clues as to which letters are correct and in the right location
            for index, letter in enumerate( guess ) :
                if ( letter in randomWord ) :
                    if ( guess[index] == randomWord[index] ) :
                        resultList.append("O")
                    else :
                        resultList.append("X")
                        wrongLetters = wrongLetters + 1
                else :
                    resultList.append("_")
                    wrongLetters = wrongLetters + 1
            if ( wrongLetters == 0 ) :
                print "Correct!"
                break
            else :
                # "Guess %d:"
                print "         " + ''.join(resultList) # char list to string
                chances = chances - 1
                if ( chances == 0 ) :
                    print "The word was:", randomWord
        # Replay????
        replay = GetInput( "Play again? [y=yes; n=no; m=change letters/guesses]" )
        if replay == 'y' :
            changeParams = False
        elif replay == 'm' :
            changeParams = True
        else :
            # Exit game
            break
"repo_name": "dencee/AWG",
"path": "AWG_py2.py",
"copies": "1",
"size": "6877",
"license": "mit",
"hash": 3214264566941953000,
"line_mean": 31.2863849765,
"line_max": 118,
"alpha_frac": 0.5378798895,
"autogenerated": false,
"ratio": 4.255569306930693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5293449196430693,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel Dinu'
# Format templates used by Rational.__str__ / __repr__.
NUMERATOR_STR_FORMAT = '{0}'
RATIONAL_STR_FORMAT = '{0}/{1}'
RATIONAL_REPR_FORMAT = '{0}({1}, {2})'

# User-facing error messages raised by Rational's constructor and operators.
NUMERATOR_TYPE_ERROR_MESSAGE = 'The numerator of a rational must be a rational or an integer value!'
DENOMINATOR_TYPE_ERROR_MESSAGE = 'The denominator of a rational must be a rational or an integer value!'
DENOMINATOR_ZERO_DIVISION_ERROR_MESSAGE = 'The denominator of a rational number can not be zero!'
DIVIDER_ZERO_DIVISION_ERROR_MESSAGE = 'The divider cannot be 0!'
POWER_TYPE_ERROR_MESSAGE = 'The power must be an integer value!'
ZERO_TO_NEGATIVE_POWER_ZERO_DIVISION_ERROR_MESSAGE = '0 cannot be raised to a negative power!'
NEGATIVE_INTEGER_TO_FRACTIONAL_POWER_ERROR_MESSAGE = 'Negative number cannot be raised to a fractional power!'
FIRST_TERM_TYPE_ERROR_MESSAGE = 'The first term must be a rational or an integer value!'
SECOND_TERM_TYPE_ERROR_MESSAGE = 'The second term must be a rational or an integer value!'
def gcd(a, b):
    """Greatest common divisor of a and b via Euclid's algorithm.

    Note: with a negative operand the result keeps the sign produced by
    Python's % operator (e.g. gcd(4, -6) == -2).  Rational relies on that
    sign to normalise negative denominators, so the result is deliberately
    NOT abs()-normalised.
    """
    while b != 0:
        a, b = b, a % b
    return a
class Rational:
    """An immutable rational number kept in lowest terms.

    The fraction is reduced on construction and any negative sign is
    normalised onto the numerator.  Arithmetic, comparison and power
    operators accept Rational and int operands.
    """

    def __init__(self, numerator=0, denominator=1):
        """Build a rational; each part may be a Rational or an int.

        :raises TypeError: when either part has an unsupported type
        :raises ZeroDivisionError: when the denominator is (or reduces to) zero
        """
        if not isinstance(numerator, Rational) and not isinstance(numerator, int):
            raise TypeError(NUMERATOR_TYPE_ERROR_MESSAGE)
        # BUG FIX: the original re-tested `numerator` on this line, which
        # wrongly rejected valid Rational denominators and let invalid
        # denominator types through whenever the numerator was a Rational.
        if not isinstance(denominator, Rational) and not isinstance(denominator, int):
            raise TypeError(DENOMINATOR_TYPE_ERROR_MESSAGE)
        if isinstance(denominator, int) and 0 == denominator:
            raise ZeroDivisionError(DENOMINATOR_ZERO_DIVISION_ERROR_MESSAGE)
        if isinstance(denominator, Rational) and 0 == denominator.numerator:
            raise ZeroDivisionError(DENOMINATOR_ZERO_DIVISION_ERROR_MESSAGE)
        if isinstance(numerator, Rational) or isinstance(denominator, Rational):
            numerator, denominator = self.transform(numerator, denominator)
        # Reduce to lowest terms.  _gcd carries the denominator's sign, so
        # floor-dividing by it also moves a negative sign onto the numerator.
        divisor = self._gcd(numerator, denominator)
        self.__numerator = numerator // divisor
        self.__denominator = denominator // divisor

    @staticmethod
    def _gcd(a, b):
        # Euclidean gcd; the result's sign normalises negative denominators
        # (private copy so the class does not depend on module-level gcd).
        while 0 != b:
            a, b = b, a % b
        return a

    @staticmethod
    def _coerce(other):
        # Promote int operands to Rational for comparisons; pass Rationals
        # (and anything else) through unchanged.
        if isinstance(other, int):
            return Rational(other)
        return other

    @staticmethod
    def transform(a, b):
        """Collapse the quotient a / b (each Rational or int) into an
        (int numerator, int denominator) pair."""
        if isinstance(a, Rational):
            numerator = a.numerator
            denominator = a.denominator
        else:
            numerator = a
            denominator = 1
        if isinstance(b, Rational):
            # Dividing by b multiplies by its reciprocal.
            numerator *= b.denominator
            denominator *= b.numerator
        else:
            denominator *= b
        return numerator, denominator

    @property
    def numerator(self):
        # Reduced numerator; carries the fraction's sign.
        return self.__numerator

    @property
    def denominator(self):
        # Reduced denominator.
        return self.__denominator

    @property
    def value(self):
        # Floating-point value of the fraction.
        return self.__numerator / (self.__denominator * 1.0)

    @property
    def quotient(self):
        # Floor quotient of numerator by denominator.
        return self.__numerator // self.__denominator

    @property
    def remainder(self):
        # Remainder of numerator modulo denominator.
        return self.__numerator % self.__denominator

    def __str__(self):
        # Integral values print without the "/1" suffix.
        if 1 == self.__denominator:
            return NUMERATOR_STR_FORMAT.format(self.__numerator)
        else:
            return RATIONAL_STR_FORMAT.format(self.__numerator, self.__denominator)

    def __repr__(self):
        return RATIONAL_REPR_FORMAT.format(self.__class__.__name__, self.__numerator, self.__denominator)

    def __float__(self):
        return self.__numerator / (self.__denominator * 1.0)

    def __int__(self):
        return self.__numerator // self.__denominator

    def __neg__(self):
        return Rational(-self.__numerator, self.__denominator)

    def __pos__(self):
        return Rational(self.__numerator, self.__denominator)

    def __abs__(self):
        return Rational(abs(self.__numerator), self.__denominator)

    def __invert__(self):
        # ~r yields the reciprocal of r.
        return Rational(self.__denominator, self.__numerator)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made the class
        # unhashable on Python 3.  Integral values hash like their int so
        # hash() stays consistent with the int-coercing __eq__ below.
        if 1 == self.__denominator:
            return hash(self.__numerator)
        return hash((self.__numerator, self.__denominator))

    # Comparisons cross-multiply the reduced pairs.  BUG FIX: int operands
    # are coerced first; previously `Rational(1, 2) == 1` raised
    # AttributeError because ints lack the mangled denominator attribute.
    def __lt__(self, other):
        other = self._coerce(other)
        return self.__numerator * other.__denominator < self.__denominator * other.__numerator

    def __le__(self, other):
        other = self._coerce(other)
        return self.__numerator * other.__denominator <= self.__denominator * other.__numerator

    def __eq__(self, other):
        other = self._coerce(other)
        return self.__numerator * other.__denominator == self.__denominator * other.__numerator

    def __ne__(self, other):
        other = self._coerce(other)
        return self.__numerator * other.__denominator != self.__denominator * other.__numerator

    def __ge__(self, other):
        other = self._coerce(other)
        return self.__numerator * other.__denominator >= self.__denominator * other.__numerator

    def __gt__(self, other):
        other = self._coerce(other)
        return self.__numerator * other.__denominator > self.__denominator * other.__numerator

    def __add__(self, other):
        """Add a Rational or int; returns a new reduced Rational."""
        if not isinstance(other, Rational) and not isinstance(other, int):
            raise TypeError(SECOND_TERM_TYPE_ERROR_MESSAGE)
        if isinstance(other, int):
            other = Rational(other)
        numerator = self.__numerator * other.__denominator + self.__denominator * other.__numerator
        denominator = self.__denominator * other.__denominator
        return Rational(numerator, denominator)

    def __sub__(self, other):
        """Subtract a Rational or int; returns a new reduced Rational."""
        if not isinstance(other, Rational) and not isinstance(other, int):
            raise TypeError(SECOND_TERM_TYPE_ERROR_MESSAGE)
        if isinstance(other, int):
            other = Rational(other)
        numerator = self.__numerator * other.__denominator - self.__denominator * other.__numerator
        denominator = self.__denominator * other.__denominator
        return Rational(numerator, denominator)

    def __mul__(self, other):
        """Multiply by a Rational or int; returns a new reduced Rational."""
        if not isinstance(other, Rational) and not isinstance(other, int):
            raise TypeError(SECOND_TERM_TYPE_ERROR_MESSAGE)
        if isinstance(other, int):
            other = Rational(other)
        numerator = self.__numerator * other.__numerator
        denominator = self.__denominator * other.__denominator
        return Rational(numerator, denominator)

    def __truediv__(self, other):
        """Divide by a non-zero Rational or int; returns a new reduced Rational.

        :raises ZeroDivisionError: when the divisor is zero
        """
        if not isinstance(other, Rational) and not isinstance(other, int):
            raise TypeError(SECOND_TERM_TYPE_ERROR_MESSAGE)
        if isinstance(other, int):
            other = Rational(other)
        if 0 == other.__numerator:
            raise ZeroDivisionError(DIVIDER_ZERO_DIVISION_ERROR_MESSAGE)
        numerator = self.__numerator * other.__denominator
        denominator = self.__denominator * other.__numerator
        return Rational(numerator, denominator)

    def __div__(self, other):
        # Python 2 division protocol; delegates to __truediv__.
        return self.__truediv__(other)

    def __pow__(self, power):
        """Raise to an integer power (negative powers take the reciprocal).

        :raises TypeError: when power is not an int
        :raises ZeroDivisionError: when raising zero to a negative power
        """
        if not isinstance(power, int):
            raise TypeError(POWER_TYPE_ERROR_MESSAGE)
        if 0 > power and 0 == self.__numerator:
            raise ZeroDivisionError(ZERO_TO_NEGATIVE_POWER_ZERO_DIVISION_ERROR_MESSAGE)
        # Preserved quirk of the original: 0 ** 0 returns 0, not 1.
        if 0 == power and 0 == self.__numerator:
            return Rational(self.__numerator, self.__denominator)
        if 0 > power:
            numerator = self.__denominator ** -power
            denominator = self.__numerator ** -power
        else:
            numerator = self.__numerator ** power
            denominator = self.__denominator ** power
        return Rational(numerator, denominator)

    def __radd__(self, other):
        """int + Rational."""
        if not isinstance(other, int):
            raise TypeError(FIRST_TERM_TYPE_ERROR_MESSAGE)
        else:
            return Rational(other) + self

    def __rsub__(self, other):
        """int - Rational."""
        if not isinstance(other, int):
            raise TypeError(FIRST_TERM_TYPE_ERROR_MESSAGE)
        else:
            return Rational(other) - self

    def __rmul__(self, other):
        """int * Rational."""
        if not isinstance(other, int):
            raise TypeError(FIRST_TERM_TYPE_ERROR_MESSAGE)
        else:
            return Rational(other) * self

    def __rtruediv__(self, other):
        """int / Rational."""
        if not isinstance(other, int):
            raise TypeError(FIRST_TERM_TYPE_ERROR_MESSAGE)
        else:
            return Rational(other) / self

    def __rdiv__(self, other):
        # Python 2 reflected-division protocol; delegates to __rtruediv__.
        return self.__rtruediv__(other)

    def __rpow__(self, power):
        """power ** self; evaluates via float(self) so the result is a float.

        :raises ValueError: when a negative base meets a fractional exponent
        """
        if 0 > power and 1 != self.denominator:
            raise ValueError(NEGATIVE_INTEGER_TO_FRACTIONAL_POWER_ERROR_MESSAGE)
        return power ** self.value
| {
"repo_name": "daniel-dinu/rational-python",
"path": "rational/rational.py",
"copies": "1",
"size": "8078",
"license": "mit",
"hash": -7057385504997498000,
"line_mean": 32.3801652893,
"line_max": 110,
"alpha_frac": 0.6257737064,
"autogenerated": false,
"ratio": 4.433589462129528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009723763948571812,
"num_lines": 242
} |
__author__ = 'Daniel Dittenhafer'
__date__ = 'Oct 25, 2015'
__version__ = 1.0
# Based in part on: https://github.com/dwdii/IS602-AdvProgTech/blob/master/Lesson12/hw12_dittenhafer.ipynb
from zipline.api import order_target, record, symbol, history, add_history
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_bars_from_yahoo
import matplotlib.pyplot as plt
import pandas as pd
import pytz
from datetime import datetime
import kellyCriterion
class MonteCarloTradingAlgorithm(TradingAlgorithm):
    """Zipline algorithm that sizes positions with the Kelly criterion.

    Each bar it runs Monte Carlo random walks over recent daily price
    differences to predict the next price, then targets the Kelly fraction
    of the portfolio based on that prediction.
    """

    def initialize(self):
        """Zipline hook: set up algorithm state before the backtest starts."""
        self.i = 0  # bars processed so far; used to warm up the history window
        self.kelly = kellyCriterion.KellyCriterion()
        self.mcHistoryDays = 10  # length of the daily price-history window
        self.mcIterations = 100  # number of Monte Carlo walks per bar
        # The number of days in the future to simulate
        self.mcFutureDays = 1
        self.add_history(self.mcHistoryDays, '1d', 'price')
        # Need to manually specify the analyze in this mode of execution.
        # It would come for free if using the run_algo.py CLI.
        #self._analyze = self.analyze

    def monteCarloIteration(self, mean, std, start):
        """Run one Gaussian random walk of mcFutureDays steps from *start*.

        Daily increments are drawn from N(mean, std); the final price of
        the walk is returned.
        """
        import random
        sample = list()
        for i in range(0, self.mcFutureDays, 1):
            sample.append(random.gauss(mean, std))
        curPrice = start
        walk = list()
        for d in sample:
            newPrice = curPrice + d
            curPrice = newPrice
            walk.append(curPrice)
        return walk[-1]

    def _handle_data(self, context, data):
        """
        Overloading the _handle_data method. It must be _handle_data (with leading underscore), not handle_data,
        in order to take advantage of base class's history container auto updates, which we use in the history call below.
        :param context: TradingAlogorithm base class passes in an extra self so we are calling this context
        :param data: The data.
        :return:
        """
        # Skip first X days to get full windows
        self.i += 1
        if self.i < self.mcHistoryDays:
            return
        # What day are we currently processing?
        #print(self.datetime)
        sym = symbol(eqSymbol)  # NOTE(review): eqSymbol is a module global set in __main__
        # Compute averages
        # history() has to be called with the same params
        # from above and returns a pandas dataframe.
        histData = self.history(self.mcHistoryDays, '1d', 'price')
        curPrice = histData[sym][-1]
        priceDiffs = histData[sym].diff()  # day-over-day price changes
        meanDiff = priceDiffs.mean()
        sdDiff = priceDiffs.std()
        mcResults = list()
        for i in range(0, self.mcIterations, 1):
            res = self.monteCarloIteration(meanDiff, sdDiff, curPrice)
            mcResults.append(res)
        # Convert to a pandas series so we can use the statistics functions.
        mcResultsPd = pd.Series(mcResults)
        # What is the price we predict for tomorrow?
        # Using some summary statistic of the individual Monte Carlo iteration results.
        predictedPrice = mcResultsPd.mean()
        wagerFrac = self.kelly.WagerFraction(priceDiffs, curPrice, predictedPrice)
        shares = (self.portfolio.cash * wagerFrac) / curPrice  # NOTE(review): computed but unused
        # this function auto balances our cash/stock mix based on a fractional amount we input.
        # anything outside the range of [-1.0, 1.0] will utilize financial leverage
        self.order_target_percent(sym,wagerFrac)
        # Save values for later inspection
        self.record(eqSymbol, data[sym].price,
                    'mc_price', predictedPrice)
        print(context.portfolio.portfolio_value)
def analyze(context, perf):
    """Plot the backtest: portfolio value, and actual vs. predicted price.

    :param context: algorithm context (unused here)
    :param perf: performance DataFrame returned by TradingAlgorithm.run()
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(211)
    perf.portfolio_value.plot(ax=ax1)
    ax1.set_ylabel('portfolio value in $')
    ax2 = fig.add_subplot(212)
    perf[eqSymbol].plot(ax=ax2)  # actual price series (eqSymbol is a module global)
    perf[['mc_price']].plot(ax=ax2)  # Monte Carlo predicted price recorded each bar
    ax2.set_ylabel('price in $')
    plt.legend(loc=0)
    plt.show()
if __name__ == "__main__":
# Load data manually from Yahoo! finance
eqSymbol = 'YHOO'
start = datetime(2010, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=[eqSymbol], start=start,
end=end)
# Create algorithm object
algo_obj = MonteCarloTradingAlgorithm()
# Run algorithm
perf_manual = algo_obj.run(data)
#print(perf_manual)
#print(perf_manual.ending_value[-1])
| {
"repo_name": "dwdii/stockyPuck",
"path": "src/zL_monteCarlo.py",
"copies": "1",
"size": "4416",
"license": "mit",
"hash": 7561755056009801000,
"line_mean": 31.7111111111,
"line_max": 122,
"alpha_frac": 0.6324728261,
"autogenerated": false,
"ratio": 3.7109243697478993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98301722443082,
"avg_score": 0.0026449903079399607,
"num_lines": 135
} |
__author__ = 'Daniel Dittenhafer'
__version__ = '0.1'
__date__ = '2015-03-23'
import pandas as pd
import gviz_data_table as gv
import numpy
import os
from datetime import datetime
def ToGvizDataTable(dataframe):
    """Convert a pandas DataFrame into a gviz_data_table Table.

    Column dtypes are mapped to Python types for the gviz schema; NaN
    floats become None, longs become ints and Timestamps become plain
    dates (midnight datetimes).

    NOTE(review): uses the Python 2-only ``long`` builtin and
    ``pd.tslib.Timestamp`` (removed in modern pandas), so this function
    only runs on Python 2 with an old pandas.
    """
    table = gv.Table()
    dt = dataframe.dtypes
    # Loop to add the columns from the dataframe to the gviz datatable
    for col in dataframe.columns.values:
        gvdt = dt[col]
        if dt[col] == object:
            # Skip
            gvdt = str
        elif dt[col] == "float64":
            gvdt = float
        elif dt[col] == "datetime64[ns]":
            gvdt = datetime
        elif dt[col] == "int64":
            gvdt = int
        # If a datatype was specified, then add the column
        # (gvdt is pre-initialised to the dtype, so this is always truthy
        # and every column ends up added)
        if gvdt != None:
            table.add_column(col, gvdt)
    for row in dataframe.iterrows():
        vals = row[1].values
        newVals = []
        # Normalise each cell value for the gviz encoder.
        for v in vals:
            nv = v
            if (type(v) is float) and numpy.isnan(v):
                nv = None
            elif type(v) is long:
                nv = int(v)
            elif type(v) is pd.tslib.Timestamp:
                nv = datetime(v.year, v.month, v.day)
            newVals.append(nv)
        table.append(newVals)
    return table
def parseDateYearMonth(year, month):
    """
    Helper method for converting individual year and month columns into a first of the month date.

    String values are converted to ints first; if either value is NaN the
    row has no usable date and None is returned instead.

    :param year: year as int, NaN float, or numeric string
    :param month: month as int, NaN float, or numeric string
    :return: the first of the month, year datetime object, or None when NaN
    """
    if type(year) is str:
        year = int(year)
    if type(month) is str:
        month = int(month)
    # NaN marks a missing year/month pair in the source data.
    if numpy.isnan(year) or numpy.isnan(month):
        return None
    return datetime(year, month, 1)
def main():
    """Our cheap unit test main function.

    For each input CSV, aggregates births over the configured grouping
    columns and writes the result next to the input as a Google-Charts
    JSON file.  NOTE(review): uses dict.iteritems(), so Python 2 only;
    input paths are hard-coded Windows paths.
    """
    # Earlier single-file inputs kept for reference:
    #dataFile = "C:\Code\R\IS608-VizAnalytics\FinalProject\Data\Natality, 2007-2013-StateCounty.txt"
    #dataFile = "C:\Code\R\IS608-VizAnalytics\FinalProject\Data\LA-Natality-Combined.csv"
    #dataFile = "C:\Code\R\IS608-VizAnalytics\FinalProject\Data\LA-Natality-Census-Combined.csv"
    #dataFile = "C:\Code\R\IS608-VizAnalytics\FinalProject\Data\LA-Natality-Census-Age-Combined.csv"
    # Map: input CSV path -> columns to group by when summing births.
    fileFieldMap = {
        "C:\Code\R\IS608-VizAnalytics\FinalProject\Data\LA-Natality-Census-Combined.csv" : ["State", "Date", "UnemploymentRate", "BirthsPer1000Pop"],
        "C:\Code\R\IS608-VizAnalytics\FinalProject\Data\LA-Natality-Census-Age-Combined.csv" : ["Age.of.Mother", "Date", "UnemploymentRate", "BirthsPer1000Pop"]} # "StateAgeOfMother", "State"
    for k, v in fileFieldMap.iteritems():
        # Load the data and prep the Date column
        data = pd.read_table(k, sep=",",
                             parse_dates={'Date': ["Year.Code", "Month.Code"]}, date_parser=parseDateYearMonth)
        data["Date"] = pd.to_datetime(data["Date"])
        # Aggregate the births
        dataStateSum = data.groupby(v)["Births"].sum().reset_index()
        #dataStateSum = data.reset_index()
        print(dataStateSum.head())
        # Call our helper function
        dt = ToGvizDataTable(dataStateSum)
        # Convert to the JSON encoding
        dtJson = dt.encode()
        # Save to a file
        with open(os.path.splitext(k)[0] + ".json", "w") as text_file:
            text_file.write(dtJson)
# Script entry point: only run main() when executed directly, not on import.
if __name__ == "__main__":
    main()
| {
"repo_name": "dwdii/PanGviz",
"path": "pangviz.py",
"copies": "1",
"size": "3366",
"license": "mit",
"hash": -3960068043803422000,
"line_mean": 32,
"line_max": 195,
"alpha_frac": 0.5971479501,
"autogenerated": false,
"ratio": 3.39656912209889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.449371707219889,
"avg_score": null,
"num_lines": null
} |
__author__ = "Daniel Fath <daniel DOT fath 7 AT gmail DOT com>"
__version__ = "0.1"
from setuptools import setup
# Package metadata passed to setup() below.
NAME = 'DOMMLite parser and utilities'
VERSION = __version__
DESC = 'DOMM Parser for master thesis'
AUTHOR = 'Daniel Fath'
AUTHOR_EMAIL = 'daniel DOT fath 7 AT gmail DOT com'
LICENCE = 'MIT'
URL = 'https://github.com/danielfath/master'
# Register the package with setuptools.
setup(
    name = NAME,
    version = VERSION,
    description = DESC,
    author = AUTHOR,
    author_email = AUTHOR_EMAIL,
    maintainer = AUTHOR,
    maintainer_email = AUTHOR_EMAIL,
    license = LICENCE,
    url = URL,
    packages = ["domm"],
    keywords = "parser dommlite utils graphic",
    classifiers=[
        # FIX: '1 - Beta' is not a valid trove classifier (status 1 is
        # 'Planning'); Beta is development status 4.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'Topic :: Software Development :: Interpreters',
        'Topic :: Software Development :: Compilers',
        # BUG FIX: a missing trailing comma here silently concatenated this
        # string with the License classifier into one bogus entry.
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ]
)
| {
"repo_name": "Ygg01/master",
"path": "domm/setup.py",
"copies": "1",
"size": "1191",
"license": "mit",
"hash": -1639904732898437600,
"line_mean": 29.5384615385,
"line_max": 70,
"alpha_frac": 0.6330814442,
"autogenerated": false,
"ratio": 3.983277591973244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5116359036173244,
"avg_score": null,
"num_lines": null
} |
__author__ = "Daniel Gaspar"
import logging
from flask import flash, redirect, request, session, url_for
from flask_babel import lazy_gettext
from .forms import LoginForm_oid, RegisterUserDBForm, RegisterUserOIDForm
from .. import const as c
from .._compat import as_unicode
from ..validators import Unique
from ..views import expose, PublicFormView
log = logging.getLogger(__name__)
def get_first_last_name(fullname):
    """Split a full name into a (first_name, last_name) tuple.

    The first whitespace-separated token is the first name; everything
    after it is joined into the last name.

    BUG FIX: the original implicitly returned None for an empty or
    whitespace-only name, which crashed callers that tuple-unpack the
    result (e.g. RegisterUserOIDView.form_oid_post); now always returns
    a 2-tuple, ("", "") in that case.
    """
    names = fullname.split()
    if len(names) > 1:
        return names[0], " ".join(names[1:])
    elif names:
        return names[0], ""
    return "", ""
class BaseRegisterUser(PublicFormView):
    """
    Make your own user registration view and inherit from this class if you
    want to implement a completely different registration process. If not,
    just inherit from RegisterUserDBView or RegisterUserOIDView depending on
    your authentication method.
    Then override the SecurityManager property that defines the class to use::

        from flask_appbuilder.security.registerviews import RegisterUserDBView

        class MyRegisterUserDBView(BaseRegisterUser):
            email_template = 'register_mail.html'
            ...

        class MySecurityManager(SecurityManager):
            registeruserdbview = MyRegisterUserDBView

    When instantiating AppBuilder set your own SecurityManager class::

        appbuilder = AppBuilder(
            app,
            db.session,
            security_manager_class=MySecurityManager
        )
    """

    route_base = "/register"

    email_template = "appbuilder/general/security/register_mail.html"
    """ The template used to generate the email sent to the user """
    email_subject = lazy_gettext("Account activation")
    """ The email subject sent to the user """
    activation_template = "appbuilder/general/security/activation.html"
    """ The activation template, shown when the user is activated """
    message = lazy_gettext("Registration sent to your email")
    """ The message shown on a successful registration """
    error_message = lazy_gettext(
        "Not possible to register you at the moment, try again later"
    )
    """ The message shown on an unsuccessful registration """
    false_error_message = lazy_gettext("Registration not found")
    """ The message shown on an unsuccessful registration """
    form_title = lazy_gettext("Fill out the registration form")
    """ The form title """

    def send_email(self, register_user):
        """
        Method for sending the registration Email to the user.

        Renders ``email_template`` with an activation link and mails it to
        the pending user's address.  Returns True on success; returns
        False (and logs) when Flask-Mail is missing or sending fails.
        """
        try:
            from flask_mail import Mail, Message
        except Exception:
            log.error("Install Flask-Mail to use User registration")
            return False
        mail = Mail(self.appbuilder.get_app)
        msg = Message()
        msg.subject = self.email_subject
        # Build an absolute URL so the link works from the mail client.
        url = url_for(
            ".activation",
            _external=True,
            activation_hash=register_user.registration_hash,
        )
        msg.html = self.render_template(
            self.email_template,
            url=url,
            username=register_user.username,
            first_name=register_user.first_name,
            last_name=register_user.last_name,
        )
        msg.recipients = [register_user.email]
        try:
            mail.send(msg)
        except Exception as e:
            log.error("Send email exception: {0}".format(str(e)))
            return False
        return True

    def add_registration(self, username, first_name, last_name, email, password=""):
        """
        Add a registration request for the user.

        Flashes a status message; if the activation email cannot be sent,
        the pending registration is deleted again so the user can retry.

        :rtype : RegisterUser
        """
        register_user = self.appbuilder.sm.add_register_user(
            username, first_name, last_name, email, password
        )
        if register_user:
            if self.send_email(register_user):
                flash(as_unicode(self.message), "info")
                return register_user
            else:
                flash(as_unicode(self.error_message), "danger")
                # Roll back the pending registration on email failure.
                self.appbuilder.sm.del_register_user(register_user)
                return None

    @expose("/activation/<string:activation_hash>")
    def activation(self, activation_hash):
        """
        Endpoint to expose an activation url, this url
        is sent to the user by email, when accessed the user is inserted
        and activated
        """
        reg = self.appbuilder.sm.find_register_user(activation_hash)
        if not reg:
            # Unknown or already-consumed activation hash.
            log.error(c.LOGMSG_ERR_SEC_NO_REGISTER_HASH.format(activation_hash))
            flash(as_unicode(self.false_error_message), "danger")
            return redirect(self.appbuilder.get_url_for_index)
        if not self.appbuilder.sm.add_user(
            username=reg.username,
            email=reg.email,
            first_name=reg.first_name,
            last_name=reg.last_name,
            role=self.appbuilder.sm.find_role(
                self.appbuilder.sm.auth_user_registration_role
            ),
            hashed_password=reg.password,
        ):
            flash(as_unicode(self.error_message), "danger")
            return redirect(self.appbuilder.get_url_for_index)
        else:
            # User created; drop the pending-registration row and confirm.
            self.appbuilder.sm.del_register_user(reg)
            return self.render_template(
                self.activation_template,
                username=reg.username,
                first_name=reg.first_name,
                last_name=reg.last_name,
                appbuilder=self.appbuilder,
            )

    def add_form_unique_validations(self, form):
        """Attach Unique validators for username and email against both the
        user table and the pending-registration table.

        The len() guards keep validators from being appended twice when the
        same form instance passes through here more than once.
        """
        datamodel_user = self.appbuilder.sm.get_user_datamodel
        datamodel_register_user = self.appbuilder.sm.get_register_user_datamodel
        if len(form.username.validators) == 1:
            form.username.validators.append(Unique(datamodel_user, "username"))
            form.username.validators.append(Unique(datamodel_register_user, "username"))
        if len(form.email.validators) == 2:
            form.email.validators.append(Unique(datamodel_user, "email"))
            form.email.validators.append(Unique(datamodel_register_user, "email"))
class RegisterUserDBView(BaseRegisterUser):
    """
    View for Registering a new user, auth db mode
    """

    form = RegisterUserDBForm
    """ The WTForm form presented to the user to register himself """
    redirect_url = "/"

    def form_get(self, form):
        """Attach uniqueness validators before the form is rendered."""
        self.add_form_unique_validations(form)

    def form_post(self, form):
        """Handle the submitted form: validate uniqueness and queue the registration email."""
        self.add_form_unique_validations(form)
        self.add_registration(
            username=form.username.data,
            first_name=form.first_name.data,
            last_name=form.last_name.data,
            email=form.email.data,
            password=form.password.data,
        )
class RegisterUserOIDView(BaseRegisterUser):
    """
    View for Registering a new user, auth OID mode.

    Flow: the visitor submits an OpenID URL, the provider redirects back to
    this same view (``openid_complete``), ``after_login`` stashes the OpenID
    response in the session, and the second pass pre-fills the registration
    form from that response.
    """

    route_base = "/register"
    form = RegisterUserOIDForm
    default_view = "form_oid_post"

    @expose("/formoidone", methods=["GET", "POST"])
    def form_oid_post(self, flag=True):
        # First pass (flag=True): let the OpenID machinery intercept the
        # provider's redirect; it re-enters this view with flag=False.
        if flag:
            self.oid_login_handler(self.form_oid_post, self.appbuilder.sm.oid)
        form = LoginForm_oid()
        if form.validate_on_submit():
            session["remember_me"] = form.remember_me.data
            # Hand off to the OpenID provider, asking for email + fullname.
            return self.appbuilder.sm.oid.try_login(
                form.openid.data, ask_for=["email", "fullname"]
            )
        # ``after_login`` put the provider's response here; pop so the key
        # is consumed exactly once.
        resp = session.pop("oid_resp", None)
        if resp:
            self._init_vars()
            form = self.form.refresh()
            self.form_get(form)
            # NOTE(review): the username is pre-filled with the email
            # address — presumably intentional for OID accounts; confirm.
            form.username.data = resp.email
            first_name, last_name = get_first_last_name(resp.fullname)
            form.first_name.data = first_name
            form.last_name.data = last_name
            form.email.data = resp.email
            widgets = self._get_edit_widget(form=form)
            # self.update_redirect()
            return self.render_template(
                self.form_template,
                title=self.form_title,
                widgets=widgets,
                form_action="form",
                appbuilder=self.appbuilder,
            )
        else:
            flash(as_unicode(self.error_message), "warning")
            return redirect(self.get_redirect())

    def oid_login_handler(self, f, oid):
        """
        Hackish method to make use of oid.login_handler decorator.

        Replays the logic of ``flask_openid``'s ``login_handler`` inline:
        if this request is not the provider's completion redirect, call
        ``f(False)``; otherwise complete the OpenID handshake and dispatch
        on its status.
        """
        from flask_openid import OpenIDResponse, SessionWrapper
        from openid.consumer.consumer import CANCEL, Consumer, SUCCESS

        if request.args.get("openid_complete") != u"yes":
            return f(False)
        consumer = Consumer(SessionWrapper(self), oid.store_factory())
        openid_response = consumer.complete(
            request.args.to_dict(), oid.get_current_url()
        )
        if openid_response.status == SUCCESS:
            return self.after_login(OpenIDResponse(openid_response, []))
        elif openid_response.status == CANCEL:
            oid.signal_error(u"The request was cancelled")
            return redirect(oid.get_current_url())
        oid.signal_error(u"OpenID authentication error")
        return redirect(oid.get_current_url())

    def after_login(self, resp):
        """
        Method that adds the return OpenID response object on the session;
        this session key will be deleted (popped by ``form_oid_post``).
        """
        session["oid_resp"] = resp

    def form_get(self, form):
        # Ensure uniqueness validators are present before rendering.
        self.add_form_unique_validations(form)

    def form_post(self, form):
        # No password field here: OID-registered users authenticate via
        # their provider, so only identity fields are stored.
        self.add_registration(
            username=form.username.data,
            first_name=form.first_name.data,
            last_name=form.last_name.data,
            email=form.email.data,
        )
class RegisterUserOAuthView(BaseRegisterUser):
    """
    View for Registering a new user, auth OAuth mode.

    Reuses ``RegisterUserOIDForm`` since the collected fields are the same
    (no password: the OAuth provider owns authentication).
    """

    form = RegisterUserOIDForm

    def form_get(self, form):
        self.add_form_unique_validations(form)
        # fills the register form with the collected data from OAuth,
        # passed along as query-string arguments; default to empty strings.
        form.username.data = request.args.get("username", "")
        form.first_name.data = request.args.get("first_name", "")
        form.last_name.data = request.args.get("last_name", "")
        form.email.data = request.args.get("email", "")

    def form_post(self, form):
        log.debug("Adding Registration")
        # Identity fields only; see class docstring for why no password.
        self.add_registration(
            username=form.username.data,
            first_name=form.first_name.data,
            last_name=form.last_name.data,
            email=form.email.data,
        )
| {
"repo_name": "dpgaspar/Flask-AppBuilder",
"path": "flask_appbuilder/security/registerviews.py",
"copies": "1",
"size": "10650",
"license": "bsd-3-clause",
"hash": -8261469578952163000,
"line_mean": 35.3481228669,
"line_max": 88,
"alpha_frac": 0.6057276995,
"autogenerated": false,
"ratio": 4.194564789287121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002871470427226567,
"num_lines": 293
} |
__author__ = 'Daniel Greenfeld'
__version__ = "0.1.0"
import math
class String(str):
    """Adding critically unimportant functionality to Python's str type.

    Treats the text as a rectangle of lines, exposing ``width``/``height``/
    ``area`` alongside several aliases for the plain length.
    """

    def len(self):
        """Return the string's length, method-call style."""
        return self.__len__()

    @property
    def length(self):
        """Length of the string (alias for ``len()``)."""
        return self.len()

    @property
    def size(self):
        """Alias for :attr:`length`."""
        return self.length

    @property
    def width(self):
        """Length of the longest line, or 0 for an empty string.

        Bug fix: ``max()`` over an empty sequence raises ``ValueError``,
        so an empty string used to crash here; seeding the candidates
        with ``[0]`` makes ``String("").width == 0`` instead.
        """
        return max([len(line) for line in self.splitlines()] or [0])

    @property
    def height(self):
        """Number of lines (``splitlines`` count; 0 for an empty string)."""
        return len(self.splitlines())

    @property
    def area(self):
        """``height * width`` of the rectangle of text."""
        return self.height * self.width
class ConqueringString(String):
    """Adding stupidly dangerous functionality to Python's str type.

    Caches the length at construction and lets callers *override* it, so
    ``len()``, ``length``, ``size`` and ``area`` can all be made to lie
    while the underlying text is unchanged.
    """

    def __init__(self, text):
        # Bug fix: ``str`` is immutable — its value is fixed by
        # ``str.__new__`` — so the superclass ``__init__`` must be called
        # WITHOUT arguments.  Passing ``text`` through raised
        # "TypeError: object.__init__() takes exactly one argument" on
        # Python 3 (it was only silently ignored on Python 2).
        super(ConqueringString, self).__init__()
        # _length not set yet, so __len__ falls back to the real length.
        self._length = self.__len__()

    def __len__(self):
        # Prefer the (possibly overridden) cached length; before
        # ``_length`` exists, report the real string length.
        try:
            return self._length
        except AttributeError:
            return super(ConqueringString, self).__len__()

    def len(self, value=None):
        """Getter/setter hybrid: no argument reads, an argument overrides."""
        if value is None:
            return self._length
        self._length = value

    @property
    def length(self):
        return self.len()

    @length.setter
    def length(self, value):
        self._length = value

    @property
    def size(self):
        return self.length

    @size.setter
    def size(self, value):
        self.length = value

    @property
    def area(self):
        # Real rectangle area (from String); unaffected by the length lie.
        return self.height * self.width

    @area.setter
    def area(self, value):
        # Pretend the text is a square: length = sqrt(area) (a float!).
        self.length = math.sqrt(value)
if __name__ == "__main__":
    # Smoke-test demo: exercise the length/size/area overrides and show
    # that ordinary str behaviour (slicing, upper) is untouched.
    s = ConqueringString("Hello, World!")
    print(s)
    print(s.length)
    s.length = 5  # lie about the length
    print(s.length)
    print(s.area)  # real height * width, unaffected by the lie
    s.area = 50
    print(s.area)
    print(len(s))  # builtin len() sees the overridden value
    print(s[5:10])  # slicing still works!
    print(s.upper())  # other methods still work!
| {
"repo_name": "pydanny/stringtheory",
"path": "stringtheory.py",
"copies": "1",
"size": "1825",
"license": "mit",
"hash": -1392344524910734600,
"line_mean": 19.9770114943,
"line_max": 76,
"alpha_frac": 0.575890411,
"autogenerated": false,
"ratio": 3.8259958071278826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9898693370107449,
"avg_score": 0.0006385696040868454,
"num_lines": 87
} |
import os.path as op
from copy import deepcopy
from functools import partial
import warnings
import numpy as np
from scipy.io import savemat
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_true, assert_equal
from mne.channels import rename_channels, read_ch_connectivity
from mne.channels.channels import _ch_neighbor_connectivity
from mne.io import read_info, read_raw_fif
from mne.io.constants import FIFF
from mne.utils import _TempDir, run_tests_if_main
from mne import pick_types, pick_channels
# Location of the small raw FIF fixture shipped with the mne.io tests.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
# Surface every warning so the catch_warnings blocks below record them all.
warnings.simplefilter('always')
def test_rename_channels():
    """Test rename channels.

    Covers error handling plus successful renames via a mapping dict and a
    callable.  The hard-coded indices (373-375) refer to channel positions
    in the ``test_raw.fif`` fixture.
    """
    info = read_info(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test improper mapping configuration
    mapping = {'MEG 2641': 1.0}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test non-unique mapping configuration
    mapping = {'MEG 2641': 'MEG 2642'}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test bad input
    assert_raises(ValueError, rename_channels, info, 1.)
    # Test successful changes
    # Test ch_name and ch_names are changed
    info2 = deepcopy(info)  # for consistency at the start of each test
    info2['bads'] = ['EEG 060', 'EOG 061']
    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
    rename_channels(info2, mapping)
    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
    assert_true(info2['ch_names'][374] == 'EEG060')
    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
    assert_true(info2['ch_names'][375] == 'EOG061')
    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
    # Renaming via a callable applied to every channel name.
    info2 = deepcopy(info)
    rename_channels(info2, lambda x: x.replace(' ', ''))
    assert_true(info2['chs'][373]['ch_name'] == 'EEG059')
    # Duplicated bads entries must both be renamed.
    info2 = deepcopy(info)
    info2['bads'] = ['EEG 060', 'EEG 060']
    rename_channels(info2, mapping)
    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
def test_set_channel_types():
    """Test set_channel_types.

    Checks error paths, then verifies kind/unit/coil_type after retyping a
    batch of channels.  Indices 372-375 refer to channel positions in the
    ``test_raw.fif`` fixture.
    """
    raw = read_raw_fif(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog',
               'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg',
               'MEG 2442': 'hbo'}
    assert_raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = read_raw_fif(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    with warnings.catch_warnings(record=True):  # MEG channel change
        assert_raises(RuntimeError, raw2.set_channel_types, mapping)  # has prj
    raw2.add_proj([], remove_existing=True)
    # Retyping emits "The unit for channel ..." warnings; collect them all.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw2.set_channel_types(mapping)
    assert_true(len(w) >= 1, msg=[str(ww.message) for ww in w])
    assert_true(all('The unit for channel' in str(ww.message) for ww in w))
    info = raw2.info
    assert_true(info['chs'][372]['ch_name'] == 'EEG 058')
    assert_true(info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH)
    assert_true(info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH)
        assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V)
        assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    idx = pick_channels(raw.ch_names, ['MEG 2442'])[0]
    assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH)
    assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL)
    assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO)
    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    assert_raises(ValueError, raw.set_channel_types, ch_types)
def test_read_ch_connectivity():
    """Test reading channel connectivity templates.

    Builds a tiny FieldTrip-style ``neighbours`` struct, saves it as a .mat
    file, and checks the parsed adjacency matrix, the ``picks`` argument,
    validation of malformed neighbour lists, and a built-in template.
    """
    tempdir = _TempDir()
    a = partial(np.array, dtype='<U7')
    # no pep8
    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
                     (['MEG0121'], [[a(['MEG0111'])],
                                    [a(['MEG0131'])]]),
                     (['MEG0131'], [[a(['MEG0111'])],
                                    [a(['MEG0121'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_connectivity, ch_names = read_ch_connectivity(mat_fname)
    x = ch_connectivity
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (3, 3))
    # MEG0111 neighbours MEG0131 (index 2) but not MEG0121 (index 1).
    assert_equal(x[0, 1], False)
    assert_equal(x[0, 2], True)
    assert_true(np.all(x.diagonal()))
    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
    ch_connectivity, ch_names = read_ch_connectivity(mat_fname, picks=[0, 2])
    assert_equal(ch_connectivity.shape[0], 2)
    assert_equal(len(ch_names), 2)
    # Malformed neighbour specs must raise: unknown channel, length
    # mismatch, and a non-list entry.
    ch_names = ['EEG01', 'EEG02', 'EEG03']
    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names[:2],
                  neighbors)
    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
    # Built-in template lookup by name.
    connectivity, ch_names = read_ch_connectivity('neuromag306mag')
    assert_equal(connectivity.shape, (102, 102))
    assert_equal(len(ch_names), 102)
    assert_raises(ValueError, read_ch_connectivity, 'bananas!')
def test_get_set_sensor_positions():
    """Test get/set functions for sensor positions.

    Round-trips EEG sensor locations through the private
    ``_get_channel_positions`` / ``_set_channel_positions`` helpers.
    """
    raw1 = read_raw_fif(raw_fname)
    picks = pick_types(raw1.info, meg=False, eeg=True)
    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
    raw_pos = raw1._get_channel_positions(picks=picks)
    assert_array_equal(raw_pos, pos)
    ch_name = raw1.info['ch_names'][13]
    # Length mismatch between positions and names must raise.
    assert_raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
    raw2 = read_raw_fif(raw_fname)
    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
    assert_array_equal(raw1.info['chs'][13]['loc'],
                       raw2.info['chs'][13]['loc'])

run_tests_if_main()
| {
"repo_name": "nicproulx/mne-python",
"path": "mne/channels/tests/test_channels.py",
"copies": "2",
"size": "7930",
"license": "bsd-3-clause",
"hash": -4033417694577686000,
"line_mean": 42.8121546961,
"line_max": 79,
"alpha_frac": 0.6247162673,
"autogenerated": false,
"ratio": 3.0476556495003844,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46723719168003847,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from copy import deepcopy
from functools import partial
import pytest
import numpy as np
from scipy.io import savemat
from numpy.testing import assert_array_equal, assert_equal
from mne.channels import (rename_channels, read_ch_adjacency, combine_channels,
find_ch_adjacency, make_1020_channel_selections,
read_custom_montage, equalize_channels)
from mne.channels.channels import (_ch_neighbor_adjacency,
_compute_ch_adjacency)
from mne.io import (read_info, read_raw_fif, read_raw_ctf, read_raw_bti,
read_raw_eeglab, read_raw_kit, RawArray)
from mne.io.constants import FIFF
from mne.utils import _TempDir, run_tests_if_main
from mne import (pick_types, pick_channels, EpochsArray, EvokedArray,
make_ad_hoc_cov, create_info, read_events, Epochs)
from mne.datasets import testing
# Locations of the small on-disk fixtures shipped with the mne.io tests.
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
eve_fname = op.join(base_dir, 'test-eve.fif')  # was a stray "op .join"
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
def test_reorder_channels():
    """Test reordering of channels.

    Reversing the channel order must reverse the data rows; a second
    reorder that drops the outer channels must match an explicit drop.
    """
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1)
    raw_new = raw.copy().reorder_channels(raw.ch_names[::-1])
    assert_array_equal(raw[:][0], raw_new[:][0][::-1])
    raw_new.reorder_channels(raw_new.ch_names[::-1][1:-1])
    raw.drop_channels(raw.ch_names[:1] + raw.ch_names[-1:])
    assert_array_equal(raw[:][0], raw_new[:][0])
    # Repeated channel names in the requested order must raise.
    with pytest.raises(ValueError, match='repeated'):
        raw.reorder_channels(raw.ch_names[:1] + raw.ch_names[:1])
def test_rename_channels():
    """Test rename channels.

    Covers error handling (bad name, bad mapping type, non-unique target,
    bad input type, over-long name) plus successful renames via a mapping
    dict and a callable.  Indices 373-375 refer to channel positions in
    the ``test_raw.fif`` fixture.
    """
    info = read_info(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    pytest.raises(ValueError, rename_channels, info, mapping)
    # Test improper mapping configuration
    mapping = {'MEG 2641': 1.0}
    pytest.raises(TypeError, rename_channels, info, mapping)
    # Test non-unique mapping configuration
    mapping = {'MEG 2641': 'MEG 2642'}
    pytest.raises(ValueError, rename_channels, info, mapping)
    # Test bad input (an accidental byte-for-byte duplicate of this call
    # was removed here -- it added nothing).
    pytest.raises(ValueError, rename_channels, info, 1.)
    # Test name too long (channel names must be less than 15 characters)
    A16 = 'A' * 16
    mapping = {'MEG 2641': A16}
    pytest.raises(ValueError, rename_channels, info, mapping)
    # Test successful changes
    # Test ch_name and ch_names are changed
    info2 = deepcopy(info)  # for consistency at the start of each test
    info2['bads'] = ['EEG 060', 'EOG 061']
    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
    rename_channels(info2, mapping)
    assert info2['chs'][374]['ch_name'] == 'EEG060'
    assert info2['ch_names'][374] == 'EEG060'
    assert info2['chs'][375]['ch_name'] == 'EOG061'
    assert info2['ch_names'][375] == 'EOG061'
    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
    # Renaming via a callable applied to every channel name.
    info2 = deepcopy(info)
    rename_channels(info2, lambda x: x.replace(' ', ''))
    assert info2['chs'][373]['ch_name'] == 'EEG059'
    # Duplicated bads entries must both be renamed.
    info2 = deepcopy(info)
    info2['bads'] = ['EEG 060', 'EEG 060']
    rename_channels(info2, mapping)
    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
def test_set_channel_types():
    """Test set_channel_types.

    Checks error paths, then verifies kind/unit/coil_type after retyping a
    batch of channels.  Indices 372-375 refer to channel positions in the
    ``test_raw.fif`` fixture.
    """
    raw = read_raw_fif(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    pytest.raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    pytest.raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog',
               'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg',
               'MEG 2442': 'hbo'}
    pytest.raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = read_raw_fif(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    pytest.raises(RuntimeError, raw2.set_channel_types, mapping)  # has prj
    raw2.add_proj([], remove_existing=True)
    # Retyping emits "The unit for channel ..." warnings.
    with pytest.warns(RuntimeWarning, match='The unit for channel'):
        raw2 = raw2.set_channel_types(mapping)
    info = raw2.info
    assert info['chs'][372]['ch_name'] == 'EEG 058'
    assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH
    assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG
    assert info['chs'][373]['ch_name'] == 'EEG 059'
    assert info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH
    assert info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE
    assert info['chs'][374]['ch_name'] == 'EEG 060'
    assert info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH
    assert info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE
    assert info['chs'][375]['ch_name'] == 'EOG 061'
    assert info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH
    assert info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH
        assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V
        assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG
    idx = pick_channels(raw.ch_names, ['MEG 2442'])[0]
    assert info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH
    assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL
    assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO
    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    pytest.raises(ValueError, raw.set_channel_types, ch_types)
def test_read_ch_adjacency():
    """Test reading channel adjacency templates.

    Builds small FieldTrip-style ``neighbours`` structs, saves them as
    .mat files, and checks the parsed adjacency matrix, the ``picks``
    argument, validation of malformed neighbour lists, built-in templates,
    isolated sensors, and neighbour-consistency errors.
    """
    tempdir = _TempDir()
    a = partial(np.array, dtype='<U7')
    # no pep8
    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
                     (['MEG0121'], [[a(['MEG0111'])],
                                    [a(['MEG0131'])]]),
                     (['MEG0131'], [[a(['MEG0111'])],
                                    [a(['MEG0121'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_adjacency, ch_names = read_ch_adjacency(mat_fname)
    x = ch_adjacency
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (3, 3))
    # MEG0111 neighbours MEG0131 (index 2) but not MEG0121 (index 1).
    assert_equal(x[0, 1], False)
    assert_equal(x[0, 2], True)
    assert np.all(x.diagonal())
    pytest.raises(ValueError, read_ch_adjacency, mat_fname, [0, 3])
    ch_adjacency, ch_names = read_ch_adjacency(mat_fname, picks=[0, 2])
    assert_equal(ch_adjacency.shape[0], 2)
    assert_equal(len(ch_names), 2)
    # Malformed neighbour specs must raise: unknown channel, length
    # mismatch, and a non-list entry.
    ch_names = ['EEG01', 'EEG02', 'EEG03']
    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
    pytest.raises(ValueError, _ch_neighbor_adjacency, ch_names, neighbors)
    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
    pytest.raises(ValueError, _ch_neighbor_adjacency, ch_names[:2],
                  neighbors)
    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
    pytest.raises(ValueError, _ch_neighbor_adjacency, ch_names, neighbors)
    # Built-in template lookup by name.
    adjacency, ch_names = read_ch_adjacency('neuromag306mag')
    assert_equal(adjacency.shape, (102, 102))
    assert_equal(len(ch_names), 102)
    pytest.raises(ValueError, read_ch_adjacency, 'bananas!')
    # In EGI 256, E31 sensor has no neighbour
    a = partial(np.array)
    nbh = np.array([[(['E31'], []),
                     (['E1'], [[a(['E2'])],
                               [a(['E3'])]]),
                     (['E2'], [[a(['E1'])],
                               [a(['E3'])]]),
                     (['E3'], [[a(['E1'])],
                               [a(['E2'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_isolated_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_adjacency, ch_names = read_ch_adjacency(mat_fname)
    x = ch_adjacency.todense()
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (4, 4))
    assert np.all(x.diagonal())
    # The isolated sensor's row/column must be empty off-diagonal.
    assert not np.any(x[0, 1:])
    assert not np.any(x[1:, 0])
    # Check for neighbours consistency. If a sensor is marked as a neighbour,
    # then it should also have its neighbours defined.
    a = partial(np.array)
    nbh = np.array([[(['E31'], []),
                     (['E1'], [[a(['E8'])],
                               [a(['E3'])]]),
                     (['E2'], [[a(['E1'])],
                               [a(['E3'])]]),
                     (['E3'], [[a(['E1'])],
                               [a(['E2'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_error_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    pytest.raises(ValueError, read_ch_adjacency, mat_fname)
def test_get_set_sensor_positions():
    """Test get/set functions for sensor positions.

    Round-trips EEG sensor locations through the private
    ``_get_channel_positions`` / ``_set_channel_positions`` helpers.
    """
    raw1 = read_raw_fif(raw_fname)
    picks = pick_types(raw1.info, meg=False, eeg=True)
    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
    raw_pos = raw1._get_channel_positions(picks=picks)
    assert_array_equal(raw_pos, pos)
    ch_name = raw1.info['ch_names'][13]
    # Length mismatch between positions and names must raise.
    pytest.raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
    raw2 = read_raw_fif(raw_fname)
    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
    assert_array_equal(raw1.info['chs'][13]['loc'],
                       raw2.info['chs'][13]['loc'])
@testing.requires_testing_data
def test_1020_selection():
    """Test making a 10/20 selection dict."""
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname = op.join(base_dir, 'test_raw.set')
    loc_fname = op.join(base_dir, 'test_chans.locs')
    raw = read_raw_eeglab(raw_fname, preload=True)
    montage = read_custom_montage(loc_fname)
    raw = raw.rename_channels(dict(zip(raw.ch_names, montage.ch_names)))
    raw.set_montage(montage)
    # Only an Info instance is a valid argument.
    for input in ("a_string", 100, raw, [1, 2]):
        pytest.raises(TypeError, make_1020_channel_selections, input)
    sels = make_1020_channel_selections(raw.info)
    # are all frontal channels placed before all occipital channels?
    # NOTE(review): the assertion below checks fs > ps, i.e. the first "F"
    # pick actually comes *after* the last "O" pick, which contradicts the
    # comment above -- confirm the intended ordering.
    for name, picks in sels.items():
        fs = min([ii for ii, pick in enumerate(picks)
                  if raw.ch_names[pick].startswith("F")])
        ps = max([ii for ii, pick in enumerate(picks)
                  if raw.ch_names[pick].startswith("O")])
        assert fs > ps
    # are channels in the correct selection?
    fz_c3_c4 = [raw.ch_names.index(ch) for ch in ("Fz", "C3", "C4")]
    for channel, roi in zip(fz_c3_c4, ("Midline", "Left", "Right")):
        assert channel in sels[roi]
@testing.requires_testing_data
def test_find_ch_adjacency():
    """Test computing the adjacency matrix.

    Exercises ``find_ch_adjacency`` across several acquisition systems
    (Neuromag FIF, BTi, CTF, KIT) with hard-coded expected neighbour
    counts for the bundled fixtures.
    """
    data_path = testing.data_path()
    raw = read_raw_fif(raw_fname, preload=True)
    sizes = {'mag': 828, 'grad': 1700, 'eeg': 384}
    nchans = {'mag': 102, 'grad': 204, 'eeg': 60}
    for ch_type in ['mag', 'grad', 'eeg']:
        conn, ch_names = find_ch_adjacency(raw.info, ch_type)
        # Silly test for checking the number of neighbors.
        assert_equal(conn.getnnz(), sizes[ch_type])
        assert_equal(len(ch_names), nchans[ch_type])
    pytest.raises(ValueError, find_ch_adjacency, raw.info, None)
    # Test computing the conn matrix with gradiometers.
    conn, ch_names = _compute_ch_adjacency(raw.info, 'grad')
    assert_equal(conn.getnnz(), 2680)
    # Test ch_type=None.
    raw.pick_types(meg='mag')
    find_ch_adjacency(raw.info, None)
    bti_fname = op.join(data_path, 'BTi', 'erm_HFH', 'c,rfDC')
    bti_config_name = op.join(data_path, 'BTi', 'erm_HFH', 'config')
    raw = read_raw_bti(bti_fname, bti_config_name, None)
    _, ch_names = find_ch_adjacency(raw.info, 'mag')
    assert 'A1' in ch_names
    ctf_fname = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
    raw = read_raw_ctf(ctf_fname)
    _, ch_names = find_ch_adjacency(raw.info, 'mag')
    assert 'MLC11' in ch_names
    pytest.raises(ValueError, find_ch_adjacency, raw.info, 'eog')
    raw_kit = read_raw_kit(fname_kit_157)
    neighb, ch_names = find_ch_adjacency(raw_kit.info, 'mag')
    assert neighb.data.size == 1329
    assert ch_names[0] == 'MEG 001'
def test_drop_channels():
    """Test if dropping channels works with various arguments."""
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1)
    # Every supported argument type in turn: list, bare str, set.
    for channels in (["MEG 0111"], "MEG 0112", {"MEG 0132", "MEG 0133"}):
        raw.drop_channels(channels)
    # Invalid arguments: a mixed-type list and a plain int.
    for bad_arg in (["MEG 0111", 5], 5):
        pytest.raises(ValueError, raw.drop_channels, bad_arg)
def test_equalize_channels():
    """Test equalizing channels and their ordering."""
    # This function only tests the generic functionality of equalize_channels.
    # Additional tests for each instance type are included in the accompanying
    # test suite for each type.
    # BUG FIX: in the legacy call form ``pytest.raises(exc, func, *args,
    # **kwargs)`` every keyword argument -- including ``match=`` -- is
    # forwarded to *func*, so the error-message pattern was never checked.
    # The context-manager form gives ``match`` to ``pytest.raises`` itself.
    with pytest.raises(TypeError,
                       match='Instances to be modified must be an instance of'):
        equalize_channels(['foo', 'bar'])
    raw = RawArray([[1.], [2.], [3.], [4.]],
                   create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.))
    epochs = EpochsArray([[[1.], [2.], [3.]]],
                         create_info(['CH5', 'CH2', 'CH1'], sfreq=1.))
    cov = make_ad_hoc_cov(create_info(['CH2', 'CH1', 'CH8'], sfreq=1.,
                                      ch_types='eeg'))
    cov['bads'] = ['CH1']
    ave = EvokedArray([[1.], [2.]], create_info(['CH1', 'CH2'], sfreq=1.))
    raw2, epochs2, cov2, ave2 = equalize_channels([raw, epochs, cov, ave],
                                                  copy=True)
    # The Raw object was the first in the list, so should have been used as
    # template for the ordering of the channels. No bad channels should have
    # been dropped.
    assert raw2.ch_names == ['CH1', 'CH2']
    assert_array_equal(raw2.get_data(), [[1.], [2.]])
    assert epochs2.ch_names == ['CH1', 'CH2']
    assert_array_equal(epochs2.get_data(), [[[3.], [2.]]])
    assert cov2.ch_names == ['CH1', 'CH2']
    assert cov2['bads'] == cov['bads']
    assert ave2.ch_names == ave.ch_names
    assert_array_equal(ave2.data, ave.data)
    # All objects should have been copied, except for the Evoked object which
    # did not have to be touched.
    assert raw is not raw2
    assert epochs is not epochs2
    assert cov is not cov2
    assert ave is ave2
    # Test in-place operation
    raw2, epochs2 = equalize_channels([raw, epochs], copy=False)
    assert raw is raw2
    assert epochs is epochs2
def test_combine_channels():
    """Test channel combination on Raw, Epochs, and Evoked.

    Channel indices below (0-5, 375, 376) refer to positions in the
    ``test_raw.fif`` fixture, as noted in the inline comments.
    """
    raw = read_raw_fif(raw_fname, preload=True)
    raw_ch_bad = read_raw_fif(raw_fname, preload=True)
    raw_ch_bad.info['bads'] = ['MEG 0113', 'MEG 0112']
    epochs = Epochs(raw, read_events(eve_fname))
    evoked = epochs.average()
    good = dict(foo=[0, 1, 3, 4], bar=[5, 2])  # good grad and mag
    # Test good cases
    combine_channels(raw, good)
    combined_epochs = combine_channels(epochs, good)
    assert_array_equal(combined_epochs.events, epochs.events)
    combine_channels(evoked, good)
    combine_channels(raw, good, drop_bad=True)
    combine_channels(raw_ch_bad, good, drop_bad=True)
    # Test with stimulus channels
    combine_stim = combine_channels(raw, good, keep_stim=True)
    target_nchan = len(good) + len(pick_types(raw.info, meg=False, stim=True))
    assert combine_stim.info['nchan'] == target_nchan
    # Test results with one ROI: each method must match the corresponding
    # numpy reduction over the picked rows.
    good_single = dict(foo=[0, 1, 3, 4])  # good grad
    combined_mean = combine_channels(raw, good_single, method='mean')
    combined_median = combine_channels(raw, good_single, method='median')
    combined_std = combine_channels(raw, good_single, method='std')
    foo_mean = np.mean(raw.get_data()[good_single['foo']], axis=0)
    foo_median = np.median(raw.get_data()[good_single['foo']], axis=0)
    foo_std = np.std(raw.get_data()[good_single['foo']], axis=0)
    assert_array_equal(combined_mean.get_data(),
                       np.expand_dims(foo_mean, axis=0))
    assert_array_equal(combined_median.get_data(),
                       np.expand_dims(foo_median, axis=0))
    assert_array_equal(combined_std.get_data(),
                       np.expand_dims(foo_std, axis=0))
    # Test bad cases
    bad1 = dict(foo=[0, 376], bar=[5, 2])  # out of bounds
    bad2 = dict(foo=[0, 2], bar=[5, 2])  # type mix in same group
    with pytest.raises(ValueError, match='"method" must be a callable, or'):
        combine_channels(raw, good, method='bad_method')
    with pytest.raises(TypeError, match='"keep_stim" must be of type bool'):
        combine_channels(raw, good, keep_stim='bad_type')
    with pytest.raises(TypeError, match='"drop_bad" must be of type bool'):
        combine_channels(raw, good, drop_bad='bad_type')
    with pytest.raises(ValueError, match='Some channel indices are out of'):
        combine_channels(raw, bad1)
    with pytest.raises(ValueError, match='Cannot combine sensors of diff'):
        combine_channels(raw, bad2)
    # Test warnings
    raw_no_stim = read_raw_fif(raw_fname, preload=True)
    raw_no_stim.pick_types(meg=True, stim=False)
    warn1 = dict(foo=[375, 375], bar=[5, 2])  # same channel in same group
    warn2 = dict(foo=[375], bar=[5, 2])  # one channel (last channel)
    warn3 = dict(foo=[0, 4], bar=[5, 2])  # one good channel left
    with pytest.warns(RuntimeWarning, match='Could not find stimulus'):
        combine_channels(raw_no_stim, good, keep_stim=True)
    # Each of the three calls below should emit exactly one warning.
    with pytest.warns(RuntimeWarning, match='Less than 2 channels') as record:
        combine_channels(raw, warn1)
        combine_channels(raw, warn2)
        combine_channels(raw_ch_bad, warn3, drop_bad=True)
    assert len(record) == 3

run_tests_if_main()
| {
"repo_name": "cjayb/mne-python",
"path": "mne/channels/tests/test_channels.py",
"copies": "1",
"size": "18754",
"license": "bsd-3-clause",
"hash": 321485707847990460,
"line_mean": 42.6139534884,
"line_max": 79,
"alpha_frac": 0.6057374427,
"autogenerated": false,
"ratio": 3.1540531449714093,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42597905876714093,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from copy import deepcopy
import warnings
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_true, assert_equal
from mne.channels import rename_channels, read_ch_connectivity
from mne.channels.channels import _ch_neighbor_connectivity
from mne.io import read_info, Raw
from mne.io.constants import FIFF
from mne.fixes import partial, savemat
from mne.utils import _TempDir, run_tests_if_main
from mne import pick_types, pick_channels
# Location of the small raw FIF fixture shipped with the mne.io tests.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
# Surface every warning so the catch_warnings blocks below record them all.
warnings.simplefilter('always')
def test_rename_channels():
    """Test rename channels.

    Covers error handling plus successful renames via a mapping dict and a
    callable.  The hard-coded indices (373-375) refer to channel positions
    in the ``test_raw.fif`` fixture.
    """
    info = read_info(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test improper mapping configuration
    mapping = {'MEG 2641': 1.0}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test non-unique mapping configuration
    mapping = {'MEG 2641': 'MEG 2642'}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test bad input
    assert_raises(ValueError, rename_channels, info, 1.)
    # Test successful changes
    # Test ch_name and ch_names are changed
    info2 = deepcopy(info)  # for consistency at the start of each test
    info2['bads'] = ['EEG 060', 'EOG 061']
    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
    rename_channels(info2, mapping)
    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
    assert_true(info2['ch_names'][374] == 'EEG060')
    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
    assert_true(info2['ch_names'][375] == 'EOG061')
    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
    # Renaming via a callable applied to every channel name.
    info2 = deepcopy(info)
    rename_channels(info2, lambda x: x.replace(' ', ''))
    assert_true(info2['chs'][373]['ch_name'] == 'EEG059')
    # Duplicated bads entries must both be renamed.
    info2 = deepcopy(info)
    info2['bads'] = ['EEG 060', 'EEG 060']
    rename_channels(info2, mapping)
    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
def test_set_channel_types():
    """Test changing channel kinds/units/coil types via set_channel_types."""
    raw = Raw(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog',
               'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg'}
    assert_raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = Raw(raw_fname, add_eeg_ref=False)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    assert_raises(RuntimeError, raw2.set_channel_types, mapping)  # has proj
    raw2.add_proj([], remove_existing=True)
    # Type changes emit unit-change warnings; record and check them.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw2.set_channel_types(mapping)
    assert_true(len(w) >= 1, msg=[str(ww.message) for ww in w])
    assert_true(all('The unit for channel' in str(ww.message) for ww in w))
    info = raw2.info
    # Verify each remapped channel got the expected kind/unit/coil type.
    assert_true(info['chs'][372]['ch_name'] == 'EEG 058')
    assert_true(info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH)
    assert_true(info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH)
        assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V)
        assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    assert_raises(ValueError, raw.set_channel_types, ch_types)
def test_read_ch_connectivity():
    """Test reading channel connectivity templates (FieldTrip .mat + builtin)."""
    tempdir = _TempDir()
    a = partial(np.array, dtype='<U7')
    # no pep8
    # Minimal FieldTrip-style neighbours struct: 3 channels, MEG0111<->MEG0131
    # symmetric, MEG0121 adjacent to both.
    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
                     (['MEG0121'], [[a(['MEG0111'])],
                                    [a(['MEG0131'])]]),
                     (['MEG0131'], [[a(['MEG0111'])],
                                    [a(['MEG0121'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_connectivity, ch_names = read_ch_connectivity(mat_fname)
    x = ch_connectivity
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (3, 3))
    assert_equal(x[0, 1], False)
    assert_equal(x[0, 2], True)
    assert_true(np.all(x.diagonal()))
    # Out-of-range pick must raise; valid picks subset the matrix.
    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
    ch_connectivity, ch_names = read_ch_connectivity(mat_fname, picks=[0, 2])
    assert_equal(ch_connectivity.shape[0], 2)
    assert_equal(len(ch_names), 2)
    # Malformed neighbor specs must raise.
    ch_names = ['EEG01', 'EEG02', 'EEG03']
    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names[:2],
                  neighbors)
    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
    # Builtin template lookup by name; unknown names must raise.
    connectivity, ch_names = read_ch_connectivity('neuromag306mag')
    assert_equal(connectivity.shape, (102, 102))
    assert_equal(len(ch_names), 102)
    assert_raises(ValueError, read_ch_connectivity, 'bananas!')
def test_get_set_sensor_positions():
    """Test the private get/set helpers for sensor positions."""
    raw1 = Raw(raw_fname)
    picks = pick_types(raw1.info, meg=False, eeg=True)
    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
    raw_pos = raw1._get_channel_positions(picks=picks)
    assert_array_equal(raw_pos, pos)
    ch_name = raw1.info['ch_names'][13]
    # Length mismatch between positions and names must raise.
    assert_raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
    # Setting through the helper must match direct assignment on a copy.
    raw2 = Raw(raw_fname)
    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
    assert_array_equal(raw1.info['chs'][13]['loc'],
                       raw2.info['chs'][13]['loc'])


run_tests_if_main()
| {
"repo_name": "wronk/mne-python",
"path": "mne/channels/tests/test_channels.py",
"copies": "3",
"size": "7529",
"license": "bsd-3-clause",
"hash": 7935057849870949000,
"line_mean": 41.5367231638,
"line_max": 77,
"alpha_frac": 0.6234559702,
"autogenerated": false,
"ratio": 3.060569105691057,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184025075891057,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from copy import deepcopy
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_true, assert_equal
from mne.channels import rename_channels, read_ch_connectivity
from mne.channels.channels import _ch_neighbor_connectivity
from mne.io import read_info, Raw
from mne.io.constants import FIFF
from mne.fixes import partial, savemat
from mne.utils import _TempDir, run_tests_if_main
from mne import pick_types
# Location of the shared test data that ships with mne.io's test suite.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
def test_rename_channels():
    """Test renaming channels via dict and callable mappings."""
    info = read_info(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test improper mapping configuration (new name must be a string)
    mapping = {'MEG 2641': 1.0}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test non-unique mapping configuration (target name already taken)
    mapping = {'MEG 2641': 'MEG 2642'}
    assert_raises(ValueError, rename_channels, info, mapping)
    # Test bad input (mapping must be dict or callable)
    assert_raises(ValueError, rename_channels, info, 1.)
    # Test successful changes
    # Test ch_name and ch_names are changed, and 'bads' follows the rename
    info2 = deepcopy(info)  # for consistency at the start of each test
    info2['bads'] = ['EEG 060', 'EOG 061']
    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
    rename_channels(info2, mapping)
    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
    assert_true(info2['ch_names'][374] == 'EEG060')
    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
    assert_true(info2['ch_names'][375] == 'EOG061')
    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
    # A callable mapping is applied to every channel name.
    info2 = deepcopy(info)
    rename_channels(info2, lambda x: x.replace(' ', ''))
    assert_true(info2['chs'][373]['ch_name'] == 'EEG059')
    # Duplicate entries in 'bads' are each renamed.
    info2 = deepcopy(info)
    info2['bads'] = ['EEG 060', 'EEG 060']
    rename_channels(info2, mapping)
    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
def test_set_channel_types():
    """Test changing channel kinds/units/coil types via set_channel_types."""
    raw = Raw(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {'EEG 060': 'eog', 'EEG 059': 'ecg', 'EOG 061': 'seeg'}
    assert_raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = Raw(raw_fname, add_eeg_ref=False)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    raw2.set_channel_types(mapping)
    info = raw2.info
    # Verify each remapped channel got the expected kind/unit/coil type.
    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
def test_read_ch_connectivity():
    """Test reading channel connectivity templates (FieldTrip .mat + builtin)."""
    tempdir = _TempDir()
    a = partial(np.array, dtype='<U7')
    # no pep8
    # Minimal FieldTrip-style neighbours struct: 3 channels, MEG0111<->MEG0131
    # symmetric, MEG0121 adjacent to both.
    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
                     (['MEG0121'], [[a(['MEG0111'])],
                                    [a(['MEG0131'])]]),
                     (['MEG0131'], [[a(['MEG0111'])],
                                    [a(['MEG0121'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_connectivity, ch_names = read_ch_connectivity(mat_fname)
    x = ch_connectivity
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (3, 3))
    assert_equal(x[0, 1], False)
    assert_equal(x[0, 2], True)
    assert_true(np.all(x.diagonal()))
    # Out-of-range pick must raise; valid picks subset the matrix.
    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
    ch_connectivity, ch_names = read_ch_connectivity(mat_fname, picks=[0, 2])
    assert_equal(ch_connectivity.shape[0], 2)
    assert_equal(len(ch_names), 2)
    # Malformed neighbor specs must raise.
    ch_names = ['EEG01', 'EEG02', 'EEG03']
    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names[:2],
                  neighbors)
    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
    # Builtin template lookup by name; unknown names must raise.
    connectivity, ch_names = read_ch_connectivity('neuromag306mag')
    assert_equal(connectivity.shape, (102, 102))
    assert_equal(len(ch_names), 102)
    assert_raises(ValueError, read_ch_connectivity, 'bananas!')
def test_get_set_sensor_positions():
    """Test the private get/set helpers for sensor positions."""
    raw1 = Raw(raw_fname)
    picks = pick_types(raw1.info, meg=False, eeg=True)
    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
    raw_pos = raw1._get_channel_positions(picks=picks)
    assert_array_equal(raw_pos, pos)
    ch_name = raw1.info['ch_names'][13]
    # Length mismatch between positions and names must raise.
    assert_raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
    # Setting through the helper must match direct assignment on a copy.
    raw2 = Raw(raw_fname)
    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
    assert_array_equal(raw1.info['chs'][13]['loc'],
                       raw2.info['chs'][13]['loc'])


run_tests_if_main()
| {
"repo_name": "cmoutard/mne-python",
"path": "mne/channels/tests/test_channels.py",
"copies": "1",
"size": "6292",
"license": "bsd-3-clause",
"hash": -5750904665511494000,
"line_mean": 39.8571428571,
"line_max": 77,
"alpha_frac": 0.6223776224,
"autogenerated": false,
"ratio": 3.072265625,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9193019870776624,
"avg_score": 0.0003246753246753247,
"num_lines": 154
} |
__author__ = "Daniel Hallman"
__email__ = "daniel.hallman@grepstar.net"
__copyright__ = "Copyright 2015, Grepstar, LLC"
__license__ = "MIT"
import json, urllib2, os
import datetime
from optparse import OptionParser
from sourcegen import SwiftSource, ObjCSource
# Parse constants
PARSE_CLASS_PREFIX = "PF"
def main():
    """Generate Parse subclass source files from a Parse Server schema.

    Reads connection options from the command line, fetches /schemas from
    the server using the master key, and feeds the result to the Swift or
    Objective-C source generator.
    """
    parser = OptionParser()
    parser.add_option("-u", "--url", dest="parse_server_url",
                      help="Parse Server URL",)
    parser.add_option("-a", "--parseappid", dest="parse_app_id",
                      help="Parse App ID",)
    parser.add_option("-m", "--parsemaster", dest="parse_master_key",
                      help="Parse Master Key",)
    parser.add_option("-p", "--prefix", dest="subclass_prefix",
                      help="Subclass Prefix",)
    parser.add_option("-l", "--language", dest="language",
                      help="Language to build templates",)
    (options, args) = parser.parse_args()

    if options.parse_server_url:
        PARSE_SERVER_URL = options.parse_server_url
    else:
        PARSE_SERVER_URL = 'http://localhost:1337/parse'
        # assert False, 'PARSE_SERVER_URL is blank!'

    if options.parse_app_id:
        PARSE_APP_ID = options.parse_app_id
    else:
        assert False, 'PARSE_APP_ID is blank!'

    if options.parse_master_key:
        PARSE_MASTER_KEY = options.parse_master_key
    else:
        assert False, 'PARSE_MASTER_KEY is blank! Visit https://www.parse.com to obtain your keys.'

    if options.subclass_prefix:
        SUBCLASS_PREFIX = options.subclass_prefix
    else:
        assert False, 'SUBCLASS_PREFIX is blank! You should probably use a custom prefix.'

    if options.language:
        # Normalise case once. Previously only the validation lowercased the
        # value, so e.g. "-l Swift" passed the assert but matched neither
        # dispatch branch below, leaving `generator` undefined (NameError).
        LANGUAGE = options.language.lower()
        languages = ["swift", "objc"]
        assert LANGUAGE in languages, 'LANGUAGE must be one of the following: ' + ', '.join(languages)
    else:
        LANGUAGE = "swift"

    # Fetch the full schema list; the master key is required for /schemas.
    req = urllib2.Request(PARSE_SERVER_URL+'/schemas')
    req.add_header("X-Parse-Application-Id", PARSE_APP_ID)
    req.add_header("X-Parse-Master-Key", PARSE_MASTER_KEY)
    req.add_header("Content-Type", "application/json")
    opener = urllib2.build_opener()
    f = opener.open(req)
    result = json.loads(f.read())
    schemas = result['results']

    today = datetime.date.today().strftime('%m/%d/%y')
    if LANGUAGE == 'swift':
        generator = SwiftSource.SwiftSource(SUBCLASS_PREFIX, today, True)
    elif LANGUAGE == 'objc':  # elif: the two branches are mutually exclusive
        generator = ObjCSource.ObjCSource(SUBCLASS_PREFIX, today)
    generator.createImplementation(schemas)
if __name__ == '__main__':
main() | {
"repo_name": "Grepstar/GSParseSchema",
"path": "parse-schema.py",
"copies": "1",
"size": "2616",
"license": "mit",
"hash": -6301942544436239000,
"line_mean": 32.987012987,
"line_max": 110,
"alpha_frac": 0.625382263,
"autogenerated": false,
"ratio": 3.653631284916201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9760172643065825,
"avg_score": 0.003768180970075072,
"num_lines": 77
} |
__author__ = 'Daniel James Evans'
__copyright__ = 'Copyright (c) 2017 Daniel James Evans'
__license__ = 'MIT'
import Tkinter
import tkFileDialog
import keyword
from functools import partial
import subprocess
import pipes
# Seed the highlight table with every Python keyword; each entry is a
# one-element list whose word doubles as the Tkinter tag name.
colored_item_list = []
for word in keyword.kwlist:
    colored_item_list.append([word])
def create_tags(text_widget):
    '''Register the colour tags used by the syntax highlighter on text_widget.'''
    # One red tag per Python keyword, then the comment and string tags,
    # in the same order the original created them.
    for [kw] in colored_item_list:
        text_widget.tag_config(kw, foreground = 'red')
    for tag_name, colour in (('comment', 'yellow'), ('string', 'cyan')):
        text_widget.tag_config(tag_name, foreground = colour)
def update_highlight(event):
    '''Update syntax highlighting when a key is pressed.

    Re-tags keywords near the insertion cursor, then rescans the whole
    buffer character by character for comments and strings.
    '''
    # Pass 1: keyword tags on the current line, up to just past the cursor.
    for word_entry in colored_item_list:
        word = word_entry[0]
        event.widget.tag_remove(word, event.widget.index(Tkinter.INSERT)[0]
                                + '.0', Tkinter.INSERT
                                + '+%sc' %(str(len(word))))
        search_starting_position = event.widget.index(Tkinter.INSERT)[0] + '.0'
        # NOTE(review): search_ending_position is computed but never used.
        search_ending_position = (event.widget.index(Tkinter.INSERT)[0]
                                  + '.%s'%(str(len(word))))
        pos = True
        while pos:
            # Keyword must not be embedded in a longer identifier.
            pos = event.widget.search('([^A-Za-z_0-9]|^)%s([^A-Za-z_]|$)'
                                      %(word), search_starting_position,
                                      stopindex = Tkinter.INSERT
                                      + '+%sc' %(str(len(word))), regexp = True)
            if pos:
                # Line-start matches have no leading boundary char, so the
                # tag placement differs by one character.
                pos_carat = event.widget.search('(^)%s([^A-Za-z_]|$)' %(word),
                                                search_starting_position,
                                                stopindex = Tkinter.INSERT
                                                + '+%sc' %(str(len(word))),
                                                regexp = True)
                if pos_carat:
                    event.widget.tag_add(word, pos, pos + '+%sc'
                                         %(str(len(word) + 1)))
                else:
                    event.widget.tag_add(word, pos + '+1c', pos + '+%sc'
                                         %(str(len(word) + 1)))
                search_starting_position = pos + '+%sc' %(str(len(word)))
    # Pass 2: character scan of the whole buffer for comments and strings.
    current_pos_in_char = 0  # needed for determining when end-of-file is reached
    current_line = 1
    current_column = 0
    code_text = event.widget.get(1.0, Tkinter.END)
    event.widget.tag_remove('string', '1.0', Tkinter.END)
    while current_pos_in_char < len(code_text):
        next_char = code_text[current_pos_in_char]
        current_column += 1
        current_pos_in_char += 1
        if next_char == '\n' or next_char == '\r':
            current_line += 1
            current_column = 0
        if next_char == '#':
            # Comment runs from '#' to end of line (or end of file).
            comment_line_number = current_line
            comment_start_column = current_column - 1
            if current_pos_in_char == len(code_text) - 1:
                comment_end_column = current_column
            while (next_char != '\n' and next_char != '\r'
                   and current_pos_in_char != len(code_text) - 1):
                current_pos_in_char += 1
                current_column += 1
                next_char = code_text[current_pos_in_char]
            if (next_char == '\n' or next_char == '\r'
                    or current_pos_in_char == len(code_text) - 1):
                comment_end_column = current_column
            event.widget.tag_add('comment',
                                 '%d.%d' %(comment_line_number,
                                           comment_start_column),
                                 '%d.%d' %(comment_line_number,
                                           comment_end_column))
        if next_char == '\'' or next_char == '"':
            # String literal; triple quotes are treated as docstrings.
            quote_mark = next_char
            string_start_line_number = current_line
            string_start_column = current_column - 1
            if current_pos_in_char == len(code_text) - 1:
                string_end_column = current_column
            else:
                next_char = code_text[current_pos_in_char]
                if (next_char == quote_mark
                        and code_text[current_pos_in_char + 1] == quote_mark):
                    docstring = True
                else:
                    docstring = False
                if docstring != True:
                    # Single-quoted: scan to the matching quote or EOF.
                    while (next_char != quote_mark
                           and current_pos_in_char != len(code_text) - 1):
                        current_pos_in_char += 1
                        current_column += 1
                        next_char = code_text[current_pos_in_char]
                else:
                    closing_quote_count = 0
                    # Increase pos because the next char was already checked.
                    current_pos_in_char += 1
                    current_column += 1
                    # Scan until three consecutive quote marks close it.
                    while closing_quote_count < 3:
                        current_pos_in_char += 1
                        current_column += 1
                        if current_pos_in_char == len(code_text) - 1:
                            break
                        next_char = code_text[current_pos_in_char]
                        if next_char == quote_mark:
                            closing_quote_count += 1
                        else:
                            closing_quote_count = 0
                        if next_char == '\n' or next_char == '\r':
                            current_line += 1
                            # Decrease column pos because of the newline char.
                            current_column = -1
                string_end_line_number = current_line
                string_end_column = current_column
                event.widget.tag_add('string',
                                     '%d.%d' %(string_start_line_number,
                                               string_start_column),
                                     '%d.%d' %(string_end_line_number,
                                               string_end_column + 1))
                # Step past the closing quote before the outer loop resumes.
                current_pos_in_char += 1
                current_column += 1
def highlight_loaded_file(text_entry):
    '''Highlight the syntax of a newly opened document.

    Same scanning logic as update_highlight, but the keyword pass covers
    the whole buffer (from 1.0) rather than just the cursor line.
    '''
    # Pass 1: keyword tags over the whole document.
    for word_entry in colored_item_list:
        word = word_entry[0]
        search_starting_position = '1.0'
        # NOTE(review): search_ending_position is computed but never used.
        search_ending_position = '1.0 + %sc' %(str(len(word)))
        pos = True
        while pos:
            pos = text_entry.search('([^A-Za-z_0-9]|\n|\r|^)%s([^A-Za-z_]|$)'
                                    %(word), search_starting_position,
                                    stopindex = Tkinter.END, regexp = True)
            if pos:
                #Put the tag in the right place, depending on if pos has ^.
                pos_carat = text_entry.search('(^)%s([^A-Za-z_]|$)' %(word),
                                              search_starting_position,
                                              stopindex = Tkinter.END,
                                              regexp = True)
                if pos_carat:
                    text_entry.tag_add(word, pos, pos + '+%sc'
                                       %(str(len(word) + 1)))
                else:
                    text_entry.tag_add(word, pos + '+1c', pos + '+%sc'
                                       %(str(len(word) + 1)))
                search_starting_position = pos + '+%sc' %(str(len(word)))
    # Pass 2: character scan of the whole buffer for comments and strings.
    current_pos_in_char = 0  # needed for determining when end-of-file is reached
    current_line = 1
    current_column = 0
    code_text = text_entry.get(1.0,Tkinter.END)
    text_entry.tag_remove('string', '1.0', Tkinter.END)
    while current_pos_in_char < len(code_text):
        next_char = code_text[current_pos_in_char]
        current_column += 1
        current_pos_in_char += 1
        if next_char == '\n' or next_char == '\r':
            current_line += 1
            current_column = 0
        if next_char == '#':
            # Comment runs from '#' to end of line (or end of file).
            comment_line_number = current_line
            comment_start_column = current_column - 1
            if current_pos_in_char == len(code_text) - 1:
                comment_end_column = current_column
            while (next_char != '\n' and next_char != '\r'
                   and current_pos_in_char != len(code_text) - 1):
                current_pos_in_char += 1
                current_column += 1
                next_char = code_text[current_pos_in_char]
            if (next_char == '\n' or next_char == '\r'
                    or current_pos_in_char == len(code_text) - 1):
                comment_end_column = current_column
            text_entry.tag_add('comment',
                               '%d.%d' %(comment_line_number,
                                         comment_start_column),
                               '%d.%d' %(comment_line_number,
                                         comment_end_column))
        if next_char == '\'' or next_char == '"':
            # String literal; triple quotes are treated as docstrings.
            quote_mark = next_char
            string_start_line_number = current_line
            string_start_column = current_column - 1
            if current_pos_in_char == len(code_text) - 1:
                string_end_column = current_column
            else:
                next_char = code_text[current_pos_in_char]
                if (next_char == quote_mark
                        and code_text[current_pos_in_char + 1] == quote_mark):
                    docstring = True
                else:
                    docstring = False
                if docstring != True:
                    # Single-quoted: scan to the matching quote or EOF.
                    while (next_char != quote_mark
                           and current_pos_in_char != len(code_text) - 1):
                        current_pos_in_char += 1
                        current_column += 1
                        next_char = code_text[current_pos_in_char]
                else:
                    closing_quote_count = 0
                    #Increase pos because the next char was already checked.
                    current_pos_in_char += 1
                    current_column += 1
                    # Scan until three consecutive quote marks close it.
                    while closing_quote_count < 3:
                        current_pos_in_char += 1
                        current_column += 1
                        if current_pos_in_char == len(code_text) - 1:
                            break
                        next_char = code_text[current_pos_in_char]
                        if next_char == quote_mark:
                            closing_quote_count += 1
                        else:
                            closing_quote_count = 0
                        if next_char == '\n' or next_char == '\r':
                            current_line += 1
                            #Correctly place tag, depending on if pos has ^.
                            current_column = -1
                string_end_line_number = current_line
                string_end_column = current_column
                text_entry.tag_add('string',
                                   '%d.%d' %(string_start_line_number,
                                             string_start_column),
                                   '%d.%d' %(string_end_line_number,
                                             string_end_column + 1))
                # Step past the closing quote before the outer loop resumes.
                current_pos_in_char += 1
                current_column += 1
| {
"repo_name": "DanielJamesEvans/Python_IDE",
"path": "syntax_highlighter.py",
"copies": "1",
"size": "11202",
"license": "mit",
"hash": 3265802519654550000,
"line_mean": 47.0772532189,
"line_max": 80,
"alpha_frac": 0.4475093733,
"autogenerated": false,
"ratio": 4.341860465116279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005495457032155028,
"num_lines": 233
} |
__author__ = 'Daniel Kapellusch'
import astropy.io.fits as fits,os,csv,json,sys,multiprocessing as mp #necessary imports. Note: this is written in python 2.
from datetime import datetime
from os import path
def main(path):
    """Harvest FITS primary-header metadata from a directory, in parallel.

    Writes metadata.tsv and metadata.json as side effects and returns a
    dict of filename lists keyed by VIMTYPE (via sort_list).

    ``path`` may be a directory string or an argv-style list, since the
    entry point calls ``main(sys.argv[1:])``. Previously a non-empty argv
    list slipped through unchanged and broke the string concatenation and
    os.listdir call below.
    """
    if isinstance(path, list):  # called as main(sys.argv[1:]): unwrap argv
        path = path[0] if path else ""
    if not path:  # set default path in the case of no passed param
        path = "sample_fits/"
    fits_lst = [path+"/"+fit for fit in os.listdir(path) if fit.endswith(".fits")]  # get files in dir if they are .fits
    with fits.open(fits_lst[0]) as fits_file:
        # Field names come from the first file's primary header, plus FILENAME.
        items = list(set([str(header_field) for header_field in fits_file[0].header.keys()]+["FILENAME"]))
    pool = mp.Pool(processes=None)  # setup multiprocessing pool
    ls = pool.map(get_metadata_and_sort, fits_lst)  # gather metadata in parallel
    make_tsv(ls, items)  # generate tsv of metadata
    build_json({item["FILENAME"]: item for item in ls})  # create json from list of metadata
    return(sort_list(ls))  # return a dictionary of lists of filenames sorted by type
def get_metadata_and_sort(image):
    """Return the primary-header metadata of one FITS file as a dict.

    Adds a FILENAME key (basename of *image*) and flattens the multi-line
    COMMENT field into a single ASCII string.
    """
    hdulist = fits.open(image)  # open each fits file in the list
    header = hdulist[0].header  # get all the metadata from the fits file hdulist
    hdulist.close()
    header["FILENAME"] = path.basename(image)
    temp = str(str(header["COMMENT"]).encode('ascii', 'ignore'))  # encode in ascii as unicode doesn't play nice
    # Rebuild without the COMMENT card. Fix: the original used
    # `key is not "COMMENT"`, an identity test against a string literal,
    # which is not a reliable string comparison; use != instead.
    header = {key: value for key, value in header.items()  # remove double comment field
              if key != "COMMENT"}
    header["COMMENT"] = temp.replace("\n", " ")  # put comments back in
    return(header)
def make_tsv(header, items):
    """Write one tab-separated row per image-header dict in *header*.

    *items* is the ordered list of field names; output goes to metadata.tsv
    in the current directory (every value is stringified).
    """
    with open('metadata.tsv', "wb") as csvfile:  # create a file called metadata.tsv for the output
        writer = csv.DictWriter(csvfile, fieldnames=items, delimiter="\t")  # set up the writer, header fields, and delimiter
        writer.writeheader()  # write the headers to the file
        # Plain loop instead of the original list comprehension that was
        # used only for its side effects.
        for image in header:
            writer.writerow({k: str(image[k]) for k in items})
def build_json(total_dic):
    """Serialise the filename->metadata mapping to ./metadata.json
    (compact separators, 4-space indent)."""
    with open("metadata.json", 'w') as out:
        json.dump(total_dic, out, separators=(',', ':'), indent=4)
def sort_list(ls):
    """Partition header dicts by VIMTYPE.

    Returns ``{"SCIENCE": [...], "DARK": [...]}`` of FILENAME values;
    any VIMTYPE other than SCIENCE is filed under DARK (matching the
    original behaviour). Rewritten as a plain loop — the original used a
    list comprehension purely for its side effects.
    """
    dic = {"SCIENCE": [], "DARK": []}
    for i in ls:
        if i["VIMTYPE"] == "SCIENCE":
            dic["SCIENCE"].append(i["FILENAME"])
        else:
            dic["DARK"].append(i["FILENAME"])
    return(dic)
if __name__ == "__main__":
    # Time a full run for the informal benchmark quoted in the comments below.
    start = datetime.now()
    result = main(sys.argv[1:])
    end = datetime.now()
    duration = end - start
    # This module runs in ~0.44 seconds on my machine, processing my test batch of 10 files, although if I set the processes
    # in mp.pool() to be 1 the run time becomes ~0.22 seconds
    # The serial version of this module runs in ~0.55 seconds under the same conditions
    print "Total Execution Time (seconds): %d.%d" % (duration.seconds, duration.microseconds)
    print(result)
| {
"repo_name": "acic2015/findr",
"path": "deprecated/Fits_Preprocessing/parallel_extract_and_sort.py",
"copies": "1",
"size": "2933",
"license": "mit",
"hash": -8277711280187321000,
"line_mean": 49.5689655172,
"line_max": 143,
"alpha_frac": 0.6815547221,
"autogenerated": false,
"ratio": 3.5811965811965814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4762751303296581,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel Kapellusch'
import astropy.io.fits as fits,os,csv,json,sys #necessary imports. Note: this is written in python 2.
def main(path):
    """Extract FITS metadata under *path* and build TSV/JSON summaries.

    Returns a dict of filename lists keyed by VIMTYPE (from build_json).

    ``path`` may be a directory string or an argv-style list, since the
    entry point calls ``main(sys.argv[1:])``. Previously a non-empty argv
    list slipped through unchanged and broke the downstream path handling.
    """
    if isinstance(path, list):  # called as main(sys.argv[1:]): unwrap argv
        path = path[0] if path else ""
    if not path:  # set default path in the case of no passed param
        path = "sample_fits/"
    return(build_json(create_metadata_and_sort(path)))  # call all functions below, passing each function's return to the next
def create_metadata_and_sort(path):
    """Extract primary-header metadata from every .fits file under *path*.

    Writes metadata.tsv as a side effect and returns a dict mapping each
    FILENAME to its header dict (COMMENT flattened to one ASCII line).
    """
    lst = [fit for fit in os.listdir(path) if fit.endswith(".fits")]  # get files in dir if they are .fits
    total_dic = {}
    with fits.open((path+lst[0])) as fits_file:
        items = list(set([str(header_field) for header_field in fits_file[0].header.keys()]+["FILENAME"]))  # get fieldnames from first fits file
    with open('metadata.tsv', "wb") as csvfile:  # create a file called metadata.tsv for the output
        writer = csv.DictWriter(csvfile, fieldnames=items, delimiter="\t")  # set up the writer, header fields, and delimiter
        writer.writeheader()  # write the headers to the file
        for i in lst:  # iterate through the list of fits files
            hdulist = fits.open(path+i)  # open each fits file in the list
            header = hdulist[0].header  # get all the metadata from the fits file hdulist
            hdulist.close()
            header["FILENAME"] = i
            temp = str(str(header["COMMENT"]).encode('ascii', 'ignore'))  # encode in ascii as unicode doesn't play nice
            # Fix: the original used `key is not "COMMENT"`, an identity test
            # against a string literal, which is not a reliable comparison.
            header = {key: value for key, value in header.items()  # remove double comment field
                      if key != "COMMENT"}
            header["COMMENT"] = temp.replace("\n", " ")  # put comments back in
            total_dic[header["FILENAME"]] = header  # add fits image metadata to dictionary
            writer.writerow({k: str(header[k]) for k in items})  # write metadata to tsv
    return(total_dic)
def build_json(total_dic):
    """Dump *total_dic* to ./metadata.json and partition its keys by VIMTYPE.

    Returns {"SCIENCE": <keys view>, "DARK": <keys view>}; the JSON file
    itself is written unsorted.
    """
    sorted_dic = {"SCIENCE": {}, "DARK": {}}
    for name in total_dic:  # sort dictionary by darks and science
        vimtype = total_dic[name]["VIMTYPE"]
        if vimtype == "SCIENCE":
            sorted_dic["SCIENCE"][name] = total_dic[name]
        elif vimtype == "DARK":
            sorted_dic["DARK"][name] = total_dic[name]
        else:
            print("was nothing somehow")
    with open("metadata.json", 'w') as jsonfile:  # builds json file of metadata not sorted by VIMTYPE
        json.dump(total_dic, jsonfile, separators=(',', ':'), indent=4)
    print("DONE!")
    return {key: sorted_dic[key].keys() for key in sorted_dic}
# Script entry point: the target directory may be given as the first CLI arg.
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "acic2015/findr",
"path": "deprecated/Fits_Preprocessing/metadata_extract.py",
"copies": "1",
"size": "2553",
"license": "mit",
"hash": 6069847970951185000,
"line_mean": 52.1875,
"line_max": 143,
"alpha_frac": 0.6365060713,
"autogenerated": false,
"ratio": 3.641940085592011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4778446156892011,
"avg_score": null,
"num_lines": null
} |
__author__ = 'danielkershaw'
import datetime, os, sys, re, time
from rdflib import ConjunctiveGraph, Namespace, Literal
from rdflib.store import NO_STORE, VALID_STORE
import pandas
from tempfile import mktemp
try:
import imdb
except ImportError:
imdb = None
from rdflib import BNode, Graph, URIRef, Literal, Namespace, RDF
from rdflib.namespace import FOAF, DC
from rdflib.namespace import XSD
storefn = os.path.dirname(os.path.realpath(__file__)) + '/Output/Fire2.rdf'
storen3 = os.path.dirname(os.path.realpath(__file__)) + '/Output/Fire2.ttl'
#storefn = '/home/simon/codes/film.dev/movies.n3'
storeuri = 'file://'+storefn
storeun3 = 'file://'+storen3
title = 'Movies viewed by %s'
r_who = re.compile('^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$')
SPACIAL = Namespace('http://data.ordnancesurvey.co.uk/ontology/spatialrelations/')
POST = Namespace('http://data.ordnancesurvey.co.uk/ontology/postcode/')
ADMINGEO = Namespace('http://data.ordnancesurvey.co.uk/ontology/admingeo/')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
VCARD = Namespace('http://www.w3.org/2006/vcard/ns#')
SCHEME = Namespace('http://schema.org/')
SDMX = Namespace("http://purl.org/linked-data/sdmx#")
SDMXCONCEPT = Namespace("http://purl.org/linked-data/sdmx/2009/concept#")
SDMXDIMENSION = Namespace("http://purl.org/linked-data/sdmx/2009/dimension#")
SDMXATTRIBUTE = Namespace("http://purl.org/linked-data/sdmx/2009/attribute#")
SDMXMEASURE= Namespace("http://purl.org/linked-data/sdmx/2009/measure#")
qb = Namespace("http://purl.org/linked-data/cube#")
INTERVAL = Namespace("http://www.w3.org/2006/time#")
COUNCILTAX = Namespace('http://data.gmdsp.org.uk/data/manchester/council-tax/')
DATEREF = Namespace('http://reference.data.gov.uk/id/day/')
COUNCILBAND = Namespace('http://data.gmdsp.org.uk/def/council/counciltax/council-tax-bands/')
class Store:
def __init__(self):
self.graph = Graph()
rt = self.graph.open(storeuri, create=False)
if rt == None:
# There is no underlying Sleepycat infrastructure, create it
self.graph.open(storeuri, create=True)
else:
assert rt == VALID_STORE, 'The underlying store is corrupt'
self.graph.bind('os', POST)
self.graph.bind('rdfs', RDFS)
self.graph.bind('geo', GEO)
self.graph.bind('vcard', VCARD)
self.graph.bind('scheme', SCHEME)
self.graph.bind('counciltax', COUNCILTAX)
self.graph.bind('qb', qb)
self.graph.bind('admingeo',ADMINGEO)
self.graph.bind('sdmx-attribute', SDMXATTRIBUTE)
self.graph.bind('interval', INTERVAL)
self.graph.bind('day', DATEREF)
self.graph.bind('councilband', COUNCILBAND)
def save(self):
self.graph.serialize(storeuri, format='pretty-xml')
self.graph.serialize(storeun3, format='n3')
def new_postcode(self, postcode):
pc = COUNCILTAX
def refArea(self):
d = COUNCILTAX["refArea"]
self.graph.add((d, RDF.type, qb["Property"]))
self.graph.add((d, RDF.type, qb["DimensionProperty"]))
self.graph.add((d, RDFS["label"], Literal("reference area")))
self.graph.add((d, RDFS["subPropertyOf"], SDMXDIMENSION["refArea"]))
self.graph.add((d, RDFS["range"], POST["PostcodeArea"]))
self.graph.add((d, qb["concept"], SDMXCONCEPT["refArea"]))
def refPeriod(self):
d = COUNCILTAX["refPeriod"]
self.graph.add((d, RDF.type, qb["Property"]))
self.graph.add((d, RDF.type, qb["DimensionProperty"]))
self.graph.add((d, RDFS["label"], Literal("reference period")))
self.graph.add((d, RDFS["subPropertyOf"], SDMXDIMENSION["refPeriod"]))
self.graph.add((d, RDFS["range"], INTERVAL["Interval"]))
self.graph.add((d, qb["concept"], SDMXCONCEPT["refPeriod"]))
def refBand(self):
d = COUNCILTAX["refBand"]
self.graph.add((d, RDF.type, qb["Property"]))
self.graph.add((d, RDF.type, qb["DimensionProperty"]))
self.graph.add((d, RDFS["label"], Literal("reference band")))
self.graph.add((d, RDFS["domain"], URIRef("http://data.gmdsp.org.uk/def/council/counciltax/councilTaxBand")))
def countDef(self):
d = COUNCILTAX["countDef"]
self.graph.add((d, RDF.type, RDF["Property"]))
self.graph.add((d, RDF.type, qb["MeasureProperty"]))
self.graph.add((d, RDFS["label"], Literal("Council tax band count")))
self.graph.add((d, RDFS["subPropertyOf"], SDMXMEASURE["obsValue"]))
self.graph.add((d, RDFS["range"], XSD.decimal))
def new_DSD(self):
dsd = COUNCILTAX["DSD"]
self.graph.add((dsd, RDF.type, qb["DataStructureDefinition"]))
self.graph.add((dsd, qb["dimension"], COUNCILTAX["refArea"]))
self.graph.add((dsd, qb["dimension"], COUNCILTAX["refPeriod"]))
self.graph.add((dsd, qb["dimension"], COUNCILTAX["refBand"]))
self.graph.add((dsd, qb["measure"], COUNCILTAX["countDef"]))
def new_dataset(self):
ds = COUNCILTAX["dataset-le1"]
self.graph.add((ds, RDF.type, qb["DataSet"]))
self.graph.add((ds, RDFS["label"], Literal("Tax Banding")))
self.graph.add((ds, RDFS["comment"], Literal("xxxxx")))
self.graph.add((ds, qb["structure"], COUNCILTAX['data']))
    def new_observation(self, HSC, LSOA_CODE, date, count):
        """Add one qb:Observation for (HSC, LSOA_CODE) with value *count*.

        `date` is a time.struct_time used to build the refPeriod day URI.
        NOTE(review): this file is fire-statistics but reuses the
        council-tax namespaces, and refBand is indexed by LSOA_CODE here
        (the council-tax sibling indexes it by band) -- looks copy-pasted;
        confirm intent.
        """
        observation = COUNCILTAX[LSOA_CODE.replace(" ", "-").lower()+HSC.replace(" ", "-").lower()]
        self.graph.add((observation, RDF.type, qb['Observation']))
        self.graph.add((observation, qb["dataSet"], URIRef('http://data.gmdsp.org.uk/data/manchester/council-tax')))
        self.graph.add((observation, COUNCILTAX['refArea'], URIRef("http://data.ordnancesurvey.co.uk/id/postcodeunit/"+LSOA_CODE.replace(" ",""))))
        self.graph.add((observation, COUNCILTAX['countDef'], Literal(count, datatype=XSD.integer)))
        # Reference this to the list in the data set which Ian is making.
        self.graph.add((observation, COUNCILTAX['refBand'], COUNCILBAND[LSOA_CODE]))
        self.graph.add((observation, COUNCILTAX['refPeriod'], DATEREF[time.strftime('%Y-%m-%d',date)]))
def keyfn(x):
    """Grouping key: the row's postcode."""
    return x["Postcode"]
def keyfnp(x):
    """Grouping key: the row's council-tax band."""
    return x["Band"]
def main(argv=None):
    """Build the data cube from ./Data/HSCDatabyWard.csv and serialize it.

    NOTE: Python 2 script (print statements); uses the legacy pandas
    ``DataFrame.from_csv`` / ``.ix`` APIs -- TODO confirm the pinned
    pandas version still provides them.
    """
    s = Store()
    # Declare the cube's component properties and the dataset node.
    s.refPeriod()
    s.refArea()
    s.refBand()
    s.countDef()
    s.new_dataset()
    #s.new_DSD()
    count = 0
    a = pandas.DataFrame.from_csv('./Data/HSCDatabyWard.csv')
    print a
    for i in a.index.tolist():
        for j in list(a.columns.values):
            print "--------"
            print j
            # Skip rows whose index is NaN (blank CSV lines, presumably).
            if isNaN(i) == False:
                print "HERE"
                print(i, j, a.ix[i,j])
                try:
                    # Column headers look like "<w1> <w2> <year>/..."; the
                    # year token is parsed into a struct_time.
                    print time.strptime(j.split()[2].split("/")[0], "%Y")
                    s.new_observation(j.split()[0]+" "+j.split()[1] , i, time.strptime(j.split()[2].split("/")[0], "%Y"), a.ix[i,j])
                except:
                    # Deliberate best-effort: malformed headers/cells are
                    # logged and skipped rather than aborting the run.
                    print "Unexpected error:", sys.exc_info()[0]
    #reader = csv.DictReader(open('./Data/HSCDatabyLSOA.csv', mode='rU'))
    #for r in reader:
    #
    #	s.new_observation(b, k, time.strptime("01/01/0001", "%d/%m/%Y"), len(n))
    #	count = count + 1
    print "-- Saving --"
    s.save()
def isNaN(num):
    """Return True when *num* is NaN.

    Relies on NaN being the only value that is unequal to itself, so it
    works for any input type without importing math.
    """
    return not (num == num)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "GMDSP-Linked-Data/RDF-work-in-progress",
"path": "Fire/FireStatsDataCube2.py",
"copies": "1",
"size": "7444",
"license": "mit",
"hash": -2440442763558187000,
"line_mean": 39.6775956284,
"line_max": 147,
"alpha_frac": 0.6180816765,
"autogenerated": false,
"ratio": 3.034651447207501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9101478444917137,
"avg_score": 0.010250935758072897,
"num_lines": 183
} |
__author__ = 'danielkershaw'
import datetime, os, sys, re, time
from rdflib import ConjunctiveGraph, Namespace, Literal
from rdflib.store import NO_STORE, VALID_STORE
from tempfile import mktemp
try:
import imdb
except ImportError:
imdb = None
from rdflib import BNode, Graph, URIRef, Literal, Namespace, RDF
from rdflib.namespace import FOAF, DC
from rdflib.namespace import XSD
from itertools import groupby
import csv
import pprint
import utm
from bs4 import BeautifulSoup
storefn = os.path.dirname(os.path.realpath(__file__)) + '/Output/councilTax.rdf'
storen3 = os.path.dirname(os.path.realpath(__file__)) + '/Output/councilTax.ttl'
#storefn = '/home/simon/codes/film.dev/movies.n3'
storeuri = 'file://'+storefn
storeun3 = 'file://'+storen3
title = 'Movies viewed by %s'
r_who = re.compile('^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$')
SPACIAL = Namespace('http://data.ordnancesurvey.co.uk/ontology/spatialrelations/')
POST = Namespace('http://data.ordnancesurvey.co.uk/ontology/postcode/')
ADMINGEO = Namespace('http://data.ordnancesurvey.co.uk/ontology/admingeo/')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
VCARD = Namespace('http://www.w3.org/2006/vcard/ns#')
SCHEME = Namespace('http://schema.org/')
SDMX = Namespace("http://purl.org/linked-data/sdmx#")
SDMXCONCEPT = Namespace("http://purl.org/linked-data/sdmx/2009/concept#")
SDMXDIMENSION = Namespace("http://purl.org/linked-data/sdmx/2009/dimension#")
SDMXATTRIBUTE = Namespace("http://purl.org/linked-data/sdmx/2009/attribute#")
SDMXMEASURE= Namespace("http://purl.org/linked-data/sdmx/2009/measure#")
qb = Namespace("http://purl.org/linked-data/cube#")
INTERVAL = Namespace("http://www.w3.org/2006/time#")
COUNCILTAX = Namespace('http://data.gmdsp.org.uk/data/manchester/council-tax/')
DATEREF = Namespace('http://reference.data.gov.uk/id/day/')
COUNCILBAND = Namespace('http://data.gmdsp.org.uk/def/council/counciltax/council-tax-bands/')
class Store:
    """Wraps an rdflib Graph holding the council-tax data cube.

    Declares the qb dimension/measure properties, the dataset node, and
    one qb:Observation per (band, postcode) pair.
    """

    def __init__(self):
        self.graph = Graph()
        status = self.graph.open(storeuri, create=False)
        if status is None:
            # There is no underlying Sleepycat infrastructure, create it
            self.graph.open(storeuri, create=True)
        else:
            assert status == VALID_STORE, 'The underlying store is corrupt'
        # Register prefix -> namespace bindings for readable serialization.
        for prefix, namespace in (
            ('os', POST),
            ('rdfs', RDFS),
            ('geo', GEO),
            ('vcard', VCARD),
            ('scheme', SCHEME),
            ('counciltax', COUNCILTAX),
            ('qb', qb),
            ('admingeo', ADMINGEO),
            ('sdmx-attribute', SDMXATTRIBUTE),
            ('interval', INTERVAL),
            ('day', DATEREF),
            ('councilband', COUNCILBAND),
        ):
            self.graph.bind(prefix, namespace)

    def save(self):
        """Write the graph to the N3 output file (pretty-xml left disabled)."""
        #self.graph.serialize(storeuri, format='pretty-xml')
        self.graph.serialize(storeun3, format='n3')

    def new_postcode(self, postcode):
        # Dead stub: never completed upstream; kept for interface parity.
        pc = COUNCILTAX

    def refArea(self):
        """Declare the refArea dimension property."""
        d = COUNCILTAX["refArea"]
        for predicate, obj in (
            (RDF.type, qb["Property"]),
            (RDF.type, qb["DimensionProperty"]),
            (RDFS["label"], Literal("reference area")),
            (RDFS["subPropertyOf"], SDMXDIMENSION["refArea"]),
            (RDFS["range"], POST["PostcodeArea"]),
            (qb["concept"], SDMXCONCEPT["refArea"]),
        ):
            self.graph.add((d, predicate, obj))

    def refPeriod(self):
        """Declare the refPeriod dimension property."""
        d = COUNCILTAX["refPeriod"]
        for predicate, obj in (
            (RDF.type, qb["Property"]),
            (RDF.type, qb["DimensionProperty"]),
            (RDFS["label"], Literal("reference period")),
            (RDFS["subPropertyOf"], SDMXDIMENSION["refPeriod"]),
            (RDFS["range"], INTERVAL["Interval"]),
            (qb["concept"], SDMXCONCEPT["refPeriod"]),
        ):
            self.graph.add((d, predicate, obj))

    def refBand(self):
        """Declare the refBand dimension property."""
        d = COUNCILTAX["refBand"]
        for predicate, obj in (
            (RDF.type, qb["Property"]),
            (RDF.type, qb["DimensionProperty"]),
            (RDFS["label"], Literal("reference band")),
            (RDFS["domain"], URIRef("http://data.gmdsp.org.uk/def/council/counciltax/councilTaxBand")),
        ):
            self.graph.add((d, predicate, obj))

    def countDef(self):
        """Declare the countDef measure property."""
        d = COUNCILTAX["countDef"]
        for predicate, obj in (
            (RDF.type, RDF["Property"]),
            (RDF.type, qb["MeasureProperty"]),
            (RDFS["label"], Literal("Council tax band count")),
            (RDFS["subPropertyOf"], SDMXMEASURE["obsValue"]),
            (RDFS["range"], XSD.decimal),
        ):
            self.graph.add((d, predicate, obj))

    def new_DSD(self):
        """Declare the DataStructureDefinition tying dimensions to the measure."""
        dsd = COUNCILTAX["DSD"]
        self.graph.add((dsd, RDF.type, qb["DataStructureDefinition"]))
        for dimension in ("refArea", "refPeriod", "refBand"):
            self.graph.add((dsd, qb["dimension"], COUNCILTAX[dimension]))
        self.graph.add((dsd, qb["measure"], COUNCILTAX["countDef"]))

    def new_dataset(self):
        """Declare the qb:DataSet node."""
        ds = COUNCILTAX["dataset-le1"]
        for predicate, obj in (
            (RDF.type, qb["DataSet"]),
            (RDFS["label"], Literal("Tax Banding")),
            (RDFS["comment"], Literal("xxxxx")),
            (qb["structure"], COUNCILTAX['data']),
        ):
            self.graph.add((ds, predicate, obj))

    def new_observation(self, band, postcode, date, count):
        """Add one qb:Observation for a (band, postcode) pair.

        `date` is a time.struct_time used to build the refPeriod day URI.
        """
        slug = postcode.replace(" ", "-").lower() + band.replace(" ", "-").lower()
        observation = COUNCILTAX[slug]
        self.graph.add((observation, RDF.type, qb['Observation']))
        self.graph.add((observation, qb["dataSet"], URIRef('http://data.gmdsp.org.uk/data/manchester/council-tax')))
        self.graph.add((observation, COUNCILTAX['refArea'], URIRef("http://data.ordnancesurvey.co.uk/id/postcodeunit/" + postcode.replace(" ", ""))))
        self.graph.add((observation, COUNCILTAX['countDef'], Literal(count, datatype=XSD.integer)))
        # Reference this to the list in the data set which Ian is making.
        self.graph.add((observation, COUNCILTAX['refBand'], COUNCILBAND[band]))
        self.graph.add((observation, COUNCILTAX['refPeriod'], DATEREF[time.strftime('%Y-%m-%d', date)]))
def keyfn(x):
    """Grouping key: the row's postcode."""
    return x["Postcode"]
def keyfnp(x):
    """Grouping key: the row's council-tax band."""
    return x["Band"]
def main(argv=None):
    """Build the data cube from ./Data/Ctax Extract.csv and serialize it.

    Groups CSV rows by postcode, then by band, and emits one observation
    per (band, postcode) group with the group size as the count.
    NOTE: Python 2 script (print statements, 'rU' open mode).
    """
    s = Store()
    # Declare the cube's component properties and the dataset node.
    s.refPeriod()
    s.refArea()
    s.refBand()
    s.countDef()
    s.new_dataset()
    #s.new_DSD()
    count = 0
    reader = csv.DictReader(open('./Data/Ctax Extract.csv', mode='rU'))
    # groupby requires its input sorted by the same key -- hence the sorted() calls.
    for k,g in [(k, list(g)) for k,g in groupby(sorted(reader, key=keyfn), keyfn)]:
        for b,n in [(kq, list(go)) for kq,go in groupby(sorted(g, key=keyfnp), keyfnp)]:
            # Safety cap on the number of observations emitted.
            if count <= 1000000:
                s.new_observation(b, k, time.strptime("01/01/0001", "%d/%m/%Y"), len(n))
                count = count + 1
    print "-- Saving --"
    s.save()
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "GMDSP-Linked-Data/RDF-work-in-progress",
"path": "manchester/DataToRDF/CouncilTaxData.py",
"copies": "1",
"size": "7048",
"license": "mit",
"hash": -3227427799243605000,
"line_mean": 40.4588235294,
"line_max": 146,
"alpha_frac": 0.6374858116,
"autogenerated": false,
"ratio": 3.024892703862661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4162378515462661,
"avg_score": null,
"num_lines": null
} |
__author__ = 'daniel.kirov'
import re
try:
from urlparse import urlparse
except ImportError:
# Python 3 location
from urllib.parse import urlparse
from django.utils import six
from django.template import TemplateSyntaxError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django import template
try:
from django.utils.safestring import mark_safe
except ImportError:
mark_safe = lambda s: s
import imgix
register = template.Library()
WH_PATTERN = re.compile(r'(\d+)x(\d+)$')
FM_PATTERN = re.compile(r'([^\?]+)')
FM_MATCHES = {
'jpg': 'jpg',
'jpeg': 'jpg',
'png': 'png',
'gif': 'gif',
'jp2': 'jp2',
'jxr': 'jxr',
'webp': 'webp',
}
def get_settings_variables():
    """Collect the optional IMGIX_* Django settings, with defaults.

    getattr's default kicks in exactly when the setting attribute is
    missing, matching the original try/except AttributeError blocks.
    """
    use_https = getattr(settings, 'IMGIX_HTTPS', True)
    sign_key = getattr(settings, 'IMGIX_SIGN_KEY', None)
    shard_strategy = getattr(settings, 'IMGIX_SHARD_STRATEGY', None)
    aliases = getattr(settings, 'IMGIX_ALIASES', None)
    format_detect = getattr(settings, 'IMGIX_DETECT_FORMAT', False)
    web_proxy = getattr(settings, 'IMGIX_WEB_PROXY_SOURCE', False)
    return shard_strategy, sign_key, use_https, aliases, format_detect, web_proxy
def get_kwargs(alias, aliases, kwargs):
    """Resolve the Imgix arguments: inline kwargs, or a configured alias.

    Raises ImproperlyConfigured when an alias is requested but no aliases
    are configured, or the requested alias is unknown.
    """
    # Check if we are using an alias or inline arguments
    if not alias:
        return kwargs
    if not aliases:
        raise ImproperlyConfigured(
            "No aliases set. Please set IMGIX_ALIASES in settings.py"
        )
    if alias not in aliases:
        raise ImproperlyConfigured(
            "Alias {0} not found in IMGIX_ALIASES".format(alias)
        )
    return aliases[alias]
def get_fm(image_url):
    """Map the URL's file extension to an Imgix ``fm`` format value.

    Returns the format string from FM_MATCHES, or False when the
    extension is missing or not a recognized image format.
    """
    image_end = image_url.split('.')[-1]
    m = FM_PATTERN.match(image_end)
    if not m:
        return False
    ext = m.group(1)
    # Bug fix: the original used a bare `except:` (which swallowed any
    # error, not just a missing key) and shadowed the builtin `format`.
    try:
        return FM_MATCHES[ext]
    except KeyError:
        return False
"""
Template tag for returning an image from imgix.
This template tag takes the following arguments:
1. image_url -- the image URL that we will pass onto Imgix
2. any number of optional arguments, which Imgix can accept.
For reference - https://www.imgix.com/docs/reference
You must also put IMGIX_DOMAINS in your settings.py file.
This can be a single domain, e.g.:
IMGIX_DOMAINS = 'test.imgix.net'
or a list of domains, if you have sharding enabled in your Imgix account, e.g.:
IMGIX_DOMAINS = [
'test-1.imgix.net',
'test-2.imgix.net',
'test-3.imgix.net',
]
If you do indeed use sharding, you can choose a sharding strategy by setting
IMGIX_SHARD_STRATEGY in your settings.py file.
If you want to disable HTTPS support, put IMGIX_HTTPS = False in settings.py.
This template tag returns a string that represents the Imgix URL for the image.
"""
@register.simple_tag
def get_imgix(image_url, alias=None, wh=None, **kwargs):
    """Return the Imgix URL for *image_url*.

    Arguments:
    image_url -- source image URL passed on to Imgix
    alias     -- optional key into IMGIX_ALIASES (overrides inline kwargs)
    wh        -- optional "WxH" size string; 0 leaves a dimension unset
    kwargs    -- any Imgix parameters (https://www.imgix.com/docs/reference)

    Raises ImproperlyConfigured when IMGIX_DOMAINS is missing and
    TemplateSyntaxError for a malformed wh string.
    """
    try:
        domains = settings.IMGIX_DOMAINS
    except AttributeError:
        # Bug fix: was a bare `except:`, which also masked unrelated errors.
        raise ImproperlyConfigured(
            "IMGIX_DOMAINS not set in settings.py"
        )

    ### Build arguments
    args = {}

    # Get arguments from settings
    shard_strategy, sign_key, use_https, aliases,\
    format_detect, web_proxy = get_settings_variables()
    args['use_https'] = use_https
    if sign_key:
        args['sign_key'] = sign_key
    if shard_strategy:
        args['shard_strategy'] = shard_strategy

    # Imgix by default appends ?ixlib=python-<version_number> to the end
    # of the URL, but we don't want that.
    args['sign_with_library_version'] = False

    # Get builder instance
    builder = imgix.UrlBuilder(
        domains,
        **args
    )

    # Has the wh argument been passed? If yes,
    # set w and h arguments accordingly
    if wh:
        size = wh
        if isinstance(size, six.string_types):
            m = WH_PATTERN.match(size)
            if m:
                w = int(m.group(1))
                h = int(m.group(2))
                # A zero dimension means "leave unconstrained".
                if w > 0:
                    kwargs['w'] = w
                if h > 0:
                    kwargs['h'] = h
            else:
                raise TemplateSyntaxError(
                    "%r is not a valid size." % size
                )

    # Is format detection on? If yes, use the appropriate image format.
    arguments = get_kwargs(alias, aliases, kwargs)
    if format_detect and 'fm' not in arguments:
        fm = get_fm(image_url)
        if fm:
            arguments['fm'] = fm

    # Take only the relative path of the URL if the source is not a Web Proxy Source
    if not web_proxy:
        image_url = urlparse(image_url).path

    # Build the Imgix URL
    url = builder.create_url(image_url, arguments)
    return mark_safe(url)
| {
"repo_name": "pancentric/django-imgix",
"path": "django_imgix/templatetags/imgix_tags.py",
"copies": "1",
"size": "5095",
"license": "isc",
"hash": 8836277402736675000,
"line_mean": 24.9948979592,
"line_max": 84,
"alpha_frac": 0.6123650638,
"autogenerated": false,
"ratio": 3.7573746312684366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48697396950684363,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel Lindsley'
__license__ = 'New BSD'
__version__ = (1, 0, 0)
class VersionError(Exception):
    """Raised when a version string does not follow the semver format."""
def load_version(file_path):
    """
    Reads in the version file & pulls out the version info.

    Requires a ``file_path`` argument, which should be the path to the file.

    Example::

        >>> import rose
        >>> rose.load_version('VERSION')
        '1.0.0-final'
        >>> rose.load_version(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'VERSION'))
        '1.0.0-final'
    """
    # Bug fix: the original leaked the file handle; the context manager
    # guarantees it is closed even if readline() raises.
    with open(file_path, 'r') as version_file:
        return version_file.readline().strip()
def build_version(package_name, version_string):
    """
    Given a package name & version, return a tuple of the semver version bits.

    Example::

        >>> import rose
        >>> rose.build_version('rose', '1.0.0-final')
        (1, 0, 0, 'final')
    """
    parts = version_string.split('-')
    if len(parts) > 2:
        raise VersionError("%s releases must be in '<major>.<minor>.<patch>[-<release>]' format. Saw: %s" % (package_name, parts))
    nums = parts.pop(0).split('.')
    if len(nums) != 3:
        raise VersionError("%s releases must be in '<major>.<minor>.<patch>[-<release>]' format. Saw: %s" % (package_name, nums))
    result = [int(piece) for piece in nums]
    if parts:
        # Optional release tag (e.g. 'final', 'beta') becomes the 4th item.
        result.append(parts[0])
    return tuple(result)
| {
"repo_name": "toastdriven/rose",
"path": "rose.py",
"copies": "1",
"size": "1474",
"license": "bsd-3-clause",
"hash": 3286558683787704300,
"line_mean": 26.2962962963,
"line_max": 142,
"alpha_frac": 0.6031207598,
"autogenerated": false,
"ratio": 3.31981981981982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9420686839443374,
"avg_score": 0.00045074803528909814,
"num_lines": 54
} |
__author__ = 'daniel michels'
import json
import sys
import random
from enum import Enum
from threading import Timer
from gmusicapi import Mobileclient, Webclient
from mplayer import Player
from twisted.internet import reactor
from twisted.web import static, server
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, WampServerProtocol, exportRpc
PLAYLIST_EVENT_TRACK_ADDED = 'musicplayer/playlist/events/track_added_to_playlist'
PLAYLIST_EVENT_TRACK_REMOVED = 'musicplayer/playlist/events/track_removed_from_playlist'
PLAYLIST_EVENT_PLAYTYPE_CHANGED = 'musicplayer/playlist/events/playtype_changed'
TRACK_EVENT_PLAYBACK = 'musicplayer/events/playback'
class PlayType(Enum):
    """Describes the order in which the Playlist returns the tracks to play."""
    LINEAR = 1   # play in list order
    SHUFFLE = 2  # pick tracks at random
class MusicPlayer(object):
def __init__(self):
self.playlist = [] # Array of all tracks
self.playlist_id = 0 # Id of playlist
self.current_track_index = 0 # Index of current song
self.player = Player() # MPlayer instance
self.webclient = Webclient() # Client for WebInterface
self.mobileclient = Mobileclient() # Client for MobileInterface
self.timer = None # Timer to start next track
self.deviceid = 0 # DeviceId to use
self.playtype = PlayType.LINEAR # LINEAR or SHUFFLE
def login(self, username, password):
""" Login to Google Music.
Keyword arguments:
username -- the username
password -- the password
Returns:
True if successful else False
"""
# If either the web client or the mobile client failed to login return False
if not self.webclient.login(username, password) or not self.mobileclient.login(username, password):
return False
# Use first found devices as ID
devices = self.webclient.get_registered_devices();
# Convert HEX to INT
self.deviceid = int(devices[0]['id'], 16)
return True
def load_playlist(self, playlist_name):
# Load playlist
for playlist in self.mobileclient.get_all_user_playlist_contents():
if playlist['name'] == playlist_name:
for track_obj in playlist['tracks']:
track_obj['track']['id'] = track_obj['id']
self.playlist.append(track_obj['track'])
# Set playlist_id
self.playlist_id = playlist['id']
break;
# If playlist has not been found, create it
if self.playlist_id == 0:
self.playlist_id = self.mobileclient.create_playlist(playlist_name)
def add_track_to_playlist(self, track):
""" Append a track to the end of playlist
Keyword arguments:
track -- a dictionary containing the track informations
"""
track_id = self.mobileclient.add_songs_to_playlist(self.playlist_id, track['nid'])[0]
track['id'] = track_id
self.playlist.append(track)
# Notify all clients about the new track
factory.forwarder.dispatch(PLAYLIST_EVENT_TRACK_ADDED, json.dumps(track))
def remove_track_from_playlist(self, track_id):
""" Removes a track from the playlist
Keyword arguments:
track_id -- The id of the track to remove
"""
self.mobileclient.remove_entries_from_playlist(track_id)
index_to_remove = self._find_index_of_track_id(track_id)
del self.playlist[index_to_remove]
factory.forwarder.dispatch(PLAYLIST_EVENT_TRACK_REMOVED, track_id)
def play_track(self, track_id):
""" Play a track
Keyword arguments:
track_id -- Id of the track to play
"""
index_of_track = self._find_index_of_track_id(track_id)
track_to_play = self.playlist[index_of_track]
if track_to_play is not None:
# Request stream url from google music
stream_url = self.mobileclient.get_stream_url(track_to_play["storeId"], self.deviceid)
# Load stream url to mplayer
self.player.loadfile(stream_url)
# For some reason OSX needs to unpause mplayer
if sys.platform == "darwin":
self.player.pause()
# Set track
self.current_track_index = index_of_track
# Cancel previous timer
if self.timer is not None:
self.timer.cancel()
# How many minutes does the track last
track_duration = long(track_to_play["durationMillis"]) / 1000
# Set Timer to play next track when trackDuration is over
self.timer = Timer(track_duration, self.play_next_track)
self.timer.daemon = True
self.timer.start()
print "playing", track_to_play["artist"], " - ", track_to_play["title"], " : ", stream_url
# Fire event that a new track is playing
factory.forwarder.dispatch(TRACK_EVENT_PLAYBACK, json.dumps(track_to_play))
return True
else:
return False
def play_next_track(self):
""" Play the next track in the playlist.
Returns:
True or False
"""
if self.playtype == PlayType.LINEAR:
# Index of next track to play
next_track_index = self.current_track_index + 1
# Restart at index 0 if end of playlist is reached
if next_track_index >= len(self.playlist):
next_track_index = 0
elif self.playtype == PlayType.SHUFFLE:
# Index of next track to play at random
next_track_index = random.randrange(0, len(self.playlist), 1)
# Obtain the id of the next track to play
next_track_id = self.playlist[next_track_index]['id']
# Play track with that id
return self.play_track(next_track_id)
def play_previous_track(self):
""" Play the previous track in the playlist.
Returns:
True or False
"""
if self.playtype == PlayType.LINEAR:
# Index of previous track to play
previous_track_index = self.current_track_index - 1
# Contiune from the end of the playlist
if previous_track_index <= 0:
previous_track_index = len(self.playlist) - 1
elif self.playtype == PlayType.SHUFFLE:
# Index of the previous track is random
previous_track_index = random.randrange(0, len(self.playlist), 1)
# Obtain the id of the previous track to play
previous_track_id = self.playlist[previous_track_index]['id']
# Play track with that id
return self.play_track(previous_track_id)
def stop(self):
""" Stop playback.
"""
if self.timer is not None:
self.timer.cancel()
if self.player is not None:
self.player.stop()
def play(self):
""" Start playing current track
Returns:
True if track has been started. Else False
"""
current_track_id = self.playlist[self.current_track_index]
return self.play_track(current_track_id)
def _find_index_of_track_id(self, track_id):
index = 0
for track in self.playlist:
if track['id'] == track_id:
return index
index += 1
return None
class RpcServerProtocol(WampServerProtocol):
    """WAMP protocol: exposes player control as RPC endpoints and relays
    playlist/playback events to subscribed clients. All results are
    returned JSON-encoded."""

    @exportRpc
    def search(self, query):
        # All Access search, capped at 20 hits.
        result = musicplayer.mobileclient.search_all_access(query, 20)
        return json.dumps(result['song_hits'])

    @exportRpc
    def play(self, track_id):
        result = dict()
        result['status'] = musicplayer.play_track(track_id)
        return json.dumps(result)

    @exportRpc
    def get_playlist(self):
        return json.dumps(musicplayer.playlist)

    @exportRpc
    def play_next_track(self):
        result = dict()
        result['status'] = musicplayer.play_next_track()
        return json.dumps(result)

    @exportRpc
    def play_previous_track(self):
        result = dict()
        result['status'] = musicplayer.play_previous_track()
        return json.dumps(result)

    @exportRpc
    def stop(self):
        result = dict()
        # Always reports success regardless of player state.
        result['status'] = True
        # Actually stop player
        musicplayer.stop()
        return json.dumps(result)

    @exportRpc
    def startPlaying(self):
        result = dict()
        result['status'] = musicplayer.play()
        return json.dumps(result)

    @exportRpc
    def get_status(self):
        status = dict()
        try:
            # NOTE(review): musicplayer.playlist is a plain list -- it has no
            # get_current_track_id()/get_track() methods, so this always
            # raises and 'currentTrack' is never populated; the bare except
            # hides the failure. Confirm against MusicPlayer.
            current_track_id = musicplayer.playlist.get_current_track_id()
            current_track = musicplayer.playlist.get_track(current_track_id)
            status['currentTrack'] = current_track
        except:
            pass
        # NOTE(review): playtype is a PlayType enum member -- presumably not
        # JSON-serializable as-is; verify json.dumps handles it.
        status['playtype'] = musicplayer.playtype
        return json.dumps(status)

    @exportRpc
    def add_to_playlist(self, track_json):
        # Convert Json to dictionary
        track = json.loads(track_json)
        # Append track to playlist
        musicplayer.add_track_to_playlist(track)

    @exportRpc
    def remove_from_playlist(self, track_id):
        return musicplayer.remove_track_from_playlist(track_id)

    @exportRpc
    def set_playtype(self, playtype):
        musicplayer.playtype = playtype
        self.dispatch(PLAYLIST_EVENT_PLAYTYPE_CHANGED, playtype)

    def onSessionOpen(self):
        # Register pub/sub topics and the RPC namespace for this session.
        self.registerForPubSub(PLAYLIST_EVENT_TRACK_ADDED)
        self.registerForPubSub(PLAYLIST_EVENT_TRACK_REMOVED)
        self.registerForPubSub(PLAYLIST_EVENT_PLAYTYPE_CHANGED)
        self.registerForPubSub(TRACK_EVENT_PLAYBACK)
        self.registerForRpc(self, "musicplayer/music#")
        # Last-opened session becomes the event forwarder used by MusicPlayer.
        factory.forwarder = self
# Shared player instance used by the RPC protocol handlers above.
musicplayer = MusicPlayer()

if __name__ == '__main__':
    # Usage: Musicplayer.py <username> <password> <playlist_name>
    if len(sys.argv) <= 3:
        print "Usage: Musicplayer.py <username> <password> <playlist_name>"
        exit()
    username = sys.argv[1]
    password = sys.argv[2]
    playlist_name = sys.argv[3]
    if musicplayer.login(username, password):
        musicplayer.load_playlist(playlist_name)
        # WAMP endpoint for RPC/pub-sub plus a static web UI on :8080.
        factory = WampServerFactory("ws://localhost:9000")
        factory.protocol = RpcServerProtocol
        listenWS(factory)
        root = static.File("web/")
        site = server.Site(root)
        reactor.listenTCP(8080, site)
        reactor.run()
    else:
        print "login failed"
| {
"repo_name": "dmichels/gmusicplayer",
"path": "MusicPlayer.py",
"copies": "1",
"size": "10662",
"license": "apache-2.0",
"hash": 4591688780348879400,
"line_mean": 28.5346260388,
"line_max": 107,
"alpha_frac": 0.6078596886,
"autogenerated": false,
"ratio": 4.015819209039548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006470581981868259,
"num_lines": 361
} |
__author__ = 'Daniel'
from glue.models import *
from swampdragon.serializers.model_serializer import ModelSerializer
class UserSerializer(ModelSerializer):
    # Serializes django User; only id/username are published to clients.
    class Meta:
        model = User
        publish_fields = ('id', 'username')
class LocationSerializer(ModelSerializer):
    # Serializes glue.Location with default fields.
    class Meta:
        model = 'glue.Location'
class PlayerConfigSerializer(ModelSerializer):
    # Serializes glue.PlayerConfig with default fields.
    class Meta:
        model = 'glue.PlayerConfig'
class PlayerSerializer(ModelSerializer):
    # Serializes glue.Player; nests the related user via UserSerializer.
    class Meta:
        model = 'glue.Player'
    user = UserSerializer
class GameRoomSerializer(ModelSerializer):
    # Serializes glue.GameRoom; nests its players via PlayerSerializer.
    class Meta:
        model = 'glue.GameRoom'
    users = PlayerSerializer
class MobTypeSerializer(ModelSerializer):
    # Serializes glue.MobType with default fields.
    class Meta:
        model = 'glue.MobType'
class MobSerializer(ModelSerializer):
    # Serializes glue.Mob with default fields.
    class Meta:
        model = 'glue.Mob'
class StageSerializer(ModelSerializer):
    # Serializes glue.Stage with default fields.
    class Meta:
        model = 'glue.Stage'
class CurrentStageSerializer(ModelSerializer):
    # Serializes glue.CurrentStage with default fields.
    class Meta:
        model = 'glue.CurrentStage'
| {
"repo_name": "Valchris/tdoa",
"path": "glue/serializers.py",
"copies": "1",
"size": "1036",
"license": "mit",
"hash": 4544663005302629000,
"line_mean": 17.8363636364,
"line_max": 68,
"alpha_frac": 0.6891891892,
"autogenerated": false,
"ratio": 4.046875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5236064189199999,
"avg_score": null,
"num_lines": null
} |
__author__ = 'daniel'
from PyQt4 import QtGui, QtCore
class PlotSettingsDialog(QtGui.QDialog):
    """Modal dialog for editing axis labels, ranges, scaling and legend
    visibility of a plot-settings object. The object passed in as
    *plot_settings* is mutated in place when the user accepts."""

    def __init__(self, plot_settings=None, parent=None):
        super(PlotSettingsDialog, self).__init__(parent)
        self.layout = QtGui.QVBoxLayout()
        self.setLayout(self.layout)
        self.plot_settings = plot_settings
        form_layout = QtGui.QFormLayout()
        # plot settings
        # NOTE(review): this group box (and its layouts) is built but never
        # added to self.layout -- dead UI, presumably a leftover; confirm.
        plot_settings_box = QtGui.QGroupBox(title="Plot Settings")
        plot_settings_box_layout = QtGui.QVBoxLayout()
        plot_settings_box.setLayout(plot_settings_box_layout)
        plot_settings_box_form_layout = QtGui.QFormLayout()
        plot_settings_box_layout.addLayout(plot_settings_box_form_layout)
        # Axis label editors, pre-filled from the current settings.
        self.plot_settings_box_x_axis_label = QtGui.QLineEdit(text = self.plot_settings.x_axis_label)
        form_layout.addRow("X-Axis Label", self.plot_settings_box_x_axis_label)
        self.plot_settings_box_y_axis_label = QtGui.QLineEdit(text = self.plot_settings.y_axis_label)
        form_layout.addRow("Y-Axis Label", self.plot_settings_box_y_axis_label)
        self.plot_settings_box_legend = QtGui.QCheckBox()
        if self.plot_settings.legend:
            self.plot_settings_box_legend.setChecked(True)
        else:
            self.plot_settings_box_legend.setChecked(False)
        # Axis range and scaling editors; empty text means "unset".
        self.x_minimum = QtGui.QLineEdit(text = self.plot_settings.x_minimum)
        form_layout.addRow("X-Axis Minimum", self.x_minimum)
        self.x_maximum = QtGui.QLineEdit(text = self.plot_settings.x_maximum)
        form_layout.addRow("X-Axis Maximum", self.x_maximum)
        self.x_scaling = QtGui.QLineEdit(text = str(self.plot_settings.x_scaling))
        form_layout.addRow("X-Axis Scaling Factor", self.x_scaling)
        self.y_minimum = QtGui.QLineEdit(text = self.plot_settings.y_minimum)
        form_layout.addRow("Y-Axis Minimum", self.y_minimum)
        self.y_maximum = QtGui.QLineEdit(text = self.plot_settings.y_maximum)
        form_layout.addRow("Y-Axis Maximum", self.y_maximum)
        form_layout.addRow("Legend", self.plot_settings_box_legend)
        self.layout.addLayout(form_layout)
        # OK applies the values; Cancel discards them.
        button_box = QtGui.QDialogButtonBox()
        button_box.addButton("OK", QtGui.QDialogButtonBox.AcceptRole)
        button_box.addButton("Cancel", QtGui.QDialogButtonBox.RejectRole)
        button_box.rejected.connect(self.cancel)
        button_box.accepted.connect(self.apply)
        self.layout.addWidget(button_box)

    def apply(self):
        # Copy widget values back onto the settings object; empty numeric
        # fields become None. NOTE(review): float() raises ValueError on
        # non-numeric input -- no validation here; confirm callers handle it.
        self.plot_settings.x_axis_label = self.plot_settings_box_x_axis_label.text()
        self.plot_settings.y_axis_label = self.plot_settings_box_y_axis_label.text()
        self.plot_settings.x_minimum = float(self.x_minimum.text()) if self.x_minimum.text()!= "" else None
        self.plot_settings.x_maximum = float(self.x_maximum.text()) if self.x_maximum.text()!= "" else None
        self.plot_settings.y_minimum = float(self.y_minimum.text()) if self.y_minimum.text()!= "" else None
        self.plot_settings.y_maximum = float(self.y_maximum.text()) if self.y_maximum.text()!= "" else None
        self.plot_settings.x_scaling = float(self.x_scaling.text()) if self.x_scaling.text()!= "" else None
        self.plot_settings.legend = self.plot_settings_box_legend.isChecked()
        self.accept()

    def cancel(self):
        # Close without touching plot_settings.
        self.reject()
| {
"repo_name": "dmayer/time_trial",
"path": "time_trial_gui/gui/plot_settings_dialog.py",
"copies": "1",
"size": "3356",
"license": "mit",
"hash": 2132437134142994700,
"line_mean": 41.4810126582,
"line_max": 107,
"alpha_frac": 0.6698450536,
"autogenerated": false,
"ratio": 3.414038657171923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9553719125721459,
"avg_score": 0.0060329170100927206,
"num_lines": 79
} |
__author__ = 'Daniel'
from PySide.QtCore import *
from PySide.QtGui import *
from urllib.request import urlopen
import sys
class PenPropertiesDlg(QDialog):
    """Dialog for choosing pen width, beveled edges, and line style.

    NOTE(review): both a QDialogButtonBox and standalone OK/Cancel
    push buttons are created and added -- the dialog shows two rows of
    buttons wired to the same accept/reject slots; presumably one set
    should be removed.
    """

    def __init__(self, parent=None):
        super(PenPropertiesDlg, self).__init__(parent)
        widthLabel = QLabel("&Width:")
        self.widthSpinBox = QSpinBox()
        widthLabel.setBuddy(self.widthSpinBox)
        self.widthSpinBox.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        self.widthSpinBox.setRange(0, 24)
        self.beveledCheckBox = QCheckBox("&Beveled edges")
        styleLabel = QLabel("&Style:")
        self.styleComboBox = QComboBox()
        # Set buddy so that ampersands becomes keyboard shortcut
        styleLabel.setBuddy(self.styleComboBox)
        self.styleComboBox.addItems(["Solid", "Dashed", "Dotted", "DashDotted", "DashDotDotted"])
        okButton = QPushButton("&OK")
        cancelButton = QPushButton("Cancel")
        # To give a native feel, use the following
        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        # Set default behaviour when keyboard has focus
        # But we lose the ability to setbuddy.
        buttonBox.button(QDialogButtonBox.Ok).setDefault(True)
        self.connect(buttonBox, SIGNAL("accepted()"), self, SLOT("accept()"))
        self.connect(buttonBox, SIGNAL("rejected()"), self, SLOT("reject()"))
        buttonLayout = QHBoxLayout()
        buttonLayout.addStretch()
        buttonLayout.addWidget(okButton)
        buttonLayout.addWidget(cancelButton)
        layout = QGridLayout()
        layout.addWidget(widthLabel, 0, 0)
        layout.addWidget(self.widthSpinBox, 0, 1)
        layout.addWidget(self.beveledCheckBox, 0, 2)
        layout.addWidget(styleLabel, 1, 0)
        layout.addWidget(self.styleComboBox, 1, 1, 1, 2)
        layout.addLayout(buttonLayout, 2, 0, 1, 3)
        layout.addWidget(buttonBox, 3, 0, 1, 3)
        self.setLayout(layout)
        self.connect(okButton, SIGNAL("clicked()"),self, SLOT("accept()"))
        self.connect(cancelButton, SIGNAL("clicked()"),self, SLOT("reject()"))
        self.setWindowTitle("Pen Properties")
# Module-level demo: creates the app and shows the dialog immediately,
# blocking in the event loop. Runs on import as well as execution.
app = QApplication(sys.argv)
form = PenPropertiesDlg()
form.show()
app.exec_()
# Example usage
def setPenProperties(self):
    # NOTE(review): example method intended to live on a widget class with
    # width/beveled/style attributes -- at module level `self` is unbound
    # and this function is never called here.
    dialog = PenPropertiesDlg(self)
    dialog.widthSpinBox.setValue(self.width)
    dialog.beveledCheckBox.setChecked(self.beveled)
    dialog.styleComboBox.setCurrentIndex(
        dialog.styleComboBox.findText(self.style))
    # exec_() returns truthy only when the user accepted the dialog.
    if dialog.exec_():
        self.width = dialog.widthSpinBox.value()
        self.beveled = dialog.beveledCheckBox.isChecked()
        self.style = (dialog.styleComboBox.currentText())
        self.updateData()
"repo_name": "daniellowtw/Learning",
"path": "Python GUI and QT/Introduction/pen_dialog.py",
"copies": "1",
"size": "2707",
"license": "cc0-1.0",
"hash": 6646260524867751000,
"line_mean": 38.8235294118,
"line_max": 97,
"alpha_frac": 0.6727004064,
"autogenerated": false,
"ratio": 3.8397163120567375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5012416718456737,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
import simplejson as json
from django.contrib.auth import logout, authenticate, login
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseServerError, HttpResponseForbidden
from rest_framework.renderers import JSONRenderer
from glue.models import *
from glue.serializers import *
from datetime import datetime, timedelta
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """

    def __init__(self, data, **kwargs):
        # Render through DRF's JSONRenderer so serializer output is honoured.
        rendered = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(rendered, **kwargs)
def user_data(request):
    """Return the signed-in user's username/email as JSON, or JSON false
    when the request is anonymous."""
    if not request.user.is_authenticated():
        return JSONResponse(False)
    return JSONResponse({'username': request.user.username,
                         'email': request.user.email})
def user_signout(request):
    """End the current session and reply with an empty 200 response."""
    logout(request)
    return HttpResponse()
def user_signin(request):
    """Authenticate the credentials in a JSON POST body and start a session.

    Expects ``{"username": ..., "password": ...}``.  Returns the user's
    public data on success, 403 for bad/inactive credentials or a malformed
    body, and 405 for any method other than POST.
    """
    if request.method != "POST":
        # FIX: only POST carries credentials — advertise the allowed method
        # (405) instead of replying 403 as if the credentials were wrong.
        return HttpResponseNotAllowed(['POST'])
    data = json.loads(request.body)
    if 'username' in data and 'password' in data:
        user = authenticate(username=data['username'], password=data['password'])
        # Inactive accounts are rejected exactly like unknown credentials.
        if user is not None and user.is_active:
            login(request, user)
            return JSONResponse({'username': user.username,
                                 'email': user.email,
                                 'authorized': True})
    # Unknown user, wrong password, inactive account, or missing fields.
    return HttpResponseForbidden()
def user_register(request):
    """Create a new user from a JSON POST body, then sign them in.

    Required fields: username, email, password.  Optional: firstName,
    lastName.  Non-POST requests get a 405 response.
    """
    if request.method != "POST":
        # BUG FIX: HttpResponseNotAllowed requires the permitted-methods
        # iterable; calling it with no arguments raised TypeError (a 500)
        # instead of returning the intended 405.
        return HttpResponseNotAllowed(['POST'])
    data = json.loads(request.body)
    user = User.objects.create(username=data['username'], email=data['email'])
    user.set_password(data['password'])  # hashes rather than storing plaintext
    # Optional fields
    if 'firstName' in data:
        user.first_name = data['firstName']
    if 'lastName' in data:
        user.last_name = data['lastName']
    user.save()
    # Reuse the sign-in view so the response shape matches a normal login.
    return user_signin(request)
| {
"repo_name": "Valchris/AngularJS-Django-Template",
"path": "glue/api.py",
"copies": "1",
"size": "2024",
"license": "mit",
"hash": -7137291288022575000,
"line_mean": 30.625,
"line_max": 110,
"alpha_frac": 0.648715415,
"autogenerated": false,
"ratio": 4.343347639484978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009610426181998824,
"num_lines": 64
} |
__author__ = 'Daniel'
from enum import Enum
class Operation(Enum):
    """The four arithmetic operations; values index into the symbol table."""

    ADDITION, SUBTRACTION, MULTIPLICATION, DIVISION = range(4)
class Question:
    """A single arithmetic question ("op1 <op> op2") with its answer and a
    history of response times for correct and wrong attempts.

    Division questions are made exact by pre-multiplying op1 by op2 before
    the query is built, so the quotient is always the original op1.
    """

    # Answer functions indexed by Operation value (+, -, *, /).  FIX: this
    # replaces eval() of the generated query string, which is unsafe and
    # needlessly slow; true division keeps the float result type that
    # eval produced for DIVISION.
    _FUNCS = (
        lambda a, b: a + b,
        lambda a, b: a - b,
        lambda a, b: a * b,
        lambda a, b: a / b,
    )

    def __hash__(self):
        # 2**a * 3**b encodes the operands' digit counts so questions whose
        # concatenated digits coincide (e.g. 1,23 vs 12,3) still differ.
        # NOTE(review): int() fails if op1/op2 are negative ('-' inside the
        # string) — assumes non-negative operands; confirm with generators.
        a = len(str(self.op1))
        b = len(str(self.op2))
        return int(str(2 ** a * 3 ** b) + str(self.op1) + str(self.operation_value) + str(self.op2))

    def __eq__(self, other):
        # Equal when both operands and the operation value match.  A
        # non-Question `other` raises AttributeError (original behaviour).
        a = other.op1 == self.op1
        b = other.op2 == self.op2
        c = other.operation_value == self.operation_value
        return all([a, b, c])

    def __init__(self, op1, operation_type_value, op2):
        """
        :param op1: int operand (the quotient for DIVISION questions)
        :param operation_type_value: int value of an Operation member
        :param op2: int operand
        """
        self.op1 = op1
        self.op2 = op2
        self.operation_value = operation_type_value
        if operation_type_value == Operation.DIVISION.value:
            # Pre-multiply so the division is exact and the answer is op1.
            op1 *= op2
        self.query = self.get_question_string(op1, operation_type_value, op2)
        # Compute the answer directly instead of eval()-ing the query string.
        self.answer = self._FUNCS[operation_type_value](op1, op2)
        self.correct_times = []  # response times for correct answers
        self.wrong_times = []    # response times for wrong answers

    def add_correct_time(self, time):
        self.correct_times.append(time)

    def add_wrong_time(self, time):
        self.wrong_times.append(time)

    # Render the question as e.g. "3 + 4".
    @staticmethod
    def get_question_string(op1, op_type_value, op2):
        symbol = "+-*/"
        return "%i %s %i" % (op1, symbol[op_type_value], op2)
class AbstractQuestionGenerator:
    """Interface for question generators.

    Subclasses must override gen_next_question.
    """

    def __init__(self):
        pass

    def gen_next_question(self):
        # Deliberately unimplemented: concrete generators supply this.
        raise NotImplementedError
| {
"repo_name": "daniellowtw/MentalMaths",
"path": "Question.py",
"copies": "1",
"size": "1605",
"license": "mit",
"hash": -8934316821493910000,
"line_mean": 25.75,
"line_max": 100,
"alpha_frac": 0.5769470405,
"autogenerated": false,
"ratio": 3.422174840085288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499121880585288,
"avg_score": null,
"num_lines": null
} |
__author__ = 'daniel'
from PyQt4 import QtGui, QtCore
class RacerEditDialog(QtGui.QDialog):
    """Modal dialog for editing a racer's name, hostname and location.

    The racer object passed in is mutated in place when the user saves.
    """

    def __init__(self, racer, parent=None, flags=QtCore.Qt.Dialog):
        QtGui.QDialog.__init__(self, parent, flags)
        self.racer = racer

        self.layout = QtGui.QVBoxLayout()
        self.setLayout(self.layout)

        # One labelled line-edit per editable racer field.
        fields = QtGui.QFormLayout()
        self.layout.addLayout(fields)
        self.name = QtGui.QLineEdit(text=racer.name)
        fields.addRow("Name", self.name)
        self.hostname = QtGui.QLineEdit(text=racer.hostname)
        fields.addRow("Hostname", self.hostname)
        self.location = QtGui.QLineEdit(text=racer.location)
        fields.addRow("Location", self.location)

        # Save / Cancel button row.
        buttons = QtGui.QHBoxLayout()
        save_button = QtGui.QPushButton(text="Save")
        save_button.released.connect(self.save)
        buttons.addWidget(save_button)
        cancel_button = QtGui.QPushButton(text="Cancel")
        cancel_button.released.connect(self.reject)
        buttons.addWidget(cancel_button)
        self.layout.addLayout(buttons)

    def save(self):
        """Copy the edited text back onto the racer and accept the dialog."""
        self.racer.name = self.name.text()
        self.racer.hostname = self.hostname.text()
        self.racer.location = self.location.text()
        self.accept()
| {
"repo_name": "dmayer/time_trial",
"path": "time_trial_gui/gui/racer_edit_dialog.py",
"copies": "1",
"size": "1280",
"license": "mit",
"hash": 5720608004826632000,
"line_mean": 31,
"line_max": 71,
"alpha_frac": 0.6453125,
"autogenerated": false,
"ratio": 3.710144927536232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9715506528275502,
"avg_score": 0.02799017985214609,
"num_lines": 40
} |
__author__ = 'daniel'
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignal
class TrialDetailsWidget(QtGui.QWidget):
    """Read-only panel showing a trial's type, name and description.

    Subclasses append extra rows to self.box_layout.
    """

    def __init__(self, parent=None):
        super(TrialDetailsWidget, self).__init__(parent)
        self.layout = QtGui.QVBoxLayout()
        self.setLayout(self.layout)

        self.box = QtGui.QGroupBox("Trial Settings")
        self.layout.addWidget(self.box)
        self.box_layout = QtGui.QFormLayout()
        self.box_layout.setFormAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        self.box.setLayout(self.box_layout)

        # One empty label per attribute; the owner fills the text in later.
        for attr_name, caption in (("type", "<b>Type</b>"),
                                   ("name", "<b>Name</b>"),
                                   ("description", "<b>Description</b>")):
            label = QtGui.QLabel("")
            setattr(self, attr_name, label)
            self.box_layout.addRow(caption, label)
class EchoTrialDetailsWidget(TrialDetailsWidget):
    """Trial details extended with the echo trial's delay row."""

    def __init__(self, parent=None):
        super(EchoTrialDetailsWidget, self).__init__(parent)
        delay_label = QtGui.QLabel("")
        self.delay = delay_label
        self.box_layout.addRow("<b>Delay (ns)</b>", delay_label)
class HttpTrialDetailsWidget(TrialDetailsWidget):
    """Trial details extended with the HTTP trial's request-URL row."""

    def __init__(self, parent=None):
        super(HttpTrialDetailsWidget, self).__init__(parent)
        url_label = QtGui.QLabel("")
        self.request_url = url_label
        self.box_layout.addRow("<b>Request URL</b>", url_label)
class RacerDetailsWidget(QtGui.QWidget):
    """Read-only panel showing a racer's name, core ID and real-time flag."""

    def __init__(self, parent=None):
        super(RacerDetailsWidget, self).__init__(parent)
        self.layout = QtGui.QVBoxLayout()
        self.setLayout(self.layout)

        self.box = QtGui.QGroupBox("Racer Settings")
        self.layout.addWidget(self.box)
        self.box_layout = QtGui.QFormLayout()
        self.box_layout.setFormAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        self.box.setLayout(self.box_layout)

        # One empty label per racer attribute; the owner fills the text in.
        for attr_name, caption in (("racer", "<b>Racer</b>"),
                                   ("core_id", "<b>Core ID</b>"),
                                   ("real_time", "<b>Real-Time</b>")):
            label = QtGui.QLabel("")
            setattr(self, attr_name, label)
            self.box_layout.addRow(caption, label)
class TrialStatusWidget(QtGui.QWidget):
    """Displays a trial's start/end/job-status labels plus control buttons.

    The buttons do not act on the trial directly: each press re-emits one
    of the signals below, which the owning view connects to real actions.
    """

    # Re-emitted button presses (see the wiring in __init__).
    trial_started = pyqtSignal()
    trial_stopped = pyqtSignal()
    trial_refreshed = pyqtSignal()
    trial_edit = pyqtSignal()

    def __init__(self, parent=None):
        super(TrialStatusWidget, self).__init__(parent)
        self.layout = QtGui.QVBoxLayout()
        self.setLayout(self.layout)

        # "Trial Status" group box: a form of labels spanning row 0 of a
        # grid, with the Start / Cancel buttons side by side on row 1.
        self.box = QtGui.QGroupBox("Trial Status")
        self.super_box_layout = QtGui.QGridLayout()
        self.box_layout = QtGui.QFormLayout()
        self.box_layout.setFormAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        self.box.setLayout(self.super_box_layout)
        self.super_box_layout.addLayout(self.box_layout,0,0,1,2)
        self.layout.addWidget(self.box)

        # Read-only status labels; their text is filled in by the owner.
        self.start = QtGui.QLabel("")
        self.box_layout.addRow("<b>Start</b>", self.start)
        self.end = QtGui.QLabel("")
        self.box_layout.addRow("<b>End</b>", self.end)
        self.job_status = QtGui.QLabel("")
        self.box_layout.addRow("<b>Job Status</b>", self.job_status)

        # All buttons start disabled; presumably the owner enables them once
        # a trial is selected — confirm against the view using this widget.
        self.start_trial_button = QtGui.QPushButton("Start")
        self.start_trial_button.setEnabled(False)
        self.start_trial_button.released.connect(self.trial_started.emit)
        self.super_box_layout.addWidget(self.start_trial_button,1,0)

        self.stop_trial_button = QtGui.QPushButton("Cancel and Reset")
        self.stop_trial_button.setEnabled(False)
        self.stop_trial_button.released.connect(self.trial_stopped.emit)
        self.super_box_layout.addWidget(self.stop_trial_button,1,1)

        # Refresh and Edit live outside the group box, stacked beneath it.
        self.refresh_trial_button = QtGui.QPushButton("Refresh")
        self.refresh_trial_button.setEnabled(False)
        self.refresh_trial_button.released.connect(self.trial_refreshed.emit)
        self.layout.addWidget(self.refresh_trial_button)

        self.edit_trial_button = QtGui.QPushButton("Edit")
        self.edit_trial_button.setEnabled(False)
        self.edit_trial_button.released.connect(self.trial_edit.emit)
        self.layout.addWidget(self.edit_trial_button)
| {
"repo_name": "dmayer/time_trial",
"path": "time_trial_gui/gui/trial_detail_widget.py",
"copies": "1",
"size": "4254",
"license": "mit",
"hash": 7482381196374320000,
"line_mean": 30.2794117647,
"line_max": 82,
"alpha_frac": 0.6525622943,
"autogenerated": false,
"ratio": 3.507007419620775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4659569713920775,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.